powerpc/e6500: kexec: Handle hardware threads
author     Scott Wood <scottwood@freescale.com>
           Wed, 7 Oct 2015 03:48:12 +0000 (22:48 -0500)
committer  Scott Wood <scottwood@freescale.com>
           Tue, 27 Oct 2015 23:13:25 +0000 (18:13 -0500)
The new kernel will expect secondary hardware threads to be
disabled, not spinning, so disable them before entering it.

Signed-off-by: Scott Wood <scottwood@freescale.com>
arch/powerpc/kernel/head_64.S
arch/powerpc/platforms/85xx/smp.c
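
Editorial note: the mechanism this patch relies on is the e6500
thread enable SPRs.  Writing bit n to TENC (thread enable clear)
requests that hardware thread n of the core be disabled, and the
matching bit in TENSR (thread enable status) drops once the thread
has actually stopped.  A minimal sketch of that primitive, with a
hypothetical helper name (the patch open-codes this at the end of
mpc85xx_smp_kexec_cpu_down):

	#include <asm/reg.h>		/* mtspr(), mfspr(), SPRN_TENC, SPRN_TENSR */
	#include <asm/processor.h>	/* cpu_relax() */

	/* Hypothetical helper: disable the hardware thread(s) in @threadbit. */
	static void e6500_disable_threadbit(unsigned long threadbit)
	{
		mtspr(SPRN_TENC, threadbit);		/* request the disable */
		while (mfspr(SPRN_TENSR) & threadbit)	/* wait for it to land */
			cpu_relax();
	}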

diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index d48125d0c0488f17729b02e735daac3b1a1173b8..8b2bf0d3b7d1bc4d5fbb342172b0cd63bbdee6fd 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -182,6 +182,8 @@ exception_marker:
 
 #ifdef CONFIG_PPC_BOOK3E
 _GLOBAL(fsl_secondary_thread_init)
+       mfspr   r4,SPRN_BUCSR
+
        /* Enable branch prediction */
        lis     r3,BUCSR_INIT@h
        ori     r3,r3,BUCSR_INIT@l
@@ -196,10 +198,24 @@ _GLOBAL(fsl_secondary_thread_init)
         * number.  There are two threads per core, so shift everything
         * but the low bit right by two bits so that the cpu numbering is
         * continuous.
+        *
+        * If the old value of BUCSR is non-zero, this thread has run
+        * before.  Thus, we assume we are coming from kexec or a similar
+        * scenario, and PIR is already set to the correct value.  This
+        * is a bit of a hack, but there are limited opportunities for
+        * getting information into the thread and the alternatives
+        * seemed like they'd be overkill.  We can't tell just by looking
+        * at the old PIR value which state it's in, since the same value
+        * could be valid for one thread out of reset and for a different
+        * thread in Linux.
         */
+
        mfspr   r3, SPRN_PIR
+       cmpwi   r4,0
+       bne     1f
        rlwimi  r3, r3, 30, 2, 30
        mtspr   SPRN_PIR, r3
+1:
 #endif
 
 _GLOBAL(generic_secondary_thread_init)
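
Editorial note: the rlwimi above is dense, so here is a C rendering of
the PIR fixup (not part of the patch; linear_pir is a made-up name, and
this assumes PIR values small enough that the rotated-out high bits do
not matter).  Out of reset the thread number sits in the low bits of
PIR with the core number above it; Linux wants the linear numbering
core * 2 + thread, and a non-zero saved BUCSR means the thread has run
before, so PIR is assumed to already be linear:

	#include <linux/types.h>

	/* Editorial sketch of fsl_secondary_thread_init's PIR handling. */
	static u32 linear_pir(u32 pir, u32 old_bucsr)
	{
		if (old_bucsr != 0)	/* ran before: kexec case, leave PIR alone */
			return pir;

		/* rlwimi r3,r3,30,2,30: keep the low (thread) bit and
		 * shift everything else right by two bits. */
		return ((pir >> 2) & ~1u) | (pir & 1);
	}
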
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index 2e46684353078aecd67203984cd2ca31ba709905..712764f3dcbcfd299d4aaf522694c5b66f2262fd 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -374,9 +374,55 @@ static void mpc85xx_smp_kexec_down(void *arg)
 #else
 void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
 {
+       int cpu = smp_processor_id();
+       int sibling = cpu_last_thread_sibling(cpu);
+       bool notified = false;
+       int disable_cpu;
+       int disable_threadbit = 0;
+       long start = mftb();
+       long now;
+
        local_irq_disable();
        hard_irq_disable();
        mpic_teardown_this_cpu(secondary);
+
+       if (cpu == crashing_cpu && cpu_thread_in_core(cpu) != 0) {
+               /*
+                * We enter the crash kernel on whatever cpu crashed,
+                * even if it's a secondary thread.  If that's the case,
+                * disable the corresponding primary thread.
+                */
+               disable_threadbit = 1;
+               disable_cpu = cpu_first_thread_sibling(cpu);
+       } else if (sibling != crashing_cpu &&
+                  cpu_thread_in_core(cpu) == 0 &&
+                  cpu_thread_in_core(sibling) != 0) {
+               disable_threadbit = 2;
+               disable_cpu = sibling;
+       }
+
+       if (disable_threadbit) {
+               while (paca[disable_cpu].kexec_state < KEXEC_STATE_REAL_MODE) {
+                       barrier();
+                       now = mftb();
+                       if (!notified && now - start > 1000000) {
+                               pr_info("%s/%d: waiting for cpu %d to enter KEXEC_STATE_REAL_MODE (%d)\n",
+                                       __func__, smp_processor_id(),
+                                       disable_cpu,
+                                       paca[disable_cpu].kexec_state);
+                               notified = true;
+                       }
+               }
+
+               if (notified) {
+                       pr_info("%s: cpu %d done waiting\n",
+                               __func__, disable_cpu);
+               }
+
+               mtspr(SPRN_TENC, disable_threadbit);
+               while (mfspr(SPRN_TENSR) & disable_threadbit)
+                       cpu_relax();
+       }
 }
 #endif
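
Editorial note: the thread-bit selection in mpc85xx_smp_kexec_cpu_down
is compact, so here is a commented restatement (a sketch using the same
kernel helpers; pick_disable_threadbit is a hypothetical name).  TEN
bit 0 is the primary thread and bit 1 the secondary thread of the
current core, e6500 having two threads per core:

	#include <asm/cputhreads.h>	/* cpu_thread_in_core() and sibling helpers */

	/* Which hardware thread of this core must be disabled before kexec? */
	static int pick_disable_threadbit(int cpu, int crashing_cpu)
	{
		int sibling = cpu_last_thread_sibling(cpu);

		/* The crash kernel is entered on the secondary thread:
		 * its primary thread must be stopped instead. */
		if (cpu == crashing_cpu && cpu_thread_in_core(cpu) != 0)
			return 1;

		/* Normal case: the primary thread stops its secondary
		 * sibling, unless the sibling is the crashing cpu or
		 * there is no enabled secondary thread. */
		if (sibling != crashing_cpu &&
		    cpu_thread_in_core(cpu) == 0 &&
		    cpu_thread_in_core(sibling) != 0)
			return 2;

		return 0;	/* nothing for this cpu to disable */
	}

The wait on paca[disable_cpu].kexec_state before the TENC write appears
to ensure the victim thread has quiesced into the kexec real-mode spin
loop before it is forcibly stopped; the timebase comparison merely
limits the "waiting for cpu" diagnostic to a single notice after about
a million timebase ticks.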