/*
 * arch/powerpc/platforms/85xx/smp.c
 *
 * Author: Andy Fleming <afleming@freescale.com>
 *         Kumar Gala <galak@kernel.crashing.org>
 *
 * Copyright 2006-2008, 2011-2012, 2015 Freescale Semiconductor Inc.
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/kexec.h>
#include <linux/highmem.h>
#include <linux/cpu.h>
#include <linux/fsl/guts.h>

#include <asm/machdep.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/mpic.h>
#include <asm/cacheflush.h>
#include <asm/dbell.h>
#include <asm/code-patching.h>
#include <asm/cputhreads.h>
#include <asm/fsl_pm.h>

#include <sysdev/fsl_soc.h>
#include <sysdev/mpic.h>
#include "smp.h"

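/*
 * ePAPR spin table.  Each waiting secondary core parks on one of these,
 * spinning until addr_l changes from 1 to the physical address it should
 * jump to.  The layout is fixed by the ePAPR specification and must match
 * what the boot loader provides.
 */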
struct epapr_spin_table {
	u32	addr_h;
	u32	addr_l;
	u32	r3_h;
	u32	r3_l;
	u32	reserved;
	u32	pir;
};

#ifdef CONFIG_HOTPLUG_CPU
static u64 timebase;
static int tb_req;
static int tb_valid;

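/*
 * Timebase hand-off from an online CPU to one coming out of the dead
 * state.  The giver waits for tb_req, freezes the timebase, samples it,
 * and publishes the value via tb_valid; the taker copies it into its own
 * TB registers and clears tb_valid, after which the giver unfreezes the
 * timebase.  Both sides run with hard interrupts disabled.
 */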
static void mpc85xx_give_timebase(void)
{
	unsigned long flags;

	local_irq_save(flags);
	hard_irq_disable();

	while (!tb_req)
		barrier();
	tb_req = 0;

	qoriq_pm_ops->freeze_time_base(true);
#ifdef CONFIG_PPC64
	/*
	 * e5500/e6500 have a workaround for erratum A-006958 in place
	 * that will reread the timebase until TBL is non-zero.
	 * That would be a bad thing when the timebase is frozen.
	 *
	 * Thus, we read it manually, and instead of checking that
	 * TBL is non-zero, we ensure that TB does not change.  We don't
	 * do that for the main mftb implementation, because it requires
	 * a scratch register.
	 */
	{
		u64 prev;

		asm volatile("mfspr %0, %1" : "=r" (timebase) :
			     "i" (SPRN_TBRL));

		do {
			prev = timebase;
			asm volatile("mfspr %0, %1" : "=r" (timebase) :
				     "i" (SPRN_TBRL));
		} while (prev != timebase);
	}
#else
	timebase = get_tb();
#endif
	mb();
	tb_valid = 1;

	while (tb_valid)
		barrier();

	qoriq_pm_ops->freeze_time_base(false);

	local_irq_restore(flags);
}

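/* Counterpart to mpc85xx_give_timebase(), run on the CPU coming back up. */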
static void mpc85xx_take_timebase(void)
{
	unsigned long flags;

	local_irq_save(flags);
	hard_irq_disable();

	tb_req = 1;
	while (!tb_valid)
		barrier();

	set_tb(timebase >> 32, timebase & 0xffffffff);
	isync();
	tb_valid = 0;

	local_irq_restore(flags);
}

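/*
 * Executed by the dying CPU itself: mask its interrupts, disable the
 * timers (TCR) and clear any pending timer interrupts (TSR), flush its
 * caches, then drop into the platform's low-power dead state via
 * qoriq_pm_ops->cpu_die().
 */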
static void smp_85xx_mach_cpu_die(void)
{
	unsigned int cpu = smp_processor_id();

	local_irq_disable();
	hard_irq_disable();
	/* mask all irqs to prevent cpu wakeup */
	qoriq_pm_ops->irq_mask(cpu);

	idle_task_exit();

	mtspr(SPRN_TCR, 0);
	mtspr(SPRN_TSR, mfspr(SPRN_TSR));

	generic_set_cpu_dead(cpu);

	cur_cpu_spec->cpu_down_flush();

	qoriq_pm_ops->cpu_die(cpu);

	while (1)
		;
}

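/*
 * Called from a surviving CPU: poll up to ~10 seconds (500 x 20 ms) for
 * the dying CPU to mark itself dead before giving up.
 */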
static void qoriq_cpu_kill(unsigned int cpu)
{
	int i;

	for (i = 0; i < 500; i++) {
		if (is_cpu_dead(cpu)) {
#ifdef CONFIG_PPC64
			paca[cpu].cpu_start = 0;
#endif
			return;
		}
		msleep(20);
	}
	pr_err("CPU%d didn't die...\n", cpu);
}
#endif

/*
 * To keep it compatible with old boot programs which use a
 * cache-inhibited spin table, we need to flush the cache before
 * accessing the spin table to invalidate any stale data.  We also
 * need to flush the cache after writing to the spin table to push
 * data out.
 */
static inline void flush_spin_table(void *spin_table)
{
	flush_dcache_range((ulong)spin_table,
		(ulong)spin_table + sizeof(struct epapr_spin_table));
}

static inline u32 read_spin_table_addr_l(void *spin_table)
{
	flush_dcache_range((ulong)spin_table,
		(ulong)spin_table + sizeof(struct epapr_spin_table));
	return in_be32(&((struct epapr_spin_table *)spin_table)->addr_l);
}

#ifdef CONFIG_PPC64
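/*
 * Runs on the target core's already-online sibling thread (via
 * smp_call_function_single()).  Loads the entry address from the
 * fsl_secondary_thread_init function descriptor and starts the other
 * hardware thread there.
 */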
static void wake_hw_thread(void *info)
{
	void fsl_secondary_thread_init(void);
	unsigned long inia;
	int cpu = *(const int *)info;

	inia = *(unsigned long *)fsl_secondary_thread_init;
	book3e_start_thread(cpu_thread_in_core(cpu), inia);
}
#endif

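/*
 * Release one core from its boot-loader spin loop: locate the spin table
 * via the "cpu-release-addr" device-tree property, reset the core if it
 * is not already spinning, then write its PIR and the kernel entry
 * address into the table.
 */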
static int smp_85xx_start_cpu(int cpu)
{
	int ret = 0;
	struct device_node *np;
	const u64 *cpu_rel_addr;
	unsigned long flags;
	int ioremappable;
	int hw_cpu = get_hard_smp_processor_id(cpu);
	struct epapr_spin_table __iomem *spin_table;

	np = of_get_cpu_node(cpu, NULL);
	cpu_rel_addr = of_get_property(np, "cpu-release-addr", NULL);
	if (!cpu_rel_addr) {
		pr_err("No cpu-release-addr for cpu %d\n", cpu);
		return -ENOENT;
	}

	/*
	 * A secondary core could be in a spinloop in the bootpage
	 * (0xfffff000), somewhere in highmem, or somewhere in lowmem.
	 * The bootpage and highmem can be accessed via ioremap(), but
	 * we need to directly access the spinloop if it's in lowmem.
	 */
	ioremappable = *cpu_rel_addr > virt_to_phys(high_memory);

	/* Map the spin table */
	if (ioremappable)
		spin_table = ioremap_prot(*cpu_rel_addr,
			sizeof(struct epapr_spin_table), _PAGE_COHERENT);
	else
		spin_table = phys_to_virt(*cpu_rel_addr);

	local_irq_save(flags);
	hard_irq_disable();

	if (qoriq_pm_ops)
		qoriq_pm_ops->cpu_up_prepare(cpu);

	/* if cpu is not spinning, reset it */
	if (read_spin_table_addr_l(spin_table) != 1) {
		/*
		 * We don't set the BPTR register here since it already points
		 * to the boot page properly.
		 */
		mpic_reset_core(cpu);

		/*
		 * wait until core is ready...
		 * We need to invalidate the stale data, in case the boot
		 * loader uses a cache-inhibited spin table.
		 */
		if (!spin_event_timeout(
				read_spin_table_addr_l(spin_table) == 1,
				10000, 100)) {
			pr_err("timeout waiting for cpu %d to reset\n",
				hw_cpu);
			ret = -EAGAIN;
			goto err;
		}
	}

	flush_spin_table(spin_table);
	out_be32(&spin_table->pir, hw_cpu);
#ifdef CONFIG_PPC64
	out_be64((u64 *)(&spin_table->addr_h),
		__pa(ppc_function_entry(generic_secondary_smp_init)));
#else
	out_be32(&spin_table->addr_l, __pa(__early_start));
#endif
	flush_spin_table(spin_table);
err:
	local_irq_restore(flags);

	if (ioremappable)
		iounmap(spin_table);

	return ret;
}

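/*
 * Top-level kick handler.  On 64-bit parts with two hardware threads per
 * core (e6500-style), the target may be a thread: if its sibling is
 * already online the sibling starts it directly; otherwise the whole
 * core is released through the spin table.
 */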
static int smp_85xx_kick_cpu(int nr)
{
	int ret = 0;
#ifdef CONFIG_PPC64
	int primary = nr;
#endif

	WARN_ON(nr < 0 || nr >= num_possible_cpus());

	pr_debug("kick CPU #%d\n", nr);

#ifdef CONFIG_PPC64
	if (threads_per_core == 2) {
		if (WARN_ON_ONCE(!cpu_has_feature(CPU_FTR_SMT)))
			return -ENOENT;

		booting_thread_hwid = cpu_thread_in_core(nr);
		primary = cpu_first_thread_sibling(nr);

		if (qoriq_pm_ops)
			qoriq_pm_ops->cpu_up_prepare(nr);

		/*
		 * If either thread in the core is online, use it to start
		 * the other.
		 */
		if (cpu_online(primary)) {
			smp_call_function_single(primary,
					wake_hw_thread, &nr, 1);
			goto done;
		} else if (cpu_online(primary + 1)) {
			smp_call_function_single(primary + 1,
					wake_hw_thread, &nr, 1);
			goto done;
		}

		/*
		 * If we get here, both threads in the core are offline.
		 * Start the primary thread, which will then start the
		 * thread specified in booting_thread_hwid, the one
		 * corresponding to nr.
		 */

	} else if (threads_per_core == 1) {
		/*
		 * If each core has only one thread, set booting_thread_hwid
		 * to an invalid value.
		 */
		booting_thread_hwid = INVALID_THREAD_HWID;

	} else if (threads_per_core > 2) {
		pr_err("More than 2 threads per core is not supported\n");
		return -EINVAL;
	}

	ret = smp_85xx_start_cpu(primary);
	if (ret)
		return ret;

done:
	paca[nr].cpu_start = 1;
	generic_set_cpu_up(nr);

	return ret;
#else
	ret = smp_85xx_start_cpu(nr);
	if (ret)
		return ret;

	generic_set_cpu_up(nr);

	return ret;
#endif
}

struct smp_ops_t smp_85xx_ops = {
	.kick_cpu = smp_85xx_kick_cpu,
	.cpu_bootable = smp_generic_cpu_bootable,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable	= generic_cpu_disable,
	.cpu_die	= generic_cpu_die,
#endif
#if defined(CONFIG_KEXEC) && !defined(CONFIG_PPC64)
	.give_timebase	= smp_generic_give_timebase,
	.take_timebase	= smp_generic_take_timebase,
#endif
};

#ifdef CONFIG_KEXEC
#ifdef CONFIG_PPC32
atomic_t kexec_down_cpus = ATOMIC_INIT(0);

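/*
 * 32-bit kexec shutdown: each secondary flushes its caches, bumps
 * kexec_down_cpus so the kexecing CPU knows it has quiesced, then parks
 * in an infinite loop until its core is reset.
 */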
void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
{
	local_irq_disable();

	if (secondary) {
		cur_cpu_spec->cpu_down_flush();
		atomic_inc(&kexec_down_cpus);
		/* loop forever */
		while (1)
			;
	}
}

static void mpc85xx_smp_kexec_down(void *arg)
{
	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(0, 1);
}
#else
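/*
 * 64-bit (threaded) variant: the crash kernel is entered on whatever CPU
 * crashed, which may be a secondary hardware thread.  Wait for the other
 * thread of the core to reach real mode, then disable it through the
 * TENC/TENSR thread-enable registers so only one thread survives.
 */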
void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
{
	int cpu = smp_processor_id();
	int sibling = cpu_last_thread_sibling(cpu);
	bool notified = false;
	int disable_cpu;
	int disable_threadbit = 0;
	long start = mftb();
	long now;

	local_irq_disable();
	hard_irq_disable();
	mpic_teardown_this_cpu(secondary);

	if (cpu == crashing_cpu && cpu_thread_in_core(cpu) != 0) {
		/*
		 * We enter the crash kernel on whatever cpu crashed,
		 * even if it's a secondary thread.  If that's the case,
		 * disable the corresponding primary thread.
		 */
		disable_threadbit = 1;
		disable_cpu = cpu_first_thread_sibling(cpu);
	} else if (sibling != crashing_cpu &&
		   cpu_thread_in_core(cpu) == 0 &&
		   cpu_thread_in_core(sibling) != 0) {
		disable_threadbit = 2;
		disable_cpu = sibling;
	}

	if (disable_threadbit) {
		while (paca[disable_cpu].kexec_state < KEXEC_STATE_REAL_MODE) {
			barrier();
			now = mftb();
			if (!notified && now - start > 1000000) {
				pr_info("%s/%d: waiting for cpu %d to enter KEXEC_STATE_REAL_MODE (%d)\n",
					__func__, smp_processor_id(),
					disable_cpu,
					paca[disable_cpu].kexec_state);
				notified = true;
			}
		}

		if (notified) {
			pr_info("%s: cpu %d done waiting\n",
				__func__, disable_cpu);
		}

		mtspr(SPRN_TENC, disable_threadbit);
		while (mfspr(SPRN_TENSR) & disable_threadbit)
			cpu_relax();
	}
}
#endif

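/*
 * On 32-bit, wait for the secondaries to quiesce and reset every other
 * core before handing control to the new kernel image.
 */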
static void mpc85xx_smp_machine_kexec(struct kimage *image)
{
#ifdef CONFIG_PPC32
	int timeout = INT_MAX;
	int i, num_cpus = num_present_cpus();

	if (image->type == KEXEC_TYPE_DEFAULT)
		smp_call_function(mpc85xx_smp_kexec_down, NULL, 0);

	while ((atomic_read(&kexec_down_cpus) != (num_cpus - 1)) &&
	       (timeout > 0))
		timeout--;

	if (!timeout)
		printk(KERN_ERR "Unable to bring down secondary cpu(s)\n");

	for_each_online_cpu(i) {
		if (i == smp_processor_id())
			continue;
		mpic_reset_core(i);
	}
#endif

	default_machine_kexec(image);
}
#endif /* CONFIG_KEXEC */

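/* Per-CPU bringup hooks: doorbell IPIs whenever present, plus the MPIC. */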
static void smp_85xx_basic_setup(int cpu_nr)
{
	if (cpu_has_feature(CPU_FTR_DBELL))
		doorbell_setup_this_cpu();
}

static void smp_85xx_setup_cpu(int cpu_nr)
{
	mpic_setup_this_cpu();
	smp_85xx_basic_setup(cpu_nr);
}

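/*
 * Probe the interrupt topology and fill in smp_85xx_ops: use the MPIC
 * for IPIs when an "open-pic" node exists, prefer doorbells when the
 * CPU supports them, and wire up the hotplug and kexec hooks.
 */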
void __init mpc85xx_smp_init(void)
{
	struct device_node *np;

	np = of_find_node_by_type(NULL, "open-pic");
	if (np) {
		smp_85xx_ops.probe = smp_mpic_probe;
		smp_85xx_ops.setup_cpu = smp_85xx_setup_cpu;
		smp_85xx_ops.message_pass = smp_mpic_message_pass;
	} else {
		smp_85xx_ops.setup_cpu = smp_85xx_basic_setup;
	}

	if (cpu_has_feature(CPU_FTR_DBELL)) {
		/*
		 * If left NULL, .message_pass defaults to
		 * smp_muxed_ipi_message_pass
		 */
		smp_85xx_ops.message_pass = NULL;
		smp_85xx_ops.cause_ipi = doorbell_cause_ipi;
		smp_85xx_ops.probe = NULL;
	}

#ifdef CONFIG_HOTPLUG_CPU
#ifdef CONFIG_FSL_CORENET_RCPM
	fsl_rcpm_init();
#endif

#ifdef CONFIG_FSL_PMC
	mpc85xx_setup_pmc();
#endif
	if (qoriq_pm_ops) {
		smp_85xx_ops.give_timebase = mpc85xx_give_timebase;
		smp_85xx_ops.take_timebase = mpc85xx_take_timebase;
		ppc_md.cpu_die = smp_85xx_mach_cpu_die;
		smp_85xx_ops.cpu_die = qoriq_cpu_kill;
	}
#endif
	smp_ops = &smp_85xx_ops;

#ifdef CONFIG_KEXEC
	ppc_md.kexec_cpu_down = mpc85xx_smp_kexec_cpu_down;
	ppc_md.machine_kexec = mpc85xx_smp_machine_kexec;
#endif
}