/* smp.c: Sparc64 SMP support.
 *
 * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched/mm.h>
#include <linux/sched/hotplug.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/jiffies.h>
#include <linux/profile.h>
#include <linux/bootmem.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/kgdb.h>

#include <asm/head.h>
#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cpudata.h>
#include <asm/hvtramp.h>
#include <asm/io.h>
#include <asm/timer.h>
#include <asm/setup.h>

#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <linux/uaccess.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/prom.h>
#include <asm/mdesc.h>
#include <asm/ldc.h>
#include <asm/hypervisor.h>
#include <asm/pcr.h>

#include "cpumap.h"
#include "kernel.h"

DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
	{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };

cpumask_t cpu_core_sib_map[NR_CPUS] __read_mostly = {
	[0 ... NR_CPUS-1] = CPU_MASK_NONE };

cpumask_t cpu_core_sib_cache_map[NR_CPUS] __read_mostly = {
	[0 ... NR_CPUS - 1] = CPU_MASK_NONE };

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_SYMBOL(cpu_core_map);
EXPORT_SYMBOL(cpu_core_sib_map);
EXPORT_SYMBOL(cpu_core_sib_cache_map);

static cpumask_t smp_commenced_mask;

void smp_info(struct seq_file *m)
{
	int i;

	seq_printf(m, "State:\n");
	for_each_online_cpu(i)
		seq_printf(m, "CPU%d:\t\tonline\n", i);
}

void smp_bogo(struct seq_file *m)
{
	int i;

	for_each_online_cpu(i)
		seq_printf(m,
			   "Cpu%dClkTck\t: %016lx\n",
			   i, cpu_data(i).clock_tick);
}

extern void setup_sparc64_timer(void);

static volatile unsigned long callin_flag = 0;

void smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	__local_per_cpu_offset = __per_cpu_offset(cpuid);

	if (tlb_type == hypervisor)
		sun4v_ktsb_register();

	__flush_tlb_all();

	setup_sparc64_timer();

	if (cheetah_pcache_forced_on)
		cheetah_enable_pcache();

	callin_flag = 1;
	__asm__ __volatile__("membar #Sync\n\t"
			     "flush  %%g6" : : : "memory");

	/* Clear this or we will die instantly when we
	 * schedule back to this idler...
	 */
	current_thread_info()->new_child = 0;

	/* Attach to the address space of init_task. */
	mmgrab(&init_mm);
	current->active_mm = &init_mm;

	/* inform the notifiers about the new cpu */
	notify_cpu_starting(cpuid);

	while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
		rmb();

	set_cpu_online(cpuid, true);

	/* idle thread is expected to have preempt disabled */
	preempt_disable();

	local_irq_enable();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

void cpu_panic(void)
{
	printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
	panic("SMP bolixed\n");
}

/* This tick register synchronization scheme is taken entirely from
 * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
 *
 * The only change I've made is to rework it so that the master
 * initiates the synchronization instead of the slave. -DaveM
 */

#define MASTER	0
#define SLAVE	(SMP_CACHE_BYTES/sizeof(unsigned long))

#define NUM_ROUNDS	64	/* magic value */
#define NUM_ITERS	5	/* likewise */

static DEFINE_RAW_SPINLOCK(itc_sync_lock);
static unsigned long go[SLAVE + 1];

#define DEBUG_TICK_SYNC	0

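/* The master/slave rendezvous runs through the go[] array: go[MASTER]
 * is the slave's doorbell to the master, and go[SLAVE] carries the
 * master's %tick value back to the slave.  The two slots are spaced
 * SMP_CACHE_BYTES apart so the flags sit on separate cache lines and
 * do not false-share.
 */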
static inline long get_delta (long *rt, long *master)
{
	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
	unsigned long tcenter, t0, t1, tm;
	unsigned long i;

	for (i = 0; i < NUM_ITERS; i++) {
		t0 = tick_ops->get_tick();
		go[MASTER] = 1;
		membar_safe("#StoreLoad");
		while (!(tm = go[SLAVE]))
			rmb();
		go[SLAVE] = 0;
		wmb();
		t1 = tick_ops->get_tick();

		if (t1 - t0 < best_t1 - best_t0)
			best_t0 = t0, best_t1 = t1, best_tm = tm;
	}

	*rt = best_t1 - best_t0;
	*master = best_tm - best_t0;

	/* average best_t0 and best_t1 without overflow: */
	tcenter = (best_t0/2 + best_t1/2);
	if (best_t0 % 2 + best_t1 % 2 == 2)
		tcenter++;
	return tcenter - best_tm;
}

void smp_synchronize_tick_client(void)
{
	long i, delta, adj, adjust_latency = 0, done = 0;
	unsigned long flags, rt, master_time_stamp;
#if DEBUG_TICK_SYNC
	struct {
		long rt;	/* roundtrip time */
		long master;	/* master's timestamp */
		long diff;	/* difference between midpoint and master's timestamp */
		long lat;	/* estimate of itc adjustment latency */
	} t[NUM_ROUNDS];
#endif

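	/* Raise go[MASTER] to tell the master we are ready, then wait
	 * for the master to consume the flag before entering the
	 * measurement rounds.
	 */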
	go[MASTER] = 1;

	while (go[MASTER])
		rmb();

	local_irq_save(flags);
	{
		for (i = 0; i < NUM_ROUNDS; i++) {
			delta = get_delta(&rt, &master_time_stamp);
			if (delta == 0)
				done = 1;	/* let's lock on to this... */

			if (!done) {
				if (i > 0) {
					adjust_latency += -delta;
					adj = -delta + adjust_latency/4;
				} else
					adj = -delta;

				tick_ops->add_tick(adj);
			}
#if DEBUG_TICK_SYNC
			t[i].rt = rt;
			t[i].master = master_time_stamp;
			t[i].diff = delta;
			t[i].lat = adjust_latency/4;
#endif
		}
	}
	local_irq_restore(flags);

#if DEBUG_TICK_SYNC
	for (i = 0; i < NUM_ROUNDS; i++)
		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
#endif

	printk(KERN_INFO "CPU %d: synchronized TICK with master CPU "
	       "(last diff %ld cycles, maxerr %lu cycles)\n",
	       smp_processor_id(), delta, rt);
}

static void smp_start_sync_tick_client(int cpu);

static void smp_synchronize_one_tick(int cpu)
{
	unsigned long flags, i;

	go[MASTER] = 0;

	smp_start_sync_tick_client(cpu);

	/* wait for client to be ready */
	while (!go[MASTER])
		rmb();

	/* now let the client proceed into his loop */
	go[MASTER] = 0;
	membar_safe("#StoreLoad");

	raw_spin_lock_irqsave(&itc_sync_lock, flags);
	{
		for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
			while (!go[MASTER])
				rmb();
			go[MASTER] = 0;
			wmb();
			go[SLAVE] = tick_ops->get_tick();
			membar_safe("#StoreLoad");
		}
	}
	raw_spin_unlock_irqrestore(&itc_sync_lock, flags);
}

#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg,
				void **descrp)
{
	extern unsigned long sparc64_ttable_tl0;
	extern unsigned long kern_locked_tte_data;
	struct hvtramp_descr *hdesc;
	unsigned long trampoline_ra;
	struct trap_per_cpu *tb;
	u64 tte_vaddr, tte_data;
	unsigned long hv_err;
	int i;

	hdesc = kzalloc(sizeof(*hdesc) +
			(sizeof(struct hvtramp_mapping) *
			 num_kernel_image_mappings - 1),
			GFP_KERNEL);
	if (!hdesc) {
		printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate "
		       "hvtramp_descr.\n");
		return;
	}
	*descrp = hdesc;

	hdesc->cpu = cpu;
	hdesc->num_mappings = num_kernel_image_mappings;

	tb = &trap_block[cpu];

	hdesc->fault_info_va = (unsigned long) &tb->fault_info;
	hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info);

	hdesc->thread_reg = thread_reg;

	tte_vaddr = (unsigned long) KERNBASE;
	tte_data = kern_locked_tte_data;

	for (i = 0; i < hdesc->num_mappings; i++) {
		hdesc->maps[i].vaddr = tte_vaddr;
		hdesc->maps[i].tte   = tte_data;
		tte_vaddr += 0x400000;
		tte_data  += 0x400000;
	}

	trampoline_ra = kimage_addr_to_ra(hv_cpu_startup);

	hv_err = sun4v_cpu_start(cpu, trampoline_ra,
				 kimage_addr_to_ra(&sparc64_ttable_tl0),
				 __pa(hdesc));
	if (hv_err)
		printk(KERN_ERR "ldom_startcpu_cpuid: sun4v_cpu_start() "
		       "gives error %lu\n", hv_err);
}
#endif

extern unsigned long sparc64_cpu_startup;

/* The OBP cpu startup callback truncates the 3rd arg cookie to
 * 32-bits (I think) so to be safe we have it read the pointer
 * contained here so we work on >4GB machines. -DaveM
 */
static struct thread_info *cpu_new_thread = NULL;

static int smp_boot_one_cpu(unsigned int cpu, struct task_struct *idle)
{
	unsigned long entry =
		(unsigned long)(&sparc64_cpu_startup);
	unsigned long cookie =
		(unsigned long)(&cpu_new_thread);
	void *descr = NULL;
	int timeout, ret;

	callin_flag = 0;
	cpu_new_thread = task_thread_info(idle);

	if (tlb_type == hypervisor) {
#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
		if (ldom_domaining_enabled)
			ldom_startcpu_cpuid(cpu,
					    (unsigned long) cpu_new_thread,
					    &descr);
		else
#endif
			prom_startcpu_cpuid(cpu, entry, cookie);
	} else {
		struct device_node *dp = of_find_node_by_cpuid(cpu);

		prom_startcpu(dp->phandle, entry, cookie);
	}

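	/* Wait for the new cpu to set callin_flag in smp_callin();
	 * 50000 polls of udelay(100) bound the wait at roughly five
	 * seconds.
	 */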
	for (timeout = 0; timeout < 50000; timeout++) {
		if (callin_flag)
			break;
		udelay(100);
	}

	if (callin_flag) {
		ret = 0;
	} else {
		printk("Processor %d is stuck.\n", cpu);
		ret = -ENODEV;
	}
	cpu_new_thread = NULL;

	kfree(descr);

	return ret;
}

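/* On spitfire the three mondo data words are stored into the interrupt
 * dispatch data registers (ASI_INTR_W offsets 0x40, 0x50 and 0x60), and
 * the cross call is launched by the store to the dispatch register at
 * ((upaid << 14) | 0x70).  Delivery is then polled for via
 * ASI_INTR_DISPATCH_STAT.
 */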
static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
{
	u64 result, target;
	int stuck, tmp;

	if (this_is_starfire) {
		/* map to real upaid */
		cpu = (((cpu & 0x3c) << 1) |
			((cpu & 0x40) >> 4) |
			(cpu & 0x3));
	}

	target = (cpu << 14) | 0x70;
again:
	/* Ok, this is the real Spitfire Errata #54.
	 * One must read back from a UDB internal register
	 * after writes to the UDB interrupt dispatch, but
	 * before the membar Sync for that write.
	 * So we use the high UDB control register (ASI 0x7f,
	 * ADDR 0x20) for the dummy read. -DaveM
	 */
	tmp = 0x40;
	__asm__ __volatile__(
	"wrpr	%1, %2, %%pstate\n\t"
	"stxa	%4, [%0] %3\n\t"
	"stxa	%5, [%0+%8] %3\n\t"
	"add	%0, %8, %0\n\t"
	"stxa	%6, [%0+%8] %3\n\t"
	"membar	#Sync\n\t"
	"stxa	%%g0, [%7] %3\n\t"
	"membar	#Sync\n\t"
	"mov	0x20, %%g1\n\t"
	"ldxa	[%%g1] 0x7f, %%g0\n\t"
	"membar	#Sync"
	: "=r" (tmp)
	: "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
	  "r" (data0), "r" (data1), "r" (data2), "r" (target),
	  "r" (0x10), "0" (tmp)
	: "g1");

	/* NOTE: PSTATE_IE is still clear. */
	stuck = 100000;
	do {
		__asm__ __volatile__("ldxa [%%g0] %1, %0"
			: "=r" (result)
			: "i" (ASI_INTR_DISPATCH_STAT));
		if (result == 0) {
			__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
					     : : "r" (pstate));
			return;
		}
		stuck -= 1;
		if (stuck == 0)
			break;
	} while (result & 0x1);
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
			     : : "r" (pstate));
	if (stuck == 0) {
		printk("CPU[%d]: mondo stuckage result[%016llx]\n",
		       smp_processor_id(), result);
	} else {
		udelay(2);
		goto again;
	}
}

static void spitfire_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
	u64 *mondo, data0, data1, data2;
	u16 *cpu_list;
	u64 pstate;
	int i;

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	cpu_list = __va(tb->cpu_list_pa);
	mondo = __va(tb->cpu_mondo_block_pa);
	data0 = mondo[0];
	data1 = mondo[1];
	data2 = mondo[2];
	for (i = 0; i < cnt; i++)
		spitfire_xcall_helper(data0, data1, data2, pstate, cpu_list[i]);
}

/* Cheetah now allows sending the whole 64 bytes of data in the interrupt
 * packet, but we have no use for that.  However we do take advantage of
 * the new pipelining feature (ie. dispatch to multiple cpus simultaneously).
 */
static void cheetah_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
	int nack_busy_id, is_jbus, need_more;
	u64 *mondo, pstate, ver, busy_mask;
	u16 *cpu_list;

	cpu_list = __va(tb->cpu_list_pa);
	mondo = __va(tb->cpu_mondo_block_pa);

	/* Unfortunately, someone at Sun had the brilliant idea to make the
	 * busy/nack fields hard-coded by ITID number for this Ultra-III
	 * derivative processor.
	 */
	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
	is_jbus = ((ver >> 32) == __JALAPENO_ID ||
		   (ver >> 32) == __SERRANO_ID);

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));

retry:
	need_more = 0;
	__asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
			     : : "r" (pstate), "i" (PSTATE_IE));

	/* Setup the dispatch data registers. */
	__asm__ __volatile__("stxa	%0, [%3] %6\n\t"
			     "stxa	%1, [%4] %6\n\t"
			     "stxa	%2, [%5] %6\n\t"
			     "membar	#Sync\n\t"
			     : /* no outputs */
			     : "r" (mondo[0]), "r" (mondo[1]), "r" (mondo[2]),
			       "r" (0x40), "r" (0x50), "r" (0x60),
			       "i" (ASI_INTR_W));

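	/* The dispatch status register keeps two bits per dispatch slot:
	 * bit 2n is the BUSY bit and bit 2n+1 the NACK bit.  busy_mask
	 * accumulates the BUSY bits of every target we dispatch to,
	 * indexed by ITID on JBUS parts and by nack_busy_id otherwise.
	 */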
	nack_busy_id = 0;
	busy_mask = 0;
	{
		int i;

		for (i = 0; i < cnt; i++) {
			u64 target, nr;

			nr = cpu_list[i];
			if (nr == 0xffff)
				continue;

			target = (nr << 14) | 0x70;
			if (is_jbus) {
				busy_mask |= (0x1UL << (nr * 2));
			} else {
				target |= (nack_busy_id << 24);
				busy_mask |= (0x1UL <<
					      (nack_busy_id * 2));
			}
			__asm__ __volatile__(
				"stxa	%%g0, [%0] %1\n\t"
				"membar	#Sync\n\t"
				: /* no outputs */
				: "r" (target), "i" (ASI_INTR_W));
			nack_busy_id++;
			if (nack_busy_id == 32) {
				need_more = 1;
				break;
			}
		}
	}

	/* Now, poll for completion. */
	{
		u64 dispatch_stat, nack_mask;
		long stuck;

		stuck = 100000 * nack_busy_id;
		nack_mask = busy_mask << 1;
		do {
			__asm__ __volatile__("ldxa	[%%g0] %1, %0"
					     : "=r" (dispatch_stat)
					     : "i" (ASI_INTR_DISPATCH_STAT));
			if (!(dispatch_stat & (busy_mask | nack_mask))) {
				__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
						     : : "r" (pstate));
				if (unlikely(need_more)) {
					int i, this_cnt = 0;
					for (i = 0; i < cnt; i++) {
						if (cpu_list[i] == 0xffff)
							continue;
						cpu_list[i] = 0xffff;
						this_cnt++;
						if (this_cnt == 32)
							break;
					}
					goto retry;
				}
				return;
			}
			if (!--stuck)
				break;
		} while (dispatch_stat & busy_mask);

		__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
				     : : "r" (pstate));

		if (dispatch_stat & busy_mask) {
			/* Busy bits will not clear, continue instead
			 * of freezing up on this cpu.
			 */
			printk("CPU[%d]: mondo stuckage result[%016llx]\n",
			       smp_processor_id(), dispatch_stat);
		} else {
			int i, this_busy_nack = 0;

			/* Delay some random time with interrupts enabled
			 * to prevent deadlock.
			 */
			udelay(2 * nack_busy_id);

			/* Clear out the mask bits for cpus which did not
			 * NACK us.
			 */
			for (i = 0; i < cnt; i++) {
				u64 check_mask, nr;

				nr = cpu_list[i];
				if (nr == 0xffff)
					continue;

				if (is_jbus)
					check_mask = (0x2UL << (2*nr));
				else
					check_mask = (0x2UL <<
						      this_busy_nack);
				if ((dispatch_stat & check_mask) == 0)
					cpu_list[i] = 0xffff;
				this_busy_nack += 2;
				if (this_busy_nack == 64)
					break;
			}

			goto retry;
		}
	}
}

#define CPU_MONDO_COUNTER(cpuid)	(cpu_mondo_counter[cpuid])
#define MONDO_USEC_WAIT_MIN		2
#define MONDO_USEC_WAIT_MAX		100
#define MONDO_RETRY_LIMIT		500000

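/* cpu_mondo_counter[cpuid] counts the cpu mondos received by that cpu;
 * hypervisor_xcall_deliver() samples it for the first not-yet-reached
 * target and treats an advancing counter as evidence that the target is
 * alive but busy draining other mondos rather than truly wedged.
 */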
/* Multi-cpu list version.
 *
 * Deliver xcalls to 'cnt' cpus in 'cpu_list'.
 * Sometimes not all cpus receive the mondo, requiring us to re-send
 * it until every cpu has received it, or until the remaining cpus are
 * truly stuck and unable to receive mondos, at which point we time out.
 * Occasionally a target cpu strand is borrowed briefly by the
 * hypervisor to perform guest service, such as PCIe error handling.
 * Considering that service time, an overall wait of 1 second is
 * reasonable for 1 cpu.
 * Two in-between mondo check wait times are defined: 2 usec for a
 * single-cpu quick turnaround and up to 100 usec for a large cpu count.
 * Delivering mondos to a large number of cpus can take longer, so we
 * adjust the retry count as long as target cpus are making forward
 * progress.
 */
static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
	int this_cpu, tot_cpus, prev_sent, i, rem;
	int usec_wait, retries, tot_retries;
	u16 first_cpu = 0xffff;
	unsigned long xc_rcvd = 0;
	unsigned long status;
	int ecpuerror_id = 0;
	int enocpu_id = 0;
	u16 *cpu_list;
	u16 cpu;

	this_cpu = smp_processor_id();
	cpu_list = __va(tb->cpu_list_pa);
	usec_wait = cnt * MONDO_USEC_WAIT_MIN;
	if (usec_wait > MONDO_USEC_WAIT_MAX)
		usec_wait = MONDO_USEC_WAIT_MAX;
	retries = tot_retries = 0;
	tot_cpus = cnt;
	prev_sent = 0;

	do {
		int n_sent, mondo_delivered, target_cpu_busy;

		status = sun4v_cpu_mondo_send(cnt,
					      tb->cpu_list_pa,
					      tb->cpu_mondo_block_pa);

		/* HV_EOK means all cpus received the xcall, we're done.  */
		if (likely(status == HV_EOK))
			goto xcall_done;

		/* If not these non-fatal errors, panic */
		if (unlikely((status != HV_EWOULDBLOCK) &&
			(status != HV_ECPUERROR) &&
			(status != HV_ENOCPU)))
			goto fatal_errors;

		/* First, see if we made any forward progress.
		 *
		 * Go through the cpu_list, count the target cpus that have
		 * received our mondo (n_sent), and those that did not (rem).
		 * Re-pack cpu_list with the cpus remaining to be retried at
		 * the front - this simplifies tracking the truly stalled cpus.
		 *
		 * The hypervisor indicates successful sends by setting
		 * cpu list entries to the value 0xffff.
		 *
		 * EWOULDBLOCK means some target cpus did not receive the
		 * mondo and retrying usually helps.
		 *
		 * ECPUERROR means at least one target cpu is in an error
		 * state; it's usually safe to skip the faulty cpu and retry.
		 *
		 * ENOCPU means one of the target cpus doesn't belong to the
		 * domain, perhaps because it was offlined, which is
		 * unexpected but not fatal; it's okay to skip the offlined
		 * cpu.
		 */
		rem = 0;
		n_sent = 0;
		for (i = 0; i < cnt; i++) {
			cpu = cpu_list[i];
			if (likely(cpu == 0xffff)) {
				n_sent++;
			} else if ((status == HV_ECPUERROR) &&
				(sun4v_cpu_state(cpu) == HV_CPU_STATE_ERROR)) {
				ecpuerror_id = cpu + 1;
			} else if (status == HV_ENOCPU && !cpu_online(cpu)) {
				enocpu_id = cpu + 1;
			} else {
				cpu_list[rem++] = cpu;
			}
		}

		/* No cpu remained, we're done. */
		if (rem == 0)
			break;

		/* Otherwise, update the cpu count for retry. */
		cnt = rem;

		/* Record the overall number of mondos received by the
		 * first of the remaining cpus.
		 */
		if (first_cpu != cpu_list[0]) {
			first_cpu = cpu_list[0];
			xc_rcvd = CPU_MONDO_COUNTER(first_cpu);
		}

		/* Was any mondo delivered successfully? */
		mondo_delivered = (n_sent > prev_sent);
		prev_sent = n_sent;

		/* or, was any target cpu busy processing other mondos? */
		target_cpu_busy = (xc_rcvd < CPU_MONDO_COUNTER(first_cpu));
		xc_rcvd = CPU_MONDO_COUNTER(first_cpu);

		/* Retry count is for no progress. If we're making progress,
		 * reset the retry count.
		 */
		if (likely(mondo_delivered || target_cpu_busy)) {
			tot_retries += retries;
			retries = 0;
		} else if (unlikely(retries > MONDO_RETRY_LIMIT)) {
			goto fatal_mondo_timeout;
		}

		/* Delay a little bit to let other cpus catch up on
		 * their cpu mondo queue work.
		 */
		if (!mondo_delivered)
			udelay(usec_wait);

		retries++;
	} while (1);

xcall_done:
	if (unlikely(ecpuerror_id > 0)) {
		pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) was in error state\n",
		       this_cpu, ecpuerror_id - 1);
	} else if (unlikely(enocpu_id > 0)) {
		pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) does not belong to the domain\n",
		       this_cpu, enocpu_id - 1);
	}
	return;

fatal_errors:
	/* fatal errors include bad alignment, etc */
	pr_crit("CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) mondo_block_pa(%lx)\n",
	       this_cpu, tot_cpus, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
	panic("Unexpected SUN4V mondo error %lu\n", status);

fatal_mondo_timeout:
	/* some cpus being non-responsive to the cpu mondo */
	pr_crit("CPU[%d]: SUN4V mondo timeout, cpu(%d) made no forward progress after %d retries. Total target cpus(%d).\n",
	       this_cpu, first_cpu, (tot_retries + retries), tot_cpus);
	panic("SUN4V mondo timeout panic\n");
}

static void (*xcall_deliver_impl)(struct trap_per_cpu *, int);

static void xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask)
{
	struct trap_per_cpu *tb;
	int this_cpu, i, cnt;
	unsigned long flags;
	u16 *cpu_list;
	u64 *mondo;

	/* We have to do this whole thing with interrupts fully disabled.
	 * Otherwise if we send an xcall from interrupt context it will
	 * corrupt both our mondo block and cpu list state.
	 *
	 * One consequence of this is that we cannot use timeout mechanisms
	 * that depend upon interrupts being delivered locally.  So, for
	 * example, we cannot sample jiffies and expect it to advance.
	 *
	 * Fortunately, udelay() uses %stick/%tick so we can use that.
	 */
	local_irq_save(flags);

	this_cpu = smp_processor_id();
	tb = &trap_block[this_cpu];

	mondo = __va(tb->cpu_mondo_block_pa);
	mondo[0] = data0;
	mondo[1] = data1;
	mondo[2] = data2;
	wmb();

	cpu_list = __va(tb->cpu_list_pa);

	/* Setup the initial cpu list.  */
	cnt = 0;
	for_each_cpu(i, mask) {
		if (i == this_cpu || !cpu_online(i))
			continue;
		cpu_list[cnt++] = i;
	}

	if (cnt)
		xcall_deliver_impl(tb, cnt);

	local_irq_restore(flags);
}

/* Send cross call to all processors mentioned in MASK_P
 * except self.  Really, there are only two cases currently,
 * "cpu_online_mask" and "mm_cpumask(mm)".
 */
static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, const cpumask_t *mask)
{
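	/* Pack the MMU context into the upper 32 bits of data0 and the
	 * low 32 bits of the xcall handler's kernel text address beneath
	 * it.
	 */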
	u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));

	xcall_deliver(data0, data1, data2, mask);
}

/* Send cross call to all processors except self. */
static void smp_cross_call(unsigned long *func, u32 ctx, u64 data1, u64 data2)
{
	smp_cross_call_masked(func, ctx, data1, data2, cpu_online_mask);
}

extern unsigned long xcall_sync_tick;

static void smp_start_sync_tick_client(int cpu)
{
	xcall_deliver((u64) &xcall_sync_tick, 0, 0,
		      cpumask_of(cpu));
}

extern unsigned long xcall_call_function;

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	xcall_deliver((u64) &xcall_call_function, 0, 0, mask);
}

extern unsigned long xcall_call_function_single;

void arch_send_call_function_single_ipi(int cpu)
{
	xcall_deliver((u64) &xcall_call_function_single, 0, 0,
		      cpumask_of(cpu));
}

void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
	irq_enter();
	generic_smp_call_function_interrupt();
	irq_exit();
}

void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
	irq_enter();
	generic_smp_call_function_single_interrupt();
	irq_exit();
}

static void tsb_sync(void *info)
{
	struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()];
	struct mm_struct *mm = info;

	/* It is not valid to test "current->active_mm == mm" here.
	 *
	 * The value of "current" is not changed atomically with
	 * switch_mm().  But that's OK, we just need to check the
	 * current cpu's trap block PGD physical address.
	 */
	if (tp->pgd_paddr == __pa(mm->pgd))
		tsb_context_switch(mm);
}

void smp_tsb_sync(struct mm_struct *mm)
{
	smp_call_function_many(mm_cpumask(mm), tsb_sync, mm, 1);
}

extern unsigned long xcall_flush_tlb_mm;
extern unsigned long xcall_flush_tlb_page;
extern unsigned long xcall_flush_tlb_kernel_range;
extern unsigned long xcall_fetch_glob_regs;
extern unsigned long xcall_fetch_glob_pmu;
extern unsigned long xcall_fetch_glob_pmu_n4;
extern unsigned long xcall_receive_signal;
extern unsigned long xcall_new_mmu_context_version;
#ifdef CONFIG_KGDB
extern unsigned long xcall_kgdb_capture;
#endif

#ifdef DCACHE_ALIASING_POSSIBLE
extern unsigned long xcall_flush_dcache_page_cheetah;
#endif
extern unsigned long xcall_flush_dcache_page_spitfire;

static inline void __local_flush_dcache_page(struct page *page)
{
#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}

void smp_flush_dcache_page_impl(struct page *page, int cpu)
{
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

	this_cpu = get_cpu();

	if (cpu == this_cpu) {
		__local_flush_dcache_page(page);
	} else if (cpu_online(cpu)) {
		void *pg_addr = page_address(page);
		u64 data0 = 0;

		if (tlb_type == spitfire) {
			data0 = ((u64)&xcall_flush_dcache_page_spitfire);
			if (page_mapping(page) != NULL)
				data0 |= ((u64)1 << 32);
		} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
			data0 = ((u64)&xcall_flush_dcache_page_cheetah);
#endif
		}
		if (data0) {
			xcall_deliver(data0, __pa(pg_addr),
				      (u64) pg_addr, cpumask_of(cpu));
#ifdef CONFIG_DEBUG_DCFLUSH
			atomic_inc(&dcpage_flushes_xcall);
#endif
		}
	}

	put_cpu();
}

void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
{
	void *pg_addr;
	u64 data0;

	if (tlb_type == hypervisor)
		return;

	preempt_disable();

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif
	data0 = 0;
	pg_addr = page_address(page);
	if (tlb_type == spitfire) {
		data0 = ((u64)&xcall_flush_dcache_page_spitfire);
		if (page_mapping(page) != NULL)
			data0 |= ((u64)1 << 32);
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
		data0 = ((u64)&xcall_flush_dcache_page_cheetah);
#endif
	}
	if (data0) {
		xcall_deliver(data0, __pa(pg_addr),
			      (u64) pg_addr, cpu_online_mask);
#ifdef CONFIG_DEBUG_DCFLUSH
		atomic_inc(&dcpage_flushes_xcall);
#endif
	}
	__local_flush_dcache_page(page);

	preempt_enable();
}

#ifdef CONFIG_KGDB
void kgdb_roundup_cpus(unsigned long flags)
{
	smp_cross_call(&xcall_kgdb_capture, 0, 0, 0);
}
#endif

void smp_fetch_global_regs(void)
{
	smp_cross_call(&xcall_fetch_glob_regs, 0, 0, 0);
}

void smp_fetch_global_pmu(void)
{
	if (tlb_type == hypervisor &&
	    sun4v_chip_type >= SUN4V_CHIP_NIAGARA4)
		smp_cross_call(&xcall_fetch_glob_pmu_n4, 0, 0, 0);
	else
		smp_cross_call(&xcall_fetch_glob_pmu, 0, 0, 0);
}

/* We know that the window frames of the user have been flushed
 * to the stack before we get here because all callers of us
 * are flush_tlb_*() routines, and these run after flush_cache_*()
 * which performs the flushw.
 *
 * The SMP TLB coherency scheme we use works as follows:
 *
 * 1) mm->cpu_vm_mask is a bit mask of which cpus an address
 *    space has (potentially) executed on, this is the heuristic
 *    we use to avoid doing cross calls.
 *
 *    Also, for flushing from kswapd and for clones, we use
 *    cpu_vm_mask as the list of cpus on which to run the TLB flush.
 *
 * 2) TLB context numbers are shared globally across all processors
 *    in the system, this allows us to play several games to avoid
 *    cross calls.
 *
 *    One invariant is that when a cpu switches to a process, and
 *    that process's tsk->active_mm->cpu_vm_mask does not have the
 *    current cpu's bit set, that tlb context is flushed locally.
 *
 *    If the address space is non-shared (ie. mm->count == 1) we avoid
 *    cross calls when we want to flush the currently running process's
 *    tlb state.  This is done by clearing all cpu bits except the current
 *    processor's in current->mm->cpu_vm_mask and performing the
 *    flush locally only.  This will force any subsequent cpus which run
 *    this task to flush the context from the local tlb if the process
 *    migrates to another cpu (again).
 *
 * 3) For shared address spaces (threads) and swapping we bite the
 *    bullet for most cases and perform the cross call (but only to
 *    the cpus listed in cpu_vm_mask).
 *
 *    The performance gain from "optimizing" away the cross call for threads is
 *    questionable (in theory the big win for threads is the massive sharing of
 *    address space state across processors).
 */

/* This currently is only used by the hugetlb arch pre-fault
 * hook on UltraSPARC-III+ and later when changing the pagesize
 * bits of the context register for an address space.
 */
void smp_flush_tlb_mm(struct mm_struct *mm)
{
	u32 ctx = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	if (atomic_read(&mm->mm_users) == 1) {
		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
		goto local_flush_and_out;
	}

	smp_cross_call_masked(&xcall_flush_tlb_mm,
			      ctx, 0, 0,
			      mm_cpumask(mm));

local_flush_and_out:
	__flush_tlb_mm(ctx, SECONDARY_CONTEXT);

	put_cpu();
}

struct tlb_pending_info {
	unsigned long ctx;
	unsigned long nr;
	unsigned long *vaddrs;
};

static void tlb_pending_func(void *info)
{
	struct tlb_pending_info *t = info;

	__flush_tlb_pending(t->ctx, t->nr, t->vaddrs);
}

void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
{
	u32 ctx = CTX_HWBITS(mm->context);
	struct tlb_pending_info info;
	int cpu = get_cpu();

	info.ctx = ctx;
	info.nr = nr;
	info.vaddrs = vaddrs;

	if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
	else
		smp_call_function_many(mm_cpumask(mm), tlb_pending_func,
				       &info, 1);

	__flush_tlb_pending(ctx, nr, vaddrs);

	put_cpu();
}

void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
{
	unsigned long context = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
	else
		smp_cross_call_masked(&xcall_flush_tlb_page,
				      context, vaddr, 0,
				      mm_cpumask(mm));
	__flush_tlb_page(context, vaddr);

	put_cpu();
}

void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	start &= PAGE_MASK;
	end    = PAGE_ALIGN(end);
	if (start != end) {
		smp_cross_call(&xcall_flush_tlb_kernel_range,
			       0, start, end);

		__flush_tlb_kernel_range(start, end);
	}
}

/* CPU capture. */
/* #define CAPTURE_DEBUG */
extern unsigned long xcall_capture;

static atomic_t smp_capture_depth = ATOMIC_INIT(0);
static atomic_t smp_capture_registry = ATOMIC_INIT(0);
static unsigned long penguins_are_doing_time;

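/* Capture protocol: the first caller of smp_capture() raises
 * penguins_are_doing_time and cross calls xcall_capture; each target
 * lands in smp_penguin_jailcell() below, registers itself in
 * smp_capture_registry and spins in the prom world until smp_release()
 * drops the flag.
 */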
void smp_capture(void)
{
	int result = atomic_add_return(1, &smp_capture_depth);

	if (result == 1) {
		int ncpus = num_online_cpus();

#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Sending penguins to jail...",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 1;
		atomic_inc(&smp_capture_registry);
		smp_cross_call(&xcall_capture, 0, 0, 0);
		while (atomic_read(&smp_capture_registry) != ncpus)
			rmb();
#ifdef CAPTURE_DEBUG
		printk("done\n");
#endif
	}
}

void smp_release(void)
{
	if (atomic_dec_and_test(&smp_capture_depth)) {
#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Giving pardon to "
		       "imprisoned penguins\n",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 0;
		membar_safe("#StoreLoad");
		atomic_dec(&smp_capture_registry);
	}
}

/* Imprisoned penguins run with %pil == PIL_NORMAL_MAX, but PSTATE_IE
 * set, so they can service tlb flush xcalls...
 */
extern void prom_world(int);

void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);

	preempt_disable();

	__asm__ __volatile__("flushw");
	prom_world(1);
	atomic_inc(&smp_capture_registry);
	membar_safe("#StoreLoad");
	while (penguins_are_doing_time)
		rmb();
	atomic_dec(&smp_capture_registry);
	prom_world(0);

	preempt_enable();
}

/* /proc/profile writes can call this, don't __init it please. */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
}

void smp_prepare_boot_cpu(void)
{
}

void __init smp_setup_processor_id(void)
{
	if (tlb_type == spitfire)
		xcall_deliver_impl = spitfire_xcall_deliver;
	else if (tlb_type == cheetah || tlb_type == cheetah_plus)
		xcall_deliver_impl = cheetah_xcall_deliver;
	else
		xcall_deliver_impl = hypervisor_xcall_deliver;
}

void __init smp_fill_in_cpu_possible_map(void)
{
	int possible_cpus = num_possible_cpus();
	int i;

	if (possible_cpus > nr_cpu_ids)
		possible_cpus = nr_cpu_ids;

	for (i = 0; i < possible_cpus; i++)
		set_cpu_possible(i, true);
	for (; i < NR_CPUS; i++)
		set_cpu_possible(i, false);
}

void smp_fill_in_sib_core_maps(void)
{
	unsigned int i;

	for_each_present_cpu(i) {
		unsigned int j;

		cpumask_clear(&cpu_core_map[i]);
		if (cpu_data(i).core_id == 0) {
			cpumask_set_cpu(i, &cpu_core_map[i]);
			continue;
		}

		for_each_present_cpu(j) {
			if (cpu_data(i).core_id ==
			    cpu_data(j).core_id)
				cpumask_set_cpu(j, &cpu_core_map[i]);
		}
	}

	for_each_present_cpu(i)  {
		unsigned int j;

		for_each_present_cpu(j)  {
			if (cpu_data(i).max_cache_id ==
			    cpu_data(j).max_cache_id)
				cpumask_set_cpu(j, &cpu_core_sib_cache_map[i]);

			if (cpu_data(i).sock_id == cpu_data(j).sock_id)
				cpumask_set_cpu(j, &cpu_core_sib_map[i]);
		}
	}

	for_each_present_cpu(i) {
		unsigned int j;

		cpumask_clear(&per_cpu(cpu_sibling_map, i));
		if (cpu_data(i).proc_id == -1) {
			cpumask_set_cpu(i, &per_cpu(cpu_sibling_map, i));
			continue;
		}

		for_each_present_cpu(j) {
			if (cpu_data(i).proc_id ==
			    cpu_data(j).proc_id)
				cpumask_set_cpu(j, &per_cpu(cpu_sibling_map, i));
		}
	}
}

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int ret = smp_boot_one_cpu(cpu, tidle);

	if (!ret) {
		cpumask_set_cpu(cpu, &smp_commenced_mask);
		while (!cpu_online(cpu))
			mb();
		if (!cpu_online(cpu)) {
			ret = -ENODEV;
		} else {
			/* On SUN4V, writes to %tick and %stick are
			 * not allowed.
			 */
			if (tlb_type != hypervisor)
				smp_synchronize_one_tick(cpu);
		}
	}
	return ret;
}

#ifdef CONFIG_HOTPLUG_CPU
void cpu_play_dead(void)
{
	int cpu = smp_processor_id();
	unsigned long pstate;

	idle_task_exit();

	if (tlb_type == hypervisor) {
		struct trap_per_cpu *tb = &trap_block[cpu];

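		/* Re-register each mondo and error queue with zero entries,
		 * which (per the sun4v queue-configure API) un-configures
		 * the queue for this cpu before it is stopped.
		 */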
		sun4v_cpu_qconf(HV_CPU_QUEUE_CPU_MONDO,
				tb->cpu_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_DEVICE_MONDO,
				tb->dev_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_RES_ERROR,
				tb->resum_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_NONRES_ERROR,
				tb->nonresum_mondo_pa, 0);
	}

	cpumask_clear_cpu(cpu, &smp_commenced_mask);
	membar_safe("#Sync");

	local_irq_disable();

	__asm__ __volatile__(
		"rdpr	%%pstate, %0\n\t"
		"wrpr	%0, %1, %%pstate"
		: "=r" (pstate)
		: "i" (PSTATE_IE));

	while (1)
		barrier();
}

int __cpu_disable(void)
{
	int cpu = smp_processor_id();
	cpuinfo_sparc *c;
	int i;

	for_each_cpu(i, &cpu_core_map[cpu])
		cpumask_clear_cpu(cpu, &cpu_core_map[i]);
	cpumask_clear(&cpu_core_map[cpu]);

	for_each_cpu(i, &per_cpu(cpu_sibling_map, cpu))
		cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i));
	cpumask_clear(&per_cpu(cpu_sibling_map, cpu));

	c = &cpu_data(cpu);

	c->core_id = 0;
	c->proc_id = -1;

	smp_wmb();

	/* Make sure no interrupts point to this cpu.  */
	fixup_irqs();

	local_irq_enable();
	mdelay(1);
	local_irq_disable();

	set_cpu_online(cpu, false);

	cpu_map_rebuild();

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (!cpumask_test_cpu(cpu, &smp_commenced_mask))
			break;
		msleep(100);
	}
	if (cpumask_test_cpu(cpu, &smp_commenced_mask)) {
		printk(KERN_ERR "CPU %u didn't die...\n", cpu);
	} else {
#if defined(CONFIG_SUN_LDOMS)
		unsigned long hv_err;
		int limit = 100;

		do {
			hv_err = sun4v_cpu_stop(cpu);
			if (hv_err == HV_EOK) {
				set_cpu_present(cpu, false);
				break;
			}
		} while (--limit > 0);
		if (limit <= 0) {
			printk(KERN_ERR "sun4v_cpu_stop() fails err=%lu\n",
			       hv_err);
		}
#endif
	}
}
#endif

void __init smp_cpus_done(unsigned int max_cpus)
{
}

void smp_send_reschedule(int cpu)
{
	if (cpu == smp_processor_id()) {
		WARN_ON_ONCE(preemptible());
		set_softint(1 << PIL_SMP_RECEIVE_SIGNAL);
	} else {
		xcall_deliver((u64) &xcall_receive_signal,
			      0, 0, cpumask_of(cpu));
	}
}

void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
	scheduler_ipi();
}

static void stop_this_cpu(void *dummy)
{
	set_cpu_online(smp_processor_id(), false);
	prom_stopself();
}

void smp_send_stop(void)
{
	int cpu;

	if (tlb_type == hypervisor) {
		int this_cpu = smp_processor_id();
#ifdef CONFIG_SERIAL_SUNHV
		sunhv_migrate_hvcons_irq(this_cpu);
#endif
		for_each_online_cpu(cpu) {
			if (cpu == this_cpu)
				continue;

			set_cpu_online(cpu, false);
#ifdef CONFIG_SUN_LDOMS
			if (ldom_domaining_enabled) {
				unsigned long hv_err;
				hv_err = sun4v_cpu_stop(cpu);
				if (hv_err)
					printk(KERN_ERR "sun4v_cpu_stop() "
					       "failed err=%lu\n", hv_err);
			} else
#endif
				prom_stopcpu_cpuid(cpu);
		}
	} else
		smp_call_function(stop_this_cpu, NULL, 0);
}

/**
 * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
 * @cpu: cpu to allocate for
 * @size: size allocation in bytes
 * @align: alignment
 *
 * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
 * does the right thing for NUMA regardless of the current
 * configuration.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void * __init pcpu_alloc_bootmem(unsigned int cpu, size_t size,
					size_t align)
{
	const unsigned long goal = __pa(MAX_DMA_ADDRESS);
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int node = cpu_to_node(cpu);
	void *ptr;

	if (!node_online(node) || !NODE_DATA(node)) {
		ptr = __alloc_bootmem(size, align, goal);
		pr_info("cpu %d has no node %d or node-local memory\n",
			cpu, node);
		pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
			 cpu, size, __pa(ptr));
	} else {
		ptr = __alloc_bootmem_node(NODE_DATA(node),
					   size, align, goal);
		pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
			 "%016lx\n", cpu, size, node, __pa(ptr));
	}
	return ptr;
#else
	return __alloc_bootmem(size, align, goal);
#endif
}

static void __init pcpu_free_bootmem(void *ptr, size_t size)
{
	free_bootmem(__pa(ptr), size);
}

static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	if (cpu_to_node(from) == cpu_to_node(to))
		return LOCAL_DISTANCE;
	else
		return REMOTE_DISTANCE;
}

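/* Build the kernel page table entries backing one page of the percpu
 * first chunk; handed to pcpu_page_first_chunk() below for the
 * page-based fallback path.
 */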
static void __init pcpu_populate_pte(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd)) {
		pud_t *new;

		new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
		pgd_populate(&init_mm, pgd, new);
	}

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud)) {
		pmd_t *new;

		new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
		pud_populate(&init_mm, pud, new);
	}

	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd)) {
		pte_t *new;

		new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
		pmd_populate_kernel(&init_mm, pmd, new);
	}
}

void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc = -EINVAL;

	if (pcpu_chosen_fc != PCPU_FC_PAGE) {
		rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
					    PERCPU_DYNAMIC_RESERVE, 4 << 20,
					    pcpu_cpu_distance,
					    pcpu_alloc_bootmem,
					    pcpu_free_bootmem);
		if (rc)
			pr_warning("PERCPU: %s allocator failed (%d), "
				   "falling back to page size\n",
				   pcpu_fc_names[pcpu_chosen_fc], rc);
	}
	if (rc < 0)
		rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
					   pcpu_alloc_bootmem,
					   pcpu_free_bootmem,
					   pcpu_populate_pte);
	if (rc < 0)
		panic("cannot initialize percpu area (err=%d)", rc);

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];

	/* Setup %g5 for the boot cpu.  */
	__local_per_cpu_offset = __per_cpu_offset(smp_processor_id());

	of_fill_in_cpu_data();
	if (tlb_type == hypervisor)
		mdesc_fill_in_cpu_data(cpu_all_mask);
}