/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/export.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched/mm.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/ftrace.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_irq.h>

#include <linux/atomic.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/idle.h>
#include <asm/r4k-timer.h>
#include <asm/mips-cpc.h>
#include <asm/mmu_context.h>
#include <asm/time.h>
#include <asm/setup.h>
#include <asm/maar.h>

int __cpu_number_map[NR_CPUS];          /* Map physical to logical */
EXPORT_SYMBOL(__cpu_number_map);

int __cpu_logical_map[NR_CPUS];         /* Map logical to physical */
EXPORT_SYMBOL(__cpu_logical_map);

/* Number of TCs (or siblings in Intel speak) per CPU core */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* representing the TCs (or siblings in Intel speak) of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* representing the core map of multi-core chips of each logical CPU */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);

static DECLARE_COMPLETION(cpu_running);

/*
 * A logical cpu mask containing only one VPE per core to
 * reduce the number of IPIs on large MT systems.
 */
cpumask_t cpu_foreign_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_foreign_map);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

/* representing cpus for which core maps can be computed */
static cpumask_t cpu_core_setup_map;

cpumask_t cpu_coherent_mask;

#ifdef CONFIG_GENERIC_IRQ_IPI
static struct irq_desc *call_desc;
static struct irq_desc *sched_desc;
#endif

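/*
 * Record @cpu in the sibling setup mask and build its sibling map: every
 * already-set-up CPU that shares a core with @cpu is added to @cpu's map
 * and vice versa.
 */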
static inline void set_cpu_sibling_map(int cpu)
{
        int i;

        cpumask_set_cpu(cpu, &cpu_sibling_setup_map);

        if (smp_num_siblings > 1) {
                for_each_cpu(i, &cpu_sibling_setup_map) {
                        if (cpus_are_siblings(cpu, i)) {
                                cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
                                cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
                        }
                }
        } else
                cpumask_set_cpu(cpu, &cpu_sibling_map[cpu]);
}

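/*
 * Record @cpu in the core setup mask and, symmetrically, link it with every
 * already-set-up CPU that lives in the same physical package.
 */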
static inline void set_cpu_core_map(int cpu)
{
        int i;

        cpumask_set_cpu(cpu, &cpu_core_setup_map);

        for_each_cpu(i, &cpu_core_setup_map) {
                if (cpu_data[cpu].package == cpu_data[i].package) {
                        cpumask_set_cpu(i, &cpu_core_map[cpu]);
                        cpumask_set_cpu(cpu, &cpu_core_map[i]);
                }
        }
}

/*
 * Calculate a new cpu_foreign_map mask whenever a
 * new cpu appears or disappears.
 */
void calculate_cpu_foreign_map(void)
{
        int i, k, core_present;
        cpumask_t temp_foreign_map;

        /* Re-calculate the mask */
        cpumask_clear(&temp_foreign_map);
        for_each_online_cpu(i) {
                core_present = 0;
                for_each_cpu(k, &temp_foreign_map)
                        if (cpus_are_siblings(i, k))
                                core_present = 1;
                if (!core_present)
                        cpumask_set_cpu(i, &temp_foreign_map);
        }

        for_each_online_cpu(i)
                cpumask_andnot(&cpu_foreign_map[i],
                               &temp_foreign_map, &cpu_sibling_map[i]);
}

const struct plat_smp_ops *mp_ops;
EXPORT_SYMBOL(mp_ops);

void register_smp_ops(const struct plat_smp_ops *ops)
{
        if (mp_ops)
                printk(KERN_WARNING "Overriding previously set SMP ops\n");

        mp_ops = ops;
}

#ifdef CONFIG_GENERIC_IRQ_IPI
void mips_smp_send_ipi_single(int cpu, unsigned int action)
{
        mips_smp_send_ipi_mask(cpumask_of(cpu), action);
}

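/*
 * Send the IPI described by @action to every CPU in @mask via the generic
 * IPI IRQ domain, then make sure any targeted core that is not yet coherent
 * is powered up so it can actually take the interrupt.
 */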
void mips_smp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
        unsigned long flags;
        unsigned int core;
        int cpu;

        local_irq_save(flags);

        switch (action) {
        case SMP_CALL_FUNCTION:
                __ipi_send_mask(call_desc, mask);
                break;

        case SMP_RESCHEDULE_YOURSELF:
                __ipi_send_mask(sched_desc, mask);
                break;

        default:
                BUG();
        }

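        /*
         * If a target CPU sits on another core that is not currently
         * coherent (e.g. it has been powered down), use the CPC to power
         * that core back up so the CPU can handle the IPI.
         */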
        if (mips_cpc_present()) {
                for_each_cpu(cpu, mask) {
                        if (cpus_are_siblings(cpu, smp_processor_id()))
                                continue;

                        core = cpu_core(&cpu_data[cpu]);

                        while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) {
                                mips_cm_lock_other_cpu(cpu, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
                                mips_cpc_lock_other(core);
                                write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);
                                mips_cpc_unlock_other();
                                mips_cm_unlock_other();
                        }
                }
        }

        local_irq_restore(flags);
}

static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
{
        scheduler_ipi();

        return IRQ_HANDLED;
}

static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
        generic_smp_call_function_interrupt();

        return IRQ_HANDLED;
}

static struct irqaction irq_resched = {
        .handler        = ipi_resched_interrupt,
        .flags          = IRQF_PERCPU,
        .name           = "IPI resched"
};

static struct irqaction irq_call = {
        .handler        = ipi_call_interrupt,
        .flags          = IRQF_PERCPU,
        .name           = "IPI call"
};

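/*
 * Wire up one IPI virq: use the per-CPU flow handler and install the given
 * irqaction. Failure here is fatal, since IPIs are essential for SMP.
 */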
static void smp_ipi_init_one(unsigned int virq,
                                    struct irqaction *action)
{
        int ret;

        irq_set_handler(virq, handle_percpu_irq);
        ret = setup_irq(virq, action);
        BUG_ON(ret);
}

static unsigned int call_virq, sched_virq;

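/*
 * Reserve and set up the call-function and reschedule IPIs for the CPUs in
 * @mask, using whichever IPI IRQ domain the platform provides.
 */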
int mips_smp_ipi_allocate(const struct cpumask *mask)
{
        int virq;
        struct irq_domain *ipidomain;
        struct device_node *node;

        node = of_irq_find_parent(of_root);
        ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);

        /*
         * Some platforms have only a partial DT setup, so if we found an
         * irq node but no ipidomain, fall back to searching for an IPI
         * domain that is not described in the DT.
         */
        if (node && !ipidomain)
                ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);

        /*
         * There are systems which use IPI IRQ domains, but only have one
         * registered when some runtime condition is met. For example a Malta
         * kernel may include support for GIC & CPU interrupt controller IPI
         * IRQ domains, but if run on a system with no GIC & no MT ASE then
         * neither will be supported or registered.
         *
         * We only have a problem if we're actually using multiple CPUs so fail
         * loudly if that is the case. Otherwise simply return, skipping IPI
         * setup, if we're running with only a single CPU.
         */
        if (!ipidomain) {
                BUG_ON(num_present_cpus() > 1);
                return 0;
        }

        virq = irq_reserve_ipi(ipidomain, mask);
        BUG_ON(!virq);
        if (!call_virq)
                call_virq = virq;

        virq = irq_reserve_ipi(ipidomain, mask);
        BUG_ON(!virq);
        if (!sched_virq)
                sched_virq = virq;

        if (irq_domain_is_ipi_per_cpu(ipidomain)) {
                int cpu;

                for_each_cpu(cpu, mask) {
                        smp_ipi_init_one(call_virq + cpu, &irq_call);
                        smp_ipi_init_one(sched_virq + cpu, &irq_resched);
                }
        } else {
                smp_ipi_init_one(call_virq, &irq_call);
                smp_ipi_init_one(sched_virq, &irq_resched);
        }

        return 0;
}

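/*
 * Undo mips_smp_ipi_allocate() for the CPUs in @mask: remove the per-CPU
 * irqactions where applicable and release the reserved IPI virqs.
 */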
int mips_smp_ipi_free(const struct cpumask *mask)
{
        struct irq_domain *ipidomain;
        struct device_node *node;

        node = of_irq_find_parent(of_root);
        ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);

        /*
         * Some platforms have only a partial DT setup, so if we found an
         * irq node but no ipidomain, fall back to searching for an IPI
         * domain that is not described in the DT.
         */
        if (node && !ipidomain)
                ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);

        BUG_ON(!ipidomain);

        if (irq_domain_is_ipi_per_cpu(ipidomain)) {
                int cpu;

                for_each_cpu(cpu, mask) {
                        remove_irq(call_virq + cpu, &irq_call);
                        remove_irq(sched_virq + cpu, &irq_resched);
                }
        }
        irq_destroy_ipi(call_virq, mask);
        irq_destroy_ipi(sched_virq, mask);
        return 0;
}

static int __init mips_smp_ipi_init(void)
{
        if (num_possible_cpus() == 1)
                return 0;

        mips_smp_ipi_allocate(cpu_possible_mask);

        call_desc = irq_to_desc(call_virq);
        sched_desc = irq_to_desc(sched_virq);

        return 0;
}
early_initcall(mips_smp_ipi_init);
#endif

/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage void start_secondary(void)
{
        unsigned int cpu;

        cpu_probe();
        per_cpu_trap_init(false);
        mips_clockevent_init();
        mp_ops->init_secondary();
        cpu_report();
        maar_init();

        /*
         * XXX parity protection should be folded in here when it's converted
         * to an option instead of something based on .cputype
         */

        calibrate_delay();
        preempt_disable();
        cpu = smp_processor_id();
        cpu_data[cpu].udelay_val = loops_per_jiffy;

        cpumask_set_cpu(cpu, &cpu_coherent_mask);
        notify_cpu_starting(cpu);

        set_cpu_online(cpu, true);

        set_cpu_sibling_map(cpu);
        set_cpu_core_map(cpu);

        calculate_cpu_foreign_map();

        complete(&cpu_running);
        synchronise_count_slave(cpu);

        /*
         * Interrupts will be enabled in ->smp_finish(); enabling them any
         * earlier is dangerous.
         */
        WARN_ON_ONCE(!irqs_disabled());
        mp_ops->smp_finish();

        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

static void stop_this_cpu(void *dummy)
{
        /*
         * Remove this CPU:
         */

        set_cpu_online(smp_processor_id(), false);
        calculate_cpu_foreign_map();
        local_irq_disable();
        while (1);
}

void smp_send_stop(void)
{
        smp_call_function(stop_this_cpu, NULL, 0);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
        init_new_context(current, &init_mm);
        current_thread_info()->cpu = 0;
        mp_ops->prepare_cpus(max_cpus);
        set_cpu_sibling_map(0);
        set_cpu_core_map(0);
        calculate_cpu_foreign_map();
#ifndef CONFIG_HOTPLUG_CPU
        init_cpu_present(cpu_possible_mask);
#endif
        cpumask_copy(&cpu_coherent_mask, cpu_possible_mask);
}

/* preload SMP state for boot cpu */
void smp_prepare_boot_cpu(void)
{
        set_cpu_possible(0, true);
        set_cpu_online(0, true);
}

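/*
 * Bring up a secondary CPU: ask the platform code to boot it, wait for it
 * to signal cpu_running, then synchronise the count registers with it.
 */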
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
        int err;

        err = mp_ops->boot_secondary(cpu, tidle);
        if (err)
                return err;

        /*
         * We must check for timeout here, as the CPU will not be marked
         * online until the counters are synchronised.
         */
        if (!wait_for_completion_timeout(&cpu_running,
                                         msecs_to_jiffies(1000))) {
                pr_crit("CPU%u: failed to start\n", cpu);
                return -EIO;
        }

        synchronise_count_master(cpu);
        return 0;
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
        return 0;
}

static void flush_tlb_all_ipi(void *info)
{
        local_flush_tlb_all();
}

void flush_tlb_all(void)
{
        on_each_cpu(flush_tlb_all_ipi, NULL, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
        local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * Special Variant of smp_call_function for use by TLB functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 */
static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
{
        smp_call_function(func, info, 1);
}

static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
{
        preempt_disable();

        smp_on_other_tlbs(func, info);
        func(info);

        preempt_enable();
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * contexts on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (e.g. debuggers doing the flushes on
 * behalf of debuggees, kswapd stealing pages from another process etc.).
 * Kanoj 07/00.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
        preempt_disable();

        if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
        } else {
                unsigned int cpu;

                for_each_online_cpu(cpu) {
                        if (cpu != smp_processor_id() && cpu_context(cpu, mm))
                                cpu_context(cpu, mm) = 0;
                }
        }
        local_flush_tlb_mm(mm);

        preempt_enable();
}

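/* Argument block passed to the TLB flush IPI handlers below. */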
struct flush_tlb_data {
        struct vm_area_struct *vma;
        unsigned long addr1;
        unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
        struct flush_tlb_data *fd = info;

        local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;

        preempt_disable();
        if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                struct flush_tlb_data fd = {
                        .vma = vma,
                        .addr1 = start,
                        .addr2 = end,
                };

                smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
        } else {
                unsigned int cpu;
                int exec = vma->vm_flags & VM_EXEC;

                for_each_online_cpu(cpu) {
                        /*
                         * flush_cache_range() will only fully flush the
                         * icache if the VMA is executable; otherwise we must
                         * invalidate the ASID without making it look to
                         * has_valid_asid() as though the mm has been
                         * completely unused by that CPU.
                         */
                        if (cpu != smp_processor_id() && cpu_context(cpu, mm))
                                cpu_context(cpu, mm) = !exec;
                }
        }
        local_flush_tlb_range(vma, start, end);
        preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
        struct flush_tlb_data *fd = info;

        local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        struct flush_tlb_data fd = {
                .addr1 = start,
                .addr2 = end,
        };

        on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
        struct flush_tlb_data *fd = info;

        local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        preempt_disable();
        if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
                struct flush_tlb_data fd = {
                        .vma = vma,
                        .addr1 = page,
                };

                smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
        } else {
                unsigned int cpu;

                for_each_online_cpu(cpu) {
                        /*
                         * flush_cache_page() only does partial flushes, so
                         * invalidate the ASID without making it look to
                         * has_valid_asid() as though the mm has been
                         * completely unused by that CPU.
                         */
                        if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
                                cpu_context(cpu, vma->vm_mm) = 1;
                }
        }
        local_flush_tlb_page(vma, page);
        preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
        unsigned long vaddr = (unsigned long) info;

        local_flush_tlb_one(vaddr);
}

void flush_tlb_one(unsigned long vaddr)
{
        smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
}

EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST

static DEFINE_PER_CPU(atomic_t, tick_broadcast_count);
static DEFINE_PER_CPU(call_single_data_t, tick_broadcast_csd);

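/*
 * Deliver a broadcast tick to each CPU in @mask using its per-CPU CSD;
 * tick_broadcast_count ensures only one CSD per CPU is in flight at a time
 * (it is cleared again in tick_broadcast_callee()).
 */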
void tick_broadcast(const struct cpumask *mask)
{
        atomic_t *count;
        call_single_data_t *csd;
        int cpu;

        for_each_cpu(cpu, mask) {
                count = &per_cpu(tick_broadcast_count, cpu);
                csd = &per_cpu(tick_broadcast_csd, cpu);

                if (atomic_inc_return(count) == 1)
                        smp_call_function_single_async(cpu, csd);
        }
}

static void tick_broadcast_callee(void *info)
{
        int cpu = smp_processor_id();
        tick_receive_broadcast();
        atomic_set(&per_cpu(tick_broadcast_count, cpu), 0);
}

static int __init tick_broadcast_init(void)
{
        call_single_data_t *csd;
        int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                csd = &per_cpu(tick_broadcast_csd, cpu);
                csd->func = tick_broadcast_callee;
        }

        return 0;
}
early_initcall(tick_broadcast_init);

#endif /* CONFIG_GENERIC_CLOCKEVENTS_BROADCAST */