1 // SPDX-License-Identifier: GPL-2.0-or-later
5 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
6 * deal of code from the sparc and intel versions.
8 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
10 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
11 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
16 #include <linux/kernel.h>
17 #include <linux/export.h>
18 #include <linux/sched/mm.h>
19 #include <linux/sched/task_stack.h>
20 #include <linux/sched/topology.h>
21 #include <linux/smp.h>
22 #include <linux/interrupt.h>
23 #include <linux/delay.h>
24 #include <linux/init.h>
25 #include <linux/spinlock.h>
26 #include <linux/cache.h>
27 #include <linux/err.h>
28 #include <linux/device.h>
29 #include <linux/cpu.h>
30 #include <linux/notifier.h>
31 #include <linux/topology.h>
32 #include <linux/profile.h>
33 #include <linux/processor.h>
34 #include <linux/random.h>
35 #include <linux/stackprotector.h>
36 #include <linux/pgtable.h>
38 #include <asm/ptrace.h>
39 #include <linux/atomic.h>
41 #include <asm/hw_irq.h>
42 #include <asm/kvm_ppc.h>
43 #include <asm/dbell.h>
48 #include <asm/machdep.h>
49 #include <asm/cputhreads.h>
50 #include <asm/cputable.h>
52 #include <asm/vdso_datapage.h>
57 #include <asm/debug.h>
58 #include <asm/kexec.h>
59 #include <asm/asm-prototypes.h>
60 #include <asm/cpu_has_feature.h>
61 #include <asm/ftrace.h>
66 #define DBG(fmt...) udbg_printf(fmt)
71 #ifdef CONFIG_HOTPLUG_CPU
72 /* State of each CPU during hotplug phases */
73 static DEFINE_PER_CPU(int, cpu_state) = { 0 };
76 struct task_struct *secondary_current;
78 bool coregroup_enabled;
80 DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
81 DEFINE_PER_CPU(cpumask_var_t, cpu_smallcore_map);
82 DEFINE_PER_CPU(cpumask_var_t, cpu_l2_cache_map);
83 DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
84 DEFINE_PER_CPU(cpumask_var_t, cpu_coregroup_map);
86 EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
87 EXPORT_PER_CPU_SYMBOL(cpu_l2_cache_map);
88 EXPORT_PER_CPU_SYMBOL(cpu_core_map);
89 EXPORT_SYMBOL_GPL(has_big_cores);
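/*
 * Rough guide to the per-cpu topology masks maintained below:
 *  - cpu_sibling_map:   hardware threads in the same core.
 *  - cpu_smallcore_map: threads sharing an L1 within a "big" core
 *                       (see the ibm,thread-groups parsing further down).
 *  - cpu_l2_cache_map:  CPUs sharing an L2 cache (update_mask_by_l2()).
 *  - cpu_core_map:      kept only for the long-exported ABI; a snapshot of
 *                       cpu_cpu_mask().
 *  - cpu_coregroup_map: CPUs in the same coregroup, when the platform
 *                       reports one (update_coregroup_mask()).
 */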
92 #ifdef CONFIG_SCHED_SMT
100 #define MAX_THREAD_LIST_SIZE 8
101 #define THREAD_GROUP_SHARE_L1 1
102 struct thread_groups {
103 unsigned int property;
104 unsigned int nr_groups;
105 unsigned int threads_per_group;
106 unsigned int thread_list[MAX_THREAD_LIST_SIZE];
110 * On big-core systems, cpu_l1_cache_map for each CPU corresponds to
111 * the set of its siblings that share the L1 cache.
113 DEFINE_PER_CPU(cpumask_var_t, cpu_l1_cache_map);
115 /* SMP operations for this machine */
116 struct smp_ops_t *smp_ops;
118 /* Can't be static due to PowerMac hackery */
119 volatile unsigned int cpu_callin_map[NR_CPUS];
121 int smt_enabled_at_boot = 1;
124 * Returns 1 if the specified cpu should be brought up during boot.
125 * Used to inhibit booting threads if they've been disabled or
126 * limited on the command line.
128 int smp_generic_cpu_bootable(unsigned int nr)
130 /* Special case - we inhibit secondary thread startup
131 * during boot if the user requests it.
133 if (system_state < SYSTEM_RUNNING && cpu_has_feature(CPU_FTR_SMT)) {
134 if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
136 if (smt_enabled_at_boot
137 && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
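/*
 * Illustrative example (assuming the usual "smt-enabled=" early parameter
 * feeding smt_enabled_at_boot): booting an 8-thread-per-core system with
 * smt-enabled=2 leaves smt_enabled_at_boot == 2, so threads 0 and 1 of each
 * core pass the checks above while threads 2..7 are not brought up during
 * boot (they can still be onlined later via CPU hotplug).
 */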
146 int smp_generic_kick_cpu(int nr)
148 if (nr < 0 || nr >= nr_cpu_ids)
152 * The processor is currently spinning, waiting for the
153 * cpu_start field to become non-zero. After we set cpu_start,
154 * the processor will continue on to secondary_start
156 if (!paca_ptrs[nr]->cpu_start) {
157 paca_ptrs[nr]->cpu_start = 1;
162 #ifdef CONFIG_HOTPLUG_CPU
164 * OK, it's not there, so it might be soft-unplugged; let's
165 * try to bring it back
167 generic_set_cpu_up(nr);
169 smp_send_reschedule(nr);
170 #endif /* CONFIG_HOTPLUG_CPU */
174 #endif /* CONFIG_PPC64 */
176 static irqreturn_t call_function_action(int irq, void *data)
178 generic_smp_call_function_interrupt();
182 static irqreturn_t reschedule_action(int irq, void *data)
188 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
189 static irqreturn_t tick_broadcast_ipi_action(int irq, void *data)
191 timer_broadcast_interrupt();
196 #ifdef CONFIG_NMI_IPI
197 static irqreturn_t nmi_ipi_action(int irq, void *data)
199 smp_handle_nmi_ipi(get_irq_regs());
204 static irq_handler_t smp_ipi_action[] = {
205 [PPC_MSG_CALL_FUNCTION] = call_function_action,
206 [PPC_MSG_RESCHEDULE] = reschedule_action,
207 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
208 [PPC_MSG_TICK_BROADCAST] = tick_broadcast_ipi_action,
210 #ifdef CONFIG_NMI_IPI
211 [PPC_MSG_NMI_IPI] = nmi_ipi_action,
216 * The NMI IPI is a fallback and not truly non-maskable. It is simpler
217 * than going through the call function infrastructure, and strongly
218 * serialized, so it is more appropriate for debugging.
220 const char *smp_ipi_name[] = {
221 [PPC_MSG_CALL_FUNCTION] = "ipi call function",
222 [PPC_MSG_RESCHEDULE] = "ipi reschedule",
223 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
224 [PPC_MSG_TICK_BROADCAST] = "ipi tick-broadcast",
226 #ifdef CONFIG_NMI_IPI
227 [PPC_MSG_NMI_IPI] = "nmi ipi",
231 /* optional function to request ipi, for controllers with >= 4 ipis */
232 int smp_request_message_ipi(int virq, int msg)
236 if (msg < 0 || msg > PPC_MSG_NMI_IPI)
238 #ifndef CONFIG_NMI_IPI
239 if (msg == PPC_MSG_NMI_IPI)
243 err = request_irq(virq, smp_ipi_action[msg],
244 IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
245 smp_ipi_name[msg], NULL);
246 WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
247 virq, smp_ipi_name[msg], err);
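/*
 * Typical usage (illustrative): an interrupt controller driver that has a
 * dedicated hardware IPI per message calls this once per PPC_MSG_* it
 * supports, e.g.
 *
 *	smp_request_message_ipi(virq, PPC_MSG_CALL_FUNCTION);
 *
 * Controllers that cannot do this typically use the muxed path below
 * (CONFIG_PPC_SMP_MUXED_IPI) and provide smp_ops->cause_ipi instead.
 */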
252 #ifdef CONFIG_PPC_SMP_MUXED_IPI
253 struct cpu_messages {
254 long messages; /* current messages */
256 static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);
258 void smp_muxed_ipi_set_message(int cpu, int msg)
260 struct cpu_messages *info = &per_cpu(ipi_message, cpu);
261 char *message = (char *)&info->messages;
264 * Order previous accesses before accesses in the IPI handler.
270 void smp_muxed_ipi_message_pass(int cpu, int msg)
272 smp_muxed_ipi_set_message(cpu, msg);
275 * cause_ipi functions are required to include a full barrier
276 * before doing whatever causes the IPI.
278 smp_ops->cause_ipi(cpu);
281 #ifdef __BIG_ENDIAN__
282 #define IPI_MESSAGE(A) (1uL << ((BITS_PER_LONG - 8) - 8 * (A)))
284 #define IPI_MESSAGE(A) (1uL << (8 * (A)))
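/*
 * Layout note (illustrative): info->messages is treated as an array of
 * bytes, one byte per message type, so smp_muxed_ipi_set_message() can set
 * its byte with a plain store while the receiver xchg()s the whole word.
 * IPI_MESSAGE(A) is the bit corresponding to the low-order bit of message
 * A's byte.  Assuming the usual numbering (PPC_MSG_CALL_FUNCTION == 0,
 * PPC_MSG_RESCHEDULE == 1), on a 64-bit little-endian kernel
 * IPI_MESSAGE(PPC_MSG_RESCHEDULE) is 1UL << 8, and on big-endian it is
 * 1UL << 48.
 */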
287 irqreturn_t smp_ipi_demux(void)
289 mb(); /* order any irq clear */
291 return smp_ipi_demux_relaxed();
294 /* sync-free variant. Callers should ensure synchronization */
295 irqreturn_t smp_ipi_demux_relaxed(void)
297 struct cpu_messages *info;
300 info = this_cpu_ptr(&ipi_message);
302 all = xchg(&info->messages, 0);
303 #if defined(CONFIG_KVM_XICS) && defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
305 * Must check for PPC_MSG_RM_HOST_ACTION messages
306 * before PPC_MSG_CALL_FUNCTION messages because when
307 * a VM is destroyed, we call kick_all_cpus_sync()
308 * to ensure that any pending PPC_MSG_RM_HOST_ACTION
309 * messages have completed before we free any VCPUs.
311 if (all & IPI_MESSAGE(PPC_MSG_RM_HOST_ACTION))
312 kvmppc_xics_ipi_action();
314 if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
315 generic_smp_call_function_interrupt();
316 if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
318 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
319 if (all & IPI_MESSAGE(PPC_MSG_TICK_BROADCAST))
320 timer_broadcast_interrupt();
322 #ifdef CONFIG_NMI_IPI
323 if (all & IPI_MESSAGE(PPC_MSG_NMI_IPI))
324 nmi_ipi_action(0, NULL);
326 } while (info->messages);
330 #endif /* CONFIG_PPC_SMP_MUXED_IPI */
332 static inline void do_message_pass(int cpu, int msg)
334 if (smp_ops->message_pass)
335 smp_ops->message_pass(cpu, msg);
336 #ifdef CONFIG_PPC_SMP_MUXED_IPI
338 smp_muxed_ipi_message_pass(cpu, msg);
342 void smp_send_reschedule(int cpu)
345 do_message_pass(cpu, PPC_MSG_RESCHEDULE);
347 EXPORT_SYMBOL_GPL(smp_send_reschedule);
349 void arch_send_call_function_single_ipi(int cpu)
351 do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
354 void arch_send_call_function_ipi_mask(const struct cpumask *mask)
358 for_each_cpu(cpu, mask)
359 do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
362 #ifdef CONFIG_NMI_IPI
367 * NMI IPIs may not be recoverable, so they should not be used as an ongoing part of
368 * a running system. They can be used for crash, debug, halt/reboot, etc.
370 * The IPI call waits with interrupts disabled until all targets enter the
371 * NMI handler, then returns. Subsequent IPIs can be issued before targets
372 * have returned from their handlers, so there is no guarantee about
373 * concurrency or re-entrancy.
375 * A new NMI can be issued before all targets exit the handler.
377 * The IPI call may time out without all targets entering the NMI handler.
378 * In that case, there is some logic to recover (and ignore subsequent
379 * NMI interrupts that may eventually be raised), but the platform interrupt
380 * handler may not be able to distinguish this from other exception causes,
381 * which may cause a crash.
384 static atomic_t __nmi_ipi_lock = ATOMIC_INIT(0);
385 static struct cpumask nmi_ipi_pending_mask;
386 static bool nmi_ipi_busy = false;
387 static void (*nmi_ipi_function)(struct pt_regs *) = NULL;
389 static void nmi_ipi_lock_start(unsigned long *flags)
391 raw_local_irq_save(*flags);
393 while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
394 raw_local_irq_restore(*flags);
395 spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
396 raw_local_irq_save(*flags);
401 static void nmi_ipi_lock(void)
403 while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
404 spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
407 static void nmi_ipi_unlock(void)
410 WARN_ON(atomic_read(&__nmi_ipi_lock) != 1);
411 atomic_set(&__nmi_ipi_lock, 0);
414 static void nmi_ipi_unlock_end(unsigned long *flags)
417 raw_local_irq_restore(*flags);
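/*
 * Note on the locking above: __nmi_ipi_lock is an open-coded test-and-set
 * spinlock (atomic_cmpxchg() plus spin_until_cond()), which keeps it usable
 * from NMI context.  nmi_ipi_lock_start()/nmi_ipi_unlock_end() additionally
 * disable interrupts and save the state into *flags around the critical
 * section; the plain nmi_ipi_lock()/nmi_ipi_unlock() pair is used where
 * interrupts are already disabled.
 */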
421 * Platform NMI handler calls this to ack
423 int smp_handle_nmi_ipi(struct pt_regs *regs)
425 void (*fn)(struct pt_regs *) = NULL;
427 int me = raw_smp_processor_id();
431 * Unexpected NMIs are possible here because the interrupt may not
432 * be able to distinguish NMI IPIs from other types of NMIs, or
433 * because the caller may have timed out.
435 nmi_ipi_lock_start(&flags);
436 if (cpumask_test_cpu(me, &nmi_ipi_pending_mask)) {
437 cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
438 fn = READ_ONCE(nmi_ipi_function);
442 nmi_ipi_unlock_end(&flags);
450 static void do_smp_send_nmi_ipi(int cpu, bool safe)
452 if (!safe && smp_ops->cause_nmi_ipi && smp_ops->cause_nmi_ipi(cpu))
456 do_message_pass(cpu, PPC_MSG_NMI_IPI);
460 for_each_online_cpu(c) {
461 if (c == raw_smp_processor_id())
463 do_message_pass(c, PPC_MSG_NMI_IPI);
469 * - cpu is the target CPU (must not be this CPU), or NMI_IPI_ALL_OTHERS.
470 * - fn is the target callback function.
471 * - delay_us > 0 is the delay before giving up waiting for targets to
472 * begin executing the handler, == 0 specifies indefinite delay.
474 static int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *),
475 u64 delay_us, bool safe)
478 int me = raw_smp_processor_id();
482 BUG_ON(cpu < 0 && cpu != NMI_IPI_ALL_OTHERS);
484 if (unlikely(!smp_ops))
487 nmi_ipi_lock_start(&flags);
488 while (nmi_ipi_busy) {
489 nmi_ipi_unlock_end(&flags);
490 spin_until_cond(!nmi_ipi_busy);
491 nmi_ipi_lock_start(&flags);
494 nmi_ipi_function = fn;
496 WARN_ON_ONCE(!cpumask_empty(&nmi_ipi_pending_mask));
500 cpumask_copy(&nmi_ipi_pending_mask, cpu_online_mask);
501 cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
503 cpumask_set_cpu(cpu, &nmi_ipi_pending_mask);
508 /* Interrupts remain hard disabled */
510 do_smp_send_nmi_ipi(cpu, safe);
513 /* nmi_ipi_busy is set here, so unlock/lock is okay */
514 while (!cpumask_empty(&nmi_ipi_pending_mask)) {
525 if (!cpumask_empty(&nmi_ipi_pending_mask)) {
526 /* Timeout waiting for CPUs to call smp_handle_nmi_ipi */
528 cpumask_clear(&nmi_ipi_pending_mask);
531 nmi_ipi_function = NULL;
532 nmi_ipi_busy = false;
534 nmi_ipi_unlock_end(&flags);
539 int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
541 return __smp_send_nmi_ipi(cpu, fn, delay_us, false);
544 int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
546 return __smp_send_nmi_ipi(cpu, fn, delay_us, true);
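/*
 * Usage in this file: the debugger, crash and stop paths below all use
 * smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, fn, 1000000), i.e. they give the
 * other CPUs up to one second (delay_us == 1000000) to enter the handler
 * before giving up.
 */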
548 #endif /* CONFIG_NMI_IPI */
550 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
551 void tick_broadcast(const struct cpumask *mask)
555 for_each_cpu(cpu, mask)
556 do_message_pass(cpu, PPC_MSG_TICK_BROADCAST);
560 #ifdef CONFIG_DEBUGGER
561 void debugger_ipi_callback(struct pt_regs *regs)
566 void smp_send_debugger_break(void)
568 smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, debugger_ipi_callback, 1000000);
572 #ifdef CONFIG_KEXEC_CORE
573 void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
577 smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, crash_ipi_callback, 1000000);
578 if (kdump_in_progress() && crash_wake_offline) {
579 for_each_present_cpu(cpu) {
583 * crash_ipi_callback will wait for
584 * all cpus, including offline CPUs.
585 * We don't care about nmi_ipi_function.
586 * Offline cpus will jump straight into
587 * crash_ipi_callback, we can skip the
588 * entire NMI dance and waiting for
589 * cpus to clear pending mask, etc.
591 do_smp_send_nmi_ipi(cpu, false);
597 #ifdef CONFIG_NMI_IPI
598 static void nmi_stop_this_cpu(struct pt_regs *regs)
601 * IRQs are already hard disabled by the smp_handle_nmi_ipi.
608 void smp_send_stop(void)
610 smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, nmi_stop_this_cpu, 1000000);
613 #else /* CONFIG_NMI_IPI */
615 static void stop_this_cpu(void *dummy)
623 void smp_send_stop(void)
625 static bool stopped = false;
628 * Prevent waiting on csd lock from a previous smp_send_stop.
629 * This is racy, but in general callers try to do the right
630 * thing and only fire off one smp_send_stop (e.g., see
638 smp_call_function(stop_this_cpu, NULL, 0);
640 #endif /* CONFIG_NMI_IPI */
642 struct task_struct *current_set[NR_CPUS];
644 static void smp_store_cpu_info(int id)
646 per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
647 #ifdef CONFIG_PPC_FSL_BOOK3E
648 per_cpu(next_tlbcam_idx, id)
649 = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
654 * Relationships between CPUs are maintained in a set of per-cpu cpumasks, so
655 * rather than just passing around the cpumask we pass around a function that
656 * returns that cpumask for the given CPU.
658 static void set_cpus_related(int i, int j, struct cpumask *(*get_cpumask)(int))
660 cpumask_set_cpu(i, get_cpumask(j));
661 cpumask_set_cpu(j, get_cpumask(i));
664 #ifdef CONFIG_HOTPLUG_CPU
665 static void set_cpus_unrelated(int i, int j,
666 struct cpumask *(*get_cpumask)(int))
668 cpumask_clear_cpu(i, get_cpumask(j));
669 cpumask_clear_cpu(j, get_cpumask(i));
674 * Extends set_cpus_related. Instead of setting one CPU at a time in
675 * dstmask, set all of srcmask in one shot. dstmask should be a superset of srcmask.
677 static void or_cpumasks_related(int i, int j, struct cpumask *(*srcmask)(int),
678 struct cpumask *(*dstmask)(int))
680 struct cpumask *mask;
684 for_each_cpu(k, srcmask(i))
685 cpumask_or(dstmask(k), dstmask(k), mask);
691 for_each_cpu(k, srcmask(j))
692 cpumask_or(dstmask(k), dstmask(k), mask);
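/*
 * For example (illustrative), or_cpumasks_related(cpu, i, submask_fn,
 * cpu_l2_cache_mask) as used in update_mask_by_l2() below ORs
 * submask_fn(i) into the L2 mask of every CPU in submask_fn(cpu), and vice
 * versa, so whole sibling groups are linked in one pass rather than one
 * CPU pair at a time.
 */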
696 * parse_thread_groups: Parses the "ibm,thread-groups" device tree
697 * property for the CPU device node @dn and stores
698 * the parsed output in the thread_groups
699 * structure @tg if the ibm,thread-groups[0] value matches @property.
702 * @dn: The device node of the CPU device.
703 * @tg: Pointer to a thread group structure into which the parsed
704 * output of "ibm,thread-groups" is stored.
705 * @property: The property of the thread-group that the caller is interested in.
708 * ibm,thread-groups[0..N-1] array defines which group of threads in
709 * the CPU-device node can be grouped together based on the property.
711 * ibm,thread-groups[0] tells us the property based on which the
712 * threads are being grouped together. If this value is 1, it implies
713 * that the threads in the same group share the L1 and translation cache.
715 * ibm,thread-groups[1] tells us how many such thread groups exist.
717 * ibm,thread-groups[2] tells us the number of threads in each such group.
720 * ibm,thread-groups[3..N-1] is the list of threads identified by
721 * "ibm,ppc-interrupt-server#s" arranged as per their membership in
724 * Example: If ibm,thread-groups = [1,2,4,5,6,7,8,9,10,11,12] it
725 * implies that there are 2 groups of 4 threads each, where each group
726 * of threads shares the L1 and translation cache.
728 * The "ibm,ppc-interrupt-server#s" of the first group is {5,6,7,8}
729 * and the "ibm,ppc-interrupt-server#s" of the second group is {9, 10, 11, 12}.
732 * Returns 0 on success, -EINVAL if the property does not exist,
733 * -ENODATA if the property does not have a value, and -EOVERFLOW if the
734 * property data isn't large enough.
736 static int parse_thread_groups(struct device_node *dn,
737 struct thread_groups *tg,
738 unsigned int property)
741 u32 thread_group_array[3 + MAX_THREAD_LIST_SIZE];
743 size_t total_threads;
746 ret = of_property_read_u32_array(dn, "ibm,thread-groups",
747 thread_group_array, 3);
751 tg->property = thread_group_array[0];
752 tg->nr_groups = thread_group_array[1];
753 tg->threads_per_group = thread_group_array[2];
754 if (tg->property != property ||
756 tg->threads_per_group < 1)
759 total_threads = tg->nr_groups * tg->threads_per_group;
761 ret = of_property_read_u32_array(dn, "ibm,thread-groups",
767 thread_list = &thread_group_array[3];
769 for (i = 0 ; i < total_threads; i++)
770 tg->thread_list[i] = thread_list[i];
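/*
 * Worked example, using the array from the comment above:
 * ibm,thread-groups = [1,2,4,5,6,7,8,9,10,11,12] parses as
 *	tg->property          = 1   (threads share L1/translation cache)
 *	tg->nr_groups         = 2
 *	tg->threads_per_group = 4
 *	tg->thread_list       = {5,6,7,8,9,10,11,12}
 * i.e. group 0 is the interrupt servers {5,6,7,8} and group 1 is
 * {9,10,11,12}.
 */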
776 * get_cpu_thread_group_start : Searches the thread group in tg->thread_list
777 * that @cpu belongs to.
779 * @cpu : The logical CPU whose thread group is being searched.
780 * @tg : The thread-group structure of the CPU node which @cpu belongs
783 * Returns the index to tg->thread_list that points to the start
784 * of the thread_group that @cpu belongs to.
786 * Returns -1 if cpu doesn't belong to any of the groups pointed to by tg->thread_list.
789 static int get_cpu_thread_group_start(int cpu, struct thread_groups *tg)
791 int hw_cpu_id = get_hard_smp_processor_id(cpu);
794 for (i = 0; i < tg->nr_groups; i++) {
795 int group_start = i * tg->threads_per_group;
797 for (j = 0; j < tg->threads_per_group; j++) {
798 int idx = group_start + j;
800 if (tg->thread_list[idx] == hw_cpu_id)
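/*
 * Continuing the example above: for a logical CPU whose
 * "ibm,ppc-interrupt-server#s" value is 10, the match is found in the
 * second group, so the function returns group_start == 4, the index of
 * that group's first entry in tg->thread_list.
 */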
808 static int init_cpu_l1_cache_map(int cpu)
811 struct device_node *dn = of_get_cpu_node(cpu, NULL);
812 struct thread_groups tg = {.property = 0,
814 .threads_per_group = 0};
815 int first_thread = cpu_first_thread_sibling(cpu);
816 int i, cpu_group_start = -1, err = 0;
821 err = parse_thread_groups(dn, &tg, THREAD_GROUP_SHARE_L1);
825 cpu_group_start = get_cpu_thread_group_start(cpu, &tg);
827 if (unlikely(cpu_group_start == -1)) {
833 zalloc_cpumask_var_node(&per_cpu(cpu_l1_cache_map, cpu),
834 GFP_KERNEL, cpu_to_node(cpu));
836 for (i = first_thread; i < first_thread + threads_per_core; i++) {
837 int i_group_start = get_cpu_thread_group_start(i, &tg);
839 if (unlikely(i_group_start == -1)) {
845 if (i_group_start == cpu_group_start)
846 cpumask_set_cpu(i, per_cpu(cpu_l1_cache_map, cpu));
854 static bool shared_caches;
856 #ifdef CONFIG_SCHED_SMT
857 /* Scheduler domain flags for the SMT level; adds SD_ASYM_PACKING on asymmetric-SMT CPUs */
858 static int powerpc_smt_flags(void)
860 int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
862 if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
863 printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
864 flags |= SD_ASYM_PACKING;
871 * P9 has a slightly odd architecture where pairs of cores share an L2 cache.
872 * This topology makes it *much* cheaper to migrate tasks between adjacent cores
873 * since the migrated task remains cache hot. We want to take advantage of this
874 * at the scheduler level so an extra topology level is required.
876 static int powerpc_shared_cache_flags(void)
878 return SD_SHARE_PKG_RESOURCES;
882 * We can't just pass cpu_l2_cache_mask() directly because it
883 * returns a non-const pointer and the compiler barfs on that.
885 static const struct cpumask *shared_cache_mask(int cpu)
887 return per_cpu(cpu_l2_cache_map, cpu);
890 #ifdef CONFIG_SCHED_SMT
891 static const struct cpumask *smallcore_smt_mask(int cpu)
893 return cpu_smallcore_mask(cpu);
897 static struct cpumask *cpu_coregroup_mask(int cpu)
899 return per_cpu(cpu_coregroup_map, cpu);
902 static bool has_coregroup_support(void)
904 return coregroup_enabled;
907 static const struct cpumask *cpu_mc_mask(int cpu)
909 return cpu_coregroup_mask(cpu);
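/*
 * Static description of the scheduler topology.  Note that fixup_topology()
 * below may rewrite entries before the table is handed to
 * set_sched_topology(): the SMT level switches to smallcore_smt_mask() on
 * big-core systems, and the MC level falls back to the CACHE mask when the
 * platform exposes no coregroup information.
 */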
912 static struct sched_domain_topology_level powerpc_topology[] = {
913 #ifdef CONFIG_SCHED_SMT
914 { cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
916 { shared_cache_mask, powerpc_shared_cache_flags, SD_INIT_NAME(CACHE) },
917 { cpu_mc_mask, SD_INIT_NAME(MC) },
918 { cpu_cpu_mask, SD_INIT_NAME(DIE) },
922 static int init_big_cores(void)
926 for_each_possible_cpu(cpu) {
927 int err = init_cpu_l1_cache_map(cpu);
932 zalloc_cpumask_var_node(&per_cpu(cpu_smallcore_map, cpu),
937 has_big_cores = true;
941 void __init smp_prepare_cpus(unsigned int max_cpus)
945 DBG("smp_prepare_cpus\n");
948 * setup_cpu may need to be called on the boot cpu. We haven't
949 * spun any cpus up yet, but let's be paranoid.
951 BUG_ON(boot_cpuid != smp_processor_id());
954 smp_store_cpu_info(boot_cpuid);
955 cpu_callin_map[boot_cpuid] = 1;
957 for_each_possible_cpu(cpu) {
958 zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
959 GFP_KERNEL, cpu_to_node(cpu));
960 zalloc_cpumask_var_node(&per_cpu(cpu_l2_cache_map, cpu),
961 GFP_KERNEL, cpu_to_node(cpu));
962 zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
963 GFP_KERNEL, cpu_to_node(cpu));
964 if (has_coregroup_support())
965 zalloc_cpumask_var_node(&per_cpu(cpu_coregroup_map, cpu),
966 GFP_KERNEL, cpu_to_node(cpu));
968 #ifdef CONFIG_NEED_MULTIPLE_NODES
970 * numa_node_id() works after this.
972 if (cpu_present(cpu)) {
973 set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]);
974 set_cpu_numa_mem(cpu,
975 local_memory_node(numa_cpu_lookup_table[cpu]));
979 * cpu_core_map survives only because it has long been exported;
980 * it will only hold a snapshot of cpu_cpu_mask().
983 cpumask_copy(per_cpu(cpu_core_map, cpu), cpu_cpu_mask(cpu));
986 /* Init the cpumasks so the boot CPU is related to itself */
987 cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
988 cpumask_set_cpu(boot_cpuid, cpu_l2_cache_mask(boot_cpuid));
990 if (has_coregroup_support())
991 cpumask_set_cpu(boot_cpuid, cpu_coregroup_mask(boot_cpuid));
995 cpumask_set_cpu(boot_cpuid,
996 cpu_smallcore_mask(boot_cpuid));
999 if (smp_ops && smp_ops->probe)
1003 void smp_prepare_boot_cpu(void)
1005 BUG_ON(smp_processor_id() != boot_cpuid);
1007 paca_ptrs[boot_cpuid]->__current = current;
1009 set_numa_node(numa_cpu_lookup_table[boot_cpuid]);
1010 current_set[boot_cpuid] = current;
1013 #ifdef CONFIG_HOTPLUG_CPU
1015 int generic_cpu_disable(void)
1017 unsigned int cpu = smp_processor_id();
1019 if (cpu == boot_cpuid)
1022 set_cpu_online(cpu, false);
1024 vdso_data->processorCount--;
1026 /* Update affinity of all IRQs previously aimed at this CPU */
1027 irq_migrate_all_off_this_cpu();
1030 * Depending on the details of the interrupt controller, it's possible
1031 * that one of the interrupts we just migrated away from this CPU is
1032 * actually already pending on this CPU. If we leave it in that state
1033 * the interrupt will never be EOI'ed, and will never fire again. So
1034 * temporarily enable interrupts here, to allow any pending interrupt to
1035 * be received (and EOI'ed), before we take this CPU offline.
1039 local_irq_disable();
1044 void generic_cpu_die(unsigned int cpu)
1048 for (i = 0; i < 100; i++) {
1050 if (is_cpu_dead(cpu))
1054 printk(KERN_ERR "CPU%d didn't die...\n", cpu);
1057 void generic_set_cpu_dead(unsigned int cpu)
1059 per_cpu(cpu_state, cpu) = CPU_DEAD;
1063 * The cpu_state should be set to CPU_UP_PREPARE in kick_cpu(), otherwise
1064 * the cpu_state is always CPU_DEAD after calling generic_set_cpu_dead(),
1065 * which makes the delay in generic_cpu_die() not happen.
1067 void generic_set_cpu_up(unsigned int cpu)
1069 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
1072 int generic_check_cpu_restart(unsigned int cpu)
1074 return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
1077 int is_cpu_dead(unsigned int cpu)
1079 return per_cpu(cpu_state, cpu) == CPU_DEAD;
1082 static bool secondaries_inhibited(void)
1084 return kvm_hv_mode_active();
1087 #else /* HOTPLUG_CPU */
1089 #define secondaries_inhibited() 0
1093 static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
1096 paca_ptrs[cpu]->__current = idle;
1097 paca_ptrs[cpu]->kstack = (unsigned long)task_stack_page(idle) +
1098 THREAD_SIZE - STACK_FRAME_OVERHEAD;
1101 secondary_current = current_set[cpu] = idle;
1104 int __cpu_up(unsigned int cpu, struct task_struct *tidle)
1109 * Don't allow secondary threads to come online if inhibited
1111 if (threads_per_core > 1 && secondaries_inhibited() &&
1112 cpu_thread_in_subcore(cpu))
1115 if (smp_ops == NULL ||
1116 (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
1119 cpu_idle_thread_init(cpu, tidle);
1122 * The platform might need to allocate resources prior to bringing
1125 if (smp_ops->prepare_cpu) {
1126 rc = smp_ops->prepare_cpu(cpu);
1131 /* Make sure the callin-map entry is 0 (it can be left over from a previous CPU hotplug) */
1134 cpu_callin_map[cpu] = 0;
1136 /* The information for processor bringup must
1137 * be written out to main store before we release the processor.
1143 DBG("smp: kicking cpu %d\n", cpu);
1144 rc = smp_ops->kick_cpu(cpu);
1146 pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
1151 * wait to see if the cpu made a callin (is actually up).
1152 * use this value that I found through experimentation.
1155 if (system_state < SYSTEM_RUNNING)
1156 for (c = 50000; c && !cpu_callin_map[cpu]; c--)
1158 #ifdef CONFIG_HOTPLUG_CPU
1161 * CPUs can take much longer to come up in the
1162 * hotplug case. Wait five seconds.
1164 for (c = 5000; c && !cpu_callin_map[cpu]; c--)
1168 if (!cpu_callin_map[cpu]) {
1169 printk(KERN_ERR "Processor %u is stuck.\n", cpu);
1173 DBG("Processor %u found.\n", cpu);
1175 if (smp_ops->give_timebase)
1176 smp_ops->give_timebase();
1178 /* Wait until cpu puts itself in the online & active maps */
1179 spin_until_cond(cpu_online(cpu));
1184 /* Return the value of the reg property corresponding to the given logical cpu. */
1187 int cpu_to_core_id(int cpu)
1189 struct device_node *np;
1193 np = of_get_cpu_node(cpu, NULL);
1197 reg = of_get_property(np, "reg", NULL);
1201 id = be32_to_cpup(reg);
1206 EXPORT_SYMBOL_GPL(cpu_to_core_id);
1208 /* Helper routines for cpu to core mapping */
1209 int cpu_core_index_of_thread(int cpu)
1211 return cpu >> threads_shift;
1213 EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);
1215 int cpu_first_thread_of_core(int core)
1217 return core << threads_shift;
1219 EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
1221 /* Must be called when no change can occur to cpu_present_mask,
1222 * i.e. during cpu online or offline.
1224 static struct device_node *cpu_to_l2cache(int cpu)
1226 struct device_node *np;
1227 struct device_node *cache;
1229 if (!cpu_present(cpu))
1232 np = of_get_cpu_node(cpu, NULL);
1236 cache = of_find_next_cache_node(np);
1243 static bool update_mask_by_l2(int cpu, cpumask_var_t *mask)
1245 struct cpumask *(*submask_fn)(int) = cpu_sibling_mask;
1246 struct device_node *l2_cache, *np;
1250 submask_fn = cpu_smallcore_mask;
1252 l2_cache = cpu_to_l2cache(cpu);
1253 if (!l2_cache || !*mask) {
1254 /* Assume only core siblings share cache with this CPU */
1255 for_each_cpu(i, submask_fn(cpu))
1256 set_cpus_related(cpu, i, cpu_l2_cache_mask);
1261 cpumask_and(*mask, cpu_online_mask, cpu_cpu_mask(cpu));
1263 /* Update l2-cache mask with all the CPUs that are part of submask */
1264 or_cpumasks_related(cpu, cpu, submask_fn, cpu_l2_cache_mask);
1266 /* Skip all CPUs already part of current CPU l2-cache mask */
1267 cpumask_andnot(*mask, *mask, cpu_l2_cache_mask(cpu));
1269 for_each_cpu(i, *mask) {
1271 * when updating the masks, the current CPU has not yet been marked
1272 * online, but we still need to update the cache masks
1274 np = cpu_to_l2cache(i);
1276 /* Skip all CPUs already part of current CPU l2-cache */
1277 if (np == l2_cache) {
1278 or_cpumasks_related(cpu, i, submask_fn, cpu_l2_cache_mask);
1279 cpumask_andnot(*mask, *mask, submask_fn(i));
1281 cpumask_andnot(*mask, *mask, cpu_l2_cache_mask(i));
1286 of_node_put(l2_cache);
1291 #ifdef CONFIG_HOTPLUG_CPU
1292 static void remove_cpu_from_masks(int cpu)
1294 struct cpumask *(*mask_fn)(int) = cpu_sibling_mask;
1298 mask_fn = cpu_l2_cache_mask;
1300 for_each_cpu(i, mask_fn(cpu)) {
1301 set_cpus_unrelated(cpu, i, cpu_l2_cache_mask);
1302 set_cpus_unrelated(cpu, i, cpu_sibling_mask);
1304 set_cpus_unrelated(cpu, i, cpu_smallcore_mask);
1307 if (has_coregroup_support()) {
1308 for_each_cpu(i, cpu_coregroup_mask(cpu))
1309 set_cpus_unrelated(cpu, i, cpu_coregroup_mask);
1314 static inline void add_cpu_to_smallcore_masks(int cpu)
1321 cpumask_set_cpu(cpu, cpu_smallcore_mask(cpu));
1323 for_each_cpu(i, per_cpu(cpu_l1_cache_map, cpu)) {
1325 set_cpus_related(i, cpu, cpu_smallcore_mask);
1329 static void update_coregroup_mask(int cpu, cpumask_var_t *mask)
1331 struct cpumask *(*submask_fn)(int) = cpu_sibling_mask;
1332 int coregroup_id = cpu_to_coregroup_id(cpu);
1336 submask_fn = cpu_l2_cache_mask;
1339 /* Assume only siblings are part of this CPU's coregroup */
1340 for_each_cpu(i, submask_fn(cpu))
1341 set_cpus_related(cpu, i, cpu_coregroup_mask);
1346 cpumask_and(*mask, cpu_online_mask, cpu_cpu_mask(cpu));
1348 /* Update coregroup mask with all the CPUs that are part of submask */
1349 or_cpumasks_related(cpu, cpu, submask_fn, cpu_coregroup_mask);
1351 /* Skip all CPUs already part of coregroup mask */
1352 cpumask_andnot(*mask, *mask, cpu_coregroup_mask(cpu));
1354 for_each_cpu(i, *mask) {
1355 /* Skip all CPUs not part of this coregroup */
1356 if (coregroup_id == cpu_to_coregroup_id(i)) {
1357 or_cpumasks_related(cpu, i, submask_fn, cpu_coregroup_mask);
1358 cpumask_andnot(*mask, *mask, submask_fn(i));
1360 cpumask_andnot(*mask, *mask, cpu_coregroup_mask(i));
1365 static void add_cpu_to_masks(int cpu)
1367 int first_thread = cpu_first_thread_sibling(cpu);
1372 * This CPU will not be in the online mask yet, so we need to manually
1373 * add it to its own thread sibling mask.
1375 cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
1377 for (i = first_thread; i < first_thread + threads_per_core; i++)
1379 set_cpus_related(i, cpu, cpu_sibling_mask);
1381 add_cpu_to_smallcore_masks(cpu);
1383 /* In CPU-hotplug path, hence use GFP_ATOMIC */
1384 alloc_cpumask_var_node(&mask, GFP_ATOMIC, cpu_to_node(cpu));
1385 update_mask_by_l2(cpu, &mask);
1387 if (has_coregroup_support())
1388 update_coregroup_mask(cpu, &mask);
1390 free_cpumask_var(mask);
1393 /* Activate a secondary processor. */
1394 void start_secondary(void *unused)
1396 unsigned int cpu = smp_processor_id();
1399 current->active_mm = &init_mm;
1401 smp_store_cpu_info(cpu);
1402 set_dec(tb_ticks_per_jiffy);
1404 cpu_callin_map[cpu] = 1;
1406 if (smp_ops->setup_cpu)
1407 smp_ops->setup_cpu(cpu);
1408 if (smp_ops->take_timebase)
1409 smp_ops->take_timebase();
1411 secondary_cpu_time_init();
1414 if (system_state == SYSTEM_RUNNING)
1415 vdso_data->processorCount++;
1419 /* Update topology CPU masks */
1420 add_cpu_to_masks(cpu);
1423 * Check for any shared caches. Note that this must be done on a
1424 * per-core basis because one core in the pair might be disabled.
1426 if (!shared_caches) {
1427 struct cpumask *(*sibling_mask)(int) = cpu_sibling_mask;
1428 struct cpumask *mask = cpu_l2_cache_mask(cpu);
1431 sibling_mask = cpu_smallcore_mask;
1433 if (cpumask_weight(mask) > cpumask_weight(sibling_mask(cpu)))
1434 shared_caches = true;
1437 set_numa_node(numa_cpu_lookup_table[cpu]);
1438 set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));
1441 notify_cpu_starting(cpu);
1442 set_cpu_online(cpu, true);
1444 boot_init_stack_canary();
1448 /* We can enable ftrace for secondary cpus now */
1449 this_cpu_enable_ftrace();
1451 cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
1456 int setup_profiling_timer(unsigned int multiplier)
1461 static void fixup_topology(void)
1465 #ifdef CONFIG_SCHED_SMT
1466 if (has_big_cores) {
1467 pr_info("Big cores detected but using small core scheduling\n");
1468 powerpc_topology[smt_idx].mask = smallcore_smt_mask;
1472 if (!has_coregroup_support())
1473 powerpc_topology[mc_idx].mask = powerpc_topology[cache_idx].mask;
1476 * Try to consolidate topology levels here instead of
1477 * allowing the scheduler to degenerate them.
1478 * - Don't consolidate if masks are different.
1479 * - Don't consolidate if sd_flags exist and are different.
1481 for (i = 1; i <= die_idx; i++) {
1482 if (powerpc_topology[i].mask != powerpc_topology[i - 1].mask)
1485 if (powerpc_topology[i].sd_flags && powerpc_topology[i - 1].sd_flags &&
1486 powerpc_topology[i].sd_flags != powerpc_topology[i - 1].sd_flags)
1489 if (!powerpc_topology[i - 1].sd_flags)
1490 powerpc_topology[i - 1].sd_flags = powerpc_topology[i].sd_flags;
1492 powerpc_topology[i].mask = powerpc_topology[i + 1].mask;
1493 powerpc_topology[i].sd_flags = powerpc_topology[i + 1].sd_flags;
1494 #ifdef CONFIG_SCHED_DEBUG
1495 powerpc_topology[i].name = powerpc_topology[i + 1].name;
1500 void __init smp_cpus_done(unsigned int max_cpus)
1503 * We are running pinned to the boot CPU, see rest_init().
1505 if (smp_ops && smp_ops->setup_cpu)
1506 smp_ops->setup_cpu(boot_cpuid);
1508 if (smp_ops && smp_ops->bringup_done)
1509 smp_ops->bringup_done();
1511 dump_numa_cpu_topology();
1514 set_sched_topology(powerpc_topology);
1517 #ifdef CONFIG_HOTPLUG_CPU
1518 int __cpu_disable(void)
1520 int cpu = smp_processor_id();
1523 if (!smp_ops->cpu_disable)
1526 this_cpu_disable_ftrace();
1528 err = smp_ops->cpu_disable();
1532 /* Update sibling maps */
1533 remove_cpu_from_masks(cpu);
1538 void __cpu_die(unsigned int cpu)
1540 if (smp_ops->cpu_die)
1541 smp_ops->cpu_die(cpu);
1544 void arch_cpu_idle_dead(void)
1546 sched_preempt_enable_no_resched();
1549 * Disable on the down path. This will be re-enabled by
1550 * start_secondary() via start_secondary_resume() below
1552 this_cpu_disable_ftrace();
1554 if (smp_ops->cpu_offline_self)
1555 smp_ops->cpu_offline_self();
1557 /* If we return, we re-enter start_secondary */
1558 start_secondary_resume();