/*
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched/mm.h>
#include <linux/sched/topology.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/profile.h>
#include <linux/processor.h>
#include <linux/random.h>
#include <linux/stackprotector.h>

#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/hw_irq.h>
#include <asm/kvm_ppc.h>
#include <asm/dbell.h>
#include <asm/pgtable.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/cputhreads.h>
#include <asm/cputable.h>
#include <asm/vdso_datapage.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#endif
#include <asm/debug.h>
#include <asm/kexec.h>
#include <asm/asm-prototypes.h>
#include <asm/cpu_has_feature.h>
#include <asm/ftrace.h>

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#ifdef CONFIG_HOTPLUG_CPU
/* State of each CPU during hotplug phases */
static DEFINE_PER_CPU(int, cpu_state) = { 0 };
#endif

struct thread_info *secondary_ti;

DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_smallcore_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_l2_cache_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
bool has_big_cores;

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_l2_cache_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);
EXPORT_SYMBOL_GPL(has_big_cores);

#define MAX_THREAD_LIST_SIZE	8
#define THREAD_GROUP_SHARE_L1	1
struct thread_groups {
	unsigned int property;
	unsigned int nr_groups;
	unsigned int threads_per_group;
	unsigned int thread_list[MAX_THREAD_LIST_SIZE];
};

/*
 * On big-core systems, cpu_l1_cache_map for each CPU corresponds to
 * the set of its siblings that share the L1 cache.
 */
DEFINE_PER_CPU(cpumask_var_t, cpu_l1_cache_map);

/* SMP operations for this machine */
struct smp_ops_t *smp_ops;

/* Can't be static due to PowerMac hackery */
volatile unsigned int cpu_callin_map[NR_CPUS];

int smt_enabled_at_boot = 1;

/*
 * Returns 1 if the specified cpu should be brought up during boot.
 * Used to inhibit booting threads if they've been disabled or
 * limited on the command line.
 */
int smp_generic_cpu_bootable(unsigned int nr)
{
	/* Special case - we inhibit secondary thread startup
	 * during boot if the user requests it.
	 */
	if (system_state < SYSTEM_RUNNING && cpu_has_feature(CPU_FTR_SMT)) {
		if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
			return 0;
		if (smt_enabled_at_boot
		    && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
			return 0;
	}

	return 1;
}

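/*
 * For example, booting an SMT4 system with smt-enabled=2 leaves
 * smt_enabled_at_boot == 2, so threads 2 and 3 of each core fail the
 * cpu_thread_in_core(nr) >= smt_enabled_at_boot check above and are
 * not brought up during boot.
 */
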
#ifdef CONFIG_PPC64
int smp_generic_kick_cpu(int nr)
{
	if (nr < 0 || nr >= nr_cpu_ids)
		return -EINVAL;

	/*
	 * The processor is currently spinning, waiting for the
	 * cpu_start field to become non-zero. After we set cpu_start,
	 * the processor will continue on to secondary_start.
	 */
	if (!paca_ptrs[nr]->cpu_start) {
		paca_ptrs[nr]->cpu_start = 1;
		smp_mb();
		return 0;
	}

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * Ok it's not there, so it might be soft-unplugged, let's
	 * try to bring it back.
	 */
	generic_set_cpu_up(nr);
	smp_mb();
	smp_send_reschedule(nr);
#endif /* CONFIG_HOTPLUG_CPU */

	return 0;
}
#endif /* CONFIG_PPC64 */

static irqreturn_t call_function_action(int irq, void *data)
{
	generic_smp_call_function_interrupt();
	return IRQ_HANDLED;
}

static irqreturn_t reschedule_action(int irq, void *data)
{
	scheduler_ipi();
	return IRQ_HANDLED;
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static irqreturn_t tick_broadcast_ipi_action(int irq, void *data)
{
	timer_broadcast_interrupt();
	return IRQ_HANDLED;
}
#endif

#ifdef CONFIG_NMI_IPI
static irqreturn_t nmi_ipi_action(int irq, void *data)
{
	smp_handle_nmi_ipi(get_irq_regs());
	return IRQ_HANDLED;
}
#endif

static irq_handler_t smp_ipi_action[] = {
	[PPC_MSG_CALL_FUNCTION] = call_function_action,
	[PPC_MSG_RESCHEDULE] = reschedule_action,
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	[PPC_MSG_TICK_BROADCAST] = tick_broadcast_ipi_action,
#endif
#ifdef CONFIG_NMI_IPI
	[PPC_MSG_NMI_IPI] = nmi_ipi_action,
#endif
};

/*
 * The NMI IPI is a fallback and not truly non-maskable. It is simpler
 * than going through the call function infrastructure, and strongly
 * serialized, so it is more appropriate for debugging.
 */
const char *smp_ipi_name[] = {
	[PPC_MSG_CALL_FUNCTION] = "ipi call function",
	[PPC_MSG_RESCHEDULE] = "ipi reschedule",
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	[PPC_MSG_TICK_BROADCAST] = "ipi tick-broadcast",
#endif
#ifdef CONFIG_NMI_IPI
	[PPC_MSG_NMI_IPI] = "nmi ipi",
#endif
};

/* optional function to request ipi, for controllers with >= 4 ipis */
int smp_request_message_ipi(int virq, int msg)
{
	int err;

	if (msg < 0 || msg > PPC_MSG_NMI_IPI)
		return -EINVAL;
#ifndef CONFIG_NMI_IPI
	if (msg == PPC_MSG_NMI_IPI)
		return 1;
#endif

	err = request_irq(virq, smp_ipi_action[msg],
			  IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
			  smp_ipi_name[msg], NULL);
	WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
	     virq, smp_ipi_name[msg], err);

	return err;
}

#ifdef CONFIG_PPC_SMP_MUXED_IPI
struct cpu_messages {
	long messages;			/* current messages */
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);

void smp_muxed_ipi_set_message(int cpu, int msg)
{
	struct cpu_messages *info = &per_cpu(ipi_message, cpu);
	char *message = (char *)&info->messages;

	/*
	 * Order previous accesses before accesses in the IPI handler.
	 */
	smp_mb();
	message[msg] = 1;
}

void smp_muxed_ipi_message_pass(int cpu, int msg)
{
	smp_muxed_ipi_set_message(cpu, msg);

	/*
	 * cause_ipi functions are required to include a full barrier
	 * before doing whatever causes the IPI.
	 */
	smp_ops->cause_ipi(cpu);
}

#ifdef __BIG_ENDIAN__
#define IPI_MESSAGE(A) (1uL << ((BITS_PER_LONG - 8) - 8 * (A)))
#else
#define IPI_MESSAGE(A) (1uL << (8 * (A)))
#endif

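/*
 * IPI_MESSAGE(msg) selects the low bit of the byte lane that
 * smp_muxed_ipi_set_message() writes with message[msg] = 1. For
 * example, with BITS_PER_LONG == 64, message 0 is bit 56 on
 * big-endian (the most significant byte of the long) and bit 0 on
 * little-endian (the least significant byte), so testing
 * all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION) in the demux loop below
 * checks exactly the byte that the sender set.
 */
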
irqreturn_t smp_ipi_demux(void)
{
	mb();	/* order any irq clear */

	return smp_ipi_demux_relaxed();
}

/* sync-free variant. Callers should ensure synchronization */
irqreturn_t smp_ipi_demux_relaxed(void)
{
	struct cpu_messages *info;
	unsigned long all;

	info = this_cpu_ptr(&ipi_message);
	do {
		all = xchg(&info->messages, 0);
#if defined(CONFIG_KVM_XICS) && defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
		/*
		 * Must check for PPC_MSG_RM_HOST_ACTION messages
		 * before PPC_MSG_CALL_FUNCTION messages because when
		 * a VM is destroyed, we call kick_all_cpus_sync()
		 * to ensure that any pending PPC_MSG_RM_HOST_ACTION
		 * messages have completed before we free any VCPUs.
		 */
		if (all & IPI_MESSAGE(PPC_MSG_RM_HOST_ACTION))
			kvmppc_xics_ipi_action();
#endif
		if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
			generic_smp_call_function_interrupt();
		if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
			scheduler_ipi();
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
		if (all & IPI_MESSAGE(PPC_MSG_TICK_BROADCAST))
			timer_broadcast_interrupt();
#endif
#ifdef CONFIG_NMI_IPI
		if (all & IPI_MESSAGE(PPC_MSG_NMI_IPI))
			nmi_ipi_action(0, NULL);
#endif
	} while (info->messages);

	return IRQ_HANDLED;
}
#endif /* CONFIG_PPC_SMP_MUXED_IPI */

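/*
 * Putting the pieces together, a muxed IPI roughly flows like this
 * (a sketch, assuming a platform that provides smp_ops->cause_ipi):
 *
 *	smp_muxed_ipi_message_pass(cpu, PPC_MSG_RESCHEDULE);
 *		-> smp_muxed_ipi_set_message()	sets the message byte
 *		-> smp_ops->cause_ipi(cpu)	raises one hardware IPI
 *
 * and the target CPU's interrupt handler calls smp_ipi_demux(), which
 * atomically collects all pending message bytes and dispatches each
 * one, so several logical messages can ride on a single interrupt.
 */
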
static inline void do_message_pass(int cpu, int msg)
{
	if (smp_ops->message_pass)
		smp_ops->message_pass(cpu, msg);
#ifdef CONFIG_PPC_SMP_MUXED_IPI
	else
		smp_muxed_ipi_message_pass(cpu, msg);
#endif
}

void smp_send_reschedule(int cpu)
{
	if (likely(smp_ops))
		do_message_pass(cpu, PPC_MSG_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(smp_send_reschedule);

void arch_send_call_function_single_ipi(int cpu)
{
	do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

#ifdef CONFIG_NMI_IPI

/*
 * "NMI IPI" system.
 *
 * NMI IPIs may not be recoverable, so should not be used as ongoing part of
 * a running system. They can be used for crash, debug, halt/reboot, etc.
 *
 * NMI IPIs are globally single threaded. No more than one can be in
 * progress at any time.
 *
 * The IPI call waits with interrupts disabled until all targets enter the
 * NMI handler, then the call returns.
 *
 * No new NMI can be initiated until targets exit the handler.
 *
 * The IPI call may time out without all targets entering the NMI handler.
 * In that case, there is some logic to recover (and ignore subsequent
 * NMI interrupts that may eventually be raised), but the platform interrupt
 * handler may not be able to distinguish this from other exception causes,
 * which may cause a crash.
 */

static atomic_t __nmi_ipi_lock = ATOMIC_INIT(0);
static struct cpumask nmi_ipi_pending_mask;
static int nmi_ipi_busy_count = 0;
static void (*nmi_ipi_function)(struct pt_regs *) = NULL;

static void nmi_ipi_lock_start(unsigned long *flags)
{
	raw_local_irq_save(*flags);
	hard_irq_disable();
	while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
		raw_local_irq_restore(*flags);
		spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
		raw_local_irq_save(*flags);
		hard_irq_disable();
	}
}

static void nmi_ipi_lock(void)
{
	while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
		spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
}

static void nmi_ipi_unlock(void)
{
	smp_mb();
	WARN_ON(atomic_read(&__nmi_ipi_lock) != 1);
	atomic_set(&__nmi_ipi_lock, 0);
}

static void nmi_ipi_unlock_end(unsigned long *flags)
{
	nmi_ipi_unlock();
	raw_local_irq_restore(*flags);
}

/*
 * Platform NMI handler calls this to ack
 */
int smp_handle_nmi_ipi(struct pt_regs *regs)
{
	void (*fn)(struct pt_regs *);
	unsigned long flags;
	int me = raw_smp_processor_id();
	int ret = 0;

	/*
	 * Unexpected NMIs are possible here because the interrupt may not
	 * be able to distinguish NMI IPIs from other types of NMIs, or
	 * because the caller may have timed out.
	 */
	nmi_ipi_lock_start(&flags);
	if (!nmi_ipi_busy_count)
		goto out;
	if (!cpumask_test_cpu(me, &nmi_ipi_pending_mask))
		goto out;

	fn = nmi_ipi_function;
	if (!fn)
		goto out;

	cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
	nmi_ipi_busy_count++;
	nmi_ipi_unlock();

	ret = 1;

	fn(regs);

	nmi_ipi_lock();
	if (nmi_ipi_busy_count > 1) /* Can race with caller time-out */
		nmi_ipi_busy_count--;
out:
	nmi_ipi_unlock_end(&flags);

	return ret;
}

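/*
 * The platform's NMI or system reset exception path is expected to
 * call smp_handle_nmi_ipi(regs) and use the return value (1 if an NMI
 * IPI was pending for this CPU and its callback ran, 0 otherwise) to
 * decide whether the NMI has been accounted for or needs further
 * platform-specific handling.
 */
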
static void do_smp_send_nmi_ipi(int cpu, bool safe)
{
	if (!safe && smp_ops->cause_nmi_ipi && smp_ops->cause_nmi_ipi(cpu))
		return;

	if (cpu >= 0) {
		do_message_pass(cpu, PPC_MSG_NMI_IPI);
	} else {
		int c;

		for_each_online_cpu(c) {
			if (c == raw_smp_processor_id())
				continue;
			do_message_pass(c, PPC_MSG_NMI_IPI);
		}
	}
}

/*
 * - cpu is the target CPU (must not be this CPU), or NMI_IPI_ALL_OTHERS.
 * - fn is the target callback function.
 * - delay_us > 0 is the delay before giving up waiting for targets to
 *   complete executing the handler, == 0 specifies indefinite delay.
 */
int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us, bool safe)
{
	unsigned long flags;
	int me = raw_smp_processor_id();
	int ret = 1;

	BUG_ON(cpu == me);
	BUG_ON(cpu < 0 && cpu != NMI_IPI_ALL_OTHERS);

	if (unlikely(!smp_ops))
		return 0;

	/* Take the nmi_ipi_busy count/lock with interrupts hard disabled */
	nmi_ipi_lock_start(&flags);
	while (nmi_ipi_busy_count) {
		nmi_ipi_unlock_end(&flags);
		spin_until_cond(nmi_ipi_busy_count == 0);
		nmi_ipi_lock_start(&flags);
	}

	nmi_ipi_function = fn;

	if (cpu < 0) {
		/* ALL_OTHERS */
		cpumask_copy(&nmi_ipi_pending_mask, cpu_online_mask);
		cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
	} else {
		/* cpumask starts clear */
		cpumask_set_cpu(cpu, &nmi_ipi_pending_mask);
	}
	nmi_ipi_busy_count++;
	nmi_ipi_unlock();

	do_smp_send_nmi_ipi(cpu, safe);

	nmi_ipi_lock();
	/* nmi_ipi_busy_count is held here, so unlock/lock is okay */
	while (!cpumask_empty(&nmi_ipi_pending_mask)) {
		nmi_ipi_unlock();
		udelay(1);
		nmi_ipi_lock();
		if (delay_us && !--delay_us)
			break;
	}

	/* Then wait for the targets to finish executing fn */
	while (nmi_ipi_busy_count > 1) {
		nmi_ipi_unlock();
		udelay(1);
		nmi_ipi_lock();
		if (delay_us && !--delay_us)
			break;
	}

	if (!cpumask_empty(&nmi_ipi_pending_mask)) {
		/* Timeout waiting for CPUs to call smp_handle_nmi_ipi */
		ret = 0;
		cpumask_clear(&nmi_ipi_pending_mask);
	}
	if (nmi_ipi_busy_count > 1) {
		/* Timeout waiting for CPUs to execute fn */
		ret = 0;
		nmi_ipi_busy_count = 1;
	}

	nmi_ipi_busy_count--;
	nmi_ipi_unlock_end(&flags);

	return ret;
}

int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
{
	return __smp_send_nmi_ipi(cpu, fn, delay_us, false);
}

int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
{
	return __smp_send_nmi_ipi(cpu, fn, delay_us, true);
}
#endif /* CONFIG_NMI_IPI */

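/*
 * For example, smp_send_debugger_break() below uses
 * smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, debugger_ipi_callback, 1000000)
 * to pull every other online CPU into the debugger, waiting up to one
 * second (1000000 us) for them all to enter the callback.
 */
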
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		do_message_pass(cpu, PPC_MSG_TICK_BROADCAST);
}
#endif

#ifdef CONFIG_DEBUGGER
void debugger_ipi_callback(struct pt_regs *regs)
{
	debugger_ipi(regs);
}

void smp_send_debugger_break(void)
{
	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, debugger_ipi_callback, 1000000);
}
#endif

#ifdef CONFIG_KEXEC_CORE
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
	int cpu;

	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, crash_ipi_callback, 1000000);
	if (kdump_in_progress() && crash_wake_offline) {
		for_each_present_cpu(cpu) {
			if (cpu_online(cpu))
				continue;
			/*
			 * crash_ipi_callback will wait for
			 * all cpus, including offline CPUs.
			 * We don't care about nmi_ipi_function.
			 * Offline cpus will jump straight into
			 * crash_ipi_callback, we can skip the
			 * entire NMI dance and waiting for
			 * cpus to clear pending mask, etc.
			 */
			do_smp_send_nmi_ipi(cpu, false);
		}
	}
}
#endif

#ifdef CONFIG_NMI_IPI
static void nmi_stop_this_cpu(struct pt_regs *regs)
{
	/*
	 * This is a special case because it never returns, so the NMI IPI
	 * handling would never mark it as done, which makes any later
	 * smp_send_nmi_ipi() call spin forever. Mark it done now.
	 *
	 * IRQs are already hard disabled by the smp_handle_nmi_ipi.
	 */
	nmi_ipi_lock();
	if (nmi_ipi_busy_count > 1)
		nmi_ipi_busy_count--;
	nmi_ipi_unlock();

	spin_begin();
	while (1)
		spin_cpu_relax();
}

void smp_send_stop(void)
{
	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, nmi_stop_this_cpu, 1000000);
}

#else /* CONFIG_NMI_IPI */

static void stop_this_cpu(void *dummy)
{
	hard_irq_disable();
	spin_begin();
	while (1)
		spin_cpu_relax();
}

void smp_send_stop(void)
{
	static bool stopped = false;

	/*
	 * Prevent waiting on csd lock from a previous smp_send_stop.
	 * This is racy, but in general callers try to do the right
	 * thing and only fire off one smp_send_stop (e.g., see
	 * kernel/panic.c)
	 */
	if (stopped)
		return;

	stopped = true;

	smp_call_function(stop_this_cpu, NULL, 0);
}
#endif /* CONFIG_NMI_IPI */

struct thread_info *current_set[NR_CPUS];

static void smp_store_cpu_info(int id)
{
	per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
#ifdef CONFIG_PPC_FSL_BOOK3E
	per_cpu(next_tlbcam_idx, id)
		= (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif
}

/*
 * Relationships between CPUs are maintained in a set of per-cpu cpumasks,
 * so rather than just passing around the cpumask we pass around a function
 * that returns that cpumask for the given CPU.
 */
static void set_cpus_related(int i, int j, struct cpumask *(*get_cpumask)(int))
{
	cpumask_set_cpu(i, get_cpumask(j));
	cpumask_set_cpu(j, get_cpumask(i));
}

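/*
 * For example, set_cpus_related(4, 5, cpu_sibling_mask) marks CPU 4 in
 * CPU 5's sibling mask and CPU 5 in CPU 4's, keeping the relation
 * symmetric without the caller touching either mask directly.
 */
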
#ifdef CONFIG_HOTPLUG_CPU
static void set_cpus_unrelated(int i, int j,
			       struct cpumask *(*get_cpumask)(int))
{
	cpumask_clear_cpu(i, get_cpumask(j));
	cpumask_clear_cpu(j, get_cpumask(i));
}
#endif

/*
 * parse_thread_groups: Parses the "ibm,thread-groups" device tree
 *                      property for the CPU device node @dn and stores
 *                      the parsed output in the thread_groups
 *                      structure @tg if the ibm,thread-groups[0]
 *                      matches @property.
 *
 * @dn: The device node of the CPU device.
 * @tg: Pointer to a thread group structure into which the parsed
 *      output of "ibm,thread-groups" is stored.
 * @property: The property of the thread-group that the caller is
 *            interested in.
 *
 * The ibm,thread-groups[0..N-1] array defines which group of threads
 * in the CPU-device node can be grouped together based on the property.
 *
 * ibm,thread-groups[0] tells us the property based on which the
 * threads are being grouped together. If this value is 1, it implies
 * that the threads in the same group share the L1 and translation
 * cache.
 *
 * ibm,thread-groups[1] tells us how many such thread groups exist.
 *
 * ibm,thread-groups[2] tells us the number of threads in each such
 * group.
 *
 * ibm,thread-groups[3..N-1] is the list of threads identified by
 * "ibm,ppc-interrupt-server#s" arranged as per their membership in
 * the group.
 *
 * Example: If ibm,thread-groups = [1,2,4,5,6,7,8,9,10,11,12] it
 * implies that there are 2 groups of 4 threads each, where each group
 * of threads shares the L1 and translation cache.
 *
 * The "ibm,ppc-interrupt-server#s" of the first group is {5,6,7,8}
 * and the "ibm,ppc-interrupt-server#s" of the second group is
 * {9,10,11,12}.
 *
 * Returns 0 on success, -EINVAL if the property does not exist,
 * -ENODATA if property does not have a value, and -EOVERFLOW if the
 * property data isn't large enough.
 */

static int parse_thread_groups(struct device_node *dn,
			       struct thread_groups *tg,
			       unsigned int property)
{
	int i;
	u32 thread_group_array[3 + MAX_THREAD_LIST_SIZE];
	u32 *thread_list;
	size_t total_threads;
	int ret;

	ret = of_property_read_u32_array(dn, "ibm,thread-groups",
					 thread_group_array, 3);
	if (ret)
		return ret;

	tg->property = thread_group_array[0];
	tg->nr_groups = thread_group_array[1];
	tg->threads_per_group = thread_group_array[2];
	if (tg->property != property ||
	    tg->nr_groups < 1 ||
	    tg->threads_per_group < 1)
		return -ENODATA;

	total_threads = tg->nr_groups * tg->threads_per_group;

	ret = of_property_read_u32_array(dn, "ibm,thread-groups",
					 thread_group_array,
					 3 + total_threads);
	if (ret)
		return ret;

	thread_list = &thread_group_array[3];

	for (i = 0; i < total_threads; i++)
		tg->thread_list[i] = thread_list[i];

	return 0;
}

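/*
 * Using the example from the comment above, a CPU node carrying
 *
 *	ibm,thread-groups = <1 2 4 5 6 7 8 9 10 11 12>;
 *
 * parses (for property == THREAD_GROUP_SHARE_L1) into
 * tg->nr_groups == 2, tg->threads_per_group == 4 and
 * tg->thread_list == {5,6,7,8,9,10,11,12}.
 */
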
/*
 * get_cpu_thread_group_start : Searches the thread group in tg->thread_list
 *                              that @cpu belongs to.
 *
 * @cpu : The logical CPU whose thread group is being searched.
 * @tg : The thread-group structure of the CPU node which @cpu belongs
 *       to.
 *
 * Returns the index to tg->thread_list that points to the start
 * of the thread_group that @cpu belongs to.
 *
 * Returns -1 if cpu doesn't belong to any of the groups pointed to by
 * tg->thread_list.
 */
static int get_cpu_thread_group_start(int cpu, struct thread_groups *tg)
{
	int hw_cpu_id = get_hard_smp_processor_id(cpu);
	int i, j;

	for (i = 0; i < tg->nr_groups; i++) {
		int group_start = i * tg->threads_per_group;

		for (j = 0; j < tg->threads_per_group; j++) {
			int idx = group_start + j;

			if (tg->thread_list[idx] == hw_cpu_id)
				return group_start;
		}
	}

	return -1;
}

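/*
 * Continuing the example above: a CPU whose hardware id
 * ("ibm,ppc-interrupt-server#s") is 10 sits in the second group, so
 * get_cpu_thread_group_start() returns 1 * 4 = 4, the index in
 * tg->thread_list where that group begins.
 */
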
static int init_cpu_l1_cache_map(int cpu)
{
	struct device_node *dn = of_get_cpu_node(cpu, NULL);
	struct thread_groups tg = {.property = 0,
				   .nr_groups = 0,
				   .threads_per_group = 0};
	int first_thread = cpu_first_thread_sibling(cpu);
	int i, cpu_group_start = -1, err = 0;

	if (!dn)
		return -ENODATA;

	err = parse_thread_groups(dn, &tg, THREAD_GROUP_SHARE_L1);
	if (err)
		goto out;

	zalloc_cpumask_var_node(&per_cpu(cpu_l1_cache_map, cpu),
				GFP_KERNEL, cpu_to_node(cpu));

	cpu_group_start = get_cpu_thread_group_start(cpu, &tg);

	if (unlikely(cpu_group_start == -1)) {
		WARN_ON_ONCE(1);
		err = -ENODATA;
		goto out;
	}

	for (i = first_thread; i < first_thread + threads_per_core; i++) {
		int i_group_start = get_cpu_thread_group_start(i, &tg);

		if (unlikely(i_group_start == -1)) {
			WARN_ON_ONCE(1);
			err = -ENODATA;
			goto out;
		}

		if (i_group_start == cpu_group_start)
			cpumask_set_cpu(i, per_cpu(cpu_l1_cache_map, cpu));
	}

out:
	of_node_put(dn);
	return err;
}

static int init_big_cores(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		int err = init_cpu_l1_cache_map(cpu);

		if (err)
			return err;

		zalloc_cpumask_var_node(&per_cpu(cpu_smallcore_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
	}

	has_big_cores = true;
	return 0;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu;

	DBG("smp_prepare_cpus\n");

	/*
	 * setup_cpu may need to be called on the boot cpu. We haven't
	 * spun any cpus up but lets be paranoid.
	 */
	BUG_ON(boot_cpuid != smp_processor_id());

	/* Fixup boot cpu */
	smp_store_cpu_info(boot_cpuid);
	cpu_callin_map[boot_cpuid] = 1;

	for_each_possible_cpu(cpu) {
		zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		zalloc_cpumask_var_node(&per_cpu(cpu_l2_cache_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		/*
		 * numa_node_id() works after this.
		 */
		if (cpu_present(cpu)) {
			set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]);
			set_cpu_numa_mem(cpu,
				local_memory_node(numa_cpu_lookup_table[cpu]));
		}
	}

	/* Init the cpumasks so the boot CPU is related to itself */
	cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
	cpumask_set_cpu(boot_cpuid, cpu_l2_cache_mask(boot_cpuid));
	cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));

	init_big_cores();
	if (has_big_cores) {
		cpumask_set_cpu(boot_cpuid,
				cpu_smallcore_mask(boot_cpuid));
	}

	if (smp_ops && smp_ops->probe)
		smp_ops->probe();
}

void smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != boot_cpuid);
#ifdef CONFIG_PPC64
	paca_ptrs[boot_cpuid]->__current = current;
#endif
	set_numa_node(numa_cpu_lookup_table[boot_cpuid]);
	current_set[boot_cpuid] = task_thread_info(current);
}

#ifdef CONFIG_HOTPLUG_CPU

int generic_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == boot_cpuid)
		return -EBUSY;

	set_cpu_online(cpu, false);
#ifdef CONFIG_PPC64
	vdso_data->processorCount--;
#endif
	/* Update affinity of all IRQs previously aimed at this CPU */
	irq_migrate_all_off_this_cpu();

	/*
	 * Depending on the details of the interrupt controller, it's possible
	 * that one of the interrupts we just migrated away from this CPU is
	 * actually already pending on this CPU. If we leave it in that state
	 * the interrupt will never be EOI'ed, and will never fire again. So
	 * temporarily enable interrupts here, to allow any pending interrupt
	 * to be received (and EOI'ed), before we take this CPU offline.
	 */
	local_irq_enable();
	mdelay(1);
	local_irq_disable();

	return 0;
}

void generic_cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (is_cpu_dead(cpu))
			return;
		msleep(100);
	}
	printk(KERN_ERR "CPU%d didn't die...\n", cpu);
}

void generic_set_cpu_dead(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_DEAD;
}

/*
 * The cpu_state should be set to CPU_UP_PREPARE in kick_cpu(), otherwise
 * the cpu_state is always CPU_DEAD after calling generic_set_cpu_dead(),
 * which makes the delay in generic_cpu_die() not happen.
 */
void generic_set_cpu_up(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
}

int generic_check_cpu_restart(unsigned int cpu)
{
	return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
}

int is_cpu_dead(unsigned int cpu)
{
	return per_cpu(cpu_state, cpu) == CPU_DEAD;
}

static bool secondaries_inhibited(void)
{
	return kvm_hv_mode_active();
}

#else /* HOTPLUG_CPU */

#define secondaries_inhibited()		0

#endif

static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
{
	struct thread_info *ti = task_thread_info(idle);

#ifdef CONFIG_PPC64
	paca_ptrs[cpu]->__current = idle;
	paca_ptrs[cpu]->kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD;
#endif
	ti->cpu = cpu;
	secondary_ti = current_set[cpu] = ti;
}

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int rc, c;

	/*
	 * Don't allow secondary threads to come online if inhibited
	 */
	if (threads_per_core > 1 && secondaries_inhibited() &&
	    cpu_thread_in_subcore(cpu))
		return -EBUSY;

	if (smp_ops == NULL ||
	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
		return -EINVAL;

	cpu_idle_thread_init(cpu, tidle);

	/*
	 * The platform might need to allocate resources prior to bringing
	 * up the CPU.
	 */
	if (smp_ops->prepare_cpu) {
		rc = smp_ops->prepare_cpu(cpu);
		if (rc)
			return rc;
	}

	/* Make sure callin-map entry is 0 (can be left over from a CPU
	 * being unplugged).
	 */
	cpu_callin_map[cpu] = 0;

	/* The information for processor bringup must
	 * be written out to main store before we release
	 * the processor.
	 */
	smp_mb();

	/* wake up cpus */
	DBG("smp: kicking cpu %d\n", cpu);
	rc = smp_ops->kick_cpu(cpu);
	if (rc) {
		pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
		return rc;
	}

	/*
	 * wait to see if the cpu made a callin (is actually up).
	 * use this value that I found through experimentation.
	 * -- Cort
	 */
	if (system_state < SYSTEM_RUNNING)
		for (c = 50000; c && !cpu_callin_map[cpu]; c--)
			udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
	else
		/*
		 * CPUs can take much longer to come up in the
		 * hotplug case. Wait five seconds.
		 */
		for (c = 5000; c && !cpu_callin_map[cpu]; c--)
			msleep(1);
#endif

	if (!cpu_callin_map[cpu]) {
		printk(KERN_ERR "Processor %u is stuck.\n", cpu);
		return -ENOENT;
	}

	DBG("Processor %u found.\n", cpu);

	if (smp_ops->give_timebase)
		smp_ops->give_timebase();

	/* Wait until cpu puts itself in the online & active maps */
	spin_until_cond(cpu_online(cpu));

	return 0;
}

/* Return the value of the reg property corresponding to the given
 * logical cpu.
 */
int cpu_to_core_id(int cpu)
{
	struct device_node *np;
	const __be32 *reg;
	int id = -1;

	np = of_get_cpu_node(cpu, NULL);
	if (!np)
		goto out;

	reg = of_get_property(np, "reg", NULL);
	if (!reg)
		goto out;

	id = be32_to_cpup(reg);
out:
	of_node_put(np);
	return id;
}
EXPORT_SYMBOL_GPL(cpu_to_core_id);

/* Helper routines for cpu to core mapping */
int cpu_core_index_of_thread(int cpu)
{
	return cpu >> threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);

int cpu_first_thread_of_core(int core)
{
	return core << threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);

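/*
 * For example, with SMT8 cores (threads_shift == 3), thread 13 lives
 * on core 13 >> 3 == 1, and that core's first thread is 1 << 3 == 8.
 */
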
/* Must be called when no change can occur to cpu_present_mask,
 * i.e. during cpu online or offline.
 */
static struct device_node *cpu_to_l2cache(int cpu)
{
	struct device_node *np;
	struct device_node *cache;

	if (!cpu_present(cpu))
		return NULL;

	np = of_get_cpu_node(cpu, NULL);
	if (np == NULL)
		return NULL;

	cache = of_find_next_cache_node(np);

	of_node_put(np);

	return cache;
}

static bool update_mask_by_l2(int cpu, struct cpumask *(*mask_fn)(int))
{
	struct device_node *l2_cache, *np;
	int i;

	l2_cache = cpu_to_l2cache(cpu);
	if (!l2_cache)
		return false;

	for_each_cpu(i, cpu_online_mask) {
		/*
		 * When updating the masks the current CPU has not yet been
		 * marked online, but we still need to update its cache masks.
		 */
		np = cpu_to_l2cache(i);
		if (!np)
			continue;

		if (np == l2_cache)
			set_cpus_related(cpu, i, mask_fn);

		of_node_put(np);
	}
	of_node_put(l2_cache);

	return true;
}

#ifdef CONFIG_HOTPLUG_CPU
static void remove_cpu_from_masks(int cpu)
{
	int i;

	/* NB: cpu_core_mask is a superset of the others */
	for_each_cpu(i, cpu_core_mask(cpu)) {
		set_cpus_unrelated(cpu, i, cpu_core_mask);
		set_cpus_unrelated(cpu, i, cpu_l2_cache_mask);
		set_cpus_unrelated(cpu, i, cpu_sibling_mask);
		if (has_big_cores)
			set_cpus_unrelated(cpu, i, cpu_smallcore_mask);
	}
}
#endif

static inline void add_cpu_to_smallcore_masks(int cpu)
{
	struct cpumask *this_l1_cache_map = per_cpu(cpu_l1_cache_map, cpu);
	int i, first_thread = cpu_first_thread_sibling(cpu);

	if (!has_big_cores)
		return;

	cpumask_set_cpu(cpu, cpu_smallcore_mask(cpu));

	for (i = first_thread; i < first_thread + threads_per_core; i++) {
		if (cpu_online(i) && cpumask_test_cpu(i, this_l1_cache_map))
			set_cpus_related(i, cpu, cpu_smallcore_mask);
	}
}

static void add_cpu_to_masks(int cpu)
{
	int first_thread = cpu_first_thread_sibling(cpu);
	int chipid = cpu_to_chip_id(cpu);
	int i;

	/*
	 * This CPU will not be in the online mask yet so we need to manually
	 * add it to its own thread sibling mask.
	 */
	cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));

	for (i = first_thread; i < first_thread + threads_per_core; i++)
		if (cpu_online(i))
			set_cpus_related(i, cpu, cpu_sibling_mask);

	add_cpu_to_smallcore_masks(cpu);
	/*
	 * Copy the thread sibling mask into the cache sibling mask
	 * and mark any CPUs that share an L2 with this CPU.
	 */
	for_each_cpu(i, cpu_sibling_mask(cpu))
		set_cpus_related(cpu, i, cpu_l2_cache_mask);
	update_mask_by_l2(cpu, cpu_l2_cache_mask);

	/*
	 * Copy the cache sibling mask into core sibling mask and mark
	 * any CPUs on the same chip as this CPU.
	 */
	for_each_cpu(i, cpu_l2_cache_mask(cpu))
		set_cpus_related(cpu, i, cpu_core_mask);

	if (chipid == -1)
		return;

	for_each_cpu(i, cpu_online_mask)
		if (cpu_to_chip_id(i) == chipid)
			set_cpus_related(cpu, i, cpu_core_mask);
}

static bool shared_caches;

/* Activate a secondary processor. */
void start_secondary(void *unused)
{
	unsigned int cpu = smp_processor_id();
	struct cpumask *(*sibling_mask)(int) = cpu_sibling_mask;

	mmgrab(&init_mm);
	current->active_mm = &init_mm;

	smp_store_cpu_info(cpu);
	set_dec(tb_ticks_per_jiffy);
	preempt_disable();
	cpu_callin_map[cpu] = 1;

	if (smp_ops->setup_cpu)
		smp_ops->setup_cpu(cpu);
	if (smp_ops->take_timebase)
		smp_ops->take_timebase();

	secondary_cpu_time_init();

#ifdef CONFIG_PPC64
	if (system_state == SYSTEM_RUNNING)
		vdso_data->processorCount++;

	vdso_getcpu_init();
#endif
	/* Update topology CPU masks */
	add_cpu_to_masks(cpu);

	if (has_big_cores)
		sibling_mask = cpu_smallcore_mask;
	/*
	 * Check for any shared caches. Note that this must be done on a
	 * per-core basis because one core in the pair might be disabled.
	 */
	if (!cpumask_equal(cpu_l2_cache_mask(cpu), sibling_mask(cpu)))
		shared_caches = true;

	set_numa_node(numa_cpu_lookup_table[cpu]);
	set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));

	smp_wmb();
	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);

	boot_init_stack_canary();

	local_irq_enable();

	/* We can enable ftrace for secondary cpus now */
	this_cpu_enable_ftrace();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);

	BUG();
}

int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

#ifdef CONFIG_SCHED_SMT
/* cpumask of CPUs with asymmetric SMT dependency */
static int powerpc_smt_flags(void)
{
	int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;

	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
		flags |= SD_ASYM_PACKING;
	}

	return flags;
}
#endif

static struct sched_domain_topology_level powerpc_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
#endif
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};

/*
 * P9 has a slightly odd architecture where pairs of cores share an L2 cache.
 * This topology makes it *much* cheaper to migrate tasks between adjacent
 * cores since the migrated task remains cache hot. We want to take advantage
 * of this at the scheduler level so an extra topology level is required.
 */
static int powerpc_shared_cache_flags(void)
{
	return SD_SHARE_PKG_RESOURCES;
}

/*
 * We can't just pass cpu_l2_cache_mask() directly because it
 * returns a non-const pointer and the compiler barfs on that.
 */
static const struct cpumask *shared_cache_mask(int cpu)
{
	return cpu_l2_cache_mask(cpu);
}

#ifdef CONFIG_SCHED_SMT
static const struct cpumask *smallcore_smt_mask(int cpu)
{
	return cpu_smallcore_mask(cpu);
}
#endif

static struct sched_domain_topology_level power9_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
#endif
	{ shared_cache_mask, powerpc_shared_cache_flags, SD_INIT_NAME(CACHE) },
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};

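/*
 * On a P9 with shared L2s this builds the scheduler domain hierarchy
 * SMT (threads of one core) -> CACHE (the core pair sharing an L2) ->
 * DIE (all CPUs), whereas powerpc_topology above omits the CACHE
 * level. smp_cpus_done() below picks between the two at boot.
 */
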
void __init smp_cpus_done(unsigned int max_cpus)
{
	/*
	 * We are running pinned to the boot CPU, see rest_init().
	 */
	if (smp_ops && smp_ops->setup_cpu)
		smp_ops->setup_cpu(boot_cpuid);

	if (smp_ops && smp_ops->bringup_done)
		smp_ops->bringup_done();

	/*
	 * On a shared LPAR, associativity needs to be requested.
	 * Hence, get numa topology before dumping cpu topology.
	 */
	shared_proc_topology_init();
	dump_numa_cpu_topology();

#ifdef CONFIG_SCHED_SMT
	if (has_big_cores) {
		pr_info("Using small cores at SMT level\n");
		power9_topology[0].mask = smallcore_smt_mask;
		powerpc_topology[0].mask = smallcore_smt_mask;
	}
#endif
	/*
	 * If any CPU detects that it's sharing a cache with another CPU then
	 * use the deeper topology that is aware of this sharing.
	 */
	if (shared_caches) {
		pr_info("Using shared cache scheduler topology\n");
		set_sched_topology(power9_topology);
	} else {
		pr_info("Using standard scheduler topology\n");
		set_sched_topology(powerpc_topology);
	}
}

#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
{
	int cpu = smp_processor_id();
	int err;

	if (!smp_ops->cpu_disable)
		return -ENOSYS;

	this_cpu_disable_ftrace();

	err = smp_ops->cpu_disable();
	if (err)
		return err;

	/* Update sibling maps */
	remove_cpu_from_masks(cpu);

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	if (smp_ops->cpu_die)
		smp_ops->cpu_die(cpu);
}

void cpu_die(void)
{
	/*
	 * Disable on the down path. This will be re-enabled by
	 * start_secondary() via start_secondary_resume() below.
	 */
	this_cpu_disable_ftrace();

	if (ppc_md.cpu_die)
		ppc_md.cpu_die();

	/* If we return, we re-enter start_secondary */
	start_secondary_resume();
}
#endif