/*
 *	x86 SMP booting functions
 *
 *	(c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 *	(c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
 *	Copyright 2001 Andi Kleen, SuSE Labs.
 *
 *	Much of the core SMP work is based on previous work by Thomas Radke, to
 *	whom a great many thanks are extended.
 *
 *	Thanks to Intel for making available several different Pentium,
 *	Pentium Pro and Pentium-II/Xeon MP machines.
 *	Original development of Linux SMP code supported by Caldera.
 *
 *	This code is released under the GNU General Public License version 2
 *
 *	Fixes
 *		Felix Koop	:	NR_CPUS used properly
 *		Jose Renau	:	Handle single CPU case.
 *		Alan Cox	:	By repeated request 8) - Total BogoMIP report.
 *		Greg Wright	:	Fix for kernel stacks panic.
 *		Erich Boleyn	:	MP v1.4 and additional changes.
 *	Matthias Sattler	:	Changes for 2.1 kernel map.
 *	Michel Lespinasse	:	Changes for 2.1 kernel map.
 *	Michael Chastain	:	Change trampoline.S to gnu as.
 *		Alan Cox	:	Dumb bug: 'B' step PPro's are fine
 *		Ingo Molnar	:	Added APIC timers, based on code
 *					from Jose Renau
 *		Ingo Molnar	:	various cleanups and rewrites
 *		Tigran Aivazian	:	fixed "0.00 in /proc/uptime on SMP" bug.
 *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs
 *	Andi Kleen		:	Changed for SMP boot into long mode.
 *		Rusty Russell	:	Hacked into shape for new "hotplug" boot process.
 *	Andi Kleen		:	Converted to new state machine.
 *					Various cleanups.
 *					Probably mostly hotplug CPU ready now.
 *	Ashok Raj		:	CPU hotplug support
 */

#include <linux/init.h>

#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/bootmem.h>
#include <linux/thread_info.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/mc146818rtc.h>
#include <linux/smp.h>
#include <linux/kdebug.h>

#include <asm/mtrr.h>
#include <asm/pgalloc.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/proto.h>
#include <asm/nmi.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>
#include <asm/numa.h>

/* Set when the idlers are all forked */
int smp_threads_ready;

/*
 * Trampoline 80x86 program as an array.
 */

extern const unsigned char trampoline_data[];
extern const unsigned char trampoline_end[];

/* State of each CPU */
DEFINE_PER_CPU(int, cpu_state) = { 0 };

/*
 * Store all idle threads, this can be reused instead of creating
 * a new thread. Also avoids complicated thread destroy functionality
 * for idle threads.
 */
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is
 * removed after init for !CONFIG_HOTPLUG_CPU.
 */
static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
#define get_idle_for_cpu(x)	(per_cpu(idle_thread_array, x))
#define set_idle_for_cpu(x, p)	(per_cpu(idle_thread_array, x) = (p))
#else
struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata;
#define get_idle_for_cpu(x)	(idle_thread_array[(x)])
#define set_idle_for_cpu(x, p)	(idle_thread_array[(x)] = (p))
#endif

/*
 * Currently trivial. Write the real->protected mode
 * bootstrap into the page concerned. The caller
 * has made sure it's suitably aligned.
 */

static unsigned long __cpuinit setup_trampoline(void)
{
	void *tramp = __va(SMP_TRAMPOLINE_BASE);

	memcpy(tramp, trampoline_data, trampoline_end - trampoline_data);
	return virt_to_phys(tramp);
}
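
/*
 * Note: the trampoline has to live at a fixed, page-aligned address
 * below 1MB because the AP starts executing in real mode, and the
 * STARTUP IPI can only name a 4K page in that range (see the vector
 * encoding in wakeup_secondary_via_INIT() below).
 */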

/*
 * The bootstrap kernel entry code has set these up. Save them for
 * a given CPU.
 */

static void __cpuinit smp_store_cpu_info(int id)
{
	struct cpuinfo_x86 *c = &cpu_data(id);

	*c = boot_cpu_data;
	c->cpu_index = id;
	identify_cpu(c);
	print_cpu_info(c);
}

static atomic_t init_deasserted __cpuinitdata;

/*
 * Report back to the Boot Processor.
 * Running on AP.
 */
void __cpuinit smp_callin(void)
{
	int cpuid, phys_id;
	unsigned long timeout;

	/*
	 * If woken up by an INIT in an 82489DX configuration
	 * we may get here before an INIT-deassert IPI reaches
	 * our local APIC. We have to wait for the IPI or we'll
	 * lock up on an APIC access.
	 */
	while (!atomic_read(&init_deasserted))
		cpu_relax();

	/*
	 * (This works even if the APIC is not enabled.)
	 */
	phys_id = GET_APIC_ID(apic_read(APIC_ID));
	cpuid = smp_processor_id();
	if (cpu_isset(cpuid, cpu_callin_map)) {
		panic("smp_callin: phys CPU#%d, CPU#%d already present??\n",
					phys_id, cpuid);
	}
	Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);

	/*
	 * STARTUP IPIs are fragile beasts as they might sometimes
	 * trigger some glue motherboard logic. Complete APIC bus
	 * silence for 1 second, this overestimates the time the
	 * boot CPU is spending to send the up to 2 STARTUP IPIs
	 * by a factor of two. This should be enough.
	 */

	/*
	 * Waiting 2s total for startup (udelay is not yet working)
	 */
	timeout = jiffies + 2*HZ;
	while (time_before(jiffies, timeout)) {
		/*
		 * Has the boot CPU finished its STARTUP sequence?
		 */
		if (cpu_isset(cpuid, cpu_callout_map))
			break;
		cpu_relax();
	}

	if (!time_before(jiffies, timeout)) {
		panic("smp_callin: CPU%d started up but did not get a callout!\n",
			cpuid);
	}

	/*
	 * The boot CPU has finished the init stage and is spinning
	 * on callin_map until we finish. We are free to set up this
	 * CPU, first the APIC. (this is probably redundant on most
	 * boards)
	 */

	Dprintk("CALLIN, before setup_local_APIC().\n");
	setup_local_APIC();
	end_local_APIC_setup();

	/*
	 * Get our bogomips.
	 *
	 * Need to enable IRQs because it can take longer and then
	 * the NMI watchdog might kill us.
	 */
	local_irq_enable();
	calibrate_delay();
	local_irq_disable();
	Dprintk("Stack at about %p\n", &cpuid);

	/*
	 * Save our processor parameters
	 */
	smp_store_cpu_info(cpuid);

	/*
	 * Allow the master to continue.
	 */
	cpu_set(cpuid, cpu_callin_map);
}
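
/*
 * The callout/callin handshake above is the AP half of a two-sided
 * rendezvous: the BSP marks the AP in cpu_callout_map once it has sent
 * the startup IPIs (see do_boot_cpu()), the AP answers by setting
 * itself in cpu_callin_map, and the BSP polls that map before
 * declaring the CPU booted.
 */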

/* maps the cpu to the sched domain representing multi-core */
cpumask_t cpu_coregroup_map(int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	/*
	 * For perf, we return last level cache shared map.
	 * And for power savings, we return cpu_core_map
	 */
	if (sched_mc_power_savings || sched_smt_power_savings)
		return per_cpu(cpu_core_map, cpu);
	else
		return c->llc_shared_map;
}

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

void __cpuinit set_cpu_sibling_map(int cpu)
{
	int i;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	cpu_set(cpu, cpu_sibling_setup_map);

	if (smp_num_siblings > 1) {
		for_each_cpu_mask(i, cpu_sibling_setup_map) {
			if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
			    c->cpu_core_id == cpu_data(i).cpu_core_id) {
				cpu_set(i, per_cpu(cpu_sibling_map, cpu));
				cpu_set(cpu, per_cpu(cpu_sibling_map, i));
				cpu_set(i, per_cpu(cpu_core_map, cpu));
				cpu_set(cpu, per_cpu(cpu_core_map, i));
				cpu_set(i, c->llc_shared_map);
				cpu_set(cpu, cpu_data(i).llc_shared_map);
			}
		}
	} else {
		cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
	}

	cpu_set(cpu, c->llc_shared_map);

	if (current_cpu_data.x86_max_cores == 1) {
		per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
		c->booted_cores = 1;
		return;
	}

	for_each_cpu_mask(i, cpu_sibling_setup_map) {
		if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
		    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
			cpu_set(i, c->llc_shared_map);
			cpu_set(cpu, cpu_data(i).llc_shared_map);
		}
		if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
			cpu_set(i, per_cpu(cpu_core_map, cpu));
			cpu_set(cpu, per_cpu(cpu_core_map, i));
			/*
			 * Does this new cpu bringup a new core?
			 */
			if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) {
				/*
				 * for each core in package, increment
				 * the booted_cores for this new cpu
				 */
				if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
					c->booted_cores++;
				/*
				 * increment the core count for all
				 * the other cpus in this package
				 */
				if (i != cpu)
					cpu_data(i).booted_cores++;
			} else if (i != cpu && !c->booted_cores)
				c->booted_cores = cpu_data(i).booted_cores;
		}
	}
}
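
/*
 * Example of the booted_cores accounting above: on a dual-core,
 * HT-enabled package, the first thread of a core calls in with a
 * sibling map of weight 1 and bumps booted_cores for every CPU in the
 * package; when its HT twin calls in later, the sibling map already
 * has weight 2, so the twin just inherits booted_cores from a sibling
 * instead of counting the core twice.
 */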

/*
 * Setup code on secondary processor (after coming out of the trampoline)
 */
void __cpuinit start_secondary(void)
{
	/*
	 * Don't put anything before smp_callin(); SMP booting is so
	 * fragile that we want to limit the things done here to the
	 * most necessary things.
	 */
	cpu_init();
	preempt_disable();
	smp_callin();

	/* otherwise gcc will move up the smp_processor_id before the cpu_init */
	barrier();

	/*
	 * Check TSC sync first:
	 */
	check_tsc_sync_target();

	if (nmi_watchdog == NMI_IO_APIC) {
		disable_8259A_irq(0);
		enable_NMI_through_LVT0();
		enable_8259A_irq(0);
	}

	/*
	 * The sibling maps must be set before turning the online map on for
	 * this cpu
	 */
	set_cpu_sibling_map(smp_processor_id());

	/*
	 * We need to hold call_lock, so there is no inconsistency
	 * between the time smp_call_function() determines number of
	 * IPI recipients, and the time when the determination is made
	 * for which cpus receive the IPI in genapic_flat.c. Holding this
	 * lock helps us to not include this cpu in a currently in progress
	 * smp_call_function().
	 */
	lock_ipi_call_lock();
	spin_lock(&vector_lock);

	/* Setup the per cpu irq handling data structures */
	__setup_vector_irq(smp_processor_id());

	/*
	 * Allow the master to continue.
	 */
	cpu_set(smp_processor_id(), cpu_online_map);
	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
	spin_unlock(&vector_lock);

	unlock_ipi_call_lock();

	setup_secondary_clock();

	cpu_idle();
}

extern volatile unsigned long init_rsp;
extern void (*initial_code)(void);

#ifdef APIC_DEBUG
static void inquire_remote_apic(int apicid)
{
	unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
	char *names[] = { "ID", "VERSION", "SPIV" };
	int timeout;
	u32 status;

	printk(KERN_INFO "Inquiring remote APIC #%d...\n", apicid);

	for (i = 0; i < ARRAY_SIZE(regs); i++) {
		printk(KERN_INFO "... APIC #%d %s: ", apicid, names[i]);

		/*
		 * Wait for idle.
		 */
		status = safe_apic_wait_icr_idle();
		if (status)
			printk(KERN_CONT
			       "a previous APIC delivery may have failed\n");

		apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
		apic_write(APIC_ICR, APIC_DM_REMRD | regs[i]);

		timeout = 0;
		do {
			udelay(100);
			status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
		} while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);

		switch (status) {
		case APIC_ICR_RR_VALID:
			status = apic_read(APIC_RRR);
			printk(KERN_CONT "%08x\n", status);
			break;
		default:
			printk(KERN_CONT "failed\n");
		}
	}
}
#endif
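
/*
 * inquire_remote_apic() relies on the APIC "remote read" delivery mode
 * (APIC_DM_REMRD), a debugging aid from the serial APIC bus era; on
 * many newer systems the remote-read cycle apparently never completes
 * and the inquiry just prints "failed".
 */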

/*
 * Kick the secondary to wake up.
 */
static int __cpuinit wakeup_secondary_via_INIT(int phys_apicid,
					       unsigned int start_rip)
{
	unsigned long send_status, accept_status = 0;
	int maxlvt, num_starts, j;

	Dprintk("Asserting INIT.\n");

	/*
	 * Turn INIT on target chip
	 */
	apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

	/*
	 * Send IPI
	 */
	apic_write(APIC_ICR, APIC_INT_LEVELTRIG | APIC_INT_ASSERT
				| APIC_DM_INIT);

	Dprintk("Waiting for send to finish...\n");
	send_status = safe_apic_wait_icr_idle();

	mdelay(10);

	Dprintk("Deasserting INIT.\n");

	/* Target chip */
	apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

	/* Send IPI */
	apic_write(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT);

	Dprintk("Waiting for send to finish...\n");
	send_status = safe_apic_wait_icr_idle();

	mb();
	atomic_set(&init_deasserted, 1);

	num_starts = 2;

	/*
	 * Run STARTUP IPI loop.
	 */
	Dprintk("#startup loops: %d.\n", num_starts);

	maxlvt = lapic_get_maxlvt();

	for (j = 1; j <= num_starts; j++) {
		Dprintk("Sending STARTUP #%d.\n", j);
		apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
		Dprintk("After apic_write.\n");

		/*
		 * STARTUP IPI
		 */

		/* Target chip */
		apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

		/* Boot on the stack */
		/* Kick the second */
		apic_write(APIC_ICR, APIC_DM_STARTUP | (start_rip >> 12));
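
		/*
		 * The STARTUP IPI vector field is the physical start
		 * address shifted right by 12, i.e. a real-mode page
		 * number: start_rip therefore has to be 4K-aligned and
		 * below 1MB, which setup_trampoline() guarantees.
		 */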

		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		udelay(300);

		Dprintk("Startup point 1.\n");

		Dprintk("Waiting for send to finish...\n");
		send_status = safe_apic_wait_icr_idle();

		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		udelay(200);
		/*
		 * Due to the Pentium erratum 3AP.
		 */
		if (maxlvt > 3) {
			apic_write(APIC_ESR, 0);
		}
		accept_status = (apic_read(APIC_ESR) & 0xEF);
		if (send_status || accept_status)
			break;
	}
	Dprintk("After Startup.\n");

	if (send_status)
		printk(KERN_ERR "APIC never delivered???\n");
	if (accept_status)
		printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status);

	return (send_status | accept_status);
}
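
/*
 * The sequence above is the Intel MP spec 1.4 "universal" wakeup
 * algorithm: assert INIT, wait, deassert INIT, then send up to two
 * STARTUP IPIs. The old 82489DX has no STARTUP IPI and instead starts
 * from the warm-reset vector that do_boot_cpu() programs; integrated
 * local APICs take their entry point from the STARTUP vector.
 */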

struct create_idle {
	struct work_struct work;
	struct task_struct *idle;
	struct completion done;
	int cpu;
};

static void __cpuinit do_fork_idle(struct work_struct *work)
{
	struct create_idle *c_idle =
		container_of(work, struct create_idle, work);

	c_idle->idle = fork_idle(c_idle->cpu);
	complete(&c_idle->done);
}

/*
 * Boot one CPU.
 */
static int __cpuinit do_boot_cpu(int cpu, int apicid)
{
	unsigned long boot_error;
	int timeout;
	unsigned long start_rip;
	struct create_idle c_idle = {
		.cpu = cpu,
		.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
	};
	INIT_WORK(&c_idle.work, do_fork_idle);

	/* allocate memory for gdts of secondary cpus. Hotplug is considered */
	if (!cpu_gdt_descr[cpu].address &&
		!(cpu_gdt_descr[cpu].address = get_zeroed_page(GFP_KERNEL))) {
		printk(KERN_ERR "Failed to allocate GDT for CPU %d\n", cpu);
		return -1;
	}

	/* Allocate node local memory for AP pdas */
	if (cpu_pda(cpu) == &boot_cpu_pda[cpu]) {
		struct x8664_pda *newpda, *pda;
		int node = cpu_to_node(cpu);
		pda = cpu_pda(cpu);
		newpda = kmalloc_node(sizeof (struct x8664_pda), GFP_ATOMIC,
				      node);
		if (newpda) {
			memcpy(newpda, pda, sizeof (struct x8664_pda));
			cpu_pda(cpu) = newpda;
		} else
			printk(KERN_ERR
		"Could not allocate node local PDA for CPU %d on node %d\n",
				cpu, node);
	}

	alternatives_smp_switch(1);

	c_idle.idle = get_idle_for_cpu(cpu);

	if (c_idle.idle) {
		c_idle.idle->thread.sp = (unsigned long) (((struct pt_regs *)
			(THREAD_SIZE + task_stack_page(c_idle.idle))) - 1);
		init_idle(c_idle.idle, cpu);
		goto do_rest;
	}

	/*
	 * During the cold boot process the keventd thread is not spun up
	 * yet. When we do cpu hot-add, we create idle threads on the fly;
	 * they should not acquire any attributes from the calling context,
	 * so the clean way to create them is from keventd().
	 * The current_is_keventd() check is needed because the ACPI
	 * notifier also queues work to keventd(); if the caller is itself
	 * running in keventd context, waiting on the completion here would
	 * deadlock the keventd thread.
	 */
	if (!keventd_up() || current_is_keventd())
		c_idle.work.func(&c_idle.work);
	else {
		schedule_work(&c_idle.work);
		wait_for_completion(&c_idle.done);
	}

	if (IS_ERR(c_idle.idle)) {
		printk("failed fork for CPU %d\n", cpu);
		return PTR_ERR(c_idle.idle);
	}

	set_idle_for_cpu(cpu, c_idle.idle);

do_rest:

	cpu_pda(cpu)->pcurrent = c_idle.idle;

	start_rip = setup_trampoline();

	init_rsp = c_idle.idle->thread.sp;
	load_sp0(&per_cpu(init_tss, cpu), &c_idle.idle->thread);
	initial_code = start_secondary;
	clear_tsk_thread_flag(c_idle.idle, TIF_FORK);

	printk(KERN_INFO "Booting processor %d/%d APIC 0x%x\n", cpu,
		cpus_weight(cpu_present_map),
		apicid);

	/*
	 * This grunge runs the startup process for
	 * the targeted processor.
	 */

	atomic_set(&init_deasserted, 0);

	Dprintk("Setting warm reset code and vector.\n");

	CMOS_WRITE(0xa, 0xf);
	local_flush_tlb();
	*((volatile unsigned short *) phys_to_virt(0x469)) = start_rip >> 4;
	*((volatile unsigned short *) phys_to_virt(0x467)) = start_rip & 0xf;
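
	/*
	 * The lines above arm the BIOS warm-reset path: CMOS register
	 * 0xF is the shutdown status byte, and 0xA requests a warm
	 * start without EOI, i.e. a jump through the real-mode vector
	 * at 40:67, whose segment (0x469) and offset (0x467) we point
	 * at the trampoline. Integrated APICs boot via the STARTUP IPI
	 * instead, but programming the vector is harmless and covers
	 * the 82489DX case.
	 */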

	/*
	 * Be paranoid about clearing APIC errors.
	 */
	apic_write(APIC_ESR, 0);
	apic_read(APIC_ESR);

	/*
	 * Status is now clean
	 */
	boot_error = 0;

	/*
	 * Starting actual IPI sequence...
	 */
	boot_error = wakeup_secondary_via_INIT(apicid, start_rip);

	if (!boot_error) {
		/*
		 * allow APs to start initializing.
		 */
		Dprintk("Before Callout %d.\n", cpu);
		cpu_set(cpu, cpu_callout_map);
		Dprintk("After Callout %d.\n", cpu);

		/*
		 * Wait 5s total for a response
		 */
		for (timeout = 0; timeout < 50000; timeout++) {
			if (cpu_isset(cpu, cpu_callin_map))
				break;	/* It has booted */
			udelay(100);
		}

		if (cpu_isset(cpu, cpu_callin_map)) {
			/* number CPUs logically, starting from 1 (BSP is 0) */
			Dprintk("CPU has booted.\n");
		} else {
			boot_error = 1;
			if (*((volatile unsigned char *)phys_to_virt(SMP_TRAMPOLINE_BASE))
					== 0xA5)
				/* trampoline started but...? */
				printk("Stuck ??\n");
			else
				/* trampoline code not run */
				printk("Not responding.\n");
#ifdef APIC_DEBUG
			inquire_remote_apic(apicid);
#endif
		}
	}
	if (boot_error) {
		cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
		clear_bit(cpu, (unsigned long *)&cpu_initialized); /* was set by cpu_init() */
		clear_node_cpumask(cpu); /* was set by numa_add_cpu */
		cpu_clear(cpu, cpu_present_map);
		cpu_clear(cpu, cpu_possible_map);
		per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID;
		return -EIO;
	}

	return 0;
}

cycles_t cacheflush_time;
unsigned long cache_decay_ticks;

/*
 * Cleanup possible dangling ends...
 */
static __cpuinit void smp_cleanup_boot(void)
{
	/*
	 * Paranoid: Set warm reset code and vector here back
	 * to default values.
	 */
	CMOS_WRITE(0, 0xf);

	/*
	 * Reset trampoline flag
	 */
	*((volatile int *) phys_to_virt(0x467)) = 0;
}

/*
 * Fall back to non SMP mode after errors.
 *
 * RED-PEN audit/test this more. I bet there is more state messed up here.
 */
static __init void disable_smp(void)
{
	cpu_present_map = cpumask_of_cpu(0);
	cpu_possible_map = cpumask_of_cpu(0);
	if (smp_found_config)
		phys_cpu_present_map = physid_mask_of_physid(boot_cpu_id);
	else
		phys_cpu_present_map = physid_mask_of_physid(0);
	cpu_set(0, per_cpu(cpu_sibling_map, 0));
	cpu_set(0, per_cpu(cpu_core_map, 0));
}

/*
 * Various sanity checks.
 */
static int __init smp_sanity_check(unsigned max_cpus)
{
	if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) {
		printk("weird, boot CPU (#%d) not listed by the BIOS.\n",
		       hard_smp_processor_id());
		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}

	/*
	 * If we couldn't find an SMP configuration at boot time,
	 * get out of here now!
	 */
	if (!smp_found_config) {
		printk(KERN_NOTICE "SMP motherboard not detected.\n");
		disable_smp();
		if (APIC_init_uniprocessor())
			printk(KERN_NOTICE "Local APIC not detected."
					   " Using dummy APIC emulation.\n");
		return -1;
	}

	/*
	 * Should not be necessary because the MP table should list the boot
	 * CPU too, but we do it for the sake of robustness anyway.
	 */
	if (!physid_isset(boot_cpu_id, phys_cpu_present_map)) {
		printk(KERN_NOTICE "weird, boot CPU (#%d) not listed by the BIOS.\n",
								 boot_cpu_id);
		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}

	/*
	 * If we couldn't find a local APIC, then get out of here now!
	 */
	if (!cpu_has_apic) {
		printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
			boot_cpu_id);
		printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
		nr_ioapics = 0;
		return -1;
	}

	/*
	 * If SMP should be disabled, then really disable it!
	 */
	if (!max_cpus) {
		printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");
		nr_ioapics = 0;
		return -1;
	}

	return 0;
}

static void __init smp_cpu_index_default(void)
{
	int i;
	struct cpuinfo_x86 *c;

	for_each_cpu_mask(i, cpu_possible_map) {
		c = &cpu_data(i);
		/* mark all to hotplug */
		c->cpu_index = NR_CPUS;
	}
}

/*
 * Prepare for SMP bootup. The MP table or ACPI has been read
 * earlier. Just do some sanity checking here and enable APIC mode.
 */
void __init native_smp_prepare_cpus(unsigned int max_cpus)
{
	nmi_watchdog_default();
	smp_cpu_index_default();
	current_cpu_data = boot_cpu_data;
	current_thread_info()->cpu = 0;  /* needed? */
	set_cpu_sibling_map(0);

	if (smp_sanity_check(max_cpus) < 0) {
		printk(KERN_INFO "SMP disabled\n");
		disable_smp();
		return;
	}

	/*
	 * Switch from PIC to APIC mode.
	 */
	setup_local_APIC();

	/*
	 * Enable IO APIC before setting up error vector
	 */
	if (!skip_ioapic_setup && nr_ioapics)
		enable_IO_APIC();
	end_local_APIC_setup();

	if (GET_APIC_ID(apic_read(APIC_ID)) != boot_cpu_id) {
		panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
		      GET_APIC_ID(apic_read(APIC_ID)), boot_cpu_id);
		/* Or can we switch back to PIC here? */
	}

	/*
	 * Now start the IO-APICs
	 */
	if (!skip_ioapic_setup && nr_ioapics)
		setup_IO_APIC();
	else
		nr_ioapics = 0;

	/*
	 * Set up local APIC timer on boot CPU.
	 */
	setup_boot_clock();
}

/*
 * Early setup to make printk work.
 */
void __init native_smp_prepare_boot_cpu(void)
{
	int me = smp_processor_id();
	/* already set me in cpu_online_map in boot_cpu_init() */
	cpu_set(me, cpu_callout_map);
	per_cpu(cpu_state, me) = CPU_ONLINE;
}

/*
 * Entry point to boot a CPU.
 */
int __cpuinit native_cpu_up(unsigned int cpu)
{
	int apicid = cpu_present_to_apicid(cpu);
	unsigned long flags;
	int err;

	WARN_ON(irqs_disabled());

	Dprintk("++++++++++++++++++++=_---CPU UP %u\n", cpu);

	if (apicid == BAD_APICID || apicid == boot_cpu_id ||
	    !physid_isset(apicid, phys_cpu_present_map)) {
		printk("__cpu_up: bad cpu %d\n", cpu);
		return -EINVAL;
	}

	/*
	 * Already booted CPU?
	 */
	if (cpu_isset(cpu, cpu_callin_map)) {
		Dprintk("do_boot_cpu %d Already started\n", cpu);
		return -ENOSYS;
	}

	/*
	 * Save current MTRR state in case it was changed since early boot
	 * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync:
	 */
	mtrr_save_state();

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

	/* Boot it! */
	err = do_boot_cpu(cpu, apicid);
	if (err < 0) {
		Dprintk("do_boot_cpu failed %d\n", err);
		return err;
	}

	/* Unleash the CPU! */
	Dprintk("waiting for cpu %d\n", cpu);

	/*
	 * Make sure and check TSC sync:
	 */
	local_irq_save(flags);
	check_tsc_sync_source(cpu);
	local_irq_restore(flags);

	while (!cpu_isset(cpu, cpu_online_map))
		cpu_relax();

	return 0;
}
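
/*
 * native_cpu_up() is the BSP half of the bring-up handshake: it kicks
 * the AP via do_boot_cpu(), cross-checks the TSC against the new CPU,
 * and then spins until start_secondary() on the AP marks itself in
 * cpu_online_map.
 */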

/*
 * Finish the SMP boot.
 */
void __init native_smp_cpus_done(unsigned int max_cpus)
{
	smp_cleanup_boot();
	setup_ioapic_dest();
	check_nmi_watchdog();
}

#ifdef CONFIG_HOTPLUG_CPU

void remove_siblinginfo(int cpu)
{
	int sibling;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
		cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
		/*
		 * last thread sibling in this cpu core going down
		 */
		if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
			cpu_data(sibling).booted_cores--;
	}

	for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
		cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
	cpus_clear(per_cpu(cpu_sibling_map, cpu));
	cpus_clear(per_cpu(cpu_core_map, cpu));
	c->phys_proc_id = 0;
	c->cpu_core_id = 0;
	cpu_clear(cpu, cpu_sibling_setup_map);
}

static void __ref remove_cpu_from_maps(void)
{
	int cpu = smp_processor_id();

	cpu_clear(cpu, cpu_callout_map);
	cpu_clear(cpu, cpu_callin_map);
	clear_bit(cpu, (unsigned long *)&cpu_initialized); /* was set by cpu_init() */
	clear_node_cpumask(cpu);
}

int __cpu_disable(void)
{
	int cpu = smp_processor_id();

	/*
	 * Perhaps use cpufreq to drop frequency, but that could go
	 * into generic code.
	 *
	 * We won't take down the boot processor on i386 due to some
	 * interrupts only being able to be serviced by the BSP.
	 * Especially so if we're not using an IOAPIC	-zwane
	 */
	if (cpu == 0)
		return -EBUSY;

	if (nmi_watchdog == NMI_LOCAL_APIC)
		stop_apic_nmi_watchdog(NULL);
	clear_local_APIC();

	/*
	 * HACK:
	 * Allow any queued timer interrupts to get serviced
	 * This is only a temporary solution until we cleanup
	 * fixup_irqs as we do for IA64.
	 */
	local_irq_enable();
	mdelay(1);

	local_irq_disable();
	remove_siblinginfo(cpu);

	spin_lock(&vector_lock);
	/* It's now safe to remove this processor from the online map */
	cpu_clear(cpu, cpu_online_map);
	spin_unlock(&vector_lock);
	remove_cpu_from_maps();
	fixup_irqs(cpu_online_map);
	return 0;
}
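
/*
 * Note that the teardown order above mirrors start_secondary() in
 * reverse: sibling maps go first, then the CPU leaves cpu_online_map
 * under vector_lock, and only afterwards are the callout/callin bits
 * cleared and the IRQs re-routed to the remaining CPUs.
 */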

void __cpu_die(unsigned int cpu)
{
	/* We don't do anything here: idle task is faking death itself. */
	unsigned int i;

	for (i = 0; i < 10; i++) {
		/* They ack this in play_dead by setting CPU_DEAD */
		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
			printk("CPU %d is now offline\n", cpu);
			if (1 == num_online_cpus())
				alternatives_smp_switch(0);
			return;
		}
		msleep(100);
	}
	printk(KERN_ERR "CPU %u didn't die...\n", cpu);
}

#else /* ... !CONFIG_HOTPLUG_CPU */

int __cpu_disable(void)
{
	return -ENOSYS;
}

void __cpu_die(unsigned int cpu)
{
	/* We said "no" in __cpu_disable */
	BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */