x86: boot cpus from cpu_up, instead of prepare_cpus
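
Instead of bringing all secondary CPUs up in one pass from
smp_prepare_cpus()/smp_boot_cpus(), boot each AP on demand from
cpu_up() via native_cpu_up(): look up the APIC ID with
cpu_present_to_apicid(), mark the CPU CPU_UP_PREPARE, and run the
warm-boot path (__smp_prepare_cpu() -> do_warm_boot_cpu() ->
do_boot_cpu()) for every CPU, not only for hotplug re-onlining as
before.  smp_boot_cpus() shrinks to BSP and APIC setup plus the new
smp_sanity_check()/disable_smp() helpers, and the cpucount /
alloc_cpu_id() bookkeeping goes away.  A CPU that fails to boot is now
cleared from cpu_possible_map and has its x86_cpu_to_apicid entry
reset to BAD_APICID.  x86_cpu_to_apicid grows from u8 to u16 and
x86_bios_cpu_apicid is introduced.  The definitions of the shared CPU
maps and of helpers such as smp_store_cpu_info(),
set_cpu_sibling_map(), the trampoline code, __cpu_disable() and
__cpu_die() are dropped from this file; judging by the new
impress_friends()/smp_checks() externs, they now live in code shared
with 64-bit.

The resulting bring-up sequence, as read from this diff:

    cpu_up(cpu)
      native_cpu_up(cpu)
        __smp_prepare_cpu(cpu)            /* queues/calls do_warm_boot_cpu() */
          do_boot_cpu(apicid, cpu)        /* INIT/SIPI, waits for callin */
        cpu_set(cpu, smp_commenced_mask)  /* lets start_secondary() proceed */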
diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c
index 579b9b740c7c79326c079440cda33341b3ba6472..978e13708ddbcc6cc0d457416efbbe7989b7953b 100644
--- a/arch/x86/kernel/smpboot_32.c
+++ b/arch/x86/kernel/smpboot_32.c
 #include <asm/vmi.h>
 #include <asm/mtrr.h>
 
-/* Set if we find a B stepping CPU */
-static int __cpuinitdata smp_b_stepping;
-
-/* Number of siblings per CPU package */
-int smp_num_siblings = 1;
-EXPORT_SYMBOL(smp_num_siblings);
-
-/* Last level cache ID of each logical CPU */
-DEFINE_PER_CPU(u8, cpu_llc_id) = BAD_APICID;
-
-/* representing HT siblings of each logical CPU */
-DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
-EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
-
-/* representing HT and core siblings of each logical CPU */
-DEFINE_PER_CPU(cpumask_t, cpu_core_map);
-EXPORT_PER_CPU_SYMBOL(cpu_core_map);
-
-/* bitmap of online cpus */
-cpumask_t cpu_online_map __read_mostly;
-EXPORT_SYMBOL(cpu_online_map);
-
-cpumask_t cpu_callin_map;
-cpumask_t cpu_callout_map;
-cpumask_t cpu_possible_map;
-EXPORT_SYMBOL(cpu_possible_map);
 static cpumask_t smp_commenced_mask;
 
-/* Per CPU bogomips and other parameters */
-DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
-EXPORT_PER_CPU_SYMBOL(cpu_info);
-
 /* which logical CPU number maps to which CPU (physical APIC ID) */
-u8 x86_cpu_to_apicid_init[NR_CPUS] __initdata =
+u16 x86_cpu_to_apicid_init[NR_CPUS] __initdata =
                        { [0 ... NR_CPUS-1] = BAD_APICID };
 void *x86_cpu_to_apicid_early_ptr;
-DEFINE_PER_CPU(u8, x86_cpu_to_apicid) = BAD_APICID;
+DEFINE_PER_CPU(u16, x86_cpu_to_apicid) = BAD_APICID;
 EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid);
 
-u8 apicid_2_node[MAX_APICID];
-
-/*
- * Trampoline 80x86 program as an array.
- */
+u16 x86_bios_cpu_apicid_init[NR_CPUS] __initdata
+                               = { [0 ... NR_CPUS-1] = BAD_APICID };
+void *x86_bios_cpu_apicid_early_ptr;
+DEFINE_PER_CPU(u16, x86_bios_cpu_apicid) = BAD_APICID;
+EXPORT_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
 
-extern const unsigned char trampoline_data [];
-extern const unsigned char trampoline_end  [];
-static unsigned char *trampoline_base;
+u8 apicid_2_node[MAX_APICID];
 
 static void map_cpu_to_logical_apicid(void);
 
 /* State of each CPU. */
 DEFINE_PER_CPU(int, cpu_state) = { 0 };
 
-/*
- * Currently trivial. Write the real->protected mode
- * bootstrap into the page concerned. The caller
- * has made sure it's suitably aligned.
- */
-
-static unsigned long __cpuinit setup_trampoline(void)
-{
-       memcpy(trampoline_base, trampoline_data, trampoline_end - trampoline_data);
-       return virt_to_phys(trampoline_base);
-}
-
-/*
- * We are called very early to get the low memory for the
- * SMP bootup trampoline page.
- */
-void __init smp_alloc_memory(void)
-{
-       trampoline_base = alloc_bootmem_low_pages(PAGE_SIZE);
-       /*
-        * Has to be in very low memory so we can execute
-        * real-mode AP code.
-        */
-       if (__pa(trampoline_base) >= 0x9F000)
-               BUG();
-}
-
-/*
- * The bootstrap kernel entry code has set these up. Save them for
- * a given CPU
- */
-
-void __cpuinit smp_store_cpu_info(int id)
-{
-       struct cpuinfo_x86 *c = &cpu_data(id);
-
-       *c = boot_cpu_data;
-       c->cpu_index = id;
-       if (id!=0)
-               identify_secondary_cpu(c);
-       /*
-        * Mask B, Pentium, but not Pentium MMX
-        */
-       if (c->x86_vendor == X86_VENDOR_INTEL &&
-           c->x86 == 5 &&
-           c->x86_mask >= 1 && c->x86_mask <= 4 &&
-           c->x86_model <= 3)
-               /*
-                * Remember we have B step Pentia with bugs
-                */
-               smp_b_stepping = 1;
-
-       /*
-        * Certain Athlons might work (for various values of 'work') in SMP
-        * but they are not certified as MP capable.
-        */
-       if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) {
-
-               if (num_possible_cpus() == 1)
-                       goto valid_k7;
-
-               /* Athlon 660/661 is valid. */  
-               if ((c->x86_model==6) && ((c->x86_mask==0) || (c->x86_mask==1)))
-                       goto valid_k7;
-
-               /* Duron 670 is valid */
-               if ((c->x86_model==7) && (c->x86_mask==0))
-                       goto valid_k7;
-
-               /*
-                * Athlon 662, Duron 671, and Athlon >model 7 have capability bit.
-                * It's worth noting that the A5 stepping (662) of some Athlon XP's
-                * have the MP bit set.
-                * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for more.
-                */
-               if (((c->x86_model==6) && (c->x86_mask>=2)) ||
-                   ((c->x86_model==7) && (c->x86_mask>=1)) ||
-                    (c->x86_model> 7))
-                       if (cpu_has_mp)
-                               goto valid_k7;
-
-               /* If we get here, it's not a certified SMP capable AMD system. */
-               add_taint(TAINT_UNSAFE_SMP);
-       }
-
-valid_k7:
-       ;
-}
-
 static atomic_t init_deasserted;
 
 static void __cpuinit smp_callin(void)
@@ -247,7 +126,7 @@ static void __cpuinit smp_callin(void)
                 */
                if (cpu_isset(cpuid, cpu_callout_map))
                        break;
-               rep_nop();
+               cpu_relax();
        }
 
        if (!time_before(jiffies, timeout)) {
@@ -285,87 +164,6 @@ static void __cpuinit smp_callin(void)
        cpu_set(cpuid, cpu_callin_map);
 }
 
-static int cpucount;
-
-/* maps the cpu to the sched domain representing multi-core */
-cpumask_t cpu_coregroup_map(int cpu)
-{
-       struct cpuinfo_x86 *c = &cpu_data(cpu);
-       /*
-        * For perf, we return last level cache shared map.
-        * And for power savings, we return cpu_core_map
-        */
-       if (sched_mc_power_savings || sched_smt_power_savings)
-               return per_cpu(cpu_core_map, cpu);
-       else
-               return c->llc_shared_map;
-}
-
-/* representing cpus for which sibling maps can be computed */
-static cpumask_t cpu_sibling_setup_map;
-
-void __cpuinit set_cpu_sibling_map(int cpu)
-{
-       int i;
-       struct cpuinfo_x86 *c = &cpu_data(cpu);
-
-       cpu_set(cpu, cpu_sibling_setup_map);
-
-       if (smp_num_siblings > 1) {
-               for_each_cpu_mask(i, cpu_sibling_setup_map) {
-                       if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
-                           c->cpu_core_id == cpu_data(i).cpu_core_id) {
-                               cpu_set(i, per_cpu(cpu_sibling_map, cpu));
-                               cpu_set(cpu, per_cpu(cpu_sibling_map, i));
-                               cpu_set(i, per_cpu(cpu_core_map, cpu));
-                               cpu_set(cpu, per_cpu(cpu_core_map, i));
-                               cpu_set(i, c->llc_shared_map);
-                               cpu_set(cpu, cpu_data(i).llc_shared_map);
-                       }
-               }
-       } else {
-               cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
-       }
-
-       cpu_set(cpu, c->llc_shared_map);
-
-       if (current_cpu_data.x86_max_cores == 1) {
-               per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
-               c->booted_cores = 1;
-               return;
-       }
-
-       for_each_cpu_mask(i, cpu_sibling_setup_map) {
-               if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
-                   per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
-                       cpu_set(i, c->llc_shared_map);
-                       cpu_set(cpu, cpu_data(i).llc_shared_map);
-               }
-               if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
-                       cpu_set(i, per_cpu(cpu_core_map, cpu));
-                       cpu_set(cpu, per_cpu(cpu_core_map, i));
-                       /*
-                        *  Does this new cpu bringup a new core?
-                        */
-                       if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) {
-                               /*
-                                * for each core in package, increment
-                                * the booted_cores for this new cpu
-                                */
-                               if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
-                                       c->booted_cores++;
-                               /*
-                                * increment the core count for all
-                                * the other cpus in this package
-                                */
-                               if (i != cpu)
-                                       cpu_data(i).booted_cores++;
-                       } else if (i != cpu && !c->booted_cores)
-                               c->booted_cores = cpu_data(i).booted_cores;
-               }
-       }
-}
-
 /*
  * Activate a secondary processor.
  */
@@ -383,23 +181,20 @@ static void __cpuinit start_secondary(void *unused)
        preempt_disable();
        smp_callin();
        while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
-               rep_nop();
+               cpu_relax();
+
+       /* otherwise gcc will move smp_processor_id() up before cpu_init() */
+       barrier();
        /*
         * Check TSC synchronization with the BP:
         */
        check_tsc_sync_target();
 
-       setup_secondary_clock();
        if (nmi_watchdog == NMI_IO_APIC) {
                disable_8259A_irq(0);
                enable_NMI_through_LVT0();
                enable_8259A_irq(0);
        }
-       /*
-        * low-memory mappings have been cleared, flush them from
-        * the local TLBs too.
-        */
-       local_flush_tlb();
 
        /* This must be done before setting cpu_online_map */
        set_cpu_sibling_map(raw_smp_processor_id());
@@ -418,8 +213,7 @@ static void __cpuinit start_secondary(void *unused)
        unlock_ipi_call_lock();
        per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
 
-       /* We can take interrupts now: we're officially "up". */
-       local_irq_enable();
+       setup_secondary_clock();
 
        wmb();
        cpu_idle();
@@ -509,22 +303,23 @@ static void unmap_cpu_to_logical_apicid(int cpu)
 
 static inline void __inquire_remote_apic(int apicid)
 {
-       int i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
+       unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
        char *names[] = { "ID", "VERSION", "SPIV" };
        int timeout;
-       unsigned long status;
+       u32 status;
 
-       printk("Inquiring remote APIC #%d...\n", apicid);
+       printk(KERN_INFO "Inquiring remote APIC #%d...\n", apicid);
 
        for (i = 0; i < ARRAY_SIZE(regs); i++) {
-               printk("... APIC #%d %s: ", apicid, names[i]);
+               printk(KERN_INFO "... APIC #%d %s: ", apicid, names[i]);
 
                /*
                 * Wait for idle.
                 */
                status = safe_apic_wait_icr_idle();
                if (status)
-                       printk("a previous APIC delivery may have failed\n");
+                       printk(KERN_CONT
+                              "a previous APIC delivery may have failed\n");
 
                apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
                apic_write_around(APIC_ICR, APIC_DM_REMRD | regs[i]);
@@ -538,10 +333,10 @@ static inline void __inquire_remote_apic(int apicid)
                switch (status) {
                case APIC_ICR_RR_VALID:
                        status = apic_read(APIC_RRR);
-                       printk("%lx\n", status);
+                       printk(KERN_CONT "%08x\n", status);
                        break;
                default:
-                       printk("failed\n");
+                       printk(KERN_CONT "failed\n");
                }
        }
 }
@@ -637,6 +432,7 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
        Dprintk("Waiting for send to finish...\n");
        send_status = safe_apic_wait_icr_idle();
 
+       mb();
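+       /*
+        * Presumably: the barrier above ensures the INIT deassert sequence
+        * is globally visible before init_deasserted is set, which the AP
+        * spins on in smp_callin().
+        */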
        atomic_set(&init_deasserted, 1);
 
        /*
@@ -720,16 +516,6 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
 #endif /* WAKE_SECONDARY_VIA_INIT */
 
 extern cpumask_t cpu_initialized;
-static inline int alloc_cpu_id(void)
-{
-       cpumask_t       tmp_map;
-       int cpu;
-       cpus_complement(tmp_map, cpu_present_map);
-       cpu = first_cpu(tmp_map);
-       if (cpu >= NR_CPUS)
-               return -ENODEV;
-       return cpu;
-}
 
 #ifdef CONFIG_HOTPLUG_CPU
 static struct task_struct * __cpuinitdata cpu_idle_tasks[NR_CPUS];
@@ -790,7 +576,6 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
        /* start_eip had better be page-aligned! */
        start_eip = setup_trampoline();
 
-       ++cpucount;
        alternatives_smp_switch(1);
 
        /* So we see what's up   */
@@ -800,7 +585,6 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
 
        irq_ctx_init(cpu);
 
-       per_cpu(x86_cpu_to_apicid, cpu) = apicid;
        /*
         * This grunge runs the startup process for
         * the targeted processor.
@@ -860,10 +644,8 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
                unmap_cpu_to_logical_apicid(cpu);
                cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
                cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */
-               cpucount--;
-       } else {
-               per_cpu(x86_cpu_to_apicid, cpu) = apicid;
-               cpu_set(cpu, cpu_present_map);
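+               /* boot failed: take this CPU back out of the possible map */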
+               cpu_clear(cpu, cpu_possible_map);
+               per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID;
        }
 
        /* mark "stuck" area as not stuck */
@@ -879,7 +661,6 @@ void cpu_exit_clear(void)
 
        idle_task_exit();
 
-       cpucount --;
        cpu_uninit();
        irq_ctx_exit(cpu);
 
@@ -889,6 +670,7 @@ void cpu_exit_clear(void)
        cpu_clear(cpu, smp_commenced_mask);
        unmap_cpu_to_logical_apicid(cpu);
 }
+#endif /* CONFIG_HOTPLUG_CPU */
 
 struct warm_boot_cpu_info {
        struct completion *complete;
@@ -905,17 +687,13 @@ static void __cpuinit do_warm_boot_cpu(struct work_struct *work)
        complete(info->complete);
 }
 
-static int __cpuinit __smp_prepare_cpu(int cpu)
+static void __cpuinit __smp_prepare_cpu(int cpu)
 {
        DECLARE_COMPLETION_ONSTACK(done);
        struct warm_boot_cpu_info info;
-       int     apicid, ret;
+       int     apicid;
 
        apicid = per_cpu(x86_cpu_to_apicid, cpu);
-       if (apicid == BAD_APICID) {
-               ret = -ENODEV;
-               goto exit;
-       }
 
        info.complete = &done;
        info.apicid = apicid;
@@ -926,19 +704,13 @@ static int __cpuinit __smp_prepare_cpu(int cpu)
        clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS,
                        min_t(unsigned long, KERNEL_PGD_PTRS, USER_PGD_PTRS));
        flush_tlb_all();
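+       /*
+        * keventd may not be running yet this early in boot, or we may
+        * ourselves be running from keventd; in either case call the
+        * warm-boot work function directly, since queueing it and waiting
+        * for the completion could block forever.
+        */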
-       schedule_work(&info.task);
-       wait_for_completion(&done);
-
-       zap_low_mappings();
-       ret = 0;
-exit:
-       return ret;
+       if (!keventd_up() || current_is_keventd())
+               info.task.func(&info.task);
+       else {
+               schedule_work(&info.task);
+               wait_for_completion(&done);
+       }
 }
-#endif
-
-/*
- * Cycle through the processors sending APIC IPIs to boot each.
- */
 
 static int boot_cpu_logical_apicid;
 /* Where the IO area was mapped on multiquad, always 0 otherwise */
@@ -947,41 +719,30 @@ void *xquad_portio;
 EXPORT_SYMBOL(xquad_portio);
 #endif
 
-static void __init smp_boot_cpus(unsigned int max_cpus)
+static void __init disable_smp(void)
 {
-       int apicid, cpu, bit, kicked;
-       unsigned long bogosum = 0;
-
-       /*
-        * Setup boot CPU information
-        */
-       smp_store_cpu_info(0); /* Final full version of the data */
-       printk("CPU%d: ", 0);
-       print_cpu_info(&cpu_data(0));
-
-       boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
-       boot_cpu_logical_apicid = logical_smp_processor_id();
-       per_cpu(x86_cpu_to_apicid, 0) = boot_cpu_physical_apicid;
-
-       current_thread_info()->cpu = 0;
-
-       set_cpu_sibling_map(0);
+       cpu_possible_map = cpumask_of_cpu(0);
+       cpu_present_map = cpumask_of_cpu(0);
+       smpboot_clear_io_apic_irqs();
+       phys_cpu_present_map = physid_mask_of_physid(0);
+       map_cpu_to_logical_apicid();
+       cpu_set(0, per_cpu(cpu_sibling_map, 0));
+       cpu_set(0, per_cpu(cpu_core_map, 0));
+}
 
+static int __init smp_sanity_check(unsigned max_cpus)
+{
        /*
         * If we couldn't find an SMP configuration at boot time,
         * get out of here now!
         */
        if (!smp_found_config && !acpi_lapic) {
                printk(KERN_NOTICE "SMP motherboard not detected.\n");
-               smpboot_clear_io_apic_irqs();
-               phys_cpu_present_map = physid_mask_of_physid(0);
+               disable_smp();
                if (APIC_init_uniprocessor())
                        printk(KERN_NOTICE "Local APIC not detected."
                                           " Using dummy APIC emulation.\n");
-               map_cpu_to_logical_apicid();
-               cpu_set(0, per_cpu(cpu_sibling_map, 0));
-               cpu_set(0, per_cpu(cpu_core_map, 0));
-               return;
+               return -1;
        }
 
        /*
@@ -1002,12 +763,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
                printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
                        boot_cpu_physical_apicid);
                printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
-               smpboot_clear_io_apic_irqs();
-               phys_cpu_present_map = physid_mask_of_physid(0);
-               map_cpu_to_logical_apicid();
-               cpu_set(0, per_cpu(cpu_sibling_map, 0));
-               cpu_set(0, per_cpu(cpu_core_map, 0));
-               return;
+               return -1;
        }
 
        verify_local_APIC();
@@ -1024,98 +780,42 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
                        connect_bsp_APIC();
                        setup_local_APIC();
                }
-               smpboot_clear_io_apic_irqs();
-               phys_cpu_present_map = physid_mask_of_physid(0);
-               map_cpu_to_logical_apicid();
-               cpu_set(0, per_cpu(cpu_sibling_map, 0));
-               cpu_set(0, per_cpu(cpu_core_map, 0));
-               return;
+               return -1;
        }
+       return 0;
+}
 
-       connect_bsp_APIC();
-       setup_local_APIC();
-       map_cpu_to_logical_apicid();
-
-
-       setup_portio_remap();
-
+/*
+ * Cycle through the processors sending APIC IPIs to boot each.
+ */
+static void __init smp_boot_cpus(unsigned int max_cpus)
+{
        /*
-        * Scan the CPU present map and fire up the other CPUs via do_boot_cpu
-        *
-        * In clustered apic mode, phys_cpu_present_map is a constructed thus:
-        * bits 0-3 are quad0, 4-7 are quad1, etc. A perverse twist on the 
-        * clustered apic ID.
+        * Setup boot CPU information
         */
-       Dprintk("CPU present map: %lx\n", physids_coerce(phys_cpu_present_map));
+       smp_store_cpu_info(0); /* Final full version of the data */
+       printk(KERN_INFO "CPU%d: ", 0);
+       print_cpu_info(&cpu_data(0));
 
-       kicked = 1;
-       for (bit = 0; kicked < NR_CPUS && bit < MAX_APICS; bit++) {
-               apicid = cpu_present_to_apicid(bit);
-               /*
-                * Don't even attempt to start the boot CPU!
-                */
-               if ((apicid == boot_cpu_apicid) || (apicid == BAD_APICID))
-                       continue;
-
-               if (!check_apicid_present(bit))
-                       continue;
-               if (max_cpus <= cpucount+1)
-                       continue;
-
-               if (((cpu = alloc_cpu_id()) <= 0) || do_boot_cpu(apicid, cpu))
-                       printk("CPU #%d not responding - cannot use it.\n",
-                                                               apicid);
-               else
-                       ++kicked;
-       }
+       boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
+       boot_cpu_logical_apicid = logical_smp_processor_id();
 
-       /*
-        * Cleanup possible dangling ends...
-        */
-       smpboot_restore_warm_reset_vector();
+       current_thread_info()->cpu = 0;
 
-       /*
-        * Allow the user to impress friends.
-        */
-       Dprintk("Before bogomips.\n");
-       for_each_possible_cpu(cpu)
-               if (cpu_isset(cpu, cpu_callout_map))
-                       bogosum += cpu_data(cpu).loops_per_jiffy;
-       printk(KERN_INFO
-               "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
-               cpucount+1,
-               bogosum/(500000/HZ),
-               (bogosum/(5000/HZ))%100);
-       
-       Dprintk("Before bogocount - setting activated=1.\n");
-
-       if (smp_b_stepping)
-               printk(KERN_WARNING "WARNING: SMP operation may be unreliable with B stepping processors.\n");
+       set_cpu_sibling_map(0);
 
-       /*
-        * Don't taint if we are running SMP kernel on a single non-MP
-        * approved Athlon
-        */
-       if (tainted & TAINT_UNSAFE_SMP) {
-               if (cpucount)
-                       printk (KERN_INFO "WARNING: This combination of AMD processors is not suitable for SMP.\n");
-               else
-                       tainted &= ~TAINT_UNSAFE_SMP;
+       if (smp_sanity_check(max_cpus) < 0) {
+               printk(KERN_INFO "SMP disabled\n");
+               disable_smp();
+               return;
        }
 
-       Dprintk("Boot done.\n");
+       connect_bsp_APIC();
+       setup_local_APIC();
+       map_cpu_to_logical_apicid();
 
-       /*
-        * construct cpu_sibling_map, so that we can tell sibling CPUs
-        * efficiently.
-        */
-       for_each_possible_cpu(cpu) {
-               cpus_clear(per_cpu(cpu_sibling_map, cpu));
-               cpus_clear(per_cpu(cpu_core_map, cpu));
-       }
 
-       cpu_set(0, per_cpu(cpu_sibling_map, 0));
-       cpu_set(0, per_cpu(cpu_core_map, 0));
+       setup_portio_remap();
 
        smpboot_setup_io_apic();
 
@@ -1126,6 +826,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
    who understands all this stuff should rewrite it properly. --RR 15/Jul/02 */
 void __init native_smp_prepare_cpus(unsigned int max_cpus)
 {
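+       /* presumably: choose a default NMI watchdog mode if none was set */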
+       nmi_watchdog_default();
        smp_commenced_mask = cpumask_of_cpu(0);
        cpu_callin_map = cpumask_of_cpu(0);
        mb();
@@ -1139,117 +840,28 @@ void __init native_smp_prepare_boot_cpu(void)
        init_gdt(cpu);
        switch_to_new_gdt();
 
-       cpu_set(cpu, cpu_online_map);
        cpu_set(cpu, cpu_callout_map);
-       cpu_set(cpu, cpu_present_map);
-       cpu_set(cpu, cpu_possible_map);
        __get_cpu_var(cpu_state) = CPU_ONLINE;
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-void remove_siblinginfo(int cpu)
+int __cpuinit native_cpu_up(unsigned int cpu)
 {
-       int sibling;
-       struct cpuinfo_x86 *c = &cpu_data(cpu);
-
-       for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
-               cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
-               /*/
-                * last thread sibling in this cpu core going down
-                */
-               if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
-                       cpu_data(sibling).booted_cores--;
-       }
-                       
-       for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
-               cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
-       cpus_clear(per_cpu(cpu_sibling_map, cpu));
-       cpus_clear(per_cpu(cpu_core_map, cpu));
-       c->phys_proc_id = 0;
-       c->cpu_core_id = 0;
-       cpu_clear(cpu, cpu_sibling_setup_map);
-}
+       int apicid = cpu_present_to_apicid(cpu);
+       unsigned long flags;
 
-int __cpu_disable(void)
-{
-       cpumask_t map = cpu_online_map;
-       int cpu = smp_processor_id();
+       WARN_ON(irqs_disabled());
 
-       /*
-        * Perhaps use cpufreq to drop frequency, but that could go
-        * into generic code.
-        *
-        * We won't take down the boot processor on i386 due to some
-        * interrupts only being able to be serviced by the BSP.
-        * Especially so if we're not using an IOAPIC   -zwane
-        */
-       if (cpu == 0)
-               return -EBUSY;
-       if (nmi_watchdog == NMI_LOCAL_APIC)
-               stop_apic_nmi_watchdog(NULL);
-       clear_local_APIC();
-       /* Allow any queued timer interrupts to get serviced */
-       local_irq_enable();
-       mdelay(1);
-       local_irq_disable();
-
-       remove_siblinginfo(cpu);
-
-       cpu_clear(cpu, map);
-       fixup_irqs(map);
-       /* It's now safe to remove this processor from the online map */
-       cpu_clear(cpu, cpu_online_map);
-       return 0;
-}
+       Dprintk("++++++++++++++++++++=_---CPU UP  %u\n", cpu);
 
-void __cpu_die(unsigned int cpu)
-{
-       /* We don't do anything here: idle task is faking death itself. */
-       unsigned int i;
-
-       for (i = 0; i < 10; i++) {
-               /* They ack this in play_dead by setting CPU_DEAD */
-               if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
-                       printk ("CPU %d is now offline\n", cpu);
-                       if (1 == num_online_cpus())
-                               alternatives_smp_switch(0);
-                       return;
-               }
-               msleep(100);
+       if (apicid == BAD_APICID || apicid == boot_cpu_physical_apicid ||
+           !physid_isset(apicid, phys_cpu_present_map)) {
+               printk(KERN_ERR "%s: bad cpu %d\n", __func__, cpu);
+               return -EINVAL;
        }
-       printk(KERN_ERR "CPU %u didn't die...\n", cpu);
-}
-#else /* ... !CONFIG_HOTPLUG_CPU */
-int __cpu_disable(void)
-{
-       return -ENOSYS;
-}
 
-void __cpu_die(unsigned int cpu)
-{
-       /* We said "no" in __cpu_disable */
-       BUG();
-}
-#endif /* CONFIG_HOTPLUG_CPU */
-
-int __cpuinit native_cpu_up(unsigned int cpu)
-{
-       unsigned long flags;
-#ifdef CONFIG_HOTPLUG_CPU
-       int ret = 0;
-
-       /*
-        * We do warm boot only on cpus that had booted earlier
-        * Otherwise cold boot is all handled from smp_boot_cpus().
-        * cpu_callin_map is set during AP kickstart process. Its reset
-        * when a cpu is taken offline from cpu_exit_clear().
-        */
-       if (!cpu_isset(cpu, cpu_callin_map))
-               ret = __smp_prepare_cpu(cpu);
+       per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
 
-       if (ret)
-               return -EIO;
-#endif
+       __smp_prepare_cpu(cpu);
 
        /* In case one didn't come up */
        if (!cpu_isset(cpu, cpu_callin_map)) {
@@ -1257,7 +869,6 @@ int __cpuinit native_cpu_up(unsigned int cpu)
                return -EIO;
        }
 
-       per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
        /* Unleash the CPU! */
        cpu_set(cpu, smp_commenced_mask);
 
@@ -1277,44 +888,23 @@ int __cpuinit native_cpu_up(unsigned int cpu)
        return 0;
 }
 
-void __init native_smp_cpus_done(unsigned int max_cpus)
-{
-#ifdef CONFIG_X86_IO_APIC
-       setup_ioapic_dest();
-#endif
-       zap_low_mappings();
-}
+extern void impress_friends(void);
+extern void smp_checks(void);
 
-void __init smp_intr_init(void)
+void __init native_smp_cpus_done(unsigned int max_cpus)
 {
        /*
-        * IRQ0 must be given a fixed assignment and initialized,
-        * because it's used before the IO-APIC is set up.
-        */
-       set_intr_gate(FIRST_DEVICE_VECTOR, interrupt[0]);
-
-       /*
-        * The reschedule interrupt is a CPU-to-CPU reschedule-helper
-        * IPI, driven by wakeup.
+        * Cleanup possible dangling ends...
         */
-       set_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);
-
-       /* IPI for invalidation */
-       set_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt);
-
-       /* IPI for generic function call */
-       set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
-}
+       smpboot_restore_warm_reset_vector();
 
-/*
- * If the BIOS enumerates physical processors before logical,
- * maxcpus=N at enumeration-time can be used to disable HT.
- */
-static int __init parse_maxcpus(char *arg)
-{
-       extern unsigned int maxcpus;
+       Dprintk("Boot done.\n");
 
-       maxcpus = simple_strtoul(arg, NULL, 0);
-       return 0;
+       impress_friends();
+       smp_checks();
+#ifdef CONFIG_X86_IO_APIC
+       setup_ioapic_dest();
+#endif
+       check_nmi_watchdog();
+       zap_low_mappings();
 }
-early_param("maxcpus", parse_maxcpus);