x86: decouple call to print_cpu_info from smp_store_cpu_info
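
smp_store_cpu_info() used to print the CPU banner as a side effect of
storing and identifying the per-CPU data. Drop the print_cpu_info()
call from it and print from the bringup paths instead (for the boot
CPU in native_smp_prepare_cpus(), and for each secondary CPU once it
has shown up in cpu_callin_map), so that storing and reporting the
CPU info become independent steps.

A minimal sketch of the resulting call pattern (identifiers as in the
diff below; the declaration of 'c' is assumed from context the hunk
does not show):

    static void __cpuinit smp_store_cpu_info(int id)
    {
            struct cpuinfo_x86 *c = &cpu_data(id);  /* assumed: not visible in the hunk */

            *c = boot_cpu_data;
            c->cpu_index = id;
            identify_cpu(c);
            /* no print_cpu_info() call here any more */
    }

    /* ...while each bringup path reports the CPU explicitly: */
    printk(KERN_INFO "CPU%d: ", cpu);
    print_cpu_info(&cpu_data(cpu));
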
diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c
index fd0d3a93b995f8f1f228fd580911c832ee0c28f7..1da28c6c1f5fbb15417af7d4930f383322323c40 100644
--- a/arch/x86/kernel/smpboot_64.c
+++ b/arch/x86/kernel/smpboot_64.c
 #include <asm/hw_irq.h>
 #include <asm/numa.h>
 
-/* Number of siblings per CPU package */
-int smp_num_siblings = 1;
-EXPORT_SYMBOL(smp_num_siblings);
-
-/* Last level cache ID of each logical CPU */
-DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID;
-
-/* Bitmask of currently online CPUs */
-cpumask_t cpu_online_map __read_mostly;
-
-EXPORT_SYMBOL(cpu_online_map);
-
-/*
- * Private maps to synchronize booting between AP and BP.
- * Probably not needed anymore, but it makes for easier debugging. -AK
- */
-cpumask_t cpu_callin_map;
-cpumask_t cpu_callout_map;
-cpumask_t cpu_possible_map;
-EXPORT_SYMBOL(cpu_possible_map);
-
-/* Per CPU bogomips and other parameters */
-DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
-EXPORT_PER_CPU_SYMBOL(cpu_info);
-
 /* Set when the idlers are all forked */
 int smp_threads_ready;
 
-/* representing HT siblings of each logical CPU */
-DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
-EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
-
-/* representing HT and core siblings of each logical CPU */
-DEFINE_PER_CPU(cpumask_t, cpu_core_map);
-EXPORT_PER_CPU_SYMBOL(cpu_core_map);
-
-/*
- * Trampoline 80x86 program as an array.
- */
-
-extern const unsigned char trampoline_data[];
-extern const unsigned char trampoline_end[];
-
 /* State of each CPU */
 DEFINE_PER_CPU(int, cpu_state) = { 0 };
 
@@ -125,20 +85,6 @@ struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
 #define set_idle_for_cpu(x,p)   (idle_thread_array[(x)] = (p))
 #endif
 
-
-/*
- * Currently trivial. Write the real->protected mode
- * bootstrap into the page concerned. The caller
- * has made sure it's suitably aligned.
- */
-
-static unsigned long __cpuinit setup_trampoline(void)
-{
-       void *tramp = __va(SMP_TRAMPOLINE_BASE); 
-       memcpy(tramp, trampoline_data, trampoline_end - trampoline_data);
-       return virt_to_phys(tramp);
-}
-
 /*
  * The bootstrap kernel entry code has set these up. Save them for
  * a given CPU
@@ -151,7 +97,13 @@ static void __cpuinit smp_store_cpu_info(int id)
        *c = boot_cpu_data;
        c->cpu_index = id;
        identify_cpu(c);
-       print_cpu_info(c);
+}
+
+static inline void wait_for_init_deassert(atomic_t *deassert)
+{
+       while (!atomic_read(deassert))
+               cpu_relax();
+       return;
 }
 
 static atomic_t init_deasserted __cpuinitdata;
@@ -171,8 +123,7 @@ void __cpuinit smp_callin(void)
         * our local APIC.  We have to wait for the IPI or we'll
         * lock up on an APIC access.
         */
-       while (!atomic_read(&init_deasserted))
-               cpu_relax();
+       wait_for_init_deassert(&init_deasserted);
 
        /*
         * (This works even if the APIC is not enabled.)
@@ -244,85 +195,6 @@ void __cpuinit smp_callin(void)
        cpu_set(cpuid, cpu_callin_map);
 }
 
-/* maps the cpu to the sched domain representing multi-core */
-cpumask_t cpu_coregroup_map(int cpu)
-{
-       struct cpuinfo_x86 *c = &cpu_data(cpu);
-       /*
-        * For perf, we return last level cache shared map.
-        * And for power savings, we return cpu_core_map
-        */
-       if (sched_mc_power_savings || sched_smt_power_savings)
-               return per_cpu(cpu_core_map, cpu);
-       else
-               return c->llc_shared_map;
-}
-
-/* representing cpus for which sibling maps can be computed */
-static cpumask_t cpu_sibling_setup_map;
-
-static inline void set_cpu_sibling_map(int cpu)
-{
-       int i;
-       struct cpuinfo_x86 *c = &cpu_data(cpu);
-
-       cpu_set(cpu, cpu_sibling_setup_map);
-
-       if (smp_num_siblings > 1) {
-               for_each_cpu_mask(i, cpu_sibling_setup_map) {
-                       if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
-                           c->cpu_core_id == cpu_data(i).cpu_core_id) {
-                               cpu_set(i, per_cpu(cpu_sibling_map, cpu));
-                               cpu_set(cpu, per_cpu(cpu_sibling_map, i));
-                               cpu_set(i, per_cpu(cpu_core_map, cpu));
-                               cpu_set(cpu, per_cpu(cpu_core_map, i));
-                               cpu_set(i, c->llc_shared_map);
-                               cpu_set(cpu, cpu_data(i).llc_shared_map);
-                       }
-               }
-       } else {
-               cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
-       }
-
-       cpu_set(cpu, c->llc_shared_map);
-
-       if (current_cpu_data.x86_max_cores == 1) {
-               per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
-               c->booted_cores = 1;
-               return;
-       }
-
-       for_each_cpu_mask(i, cpu_sibling_setup_map) {
-               if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
-                   per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
-                       cpu_set(i, c->llc_shared_map);
-                       cpu_set(cpu, cpu_data(i).llc_shared_map);
-               }
-               if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
-                       cpu_set(i, per_cpu(cpu_core_map, cpu));
-                       cpu_set(cpu, per_cpu(cpu_core_map, i));
-                       /*
-                        * Does this new CPU bring up a new core?
-                        */
-                       if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) {
-                               /*
-                                * for each core in package, increment
-                                * the booted_cores for this new cpu
-                                */
-                               if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
-                                       c->booted_cores++;
-                               /*
-                                * increment the core count for all
-                                * the other cpus in this package
-                                */
-                               if (i != cpu)
-                                       cpu_data(i).booted_cores++;
-                       } else if (i != cpu && !c->booted_cores)
-                               c->booted_cores = cpu_data(i).booted_cores;
-               }
-       }
-}
-
 /*
  * Setup code on secondary processor (after coming out of the trampoline)
  */
@@ -373,9 +245,9 @@ void __cpuinit start_secondary(void)
        /*
         * Allow the master to continue.
         */
+       spin_unlock(&vector_lock);
        cpu_set(smp_processor_id(), cpu_online_map);
        per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
-       spin_unlock(&vector_lock);
 
        unlock_ipi_call_lock();
 
@@ -408,8 +280,8 @@ static void inquire_remote_apic(int apicid)
                        printk(KERN_CONT
                               "a previous APIC delivery may have failed\n");
 
-               apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
-               apic_write(APIC_ICR, APIC_DM_REMRD | regs[i]);
+               apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
+               apic_write_around(APIC_ICR, APIC_DM_REMRD | regs[i]);
 
                timeout = 0;
                do {
@@ -442,12 +314,12 @@ static int __cpuinit wakeup_secondary_via_INIT(int phys_apicid, unsigned int start_rip)
        /*
         * Turn INIT on target chip
         */
-       apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
+       apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
 
        /*
         * Send IPI
         */
-       apic_write(APIC_ICR, APIC_INT_LEVELTRIG | APIC_INT_ASSERT
+       apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_INT_ASSERT
                                | APIC_DM_INIT);
 
        Dprintk("Waiting for send to finish...\n");
@@ -458,10 +330,10 @@ static int __cpuinit wakeup_secondary_via_INIT(int phys_apicid, unsigned int start_rip)
        Dprintk("Deasserting INIT.\n");
 
        /* Target chip */
-       apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
+       apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
 
        /* Send IPI */
-       apic_write(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT);
+       apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT);
 
        Dprintk("Waiting for send to finish...\n");
        send_status = safe_apic_wait_icr_idle();
@@ -471,6 +343,14 @@ static int __cpuinit wakeup_secondary_via_INIT(int phys_apicid, unsigned int start_rip)
 
        num_starts = 2;
 
+       /*
+        * Paravirt / VMI wants a startup IPI hook here to set up the
+        * target processor state.
+        */
+       startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
+                       (unsigned long) init_rsp);
+
+
        /*
         * Run STARTUP IPI loop.
         */
@@ -480,6 +360,7 @@ static int __cpuinit wakeup_secondary_via_INIT(int phys_apicid, unsigned int start_rip)
 
        for (j = 1; j <= num_starts; j++) {
                Dprintk("Sending STARTUP #%d.\n",j);
+               apic_read_around(APIC_SPIV);
                apic_write(APIC_ESR, 0);
                apic_read(APIC_ESR);
                Dprintk("After apic_write.\n");
@@ -489,11 +370,11 @@ static int __cpuinit wakeup_secondary_via_INIT(int phys_apicid, unsigned int start_rip)
                 */
 
                /* Target chip */
-               apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
+               apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));
 
                /* Boot on the stack */
                /* Kick the second */
-               apic_write(APIC_ICR, APIC_DM_STARTUP | (start_rip >> 12));
+               apic_write_around(APIC_ICR, APIC_DM_STARTUP | (start_rip>>12));
 
                /*
                 * Give the other CPU some time to accept the IPI.
@@ -513,6 +394,7 @@ static int __cpuinit wakeup_secondary_via_INIT(int phys_apicid, unsigned int start_rip)
                 * Due to the Pentium erratum 3AP.
                 */
                if (maxlvt > 3) {
+                       apic_read_around(APIC_SPIV);
                        apic_write(APIC_ESR, 0);
                }
                accept_status = (apic_read(APIC_ESR) & 0xEF);
@@ -685,6 +567,8 @@ do_rest:
                if (cpu_isset(cpu, cpu_callin_map)) {
                        /* number CPUs logically, starting from 1 (BSP is 0) */
                        Dprintk("CPU has booted.\n");
+                       printk(KERN_INFO "CPU%d: ", cpu);
+                       print_cpu_info(&cpu_data(cpu));
                } else {
                        boot_error = 1;
                        if (*((volatile unsigned char *)phys_to_virt(SMP_TRAMPOLINE_BASE))
@@ -749,51 +633,6 @@ static __init void disable_smp(void)
        cpu_set(0, per_cpu(cpu_core_map, 0));
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-
-int additional_cpus __initdata = -1;
-
-/*
- * cpu_possible_map should be static; it cannot change as CPUs
- * are onlined or offlined. The reason is that per-cpu data structures
- * are allocated by some modules at init time and don't expect to
- * cope with CPUs arriving or departing dynamically.
- * cpu_present_map, on the other hand, can change dynamically.
- * When CPU hotplug is not compiled in, we resort to the current
- * behaviour, which is cpu_possible == cpu_present.
- * - Ashok Raj
- *
- * Three ways to find out the number of additional hotplug CPUs:
- * - If the BIOS specified disabled CPUs in ACPI/mptables, use that.
- * - The user can override it with additional_cpus=NUM.
- * - Otherwise, don't reserve additional CPUs.
- * We do this because additional CPUs waste a lot of memory.
- * -AK
- */
-__init void prefill_possible_map(void)
-{
-       int i;
-       int possible;
-
-       if (additional_cpus == -1) {
-               if (disabled_cpus > 0)
-                       additional_cpus = disabled_cpus;
-               else
-                       additional_cpus = 0;
-       }
-       possible = num_processors + additional_cpus;
-       if (possible > NR_CPUS) 
-               possible = NR_CPUS;
-
-       printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
-               possible,
-               max_t(int, possible - num_processors, 0));
-
-       for (i = 0; i < possible; i++)
-               cpu_set(i, cpu_possible_map);
-}
-#endif
-
 /*
  * Various sanity checks.
  */
@@ -913,6 +752,8 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
         */
 
        setup_boot_clock();
+       printk(KERN_INFO "CPU%d: ", 0);
+       print_cpu_info(&cpu_data(0));
 }
 
 /*
@@ -993,116 +834,3 @@ void __init native_smp_cpus_done(unsigned int max_cpus)
        setup_ioapic_dest();
        check_nmi_watchdog();
 }
-
-#ifdef CONFIG_HOTPLUG_CPU
-
-static void remove_siblinginfo(int cpu)
-{
-       int sibling;
-       struct cpuinfo_x86 *c = &cpu_data(cpu);
-
-       for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
-               cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
-               /*
-                * last thread sibling in this cpu core going down
-                */
-               if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
-                       cpu_data(sibling).booted_cores--;
-       }
-                       
-       for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
-               cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
-       cpus_clear(per_cpu(cpu_sibling_map, cpu));
-       cpus_clear(per_cpu(cpu_core_map, cpu));
-       c->phys_proc_id = 0;
-       c->cpu_core_id = 0;
-       cpu_clear(cpu, cpu_sibling_setup_map);
-}
-
-static void __ref remove_cpu_from_maps(void)
-{
-       int cpu = smp_processor_id();
-
-       cpu_clear(cpu, cpu_callout_map);
-       cpu_clear(cpu, cpu_callin_map);
-       clear_bit(cpu, (unsigned long *)&cpu_initialized); /* was set by cpu_init() */
-       clear_node_cpumask(cpu);
-}
-
-int __cpu_disable(void)
-{
-       int cpu = smp_processor_id();
-
-       /*
-        * Perhaps use cpufreq to drop frequency, but that could go
-        * into generic code.
-        *
-        * We won't take down the boot processor on i386 due to some
-        * interrupts only being able to be serviced by the BSP.
-        * Especially so if we're not using an IOAPIC   -zwane
-        */
-       if (cpu == 0)
-               return -EBUSY;
-
-       if (nmi_watchdog == NMI_LOCAL_APIC)
-               stop_apic_nmi_watchdog(NULL);
-       clear_local_APIC();
-
-       /*
-        * HACK:
-        * Allow any queued timer interrupts to get serviced
-        * This is only a temporary solution until we cleanup
-        * fixup_irqs as we do for IA64.
-        */
-       local_irq_enable();
-       mdelay(1);
-
-       local_irq_disable();
-       remove_siblinginfo(cpu);
-
-       spin_lock(&vector_lock);
-       /* It's now safe to remove this processor from the online map */
-       cpu_clear(cpu, cpu_online_map);
-       spin_unlock(&vector_lock);
-       remove_cpu_from_maps();
-       fixup_irqs(cpu_online_map);
-       return 0;
-}
-
-void __cpu_die(unsigned int cpu)
-{
-       /* We don't do anything here: idle task is faking death itself. */
-       unsigned int i;
-
-       for (i = 0; i < 10; i++) {
-               /* They ack this in play_dead by setting CPU_DEAD */
-               if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
-                       printk ("CPU %d is now offline\n", cpu);
-                       if (1 == num_online_cpus())
-                               alternatives_smp_switch(0);
-                       return;
-               }
-               msleep(100);
-       }
-       printk(KERN_ERR "CPU %u didn't die...\n", cpu);
-}
-
-static __init int setup_additional_cpus(char *s)
-{
-       return s && get_option(&s, &additional_cpus) ? 0 : -EINVAL;
-}
-early_param("additional_cpus", setup_additional_cpus);
-
-#else /* ... !CONFIG_HOTPLUG_CPU */
-
-int __cpu_disable(void)
-{
-       return -ENOSYS;
-}
-
-void __cpu_die(unsigned int cpu)
-{
-       /* We said "no" in __cpu_disable */
-       BUG();
-}
-#endif /* CONFIG_HOTPLUG_CPU */
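
A note on the wait_for_init_deassert() helper introduced above: the
open-coded busy-wait on init_deasserted is factored into a named
helper, presumably mirroring the 32-bit code, where this wait is a
hook that paravirt implementations can override. A minimal standalone
sketch of the pattern, with the kernel includes it would need:

    #include <asm/atomic.h>     /* atomic_t, atomic_read() */
    #include <asm/processor.h>  /* cpu_relax() */

    /* Spin until the BSP deasserts INIT for this AP. */
    static inline void wait_for_init_deassert(atomic_t *deassert)
    {
            while (!atomic_read(deassert))
                    cpu_relax();
    }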