#include <asm/vmi.h>
#include <asm/mtrr.h>
-/* Set if we find a B stepping CPU */
-static int __cpuinitdata smp_b_stepping;
-
static cpumask_t smp_commenced_mask;
/* which logical CPU number maps to which CPU (physical APIC ID) */
DEFINE_PER_CPU(u16, x86_cpu_to_apicid) = BAD_APICID;
EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid);
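+/* BIOS-reported APIC IDs, recorded here until the per-cpu areas are set up */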
+u16 x86_bios_cpu_apicid_init[NR_CPUS] __initdata
+ = { [0 ... NR_CPUS-1] = BAD_APICID };
+void *x86_bios_cpu_apicid_early_ptr;
+DEFINE_PER_CPU(u16, x86_bios_cpu_apicid) = BAD_APICID;
+EXPORT_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
+
u8 apicid_2_node[MAX_APICID];
static void map_cpu_to_logical_apicid(void);
/* State of each CPU. */
DEFINE_PER_CPU(int, cpu_state) = { 0 };
-static void __cpuinit smp_apply_quirks(struct cpuinfo_x86 *c)
-{
- /*
- * Mask B, Pentium, but not Pentium MMX
- */
- if (c->x86_vendor == X86_VENDOR_INTEL &&
- c->x86 == 5 &&
- c->x86_mask >= 1 && c->x86_mask <= 4 &&
- c->x86_model <= 3)
- /*
- * Remember we have B step Pentia with bugs
- */
- smp_b_stepping = 1;
-
- /*
- * Certain Athlons might work (for various values of 'work') in SMP
- * but they are not certified as MP capable.
- */
- if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) {
-
- if (num_possible_cpus() == 1)
- goto valid_k7;
-
- /* Athlon 660/661 is valid. */
- if ((c->x86_model==6) && ((c->x86_mask==0) || (c->x86_mask==1)))
- goto valid_k7;
-
- /* Duron 670 is valid */
- if ((c->x86_model==7) && (c->x86_mask==0))
- goto valid_k7;
-
- /*
- * Athlon 662, Duron 671, and Athlon >model 7 have capability bit.
- * It's worth noting that the A5 stepping (662) of some Athlon XP's
- * have the MP bit set.
- * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for more.
- */
- if (((c->x86_model==6) && (c->x86_mask>=2)) ||
- ((c->x86_model==7) && (c->x86_mask>=1)) ||
- (c->x86_model> 7))
- if (cpu_has_mp)
- goto valid_k7;
-
- /* If we get here, it's not a certified SMP capable AMD system. */
- add_taint(TAINT_UNSAFE_SMP);
- }
-
-valid_k7:
- ;
-
-}
-
-/*
- * The bootstrap kernel entry code has set these up. Save them for
- * a given CPU
- */
-
-void __cpuinit smp_store_cpu_info(int id)
-{
- struct cpuinfo_x86 *c = &cpu_data(id);
-
- *c = boot_cpu_data;
- c->cpu_index = id;
- if (id != 0)
- identify_secondary_cpu(c);
- smp_apply_quirks(c);
-}
-
static atomic_t init_deasserted;
static void __cpuinit smp_callin(void)
cpu_set(cpuid, cpu_callin_map);
}
-static int cpucount;
-
/*
* Activate a secondary processor.
*/
smp_callin();
while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
cpu_relax();
+
+ /* otherwise gcc may move smp_processor_id() up before cpu_init() has run */
+ barrier();
/*
* Check TSC synchronization with the BP:
*/
check_tsc_sync_target();
- setup_secondary_clock();
if (nmi_watchdog == NMI_IO_APIC) {
disable_8259A_irq(0);
enable_NMI_through_LVT0();
enable_8259A_irq(0);
}
- /*
- * low-memory mappings have been cleared, flush them from
- * the local TLBs too.
- */
- local_flush_tlb();
/* This must be done before setting cpu_online_map */
set_cpu_sibling_map(raw_smp_processor_id());
unlock_ipi_call_lock();
per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
- /* We can take interrupts now: we're officially "up". */
- local_irq_enable();
+ setup_secondary_clock();
wmb();
cpu_idle();
Dprintk("Waiting for send to finish...\n");
send_status = safe_apic_wait_icr_idle();
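+ /* order the APIC ICR accesses above before we announce INIT deassert */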
+ mb();
atomic_set(&init_deasserted, 1);
/*
#endif /* WAKE_SECONDARY_VIA_INIT */
extern cpumask_t cpu_initialized;
-static inline int alloc_cpu_id(void)
-{
- cpumask_t tmp_map;
- int cpu;
- cpus_complement(tmp_map, cpu_present_map);
- cpu = first_cpu(tmp_map);
- if (cpu >= NR_CPUS)
- return -ENODEV;
- return cpu;
-}
#ifdef CONFIG_HOTPLUG_CPU
static struct task_struct * __cpuinitdata cpu_idle_tasks[NR_CPUS];
/* start_eip had better be page-aligned! */
start_eip = setup_trampoline();
- ++cpucount;
alternatives_smp_switch(1);
/* So we see what's up */
irq_ctx_init(cpu);
- per_cpu(x86_cpu_to_apicid, cpu) = apicid;
/*
* This grunge runs the startup process for
* the targeted processor.
unmap_cpu_to_logical_apicid(cpu);
cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */
- cpucount--;
- } else {
- per_cpu(x86_cpu_to_apicid, cpu) = apicid;
- cpu_set(cpu, cpu_present_map);
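+ /* boot failed: drop the CPU from the possible map and invalidate its APIC ID */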
+ cpu_clear(cpu, cpu_possible_map);
+ per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID;
}
/* mark "stuck" area as not stuck */
idle_task_exit();
- cpucount --;
cpu_uninit();
irq_ctx_exit(cpu);
cpu_clear(cpu, smp_commenced_mask);
unmap_cpu_to_logical_apicid(cpu);
}
+#endif
struct warm_boot_cpu_info {
struct completion *complete;
complete(info->complete);
}
-static int __cpuinit __smp_prepare_cpu(int cpu)
+static void __cpuinit __smp_prepare_cpu(int cpu)
{
DECLARE_COMPLETION_ONSTACK(done);
struct warm_boot_cpu_info info;
- int apicid, ret;
+ int apicid;
apicid = per_cpu(x86_cpu_to_apicid, cpu);
- if (apicid == BAD_APICID) {
- ret = -ENODEV;
- goto exit;
- }
info.complete = &done;
info.apicid = apicid;
clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS,
min_t(unsigned long, KERNEL_PGD_PTRS, USER_PGD_PTRS));
flush_tlb_all();
- schedule_work(&info.task);
- wait_for_completion(&done);
-
- zap_low_mappings();
- ret = 0;
-exit:
- return ret;
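+ /* if keventd isn't up yet (or we are keventd), run the warm-boot work directly */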
+ if (!keventd_up() || current_is_keventd())
+ info.task.func(&info.task);
+ else {
+ schedule_work(&info.task);
+ wait_for_completion(&done);
+ }
}
-#endif
-
-/*
- * Cycle through the processors sending APIC IPIs to boot each.
- */
static int boot_cpu_logical_apicid;
/* Where the IO area was mapped on multiquad, always 0 otherwise */
EXPORT_SYMBOL(xquad_portio);
#endif
-static void __init smp_boot_cpus(unsigned int max_cpus)
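+/* restrict the system to the boot CPU only (UP fallback) */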
+static void __init disable_smp(void)
{
- int apicid, cpu, bit, kicked;
- unsigned long bogosum = 0;
-
- /*
- * Setup boot CPU information
- */
- smp_store_cpu_info(0); /* Final full version of the data */
- printk("CPU%d: ", 0);
- print_cpu_info(&cpu_data(0));
-
- boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
- boot_cpu_logical_apicid = logical_smp_processor_id();
- per_cpu(x86_cpu_to_apicid, 0) = boot_cpu_physical_apicid;
-
- current_thread_info()->cpu = 0;
-
- set_cpu_sibling_map(0);
+ cpu_possible_map = cpumask_of_cpu(0);
+ cpu_present_map = cpumask_of_cpu(0);
+ smpboot_clear_io_apic_irqs();
+ phys_cpu_present_map = physid_mask_of_physid(0);
+ map_cpu_to_logical_apicid();
+ cpu_set(0, per_cpu(cpu_sibling_map, 0));
+ cpu_set(0, per_cpu(cpu_core_map, 0));
+}
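+/* returns 0 if the SMP configuration looks usable, -1 if we must fall back to UP */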
+static int __init smp_sanity_check(unsigned max_cpus)
+{
/*
* If we couldn't find an SMP configuration at boot time,
* get out of here now!
*/
if (!smp_found_config && !acpi_lapic) {
printk(KERN_NOTICE "SMP motherboard not detected.\n");
- smpboot_clear_io_apic_irqs();
- phys_cpu_present_map = physid_mask_of_physid(0);
+ disable_smp();
if (APIC_init_uniprocessor())
printk(KERN_NOTICE "Local APIC not detected."
" Using dummy APIC emulation.\n");
- map_cpu_to_logical_apicid();
- cpu_set(0, per_cpu(cpu_sibling_map, 0));
- cpu_set(0, per_cpu(cpu_core_map, 0));
- return;
+ return -1;
}
/*
printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
boot_cpu_physical_apicid);
printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
- smpboot_clear_io_apic_irqs();
- phys_cpu_present_map = physid_mask_of_physid(0);
- map_cpu_to_logical_apicid();
- cpu_set(0, per_cpu(cpu_sibling_map, 0));
- cpu_set(0, per_cpu(cpu_core_map, 0));
- return;
+ return -1;
}
verify_local_APIC();
connect_bsp_APIC();
setup_local_APIC();
}
- smpboot_clear_io_apic_irqs();
- phys_cpu_present_map = physid_mask_of_physid(0);
- map_cpu_to_logical_apicid();
- cpu_set(0, per_cpu(cpu_sibling_map, 0));
- cpu_set(0, per_cpu(cpu_core_map, 0));
- return;
+ return -1;
}
+ return 0;
+}
- connect_bsp_APIC();
- setup_local_APIC();
- map_cpu_to_logical_apicid();
-
-
- setup_portio_remap();
-
+/*
+ * Prepare the boot CPU and the SMP environment; the secondary CPUs are
+ * brought up later, one at a time, via native_cpu_up().
+ */
+static void __init smp_boot_cpus(unsigned int max_cpus)
+{
/*
- * Scan the CPU present map and fire up the other CPUs via do_boot_cpu
- *
- * In clustered apic mode, phys_cpu_present_map is a constructed thus:
- * bits 0-3 are quad0, 4-7 are quad1, etc. A perverse twist on the
- * clustered apic ID.
+ * Setup boot CPU information
*/
- Dprintk("CPU present map: %lx\n", physids_coerce(phys_cpu_present_map));
+ smp_store_cpu_info(0); /* Final full version of the data */
+ printk(KERN_INFO "CPU%d: ", 0);
+ print_cpu_info(&cpu_data(0));
- kicked = 1;
- for (bit = 0; kicked < NR_CPUS && bit < MAX_APICS; bit++) {
- apicid = cpu_present_to_apicid(bit);
- /*
- * Don't even attempt to start the boot CPU!
- */
- if ((apicid == boot_cpu_apicid) || (apicid == BAD_APICID))
- continue;
-
- if (!check_apicid_present(bit))
- continue;
- if (max_cpus <= cpucount+1)
- continue;
-
- if (((cpu = alloc_cpu_id()) <= 0) || do_boot_cpu(apicid, cpu))
- printk("CPU #%d not responding - cannot use it.\n",
- apicid);
- else
- ++kicked;
- }
+ boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
+ boot_cpu_logical_apicid = logical_smp_processor_id();
- /*
- * Cleanup possible dangling ends...
- */
- smpboot_restore_warm_reset_vector();
+ current_thread_info()->cpu = 0;
- /*
- * Allow the user to impress friends.
- */
- Dprintk("Before bogomips.\n");
- for_each_possible_cpu(cpu)
- if (cpu_isset(cpu, cpu_callout_map))
- bogosum += cpu_data(cpu).loops_per_jiffy;
- printk(KERN_INFO
- "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
- cpucount+1,
- bogosum/(500000/HZ),
- (bogosum/(5000/HZ))%100);
-
- Dprintk("Before bogocount - setting activated=1.\n");
-
- if (smp_b_stepping)
- printk(KERN_WARNING "WARNING: SMP operation may be unreliable with B stepping processors.\n");
+ set_cpu_sibling_map(0);
- /*
- * Don't taint if we are running SMP kernel on a single non-MP
- * approved Athlon
- */
- if (tainted & TAINT_UNSAFE_SMP) {
- if (cpucount)
- printk (KERN_INFO "WARNING: This combination of AMD processors is not suitable for SMP.\n");
- else
- tainted &= ~TAINT_UNSAFE_SMP;
+ if (smp_sanity_check(max_cpus) < 0) {
+ printk(KERN_INFO "SMP disabled\n");
+ disable_smp();
+ return;
}
- Dprintk("Boot done.\n");
+ connect_bsp_APIC();
+ setup_local_APIC();
+ map_cpu_to_logical_apicid();
- /*
- * construct cpu_sibling_map, so that we can tell sibling CPUs
- * efficiently.
- */
- for_each_possible_cpu(cpu) {
- cpus_clear(per_cpu(cpu_sibling_map, cpu));
- cpus_clear(per_cpu(cpu_core_map, cpu));
- }
- cpu_set(0, per_cpu(cpu_sibling_map, 0));
- cpu_set(0, per_cpu(cpu_core_map, 0));
+ setup_portio_remap();
smpboot_setup_io_apic();
who understands all this stuff should rewrite it properly. --RR 15/Jul/02 */
void __init native_smp_prepare_cpus(unsigned int max_cpus)
{
+ nmi_watchdog_default();
smp_commenced_mask = cpumask_of_cpu(0);
cpu_callin_map = cpumask_of_cpu(0);
mb();
init_gdt(cpu);
switch_to_new_gdt();
- cpu_set(cpu, cpu_online_map);
cpu_set(cpu, cpu_callout_map);
- cpu_set(cpu, cpu_present_map);
- cpu_set(cpu, cpu_possible_map);
__get_cpu_var(cpu_state) = CPU_ONLINE;
}
int __cpuinit native_cpu_up(unsigned int cpu)
{
+ int apicid = cpu_present_to_apicid(cpu);
unsigned long flags;
-#ifdef CONFIG_HOTPLUG_CPU
- int ret = 0;
- /*
- * We do warm boot only on cpus that had booted earlier
- * Otherwise cold boot is all handled from smp_boot_cpus().
- * cpu_callin_map is set during AP kickstart process. Its reset
- * when a cpu is taken offline from cpu_exit_clear().
- */
- if (!cpu_isset(cpu, cpu_callin_map))
- ret = __smp_prepare_cpu(cpu);
+ WARN_ON(irqs_disabled());
- if (ret)
- return -EIO;
-#endif
+ Dprintk("++++++++++++++++++++=_---CPU UP %u\n", cpu);
+
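+ /* reject CPUs with no APIC ID, the boot CPU itself, or IDs not in phys_cpu_present_map */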
+ if (apicid == BAD_APICID || apicid == boot_cpu_physical_apicid ||
+ !physid_isset(apicid, phys_cpu_present_map)) {
+ printk(KERN_ERR "%s: bad cpu %d\n", __func__, cpu);
+ return -EINVAL;
+ }
+
+ per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
+
+ __smp_prepare_cpu(cpu);
/* In case one didn't come up */
if (!cpu_isset(cpu, cpu_callin_map)) {
return -EIO;
}
- per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
/* Unleash the CPU! */
cpu_set(cpu, smp_commenced_mask);
return 0;
}
-void __init native_smp_cpus_done(unsigned int max_cpus)
-{
-#ifdef CONFIG_X86_IO_APIC
- setup_ioapic_dest();
-#endif
- zap_low_mappings();
-}
+extern void impress_friends(void);
+extern void smp_checks(void);
-void __init smp_intr_init(void)
+void __init native_smp_cpus_done(unsigned int max_cpus)
{
/*
- * IRQ0 must be given a fixed assignment and initialized,
- * because it's used before the IO-APIC is set up.
- */
- set_intr_gate(FIRST_DEVICE_VECTOR, interrupt[0]);
-
- /*
- * The reschedule interrupt is a CPU-to-CPU reschedule-helper
- * IPI, driven by wakeup.
+ * Cleanup possible dangling ends...
*/
- set_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);
+ smpboot_restore_warm_reset_vector();
- /* IPI for invalidation */
- set_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt);
+ Dprintk("Boot done.\n");
- /* IPI for generic function call */
- set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
+ impress_friends();
+ smp_checks();
+#ifdef CONFIG_X86_IO_APIC
+ setup_ioapic_dest();
+#endif
+ check_nmi_watchdog();
+ zap_low_mappings();
}