[GDT_ENTRY_APMBIOS_BASE+2] = { { { 0x0000ffff, 0x00409200 } } },
[GDT_ENTRY_ESPFIX_SS] = { { { 0x00000000, 0x00c09200 } } },
-	[GDT_ENTRY_PERCPU] = { { { 0x00000000, 0x00000000 } } },
+	[GDT_ENTRY_PERCPU] = { { { 0x0000ffff, 0x00cf9200 } } },
+	GDT_STACK_CANARY_INIT
#endif
} };
EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
#ifdef CONFIG_X86_32
}
#endif
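The GDT_ENTRY_PERCPU slot above changes from a null descriptor to 0x0000ffff/0x00cf9200, i.e. a flat, writable 4 GB data segment at base 0, which is what %fs-relative per-cpu accesses on 32-bit need. As a rough aid for reading such entries, here is a small standalone sketch (not part of the patch; it simply follows the standard x86 segment-descriptor layout) that unpacks the two 32-bit words:

#include <stdio.h>
#include <stdint.h>

/* Decode a descriptor given the way the kernel writes it: { low, high }. */
static void decode_gdt_entry(uint32_t low, uint32_t high)
{
	uint32_t base  = (low >> 16) | ((high & 0xff) << 16) | (high & 0xff000000);
	uint32_t limit = (low & 0xffff) | (high & 0x000f0000);
	uint32_t access = (high >> 8) & 0xff;	/* present/DPL/type byte */
	int g = (high >> 23) & 1;		/* granularity: limit in 4K units */

	printf("base=%#x limit=%#x%s access=%#x\n",
	       base, limit, g ? " (4K units)" : "", access);
}

int main(void)
{
	decode_gdt_entry(0x0000ffff, 0x00cf9200);	/* new GDT_ENTRY_PERCPU */
	decode_gdt_entry(0x00000000, 0x00c09200);	/* GDT_ENTRY_ESPFIX_SS  */
	return 0;
}

For the new per-cpu entry this prints base=0, limit=0xfffff in 4K units (4 GB) with access byte 0x92 (present, DPL 0, writable data), versus the zero-limit ESPFIX stack segment.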
+/*
+ * Some CPU features depend on higher CPUID levels, which may not always
+ * be available due to CPUID level capping or broken virtualization
+ * software.  Add those features to this table to auto-disable them.
+ */
+struct cpuid_dependent_feature {
+	u32 feature;
+	u32 level;
+};
+
+static const struct cpuid_dependent_feature __cpuinitconst
+cpuid_dependent_features[] = {
+	{ X86_FEATURE_MWAIT,	0x00000005 },
+	{ X86_FEATURE_DCA,	0x00000009 },
+	{ X86_FEATURE_XSAVE,	0x0000000d },
+	{ 0, 0 }
+};
+
+static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
+{
+	const struct cpuid_dependent_feature *df;
+
+	for (df = cpuid_dependent_features; df->feature; df++) {
+		/*
+		 * Note: cpuid_level is set to -1 if unavailable, but
+		 * extended_cpuid_level is set to 0 if unavailable
+		 * and the legitimate extended levels are all negative
+		 * when signed; hence the weird messing around with
+		 * signs here...
+		 */
+		if (cpu_has(c, df->feature) &&
+		    ((s32)df->level < 0 ?
+		     (u32)df->level > (u32)c->extended_cpuid_level :
+		     (s32)df->level > (s32)c->cpuid_level)) {
+			clear_cpu_cap(c, df->feature);
+			if (warn)
+				printk(KERN_WARNING
+				       "CPU: CPU feature %s disabled "
+				       "due to lack of CPUID level 0x%x\n",
+				       x86_cap_flags[df->feature],
+				       df->level);
+		}
+	}
+}
+
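The signed/unsigned dance in the comparison above can be checked in isolation. Here is a minimal user-space sketch (not from the patch; the capped levels are made-up values) that mimics the test for both a standard leaf and an extended 0x8000xxxx leaf:

#include <stdio.h>
#include <stdint.h>

/* Mirrors the check in filter_cpuid_features(): returns 1 when the
 * required CPUID leaf is not reported, i.e. the feature must be cleared. */
static int level_missing(uint32_t level, int32_t cpuid_level,
			 uint32_t extended_cpuid_level)
{
	return (int32_t)level < 0 ?
		(uint32_t)level > extended_cpuid_level :
		(int32_t)level > cpuid_level;
}

int main(void)
{
	/* XSAVE needs leaf 0xd; a hypervisor capping the max level at 0xa,
	 * or cpuid_level == -1 (CPUID unavailable), loses it. */
	printf("%d\n", level_missing(0x0000000d, 0xa, 0));	/* 1: cleared */
	printf("%d\n", level_missing(0x0000000d, -1, 0));	/* 1: cleared */
	printf("%d\n", level_missing(0x0000000d, 0xd, 0));	/* 0: kept    */

	/* Extended leaves are negative as s32, so they are compared
	 * unsigned against extended_cpuid_level (0 when unavailable). */
	printf("%d\n", level_missing(0x80000008, 0xd, 0));		/* 1 */
	printf("%d\n", level_missing(0x80000008, 0xd, 0x80000008));	/* 0 */
	return 0;
}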
/*
* Naming convention should be: <Name> [(<Codename>)]
 * This table is only used if init_<vendor>() below doesn't set it;
__u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;
+void load_percpu_segment(int cpu)
+{
+#ifdef CONFIG_X86_32
+	loadsegment(fs, __KERNEL_PERCPU);
+#else
+	loadsegment(gs, 0);
+	wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu));
+#endif
+	load_stack_canary_segment();
+}
+
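load_percpu_segment() makes per-cpu data reachable through a segment register: %fs on 32-bit, and on 64-bit a zero %gs selector with MSR_GS_BASE pointing at that CPU's per-cpu area. A rough user-space analogue (illustration only, x86-64 Linux assumed; arch_prctl() stands in for wrmsrl, and the array is a pretend per-cpu area) of how a %gs-relative load then lands in that area:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/prctl.h>		/* ARCH_SET_GS */

static unsigned long fake_percpu_area[16];

int main(void)
{
	unsigned long val;

	fake_percpu_area[3] = 0xdeadbeef;

	/* Stand-in for wrmsrl(MSR_GS_BASE, ...): point the %gs base at
	 * our pretend per-cpu area. */
	syscall(SYS_arch_prctl, ARCH_SET_GS, (unsigned long)fake_percpu_area);

	/* A %gs-relative load at offset 24 reads fake_percpu_area[3],
	 * much like the kernel's per-cpu accessors read gs:offset. */
	asm volatile("mov %%gs:24, %0" : "=r" (val));

	printf("%lx\n", val);	/* prints deadbeef */
	return 0;
}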
/* Current gdt points %fs at the "master" per-cpu area: after this,
* it's on the real one. */
-void switch_to_new_gdt(void)
+void switch_to_new_gdt(int cpu)
{
struct desc_ptr gdt_descr;
core_bits = get_count_order(c->x86_max_cores);
-#ifdef CONFIG_X86_64
-	c->cpu_core_id = phys_pkg_id(index_msb) &
-				       ((1 << core_bits) - 1);
-#else
-	c->cpu_core_id = phys_pkg_id(c->initial_apicid, index_msb) &
+	c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
				       ((1 << core_bits) - 1);
-#endif
}
out:
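The hunk above only changes how the physical package id is obtained (through the apic driver's phys_pkg_id hook, on both 32-bit and 64-bit); the core id is still the low core_bits bits of that value. A toy calculation (made-up APIC-derived id and core count, not from the patch; get_count_order() is re-implemented here only so the sketch is self-contained) showing the masking:

#include <stdio.h>

/* Simplified stand-in for the kernel's get_count_order(): ceil(log2(count)). */
static int get_count_order(unsigned int count)
{
	int order = 0;

	while ((1u << order) < count)
		order++;
	return order;
}

int main(void)
{
	unsigned int x86_max_cores = 4;	/* assumed quad-core package       */
	unsigned int pkg_id = 0x5;	/* assumed phys_pkg_id() result    */
	int core_bits = get_count_order(x86_max_cores);		/* 2     */
	unsigned int cpu_core_id = pkg_id & ((1 << core_bits) - 1);

	/* core_bits=2 -> mask 0x3, so cpu_core_id = 0x5 & 0x3 = 1 */
	printf("core_bits=%d cpu_core_id=%u\n", core_bits, cpu_core_id);
	return 0;
}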
__setup("clearcpuid=", setup_disablecpuid);
#ifdef CONFIG_X86_64
-struct x8664_pda **_cpu_pda __read_mostly;
-EXPORT_SYMBOL(_cpu_pda);
-
struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };
-static char boot_cpu_stack[IRQSTACKSIZE] __page_aligned_bss;
-
-void __cpuinit pda_init(int cpu)
-{
-	struct x8664_pda *pda = cpu_pda(cpu);
+DEFINE_PER_CPU_FIRST(union irq_stack_union,
+		     irq_stack_union) __aligned(PAGE_SIZE);
+DEFINE_PER_CPU(char *, irq_stack_ptr) =
+	init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;
+
-	/* Setup up data that may be needed in __get_free_pages early */
-	loadsegment(fs, 0);
-	loadsegment(gs, 0);
-	/* Memory clobbers used to order PDA accessed */
-	mb();
-	wrmsrl(MSR_GS_BASE, pda);
-	mb();
-
-	pda->cpunumber = cpu;
-	pda->irqcount = -1;
-	pda->kernelstack = (unsigned long)stack_thread_info() -
-				 PDA_STACKOFFSET + THREAD_SIZE;
-	pda->active_mm = &init_mm;
-	pda->mmu_state = 0;
-
-	if (cpu == 0) {
-		/* others are initialized in smpboot.c */
-		pda->pcurrent = &init_task;
-		pda->irqstackptr = boot_cpu_stack;
-		pda->irqstackptr += IRQSTACKSIZE - 64;
-	} else {
-		if (!pda->irqstackptr) {
-			pda->irqstackptr = (char *)
-				__get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
-			if (!pda->irqstackptr)
-				panic("cannot allocate irqstack for cpu %d",
-				      cpu);
-			pda->irqstackptr += IRQSTACKSIZE - 64;
-		}
+DEFINE_PER_CPU(unsigned long, kernel_stack) =
+	(unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
+EXPORT_PER_CPU_SYMBOL(kernel_stack);
-		if (pda->nodenumber == 0 && cpu_to_node(cpu) != NUMA_NO_NODE)
-			pda->nodenumber = cpu_to_node(cpu);
-	}
-}
+
+DEFINE_PER_CPU(unsigned int, irq_count) = -1;
-static char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ +
-				  DEBUG_STKSZ] __page_aligned_bss;
+static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
+	[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ])
+	__aligned(PAGE_SIZE);
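The per-cpu variables added above take over the fields they replace: pda->irqstackptr becomes irq_stack_ptr, pda->kernelstack becomes kernel_stack, and pda->irqcount becomes irq_count. As before, the IRQ stack pointer is initialized 64 bytes below the top of the per-cpu IRQ stack. A small sketch of that pointer arithmetic with a mock stack (the array and its size are assumptions for illustration, not the kernel's definitions):

#include <stdio.h>

#define MOCK_IRQ_STACK_SIZE	16384	/* assumed size, standing in for IRQ_STACK_SIZE */

static char mock_irq_stack[MOCK_IRQ_STACK_SIZE];

int main(void)
{
	/* Same shape as:
	 * init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64 */
	char *irq_stack_ptr = mock_irq_stack + MOCK_IRQ_STACK_SIZE - 64;

	printf("stack top  %p\n", (void *)(mock_irq_stack + MOCK_IRQ_STACK_SIZE));
	printf("initial sp %p (64 bytes of headroom above it)\n",
	       (void *)irq_stack_ptr);
	return 0;
}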
extern asmlinkage void ignore_sysret(void);
*/
DEFINE_PER_CPU(struct orig_ist, orig_ist);
-#else
+#else /* x86_64 */
+
+#ifdef CONFIG_CC_STACKPROTECTOR
+DEFINE_PER_CPU(unsigned long, stack_canary);
+#endif
+
-/* Make sure %fs is initialized properly in idle threads */
+/* Make sure %fs and %gs are initialized properly in idle threads */
struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
{
memset(regs, 0, sizeof(struct pt_regs));