Merge tag 'v6.8-rc4' into x86/percpu, to resolve conflicts and refresh the branch
[sfrench/cifs-2.6.git] / arch / x86 / kernel / cpu / common.c
index 4cc0ab0dfbb54435595eeac063c3cb512db4a099..8f367d3765208c215c3ad1f560ad5121295cf1e7 100644 (file)
 #include <asm/intel-family.h>
 #include <asm/cpu_device_id.h>
 #include <asm/uv/uv.h>
+#include <asm/ia32.h>
 #include <asm/set_memory.h>
 #include <asm/traps.h>
 #include <asm/sev.h>
+#include <asm/tdx.h>
 
 #include "cpu.h"
 
@@ -74,18 +76,6 @@ u32 elf_hwcap2 __read_mostly;
 int smp_num_siblings = 1;
 EXPORT_SYMBOL(smp_num_siblings);
 
-/* Last level cache ID of each logical CPU */
-DEFINE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id) = BAD_APICID;
-
-u16 get_llc_id(unsigned int cpu)
-{
-       return per_cpu(cpu_llc_id, cpu);
-}
-EXPORT_SYMBOL_GPL(get_llc_id);
-
-/* L2 cache ID of each logical CPU */
-DEFINE_PER_CPU_READ_MOSTLY(u16, cpu_l2c_id) = BAD_APICID;
-
 static struct ppin_info {
        int     feature;
        int     msr_ppin_ctl;
@@ -199,45 +189,37 @@ DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
         * TLS descriptors are currently at a different place compared to i386.
         * Hopefully nobody expects them at a fixed place (Wine?)
         */
-       [GDT_ENTRY_KERNEL32_CS]         = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
-       [GDT_ENTRY_KERNEL_CS]           = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
-       [GDT_ENTRY_KERNEL_DS]           = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
-       [GDT_ENTRY_DEFAULT_USER32_CS]   = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
-       [GDT_ENTRY_DEFAULT_USER_DS]     = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
-       [GDT_ENTRY_DEFAULT_USER_CS]     = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
+       [GDT_ENTRY_KERNEL32_CS]         = GDT_ENTRY_INIT(DESC_CODE32, 0, 0xfffff),
+       [GDT_ENTRY_KERNEL_CS]           = GDT_ENTRY_INIT(DESC_CODE64, 0, 0xfffff),
+       [GDT_ENTRY_KERNEL_DS]           = GDT_ENTRY_INIT(DESC_DATA64, 0, 0xfffff),
+       [GDT_ENTRY_DEFAULT_USER32_CS]   = GDT_ENTRY_INIT(DESC_CODE32 | DESC_USER, 0, 0xfffff),
+       [GDT_ENTRY_DEFAULT_USER_DS]     = GDT_ENTRY_INIT(DESC_DATA64 | DESC_USER, 0, 0xfffff),
+       [GDT_ENTRY_DEFAULT_USER_CS]     = GDT_ENTRY_INIT(DESC_CODE64 | DESC_USER, 0, 0xfffff),
 #else
-       [GDT_ENTRY_KERNEL_CS]           = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
-       [GDT_ENTRY_KERNEL_DS]           = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
-       [GDT_ENTRY_DEFAULT_USER_CS]     = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
-       [GDT_ENTRY_DEFAULT_USER_DS]     = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
+       [GDT_ENTRY_KERNEL_CS]           = GDT_ENTRY_INIT(DESC_CODE32, 0, 0xfffff),
+       [GDT_ENTRY_KERNEL_DS]           = GDT_ENTRY_INIT(DESC_DATA32, 0, 0xfffff),
+       [GDT_ENTRY_DEFAULT_USER_CS]     = GDT_ENTRY_INIT(DESC_CODE32 | DESC_USER, 0, 0xfffff),
+       [GDT_ENTRY_DEFAULT_USER_DS]     = GDT_ENTRY_INIT(DESC_DATA32 | DESC_USER, 0, 0xfffff),
        /*
         * Segments used for calling PnP BIOS have byte granularity.
         * The code segments and data segments have fixed 64k limits,
         * the transfer segment sizes are set at run time.
         */
-       /* 32-bit code */
-       [GDT_ENTRY_PNPBIOS_CS32]        = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
-       /* 16-bit code */
-       [GDT_ENTRY_PNPBIOS_CS16]        = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
-       /* 16-bit data */
-       [GDT_ENTRY_PNPBIOS_DS]          = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
-       /* 16-bit data */
-       [GDT_ENTRY_PNPBIOS_TS1]         = GDT_ENTRY_INIT(0x0092, 0, 0),
-       /* 16-bit data */
-       [GDT_ENTRY_PNPBIOS_TS2]         = GDT_ENTRY_INIT(0x0092, 0, 0),
+       [GDT_ENTRY_PNPBIOS_CS32]        = GDT_ENTRY_INIT(DESC_CODE32_BIOS, 0, 0xffff),
+       [GDT_ENTRY_PNPBIOS_CS16]        = GDT_ENTRY_INIT(DESC_CODE16, 0, 0xffff),
+       [GDT_ENTRY_PNPBIOS_DS]          = GDT_ENTRY_INIT(DESC_DATA16, 0, 0xffff),
+       [GDT_ENTRY_PNPBIOS_TS1]         = GDT_ENTRY_INIT(DESC_DATA16, 0, 0),
+       [GDT_ENTRY_PNPBIOS_TS2]         = GDT_ENTRY_INIT(DESC_DATA16, 0, 0),
        /*
         * The APM segments have byte granularity and their bases
         * are set at run time.  All have 64k limits.
         */
-       /* 32-bit code */
-       [GDT_ENTRY_APMBIOS_BASE]        = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
-       /* 16-bit code */
-       [GDT_ENTRY_APMBIOS_BASE+1]      = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
-       /* data */
-       [GDT_ENTRY_APMBIOS_BASE+2]      = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
-
-       [GDT_ENTRY_ESPFIX_SS]           = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
-       [GDT_ENTRY_PERCPU]              = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
+       [GDT_ENTRY_APMBIOS_BASE]        = GDT_ENTRY_INIT(DESC_CODE32_BIOS, 0, 0xffff),
+       [GDT_ENTRY_APMBIOS_BASE+1]      = GDT_ENTRY_INIT(DESC_CODE16, 0, 0xffff),
+       [GDT_ENTRY_APMBIOS_BASE+2]      = GDT_ENTRY_INIT(DESC_DATA32_BIOS, 0, 0xffff),
+
+       [GDT_ENTRY_ESPFIX_SS]           = GDT_ENTRY_INIT(DESC_DATA32, 0, 0xfffff),
+       [GDT_ENTRY_PERCPU]              = GDT_ENTRY_INIT(DESC_DATA32, 0, 0xfffff),
 #endif
 } };
 EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
@@ -914,7 +896,7 @@ void detect_ht(struct cpuinfo_x86 *c)
                return;
 
        index_msb = get_count_order(smp_num_siblings);
-       c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);
+       c->topo.pkg_id = apic->phys_pkg_id(c->topo.initial_apicid, index_msb);
 
        smp_num_siblings = smp_num_siblings / c->x86_max_cores;
 
@@ -922,8 +904,8 @@ void detect_ht(struct cpuinfo_x86 *c)
 
        core_bits = get_count_order(c->x86_max_cores);
 
-       c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
-                                      ((1 << core_bits) - 1);
+       c->topo.core_id = apic->phys_pkg_id(c->topo.initial_apicid, index_msb) &
+               ((1 << core_bits) - 1);
 #endif
 }
 
@@ -1114,18 +1096,34 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
 void get_cpu_address_sizes(struct cpuinfo_x86 *c)
 {
        u32 eax, ebx, ecx, edx;
+       bool vp_bits_from_cpuid = true;
 
-       if (c->extended_cpuid_level >= 0x80000008) {
+       if (!cpu_has(c, X86_FEATURE_CPUID) ||
+           (c->extended_cpuid_level < 0x80000008))
+               vp_bits_from_cpuid = false;
+
+       if (vp_bits_from_cpuid) {
                cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
 
                c->x86_virt_bits = (eax >> 8) & 0xff;
                c->x86_phys_bits = eax & 0xff;
+       } else {
+               if (IS_ENABLED(CONFIG_X86_64)) {
+                       c->x86_clflush_size = 64;
+                       c->x86_phys_bits = 36;
+                       c->x86_virt_bits = 48;
+               } else {
+                       c->x86_clflush_size = 32;
+                       c->x86_virt_bits = 32;
+                       c->x86_phys_bits = 32;
+
+                       if (cpu_has(c, X86_FEATURE_PAE) ||
+                           cpu_has(c, X86_FEATURE_PSE36))
+                               c->x86_phys_bits = 36;
+               }
        }
-#ifdef CONFIG_X86_32
-       else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
-               c->x86_phys_bits = 36;
-#endif
        c->x86_cache_bits = c->x86_phys_bits;
+       c->x86_cache_alignment = c->x86_clflush_size;
 }
 
 static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
@@ -1303,7 +1301,7 @@ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
        VULNBL_AMD(0x15, RETBLEED),
        VULNBL_AMD(0x16, RETBLEED),
        VULNBL_AMD(0x17, RETBLEED | SMT_RSB | SRSO),
-       VULNBL_HYGON(0x18, RETBLEED | SMT_RSB),
+       VULNBL_HYGON(0x18, RETBLEED | SMT_RSB | SRSO),
        VULNBL_AMD(0x19, SRSO),
        {}
 };
@@ -1579,17 +1577,6 @@ static void __init cpu_parse_early_param(void)
  */
 static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 {
-#ifdef CONFIG_X86_64
-       c->x86_clflush_size = 64;
-       c->x86_phys_bits = 36;
-       c->x86_virt_bits = 48;
-#else
-       c->x86_clflush_size = 32;
-       c->x86_phys_bits = 32;
-       c->x86_virt_bits = 32;
-#endif
-       c->x86_cache_alignment = c->x86_clflush_size;
-
        memset(&c->x86_capability, 0, sizeof(c->x86_capability));
        c->extended_cpuid_level = 0;
 
@@ -1601,7 +1588,6 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
                cpu_detect(c);
                get_cpu_vendor(c);
                get_cpu_cap(c);
-               get_cpu_address_sizes(c);
                setup_force_cpu_cap(X86_FEATURE_CPUID);
                cpu_parse_early_param();
 
@@ -1617,6 +1603,8 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
                setup_clear_cpu_cap(X86_FEATURE_CPUID);
        }
 
+       get_cpu_address_sizes(c);
+
        setup_force_cpu_cap(X86_FEATURE_ALWAYS);
 
        cpu_set_bug_bits(c);
@@ -1761,15 +1749,15 @@ static void generic_identify(struct cpuinfo_x86 *c)
        get_cpu_address_sizes(c);
 
        if (c->cpuid_level >= 0x00000001) {
-               c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
+               c->topo.initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
 #ifdef CONFIG_X86_32
 # ifdef CONFIG_SMP
-               c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
+               c->topo.apicid = apic->phys_pkg_id(c->topo.initial_apicid, 0);
 # else
-               c->apicid = c->initial_apicid;
+               c->topo.apicid = c->topo.initial_apicid;
 # endif
 #endif
-               c->phys_proc_id = c->initial_apicid;
+               c->topo.pkg_id = c->topo.initial_apicid;
        }
 
        get_model_name(c); /* Default name */
@@ -1799,18 +1787,19 @@ static void generic_identify(struct cpuinfo_x86 *c)
 static void validate_apic_and_package_id(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_SMP
-       unsigned int apicid, cpu = smp_processor_id();
+       unsigned int cpu = smp_processor_id();
+       u32 apicid;
 
        apicid = apic->cpu_present_to_apicid(cpu);
 
-       if (apicid != c->apicid) {
+       if (apicid != c->topo.apicid) {
                pr_err(FW_BUG "CPU%u: APIC id mismatch. Firmware: %x APIC: %x\n",
-                      cpu, apicid, c->initial_apicid);
+                      cpu, apicid, c->topo.initial_apicid);
        }
-       BUG_ON(topology_update_package_map(c->phys_proc_id, cpu));
-       BUG_ON(topology_update_die_map(c->cpu_die_id, cpu));
+       BUG_ON(topology_update_package_map(c->topo.pkg_id, cpu));
+       BUG_ON(topology_update_die_map(c->topo.die_id, cpu));
 #else
-       c->logical_proc_id = 0;
+       c->topo.logical_pkg_id = 0;
 #endif
 }
 
@@ -1829,7 +1818,9 @@ static void identify_cpu(struct cpuinfo_x86 *c)
        c->x86_model_id[0] = '\0';  /* Unset */
        c->x86_max_cores = 1;
        c->x86_coreid_bits = 0;
-       c->cu_id = 0xff;
+       c->topo.cu_id = 0xff;
+       c->topo.llc_id = BAD_APICID;
+       c->topo.l2c_id = BAD_APICID;
 #ifdef CONFIG_X86_64
        c->x86_clflush_size = 64;
        c->x86_phys_bits = 36;
@@ -1855,9 +1846,16 @@ static void identify_cpu(struct cpuinfo_x86 *c)
        apply_forced_caps(c);
 
 #ifdef CONFIG_X86_64
-       c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
+       c->topo.apicid = apic->phys_pkg_id(c->topo.initial_apicid, 0);
 #endif
 
+
+       /*
+        * Set default APIC and TSC_DEADLINE MSR fencing flag. AMD and
+        * Hygon will clear it in ->c_init() below.
+        */
+       set_cpu_cap(c, X86_FEATURE_APIC_MSRS_FENCE);
+
        /*
         * Vendor-specific initialization.  In this section we
         * canonicalize the feature flags, meaning if there are
@@ -1989,6 +1987,7 @@ static __init void identify_boot_cpu(void)
        setup_cr_pinning();
 
        tsx_init();
+       tdx_init();
        lkgs_init();
 }
 
@@ -2075,24 +2074,24 @@ void syscall_init(void)
        wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS);
        wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64);
 
-#ifdef CONFIG_IA32_EMULATION
-       wrmsrl_cstar((unsigned long)entry_SYSCALL_compat);
-       /*
-        * This only works on Intel CPUs.
-        * On AMD CPUs these MSRs are 32-bit, CPU truncates MSR_IA32_SYSENTER_EIP.
-        * This does not cause SYSENTER to jump to the wrong location, because
-        * AMD doesn't allow SYSENTER in long mode (either 32- or 64-bit).
-        */
-       wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
-       wrmsrl_safe(MSR_IA32_SYSENTER_ESP,
-                   (unsigned long)(cpu_entry_stack(smp_processor_id()) + 1));
-       wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat);
-#else
-       wrmsrl_cstar((unsigned long)ignore_sysret);
-       wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG);
-       wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
-       wrmsrl_safe(MSR_IA32_SYSENTER_EIP, 0ULL);
-#endif
+       if (ia32_enabled()) {
+               wrmsrl_cstar((unsigned long)entry_SYSCALL_compat);
+               /*
+                * This only works on Intel CPUs.
+                * On AMD CPUs these MSRs are 32-bit, CPU truncates MSR_IA32_SYSENTER_EIP.
+                * This does not cause SYSENTER to jump to the wrong location, because
+                * AMD doesn't allow SYSENTER in long mode (either 32- or 64-bit).
+                */
+               wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
+               wrmsrl_safe(MSR_IA32_SYSENTER_ESP,
+                           (unsigned long)(cpu_entry_stack(smp_processor_id()) + 1));
+               wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat);
+       } else {
+               wrmsrl_cstar((unsigned long)entry_SYSCALL32_ignore);
+               wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG);
+               wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
+               wrmsrl_safe(MSR_IA32_SYSENTER_EIP, 0ULL);
+       }
 
        /*
         * Flags to clear on syscall; clear as much as possible
@@ -2167,8 +2166,6 @@ static inline void setup_getcpu(int cpu)
 }
 
 #ifdef CONFIG_X86_64
-static inline void ucode_cpu_init(int cpu) { }
-
 static inline void tss_setup_ist(struct tss_struct *tss)
 {
        /* Set up the per-CPU TSS IST stacks */
@@ -2179,16 +2176,8 @@ static inline void tss_setup_ist(struct tss_struct *tss)
        /* Only mapped when SEV-ES is active */
        tss->x86_tss.ist[IST_INDEX_VC] = __this_cpu_ist_top_va(VC);
 }
-
 #else /* CONFIG_X86_64 */
-
-static inline void ucode_cpu_init(int cpu)
-{
-       show_ucode_info_early();
-}
-
 static inline void tss_setup_ist(struct tss_struct *tss) { }
-
 #endif /* !CONFIG_X86_64 */
 
 static inline void tss_setup_io_bitmap(struct tss_struct *tss)
@@ -2244,8 +2233,6 @@ void cpu_init(void)
        struct task_struct *cur = current;
        int cpu = raw_smp_processor_id();
 
-       ucode_cpu_init(cpu);
-
 #ifdef CONFIG_NUMA
        if (this_cpu_read(numa_node) == 0 &&
            early_cpu_to_node(cpu) != NUMA_NO_NODE)