// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>

#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/thread_info.h>
#include <linux/init.h>
#include <linux/uaccess.h>

#include <asm/cpufeature.h>
#include <asm/pgtable.h>
#include <asm/intel-family.h>
#include <asm/microcode_intel.h>
#include <asm/hwcap2.h>

#include <linux/topology.h>

#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#endif

/*
 * Just in case our CPU detection goes bad, or you have a weird system,
 * allow a way to override the automatic disabling of MPX.
 */
static int forcempx;

static int __init forcempx_setup(char *__unused)
{
	forcempx = 1;
	return 1;
}
__setup("intel-skd-046-workaround=disable", forcempx_setup);

void check_mpx_erratum(struct cpuinfo_x86 *c)
{
	if (forcempx)
		return;

	/*
	 * Turn off the MPX feature on CPUs where SMEP is not
	 * available or disabled.
	 *
	 * Works around Intel Erratum SKD046: "Branch Instructions
	 * May Initialize MPX Bound Registers Incorrectly".
	 *
	 * This might falsely disable MPX on systems without
	 * SMEP, like Atom processors without SMEP. But there
	 * is no such hardware known at the moment.
	 */
	if (cpu_has(c, X86_FEATURE_MPX) && !cpu_has(c, X86_FEATURE_SMEP)) {
		setup_clear_cpu_cap(X86_FEATURE_MPX);
		pr_warn("x86/mpx: Disabling MPX since SMEP not present\n");
	}
}
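
/*
 * Usage example for the override above: booting with
 * "intel-skd-046-workaround=disable" on the kernel command line sets
 * forcempx, so check_mpx_erratum() returns early and MPX stays enabled
 * even when SMEP is absent. Only sensible if the part is known not to
 * be affected by SKD046.
 */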

static bool ring3mwait_disabled __read_mostly;

static int __init ring3mwait_disable(char *__unused)
{
	ring3mwait_disabled = true;
	return 1;
}
__setup("ring3mwait=disable", ring3mwait_disable);

static void probe_xeon_phi_r3mwait(struct cpuinfo_x86 *c)
{
	/*
	 * Ring 3 MONITOR/MWAIT feature cannot be detected without
	 * cpu model and family comparison.
	 */
	if (c->x86 != 6)
		return;
	switch (c->x86_model) {
	case INTEL_FAM6_XEON_PHI_KNL:
	case INTEL_FAM6_XEON_PHI_KNM:
		break;
	default:
		return;
	}

	if (ring3mwait_disabled)
		return;

	set_cpu_cap(c, X86_FEATURE_RING3MWAIT);
	this_cpu_or(msr_misc_features_shadow,
		    1UL << MSR_MISC_FEATURES_ENABLES_RING3MWAIT_BIT);

	if (c == &boot_cpu_data)
		ELF_HWCAP2 |= HWCAP2_RING3MWAIT;
}

/*
 * Early microcode releases for the Spectre v2 mitigation were broken.
 * Information taken from:
 * - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/01/microcode-update-guidance.pdf
 * - https://kb.vmware.com/s/article/52345
 * - Microcode revisions observed in the wild
 * - Release note from 20180108 microcode release
 */
struct sku_microcode {
	u8 model;
	u8 stepping;
	u32 microcode;
};
static const struct sku_microcode spectre_bad_microcodes[] = {
	{ INTEL_FAM6_KABYLAKE_DESKTOP,	0x0B,	0x84 },
	{ INTEL_FAM6_KABYLAKE_DESKTOP,	0x0A,	0x84 },
	{ INTEL_FAM6_KABYLAKE_DESKTOP,	0x09,	0x84 },
	{ INTEL_FAM6_KABYLAKE_MOBILE,	0x0A,	0x84 },
	{ INTEL_FAM6_KABYLAKE_MOBILE,	0x09,	0x84 },
	{ INTEL_FAM6_SKYLAKE_X,		0x03,	0x0100013e },
	{ INTEL_FAM6_SKYLAKE_X,		0x04,	0x0200003c },
	{ INTEL_FAM6_SKYLAKE_MOBILE,	0x03,	0xc2 },
	{ INTEL_FAM6_SKYLAKE_DESKTOP,	0x03,	0xc2 },
	{ INTEL_FAM6_BROADWELL_CORE,	0x04,	0x28 },
	{ INTEL_FAM6_BROADWELL_GT3E,	0x01,	0x1b },
	{ INTEL_FAM6_BROADWELL_XEON_D,	0x02,	0x14 },
	{ INTEL_FAM6_BROADWELL_XEON_D,	0x03,	0x07000011 },
	{ INTEL_FAM6_BROADWELL_X,	0x01,	0x0b000025 },
	{ INTEL_FAM6_HASWELL_ULT,	0x01,	0x21 },
	{ INTEL_FAM6_HASWELL_GT3E,	0x01,	0x18 },
	{ INTEL_FAM6_HASWELL_CORE,	0x03,	0x23 },
	{ INTEL_FAM6_HASWELL_X,		0x02,	0x3b },
	{ INTEL_FAM6_HASWELL_X,		0x04,	0x10 },
	{ INTEL_FAM6_IVYBRIDGE_X,	0x04,	0x42a },
	/* Updated in the 20180108 release; blacklist until we know otherwise */
	{ INTEL_FAM6_ATOM_GEMINI_LAKE,	0x01,	0x22 },
	/* Observed in the wild */
	{ INTEL_FAM6_SANDYBRIDGE_X,	0x06,	0x61b },
	{ INTEL_FAM6_SANDYBRIDGE_X,	0x07,	0x712 },
};
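
/*
 * Worked example for the table above: INTEL_FAM6_SKYLAKE_X (model 0x55),
 * stepping 0x04 is listed with microcode 0x0200003c, so a part reporting
 * any revision <= 0x0200003c is treated as broken by
 * bad_spectre_microcode() below, while a later (hypothetical) revision
 * such as 0x0200004d passes.
 */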

static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
		if (c->x86_model == spectre_bad_microcodes[i].model &&
		    c->x86_mask == spectre_bad_microcodes[i].stepping)
			return (c->microcode <= spectre_bad_microcodes[i].microcode);
	}

	return false;
}

static void early_init_intel(struct cpuinfo_x86 *c)
{
	u64 misc_enable;

	/* Unmask CPUID levels if masked: */
	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
		if (msr_clear_bit(MSR_IA32_MISC_ENABLE,
				  MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT) > 0) {
			c->cpuid_level = cpuid_eax(0);
			get_cpu_cap(c);
		}
	}

	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);

	if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64))
		c->microcode = intel_get_microcode_revision();

	/*
	 * The Intel SPEC_CTRL CPUID bit implies IBRS and IBPB support,
	 * and they also have a different bit for STIBP support. Also,
	 * a hypervisor might have set the individual AMD bits even on
	 * Intel CPUs, for finer-grained selection of what's available.
	 */
	if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) {
		set_cpu_cap(c, X86_FEATURE_IBRS);
		set_cpu_cap(c, X86_FEATURE_IBPB);
	}

	if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
		set_cpu_cap(c, X86_FEATURE_STIBP);

	/* Now, if any of them are set, check the blacklist and clear the lot */
	if ((cpu_has(c, X86_FEATURE_IBRS) || cpu_has(c, X86_FEATURE_IBPB) ||
	     cpu_has(c, X86_FEATURE_STIBP)) && bad_spectre_microcode(c)) {
		pr_warn("Intel Spectre v2 broken microcode detected; disabling Speculation Control\n");
		clear_cpu_cap(c, X86_FEATURE_IBRS);
		clear_cpu_cap(c, X86_FEATURE_IBPB);
		clear_cpu_cap(c, X86_FEATURE_STIBP);
		clear_cpu_cap(c, X86_FEATURE_SPEC_CTRL);
		clear_cpu_cap(c, X86_FEATURE_INTEL_STIBP);
	}

	/*
	 * Atom erratum AAE44/AAF40/AAG38/AAH41:
	 *
	 * A race condition between speculative fetches and invalidating
	 * a large page. This is worked around in microcode, but we
	 * need the microcode to have already been loaded... so if it is
	 * not, recommend a BIOS update and disable large pages.
	 */
	if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2 &&
	    c->microcode < 0x20e) {
		pr_warn("Atom PSE erratum detected, BIOS microcode update recommended\n");
		clear_cpu_cap(c, X86_FEATURE_PSE);
	}

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSENTER32);
#else
	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
	if (c->x86 == 15 && c->x86_cache_alignment == 64)
		c->x86_cache_alignment = 128;
#endif

	/* CPUID workaround for 0F33/0F34 CPU */
	if (c->x86 == 0xF && c->x86_model == 0x3 &&
	    (c->x86_mask == 0x3 || c->x86_mask == 0x4))
		c->x86_phys_bits = 36;

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states.
	 *
	 * It is also reliable across cores and sockets. (but not across
	 * cabinets - we turn it off in that case explicitly.)
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
	}

	/* Penwell and Cloverview have the TSC which doesn't sleep on S3 */
	if (c->x86 == 6) {
		switch (c->x86_model) {
		case 0x27:	/* Penwell */
		case 0x35:	/* Cloverview */
		case 0x4a:	/* Merrifield */
			set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC_S3);
			break;
		default:
			break;
		}
	}

	/*
	 * There is a known erratum on Pentium III and Core Solo
	 * and Core Duo CPUs.
	 * " Page with PAT set to WC while associated MTRR is UC
	 * may consolidate to UC "
	 * Because of this erratum, it is better to stick with
	 * setting WC in MTRR rather than using PAT on these CPUs.
	 *
	 * Enable PAT WC only on P4, Core 2 or later CPUs.
	 */
	if (c->x86 == 6 && c->x86_model < 15)
		clear_cpu_cap(c, X86_FEATURE_PAT);

	/*
	 * If fast string is not enabled in IA32_MISC_ENABLE for any reason,
	 * clear the fast string and enhanced fast string CPU capabilities.
	 */
	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
		if (!(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING)) {
			pr_info("Disabled fast string operations\n");
			setup_clear_cpu_cap(X86_FEATURE_REP_GOOD);
			setup_clear_cpu_cap(X86_FEATURE_ERMS);
		}
	}

	/*
	 * Intel Quark Core DevMan_001.pdf section 6.4.11
	 * "The operating system also is required to invalidate (i.e., flush)
	 *  the TLB when any changes are made to any of the page table entries.
	 *  The operating system must reload CR3 to cause the TLB to be flushed"
	 *
	 * As a result, boot_cpu_has(X86_FEATURE_PGE) in arch/x86/include/asm/tlbflush.h
	 * should be false so that __flush_tlb_all() causes CR3 instead of CR4.PGE
	 * to be modified.
	 */
	if (c->x86 == 5 && c->x86_model == 9) {
		pr_info("Disabling PGE capability bit\n");
		setup_clear_cpu_cap(X86_FEATURE_PGE);
	}

	if (c->cpuid_level >= 0x00000001) {
		u32 eax, ebx, ecx, edx;

		cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
		/*
		 * If HTT (EDX[28]) is set, EBX[16:23] contains the number of
		 * apicids which are reserved per package. Store the resulting
		 * shift value for the package management code.
		 */
		if (edx & (1U << 28))
			c->x86_coreid_bits = get_count_order((ebx >> 16) & 0xff);
	}
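
	/*
	 * Worked example (illustrative values, not a specific SKU): if
	 * EBX[16:23] reports 16 reserved apicids per package,
	 * get_count_order(16) == 4, so x86_coreid_bits becomes 4 and the
	 * package ID is the initial APIC ID shifted right by 4.
	 */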

	check_mpx_erratum(c);
}

#ifdef CONFIG_X86_32
/*
 * Early probe support logic for ppro memory erratum #50
 *
 * This is called before we do cpu ident work
 */

int ppro_with_ram_bug(void)
{
	/* Uses data from early_cpu_detect now */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
	    boot_cpu_data.x86_mask < 8) {
		pr_info("Pentium Pro with Errata#50 detected. Taking evasive action.\n");
		return 1;
	}
	return 0;
}

static void intel_smp_check(struct cpuinfo_x86 *c)
{
	/* calling is from identify_secondary_cpu() ? */
	if (!c->cpu_index)
		return;

	/*
	 * Mask B, Pentium, but not Pentium MMX
	 */
	if (c->x86 == 5 &&
	    c->x86_mask >= 1 && c->x86_mask <= 4 &&
	    c->x86_model <= 3) {
		/*
		 * Remember we have B step Pentia with bugs
		 */
		WARN_ONCE(1, "WARNING: SMP operation may be unreliable "
			     "with B stepping processors.\n");
	}
}

static int forcepae;
static int __init forcepae_setup(char *__unused)
{
	forcepae = 1;
	return 1;
}
__setup("forcepae", forcepae_setup);

static void intel_workarounds(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_F00F_BUG
	/*
	 * All models of Pentium and Pentium with MMX technology CPUs
	 * have the F0 0F bug, which lets nonprivileged users lock up the
	 * system. Announce that the fault handler will be checking for it.
	 * The Quark is also family 5, but does not have the same bug.
	 */
	clear_cpu_bug(c, X86_BUG_F00F);
	if (c->x86 == 5 && c->x86_model < 9) {
		static int f00f_workaround_enabled;

		set_cpu_bug(c, X86_BUG_F00F);
		if (!f00f_workaround_enabled) {
			pr_notice("Intel Pentium with F0 0F bug - workaround enabled.\n");
			f00f_workaround_enabled = 1;
		}
	}
#endif

	/*
	 * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
	 * model 3 mask 3
	 */
	if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
		clear_cpu_cap(c, X86_FEATURE_SEP);
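
	/*
	 * Worked example for the packed comparison above: family 6, model 3,
	 * stepping 3 packs to (6 << 8) | (3 << 4) | 3 == 0x633; anything
	 * earlier (e.g. a model-1 Pentium Pro, 0x61x) compares below 0x633
	 * and has SEP cleared.
	 */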

	/*
	 * PAE CPUID issue: many Pentium M report no PAE but may have a
	 * functionally usable PAE implementation.
	 * Forcefully enable PAE if kernel parameter "forcepae" is present.
	 */
	if (forcepae) {
		pr_warn("PAE forced!\n");
		set_cpu_cap(c, X86_FEATURE_PAE);
		add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
	}

	/*
	 * P4 Xeon erratum 037 workaround.
	 * Hardware prefetcher may cause stale data to be loaded into the cache.
	 */
	if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
		if (msr_set_bit(MSR_IA32_MISC_ENABLE,
				MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT) > 0) {
			pr_info("CPU: C0 stepping P4 Xeon detected.\n");
			pr_info("CPU: Disabling hardware prefetching (Erratum 037)\n");
		}
	}

	/*
	 * See if we have a good local APIC by checking for buggy Pentia,
	 * i.e. all B steppings and the C2 stepping of P54C when using their
	 * integrated APIC (see 11AP erratum in "Pentium Processor
	 * Specification Update").
	 */
	if (boot_cpu_has(X86_FEATURE_APIC) && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
	    (c->x86_mask < 0x6 || c->x86_mask == 0xb))
		set_cpu_bug(c, X86_BUG_11AP);

#ifdef CONFIG_X86_INTEL_USERCOPY
	/*
	 * Set up the preferred alignment for movsl bulk memory moves
	 */
	switch (c->x86) {
	case 4:		/* 486: untested */
		break;
	case 5:		/* Old Pentia: untested */
		break;
	case 6:		/* PII/PIII only like movsl with 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	case 15:	/* P4 is OK down to 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	}
#endif

	intel_smp_check(c);
}
#else
static void intel_workarounds(struct cpuinfo_x86 *c)
{
}
#endif

static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	unsigned node;
	int cpu = smp_processor_id();

	/*
	 * Don't do the funky fallback heuristics the AMD version employs
	 * for now.
	 */
	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE || !node_online(node)) {
		/* reuse the value from init_cpu_to_node() */
		node = cpu_to_node(cpu);
	}
	numa_set_node(cpu, node);
#endif
}

/*
 * Find out the number of processor cores on the die.
 */
static int intel_num_cpu_cores(struct cpuinfo_x86 *c)
{
	unsigned int eax, ebx, ecx, edx;

	if (!IS_ENABLED(CONFIG_SMP) || c->cpuid_level < 4)
		return 1;

	/* Intel has a non-standard dependency on %ecx for this CPUID level. */
	cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
	if (eax & 0x1f)
		return (eax >> 26) + 1;
	else
		return 1;
}
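
/*
 * Worked example for the decode above (illustrative, not a specific SKU):
 * CPUID.(EAX=4, ECX=0) with EAX[4:0] != 0 indicates a valid cache level,
 * and EAX[31:26] holds the maximum number of addressable core IDs in the
 * package minus one, so EAX[31:26] == 3 yields (3 + 1) == 4 cores.
 */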

static void detect_vmx_virtcap(struct cpuinfo_x86 *c)
{
	/* Intel VMX MSR indicated features */
#define X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW	0x00200000
#define X86_VMX_FEATURE_PROC_CTLS_VNMI		0x00400000
#define X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS	0x80000000
#define X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC	0x00000001
#define X86_VMX_FEATURE_PROC_CTLS2_EPT		0x00000002
#define X86_VMX_FEATURE_PROC_CTLS2_VPID		0x00000020

	u32 vmx_msr_low, vmx_msr_high, msr_ctl, msr_ctl2;

	clear_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
	clear_cpu_cap(c, X86_FEATURE_VNMI);
	clear_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
	clear_cpu_cap(c, X86_FEATURE_EPT);
	clear_cpu_cap(c, X86_FEATURE_VPID);

	rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, vmx_msr_low, vmx_msr_high);
	msr_ctl = vmx_msr_high | vmx_msr_low;
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW)
		set_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_VNMI)
		set_cpu_cap(c, X86_FEATURE_VNMI);
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS) {
		rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
		      vmx_msr_low, vmx_msr_high);
		msr_ctl2 = vmx_msr_high | vmx_msr_low;
		if ((msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC) &&
		    (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW))
			set_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
		if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_EPT)
			set_cpu_cap(c, X86_FEATURE_EPT);
		if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VPID)
			set_cpu_cap(c, X86_FEATURE_VPID);
	}
}
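
/*
 * Note on the decode above: the VMX capability MSRs are 64 bits wide and
 * rdmsr() returns them as two 32-bit halves (allowed 0-settings low,
 * allowed 1-settings high, per the SDM); the code ORs the halves before
 * testing control bits. Illustrative example: a high half of 0x80200000
 * makes msr_ctl test positive for TPR_SHADOW (bit 21, 0x00200000) and
 * 2ND_CTLS (bit 31, 0x80000000).
 */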

static void init_intel_energy_perf(struct cpuinfo_x86 *c)
{
	u64 epb;

	/*
	 * Initialize MSR_IA32_ENERGY_PERF_BIAS if not already initialized.
	 * (x86_energy_perf_policy(8) is available to change it at run-time.)
	 */
	if (!cpu_has(c, X86_FEATURE_EPB))
		return;

	rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
	if ((epb & 0xF) != ENERGY_PERF_BIAS_PERFORMANCE)
		return;

	pr_warn_once("ENERGY_PERF_BIAS: Set to 'normal', was 'performance'\n");
	pr_warn_once("ENERGY_PERF_BIAS: View and update with x86_energy_perf_policy(8)\n");
	epb = (epb & ~0xF) | ENERGY_PERF_BIAS_NORMAL;
	wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
}
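
/*
 * Field reference for the low nibble used above (the kernel's
 * ENERGY_PERF_BIAS_* defines): 0 = performance, 6 = normal,
 * 15 = powersave. A firmware-left value of 0 is thus rewritten to
 * (epb & ~0xF) | 6, preserving the MSR's upper bits.
 */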

static void intel_bsp_resume(struct cpuinfo_x86 *c)
{
	/*
	 * MSR_IA32_ENERGY_PERF_BIAS is lost across suspend/resume,
	 * so reinitialize it properly like during bootup:
	 */
	init_intel_energy_perf(c);
}

static void init_cpuid_fault(struct cpuinfo_x86 *c)
{
	u64 msr;

	if (!rdmsrl_safe(MSR_PLATFORM_INFO, &msr)) {
		if (msr & MSR_PLATFORM_INFO_CPUID_FAULT)
			set_cpu_cap(c, X86_FEATURE_CPUID_FAULT);
	}
}

static void init_intel_misc_features(struct cpuinfo_x86 *c)
{
	u64 msr;

	if (rdmsrl_safe(MSR_MISC_FEATURES_ENABLES, &msr))
		return;

	/* Clear all MISC features */
	this_cpu_write(msr_misc_features_shadow, 0);

	/* Check features and update capabilities and shadow control bits */
	init_cpuid_fault(c);
	probe_xeon_phi_r3mwait(c);

	msr = this_cpu_read(msr_misc_features_shadow);
	wrmsrl(MSR_MISC_FEATURES_ENABLES, msr);
}
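
/*
 * Note on the shadow pattern above: probes such as probe_xeon_phi_r3mwait()
 * OR their enable bits into the per-CPU shadow, and the final wrmsrl()
 * pushes the accumulated value to the MSR in one shot. E.g. if only
 * RING3MWAIT probed successfully, msr ends up as
 * (1UL << MSR_MISC_FEATURES_ENABLES_RING3MWAIT_BIT).
 */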

static void init_intel(struct cpuinfo_x86 *c)
{
	unsigned int l2 = 0;

	early_init_intel(c);

	intel_workarounds(c);

	/*
	 * Detect the extended topology information if available. This
	 * will reinitialise the initial_apicid which will be used
	 * in init_intel_cacheinfo()
	 */
	detect_extended_topology(c);

	if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
		/*
		 * let's use the legacy cpuid vector 0x1 and 0x4 for topology
		 * detection.
		 */
		c->x86_max_cores = intel_num_cpu_cores(c);
#ifdef CONFIG_X86_32
		detect_ht(c);
#endif
	}

	l2 = init_intel_cacheinfo(c);

	/* Detect legacy cache sizes if init_intel_cacheinfo() did not */
	if (l2 == 0) {
		cpu_detect_cache_sizes(c);
		l2 = c->x86_cache_size;
	}

	if (c->cpuid_level > 9) {
		unsigned eax = cpuid_eax(10);

		/* Check for version and the number of counters */
		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
			set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
	}

	if (cpu_has(c, X86_FEATURE_XMM2))
		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);

	if (boot_cpu_has(X86_FEATURE_DS)) {
		unsigned int l1;

		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
		if (!(l1 & (1 << 11)))
			set_cpu_cap(c, X86_FEATURE_BTS);
		if (!(l1 & (1 << 12)))
			set_cpu_cap(c, X86_FEATURE_PEBS);
	}

	if (c->x86 == 6 && boot_cpu_has(X86_FEATURE_CLFLUSH) &&
	    (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47))
		set_cpu_bug(c, X86_BUG_CLFLUSH_MONITOR);

	if (c->x86 == 6 && boot_cpu_has(X86_FEATURE_MWAIT) &&
	    c->x86_model == INTEL_FAM6_ATOM_GOLDMONT)
		set_cpu_bug(c, X86_BUG_MONITOR);

#ifdef CONFIG_X86_64
	if (c->x86 == 15)
		c->x86_cache_alignment = c->x86_clflush_size * 2;
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
#else
	/*
	 * Names for the Pentium II/Celeron processors
	 * detectable only by also checking the cache size.
	 * Dixon is NOT a Celeron.
	 */
	if (c->x86 == 6) {
		unsigned char *p = NULL;

		switch (c->x86_model) {
		case 5:
			if (l2 == 0)
				p = "Celeron (Covington)";
			else if (l2 == 256)
				p = "Mobile Pentium II (Dixon)";
			break;

		case 6:
			if (l2 == 128)
				p = "Celeron (Mendocino)";
			else if (c->x86_mask == 0 || c->x86_mask == 5)
				p = "Celeron-A";
			break;

		case 8:
			if (l2 == 128)
				p = "Celeron (Coppermine)";
			break;
		}

		if (p)
			strcpy(c->x86_model_id, p);
	}

	if (c->x86 == 15)
		set_cpu_cap(c, X86_FEATURE_P4);
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_P3);
#endif

	/* Work around errata */
	srat_detect_node(c);

	if (cpu_has(c, X86_FEATURE_VMX))
		detect_vmx_virtcap(c);

	init_intel_energy_perf(c);

	init_intel_misc_features(c);
}

#ifdef CONFIG_X86_32
static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/*
	 * Intel PIII Tualatin. This comes in two flavours.
	 * One has 256kb of cache, the other 512. We have no way
	 * to determine which, so we use a boottime override
	 * for the 512kb model, and assume 256 otherwise.
	 */
	if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0))
		size = 256;

	/*
	 * Intel Quark SoC X1000 contains a 4-way set associative
	 * 16K cache with a 16 byte cache line and 256 lines per tag.
	 */
	if ((c->x86 == 5) && (c->x86_model == 9))
		size = 16;
	return size;
}
#endif

#define TLB_INST_4K	0x01
#define TLB_INST_4M	0x02
#define TLB_INST_2M_4M	0x03

#define TLB_INST_ALL	0x05
#define TLB_INST_1G	0x06

#define TLB_DATA_4K	0x11
#define TLB_DATA_4M	0x12
#define TLB_DATA_2M_4M	0x13
#define TLB_DATA_4K_4M	0x14

#define TLB_DATA_1G	0x16

#define TLB_DATA0_4K	0x21
#define TLB_DATA0_4M	0x22
#define TLB_DATA0_2M_4M	0x23

#define STLB_4K		0x41
#define STLB_4K_2M	0x42

static const struct _tlb_table intel_tlb_table[] = {
	{ 0x01, TLB_INST_4K,		32,	" TLB_INST 4 KByte pages, 4-way set associative" },
	{ 0x02, TLB_INST_4M,		2,	" TLB_INST 4 MByte pages, fully associative" },
	{ 0x03, TLB_DATA_4K,		64,	" TLB_DATA 4 KByte pages, 4-way set associative" },
	{ 0x04, TLB_DATA_4M,		8,	" TLB_DATA 4 MByte pages, 4-way set associative" },
	{ 0x05, TLB_DATA_4M,		32,	" TLB_DATA 4 MByte pages, 4-way set associative" },
	{ 0x0b, TLB_INST_4M,		4,	" TLB_INST 4 MByte pages, 4-way set associative" },
	{ 0x4f, TLB_INST_4K,		32,	" TLB_INST 4 KByte pages" },
	{ 0x50, TLB_INST_ALL,		64,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
	{ 0x51, TLB_INST_ALL,		128,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
	{ 0x52, TLB_INST_ALL,		256,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
	{ 0x55, TLB_INST_2M_4M,		7,	" TLB_INST 2-MByte or 4-MByte pages, fully associative" },
	{ 0x56, TLB_DATA0_4M,		16,	" TLB_DATA0 4 MByte pages, 4-way set associative" },
	{ 0x57, TLB_DATA0_4K,		16,	" TLB_DATA0 4 KByte pages, 4-way associative" },
	{ 0x59, TLB_DATA0_4K,		16,	" TLB_DATA0 4 KByte pages, fully associative" },
	{ 0x5a, TLB_DATA0_2M_4M,	32,	" TLB_DATA0 2-MByte or 4 MByte pages, 4-way set associative" },
	{ 0x5b, TLB_DATA_4K_4M,		64,	" TLB_DATA 4 KByte and 4 MByte pages" },
	{ 0x5c, TLB_DATA_4K_4M,		128,	" TLB_DATA 4 KByte and 4 MByte pages" },
	{ 0x5d, TLB_DATA_4K_4M,		256,	" TLB_DATA 4 KByte and 4 MByte pages" },
	{ 0x61, TLB_INST_4K,		48,	" TLB_INST 4 KByte pages, fully associative" },
	{ 0x63, TLB_DATA_1G,		4,	" TLB_DATA 1 GByte pages, 4-way set associative" },
	{ 0x76, TLB_INST_2M_4M,		8,	" TLB_INST 2-MByte or 4-MByte pages, fully associative" },
	{ 0xb0, TLB_INST_4K,		128,	" TLB_INST 4 KByte pages, 4-way set associative" },
	{ 0xb1, TLB_INST_2M_4M,		4,	" TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries" },
	{ 0xb2, TLB_INST_4K,		64,	" TLB_INST 4KByte pages, 4-way set associative" },
	{ 0xb3, TLB_DATA_4K,		128,	" TLB_DATA 4 KByte pages, 4-way set associative" },
	{ 0xb4, TLB_DATA_4K,		256,	" TLB_DATA 4 KByte pages, 4-way associative" },
	{ 0xb5, TLB_INST_4K,		64,	" TLB_INST 4 KByte pages, 8-way set associative" },
	{ 0xb6, TLB_INST_4K,		128,	" TLB_INST 4 KByte pages, 8-way set associative" },
	{ 0xba, TLB_DATA_4K,		64,	" TLB_DATA 4 KByte pages, 4-way associative" },
	{ 0xc0, TLB_DATA_4K_4M,		8,	" TLB_DATA 4 KByte and 4 MByte pages, 4-way associative" },
	{ 0xc1, STLB_4K_2M,		1024,	" STLB 4 KByte and 2 MByte pages, 8-way associative" },
	{ 0xc2, TLB_DATA_2M_4M,		16,	" DTLB 2 MByte/4MByte pages, 4-way associative" },
	{ 0xca, STLB_4K,		512,	" STLB 4 KByte pages, 4-way associative" },
	{ 0x00, 0, 0 }
};

static void intel_tlb_lookup(const unsigned char desc)
{
	unsigned char k;

	if (desc == 0)
		return;

	/* look up this descriptor in the table */
	for (k = 0; intel_tlb_table[k].descriptor != desc &&
		    intel_tlb_table[k].descriptor != 0; k++)
		;

	if (intel_tlb_table[k].tlb_type == 0)
		return;

	switch (intel_tlb_table[k].tlb_type) {
	case STLB_4K:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case STLB_4K_2M:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_ALL:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_4K:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_4M:
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_2M_4M:
		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_4K:
	case TLB_DATA0_4K:
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_4M:
	case TLB_DATA0_4M:
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_2M_4M:
	case TLB_DATA0_2M_4M:
		if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_4K_4M:
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_1G:
		if (tlb_lld_1g[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_1g[ENTRIES] = intel_tlb_table[k].entries;
		break;
	}
}

static void intel_detect_tlb(struct cpuinfo_x86 *c)
{
	int i, j, n;
	unsigned int regs[4];
	unsigned char *desc = (unsigned char *)regs;

	if (c->cpuid_level < 2)
		return;

	/* Number of times to iterate */
	n = cpuid_eax(2) & 0xFF;

	for (i = 0 ; i < n ; i++) {
		cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

		/* If bit 31 is set, this is an unknown format */
		for (j = 0 ; j < 3 ; j++)
			if (regs[j] & (1 << 31))
				regs[j] = 0;

		/* Byte 0 is level count, not a descriptor */
		for (j = 1 ; j < 16 ; j++)
			intel_tlb_lookup(desc[j]);
	}
}
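
/*
 * Worked example of the descriptor walk above (illustrative leaf
 * contents): if one of the CPUID leaf 2 descriptor bytes is 0xb0, the
 * lookup finds { 0xb0, TLB_INST_4K, 128, ... } in intel_tlb_table and
 * raises tlb_lli_4k[ENTRIES] to 128, i.e. a 128-entry instruction TLB
 * for 4 KByte pages.
 */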

static const struct cpu_dev intel_cpu_dev = {
	.c_vendor	= "Intel",
	.c_ident	= { "GenuineIntel" },
#ifdef CONFIG_X86_32
	.legacy_models = {
		{ .family = 4, .model_names =
		  {
			  [0] = "486 DX-25/33",
			  [1] = "486 DX-50",
			  [2] = "486 SX",
			  [3] = "486 DX/2",
			  [4] = "486 SL",
			  [5] = "486 SX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB"
		  }
		},
		{ .family = 5, .model_names =
		  {
			  [0] = "Pentium 60/66 A-step",
			  [1] = "Pentium 60/66",
			  [2] = "Pentium 75 - 200",
			  [3] = "OverDrive PODP5V83",
			  [4] = "Pentium MMX",
			  [7] = "Mobile Pentium 75 - 200",
			  [8] = "Mobile Pentium MMX",
			  [9] = "Quark SoC X1000",
		  }
		},
		{ .family = 6, .model_names =
		  {
			  [0] = "Pentium Pro A-step",
			  [1] = "Pentium Pro",
			  [3] = "Pentium II (Klamath)",
			  [4] = "Pentium II (Deschutes)",
			  [5] = "Pentium II (Deschutes)",
			  [6] = "Mobile Pentium II",
			  [7] = "Pentium III (Katmai)",
			  [8] = "Pentium III (Coppermine)",
			  [10] = "Pentium III (Cascades)",
			  [11] = "Pentium III (Tualatin)",
		  }
		},
		{ .family = 15, .model_names =
		  {
			  [0] = "Pentium 4 (Unknown)",
			  [1] = "Pentium 4 (Willamette)",
			  [2] = "Pentium 4 (Northwood)",
			  [4] = "Pentium 4 (Foster)",
			  [5] = "Pentium 4 (Foster)",
		  }
		},
	},
	.legacy_cache_size = intel_size_cache,
#endif
	.c_detect_tlb	= intel_detect_tlb,
	.c_early_init	= early_init_intel,
	.c_init		= init_intel,
	.c_bsp_resume	= intel_bsp_resume,
	.c_x86_vendor	= X86_VENDOR_INTEL,
};

cpu_dev_register(intel_cpu_dev);