// SPDX-License-Identifier: GPL-2.0
/*
 * Intel specific MCE features.
 * Copyright 2004 Zwane Mwaikambo <zwane@linuxpower.ca>
 * Copyright (C) 2008, 2009 Intel Corporation
 */

#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <asm/apic.h>
#include <asm/cpufeature.h>
#include <asm/intel-family.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/mce.h>

#include "internal.h"

/*
 * Support for Intel Corrected Machine Check Interrupts (CMCI). This allows
 * the CPU to raise an interrupt when a corrected machine check happens.
 * Normally we pick those up using a regular polling timer.
 * Also supports reliable discovery of shared banks.
 */

/*
 * CMCI can be delivered to multiple cpus that share a machine check bank,
 * so we need to designate a single cpu to process errors logged in each bank
 * in the interrupt handler (otherwise we would have many races and potential
 * double reporting of the same error).
 * Note that this can change when a cpu is offlined or brought online, since
 * some MCA banks are shared across cpus. When a cpu is offlined, cmci_clear()
 * disables CMCI on all banks owned by the cpu and clears this bitfield. At
 * this point, cmci_rediscover() kicks in and a different cpu may end up
 * taking ownership of some of the shared MCA banks that were previously
 * owned by the offlined cpu.
 */
static DEFINE_PER_CPU(mce_banks_t, mce_banks_owned);

/*
 * CMCI storm detection backoff counter
 *
 * During a storm, we reset this counter to INITIAL_CHECK_INTERVAL if we've
 * encountered an error during the last poll. If not, we decrement it by one.
 * We signal the end of the CMCI storm when it reaches 0.
 */
static DEFINE_PER_CPU(int, cmci_backoff_cnt);

/*
 * cmci_discover_lock protects against parallel discovery attempts
 * which could race against each other.
 */
static DEFINE_RAW_SPINLOCK(cmci_discover_lock);

#define CMCI_THRESHOLD		1
#define CMCI_POLL_INTERVAL	(30 * HZ)
#define CMCI_STORM_INTERVAL	(HZ)
#define CMCI_STORM_THRESHOLD	15

static DEFINE_PER_CPU(unsigned long, cmci_time_stamp);
static DEFINE_PER_CPU(unsigned int, cmci_storm_cnt);
static DEFINE_PER_CPU(unsigned int, cmci_storm_state);

/* Values of cmci_storm_state, referenced throughout this file */
#define CMCI_STORM_NONE		0
#define CMCI_STORM_ACTIVE	1
#define CMCI_STORM_SUBSIDED	2

static atomic_t cmci_storm_on_cpus;
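
/*
 * Summary of the per-CPU storm state machine implemented below (derived
 * from this file's logic):
 *
 *   NONE     -> ACTIVE:   cmci_storm_detect() counts more than
 *                         CMCI_STORM_THRESHOLD interrupts within one
 *                         CMCI_STORM_INTERVAL; CMCI is turned off and the
 *                         banks are serviced by polling instead.
 *   ACTIVE   -> SUBSIDED: cmci_intel_adjust_timer() sees the backoff
 *                         counter drained, i.e. enough quiet polls.
 *   SUBSIDED -> NONE:     once every CPU has left ACTIVE
 *                         (cmci_storm_on_cpus reaches 0), CMCI is
 *                         re-enabled on the owned banks.
 */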

static int cmci_supported(int *banks)
{
	u64 cap;

	if (mca_cfg.cmci_disabled || mca_cfg.ignore_ce)
		return 0;

	/*
	 * Vendor check is not strictly needed, but the MCE initialization
	 * is vendor keyed and this makes sure none of the backdoors are
	 * entered otherwise.
	 */
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_ZHAOXIN)
		return 0;

	if (!boot_cpu_has(X86_FEATURE_APIC) || lapic_get_maxlvt() < 6)
		return 0;
	rdmsrl(MSR_IA32_MCG_CAP, cap);
	*banks = min_t(unsigned, MAX_NR_BANKS, cap & 0xff);
	return !!(cap & MCG_CMCI_P);
}
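
/*
 * For reference (Intel SDM, IA32_MCG_CAP): bits 7:0 report the number of
 * MCA banks, bit 10 (MCG_CMCI_P) advertises CMCI, bit 24 (MCG_SER_P)
 * software error recovery and bit 27 (MCG_LMCE_P) local machine check
 * support. cmci_supported() above and lmce_supported() below key off
 * these bits.
 */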

static bool lmce_supported(void)
{
	u64 tmp;

	if (mca_cfg.lmce_disabled)
		return false;

	rdmsrl(MSR_IA32_MCG_CAP, tmp);

	/*
	 * LMCE depends on recovery support in the processor. Hence both
	 * MCG_SER_P and MCG_LMCE_P should be present in MCG_CAP.
	 */
	if ((tmp & (MCG_SER_P | MCG_LMCE_P)) != (MCG_SER_P | MCG_LMCE_P))
		return false;

	/*
	 * BIOS should indicate support for LMCE by setting bit 20 in
	 * IA32_FEATURE_CONTROL without which touching MCG_EXT_CTL will
	 * generate a #GP fault.
	 */
	rdmsrl(MSR_IA32_FEATURE_CONTROL, tmp);
	return (tmp & (FEATURE_CONTROL_LOCKED | FEATURE_CONTROL_LMCE)) ==
		      (FEATURE_CONTROL_LOCKED | FEATURE_CONTROL_LMCE);
}
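
/*
 * For reference: in IA32_FEATURE_CONTROL, bit 0 is the lock bit (once
 * set by firmware, further writes #GP) and bit 20 enables LMCE, so
 * lmce_supported() effectively requires that firmware opted in to LMCE
 * before locking the MSR.
 */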

bool mce_intel_cmci_poll(void)
{
	if (__this_cpu_read(cmci_storm_state) == CMCI_STORM_NONE)
		return false;

	/*
	 * Reset the counter if we've logged an error in the last poll
	 * during the storm.
	 */
	if (machine_check_poll(0, this_cpu_ptr(&mce_banks_owned)))
		this_cpu_write(cmci_backoff_cnt, INITIAL_CHECK_INTERVAL);
	else
		this_cpu_dec(cmci_backoff_cnt);

	return true;
}
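
/*
 * Note: mce_intel_cmci_poll() is only meaningful during a storm: it
 * returns false when no storm is in progress so the caller can take the
 * regular poll path, and otherwise feeds the backoff counter that
 * cmci_intel_adjust_timer() below consults to decide when the storm has
 * ended.
 */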

void mce_intel_hcpu_update(unsigned long cpu)
{
	if (per_cpu(cmci_storm_state, cpu) == CMCI_STORM_ACTIVE)
		atomic_dec(&cmci_storm_on_cpus);

	per_cpu(cmci_storm_state, cpu) = CMCI_STORM_NONE;
}

static void cmci_toggle_interrupt_mode(bool on)
{
	unsigned long flags, *owned;
	int bank;
	u64 val;

	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
	owned = this_cpu_ptr(mce_banks_owned);
	for_each_set_bit(bank, owned, MAX_NR_BANKS) {
		rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
		if (on)
			val |= MCI_CTL2_CMCI_EN;
		else
			val &= ~MCI_CTL2_CMCI_EN;
		wrmsrl(MSR_IA32_MCx_CTL2(bank), val);
	}
	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
}
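
/*
 * For reference (Intel SDM, IA32_MCi_CTL2): bits 14:0 hold the corrected
 * error count threshold (MCI_CTL2_CMCI_THRESHOLD_MASK) and bit 30
 * (MCI_CTL2_CMCI_EN) enables CMCI signalling for the bank. The toggle
 * above flips only the enable bit and leaves the threshold untouched.
 */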

unsigned long cmci_intel_adjust_timer(unsigned long interval)
{
	if ((this_cpu_read(cmci_backoff_cnt) > 0) &&
	    (__this_cpu_read(cmci_storm_state) == CMCI_STORM_ACTIVE)) {
		mce_notify_irq();
		return CMCI_STORM_INTERVAL;
	}

	switch (__this_cpu_read(cmci_storm_state)) {
	case CMCI_STORM_ACTIVE:
		/*
		 * We switch back to interrupt mode once the poll timer has
		 * silenced itself. That means no events recorded and the timer
		 * interval is back to our poll interval.
		 */
		__this_cpu_write(cmci_storm_state, CMCI_STORM_SUBSIDED);
		if (!atomic_sub_return(1, &cmci_storm_on_cpus))
			pr_notice("CMCI storm subsided: switching to interrupt mode\n");
		/* FALLTHROUGH */
	case CMCI_STORM_SUBSIDED:
		/*
		 * We wait for all CPUs to go back to SUBSIDED state. When that
		 * happens we switch back to interrupt mode.
		 */
		if (!atomic_read(&cmci_storm_on_cpus)) {
			__this_cpu_write(cmci_storm_state, CMCI_STORM_NONE);
			cmci_toggle_interrupt_mode(true);
			cmci_recheck();
		}
		return CMCI_POLL_INTERVAL;
	default:
		/* We have shiny weather. Let the poll do whatever it thinks. */
		return interval;
	}
}

static bool cmci_storm_detect(void)
{
	unsigned int cnt = __this_cpu_read(cmci_storm_cnt);
	unsigned long ts = __this_cpu_read(cmci_time_stamp);
	unsigned long now = jiffies;
	int r;

	if (__this_cpu_read(cmci_storm_state) != CMCI_STORM_NONE)
		return true;

	if (time_before_eq(now, ts + CMCI_STORM_INTERVAL)) {
		cnt++;
	} else {
		cnt = 1;
		__this_cpu_write(cmci_time_stamp, now);
	}
	__this_cpu_write(cmci_storm_cnt, cnt);

	if (cnt <= CMCI_STORM_THRESHOLD)
		return false;

	cmci_toggle_interrupt_mode(false);
	__this_cpu_write(cmci_storm_state, CMCI_STORM_ACTIVE);
	r = atomic_add_return(1, &cmci_storm_on_cpus);
	mce_timer_kick(CMCI_STORM_INTERVAL);
	this_cpu_write(cmci_backoff_cnt, INITIAL_CHECK_INTERVAL);

	if (r == 1)
		pr_notice("CMCI storm detected: switching to poll mode\n");
	return true;
}
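
/*
 * Worked example of the arithmetic above: with, e.g., HZ=1000, more than
 * CMCI_STORM_THRESHOLD (15) interrupts inside one CMCI_STORM_INTERVAL
 * (1000 jiffies, i.e. one second) flips this CPU into storm mode on the
 * 16th interrupt; only the first CPU to enter the storm prints the
 * pr_notice().
 */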

/*
 * The interrupt handler. This is called on every event.
 * Just call the poller directly to log any events.
 * This could in theory increase the threshold under high load,
 * but doesn't for now.
 */
static void intel_threshold_interrupt(void)
{
	if (cmci_storm_detect())
		return;

	machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned));
}

/*
 * Enable CMCI (Corrected Machine Check Interrupt) for available MCE banks
 * on this CPU. Use the algorithm recommended in the SDM to discover shared
 * banks.
 */
static void cmci_discover(int banks)
{
	unsigned long *owned = (void *)this_cpu_ptr(&mce_banks_owned);
	unsigned long flags;
	int i;
	int bios_wrong_thresh = 0;

	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
	for (i = 0; i < banks; i++) {
		u64 val;
		int bios_zero_thresh = 0;

		if (test_bit(i, owned))
			continue;

		/* Skip banks in firmware first mode */
		if (test_bit(i, mce_banks_ce_disabled))
			continue;

		rdmsrl(MSR_IA32_MCx_CTL2(i), val);

		/* Already owned by someone else? */
		if (val & MCI_CTL2_CMCI_EN) {
			clear_bit(i, owned);
			__clear_bit(i, this_cpu_ptr(mce_poll_banks));
			continue;
		}

		if (!mca_cfg.bios_cmci_threshold) {
			val &= ~MCI_CTL2_CMCI_THRESHOLD_MASK;
			val |= CMCI_THRESHOLD;
		} else if (!(val & MCI_CTL2_CMCI_THRESHOLD_MASK)) {
			/*
			 * If the bios_cmci_threshold boot option was
			 * specified but the threshold is zero, we'll try
			 * to initialize it to 1.
			 */
			bios_zero_thresh = 1;
			val |= CMCI_THRESHOLD;
		}

		val |= MCI_CTL2_CMCI_EN;
		wrmsrl(MSR_IA32_MCx_CTL2(i), val);
		rdmsrl(MSR_IA32_MCx_CTL2(i), val);

		/* Did the enable bit stick? -- the bank supports CMCI */
		if (val & MCI_CTL2_CMCI_EN) {
			set_bit(i, owned);
			__clear_bit(i, this_cpu_ptr(mce_poll_banks));
			/*
			 * We are able to set thresholds for some banks that
			 * had a threshold of 0. This means the BIOS has not
			 * set the thresholds properly or does not work with
			 * this boot option. Note down now and report later.
			 */
			if (mca_cfg.bios_cmci_threshold && bios_zero_thresh &&
			    (val & MCI_CTL2_CMCI_THRESHOLD_MASK))
				bios_wrong_thresh = 1;
		} else {
			WARN_ON(!test_bit(i, this_cpu_ptr(mce_poll_banks)));
		}
	}
	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
	if (mca_cfg.bios_cmci_threshold && bios_wrong_thresh) {
		pr_info_once(
			"bios_cmci_threshold: Some banks do not have valid thresholds set\n");
		pr_info_once(
			"bios_cmci_threshold: Make sure your BIOS supports this boot option\n");
	}
}

/*
 * Just in case we missed an event during initialization, check
 * all the CMCI owned banks.
 */
void cmci_recheck(void)
{
	unsigned long flags;
	int banks;

	if (!mce_available(raw_cpu_ptr(&cpu_info)) || !cmci_supported(&banks))
		return;

	local_irq_save(flags);
	machine_check_poll(0, this_cpu_ptr(&mce_banks_owned));
	local_irq_restore(flags);
}

/* Caller must hold the lock on cmci_discover_lock */
static void __cmci_disable_bank(int bank)
{
	u64 val;

	if (!test_bit(bank, this_cpu_ptr(mce_banks_owned)))
		return;
	rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
	val &= ~MCI_CTL2_CMCI_EN;
	wrmsrl(MSR_IA32_MCx_CTL2(bank), val);
	__clear_bit(bank, this_cpu_ptr(mce_banks_owned));
}

/*
 * Disable CMCI on this CPU for all banks it owns when it goes down.
 * This allows other CPUs to claim the banks on rediscovery.
 */
void cmci_clear(void)
{
	unsigned long flags;
	int i, banks;

	if (!cmci_supported(&banks))
		return;
	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
	for (i = 0; i < banks; i++)
		__cmci_disable_bank(i);
	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
}

static void cmci_rediscover_work_func(void *arg)
{
	int banks;

	/* Recheck banks in case CPUs don't all have the same set */
	if (cmci_supported(&banks))
		cmci_discover(banks);
}

/* After a CPU went down, cycle through all the others and rediscover */
void cmci_rediscover(void)
{
	int banks;

	if (!cmci_supported(&banks))
		return;

	on_each_cpu(cmci_rediscover_work_func, NULL, 1);
}
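
/*
 * Note: on_each_cpu() runs cmci_rediscover_work_func() on every online
 * CPU (via IPI for the remote ones) and, with the last argument set to
 * 1, waits for all invocations to finish before returning.
 */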

/*
 * Re-enable CMCI on this CPU in case a CPU down failed.
 */
void cmci_reenable(void)
{
	int banks;

	if (cmci_supported(&banks))
		cmci_discover(banks);
}

void cmci_disable_bank(int bank)
{
	unsigned long flags;
	int banks;

	if (!cmci_supported(&banks))
		return;

	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
	__cmci_disable_bank(bank);
	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
}

void intel_init_cmci(void)
{
	int banks;

	if (!cmci_supported(&banks))
		return;

	mce_threshold_vector = intel_threshold_interrupt;
	cmci_discover(banks);
	/*
	 * For CPU #0 this runs with still disabled APIC, but that's
	 * ok because only the vector is set up. We still do another
	 * check for the banks later for CPU #0 just to make sure
	 * to not miss any events.
	 */
	apic_write(APIC_LVTCMCI, THRESHOLD_APIC_VECTOR|APIC_DM_FIXED);
	cmci_recheck();
}
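
/*
 * Note: THRESHOLD_APIC_VECTOR is shared with AMD's threshold interrupt;
 * the common low-level handler simply calls whatever function
 * mce_threshold_vector points to, which intel_init_cmci() set to
 * intel_threshold_interrupt() above.
 */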

void intel_init_lmce(void)
{
	u64 val;

	if (!lmce_supported())
		return;

	rdmsrl(MSR_IA32_MCG_EXT_CTL, val);
	if (!(val & MCG_EXT_CTL_LMCE_EN))
		wrmsrl(MSR_IA32_MCG_EXT_CTL, val | MCG_EXT_CTL_LMCE_EN);
}

void intel_clear_lmce(void)
{
	u64 val;

	if (!lmce_supported())
		return;

	rdmsrl(MSR_IA32_MCG_EXT_CTL, val);
	val &= ~MCG_EXT_CTL_LMCE_EN;
	wrmsrl(MSR_IA32_MCG_EXT_CTL, val);
}

static void intel_ppin_init(struct cpuinfo_x86 *c)
{
	unsigned long long val;

	/*
	 * Even if testing the presence of the MSR would be enough, we don't
	 * want to risk the situation where other models reuse this MSR for
	 * other purposes.
	 */
	switch (c->x86_model) {
	case INTEL_FAM6_IVYBRIDGE_X:
	case INTEL_FAM6_HASWELL_X:
	case INTEL_FAM6_BROADWELL_D:
	case INTEL_FAM6_BROADWELL_X:
	case INTEL_FAM6_SKYLAKE_X:
	case INTEL_FAM6_ICELAKE_X:
	case INTEL_FAM6_XEON_PHI_KNL:
	case INTEL_FAM6_XEON_PHI_KNM:
		if (rdmsrl_safe(MSR_PPIN_CTL, &val))
			return;

		if ((val & 3UL) == 1UL) {
			/* PPIN locked in disabled mode */
			return;
		}

		/* If PPIN is disabled, but not locked, try to enable: */
		if (!(val & 3UL)) {
			wrmsrl_safe(MSR_PPIN_CTL, val | 2UL);
			rdmsrl_safe(MSR_PPIN_CTL, &val);
		}

		if ((val & 3UL) == 2UL)
			set_cpu_cap(c, X86_FEATURE_INTEL_PPIN);
	}
}
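
/*
 * For reference: in MSR_PPIN_CTL, bit 0 is LockOut and bit 1 is
 * Enable_PPIN, so (val & 3) == 1 means locked with PPIN disabled,
 * writing 2 attempts to enable it, and (val & 3) == 2 means the PPIN
 * can be read from MSR_PPIN.
 */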

void mce_intel_feature_init(struct cpuinfo_x86 *c)
{
	intel_init_thermal(c);
	intel_init_cmci();
	intel_init_lmce();
	intel_ppin_init(c);
}

void mce_intel_feature_clear(struct cpuinfo_x86 *c)
{
	intel_clear_lmce();
}