/*
 *  (c) 2005-2016 Advanced Micro Devices, Inc.
 *  Your use of this code is subject to the terms and conditions of the
 *  GNU general public license version 2. See "COPYING" or
 *  http://www.gnu.org/licenses/gpl.html
 *
 *  Written by Jacob Shin - AMD, Inc.
 *  Maintained by: Borislav Petkov <bp@alien8.de>
 *
 *  All MC4_MISCi registers are shared between cores on a node.
 */

#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/kobject.h>
#include <linux/percpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/string.h>

#include <asm/amd_nb.h>
#include <asm/traps.h>
#include <asm/apic.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/trace/irq_vectors.h>

#include "internal.h"

#define NR_BLOCKS         5
#define THRESHOLD_MAX     0xFFF
#define INT_TYPE_APIC     0x00020000
#define MASK_VALID_HI     0x80000000
#define MASK_CNTP_HI      0x40000000
#define MASK_LOCKED_HI    0x20000000
#define MASK_LVTOFF_HI    0x00F00000
#define MASK_COUNT_EN_HI  0x00080000
#define MASK_INT_TYPE_HI  0x00060000
#define MASK_OVERFLOW_HI  0x00010000
#define MASK_ERR_COUNT_HI 0x00000FFF
#define MASK_BLKPTR_LO    0xFF000000
#define MCG_XBLK_ADDR     0xC0000400

/* Deferred error settings */
#define MSR_CU_DEF_ERR		0xC0000410
#define MASK_DEF_LVTOFF		0x000000F0
#define MASK_DEF_INT_TYPE	0x00000006
#define DEF_LVT_OFF		0x2
#define DEF_INT_TYPE_APIC	0x2

/* Scalable MCA: */

/* Threshold LVT offset is at MSR0xC0000410[15:12] */
#define SMCA_THR_LVT_OFF	0xF000

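/*
 * Illustrative example (not in the original source): on an SMCA system,
 * masking the low word of MSR_CU_DEF_ERR (0xC0000410) with SMCA_THR_LVT_OFF
 * recovers the BIOS-programmed thresholding LVT offset. If the low word
 * reads 0x2022, then (0x2022 & 0xF000) >> 12 == 0x2, i.e. the thresholding
 * interrupt uses extended LVT offset 2.
 */
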
static bool thresholding_irq_en;

static const char * const th_names[] = {
        "load_store",
        "insn_fetch",
        "combined_unit",
        "decode_unit",
        "northbridge",
        "execution_unit",
};

static const char * const smca_umc_block_names[] = {
        "dram_ecc",
        "misc_umc"
};

struct smca_bank_name {
        const char *name;	/* Short name for sysfs */
        const char *long_name;	/* Long name for pretty-printing */
};

static struct smca_bank_name smca_names[] = {
        [SMCA_LS]	= { "load_store",	"Load Store Unit" },
        [SMCA_IF]	= { "insn_fetch",	"Instruction Fetch Unit" },
        [SMCA_L2_CACHE]	= { "l2_cache",		"L2 Cache" },
        [SMCA_DE]	= { "decode_unit",	"Decode Unit" },
        [SMCA_RESERVED]	= { "reserved",		"Reserved" },
        [SMCA_EX]	= { "execution_unit",	"Execution Unit" },
        [SMCA_FP]	= { "floating_point",	"Floating Point Unit" },
        [SMCA_L3_CACHE]	= { "l3_cache",		"L3 Cache" },
        [SMCA_CS]	= { "coherent_slave",	"Coherent Slave" },
        [SMCA_CS_V2]	= { "coherent_slave",	"Coherent Slave" },
        [SMCA_PIE]	= { "pie",		"Power, Interrupts, etc." },
        [SMCA_UMC]	= { "umc",		"Unified Memory Controller" },
        [SMCA_PB]	= { "param_block",	"Parameter Block" },
        [SMCA_PSP]	= { "psp",		"Platform Security Processor" },
        [SMCA_PSP_V2]	= { "psp",		"Platform Security Processor" },
        [SMCA_SMU]	= { "smu",		"System Management Unit" },
        [SMCA_SMU_V2]	= { "smu",		"System Management Unit" },
        [SMCA_MP5]	= { "mp5",		"Microprocessor 5 Unit" },
        [SMCA_NBIO]	= { "nbio",		"Northbridge IO Unit" },
        [SMCA_PCIE]	= { "pcie",		"PCI Express Unit" },
};

static u32 smca_bank_addrs[MAX_NR_BANKS][NR_BLOCKS] __ro_after_init =
{
        [0 ... MAX_NR_BANKS - 1] = { [0 ... NR_BLOCKS - 1] = -1 }
};

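/*
 * Note (added for clarity): the -1 initializer marks a block address as
 * "not yet probed". smca_get_block_address() below fills each slot on first
 * use, so the MSR reads needed to discover a block's address are done only
 * once per (bank, block) pair.
 */
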
static const char *smca_get_name(enum smca_bank_types t)
{
        if (t >= N_SMCA_BANK_TYPES)
                return NULL;

        return smca_names[t].name;
}

const char *smca_get_long_name(enum smca_bank_types t)
{
        if (t >= N_SMCA_BANK_TYPES)
                return NULL;

        return smca_names[t].long_name;
}
EXPORT_SYMBOL_GPL(smca_get_long_name);

static enum smca_bank_types smca_get_bank_type(unsigned int bank)
{
        struct smca_bank *b;

        if (bank >= MAX_NR_BANKS)
                return N_SMCA_BANK_TYPES;

        b = &smca_banks[bank];
        if (!b->hwid)
                return N_SMCA_BANK_TYPES;

        return b->hwid->bank_type;
}

static struct smca_hwid smca_hwid_mcatypes[] = {
        /* { bank_type, hwid_mcatype, xec_bitmap } */

        /* Reserved type */
        { SMCA_RESERVED, HWID_MCATYPE(0x00, 0x0), 0x0 },

        /* ZN Core (HWID=0xB0) MCA types */
        { SMCA_LS,	 HWID_MCATYPE(0xB0, 0x0), 0x1FFFFF },
        { SMCA_IF,	 HWID_MCATYPE(0xB0, 0x1), 0x3FFF },
        { SMCA_L2_CACHE, HWID_MCATYPE(0xB0, 0x2), 0xF },
        { SMCA_DE,	 HWID_MCATYPE(0xB0, 0x3), 0x1FF },
        /* HWID 0xB0 MCATYPE 0x4 is Reserved */
        { SMCA_EX,	 HWID_MCATYPE(0xB0, 0x5), 0xFFF },
        { SMCA_FP,	 HWID_MCATYPE(0xB0, 0x6), 0x7F },
        { SMCA_L3_CACHE, HWID_MCATYPE(0xB0, 0x7), 0xFF },

        /* Data Fabric MCA types */
        { SMCA_CS,	 HWID_MCATYPE(0x2E, 0x0), 0x1FF },
        { SMCA_PIE,	 HWID_MCATYPE(0x2E, 0x1), 0x1F },
        { SMCA_CS_V2,	 HWID_MCATYPE(0x2E, 0x2), 0x3FFF },

        /* Unified Memory Controller MCA type */
        { SMCA_UMC,	 HWID_MCATYPE(0x96, 0x0), 0xFF },

        /* Parameter Block MCA type */
        { SMCA_PB,	 HWID_MCATYPE(0x05, 0x0), 0x1 },

        /* Platform Security Processor MCA type */
        { SMCA_PSP,	 HWID_MCATYPE(0xFF, 0x0), 0x1 },
        { SMCA_PSP_V2,	 HWID_MCATYPE(0xFF, 0x1), 0x3FFFF },

        /* System Management Unit MCA type */
        { SMCA_SMU,	 HWID_MCATYPE(0x01, 0x0), 0x1 },
        { SMCA_SMU_V2,	 HWID_MCATYPE(0x01, 0x1), 0x7FF },

        /* Microprocessor 5 Unit MCA type */
        { SMCA_MP5,	 HWID_MCATYPE(0x01, 0x2), 0x3FF },

        /* Northbridge IO Unit MCA type */
        { SMCA_NBIO,	 HWID_MCATYPE(0x18, 0x0), 0x1F },

        /* PCI Express Unit MCA type */
        { SMCA_PCIE,	 HWID_MCATYPE(0x46, 0x0), 0x1F },
};

struct smca_bank smca_banks[MAX_NR_BANKS];
EXPORT_SYMBOL_GPL(smca_banks);

/*
 * In SMCA enabled processors, we can have multiple banks for a given IP type.
 * So to define a unique name for each bank, we use a temp c-string to append
 * the MCA_IPID[InstanceId] to type's name in get_name().
 *
 * InstanceId is 32 bits which is 8 characters. Make sure MAX_MCATYPE_NAME_LEN
 * is greater than 8 plus 1 (for underscore) plus length of longest type name.
 */
#define MAX_MCATYPE_NAME_LEN	30
static char buf_mcatype[MAX_MCATYPE_NAME_LEN];

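/*
 * Worked example (added for clarity): the longest short name above is
 * "coherent_slave" (14 characters). Worst case is thus 14 + 1 ('_') +
 * 8 (hex InstanceId) + 1 ('\0') = 24 bytes, which fits in the 30-byte
 * buffer with room to spare.
 */
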
static DEFINE_PER_CPU(struct threshold_bank **, threshold_banks);
static DEFINE_PER_CPU(unsigned int, bank_map);	/* see which banks are on */

static void amd_threshold_interrupt(void);
static void amd_deferred_error_interrupt(void);

static void default_deferred_error_interrupt(void)
{
        pr_err("Unexpected deferred interrupt at vector %x\n", DEFERRED_ERROR_VECTOR);
}
void (*deferred_error_int_vector)(void) = default_deferred_error_interrupt;

static void smca_configure(unsigned int bank, unsigned int cpu)
{
        unsigned int i, hwid_mcatype;
        struct smca_hwid *s_hwid;
        u32 high, low;
        u32 smca_config = MSR_AMD64_SMCA_MCx_CONFIG(bank);

        /* Set appropriate bits in MCA_CONFIG */
        if (!rdmsr_safe(smca_config, &low, &high)) {
                /*
                 * OS is required to set the MCAX bit to acknowledge that it is
                 * now using the new MSR ranges and new registers under each
                 * bank. It also means that the OS will configure deferred
                 * errors in the new MCx_CONFIG register. If the bit is not set,
                 * uncorrectable errors will cause a system panic.
                 *
                 * MCA_CONFIG[MCAX] is bit 32 (0 in the high portion of the MSR.)
                 */
                high |= BIT(0);

                /*
                 * SMCA sets the Deferred Error Interrupt type per bank.
                 *
                 * MCA_CONFIG[DeferredIntTypeSupported] is bit 5, and tells us
                 * if the DeferredIntType bit field is available.
                 *
                 * MCA_CONFIG[DeferredIntType] is bits [38:37] ([6:5] in the
                 * high portion of the MSR). OS should set this to 0x1 to enable
                 * APIC based interrupt. First, check that no interrupt has been
                 * set.
                 */
                if ((low & BIT(5)) && !((high >> 5) & 0x3))
                        high |= BIT(5);

                wrmsr(smca_config, low, high);
        }

        /* Return early if this bank was already initialized. */
        if (smca_banks[bank].hwid)
                return;

        if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_IPID(bank), &low, &high)) {
                pr_warn("Failed to read MCA_IPID for bank %d\n", bank);
                return;
        }

        hwid_mcatype = HWID_MCATYPE(high & MCI_IPID_HWID,
                                    (high & MCI_IPID_MCATYPE) >> 16);

        for (i = 0; i < ARRAY_SIZE(smca_hwid_mcatypes); i++) {
                s_hwid = &smca_hwid_mcatypes[i];
                if (hwid_mcatype == s_hwid->hwid_mcatype) {
                        smca_banks[bank].hwid = s_hwid;
                        smca_banks[bank].id = low;
                        smca_banks[bank].sysfs_id = s_hwid->count++;
                        break;
                }
        }
}

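/*
 * Example (added for clarity): a bank whose MCA_IPID reports HardwareID
 * 0xB0 and McaType 0x1 matches the SMCA_IF entry in smca_hwid_mcatypes
 * above. If two banks on the system share that type, the count++ above
 * hands them sysfs_id 0 and 1 respectively.
 */
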
struct thresh_restart {
        struct threshold_block	*b;
        int			reset;
        int			set_lvt_off;
        int			lvt_off;
        u16			old_limit;
};

static inline bool is_shared_bank(int bank)
{
        /*
         * Scalable MCA provides for only one core to have access to the MSRs of
         * a shared bank.
         */
        if (mce_flags.smca)
                return false;

        /* Bank 4 is for northbridge reporting and is thus shared */
        return (bank == 4);
}

static const char *bank4_names(const struct threshold_block *b)
{
        switch (b->address) {
        /* MSR4_MISC0 */
        case 0x00000413:
                return "dram";

        case 0xc0000408:
                return "ht_links";

        case 0xc0000409:
                return "l3_cache";

        default:
                WARN(1, "Funny MSR: 0x%08x\n", b->address);
                return "";
        }
};

static bool lvt_interrupt_supported(unsigned int bank, u32 msr_high_bits)
{
        /*
         * bank 4 supports APIC LVT interrupts implicitly since forever.
         */
        if (bank == 4)
                return true;

        /*
         * IntP: interrupt present; if this bit is set, the thresholding
         * bank can generate APIC LVT interrupts
         */
        return msr_high_bits & BIT(28);
}

static int lvt_off_valid(struct threshold_block *b, int apic, u32 lo, u32 hi)
{
        int msr = (hi & MASK_LVTOFF_HI) >> 20;

        if (apic < 0) {
                pr_err(FW_BUG "cpu %d, failed to setup threshold interrupt "
                       "for bank %d, block %d (MSR%08X=0x%x%08x)\n", b->cpu,
                       b->bank, b->block, b->address, hi, lo);
                return 0;
        }

        if (apic != msr) {
                /*
                 * On SMCA CPUs, LVT offset is programmed at a different MSR, and
                 * the BIOS provides the value. The original field where LVT offset
                 * was set is reserved. Return early here:
                 */
                if (mce_flags.smca)
                        return 0;

                pr_err(FW_BUG "cpu %d, invalid threshold interrupt offset %d "
                       "for bank %d, block %d (MSR%08X=0x%x%08x)\n",
                       b->cpu, apic, b->bank, b->block, b->address, hi, lo);
                return 0;
        }

        return 1;
};

/* Reprogram MCx_MISC MSR behind this threshold bank. */
static void threshold_restart_bank(void *_tr)
{
        struct thresh_restart *tr = _tr;
        u32 hi, lo;

        rdmsr(tr->b->address, lo, hi);

        if (tr->b->threshold_limit < (hi & THRESHOLD_MAX))
                tr->reset = 1;	/* limit cannot be lower than err count */

        if (tr->reset) {		/* reset err count and overflow bit */
                hi =
                    (hi & ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI)) |
                    (THRESHOLD_MAX - tr->b->threshold_limit);
        } else if (tr->old_limit) {	/* change limit w/o reset */
                int new_count = (hi & THRESHOLD_MAX) +
                    (tr->old_limit - tr->b->threshold_limit);

                hi = (hi & ~MASK_ERR_COUNT_HI) |
                    (new_count & THRESHOLD_MAX);
        }

        /* clear IntType */
        hi &= ~MASK_INT_TYPE_HI;

        if (!tr->b->interrupt_capable)
                goto done;

        if (tr->set_lvt_off) {
                if (lvt_off_valid(tr->b, tr->lvt_off, lo, hi)) {
                        /* set new lvt offset */
                        hi &= ~MASK_LVTOFF_HI;
                        hi |= tr->lvt_off << 20;
                }
        }

        if (tr->b->interrupt_enable)
                hi |= INT_TYPE_APIC;

done:
        hi |= MASK_COUNT_EN_HI;
        wrmsr(tr->b->address, lo, hi);
}

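/*
 * Worked example (added for clarity): the hardware counter in MCx_MISC
 * counts *up* and flags overflow once it passes THRESHOLD_MAX (0xFFF).
 * To get an interrupt after threshold_limit errors, the reset path above
 * seeds the counter with THRESHOLD_MAX - threshold_limit. E.g. for a
 * limit of 10 it writes 0xFF5, so ten further increments reach 0xFFF
 * and set the overflow bit.
 */
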
static void mce_threshold_block_init(struct threshold_block *b, int offset)
{
        struct thresh_restart tr = {
                .b			= b,
                .set_lvt_off		= 1,
                .lvt_off		= offset,
        };

        b->threshold_limit		= THRESHOLD_MAX;
        threshold_restart_bank(&tr);
};

static int setup_APIC_mce_threshold(int reserved, int new)
{
        if (reserved < 0 && !setup_APIC_eilvt(new, THRESHOLD_APIC_VECTOR,
                                              APIC_EILVT_MSG_FIX, 0))
                return new;

        return reserved;
}

static int setup_APIC_deferred_error(int reserved, int new)
{
        if (reserved < 0 && !setup_APIC_eilvt(new, DEFERRED_ERROR_VECTOR,
                                              APIC_EILVT_MSG_FIX, 0))
                return new;

        return reserved;
}

static void deferred_error_interrupt_enable(struct cpuinfo_x86 *c)
{
        u32 low = 0, high = 0;
        int def_offset = -1, def_new;

        if (rdmsr_safe(MSR_CU_DEF_ERR, &low, &high))
                return;

        def_new = (low & MASK_DEF_LVTOFF) >> 4;
        if (!(low & MASK_DEF_LVTOFF)) {
                pr_err(FW_BUG "Your BIOS is not setting up LVT offset 0x2 for deferred error IRQs correctly.\n");
                def_new = DEF_LVT_OFF;
                low = (low & ~MASK_DEF_LVTOFF) | (DEF_LVT_OFF << 4);
        }

        def_offset = setup_APIC_deferred_error(def_offset, def_new);
        if ((def_offset == def_new) &&
            (deferred_error_int_vector != amd_deferred_error_interrupt))
                deferred_error_int_vector = amd_deferred_error_interrupt;

        if (!mce_flags.smca)
                low = (low & ~MASK_DEF_INT_TYPE) | DEF_INT_TYPE_APIC;

        wrmsr(MSR_CU_DEF_ERR, low, high);
}

static u32 smca_get_block_address(unsigned int bank, unsigned int block)
{
        u32 low, high;
        u32 addr = 0;

        if (smca_get_bank_type(bank) == SMCA_RESERVED)
                return addr;

        if (!block)
                return MSR_AMD64_SMCA_MCx_MISC(bank);

        /* Check our cache first: */
        if (smca_bank_addrs[bank][block] != -1)
                return smca_bank_addrs[bank][block];

        /*
         * For SMCA enabled processors, BLKPTR field of the first MISC register
         * (MCx_MISC0) indicates presence of additional MISC regs set (MISC1-4).
         */
        if (rdmsr_safe(MSR_AMD64_SMCA_MCx_CONFIG(bank), &low, &high))
                goto out;

        if (!(low & MCI_CONFIG_MCAX))
                goto out;

        if (!rdmsr_safe(MSR_AMD64_SMCA_MCx_MISC(bank), &low, &high) &&
            (low & MASK_BLKPTR_LO))
                addr = MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1);

out:
        smca_bank_addrs[bank][block] = addr;
        return addr;
}

static u32 get_block_address(u32 current_addr, u32 low, u32 high,
                             unsigned int bank, unsigned int block)
{
        u32 addr = 0, offset = 0;

        if ((bank >= mca_cfg.banks) || (block >= NR_BLOCKS))
                return addr;

        if (mce_flags.smca)
                return smca_get_block_address(bank, block);

        /* Fall back to method we used for older processors: */
        switch (block) {
        case 0:
                addr = msr_ops.misc(bank);
                break;
        case 1:
                offset = ((low & MASK_BLKPTR_LO) >> 21);
                if (offset)
                        addr = MCG_XBLK_ADDR + offset;
                break;
        default:
                addr = ++current_addr;
        }
        return addr;
}

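/*
 * Example (added for clarity) of the legacy address chain resolved above:
 * block 0 lives at MCx_MISC; block 1 lives at MCG_XBLK_ADDR + (BLKPTR << 3),
 * where BLKPTR is MISC0[31:24] (the >> 21 above shifts the field down by
 * 24 and back up by 3 in one step); blocks 2..4, when present, simply
 * follow block 1 at consecutive MSR addresses.
 */
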
static int
prepare_threshold_block(unsigned int bank, unsigned int block, u32 addr,
                        int offset, u32 misc_high)
{
        unsigned int cpu = smp_processor_id();
        u32 smca_low, smca_high;
        struct threshold_block b;
        int new;

        if (!block)
                per_cpu(bank_map, cpu) |= (1 << bank);

        memset(&b, 0, sizeof(b));

        b.cpu			= cpu;
        b.bank			= bank;
        b.block			= block;
        b.address		= addr;
        b.interrupt_capable	= lvt_interrupt_supported(bank, misc_high);

        if (!b.interrupt_capable)
                goto done;

        b.interrupt_enable = 1;

        if (!mce_flags.smca) {
                new = (misc_high & MASK_LVTOFF_HI) >> 20;
                goto set_offset;
        }

        /* Gather LVT offset for thresholding: */
        if (rdmsr_safe(MSR_CU_DEF_ERR, &smca_low, &smca_high))
                goto out;

        new = (smca_low & SMCA_THR_LVT_OFF) >> 12;

set_offset:
        offset = setup_APIC_mce_threshold(offset, new);
        if (offset == new)
                thresholding_irq_en = true;

done:
        mce_threshold_block_init(&b, offset);

out:
        return offset;
}

bool amd_filter_mce(struct mce *m)
{
        enum smca_bank_types bank_type = smca_get_bank_type(m->bank);
        struct cpuinfo_x86 *c = &boot_cpu_data;
        u8 xec = (m->status >> 16) & 0x3F;

        /* See Family 17h Models 10h-2Fh Erratum #1114. */
        if (c->x86 == 0x17 &&
            c->x86_model >= 0x10 && c->x86_model <= 0x2F &&
            bank_type == SMCA_IF && xec == 10)
                return true;

        return false;
}

/*
 * Turn off thresholding banks for the following conditions:
 * - MC4_MISC thresholding is not supported on Family 0x15.
 * - Prevent possible spurious interrupts from the IF bank on Family 0x17
 *   Models 0x10-0x2F due to Erratum #1114.
 */
static void disable_err_thresholding(struct cpuinfo_x86 *c, unsigned int bank)
{
        int i, num_msrs;
        u64 hwcr;
        bool need_toggle;
        u32 msrs[NR_BLOCKS];

        if (c->x86 == 0x15 && bank == 4) {
                msrs[0] = 0x00000413; /* MC4_MISC0 */
                msrs[1] = 0xc0000408; /* MC4_MISC1 */
                num_msrs = 2;
        } else if (c->x86 == 0x17 &&
                   (c->x86_model >= 0x10 && c->x86_model <= 0x2F)) {

                if (smca_get_bank_type(bank) != SMCA_IF)
                        return;

                msrs[0] = MSR_AMD64_SMCA_MCx_MISC(bank);
                num_msrs = 1;
        } else {
                return;
        }

        rdmsrl(MSR_K7_HWCR, hwcr);

        /* McStatusWrEn has to be set */
        need_toggle = !(hwcr & BIT(18));
        if (need_toggle)
                wrmsrl(MSR_K7_HWCR, hwcr | BIT(18));

        /* Clear CntP bit safely */
        for (i = 0; i < num_msrs; i++)
                msr_clear_bit(msrs[i], 62);

        /* restore old settings */
        if (need_toggle)
                wrmsrl(MSR_K7_HWCR, hwcr);
}

/* cpu init entry point, called from mce.c with preempt off */
void mce_amd_feature_init(struct cpuinfo_x86 *c)
{
        u32 low = 0, high = 0, address = 0;
        unsigned int bank, block, cpu = smp_processor_id();
        int offset = -1;

        for (bank = 0; bank < mca_cfg.banks; ++bank) {
                if (mce_flags.smca)
                        smca_configure(bank, cpu);

                disable_err_thresholding(c, bank);

                for (block = 0; block < NR_BLOCKS; ++block) {
                        address = get_block_address(address, low, high, bank, block);
                        if (!address)
                                break;

                        if (rdmsr_safe(address, &low, &high))
                                break;

                        if (!(high & MASK_VALID_HI))
                                continue;

                        if (!(high & MASK_CNTP_HI)  ||
                             (high & MASK_LOCKED_HI))
                                continue;

                        offset = prepare_threshold_block(bank, block, address, offset, high);
                }
        }

        if (mce_flags.succor)
                deferred_error_interrupt_enable(c);
}

int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr)
{
        u64 dram_base_addr, dram_limit_addr, dram_hole_base;
        /* We start from the normalized address */
        u64 ret_addr = norm_addr;

        u32 tmp;

        u8 die_id_shift, die_id_mask, socket_id_shift, socket_id_mask;
        u8 intlv_num_dies, intlv_num_chan, intlv_num_sockets;
        u8 intlv_addr_sel, intlv_addr_bit;
        u8 num_intlv_bits, hashed_bit;
        u8 lgcy_mmio_hole_en, base = 0;
        u8 cs_mask, cs_id = 0;
        bool hash_enabled = false;

        /* Read D18F0x1B4 (DramOffset), check if base 1 is used. */
        if (amd_df_indirect_read(nid, 0, 0x1B4, umc, &tmp))
                goto out_err;

        /* Remove HiAddrOffset from normalized address, if enabled: */
        if (tmp & BIT(0)) {
                u64 hi_addr_offset = (tmp & GENMASK_ULL(31, 20)) << 8;

                if (norm_addr >= hi_addr_offset) {
                        ret_addr -= hi_addr_offset;
                        base = 1;
                }
        }

        /* Read D18F0x110 (DramBaseAddress). */
        if (amd_df_indirect_read(nid, 0, 0x110 + (8 * base), umc, &tmp))
                goto out_err;

        /* Check if address range is valid. */
        if (!(tmp & BIT(0))) {
                pr_err("%s: Invalid DramBaseAddress range: 0x%x.\n",
                        __func__, tmp);
                goto out_err;
        }

        lgcy_mmio_hole_en = tmp & BIT(1);
        intlv_num_chan	  = (tmp >> 4) & 0xF;
        intlv_addr_sel	  = (tmp >> 8) & 0x7;
        dram_base_addr	  = (tmp & GENMASK_ULL(31, 12)) << 16;

        /* {0, 1, 2, 3} map to address bits {8, 9, 10, 11} respectively */
        if (intlv_addr_sel > 3) {
                pr_err("%s: Invalid interleave address select %d.\n",
                        __func__, intlv_addr_sel);
                goto out_err;
        }

        /* Read D18F0x114 (DramLimitAddress). */
        if (amd_df_indirect_read(nid, 0, 0x114 + (8 * base), umc, &tmp))
                goto out_err;

        intlv_num_sockets = (tmp >> 8) & 0x1;
        intlv_num_dies	  = (tmp >> 10) & 0x3;
        dram_limit_addr	  = ((tmp & GENMASK_ULL(31, 12)) << 16) | GENMASK_ULL(27, 0);

        intlv_addr_bit = intlv_addr_sel + 8;

        /* Re-use intlv_num_chan by setting it equal to log2(#channels) */
        switch (intlv_num_chan) {
        case 0:	intlv_num_chan = 0; break;
        case 1: intlv_num_chan = 1; break;
        case 3: intlv_num_chan = 2; break;
        case 5:	intlv_num_chan = 3; break;
        case 7:	intlv_num_chan = 4; break;

        case 8: intlv_num_chan = 1;
                hash_enabled = true;
                break;
        default:
                pr_err("%s: Invalid number of interleaved channels %d.\n",
                        __func__, intlv_num_chan);
                goto out_err;
        }

        num_intlv_bits = intlv_num_chan;

        if (intlv_num_dies > 2) {
                pr_err("%s: Invalid number of interleaved nodes/dies %d.\n",
                        __func__, intlv_num_dies);
                goto out_err;
        }

        num_intlv_bits += intlv_num_dies;

        /* Add a bit if sockets are interleaved. */
        num_intlv_bits += intlv_num_sockets;

        /* Assert num_intlv_bits <= 4 */
        if (num_intlv_bits > 4) {
                pr_err("%s: Invalid interleave bits %d.\n",
                        __func__, num_intlv_bits);
                goto out_err;
        }

        if (num_intlv_bits > 0) {
                u64 temp_addr_x, temp_addr_i, temp_addr_y;
                u8 die_id_bit, sock_id_bit, cs_fabric_id;

                /*
                 * Read FabricBlockInstanceInformation3_CS[BlockFabricID].
                 * This is the fabric id for this coherent slave. Use
                 * umc/channel# as instance id of the coherent slave
                 * for FICAA.
                 */
                if (amd_df_indirect_read(nid, 0, 0x50, umc, &tmp))
                        goto out_err;

                cs_fabric_id = (tmp >> 8) & 0xFF;
                die_id_bit   = 0;

                /* If interleaved over more than 1 channel: */
                if (intlv_num_chan) {
                        die_id_bit = intlv_num_chan;
                        cs_mask	   = (1 << die_id_bit) - 1;
                        cs_id	   = cs_fabric_id & cs_mask;
                }

                sock_id_bit = die_id_bit;

                /* Read D18F1x208 (SystemFabricIdMask). */
                if (intlv_num_dies || intlv_num_sockets)
                        if (amd_df_indirect_read(nid, 1, 0x208, umc, &tmp))
                                goto out_err;

                /* If interleaved over more than 1 die. */
                if (intlv_num_dies) {
                        sock_id_bit  = die_id_bit + intlv_num_dies;
                        die_id_shift = (tmp >> 24) & 0xF;
                        die_id_mask  = (tmp >> 8) & 0xFF;

                        cs_id |= ((cs_fabric_id & die_id_mask) >> die_id_shift) << die_id_bit;
                }

                /* If interleaved over more than 1 socket. */
                if (intlv_num_sockets) {
                        socket_id_shift	= (tmp >> 28) & 0xF;
                        socket_id_mask	= (tmp >> 16) & 0xFF;

                        cs_id |= ((cs_fabric_id & socket_id_mask) >> socket_id_shift) << sock_id_bit;
                }

                /*
                 * The pre-interleaved address consists of XXXXXXIIIYYYYY
                 * where III is the ID for this CS, and XXXXXXYYYYY are the
                 * address bits from the post-interleaved address.
                 * "num_intlv_bits" has been calculated to tell us how many "I"
                 * bits there are. "intlv_addr_bit" tells us how many "Y" bits
                 * there are (where "I" starts).
                 */
                temp_addr_y = ret_addr & GENMASK_ULL(intlv_addr_bit-1, 0);
                temp_addr_i = (cs_id << intlv_addr_bit);
                temp_addr_x = (ret_addr & GENMASK_ULL(63, intlv_addr_bit)) << num_intlv_bits;
                ret_addr    = temp_addr_x | temp_addr_i | temp_addr_y;
        }

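        /*
         * Worked example (added for clarity): with one interleave bit
         * (num_intlv_bits == 1), intlv_addr_bit == 8 and cs_id == 1, a
         * normalized address of 0x1234 splits into Y = 0x34 (bits 7:0),
         * I = 1 << 8 = 0x100, and X = (0x1234 & ~0xFFull) << 1 = 0x2400,
         * giving a pre-interleaved address of 0x2534.
         */
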
        /* Add dram base address */
        ret_addr += dram_base_addr;

        /* If legacy MMIO hole enabled */
        if (lgcy_mmio_hole_en) {
                if (amd_df_indirect_read(nid, 0, 0x104, umc, &tmp))
                        goto out_err;

                dram_hole_base = tmp & GENMASK(31, 24);
                if (ret_addr >= dram_hole_base)
                        ret_addr += (BIT_ULL(32) - dram_hole_base);
        }

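        /*
         * Worked example (added for clarity): with a hole base of
         * 0xE0000000, BIT_ULL(32) - dram_hole_base == 0x20000000, so any
         * candidate address at or above the hole is pushed up past 4GB by
         * exactly the size of the reserved MMIO region.
         */
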
        if (hash_enabled) {
                /* Save some parentheses and grab ls-bit at the end. */
                hashed_bit =	(ret_addr >> 12) ^
                                (ret_addr >> 18) ^
                                (ret_addr >> 21) ^
                                (ret_addr >> 30) ^
                                cs_id;

                hashed_bit &= BIT(0);

                if (hashed_bit != ((ret_addr >> intlv_addr_bit) & BIT(0)))
                        ret_addr ^= BIT(intlv_addr_bit);
        }

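        /*
         * Example (added for clarity): the hash XORs address bits 12, 18,
         * 21 and 30 with cs_id bit 0. For ret_addr == 0x40000 (only bit 18
         * set) and cs_id == 0 the hash is 1; if interleave bit 8 currently
         * reads 0, it is flipped, yielding 0x40100.
         */
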
        /* Is the calculated system address above the DRAM limit address? */
        if (ret_addr > dram_limit_addr)
                goto out_err;

        *sys_addr = ret_addr;

        return 0;

out_err:
        return -EINVAL;
}
EXPORT_SYMBOL_GPL(umc_normaddr_to_sysaddr);

bool amd_mce_is_memory_error(struct mce *m)
{
        /* ErrCodeExt[20:16] */
        u8 xec = (m->status >> 16) & 0x1f;

        if (mce_flags.smca)
                return smca_get_bank_type(m->bank) == SMCA_UMC && xec == 0x0;

        return m->bank == 4 && xec == 0x8;
}

static void __log_error(unsigned int bank, u64 status, u64 addr, u64 misc)
{
        struct mce m;

        mce_setup(&m);

        m.status = status;
        m.misc   = misc;
        m.bank   = bank;
        m.tsc	 = rdtsc();

        if (m.status & MCI_STATUS_ADDRV) {
                m.addr = addr;

                /*
                 * Extract [55:<lsb>] where lsb is the least significant
                 * *valid* bit of the address bits.
                 */
                if (mce_flags.smca) {
                        u8 lsb = (m.addr >> 56) & 0x3f;

                        m.addr &= GENMASK_ULL(55, lsb);
                }
        }

        if (mce_flags.smca) {
                rdmsrl(MSR_AMD64_SMCA_MCx_IPID(bank), m.ipid);

                if (m.status & MCI_STATUS_SYNDV)
                        rdmsrl(MSR_AMD64_SMCA_MCx_SYND(bank), m.synd);
        }

        mce_log(&m);
}

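/*
 * Example (added for clarity): MCA_ADDR bits [61:56] (LSB) encode the
 * least significant valid address bit. If LSB reads 12, only bits [55:12]
 * are meaningful and the masking above zeroes the sub-4KB offset bits.
 */
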
asmlinkage __visible void __irq_entry smp_deferred_error_interrupt(struct pt_regs *regs)
{
        entering_irq();
        trace_deferred_error_apic_entry(DEFERRED_ERROR_VECTOR);
        inc_irq_stat(irq_deferred_error_count);
        deferred_error_int_vector();
        trace_deferred_error_apic_exit(DEFERRED_ERROR_VECTOR);
        exiting_ack_irq();
}

/*
 * Returns true if the logged error is deferred, false otherwise.
 */
static bool
_log_error_bank(unsigned int bank, u32 msr_stat, u32 msr_addr, u64 misc)
{
        u64 status, addr = 0;

        rdmsrl(msr_stat, status);
        if (!(status & MCI_STATUS_VAL))
                return false;

        if (status & MCI_STATUS_ADDRV)
                rdmsrl(msr_addr, addr);

        __log_error(bank, status, addr, misc);

        wrmsrl(msr_stat, 0);

        return status & MCI_STATUS_DEFERRED;
}

/*
 * We have three scenarios for checking for Deferred errors:
 *
 * 1) Non-SMCA systems check MCA_STATUS and log error if found.
 * 2) SMCA systems check MCA_STATUS. If error is found then log it and also
 *    clear MCA_DESTAT.
 * 3) SMCA systems check MCA_DESTAT, if error was not found in MCA_STATUS, and
 *    log it.
 */
static void log_error_deferred(unsigned int bank)
{
        bool defrd;

        defrd = _log_error_bank(bank, msr_ops.status(bank),
                                        msr_ops.addr(bank), 0);

        if (!mce_flags.smca)
                return;

        /* Clear MCA_DESTAT if we logged the deferred error from MCA_STATUS. */
        if (defrd) {
                wrmsrl(MSR_AMD64_SMCA_MCx_DESTAT(bank), 0);
                return;
        }

        /*
         * Only deferred errors are logged in MCA_DE{STAT,ADDR} so just check
         * for a valid error.
         */
        _log_error_bank(bank, MSR_AMD64_SMCA_MCx_DESTAT(bank),
                              MSR_AMD64_SMCA_MCx_DEADDR(bank), 0);
}

/* APIC interrupt handler for deferred errors */
static void amd_deferred_error_interrupt(void)
{
        unsigned int bank;

        for (bank = 0; bank < mca_cfg.banks; ++bank)
                log_error_deferred(bank);
}

static void log_error_thresholding(unsigned int bank, u64 misc)
{
        _log_error_bank(bank, msr_ops.status(bank), msr_ops.addr(bank), misc);
}

static void log_and_reset_block(struct threshold_block *block)
{
        struct thresh_restart tr;
        u32 low = 0, high = 0;

        if (!block)
                return;

        if (rdmsr_safe(block->address, &low, &high))
                return;

        if (!(high & MASK_OVERFLOW_HI))
                return;

        /* Log the MCE which caused the threshold event. */
        log_error_thresholding(block->bank, ((u64)high << 32) | low);

        /* Reset threshold block after logging error. */
        memset(&tr, 0, sizeof(tr));
        tr.b = block;
        threshold_restart_bank(&tr);
}

/*
 * Threshold interrupt handler will service THRESHOLD_APIC_VECTOR. The interrupt
 * goes off when error_count reaches threshold_limit.
 */
static void amd_threshold_interrupt(void)
{
        struct threshold_block *first_block = NULL, *block = NULL, *tmp = NULL;
        unsigned int bank, cpu = smp_processor_id();

        for (bank = 0; bank < mca_cfg.banks; ++bank) {
                if (!(per_cpu(bank_map, cpu) & (1 << bank)))
                        continue;

                first_block = per_cpu(threshold_banks, cpu)[bank]->blocks;
                if (!first_block)
                        continue;

                /*
                 * The first block is also the head of the list. Check it first
                 * before iterating over the rest.
                 */
                log_and_reset_block(first_block);
                list_for_each_entry_safe(block, tmp, &first_block->miscj, miscj)
                        log_and_reset_block(block);
        }
}

/*
 * Sysfs Interface
 */

struct threshold_attr {
        struct attribute attr;
        ssize_t (*show) (struct threshold_block *, char *);
        ssize_t (*store) (struct threshold_block *, const char *, size_t count);
};

#define SHOW_FIELDS(name)						\
static ssize_t show_ ## name(struct threshold_block *b, char *buf)	\
{									\
        return sprintf(buf, "%lu\n", (unsigned long) b->name);		\
}
SHOW_FIELDS(interrupt_enable)
SHOW_FIELDS(threshold_limit)

static ssize_t
store_interrupt_enable(struct threshold_block *b, const char *buf, size_t size)
{
        struct thresh_restart tr;
        unsigned long new;

        if (!b->interrupt_capable)
                return -EINVAL;

        if (kstrtoul(buf, 0, &new) < 0)
                return -EINVAL;

        b->interrupt_enable = !!new;

        memset(&tr, 0, sizeof(tr));
        tr.b		= b;

        smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);

        return size;
}

static ssize_t
store_threshold_limit(struct threshold_block *b, const char *buf, size_t size)
{
        struct thresh_restart tr;
        unsigned long new;

        if (kstrtoul(buf, 0, &new) < 0)
                return -EINVAL;

        if (new > THRESHOLD_MAX)
                new = THRESHOLD_MAX;
        if (new < 1)
                new = 1;

        memset(&tr, 0, sizeof(tr));
        tr.old_limit = b->threshold_limit;
        b->threshold_limit = new;
        tr.b = b;

        smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);

        return size;
}

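/*
 * Usage example (added for clarity; the exact layout depends on the CPU
 * and running kernel): these attributes typically appear under
 * /sys/devices/system/machinecheck/machinecheckN/threshold_bankM/, and
 * writing e.g. "25" to a block's threshold_limit file arms an interrupt
 * after 25 corrected errors in that block.
 */
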
static ssize_t show_error_count(struct threshold_block *b, char *buf)
{
        u32 lo, hi;

        rdmsr_on_cpu(b->cpu, b->address, &lo, &hi);

        return sprintf(buf, "%u\n", ((hi & THRESHOLD_MAX) -
                                     (THRESHOLD_MAX - b->threshold_limit)));
}

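/*
 * Worked example (added for clarity): this undoes the seeding done in
 * threshold_restart_bank(). With threshold_limit == 10 and three errors
 * counted so far, the hardware field reads 0xFF5 + 3 = 0xFF8, and
 * 0xFF8 - (0xFFF - 10) == 3 is what user space sees.
 */
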
static struct threshold_attr error_count = {
        .attr = {.name = __stringify(error_count), .mode = 0444 },
        .show = show_error_count,
};

#define RW_ATTR(val)							\
static struct threshold_attr val = {					\
        .attr	= {.name = __stringify(val), .mode = 0644 },		\
        .show	= show_## val,						\
        .store	= store_## val,						\
};

RW_ATTR(interrupt_enable);
RW_ATTR(threshold_limit);

static struct attribute *default_attrs[] = {
        &threshold_limit.attr,
        &error_count.attr,
        NULL,	/* possibly interrupt_enable if supported, see below */
};

#define to_block(k)	container_of(k, struct threshold_block, kobj)
#define to_attr(a)	container_of(a, struct threshold_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
        struct threshold_block *b = to_block(kobj);
        struct threshold_attr *a = to_attr(attr);
        ssize_t ret;

        ret = a->show ? a->show(b, buf) : -EIO;

        return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
                     const char *buf, size_t count)
{
        struct threshold_block *b = to_block(kobj);
        struct threshold_attr *a = to_attr(attr);
        ssize_t ret;

        ret = a->store ? a->store(b, buf, count) : -EIO;

        return ret;
}

static const struct sysfs_ops threshold_ops = {
        .show			= show,
        .store			= store,
};

static struct kobj_type threshold_ktype = {
        .sysfs_ops		= &threshold_ops,
        .default_attrs		= default_attrs,
};

static const char *get_name(unsigned int bank, struct threshold_block *b)
{
        enum smca_bank_types bank_type;

        if (!mce_flags.smca) {
                if (b && bank == 4)
                        return bank4_names(b);

                return th_names[bank];
        }

        bank_type = smca_get_bank_type(bank);
        if (bank_type >= N_SMCA_BANK_TYPES)
                return NULL;

        if (b && bank_type == SMCA_UMC) {
                if (b->block < ARRAY_SIZE(smca_umc_block_names))
                        return smca_umc_block_names[b->block];
                return NULL;
        }

        if (smca_banks[bank].hwid->count == 1)
                return smca_get_name(bank_type);

        snprintf(buf_mcatype, MAX_MCATYPE_NAME_LEN,
                 "%s_%x", smca_get_name(bank_type),
                          smca_banks[bank].sysfs_id);
        return buf_mcatype;
}

static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank,
                                     unsigned int block, u32 address)
{
        struct threshold_block *b = NULL;
        u32 low, high;
        int err;

        if ((bank >= mca_cfg.banks) || (block >= NR_BLOCKS))
                return 0;

        if (rdmsr_safe_on_cpu(cpu, address, &low, &high))
                return 0;

        if (!(high & MASK_VALID_HI)) {
                if (block)
                        goto recurse;
                else
                        return 0;
        }

        if (!(high & MASK_CNTP_HI)  ||
             (high & MASK_LOCKED_HI))
                goto recurse;

        b = kzalloc(sizeof(struct threshold_block), GFP_KERNEL);
        if (!b)
                return -ENOMEM;

        b->block		= block;
        b->bank			= bank;
        b->cpu			= cpu;
        b->address		= address;
        b->interrupt_enable	= 0;
        b->interrupt_capable	= lvt_interrupt_supported(bank, high);
        b->threshold_limit	= THRESHOLD_MAX;

        if (b->interrupt_capable) {
                threshold_ktype.default_attrs[2] = &interrupt_enable.attr;
                b->interrupt_enable = 1;
        } else {
                threshold_ktype.default_attrs[2] = NULL;
        }

        INIT_LIST_HEAD(&b->miscj);

        if (per_cpu(threshold_banks, cpu)[bank]->blocks) {
                list_add(&b->miscj,
                         &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj);
        } else {
                per_cpu(threshold_banks, cpu)[bank]->blocks = b;
        }

        err = kobject_init_and_add(&b->kobj, &threshold_ktype,
                                   per_cpu(threshold_banks, cpu)[bank]->kobj,
                                   get_name(bank, b));
        if (err)
                goto out_free;
recurse:
        address = get_block_address(address, low, high, bank, ++block);
        if (!address)
                return 0;

        err = allocate_threshold_blocks(cpu, bank, block, address);
        if (err)
                goto out_free;

        if (b)
                kobject_uevent(&b->kobj, KOBJ_ADD);

        return err;

out_free:
        if (b) {
                kobject_put(&b->kobj);
                list_del(&b->miscj);
                kfree(b);
        }
        return err;
}

static int __threshold_add_blocks(struct threshold_bank *b)
{
        struct list_head *head = &b->blocks->miscj;
        struct threshold_block *pos = NULL;
        struct threshold_block *tmp = NULL;
        int err = 0;

        err = kobject_add(&b->blocks->kobj, b->kobj, b->blocks->kobj.name);
        if (err)
                return err;

        list_for_each_entry_safe(pos, tmp, head, miscj) {

                err = kobject_add(&pos->kobj, b->kobj, pos->kobj.name);
                if (err) {
                        list_for_each_entry_safe_reverse(pos, tmp, head, miscj)
                                kobject_del(&pos->kobj);

                        return err;
                }
        }
        return err;
}

static int threshold_create_bank(unsigned int cpu, unsigned int bank)
{
        struct device *dev = per_cpu(mce_device, cpu);
        struct amd_northbridge *nb = NULL;
        struct threshold_bank *b = NULL;
        const char *name = get_name(bank, NULL);
        int err = 0;

        if (!dev)
                return -ENODEV;

        if (is_shared_bank(bank)) {
                nb = node_to_amd_nb(amd_get_nb_id(cpu));

                /* threshold descriptor already initialized on this node? */
                if (nb && nb->bank4) {
                        /* yes, use it */
                        b = nb->bank4;
                        err = kobject_add(b->kobj, &dev->kobj, name);
                        if (err)
                                goto out;

                        per_cpu(threshold_banks, cpu)[bank] = b;
                        refcount_inc(&b->cpus);

                        err = __threshold_add_blocks(b);

                        goto out;
                }
        }

        b = kzalloc(sizeof(struct threshold_bank), GFP_KERNEL);
        if (!b) {
                err = -ENOMEM;
                goto out;
        }

        b->kobj = kobject_create_and_add(name, &dev->kobj);
        if (!b->kobj) {
                err = -EINVAL;
                goto out_free;
        }

        per_cpu(threshold_banks, cpu)[bank] = b;

        if (is_shared_bank(bank)) {
                refcount_set(&b->cpus, 1);

                /* nb is already initialized, see above */
                if (nb) {
                        WARN_ON(nb->bank4);
                        nb->bank4 = b;
                }
        }

        err = allocate_threshold_blocks(cpu, bank, 0, msr_ops.misc(bank));
        if (!err)
                goto out;

out_free:
        kfree(b);

out:
        return err;
}

static void deallocate_threshold_block(unsigned int cpu, int bank)
{
        struct threshold_block *pos = NULL;
        struct threshold_block *tmp = NULL;
        struct threshold_bank *head = per_cpu(threshold_banks, cpu)[bank];

        if (!head)
                return;

        list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {
                kobject_put(&pos->kobj);
                list_del(&pos->miscj);
                kfree(pos);
        }

        kfree(per_cpu(threshold_banks, cpu)[bank]->blocks);
        per_cpu(threshold_banks, cpu)[bank]->blocks = NULL;
}

static void __threshold_remove_blocks(struct threshold_bank *b)
{
        struct threshold_block *pos = NULL;
        struct threshold_block *tmp = NULL;

        kobject_del(b->kobj);

        list_for_each_entry_safe(pos, tmp, &b->blocks->miscj, miscj)
                kobject_del(&pos->kobj);
}

static void threshold_remove_bank(unsigned int cpu, int bank)
{
        struct amd_northbridge *nb;
        struct threshold_bank *b;

        b = per_cpu(threshold_banks, cpu)[bank];
        if (!b)
                return;

        if (!b->blocks)
                goto free_out;

        if (is_shared_bank(bank)) {
                if (!refcount_dec_and_test(&b->cpus)) {
                        __threshold_remove_blocks(b);
                        per_cpu(threshold_banks, cpu)[bank] = NULL;
                        return;
                } else {
                        /*
                         * The last CPU on this node using the shared bank is
                         * going away, remove that bank now.
                         */
                        nb = node_to_amd_nb(amd_get_nb_id(cpu));
                        nb->bank4 = NULL;
                }
        }

        deallocate_threshold_block(cpu, bank);

free_out:
        kobject_del(b->kobj);
        kobject_put(b->kobj);
        kfree(b);
        per_cpu(threshold_banks, cpu)[bank] = NULL;
}

int mce_threshold_remove_device(unsigned int cpu)
{
        unsigned int bank;

        for (bank = 0; bank < mca_cfg.banks; ++bank) {
                if (!(per_cpu(bank_map, cpu) & (1 << bank)))
                        continue;
                threshold_remove_bank(cpu, bank);
        }
        kfree(per_cpu(threshold_banks, cpu));
        per_cpu(threshold_banks, cpu) = NULL;
        return 0;
}

/* create dir/files for all valid threshold banks */
int mce_threshold_create_device(unsigned int cpu)
{
        unsigned int bank;
        struct threshold_bank **bp;
        int err = 0;

        bp = per_cpu(threshold_banks, cpu);
        if (bp)
                return 0;

        bp = kcalloc(mca_cfg.banks, sizeof(struct threshold_bank *),
                     GFP_KERNEL);
        if (!bp)
                return -ENOMEM;

        per_cpu(threshold_banks, cpu) = bp;

        for (bank = 0; bank < mca_cfg.banks; ++bank) {
                if (!(per_cpu(bank_map, cpu) & (1 << bank)))
                        continue;
                err = threshold_create_bank(cpu, bank);
                if (err)
                        goto err;
        }
        return err;
err:
        mce_threshold_remove_device(cpu);
        return err;
}

static __init int threshold_init_device(void)
{
        unsigned lcpu = 0;

        /* to hit CPUs online before the notifier is up */
        for_each_online_cpu(lcpu) {
                int err = mce_threshold_create_device(lcpu);

                if (err)
                        return err;
        }

        if (thresholding_irq_en)
                mce_threshold_vector = amd_threshold_interrupt;

        return 0;
}

/*
 * There are three funcs which need to be _initcalled in a logical sequence:
 * 1. xen_late_init_mcelog
 * 2. mcheck_init_device
 * 3. threshold_init_device
 *
 * xen_late_init_mcelog must register xen_mce_chrdev_device before
 * native mce_chrdev_device registration if running under the xen platform.
 *
 * mcheck_init_device should be called before threshold_init_device to
 * initialize mce_device, otherwise a NULL ptr dereference will cause a panic.
 *
 * So we use the following _initcalls:
 * 1. device_initcall(xen_late_init_mcelog);
 * 2. device_initcall_sync(mcheck_init_device);
 * 3. late_initcall(threshold_init_device);
 *
 * When running under xen, the initcall order is 1, 2, 3;
 * on baremetal, we skip 1 and do only 2 and 3.
 */
late_initcall(threshold_init_device);