// SPDX-License-Identifier: GPL-2.0-only
#include "amd64_edac.h"
#include <asm/amd_nb.h>

static struct edac_pci_ctl_info *pci_ctl;

/*
 * Set by command line parameter. If BIOS has enabled the ECC, this override is
 * cleared to prevent re-enabling the hardware by this driver.
 */
static int ecc_enable_override;
module_param(ecc_enable_override, int, 0644);

static struct msr __percpu *msrs;
static inline u32 get_umc_reg(struct amd64_pvt *pvt, u32 reg)
{
	if (!pvt->flags.zn_regs_v2)
		return reg;

	switch (reg) {
	case UMCCH_ADDR_CFG:		return UMCCH_ADDR_CFG_DDR5;
	case UMCCH_ADDR_MASK_SEC:	return UMCCH_ADDR_MASK_SEC_DDR5;
	case UMCCH_DIMM_CFG:		return UMCCH_DIMM_CFG_DDR5;
	}

	WARN_ONCE(1, "%s: unknown register 0x%x", __func__, reg);
	return 0;
}
/* Per-node stuff */
static struct ecc_settings **ecc_stngs;

/* Device for the PCI component */
static struct device *pci_ctl_dev;

/*
 * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
 * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
 * or higher value'.
 *
 * FIXME: Produce a better mapping/linearisation.
 */
static const struct scrubrate {
	u32 scrubval;		/* bit pattern for scrub rate */
	u32 bandwidth;		/* bandwidth consumed (bytes/sec) */
} scrubrates[] = {
	{ 0x01, 1600000000UL},
	{ 0x00, 0UL},		/* scrubbing off */
};
int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
			       u32 *val, const char *func)
{
	int err;

	err = pci_read_config_dword(pdev, offset, val);
	if (err)
		amd64_warn("%s: error reading F%dx%03x.\n",
			   func, PCI_FUNC(pdev->devfn), offset);

	return err;
}

int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
				u32 val, const char *func)
{
	int err;

	err = pci_write_config_dword(pdev, offset, val);
	if (err)
		amd64_warn("%s: error writing to F%dx%03x.\n",
			   func, PCI_FUNC(pdev->devfn), offset);

	return err;
}
/*
 * Select DCT to which PCI cfg accesses are routed
 */
static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
{
	u32 reg = 0;

	amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
	reg &= (pvt->model == 0x30) ? ~3 : ~1;
	reg |= dct;
	amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
}
/*
 * Depending on the family, F2 DCT reads need special handling:
 *
 * K8: has a single DCT only and no address offsets >= 0x100
 *
 * F10h: each DCT has its own set of regs
 *	DCT0 -> F2x040..
 *	DCT1 -> F2x140..
 *
 * F16h: has only 1 DCT
 *
 * F15h: we select which DCT we access using F1x10C[DctCfgSel]
 */
static inline int amd64_read_dct_pci_cfg(struct amd64_pvt *pvt, u8 dct,
					 int offset, u32 *val)
{
	switch (pvt->fam) {
	case 0xf:
		if (dct || offset >= 0x100)
			return -EINVAL;
		break;

	case 0x10:
		if (dct) {
			/*
			 * Note: If ganging is enabled, barring the regs
			 * F2x[1,0]98 and F2x[1,0]9C; reads to F2x1xx
			 * return 0. (cf. Section 2.8.1 F10h BKDG)
			 */
			if (dct_ganging_enabled(pvt))
				return 0;

			offset += 0x100;
		}
		break;

	case 0x15:
		/*
		 * F15h: F2x1xx addresses do not map explicitly to DCT1.
		 * We should select which DCT we access using F1x10C[DctCfgSel]
		 */
		dct = (dct && pvt->model == 0x30) ? 3 : dct;
		f15h_select_dct(pvt, dct);
		break;

	case 0x16:
		if (dct)
			return -EINVAL;
		break;

	default:
		break;
	}
	return amd64_read_pci_cfg(pvt->F2, offset, val);
}
/*
 * Memory scrubber control interface. For K8, memory scrubbing is handled by
 * hardware and can involve L2 cache, dcache as well as the main memory. With
 * F10, this is extended to L3 cache scrubbing on CPU models sporting that
 * functionality.
 *
 * This causes the "units" for the scrubbing speed to vary from 64 byte blocks
 * (dram) over to cache lines. This is nasty, so we will use bandwidth in
 * bytes/sec for the setting.
 *
 * Currently, we only do dram scrubbing. If the scrubbing is done in software on
 * other archs, we might not have access to the caches directly.
 */

/*
 * Scan the scrub rate mapping table for a close or matching bandwidth value to
 * issue. If the requested rate is too big, then use the last maximum value found.
 */
static int __set_scrub_rate(struct amd64_pvt *pvt, u32 new_bw, u32 min_rate)
{
	u32 scrubval;
	int i;

	/*
	 * map the configured rate (new_bw) to a value specific to the AMD64
	 * memory controller and apply to register. Search for the first
	 * bandwidth entry that is greater than or equal to the setting
	 * requested and program that. If at last entry, turn off DRAM scrubbing.
	 *
	 * If no suitable bandwidth is found, turn off DRAM scrubbing entirely
	 * by falling back to the last element in scrubrates[].
	 */
	for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
		/*
		 * skip scrub rates which aren't recommended
		 * (see F10 BKDG, F3x58)
		 */
		if (scrubrates[i].scrubval < min_rate)
			continue;

		if (scrubrates[i].bandwidth <= new_bw)
			break;
	}

	scrubval = scrubrates[i].scrubval;

	if (pvt->fam == 0x15 && pvt->model == 0x60) {
		f15h_select_dct(pvt, 0);
		pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
		f15h_select_dct(pvt, 1);
		pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
	} else {
		pci_write_bits32(pvt->F3, SCRCTRL, scrubval, 0x001F);
	}

	if (scrubval)
		return scrubrates[i].bandwidth;

	return 0;
}
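/*
 * Worked example (illustrative values chosen for clarity, not quoted from a
 * BKDG table): assuming a descending table such as { {0x01, 1600000000},
 * {0x02, 800000000}, ..., {0x00, 0} }, a request of new_bw = 1000000000
 * bytes/sec stops the scan at the first entry whose bandwidth is <= new_bw,
 * i.e. 800000000, so scrubval 0x02 is programmed and 800000000 is returned
 * to the caller as the rate actually in effect.
 */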
static int set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u32 min_scrubrate = 0x5;

	if (pvt->fam == 0xf)
		min_scrubrate = 0x0;

	if (pvt->fam == 0x15) {
		/* Erratum #505 */
		if (pvt->model < 0x10)
			f15h_select_dct(pvt, 0);

		if (pvt->model == 0x60)
			min_scrubrate = 0x6;
	}
	return __set_scrub_rate(pvt, bw, min_scrubrate);
}
static int get_scrub_rate(struct mem_ctl_info *mci)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	int i, retval = -EINVAL;
	u32 scrubval = 0;

	if (pvt->fam == 0x15) {
		/* Erratum #505 */
		if (pvt->model < 0x10)
			f15h_select_dct(pvt, 0);

		if (pvt->model == 0x60)
			amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval);
		else
			amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
	} else {
		amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
	}

	scrubval = scrubval & 0x001F;

	for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
		if (scrubrates[i].scrubval == scrubval) {
			retval = scrubrates[i].bandwidth;
			break;
		}
	}
	return retval;
}
/*
 * returns true if the SysAddr given by sys_addr matches the
 * DRAM base/limit associated with node_id
 */
static bool base_limit_match(struct amd64_pvt *pvt, u64 sys_addr, u8 nid)
{
	u64 addr;

	/* The K8 treats this as a 40-bit value. However, bits 63-40 will be
	 * all ones if the most significant implemented address bit is 1.
	 * Here we discard bits 63-40. See section 3.4.2 of AMD publication
	 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
	 * Application Programming.
	 */
	addr = sys_addr & 0x000000ffffffffffull;

	return ((addr >= get_dram_base(pvt, nid)) &&
		(addr <= get_dram_limit(pvt, nid)));
}
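/*
 * Example (illustrative): sys_addr 0xffffff8000000000, whose bits 63-40 are
 * all ones because implemented address bit 39 is set, is truncated to
 * addr 0x0000008000000000 before being compared against the node's DRAM
 * base/limit pair.
 */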
/*
 * Attempt to map a SysAddr to a node. On success, return a pointer to the
 * mem_ctl_info structure for the node that the SysAddr maps to.
 *
 * On failure, return NULL.
 */
static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
						u64 sys_addr)
{
	struct amd64_pvt *pvt;
	u8 node_id;
	u32 intlv_en, bits;

	/*
	 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
	 * 3.4.4.2) registers to map the SysAddr to a node ID.
	 */
	pvt = mci->pvt_info;

	/*
	 * The value of this field should be the same for all DRAM Base
	 * registers. Therefore we arbitrarily choose to read it from the
	 * register for node 0.
	 */
	intlv_en = dram_intlv_en(pvt, 0);

	if (intlv_en == 0) {
		for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
			if (base_limit_match(pvt, sys_addr, node_id))
				goto found;
		}
		goto err_no_match;
	}

	if (unlikely((intlv_en != 0x01) &&
		     (intlv_en != 0x03) &&
		     (intlv_en != 0x07))) {
		amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en);
		return NULL;
	}

	bits = (((u32) sys_addr) >> 12) & intlv_en;

	for (node_id = 0; ; ) {
		if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits)
			break;	/* intlv_sel field matches */

		if (++node_id >= DRAM_RANGES)
			goto err_no_match;
	}

	/* sanity test for sys_addr */
	if (unlikely(!base_limit_match(pvt, sys_addr, node_id))) {
		amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address "
			   "range for node %d with node interleaving enabled.\n",
			   __func__, sys_addr, node_id);
		return NULL;
	}

found:
	return edac_mc_find((int)node_id);

err_no_match:
	edac_dbg(2, "sys_addr 0x%lx doesn't match any node\n",
		 (unsigned long)sys_addr);

	return NULL;
}
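/*
 * Example (illustrative): with two-way node interleave (intlv_en = 0x01),
 * SysAddr bit 12 selects the node, so sys_addr 0x1000 yields bits = 0x1 and
 * matches the node whose DRAM Base[IntlvSel] field, masked by intlv_en,
 * reads back 0x1.
 */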
/*
 * compute the CS base address of the @csrow on the DRAM controller @dct.
 * For details see F2x[5C:40] in the processor's BKDG
 */
static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
				 u64 *base, u64 *mask)
{
	u64 csbase, csmask, base_bits, mask_bits;
	u8 addr_shift;

	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
		csbase		= pvt->csels[dct].csbases[csrow];
		csmask		= pvt->csels[dct].csmasks[csrow];
		base_bits	= GENMASK_ULL(31, 21) | GENMASK_ULL(15, 9);
		mask_bits	= GENMASK_ULL(29, 21) | GENMASK_ULL(15, 9);
		addr_shift	= 4;

	/*
	 * F16h and F15h, models 30h and later need two addr_shift values:
	 * 8 for high and 6 for low (cf. F16h BKDG).
	 */
	} else if (pvt->fam == 0x16 ||
		   (pvt->fam == 0x15 && pvt->model >= 0x30)) {
		csbase		= pvt->csels[dct].csbases[csrow];
		csmask		= pvt->csels[dct].csmasks[csrow >> 1];

		*base  = (csbase & GENMASK_ULL(15, 5)) << 6;
		*base |= (csbase & GENMASK_ULL(30, 19)) << 8;

		*mask = ~0ULL;
		/* poke holes for the csmask */
		*mask &= ~((GENMASK_ULL(15, 5) << 6) |
			   (GENMASK_ULL(30, 19) << 8));

		*mask |= (csmask & GENMASK_ULL(15, 5)) << 6;
		*mask |= (csmask & GENMASK_ULL(30, 19)) << 8;

		return;
	} else {
		csbase		= pvt->csels[dct].csbases[csrow];
		csmask		= pvt->csels[dct].csmasks[csrow >> 1];
		addr_shift	= 8;

		if (pvt->fam == 0x15)
			base_bits = mask_bits =
				GENMASK_ULL(30, 19) | GENMASK_ULL(13, 5);
		else
			base_bits = mask_bits =
				GENMASK_ULL(28, 19) | GENMASK_ULL(13, 5);
	}

	*base = (csbase & base_bits) << addr_shift;

	*mask = ~0ULL;
	/* poke holes for the csmask */
	*mask &= ~(mask_bits << addr_shift);
	/* OR them in */
	*mask |= (csmask & mask_bits) << addr_shift;
}
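/*
 * Example (illustrative, pre-F16h layout with addr_shift = 8): a csbase
 * register value of 0x00380001 has its enable bit 0 masked off by base_bits
 * and contributes (0x00380000 & base_bits) << 8 = 0x38000000 to *base. The
 * corresponding csmask then has "holes" poked into an all-ones *mask, so
 * only the address bits the hardware actually compares remain cleared.
 */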
#define for_each_chip_select(i, dct, pvt) \
	for (i = 0; i < pvt->csels[dct].b_cnt; i++)

#define chip_select_base(i, dct, pvt) \
	pvt->csels[dct].csbases[i]

#define for_each_chip_select_mask(i, dct, pvt) \
	for (i = 0; i < pvt->csels[dct].m_cnt; i++)

#define for_each_umc(i) \
	for (i = 0; i < pvt->max_mcs; i++)
/*
 * @input_addr is an InputAddr associated with the node given by mci. Return the
 * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
 */
static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
{
	struct amd64_pvt *pvt;
	int csrow;
	u64 base, mask;

	pvt = mci->pvt_info;

	for_each_chip_select(csrow, 0, pvt) {
		if (!csrow_enabled(csrow, 0, pvt))
			continue;

		get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);

		mask = ~mask;

		if ((input_addr & mask) == (base & mask)) {
			edac_dbg(2, "InputAddr 0x%lx matches csrow %d (node %d)\n",
				 (unsigned long)input_addr, csrow,
				 pvt->mc_node_id);

			return csrow;
		}
	}
	edac_dbg(2, "no matching csrow for InputAddr 0x%lx (MC node %d)\n",
		 (unsigned long)input_addr, pvt->mc_node_id);

	return -1;
}
/*
 * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
 * for the node represented by mci. Info is passed back in *hole_base,
 * *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if
 * info is invalid. Info may be invalid for either of the following reasons:
 *
 * - The revision of the node is not E or greater. In this case, the DRAM Hole
 *   Address Register does not exist.
 *
 * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
 *   indicating that its contents are not valid.
 *
 * The values passed back in *hole_base, *hole_offset, and *hole_size are
 * complete 32-bit values despite the fact that the bitfields in the DHAR
 * only represent bits 31-24 of the base and offset values.
 */
static int get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
			      u64 *hole_offset, u64 *hole_size)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	/* only revE and later have the DRAM Hole Address Register */
	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_E) {
		edac_dbg(1, "  revision %d for node %d does not support DHAR\n",
			 pvt->ext_model, pvt->mc_node_id);
		return 1;
	}

	/* valid for Fam10h and above */
	if (pvt->fam >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
		edac_dbg(1, "  Dram Memory Hoisting is DISABLED on this system\n");
		return 1;
	}

	if (!dhar_valid(pvt)) {
		edac_dbg(1, "  Dram Memory Hoisting is DISABLED on this node %d\n",
			 pvt->mc_node_id);
		return 1;
	}

	/* This node has Memory Hoisting */

	/* +------------------+--------------------+--------------------+-----
	 * | memory           | DRAM hole          | relocated          |
	 * | [0, (x - 1)]     | [x, 0xffffffff]    | addresses from     |
	 * |                  |                    | DRAM hole          |
	 * |                  |                    | [0x100000000,      |
	 * |                  |                    |  (0x100000000+     |
	 * |                  |                    |   (0xffffffff-x))] |
	 * +------------------+--------------------+--------------------+-----
	 *
	 * Above is a diagram of physical memory showing the DRAM hole and the
	 * relocated addresses from the DRAM hole. As shown, the DRAM hole
	 * starts at address x (the base address) and extends through address
	 * 0xffffffff. The DRAM Hole Address Register (DHAR) relocates the
	 * addresses in the hole so that they start at 0x100000000.
	 */

	*hole_base = dhar_base(pvt);
	*hole_size = (1ULL << 32) - *hole_base;

	*hole_offset = (pvt->fam > 0xf) ? f10_dhar_offset(pvt)
					: k8_dhar_offset(pvt);

	edac_dbg(1, "  DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
		 pvt->mc_node_id, (unsigned long)*hole_base,
		 (unsigned long)*hole_offset, (unsigned long)*hole_size);

	return 0;
}
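/*
 * Example (illustrative): if DHAR[DramHoleBase] decodes to 0xc0000000, the
 * hole spans [0xc0000000, 0xffffffff], so *hole_size = (1ULL << 32) -
 * 0xc0000000 = 0x40000000 (1 GB), and the DRAM displaced by that window is
 * re-addressed starting at 0x100000000.
 */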
#ifdef CONFIG_EDAC_DEBUG
#define EDAC_DCT_ATTR_SHOW(reg)						\
static ssize_t reg##_show(struct device *dev,				\
			  struct device_attribute *mattr, char *data)	\
{									\
	struct mem_ctl_info *mci = to_mci(dev);				\
	struct amd64_pvt *pvt = mci->pvt_info;				\
									\
	return sprintf(data, "0x%016llx\n", (u64)pvt->reg);		\
}

EDAC_DCT_ATTR_SHOW(dhar);
EDAC_DCT_ATTR_SHOW(dbam0);
EDAC_DCT_ATTR_SHOW(top_mem);
EDAC_DCT_ATTR_SHOW(top_mem2);

static ssize_t dram_hole_show(struct device *dev, struct device_attribute *mattr,
			      char *data)
{
	struct mem_ctl_info *mci = to_mci(dev);

	u64 hole_base = 0;
	u64 hole_offset = 0;
	u64 hole_size = 0;

	get_dram_hole_info(mci, &hole_base, &hole_offset, &hole_size);

	return sprintf(data, "%llx %llx %llx\n", hole_base, hole_offset,
		       hole_size);
}

/*
 * update NUM_DBG_ATTRS in case you add new members
 */
static DEVICE_ATTR(dhar, S_IRUGO, dhar_show, NULL);
static DEVICE_ATTR(dbam, S_IRUGO, dbam0_show, NULL);
static DEVICE_ATTR(topmem, S_IRUGO, top_mem_show, NULL);
static DEVICE_ATTR(topmem2, S_IRUGO, top_mem2_show, NULL);
static DEVICE_ATTR_RO(dram_hole);

static struct attribute *dbg_attrs[] = {
	&dev_attr_dhar.attr,
	&dev_attr_dbam.attr,
	&dev_attr_topmem.attr,
	&dev_attr_topmem2.attr,
	&dev_attr_dram_hole.attr,
	NULL
};

static const struct attribute_group dbg_group = {
	.attrs = dbg_attrs,
};
static ssize_t inject_section_show(struct device *dev,
				   struct device_attribute *mattr, char *buf)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct amd64_pvt *pvt = mci->pvt_info;

	return sprintf(buf, "0x%x\n", pvt->injection.section);
}

/*
 * store error injection section value which refers to one of 4 16-byte sections
 * within a 64-byte cacheline
 *
 * range: 0..3
 */
static ssize_t inject_section_store(struct device *dev,
				    struct device_attribute *mattr,
				    const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct amd64_pvt *pvt = mci->pvt_info;
	unsigned long value;
	int ret;

	ret = kstrtoul(data, 10, &value);
	if (ret < 0)
		return ret;

	if (value > 3) {
		amd64_warn("%s: invalid section 0x%lx\n", __func__, value);
		return -EINVAL;
	}

	pvt->injection.section = (u32) value;
	return count;
}

static ssize_t inject_word_show(struct device *dev,
				struct device_attribute *mattr, char *buf)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct amd64_pvt *pvt = mci->pvt_info;

	return sprintf(buf, "0x%x\n", pvt->injection.word);
}

/*
 * store error injection word value which refers to one of the 9 16-bit words
 * of the 16-byte (128-bit + ECC bits) section
 *
 * range: 0..8
 */
static ssize_t inject_word_store(struct device *dev,
				 struct device_attribute *mattr,
				 const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct amd64_pvt *pvt = mci->pvt_info;
	unsigned long value;
	int ret;

	ret = kstrtoul(data, 10, &value);
	if (ret < 0)
		return ret;

	if (value > 8) {
		amd64_warn("%s: invalid word 0x%lx\n", __func__, value);
		return -EINVAL;
	}

	pvt->injection.word = (u32) value;
	return count;
}

static ssize_t inject_ecc_vector_show(struct device *dev,
				      struct device_attribute *mattr,
				      char *buf)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct amd64_pvt *pvt = mci->pvt_info;

	return sprintf(buf, "0x%x\n", pvt->injection.bit_map);
}

/*
 * store 16 bit error injection vector which enables injecting errors to the
 * corresponding bit within the error injection word above. When used during a
 * DRAM ECC read, it holds the contents of the DRAM ECC bits.
 */
static ssize_t inject_ecc_vector_store(struct device *dev,
				       struct device_attribute *mattr,
				       const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct amd64_pvt *pvt = mci->pvt_info;
	unsigned long value;
	int ret;

	ret = kstrtoul(data, 16, &value);
	if (ret < 0)
		return ret;

	if (value & 0xFFFF0000) {
		amd64_warn("%s: invalid EccVector: 0x%lx\n", __func__, value);
		return -EINVAL;
	}

	pvt->injection.bit_map = (u32) value;
	return count;
}
/*
 * Do a DRAM ECC read. Assemble staged values in the pvt area, format into
 * fields needed by the injection registers and read the NB Array Data Port.
 */
static ssize_t inject_read_store(struct device *dev,
				 struct device_attribute *mattr,
				 const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct amd64_pvt *pvt = mci->pvt_info;
	unsigned long value;
	u32 section, word_bits;
	int ret;

	ret = kstrtoul(data, 10, &value);
	if (ret < 0)
		return ret;

	/* Form value to choose 16-byte section of cacheline */
	section = F10_NB_ARRAY_DRAM | SET_NB_ARRAY_ADDR(pvt->injection.section);

	amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section);

	word_bits = SET_NB_DRAM_INJECTION_READ(pvt->injection);

	/* Issue 'word' and 'bit' along with the READ request */
	amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits);

	edac_dbg(0, "section=0x%x word_bits=0x%x\n", section, word_bits);

	return count;
}
/*
 * Do a DRAM ECC write. Assemble staged values in the pvt area and format into
 * fields needed by the injection registers.
 */
static ssize_t inject_write_store(struct device *dev,
				  struct device_attribute *mattr,
				  const char *data, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct amd64_pvt *pvt = mci->pvt_info;
	u32 section, word_bits, tmp;
	unsigned long value;
	int ret;

	ret = kstrtoul(data, 10, &value);
	if (ret < 0)
		return ret;

	/* Form value to choose 16-byte section of cacheline */
	section = F10_NB_ARRAY_DRAM | SET_NB_ARRAY_ADDR(pvt->injection.section);

	amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section);

	word_bits = SET_NB_DRAM_INJECTION_WRITE(pvt->injection);

	pr_notice_once("Don't forget to decrease MCE polling interval in\n"
			"/sys/bus/machinecheck/devices/machinecheck<CPUNUM>/check_interval\n"
			"so that you can get the error report faster.\n");

	on_each_cpu(disable_caches, NULL, 1);

	/* Issue 'word' and 'bit' along with the WRITE request */
	amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits);

retry:
	/* wait until injection happens */
	amd64_read_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, &tmp);
	if (tmp & F10_NB_ARR_ECC_WR_REQ) {
		cpu_relax();
		goto retry;
	}

	on_each_cpu(enable_caches, NULL, 1);

	edac_dbg(0, "section=0x%x word_bits=0x%x\n", section, word_bits);

	return count;
}
/*
 * update NUM_INJ_ATTRS in case you add new members
 */

static DEVICE_ATTR_RW(inject_section);
static DEVICE_ATTR_RW(inject_word);
static DEVICE_ATTR_RW(inject_ecc_vector);
static DEVICE_ATTR_WO(inject_write);
static DEVICE_ATTR_WO(inject_read);

static struct attribute *inj_attrs[] = {
	&dev_attr_inject_section.attr,
	&dev_attr_inject_word.attr,
	&dev_attr_inject_ecc_vector.attr,
	&dev_attr_inject_write.attr,
	&dev_attr_inject_read.attr,
	NULL
};

static umode_t inj_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	struct mem_ctl_info *mci = container_of(dev, struct mem_ctl_info, dev);
	struct amd64_pvt *pvt = mci->pvt_info;

	/* Families which have that injection hw */
	if (pvt->fam >= 0x10 && pvt->fam <= 0x16)
		return attr->mode;

	return 0;
}

static const struct attribute_group inj_group = {
	.attrs = inj_attrs,
	.is_visible = inj_is_visible,
};
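/*
 * Illustrative usage from userspace, assuming this node is registered as
 * mc0 (paths and values are examples only):
 *
 *   echo 2   > /sys/devices/system/edac/mc/mc0/inject_section
 *   echo 4   > /sys/devices/system/edac/mc/mc0/inject_word
 *   echo 1   > /sys/devices/system/edac/mc/mc0/inject_ecc_vector
 *   echo 1   > /sys/devices/system/edac/mc/mc0/inject_write
 *
 * This stages a single-bit flip in word 4 of section 2 and triggers the DRAM
 * ECC write; a subsequent write to inject_read on the same location should
 * then surface a correctable error report.
 */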
#endif /* CONFIG_EDAC_DEBUG */
/*
 * Return the DramAddr that the SysAddr given by @sys_addr maps to. It is
 * assumed that sys_addr maps to the node given by mci.
 *
 * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
 * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
 * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
 * then it is also involved in translating a SysAddr to a DramAddr. Sections
 * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
 * These parts of the documentation are unclear. I interpret them as follows:
 *
 * When node n receives a SysAddr, it processes the SysAddr as follows:
 *
 * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
 *    Limit registers for node n. If the SysAddr is not within the range
 *    specified by the base and limit values, then node n ignores the SysAddr
 *    (since it does not map to node n). Otherwise continue to step 2 below.
 *
 * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
 *    disabled so skip to step 3 below. Otherwise see if the SysAddr is within
 *    the range of relocated addresses (starting at 0x100000000) from the DRAM
 *    hole. If not, skip to step 3 below. Else get the value of the
 *    DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
 *    offset defined by this value from the SysAddr.
 *
 * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
 *    Base register for node n. To obtain the DramAddr, subtract the base
 *    address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
 */
static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
	int ret;

	dram_base = get_dram_base(pvt, pvt->mc_node_id);

	ret = get_dram_hole_info(mci, &hole_base, &hole_offset, &hole_size);
	if (!ret) {
		if ((sys_addr >= (1ULL << 32)) &&
		    (sys_addr < ((1ULL << 32) + hole_size))) {
			/* use DHAR to translate SysAddr to DramAddr */
			dram_addr = sys_addr - hole_offset;

			edac_dbg(2, "using DHAR to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
				 (unsigned long)sys_addr,
				 (unsigned long)dram_addr);

			return dram_addr;
		}
	}

	/*
	 * Translate the SysAddr to a DramAddr as shown near the start of
	 * section 3.4.4 (p. 70). Although sys_addr is a 64-bit value, the k8
	 * only deals with 40-bit values. Therefore we discard bits 63-40 of
	 * sys_addr below. If bit 39 of sys_addr is 1 then the bits we
	 * discard are all 1s. Otherwise the bits we discard are all 0s. See
	 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
	 * Programmer's Manual Volume 1 Application Programming.
	 */
	dram_addr = (sys_addr & GENMASK_ULL(39, 0)) - dram_base;

	edac_dbg(2, "using DRAM Base register to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
		 (unsigned long)sys_addr, (unsigned long)dram_addr);

	return dram_addr;
}
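/*
 * Example (illustrative): with dram_base = 0, a 1 GB hole based at
 * 0xc0000000 and hole_offset = 0x40000000, sys_addr 0x100000000 lies in the
 * relocated window and maps to DramAddr 0x100000000 - 0x40000000 =
 * 0xc0000000. A sys_addr below 4 GB, say 0x80000000, bypasses the DHAR path
 * and maps to DramAddr 0x80000000 via the DRAM Base subtraction.
 */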
/*
 * @intlv_en is the value of the IntlvEn field from a DRAM Base register
 * (section 3.4.4.1). Return the number of bits from a SysAddr that are used
 * for node interleaving.
 */
static int num_node_interleave_bits(unsigned intlv_en)
{
	static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
	int n;

	BUG_ON(intlv_en > 7);
	n = intlv_shift_table[intlv_en];
	return n;
}
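/*
 * Example: IntlvEn = 0x3 means SysAddr bits 13:12 select among four nodes,
 * so intlv_shift_table[0x3] = 2 interleave bits; IntlvEn = 0x7 selects among
 * eight nodes with 3 bits. The zero entries correspond to encodings that are
 * not valid interleave settings (cf. the IntlvEn sanity check in
 * find_mc_by_sys_addr() above).
 */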
/* Translate the DramAddr given by @dram_addr to an InputAddr. */
static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
{
	struct amd64_pvt *pvt;
	int intlv_shift;
	u64 input_addr;

	pvt = mci->pvt_info;

	/*
	 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
	 * concerning translating a DramAddr to an InputAddr.
	 */
	intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
	input_addr = ((dram_addr >> intlv_shift) & GENMASK_ULL(35, 12)) +
		      (dram_addr & 0xfff);

	edac_dbg(2, "  Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
		 intlv_shift, (unsigned long)dram_addr,
		 (unsigned long)input_addr);

	return input_addr;
}
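/*
 * Example (illustrative): with two nodes interleaved on bit 12
 * (intlv_shift = 1), DramAddr 0x3000 keeps its page offset and drops the
 * interleave bit: input_addr = ((0x3000 >> 1) & GENMASK_ULL(35, 12)) + 0x000
 * = 0x1000.
 */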
/*
 * Translate the SysAddr represented by @sys_addr to an InputAddr. It is
 * assumed that @sys_addr maps to the node given by mci.
 */
static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
{
	u64 input_addr;

	input_addr =
	    dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));

	edac_dbg(2, "SysAddr 0x%lx translates to InputAddr 0x%lx\n",
		 (unsigned long)sys_addr, (unsigned long)input_addr);

	return input_addr;
}

/* Map the Error address to a PAGE and PAGE OFFSET. */
static inline void error_address_to_page_and_offset(u64 error_address,
						    struct err_info *err)
{
	err->page = (u32) (error_address >> PAGE_SHIFT);
	err->offset = ((u32) error_address) & ~PAGE_MASK;
}
/*
 * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
 * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
 * of a node that detected an ECC memory error. mci represents the node that
 * the error address maps to (possibly different from the node that detected
 * the error). Return the number of the csrow that sys_addr maps to, or -1 on
 * failure.
 */
static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
{
	int csrow;

	csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));

	if (csrow == -1)
		amd64_mc_err(mci, "Failed to translate InputAddr to csrow for "
				  "address 0x%lx\n", (unsigned long)sys_addr);
	return csrow;
}
/*
 * See AMD PPR DF::LclNodeTypeMap
 *
 * This register gives information for nodes of the same type within a system.
 *
 * Reading this register from a GPU node will tell how many GPU nodes are in the
 * system and what the lowest AMD Node ID value is for the GPU nodes. Use this
 * info to fixup the Linux logical "Node ID" value set in the AMD NB code and EDAC.
 */
static struct local_node_map {
	u16 node_count;
	u16 base_node_id;
} gpu_node_map;

#define PCI_DEVICE_ID_AMD_MI200_DF_F1	0x14d1
#define REG_LOCAL_NODE_TYPE_MAP		0x144

/* Local Node Type Map (LNTM) fields */
#define LNTM_NODE_COUNT			GENMASK(27, 16)
#define LNTM_BASE_NODE_ID		GENMASK(11, 0)

static int gpu_get_node_map(struct amd64_pvt *pvt)
{
	struct pci_dev *pdev;
	int ret;
	u32 tmp;

	/*
	 * Mapping of nodes from hardware-provided AMD Node ID to a
	 * Linux logical one is applicable for MI200 models. Therefore,
	 * return early for other heterogeneous systems.
	 */
	if (pvt->F3->device != PCI_DEVICE_ID_AMD_MI200_DF_F3)
		return 0;

	/*
	 * Node ID 0 is reserved for CPUs. Therefore, a non-zero Node ID
	 * means the values have already been cached.
	 */
	if (gpu_node_map.base_node_id)
		return 0;

	pdev = pci_get_device(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_MI200_DF_F1, NULL);
	if (!pdev) {
		ret = -ENODEV;
		goto out;
	}

	ret = pci_read_config_dword(pdev, REG_LOCAL_NODE_TYPE_MAP, &tmp);
	if (ret)
		goto out;

	gpu_node_map.node_count = FIELD_GET(LNTM_NODE_COUNT, tmp);
	gpu_node_map.base_node_id = FIELD_GET(LNTM_BASE_NODE_ID, tmp);

out:
	pci_dev_put(pdev);
	return ret;
}
static int fixup_node_id(int node_id, struct mce *m)
{
	/* MCA_IPID[InstanceIdHi] gives the AMD Node ID for the bank. */
	u8 nid = (m->ipid >> 44) & 0xF;

	if (smca_get_bank_type(m->extcpu, m->bank) != SMCA_UMC_V2)
		return node_id;

	/* Nodes below the GPU base node are CPU nodes and don't need a fixup. */
	if (nid < gpu_node_map.base_node_id)
		return node_id;

	/* Convert the hardware-provided AMD Node ID to a Linux logical one. */
	return nid - gpu_node_map.base_node_id + 1;
}
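/*
 * Example (illustrative): on an MI200-class system with
 * gpu_node_map.base_node_id = 4, a UMC_V2 error whose MCA_IPID carries AMD
 * Node ID 5 is reported against Linux logical node 5 - 4 + 1 = 2, while
 * errors from CPU nodes (nid < 4) keep their original node_id.
 */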
static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
/*
 * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
 * are ECC capable.
 */
static unsigned long dct_determine_edac_cap(struct amd64_pvt *pvt)
{
	unsigned long edac_cap = EDAC_FLAG_NONE;
	u8 bit;

	bit = (pvt->fam > 0xf || pvt->ext_model >= K8_REV_F)
		? 19
		: 17;

	if (pvt->dclr0 & BIT(bit))
		edac_cap = EDAC_FLAG_SECDED;

	return edac_cap;
}

static unsigned long umc_determine_edac_cap(struct amd64_pvt *pvt)
{
	u8 i, umc_en_mask = 0, dimm_ecc_en_mask = 0;
	unsigned long edac_cap = EDAC_FLAG_NONE;

	for_each_umc(i) {
		if (!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT))
			continue;

		umc_en_mask |= BIT(i);

		/* UMC Configuration bit 12 (DimmEccEn) */
		if (pvt->umc[i].umc_cfg & BIT(12))
			dimm_ecc_en_mask |= BIT(i);
	}

	if (umc_en_mask == dimm_ecc_en_mask)
		edac_cap = EDAC_FLAG_SECDED;

	return edac_cap;
}
/*
 * debug routine to display the memory sizes of all logical DIMMs and their
 * CSROWs
 */
static void dct_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
{
	u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
	u32 dbam  = ctrl ? pvt->dbam1 : pvt->dbam0;
	int dimm, size0, size1;

	if (pvt->fam == 0xf) {
		/* K8 families < revF not supported yet */
		if (pvt->ext_model < K8_REV_F)
			return;

		WARN_ON(ctrl != 0);
	}

	if (pvt->fam == 0x10) {
		dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1
							   : pvt->dbam0;
		dcsb = (ctrl && !dct_ganging_enabled(pvt)) ?
				 pvt->csels[1].csbases :
				 pvt->csels[0].csbases;
	} else if (ctrl) {
		dcsb = pvt->csels[1].csbases;
	}
	edac_dbg(1, "F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
		 ctrl, dbam);

	edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);

	/* Dump memory sizes for DIMM and its CSROWs */
	for (dimm = 0; dimm < 4; dimm++) {
		size0 = 0;
		if (dcsb[dimm * 2] & DCSB_CS_ENABLE)
			/*
			 * For F15m60h, we need multiplier for LRDIMM cs_size
			 * calculation. We pass dimm value to the dbam_to_cs
			 * mapper so we can find the multiplier from the
			 * corresponding DCSM.
			 */
			size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
						     DBAM_DIMM(dimm, dbam),
						     dimm);

		size1 = 0;
		if (dcsb[dimm * 2 + 1] & DCSB_CS_ENABLE)
			size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
						     DBAM_DIMM(dimm, dbam),
						     dimm);

		amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
			   dimm * 2,     size0,
			   dimm * 2 + 1, size1);
	}
}
static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
{
	edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);

	if (pvt->dram_type == MEM_LRDDR3) {
		u32 dcsm = pvt->csels[chan].csmasks[0];
		/*
		 * It's assumed all LRDIMMs in a DCT are going to be of
		 * same 'type' until proven otherwise. So, use a cs
		 * value of '0' here to get dcsm value.
		 */
		edac_dbg(1, " LRDIMM %dx rank multiply\n", (dcsm & 0x3));
	}

	edac_dbg(1, "All DIMMs support ECC: %s\n",
		 (dclr & BIT(19)) ? "yes" : "no");

	edac_dbg(1, "  PAR/ERR parity: %s\n",
		 (dclr & BIT(8)) ? "enabled" : "disabled");

	if (pvt->fam == 0x10)
		edac_dbg(1, "  DCT 128bit mode width: %s\n",
			 (dclr & BIT(11)) ? "128b" : "64b");

	edac_dbg(1, "  x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
		 (dclr & BIT(12)) ? "yes" : "no",
		 (dclr & BIT(13)) ? "yes" : "no",
		 (dclr & BIT(14)) ? "yes" : "no",
		 (dclr & BIT(15)) ? "yes" : "no");
}
#define CS_EVEN_PRIMARY		BIT(0)
#define CS_ODD_PRIMARY		BIT(1)
#define CS_EVEN_SECONDARY	BIT(2)
#define CS_ODD_SECONDARY	BIT(3)
#define CS_3R_INTERLEAVE	BIT(4)

#define CS_EVEN			(CS_EVEN_PRIMARY | CS_EVEN_SECONDARY)
#define CS_ODD			(CS_ODD_PRIMARY | CS_ODD_SECONDARY)
static int umc_get_cs_mode(int dimm, u8 ctrl, struct amd64_pvt *pvt)
{
	int cs_mode = 0;
	int base, count = 0;

	if (csrow_enabled(2 * dimm, ctrl, pvt))
		cs_mode |= CS_EVEN_PRIMARY;

	if (csrow_enabled(2 * dimm + 1, ctrl, pvt))
		cs_mode |= CS_ODD_PRIMARY;

	/* Asymmetric dual-rank DIMM support. */
	if (csrow_sec_enabled(2 * dimm + 1, ctrl, pvt))
		cs_mode |= CS_ODD_SECONDARY;

	/*
	 * 3 Rank interleaving support.
	 * There should be only three bases enabled and their two masks should
	 * be equal.
	 */
	for_each_chip_select(base, ctrl, pvt)
		count += csrow_enabled(base, ctrl, pvt);

	if (count == 3 &&
	    pvt->csels[ctrl].csmasks[0] == pvt->csels[ctrl].csmasks[1]) {
		edac_dbg(1, "3R interleaving in use.\n");
		cs_mode |= CS_3R_INTERLEAVE;
	}

	return cs_mode;
}
static int __addr_mask_to_cs_size(u32 addr_mask_orig, unsigned int cs_mode,
				  int csrow_nr, int dimm)
{
	u32 msb, weight, num_zero_bits;
	u32 addr_mask_deinterleaved;
	int size = 0;

	/*
	 * The number of zero bits in the mask is equal to the number of bits
	 * in a full mask minus the number of bits in the current mask.
	 *
	 * The MSB is the number of bits in the full mask because BIT[0] is
	 * always 0.
	 *
	 * In the special 3 Rank interleaving case, a single bit is flipped
	 * without swapping with the most significant bit. This can be handled
	 * by keeping the MSB where it is and ignoring the single zero bit.
	 */
	msb = fls(addr_mask_orig) - 1;
	weight = hweight_long(addr_mask_orig);
	num_zero_bits = msb - weight - !!(cs_mode & CS_3R_INTERLEAVE);

	/* Take the number of zero bits off from the top of the mask. */
	addr_mask_deinterleaved = GENMASK_ULL(msb - num_zero_bits, 1);

	edac_dbg(1, "CS%d DIMM%d AddrMasks:\n", csrow_nr, dimm);
	edac_dbg(1, "  Original AddrMask: 0x%x\n", addr_mask_orig);
	edac_dbg(1, "  Deinterleaved AddrMask: 0x%x\n", addr_mask_deinterleaved);

	/* Register [31:1] = Address [39:9]. Size is in kBs here. */
	size = (addr_mask_deinterleaved >> 2) + 1;

	/* Return size in MBs. */
	return size >> 10;
}
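/*
 * Worked example (illustrative): a 16 GB chip select with no interleaving
 * has addr_mask_orig = 0x3fffffe (register bits [25:1] set, covering address
 * bits [33:9]): msb = 25, weight = 25, num_zero_bits = 0, so the mask is
 * unchanged and size = (0x3fffffe >> 2) + 1 = 0x1000000 kB, returned as
 * 16384 MB. If interleaving trades a low mask bit for bit 26 (0x7fffffc),
 * weight stays 25 while msb becomes 26; the single zero bit is shaved off
 * the top and the same 16384 MB results.
 */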
static int umc_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
				    unsigned int cs_mode, int csrow_nr)
{
	int cs_mask_nr = csrow_nr;
	u32 addr_mask_orig;
	int dimm, size = 0;

	/* No Chip Selects are enabled. */
	if (!cs_mode)
		return size;

	/* Requested size of an even CS but none are enabled. */
	if (!(cs_mode & CS_EVEN) && !(csrow_nr & 1))
		return size;

	/* Requested size of an odd CS but none are enabled. */
	if (!(cs_mode & CS_ODD) && (csrow_nr & 1))
		return size;

	/*
	 * Family 17h introduced systems with one mask per DIMM,
	 * and two Chip Selects per DIMM.
	 *
	 *	CS0 and CS1 -> MASK0 / DIMM0
	 *	CS2 and CS3 -> MASK1 / DIMM1
	 *
	 * Family 19h Model 10h introduced systems with one mask per Chip Select,
	 * and two Chip Selects per DIMM.
	 *
	 *	CS0 -> MASK0 -> DIMM0
	 *	CS1 -> MASK1 -> DIMM0
	 *	CS2 -> MASK2 -> DIMM1
	 *	CS3 -> MASK3 -> DIMM1
	 *
	 * Keep the mask number equal to the Chip Select number for newer systems,
	 * and shift the mask number for older systems.
	 */
	dimm = csrow_nr >> 1;

	if (!pvt->flags.zn_regs_v2)
		cs_mask_nr >>= 1;

	/* Asymmetric dual-rank DIMM support. */
	if ((csrow_nr & 1) && (cs_mode & CS_ODD_SECONDARY))
		addr_mask_orig = pvt->csels[umc].csmasks_sec[cs_mask_nr];
	else
		addr_mask_orig = pvt->csels[umc].csmasks[cs_mask_nr];

	return __addr_mask_to_cs_size(addr_mask_orig, cs_mode, csrow_nr, dimm);
}
static void umc_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
{
	int dimm, size0, size1, cs0, cs1, cs_mode;

	edac_printk(KERN_DEBUG, EDAC_MC, "UMC%d chip selects:\n", ctrl);

	for (dimm = 0; dimm < 2; dimm++) {
		cs0 = dimm * 2;
		cs1 = dimm * 2 + 1;

		cs_mode = umc_get_cs_mode(dimm, ctrl, pvt);

		size0 = umc_addr_mask_to_cs_size(pvt, ctrl, cs_mode, cs0);
		size1 = umc_addr_mask_to_cs_size(pvt, ctrl, cs_mode, cs1);

		amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
			   cs0, size0, cs1, size1);
	}
}
static void umc_dump_misc_regs(struct amd64_pvt *pvt)
{
	struct amd64_umc *umc;
	u32 i, tmp, umc_base;

	for_each_umc(i) {
		umc_base = get_umc_base(i);
		umc = &pvt->umc[i];

		edac_dbg(1, "UMC%d DIMM cfg: 0x%x\n", i, umc->dimm_cfg);
		edac_dbg(1, "UMC%d UMC cfg: 0x%x\n", i, umc->umc_cfg);
		edac_dbg(1, "UMC%d SDP ctrl: 0x%x\n", i, umc->sdp_ctrl);
		edac_dbg(1, "UMC%d ECC ctrl: 0x%x\n", i, umc->ecc_ctrl);

		amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ECC_BAD_SYMBOL, &tmp);
		edac_dbg(1, "UMC%d ECC bad symbol: 0x%x\n", i, tmp);

		amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_UMC_CAP, &tmp);
		edac_dbg(1, "UMC%d UMC cap: 0x%x\n", i, tmp);
		edac_dbg(1, "UMC%d UMC cap high: 0x%x\n", i, umc->umc_cap_hi);

		edac_dbg(1, "UMC%d ECC capable: %s, ChipKill ECC capable: %s\n",
			 i, (umc->umc_cap_hi & BIT(30)) ? "yes" : "no",
			 (umc->umc_cap_hi & BIT(31)) ? "yes" : "no");
		edac_dbg(1, "UMC%d All DIMMs support ECC: %s\n",
			 i, (umc->umc_cfg & BIT(12)) ? "yes" : "no");
		edac_dbg(1, "UMC%d x4 DIMMs present: %s\n",
			 i, (umc->dimm_cfg & BIT(6)) ? "yes" : "no");
		edac_dbg(1, "UMC%d x16 DIMMs present: %s\n",
			 i, (umc->dimm_cfg & BIT(7)) ? "yes" : "no");

		if (umc->dram_type == MEM_LRDDR4 || umc->dram_type == MEM_LRDDR5) {
			amd_smn_read(pvt->mc_node_id,
				     umc_base + get_umc_reg(pvt, UMCCH_ADDR_CFG),
				     &tmp);
			edac_dbg(1, "UMC%d LRDIMM %dx rank multiply\n",
				 i, 1 << ((tmp >> 4) & 0x3));
		}

		umc_debug_display_dimm_sizes(pvt, i);
	}
}
static void dct_dump_misc_regs(struct amd64_pvt *pvt)
{
	edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);

	edac_dbg(1, "  NB two channel DRAM capable: %s\n",
		 (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");

	edac_dbg(1, "  ECC capable: %s, ChipKill ECC capable: %s\n",
		 (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
		 (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");

	debug_dump_dramcfg_low(pvt, pvt->dclr0, 0);

	edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);

	edac_dbg(1, "F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, offset: 0x%08x\n",
		 pvt->dhar, dhar_base(pvt),
		 (pvt->fam == 0xf) ? k8_dhar_offset(pvt)
				   : f10_dhar_offset(pvt));

	dct_debug_display_dimm_sizes(pvt, 0);

	/* everything below this point is Fam10h and above */
	if (pvt->fam == 0xf)
		return;

	dct_debug_display_dimm_sizes(pvt, 1);

	/* Only if NOT ganged does dclr1 have valid info */
	if (!dct_ganging_enabled(pvt))
		debug_dump_dramcfg_low(pvt, pvt->dclr1, 1);

	edac_dbg(1, "  DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");

	amd64_info("using x%u syndromes.\n", pvt->ecc_sym_sz);
}
/*
 * See BKDG, F2x[1,0][5C:40], F2[1,0][6C:60]
 */
static void dct_prep_chip_selects(struct amd64_pvt *pvt)
{
	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
	} else if (pvt->fam == 0x15 && pvt->model == 0x30) {
		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 4;
		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 2;
	} else {
		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
	}
}

static void umc_prep_chip_selects(struct amd64_pvt *pvt)
{
	int umc;

	for_each_umc(umc) {
		pvt->csels[umc].b_cnt = 4;
		pvt->csels[umc].m_cnt = pvt->flags.zn_regs_v2 ? 4 : 2;
	}
}
static void umc_read_base_mask(struct amd64_pvt *pvt)
{
	u32 umc_base_reg, umc_base_reg_sec;
	u32 umc_mask_reg, umc_mask_reg_sec;
	u32 base_reg, base_reg_sec;
	u32 mask_reg, mask_reg_sec;
	u32 *base, *base_sec;
	u32 *mask, *mask_sec;
	int cs, umc;

	for_each_umc(umc) {
		umc_base_reg = get_umc_base(umc) + UMCCH_BASE_ADDR;
		umc_base_reg_sec = get_umc_base(umc) + UMCCH_BASE_ADDR_SEC;

		for_each_chip_select(cs, umc, pvt) {
			base = &pvt->csels[umc].csbases[cs];
			base_sec = &pvt->csels[umc].csbases_sec[cs];

			base_reg = umc_base_reg + (cs * 4);
			base_reg_sec = umc_base_reg_sec + (cs * 4);

			if (!amd_smn_read(pvt->mc_node_id, base_reg, base))
				edac_dbg(0, "  DCSB%d[%d]=0x%08x reg: 0x%x\n",
					 umc, cs, *base, base_reg);

			if (!amd_smn_read(pvt->mc_node_id, base_reg_sec, base_sec))
				edac_dbg(0, "    DCSB_SEC%d[%d]=0x%08x reg: 0x%x\n",
					 umc, cs, *base_sec, base_reg_sec);
		}

		umc_mask_reg = get_umc_base(umc) + UMCCH_ADDR_MASK;
		umc_mask_reg_sec = get_umc_base(umc) + get_umc_reg(pvt, UMCCH_ADDR_MASK_SEC);

		for_each_chip_select_mask(cs, umc, pvt) {
			mask = &pvt->csels[umc].csmasks[cs];
			mask_sec = &pvt->csels[umc].csmasks_sec[cs];

			mask_reg = umc_mask_reg + (cs * 4);
			mask_reg_sec = umc_mask_reg_sec + (cs * 4);

			if (!amd_smn_read(pvt->mc_node_id, mask_reg, mask))
				edac_dbg(0, "  DCSM%d[%d]=0x%08x reg: 0x%x\n",
					 umc, cs, *mask, mask_reg);

			if (!amd_smn_read(pvt->mc_node_id, mask_reg_sec, mask_sec))
				edac_dbg(0, "    DCSM_SEC%d[%d]=0x%08x reg: 0x%x\n",
					 umc, cs, *mask_sec, mask_reg_sec);
		}
	}
}
/*
 * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
 */
static void dct_read_base_mask(struct amd64_pvt *pvt)
{
	int cs;

	for_each_chip_select(cs, 0, pvt) {
		int reg0   = DCSB0 + (cs * 4);
		int reg1   = DCSB1 + (cs * 4);
		u32 *base0 = &pvt->csels[0].csbases[cs];
		u32 *base1 = &pvt->csels[1].csbases[cs];

		if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0))
			edac_dbg(0, "  DCSB0[%d]=0x%08x reg: F2x%x\n",
				 cs, *base0, reg0);

		if (pvt->fam == 0xf)
			continue;

		if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, base1))
			edac_dbg(0, "  DCSB1[%d]=0x%08x reg: F2x%x\n",
				 cs, *base1, (pvt->fam == 0x10) ? reg1
								: reg0);
	}

	for_each_chip_select_mask(cs, 0, pvt) {
		int reg0   = DCSM0 + (cs * 4);
		int reg1   = DCSM1 + (cs * 4);
		u32 *mask0 = &pvt->csels[0].csmasks[cs];
		u32 *mask1 = &pvt->csels[1].csmasks[cs];

		if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0))
			edac_dbg(0, "    DCSM0[%d]=0x%08x reg: F2x%x\n",
				 cs, *mask0, reg0);

		if (pvt->fam == 0xf)
			continue;

		if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, mask1))
			edac_dbg(0, "    DCSM1[%d]=0x%08x reg: F2x%x\n",
				 cs, *mask1, (pvt->fam == 0x10) ? reg1
								: reg0);
	}
}
static void umc_determine_memory_type(struct amd64_pvt *pvt)
{
	struct amd64_umc *umc;
	u32 i;

	for_each_umc(i) {
		umc = &pvt->umc[i];

		if (!(umc->sdp_ctrl & UMC_SDP_INIT)) {
			umc->dram_type = MEM_EMPTY;
			continue;
		}

		/*
		 * Check if the system supports the "DDR Type" field in UMC Config
		 * and has DDR5 DIMMs in use.
		 */
		if (pvt->flags.zn_regs_v2 && ((umc->umc_cfg & GENMASK(2, 0)) == 0x1)) {
			if (umc->dimm_cfg & BIT(5))
				umc->dram_type = MEM_LRDDR5;
			else if (umc->dimm_cfg & BIT(4))
				umc->dram_type = MEM_RDDR5;
			else
				umc->dram_type = MEM_DDR5;
		} else {
			if (umc->dimm_cfg & BIT(5))
				umc->dram_type = MEM_LRDDR4;
			else if (umc->dimm_cfg & BIT(4))
				umc->dram_type = MEM_RDDR4;
			else
				umc->dram_type = MEM_DDR4;
		}

		edac_dbg(1, "  UMC%d DIMM type: %s\n", i, edac_mem_types[umc->dram_type]);
	}
}
static void dct_determine_memory_type(struct amd64_pvt *pvt)
{
	u32 dram_ctrl, dcsm;

	switch (pvt->fam) {
	case 0xf:
		if (pvt->ext_model >= K8_REV_F)
			goto ddr3;

		pvt->dram_type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
		return;

	case 0x10:
		if (pvt->dchr0 & DDR3_MODE)
			goto ddr3;

		pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
		return;

	case 0x15:
		if (pvt->model < 0x60)
			goto ddr3;

		/*
		 * Model 0x60h needs special handling:
		 *
		 * We use a Chip Select value of '0' to obtain dcsm.
		 * Theoretically, it is possible to populate LRDIMMs of different
		 * 'Rank' value on a DCT. But this is not the common case. So,
		 * it's reasonable to assume all DIMMs are going to be of same
		 * 'type' until proven otherwise.
		 */
		amd64_read_dct_pci_cfg(pvt, 0, DRAM_CONTROL, &dram_ctrl);
		dcsm = pvt->csels[0].csmasks[0];

		if (((dram_ctrl >> 8) & 0x7) == 0x2)
			pvt->dram_type = MEM_DDR4;
		else if (pvt->dclr0 & BIT(16))
			pvt->dram_type = MEM_DDR3;
		else if (dcsm & 0x3)
			pvt->dram_type = MEM_LRDDR3;
		else
			pvt->dram_type = MEM_RDDR3;

		return;

	case 0x16:
		goto ddr3;

	default:
		WARN(1, KERN_ERR "%s: Family??? 0x%x\n", __func__, pvt->fam);
		pvt->dram_type = MEM_EMPTY;
	}

	edac_dbg(1, "  DIMM type: %s\n", edac_mem_types[pvt->dram_type]);
	return;

ddr3:
	pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
}
/* On F10h and later ErrAddr is MC4_ADDR[47:1] */
static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m)
{
	u16 mce_nid = topology_amd_node_id(m->extcpu);
	struct mem_ctl_info *mci;
	u8 start_bit = 1;
	u8 end_bit   = 47;
	u64 addr;

	mci = edac_mc_find(mce_nid);
	if (!mci)
		return 0;

	pvt = mci->pvt_info;

	if (pvt->fam == 0xf) {
		start_bit = 3;
		end_bit   = 39;
	}

	addr = m->addr & GENMASK_ULL(end_bit, start_bit);

	/*
	 * Erratum 637 workaround
	 */
	if (pvt->fam == 0x15) {
		u64 cc6_base, tmp_addr;
		u32 tmp;
		u16 intlv_en;

		if ((addr & GENMASK_ULL(47, 24)) >> 24 != 0x00fdf7)
			return addr;

		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_LIM, &tmp);
		intlv_en = tmp >> 21 & 0x7;

		/* add [47:27] + 3 trailing bits */
		cc6_base  = (tmp & GENMASK_ULL(20, 0)) << 3;

		/* reverse and add DramIntlvEn */
		cc6_base |= intlv_en ^ 0x7;

		/* pin at [47:24] */
		cc6_base <<= 24;

		if (!intlv_en)
			return cc6_base | (addr & GENMASK_ULL(23, 0));

		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_BASE, &tmp);

							/* faster log2 */
		tmp_addr = (addr & GENMASK_ULL(23, 12)) << __fls(intlv_en + 1);

		/* OR DramIntlvSel into bits [14:12] */
		tmp_addr |= (tmp & GENMASK_ULL(23, 21)) >> 9;

		/* add remaining [11:0] bits from original MC4_ADDR */
		tmp_addr |= addr & GENMASK_ULL(11, 0);

		return cc6_base | tmp_addr;
	}

	return addr;
}
static struct pci_dev *pci_get_related_function(unsigned int vendor,
						unsigned int device,
						struct pci_dev *related)
{
	struct pci_dev *dev = NULL;

	while ((dev = pci_get_device(vendor, device, dev))) {
		if (pci_domain_nr(dev->bus) == pci_domain_nr(related->bus) &&
		    (dev->bus->number == related->bus->number) &&
		    (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
			break;
	}

	return dev;
}
static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
{
	struct amd_northbridge *nb;
	struct pci_dev *f1 = NULL;
	unsigned int pci_func;
	int off = range << 3;
	u32 llim;

	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off,  &pvt->ranges[range].base.lo);
	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);

	if (pvt->fam == 0xf)
		return;

	if (!dram_rw(pvt, range))
		return;

	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off,  &pvt->ranges[range].base.hi);
	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);

	/* F15h: factor in CC6 save area by reading dst node's limit reg */
	if (pvt->fam != 0x15)
		return;

	nb = node_to_amd_nb(dram_dst_node(pvt, range));
	if (WARN_ON(!nb))
		return;

	if (pvt->model == 0x60)
		pci_func = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1;
	else if (pvt->model == 0x30)
		pci_func = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1;
	else
		pci_func = PCI_DEVICE_ID_AMD_15H_NB_F1;

	f1 = pci_get_related_function(nb->misc->vendor, pci_func, nb->misc);
	if (WARN_ON(!f1))
		return;

	amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);

	pvt->ranges[range].lim.lo &= GENMASK_ULL(15, 0);

	/* {[39:27],111b} */
	pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;

	pvt->ranges[range].lim.hi &= GENMASK_ULL(7, 0);

	/* [47:40] */
	pvt->ranges[range].lim.hi |= llim >> 13;

	pci_dev_put(f1);
}
static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
				    struct err_info *err)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	error_address_to_page_and_offset(sys_addr, err);

	/*
	 * Find out which node the error address belongs to. This may be
	 * different from the node that detected the error.
	 */
	err->src_mci = find_mc_by_sys_addr(mci, sys_addr);
	if (!err->src_mci) {
		amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
			     (unsigned long)sys_addr);
		err->err_code = ERR_NODE;
		return;
	}

	/* Now map the sys_addr to a CSROW */
	err->csrow = sys_addr_to_csrow(err->src_mci, sys_addr);
	if (err->csrow < 0) {
		err->err_code = ERR_CSROW;
		return;
	}

	/* CHIPKILL enabled */
	if (pvt->nbcfg & NBCFG_CHIPKILL) {
		err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
		if (err->channel < 0) {
			/*
			 * Syndrome didn't map, so we don't know which of the
			 * 2 DIMMs is in error. So we need to ID 'both' of them
			 * as suspect.
			 */
			amd64_mc_warn(err->src_mci, "unknown syndrome 0x%04x - "
				      "possible error reporting race\n",
				      err->syndrome);
			err->err_code = ERR_CHANNEL;
			return;
		}
	} else {
		/*
		 * non-chipkill ecc mode
		 *
		 * The k8 documentation is unclear about how to determine the
		 * channel number when using non-chipkill memory. This method
		 * was obtained from email communication with someone at AMD.
		 * (Wish the email was placed in this comment - norsk)
		 */
		err->channel = ((sys_addr & BIT(3)) != 0);
	}
}
static int ddr2_cs_size(unsigned i, bool dct_width)
{
	unsigned shift = 0;

	if (i <= 2)
		shift = i;
	else if (!(i & 0x1))
		shift = i >> 1;
	else
		shift = (i + 1) >> 1;

	return 128 << (shift + !!dct_width);
}

static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				  unsigned cs_mode, int cs_mask_nr)
{
	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;

	if (pvt->ext_model >= K8_REV_F) {
		WARN_ON(cs_mode > 11);
		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
	}
	else if (pvt->ext_model >= K8_REV_D) {
		unsigned diff;
		WARN_ON(cs_mode > 10);

		/*
		 * the below calculation, besides trying to win an obfuscated C
		 * contest, maps cs_mode values to DIMM chip select sizes. The
		 * mappings are:
		 *
		 * cs_mode	CS size (mb)
		 * =======	============
		 * 0		32
		 * 1		64
		 * 2		128
		 * 3		128
		 * 4		256
		 * 5		512
		 * 6		256
		 * 7		512
		 * 8		1024
		 * 9		1024
		 * 10		2048
		 *
		 * Basically, it calculates a value with which to shift the
		 * smallest CS size of 32MB.
		 *
		 * ddr[23]_cs_size have a similar purpose.
		 */
		diff = cs_mode/3 + (unsigned)(cs_mode > 5);

		return 32 << (cs_mode - diff);
	}
	else {
		WARN_ON(cs_mode > 6);
		return 32 << cs_mode;
	}
}
static int ddr3_cs_size(unsigned i, bool dct_width)
{
	unsigned shift = 0;
	int cs_size = 0;

	if (i == 0 || i == 3 || i == 4)
		cs_size = -1;
	else if (i <= 2)
		shift = i;
	else if (i == 12)
		shift = 7;
	else if (!(i & 0x1))
		shift = i >> 1;
	else
		shift = (i + 1) >> 1;

	if (cs_size != -1)
		cs_size = (128 * (1 << !!dct_width)) << shift;

	return cs_size;
}

static int ddr3_lrdimm_cs_size(unsigned i, unsigned rank_multiply)
{
	unsigned shift = 0;
	int cs_size = 0;

	if (i < 4 || i == 6)
		cs_size = -1;
	else if (i == 12)
		shift = 7;
	else if (!(i & 0x1))
		shift = i >> 1;
	else
		shift = (i + 1) >> 1;

	if (cs_size != -1)
		cs_size = rank_multiply * (128 << shift);

	return cs_size;
}

static int ddr4_cs_size(unsigned i)
{
	int cs_size = 0;

	if (i == 0)
		cs_size = -1;
	else if (i == 1)
		cs_size = 1024;
	else
		/* Min cs_size = 1G */
		cs_size = 1024 * (1 << (i >> 1));

	return cs_size;
}
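/*
 * Example (illustrative): ddr3_cs_size(5, true) uses shift = (5 + 1) >> 1 = 3
 * on a 128-bit wide DCT, giving (128 * 2) << 3 = 2048 MB, while
 * ddr4_cs_size(5) gives 1024 * (1 << 2) = 4096 MB.
 */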
static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				   unsigned cs_mode, int cs_mask_nr)
{
	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;

	WARN_ON(cs_mode > 11);

	if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
		return ddr3_cs_size(cs_mode, dclr & WIDTH_128);
	else
		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
}

/*
 * F15h supports only 64bit DCT interfaces
 */
static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				   unsigned cs_mode, int cs_mask_nr)
{
	WARN_ON(cs_mode > 12);

	return ddr3_cs_size(cs_mode, false);
}

/* F15h M60h supports DDR4 mapping as well. */
static int f15_m60h_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
					unsigned cs_mode, int cs_mask_nr)
{
	int cs_size;
	u32 dcsm = pvt->csels[dct].csmasks[cs_mask_nr];

	WARN_ON(cs_mode > 12);

	if (pvt->dram_type == MEM_DDR4) {
		if (cs_mode > 9)
			return -1;

		cs_size = ddr4_cs_size(cs_mode);
	} else if (pvt->dram_type == MEM_LRDDR3) {
		unsigned rank_multiply = dcsm & 0xf;

		if (rank_multiply == 3)
			rank_multiply = 4;
		cs_size = ddr3_lrdimm_cs_size(cs_mode, rank_multiply);
	} else {
		/* Minimum cs size is 512MB for F15h M60h */
		if (cs_mode > 10)
			return -1;

		cs_size = ddr3_cs_size(cs_mode, false);
	}

	return cs_size;
}

/*
 * F16h and F15h model 30h have only limited cs_modes.
 */
static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				   unsigned cs_mode, int cs_mask_nr)
{
	WARN_ON(cs_mode > 12);

	if (cs_mode == 6 || cs_mode == 8 ||
	    cs_mode == 9 || cs_mode == 12)
		return -1;
	else
		return ddr3_cs_size(cs_mode, false);
}
static void read_dram_ctl_register(struct amd64_pvt *pvt)
{

	if (pvt->fam == 0xf)
		return;

	if (!amd64_read_pci_cfg(pvt->F2, DCT_SEL_LO, &pvt->dct_sel_lo)) {
		edac_dbg(0, "F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
			 pvt->dct_sel_lo, dct_sel_baseaddr(pvt));

		edac_dbg(0, "  DCTs operate in %s mode\n",
			 (dct_ganging_enabled(pvt) ? "ganged" : "unganged"));

		if (!dct_ganging_enabled(pvt))
			edac_dbg(0, "  Address range split per DCT: %s\n",
				 (dct_high_range_enabled(pvt) ? "yes" : "no"));

		edac_dbg(0, "  data interleave for ECC: %s, DRAM cleared since last warm reset: %s\n",
			 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
			 (dct_memory_cleared(pvt) ? "yes" : "no"));

		edac_dbg(0, "  channel interleave: %s, "
			 "interleave bits selector: 0x%x\n",
			 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
			 dct_sel_interleave_addr(pvt));
	}

	amd64_read_pci_cfg(pvt->F2, DCT_SEL_HI, &pvt->dct_sel_hi);
}
/*
 * Determine channel (DCT) based on the interleaving mode (see F15h M30h BKDG,
 * 2.10.12 Memory Interleaving Modes).
 */
static u8 f15_m30h_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
				     u8 intlv_en, int num_dcts_intlv,
				     u32 dct_sel)
{
	u8 channel = 0;
	u8 select;

	if (!(intlv_en))
		return (u8)(dct_sel);

	if (num_dcts_intlv == 2) {
		select = (sys_addr >> 8) & 0x3;
		channel = select ? 0x3 : 0;
	} else if (num_dcts_intlv == 4) {
		u8 intlv_addr = dct_sel_interleave_addr(pvt);
		switch (intlv_addr) {
		case 0x4:
			channel = (sys_addr >> 8) & 0x3;
			break;
		case 0x5:
			channel = (sys_addr >> 9) & 0x3;
			break;
		}
	}
	return channel;
}
/*
 * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9 Memory
 * Interleaving Modes.
 */
static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
				bool hi_range_sel, u8 intlv_en)
{
	u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1;

	if (dct_ganging_enabled(pvt))
		return 0;

	if (hi_range_sel)
		return dct_sel_high;

	/*
	 * see F2x110[DctSelIntLvAddr] - channel interleave mode
	 */
	if (dct_interleave_enabled(pvt)) {
		u8 intlv_addr = dct_sel_interleave_addr(pvt);

		/* return DCT select function: 0=DCT0, 1=DCT1 */
		if (!intlv_addr)
			return sys_addr >> 6 & 1;

		if (intlv_addr & 0x2) {
			u8 shift = intlv_addr & 0x1 ? 9 : 6;
			u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) & 1;

			return ((sys_addr >> shift) & 1) ^ temp;
		}

		if (intlv_addr & 0x4) {
			u8 shift = intlv_addr & 0x1 ? 9 : 8;

			return (sys_addr >> shift) & 1;
		}

		return (sys_addr >> (12 + hweight8(intlv_en))) & 1;
	}

	if (dct_high_range_enabled(pvt))
		return ~dct_sel_high & 1;

	return 0;
}
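/*
 * Example (illustrative): with DctSelIntLvAddr = 0x2 (hash on bit 6),
 * shift = 6 and sys_addr 0x40 with address bits 20:16 clear gives temp = 0
 * and bit 6 = 1, selecting DCT1; setting any single one of bits 20:16 flips
 * the parity hash (temp = 1) and routes the same line to DCT0 instead.
 */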
/* Convert the sys_addr to the normalized DCT address */
static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
				 u64 sys_addr, bool hi_rng,
				 u32 dct_sel_base_addr)
{
	u64 chan_off;
	u64 dram_base		= get_dram_base(pvt, range);
	u64 hole_off		= f10_dhar_offset(pvt);
	u64 dct_sel_base_off	= (u64)(pvt->dct_sel_hi & 0xFFFFFC00) << 16;

	if (hi_rng) {
		/*
		 * if
		 * base address of high range is below 4Gb
		 * (bits [47:27] at [31:11])
		 * DRAM address space on this DCT is hoisted above 4Gb	&&
		 * sys_addr > 4Gb
		 *
		 *	remove hole offset from sys_addr
		 * else
		 *	remove high range offset from sys_addr
		 */
		if ((!(dct_sel_base_addr >> 16) ||
		     dct_sel_base_addr < dhar_base(pvt)) &&
		    dhar_valid(pvt) &&
		    (sys_addr >= BIT_64(32)))
			chan_off = hole_off;
		else
			chan_off = dct_sel_base_off;
	} else {
		/*
		 * if
		 * we have a valid hole		&&
		 * sys_addr > 4Gb
		 *
		 *	remove hole
		 * else
		 *	remove dram base to normalize to DCT address
		 */
		if (dhar_valid(pvt) && (sys_addr >= BIT_64(32)))
			chan_off = hole_off;
		else
			chan_off = dram_base;
	}

	return (sys_addr & GENMASK_ULL(47, 6)) - (chan_off & GENMASK_ULL(47, 23));
}
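/*
 * Example (illustrative): for a low-range access with no memory hole and
 * dram_base = 0x100000000, sys_addr 0x123456789 is first masked to
 * 0x123456780 (bits 47:6) and then normalized to 0x123456780 - 0x100000000
 * = 0x23456780 as the DCT address.
 */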
/*
 * checks if the csrow passed in is marked as SPARED, if so returns the new
 * spare row
 */
static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
{
	int tmp_cs;

	if (online_spare_swap_done(pvt, dct) &&
	    csrow == online_spare_bad_dramcs(pvt, dct)) {

		for_each_chip_select(tmp_cs, dct, pvt) {
			if (chip_select_base(tmp_cs, dct, pvt) & 0x2) {
				csrow = tmp_cs;
				break;
			}
		}
	}
	return csrow;
}
/*
 * Iterate over the DRAM DCT "base" and "mask" registers looking for a
 * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
 *
 * Return:
 *	-EINVAL:  NOT FOUND
 *	0..csrow = Chip-Select Row
 */
static int f1x_lookup_addr_in_dct(u64 in_addr, u8 nid, u8 dct)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;
	u64 cs_base, cs_mask;
	int cs_found = -EINVAL;
	int csrow;

	mci = edac_mc_find(nid);
	if (!mci)
		return cs_found;

	pvt = mci->pvt_info;

	edac_dbg(1, "input addr: 0x%llx, DCT: %d\n", in_addr, dct);

	for_each_chip_select(csrow, dct, pvt) {
		if (!csrow_enabled(csrow, dct, pvt))
			continue;

		get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);

		edac_dbg(1, "    CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
			 csrow, cs_base, cs_mask);

		cs_mask = ~cs_mask;

		edac_dbg(1, "    (InputAddr & ~CSMask)=0x%llx (CSBase & ~CSMask)=0x%llx\n",
			 (in_addr & cs_mask), (cs_base & cs_mask));

		if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
			if (pvt->fam == 0x15 && pvt->model >= 0x30) {
				cs_found = csrow;
				break;
			}
			cs_found = f10_process_possible_spare(pvt, dct, csrow);

			edac_dbg(1, " MATCH csrow=%d\n", cs_found);
			break;
		}
	}
	return cs_found;
}
/*
 * See F2x10C. Non-interleaved graphics framebuffer memory under the 16G is
 * swapped with a region located at the bottom of memory so that the GPU can use
 * the interleaved region and thus two channels.
 */
static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
{
	u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr;

	if (pvt->fam == 0x10) {
		/* only revC3 and revE have that feature */
		if (pvt->model < 4 || (pvt->model < 0xa && pvt->stepping < 3))
			return sys_addr;
	}

	amd64_read_pci_cfg(pvt->F2, SWAP_INTLV_REG, &swap_reg);

	if (!(swap_reg & 0x1))
		return sys_addr;

	swap_base	= (swap_reg >> 3) & 0x7f;
	swap_limit	= (swap_reg >> 11) & 0x7f;
	rgn_size	= (swap_reg >> 20) & 0x7f;
	tmp_addr	= sys_addr >> 27;

	if (!(sys_addr >> 34) &&
	    (((tmp_addr >= swap_base) &&
	     (tmp_addr <= swap_limit)) ||
	     (tmp_addr < rgn_size)))
		return sys_addr ^ (u64)swap_base << 27;

	return sys_addr;
}
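/*
 * Example (illustrative): with swap_base = 0x40, the framebuffer window
 * starts at 0x40 << 27 = 0x200000000 (8 GB, under the 16 GB limit). An
 * address inside that window carries the pattern 0x40 in bits 33:27, so the
 * XOR clears those bits and relocates the access to the bottom of memory;
 * a bottom-of-memory address (tmp_addr < rgn_size) gets the same XOR applied
 * in the opposite direction, into the window.
 */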
2282 /* For a given @dram_range, check if @sys_addr falls within it. */
2283 static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
2284 u64 sys_addr, int *chan_sel)
2286 int cs_found = -EINVAL;
2290 bool high_range = false;
2292 u8 node_id = dram_dst_node(pvt, range);
2293 u8 intlv_en = dram_intlv_en(pvt, range);
2294 u32 intlv_sel = dram_intlv_sel(pvt, range);
2296 edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
2297 range, sys_addr, get_dram_limit(pvt, range));
2299 if (dhar_valid(pvt) &&
2300 dhar_base(pvt) <= sys_addr &&
2301 sys_addr < BIT_64(32)) {
2302 amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
2307 if (intlv_en && (intlv_sel != ((sys_addr >> 12) & intlv_en)))
2310 sys_addr = f1x_swap_interleaved_region(pvt, sys_addr);
2312 dct_sel_base = dct_sel_baseaddr(pvt);
2315 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
2316 * select between DCT0 and DCT1.
2318 if (dct_high_range_enabled(pvt) &&
2319 !dct_ganging_enabled(pvt) &&
2320 ((sys_addr >> 27) >= (dct_sel_base >> 11)))
2323 channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en);
2325 chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr,
2326 high_range, dct_sel_base);
2328 /* Remove node interleaving, see F1x120 */
2330 chan_addr = ((chan_addr >> (12 + hweight8(intlv_en))) << 12) |
2331 (chan_addr & 0xfff);
2333 /* remove channel interleave */
2334 if (dct_interleave_enabled(pvt) &&
2335 !dct_high_range_enabled(pvt) &&
2336 !dct_ganging_enabled(pvt)) {
		if (dct_sel_interleave_addr(pvt) != 1) {
			if (dct_sel_interleave_addr(pvt) == 0x3)
				/* hash 9 */
				chan_addr = ((chan_addr >> 10) << 9) |
					     (chan_addr & 0x1ff);
			else
				/* A[6] or hash 6 */
				chan_addr = ((chan_addr >> 7) << 6) |
					     (chan_addr & 0x3f);
		} else
			/* A[12] */
			chan_addr = ((chan_addr >> 13) << 12) |
				     (chan_addr & 0xfff);
	}
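	/*
	 * In each case one interleave-select bit (A[12], A[9] for hash 9,
	 * or A[6]) is squeezed out of chan_addr: bits above it shift down
	 * by one, bits below it are preserved.
	 */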
2353 edac_dbg(1, " Normalized DCT addr: 0x%llx\n", chan_addr);
2355 cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel);
	if (cs_found >= 0)
		*chan_sel = channel;

	return cs_found;
}
2363 static int f15_m30h_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
					u64 sys_addr, int *chan_sel)
{
2366 int cs_found = -EINVAL;
2367 int num_dcts_intlv = 0;
2368 u64 chan_addr, chan_offset;
2369 u64 dct_base, dct_limit;
2370 u32 dct_cont_base_reg, dct_cont_limit_reg, tmp;
2371 u8 channel, alias_channel, leg_mmio_hole, dct_sel, dct_offset_en;
2373 u64 dhar_offset = f10_dhar_offset(pvt);
2374 u8 intlv_addr = dct_sel_interleave_addr(pvt);
2375 u8 node_id = dram_dst_node(pvt, range);
2376 u8 intlv_en = dram_intlv_en(pvt, range);
2378 amd64_read_pci_cfg(pvt->F1, DRAM_CONT_BASE, &dct_cont_base_reg);
2379 amd64_read_pci_cfg(pvt->F1, DRAM_CONT_LIMIT, &dct_cont_limit_reg);
2381 dct_offset_en = (u8) ((dct_cont_base_reg >> 3) & BIT(0));
2382 dct_sel = (u8) ((dct_cont_base_reg >> 4) & 0x7);
2384 edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
2385 range, sys_addr, get_dram_limit(pvt, range));
	if (!(get_dram_base(pvt, range)  <= sys_addr) &&
	    !(get_dram_limit(pvt, range) >= sys_addr))
		return -EINVAL;
2391 if (dhar_valid(pvt) &&
2392 dhar_base(pvt) <= sys_addr &&
2393 sys_addr < BIT_64(32)) {
		amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
			   sys_addr);
		return -EINVAL;
	}
2399 /* Verify sys_addr is within DCT Range. */
2400 dct_base = (u64) dct_sel_baseaddr(pvt);
2401 dct_limit = (dct_cont_limit_reg >> 11) & 0x1FFF;
2403 if (!(dct_cont_base_reg & BIT(0)) &&
2404 !(dct_base <= (sys_addr >> 27) &&
	      dct_limit >= (sys_addr >> 27)))
		return -EINVAL;
2408 /* Verify number of dct's that participate in channel interleaving. */
2409 num_dcts_intlv = (int) hweight8(intlv_en);
	if (!(num_dcts_intlv % 2 == 0) || (num_dcts_intlv > 4))
		return -EINVAL;
2414 if (pvt->model >= 0x60)
2415 channel = f1x_determine_channel(pvt, sys_addr, false, intlv_en);
	else
		channel = f15_m30h_determine_channel(pvt, sys_addr, intlv_en,
2418 num_dcts_intlv, dct_sel);
	/* Verify we stay within the MAX number of channels allowed */
	if (channel > 3)
		return -EINVAL;
2424 leg_mmio_hole = (u8) (dct_cont_base_reg >> 1 & BIT(0));
2426 /* Get normalized DCT addr */
2427 if (leg_mmio_hole && (sys_addr >= BIT_64(32)))
2428 chan_offset = dhar_offset;
	else
		chan_offset = dct_base << 27;
2432 chan_addr = sys_addr - chan_offset;
2434 /* remove channel interleave */
2435 if (num_dcts_intlv == 2) {
		if (intlv_addr == 0x4)
			chan_addr = ((chan_addr >> 9) << 8) |
					(chan_addr & 0xff);
		else if (intlv_addr == 0x5)
			chan_addr = ((chan_addr >> 10) << 9) |
					(chan_addr & 0x1ff);
		else
			return -EINVAL;

	} else if (num_dcts_intlv == 4) {
		if (intlv_addr == 0x4)
			chan_addr = ((chan_addr >> 10) << 8) |
					(chan_addr & 0xff);
		else if (intlv_addr == 0x5)
			chan_addr = ((chan_addr >> 11) << 9) |
					(chan_addr & 0x1ff);
		else
			return -EINVAL;
	}
2456 if (dct_offset_en) {
2457 amd64_read_pci_cfg(pvt->F1,
				   DRAM_CONT_HIGH_OFF + (int)channel * 4,
				   &tmp);
		chan_addr += (u64)((tmp >> 11) & 0xfff) << 27;
	}
2463 f15h_select_dct(pvt, channel);
2465 edac_dbg(1, " Normalized DCT addr: 0x%llx\n", chan_addr);
	/*
	 * Find Chip select:
	 * if channel = 3, then alias it to 1. This is because, in F15 M30h,
2470 * there is support for 4 DCT's, but only 2 are currently functional.
2471 * They are DCT0 and DCT3. But we have read all registers of DCT3 into
2472 * pvt->csels[1]. So we need to use '1' here to get correct info.
	 * Refer F15 M30h BKDG Section 2.10 and 2.10.3 for clarifications.
	 */
2475 alias_channel = (channel == 3) ? 1 : channel;
2477 cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, alias_channel);
	if (cs_found >= 0)
		*chan_sel = alias_channel;

	return cs_found;
}
static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt,
				       u64 sys_addr,
				       int *chan_sel)
{
	int cs_found = -EINVAL;
	unsigned range;
2492 for (range = 0; range < DRAM_RANGES; range++) {
		if (!dram_rw(pvt, range))
			continue;
2496 if (pvt->fam == 0x15 && pvt->model >= 0x30)
			cs_found = f15_m30h_match_to_this_node(pvt, range,
							       sys_addr,
							       chan_sel);
2501 else if ((get_dram_base(pvt, range) <= sys_addr) &&
2502 (get_dram_limit(pvt, range) >= sys_addr)) {
2503 cs_found = f1x_match_to_this_node(pvt, range,
							  sys_addr, chan_sel);
			if (cs_found >= 0)
				break;
		}
	}
	return cs_found;
}
2513 * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
2514 * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
 * The @sys_addr is usually an error address received from the hardware
 * (MCX_ADDR).
 */
2519 static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
2520 struct err_info *err)
2522 struct amd64_pvt *pvt = mci->pvt_info;
2524 error_address_to_page_and_offset(sys_addr, err);
2526 err->csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &err->channel);
2527 if (err->csrow < 0) {
		err->err_code = ERR_CSROW;
		return;
	}
2533 * We need the syndromes for channel detection only when we're
 * ganged. Otherwise @chan should already contain the channel at
 * this point.
 */
2537 if (dct_ganging_enabled(pvt))
2538 err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
2542 * These are tables of eigenvectors (one per line) which can be used for the
2543 * construction of the syndrome tables. The modified syndrome search algorithm
2544 * uses those to find the symbol in error and thus the DIMM.
 * Algorithm courtesy of Ross LaFetra from AMD.
 */
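/*
 * Layout note (derived from decode_syndrome() below): each group of v_dim
 * consecutive entries forms the eigenvector set for one error symbol. The
 * decoder tries, symbol by symbol, to cancel the syndrome to zero using
 * that symbol's vectors; the first symbol that succeeds identifies the
 * symbol in error and, via map_err_sym_to_channel(), the channel.
 */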
2548 static const u16 x4_vectors[] = {
2549 0x2f57, 0x1afe, 0x66cc, 0xdd88,
2550 0x11eb, 0x3396, 0x7f4c, 0xeac8,
2551 0x0001, 0x0002, 0x0004, 0x0008,
2552 0x1013, 0x3032, 0x4044, 0x8088,
2553 0x106b, 0x30d6, 0x70fc, 0xe0a8,
2554 0x4857, 0xc4fe, 0x13cc, 0x3288,
2555 0x1ac5, 0x2f4a, 0x5394, 0xa1e8,
2556 0x1f39, 0x251e, 0xbd6c, 0x6bd8,
2557 0x15c1, 0x2a42, 0x89ac, 0x4758,
2558 0x2b03, 0x1602, 0x4f0c, 0xca08,
2559 0x1f07, 0x3a0e, 0x6b04, 0xbd08,
2560 0x8ba7, 0x465e, 0x244c, 0x1cc8,
2561 0x2b87, 0x164e, 0x642c, 0xdc18,
2562 0x40b9, 0x80de, 0x1094, 0x20e8,
2563 0x27db, 0x1eb6, 0x9dac, 0x7b58,
2564 0x11c1, 0x2242, 0x84ac, 0x4c58,
2565 0x1be5, 0x2d7a, 0x5e34, 0xa718,
2566 0x4b39, 0x8d1e, 0x14b4, 0x28d8,
2567 0x4c97, 0xc87e, 0x11fc, 0x33a8,
2568 0x8e97, 0x497e, 0x2ffc, 0x1aa8,
2569 0x16b3, 0x3d62, 0x4f34, 0x8518,
2570 0x1e2f, 0x391a, 0x5cac, 0xf858,
2571 0x1d9f, 0x3b7a, 0x572c, 0xfe18,
2572 0x15f5, 0x2a5a, 0x5264, 0xa3b8,
2573 0x1dbb, 0x3b66, 0x715c, 0xe3f8,
2574 0x4397, 0xc27e, 0x17fc, 0x3ea8,
2575 0x1617, 0x3d3e, 0x6464, 0xb8b8,
2576 0x23ff, 0x12aa, 0xab6c, 0x56d8,
2577 0x2dfb, 0x1ba6, 0x913c, 0x7328,
2578 0x185d, 0x2ca6, 0x7914, 0x9e28,
2579 0x171b, 0x3e36, 0x7d7c, 0xebe8,
2580 0x4199, 0x82ee, 0x19f4, 0x2e58,
2581 0x4807, 0xc40e, 0x130c, 0x3208,
2582 0x1905, 0x2e0a, 0x5804, 0xac08,
2583 0x213f, 0x132a, 0xadfc, 0x5ba8,
2584 0x19a9, 0x2efe, 0xb5cc, 0x6f88,
2587 static const u16 x8_vectors[] = {
2588 0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
2589 0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
2590 0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
2591 0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80,
2592 0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780,
2593 0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080,
2594 0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080,
2595 0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080,
2596 0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80,
2597 0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580,
2598 0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880,
2599 0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280,
2600 0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180,
2601 0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580,
2602 0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280,
2603 0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180,
2604 0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080,
2605 0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
2606 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
static int decode_syndrome(u16 syndrome, const u16 *vectors, unsigned num_vecs,
			   unsigned v_dim)
{
2612 unsigned int i, err_sym;
	for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
		u16 s = syndrome;
2616 unsigned v_idx = err_sym * v_dim;
2617 unsigned v_end = (err_sym + 1) * v_dim;
2619 /* walk over all 16 bits of the syndrome */
2620 for (i = 1; i < (1U << 16); i <<= 1) {
2622 /* if bit is set in that eigenvector... */
2623 if (v_idx < v_end && vectors[v_idx] & i) {
2624 u16 ev_comp = vectors[v_idx++];
				/* ... and bit set in the modified syndrome, */
				if (s & i) {
					/* remove it. */
					s ^= ev_comp;

					if (!s)
						return err_sym;
				}

			} else if (s & i)
				/* can't get to zero, move to next symbol */
				break;
		}
	}
	edac_dbg(0, "syndrome(%x) not found\n", syndrome);

	return -1;
}
static int map_err_sym_to_channel(int err_sym, int sym_size)
{
	if (sym_size == 4)
		switch (err_sym) {
		case 0x20:
		case 0x21:
			return 0;
		case 0x22:
		case 0x23:
			return 1;
		default:
			return err_sym >> 4;
		}
	/* x8 symbols */
	else
		switch (err_sym) {
		/* imaginary bits not in a DIMM */
		case 0x10:
			WARN(1, KERN_ERR "Invalid error symbol: 0x%x\n",
			     err_sym);
			return -1;
		case 0x11:
			return 0;
		case 0x12:
			return 1;
		default:
			return err_sym >> 3;
		}
}
2676 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	int err_sym = -1;
2681 if (pvt->ecc_sym_sz == 8)
2682 err_sym = decode_syndrome(syndrome, x8_vectors,
					  ARRAY_SIZE(x8_vectors),
					  pvt->ecc_sym_sz);
2685 else if (pvt->ecc_sym_sz == 4)
2686 err_sym = decode_syndrome(syndrome, x4_vectors,
					  ARRAY_SIZE(x4_vectors),
					  pvt->ecc_sym_sz);
	else {
		amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz);
		return err_sym;
	}
	return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz);
}
static void __log_ecc_error(struct mem_ctl_info *mci, struct err_info *err,
			    u8 ecc_type)
{
	enum hw_event_mc_err_type err_type;
	const char *string;

	if (ecc_type == 2)
		err_type = HW_EVENT_ERR_CORRECTED;
2705 else if (ecc_type == 1)
2706 err_type = HW_EVENT_ERR_UNCORRECTED;
2707 else if (ecc_type == 3)
2708 err_type = HW_EVENT_ERR_DEFERRED;
	else {
		WARN(1, "Something is rotten in the state of Denmark.\n");
		return;
	}
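	/*
	 * Note: ecc_type is taken from MCA_STATUS[46:45] by the decoders
	 * below, so 2 denotes a corrected (CECC) error, 1 an uncorrected
	 * (UECC) one, and 3 is used for deferred errors.
	 */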
	switch (err->err_code) {
	case DECODE_OK:
		string = "";
		break;
	case ERR_NODE:
		string = "Failed to map error addr to a node";
		break;
	case ERR_CSROW:
		string = "Failed to map error addr to a csrow";
		break;
	case ERR_CHANNEL:
		string = "Unknown syndrome - possible error reporting race";
		break;
	case ERR_SYND:
		string = "MCA_SYND not valid - unknown syndrome and csrow";
		break;
	case ERR_NORM_ADDR:
		string = "Cannot decode normalized address";
		break;
	default:
		string = "WTF error";
		break;
	}
2738 edac_mc_handle_error(err_type, mci, 1,
2739 err->page, err->offset, err->syndrome,
			     err->csrow, err->channel, -1,
			     string, "");
}
static inline void decode_bus_error(int node_id, struct mce *m)
{
2746 struct mem_ctl_info *mci;
2747 struct amd64_pvt *pvt;
2748 u8 ecc_type = (m->status >> 45) & 0x3;
2749 u8 xec = XEC(m->status, 0x1f);
2750 u16 ec = EC(m->status);
	u64 sys_addr;
	struct err_info err;
	mci = edac_mc_find(node_id);
	if (!mci)
		return;
2758 pvt = mci->pvt_info;
2760 /* Bail out early if this was an 'observed' error */
	if (PP(ec) == NBSL_PP_OBS)
		return;
2764 /* Do only ECC errors */
	if (xec && xec != F10_NBSL_EXT_ERR_ECC)
		return;
2768 memset(&err, 0, sizeof(err));
2770 sys_addr = get_error_address(pvt, m);
	if (ecc_type == 2)
		err.syndrome = extract_syndrome(m->status);
2775 pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, &err);
2777 __log_ecc_error(mci, &err, ecc_type);
}

/*
 * To find the UMC channel represented by this bank we need to match on its
 * instance_id. The instance_id of a bank is held in the lower 32 bits of
 * its IPID.
 *
 * Currently, we can derive the channel number by looking at the 6th nibble
 * in the instance_id. For example, instance_id=0xYXXXXX, where Y is the
 * channel number.
 *
 * For DRAM ECC errors, the Chip Select number is given in bits [2:0] of
 * the MCA_SYND[ErrorInformation] field.
 */
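/*
 * E.g. (illustrative): an instance_id of 0x150000 has 0x1 in its 6th
 * nibble, so (0x150000 >> 20) == 0x1 selects channel 1.
 */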
2792 static void umc_get_err_info(struct mce *m, struct err_info *err)
2794 err->channel = (m->ipid & GENMASK(31, 0)) >> 20;
2795 err->csrow = m->synd & 0x7;
static void decode_umc_error(int node_id, struct mce *m)
{
2800 u8 ecc_type = (m->status >> 45) & 0x3;
2801 struct mem_ctl_info *mci;
2802 unsigned long sys_addr;
2803 struct amd64_pvt *pvt;
2804 struct atl_err a_err;
2805 struct err_info err;
2807 node_id = fixup_node_id(node_id, m);
	mci = edac_mc_find(node_id);
	if (!mci)
		return;
2813 pvt = mci->pvt_info;
2815 memset(&err, 0, sizeof(err));
	if (m->status & MCI_STATUS_DEFERRED)
		ecc_type = 3;
2820 if (!(m->status & MCI_STATUS_SYNDV)) {
		err.err_code = ERR_SYND;
		goto log_error;
	}
2825 if (ecc_type == 2) {
2826 u8 length = (m->synd >> 18) & 0x3f;
		if (length)
			err.syndrome = (m->synd >> 32) & GENMASK(length - 1, 0);
		else
			err.err_code = ERR_CHANNEL;
	}
2834 pvt->ops->get_err_info(m, &err);
2836 a_err.addr = m->addr;
2837 a_err.ipid = m->ipid;
2838 a_err.cpu = m->extcpu;
2840 sys_addr = amd_convert_umc_mca_addr_to_sys_addr(&a_err);
2841 if (IS_ERR_VALUE(sys_addr)) {
		err.err_code = ERR_NORM_ADDR;
		goto log_error;
	}
2846 error_address_to_page_and_offset(sys_addr, &err);
log_error:
	__log_ecc_error(mci, &err, ecc_type);
}

/*
2853 * Use pvt->F3 which contains the F3 CPU PCI device to get the related
2854 * F1 (AddrMap) and F2 (Dct) devices. Return negative value on error.
static int
reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 pci_id1, u16 pci_id2)
{
2859 /* Reserve the ADDRESS MAP Device */
2860 pvt->F1 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
	if (!pvt->F1) {
		edac_dbg(1, "F1 not found: device 0x%x\n", pci_id1);
		return -ENODEV;
	}
2866 /* Reserve the DCT Device */
	pvt->F2 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3);
	if (!pvt->F2) {
		pci_dev_put(pvt->F1);
		pvt->F1 = NULL;

		edac_dbg(1, "F2 not found: device 0x%x\n", pci_id2);
		return -ENODEV;
	}

	if (!pci_ctl_dev)
		pci_ctl_dev = &pvt->F2->dev;
2879 edac_dbg(1, "F1: %s\n", pci_name(pvt->F1));
2880 edac_dbg(1, "F2: %s\n", pci_name(pvt->F2));
	edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));

	return 0;
}
2886 static void determine_ecc_sym_sz(struct amd64_pvt *pvt)
2888 pvt->ecc_sym_sz = 4;
	if (pvt->fam >= 0x10) {
		u32 tmp;
2893 amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
2894 /* F16h has only DCT0, so no need to read dbam1. */
2895 if (pvt->fam != 0x16)
2896 amd64_read_dct_pci_cfg(pvt, 1, DBAM0, &pvt->dbam1);
2898 /* F10h, revD and later can do x8 ECC too. */
2899 if ((pvt->fam > 0x10 || pvt->model > 7) && tmp & BIT(25))
2900 pvt->ecc_sym_sz = 8;
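	/*
	 * Note: the symbol size set here selects between the x4_vectors[]
	 * and x8_vectors[] tables when syndromes are decoded in
	 * get_channel_from_ecc_syndrome().
	 */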
2905 * Retrieve the hardware registers of the memory controller.
2907 static void umc_read_mc_regs(struct amd64_pvt *pvt)
2909 u8 nid = pvt->mc_node_id;
	struct amd64_umc *umc;
	u32 i, umc_base;

	/* Read registers from each UMC */
	for_each_umc(i) {
		umc_base = get_umc_base(i);
		umc = &pvt->umc[i];
2919 amd_smn_read(nid, umc_base + get_umc_reg(pvt, UMCCH_DIMM_CFG), &umc->dimm_cfg);
2920 amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &umc->umc_cfg);
2921 amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &umc->sdp_ctrl);
2922 amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &umc->ecc_ctrl);
		amd_smn_read(nid, umc_base + UMCCH_UMC_CAP_HI, &umc->umc_cap_hi);
	}
}
2928 * Retrieve the hardware registers of the memory controller (this includes the
 * 'Address Map' and 'Misc' device regs)
 */
static void dct_read_mc_regs(struct amd64_pvt *pvt)
{
	unsigned int range;
	u64 msr_val;
2937 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
2938 * those are Read-As-Zero.
2940 rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
2941 edac_dbg(0, " TOP_MEM: 0x%016llx\n", pvt->top_mem);
2943 /* Check first whether TOP_MEM2 is enabled: */
2944 rdmsrl(MSR_AMD64_SYSCFG, msr_val);
2945 if (msr_val & BIT(21)) {
2946 rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
2947 edac_dbg(0, " TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
	} else {
		edac_dbg(0, " TOP_MEM2 disabled\n");
	}
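	/*
	 * Note: TOP_MEM bounds DRAM below 4GB; TOP_MEM2 bounds DRAM above
	 * 4GB and is meaningful only while SYSCFG bit 21 is set.
	 */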
2952 amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);
2954 read_dram_ctl_register(pvt);
	for (range = 0; range < DRAM_RANGES; range++) {
		u8 rw;
2959 /* read settings for this DRAM range */
2960 read_dram_base_limit_regs(pvt, range);
		rw = dram_rw(pvt, range);
		if (!rw)
			continue;
2966 edac_dbg(1, " DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
			 range,
			 get_dram_base(pvt, range),
2969 get_dram_limit(pvt, range));
2971 edac_dbg(1, " IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
2972 dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
2973 (rw & 0x1) ? "R" : "-",
2974 (rw & 0x2) ? "W" : "-",
2975 dram_intlv_sel(pvt, range),
2976 dram_dst_node(pvt, range));
2979 amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
2980 amd64_read_dct_pci_cfg(pvt, 0, DBAM0, &pvt->dbam0);
2982 amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);
2984 amd64_read_dct_pci_cfg(pvt, 0, DCLR0, &pvt->dclr0);
2985 amd64_read_dct_pci_cfg(pvt, 0, DCHR0, &pvt->dchr0);
2987 if (!dct_ganging_enabled(pvt)) {
2988 amd64_read_dct_pci_cfg(pvt, 1, DCLR0, &pvt->dclr1);
2989 amd64_read_dct_pci_cfg(pvt, 1, DCHR0, &pvt->dchr1);
	determine_ecc_sym_sz(pvt);
}
/*
 * NOTE: CPU Revision Dependent code
 * Input:
 *	@csrow_nr ChipSelect Row Number (0..NUM_CHIPSELECTS-1)
3000 * k8 private pointer to -->
3001 * DRAM Bank Address mapping register
3003 * DCL register where dual_channel_active is
 * The DBAM register consists of 4 sets of 4 bits; each set defines:
 *
 * Bits:	CSROWs
 * 0-3		CSROWs 0 and 1
 * 4-7		CSROWs 2 and 3
 * 8-11		CSROWs 4 and 5
 * 12-15	CSROWs 6 and 7
3013 * Values range from: 0 to 15
 * The meaning of the values depends on CPU revision and dual-channel state;
 * see the relevant BKDG for more info.
 * The memory controller provides for a total of only 8 CSROWs in its current
 * architecture. Each "pair" of CSROWs normally represents just one DIMM in
 * single channel or two (2) DIMMs in dual channel mode.
 * The following code logic collapses the various tables for CSROW based on
 * CPU revision.
 *
 * Return:
 *	The number of PAGE_SIZE pages on the specified CSROW number it
 *	amounts to.
 */
static u32 dct_get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
{
3031 u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;
3032 u32 cs_mode, nr_pages;
	csrow_nr >>= 1;
	cs_mode = DBAM_DIMM(csrow_nr, dbam);
3037 nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, csrow_nr);
3038 nr_pages <<= 20 - PAGE_SHIFT;
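	/*
	 * dbam_to_cs() returns the chip select size in MB; shifting by
	 * (20 - PAGE_SHIFT) converts megabytes to PAGE_SIZE pages.
	 */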
3040 edac_dbg(0, "csrow: %d, channel: %d, DBAM idx: %d\n",
3041 csrow_nr, dct, cs_mode);
	edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);

	return nr_pages;
}
static u32 umc_get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr_orig)
{
3049 int csrow_nr = csrow_nr_orig;
3050 u32 cs_mode, nr_pages;
3052 cs_mode = umc_get_cs_mode(csrow_nr >> 1, dct, pvt);
3054 nr_pages = umc_addr_mask_to_cs_size(pvt, dct, cs_mode, csrow_nr);
3055 nr_pages <<= 20 - PAGE_SHIFT;
3057 edac_dbg(0, "csrow: %d, channel: %d, cs_mode %d\n",
3058 csrow_nr_orig, dct, cs_mode);
	edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);

	return nr_pages;
}
3064 static void umc_init_csrows(struct mem_ctl_info *mci)
{
	struct amd64_pvt *pvt = mci->pvt_info;
3067 enum edac_type edac_mode = EDAC_NONE;
3068 enum dev_type dev_type = DEV_UNKNOWN;
	struct dimm_info *dimm;
	u8 umc, cs;
3072 if (mci->edac_ctl_cap & EDAC_FLAG_S16ECD16ED) {
		edac_mode = EDAC_S16ECD16ED;
		dev_type = DEV_X16;
	} else if (mci->edac_ctl_cap & EDAC_FLAG_S8ECD8ED) {
		edac_mode = EDAC_S8ECD8ED;
		dev_type = DEV_X8;
	} else if (mci->edac_ctl_cap & EDAC_FLAG_S4ECD4ED) {
		edac_mode = EDAC_S4ECD4ED;
		dev_type = DEV_X4;
3081 } else if (mci->edac_ctl_cap & EDAC_FLAG_SECDED) {
		edac_mode = EDAC_SECDED;
	}

	for_each_umc(umc) {
3086 for_each_chip_select(cs, umc, pvt) {
			if (!csrow_enabled(cs, umc, pvt))
				continue;
3090 dimm = mci->csrows[cs]->channels[umc]->dimm;
3092 edac_dbg(1, "MC node: %d, csrow: %d\n",
3093 pvt->mc_node_id, cs);
3095 dimm->nr_pages = umc_get_csrow_nr_pages(pvt, umc, cs);
3096 dimm->mtype = pvt->umc[umc].dram_type;
3097 dimm->edac_mode = edac_mode;
			dimm->dtype = dev_type;
			dimm->grain = 64;
		}
	}
}
3105 * Initialize the array of csrow attribute instances, based on the values
3106 * from pci config hardware registers.
3108 static void dct_init_csrows(struct mem_ctl_info *mci)
{
	struct amd64_pvt *pvt = mci->pvt_info;
3111 enum edac_type edac_mode = EDAC_NONE;
3112 struct csrow_info *csrow;
	struct dimm_info *dimm;
	int nr_pages = 0;
	int i, j;
	u32 val;

	amd64_read_pci_cfg(pvt->F3, NBCFG, &val);

	pvt->nbcfg = val;
3122 edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
3123 pvt->mc_node_id, val,
3124 !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
3127 * We iterate over DCT0 here but we look at DCT1 in parallel, if needed.
3129 for_each_chip_select(i, 0, pvt) {
3130 bool row_dct0 = !!csrow_enabled(i, 0, pvt);
3131 bool row_dct1 = false;
3133 if (pvt->fam != 0xf)
3134 row_dct1 = !!csrow_enabled(i, 1, pvt);
		if (!row_dct0 && !row_dct1)
			continue;
3139 csrow = mci->csrows[i];
3141 edac_dbg(1, "MC node: %d, csrow: %d\n",
3142 pvt->mc_node_id, i);
		if (row_dct0) {
			nr_pages = dct_get_csrow_nr_pages(pvt, 0, i);
			csrow->channels[0]->dimm->nr_pages = nr_pages;
		}
3149 /* K8 has only one DCT */
3150 if (pvt->fam != 0xf && row_dct1) {
3151 int row_dct1_pages = dct_get_csrow_nr_pages(pvt, 1, i);
3153 csrow->channels[1]->dimm->nr_pages = row_dct1_pages;
3154 nr_pages += row_dct1_pages;
3157 edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages);
3159 /* Determine DIMM ECC mode: */
3160 if (pvt->nbcfg & NBCFG_ECC_ENABLE) {
			edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL)
					? EDAC_S4ECD4ED
					: EDAC_SECDED;
		}
3166 for (j = 0; j < pvt->max_mcs; j++) {
3167 dimm = csrow->channels[j]->dimm;
3168 dimm->mtype = pvt->dram_type;
			dimm->edac_mode = edac_mode;
			dimm->grain = 64;
		}
	}
}
3175 /* get all cores on this DCT */
3176 static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, u16 nid)
3180 for_each_online_cpu(cpu)
3181 if (topology_amd_node_id(cpu) == nid)
3182 cpumask_set_cpu(cpu, mask);
3185 /* check MCG_CTL on all the cpus on this node */
static bool nb_mce_bank_enabled_on_node(u16 nid)
{
	cpumask_var_t mask;
	int cpu, nbe;
	bool ret = false;
3192 if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
		amd64_warn("%s: Error allocating mask\n", __func__);
		return false;
	}
3197 get_cpus_on_this_dct_cpumask(mask, nid);
3199 rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
3201 for_each_cpu(cpu, mask) {
3202 struct msr *reg = per_cpu_ptr(msrs, cpu);
3203 nbe = reg->l & MSR_MCGCTL_NBE;
		edac_dbg(0, "core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
			 cpu, reg->q,
			 (nbe ? "enabled" : "disabled"));

		if (!nbe)
			goto out;
	}
	ret = true;

out:
	free_cpumask_var(mask);
	return ret;
}
3219 static int toggle_ecc_err_reporting(struct ecc_settings *s, u16 nid, bool on)
	cpumask_var_t cmask;
	int cpu;
3224 if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
		amd64_warn("%s: error allocating mask\n", __func__);
		return -ENOMEM;
	}
3229 get_cpus_on_this_dct_cpumask(cmask, nid);
3231 rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
3233 for_each_cpu(cpu, cmask) {
		struct msr *reg = per_cpu_ptr(msrs, cpu);

		if (on) {
			if (reg->l & MSR_MCGCTL_NBE)
				s->flags.nb_mce_enable = 1;

			reg->l |= MSR_MCGCTL_NBE;
		} else {
			/*
			 * Turn off NB MCE reporting only when it was off
			 * before.
			 */
			if (!s->flags.nb_mce_enable)
				reg->l &= ~MSR_MCGCTL_NBE;
		}
	}
3250 wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
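	/*
	 * rdmsr_on_cpus() above cached MCG_CTL per CPU in msrs; after the
	 * cached copies were edited, wrmsr_on_cpus() writes them back on
	 * every CPU in @cmask in one batch.
	 */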
	free_cpumask_var(cmask);

	return 0;
}
static bool enable_ecc_error_reporting(struct ecc_settings *s, u16 nid,
				       struct pci_dev *F3)
{
	bool ret = true;
	u32 value, mask = 0x3; /* UECC/CECC enable */
3263 if (toggle_ecc_err_reporting(s, nid, ON)) {
		amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
		return false;
	}
3268 amd64_read_pci_cfg(F3, NBCTL, &value);
3270 s->old_nbctl = value & mask;
3271 s->nbctl_valid = true;
	value |= mask;
	amd64_write_pci_cfg(F3, NBCTL, value);
3276 amd64_read_pci_cfg(F3, NBCFG, &value);
3278 edac_dbg(0, "1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
3279 nid, value, !!(value & NBCFG_ECC_ENABLE));
3281 if (!(value & NBCFG_ECC_ENABLE)) {
3282 amd64_warn("DRAM ECC disabled on this node, enabling...\n");
3284 s->flags.nb_ecc_prev = 0;
3286 /* Attempt to turn on DRAM ECC Enable */
3287 value |= NBCFG_ECC_ENABLE;
3288 amd64_write_pci_cfg(F3, NBCFG, value);
3290 amd64_read_pci_cfg(F3, NBCFG, &value);
3292 if (!(value & NBCFG_ECC_ENABLE)) {
			amd64_warn("Hardware rejected DRAM ECC enable, "
				   "check memory DIMM configuration.\n");
			ret = false;
		} else {
			amd64_info("Hardware accepted DRAM ECC Enable\n");
		}
	} else {
		s->flags.nb_ecc_prev = 1;
	}
3303 edac_dbg(0, "2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
		 nid, value, !!(value & NBCFG_ECC_ENABLE));

	return ret;
}
static void restore_ecc_error_reporting(struct ecc_settings *s, u16 nid,
					struct pci_dev *F3)
{
3312 u32 value, mask = 0x3; /* UECC/CECC enable */
	if (!s->nbctl_valid)
		return;
3317 amd64_read_pci_cfg(F3, NBCTL, &value);
	value &= ~mask;
	value |= s->old_nbctl;
3321 amd64_write_pci_cfg(F3, NBCTL, value);
3323 /* restore previous BIOS DRAM ECC "off" setting we force-enabled */
3324 if (!s->flags.nb_ecc_prev) {
3325 amd64_read_pci_cfg(F3, NBCFG, &value);
3326 value &= ~NBCFG_ECC_ENABLE;
3327 amd64_write_pci_cfg(F3, NBCFG, value);
3330 /* restore the NB Enable MCGCTL bit */
3331 if (toggle_ecc_err_reporting(s, nid, OFF))
		amd64_warn("Error restoring NB MCGCTL settings!\n");
}
static bool dct_ecc_enabled(struct amd64_pvt *pvt)
{
3337 u16 nid = pvt->mc_node_id;
	bool nb_mce_en = false;
	u8 ecc_en = 0;
	u32 value;
3342 amd64_read_pci_cfg(pvt->F3, NBCFG, &value);
3344 ecc_en = !!(value & NBCFG_ECC_ENABLE);
	nb_mce_en = nb_mce_bank_enabled_on_node(nid);
	if (!nb_mce_en)
3348 edac_dbg(0, "NB MCE bank disabled, set MSR 0x%08x[4] on node %d to enable.\n",
3349 MSR_IA32_MCG_CTL, nid);
3351 edac_dbg(3, "Node %d: DRAM ECC %s.\n", nid, (ecc_en ? "enabled" : "disabled"));
	if (!ecc_en || !nb_mce_en)
		return false;
	else
		return true;
}
static bool umc_ecc_enabled(struct amd64_pvt *pvt)
{
3361 u8 umc_en_mask = 0, ecc_en_mask = 0;
3362 u16 nid = pvt->mc_node_id;
	struct amd64_umc *umc;
	bool ecc_en = false;
	int i;

	for_each_umc(i) {
		umc = &pvt->umc[i];
3369 /* Only check enabled UMCs. */
		if (!(umc->sdp_ctrl & UMC_SDP_INIT))
			continue;
3373 umc_en_mask |= BIT(i);
3375 if (umc->umc_cap_hi & UMC_ECC_ENABLED)
3376 ecc_en_mask |= BIT(i);
	}

	/* Check whether at least one UMC is enabled: */
	if (umc_en_mask)
		ecc_en = umc_en_mask == ecc_en_mask;
	else
3383 edac_dbg(0, "Node %d: No enabled UMCs.\n", nid);
	edac_dbg(3, "Node %d: DRAM ECC %s.\n", nid, (ecc_en ? "enabled" : "disabled"));

	return ecc_en;
}
static void
umc_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
{
3396 u8 i, ecc_en = 1, cpk_en = 1, dev_x4 = 1, dev_x16 = 1;
	for_each_umc(i) {
		if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
3400 ecc_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_ENABLED);
3401 cpk_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_CHIPKILL_CAP);
3403 dev_x4 &= !!(pvt->umc[i].dimm_cfg & BIT(6));
			dev_x16 &= !!(pvt->umc[i].dimm_cfg & BIT(7));
		}
	}
	/* Set chipkill only if ECC is enabled: */
	if (ecc_en) {
		mci->edac_ctl_cap |= EDAC_FLAG_SECDED;

		if (!cpk_en)
			return;

		if (dev_x4)
			mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
		else if (dev_x16)
			mci->edac_ctl_cap |= EDAC_FLAG_S16ECD16ED;
		else
			mci->edac_ctl_cap |= EDAC_FLAG_S8ECD8ED;
	}
}
3424 static void dct_setup_mci_misc_attrs(struct mem_ctl_info *mci)
3426 struct amd64_pvt *pvt = mci->pvt_info;
3428 mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
3429 mci->edac_ctl_cap = EDAC_FLAG_NONE;
3431 if (pvt->nbcap & NBCAP_SECDED)
3432 mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
3434 if (pvt->nbcap & NBCAP_CHIPKILL)
3435 mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
3437 mci->edac_cap = dct_determine_edac_cap(pvt);
3438 mci->mod_name = EDAC_MOD_STR;
3439 mci->ctl_name = pvt->ctl_name;
3440 mci->dev_name = pci_name(pvt->F3);
3441 mci->ctl_page_to_phys = NULL;
3443 /* memory scrubber interface */
3444 mci->set_sdram_scrub_rate = set_scrub_rate;
3445 mci->get_sdram_scrub_rate = get_scrub_rate;
3447 dct_init_csrows(mci);
3450 static void umc_setup_mci_misc_attrs(struct mem_ctl_info *mci)
3452 struct amd64_pvt *pvt = mci->pvt_info;
3454 mci->mtype_cap = MEM_FLAG_DDR4 | MEM_FLAG_RDDR4;
3455 mci->edac_ctl_cap = EDAC_FLAG_NONE;
3457 umc_determine_edac_ctl_cap(mci, pvt);
3459 mci->edac_cap = umc_determine_edac_cap(pvt);
3460 mci->mod_name = EDAC_MOD_STR;
3461 mci->ctl_name = pvt->ctl_name;
3462 mci->dev_name = pci_name(pvt->F3);
3463 mci->ctl_page_to_phys = NULL;
3465 umc_init_csrows(mci);
3468 static int dct_hw_info_get(struct amd64_pvt *pvt)
{
	int ret = reserve_mc_sibling_devs(pvt, pvt->f1_id, pvt->f2_id);

	if (ret)
		return ret;
3475 dct_prep_chip_selects(pvt);
3476 dct_read_base_mask(pvt);
3477 dct_read_mc_regs(pvt);
	dct_determine_memory_type(pvt);

	return 0;
}
3483 static int umc_hw_info_get(struct amd64_pvt *pvt)
{
	pvt->umc = kcalloc(pvt->max_mcs, sizeof(struct amd64_umc), GFP_KERNEL);
	if (!pvt->umc)
		return -ENOMEM;
3489 umc_prep_chip_selects(pvt);
3490 umc_read_base_mask(pvt);
3491 umc_read_mc_regs(pvt);
	umc_determine_memory_type(pvt);

	return 0;
}
3498 * The CPUs have one channel per UMC, so UMC number is equivalent to a
3499 * channel number. The GPUs have 8 channels per UMC, so the UMC number no
 * longer works as a channel number.
3502 * The channel number within a GPU UMC is given in MCA_IPID[15:12].
3503 * However, the IDs are split such that two UMC values go to one UMC, and
 * the channel numbers are split in two groups of four.
3506 * Refer to comment on gpu_get_umc_base().
3509 * UMC0 CH[3:0] = 0x0005[3:0]000
3510 * UMC0 CH[7:4] = 0x0015[3:0]000
3511 * UMC1 CH[3:0] = 0x0025[3:0]000
 * UMC1 CH[7:4] = 0x0035[3:0]000
 */
static void gpu_get_err_info(struct mce *m, struct err_info *err)
{
3516 u8 ch = (m->ipid & GENMASK(31, 0)) >> 20;
3517 u8 phy = ((m->ipid >> 12) & 0xf);
	err->channel = ch % 2 ? phy + 4 : phy;
	err->csrow = m->synd & 0x7;
}
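/*
 * E.g. (illustrative): ipid == 0x00153000 gives ch == 0x1 and phy == 0x3;
 * ch is odd, so this maps to channel 3 + 4 == 7, matching the
 * "UMC0 CH[7:4] = 0x0015[3:0]000" pattern above.
 */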
3523 static int gpu_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
3524 unsigned int cs_mode, int csrow_nr)
3526 u32 addr_mask_orig = pvt->csels[umc].csmasks[csrow_nr];
3528 return __addr_mask_to_cs_size(addr_mask_orig, cs_mode, csrow_nr, csrow_nr >> 1);
3531 static void gpu_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
3533 int size, cs_mode, cs = 0;
3535 edac_printk(KERN_DEBUG, EDAC_MC, "UMC%d chip selects:\n", ctrl);
3537 cs_mode = CS_EVEN_PRIMARY | CS_ODD_PRIMARY;
3539 for_each_chip_select(cs, ctrl, pvt) {
3540 size = gpu_addr_mask_to_cs_size(pvt, ctrl, cs_mode, cs);
3541 amd64_info(EDAC_MC ": %d: %5dMB\n", cs, size);
3545 static void gpu_dump_misc_regs(struct amd64_pvt *pvt)
	struct amd64_umc *umc;
	u32 i;

	for_each_umc(i) {
		umc = &pvt->umc[i];
3553 edac_dbg(1, "UMC%d UMC cfg: 0x%x\n", i, umc->umc_cfg);
3554 edac_dbg(1, "UMC%d SDP ctrl: 0x%x\n", i, umc->sdp_ctrl);
3555 edac_dbg(1, "UMC%d ECC ctrl: 0x%x\n", i, umc->ecc_ctrl);
3556 edac_dbg(1, "UMC%d All HBMs support ECC: yes\n", i);
		gpu_debug_display_dimm_sizes(pvt, i);
	}
}
3562 static u32 gpu_get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
{
	u32 nr_pages;
	int cs_mode = CS_EVEN_PRIMARY | CS_ODD_PRIMARY;
3567 nr_pages = gpu_addr_mask_to_cs_size(pvt, dct, cs_mode, csrow_nr);
3568 nr_pages <<= 20 - PAGE_SHIFT;
3570 edac_dbg(0, "csrow: %d, channel: %d\n", csrow_nr, dct);
	edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);

	return nr_pages;
}
3576 static void gpu_init_csrows(struct mem_ctl_info *mci)
3578 struct amd64_pvt *pvt = mci->pvt_info;
	struct dimm_info *dimm;
	u8 umc, cs;

	for_each_umc(umc) {
3583 for_each_chip_select(cs, umc, pvt) {
			if (!csrow_enabled(cs, umc, pvt))
				continue;
3587 dimm = mci->csrows[umc]->channels[cs]->dimm;
3589 edac_dbg(1, "MC node: %d, csrow: %d\n",
3590 pvt->mc_node_id, cs);
3592 dimm->nr_pages = gpu_get_csrow_nr_pages(pvt, umc, cs);
3593 dimm->edac_mode = EDAC_SECDED;
3594 dimm->mtype = pvt->dram_type;
			dimm->dtype = DEV_X16;
			dimm->grain = 64;
		}
	}
}
3601 static void gpu_setup_mci_misc_attrs(struct mem_ctl_info *mci)
3603 struct amd64_pvt *pvt = mci->pvt_info;
3605 mci->mtype_cap = MEM_FLAG_HBM2;
3606 mci->edac_ctl_cap = EDAC_FLAG_SECDED;
3608 mci->edac_cap = EDAC_FLAG_EC;
3609 mci->mod_name = EDAC_MOD_STR;
3610 mci->ctl_name = pvt->ctl_name;
3611 mci->dev_name = pci_name(pvt->F3);
3612 mci->ctl_page_to_phys = NULL;
3614 gpu_init_csrows(mci);
3617 /* ECC is enabled by default on GPU nodes */
static bool gpu_ecc_enabled(struct amd64_pvt *pvt)
{
	return true;
}
3623 static inline u32 gpu_get_umc_base(struct amd64_pvt *pvt, u8 umc, u8 channel)
{
	/*
	 * On CPUs, there is one channel per UMC, so UMC numbering equals
3627 * channel numbering. On GPUs, there are eight channels per UMC,
3628 * so the channel numbering is different from UMC numbering.
3630 * On CPU nodes channels are selected in 6th nibble
3631 * UMC chY[3:0]= [(chY*2 + 1) : (chY*2)]50000;
3633 * On GPU nodes channels are selected in 3rd nibble
3634 * HBM chX[3:0]= [Y ]5X[3:0]000;
3635 * HBM chX[7:4]= [Y+1]5X[3:0]000
3637 * On MI300 APU nodes, same as GPU nodes but channels are selected
3638 * in the base address of 0x90000
	 */
	umc *= 2;

	if (channel >= 4)
		umc++;

	return pvt->gpu_umc_base + (umc << 20) + ((channel % 4) << 12);
}
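/*
 * Worked example (illustrative), with gpu_umc_base == 0x50000:
 * umc == 1, channel == 6 -> umc becomes 1 * 2 + 1 == 3, giving
 * 0x50000 + (3 << 20) + (2 << 12) == 0x352000, i.e. the
 * "UMC1 CH[7:4] = 0x0035[3:0]000" pattern described earlier.
 */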
3648 static void gpu_read_mc_regs(struct amd64_pvt *pvt)
3650 u8 nid = pvt->mc_node_id;
	struct amd64_umc *umc;
	u32 i, umc_base;

	/* Read registers from each UMC */
	for_each_umc(i) {
		umc_base = gpu_get_umc_base(pvt, i, 0);
		umc = &pvt->umc[i];
3659 amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &umc->umc_cfg);
3660 amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &umc->sdp_ctrl);
		amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &umc->ecc_ctrl);
	}
}
3665 static void gpu_read_base_mask(struct amd64_pvt *pvt)
	u32 base_reg, mask_reg;
	u32 *base, *mask;
	int umc, cs;

	for_each_umc(umc) {
3672 for_each_chip_select(cs, umc, pvt) {
3673 base_reg = gpu_get_umc_base(pvt, umc, cs) + UMCCH_BASE_ADDR;
3674 base = &pvt->csels[umc].csbases[cs];
3676 if (!amd_smn_read(pvt->mc_node_id, base_reg, base)) {
3677 edac_dbg(0, " DCSB%d[%d]=0x%08x reg: 0x%x\n",
3678 umc, cs, *base, base_reg);
3681 mask_reg = gpu_get_umc_base(pvt, umc, cs) + UMCCH_ADDR_MASK;
3682 mask = &pvt->csels[umc].csmasks[cs];
3684 if (!amd_smn_read(pvt->mc_node_id, mask_reg, mask)) {
3685 edac_dbg(0, " DCSM%d[%d]=0x%08x reg: 0x%x\n",
					 umc, cs, *mask, mask_reg);
			}
		}
	}
}
static void gpu_prep_chip_selects(struct amd64_pvt *pvt)
{
	int umc;

	for_each_umc(umc) {
3697 pvt->csels[umc].b_cnt = 8;
		pvt->csels[umc].m_cnt = 8;
	}
}
static int gpu_hw_info_get(struct amd64_pvt *pvt)
{
	int ret;

	ret = gpu_get_node_map(pvt);
	if (ret)
		return ret;

	pvt->umc = kcalloc(pvt->max_mcs, sizeof(struct amd64_umc), GFP_KERNEL);
	if (!pvt->umc)
		return -ENOMEM;
3714 gpu_prep_chip_selects(pvt);
3715 gpu_read_base_mask(pvt);
	gpu_read_mc_regs(pvt);

	return 0;
}
static void hw_info_put(struct amd64_pvt *pvt)
{
3723 pci_dev_put(pvt->F1);
	pci_dev_put(pvt->F2);
	kfree(pvt->umc);
}
3728 static struct low_ops umc_ops = {
3729 .hw_info_get = umc_hw_info_get,
3730 .ecc_enabled = umc_ecc_enabled,
3731 .setup_mci_misc_attrs = umc_setup_mci_misc_attrs,
3732 .dump_misc_regs = umc_dump_misc_regs,
3733 .get_err_info = umc_get_err_info,
3736 static struct low_ops gpu_ops = {
3737 .hw_info_get = gpu_hw_info_get,
3738 .ecc_enabled = gpu_ecc_enabled,
3739 .setup_mci_misc_attrs = gpu_setup_mci_misc_attrs,
3740 .dump_misc_regs = gpu_dump_misc_regs,
3741 .get_err_info = gpu_get_err_info,
3744 /* Use Family 16h versions for defaults and adjust as needed below. */
3745 static struct low_ops dct_ops = {
3746 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
3747 .dbam_to_cs = f16_dbam_to_chip_select,
3748 .hw_info_get = dct_hw_info_get,
3749 .ecc_enabled = dct_ecc_enabled,
3750 .setup_mci_misc_attrs = dct_setup_mci_misc_attrs,
3751 .dump_misc_regs = dct_dump_misc_regs,
static int per_family_init(struct amd64_pvt *pvt)
{
3756 pvt->ext_model = boot_cpu_data.x86_model >> 4;
3757 pvt->stepping = boot_cpu_data.x86_stepping;
3758 pvt->model = boot_cpu_data.x86_model;
	pvt->fam = boot_cpu_data.x86;
	pvt->max_mcs = 2;
	 * Decide on which ops group to use here and do any family/model
	 * overrides below.
	 */
3766 if (pvt->fam >= 0x17)
3767 pvt->ops = &umc_ops;
	else
		pvt->ops = &dct_ops;
	switch (pvt->fam) {
	case 0xf:
		pvt->ctl_name = (pvt->ext_model >= K8_REV_F) ?
3774 "K8 revF or later" : "K8 revE or earlier";
3775 pvt->f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP;
3776 pvt->f2_id = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL;
3777 pvt->ops->map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow;
		pvt->ops->dbam_to_cs = k8_dbam_to_chip_select;
		break;

	case 0x10:
3782 pvt->ctl_name = "F10h";
3783 pvt->f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP;
3784 pvt->f2_id = PCI_DEVICE_ID_AMD_10H_NB_DRAM;
		pvt->ops->dbam_to_cs = f10_dbam_to_chip_select;
		break;

	case 0x15:
		switch (pvt->model) {
		case 0x30:
			pvt->ctl_name = "F15h_M30h";
3792 pvt->f1_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1;
			pvt->f2_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F2;
			break;
		case 0x60:
3796 pvt->ctl_name = "F15h_M60h";
3797 pvt->f1_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1;
3798 pvt->f2_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F2;
			pvt->ops->dbam_to_cs = f15_m60h_dbam_to_chip_select;
			break;
		case 0x13:
			/* Richland is only client */
			return -ENODEV;
		default:
			pvt->ctl_name = "F15h";
3806 pvt->f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1;
3807 pvt->f2_id = PCI_DEVICE_ID_AMD_15H_NB_F2;
			pvt->ops->dbam_to_cs = f15_dbam_to_chip_select;
			break;
		}
		break;

	case 0x16:
		switch (pvt->model) {
		case 0x30:
			pvt->ctl_name = "F16h_M30h";
3817 pvt->f1_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F1;
			pvt->f2_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F2;
			break;
		default:
3821 pvt->ctl_name = "F16h";
3822 pvt->f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1;
			pvt->f2_id = PCI_DEVICE_ID_AMD_16H_NB_F2;
			break;
		}
		break;

	case 0x17:
		switch (pvt->model) {
		case 0x10 ... 0x2f:
			pvt->ctl_name = "F17h_M10h";
			break;
		case 0x30 ... 0x3f:
			pvt->ctl_name = "F17h_M30h";
			pvt->max_mcs = 8;
			break;
		case 0x60 ... 0x6f:
			pvt->ctl_name = "F17h_M60h";
			break;
		case 0x70 ... 0x7f:
			pvt->ctl_name = "F17h_M70h";
			break;
		default:
			pvt->ctl_name = "F17h";
			break;
		}
		break;
	case 0x18:
		pvt->ctl_name = "F18h";
		break;

	case 0x19:
		switch (pvt->model) {
		case 0x00 ... 0x0f:
			pvt->ctl_name = "F19h";
			pvt->max_mcs = 8;
			break;
		case 0x10 ... 0x1f:
			pvt->ctl_name = "F19h_M10h";
			pvt->max_mcs = 12;
			pvt->flags.zn_regs_v2 = 1;
			break;
		case 0x20 ... 0x2f:
			pvt->ctl_name = "F19h_M20h";
			break;
		case 0x30 ... 0x3f:
3868 if (pvt->F3->device == PCI_DEVICE_ID_AMD_MI200_DF_F3) {
				pvt->ctl_name = "MI200";
				pvt->max_mcs = 4;
3871 pvt->dram_type = MEM_HBM2;
3872 pvt->gpu_umc_base = 0x50000;
				pvt->ops = &gpu_ops;
			} else {
				pvt->ctl_name = "F19h_M30h";
				pvt->max_mcs = 8;
			}
			break;
		case 0x50 ... 0x5f:
			pvt->ctl_name = "F19h_M50h";
			break;
		case 0x60 ... 0x6f:
			pvt->ctl_name = "F19h_M60h";
			pvt->flags.zn_regs_v2 = 1;
			break;
		case 0x70 ... 0x7f:
			pvt->ctl_name = "F19h_M70h";
			pvt->flags.zn_regs_v2 = 1;
			break;
		case 0x90 ... 0x9f:
			pvt->ctl_name = "F19h_M90h";
			pvt->max_mcs = 4;
			pvt->dram_type = MEM_HBM3;
			pvt->gpu_umc_base = 0x90000;
			pvt->ops = &gpu_ops;
			break;
		case 0xa0 ... 0xaf:
			pvt->ctl_name = "F19h_MA0h";
			pvt->max_mcs = 12;
			pvt->flags.zn_regs_v2 = 1;
			break;
		}
		break;

	case 0x1A:
		switch (pvt->model) {
		case 0x00 ... 0x1f:
			pvt->ctl_name = "F1Ah";
			pvt->max_mcs = 12;
			pvt->flags.zn_regs_v2 = 1;
			break;
		case 0x40 ... 0x4f:
			pvt->ctl_name = "F1Ah_M40h";
			pvt->flags.zn_regs_v2 = 1;
			break;
		}
		break;

	default:
		amd64_err("Unsupported family!\n");
		return -ENODEV;
	}

	return 0;
}
3927 static const struct attribute_group *amd64_edac_attr_groups[] = {
#ifdef CONFIG_EDAC_DEBUG
	&dbg_group,
	&inj_group,
#endif
	NULL
};

/*
3936 * For heterogeneous and APU models EDAC CHIP_SELECT and CHANNEL layers
 * should be swapped to fit into the layers.
 */
3939 static unsigned int get_layer_size(struct amd64_pvt *pvt, u8 layer)
{
	bool is_gpu = (pvt->ops == &gpu_ops);

	if (layer == 0)
		return is_gpu ? pvt->max_mcs
			      : pvt->csels[0].b_cnt;
	else
		return is_gpu ? pvt->csels[0].b_cnt
			      : pvt->max_mcs;
}
3951 static int init_one_instance(struct amd64_pvt *pvt)
3953 struct mem_ctl_info *mci = NULL;
3954 struct edac_mc_layer layers[2];
3957 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
3958 layers[0].size = get_layer_size(pvt, 0);
3959 layers[0].is_virt_csrow = true;
3960 layers[1].type = EDAC_MC_LAYER_CHANNEL;
3961 layers[1].size = get_layer_size(pvt, 1);
3962 layers[1].is_virt_csrow = false;
	mci = edac_mc_alloc(pvt->mc_node_id, ARRAY_SIZE(layers), layers, 0);
	if (!mci)
		return -ENOMEM;
3968 mci->pvt_info = pvt;
3969 mci->pdev = &pvt->F3->dev;
3971 pvt->ops->setup_mci_misc_attrs(mci);
3974 if (edac_mc_add_mc_with_groups(mci, amd64_edac_attr_groups)) {
		edac_dbg(1, "failed edac_mc_add_mc()\n");
		edac_mc_free(mci);
		return -ENODEV;
	}

	return 0;
}
3983 static bool instance_has_memory(struct amd64_pvt *pvt)
3985 bool cs_enabled = false;
3986 int cs = 0, dct = 0;
3988 for (dct = 0; dct < pvt->max_mcs; dct++) {
3989 for_each_chip_select(cs, dct, pvt)
			cs_enabled |= csrow_enabled(cs, dct, pvt);
	}

	return cs_enabled;
}
3996 static int probe_one_instance(unsigned int nid)
3998 struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
3999 struct amd64_pvt *pvt = NULL;
4000 struct ecc_settings *s;
	s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL);
	if (!s)
		goto err_out;

	ecc_stngs[nid] = s;
	pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
	if (!pvt)
		goto err_free;
	pvt->mc_node_id = nid;
	pvt->F3 = F3;
	ret = per_family_init(pvt);
	if (ret < 0)
		goto err_enable;
	ret = pvt->ops->hw_info_get(pvt);
	if (ret < 0)
		goto err_enable;

	ret = 0;
4026 if (!instance_has_memory(pvt)) {
		amd64_info("Node %d: No DIMMs detected.\n", nid);
		goto err_enable;
	}
	if (!pvt->ops->ecc_enabled(pvt)) {
		ret = -ENODEV;
		if (!ecc_enable_override)
			goto err_enable;
4037 if (boot_cpu_data.x86 >= 0x17) {
			amd64_warn("Forcing ECC on is not recommended on newer systems. Please enable ECC in BIOS.");
			goto err_enable;
		} else
			amd64_warn("Forcing ECC on!\n");
		if (!enable_ecc_error_reporting(s, nid, F3))
			goto err_enable;
	}
	ret = init_one_instance(pvt);
	if (ret < 0) {
4049 amd64_err("Error probing instance: %d\n", nid);
4051 if (boot_cpu_data.x86 < 0x17)
			restore_ecc_error_reporting(s, nid, F3);

		goto err_enable;
	}
4057 amd64_info("%s detected (node %d).\n", pvt->ctl_name, pvt->mc_node_id);
4059 /* Display and decode various registers for debug purposes. */
	pvt->ops->dump_misc_regs(pvt);

	return ret;

err_enable:
	hw_info_put(pvt);
	kfree(pvt);

err_free:
	kfree(s);
	ecc_stngs[nid] = NULL;

err_out:
	return ret;
}
4076 static void remove_one_instance(unsigned int nid)
4078 struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
4079 struct ecc_settings *s = ecc_stngs[nid];
4080 struct mem_ctl_info *mci;
4081 struct amd64_pvt *pvt;
4083 /* Remove from EDAC CORE tracking list */
	mci = edac_mc_del_mc(&F3->dev);
	if (!mci)
		return;
4088 pvt = mci->pvt_info;
4090 restore_ecc_error_reporting(s, nid, F3);
4092 kfree(ecc_stngs[nid]);
4093 ecc_stngs[nid] = NULL;
4095 /* Free the EDAC CORE resources */
	mci->pvt_info = NULL;

	hw_info_put(pvt);
	kfree(pvt);
	edac_mc_free(mci);
}
static void setup_pci_device(void)
{
	if (pci_ctl)
		return;

	pci_ctl = edac_pci_create_generic_ctl(pci_ctl_dev, EDAC_MOD_STR);
	if (!pci_ctl) {
4110 pr_warn("%s(): Unable to create PCI control\n", __func__);
		pr_warn("%s(): PCI error report via EDAC not set\n", __func__);
	}
}
4115 static const struct x86_cpu_id amd64_cpuids[] = {
4116 X86_MATCH_VENDOR_FAM(AMD, 0x0F, NULL),
4117 X86_MATCH_VENDOR_FAM(AMD, 0x10, NULL),
4118 X86_MATCH_VENDOR_FAM(AMD, 0x15, NULL),
4119 X86_MATCH_VENDOR_FAM(AMD, 0x16, NULL),
4120 X86_MATCH_VENDOR_FAM(AMD, 0x17, NULL),
4121 X86_MATCH_VENDOR_FAM(HYGON, 0x18, NULL),
4122 X86_MATCH_VENDOR_FAM(AMD, 0x19, NULL),
	X86_MATCH_VENDOR_FAM(AMD, 0x1A, NULL),
	{ }
};
4126 MODULE_DEVICE_TABLE(x86cpu, amd64_cpuids);
static int __init amd64_edac_init(void)
{
	const char *owner;
	int err = -ENODEV;
	int i;
	if (ghes_get_devices())
		return -EBUSY;
4137 owner = edac_get_owner();
	if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
		return -EBUSY;
	if (!x86_match_cpu(amd64_cpuids))
		return -ENODEV;

	if (!amd_nb_num())
		return -ENODEV;

	opstate_init();

	err = -ENOMEM;
	ecc_stngs = kcalloc(amd_nb_num(), sizeof(ecc_stngs[0]), GFP_KERNEL);
	if (!ecc_stngs)
		goto err_free;
	msrs = msrs_alloc();
	if (!msrs)
		goto err_free;
4158 for (i = 0; i < amd_nb_num(); i++) {
		err = probe_one_instance(i);
		if (err) {
			/* unwind properly */
			while (--i >= 0)
				remove_one_instance(i);

			goto err_pci;
		}
	}
	if (!edac_has_mcs()) {
		err = -ENODEV;
		goto err_pci;
	}
4174 /* register stuff with EDAC MCE */
4175 if (boot_cpu_data.x86 >= 0x17) {
		amd_register_ecc_decoder(decode_umc_error);
	} else {
		amd_register_ecc_decoder(decode_bus_error);
		setup_pci_device();
	}
4182 #ifdef CONFIG_X86_32
	amd64_err("%s on 32-bit is unsupported. USE AT YOUR OWN RISK!\n", EDAC_MOD_STR);
#endif

	return 0;

err_pci:
	pci_ctl_dev = NULL;

	msrs_free(msrs);
	msrs = NULL;

err_free:
	kfree(ecc_stngs);
	ecc_stngs = NULL;

	return err;
}
static void __exit amd64_edac_exit(void)
{
	int i;

	if (pci_ctl)
		edac_pci_release_generic_ctl(pci_ctl);
4208 /* unregister from EDAC MCE */
4209 if (boot_cpu_data.x86 >= 0x17)
4210 amd_unregister_ecc_decoder(decode_umc_error);
	else
		amd_unregister_ecc_decoder(decode_bus_error);
4214 for (i = 0; i < amd_nb_num(); i++)
		remove_one_instance(i);

	kfree(ecc_stngs);
	ecc_stngs = NULL;

	pci_ctl_dev = NULL;

	msrs_free(msrs);
	msrs = NULL;
}
4226 module_init(amd64_edac_init);
4227 module_exit(amd64_edac_exit);
4229 MODULE_LICENSE("GPL");
4230 MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, Dave Peterson, Thayne Harbaugh; AMD");
4231 MODULE_DESCRIPTION("MC support for AMD64 memory controllers");
4233 module_param(edac_op_state, int, 0444);
4234 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");