1 #include "amd64_edac.h"
4 static struct edac_pci_ctl_info *amd64_ctl_pci;
6 static int report_gart_errors;
7 module_param(report_gart_errors, int, 0644);
10 * Set by command line parameter. If BIOS has enabled the ECC, this override is
11 * cleared to prevent re-enabling the hardware by this driver.
13 static int ecc_enable_override;
14 module_param(ecc_enable_override, int, 0644);
16 static struct msr *msrs;
18 /* Lookup table for all possible MC control instances */
20 static struct mem_ctl_info *mci_lookup[EDAC_MAX_NUMNODES];
21 static struct amd64_pvt *pvt_lookup[EDAC_MAX_NUMNODES];
 * Address to DRAM bank mapping: see F2x80 for K8 and F2x[1,0]80 for Fam10 and
 * later.
 */
27 static int ddr2_dbam_revCG[] = {
37 static int ddr2_dbam_revD[] = {
49 static int ddr2_dbam[] = { [0] = 128,
58 static int ddr3_dbam[] = { [0] = -1,
 * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
 * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching
 * or higher value'.
 *
 * FIXME: Produce a better mapping/linearisation.
 */
76 struct scrubrate scrubrates[] = {
77 { 0x01, 1600000000UL},
99 { 0x00, 0UL}, /* scrubbing off */
103 * Memory scrubber control interface. For K8, memory scrubbing is handled by
104 * hardware and can involve L2 cache, dcache as well as the main memory. With
 * F10, this is extended to L3 cache scrubbing on CPU models sporting that
 * functionality.
 *
108 * This causes the "units" for the scrubbing speed to vary from 64 byte blocks
109 * (dram) over to cache lines. This is nasty, so we will use bandwidth in
110 * bytes/sec for the setting.
112 * Currently, we only do dram scrubbing. If the scrubbing is done in software on
113 * other archs, we might not have access to the caches directly.
/*
 * Scan the scrub rate mapping table for a close or matching bandwidth value to
 * issue. If the requested rate is too big, then use the last maximum value found.
 */
static int amd64_search_set_scrub_rate(struct pci_dev *ctl, u32 new_bw,
				       u32 min_scrubrate)
{
	u32 scrubval;
	int i;

	/*
	 * map the configured rate (new_bw) to a value specific to the AMD64
	 * memory controller and apply to register. Search for the first
	 * bandwidth entry that is greater than or equal to the setting requested
	 * and program that. If at last entry, turn off DRAM scrubbing.
	 */
132 for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
		/*
		 * skip scrub rates which aren't recommended
		 * (see F10 BKDG, F3x58)
		 */
		if (scrubrates[i].scrubval < min_scrubrate)
			continue;

		if (scrubrates[i].bandwidth <= new_bw)
			break;
	/*
	 * if no suitable bandwidth found, turn off DRAM scrubbing
	 * entirely by falling back to the last element in the
	 * scrubrates[] table.
	 */
150 scrubval = scrubrates[i].scrubval;
	if (scrubval)
		edac_printk(KERN_DEBUG, EDAC_MC,
			    "Setting scrub rate bandwidth: %u\n",
			    scrubrates[i].bandwidth);
	else
		edac_printk(KERN_DEBUG, EDAC_MC, "Turning scrubbing off.\n");
158 pci_write_bits32(ctl, K8_SCRCTRL, scrubval, 0x001F);
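/*
 * Worked example (illustrative, using only the table entries shown above):
 * a request of new_bw = 1600000000 bytes/sec stops at the very first entry,
 * so scrubval 0x01 is programmed, assuming min_scrubrate does not filter it
 * out; a request of 0 walks all the way down to the terminating
 * { 0x00, 0UL } entry and turns scrubbing off. Either way, only the low
 * five bits of F3x58 (mask 0x001F) are written above.
 */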
163 static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 *bandwidth)
165 struct amd64_pvt *pvt = mci->pvt_info;
166 u32 min_scrubrate = 0x0;
	switch (boot_cpu_data.x86) {
	case 0xf:
		min_scrubrate = K8_MIN_SCRUB_RATE_BITS;
		break;
	case 0x10:
		min_scrubrate = F10_MIN_SCRUB_RATE_BITS;
		break;
	case 0x11:
		min_scrubrate = F11_MIN_SCRUB_RATE_BITS;
		break;
	default:
		amd64_printk(KERN_ERR, "Unsupported family!\n");
		return -EINVAL;
	}

	return amd64_search_set_scrub_rate(pvt->misc_f3_ctl, *bandwidth,
					   min_scrubrate);
}
187 static int amd64_get_scrub_rate(struct mem_ctl_info *mci, u32 *bw)
189 struct amd64_pvt *pvt = mci->pvt_info;
193 amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_SCRCTRL, &scrubval);
195 scrubval = scrubval & 0x001F;
197 edac_printk(KERN_DEBUG, EDAC_MC,
198 "pci-read, sdram scrub control value: %d \n", scrubval);
	for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
201 if (scrubrates[i].scrubval == scrubval) {
202 *bw = scrubrates[i].bandwidth;
211 /* Map from a CSROW entry to the mask entry that operates on it */
212 static inline u32 amd64_map_to_dcs_mask(struct amd64_pvt *pvt, int csrow)
{
	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F)
		return csrow;
	else
		return csrow >> 1;
}
/* return the 'base' address of the i'th CS entry of the 'dct' DRAM controller */
221 static u32 amd64_get_dct_base(struct amd64_pvt *pvt, int dct, int csrow)
{
	if (dct == 0)
		return pvt->dcsb0[csrow];
	else
		return pvt->dcsb1[csrow];
}
 * Return the 'mask' address of the i'th CS entry. This function is needed
 * because the number of DCSM registers on Rev E and prior vs Rev F and later
 * is different.
 */
234 static u32 amd64_get_dct_mask(struct amd64_pvt *pvt, int dct, int csrow)
{
	if (dct == 0)
		return pvt->dcsm0[amd64_map_to_dcs_mask(pvt, csrow)];
	else
		return pvt->dcsm1[amd64_map_to_dcs_mask(pvt, csrow)];
}
244 * In *base and *limit, pass back the full 40-bit base and limit physical
245 * addresses for the node given by node_id. This information is obtained from
246 * DRAM Base (section 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers. The
247 * base and limit addresses are of type SysAddr, as defined at the start of
248 * section 3.4.4 (p. 70). They are the lowest and highest physical addresses
249 * in the address range they represent.
251 static void amd64_get_base_and_limit(struct amd64_pvt *pvt, int node_id,
252 u64 *base, u64 *limit)
254 *base = pvt->dram_base[node_id];
255 *limit = pvt->dram_limit[node_id];
259 * Return 1 if the SysAddr given by sys_addr matches the base/limit associated
262 static int amd64_base_limit_match(struct amd64_pvt *pvt,
263 u64 sys_addr, int node_id)
265 u64 base, limit, addr;
267 amd64_get_base_and_limit(pvt, node_id, &base, &limit);
269 /* The K8 treats this as a 40-bit value. However, bits 63-40 will be
270 * all ones if the most significant implemented address bit is 1.
271 * Here we discard bits 63-40. See section 3.4.2 of AMD publication
272 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
273 * Application Programming.
275 addr = sys_addr & 0x000000ffffffffffull;
277 return (addr >= base) && (addr <= limit);
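/*
 * Example of the truncation above: a sign-extended SysAddr of
 * 0xffffff8000000000 becomes addr = 0x8000000000 once bits 63-40 are
 * discarded, and only then is it compared against the 40-bit base/limit
 * pair.
 */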
281 * Attempt to map a SysAddr to a node. On success, return a pointer to the
282 * mem_ctl_info structure for the node that the SysAddr maps to.
284 * On failure, return NULL.
286 static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
289 struct amd64_pvt *pvt;
294 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
295 * 3.4.4.2) registers to map the SysAddr to a node ID.
300 * The value of this field should be the same for all DRAM Base
301 * registers. Therefore we arbitrarily choose to read it from the
302 * register for node 0.
304 intlv_en = pvt->dram_IntlvEn[0];
307 for (node_id = 0; node_id < DRAM_REG_COUNT; node_id++) {
308 if (amd64_base_limit_match(pvt, sys_addr, node_id))
314 if (unlikely((intlv_en != 0x01) &&
315 (intlv_en != 0x03) &&
316 (intlv_en != 0x07))) {
317 amd64_printk(KERN_WARNING, "junk value of 0x%x extracted from "
318 "IntlvEn field of DRAM Base Register for node 0: "
319 "this probably indicates a BIOS bug.\n", intlv_en);
323 bits = (((u32) sys_addr) >> 12) & intlv_en;
325 for (node_id = 0; ; ) {
326 if ((pvt->dram_IntlvSel[node_id] & intlv_en) == bits)
327 break; /* intlv_sel field matches */
329 if (++node_id >= DRAM_REG_COUNT)
333 /* sanity test for sys_addr */
334 if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) {
335 amd64_printk(KERN_WARNING,
336 "%s(): sys_addr 0x%llx falls outside base/limit "
337 "address range for node %d with node interleaving "
339 __func__, sys_addr, node_id);
344 return edac_mc_find(node_id);
347 debugf2("sys_addr 0x%lx doesn't match any node\n",
348 (unsigned long)sys_addr);
354 * Extract the DRAM CS base address from selected csrow register.
356 static u64 base_from_dct_base(struct amd64_pvt *pvt, int csrow)
{
	return ((u64) (amd64_get_dct_base(pvt, 0, csrow) & pvt->dcsb_base)) <<
		pvt->dcs_shift;
}
363 * Extract the mask from the dcsb0[csrow] entry in a CPU revision-specific way.
365 static u64 mask_from_dct_mask(struct amd64_pvt *pvt, int csrow)
367 u64 dcsm_bits, other_bits;
370 /* Extract bits from DRAM CS Mask. */
371 dcsm_bits = amd64_get_dct_mask(pvt, 0, csrow) & pvt->dcsm_mask;
373 other_bits = pvt->dcsm_mask;
374 other_bits = ~(other_bits << pvt->dcs_shift);
377 * The extracted bits from DCSM belong in the spaces represented by
378 * the cleared bits in other_bits.
380 mask = (dcsm_bits << pvt->dcs_shift) | other_bits;
386 * @input_addr is an InputAddr associated with the node given by mci. Return the
387 * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
389 static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
391 struct amd64_pvt *pvt;
398 * Here we use the DRAM CS Base and DRAM CS Mask registers. For each CS
399 * base/mask register pair, test the condition shown near the start of
400 * section 3.5.4 (p. 84, BKDG #26094, K8, revA-E).
402 for (csrow = 0; csrow < pvt->cs_count; csrow++) {
404 /* This DRAM chip select is disabled on this node */
405 if ((pvt->dcsb0[csrow] & K8_DCSB_CS_ENABLE) == 0)
408 base = base_from_dct_base(pvt, csrow);
409 mask = ~mask_from_dct_mask(pvt, csrow);
411 if ((input_addr & mask) == (base & mask)) {
412 debugf2("InputAddr 0x%lx matches csrow %d (node %d)\n",
413 (unsigned long)input_addr, csrow,
420 debugf2("no matching csrow for InputAddr 0x%lx (MC node %d)\n",
421 (unsigned long)input_addr, pvt->mc_node_id);
427 * Return the base value defined by the DRAM Base register for the node
428 * represented by mci. This function returns the full 40-bit value despite the
429 * fact that the register only stores bits 39-24 of the value. See section
430 * 3.4.4.1 (BKDG #26094, K8, revA-E)
432 static inline u64 get_dram_base(struct mem_ctl_info *mci)
434 struct amd64_pvt *pvt = mci->pvt_info;
436 return pvt->dram_base[pvt->mc_node_id];
440 * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
441 * for the node represented by mci. Info is passed back in *hole_base,
442 * *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if
443 * info is invalid. Info may be invalid for either of the following reasons:
445 * - The revision of the node is not E or greater. In this case, the DRAM Hole
446 * Address Register does not exist.
448 * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
449 * indicating that its contents are not valid.
451 * The values passed back in *hole_base, *hole_offset, and *hole_size are
452 * complete 32-bit values despite the fact that the bitfields in the DHAR
453 * only represent bits 31-24 of the base and offset values.
455 int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
456 u64 *hole_offset, u64 *hole_size)
458 struct amd64_pvt *pvt = mci->pvt_info;
461 /* only revE and later have the DRAM Hole Address Register */
462 if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_E) {
463 debugf1(" revision %d for node %d does not support DHAR\n",
464 pvt->ext_model, pvt->mc_node_id);
468 /* only valid for Fam10h */
469 if (boot_cpu_data.x86 == 0x10 &&
470 (pvt->dhar & F10_DRAM_MEM_HOIST_VALID) == 0) {
471 debugf1(" Dram Memory Hoisting is DISABLED on this system\n");
475 if ((pvt->dhar & DHAR_VALID) == 0) {
476 debugf1(" Dram Memory Hoisting is DISABLED on this node %d\n",
481 /* This node has Memory Hoisting */
	/* +------------------+--------------------+--------------------+-----
	 * | memory           | DRAM hole          | relocated          |
	 * | [0, (x - 1)]     | [x, 0xffffffff]    | addresses from     |
	 * |                  |                    | DRAM hole          |
	 * |                  |                    | [0x100000000,      |
	 * |                  |                    |  (0x100000000+     |
	 * |                  |                    |  (0xffffffff-x))]  |
	 * +------------------+--------------------+--------------------+-----
492 * Above is a diagram of physical memory showing the DRAM hole and the
493 * relocated addresses from the DRAM hole. As shown, the DRAM hole
494 * starts at address x (the base address) and extends through address
495 * 0xffffffff. The DRAM Hole Address Register (DHAR) relocates the
496 * addresses in the hole so that they start at 0x100000000.
	base = dhar_base(pvt->dhar);
	*hole_base = base;
	*hole_size = (0x1ull << 32) - base;
504 if (boot_cpu_data.x86 > 0xf)
505 *hole_offset = f10_dhar_offset(pvt->dhar);
	else
		*hole_offset = k8_dhar_offset(pvt->dhar);
509 debugf1(" DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
510 pvt->mc_node_id, (unsigned long)*hole_base,
511 (unsigned long)*hole_offset, (unsigned long)*hole_size);
515 EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
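/*
 * Worked example (hypothetical DHAR contents): if dhar_base() yields
 * 0xC0000000, then hole_size = 0x100000000 - 0xC0000000 = 0x40000000,
 * i.e. a 1 GB hole spanning [0xC0000000, 0xffffffff]. Per the diagram
 * above, the DRAM behind that hole reappears at 0x100000000 and is
 * mapped back by subtracting *hole_offset from the SysAddr.
 */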
518 * Return the DramAddr that the SysAddr given by @sys_addr maps to. It is
519 * assumed that sys_addr maps to the node given by mci.
521 * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
522 * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
523 * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
524 * then it is also involved in translating a SysAddr to a DramAddr. Sections
525 * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
526 * These parts of the documentation are unclear. I interpret them as follows:
528 * When node n receives a SysAddr, it processes the SysAddr as follows:
530 * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
531 * Limit registers for node n. If the SysAddr is not within the range
532 * specified by the base and limit values, then node n ignores the Sysaddr
533 * (since it does not map to node n). Otherwise continue to step 2 below.
535 * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
536 * disabled so skip to step 3 below. Otherwise see if the SysAddr is within
537 * the range of relocated addresses (starting at 0x100000000) from the DRAM
538 * hole. If not, skip to step 3 below. Else get the value of the
539 * DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
540 * offset defined by this value from the SysAddr.
542 * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
543 * Base register for node n. To obtain the DramAddr, subtract the base
544 * address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
546 static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
548 u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
551 dram_base = get_dram_base(mci);
	ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
				       &hole_size);
	if (!ret) {
556 if ((sys_addr >= (1ull << 32)) &&
557 (sys_addr < ((1ull << 32) + hole_size))) {
558 /* use DHAR to translate SysAddr to DramAddr */
559 dram_addr = sys_addr - hole_offset;
			debugf2("using DHAR to translate SysAddr 0x%lx to "
				"DramAddr 0x%lx\n",
				(unsigned long)sys_addr,
				(unsigned long)dram_addr);
571 * Translate the SysAddr to a DramAddr as shown near the start of
572 * section 3.4.4 (p. 70). Although sys_addr is a 64-bit value, the k8
573 * only deals with 40-bit values. Therefore we discard bits 63-40 of
574 * sys_addr below. If bit 39 of sys_addr is 1 then the bits we
575 * discard are all 1s. Otherwise the bits we discard are all 0s. See
576 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
577 * Programmer's Manual Volume 1 Application Programming.
579 dram_addr = (sys_addr & 0xffffffffffull) - dram_base;
581 debugf2("using DRAM Base register to translate SysAddr 0x%lx to "
582 "DramAddr 0x%lx\n", (unsigned long)sys_addr,
583 (unsigned long)dram_addr);
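/*
 * Worked example (hypothetical values): for a node with
 * dram_base = 0x100000000 and a SysAddr of 0x180000000 that does not hit
 * the relocated hole range, the fallback path above computes
 * DramAddr = (0x180000000 & 0xffffffffff) - 0x100000000 = 0x80000000.
 */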
588 * @intlv_en is the value of the IntlvEn field from a DRAM Base register
589 * (section 3.4.4.1). Return the number of bits from a SysAddr that are used
590 * for node interleaving.
592 static int num_node_interleave_bits(unsigned intlv_en)
594 static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
597 BUG_ON(intlv_en > 7);
598 n = intlv_shift_table[intlv_en];
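/*
 * Reading the table above: IntlvEn = 1 (2-node interleave) yields 1 bit,
 * IntlvEn = 3 (4 nodes) yields 2 bits, IntlvEn = 7 (8 nodes) yields 3
 * bits; every other encoding yields 0.
 */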
602 /* Translate the DramAddr given by @dram_addr to an InputAddr. */
603 static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
605 struct amd64_pvt *pvt;
612 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
613 * concerning translating a DramAddr to an InputAddr.
615 intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]);
	input_addr = ((dram_addr >> intlv_shift) & 0xffffff000ull) +
		     (dram_addr & 0xfff);
619 debugf2(" Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
620 intlv_shift, (unsigned long)dram_addr,
621 (unsigned long)input_addr);
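/*
 * Worked example (hypothetical values): with a 2-node interleave
 * (intlv_shift = 1), DramAddr 0x5000 becomes
 * InputAddr = ((0x5000 >> 1) & 0xffffff000) + (0x5000 & 0xfff) = 0x2000;
 * the interleave bit at position 12 is squeezed out while the low 12
 * bits pass through untouched.
 */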
627 * Translate the SysAddr represented by @sys_addr to an InputAddr. It is
628 * assumed that @sys_addr maps to the node given by mci.
630 static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
	input_addr =
	    dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));
637 debugf2("SysAdddr 0x%lx translates to InputAddr 0x%lx\n",
638 (unsigned long)sys_addr, (unsigned long)input_addr);
645 * @input_addr is an InputAddr associated with the node represented by mci.
646 * Translate @input_addr to a DramAddr and return the result.
648 static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
650 struct amd64_pvt *pvt;
651 int node_id, intlv_shift;
656 * Near the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
657 * shows how to translate a DramAddr to an InputAddr. Here we reverse
658 * this procedure. When translating from a DramAddr to an InputAddr, the
659 * bits used for node interleaving are discarded. Here we recover these
660 * bits from the IntlvSel field of the DRAM Limit register (section
661 * 3.4.4.2) for the node that input_addr is associated with.
664 node_id = pvt->mc_node_id;
665 BUG_ON((node_id < 0) || (node_id > 7));
667 intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]);
669 if (intlv_shift == 0) {
670 debugf1(" InputAddr 0x%lx translates to DramAddr of "
671 "same value\n", (unsigned long)input_addr);
676 bits = ((input_addr & 0xffffff000ull) << intlv_shift) +
677 (input_addr & 0xfff);
679 intlv_sel = pvt->dram_IntlvSel[node_id] & ((1 << intlv_shift) - 1);
680 dram_addr = bits + (intlv_sel << 12);
682 debugf1("InputAddr 0x%lx translates to DramAddr 0x%lx "
683 "(%d node interleave bits)\n", (unsigned long)input_addr,
684 (unsigned long)dram_addr, intlv_shift);
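/*
 * Continuing the example from dram_addr_to_input_addr() in reverse:
 * with intlv_shift = 1, intlv_sel = 1 and InputAddr 0x2000, the code
 * computes bits = (0x2000 << 1) + 0 = 0x4000 and reinserts the
 * interleave selector at bit 12: DramAddr = 0x4000 + 0x1000 = 0x5000.
 */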
690 * @dram_addr is a DramAddr that maps to the node represented by mci. Convert
691 * @dram_addr to a SysAddr.
693 static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr)
695 struct amd64_pvt *pvt = mci->pvt_info;
696 u64 hole_base, hole_offset, hole_size, base, limit, sys_addr;
699 ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
702 if ((dram_addr >= hole_base) &&
703 (dram_addr < (hole_base + hole_size))) {
704 sys_addr = dram_addr + hole_offset;
706 debugf1("using DHAR to translate DramAddr 0x%lx to "
707 "SysAddr 0x%lx\n", (unsigned long)dram_addr,
708 (unsigned long)sys_addr);
714 amd64_get_base_and_limit(pvt, pvt->mc_node_id, &base, &limit);
715 sys_addr = dram_addr + base;
718 * The sys_addr we have computed up to this point is a 40-bit value
719 * because the k8 deals with 40-bit values. However, the value we are
720 * supposed to return is a full 64-bit physical address. The AMD
721 * x86-64 architecture specifies that the most significant implemented
722 * address bit through bit 63 of a physical address must be either all
723 * 0s or all 1s. Therefore we sign-extend the 40-bit sys_addr to a
724 * 64-bit value below. See section 3.4.2 of AMD publication 24592:
725 * AMD x86-64 Architecture Programmer's Manual Volume 1 Application
728 sys_addr |= ~((sys_addr & (1ull << 39)) - 1);
730 debugf1(" Node %d, DramAddr 0x%lx to SysAddr 0x%lx\n",
731 pvt->mc_node_id, (unsigned long)dram_addr,
732 (unsigned long)sys_addr);
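/*
 * Sign-extension example for the statement above: with bit 39 set,
 * (sys_addr & (1ull << 39)) - 1 = 0x0000007fffffffff, whose complement
 * 0xffffff8000000000 ORs bits 63-39 to all ones. With bit 39 clear, the
 * complement of (0 - 1) is 0 and sys_addr is left unchanged.
 */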
738 * @input_addr is an InputAddr associated with the node given by mci. Translate
739 * @input_addr to a SysAddr.
741 static inline u64 input_addr_to_sys_addr(struct mem_ctl_info *mci,
744 return dram_addr_to_sys_addr(mci,
745 input_addr_to_dram_addr(mci, input_addr));
749 * Find the minimum and maximum InputAddr values that map to the given @csrow.
750 * Pass back these values in *input_addr_min and *input_addr_max.
752 static void find_csrow_limits(struct mem_ctl_info *mci, int csrow,
753 u64 *input_addr_min, u64 *input_addr_max)
755 struct amd64_pvt *pvt;
759 BUG_ON((csrow < 0) || (csrow >= pvt->cs_count));
761 base = base_from_dct_base(pvt, csrow);
762 mask = mask_from_dct_mask(pvt, csrow);
764 *input_addr_min = base & ~mask;
765 *input_addr_max = base | mask | pvt->dcs_mask_notused;
768 /* Map the Error address to a PAGE and PAGE OFFSET. */
769 static inline void error_address_to_page_and_offset(u64 error_address,
770 u32 *page, u32 *offset)
772 *page = (u32) (error_address >> PAGE_SHIFT);
773 *offset = ((u32) error_address) & ~PAGE_MASK;
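/*
 * Example with the usual 4K pages (PAGE_SHIFT == 12): an error address
 * of 0x12345678 splits into *page = 0x12345 and *offset = 0x678.
 */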
777 * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
778 * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
779 * of a node that detected an ECC memory error. mci represents the node that
780 * the error address maps to (possibly different from the node that detected
 * the error). Return the number of the csrow that sys_addr maps to, or -1 on
 * failure.
 */
784 static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
788 csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));
	if (csrow == -1)
		amd64_mc_printk(mci, KERN_ERR,
792 "Failed to translate InputAddr to csrow for "
793 "address 0x%lx\n", (unsigned long)sys_addr);
797 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
799 static void amd64_cpu_display_info(struct amd64_pvt *pvt)
801 if (boot_cpu_data.x86 == 0x11)
802 edac_printk(KERN_DEBUG, EDAC_MC, "F11h CPU detected\n");
803 else if (boot_cpu_data.x86 == 0x10)
804 edac_printk(KERN_DEBUG, EDAC_MC, "F10h CPU detected\n");
805 else if (boot_cpu_data.x86 == 0xf)
806 edac_printk(KERN_DEBUG, EDAC_MC, "%s detected\n",
807 (pvt->ext_model >= K8_REV_F) ?
808 "Rev F or later" : "Rev E or earlier");
	else
		/* we'll hardly ever get here */
811 edac_printk(KERN_ERR, EDAC_MC, "Unknown cpu!\n");
815 * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
818 static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt)
	int bit;
	enum edac_type edac_cap = EDAC_FLAG_NONE;

	bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= K8_REV_F)
		? 19
		: 17;
827 if (pvt->dclr0 & BIT(bit))
828 edac_cap = EDAC_FLAG_SECDED;
834 static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt);
836 static void amd64_dump_dramcfg_low(u32 dclr, int chan)
838 debugf1("F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);
840 debugf1(" DIMM type: %sbuffered; all DIMMs support ECC: %s\n",
841 (dclr & BIT(16)) ? "un" : "",
842 (dclr & BIT(19)) ? "yes" : "no");
844 debugf1(" PAR/ERR parity: %s\n",
845 (dclr & BIT(8)) ? "enabled" : "disabled");
847 debugf1(" DCT 128bit mode width: %s\n",
848 (dclr & BIT(11)) ? "128b" : "64b");
850 debugf1(" x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
851 (dclr & BIT(12)) ? "yes" : "no",
852 (dclr & BIT(13)) ? "yes" : "no",
853 (dclr & BIT(14)) ? "yes" : "no",
854 (dclr & BIT(15)) ? "yes" : "no");
857 /* Display and decode various NB registers for debug purposes. */
858 static void amd64_dump_misc_regs(struct amd64_pvt *pvt)
862 debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
864 debugf1(" NB two channel DRAM capable: %s\n",
865 (pvt->nbcap & K8_NBCAP_DCT_DUAL) ? "yes" : "no");
867 debugf1(" ECC capable: %s, ChipKill ECC capable: %s\n",
868 (pvt->nbcap & K8_NBCAP_SECDED) ? "yes" : "no",
869 (pvt->nbcap & K8_NBCAP_CHIPKILL) ? "yes" : "no");
871 amd64_dump_dramcfg_low(pvt->dclr0, 0);
873 debugf1("F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);
875 debugf1("F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, "
878 dhar_base(pvt->dhar),
879 (boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt->dhar)
880 : f10_dhar_offset(pvt->dhar));
882 debugf1(" DramHoleValid: %s\n",
883 (pvt->dhar & DHAR_VALID) ? "yes" : "no");
885 /* everything below this point is Fam10h and above */
886 if (boot_cpu_data.x86 == 0xf) {
		amd64_debug_display_dimm_sizes(0, pvt);
		return;
	}
891 /* Only if NOT ganged does dclr1 have valid info */
892 if (!dct_ganging_enabled(pvt))
893 amd64_dump_dramcfg_low(pvt->dclr1, 1);
896 * Determine if ganged and then dump memory sizes for first controller,
897 * and if NOT ganged dump info for 2nd controller.
899 ganged = dct_ganging_enabled(pvt);
901 amd64_debug_display_dimm_sizes(0, pvt);
	if (!ganged)
		amd64_debug_display_dimm_sizes(1, pvt);
907 /* Read in both of DBAM registers */
908 static void amd64_read_dbam_reg(struct amd64_pvt *pvt)
910 amd64_read_pci_cfg(pvt->dram_f2_ctl, DBAM0, &pvt->dbam0);
912 if (boot_cpu_data.x86 >= 0x10)
913 amd64_read_pci_cfg(pvt->dram_f2_ctl, DBAM1, &pvt->dbam1);
917 * NOTE: CPU Revision Dependent code: Rev E and Rev F
919 * Set the DCSB and DCSM mask values depending on the CPU revision value. Also
920 * set the shift factor for the DCSB and DCSM values.
922 * ->dcs_mask_notused, RevE:
924 * To find the max InputAddr for the csrow, start with the base address and set
925 * all bits that are "don't care" bits in the test at the start of section
928 * The "don't care" bits are all set bits in the mask and all bits in the gaps
929 * between bit ranges [35:25] and [19:13]. The value REV_E_DCS_NOTUSED_BITS
930 * represents bits [24:20] and [12:0], which are all bits in the above-mentioned
933 * ->dcs_mask_notused, RevF and later:
935 * To find the max InputAddr for the csrow, start with the base address and set
936 * all bits that are "don't care" bits in the test at the start of NPT section
939 * The "don't care" bits are all set bits in the mask and all bits in the gaps
940 * between bit ranges [36:27] and [21:13].
942 * The value REV_F_F1Xh_DCS_NOTUSED_BITS represents bits [26:22] and [12:0],
943 * which are all bits in the above-mentioned gaps.
945 static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt)
948 if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
949 pvt->dcsb_base = REV_E_DCSB_BASE_BITS;
950 pvt->dcsm_mask = REV_E_DCSM_MASK_BITS;
951 pvt->dcs_mask_notused = REV_E_DCS_NOTUSED_BITS;
952 pvt->dcs_shift = REV_E_DCS_SHIFT;
956 pvt->dcsb_base = REV_F_F1Xh_DCSB_BASE_BITS;
957 pvt->dcsm_mask = REV_F_F1Xh_DCSM_MASK_BITS;
958 pvt->dcs_mask_notused = REV_F_F1Xh_DCS_NOTUSED_BITS;
959 pvt->dcs_shift = REV_F_F1Xh_DCS_SHIFT;
961 if (boot_cpu_data.x86 == 0x11) {
972 * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask hw registers
974 static void amd64_read_dct_base_mask(struct amd64_pvt *pvt)
978 amd64_set_dct_base_and_mask(pvt);
980 for (cs = 0; cs < pvt->cs_count; cs++) {
981 reg = K8_DCSB0 + (cs * 4);
982 if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg, &pvt->dcsb0[cs]))
983 debugf0(" DCSB0[%d]=0x%08x reg: F2x%x\n",
984 cs, pvt->dcsb0[cs], reg);
986 /* If DCT are NOT ganged, then read in DCT1's base */
987 if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
988 reg = F10_DCSB1 + (cs * 4);
989 if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg,
991 debugf0(" DCSB1[%d]=0x%08x reg: F2x%x\n",
992 cs, pvt->dcsb1[cs], reg);
998 for (cs = 0; cs < pvt->num_dcsm; cs++) {
999 reg = K8_DCSM0 + (cs * 4);
1000 if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg, &pvt->dcsm0[cs]))
1001 debugf0(" DCSM0[%d]=0x%08x reg: F2x%x\n",
1002 cs, pvt->dcsm0[cs], reg);
1004 /* If DCT are NOT ganged, then read in DCT1's mask */
1005 if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
1006 reg = F10_DCSM1 + (cs * 4);
1007 if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg,
1009 debugf0(" DCSM1[%d]=0x%08x reg: F2x%x\n",
1010 cs, pvt->dcsm1[cs], reg);
1017 static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt)
1021 if (boot_cpu_data.x86 >= 0x10 || pvt->ext_model >= K8_REV_F) {
1022 if (pvt->dchr0 & DDR3_MODE)
1023 type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
		else
			type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
	} else {
		type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
	}
1030 debugf1(" Memory type is: %s\n", edac_mem_types[type]);
1036 * Read the DRAM Configuration Low register. It differs between CG, D & E revs
1037 * and the later RevF memory controllers (DDR vs DDR2)
1040 * number of memory channels in operation
1042 * contents of the DCL0_LOW register
1044 static int k8_early_channel_count(struct amd64_pvt *pvt)
1048 err = amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0);
1052 if ((boot_cpu_data.x86_model >> 4) >= K8_REV_F) {
1053 /* RevF (NPT) and later */
1054 flag = pvt->dclr0 & F10_WIDTH_128;
	} else {
		/* RevE and earlier */
		flag = pvt->dclr0 & REVE_WIDTH_128;
	}
1063 return (flag) ? 2 : 1;
1066 /* extract the ERROR ADDRESS for the K8 CPUs */
1067 static u64 k8_get_error_address(struct mem_ctl_info *mci,
1068 struct err_regs *info)
1070 return (((u64) (info->nbeah & 0xff)) << 32) +
1071 (info->nbeal & ~0x03);
1075 * Read the Base and Limit registers for K8 based Memory controllers; extract
1076 * fields from the 'raw' reg into separate data fields
1078 * Isolates: BASE, LIMIT, IntlvEn, IntlvSel, RW_EN
1080 static void k8_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
1083 u32 off = dram << 3; /* 8 bytes between DRAM entries */
1085 amd64_read_pci_cfg(pvt->addr_f1_ctl, K8_DRAM_BASE_LOW + off, &low);
1087 /* Extract parts into separate data entries */
1088 pvt->dram_base[dram] = ((u64) low & 0xFFFF0000) << 8;
1089 pvt->dram_IntlvEn[dram] = (low >> 8) & 0x7;
1090 pvt->dram_rw_en[dram] = (low & 0x3);
1092 amd64_read_pci_cfg(pvt->addr_f1_ctl, K8_DRAM_LIMIT_LOW + off, &low);
1095 * Extract parts into separate data entries. Limit is the HIGHEST memory
1096 * location of the region, so lower 24 bits need to be all ones
1098 pvt->dram_limit[dram] = (((u64) low & 0xFFFF0000) << 8) | 0x00FFFFFF;
1099 pvt->dram_IntlvSel[dram] = (low >> 8) & 0x7;
1100 pvt->dram_DstNode[dram] = (low & 0x7);
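/*
 * Worked example (hypothetical register value): a DRAM Base low word of
 * 0x00100003 yields base = (0x00100000 << 8) = 0x10000000 (256 MB),
 * IntlvEn = 0 and RW_EN = 0x3 (both enable bits set). The limit is
 * extracted the same way and ORed with 0x00FFFFFF so that it addresses
 * the last byte of the region.
 */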
1103 static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
1104 struct err_regs *info,
1107 struct mem_ctl_info *src_mci;
1108 unsigned short syndrome;
1112 /* Extract the syndrome parts and form a 16-bit syndrome */
1113 syndrome = HIGH_SYNDROME(info->nbsl) << 8;
1114 syndrome |= LOW_SYNDROME(info->nbsh);
1116 /* CHIPKILL enabled */
1117 if (info->nbcfg & K8_NBCFG_CHIPKILL) {
1118 channel = get_channel_from_ecc_syndrome(mci, syndrome);
1121 * Syndrome didn't map, so we don't know which of the
			 * 2 DIMMs is in error. So we need to ID 'both' of them
			 * as suspect.
			 */
1125 amd64_mc_printk(mci, KERN_WARNING,
1126 "unknown syndrome 0x%x - possible error "
1127 "reporting race\n", syndrome);
1128 edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
1133 * non-chipkill ecc mode
1135 * The k8 documentation is unclear about how to determine the
1136 * channel number when using non-chipkill memory. This method
1137 * was obtained from email communication with someone at AMD.
1138 * (Wish the email was placed in this comment - norsk)
1140 channel = ((sys_addr & BIT(3)) != 0);
1144 * Find out which node the error address belongs to. This may be
1145 * different from the node that detected the error.
1147 src_mci = find_mc_by_sys_addr(mci, sys_addr);
1149 amd64_mc_printk(mci, KERN_ERR,
1150 "failed to map error address 0x%lx to a node\n",
1151 (unsigned long)sys_addr);
1152 edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
1156 /* Now map the sys_addr to a CSROW */
1157 csrow = sys_addr_to_csrow(src_mci, sys_addr);
	if (csrow < 0) {
		edac_mc_handle_ce_no_info(src_mci, EDAC_MOD_STR);
	} else {
		error_address_to_page_and_offset(sys_addr, &page, &offset);
1163 edac_mc_handle_ce(src_mci, page, offset, syndrome, csrow,
1164 channel, EDAC_MOD_STR);
1168 static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode)
1172 if (pvt->ext_model >= K8_REV_F)
1173 dbam_map = ddr2_dbam;
1174 else if (pvt->ext_model >= K8_REV_D)
1175 dbam_map = ddr2_dbam_revD;
	else
		dbam_map = ddr2_dbam_revCG;
1179 return dbam_map[cs_mode];
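/*
 * Example using the tables at the top of this file: on a revF or later
 * K8, cs_mode 0 indexes ddr2_dbam[] and yields a 128 MB chip select,
 * per the '[0] = 128' initializer above.
 */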
1183 * Get the number of DCT channels in use.
1186 * number of Memory Channels in operation
1188 * contents of the DCL0_LOW register
1190 static int f10_early_channel_count(struct amd64_pvt *pvt)
1192 int dbams[] = { DBAM0, DBAM1 };
1193 int i, j, channels = 0;
1196 /* If we are in 128 bit mode, then we are using 2 channels */
1197 if (pvt->dclr0 & F10_WIDTH_128) {
1203 * Need to check if in unganged mode: In such, there are 2 channels,
1204 * but they are not in 128 bit mode and thus the above 'dclr0' status
1207 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
1208 * their CSEnable bit on. If so, then SINGLE DIMM case.
1210 debugf0("Data width is not 128 bits - need more decoding\n");
1213 * Check DRAM Bank Address Mapping values for each DIMM to see if there
1214 * is more than just one DIMM present in unganged mode. Need to check
1215 * both controllers since DIMMs can be placed in either one.
1217 for (i = 0; i < ARRAY_SIZE(dbams); i++) {
1218 if (amd64_read_pci_cfg(pvt->dram_f2_ctl, dbams[i], &dbam))
1221 for (j = 0; j < 4; j++) {
1222 if (DBAM_DIMM(j, dbam) > 0) {
1232 debugf0("MCT channel count: %d\n", channels);
1241 static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode)
1245 if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
1246 dbam_map = ddr3_dbam;
	else
		dbam_map = ddr2_dbam;
1250 return dbam_map[cs_mode];
1253 /* Enable extended configuration access via 0xCF8 feature */
1254 static void amd64_setup(struct amd64_pvt *pvt)
	amd64_read_pci_cfg(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, &reg);
1260 pvt->flags.cf8_extcfg = !!(reg & F10_NB_CFG_LOW_ENABLE_EXT_CFG);
1261 reg |= F10_NB_CFG_LOW_ENABLE_EXT_CFG;
1262 pci_write_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, reg);
1265 /* Restore the extended configuration access via 0xCF8 feature */
1266 static void amd64_teardown(struct amd64_pvt *pvt)
	amd64_read_pci_cfg(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, &reg);
1272 reg &= ~F10_NB_CFG_LOW_ENABLE_EXT_CFG;
1273 if (pvt->flags.cf8_extcfg)
1274 reg |= F10_NB_CFG_LOW_ENABLE_EXT_CFG;
1275 pci_write_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, reg);
1278 static u64 f10_get_error_address(struct mem_ctl_info *mci,
1279 struct err_regs *info)
1281 return (((u64) (info->nbeah & 0xffff)) << 32) +
1282 (info->nbeal & ~0x01);
1286 * Read the Base and Limit registers for F10 based Memory controllers. Extract
1287 * fields from the 'raw' reg into separate data fields.
1289 * Isolates: BASE, LIMIT, IntlvEn, IntlvSel, RW_EN.
1291 static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
1293 u32 high_offset, low_offset, high_base, low_base, high_limit, low_limit;
1295 low_offset = K8_DRAM_BASE_LOW + (dram << 3);
1296 high_offset = F10_DRAM_BASE_HIGH + (dram << 3);
1298 /* read the 'raw' DRAM BASE Address register */
1299 amd64_read_pci_cfg(pvt->addr_f1_ctl, low_offset, &low_base);
1301 /* Read from the ECS data register */
1302 amd64_read_pci_cfg(pvt->addr_f1_ctl, high_offset, &high_base);
1304 /* Extract parts into separate data entries */
1305 pvt->dram_rw_en[dram] = (low_base & 0x3);
	if (pvt->dram_rw_en[dram] == 0)
		return;
1310 pvt->dram_IntlvEn[dram] = (low_base >> 8) & 0x7;
1312 pvt->dram_base[dram] = (((u64)high_base & 0x000000FF) << 40) |
1313 (((u64)low_base & 0xFFFF0000) << 8);
1315 low_offset = K8_DRAM_LIMIT_LOW + (dram << 3);
1316 high_offset = F10_DRAM_LIMIT_HIGH + (dram << 3);
1318 /* read the 'raw' LIMIT registers */
1319 amd64_read_pci_cfg(pvt->addr_f1_ctl, low_offset, &low_limit);
1321 /* Read from the ECS data register for the HIGH portion */
1322 amd64_read_pci_cfg(pvt->addr_f1_ctl, high_offset, &high_limit);
1324 pvt->dram_DstNode[dram] = (low_limit & 0x7);
1325 pvt->dram_IntlvSel[dram] = (low_limit >> 8) & 0x7;
1328 * Extract address values and form a LIMIT address. Limit is the HIGHEST
1329 * memory location of the region, so low 24 bits need to be all ones.
	pvt->dram_limit[dram] = (((u64)high_limit & 0x000000FF) << 40) |
				(((u64) low_limit & 0xFFFF0000) << 8) |
				0x00FFFFFF;
1336 static void f10_read_dram_ctl_register(struct amd64_pvt *pvt)
1339 if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCTL_SEL_LOW,
1340 &pvt->dram_ctl_select_low)) {
1341 debugf0("F2x110 (DCTL Sel. Low): 0x%08x, "
1342 "High range addresses at: 0x%x\n",
1343 pvt->dram_ctl_select_low,
1344 dct_sel_baseaddr(pvt));
1346 debugf0(" DCT mode: %s, All DCTs on: %s\n",
1347 (dct_ganging_enabled(pvt) ? "ganged" : "unganged"),
1348 (dct_dram_enabled(pvt) ? "yes" : "no"));
1350 if (!dct_ganging_enabled(pvt))
1351 debugf0(" Address range split per DCT: %s\n",
1352 (dct_high_range_enabled(pvt) ? "yes" : "no"));
1354 debugf0(" DCT data interleave for ECC: %s, "
1355 "DRAM cleared since last warm reset: %s\n",
1356 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
1357 (dct_memory_cleared(pvt) ? "yes" : "no"));
1359 debugf0(" DCT channel interleave: %s, "
1360 "DCT interleave bits selector: 0x%x\n",
1361 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
1362 dct_sel_interleave_addr(pvt));
1365 amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCTL_SEL_HIGH,
1366 &pvt->dram_ctl_select_high);
1370 * determine channel based on the interleaving mode: F10h BKDG, 2.8.9 Memory
1371 * Interleaving Modes.
1373 static u32 f10_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
1374 int hi_range_sel, u32 intlv_en)
1376 u32 cs, temp, dct_sel_high = (pvt->dram_ctl_select_low >> 1) & 1;
	if (dct_ganging_enabled(pvt))
		cs = 0;
	else if (hi_range_sel)
		cs = dct_sel_high;
1382 else if (dct_interleave_enabled(pvt)) {
1384 * see F2x110[DctSelIntLvAddr] - channel interleave mode
1386 if (dct_sel_interleave_addr(pvt) == 0)
1387 cs = sys_addr >> 6 & 1;
1388 else if ((dct_sel_interleave_addr(pvt) >> 1) & 1) {
1389 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2;
1391 if (dct_sel_interleave_addr(pvt) & 1)
1392 cs = (sys_addr >> 9 & 1) ^ temp;
1394 cs = (sys_addr >> 6 & 1) ^ temp;
1395 } else if (intlv_en & 4)
1396 cs = sys_addr >> 15 & 1;
1397 else if (intlv_en & 2)
1398 cs = sys_addr >> 14 & 1;
1399 else if (intlv_en & 1)
1400 cs = sys_addr >> 13 & 1;
1402 cs = sys_addr >> 12 & 1;
1403 } else if (dct_high_range_enabled(pvt) && !dct_ganging_enabled(pvt))
1404 cs = ~dct_sel_high & 1;
static inline u32 f10_map_intlv_en_to_shift(u32 intlv_en)
{
	if (intlv_en == 1)
		return 1;
	else if (intlv_en == 3)
		return 2;
	else if (intlv_en == 7)
		return 3;

	return 0;
}
1423 /* See F10h BKDG, 2.8.10.2 DctSelBaseOffset Programming */
1424 static inline u64 f10_get_base_addr_offset(u64 sys_addr, int hi_range_sel,
1425 u32 dct_sel_base_addr,
1426 u64 dct_sel_base_off,
					    u32 hole_valid, u32 hole_off,
					    u64 dram_base)
{
	u64 chan_off;

	if (hi_range_sel) {
		if (!(dct_sel_base_addr & 0xFFFFF800) &&
		    hole_valid && (sys_addr >= 0x100000000ULL))
			chan_off = hole_off << 16;
		else
			chan_off = dct_sel_base_off;
	} else {
		if (hole_valid && (sys_addr >= 0x100000000ULL))
			chan_off = hole_off << 16;
		else
			chan_off = dram_base & 0xFFFFF8000000ULL;
	}
1445 return (sys_addr & 0x0000FFFFFFFFFFC0ULL) -
1446 (chan_off & 0x0000FFFFFF800000ULL);
1449 /* Hack for the time being - Can we get this from BIOS?? */
1450 #define CH0SPARE_RANK 0
1451 #define CH1SPARE_RANK 1
 * checks if the csrow passed in is marked as SPARED, if so returns the new
 * csrow.
 */
1457 static inline int f10_process_possible_spare(int csrow,
1458 u32 cs, struct amd64_pvt *pvt)
	/* Depending on channel, isolate respective SPARING info */
	if (cs) {
		swap_done = F10_ONLINE_SPARE_SWAPDONE1(pvt->online_spare);
		bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS1(pvt->online_spare);
		if (swap_done && (csrow == bad_dram_cs))
			csrow = CH1SPARE_RANK;
	} else {
		swap_done = F10_ONLINE_SPARE_SWAPDONE0(pvt->online_spare);
		bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS0(pvt->online_spare);
		if (swap_done && (csrow == bad_dram_cs))
			csrow = CH0SPARE_RANK;
	}
1479 * Iterate over the DRAM DCT "base" and "mask" registers looking for a
1480 * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
1483 * -EINVAL: NOT FOUND
1484 * 0..csrow = Chip-Select Row
1486 static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs)
1488 struct mem_ctl_info *mci;
1489 struct amd64_pvt *pvt;
1490 u32 cs_base, cs_mask;
1491 int cs_found = -EINVAL;
1494 mci = mci_lookup[nid];
1498 pvt = mci->pvt_info;
1500 debugf1("InputAddr=0x%x channelselect=%d\n", in_addr, cs);
1502 for (csrow = 0; csrow < pvt->cs_count; csrow++) {
1504 cs_base = amd64_get_dct_base(pvt, cs, csrow);
1505 if (!(cs_base & K8_DCSB_CS_ENABLE))
1509 * We have an ENABLED CSROW, Isolate just the MASK bits of the
1510 * target: [28:19] and [13:5], which map to [36:27] and [21:13]
1511 * of the actual address.
1513 cs_base &= REV_F_F1Xh_DCSB_BASE_BITS;
1516 * Get the DCT Mask, and ENABLE the reserved bits: [18:16] and
1517 * [4:0] to become ON. Then mask off bits [28:0] ([36:8])
1519 cs_mask = amd64_get_dct_mask(pvt, cs, csrow);
1521 debugf1(" CSROW=%d CSBase=0x%x RAW CSMask=0x%x\n",
1522 csrow, cs_base, cs_mask);
1524 cs_mask = (cs_mask | 0x0007C01F) & 0x1FFFFFFF;
1526 debugf1(" Final CSMask=0x%x\n", cs_mask);
1527 debugf1(" (InputAddr & ~CSMask)=0x%x "
1528 "(CSBase & ~CSMask)=0x%x\n",
1529 (in_addr & ~cs_mask), (cs_base & ~cs_mask));
1531 if ((in_addr & ~cs_mask) == (cs_base & ~cs_mask)) {
1532 cs_found = f10_process_possible_spare(csrow, cs, pvt);
1534 debugf1(" MATCH csrow=%d\n", cs_found);
1541 /* For a given @dram_range, check if @sys_addr falls within it. */
1542 static int f10_match_to_this_node(struct amd64_pvt *pvt, int dram_range,
1543 u64 sys_addr, int *nid, int *chan_sel)
1545 int node_id, cs_found = -EINVAL, high_range = 0;
1546 u32 intlv_en, intlv_sel, intlv_shift, hole_off;
1547 u32 hole_valid, tmp, dct_sel_base, channel;
1548 u64 dram_base, chan_addr, dct_sel_base_off;
1550 dram_base = pvt->dram_base[dram_range];
1551 intlv_en = pvt->dram_IntlvEn[dram_range];
1553 node_id = pvt->dram_DstNode[dram_range];
1554 intlv_sel = pvt->dram_IntlvSel[dram_range];
1556 debugf1("(dram=%d) Base=0x%llx SystemAddr= 0x%llx Limit=0x%llx\n",
1557 dram_range, dram_base, sys_addr, pvt->dram_limit[dram_range]);
	 * This assumes that one node's DHAR is the same as all the other
	 * nodes' DHAR.
	 */
1563 hole_off = (pvt->dhar & 0x0000FF80);
1564 hole_valid = (pvt->dhar & 0x1);
1565 dct_sel_base_off = (pvt->dram_ctl_select_high & 0xFFFFFC00) << 16;
1567 debugf1(" HoleOffset=0x%x HoleValid=0x%x IntlvSel=0x%x\n",
1568 hole_off, hole_valid, intlv_sel);
	if (intlv_en &&
	    (intlv_sel != ((sys_addr >> 12) & intlv_en)))
		return -EINVAL;
1574 dct_sel_base = dct_sel_baseaddr(pvt);
1577 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
1578 * select between DCT0 and DCT1.
	if (dct_high_range_enabled(pvt) &&
	   !dct_ganging_enabled(pvt) &&
	   ((sys_addr >> 27) >= (dct_sel_base >> 11)))
		high_range = 1;
1585 channel = f10_determine_channel(pvt, sys_addr, high_range, intlv_en);
1587 chan_addr = f10_get_base_addr_offset(sys_addr, high_range, dct_sel_base,
1588 dct_sel_base_off, hole_valid,
1589 hole_off, dram_base);
1591 intlv_shift = f10_map_intlv_en_to_shift(intlv_en);
1593 /* remove Node ID (in case of memory interleaving) */
1594 tmp = chan_addr & 0xFC0;
1596 chan_addr = ((chan_addr >> intlv_shift) & 0xFFFFFFFFF000ULL) | tmp;
1598 /* remove channel interleave and hash */
1599 if (dct_interleave_enabled(pvt) &&
1600 !dct_high_range_enabled(pvt) &&
1601 !dct_ganging_enabled(pvt)) {
		if (dct_sel_interleave_addr(pvt) != 1)
			chan_addr = (chan_addr >> 1) & 0xFFFFFFFFFFFFFFC0ULL;
		else {
			tmp = chan_addr & 0xFC0;
			chan_addr = ((chan_addr & 0xFFFFFFFFFFFFC000ULL) >> 1)
				    | tmp;
		}
1611 debugf1(" (ChannelAddrLong=0x%llx) >> 8 becomes InputAddr=0x%x\n",
1612 chan_addr, (u32)(chan_addr >> 8));
1614 cs_found = f10_lookup_addr_in_dct(chan_addr >> 8, node_id, channel);
	if (cs_found >= 0) {
		*nid = node_id;
		*chan_sel = channel;
	}
1623 static int f10_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr,
1624 int *node, int *chan_sel)
1626 int dram_range, cs_found = -EINVAL;
1627 u64 dram_base, dram_limit;
1629 for (dram_range = 0; dram_range < DRAM_REG_COUNT; dram_range++) {
1631 if (!pvt->dram_rw_en[dram_range])
1634 dram_base = pvt->dram_base[dram_range];
1635 dram_limit = pvt->dram_limit[dram_range];
		if ((dram_base <= sys_addr) && (sys_addr <= dram_limit)) {

			cs_found = f10_match_to_this_node(pvt, dram_range,
							  sys_addr, node,
							  chan_sel);
			if (cs_found >= 0)
				break;
		}
1650 * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
1651 * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
1653 * The @sys_addr is usually an error address received from the hardware
1656 static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
1657 struct err_regs *info,
1660 struct amd64_pvt *pvt = mci->pvt_info;
1662 unsigned short syndrome;
1663 int nid, csrow, chan = 0;
1665 csrow = f10_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan);
	if (csrow < 0) {
		edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
		return;
	}
1672 error_address_to_page_and_offset(sys_addr, &page, &offset);
1674 syndrome = HIGH_SYNDROME(info->nbsl) << 8;
1675 syndrome |= LOW_SYNDROME(info->nbsh);
1678 * We need the syndromes for channel detection only when we're
	 * ganged. Otherwise @chan should already contain the channel at
	 * this point.
	 */
1682 if (dct_ganging_enabled(pvt) && pvt->nbcfg & K8_NBCFG_CHIPKILL)
1683 chan = get_channel_from_ecc_syndrome(mci, syndrome);
	if (chan >= 0)
		edac_mc_handle_ce(mci, page, offset, syndrome, csrow, chan,
				  EDAC_MOD_STR);
	else {
		/*
		 * Channel unknown, report all channels on this CSROW as failed.
		 */
1692 for (chan = 0; chan < mci->csrows[csrow].nr_channels; chan++)
1693 edac_mc_handle_ce(mci, page, offset, syndrome,
1694 csrow, chan, EDAC_MOD_STR);
 * debug routine to display the memory sizes of all logical DIMMs and their
 * CSROWs
 */
1701 static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt)
1703 int dimm, size0, size1;
1707 if (boot_cpu_data.x86 == 0xf) {
1708 /* K8 families < revF not supported yet */
		if (pvt->ext_model < K8_REV_F)
			return;
	}
1715 debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
1716 ctrl, ctrl ? pvt->dbam1 : pvt->dbam0);
1718 dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
1719 dcsb = ctrl ? pvt->dcsb1 : pvt->dcsb0;
1721 edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
1723 /* Dump memory sizes for DIMM and its CSROWs */
1724 for (dimm = 0; dimm < 4; dimm++) {
		size0 = 0;
		if (dcsb[dimm*2] & K8_DCSB_CS_ENABLE)
1728 size0 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam));
		size1 = 0;
		if (dcsb[dimm*2 + 1] & K8_DCSB_CS_ENABLE)
1732 size1 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam));
1734 edac_printk(KERN_DEBUG, EDAC_MC, " %d: %5dMB %d: %5dMB\n",
1735 dimm * 2, size0, dimm * 2 + 1, size1);
 * There currently are 3 types of MC devices for AMD Athlon/Opterons
 * (as per PCI DEVICE_IDs):
 *
 * Family K8: That is the Athlon64 and Opteron CPUs. They all have the same PCI
 * DEVICE ID, even though there are differences between the different Revisions
 * (CG, D, E, F).
 *
 * Family F10h and F11h.
 */
1750 static struct amd64_family_type amd64_family_types[] = {
1753 .addr_f1_ctl = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
1754 .misc_f3_ctl = PCI_DEVICE_ID_AMD_K8_NB_MISC,
1756 .early_channel_count = k8_early_channel_count,
1757 .get_error_address = k8_get_error_address,
1758 .read_dram_base_limit = k8_read_dram_base_limit,
1759 .map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow,
1760 .dbam_to_cs = k8_dbam_to_chip_select,
1764 .ctl_name = "Family 10h",
1765 .addr_f1_ctl = PCI_DEVICE_ID_AMD_10H_NB_MAP,
1766 .misc_f3_ctl = PCI_DEVICE_ID_AMD_10H_NB_MISC,
1768 .early_channel_count = f10_early_channel_count,
1769 .get_error_address = f10_get_error_address,
1770 .read_dram_base_limit = f10_read_dram_base_limit,
1771 .read_dram_ctl_register = f10_read_dram_ctl_register,
1772 .map_sysaddr_to_csrow = f10_map_sysaddr_to_csrow,
1773 .dbam_to_cs = f10_dbam_to_chip_select,
1777 .ctl_name = "Family 11h",
1778 .addr_f1_ctl = PCI_DEVICE_ID_AMD_11H_NB_MAP,
1779 .misc_f3_ctl = PCI_DEVICE_ID_AMD_11H_NB_MISC,
1781 .early_channel_count = f10_early_channel_count,
1782 .get_error_address = f10_get_error_address,
1783 .read_dram_base_limit = f10_read_dram_base_limit,
1784 .read_dram_ctl_register = f10_read_dram_ctl_register,
1785 .map_sysaddr_to_csrow = f10_map_sysaddr_to_csrow,
1786 .dbam_to_cs = f10_dbam_to_chip_select,
1791 static struct pci_dev *pci_get_related_function(unsigned int vendor,
1792 unsigned int device,
1793 struct pci_dev *related)
1795 struct pci_dev *dev = NULL;
	dev = pci_get_device(vendor, device, dev);

	while (dev) {
		if ((dev->bus->number == related->bus->number) &&
		    (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
			break;
		dev = pci_get_device(vendor, device, dev);
	}

	return dev;
}
1809 * These are tables of eigenvectors (one per line) which can be used for the
1810 * construction of the syndrome tables. The modified syndrome search algorithm
1811 * uses those to find the symbol in error and thus the DIMM.
1813 * Algorithm courtesy of Ross LaFetra from AMD.
1815 static u16 x4_vectors[] = {
1816 0x2f57, 0x1afe, 0x66cc, 0xdd88,
1817 0x11eb, 0x3396, 0x7f4c, 0xeac8,
1818 0x0001, 0x0002, 0x0004, 0x0008,
1819 0x1013, 0x3032, 0x4044, 0x8088,
1820 0x106b, 0x30d6, 0x70fc, 0xe0a8,
1821 0x4857, 0xc4fe, 0x13cc, 0x3288,
1822 0x1ac5, 0x2f4a, 0x5394, 0xa1e8,
1823 0x1f39, 0x251e, 0xbd6c, 0x6bd8,
1824 0x15c1, 0x2a42, 0x89ac, 0x4758,
1825 0x2b03, 0x1602, 0x4f0c, 0xca08,
1826 0x1f07, 0x3a0e, 0x6b04, 0xbd08,
1827 0x8ba7, 0x465e, 0x244c, 0x1cc8,
1828 0x2b87, 0x164e, 0x642c, 0xdc18,
1829 0x40b9, 0x80de, 0x1094, 0x20e8,
1830 0x27db, 0x1eb6, 0x9dac, 0x7b58,
1831 0x11c1, 0x2242, 0x84ac, 0x4c58,
1832 0x1be5, 0x2d7a, 0x5e34, 0xa718,
1833 0x4b39, 0x8d1e, 0x14b4, 0x28d8,
1834 0x4c97, 0xc87e, 0x11fc, 0x33a8,
1835 0x8e97, 0x497e, 0x2ffc, 0x1aa8,
1836 0x16b3, 0x3d62, 0x4f34, 0x8518,
1837 0x1e2f, 0x391a, 0x5cac, 0xf858,
1838 0x1d9f, 0x3b7a, 0x572c, 0xfe18,
1839 0x15f5, 0x2a5a, 0x5264, 0xa3b8,
1840 0x1dbb, 0x3b66, 0x715c, 0xe3f8,
1841 0x4397, 0xc27e, 0x17fc, 0x3ea8,
1842 0x1617, 0x3d3e, 0x6464, 0xb8b8,
1843 0x23ff, 0x12aa, 0xab6c, 0x56d8,
1844 0x2dfb, 0x1ba6, 0x913c, 0x7328,
1845 0x185d, 0x2ca6, 0x7914, 0x9e28,
1846 0x171b, 0x3e36, 0x7d7c, 0xebe8,
1847 0x4199, 0x82ee, 0x19f4, 0x2e58,
1848 0x4807, 0xc40e, 0x130c, 0x3208,
1849 0x1905, 0x2e0a, 0x5804, 0xac08,
1850 0x213f, 0x132a, 0xadfc, 0x5ba8,
1851 0x19a9, 0x2efe, 0xb5cc, 0x6f88,
1854 static u16 x8_vectors[] = {
1855 0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
1856 0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
1857 0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
1858 0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80,
1859 0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780,
1860 0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080,
1861 0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080,
1862 0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080,
1863 0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80,
1864 0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580,
1865 0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880,
1866 0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280,
1867 0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180,
1868 0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580,
1869 0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280,
1870 0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180,
1871 0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080,
1872 0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
1873 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
static int decode_syndrome(u16 syndrome, u16 *vectors, int num_vecs,
			   int v_dim)
{
1879 unsigned int i, err_sym;
1881 for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
1883 int v_idx = err_sym * v_dim;
1884 int v_end = (err_sym + 1) * v_dim;
1886 /* walk over all 16 bits of the syndrome */
1887 for (i = 1; i < (1U << 16); i <<= 1) {
1889 /* if bit is set in that eigenvector... */
1890 if (v_idx < v_end && vectors[v_idx] & i) {
1891 u16 ev_comp = vectors[v_idx++];
1893 /* ... and bit set in the modified syndrome, */
1903 /* can't get to zero, move to next symbol */
1908 debugf0("syndrome(%x) not found\n", syndrome);
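/*
 * In other words: each group of v_dim entries above is the eigenvector
 * basis of one error symbol. Walking the 16 syndrome bits, the matching
 * eigenvector component is XORed into the running syndrome whenever both
 * have that bit set; if the syndrome can be reduced to zero this way,
 * err_sym names the symbol (and therefore the channel) in error.
 */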
1912 static int map_err_sym_to_channel(int err_sym, int sym_size)
1925 return err_sym >> 4;
1931 /* imaginary bits not in a DIMM */
		WARN(1, KERN_ERR "Invalid error symbol: 0x%x\n",
		     err_sym);
1945 return err_sym >> 3;
1951 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
1953 struct amd64_pvt *pvt = mci->pvt_info;
1957 amd64_read_pci_cfg(pvt->misc_f3_ctl, 0x180, &value);
1959 /* F3x180[EccSymbolSize]=1, x8 symbols */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model > 7 &&
	    value & BIT(25)) {
		err_sym = decode_syndrome(syndrome, x8_vectors,
1964 ARRAY_SIZE(x8_vectors), 8);
1965 return map_err_sym_to_channel(err_sym, 8);
	} else {
		err_sym = decode_syndrome(syndrome, x4_vectors,
1968 ARRAY_SIZE(x4_vectors), 4);
1969 return map_err_sym_to_channel(err_sym, 4);
1974 * Check for valid error in the NB Status High register. If so, proceed to read
1975 * NB Status Low, NB Address Low and NB Address High registers and store data
1976 * into error structure.
1979 * - 1: if hardware regs contains valid error info
1980 * - 0: if no valid error is indicated
1982 static int amd64_get_error_info_regs(struct mem_ctl_info *mci,
1983 struct err_regs *regs)
1985 struct amd64_pvt *pvt;
1986 struct pci_dev *misc_f3_ctl;
1988 pvt = mci->pvt_info;
1989 misc_f3_ctl = pvt->misc_f3_ctl;
	if (amd64_read_pci_cfg(misc_f3_ctl, K8_NBSH, &regs->nbsh))
		return 0;
1994 if (!(regs->nbsh & K8_NBSH_VALID_BIT))
1997 /* valid error, read remaining error information registers */
	if (amd64_read_pci_cfg(misc_f3_ctl, K8_NBSL, &regs->nbsl) ||
	    amd64_read_pci_cfg(misc_f3_ctl, K8_NBEAL, &regs->nbeal) ||
	    amd64_read_pci_cfg(misc_f3_ctl, K8_NBEAH, &regs->nbeah) ||
	    amd64_read_pci_cfg(misc_f3_ctl, K8_NBCFG, &regs->nbcfg))
		return 0;
2008 * This function is called to retrieve the error data from hardware and store it
2009 * in the info structure.
2012 * - 1: if a valid error is found
2013 * - 0: if no error is found
2015 static int amd64_get_error_info(struct mem_ctl_info *mci,
2016 struct err_regs *info)
2018 struct amd64_pvt *pvt;
2019 struct err_regs regs;
2021 pvt = mci->pvt_info;
2023 if (!amd64_get_error_info_regs(mci, info))
2027 * Here's the problem with the K8's EDAC reporting: There are four
2028 * registers which report pieces of error information. They are shared
2029 * between CEs and UEs. Furthermore, contrary to what is stated in the
2030 * BKDG, the overflow bit is never used! Every error always updates the
2031 * reporting registers.
2033 * Can you see the race condition? All four error reporting registers
 * must be read before a new error updates them! There is no way to read
 * all four registers atomically. The best that can be done is to detect
 * that a race has occurred and then report the error without any kind of
2039 * What is still positive is that errors are still reported and thus
2040 * problems can still be detected - just not localized because the
2041 * syndrome and address are spread out across registers.
2043 * Grrrrr!!!!! Here's hoping that AMD fixes this in some future K8 rev.
2044 * UEs and CEs should have separate register sets with proper overflow
2045 * bits that are used! At very least the problem can be fixed by
2046 * honoring the ErrValid bit in 'nbsh' and not updating registers - just
2047 * set the overflow bit - unless the current error is CE and the new
2048 * error is UE which would be the only situation for overwriting the
2054 /* Use info from the second read - most current */
2055 if (unlikely(!amd64_get_error_info_regs(mci, info)))
2058 /* clear the error bits in hardware */
2059 pci_write_bits32(pvt->misc_f3_ctl, K8_NBSH, 0, K8_NBSH_VALID_BIT);
2061 /* Check for the possible race condition */
2062 if ((regs.nbsh != info->nbsh) ||
2063 (regs.nbsl != info->nbsl) ||
2064 (regs.nbeah != info->nbeah) ||
2065 (regs.nbeal != info->nbeal)) {
2066 amd64_mc_printk(mci, KERN_WARNING,
2067 "hardware STATUS read access race condition "
2075 * Handle any Correctable Errors (CEs) that have occurred. Check for valid ERROR
2076 * ADDRESS and process.
2078 static void amd64_handle_ce(struct mem_ctl_info *mci,
2079 struct err_regs *info)
2081 struct amd64_pvt *pvt = mci->pvt_info;
2084 /* Ensure that the Error Address is VALID */
2085 if ((info->nbsh & K8_NBSH_VALID_ERROR_ADDR) == 0) {
2086 amd64_mc_printk(mci, KERN_ERR,
2087 "HW has no ERROR_ADDRESS available\n");
2088 edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
2092 sys_addr = pvt->ops->get_error_address(mci, info);
2094 amd64_mc_printk(mci, KERN_ERR,
2095 "CE ERROR_ADDRESS= 0x%llx\n", sys_addr);
2097 pvt->ops->map_sysaddr_to_csrow(mci, info, sys_addr);
/* Handle any Un-correctable Errors (UEs) */
static void amd64_handle_ue(struct mem_ctl_info *mci,
			    struct err_regs *info)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	struct mem_ctl_info *log_mci, *src_mci = NULL;
	int csrow;
	u64 sys_addr;
	u32 page, offset;

	log_mci = mci;

	if ((info->nbsh & K8_NBSH_VALID_ERROR_ADDR) == 0) {
		amd64_mc_printk(mci, KERN_CRIT,
				"HW has no ERROR_ADDRESS available\n");
		edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
		return;
	}

	sys_addr = pvt->ops->get_error_address(mci, info);

	/*
	 * Find out which node the error address belongs to. This may be
	 * different from the node that detected the error.
	 */
	src_mci = find_mc_by_sys_addr(mci, sys_addr);
	if (!src_mci) {
		amd64_mc_printk(mci, KERN_CRIT,
				"ERROR ADDRESS (0x%lx) value NOT mapped to a MC\n",
				(unsigned long)sys_addr);
		edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
		return;
	}

	log_mci = src_mci;

	csrow = sys_addr_to_csrow(log_mci, sys_addr);
	if (csrow < 0) {
		amd64_mc_printk(mci, KERN_CRIT,
				"ERROR_ADDRESS (0x%lx) value NOT mapped to 'csrow'\n",
				(unsigned long)sys_addr);
		edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
	} else {
		error_address_to_page_and_offset(sys_addr, &page, &offset);
		edac_mc_handle_ue(log_mci, page, offset, csrow, EDAC_MOD_STR);
	}
}
static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
					    struct err_regs *info)
{
	u32 ec = ERROR_CODE(info->nbsl);
	u32 xec = EXT_ERROR_CODE(info->nbsl);
	int ecc_type = (info->nbsh >> 13) & 0x3;
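
	/*
	 * Descriptive note (added for clarity): bits 14 (CECC) and 13 (UECC)
	 * of 'nbsh' encode the ECC error type, so ecc_type is 2 for a
	 * correctable and 1 for an uncorrectable ECC error - the two values
	 * tested below.
	 */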

	/* Bail out early if this was an 'observed' error */
	if (PP(ec) == K8_NBSL_PP_OBS)
		return;

	/* Do only ECC errors */
	if (xec && xec != F10_NBSL_EXT_ERR_ECC)
		return;

	if (ecc_type == 2)
		amd64_handle_ce(mci, info);
	else if (ecc_type == 1)
		amd64_handle_ue(mci, info);

	/*
	 * If main error is CE then overflow must be CE. If main error is UE
	 * then overflow is unknown. We'll call the overflow a CE - if
	 * panic_on_ue is set then we're already panicked and won't arrive
	 * here. Else, then apparently someone doesn't think that UEs are
	 * errors.
	 */
	if (info->nbsh & K8_NBSH_OVERFLOW)
		edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR "Error Overflow");
}
void amd64_decode_bus_error(int node_id, struct err_regs *regs)
{
	struct mem_ctl_info *mci = mci_lookup[node_id];

	__amd64_decode_bus_error(mci, regs);

	/*
	 * Check the UE bit of the NB status high register: if set, generate
	 * some logs. If NOT a GART error, then process the event as a NO-INFO
	 * event. If it was a GART error, skip that process.
	 *
	 * FIXME: this should go somewhere else, if at all.
	 */
	if (regs->nbsh & K8_NBSH_UC_ERR && !report_gart_errors)
		edac_mc_handle_ue_no_info(mci, "UE bit is set");
}
/*
 * The main polling 'check' function, called FROM the edac core to perform the
 * error checking and, if an error is encountered, error processing.
 */
static void amd64_check(struct mem_ctl_info *mci)
{
	struct err_regs regs;

	if (amd64_get_error_info(mci, &regs)) {
		struct amd64_pvt *pvt = mci->pvt_info;
		amd_decode_nb_mce(pvt->mc_node_id, &regs, 1);
	}
}
/*
 * Input:
 *	1) struct amd64_pvt which contains pvt->dram_f2_ctl pointer
 *	2) AMD Family index value
 *
 * Output:
 *	Upon return of 0, the following are filled in:
 *
 *		struct pvt->addr_f1_ctl
 *		struct pvt->misc_f3_ctl
 *
 *	Filled in with the related device functions of 'dram_f2_ctl'.
 *	These devices are "reserved" via pci_get_device().
 *
 *	Upon return of 1 (error status):
 *
 *		Nothing reserved
 */
static int amd64_reserve_mc_sibling_devices(struct amd64_pvt *pvt, int mc_idx)
{
	const struct amd64_family_type *amd64_dev = &amd64_family_types[mc_idx];

	/* Reserve the ADDRESS MAP Device */
	pvt->addr_f1_ctl = pci_get_related_function(pvt->dram_f2_ctl->vendor,
						    amd64_dev->addr_f1_ctl,
						    pvt->dram_f2_ctl);

	if (!pvt->addr_f1_ctl) {
		amd64_printk(KERN_ERR, "error address map device not found: "
			     "vendor %x device 0x%x (broken BIOS?)\n",
			     PCI_VENDOR_ID_AMD, amd64_dev->addr_f1_ctl);
		return 1;
	}

	/* Reserve the MISC Device */
	pvt->misc_f3_ctl = pci_get_related_function(pvt->dram_f2_ctl->vendor,
						    amd64_dev->misc_f3_ctl,
						    pvt->dram_f2_ctl);

	if (!pvt->misc_f3_ctl) {
		pci_dev_put(pvt->addr_f1_ctl);
		pvt->addr_f1_ctl = NULL;

		amd64_printk(KERN_ERR, "error miscellaneous device not found: "
			     "vendor %x device 0x%x (broken BIOS?)\n",
			     PCI_VENDOR_ID_AMD, amd64_dev->misc_f3_ctl);
		return 1;
	}

	debugf1("    Addr Map device PCI Bus ID:\t%s\n",
		pci_name(pvt->addr_f1_ctl));
	debugf1("    DRAM MEM-CTL PCI Bus ID:\t%s\n",
		pci_name(pvt->dram_f2_ctl));
	debugf1("    Misc device PCI Bus ID:\t%s\n",
		pci_name(pvt->misc_f3_ctl));

	return 0;
}
static void amd64_free_mc_sibling_devices(struct amd64_pvt *pvt)
{
	pci_dev_put(pvt->addr_f1_ctl);
	pci_dev_put(pvt->misc_f3_ctl);
}
/*
 * Retrieve the hardware registers of the memory controller (this includes the
 * 'Address Map' and 'Misc' device regs)
 */
static void amd64_read_mc_registers(struct amd64_pvt *pvt)
{
	u64 msr_val;
	int dram;

	/*
	 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
	 * those are Read-As-Zero
	 */
	rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
	debugf0("  TOP_MEM:  0x%016llx\n", pvt->top_mem);

	/* check first whether TOP_MEM2 is enabled */
	rdmsrl(MSR_K8_SYSCFG, msr_val);
	if (msr_val & (1U << 21)) {
		rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
		debugf0("  TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
	} else
		debugf0("  TOP_MEM2 disabled.\n");

	amd64_cpu_display_info(pvt);

	amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCAP, &pvt->nbcap);

	if (pvt->ops->read_dram_ctl_register)
		pvt->ops->read_dram_ctl_register(pvt);

	for (dram = 0; dram < DRAM_REG_COUNT; dram++) {
		/*
		 * Call CPU specific READ function to get the DRAM Base and
		 * Limit values from the DCT.
		 */
		pvt->ops->read_dram_base_limit(pvt, dram);

		/*
		 * Only print out debug info on rows with both R and W enabled.
		 * In normal processing, the compiler should optimize this
		 * whole 'if' debug output block away.
		 */
		if (pvt->dram_rw_en[dram] != 0) {
			debugf1("  DRAM-BASE[%d]: 0x%016llx "
				"DRAM-LIMIT:  0x%016llx\n",
				dram,
				pvt->dram_base[dram],
				pvt->dram_limit[dram]);

			debugf1("        IntlvEn=%s %s %s "
				"IntlvSel=%d DstNode=%d\n",
				pvt->dram_IntlvEn[dram] ?
					"Enabled" : "Disabled",
				(pvt->dram_rw_en[dram] & 0x2) ? "W" : "!W",
				(pvt->dram_rw_en[dram] & 0x1) ? "R" : "!R",
				pvt->dram_IntlvSel[dram],
				pvt->dram_DstNode[dram]);
		}
	}

	amd64_read_dct_base_mask(pvt);

	amd64_read_pci_cfg(pvt->addr_f1_ctl, K8_DHAR, &pvt->dhar);
	amd64_read_dbam_reg(pvt);

	amd64_read_pci_cfg(pvt->misc_f3_ctl,
			   F10_ONLINE_SPARE, &pvt->online_spare);

	amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0);
	amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCHR_0, &pvt->dchr0);

	if (!dct_ganging_enabled(pvt)) {
		amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_1, &pvt->dclr1);
		amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCHR_1, &pvt->dchr1);
	}

	amd64_dump_misc_regs(pvt);
}
/*
 * NOTE: CPU Revision Dependent code
 *
 * Input:
 *	@csrow_nr	ChipSelect Row Number (0..pvt->cs_count-1)
 *	k8 private pointer to -->
 *			DRAM Bank Address mapping register
 *			node_id
 *			DCL register where dual_channel_active is
 *
 * The DBAM register consists of 4 sets of 4 bits each. Definitions:
 *
 * Bits:	CSROWs
 * 0-3		CSROWs 0 and 1
 * 4-7		CSROWs 2 and 3
 * 8-11		CSROWs 4 and 5
 * 12-15	CSROWs 6 and 7
 *
 * Values range from 0 to 15.
 * The meaning of the values depends on CPU revision and dual-channel state;
 * see the relevant BKDG for more info.
 *
 * The memory controller provides for a total of only 8 CSROWs in its current
 * architecture. Each "pair" of CSROWs normally represents just one DIMM in
 * single channel or two (2) DIMMs in dual channel mode.
 *
 * The following code logic collapses the various tables for CSROW based on CPU
 * revision.
 *
 * Returns:
 *	The number of PAGE_SIZE pages on the specified CSROW number it
 *	encompasses
 */
static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt)
{
	u32 cs_mode, nr_pages;

	/*
	 * The math on this doesn't look right on the surface because x/2*4 can
	 * be simplified to x*2 but this expression makes use of the fact that
	 * it is integral math where 1/2=0. This intermediate value becomes the
	 * number of bits to shift the DBAM register to extract the proper
	 * CSROW field.
	 */
	cs_mode = (pvt->dbam0 >> ((csrow_nr / 2) * 4)) & 0xF;
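
	/*
	 * Worked example (added for illustration): for csrow_nr = 5, the
	 * shift count is (5 / 2) * 4 = 8, selecting DBAM bits 8-11 - the
	 * field for CSROWs 4 and 5 per the table above. dbam_to_cs() then
	 * turns that field into a chip-select size in MB, and the
	 * (20 - PAGE_SHIFT) shift below converts MB into PAGE_SIZE pages.
	 */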

	nr_pages = pvt->ops->dbam_to_cs(pvt, cs_mode) << (20 - PAGE_SHIFT);

	/*
	 * If dual channel then double the memory size of single channel.
	 * Channel count is 1 or 2.
	 */
	nr_pages <<= (pvt->channel_count - 1);

	debugf0("  (csrow=%d) DBAM map index= %d\n", csrow_nr, cs_mode);
	debugf0("  nr_pages= %u  channel-count = %d\n",
		nr_pages, pvt->channel_count);

	return nr_pages;
}
/*
 * Initialize the array of csrow attribute instances, based on the values
 * from pci config hardware registers.
 */
static int amd64_init_csrows(struct mem_ctl_info *mci)
{
	struct csrow_info *csrow;
	struct amd64_pvt *pvt;
	u64 input_addr_min, input_addr_max, sys_addr;
	int i, empty = 1;

	pvt = mci->pvt_info;

	amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &pvt->nbcfg);

	debugf0("NBCFG= 0x%x  CHIPKILL= %s DRAM ECC= %s\n", pvt->nbcfg,
		(pvt->nbcfg & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
		(pvt->nbcfg & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled");

	for (i = 0; i < pvt->cs_count; i++) {
		csrow = &mci->csrows[i];

		if ((pvt->dcsb0[i] & K8_DCSB_CS_ENABLE) == 0) {
			debugf1("----CSROW %d EMPTY for node %d\n", i,
				pvt->mc_node_id);
			continue;
		}

		debugf1("----CSROW %d VALID for MC node %d\n",
			i, pvt->mc_node_id);

		empty = 0;
		csrow->nr_pages = amd64_csrow_nr_pages(i, pvt);
		find_csrow_limits(mci, i, &input_addr_min, &input_addr_max);
		sys_addr = input_addr_to_sys_addr(mci, input_addr_min);
		csrow->first_page = (u32) (sys_addr >> PAGE_SHIFT);
		sys_addr = input_addr_to_sys_addr(mci, input_addr_max);
		csrow->last_page = (u32) (sys_addr >> PAGE_SHIFT);
		csrow->page_mask = ~mask_from_dct_mask(pvt, i);
		/* 8 bytes of resolution */
		csrow->grain = 8;

		csrow->mtype = amd64_determine_memory_type(pvt);

		debugf1("  for MC node %d csrow %d:\n", pvt->mc_node_id, i);
		debugf1("    input_addr_min: 0x%lx input_addr_max: 0x%lx\n",
			(unsigned long)input_addr_min,
			(unsigned long)input_addr_max);
		debugf1("    sys_addr: 0x%lx  page_mask: 0x%lx\n",
			(unsigned long)sys_addr, csrow->page_mask);
		debugf1("    nr_pages: %u  first_page: 0x%lx "
			"last_page: 0x%lx\n",
			(unsigned)csrow->nr_pages,
			csrow->first_page, csrow->last_page);

		/*
		 * determine whether CHIPKILL or JUST ECC or NO ECC is operating
		 */
		if (pvt->nbcfg & K8_NBCFG_ECC_ENABLE)
			csrow->edac_mode =
			    (pvt->nbcfg & K8_NBCFG_CHIPKILL) ?
			    EDAC_S4ECD4ED : EDAC_SECDED;
		else
			csrow->edac_mode = EDAC_NONE;
	}

	return empty;
}
/* get all cores on this DCT */
static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid)
{
	int cpu;

	for_each_online_cpu(cpu)
		if (amd_get_nb_id(cpu) == nid)
			cpumask_set_cpu(cpu, mask);
}
/* check MCG_CTL on all the cpus on this node */
static bool amd64_nb_mce_bank_enabled_on_node(int nid)
{
	cpumask_var_t mask;
	int cpu, nbe;
	bool ret = false;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
		amd64_printk(KERN_WARNING, "%s: error allocating mask\n",
			     __func__);
		return false;
	}

	get_cpus_on_this_dct_cpumask(mask, nid);

	rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);

	for_each_cpu(cpu, mask) {
		struct msr *reg = per_cpu_ptr(msrs, cpu);
		nbe = reg->l & K8_MSR_MCGCTL_NBE;

		debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
			cpu, reg->q,
			(nbe ? "enabled" : "disabled"));

		if (!nbe)
			goto out;
	}
	ret = true;

out:
	free_cpumask_var(mask);
	return ret;
}
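
/*
 * Toggle the NB MCE bank reporting enable (the K8_MSR_MCGCTL_NBE bit in
 * MSR_IA32_MCG_CTL) on every core attached to this node. When enabling,
 * remember whether the BIOS had already set the bit (pvt->flags.ecc_report)
 * so that a later disable does not clobber the BIOS setting. (Descriptive
 * comment added for clarity; behavior follows from the code below.)
 */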
static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on)
{
	cpumask_var_t cmask;
	int cpu;

	if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
		amd64_printk(KERN_WARNING, "%s: error allocating mask\n",
			     __func__);
		return -ENOMEM;
	}

	get_cpus_on_this_dct_cpumask(cmask, pvt->mc_node_id);

	rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);

	for_each_cpu(cpu, cmask) {

		struct msr *reg = per_cpu_ptr(msrs, cpu);

		if (on) {
			if (reg->l & K8_MSR_MCGCTL_NBE)
				pvt->flags.ecc_report = 1;

			reg->l |= K8_MSR_MCGCTL_NBE;
		} else {
			/*
			 * Turn off ECC reporting only when it was off before
			 */
			if (!pvt->flags.ecc_report)
				reg->l &= ~K8_MSR_MCGCTL_NBE;
		}
	}

	wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);

	free_cpumask_var(cmask);

	return 0;
}
/*
 * Only if 'ecc_enable_override' is set AND the BIOS had ECC disabled do "we"
 * enable ECC error reporting here.
 */
static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;

	if (!ecc_enable_override)
		return;

	amd64_printk(KERN_WARNING,
		"'ecc_enable_override' parameter is active, "
		"Enabling AMD ECC hardware now: CAUTION\n");

	amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCTL, &value);

	/* turn on the UECCEn and CECCEn bits */
	pvt->old_nbctl = value & mask;
	pvt->nbctl_mcgctl_saved = 1;

	value |= mask;
	pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value);

	if (amd64_toggle_ecc_err_reporting(pvt, ON))
		amd64_printk(KERN_WARNING, "Error enabling ECC reporting over "
					   "MCGCTL!\n");

	amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value);

	debugf0("NBCFG(1)= 0x%x  CHIPKILL= %s ECC_ENABLE= %s\n", value,
		(value & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
		(value & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled");

	if (!(value & K8_NBCFG_ECC_ENABLE)) {
		amd64_printk(KERN_WARNING,
			"This node reports that DRAM ECC is "
			"currently Disabled; ENABLING now\n");

		/* Attempt to turn on DRAM ECC Enable */
		value |= K8_NBCFG_ECC_ENABLE;
		pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCFG, value);

		amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value);

		if (!(value & K8_NBCFG_ECC_ENABLE)) {
			amd64_printk(KERN_WARNING,
				"Hardware rejects Enabling DRAM ECC checking\n"
				"Check memory DIMM configuration\n");
		} else {
			amd64_printk(KERN_DEBUG,
				"Hardware accepted DRAM ECC Enable\n");
		}
	}

	debugf0("NBCFG(2)= 0x%x  CHIPKILL= %s ECC_ENABLE= %s\n", value,
		(value & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
		(value & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled");

	pvt->ctl_error_info.nbcfg = value;
}
static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt)
{
	u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;

	if (!pvt->nbctl_mcgctl_saved)
		return;

	amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCTL, &value);
	value &= ~mask;
	value |= pvt->old_nbctl;

	/* restore the NB Enable MCGCTL bit */
	pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value);

	if (amd64_toggle_ecc_err_reporting(pvt, OFF))
		amd64_printk(KERN_WARNING, "Error restoring ECC reporting over "
					   "MCGCTL!\n");
}
/*
 * EDAC requires that the BIOS have ECC enabled before taking over the
 * processing of ECC errors. This is because the BIOS is expected to properly
 * initialize the memory system completely. A command line option allows
 * force-enabling hardware ECC later in amd64_enable_ecc_error_reporting().
 */
static const char *ecc_warning =
	"WARNING: ECC is disabled by BIOS. Module will NOT be loaded.\n"
	" Either Enable ECC in the BIOS, or set 'ecc_enable_override'.\n"
	" Also, use of the override can cause unknown side effects.\n";
static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
{
	u32 value;
	u8 ecc_enabled = 0;
	bool nb_mce_en = false;

	amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value);

	ecc_enabled = !!(value & K8_NBCFG_ECC_ENABLE);
	if (!ecc_enabled)
		amd64_printk(KERN_WARNING, "This node reports that Memory ECC "
			     "is currently disabled, set F3x%x[22] (%s).\n",
			     K8_NBCFG, pci_name(pvt->misc_f3_ctl));
	else
		amd64_printk(KERN_INFO, "ECC is enabled by BIOS.\n");

	nb_mce_en = amd64_nb_mce_bank_enabled_on_node(pvt->mc_node_id);
	if (!nb_mce_en)
		amd64_printk(KERN_WARNING, "NB MCE bank disabled, set MSR "
			     "0x%08x[4] on node %d to enable.\n",
			     MSR_IA32_MCG_CTL, pvt->mc_node_id);

	if (!ecc_enabled || !nb_mce_en) {
		if (!ecc_enable_override) {
			amd64_printk(KERN_WARNING, "%s", ecc_warning);
			return -ENODEV;
		}
	} else
		/* CLEAR the override, since BIOS controlled it */
		ecc_enable_override = 0;

	return 0;
}
struct mcidev_sysfs_attribute sysfs_attrs[ARRAY_SIZE(amd64_dbg_attrs) +
					  ARRAY_SIZE(amd64_inj_attrs) +
					  1];

struct mcidev_sysfs_attribute terminator = { .attr = { .name = NULL } };
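
/*
 * Note (added for clarity): the EDAC core walks the driver-supplied
 * attribute table until it finds an entry with a NULL .attr.name, so the
 * combined table built below must always end with 'terminator'.
 */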
static void amd64_set_mc_sysfs_attributes(struct mem_ctl_info *mci)
{
	unsigned int i = 0, j = 0;

	for (; i < ARRAY_SIZE(amd64_dbg_attrs); i++)
		sysfs_attrs[i] = amd64_dbg_attrs[i];

	for (j = 0; j < ARRAY_SIZE(amd64_inj_attrs); j++, i++)
		sysfs_attrs[i] = amd64_inj_attrs[j];

	sysfs_attrs[i] = terminator;

	mci->mc_driver_sysfs_attributes = sysfs_attrs;
}
static void amd64_setup_mci_misc_attributes(struct mem_ctl_info *mci)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	mci->mtype_cap		= MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
	mci->edac_ctl_cap	= EDAC_FLAG_NONE;

	if (pvt->nbcap & K8_NBCAP_SECDED)
		mci->edac_ctl_cap |= EDAC_FLAG_SECDED;

	if (pvt->nbcap & K8_NBCAP_CHIPKILL)
		mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;

	mci->edac_cap		= amd64_determine_edac_cap(pvt);
	mci->mod_name		= EDAC_MOD_STR;
	mci->mod_ver		= EDAC_AMD64_VERSION;
	mci->ctl_name		= get_amd_family_name(pvt->mc_type_index);
	mci->dev_name		= pci_name(pvt->dram_f2_ctl);
	mci->ctl_page_to_phys	= NULL;

	/* IMPORTANT: Set the polling 'check' function in this module */
	mci->edac_check		= amd64_check;

	/* memory scrubber interface */
	mci->set_sdram_scrub_rate = amd64_set_scrub_rate;
	mci->get_sdram_scrub_rate = amd64_get_scrub_rate;
}
/*
 * Init stuff for this DRAM Controller device.
 *
 * Due to a hardware feature on Fam10h CPUs, the Enable Extended Configuration
 * Space feature MUST be enabled on ALL Processors prior to actually reading
 * from the ECS registers, since the loading of the module can occur on any
 * 'core' and cores don't 'see' the other processors' ECS data when the others
 * are NOT enabled. Our solution is to first enable ECS access in this routine
 * on all processors, gather some data in an amd64_pvt structure, and later
 * come back in a finish-setup function to perform the final initialization.
 * See also amd64_init_2nd_stage() for that.
 */
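
/*
 * Rough call sequence for the two-stage scheme described above (sketch
 * added for illustration):
 *
 *   PCI probe   -> amd64_probe_one_instance() - enable ECS access (Fam10h+)
 *                  and stash the pvt in pvt_lookup[node]
 *   module init -> amd64_edac_init() - walk pvt_lookup[] and run
 *                  amd64_init_2nd_stage() on each saved instance
 */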
static int amd64_probe_one_instance(struct pci_dev *dram_f2_ctl,
				    int mc_type_index)
{
	struct amd64_pvt *pvt = NULL;
	int err = 0, ret;

	ret = -ENOMEM;
	pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
	if (!pvt)
		goto err_exit;

	pvt->mc_node_id = get_node_id(dram_f2_ctl);

	pvt->dram_f2_ctl	= dram_f2_ctl;
	pvt->ext_model		= boot_cpu_data.x86_model >> 4;
	pvt->mc_type_index	= mc_type_index;
	pvt->ops		= family_ops(mc_type_index);

	/*
	 * We have the dram_f2_ctl device as an argument, now go reserve its
	 * sibling devices from the PCI system.
	 */
	ret = -ENODEV;
	err = amd64_reserve_mc_sibling_devices(pvt, mc_type_index);
	if (err)
		goto err_free;

	ret = -EINVAL;
	err = amd64_check_ecc_enabled(pvt);
	if (err)
		goto err_put;

	/*
	 * Key operation here: setup of HW prior to performing ops on it. Some
	 * setup is required to access ECS data. After this is performed, the
	 * 'teardown' function must be called upon error and normal exit paths.
	 */
	if (boot_cpu_data.x86 >= 0x10)
		amd64_setup(pvt);

	/*
	 * Save the pointer to the private data for use in the 2nd
	 * initialization stage
	 */
	pvt_lookup[pvt->mc_node_id] = pvt;

	return 0;

err_put:
	amd64_free_mc_sibling_devices(pvt);

err_free:
	kfree(pvt);

err_exit:
	return ret;
}
/*
 * This is the finishing stage of the init code. Needs to be performed after
 * all MCs' hardware have been prepped for accessing extended config space.
 */
static int amd64_init_2nd_stage(struct amd64_pvt *pvt)
{
	int node_id = pvt->mc_node_id;
	struct mem_ctl_info *mci;
	int ret = -ENODEV;

	amd64_read_mc_registers(pvt);

	/*
	 * We need to determine how many memory channels there are. Then use
	 * that information for calculating the size of the dynamic instance
	 * tables in the 'mci' structure.
	 */
	pvt->channel_count = pvt->ops->early_channel_count(pvt);
	if (pvt->channel_count < 0)
		goto err_exit;

	ret = -ENOMEM;
	mci = edac_mc_alloc(0, pvt->cs_count, pvt->channel_count, node_id);
	if (!mci)
		goto err_exit;

	mci->pvt_info = pvt;

	mci->dev = &pvt->dram_f2_ctl->dev;
	amd64_setup_mci_misc_attributes(mci);

	if (amd64_init_csrows(mci))
		mci->edac_cap = EDAC_FLAG_NONE;

	amd64_enable_ecc_error_reporting(mci);
	amd64_set_mc_sysfs_attributes(mci);

	ret = -ENODEV;
	if (edac_mc_add_mc(mci)) {
		debugf1("failed edac_mc_add_mc()\n");
		goto err_add_mc;
	}

	mci_lookup[node_id] = mci;
	pvt_lookup[node_id] = NULL;

	/* register stuff with EDAC MCE */
	if (report_gart_errors)
		amd_report_gart_errors(true);

	amd_register_ecc_decoder(amd64_decode_bus_error);

	return 0;

err_add_mc:
	edac_mc_free(mci);

err_exit:
	debugf0("failure to init 2nd stage: ret=%d\n", ret);

	amd64_restore_ecc_error_reporting(pvt);

	if (boot_cpu_data.x86 > 0xf)
		amd64_teardown(pvt);

	amd64_free_mc_sibling_devices(pvt);

	kfree(pvt_lookup[pvt->mc_node_id]);
	pvt_lookup[node_id] = NULL;

	return ret;
}
static int __devinit amd64_init_one_instance(struct pci_dev *pdev,
					     const struct pci_device_id *mc_type)
{
	int ret = 0;

	debugf0("(MC node=%d,mc_type='%s')\n", get_node_id(pdev),
		get_amd_family_name(mc_type->driver_data));

	ret = pci_enable_device(pdev);
	if (ret < 0)
		ret = -EIO;
	else
		ret = amd64_probe_one_instance(pdev, mc_type->driver_data);

	if (ret < 0)
		debugf0("ret=%d\n", ret);

	return ret;
}
static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;

	/* Remove from EDAC CORE tracking list */
	mci = edac_mc_del_mc(&pdev->dev);
	if (!mci)
		return;

	pvt = mci->pvt_info;

	amd64_restore_ecc_error_reporting(pvt);

	if (boot_cpu_data.x86 > 0xf)
		amd64_teardown(pvt);

	amd64_free_mc_sibling_devices(pvt);

	mci->pvt_info = NULL;
	mci_lookup[pvt->mc_node_id] = NULL;

	/* free the private struct only after its node id has been looked up */
	kfree(pvt);

	/* unregister from EDAC MCE */
	amd_report_gart_errors(false);
	amd_unregister_ecc_decoder(amd64_decode_bus_error);

	/* Free the EDAC CORE resources */
	edac_mc_free(mci);
}
/*
 * This table is part of the interface for loading drivers for PCI devices. The
 * PCI core identifies what devices are on a system during boot, and then
 * queries this table to see whether this driver handles a given device found.
 */
static const struct pci_device_id amd64_pci_table[] __devinitdata = {
	{
		.vendor		= PCI_VENDOR_ID_AMD,
		.device		= PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.class		= 0,
		.class_mask	= 0,
		.driver_data	= K8_CPUS
	},
	{
		.vendor		= PCI_VENDOR_ID_AMD,
		.device		= PCI_DEVICE_ID_AMD_10H_NB_DRAM,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.class		= 0,
		.class_mask	= 0,
		.driver_data	= F10_CPUS
	},
	{
		.vendor		= PCI_VENDOR_ID_AMD,
		.device		= PCI_DEVICE_ID_AMD_11H_NB_DRAM,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.class		= 0,
		.class_mask	= 0,
		.driver_data	= F11_CPUS
	},
	{0, }
};
MODULE_DEVICE_TABLE(pci, amd64_pci_table);
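
/*
 * Note (added for clarity): 'driver_data' above is the family index that
 * amd64_probe_one_instance() uses to select the per-family ops and sibling
 * PCI device IDs out of amd64_family_types[].
 */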
static struct pci_driver amd64_pci_driver = {
	.name		= EDAC_MOD_STR,
	.probe		= amd64_init_one_instance,
	.remove		= __devexit_p(amd64_remove_one_instance),
	.id_table	= amd64_pci_table,
};
static void amd64_setup_pci_device(void)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;

	if (amd64_ctl_pci)
		return;

	mci = mci_lookup[0];
	if (mci) {

		pvt = mci->pvt_info;
		amd64_ctl_pci =
			edac_pci_create_generic_ctl(&pvt->dram_f2_ctl->dev,
						    EDAC_MOD_STR);

		if (!amd64_ctl_pci) {
			pr_warning("%s(): Unable to create PCI control\n",
				   __func__);

			pr_warning("%s(): PCI error report via EDAC not set\n",
				   __func__);
		}
	}
}
static int __init amd64_edac_init(void)
{
	int nb, err = -ENODEV;

	edac_printk(KERN_INFO, EDAC_MOD_STR, EDAC_AMD64_VERSION "\n");

	opstate_init();

	if (cache_k8_northbridges() < 0)
		return err;

	msrs = msrs_alloc();
	if (!msrs)
		return -ENOMEM;

	err = pci_register_driver(&amd64_pci_driver);
	if (err)
		return err;

	/*
	 * At this point, the array 'pvt_lookup[]' contains pointers to alloc'd
	 * amd64_pvt structs. These will be used in the 2nd stage init function
	 * to finish initialization of the MC instances.
	 */
	for (nb = 0; nb < num_k8_northbridges; nb++) {
		if (!pvt_lookup[nb])
			continue;

		err = amd64_init_2nd_stage(pvt_lookup[nb]);
		if (err)
			goto err_2nd_stage;
	}

	amd64_setup_pci_device();

	return 0;

err_2nd_stage:
	debugf0("2nd stage failed\n");
	pci_unregister_driver(&amd64_pci_driver);

	return err;
}
static void __exit amd64_edac_exit(void)
{
	if (amd64_ctl_pci)
		edac_pci_release_generic_ctl(amd64_ctl_pci);

	pci_unregister_driver(&amd64_pci_driver);

	msrs_free(msrs);
	msrs = NULL;
}

module_init(amd64_edac_init);
module_exit(amd64_edac_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
	      "Dave Peterson, Thayne Harbaugh");
MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
		   EDAC_AMD64_VERSION);

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");