1 #include "amd64_edac.h"
2 #include <asm/amd_nb.h>
4 static struct edac_pci_ctl_info
*pci_ctl
;
6 static int report_gart_errors
;
7 module_param(report_gart_errors
, int, 0644);
10 * Set by command line parameter. If BIOS has enabled the ECC, this override is
11 * cleared to prevent re-enabling the hardware by this driver.
13 static int ecc_enable_override
;
14 module_param(ecc_enable_override
, int, 0644);
16 static struct msr __percpu
*msrs
;
19 static struct ecc_settings
**ecc_stngs
;
22 * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
23 * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
26 *FIXME: Produce a better mapping/linearisation.
28 static const struct scrubrate
{
29 u32 scrubval
; /* bit pattern for scrub rate */
30 u32 bandwidth
; /* bandwidth consumed (bytes/sec) */
32 { 0x01, 1600000000UL},
54 { 0x00, 0UL}, /* scrubbing off */
57 int __amd64_read_pci_cfg_dword(struct pci_dev
*pdev
, int offset
,
58 u32
*val
, const char *func
)
62 err
= pci_read_config_dword(pdev
, offset
, val
);
64 amd64_warn("%s: error reading F%dx%03x.\n",
65 func
, PCI_FUNC(pdev
->devfn
), offset
);
70 int __amd64_write_pci_cfg_dword(struct pci_dev
*pdev
, int offset
,
71 u32 val
, const char *func
)
75 err
= pci_write_config_dword(pdev
, offset
, val
);
77 amd64_warn("%s: error writing to F%dx%03x.\n",
78 func
, PCI_FUNC(pdev
->devfn
), offset
);
84 * Select DCT to which PCI cfg accesses are routed
86 static void f15h_select_dct(struct amd64_pvt
*pvt
, u8 dct
)
90 amd64_read_pci_cfg(pvt
->F1
, DCT_CFG_SEL
, ®
);
91 reg
&= (pvt
->model
== 0x30) ? ~3 : ~1;
93 amd64_write_pci_cfg(pvt
->F1
, DCT_CFG_SEL
, reg
);
98 * Depending on the family, F2 DCT reads need special handling:
100 * K8: has a single DCT only and no address offsets >= 0x100
102 * F10h: each DCT has its own set of regs
106 * F16h: has only 1 DCT
108 * F15h: we select which DCT we access using F1x10C[DctCfgSel]
110 static inline int amd64_read_dct_pci_cfg(struct amd64_pvt
*pvt
, u8 dct
,
111 int offset
, u32
*val
)
115 if (dct
|| offset
>= 0x100)
122 * Note: If ganging is enabled, barring the regs
123 * F2x[1,0]98 and F2x[1,0]9C; reads reads to F2x1xx
124 * return 0. (cf. Section 2.8.1 F10h BKDG)
126 if (dct_ganging_enabled(pvt
))
135 * F15h: F2x1xx addresses do not map explicitly to DCT1.
136 * We should select which DCT we access using F1x10C[DctCfgSel]
138 dct
= (dct
&& pvt
->model
== 0x30) ? 3 : dct
;
139 f15h_select_dct(pvt
, dct
);
150 return amd64_read_pci_cfg(pvt
->F2
, offset
, val
);
154 * Memory scrubber control interface. For K8, memory scrubbing is handled by
155 * hardware and can involve L2 cache, dcache as well as the main memory. With
156 * F10, this is extended to L3 cache scrubbing on CPU models sporting that
159 * This causes the "units" for the scrubbing speed to vary from 64 byte blocks
160 * (dram) over to cache lines. This is nasty, so we will use bandwidth in
161 * bytes/sec for the setting.
163 * Currently, we only do dram scrubbing. If the scrubbing is done in software on
164 * other archs, we might not have access to the caches directly.
167 static inline void __f17h_set_scrubval(struct amd64_pvt
*pvt
, u32 scrubval
)
170 * Fam17h supports scrub values between 0x5 and 0x14. Also, the values
171 * are shifted down by 0x5, so scrubval 0x5 is written to the register
172 * as 0x0, scrubval 0x6 as 0x1, etc.
174 if (scrubval
>= 0x5 && scrubval
<= 0x14) {
176 pci_write_bits32(pvt
->F6
, F17H_SCR_LIMIT_ADDR
, scrubval
, 0xF);
177 pci_write_bits32(pvt
->F6
, F17H_SCR_BASE_ADDR
, 1, 0x1);
179 pci_write_bits32(pvt
->F6
, F17H_SCR_BASE_ADDR
, 0, 0x1);
183 * Scan the scrub rate mapping table for a close or matching bandwidth value to
184 * issue. If requested is too big, then use last maximum value found.
186 static int __set_scrub_rate(struct amd64_pvt
*pvt
, u32 new_bw
, u32 min_rate
)
192 * map the configured rate (new_bw) to a value specific to the AMD64
193 * memory controller and apply to register. Search for the first
194 * bandwidth entry that is greater or equal than the setting requested
195 * and program that. If at last entry, turn off DRAM scrubbing.
197 * If no suitable bandwidth is found, turn off DRAM scrubbing entirely
198 * by falling back to the last element in scrubrates[].
200 for (i
= 0; i
< ARRAY_SIZE(scrubrates
) - 1; i
++) {
202 * skip scrub rates which aren't recommended
203 * (see F10 BKDG, F3x58)
205 if (scrubrates
[i
].scrubval
< min_rate
)
208 if (scrubrates
[i
].bandwidth
<= new_bw
)
212 scrubval
= scrubrates
[i
].scrubval
;
214 if (pvt
->fam
== 0x17) {
215 __f17h_set_scrubval(pvt
, scrubval
);
216 } else if (pvt
->fam
== 0x15 && pvt
->model
== 0x60) {
217 f15h_select_dct(pvt
, 0);
218 pci_write_bits32(pvt
->F2
, F15H_M60H_SCRCTRL
, scrubval
, 0x001F);
219 f15h_select_dct(pvt
, 1);
220 pci_write_bits32(pvt
->F2
, F15H_M60H_SCRCTRL
, scrubval
, 0x001F);
222 pci_write_bits32(pvt
->F3
, SCRCTRL
, scrubval
, 0x001F);
226 return scrubrates
[i
].bandwidth
;
231 static int set_scrub_rate(struct mem_ctl_info
*mci
, u32 bw
)
233 struct amd64_pvt
*pvt
= mci
->pvt_info
;
234 u32 min_scrubrate
= 0x5;
239 if (pvt
->fam
== 0x15) {
241 if (pvt
->model
< 0x10)
242 f15h_select_dct(pvt
, 0);
244 if (pvt
->model
== 0x60)
247 return __set_scrub_rate(pvt
, bw
, min_scrubrate
);
250 static int get_scrub_rate(struct mem_ctl_info
*mci
)
252 struct amd64_pvt
*pvt
= mci
->pvt_info
;
253 int i
, retval
= -EINVAL
;
259 if (pvt
->model
< 0x10)
260 f15h_select_dct(pvt
, 0);
262 if (pvt
->model
== 0x60)
263 amd64_read_pci_cfg(pvt
->F2
, F15H_M60H_SCRCTRL
, &scrubval
);
267 amd64_read_pci_cfg(pvt
->F6
, F17H_SCR_BASE_ADDR
, &scrubval
);
268 if (scrubval
& BIT(0)) {
269 amd64_read_pci_cfg(pvt
->F6
, F17H_SCR_LIMIT_ADDR
, &scrubval
);
278 amd64_read_pci_cfg(pvt
->F3
, SCRCTRL
, &scrubval
);
282 scrubval
= scrubval
& 0x001F;
284 for (i
= 0; i
< ARRAY_SIZE(scrubrates
); i
++) {
285 if (scrubrates
[i
].scrubval
== scrubval
) {
286 retval
= scrubrates
[i
].bandwidth
;
294 * returns true if the SysAddr given by sys_addr matches the
295 * DRAM base/limit associated with node_id
297 static bool base_limit_match(struct amd64_pvt
*pvt
, u64 sys_addr
, u8 nid
)
301 /* The K8 treats this as a 40-bit value. However, bits 63-40 will be
302 * all ones if the most significant implemented address bit is 1.
303 * Here we discard bits 63-40. See section 3.4.2 of AMD publication
304 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
305 * Application Programming.
307 addr
= sys_addr
& 0x000000ffffffffffull
;
309 return ((addr
>= get_dram_base(pvt
, nid
)) &&
310 (addr
<= get_dram_limit(pvt
, nid
)));
314 * Attempt to map a SysAddr to a node. On success, return a pointer to the
315 * mem_ctl_info structure for the node that the SysAddr maps to.
317 * On failure, return NULL.
319 static struct mem_ctl_info
*find_mc_by_sys_addr(struct mem_ctl_info
*mci
,
322 struct amd64_pvt
*pvt
;
327 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
328 * 3.4.4.2) registers to map the SysAddr to a node ID.
333 * The value of this field should be the same for all DRAM Base
334 * registers. Therefore we arbitrarily choose to read it from the
335 * register for node 0.
337 intlv_en
= dram_intlv_en(pvt
, 0);
340 for (node_id
= 0; node_id
< DRAM_RANGES
; node_id
++) {
341 if (base_limit_match(pvt
, sys_addr
, node_id
))
347 if (unlikely((intlv_en
!= 0x01) &&
348 (intlv_en
!= 0x03) &&
349 (intlv_en
!= 0x07))) {
350 amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en
);
354 bits
= (((u32
) sys_addr
) >> 12) & intlv_en
;
356 for (node_id
= 0; ; ) {
357 if ((dram_intlv_sel(pvt
, node_id
) & intlv_en
) == bits
)
358 break; /* intlv_sel field matches */
360 if (++node_id
>= DRAM_RANGES
)
364 /* sanity test for sys_addr */
365 if (unlikely(!base_limit_match(pvt
, sys_addr
, node_id
))) {
366 amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address"
367 "range for node %d with node interleaving enabled.\n",
368 __func__
, sys_addr
, node_id
);
373 return edac_mc_find((int)node_id
);
376 edac_dbg(2, "sys_addr 0x%lx doesn't match any node\n",
377 (unsigned long)sys_addr
);
383 * compute the CS base address of the @csrow on the DRAM controller @dct.
384 * For details see F2x[5C:40] in the processor's BKDG
386 static void get_cs_base_and_mask(struct amd64_pvt
*pvt
, int csrow
, u8 dct
,
387 u64
*base
, u64
*mask
)
389 u64 csbase
, csmask
, base_bits
, mask_bits
;
392 if (pvt
->fam
== 0xf && pvt
->ext_model
< K8_REV_F
) {
393 csbase
= pvt
->csels
[dct
].csbases
[csrow
];
394 csmask
= pvt
->csels
[dct
].csmasks
[csrow
];
395 base_bits
= GENMASK_ULL(31, 21) | GENMASK_ULL(15, 9);
396 mask_bits
= GENMASK_ULL(29, 21) | GENMASK_ULL(15, 9);
400 * F16h and F15h, models 30h and later need two addr_shift values:
401 * 8 for high and 6 for low (cf. F16h BKDG).
403 } else if (pvt
->fam
== 0x16 ||
404 (pvt
->fam
== 0x15 && pvt
->model
>= 0x30)) {
405 csbase
= pvt
->csels
[dct
].csbases
[csrow
];
406 csmask
= pvt
->csels
[dct
].csmasks
[csrow
>> 1];
408 *base
= (csbase
& GENMASK_ULL(15, 5)) << 6;
409 *base
|= (csbase
& GENMASK_ULL(30, 19)) << 8;
412 /* poke holes for the csmask */
413 *mask
&= ~((GENMASK_ULL(15, 5) << 6) |
414 (GENMASK_ULL(30, 19) << 8));
416 *mask
|= (csmask
& GENMASK_ULL(15, 5)) << 6;
417 *mask
|= (csmask
& GENMASK_ULL(30, 19)) << 8;
421 csbase
= pvt
->csels
[dct
].csbases
[csrow
];
422 csmask
= pvt
->csels
[dct
].csmasks
[csrow
>> 1];
425 if (pvt
->fam
== 0x15)
426 base_bits
= mask_bits
=
427 GENMASK_ULL(30,19) | GENMASK_ULL(13,5);
429 base_bits
= mask_bits
=
430 GENMASK_ULL(28,19) | GENMASK_ULL(13,5);
433 *base
= (csbase
& base_bits
) << addr_shift
;
436 /* poke holes for the csmask */
437 *mask
&= ~(mask_bits
<< addr_shift
);
439 *mask
|= (csmask
& mask_bits
) << addr_shift
;
442 #define for_each_chip_select(i, dct, pvt) \
443 for (i = 0; i < pvt->csels[dct].b_cnt; i++)
445 #define chip_select_base(i, dct, pvt) \
446 pvt->csels[dct].csbases[i]
448 #define for_each_chip_select_mask(i, dct, pvt) \
449 for (i = 0; i < pvt->csels[dct].m_cnt; i++)
452 * @input_addr is an InputAddr associated with the node given by mci. Return the
453 * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
455 static int input_addr_to_csrow(struct mem_ctl_info
*mci
, u64 input_addr
)
457 struct amd64_pvt
*pvt
;
463 for_each_chip_select(csrow
, 0, pvt
) {
464 if (!csrow_enabled(csrow
, 0, pvt
))
467 get_cs_base_and_mask(pvt
, csrow
, 0, &base
, &mask
);
471 if ((input_addr
& mask
) == (base
& mask
)) {
472 edac_dbg(2, "InputAddr 0x%lx matches csrow %d (node %d)\n",
473 (unsigned long)input_addr
, csrow
,
479 edac_dbg(2, "no matching csrow for InputAddr 0x%lx (MC node %d)\n",
480 (unsigned long)input_addr
, pvt
->mc_node_id
);
486 * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
487 * for the node represented by mci. Info is passed back in *hole_base,
488 * *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if
489 * info is invalid. Info may be invalid for either of the following reasons:
491 * - The revision of the node is not E or greater. In this case, the DRAM Hole
492 * Address Register does not exist.
494 * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
495 * indicating that its contents are not valid.
497 * The values passed back in *hole_base, *hole_offset, and *hole_size are
498 * complete 32-bit values despite the fact that the bitfields in the DHAR
499 * only represent bits 31-24 of the base and offset values.
501 int amd64_get_dram_hole_info(struct mem_ctl_info
*mci
, u64
*hole_base
,
502 u64
*hole_offset
, u64
*hole_size
)
504 struct amd64_pvt
*pvt
= mci
->pvt_info
;
506 /* only revE and later have the DRAM Hole Address Register */
507 if (pvt
->fam
== 0xf && pvt
->ext_model
< K8_REV_E
) {
508 edac_dbg(1, " revision %d for node %d does not support DHAR\n",
509 pvt
->ext_model
, pvt
->mc_node_id
);
513 /* valid for Fam10h and above */
514 if (pvt
->fam
>= 0x10 && !dhar_mem_hoist_valid(pvt
)) {
515 edac_dbg(1, " Dram Memory Hoisting is DISABLED on this system\n");
519 if (!dhar_valid(pvt
)) {
520 edac_dbg(1, " Dram Memory Hoisting is DISABLED on this node %d\n",
525 /* This node has Memory Hoisting */
527 /* +------------------+--------------------+--------------------+-----
528 * | memory | DRAM hole | relocated |
529 * | [0, (x - 1)] | [x, 0xffffffff] | addresses from |
531 * | | | [0x100000000, |
532 * | | | (0x100000000+ |
533 * | | | (0xffffffff-x))] |
534 * +------------------+--------------------+--------------------+-----
536 * Above is a diagram of physical memory showing the DRAM hole and the
537 * relocated addresses from the DRAM hole. As shown, the DRAM hole
538 * starts at address x (the base address) and extends through address
539 * 0xffffffff. The DRAM Hole Address Register (DHAR) relocates the
540 * addresses in the hole so that they start at 0x100000000.
543 *hole_base
= dhar_base(pvt
);
544 *hole_size
= (1ULL << 32) - *hole_base
;
546 *hole_offset
= (pvt
->fam
> 0xf) ? f10_dhar_offset(pvt
)
547 : k8_dhar_offset(pvt
);
549 edac_dbg(1, " DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
550 pvt
->mc_node_id
, (unsigned long)*hole_base
,
551 (unsigned long)*hole_offset
, (unsigned long)*hole_size
);
555 EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info
);
558 * Return the DramAddr that the SysAddr given by @sys_addr maps to. It is
559 * assumed that sys_addr maps to the node given by mci.
561 * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
562 * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
563 * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
564 * then it is also involved in translating a SysAddr to a DramAddr. Sections
565 * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
566 * These parts of the documentation are unclear. I interpret them as follows:
568 * When node n receives a SysAddr, it processes the SysAddr as follows:
570 * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
571 * Limit registers for node n. If the SysAddr is not within the range
572 * specified by the base and limit values, then node n ignores the Sysaddr
573 * (since it does not map to node n). Otherwise continue to step 2 below.
575 * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
576 * disabled so skip to step 3 below. Otherwise see if the SysAddr is within
577 * the range of relocated addresses (starting at 0x100000000) from the DRAM
578 * hole. If not, skip to step 3 below. Else get the value of the
579 * DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
580 * offset defined by this value from the SysAddr.
582 * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
583 * Base register for node n. To obtain the DramAddr, subtract the base
584 * address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
586 static u64
sys_addr_to_dram_addr(struct mem_ctl_info
*mci
, u64 sys_addr
)
588 struct amd64_pvt
*pvt
= mci
->pvt_info
;
589 u64 dram_base
, hole_base
, hole_offset
, hole_size
, dram_addr
;
592 dram_base
= get_dram_base(pvt
, pvt
->mc_node_id
);
594 ret
= amd64_get_dram_hole_info(mci
, &hole_base
, &hole_offset
,
597 if ((sys_addr
>= (1ULL << 32)) &&
598 (sys_addr
< ((1ULL << 32) + hole_size
))) {
599 /* use DHAR to translate SysAddr to DramAddr */
600 dram_addr
= sys_addr
- hole_offset
;
602 edac_dbg(2, "using DHAR to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
603 (unsigned long)sys_addr
,
604 (unsigned long)dram_addr
);
611 * Translate the SysAddr to a DramAddr as shown near the start of
612 * section 3.4.4 (p. 70). Although sys_addr is a 64-bit value, the k8
613 * only deals with 40-bit values. Therefore we discard bits 63-40 of
614 * sys_addr below. If bit 39 of sys_addr is 1 then the bits we
615 * discard are all 1s. Otherwise the bits we discard are all 0s. See
616 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
617 * Programmer's Manual Volume 1 Application Programming.
619 dram_addr
= (sys_addr
& GENMASK_ULL(39, 0)) - dram_base
;
621 edac_dbg(2, "using DRAM Base register to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
622 (unsigned long)sys_addr
, (unsigned long)dram_addr
);
627 * @intlv_en is the value of the IntlvEn field from a DRAM Base register
628 * (section 3.4.4.1). Return the number of bits from a SysAddr that are used
629 * for node interleaving.
631 static int num_node_interleave_bits(unsigned intlv_en
)
633 static const int intlv_shift_table
[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
636 BUG_ON(intlv_en
> 7);
637 n
= intlv_shift_table
[intlv_en
];
641 /* Translate the DramAddr given by @dram_addr to an InputAddr. */
642 static u64
dram_addr_to_input_addr(struct mem_ctl_info
*mci
, u64 dram_addr
)
644 struct amd64_pvt
*pvt
;
651 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
652 * concerning translating a DramAddr to an InputAddr.
654 intlv_shift
= num_node_interleave_bits(dram_intlv_en(pvt
, 0));
655 input_addr
= ((dram_addr
>> intlv_shift
) & GENMASK_ULL(35, 12)) +
658 edac_dbg(2, " Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
659 intlv_shift
, (unsigned long)dram_addr
,
660 (unsigned long)input_addr
);
666 * Translate the SysAddr represented by @sys_addr to an InputAddr. It is
667 * assumed that @sys_addr maps to the node given by mci.
669 static u64
sys_addr_to_input_addr(struct mem_ctl_info
*mci
, u64 sys_addr
)
674 dram_addr_to_input_addr(mci
, sys_addr_to_dram_addr(mci
, sys_addr
));
676 edac_dbg(2, "SysAddr 0x%lx translates to InputAddr 0x%lx\n",
677 (unsigned long)sys_addr
, (unsigned long)input_addr
);
682 /* Map the Error address to a PAGE and PAGE OFFSET. */
683 static inline void error_address_to_page_and_offset(u64 error_address
,
684 struct err_info
*err
)
686 err
->page
= (u32
) (error_address
>> PAGE_SHIFT
);
687 err
->offset
= ((u32
) error_address
) & ~PAGE_MASK
;
691 * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
692 * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
693 * of a node that detected an ECC memory error. mci represents the node that
694 * the error address maps to (possibly different from the node that detected
695 * the error). Return the number of the csrow that sys_addr maps to, or -1 on
698 static int sys_addr_to_csrow(struct mem_ctl_info
*mci
, u64 sys_addr
)
702 csrow
= input_addr_to_csrow(mci
, sys_addr_to_input_addr(mci
, sys_addr
));
705 amd64_mc_err(mci
, "Failed to translate InputAddr to csrow for "
706 "address 0x%lx\n", (unsigned long)sys_addr
);
710 static int get_channel_from_ecc_syndrome(struct mem_ctl_info
*, u16
);
713 * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
716 static unsigned long determine_edac_cap(struct amd64_pvt
*pvt
)
718 unsigned long edac_cap
= EDAC_FLAG_NONE
;
722 u8 i
, umc_en_mask
= 0, dimm_ecc_en_mask
= 0;
724 for (i
= 0; i
< NUM_UMCS
; i
++) {
725 if (!(pvt
->umc
[i
].sdp_ctrl
& UMC_SDP_INIT
))
728 umc_en_mask
|= BIT(i
);
730 /* UMC Configuration bit 12 (DimmEccEn) */
731 if (pvt
->umc
[i
].umc_cfg
& BIT(12))
732 dimm_ecc_en_mask
|= BIT(i
);
735 if (umc_en_mask
== dimm_ecc_en_mask
)
736 edac_cap
= EDAC_FLAG_SECDED
;
738 bit
= (pvt
->fam
> 0xf || pvt
->ext_model
>= K8_REV_F
)
742 if (pvt
->dclr0
& BIT(bit
))
743 edac_cap
= EDAC_FLAG_SECDED
;
749 static void debug_display_dimm_sizes(struct amd64_pvt
*, u8
);
751 static void debug_dump_dramcfg_low(struct amd64_pvt
*pvt
, u32 dclr
, int chan
)
753 edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan
, dclr
);
755 if (pvt
->dram_type
== MEM_LRDDR3
) {
756 u32 dcsm
= pvt
->csels
[chan
].csmasks
[0];
758 * It's assumed all LRDIMMs in a DCT are going to be of
759 * same 'type' until proven otherwise. So, use a cs
760 * value of '0' here to get dcsm value.
762 edac_dbg(1, " LRDIMM %dx rank multiply\n", (dcsm
& 0x3));
765 edac_dbg(1, "All DIMMs support ECC:%s\n",
766 (dclr
& BIT(19)) ? "yes" : "no");
769 edac_dbg(1, " PAR/ERR parity: %s\n",
770 (dclr
& BIT(8)) ? "enabled" : "disabled");
772 if (pvt
->fam
== 0x10)
773 edac_dbg(1, " DCT 128bit mode width: %s\n",
774 (dclr
& BIT(11)) ? "128b" : "64b");
776 edac_dbg(1, " x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
777 (dclr
& BIT(12)) ? "yes" : "no",
778 (dclr
& BIT(13)) ? "yes" : "no",
779 (dclr
& BIT(14)) ? "yes" : "no",
780 (dclr
& BIT(15)) ? "yes" : "no");
783 static void debug_display_dimm_sizes_df(struct amd64_pvt
*pvt
, u8 ctrl
)
785 u32
*dcsb
= ctrl
? pvt
->csels
[1].csbases
: pvt
->csels
[0].csbases
;
786 int dimm
, size0
, size1
;
788 edac_printk(KERN_DEBUG
, EDAC_MC
, "UMC%d chip selects:\n", ctrl
);
790 for (dimm
= 0; dimm
< 4; dimm
++) {
793 if (dcsb
[dimm
*2] & DCSB_CS_ENABLE
)
794 size0
= pvt
->ops
->dbam_to_cs(pvt
, ctrl
, 0, dimm
);
797 if (dcsb
[dimm
*2 + 1] & DCSB_CS_ENABLE
)
798 size1
= pvt
->ops
->dbam_to_cs(pvt
, ctrl
, 0, dimm
);
800 amd64_info(EDAC_MC
": %d: %5dMB %d: %5dMB\n",
802 dimm
* 2 + 1, size1
);
806 static void __dump_misc_regs_df(struct amd64_pvt
*pvt
)
808 struct amd64_umc
*umc
;
809 u32 i
, tmp
, umc_base
;
811 for (i
= 0; i
< NUM_UMCS
; i
++) {
812 umc_base
= get_umc_base(i
);
815 edac_dbg(1, "UMC%d DIMM cfg: 0x%x\n", i
, umc
->dimm_cfg
);
816 edac_dbg(1, "UMC%d UMC cfg: 0x%x\n", i
, umc
->umc_cfg
);
817 edac_dbg(1, "UMC%d SDP ctrl: 0x%x\n", i
, umc
->sdp_ctrl
);
818 edac_dbg(1, "UMC%d ECC ctrl: 0x%x\n", i
, umc
->ecc_ctrl
);
820 amd_smn_read(pvt
->mc_node_id
, umc_base
+ UMCCH_ECC_BAD_SYMBOL
, &tmp
);
821 edac_dbg(1, "UMC%d ECC bad symbol: 0x%x\n", i
, tmp
);
823 amd_smn_read(pvt
->mc_node_id
, umc_base
+ UMCCH_UMC_CAP
, &tmp
);
824 edac_dbg(1, "UMC%d UMC cap: 0x%x\n", i
, tmp
);
825 edac_dbg(1, "UMC%d UMC cap high: 0x%x\n", i
, umc
->umc_cap_hi
);
827 edac_dbg(1, "UMC%d ECC capable: %s, ChipKill ECC capable: %s\n",
828 i
, (umc
->umc_cap_hi
& BIT(30)) ? "yes" : "no",
829 (umc
->umc_cap_hi
& BIT(31)) ? "yes" : "no");
830 edac_dbg(1, "UMC%d All DIMMs support ECC: %s\n",
831 i
, (umc
->umc_cfg
& BIT(12)) ? "yes" : "no");
832 edac_dbg(1, "UMC%d x4 DIMMs present: %s\n",
833 i
, (umc
->dimm_cfg
& BIT(6)) ? "yes" : "no");
834 edac_dbg(1, "UMC%d x16 DIMMs present: %s\n",
835 i
, (umc
->dimm_cfg
& BIT(7)) ? "yes" : "no");
837 if (pvt
->dram_type
== MEM_LRDDR4
) {
838 amd_smn_read(pvt
->mc_node_id
, umc_base
+ UMCCH_ADDR_CFG
, &tmp
);
839 edac_dbg(1, "UMC%d LRDIMM %dx rank multiply\n",
840 i
, 1 << ((tmp
>> 4) & 0x3));
843 debug_display_dimm_sizes_df(pvt
, i
);
846 edac_dbg(1, "F0x104 (DRAM Hole Address): 0x%08x, base: 0x%08x\n",
847 pvt
->dhar
, dhar_base(pvt
));
850 /* Display and decode various NB registers for debug purposes. */
851 static void __dump_misc_regs(struct amd64_pvt
*pvt
)
853 edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt
->nbcap
);
855 edac_dbg(1, " NB two channel DRAM capable: %s\n",
856 (pvt
->nbcap
& NBCAP_DCT_DUAL
) ? "yes" : "no");
858 edac_dbg(1, " ECC capable: %s, ChipKill ECC capable: %s\n",
859 (pvt
->nbcap
& NBCAP_SECDED
) ? "yes" : "no",
860 (pvt
->nbcap
& NBCAP_CHIPKILL
) ? "yes" : "no");
862 debug_dump_dramcfg_low(pvt
, pvt
->dclr0
, 0);
864 edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt
->online_spare
);
866 edac_dbg(1, "F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, offset: 0x%08x\n",
867 pvt
->dhar
, dhar_base(pvt
),
868 (pvt
->fam
== 0xf) ? k8_dhar_offset(pvt
)
869 : f10_dhar_offset(pvt
));
871 debug_display_dimm_sizes(pvt
, 0);
873 /* everything below this point is Fam10h and above */
877 debug_display_dimm_sizes(pvt
, 1);
879 /* Only if NOT ganged does dclr1 have valid info */
880 if (!dct_ganging_enabled(pvt
))
881 debug_dump_dramcfg_low(pvt
, pvt
->dclr1
, 1);
884 /* Display and decode various NB registers for debug purposes. */
885 static void dump_misc_regs(struct amd64_pvt
*pvt
)
888 __dump_misc_regs_df(pvt
);
890 __dump_misc_regs(pvt
);
892 edac_dbg(1, " DramHoleValid: %s\n", dhar_valid(pvt
) ? "yes" : "no");
894 amd64_info("using %s syndromes.\n",
895 ((pvt
->ecc_sym_sz
== 8) ? "x8" : "x4"));
899 * See BKDG, F2x[1,0][5C:40], F2[1,0][6C:60]
901 static void prep_chip_selects(struct amd64_pvt
*pvt
)
903 if (pvt
->fam
== 0xf && pvt
->ext_model
< K8_REV_F
) {
904 pvt
->csels
[0].b_cnt
= pvt
->csels
[1].b_cnt
= 8;
905 pvt
->csels
[0].m_cnt
= pvt
->csels
[1].m_cnt
= 8;
906 } else if (pvt
->fam
== 0x15 && pvt
->model
== 0x30) {
907 pvt
->csels
[0].b_cnt
= pvt
->csels
[1].b_cnt
= 4;
908 pvt
->csels
[0].m_cnt
= pvt
->csels
[1].m_cnt
= 2;
910 pvt
->csels
[0].b_cnt
= pvt
->csels
[1].b_cnt
= 8;
911 pvt
->csels
[0].m_cnt
= pvt
->csels
[1].m_cnt
= 4;
916 * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
918 static void read_dct_base_mask(struct amd64_pvt
*pvt
)
920 int base_reg0
, base_reg1
, mask_reg0
, mask_reg1
, cs
;
922 prep_chip_selects(pvt
);
925 base_reg0
= get_umc_base(0) + UMCCH_BASE_ADDR
;
926 base_reg1
= get_umc_base(1) + UMCCH_BASE_ADDR
;
927 mask_reg0
= get_umc_base(0) + UMCCH_ADDR_MASK
;
928 mask_reg1
= get_umc_base(1) + UMCCH_ADDR_MASK
;
936 for_each_chip_select(cs
, 0, pvt
) {
937 int reg0
= base_reg0
+ (cs
* 4);
938 int reg1
= base_reg1
+ (cs
* 4);
939 u32
*base0
= &pvt
->csels
[0].csbases
[cs
];
940 u32
*base1
= &pvt
->csels
[1].csbases
[cs
];
943 if (!amd_smn_read(pvt
->mc_node_id
, reg0
, base0
))
944 edac_dbg(0, " DCSB0[%d]=0x%08x reg: 0x%x\n",
947 if (!amd_smn_read(pvt
->mc_node_id
, reg1
, base1
))
948 edac_dbg(0, " DCSB1[%d]=0x%08x reg: 0x%x\n",
951 if (!amd64_read_dct_pci_cfg(pvt
, 0, reg0
, base0
))
952 edac_dbg(0, " DCSB0[%d]=0x%08x reg: F2x%x\n",
958 if (!amd64_read_dct_pci_cfg(pvt
, 1, reg0
, base1
))
959 edac_dbg(0, " DCSB1[%d]=0x%08x reg: F2x%x\n",
960 cs
, *base1
, (pvt
->fam
== 0x10) ? reg1
965 for_each_chip_select_mask(cs
, 0, pvt
) {
966 int reg0
= mask_reg0
+ (cs
* 4);
967 int reg1
= mask_reg1
+ (cs
* 4);
968 u32
*mask0
= &pvt
->csels
[0].csmasks
[cs
];
969 u32
*mask1
= &pvt
->csels
[1].csmasks
[cs
];
972 if (!amd_smn_read(pvt
->mc_node_id
, reg0
, mask0
))
973 edac_dbg(0, " DCSM0[%d]=0x%08x reg: 0x%x\n",
976 if (!amd_smn_read(pvt
->mc_node_id
, reg1
, mask1
))
977 edac_dbg(0, " DCSM1[%d]=0x%08x reg: 0x%x\n",
980 if (!amd64_read_dct_pci_cfg(pvt
, 0, reg0
, mask0
))
981 edac_dbg(0, " DCSM0[%d]=0x%08x reg: F2x%x\n",
987 if (!amd64_read_dct_pci_cfg(pvt
, 1, reg0
, mask1
))
988 edac_dbg(0, " DCSM1[%d]=0x%08x reg: F2x%x\n",
989 cs
, *mask1
, (pvt
->fam
== 0x10) ? reg1
995 static void determine_memory_type(struct amd64_pvt
*pvt
)
1001 if (pvt
->ext_model
>= K8_REV_F
)
1004 pvt
->dram_type
= (pvt
->dclr0
& BIT(18)) ? MEM_DDR
: MEM_RDDR
;
1008 if (pvt
->dchr0
& DDR3_MODE
)
1011 pvt
->dram_type
= (pvt
->dclr0
& BIT(16)) ? MEM_DDR2
: MEM_RDDR2
;
1015 if (pvt
->model
< 0x60)
1019 * Model 0x60h needs special handling:
1021 * We use a Chip Select value of '0' to obtain dcsm.
1022 * Theoretically, it is possible to populate LRDIMMs of different
1023 * 'Rank' value on a DCT. But this is not the common case. So,
1024 * it's reasonable to assume all DIMMs are going to be of same
1025 * 'type' until proven otherwise.
1027 amd64_read_dct_pci_cfg(pvt
, 0, DRAM_CONTROL
, &dram_ctrl
);
1028 dcsm
= pvt
->csels
[0].csmasks
[0];
1030 if (((dram_ctrl
>> 8) & 0x7) == 0x2)
1031 pvt
->dram_type
= MEM_DDR4
;
1032 else if (pvt
->dclr0
& BIT(16))
1033 pvt
->dram_type
= MEM_DDR3
;
1034 else if (dcsm
& 0x3)
1035 pvt
->dram_type
= MEM_LRDDR3
;
1037 pvt
->dram_type
= MEM_RDDR3
;
1045 if ((pvt
->umc
[0].dimm_cfg
| pvt
->umc
[1].dimm_cfg
) & BIT(5))
1046 pvt
->dram_type
= MEM_LRDDR4
;
1047 else if ((pvt
->umc
[0].dimm_cfg
| pvt
->umc
[1].dimm_cfg
) & BIT(4))
1048 pvt
->dram_type
= MEM_RDDR4
;
1050 pvt
->dram_type
= MEM_DDR4
;
1054 WARN(1, KERN_ERR
"%s: Family??? 0x%x\n", __func__
, pvt
->fam
);
1055 pvt
->dram_type
= MEM_EMPTY
;
1060 pvt
->dram_type
= (pvt
->dclr0
& BIT(16)) ? MEM_DDR3
: MEM_RDDR3
;
1063 /* Get the number of DCT channels the memory controller is using. */
1064 static int k8_early_channel_count(struct amd64_pvt
*pvt
)
1068 if (pvt
->ext_model
>= K8_REV_F
)
1069 /* RevF (NPT) and later */
1070 flag
= pvt
->dclr0
& WIDTH_128
;
1072 /* RevE and earlier */
1073 flag
= pvt
->dclr0
& REVE_WIDTH_128
;
1078 return (flag
) ? 2 : 1;
1081 /* On F10h and later ErrAddr is MC4_ADDR[47:1] */
1082 static u64
get_error_address(struct amd64_pvt
*pvt
, struct mce
*m
)
1084 u16 mce_nid
= amd_get_nb_id(m
->extcpu
);
1085 struct mem_ctl_info
*mci
;
1090 mci
= edac_mc_find(mce_nid
);
1094 pvt
= mci
->pvt_info
;
1096 if (pvt
->fam
== 0xf) {
1101 addr
= m
->addr
& GENMASK_ULL(end_bit
, start_bit
);
1104 * Erratum 637 workaround
1106 if (pvt
->fam
== 0x15) {
1107 u64 cc6_base
, tmp_addr
;
1111 if ((addr
& GENMASK_ULL(47, 24)) >> 24 != 0x00fdf7)
1115 amd64_read_pci_cfg(pvt
->F1
, DRAM_LOCAL_NODE_LIM
, &tmp
);
1116 intlv_en
= tmp
>> 21 & 0x7;
1118 /* add [47:27] + 3 trailing bits */
1119 cc6_base
= (tmp
& GENMASK_ULL(20, 0)) << 3;
1121 /* reverse and add DramIntlvEn */
1122 cc6_base
|= intlv_en
^ 0x7;
1124 /* pin at [47:24] */
1128 return cc6_base
| (addr
& GENMASK_ULL(23, 0));
1130 amd64_read_pci_cfg(pvt
->F1
, DRAM_LOCAL_NODE_BASE
, &tmp
);
1133 tmp_addr
= (addr
& GENMASK_ULL(23, 12)) << __fls(intlv_en
+ 1);
1135 /* OR DramIntlvSel into bits [14:12] */
1136 tmp_addr
|= (tmp
& GENMASK_ULL(23, 21)) >> 9;
1138 /* add remaining [11:0] bits from original MC4_ADDR */
1139 tmp_addr
|= addr
& GENMASK_ULL(11, 0);
1141 return cc6_base
| tmp_addr
;
1147 static struct pci_dev
*pci_get_related_function(unsigned int vendor
,
1148 unsigned int device
,
1149 struct pci_dev
*related
)
1151 struct pci_dev
*dev
= NULL
;
1153 while ((dev
= pci_get_device(vendor
, device
, dev
))) {
1154 if (pci_domain_nr(dev
->bus
) == pci_domain_nr(related
->bus
) &&
1155 (dev
->bus
->number
== related
->bus
->number
) &&
1156 (PCI_SLOT(dev
->devfn
) == PCI_SLOT(related
->devfn
)))
1163 static void read_dram_base_limit_regs(struct amd64_pvt
*pvt
, unsigned range
)
1165 struct amd_northbridge
*nb
;
1166 struct pci_dev
*f1
= NULL
;
1167 unsigned int pci_func
;
1168 int off
= range
<< 3;
1171 amd64_read_pci_cfg(pvt
->F1
, DRAM_BASE_LO
+ off
, &pvt
->ranges
[range
].base
.lo
);
1172 amd64_read_pci_cfg(pvt
->F1
, DRAM_LIMIT_LO
+ off
, &pvt
->ranges
[range
].lim
.lo
);
1174 if (pvt
->fam
== 0xf)
1177 if (!dram_rw(pvt
, range
))
1180 amd64_read_pci_cfg(pvt
->F1
, DRAM_BASE_HI
+ off
, &pvt
->ranges
[range
].base
.hi
);
1181 amd64_read_pci_cfg(pvt
->F1
, DRAM_LIMIT_HI
+ off
, &pvt
->ranges
[range
].lim
.hi
);
1183 /* F15h: factor in CC6 save area by reading dst node's limit reg */
1184 if (pvt
->fam
!= 0x15)
1187 nb
= node_to_amd_nb(dram_dst_node(pvt
, range
));
1191 if (pvt
->model
== 0x60)
1192 pci_func
= PCI_DEVICE_ID_AMD_15H_M60H_NB_F1
;
1193 else if (pvt
->model
== 0x30)
1194 pci_func
= PCI_DEVICE_ID_AMD_15H_M30H_NB_F1
;
1196 pci_func
= PCI_DEVICE_ID_AMD_15H_NB_F1
;
1198 f1
= pci_get_related_function(nb
->misc
->vendor
, pci_func
, nb
->misc
);
1202 amd64_read_pci_cfg(f1
, DRAM_LOCAL_NODE_LIM
, &llim
);
1204 pvt
->ranges
[range
].lim
.lo
&= GENMASK_ULL(15, 0);
1206 /* {[39:27],111b} */
1207 pvt
->ranges
[range
].lim
.lo
|= ((llim
& 0x1fff) << 3 | 0x7) << 16;
1209 pvt
->ranges
[range
].lim
.hi
&= GENMASK_ULL(7, 0);
1212 pvt
->ranges
[range
].lim
.hi
|= llim
>> 13;
1217 static void k8_map_sysaddr_to_csrow(struct mem_ctl_info
*mci
, u64 sys_addr
,
1218 struct err_info
*err
)
1220 struct amd64_pvt
*pvt
= mci
->pvt_info
;
1222 error_address_to_page_and_offset(sys_addr
, err
);
1225 * Find out which node the error address belongs to. This may be
1226 * different from the node that detected the error.
1228 err
->src_mci
= find_mc_by_sys_addr(mci
, sys_addr
);
1229 if (!err
->src_mci
) {
1230 amd64_mc_err(mci
, "failed to map error addr 0x%lx to a node\n",
1231 (unsigned long)sys_addr
);
1232 err
->err_code
= ERR_NODE
;
1236 /* Now map the sys_addr to a CSROW */
1237 err
->csrow
= sys_addr_to_csrow(err
->src_mci
, sys_addr
);
1238 if (err
->csrow
< 0) {
1239 err
->err_code
= ERR_CSROW
;
1243 /* CHIPKILL enabled */
1244 if (pvt
->nbcfg
& NBCFG_CHIPKILL
) {
1245 err
->channel
= get_channel_from_ecc_syndrome(mci
, err
->syndrome
);
1246 if (err
->channel
< 0) {
1248 * Syndrome didn't map, so we don't know which of the
1249 * 2 DIMMs is in error. So we need to ID 'both' of them
1252 amd64_mc_warn(err
->src_mci
, "unknown syndrome 0x%04x - "
1253 "possible error reporting race\n",
1255 err
->err_code
= ERR_CHANNEL
;
1260 * non-chipkill ecc mode
1262 * The k8 documentation is unclear about how to determine the
1263 * channel number when using non-chipkill memory. This method
1264 * was obtained from email communication with someone at AMD.
1265 * (Wish the email was placed in this comment - norsk)
1267 err
->channel
= ((sys_addr
& BIT(3)) != 0);
1271 static int ddr2_cs_size(unsigned i
, bool dct_width
)
1277 else if (!(i
& 0x1))
1280 shift
= (i
+ 1) >> 1;
1282 return 128 << (shift
+ !!dct_width
);
1285 static int k8_dbam_to_chip_select(struct amd64_pvt
*pvt
, u8 dct
,
1286 unsigned cs_mode
, int cs_mask_nr
)
1288 u32 dclr
= dct
? pvt
->dclr1
: pvt
->dclr0
;
1290 if (pvt
->ext_model
>= K8_REV_F
) {
1291 WARN_ON(cs_mode
> 11);
1292 return ddr2_cs_size(cs_mode
, dclr
& WIDTH_128
);
1294 else if (pvt
->ext_model
>= K8_REV_D
) {
1296 WARN_ON(cs_mode
> 10);
1299 * the below calculation, besides trying to win an obfuscated C
1300 * contest, maps cs_mode values to DIMM chip select sizes. The
1303 * cs_mode CS size (mb)
1304 * ======= ============
1317 * Basically, it calculates a value with which to shift the
1318 * smallest CS size of 32MB.
1320 * ddr[23]_cs_size have a similar purpose.
1322 diff
= cs_mode
/3 + (unsigned)(cs_mode
> 5);
1324 return 32 << (cs_mode
- diff
);
1327 WARN_ON(cs_mode
> 6);
1328 return 32 << cs_mode
;
1333 * Get the number of DCT channels in use.
1336 * number of Memory Channels in operation
1338 * contents of the DCL0_LOW register
1340 static int f1x_early_channel_count(struct amd64_pvt
*pvt
)
1342 int i
, j
, channels
= 0;
1344 /* On F10h, if we are in 128 bit mode, then we are using 2 channels */
1345 if (pvt
->fam
== 0x10 && (pvt
->dclr0
& WIDTH_128
))
1349 * Need to check if in unganged mode: In such, there are 2 channels,
1350 * but they are not in 128 bit mode and thus the above 'dclr0' status
1353 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
1354 * their CSEnable bit on. If so, then SINGLE DIMM case.
1356 edac_dbg(0, "Data width is not 128 bits - need more decoding\n");
1359 * Check DRAM Bank Address Mapping values for each DIMM to see if there
1360 * is more than just one DIMM present in unganged mode. Need to check
1361 * both controllers since DIMMs can be placed in either one.
1363 for (i
= 0; i
< 2; i
++) {
1364 u32 dbam
= (i
? pvt
->dbam1
: pvt
->dbam0
);
1366 for (j
= 0; j
< 4; j
++) {
1367 if (DBAM_DIMM(j
, dbam
) > 0) {
1377 amd64_info("MCT channel count: %d\n", channels
);
1382 static int f17_early_channel_count(struct amd64_pvt
*pvt
)
1384 int i
, channels
= 0;
1386 /* SDP Control bit 31 (SdpInit) is clear for unused UMC channels */
1387 for (i
= 0; i
< NUM_UMCS
; i
++)
1388 channels
+= !!(pvt
->umc
[i
].sdp_ctrl
& UMC_SDP_INIT
);
1390 amd64_info("MCT channel count: %d\n", channels
);
1395 static int ddr3_cs_size(unsigned i
, bool dct_width
)
1400 if (i
== 0 || i
== 3 || i
== 4)
1406 else if (!(i
& 0x1))
1409 shift
= (i
+ 1) >> 1;
1412 cs_size
= (128 * (1 << !!dct_width
)) << shift
;
1417 static int ddr3_lrdimm_cs_size(unsigned i
, unsigned rank_multiply
)
1422 if (i
< 4 || i
== 6)
1426 else if (!(i
& 0x1))
1429 shift
= (i
+ 1) >> 1;
1432 cs_size
= rank_multiply
* (128 << shift
);
1437 static int ddr4_cs_size(unsigned i
)
1446 /* Min cs_size = 1G */
1447 cs_size
= 1024 * (1 << (i
>> 1));
1452 static int f10_dbam_to_chip_select(struct amd64_pvt
*pvt
, u8 dct
,
1453 unsigned cs_mode
, int cs_mask_nr
)
1455 u32 dclr
= dct
? pvt
->dclr1
: pvt
->dclr0
;
1457 WARN_ON(cs_mode
> 11);
1459 if (pvt
->dchr0
& DDR3_MODE
|| pvt
->dchr1
& DDR3_MODE
)
1460 return ddr3_cs_size(cs_mode
, dclr
& WIDTH_128
);
1462 return ddr2_cs_size(cs_mode
, dclr
& WIDTH_128
);
1466 * F15h supports only 64bit DCT interfaces
1468 static int f15_dbam_to_chip_select(struct amd64_pvt
*pvt
, u8 dct
,
1469 unsigned cs_mode
, int cs_mask_nr
)
1471 WARN_ON(cs_mode
> 12);
1473 return ddr3_cs_size(cs_mode
, false);
1476 /* F15h M60h supports DDR4 mapping as well.. */
1477 static int f15_m60h_dbam_to_chip_select(struct amd64_pvt
*pvt
, u8 dct
,
1478 unsigned cs_mode
, int cs_mask_nr
)
1481 u32 dcsm
= pvt
->csels
[dct
].csmasks
[cs_mask_nr
];
1483 WARN_ON(cs_mode
> 12);
1485 if (pvt
->dram_type
== MEM_DDR4
) {
1489 cs_size
= ddr4_cs_size(cs_mode
);
1490 } else if (pvt
->dram_type
== MEM_LRDDR3
) {
1491 unsigned rank_multiply
= dcsm
& 0xf;
1493 if (rank_multiply
== 3)
1495 cs_size
= ddr3_lrdimm_cs_size(cs_mode
, rank_multiply
);
1497 /* Minimum cs size is 512mb for F15hM60h*/
1501 cs_size
= ddr3_cs_size(cs_mode
, false);
1508 * F16h and F15h model 30h have only limited cs_modes.
1510 static int f16_dbam_to_chip_select(struct amd64_pvt
*pvt
, u8 dct
,
1511 unsigned cs_mode
, int cs_mask_nr
)
1513 WARN_ON(cs_mode
> 12);
1515 if (cs_mode
== 6 || cs_mode
== 8 ||
1516 cs_mode
== 9 || cs_mode
== 12)
1519 return ddr3_cs_size(cs_mode
, false);
1522 static int f17_base_addr_to_cs_size(struct amd64_pvt
*pvt
, u8 umc
,
1523 unsigned int cs_mode
, int csrow_nr
)
1525 u32 base_addr
= pvt
->csels
[umc
].csbases
[csrow_nr
];
1527 /* Each mask is used for every two base addresses. */
1528 u32 addr_mask
= pvt
->csels
[umc
].csmasks
[csrow_nr
>> 1];
1530 /* Register [31:1] = Address [39:9]. Size is in kBs here. */
1531 u32 size
= ((addr_mask
>> 1) - (base_addr
>> 1) + 1) >> 1;
1533 edac_dbg(1, "BaseAddr: 0x%x, AddrMask: 0x%x\n", base_addr
, addr_mask
);
1535 /* Return size in MBs. */
1539 static void read_dram_ctl_register(struct amd64_pvt
*pvt
)
1542 if (pvt
->fam
== 0xf)
1545 if (!amd64_read_pci_cfg(pvt
->F2
, DCT_SEL_LO
, &pvt
->dct_sel_lo
)) {
1546 edac_dbg(0, "F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
1547 pvt
->dct_sel_lo
, dct_sel_baseaddr(pvt
));
1549 edac_dbg(0, " DCTs operate in %s mode\n",
1550 (dct_ganging_enabled(pvt
) ? "ganged" : "unganged"));
1552 if (!dct_ganging_enabled(pvt
))
1553 edac_dbg(0, " Address range split per DCT: %s\n",
1554 (dct_high_range_enabled(pvt
) ? "yes" : "no"));
1556 edac_dbg(0, " data interleave for ECC: %s, DRAM cleared since last warm reset: %s\n",
1557 (dct_data_intlv_enabled(pvt
) ? "enabled" : "disabled"),
1558 (dct_memory_cleared(pvt
) ? "yes" : "no"));
1560 edac_dbg(0, " channel interleave: %s, "
1561 "interleave bits selector: 0x%x\n",
1562 (dct_interleave_enabled(pvt
) ? "enabled" : "disabled"),
1563 dct_sel_interleave_addr(pvt
));
1566 amd64_read_pci_cfg(pvt
->F2
, DCT_SEL_HI
, &pvt
->dct_sel_hi
);
1570 * Determine channel (DCT) based on the interleaving mode (see F15h M30h BKDG,
1571 * 2.10.12 Memory Interleaving Modes).
1573 static u8
f15_m30h_determine_channel(struct amd64_pvt
*pvt
, u64 sys_addr
,
1574 u8 intlv_en
, int num_dcts_intlv
,
1581 return (u8
)(dct_sel
);
1583 if (num_dcts_intlv
== 2) {
1584 select
= (sys_addr
>> 8) & 0x3;
1585 channel
= select
? 0x3 : 0;
1586 } else if (num_dcts_intlv
== 4) {
1587 u8 intlv_addr
= dct_sel_interleave_addr(pvt
);
1588 switch (intlv_addr
) {
1590 channel
= (sys_addr
>> 8) & 0x3;
1593 channel
= (sys_addr
>> 9) & 0x3;
1601 * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9 Memory
1602 * Interleaving Modes.
1604 static u8
f1x_determine_channel(struct amd64_pvt
*pvt
, u64 sys_addr
,
1605 bool hi_range_sel
, u8 intlv_en
)
1607 u8 dct_sel_high
= (pvt
->dct_sel_lo
>> 1) & 1;
1609 if (dct_ganging_enabled(pvt
))
1613 return dct_sel_high
;
1616 * see F2x110[DctSelIntLvAddr] - channel interleave mode
1618 if (dct_interleave_enabled(pvt
)) {
1619 u8 intlv_addr
= dct_sel_interleave_addr(pvt
);
1621 /* return DCT select function: 0=DCT0, 1=DCT1 */
1623 return sys_addr
>> 6 & 1;
1625 if (intlv_addr
& 0x2) {
1626 u8 shift
= intlv_addr
& 0x1 ? 9 : 6;
1627 u32 temp
= hweight_long((u32
) ((sys_addr
>> 16) & 0x1F)) & 1;
1629 return ((sys_addr
>> shift
) & 1) ^ temp
;
1632 if (intlv_addr
& 0x4) {
1633 u8 shift
= intlv_addr
& 0x1 ? 9 : 8;
1635 return (sys_addr
>> shift
) & 1;
1638 return (sys_addr
>> (12 + hweight8(intlv_en
))) & 1;
1641 if (dct_high_range_enabled(pvt
))
1642 return ~dct_sel_high
& 1;
1647 /* Convert the sys_addr to the normalized DCT address */
1648 static u64
f1x_get_norm_dct_addr(struct amd64_pvt
*pvt
, u8 range
,
1649 u64 sys_addr
, bool hi_rng
,
1650 u32 dct_sel_base_addr
)
1653 u64 dram_base
= get_dram_base(pvt
, range
);
1654 u64 hole_off
= f10_dhar_offset(pvt
);
1655 u64 dct_sel_base_off
= (u64
)(pvt
->dct_sel_hi
& 0xFFFFFC00) << 16;
1660 * base address of high range is below 4Gb
1661 * (bits [47:27] at [31:11])
1662 * DRAM address space on this DCT is hoisted above 4Gb &&
1665 * remove hole offset from sys_addr
1667 * remove high range offset from sys_addr
1669 if ((!(dct_sel_base_addr
>> 16) ||
1670 dct_sel_base_addr
< dhar_base(pvt
)) &&
1672 (sys_addr
>= BIT_64(32)))
1673 chan_off
= hole_off
;
1675 chan_off
= dct_sel_base_off
;
1679 * we have a valid hole &&
1684 * remove dram base to normalize to DCT address
1686 if (dhar_valid(pvt
) && (sys_addr
>= BIT_64(32)))
1687 chan_off
= hole_off
;
1689 chan_off
= dram_base
;
1692 return (sys_addr
& GENMASK_ULL(47,6)) - (chan_off
& GENMASK_ULL(47,23));
1696 * checks if the csrow passed in is marked as SPARED, if so returns the new
1699 static int f10_process_possible_spare(struct amd64_pvt
*pvt
, u8 dct
, int csrow
)
1703 if (online_spare_swap_done(pvt
, dct
) &&
1704 csrow
== online_spare_bad_dramcs(pvt
, dct
)) {
1706 for_each_chip_select(tmp_cs
, dct
, pvt
) {
1707 if (chip_select_base(tmp_cs
, dct
, pvt
) & 0x2) {
1717 * Iterate over the DRAM DCT "base" and "mask" registers looking for a
1718 * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
1721 * -EINVAL: NOT FOUND
1722 * 0..csrow = Chip-Select Row
1724 static int f1x_lookup_addr_in_dct(u64 in_addr
, u8 nid
, u8 dct
)
1726 struct mem_ctl_info
*mci
;
1727 struct amd64_pvt
*pvt
;
1728 u64 cs_base
, cs_mask
;
1729 int cs_found
= -EINVAL
;
1732 mci
= edac_mc_find(nid
);
1736 pvt
= mci
->pvt_info
;
1738 edac_dbg(1, "input addr: 0x%llx, DCT: %d\n", in_addr
, dct
);
1740 for_each_chip_select(csrow
, dct
, pvt
) {
1741 if (!csrow_enabled(csrow
, dct
, pvt
))
1744 get_cs_base_and_mask(pvt
, csrow
, dct
, &cs_base
, &cs_mask
);
1746 edac_dbg(1, " CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
1747 csrow
, cs_base
, cs_mask
);
1751 edac_dbg(1, " (InputAddr & ~CSMask)=0x%llx (CSBase & ~CSMask)=0x%llx\n",
1752 (in_addr
& cs_mask
), (cs_base
& cs_mask
));
1754 if ((in_addr
& cs_mask
) == (cs_base
& cs_mask
)) {
1755 if (pvt
->fam
== 0x15 && pvt
->model
>= 0x30) {
1759 cs_found
= f10_process_possible_spare(pvt
, dct
, csrow
);
1761 edac_dbg(1, " MATCH csrow=%d\n", cs_found
);
1769 * See F2x10C. Non-interleaved graphics framebuffer memory under the 16G is
1770 * swapped with a region located at the bottom of memory so that the GPU can use
1771 * the interleaved region and thus two channels.
1773 static u64
f1x_swap_interleaved_region(struct amd64_pvt
*pvt
, u64 sys_addr
)
1775 u32 swap_reg
, swap_base
, swap_limit
, rgn_size
, tmp_addr
;
1777 if (pvt
->fam
== 0x10) {
1778 /* only revC3 and revE have that feature */
1779 if (pvt
->model
< 4 || (pvt
->model
< 0xa && pvt
->stepping
< 3))
1783 amd64_read_pci_cfg(pvt
->F2
, SWAP_INTLV_REG
, &swap_reg
);
1785 if (!(swap_reg
& 0x1))
1788 swap_base
= (swap_reg
>> 3) & 0x7f;
1789 swap_limit
= (swap_reg
>> 11) & 0x7f;
1790 rgn_size
= (swap_reg
>> 20) & 0x7f;
1791 tmp_addr
= sys_addr
>> 27;
1793 if (!(sys_addr
>> 34) &&
1794 (((tmp_addr
>= swap_base
) &&
1795 (tmp_addr
<= swap_limit
)) ||
1796 (tmp_addr
< rgn_size
)))
1797 return sys_addr
^ (u64
)swap_base
<< 27;
1802 /* For a given @dram_range, check if @sys_addr falls within it. */
1803 static int f1x_match_to_this_node(struct amd64_pvt
*pvt
, unsigned range
,
1804 u64 sys_addr
, int *chan_sel
)
1806 int cs_found
= -EINVAL
;
1810 bool high_range
= false;
1812 u8 node_id
= dram_dst_node(pvt
, range
);
1813 u8 intlv_en
= dram_intlv_en(pvt
, range
);
1814 u32 intlv_sel
= dram_intlv_sel(pvt
, range
);
1816 edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
1817 range
, sys_addr
, get_dram_limit(pvt
, range
));
1819 if (dhar_valid(pvt
) &&
1820 dhar_base(pvt
) <= sys_addr
&&
1821 sys_addr
< BIT_64(32)) {
1822 amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
1827 if (intlv_en
&& (intlv_sel
!= ((sys_addr
>> 12) & intlv_en
)))
1830 sys_addr
= f1x_swap_interleaved_region(pvt
, sys_addr
);
1832 dct_sel_base
= dct_sel_baseaddr(pvt
);
1835 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
1836 * select between DCT0 and DCT1.
1838 if (dct_high_range_enabled(pvt
) &&
1839 !dct_ganging_enabled(pvt
) &&
1840 ((sys_addr
>> 27) >= (dct_sel_base
>> 11)))
1843 channel
= f1x_determine_channel(pvt
, sys_addr
, high_range
, intlv_en
);
1845 chan_addr
= f1x_get_norm_dct_addr(pvt
, range
, sys_addr
,
1846 high_range
, dct_sel_base
);
1848 /* Remove node interleaving, see F1x120 */
1850 chan_addr
= ((chan_addr
>> (12 + hweight8(intlv_en
))) << 12) |
1851 (chan_addr
& 0xfff);
1853 /* remove channel interleave */
1854 if (dct_interleave_enabled(pvt
) &&
1855 !dct_high_range_enabled(pvt
) &&
1856 !dct_ganging_enabled(pvt
)) {
1858 if (dct_sel_interleave_addr(pvt
) != 1) {
1859 if (dct_sel_interleave_addr(pvt
) == 0x3)
1861 chan_addr
= ((chan_addr
>> 10) << 9) |
1862 (chan_addr
& 0x1ff);
1864 /* A[6] or hash 6 */
1865 chan_addr
= ((chan_addr
>> 7) << 6) |
1869 chan_addr
= ((chan_addr
>> 13) << 12) |
1870 (chan_addr
& 0xfff);
1873 edac_dbg(1, " Normalized DCT addr: 0x%llx\n", chan_addr
);
1875 cs_found
= f1x_lookup_addr_in_dct(chan_addr
, node_id
, channel
);
1878 *chan_sel
= channel
;
1883 static int f15_m30h_match_to_this_node(struct amd64_pvt
*pvt
, unsigned range
,
1884 u64 sys_addr
, int *chan_sel
)
1886 int cs_found
= -EINVAL
;
1887 int num_dcts_intlv
= 0;
1888 u64 chan_addr
, chan_offset
;
1889 u64 dct_base
, dct_limit
;
1890 u32 dct_cont_base_reg
, dct_cont_limit_reg
, tmp
;
1891 u8 channel
, alias_channel
, leg_mmio_hole
, dct_sel
, dct_offset_en
;
1893 u64 dhar_offset
= f10_dhar_offset(pvt
);
1894 u8 intlv_addr
= dct_sel_interleave_addr(pvt
);
1895 u8 node_id
= dram_dst_node(pvt
, range
);
1896 u8 intlv_en
= dram_intlv_en(pvt
, range
);
1898 amd64_read_pci_cfg(pvt
->F1
, DRAM_CONT_BASE
, &dct_cont_base_reg
);
1899 amd64_read_pci_cfg(pvt
->F1
, DRAM_CONT_LIMIT
, &dct_cont_limit_reg
);
1901 dct_offset_en
= (u8
) ((dct_cont_base_reg
>> 3) & BIT(0));
1902 dct_sel
= (u8
) ((dct_cont_base_reg
>> 4) & 0x7);
1904 edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
1905 range
, sys_addr
, get_dram_limit(pvt
, range
));
1907 if (!(get_dram_base(pvt
, range
) <= sys_addr
) &&
1908 !(get_dram_limit(pvt
, range
) >= sys_addr
))
1911 if (dhar_valid(pvt
) &&
1912 dhar_base(pvt
) <= sys_addr
&&
1913 sys_addr
< BIT_64(32)) {
1914 amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
1919 /* Verify sys_addr is within DCT Range. */
1920 dct_base
= (u64
) dct_sel_baseaddr(pvt
);
1921 dct_limit
= (dct_cont_limit_reg
>> 11) & 0x1FFF;
1923 if (!(dct_cont_base_reg
& BIT(0)) &&
1924 !(dct_base
<= (sys_addr
>> 27) &&
1925 dct_limit
>= (sys_addr
>> 27)))
1928 /* Verify number of dct's that participate in channel interleaving. */
1929 num_dcts_intlv
= (int) hweight8(intlv_en
);
1931 if (!(num_dcts_intlv
% 2 == 0) || (num_dcts_intlv
> 4))
1934 if (pvt
->model
>= 0x60)
1935 channel
= f1x_determine_channel(pvt
, sys_addr
, false, intlv_en
);
1937 channel
= f15_m30h_determine_channel(pvt
, sys_addr
, intlv_en
,
1938 num_dcts_intlv
, dct_sel
);
1940 /* Verify we stay within the MAX number of channels allowed */
1944 leg_mmio_hole
= (u8
) (dct_cont_base_reg
>> 1 & BIT(0));
1946 /* Get normalized DCT addr */
1947 if (leg_mmio_hole
&& (sys_addr
>= BIT_64(32)))
1948 chan_offset
= dhar_offset
;
1950 chan_offset
= dct_base
<< 27;
1952 chan_addr
= sys_addr
- chan_offset
;
1954 /* remove channel interleave */
1955 if (num_dcts_intlv
== 2) {
1956 if (intlv_addr
== 0x4)
1957 chan_addr
= ((chan_addr
>> 9) << 8) |
1959 else if (intlv_addr
== 0x5)
1960 chan_addr
= ((chan_addr
>> 10) << 9) |
1961 (chan_addr
& 0x1ff);
1965 } else if (num_dcts_intlv
== 4) {
1966 if (intlv_addr
== 0x4)
1967 chan_addr
= ((chan_addr
>> 10) << 8) |
1969 else if (intlv_addr
== 0x5)
1970 chan_addr
= ((chan_addr
>> 11) << 9) |
1971 (chan_addr
& 0x1ff);
1976 if (dct_offset_en
) {
1977 amd64_read_pci_cfg(pvt
->F1
,
1978 DRAM_CONT_HIGH_OFF
+ (int) channel
* 4,
1980 chan_addr
+= (u64
) ((tmp
>> 11) & 0xfff) << 27;
1983 f15h_select_dct(pvt
, channel
);
1985 edac_dbg(1, " Normalized DCT addr: 0x%llx\n", chan_addr
);
1989 * if channel = 3, then alias it to 1. This is because, in F15 M30h,
1990 * there is support for 4 DCT's, but only 2 are currently functional.
1991 * They are DCT0 and DCT3. But we have read all registers of DCT3 into
1992 * pvt->csels[1]. So we need to use '1' here to get correct info.
1993 * Refer F15 M30h BKDG Section 2.10 and 2.10.3 for clarifications.
1995 alias_channel
= (channel
== 3) ? 1 : channel
;
1997 cs_found
= f1x_lookup_addr_in_dct(chan_addr
, node_id
, alias_channel
);
2000 *chan_sel
= alias_channel
;
2005 static int f1x_translate_sysaddr_to_cs(struct amd64_pvt
*pvt
,
2009 int cs_found
= -EINVAL
;
2012 for (range
= 0; range
< DRAM_RANGES
; range
++) {
2013 if (!dram_rw(pvt
, range
))
2016 if (pvt
->fam
== 0x15 && pvt
->model
>= 0x30)
2017 cs_found
= f15_m30h_match_to_this_node(pvt
, range
,
2021 else if ((get_dram_base(pvt
, range
) <= sys_addr
) &&
2022 (get_dram_limit(pvt
, range
) >= sys_addr
)) {
2023 cs_found
= f1x_match_to_this_node(pvt
, range
,
2024 sys_addr
, chan_sel
);
2033 * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
2034 * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
2036 * The @sys_addr is usually an error address received from the hardware
2039 static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info
*mci
, u64 sys_addr
,
2040 struct err_info
*err
)
2042 struct amd64_pvt
*pvt
= mci
->pvt_info
;
2044 error_address_to_page_and_offset(sys_addr
, err
);
2046 err
->csrow
= f1x_translate_sysaddr_to_cs(pvt
, sys_addr
, &err
->channel
);
2047 if (err
->csrow
< 0) {
2048 err
->err_code
= ERR_CSROW
;
2053 * We need the syndromes for channel detection only when we're
2054 * ganged. Otherwise @chan should already contain the channel at
2057 if (dct_ganging_enabled(pvt
))
2058 err
->channel
= get_channel_from_ecc_syndrome(mci
, err
->syndrome
);
2062 * debug routine to display the memory sizes of all logical DIMMs and its
2065 static void debug_display_dimm_sizes(struct amd64_pvt
*pvt
, u8 ctrl
)
2067 int dimm
, size0
, size1
;
2068 u32
*dcsb
= ctrl
? pvt
->csels
[1].csbases
: pvt
->csels
[0].csbases
;
2069 u32 dbam
= ctrl
? pvt
->dbam1
: pvt
->dbam0
;
2071 if (pvt
->fam
== 0xf) {
2072 /* K8 families < revF not supported yet */
2073 if (pvt
->ext_model
< K8_REV_F
)
2079 if (pvt
->fam
== 0x10) {
2080 dbam
= (ctrl
&& !dct_ganging_enabled(pvt
)) ? pvt
->dbam1
2082 dcsb
= (ctrl
&& !dct_ganging_enabled(pvt
)) ?
2083 pvt
->csels
[1].csbases
:
2084 pvt
->csels
[0].csbases
;
2087 dcsb
= pvt
->csels
[1].csbases
;
2089 edac_dbg(1, "F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
2092 edac_printk(KERN_DEBUG
, EDAC_MC
, "DCT%d chip selects:\n", ctrl
);
2094 /* Dump memory sizes for DIMM and its CSROWs */
2095 for (dimm
= 0; dimm
< 4; dimm
++) {
2098 if (dcsb
[dimm
*2] & DCSB_CS_ENABLE
)
2100 * For F15m60h, we need multiplier for LRDIMM cs_size
2101 * calculation. We pass dimm value to the dbam_to_cs
2102 * mapper so we can find the multiplier from the
2103 * corresponding DCSM.
2105 size0
= pvt
->ops
->dbam_to_cs(pvt
, ctrl
,
2106 DBAM_DIMM(dimm
, dbam
),
2110 if (dcsb
[dimm
*2 + 1] & DCSB_CS_ENABLE
)
2111 size1
= pvt
->ops
->dbam_to_cs(pvt
, ctrl
,
2112 DBAM_DIMM(dimm
, dbam
),
2115 amd64_info(EDAC_MC
": %d: %5dMB %d: %5dMB\n",
2117 dimm
* 2 + 1, size1
);
2121 static struct amd64_family_type family_types
[] = {
2124 .f1_id
= PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP
,
2125 .f2_id
= PCI_DEVICE_ID_AMD_K8_NB_MEMCTL
,
2127 .early_channel_count
= k8_early_channel_count
,
2128 .map_sysaddr_to_csrow
= k8_map_sysaddr_to_csrow
,
2129 .dbam_to_cs
= k8_dbam_to_chip_select
,
2134 .f1_id
= PCI_DEVICE_ID_AMD_10H_NB_MAP
,
2135 .f2_id
= PCI_DEVICE_ID_AMD_10H_NB_DRAM
,
2137 .early_channel_count
= f1x_early_channel_count
,
2138 .map_sysaddr_to_csrow
= f1x_map_sysaddr_to_csrow
,
2139 .dbam_to_cs
= f10_dbam_to_chip_select
,
2144 .f1_id
= PCI_DEVICE_ID_AMD_15H_NB_F1
,
2145 .f2_id
= PCI_DEVICE_ID_AMD_15H_NB_F2
,
2147 .early_channel_count
= f1x_early_channel_count
,
2148 .map_sysaddr_to_csrow
= f1x_map_sysaddr_to_csrow
,
2149 .dbam_to_cs
= f15_dbam_to_chip_select
,
2153 .ctl_name
= "F15h_M30h",
2154 .f1_id
= PCI_DEVICE_ID_AMD_15H_M30H_NB_F1
,
2155 .f2_id
= PCI_DEVICE_ID_AMD_15H_M30H_NB_F2
,
2157 .early_channel_count
= f1x_early_channel_count
,
2158 .map_sysaddr_to_csrow
= f1x_map_sysaddr_to_csrow
,
2159 .dbam_to_cs
= f16_dbam_to_chip_select
,
2163 .ctl_name
= "F15h_M60h",
2164 .f1_id
= PCI_DEVICE_ID_AMD_15H_M60H_NB_F1
,
2165 .f2_id
= PCI_DEVICE_ID_AMD_15H_M60H_NB_F2
,
2167 .early_channel_count
= f1x_early_channel_count
,
2168 .map_sysaddr_to_csrow
= f1x_map_sysaddr_to_csrow
,
2169 .dbam_to_cs
= f15_m60h_dbam_to_chip_select
,
2174 .f1_id
= PCI_DEVICE_ID_AMD_16H_NB_F1
,
2175 .f2_id
= PCI_DEVICE_ID_AMD_16H_NB_F2
,
2177 .early_channel_count
= f1x_early_channel_count
,
2178 .map_sysaddr_to_csrow
= f1x_map_sysaddr_to_csrow
,
2179 .dbam_to_cs
= f16_dbam_to_chip_select
,
2183 .ctl_name
= "F16h_M30h",
2184 .f1_id
= PCI_DEVICE_ID_AMD_16H_M30H_NB_F1
,
2185 .f2_id
= PCI_DEVICE_ID_AMD_16H_M30H_NB_F2
,
2187 .early_channel_count
= f1x_early_channel_count
,
2188 .map_sysaddr_to_csrow
= f1x_map_sysaddr_to_csrow
,
2189 .dbam_to_cs
= f16_dbam_to_chip_select
,
2194 .f0_id
= PCI_DEVICE_ID_AMD_17H_DF_F0
,
2195 .f6_id
= PCI_DEVICE_ID_AMD_17H_DF_F6
,
2197 .early_channel_count
= f17_early_channel_count
,
2198 .dbam_to_cs
= f17_base_addr_to_cs_size
,
2204 * These are tables of eigenvectors (one per line) which can be used for the
2205 * construction of the syndrome tables. The modified syndrome search algorithm
2206 * uses those to find the symbol in error and thus the DIMM.
2208 * Algorithm courtesy of Ross LaFetra from AMD.
2210 static const u16 x4_vectors
[] = {
2211 0x2f57, 0x1afe, 0x66cc, 0xdd88,
2212 0x11eb, 0x3396, 0x7f4c, 0xeac8,
2213 0x0001, 0x0002, 0x0004, 0x0008,
2214 0x1013, 0x3032, 0x4044, 0x8088,
2215 0x106b, 0x30d6, 0x70fc, 0xe0a8,
2216 0x4857, 0xc4fe, 0x13cc, 0x3288,
2217 0x1ac5, 0x2f4a, 0x5394, 0xa1e8,
2218 0x1f39, 0x251e, 0xbd6c, 0x6bd8,
2219 0x15c1, 0x2a42, 0x89ac, 0x4758,
2220 0x2b03, 0x1602, 0x4f0c, 0xca08,
2221 0x1f07, 0x3a0e, 0x6b04, 0xbd08,
2222 0x8ba7, 0x465e, 0x244c, 0x1cc8,
2223 0x2b87, 0x164e, 0x642c, 0xdc18,
2224 0x40b9, 0x80de, 0x1094, 0x20e8,
2225 0x27db, 0x1eb6, 0x9dac, 0x7b58,
2226 0x11c1, 0x2242, 0x84ac, 0x4c58,
2227 0x1be5, 0x2d7a, 0x5e34, 0xa718,
2228 0x4b39, 0x8d1e, 0x14b4, 0x28d8,
2229 0x4c97, 0xc87e, 0x11fc, 0x33a8,
2230 0x8e97, 0x497e, 0x2ffc, 0x1aa8,
2231 0x16b3, 0x3d62, 0x4f34, 0x8518,
2232 0x1e2f, 0x391a, 0x5cac, 0xf858,
2233 0x1d9f, 0x3b7a, 0x572c, 0xfe18,
2234 0x15f5, 0x2a5a, 0x5264, 0xa3b8,
2235 0x1dbb, 0x3b66, 0x715c, 0xe3f8,
2236 0x4397, 0xc27e, 0x17fc, 0x3ea8,
2237 0x1617, 0x3d3e, 0x6464, 0xb8b8,
2238 0x23ff, 0x12aa, 0xab6c, 0x56d8,
2239 0x2dfb, 0x1ba6, 0x913c, 0x7328,
2240 0x185d, 0x2ca6, 0x7914, 0x9e28,
2241 0x171b, 0x3e36, 0x7d7c, 0xebe8,
2242 0x4199, 0x82ee, 0x19f4, 0x2e58,
2243 0x4807, 0xc40e, 0x130c, 0x3208,
2244 0x1905, 0x2e0a, 0x5804, 0xac08,
2245 0x213f, 0x132a, 0xadfc, 0x5ba8,
2246 0x19a9, 0x2efe, 0xb5cc, 0x6f88,
static const u16 x8_vectors[] = {
	0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
	0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
	0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
	0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80,
	0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780,
	0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080,
	0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080,
	0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080,
	0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80,
	0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580,
	0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880,
	0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280,
	0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180,
	0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580,
	0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280,
	0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180,
	0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080,
	0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
	0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
};
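
/*
 * Sketch of the search in decode_syndrome() below: each group of v_dim
 * vectors spans the syndromes one ECC symbol can produce. The loop tries to
 * cancel the observed syndrome by XOR-ing in eigenvectors of a group; the
 * group index (err_sym) whose combination reaches zero identifies the symbol
 * in error.
 */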
static int decode_syndrome(u16 syndrome, const u16 *vectors, unsigned num_vecs,
			   unsigned v_dim)
{
	unsigned int i, err_sym;

	for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
		unsigned s = syndrome;
		unsigned v_idx = err_sym * v_dim;
		unsigned v_end = (err_sym + 1) * v_dim;

		/* walk over all 16 bits of the syndrome */
		for (i = 1; i < (1U << 16); i <<= 1) {

			/* if bit is set in that eigenvector... */
			if (v_idx < v_end && vectors[v_idx] & i) {
				u16 ev_comp = vectors[v_idx++];

				/* ... and bit set in the modified syndrome, */
				if (s & i) {
					/* remove it. */
					s ^= ev_comp;

					if (!s)
						return err_sym;
				}

			} else if (s & i)
				/* can't get to zero, move to next symbol */
				break;
		}
	}

	edac_dbg(0, "syndrome(%x) not found\n", syndrome);
	return -1;
}
static int map_err_sym_to_channel(int err_sym, int sym_size)
{
	if (sym_size == 4)
		switch (err_sym) {
		case 0x20:
		case 0x21:
			return 0;
		case 0x22:
		case 0x23:
			return 1;
		default:
			return err_sym >> 4;
		}
	/* x8 symbols */
	else
		switch (err_sym) {
		/* imaginary bits not in a DIMM */
		case 0x10:
			WARN(1, KERN_ERR "Invalid error symbol: 0x%x\n",
			     err_sym);
			return -1;
		case 0x11:
			return 0;
		case 0x12:
			return 1;
		default:
			return err_sym >> 3;
		}
	return -1;
}
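
/*
 * Reading aid (not from the BKDG text): with x4 symbols the channel of a
 * data symbol falls out of err_sym >> 4, with x8 symbols out of err_sym >> 3;
 * the explicit cases above cover the check symbols that do not follow the
 * plain shift rule.
 */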
static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	int err_sym = -1;

	if (pvt->ecc_sym_sz == 8)
		err_sym = decode_syndrome(syndrome, x8_vectors,
					  ARRAY_SIZE(x8_vectors),
					  pvt->ecc_sym_sz);
	else if (pvt->ecc_sym_sz == 4)
		err_sym = decode_syndrome(syndrome, x4_vectors,
					  ARRAY_SIZE(x4_vectors),
					  pvt->ecc_sym_sz);
	else {
		amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz);
		return err_sym;
	}

	return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz);
}
static void __log_ecc_error(struct mem_ctl_info *mci, struct err_info *err,
			    u8 ecc_type)
{
	enum hw_event_mc_err_type err_type;
	const char *string;

	if (ecc_type == 2)
		err_type = HW_EVENT_ERR_CORRECTED;
	else if (ecc_type == 1)
		err_type = HW_EVENT_ERR_UNCORRECTED;
	else if (ecc_type == 3)
		err_type = HW_EVENT_ERR_DEFERRED;
	else {
		WARN(1, "Something is rotten in the state of Denmark.\n");
		return;
	}

	switch (err->err_code) {
	case DECODE_OK:
		string = "";
		break;
	case ERR_NODE:
		string = "Failed to map error addr to a node";
		break;
	case ERR_CSROW:
		string = "Failed to map error addr to a csrow";
		break;
	case ERR_CHANNEL:
		string = "Unknown syndrome - possible error reporting race";
		break;
	case ERR_SYND:
		string = "MCA_SYND not valid - unknown syndrome and csrow";
		break;
	case ERR_NORM_ADDR:
		string = "Cannot decode normalized address";
		break;
	default:
		string = "WTF error";
		break;
	}

	edac_mc_handle_error(err_type, mci, 1,
			     err->page, err->offset, err->syndrome,
			     err->csrow, err->channel, -1,
			     string, "");
}
static inline void decode_bus_error(int node_id, struct mce *m)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;
	u8 ecc_type = (m->status >> 45) & 0x3;
	u8 xec = XEC(m->status, 0x1f);
	u16 ec = EC(m->status);
	u64 sys_addr;
	struct err_info err;

	mci = edac_mc_find(node_id);
	if (!mci)
		return;

	pvt = mci->pvt_info;

	/* Bail out early if this was an 'observed' error */
	if (PP(ec) == NBSL_PP_OBS)
		return;

	/* Do only ECC errors */
	if (xec && xec != F10_NBSL_EXT_ERR_ECC)
		return;

	memset(&err, 0, sizeof(err));

	sys_addr = get_error_address(pvt, m);

	if (ecc_type == 2)
		err.syndrome = extract_syndrome(m->status);

	pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, &err);

	__log_ecc_error(mci, &err, ecc_type);
}
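
/*
 * Note for readers: MCA_STATUS bits 46:45 carry the ECC error type decoded
 * above and in decode_umc_error() below (1 = uncorrected, 2 = corrected),
 * hence the shared "(m->status >> 45) & 0x3" idiom.
 */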
/*
 * To find the UMC channel represented by this bank we need to match on its
 * instance_id. The instance_id of a bank is held in the lower 32 bits of its
 * IPID.
 */
static int find_umc_channel(struct amd64_pvt *pvt, struct mce *m)
{
	u32 umc_instance_id[] = {0x50f00, 0x150f00};
	u32 instance_id = m->ipid & GENMASK(31, 0);
	int i, channel = -1;

	for (i = 0; i < ARRAY_SIZE(umc_instance_id); i++)
		if (umc_instance_id[i] == instance_id)
			channel = i;

	return channel;
}
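
/*
 * Aside: the two hard-coded instance IDs correspond to the two UMCs handled
 * by this driver (NUM_UMCS); a bank whose MCA_IPID matches neither leaves
 * channel at -1, which decode_umc_error() then reports as ERR_CHANNEL.
 */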
static void decode_umc_error(int node_id, struct mce *m)
{
	u8 ecc_type = (m->status >> 45) & 0x3;
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;
	struct err_info err;
	u64 sys_addr;

	mci = edac_mc_find(node_id);
	if (!mci)
		return;

	pvt = mci->pvt_info;

	memset(&err, 0, sizeof(err));

	if (m->status & MCI_STATUS_DEFERRED)
		ecc_type = 3;

	err.channel = find_umc_channel(pvt, m);
	if (err.channel < 0) {
		err.err_code = ERR_CHANNEL;
		goto log_error;
	}

	if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) {
		err.err_code = ERR_NORM_ADDR;
		goto log_error;
	}

	error_address_to_page_and_offset(sys_addr, &err);

	if (!(m->status & MCI_STATUS_SYNDV)) {
		err.err_code = ERR_SYND;
		goto log_error;
	}

	if (ecc_type == 2) {
		u8 length = (m->synd >> 18) & 0x3f;

		if (length)
			err.syndrome = (m->synd >> 32) & GENMASK(length - 1, 0);
		else
			err.err_code = ERR_CHANNEL;
	}

	err.csrow = m->synd & 0x7;

log_error:
	__log_ecc_error(mci, &err, ecc_type);
}
/*
 * Use pvt->F3 which contains the F3 CPU PCI device to get the related
 * F1 (AddrMap) and F2 (Dct) devices. Return negative value on error.
 * Reserve F0 and F6 on systems with a UMC.
 */
static int
reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 pci_id1, u16 pci_id2)
{
	if (pvt->umc) {
		pvt->F0 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
		if (!pvt->F0) {
			amd64_err("error F0 device not found: vendor %x device 0x%x (broken BIOS?)\n",
				  PCI_VENDOR_ID_AMD, pci_id1);
			return -ENODEV;
		}

		pvt->F6 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3);
		if (!pvt->F6) {
			pci_dev_put(pvt->F0);
			pvt->F0 = NULL;

			amd64_err("error F6 device not found: vendor %x device 0x%x (broken BIOS?)\n",
				  PCI_VENDOR_ID_AMD, pci_id2);
			return -ENODEV;
		}

		edac_dbg(1, "F0: %s\n", pci_name(pvt->F0));
		edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
		edac_dbg(1, "F6: %s\n", pci_name(pvt->F6));

		return 0;
	}

	/* Reserve the ADDRESS MAP Device */
	pvt->F1 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
	if (!pvt->F1) {
		amd64_err("error address map device not found: vendor %x device 0x%x (broken BIOS?)\n",
			  PCI_VENDOR_ID_AMD, pci_id1);
		return -ENODEV;
	}

	/* Reserve the DCT Device */
	pvt->F2 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3);
	if (!pvt->F2) {
		pci_dev_put(pvt->F1);
		pvt->F1 = NULL;

		amd64_err("error F2 device not found: vendor %x device 0x%x (broken BIOS?)\n",
			  PCI_VENDOR_ID_AMD, pci_id2);
		return -ENODEV;
	}

	edac_dbg(1, "F1: %s\n", pci_name(pvt->F1));
	edac_dbg(1, "F2: %s\n", pci_name(pvt->F2));
	edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));

	return 0;
}
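
/*
 * Summary: pre-Zen families expose the address map and DCT state through PCI
 * functions F1/F2 of the northbridge, while family 17h keeps it in F0/F6 of
 * the data fabric device, so this helper reserves a different pair of
 * siblings depending on whether pvt->umc is populated.
 */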
static void free_mc_sibling_devs(struct amd64_pvt *pvt)
{
	if (pvt->umc) {
		pci_dev_put(pvt->F0);
		pci_dev_put(pvt->F6);
	} else {
		pci_dev_put(pvt->F1);
		pci_dev_put(pvt->F2);
	}
}
static void determine_ecc_sym_sz(struct amd64_pvt *pvt)
{
	pvt->ecc_sym_sz = 4;

	if (pvt->umc) {
		u8 i;

		for (i = 0; i < NUM_UMCS; i++) {
			/* Check enabled channels only: */
			if ((pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) &&
			    (pvt->umc[i].ecc_ctrl & BIT(7))) {
				pvt->ecc_sym_sz = 8;
				break;
			}
		}

		return;
	}

	if (pvt->fam >= 0x10) {
		u32 tmp;

		amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
		/* F16h has only DCT0, so no need to read dbam1. */
		if (pvt->fam != 0x16)
			amd64_read_dct_pci_cfg(pvt, 1, DBAM0, &pvt->dbam1);

		/* F10h, revD and later can do x8 ECC too. */
		if ((pvt->fam > 0x10 || pvt->model > 7) && tmp & BIT(25))
			pvt->ecc_sym_sz = 8;
	}
}
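
/*
 * Reminder: ecc_sym_sz selects which eigenvector table is used by
 * get_channel_from_ecc_syndrome() above, x4_vectors for 4-bit symbols and
 * x8_vectors for 8-bit ones.
 */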
/*
 * Retrieve the hardware registers of the memory controller.
 */
static void __read_mc_regs_df(struct amd64_pvt *pvt)
{
	u8 nid = pvt->mc_node_id;
	struct amd64_umc *umc;
	u32 i, umc_base;

	/* Read registers from each UMC */
	for (i = 0; i < NUM_UMCS; i++) {

		umc_base = get_umc_base(i);
		umc = &pvt->umc[i];

		amd_smn_read(nid, umc_base + UMCCH_DIMM_CFG, &umc->dimm_cfg);
		amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &umc->umc_cfg);
		amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &umc->sdp_ctrl);
		amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &umc->ecc_ctrl);
		amd_smn_read(nid, umc_base + UMCCH_UMC_CAP_HI, &umc->umc_cap_hi);
	}
}
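
/*
 * Unlike the PCI config accessors used for older families, the UMC registers
 * of family 17h sit behind the System Management Network, hence the
 * amd_smn_read() calls keyed on node id plus a per-UMC base offset.
 */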
/*
 * Retrieve the hardware registers of the memory controller (this includes the
 * 'Address Map' and 'Misc' device regs)
 */
static void read_mc_regs(struct amd64_pvt *pvt)
{
	unsigned int range;
	u64 msr_val;

	/*
	 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
	 * those are Read-As-Zero.
	 */
	rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
	edac_dbg(0, "  TOP_MEM:  0x%016llx\n", pvt->top_mem);

	/* Check first whether TOP_MEM2 is enabled: */
	rdmsrl(MSR_K8_SYSCFG, msr_val);
	if (msr_val & BIT(21)) {
		rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
		edac_dbg(0, "  TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
	} else {
		edac_dbg(0, "  TOP_MEM2 disabled\n");
	}

	if (pvt->umc) {
		__read_mc_regs_df(pvt);
		amd64_read_pci_cfg(pvt->F0, DF_DHAR, &pvt->dhar);

		goto skip;
	}

	amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);

	read_dram_ctl_register(pvt);

	for (range = 0; range < DRAM_RANGES; range++) {
		u8 rw;

		/* read settings for this DRAM range */
		read_dram_base_limit_regs(pvt, range);

		rw = dram_rw(pvt, range);
		if (!rw)
			continue;

		edac_dbg(1, "  DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
			 range,
			 get_dram_base(pvt, range),
			 get_dram_limit(pvt, range));

		edac_dbg(1, "   IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
			 dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
			 (rw & 0x1) ? "R" : "-",
			 (rw & 0x2) ? "W" : "-",
			 dram_intlv_sel(pvt, range),
			 dram_dst_node(pvt, range));
	}

	amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
	amd64_read_dct_pci_cfg(pvt, 0, DBAM0, &pvt->dbam0);

	amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);

	amd64_read_dct_pci_cfg(pvt, 0, DCLR0, &pvt->dclr0);
	amd64_read_dct_pci_cfg(pvt, 0, DCHR0, &pvt->dchr0);

	if (!dct_ganging_enabled(pvt)) {
		amd64_read_dct_pci_cfg(pvt, 1, DCLR0, &pvt->dclr1);
		amd64_read_dct_pci_cfg(pvt, 1, DCHR0, &pvt->dchr1);
	}

skip:
	read_dct_base_mask(pvt);

	determine_memory_type(pvt);
	edac_dbg(1, "  DIMM type: %s\n", edac_mem_types[pvt->dram_type]);

	determine_ecc_sym_sz(pvt);

	dump_misc_regs(pvt);
}
/*
 * NOTE: CPU Revision Dependent code
 *
 * Input:
 *	@csrow_nr ChipSelect Row Number (0..NUM_CHIPSELECTS-1)
 *	k8 private pointer to -->
 *			DRAM Bank Address mapping register
 *			node_id
 *			DCL register where dual_channel_active is
 *
 * The DBAM register consists of 4 sets of 4 bits each, defined as follows:
 *
 * Bits:	CSROWs
 * 0-3		CSROWs 0 and 1
 * 4-7		CSROWs 2 and 3
 * 8-11		CSROWs 4 and 5
 * 12-15	CSROWs 6 and 7
 *
 * Values range from: 0 to 15
 * The meaning of the values depends on CPU revision and dual-channel state;
 * see the relevant BKDG for more info.
 *
 * The memory controller provides for a total of only 8 CSROWs in its current
 * architecture. Each "pair" of CSROWs normally represents just one DIMM in
 * single channel or two (2) DIMMs in dual channel mode.
 *
 * The following code logic collapses the various tables for CSROW based on CPU
 * revision.
 *
 * Returns:
 *	The number of PAGE_SIZE pages on the specified CSROW number it
 *	encompasses
 */
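
/*
 * Worked example (illustrative): for csrow_nr = 5, "csrow_nr / 2 * 4" is
 * 2 * 4 = 8 in integer math, i.e. DBAM bits 8-11 -- the field covering
 * CSROWs 4 and 5 -- which is exactly what DBAM_DIMM(csrow_nr / 2, dbam)
 * extracts below.
 */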
static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
{
	u32 cs_mode, nr_pages;
	u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;

	/*
	 * The math on this doesn't look right on the surface because x/2*4 can
	 * be simplified to x*2 but this expression makes use of the fact that
	 * it is integral math where 1/2=0. This intermediate value becomes the
	 * number of bits to shift the DBAM register to extract the proper CSROW
	 * field.
	 */
	cs_mode = DBAM_DIMM(csrow_nr / 2, dbam);

	nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, (csrow_nr / 2))
							   << (20 - PAGE_SHIFT);

	edac_dbg(0, "csrow: %d, channel: %d, DBAM idx: %d\n",
		 csrow_nr, dct, cs_mode);
	edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);

	return nr_pages;
}
/*
 * Initialize the array of csrow attribute instances, based on the values
 * from pci config hardware registers.
 */
static int init_csrows(struct mem_ctl_info *mci)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	enum edac_type edac_mode = EDAC_NONE;
	struct csrow_info *csrow;
	struct dimm_info *dimm;
	int i, j, empty = 1;
	int nr_pages = 0;
	u32 val;

	if (!pvt->umc) {
		amd64_read_pci_cfg(pvt->F3, NBCFG, &val);

		pvt->nbcfg = val;

		edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
			 pvt->mc_node_id, val,
			 !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
	}

	/*
	 * We iterate over DCT0 here but we look at DCT1 in parallel, if needed.
	 */
	for_each_chip_select(i, 0, pvt) {
		bool row_dct0 = !!csrow_enabled(i, 0, pvt);
		bool row_dct1 = false;

		/* K8 has only one DCT */
		if (pvt->fam != 0xf)
			row_dct1 = !!csrow_enabled(i, 1, pvt);

		if (!row_dct0 && !row_dct1)
			continue;

		csrow = mci->csrows[i];
		empty = 0;

		edac_dbg(1, "MC node: %d, csrow: %d\n",
			 pvt->mc_node_id, i);

		if (row_dct0) {
			nr_pages = get_csrow_nr_pages(pvt, 0, i);
			csrow->channels[0]->dimm->nr_pages = nr_pages;
		}

		/* K8 has only one DCT */
		if (pvt->fam != 0xf && row_dct1) {
			int row_dct1_pages = get_csrow_nr_pages(pvt, 1, i);

			csrow->channels[1]->dimm->nr_pages = row_dct1_pages;
			nr_pages += row_dct1_pages;
		}

		edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages);

		/* Determine DIMM ECC mode: */
		if (pvt->umc) {
			if (mci->edac_ctl_cap & EDAC_FLAG_S4ECD4ED)
				edac_mode = EDAC_S4ECD4ED;
			else if (mci->edac_ctl_cap & EDAC_FLAG_SECDED)
				edac_mode = EDAC_SECDED;

		} else if (pvt->nbcfg & NBCFG_ECC_ENABLE) {
			edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL)
					? EDAC_S4ECD4ED
					: EDAC_SECDED;
		}

		for (j = 0; j < pvt->channel_count; j++) {
			dimm = csrow->channels[j]->dimm;
			dimm->mtype = pvt->dram_type;
			dimm->edac_mode = edac_mode;
		}
	}

	return empty;
}
/* get all cores on this DCT */
static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, u16 nid)
{
	int cpu;

	for_each_online_cpu(cpu)
		if (amd_get_nb_id(cpu) == nid)
			cpumask_set_cpu(cpu, mask);
}
/* check MCG_CTL on all the cpus on this node */
static bool nb_mce_bank_enabled_on_node(u16 nid)
{
	cpumask_var_t mask;
	int cpu, nbe;
	bool ret = false;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
		amd64_warn("%s: Error allocating mask\n", __func__);
		return false;
	}

	get_cpus_on_this_dct_cpumask(mask, nid);

	rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);

	for_each_cpu(cpu, mask) {
		struct msr *reg = per_cpu_ptr(msrs, cpu);
		nbe = reg->l & MSR_MCGCTL_NBE;

		edac_dbg(0, "core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
			 cpu, reg->q,
			 (nbe ? "enabled" : "disabled"));

		if (!nbe)
			goto out;
	}
	ret = true;

out:
	free_cpumask_var(mask);
	return ret;
}
static int toggle_ecc_err_reporting(struct ecc_settings *s, u16 nid, bool on)
{
	cpumask_var_t cmask;
	int cpu;

	if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
		amd64_warn("%s: error allocating mask\n", __func__);
		return -ENOMEM;
	}

	get_cpus_on_this_dct_cpumask(cmask, nid);

	rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);

	for_each_cpu(cpu, cmask) {

		struct msr *reg = per_cpu_ptr(msrs, cpu);

		if (on) {
			if (reg->l & MSR_MCGCTL_NBE)
				s->flags.nb_mce_enable = 1;

			reg->l |= MSR_MCGCTL_NBE;
		} else {
			/*
			 * Turn off NB MCE reporting only when it was off before
			 */
			if (!s->flags.nb_mce_enable)
				reg->l &= ~MSR_MCGCTL_NBE;
		}
	}
	wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);

	free_cpumask_var(cmask);

	return 0;
}
static bool enable_ecc_error_reporting(struct ecc_settings *s, u16 nid,
				       struct pci_dev *F3)
{
	bool ret = true;
	u32 value, mask = 0x3;		/* UECC/CECC enable */

	if (toggle_ecc_err_reporting(s, nid, ON)) {
		amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
		return false;
	}

	amd64_read_pci_cfg(F3, NBCTL, &value);

	s->old_nbctl   = value & mask;
	s->nbctl_valid = true;

	value |= mask;
	amd64_write_pci_cfg(F3, NBCTL, value);

	amd64_read_pci_cfg(F3, NBCFG, &value);

	edac_dbg(0, "1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
		 nid, value, !!(value & NBCFG_ECC_ENABLE));

	if (!(value & NBCFG_ECC_ENABLE)) {
		amd64_warn("DRAM ECC disabled on this node, enabling...\n");

		s->flags.nb_ecc_prev = 0;

		/* Attempt to turn on DRAM ECC Enable */
		value |= NBCFG_ECC_ENABLE;
		amd64_write_pci_cfg(F3, NBCFG, value);

		amd64_read_pci_cfg(F3, NBCFG, &value);

		if (!(value & NBCFG_ECC_ENABLE)) {
			amd64_warn("Hardware rejected DRAM ECC enable, "
				   "check memory DIMM configuration.\n");
			ret = false;
		} else {
			amd64_info("Hardware accepted DRAM ECC Enable\n");
		}
	} else {
		s->flags.nb_ecc_prev = 1;
	}

	edac_dbg(0, "2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
		 nid, value, !!(value & NBCFG_ECC_ENABLE));

	return ret;
}
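
/*
 * Flow reminder: this path only runs when the user set ecc_enable_override
 * and ecc_enabled() said no; the original NBCTL bits are stashed in *s so
 * restore_ecc_error_reporting() can undo the override on probe failure or
 * module removal.
 */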
static void restore_ecc_error_reporting(struct ecc_settings *s, u16 nid,
					struct pci_dev *F3)
{
	u32 value, mask = 0x3;		/* UECC/CECC enable */

	if (!s->nbctl_valid)
		return;

	amd64_read_pci_cfg(F3, NBCTL, &value);
	value &= ~mask;
	value |= s->old_nbctl;

	amd64_write_pci_cfg(F3, NBCTL, value);

	/* restore previous BIOS DRAM ECC "off" setting we force-enabled */
	if (!s->flags.nb_ecc_prev) {
		amd64_read_pci_cfg(F3, NBCFG, &value);
		value &= ~NBCFG_ECC_ENABLE;
		amd64_write_pci_cfg(F3, NBCFG, value);
	}

	/* restore the NB Enable MCGCTL bit */
	if (toggle_ecc_err_reporting(s, nid, OFF))
		amd64_warn("Error restoring NB MCGCTL settings!\n");
}
/*
 * EDAC requires that the BIOS have ECC enabled before
 * taking over the processing of ECC errors. A command line
 * option allows force-enabling hardware ECC later in
 * enable_ecc_error_reporting().
 */
static const char *ecc_msg =
	"ECC disabled in the BIOS or no ECC capability, module will not load.\n"
	" Either enable ECC checking or force module loading by setting "
	"'ecc_enable_override'.\n"
	" (Note that use of the override may cause unknown side effects.)\n";
static bool ecc_enabled(struct pci_dev *F3, u16 nid)
{
	bool nb_mce_en = false;
	u8 ecc_en = 0, i;
	u32 value;

	if (boot_cpu_data.x86 >= 0x17) {
		u8 umc_en_mask = 0, ecc_en_mask = 0;

		for (i = 0; i < NUM_UMCS; i++) {
			u32 base = get_umc_base(i);

			/* Only check enabled UMCs. */
			if (amd_smn_read(nid, base + UMCCH_SDP_CTRL, &value))
				continue;

			if (!(value & UMC_SDP_INIT))
				continue;

			umc_en_mask |= BIT(i);

			if (amd_smn_read(nid, base + UMCCH_UMC_CAP_HI, &value))
				continue;

			if (value & UMC_ECC_ENABLED)
				ecc_en_mask |= BIT(i);
		}

		/* Check whether at least one UMC is enabled: */
		if (umc_en_mask)
			ecc_en = umc_en_mask == ecc_en_mask;

		/* Assume UMC MCA banks are enabled. */
		nb_mce_en = true;
	} else {
		amd64_read_pci_cfg(F3, NBCFG, &value);

		ecc_en = !!(value & NBCFG_ECC_ENABLE);

		nb_mce_en = nb_mce_bank_enabled_on_node(nid);
		if (!nb_mce_en)
			amd64_notice("NB MCE bank disabled, set MSR 0x%08x[4] on node %d to enable.\n",
				     MSR_IA32_MCG_CTL, nid);
	}

	amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled"));

	if (!ecc_en || !nb_mce_en) {
		amd64_notice("%s", ecc_msg);
		return false;
	}
	return true;
}
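
/*
 * Reading aid: on family 17h, ecc_enabled() above only reports DRAM ECC as
 * enabled when every UMC that passed the UMC_SDP_INIT check also has
 * UMC_ECC_ENABLED set (umc_en_mask == ecc_en_mask).
 */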
static inline void
f17h_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
{
	u8 i, ecc_en = 1, cpk_en = 1;

	for (i = 0; i < NUM_UMCS; i++) {
		if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
			ecc_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_ENABLED);
			cpk_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_CHIPKILL_CAP);
		}
	}

	/* Set chipkill only if ECC is enabled: */
	if (ecc_en) {
		mci->edac_ctl_cap |= EDAC_FLAG_SECDED;

		if (cpk_en)
			mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
	}
}
static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
				 struct amd64_family_type *fam)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	mci->mtype_cap		= MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
	mci->edac_ctl_cap	= EDAC_FLAG_NONE;

	if (pvt->umc) {
		f17h_determine_edac_ctl_cap(mci, pvt);
	} else {
		if (pvt->nbcap & NBCAP_SECDED)
			mci->edac_ctl_cap |= EDAC_FLAG_SECDED;

		if (pvt->nbcap & NBCAP_CHIPKILL)
			mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
	}

	mci->edac_cap		= determine_edac_cap(pvt);
	mci->mod_name		= EDAC_MOD_STR;
	mci->mod_ver		= EDAC_AMD64_VERSION;
	mci->ctl_name		= fam->ctl_name;
	mci->dev_name		= pci_name(pvt->F3);
	mci->ctl_page_to_phys	= NULL;

	/* memory scrubber interface */
	mci->set_sdram_scrub_rate = set_scrub_rate;
	mci->get_sdram_scrub_rate = get_scrub_rate;
}
/*
 * returns a pointer to the family descriptor on success, NULL otherwise.
 */
static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
{
	struct amd64_family_type *fam_type = NULL;

	pvt->ext_model	= boot_cpu_data.x86_model >> 4;
	pvt->stepping	= boot_cpu_data.x86_mask;
	pvt->model	= boot_cpu_data.x86_model;
	pvt->fam	= boot_cpu_data.x86;

	switch (pvt->fam) {
	case 0xf:
		fam_type	= &family_types[K8_CPUS];
		pvt->ops	= &family_types[K8_CPUS].ops;
		break;

	case 0x10:
		fam_type	= &family_types[F10_CPUS];
		pvt->ops	= &family_types[F10_CPUS].ops;
		break;

	case 0x15:
		if (pvt->model == 0x30) {
			fam_type = &family_types[F15_M30H_CPUS];
			pvt->ops = &family_types[F15_M30H_CPUS].ops;
			break;
		} else if (pvt->model == 0x60) {
			fam_type = &family_types[F15_M60H_CPUS];
			pvt->ops = &family_types[F15_M60H_CPUS].ops;
			break;
		}

		fam_type	= &family_types[F15_CPUS];
		pvt->ops	= &family_types[F15_CPUS].ops;
		break;

	case 0x16:
		if (pvt->model == 0x30) {
			fam_type = &family_types[F16_M30H_CPUS];
			pvt->ops = &family_types[F16_M30H_CPUS].ops;
			break;
		}
		fam_type	= &family_types[F16_CPUS];
		pvt->ops	= &family_types[F16_CPUS].ops;
		break;

	case 0x17:
		fam_type	= &family_types[F17_CPUS];
		pvt->ops	= &family_types[F17_CPUS].ops;
		break;

	default:
		amd64_err("Unsupported family!\n");
		return NULL;
	}

	amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name,
		   (pvt->fam == 0xf ?
			(pvt->ext_model >= K8_REV_F ? "revF or later "
						    : "revE or earlier ")
			: ""), pvt->mc_node_id);
	return fam_type;
}
static const struct attribute_group *amd64_edac_attr_groups[] = {
#ifdef CONFIG_EDAC_DEBUG
	&amd64_edac_dbg_group,
#endif
#ifdef CONFIG_EDAC_AMD64_ERROR_INJECTION
	&amd64_edac_inj_group,
#endif
	NULL
};
static int init_one_instance(unsigned int nid)
{
	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
	struct amd64_family_type *fam_type = NULL;
	struct mem_ctl_info *mci = NULL;
	struct edac_mc_layer layers[2];
	struct amd64_pvt *pvt = NULL;
	u16 pci_id1, pci_id2;
	int err = 0, ret;

	ret = -ENOMEM;
	pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
	if (!pvt)
		goto err_ret;

	pvt->mc_node_id	= nid;
	pvt->F3 = F3;

	ret = -EINVAL;
	fam_type = per_family_init(pvt);
	if (!fam_type)
		goto err_free;

	if (pvt->fam >= 0x17) {
		pvt->umc = kcalloc(NUM_UMCS, sizeof(struct amd64_umc), GFP_KERNEL);
		if (!pvt->umc) {
			ret = -ENOMEM;
			goto err_free;
		}

		pci_id1 = fam_type->f0_id;
		pci_id2 = fam_type->f6_id;
	} else {
		pci_id1 = fam_type->f1_id;
		pci_id2 = fam_type->f2_id;
	}

	err = reserve_mc_sibling_devs(pvt, pci_id1, pci_id2);
	if (err)
		goto err_post_init;

	read_mc_regs(pvt);

	/*
	 * We need to determine how many memory channels there are. Then use
	 * that information for calculating the size of the dynamic instance
	 * tables in the 'mci' structure.
	 */
	ret = -EINVAL;
	pvt->channel_count = pvt->ops->early_channel_count(pvt);
	if (pvt->channel_count < 0)
		goto err_siblings;

	ret = -ENOMEM;
	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
	layers[0].size = pvt->csels[0].b_cnt;
	layers[0].is_virt_csrow = true;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;

	/*
	 * Always allocate two channels since we can have setups with DIMMs on
	 * only one channel. Also, this simplifies handling later for the price
	 * of a couple of KBs tops.
	 */
	layers[1].size = 2;
	layers[1].is_virt_csrow = false;

	mci = edac_mc_alloc(nid, ARRAY_SIZE(layers), layers, 0);
	if (!mci)
		goto err_siblings;

	mci->pvt_info = pvt;
	mci->pdev = &pvt->F3->dev;

	setup_mci_misc_attrs(mci, fam_type);

	if (init_csrows(mci))
		mci->edac_cap = EDAC_FLAG_NONE;

	ret = -ENODEV;
	if (edac_mc_add_mc_with_groups(mci, amd64_edac_attr_groups)) {
		edac_dbg(1, "failed edac_mc_add_mc()\n");
		goto err_add_mc;
	}

	/* register stuff with EDAC MCE */
	if (report_gart_errors)
		amd_report_gart_errors(true);

	if (pvt->umc)
		amd_register_ecc_decoder(decode_umc_error);
	else
		amd_register_ecc_decoder(decode_bus_error);

	return 0;

err_add_mc:
	edac_mc_free(mci);

err_siblings:
	free_mc_sibling_devs(pvt);

err_post_init:
	if (pvt->fam >= 0x17)
		kfree(pvt->umc);

err_free:
	kfree(pvt);

err_ret:
	return ret;
}
static int probe_one_instance(unsigned int nid)
{
	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
	struct ecc_settings *s;
	int ret;

	ret = -ENOMEM;
	s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL);
	if (!s)
		goto err_out;

	ecc_stngs[nid] = s;

	if (!ecc_enabled(F3, nid)) {
		ret = 0;

		if (!ecc_enable_override)
			goto err_enable;

		if (boot_cpu_data.x86 >= 0x17) {
			amd64_warn("Forcing ECC on is not recommended on newer systems. Please enable ECC in BIOS.");
			goto err_enable;
		} else
			amd64_warn("Forcing ECC on!\n");

		if (!enable_ecc_error_reporting(s, nid, F3))
			goto err_enable;
	}

	ret = init_one_instance(nid);
	if (ret < 0) {
		amd64_err("Error probing instance: %d\n", nid);

		if (boot_cpu_data.x86 < 0x17)
			restore_ecc_error_reporting(s, nid, F3);

		goto err_enable;
	}

	return ret;

err_enable:
	kfree(s);
	ecc_stngs[nid] = NULL;

err_out:
	return ret;
}
static void remove_one_instance(unsigned int nid)
{
	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
	struct ecc_settings *s = ecc_stngs[nid];
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;

	mci = find_mci_by_dev(&F3->dev);
	WARN_ON(!mci);

	/* Remove from EDAC CORE tracking list */
	mci = edac_mc_del_mc(&F3->dev);
	if (!mci)
		return;

	pvt = mci->pvt_info;

	restore_ecc_error_reporting(s, nid, F3);

	free_mc_sibling_devs(pvt);

	/* unregister from EDAC MCE */
	amd_report_gart_errors(false);

	if (pvt->umc)
		amd_unregister_ecc_decoder(decode_umc_error);
	else
		amd_unregister_ecc_decoder(decode_bus_error);

	kfree(ecc_stngs[nid]);
	ecc_stngs[nid] = NULL;

	/* Free the EDAC CORE resources */
	mci->pvt_info = NULL;

	kfree(pvt);
	edac_mc_free(mci);
}
static void setup_pci_device(void)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;

	if (pci_ctl)
		return;

	mci = edac_mc_find(0);
	if (!mci)
		return;

	pvt = mci->pvt_info;
	if (pvt->umc)
		pci_ctl = edac_pci_create_generic_ctl(&pvt->F0->dev, EDAC_MOD_STR);
	else
		pci_ctl = edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);
	if (!pci_ctl) {
		pr_warn("%s(): Unable to create PCI control\n", __func__);
		pr_warn("%s(): PCI error report via EDAC not set\n", __func__);
	}
}
static const struct x86_cpu_id amd64_cpuids[] = {
	{ X86_VENDOR_AMD, 0xF,	X86_MODEL_ANY,	X86_FEATURE_ANY, 0 },
	{ X86_VENDOR_AMD, 0x10, X86_MODEL_ANY,	X86_FEATURE_ANY, 0 },
	{ X86_VENDOR_AMD, 0x15, X86_MODEL_ANY,	X86_FEATURE_ANY, 0 },
	{ X86_VENDOR_AMD, 0x16, X86_MODEL_ANY,	X86_FEATURE_ANY, 0 },
	{ X86_VENDOR_AMD, 0x17, X86_MODEL_ANY,	X86_FEATURE_ANY, 0 },
	{ }
};
MODULE_DEVICE_TABLE(x86cpu, amd64_cpuids);
static int __init amd64_edac_init(void)
{
	int err = -ENODEV;
	int i;

	if (!x86_match_cpu(amd64_cpuids))
		return -ENODEV;

	if (amd_cache_northbridges() < 0)
		return -ENODEV;

	opstate_init();

	err = -ENOMEM;
	ecc_stngs = kzalloc(amd_nb_num() * sizeof(ecc_stngs[0]), GFP_KERNEL);
	if (!ecc_stngs)
		goto err_free;

	msrs = msrs_alloc();
	if (!msrs)
		goto err_free;

	for (i = 0; i < amd_nb_num(); i++)
		if (probe_one_instance(i)) {
			/* unwind properly */
			while (--i >= 0)
				remove_one_instance(i);

			goto err_pci;
		}

	setup_pci_device();

#ifdef CONFIG_X86_32
	amd64_err("%s on 32-bit is unsupported. USE AT YOUR OWN RISK!\n", EDAC_MOD_STR);
#endif

	printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION);

	return 0;

err_pci:
	msrs_free(msrs);
	msrs = NULL;

err_free:
	kfree(ecc_stngs);
	ecc_stngs = NULL;

	return err;
}
static void __exit amd64_edac_exit(void)
{
	int i;

	if (pci_ctl)
		edac_pci_release_generic_ctl(pci_ctl);

	for (i = 0; i < amd_nb_num(); i++)
		remove_one_instance(i);

	kfree(ecc_stngs);
	ecc_stngs = NULL;

	msrs_free(msrs);
	msrs = NULL;
}

module_init(amd64_edac_init);
module_exit(amd64_edac_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
	      "Dave Peterson, Thayne Harbaugh");
MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
		   EDAC_AMD64_VERSION);

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");