#include "amd64_edac.h"
#include <asm/amd_nb.h>

static struct edac_pci_ctl_info *amd64_ctl_pci;

static int report_gart_errors;
module_param(report_gart_errors, int, 0644);

/*
 * Set by command line parameter. If BIOS has enabled the ECC, this override is
 * cleared to prevent re-enabling the hardware by this driver.
 */
static int ecc_enable_override;
module_param(ecc_enable_override, int, 0644);

static struct msr __percpu *msrs;

/*
 * count successfully initialized driver instances for setup_pci_device()
 */
static atomic_t drv_instances = ATOMIC_INIT(0);

/* Per-node driver instances */
static struct mem_ctl_info **mcis;
static struct ecc_settings **ecc_stngs;

/*
 * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
 * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
 * or higher value'.
 *
 * FIXME: Produce a better mapping/linearisation.
 */
static const struct scrubrate {
	u32 scrubval;		/* bit pattern for scrub rate */
	u32 bandwidth;		/* bandwidth consumed (bytes/sec) */
} scrubrates[] = {
	{ 0x01, 1600000000UL},
	{ 0x02,  800000000UL},
	{ 0x03,  400000000UL},
	{ 0x04,  200000000UL},
	{ 0x05,  100000000UL},
	{ 0x06,   50000000UL},
	{ 0x07,   25000000UL},
	{ 0x08,   12284069UL},
	{ 0x09,    6274509UL},
	{ 0x0A,    3121951UL},
	{ 0x0B,    1560975UL},
	{ 0x0C,     781440UL},
	{ 0x0D,     390720UL},
	{ 0x0E,     195300UL},
	{ 0x0F,      97650UL},
	{ 0x10,      48854UL},
	{ 0x11,      24427UL},
	{ 0x12,      12213UL},
	{ 0x13,       6101UL},
	{ 0x14,       3051UL},
	{ 0x15,       1523UL},
	{ 0x16,        761UL},
	{ 0x00,          0UL},	/* scrubbing off */
};

int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
			       u32 *val, const char *func)
{
	int err = 0;

	err = pci_read_config_dword(pdev, offset, val);
	if (err)
		amd64_warn("%s: error reading F%dx%03x.\n",
			   func, PCI_FUNC(pdev->devfn), offset);

	return err;
}

int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
				u32 val, const char *func)
{
	int err = 0;

	err = pci_write_config_dword(pdev, offset, val);
	if (err)
		amd64_warn("%s: error writing to F%dx%03x.\n",
			   func, PCI_FUNC(pdev->devfn), offset);

	return err;
}

/*
 * Depending on the family, F2 DCT reads need special handling:
 *
 * K8: has a single DCT only
 *
 * F10h: each DCT has its own set of regs
 *	DCT0 -> F2x040..
 *	DCT1 -> F2x140..
 *
 * F15h: we select which DCT we access using F1x10C[DctCfgSel]
 *
 * F16h: has only 1 DCT
 */
static int k8_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
			       const char *func)
{
	if (addr >= 0x100)
		return -EINVAL;

	return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
}

static int f10_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
				const char *func)
{
	return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
}

/*
 * Select DCT to which PCI cfg accesses are routed
 */
static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
{
	u32 reg = 0;

	amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
	reg &= (pvt->model >= 0x30) ? ~3 : ~1;
	reg |= dct;
	amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
}

static int f15_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
				const char *func)
{
	u8 dct = 0;

	/* For F15 M30h, the second dct is DCT 3, refer to BKDG Section 2.10 */
	if (addr >= 0x140 && addr <= 0x1a0) {
		dct = (pvt->model >= 0x30) ? 3 : 1;
		addr -= 0x100;
	}

	f15h_select_dct(pvt, dct);

	return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
}

/*
 * Memory scrubber control interface. For K8, memory scrubbing is handled by
 * hardware and can involve L2 cache, dcache as well as the main memory. With
 * F10, this is extended to L3 cache scrubbing on CPU models sporting that
 * functionality.
 *
 * This causes the "units" for the scrubbing speed to vary from 64 byte blocks
 * (dram) over to cache lines. This is nasty, so we will use bandwidth in
 * bytes/sec for the setting.
 *
 * Currently, we only do dram scrubbing. If the scrubbing is done in software on
 * other archs, we might not have access to the caches directly.
 */

/*
 * Scan the scrub rate mapping table for a close or matching bandwidth value to
 * issue. If the requested rate is too big, use the last (maximum) value found.
 */
static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
{
	u32 scrubval;
	int i;

	/*
	 * map the configured rate (new_bw) to a value specific to the AMD64
	 * memory controller and apply to register. Search for the first
	 * bandwidth entry that is greater than or equal to the requested
	 * setting and program that. If at last entry, turn off DRAM scrubbing.
	 *
	 * If no suitable bandwidth is found, turn off DRAM scrubbing entirely
	 * by falling back to the last element in scrubrates[].
	 */
	for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
		/*
		 * skip scrub rates which aren't recommended
		 * (see F10 BKDG, F3x58)
		 */
		if (scrubrates[i].scrubval < min_rate)
			continue;

		if (scrubrates[i].bandwidth <= new_bw)
			break;
	}

	scrubval = scrubrates[i].scrubval;

	pci_write_bits32(ctl, SCRCTRL, scrubval, 0x001F);

	if (scrubval)
		return scrubrates[i].bandwidth;

	return 0;
}

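/*
 * Illustrative example (hypothetical values, not from the BKDG): with
 * min_rate = 0x5 and new_bw = 5000000 bytes/sec, the loop above skips the
 * 0x01..0x04 entries (scrubval < min_rate), then walks down the table until
 * the first bandwidth <= new_bw, i.e. { 0x0A, 3121951UL }. 0x0A is written
 * to the scrub-rate field via pci_write_bits32() and 3121951 is returned as
 * the effective rate. A new_bw below 761 never breaks out of the loop, so i
 * lands on { 0x00, 0UL } and scrubbing is turned off.
 */
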
static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u32 min_scrubrate = 0x5;

	if (pvt->fam == 0xf)
		min_scrubrate = 0x0;

	/* Erratum #505 */
	if (pvt->fam == 0x15 && pvt->model < 0x10)
		f15h_select_dct(pvt, 0);

	return __amd64_set_scrub_rate(pvt->F3, bw, min_scrubrate);
}

static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u32 scrubval = 0;
	int i, retval = -EINVAL;

	/* Erratum #505 */
	if (pvt->fam == 0x15 && pvt->model < 0x10)
		f15h_select_dct(pvt, 0);

	amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);

	scrubval = scrubval & 0x001F;

	for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
		if (scrubrates[i].scrubval == scrubval) {
			retval = scrubrates[i].bandwidth;
			break;
		}
	}
	return retval;
}

/*
 * returns true if the SysAddr given by sys_addr matches the
 * DRAM base/limit associated with node_id
 */
static bool amd64_base_limit_match(struct amd64_pvt *pvt, u64 sys_addr,
				   u8 nid)
{
	u64 addr;

	/* The K8 treats this as a 40-bit value. However, bits 63-40 will be
	 * all ones if the most significant implemented address bit is 1.
	 * Here we discard bits 63-40. See section 3.4.2 of AMD publication
	 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
	 * Application Programming.
	 */
	addr = sys_addr & 0x000000ffffffffffull;

	return ((addr >= get_dram_base(pvt, nid)) &&
		(addr <= get_dram_limit(pvt, nid)));
}

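/*
 * Worked example (illustrative): a sign-extended SysAddr such as
 * 0xffffff8000000000 has bits 63-40 all set because bit 39 is set.
 * Masking with 0x000000ffffffffff yields 0x8000000000, which is the
 * 40-bit value actually compared against the node's DRAM base/limit.
 */
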
/*
 * Attempt to map a SysAddr to a node. On success, return a pointer to the
 * mem_ctl_info structure for the node that the SysAddr maps to.
 *
 * On failure, return NULL.
 */
static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
						u64 sys_addr)
{
	struct amd64_pvt *pvt;
	u8 node_id;
	u32 intlv_en, bits;

	/*
	 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
	 * 3.4.4.2) registers to map the SysAddr to a node ID.
	 */
	pvt = mci->pvt_info;

	/*
	 * The value of this field should be the same for all DRAM Base
	 * registers. Therefore we arbitrarily choose to read it from the
	 * register for node 0.
	 */
	intlv_en = dram_intlv_en(pvt, 0);

	if (intlv_en == 0) {
		for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
			if (amd64_base_limit_match(pvt, sys_addr, node_id))
				goto found;
		}
		goto err_no_match;
	}

	if (unlikely((intlv_en != 0x01) &&
		     (intlv_en != 0x03) &&
		     (intlv_en != 0x07))) {
		amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en);
		return NULL;
	}

	bits = (((u32) sys_addr) >> 12) & intlv_en;

	for (node_id = 0; ; ) {
		if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits)
			break;	/* intlv_sel field matches */

		if (++node_id >= DRAM_RANGES)
			goto err_no_match;
	}

	/* sanity test for sys_addr */
	if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) {
		amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address "
			   "range for node %d with node interleaving enabled.\n",
			   __func__, sys_addr, node_id);
		return NULL;
	}

found:
	return edac_mc_find((int)node_id);

err_no_match:
	edac_dbg(2, "sys_addr 0x%lx doesn't match any node\n",
		 (unsigned long)sys_addr);

	return NULL;
}

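/*
 * Illustrative example: with IntlvEn = 0x3 (four-node interleave),
 * bits = (sys_addr >> 12) & 0x3 selects among SysAddr bits [13:12].
 * For sys_addr = 0x3000, bits = 0x3, so the node whose IntlvSel field
 * equals 0x3 claims the address (subject to the base/limit sanity
 * check above).
 */
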
/*
 * compute the CS base address of the @csrow on the DRAM controller @dct.
 * For details see F2x[5C:40] in the processor's BKDG
 */
static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
				 u64 *base, u64 *mask)
{
	u64 csbase, csmask, base_bits, mask_bits;
	u8 addr_shift;

	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
		csbase		= pvt->csels[dct].csbases[csrow];
		csmask		= pvt->csels[dct].csmasks[csrow];
		base_bits	= GENMASK_ULL(31, 21) | GENMASK_ULL(15, 9);
		mask_bits	= GENMASK_ULL(29, 21) | GENMASK_ULL(15, 9);
		addr_shift	= 4;

	/*
	 * F16h and F15h, models 30h and later need two addr_shift values:
	 * 8 for high and 6 for low (cf. F16h BKDG).
	 */
	} else if (pvt->fam == 0x16 ||
		   (pvt->fam == 0x15 && pvt->model >= 0x30)) {
		csbase = pvt->csels[dct].csbases[csrow];
		csmask = pvt->csels[dct].csmasks[csrow >> 1];

		*base  = (csbase & GENMASK_ULL(15, 5)) << 6;
		*base |= (csbase & GENMASK_ULL(30, 19)) << 8;

		*mask = ~0ULL;
		/* poke holes for the csmask */
		*mask &= ~((GENMASK_ULL(15, 5) << 6) |
			   (GENMASK_ULL(30, 19) << 8));

		*mask |= (csmask & GENMASK_ULL(15, 5)) << 6;
		*mask |= (csmask & GENMASK_ULL(30, 19)) << 8;

		return;
	} else {
		csbase = pvt->csels[dct].csbases[csrow];
		csmask = pvt->csels[dct].csmasks[csrow >> 1];
		addr_shift = 8;

		if (pvt->fam == 0x15)
			base_bits = mask_bits =
				GENMASK_ULL(30, 19) | GENMASK_ULL(13, 5);
		else
			base_bits = mask_bits =
				GENMASK_ULL(28, 19) | GENMASK_ULL(13, 5);
	}

	*base = (csbase & base_bits) << addr_shift;

	*mask = ~0ULL;
	/* poke holes for the csmask */
	*mask &= ~(mask_bits << addr_shift);
	/* OR them in */
	*mask |= (csmask & mask_bits) << addr_shift;
}

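/*
 * Illustrative example for the pre-revF K8 branch above: a DCSB value of
 * 0x00200000 (only base bit 21 set) gives
 *	*base = (0x00200000 & base_bits) << 4 = 0x02000000,
 * i.e. the chip select starts at 32MB. The mask is built the same way:
 * holes are punched at the mask_bits positions and the register's mask
 * bits are OR-ed back in, shifted by the same 4 bits.
 */
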
#define for_each_chip_select(i, dct, pvt) \
	for (i = 0; i < pvt->csels[dct].b_cnt; i++)

#define chip_select_base(i, dct, pvt) \
	pvt->csels[dct].csbases[i]

#define for_each_chip_select_mask(i, dct, pvt) \
	for (i = 0; i < pvt->csels[dct].m_cnt; i++)

/*
 * @input_addr is an InputAddr associated with the node given by mci. Return the
 * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
 */
static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
{
	struct amd64_pvt *pvt;
	int csrow;
	u64 base, mask;

	pvt = mci->pvt_info;

	for_each_chip_select(csrow, 0, pvt) {
		if (!csrow_enabled(csrow, 0, pvt))
			continue;

		get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);

		mask = ~mask;

		if ((input_addr & mask) == (base & mask)) {
			edac_dbg(2, "InputAddr 0x%lx matches csrow %d (node %d)\n",
				 (unsigned long)input_addr, csrow,
				 pvt->mc_node_id);

			return csrow;
		}
	}
	edac_dbg(2, "no matching csrow for InputAddr 0x%lx (MC node %d)\n",
		 (unsigned long)input_addr, pvt->mc_node_id);

	return -1;
}

/*
 * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
 * for the node represented by mci. Info is passed back in *hole_base,
 * *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if
 * info is invalid. Info may be invalid for either of the following reasons:
 *
 * - The revision of the node is not E or greater. In this case, the DRAM Hole
 *   Address Register does not exist.
 *
 * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
 *   indicating that its contents are not valid.
 *
 * The values passed back in *hole_base, *hole_offset, and *hole_size are
 * complete 32-bit values despite the fact that the bitfields in the DHAR
 * only represent bits 31-24 of the base and offset values.
 */
int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
			     u64 *hole_offset, u64 *hole_size)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	/* only revE and later have the DRAM Hole Address Register */
	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_E) {
		edac_dbg(1, "  revision %d for node %d does not support DHAR\n",
			 pvt->ext_model, pvt->mc_node_id);
		return 1;
	}

	/* valid for Fam10h and above */
	if (pvt->fam >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
		edac_dbg(1, "  Dram Memory Hoisting is DISABLED on this system\n");
		return 1;
	}

	if (!dhar_valid(pvt)) {
		edac_dbg(1, "  Dram Memory Hoisting is DISABLED on this node %d\n",
			 pvt->mc_node_id);
		return 1;
	}

	/* This node has Memory Hoisting */

	/* +------------------+--------------------+--------------------+-----
	 * | memory           | DRAM hole          | relocated          |
	 * | [0, (x - 1)]     | [x, 0xffffffff]    | addresses from     |
	 * |                  |                    | DRAM hole          |
	 * |                  |                    | [0x100000000,      |
	 * |                  |                    |  (0x100000000+     |
	 * |                  |                    |   (0xffffffff-x))] |
	 * +------------------+--------------------+--------------------+-----
	 *
	 * Above is a diagram of physical memory showing the DRAM hole and the
	 * relocated addresses from the DRAM hole. As shown, the DRAM hole
	 * starts at address x (the base address) and extends through address
	 * 0xffffffff. The DRAM Hole Address Register (DHAR) relocates the
	 * addresses in the hole so that they start at 0x100000000.
	 */

	*hole_base = dhar_base(pvt);
	*hole_size = (1ULL << 32) - *hole_base;

	*hole_offset = (pvt->fam > 0xf) ? f10_dhar_offset(pvt)
					: k8_dhar_offset(pvt);

	edac_dbg(1, "  DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
		 pvt->mc_node_id, (unsigned long)*hole_base,
		 (unsigned long)*hole_offset, (unsigned long)*hole_size);

	return 0;
}
EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);

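/*
 * Illustrative example: if the DHAR reports a hole base of 0xc0000000,
 * then hole_size = (1ULL << 32) - 0xc0000000 = 0x40000000 (1GB), and the
 * DRAM behind the hole appears at [0x100000000, 0x140000000). hole_offset
 * is what must be subtracted from such a relocated SysAddr to recover the
 * DramAddr.
 */
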
/*
 * Return the DramAddr that the SysAddr given by @sys_addr maps to. It is
 * assumed that sys_addr maps to the node given by mci.
 *
 * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
 * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
 * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
 * then it is also involved in translating a SysAddr to a DramAddr. Sections
 * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
 * These parts of the documentation are unclear. I interpret them as follows:
 *
 * When node n receives a SysAddr, it processes the SysAddr as follows:
 *
 * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
 *    Limit registers for node n. If the SysAddr is not within the range
 *    specified by the base and limit values, then node n ignores the SysAddr
 *    (since it does not map to node n). Otherwise continue to step 2 below.
 *
 * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
 *    disabled so skip to step 3 below. Otherwise see if the SysAddr is within
 *    the range of relocated addresses (starting at 0x100000000) from the DRAM
 *    hole. If not, skip to step 3 below. Else get the value of the
 *    DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
 *    offset defined by this value from the SysAddr.
 *
 * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
 *    Base register for node n. To obtain the DramAddr, subtract the base
 *    address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
 */
static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
	int ret;

	dram_base = get_dram_base(pvt, pvt->mc_node_id);

	ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
				       &hole_size);
	if (!ret) {
		if ((sys_addr >= (1ULL << 32)) &&
		    (sys_addr < ((1ULL << 32) + hole_size))) {
			/* use DHAR to translate SysAddr to DramAddr */
			dram_addr = sys_addr - hole_offset;

			edac_dbg(2, "using DHAR to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
				 (unsigned long)sys_addr,
				 (unsigned long)dram_addr);

			return dram_addr;
		}
	}

	/*
	 * Translate the SysAddr to a DramAddr as shown near the start of
	 * section 3.4.4 (p. 70). Although sys_addr is a 64-bit value, the k8
	 * only deals with 40-bit values. Therefore we discard bits 63-40 of
	 * sys_addr below. If bit 39 of sys_addr is 1 then the bits we
	 * discard are all 1s. Otherwise the bits we discard are all 0s. See
	 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
	 * Programmer's Manual Volume 1 Application Programming.
	 */
	dram_addr = (sys_addr & GENMASK_ULL(39, 0)) - dram_base;

	edac_dbg(2, "using DRAM Base register to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
		 (unsigned long)sys_addr, (unsigned long)dram_addr);
	return dram_addr;
}

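/*
 * Worked example (hypothetical register values): with a hole_offset of
 * 0x40000000, a relocated SysAddr 0x100001000 translates via the DHAR
 * path to DramAddr 0x100001000 - 0x40000000 = 0xc0001000. Without
 * hoisting, a SysAddr 0x80001000 on a node whose DRAM base is 0x80000000
 * simply yields DramAddr 0x1000.
 */
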
/*
 * @intlv_en is the value of the IntlvEn field from a DRAM Base register
 * (section 3.4.4.1). Return the number of bits from a SysAddr that are used
 * for node interleaving.
 */
static int num_node_interleave_bits(unsigned intlv_en)
{
	static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
	int n;

	BUG_ON(intlv_en > 7);
	n = intlv_shift_table[intlv_en];
	return n;
}

/* Translate the DramAddr given by @dram_addr to an InputAddr. */
static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
{
	struct amd64_pvt *pvt;
	int intlv_shift;
	u64 input_addr;

	pvt = mci->pvt_info;

	/*
	 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
	 * concerning translating a DramAddr to an InputAddr.
	 */
	intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
	input_addr = ((dram_addr >> intlv_shift) & GENMASK_ULL(35, 12)) +
		     (dram_addr & 0xfff);

	edac_dbg(2, "  Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
		 intlv_shift, (unsigned long)dram_addr,
		 (unsigned long)input_addr);

	return input_addr;
}

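/*
 * Illustrative example: with two-node interleaving (intlv_shift = 1) and
 * dram_addr = 0x5000, the computation above is
 *	((0x5000 >> 1) & GENMASK_ULL(35, 12)) + (0x5000 & 0xfff)
 *	= (0x2800 & ~0xfffULL) + 0 = 0x2000,
 * i.e. bits [35:12] shift down by one while the page offset is kept.
 */
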
/*
 * Translate the SysAddr represented by @sys_addr to an InputAddr. It is
 * assumed that @sys_addr maps to the node given by mci.
 */
static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
{
	u64 input_addr;

	input_addr =
	    dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));

	edac_dbg(2, "SysAddr 0x%lx translates to InputAddr 0x%lx\n",
		 (unsigned long)sys_addr, (unsigned long)input_addr);

	return input_addr;
}

/* Map the Error address to a PAGE and PAGE OFFSET. */
static inline void error_address_to_page_and_offset(u64 error_address,
						    struct err_info *err)
{
	err->page = (u32) (error_address >> PAGE_SHIFT);
	err->offset = ((u32) error_address) & ~PAGE_MASK;
}

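/*
 * Example (assuming 4K pages, PAGE_SHIFT == 12): an error address of
 * 0x12345678 splits into page 0x12345 and offset 0x678.
 */
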
/*
 * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
 * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
 * of a node that detected an ECC memory error. mci represents the node that
 * the error address maps to (possibly different from the node that detected
 * the error). Return the number of the csrow that sys_addr maps to, or -1 on
 * error.
 */
static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
{
	int csrow;

	csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));

	if (csrow == -1)
		amd64_mc_err(mci, "Failed to translate InputAddr to csrow for "
				  "address 0x%lx\n", (unsigned long)sys_addr);
	return csrow;
}

static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);

/*
 * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the
 * DIMMs are ECC capable.
 */
static unsigned long amd64_determine_edac_cap(struct amd64_pvt *pvt)
{
	u8 bit;
	unsigned long edac_cap = EDAC_FLAG_NONE;

	bit = (pvt->fam > 0xf || pvt->ext_model >= K8_REV_F)
		? 19
		: 17;

	if (pvt->dclr0 & BIT(bit))
		edac_cap = EDAC_FLAG_SECDED;

	return edac_cap;
}

static void amd64_debug_display_dimm_sizes(struct amd64_pvt *, u8);

static void amd64_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
{
	edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);

	edac_dbg(1, "  DIMM type: %sbuffered; all DIMMs support ECC: %s\n",
		 (dclr & BIT(16)) ? "un" : "",
		 (dclr & BIT(19)) ? "yes" : "no");

	edac_dbg(1, "  PAR/ERR parity: %s\n",
		 (dclr & BIT(8)) ? "enabled" : "disabled");

	if (pvt->fam == 0x10)
		edac_dbg(1, "  DCT 128bit mode width: %s\n",
			 (dclr & BIT(11)) ? "128b" : "64b");

	edac_dbg(1, "  x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
		 (dclr & BIT(12)) ? "yes" : "no",
		 (dclr & BIT(13)) ? "yes" : "no",
		 (dclr & BIT(14)) ? "yes" : "no",
		 (dclr & BIT(15)) ? "yes" : "no");
}

/* Display and decode various NB registers for debug purposes. */
static void dump_misc_regs(struct amd64_pvt *pvt)
{
	edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);

	edac_dbg(1, "  NB two channel DRAM capable: %s\n",
		 (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");

	edac_dbg(1, "  ECC capable: %s, ChipKill ECC capable: %s\n",
		 (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
		 (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");

	amd64_dump_dramcfg_low(pvt, pvt->dclr0, 0);

	edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);

	edac_dbg(1, "F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, offset: 0x%08x\n",
		 pvt->dhar, dhar_base(pvt),
		 (pvt->fam == 0xf) ? k8_dhar_offset(pvt)
				   : f10_dhar_offset(pvt));

	edac_dbg(1, "  DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");

	amd64_debug_display_dimm_sizes(pvt, 0);

	/* everything below this point is Fam10h and above */
	if (pvt->fam == 0xf)
		return;

	amd64_debug_display_dimm_sizes(pvt, 1);

	amd64_info("using %s syndromes.\n", ((pvt->ecc_sym_sz == 8) ? "x8" : "x4"));

	/* Only if NOT ganged does dclr1 have valid info */
	if (!dct_ganging_enabled(pvt))
		amd64_dump_dramcfg_low(pvt, pvt->dclr1, 1);
}

/*
 * See BKDG, F2x[1,0][5C:40], F2x[1,0][6C:60]
 */
static void prep_chip_selects(struct amd64_pvt *pvt)
{
	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
	} else if (pvt->fam == 0x15 && pvt->model >= 0x30) {
		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 4;
		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 2;
	} else {
		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
	}
}

/*
 * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
 */
static void read_dct_base_mask(struct amd64_pvt *pvt)
{
	int cs;

	prep_chip_selects(pvt);

	for_each_chip_select(cs, 0, pvt) {
		int reg0   = DCSB0 + (cs * 4);
		int reg1   = DCSB1 + (cs * 4);
		u32 *base0 = &pvt->csels[0].csbases[cs];
		u32 *base1 = &pvt->csels[1].csbases[cs];

		if (!amd64_read_dct_pci_cfg(pvt, reg0, base0))
			edac_dbg(0, "  DCSB0[%d]=0x%08x reg: F2x%x\n",
				 cs, *base0, reg0);

		if (pvt->fam == 0xf || dct_ganging_enabled(pvt))
			continue;

		if (!amd64_read_dct_pci_cfg(pvt, reg1, base1))
			edac_dbg(0, "  DCSB1[%d]=0x%08x reg: F2x%x\n",
				 cs, *base1, reg1);
	}

	for_each_chip_select_mask(cs, 0, pvt) {
		int reg0   = DCSM0 + (cs * 4);
		int reg1   = DCSM1 + (cs * 4);
		u32 *mask0 = &pvt->csels[0].csmasks[cs];
		u32 *mask1 = &pvt->csels[1].csmasks[cs];

		if (!amd64_read_dct_pci_cfg(pvt, reg0, mask0))
			edac_dbg(0, "  DCSM0[%d]=0x%08x reg: F2x%x\n",
				 cs, *mask0, reg0);

		if (pvt->fam == 0xf || dct_ganging_enabled(pvt))
			continue;

		if (!amd64_read_dct_pci_cfg(pvt, reg1, mask1))
			edac_dbg(0, "  DCSM1[%d]=0x%08x reg: F2x%x\n",
				 cs, *mask1, reg1);
	}
}

static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt, int cs)
{
	enum mem_type type;

	/* F15h and later support only DDR3 */
	if (pvt->fam >= 0x15)
		type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
	else if (pvt->fam == 0x10 || pvt->ext_model >= K8_REV_F) {
		if (pvt->dchr0 & DDR3_MODE)
			type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
		else
			type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
	} else {
		type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
	}

	amd64_info("CS%d: %s\n", cs, edac_mem_types[type]);

	return type;
}

/* Get the number of DCT channels the memory controller is using. */
static int k8_early_channel_count(struct amd64_pvt *pvt)
{
	int flag;

	if (pvt->ext_model >= K8_REV_F)
		/* RevF (NPT) and later */
		flag = pvt->dclr0 & WIDTH_128;
	else
		/* RevE and earlier */
		flag = pvt->dclr0 & REVE_WIDTH_128;

	/* not used */
	pvt->dclr1 = 0;

	return (flag) ? 2 : 1;
}

/* On F10h and later ErrAddr is MC4_ADDR[47:1] */
static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m)
{
	u64 addr;
	u8 start_bit = 1;
	u8 end_bit   = 47;

	if (pvt->fam == 0xf) {
		start_bit = 3;
		end_bit   = 39;
	}

	addr = m->addr & GENMASK_ULL(end_bit, start_bit);

	/*
	 * Erratum 637 workaround
	 */
	if (pvt->fam == 0x15) {
		struct amd64_pvt *pvt;
		u64 cc6_base, tmp_addr;
		u32 tmp;
		u16 mce_nid;
		u8 intlv_en;

		if ((addr & GENMASK_ULL(47, 24)) >> 24 != 0x00fdf7)
			return addr;

		mce_nid = amd_get_nb_id(m->extcpu);
		pvt	= mcis[mce_nid]->pvt_info;

		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_LIM, &tmp);
		intlv_en = tmp >> 21 & 0x7;

		/* add [47:27] + 3 trailing bits */
		cc6_base  = (tmp & GENMASK_ULL(20, 0)) << 3;

		/* reverse and add DramIntlvEn */
		cc6_base |= intlv_en ^ 0x7;

		/* pin at [47:24] */
		cc6_base <<= 24;

		if (!intlv_en)
			return cc6_base | (addr & GENMASK_ULL(23, 0));

		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_BASE, &tmp);

		/* faster log2 */
		tmp_addr  = (addr & GENMASK_ULL(23, 12)) << __fls(intlv_en + 1);

		/* OR DramIntlvSel into bits [14:12] */
		tmp_addr |= (tmp & GENMASK_ULL(23, 21)) >> 9;

		/* add remaining [11:0] bits from original MC4_ADDR */
		tmp_addr |= addr & GENMASK_ULL(11, 0);

		return cc6_base | tmp_addr;
	}

	return addr;
}

static struct pci_dev *pci_get_related_function(unsigned int vendor,
						unsigned int device,
						struct pci_dev *related)
{
	struct pci_dev *dev = NULL;

	while ((dev = pci_get_device(vendor, device, dev))) {
		if (pci_domain_nr(dev->bus) == pci_domain_nr(related->bus) &&
		    (dev->bus->number == related->bus->number) &&
		    (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
			break;
	}

	return dev;
}

static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
{
	struct amd_northbridge *nb;
	struct pci_dev *f1 = NULL;
	unsigned int pci_func;
	int off = range << 3;
	u32 llim;

	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off,  &pvt->ranges[range].base.lo);
	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);

	if (pvt->fam == 0xf)
		return;

	if (!dram_rw(pvt, range))
		return;

	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off,  &pvt->ranges[range].base.hi);
	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);

	/* F15h: factor in CC6 save area by reading dst node's limit reg */
	if (pvt->fam != 0x15)
		return;

	nb = node_to_amd_nb(dram_dst_node(pvt, range));
	if (WARN_ON(!nb))
		return;

	pci_func = (pvt->model == 0x30) ? PCI_DEVICE_ID_AMD_15H_M30H_NB_F1
					: PCI_DEVICE_ID_AMD_15H_NB_F1;

	f1 = pci_get_related_function(nb->misc->vendor, pci_func, nb->misc);
	if (WARN_ON(!f1))
		return;

	amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);

	pvt->ranges[range].lim.lo &= GENMASK_ULL(15, 0);

	/* {[39:27],111b} */
	pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;

	pvt->ranges[range].lim.hi &= GENMASK_ULL(7, 0);

	/* [47:40] */
	pvt->ranges[range].lim.hi |= llim >> 13;

	pci_dev_put(f1);
}

static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
				    struct err_info *err)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	error_address_to_page_and_offset(sys_addr, err);

	/*
	 * Find out which node the error address belongs to. This may be
	 * different from the node that detected the error.
	 */
	err->src_mci = find_mc_by_sys_addr(mci, sys_addr);
	if (!err->src_mci) {
		amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
			     (unsigned long)sys_addr);
		err->err_code = ERR_NODE;
		return;
	}

	/* Now map the sys_addr to a CSROW */
	err->csrow = sys_addr_to_csrow(err->src_mci, sys_addr);
	if (err->csrow < 0) {
		err->err_code = ERR_CSROW;
		return;
	}

	/* CHIPKILL enabled */
	if (pvt->nbcfg & NBCFG_CHIPKILL) {
		err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
		if (err->channel < 0) {
			/*
			 * Syndrome didn't map, so we don't know which of the
			 * 2 DIMMs is in error. So we need to ID 'both' of them
			 * as suspect.
			 */
			amd64_mc_warn(err->src_mci, "unknown syndrome 0x%04x - "
				      "possible error reporting race\n",
				      err->syndrome);
			err->err_code = ERR_CHANNEL;
			return;
		}
	} else {
		/*
		 * non-chipkill ecc mode
		 *
		 * The k8 documentation is unclear about how to determine the
		 * channel number when using non-chipkill memory. This method
		 * was obtained from email communication with someone at AMD.
		 * (Wish the email was placed in this comment - norsk)
		 */
		err->channel = ((sys_addr & BIT(3)) != 0);
	}
}

static int ddr2_cs_size(unsigned i, bool dct_width)
{
	unsigned shift = 0;

	if (i <= 2)
		shift = i;
	else if (!(i & 0x1))
		shift = i >> 1;
	else
		shift = (i + 1) >> 1;

	return 128 << (shift + !!dct_width);
}

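/*
 * Illustrative arithmetic: ddr2_cs_size(5, false) takes the odd branch,
 * shift = (5 + 1) >> 1 = 3, and returns 128 << 3 = 1024 (MB). A 128-bit
 * wide DCT (dct_width true) doubles that to 2048.
 */
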
static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				  unsigned cs_mode)
{
	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;

	if (pvt->ext_model >= K8_REV_F) {
		WARN_ON(cs_mode > 11);
		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
	}
	else if (pvt->ext_model >= K8_REV_D) {
		unsigned diff;
		WARN_ON(cs_mode > 10);

		/*
		 * the below calculation, besides trying to win an obfuscated C
		 * contest, maps cs_mode values to DIMM chip select sizes. The
		 * mappings are:
		 *
		 * cs_mode	CS size (MB)
		 * =======	============
		 * 0		32
		 * 1		64
		 * 2		128
		 * 3		128
		 * 4		256
		 * 5		512
		 * 6		256
		 * 7		512
		 * 8		1024
		 * 9		1024
		 * 10		2048
		 *
		 * Basically, it calculates a value with which to shift the
		 * smallest CS size of 32MB.
		 *
		 * ddr[23]_cs_size have a similar purpose.
		 */
		diff = cs_mode / 3 + (unsigned)(cs_mode > 5);

		return 32 << (cs_mode - diff);
	}
	else {
		WARN_ON(cs_mode > 6);
		return 32 << cs_mode;
	}
}

/*
 * Get the number of DCT channels in use.
 *
 * Return:
 *	number of Memory Channels in operation
 * Pass back:
 *	contents of the DCL0_LOW register
 */
static int f1x_early_channel_count(struct amd64_pvt *pvt)
{
	int i, j, channels = 0;

	/* On F10h, if we are in 128 bit mode, then we are using 2 channels */
	if (pvt->fam == 0x10 && (pvt->dclr0 & WIDTH_128))
		return 2;

	/*
	 * Need to check if in unganged mode: in that case, there are 2
	 * channels, but they are not in 128 bit mode and thus the above
	 * 'dclr0' status bit will be OFF.
	 *
	 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
	 * their CSEnable bit on. If so, then SINGLE DIMM case.
	 */
	edac_dbg(0, "Data width is not 128 bits - need more decoding\n");

	/*
	 * Check DRAM Bank Address Mapping values for each DIMM to see if there
	 * is more than just one DIMM present in unganged mode. Need to check
	 * both controllers since DIMMs can be placed in either one.
	 */
	for (i = 0; i < 2; i++) {
		u32 dbam = (i ? pvt->dbam1 : pvt->dbam0);

		for (j = 0; j < 4; j++) {
			if (DBAM_DIMM(j, dbam) > 0) {
				channels++;
				break;
			}
		}
	}

	if (channels > 2)
		channels = 2;

	amd64_info("MCT channel count: %d\n", channels);

	return channels;
}

static int ddr3_cs_size(unsigned i, bool dct_width)
{
	unsigned shift = 0;
	int cs_size = 0;

	if (i == 0 || i == 3 || i == 4)
		cs_size = -1;
	else if (i <= 2)
		shift = i;
	else if (i == 12)
		shift = 7;
	else if (!(i & 0x1))
		shift = i >> 1;
	else
		shift = (i + 1) >> 1;

	if (cs_size != -1)
		cs_size = (128 * (1 << !!dct_width)) << shift;

	return cs_size;
}

static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				   unsigned cs_mode)
{
	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;

	WARN_ON(cs_mode > 11);

	if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
		return ddr3_cs_size(cs_mode, dclr & WIDTH_128);
	else
		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
}

/*
 * F15h supports only 64-bit DCT interfaces
 */
static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				   unsigned cs_mode)
{
	WARN_ON(cs_mode > 12);

	return ddr3_cs_size(cs_mode, false);
}

/*
 * F16h and F15h model 30h have only limited cs_modes.
 */
static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				   unsigned cs_mode)
{
	WARN_ON(cs_mode > 12);

	if (cs_mode == 6 || cs_mode == 8 ||
	    cs_mode == 9 || cs_mode == 12)
		return -1;
	else
		return ddr3_cs_size(cs_mode, false);
}

static void read_dram_ctl_register(struct amd64_pvt *pvt)
{
	if (pvt->fam == 0xf)
		return;

	if (!amd64_read_dct_pci_cfg(pvt, DCT_SEL_LO, &pvt->dct_sel_lo)) {
		edac_dbg(0, "F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
			 pvt->dct_sel_lo, dct_sel_baseaddr(pvt));

		edac_dbg(0, "  DCTs operate in %s mode\n",
			 (dct_ganging_enabled(pvt) ? "ganged" : "unganged"));

		if (!dct_ganging_enabled(pvt))
			edac_dbg(0, "  Address range split per DCT: %s\n",
				 (dct_high_range_enabled(pvt) ? "yes" : "no"));

		edac_dbg(0, "  data interleave for ECC: %s, DRAM cleared since last warm reset: %s\n",
			 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
			 (dct_memory_cleared(pvt) ? "yes" : "no"));

		edac_dbg(0, "  channel interleave: %s, "
			 "interleave bits selector: 0x%x\n",
			 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
			 dct_sel_interleave_addr(pvt));
	}

	amd64_read_dct_pci_cfg(pvt, DCT_SEL_HI, &pvt->dct_sel_hi);
}

/*
 * Determine channel (DCT) based on the interleaving mode (see F15h M30h BKDG,
 * 2.10.12 Memory Interleaving Modes).
 */
static u8 f15_m30h_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
				     u8 intlv_en, int num_dcts_intlv,
				     u32 dct_sel)
{
	u8 channel = 0;
	u8 select;

	if (!(intlv_en))
		return (u8)(dct_sel);

	if (num_dcts_intlv == 2) {
		select = (sys_addr >> 8) & 0x3;
		channel = select ? 0x3 : 0;
	} else if (num_dcts_intlv == 4)
		channel = (sys_addr >> 8) & 0x7;

	return channel;
}

/*
 * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9
 * Memory Interleaving Modes.
 */
static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
				bool hi_range_sel, u8 intlv_en)
{
	u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1;

	if (dct_ganging_enabled(pvt))
		return 0;

	if (hi_range_sel)
		return dct_sel_high;

	/*
	 * see F2x110[DctSelIntLvAddr] - channel interleave mode
	 */
	if (dct_interleave_enabled(pvt)) {
		u8 intlv_addr = dct_sel_interleave_addr(pvt);

		/* return DCT select function: 0=DCT0, 1=DCT1 */
		if (!intlv_addr)
			return sys_addr >> 6 & 1;

		if (intlv_addr & 0x2) {
			u8 shift = intlv_addr & 0x1 ? 9 : 6;
			u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2;

			return ((sys_addr >> shift) & 1) ^ temp;
		}

		return (sys_addr >> (12 + hweight8(intlv_en))) & 1;
	}

	if (dct_high_range_enabled(pvt))
		return ~dct_sel_high & 1;

	return 0;
}

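/*
 * Illustrative example: with channel interleaving enabled and
 * F2x110[DctSelIntLvAddr] = 0, DCT selection is simply SysAddr bit 6,
 * so sys_addr = 0x1234567 (bit 6 set) routes to DCT1. For
 * intlv_addr = 0x2, the select bit (6 or 9) is additionally XOR-ed
 * with the parity of SysAddr bits [20:16].
 */
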
/* Convert the sys_addr to the normalized DCT address */
static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
				 u64 sys_addr, bool hi_rng,
				 u32 dct_sel_base_addr)
{
	u64 chan_off;
	u64 dram_base		= get_dram_base(pvt, range);
	u64 hole_off		= f10_dhar_offset(pvt);
	u64 dct_sel_base_off	= (pvt->dct_sel_hi & 0xFFFFFC00) << 16;

	if (hi_rng) {
		/*
		 * if
		 * base address of high range is below 4GB
		 *	(bits [47:27] at [31:11])
		 * DRAM address space on this DCT is hoisted above 4GB	&&
		 * sys_addr > 4GB
		 *
		 *	remove hole offset from sys_addr
		 * else
		 *	remove high range offset from sys_addr
		 */
		if ((!(dct_sel_base_addr >> 16) ||
		     dct_sel_base_addr < dhar_base(pvt)) &&
		    dhar_valid(pvt) &&
		    (sys_addr >= BIT_64(32)))
			chan_off = hole_off;
		else
			chan_off = dct_sel_base_off;
	} else {
		/*
		 * if
		 * we have a valid hole		&&
		 * sys_addr > 4GB
		 *
		 *	remove hole
		 * else
		 *	remove dram base to normalize to DCT address
		 */
		if (dhar_valid(pvt) && (sys_addr >= BIT_64(32)))
			chan_off = hole_off;
		else
			chan_off = dram_base;
	}

	return (sys_addr & GENMASK_ULL(47, 6)) - (chan_off & GENMASK_ULL(47, 23));
}

/*
 * checks if the csrow passed in is marked as SPARED, if so returns the new
 * spare row
 */
static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
{
	int tmp_cs;

	if (online_spare_swap_done(pvt, dct) &&
	    csrow == online_spare_bad_dramcs(pvt, dct)) {

		for_each_chip_select(tmp_cs, dct, pvt) {
			if (chip_select_base(tmp_cs, dct, pvt) & 0x2) {
				csrow = tmp_cs;
				break;
			}
		}
	}
	return csrow;
}

/*
 * Iterate over the DRAM DCT "base" and "mask" registers looking for a
 * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
 *
 * Return:
 *	-EINVAL:  NOT FOUND
 *	0..csrow = Chip-Select Row
 */
static int f1x_lookup_addr_in_dct(u64 in_addr, u8 nid, u8 dct)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;
	u64 cs_base, cs_mask;
	int cs_found = -EINVAL;
	int csrow;

	mci = mcis[nid];
	if (!mci)
		return cs_found;

	pvt = mci->pvt_info;

	edac_dbg(1, "input addr: 0x%llx, DCT: %d\n", in_addr, dct);

	for_each_chip_select(csrow, dct, pvt) {
		if (!csrow_enabled(csrow, dct, pvt))
			continue;

		get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);

		edac_dbg(1, "    CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
			 csrow, cs_base, cs_mask);

		cs_mask = ~cs_mask;

		edac_dbg(1, "    (InputAddr & ~CSMask)=0x%llx (CSBase & ~CSMask)=0x%llx\n",
			 (in_addr & cs_mask), (cs_base & cs_mask));

		if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
			if (pvt->fam == 0x15 && pvt->model >= 0x30) {
				cs_found = csrow;
				break;
			}
			cs_found = f10_process_possible_spare(pvt, dct, csrow);

			edac_dbg(1, " MATCH csrow=%d\n", cs_found);
			break;
		}
	}
	return cs_found;
}

/*
 * See F2x10C. Non-interleaved graphics framebuffer memory below the 16G
 * boundary is swapped with a region located at the bottom of memory so that
 * the GPU can use the interleaved region and thus two channels.
 */
static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
{
	u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr;

	if (pvt->fam == 0x10) {
		/* only revC3 and revE have that feature */
		if (pvt->model < 4 || (pvt->model < 0xa && pvt->stepping < 3))
			return sys_addr;
	}

	amd64_read_dct_pci_cfg(pvt, SWAP_INTLV_REG, &swap_reg);

	if (!(swap_reg & 0x1))
		return sys_addr;

	swap_base	= (swap_reg >> 3) & 0x7f;
	swap_limit	= (swap_reg >> 11) & 0x7f;
	rgn_size	= (swap_reg >> 20) & 0x7f;
	tmp_addr	= sys_addr >> 27;

	if (!(sys_addr >> 34) &&
	    (((tmp_addr >= swap_base) &&
	     (tmp_addr <= swap_limit)) ||
	     (tmp_addr < rgn_size)))
		return sys_addr ^ (u64)swap_base << 27;

	return sys_addr;
}

/* For a given @dram_range, check if @sys_addr falls within it. */
static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
				  u64 sys_addr, int *chan_sel)
{
	int cs_found = -EINVAL;
	u64 chan_addr;
	u32 dct_sel_base;
	u8 channel;
	bool high_range = false;

	u8 node_id    = dram_dst_node(pvt, range);
	u8 intlv_en   = dram_intlv_en(pvt, range);
	u32 intlv_sel = dram_intlv_sel(pvt, range);

	edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
		 range, sys_addr, get_dram_limit(pvt, range));

	if (dhar_valid(pvt) &&
	    dhar_base(pvt) <= sys_addr &&
	    sys_addr < BIT_64(32)) {
		amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
			   sys_addr);
		return -EINVAL;
	}

	if (intlv_en && (intlv_sel != ((sys_addr >> 12) & intlv_en)))
		return -EINVAL;

	sys_addr = f1x_swap_interleaved_region(pvt, sys_addr);

	dct_sel_base = dct_sel_baseaddr(pvt);

	/*
	 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
	 * select between DCT0 and DCT1.
	 */
	if (dct_high_range_enabled(pvt) &&
	    !dct_ganging_enabled(pvt) &&
	    ((sys_addr >> 27) >= (dct_sel_base >> 11)))
		high_range = true;

	channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en);

	chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr,
					  high_range, dct_sel_base);

	/* Remove node interleaving, see F1x120 */
	if (intlv_en)
		chan_addr = ((chan_addr >> (12 + hweight8(intlv_en))) << 12) |
			    (chan_addr & 0xfff);

	/* remove channel interleave */
	if (dct_interleave_enabled(pvt) &&
	    !dct_high_range_enabled(pvt) &&
	    !dct_ganging_enabled(pvt)) {

		if (dct_sel_interleave_addr(pvt) != 1) {
			if (dct_sel_interleave_addr(pvt) == 0x3)
				/* hash 9 */
				chan_addr = ((chan_addr >> 10) << 9) |
					     (chan_addr & 0x1ff);
			else
				/* A[6] or hash 6 */
				chan_addr = ((chan_addr >> 7) << 6) |
					     (chan_addr & 0x3f);
		} else
			/* A[12] */
			chan_addr = ((chan_addr >> 13) << 12) |
				     (chan_addr & 0xfff);
	}

	edac_dbg(1, "   Normalized DCT addr: 0x%llx\n", chan_addr);

	cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel);

	if (cs_found >= 0)
		*chan_sel = channel;

	return cs_found;
}

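/*
 * Illustrative example of the channel-interleave removal above: in the
 * A[12] case, chan_addr = ((chan_addr >> 13) << 12) | (chan_addr & 0xfff)
 * drops bit 12 (the channel select) and closes the gap, e.g.
 * 0x3abc -> (0x1 << 12) | 0xabc = 0x1abc.
 */
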
static int f15_m30h_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
				       u64 sys_addr, int *chan_sel)
{
	int cs_found = -EINVAL;
	int num_dcts_intlv = 0;
	u64 chan_addr, chan_offset;
	u64 dct_base, dct_limit;
	u32 dct_cont_base_reg, dct_cont_limit_reg, tmp;
	u8 channel, alias_channel, leg_mmio_hole, dct_sel, dct_offset_en;

	u64 dhar_offset		= f10_dhar_offset(pvt);
	u8 intlv_addr		= dct_sel_interleave_addr(pvt);
	u8 node_id		= dram_dst_node(pvt, range);
	u8 intlv_en		= dram_intlv_en(pvt, range);

	amd64_read_pci_cfg(pvt->F1, DRAM_CONT_BASE, &dct_cont_base_reg);
	amd64_read_pci_cfg(pvt->F1, DRAM_CONT_LIMIT, &dct_cont_limit_reg);

	dct_offset_en		= (u8) ((dct_cont_base_reg >> 3) & BIT(0));
	dct_sel			= (u8) ((dct_cont_base_reg >> 4) & 0x7);

	edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
		 range, sys_addr, get_dram_limit(pvt, range));

	if (!(get_dram_base(pvt, range)  <= sys_addr) &&
	    !(get_dram_limit(pvt, range) >= sys_addr))
		return -EINVAL;

	if (dhar_valid(pvt) &&
	    dhar_base(pvt) <= sys_addr &&
	    sys_addr < BIT_64(32)) {
		amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
			   sys_addr);
		return -EINVAL;
	}

	/* Verify sys_addr is within DCT Range. */
	dct_base  = (u64) dct_sel_baseaddr(pvt);
	dct_limit = (dct_cont_limit_reg >> 11) & 0x1FFF;

	if (!(dct_cont_base_reg & BIT(0)) &&
	    !(dct_base <= (sys_addr >> 27) &&
	      dct_limit >= (sys_addr >> 27)))
		return -EINVAL;

	/* Verify number of DCTs that participate in channel interleaving. */
	num_dcts_intlv = (int) hweight8(intlv_en);

	if (!(num_dcts_intlv % 2 == 0) || (num_dcts_intlv > 4))
		return -EINVAL;

	channel = f15_m30h_determine_channel(pvt, sys_addr, intlv_en,
					     num_dcts_intlv, dct_sel);

	/* Verify we stay within the MAX number of channels allowed */
	if (channel > 4 || channel < 0)
		return -EINVAL;

	leg_mmio_hole = (u8) (dct_cont_base_reg >> 1 & BIT(0));

	/* Get normalized DCT addr */
	if (leg_mmio_hole && (sys_addr >= BIT_64(32)))
		chan_offset = dhar_offset;
	else
		chan_offset = dct_base << 27;

	chan_addr = sys_addr - chan_offset;

	/* remove channel interleave */
	if (num_dcts_intlv == 2) {
		if (intlv_addr == 0x4)
			chan_addr = ((chan_addr >> 9) << 8) |
						(chan_addr & 0xff);
		else if (intlv_addr == 0x5)
			chan_addr = ((chan_addr >> 10) << 9) |
						(chan_addr & 0x1ff);
		else
			return -EINVAL;

	} else if (num_dcts_intlv == 4) {
		if (intlv_addr == 0x4)
			chan_addr = ((chan_addr >> 10) << 8) |
							(chan_addr & 0xff);
		else if (intlv_addr == 0x5)
			chan_addr = ((chan_addr >> 11) << 9) |
							(chan_addr & 0x1ff);
		else
			return -EINVAL;
	}

	if (dct_offset_en) {
		amd64_read_pci_cfg(pvt->F1,
				   DRAM_CONT_HIGH_OFF + (int) channel * 4,
				   &tmp);
		chan_addr += (u64) ((tmp >> 11) & 0xfff) << 27;
	}

	f15h_select_dct(pvt, channel);

	edac_dbg(1, "   Normalized DCT addr: 0x%llx\n", chan_addr);

	/*
	 * Find Chip select:
	 * if channel = 3, then alias it to 1. This is because, in F15 M30h,
	 * there is support for 4 DCTs, but only 2 are currently functional.
	 * They are DCT0 and DCT3. But we have read all registers of DCT3 into
	 * pvt->csels[1]. So we need to use '1' here to get correct info.
	 * Refer F15 M30h BKDG Section 2.10 and 2.10.3 for clarifications.
	 */
	alias_channel = (channel == 3) ? 1 : channel;

	cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, alias_channel);

	if (cs_found >= 0)
		*chan_sel = alias_channel;

	return cs_found;
}

static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt,
				       u64 sys_addr,
				       int *chan_sel)
{
	int cs_found = -EINVAL;
	unsigned range;

	for (range = 0; range < DRAM_RANGES; range++) {
		if (!dram_rw(pvt, range))
			continue;

		if (pvt->fam == 0x15 && pvt->model >= 0x30)
			cs_found = f15_m30h_match_to_this_node(pvt, range,
							       sys_addr,
							       chan_sel);

		else if ((get_dram_base(pvt, range)  <= sys_addr) &&
			 (get_dram_limit(pvt, range) >= sys_addr)) {
			cs_found = f1x_match_to_this_node(pvt, range,
							  sys_addr, chan_sel);
			if (cs_found >= 0)
				break;
		}
	}
	return cs_found;
}

/*
 * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
 * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
 *
 * The @sys_addr is usually an error address received from the hardware
 * (MCX_ADDR).
 */
static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
				     struct err_info *err)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	error_address_to_page_and_offset(sys_addr, err);

	err->csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &err->channel);
	if (err->csrow < 0) {
		err->err_code = ERR_CSROW;
		return;
	}

	/*
	 * We need the syndromes for channel detection only when we're
	 * ganged. Otherwise @chan should already contain the channel at
	 * this point.
	 */
	if (dct_ganging_enabled(pvt))
		err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
}

/*
 * debug routine to display the memory sizes of all logical DIMMs and their
 * CSROWs
 */
static void amd64_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
{
	int dimm, size0, size1;
	u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
	u32 dbam  = ctrl ? pvt->dbam1 : pvt->dbam0;

	if (pvt->fam == 0xf) {
		/* K8 families < revF not supported yet */
		if (pvt->ext_model < K8_REV_F)
			return;
		else
			WARN_ON(ctrl != 0);
	}

	dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1 : pvt->dbam0;
	dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->csels[1].csbases
						   : pvt->csels[0].csbases;

	edac_dbg(1, "F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
		 ctrl, dbam);

	edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);

	/* Dump memory sizes for DIMM and its CSROWs */
	for (dimm = 0; dimm < 4; dimm++) {

		size0 = 0;
		if (dcsb[dimm*2] & DCSB_CS_ENABLE)
			size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
						     DBAM_DIMM(dimm, dbam));

		size1 = 0;
		if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE)
			size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
						     DBAM_DIMM(dimm, dbam));

		amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
			   dimm * 2,     size0,
			   dimm * 2 + 1, size1);
	}
}

static struct amd64_family_type amd64_family_types[] = {
	[K8_CPUS] = {
		.ctl_name = "K8",
		.f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
		.f3_id = PCI_DEVICE_ID_AMD_K8_NB_MISC,
		.ops = {
			.early_channel_count	= k8_early_channel_count,
			.map_sysaddr_to_csrow	= k8_map_sysaddr_to_csrow,
			.dbam_to_cs		= k8_dbam_to_chip_select,
			.read_dct_pci_cfg	= k8_read_dct_pci_cfg,
		}
	},
	[F10_CPUS] = {
		.ctl_name = "F10h",
		.f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
		.f3_id = PCI_DEVICE_ID_AMD_10H_NB_MISC,
		.ops = {
			.early_channel_count	= f1x_early_channel_count,
			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
			.dbam_to_cs		= f10_dbam_to_chip_select,
			.read_dct_pci_cfg	= f10_read_dct_pci_cfg,
		}
	},
	[F15_CPUS] = {
		.ctl_name = "F15h",
		.f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1,
		.f3_id = PCI_DEVICE_ID_AMD_15H_NB_F3,
		.ops = {
			.early_channel_count	= f1x_early_channel_count,
			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
			.dbam_to_cs		= f15_dbam_to_chip_select,
			.read_dct_pci_cfg	= f15_read_dct_pci_cfg,
		}
	},
	[F15_M30H_CPUS] = {
		.ctl_name = "F15h_M30h",
		.f1_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1,
		.f3_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F3,
		.ops = {
			.early_channel_count	= f1x_early_channel_count,
			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
			.dbam_to_cs		= f16_dbam_to_chip_select,
			.read_dct_pci_cfg	= f15_read_dct_pci_cfg,
		}
	},
	[F16_CPUS] = {
		.ctl_name = "F16h",
		.f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1,
		.f3_id = PCI_DEVICE_ID_AMD_16H_NB_F3,
		.ops = {
			.early_channel_count	= f1x_early_channel_count,
			.map_sysaddr_to_csrow	= f1x_map_sysaddr_to_csrow,
			.dbam_to_cs		= f16_dbam_to_chip_select,
			.read_dct_pci_cfg	= f10_read_dct_pci_cfg,
		}
	},
};

/*
 * These are tables of eigenvectors (one per line) which can be used for the
 * construction of the syndrome tables. The modified syndrome search algorithm
 * uses those to find the symbol in error and thus the DIMM.
 *
 * Algorithm courtesy of Ross LaFetra from AMD.
 */
static const u16 x4_vectors[] = {
	0x2f57, 0x1afe, 0x66cc, 0xdd88,
	0x11eb, 0x3396, 0x7f4c, 0xeac8,
	0x0001, 0x0002, 0x0004, 0x0008,
	0x1013, 0x3032, 0x4044, 0x8088,
	0x106b, 0x30d6, 0x70fc, 0xe0a8,
	0x4857, 0xc4fe, 0x13cc, 0x3288,
	0x1ac5, 0x2f4a, 0x5394, 0xa1e8,
	0x1f39, 0x251e, 0xbd6c, 0x6bd8,
	0x15c1, 0x2a42, 0x89ac, 0x4758,
	0x2b03, 0x1602, 0x4f0c, 0xca08,
	0x1f07, 0x3a0e, 0x6b04, 0xbd08,
	0x8ba7, 0x465e, 0x244c, 0x1cc8,
	0x2b87, 0x164e, 0x642c, 0xdc18,
	0x40b9, 0x80de, 0x1094, 0x20e8,
	0x27db, 0x1eb6, 0x9dac, 0x7b58,
	0x11c1, 0x2242, 0x84ac, 0x4c58,
	0x1be5, 0x2d7a, 0x5e34, 0xa718,
	0x4b39, 0x8d1e, 0x14b4, 0x28d8,
	0x4c97, 0xc87e, 0x11fc, 0x33a8,
	0x8e97, 0x497e, 0x2ffc, 0x1aa8,
	0x16b3, 0x3d62, 0x4f34, 0x8518,
	0x1e2f, 0x391a, 0x5cac, 0xf858,
	0x1d9f, 0x3b7a, 0x572c, 0xfe18,
	0x15f5, 0x2a5a, 0x5264, 0xa3b8,
	0x1dbb, 0x3b66, 0x715c, 0xe3f8,
	0x4397, 0xc27e, 0x17fc, 0x3ea8,
	0x1617, 0x3d3e, 0x6464, 0xb8b8,
	0x23ff, 0x12aa, 0xab6c, 0x56d8,
	0x2dfb, 0x1ba6, 0x913c, 0x7328,
	0x185d, 0x2ca6, 0x7914, 0x9e28,
	0x171b, 0x3e36, 0x7d7c, 0xebe8,
	0x4199, 0x82ee, 0x19f4, 0x2e58,
	0x4807, 0xc40e, 0x130c, 0x3208,
	0x1905, 0x2e0a, 0x5804, 0xac08,
	0x213f, 0x132a, 0xadfc, 0x5ba8,
	0x19a9, 0x2efe, 0xb5cc, 0x6f88,
};

static const u16 x8_vectors[] = {
	0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
	0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
	0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
	0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80,
	0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780,
	0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080,
	0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080,
	0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080,
	0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80,
	0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580,
	0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880,
	0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280,
	0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180,
	0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580,
	0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280,
	0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180,
	0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080,
	0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
	0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
};

1873 static int decode_syndrome(u16 syndrome, const u16 *vectors, unsigned num_vecs,
1874 unsigned v_dim)
1875 {
1876 unsigned int i, err_sym;
1877
1878 for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
1879 u16 s = syndrome;
1880 unsigned v_idx = err_sym * v_dim;
1881 unsigned v_end = (err_sym + 1) * v_dim;
1882
1883 /* walk over all 16 bits of the syndrome */
1884 for (i = 1; i < (1U << 16); i <<= 1) {
1885
1886 /* if bit is set in that eigenvector... */
1887 if (v_idx < v_end && vectors[v_idx] & i) {
1888 u16 ev_comp = vectors[v_idx++];
1889
1890 /* ... and bit set in the modified syndrome, */
1891 if (s & i) {
1892 /* remove it. */
1893 s ^= ev_comp;
1894
1895 if (!s)
1896 return err_sym;
1897 }
1898
1899 } else if (s & i)
1900 /* can't get to zero, move to next symbol */
1901 break;
1902 }
1903 }
1904
1905 edac_dbg(0, "syndrome(%x) not found\n", syndrome);
1906 return -1;
1907 }
1908
1909 static int map_err_sym_to_channel(int err_sym, int sym_size)
1910 {
1911 if (sym_size == 4)
1912 switch (err_sym) {
1913 case 0x20:
1914 case 0x21:
1915 return 0;
1916 break;
1917 case 0x22:
1918 case 0x23:
1919 return 1;
1920 break;
1921 default:
1922 return err_sym >> 4;
1923 break;
1924 }
1925 /* x8 symbols */
1926 else
1927 switch (err_sym) {
1928 /* imaginary bits not in a DIMM */
1929 case 0x10:
1930 WARN(1, "Invalid error symbol: 0x%x\n",
1931 err_sym);
1932 return -1;
1933 break;
1934
1935 case 0x11:
1936 return 0;
1937 break;
1938 case 0x12:
1939 return 1;
1940 break;
1941 default:
1942 return err_sym >> 3;
1943 break;
1944 }
1945 return -1;
1946 }
1947
1948 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
1949 {
1950 struct amd64_pvt *pvt = mci->pvt_info;
1951 int err_sym = -1;
1952
1953 if (pvt->ecc_sym_sz == 8)
1954 err_sym = decode_syndrome(syndrome, x8_vectors,
1955 ARRAY_SIZE(x8_vectors),
1956 pvt->ecc_sym_sz);
1957 else if (pvt->ecc_sym_sz == 4)
1958 err_sym = decode_syndrome(syndrome, x4_vectors,
1959 ARRAY_SIZE(x4_vectors),
1960 pvt->ecc_sym_sz);
1961 else {
1962 amd64_warn("Illegal ECC symbol size: %u\n", pvt->ecc_sym_sz);
1963 return err_sym;
1964 }
1965
1966 return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz);
1967 }
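
/*
 * Worked example (illustrative only, not driver code): decoding syndrome
 * 0x0001 against the x4 table above. The vectors are grouped v_dim entries
 * per error symbol, so entries 8..11 ({0x0001, 0x0002, 0x0004, 0x0008})
 * form err_sym 2, and the bit walk reduces s = 0x0001 to zero on that row:
 *
 *	err_sym = decode_syndrome(0x0001, x4_vectors,
 *				  ARRAY_SIZE(x4_vectors), 4);
 *
 * This returns 2, and map_err_sym_to_channel(2, 4) takes the default case,
 * 2 >> 4 == 0, i.e. channel 0. A syndrome which zeroes out on no row makes
 * decode_syndrome() return -1 instead.
 */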
1968
1969 static void __log_bus_error(struct mem_ctl_info *mci, struct err_info *err,
1970 u8 ecc_type)
1971 {
1972 enum hw_event_mc_err_type err_type;
1973 const char *string;
1974
1975 if (ecc_type == 2)
1976 err_type = HW_EVENT_ERR_CORRECTED;
1977 else if (ecc_type == 1)
1978 err_type = HW_EVENT_ERR_UNCORRECTED;
1979 else {
1980 WARN(1, "Something is rotten in the state of Denmark.\n");
1981 return;
1982 }
1983
1984 switch (err->err_code) {
1985 case DECODE_OK:
1986 string = "";
1987 break;
1988 case ERR_NODE:
1989 string = "Failed to map error addr to a node";
1990 break;
1991 case ERR_CSROW:
1992 string = "Failed to map error addr to a csrow";
1993 break;
1994 case ERR_CHANNEL:
1995 string = "unknown syndrome - possible error reporting race";
1996 break;
1997 default:
1998 string = "WTF error";
1999 break;
2000 }
2001
2002 edac_mc_handle_error(err_type, mci, 1,
2003 err->page, err->offset, err->syndrome,
2004 err->csrow, err->channel, -1,
2005 string, "");
2006 }
2007
2008 static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
2009 struct mce *m)
2010 {
2011 struct amd64_pvt *pvt = mci->pvt_info;
2012 u8 ecc_type = (m->status >> 45) & 0x3;
2013 u8 xec = XEC(m->status, 0x1f);
2014 u16 ec = EC(m->status);
2015 u64 sys_addr;
2016 struct err_info err;
2017
2018 /* Bail out early if this was an 'observed' error */
2019 if (PP(ec) == NBSL_PP_OBS)
2020 return;
2021
2022 /* Do only ECC errors */
2023 if (xec && xec != F10_NBSL_EXT_ERR_ECC)
2024 return;
2025
2026 memset(&err, 0, sizeof(err));
2027
2028 sys_addr = get_error_address(pvt, m);
2029
2030 if (ecc_type == 2)
2031 err.syndrome = extract_syndrome(m->status);
2032
2033 pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, &err);
2034
2035 __log_bus_error(mci, &err, ecc_type);
2036 }
2037
2038 void amd64_decode_bus_error(int node_id, struct mce *m)
2039 {
2040 __amd64_decode_bus_error(mcis[node_id], m);
2041 }
2042
2043 /*
2044 * Use pvt->F2 which contains the F2 CPU PCI device to get the related
2045 * F1 (AddrMap) and F3 (Misc) devices. Return negative value on error.
2046 */
2047 static int reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 f1_id, u16 f3_id)
2048 {
2049 /* Reserve the ADDRESS MAP Device */
2050 pvt->F1 = pci_get_related_function(pvt->F2->vendor, f1_id, pvt->F2);
2051 if (!pvt->F1) {
2052 amd64_err("error address map device not found: "
2053 "vendor %x device 0x%x (broken BIOS?)\n",
2054 PCI_VENDOR_ID_AMD, f1_id);
2055 return -ENODEV;
2056 }
2057
2058 /* Reserve the MISC Device */
2059 pvt->F3 = pci_get_related_function(pvt->F2->vendor, f3_id, pvt->F2);
2060 if (!pvt->F3) {
2061 pci_dev_put(pvt->F1);
2062 pvt->F1 = NULL;
2063
2064 amd64_err("error F3 device not found: "
2065 "vendor %x device 0x%x (broken BIOS?)\n",
2066 PCI_VENDOR_ID_AMD, f3_id);
2067
2068 return -ENODEV;
2069 }
2070 edac_dbg(1, "F1: %s\n", pci_name(pvt->F1));
2071 edac_dbg(1, "F2: %s\n", pci_name(pvt->F2));
2072 edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
2073
2074 return 0;
2075 }
2076
2077 static void free_mc_sibling_devs(struct amd64_pvt *pvt)
2078 {
2079 pci_dev_put(pvt->F1);
2080 pci_dev_put(pvt->F3);
2081 }
2082
2083 /*
2084 * Retrieve the hardware registers of the memory controller (this includes the
2085 * 'Address Map' and 'Misc' device regs)
2086 */
2087 static void read_mc_regs(struct amd64_pvt *pvt)
2088 {
2089 unsigned range;
2090 u64 msr_val;
2091 u32 tmp;
2092
2093 /*
2094 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
2095 * those are Read-As-Zero
2096 */
2097 rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
2098 edac_dbg(0, " TOP_MEM: 0x%016llx\n", pvt->top_mem);
2099
2100 /* check first whether TOP_MEM2 is enabled */
2101 rdmsrl(MSR_K8_SYSCFG, msr_val);
2102 if (msr_val & (1U << 21)) {
2103 rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
2104 edac_dbg(0, " TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
2105 } else
2106 edac_dbg(0, " TOP_MEM2 disabled\n");
2107
2108 amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);
2109
2110 read_dram_ctl_register(pvt);
2111
2112 for (range = 0; range < DRAM_RANGES; range++) {
2113 u8 rw;
2114
2115 /* read settings for this DRAM range */
2116 read_dram_base_limit_regs(pvt, range);
2117
2118 rw = dram_rw(pvt, range);
2119 if (!rw)
2120 continue;
2121
2122 edac_dbg(1, " DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
2123 range,
2124 get_dram_base(pvt, range),
2125 get_dram_limit(pvt, range));
2126
2127 edac_dbg(1, " IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
2128 dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
2129 (rw & 0x1) ? "R" : "-",
2130 (rw & 0x2) ? "W" : "-",
2131 dram_intlv_sel(pvt, range),
2132 dram_dst_node(pvt, range));
2133 }
2134
2135 read_dct_base_mask(pvt);
2136
2137 amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
2138 amd64_read_dct_pci_cfg(pvt, DBAM0, &pvt->dbam0);
2139
2140 amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);
2141
2142 amd64_read_dct_pci_cfg(pvt, DCLR0, &pvt->dclr0);
2143 amd64_read_dct_pci_cfg(pvt, DCHR0, &pvt->dchr0);
2144
2145 if (!dct_ganging_enabled(pvt)) {
2146 amd64_read_dct_pci_cfg(pvt, DCLR1, &pvt->dclr1);
2147 amd64_read_dct_pci_cfg(pvt, DCHR1, &pvt->dchr1);
2148 }
2149
2150 pvt->ecc_sym_sz = 4;
2151
2152 if (pvt->fam >= 0x10) {
2153 amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
2154 if (pvt->fam != 0x16)
2155 /* F16h has only DCT0 */
2156 amd64_read_dct_pci_cfg(pvt, DBAM1, &pvt->dbam1);
2157
2158 /* F10h, revD and later can do x8 ECC too */
2159 if ((pvt->fam > 0x10 || pvt->model > 7) && tmp & BIT(25))
2160 pvt->ecc_sym_sz = 8;
2161 }
2162 dump_misc_regs(pvt);
2163 }
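
/*
 * For instance (illustrative), on a family 0x15 part whose BIOS has set bit
 * 25 of EXT_NB_MCA_CFG, the logic above leaves pvt->ecc_sym_sz == 8, which
 * later steers get_channel_from_ecc_syndrome() to the x8_vectors table.
 */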
2164
2165 /*
2166 * NOTE: CPU Revision Dependent code
2167 *
2168 * Input:
2169 * @csrow_nr ChipSelect Row Number (0..NUM_CHIPSELECTS-1)
2170 * @pvt per-node private pointer to -->
2171 * DRAM Bank Address mapping register
2172 * node_id
2173 * DCL register where dual_channel_active is
2174 *
2175 * The DBAM register consists of four 4-bit fields, each covering a pair of CSROWs:
2176 *
2177 * Bits: CSROWs
2178 * 0-3 CSROWs 0 and 1
2179 * 4-7 CSROWs 2 and 3
2180 * 8-11 CSROWs 4 and 5
2181 * 12-15 CSROWs 6 and 7
2182 *
2183 * Each field's value ranges from 0 to 15.
2184 * The meaning of the values depends on CPU revision and dual-channel state;
2185 * see the relevant BKDG for more info.
2186 *
2187 * The memory controller provides for a total of only 8 CSROWs in its current
2188 * architecture. Each "pair" of CSROWs normally represents just one DIMM in
2189 * single-channel mode, or two DIMMs in dual-channel mode.
2190 *
2191 * The following code collapses the various CSROW tables based on CPU
2192 * revision.
2193 *
2194 * Returns:
2195 * The number of PAGE_SIZE pages that the specified CSROW
2196 * encompasses
2197 *
2198 */
2199 static u32 amd64_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
2200 {
2201 u32 cs_mode, nr_pages;
2202 u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;
2203
2204
2205 /*
2206 * The math on this doesn't look right on the surface because x/2*4 can
2207 * be simplified to x*2, but this expression makes use of the fact that
2208 * it is integer math, where 1/2=0. This intermediate value becomes the
2209 * number of bits to shift the DBAM register to extract the proper CSROW
2210 * field.
2211 */
2212 cs_mode = DBAM_DIMM(csrow_nr / 2, dbam);
2213
2214 nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode) << (20 - PAGE_SHIFT);
2215
2216 edac_dbg(0, "csrow: %d, channel: %d, DBAM idx: %d\n",
2217 csrow_nr, dct, cs_mode);
2218 edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);
2219
2220 return nr_pages;
2221 }
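
/*
 * Worked example (illustrative, register values assumed): with
 * dbam == 0x00003210 and csrow_nr == 3, csrow_nr / 2 == 1 selects the
 * second 4-bit field, so DBAM_DIMM() shifts right by 1 * 4 = 4 bits:
 *
 *	cs_mode = (0x00003210 >> 4) & 0xF;	(yields 1)
 *
 * If ->dbam_to_cs() then reports, say, 2048 MiB for that mode, the shift by
 * (20 - PAGE_SHIFT) converts MiB to pages: with 4K pages,
 * nr_pages = 2048 << 8 == 524288 pages per channel.
 */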
2222
2223 /*
2224 * Initialize the array of csrow attribute instances, based on the values
2225 * from pci config hardware registers.
2226 */
2227 static int init_csrows(struct mem_ctl_info *mci)
2228 {
2229 struct amd64_pvt *pvt = mci->pvt_info;
2230 struct csrow_info *csrow;
2231 struct dimm_info *dimm;
2232 enum edac_type edac_mode;
2233 enum mem_type mtype;
2234 int i, j, empty = 1;
2235 int nr_pages = 0;
2236 u32 val;
2237
2238 amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
2239
2240 pvt->nbcfg = val;
2241
2242 edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
2243 pvt->mc_node_id, val,
2244 !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
2245
2246 /*
2247 * We iterate over DCT0 here but we look at DCT1 in parallel, if needed.
2248 */
2249 for_each_chip_select(i, 0, pvt) {
2250 bool row_dct0 = !!csrow_enabled(i, 0, pvt);
2251 bool row_dct1 = false;
2252
2253 if (pvt->fam != 0xf)
2254 row_dct1 = !!csrow_enabled(i, 1, pvt);
2255
2256 if (!row_dct0 && !row_dct1)
2257 continue;
2258
2259 csrow = mci->csrows[i];
2260 empty = 0;
2261
2262 edac_dbg(1, "MC node: %d, csrow: %d\n",
2263 pvt->mc_node_id, i);
2264
2265 if (row_dct0) {
2266 nr_pages = amd64_csrow_nr_pages(pvt, 0, i);
2267 csrow->channels[0]->dimm->nr_pages = nr_pages;
2268 }
2269
2270 /* K8 has only one DCT */
2271 if (pvt->fam != 0xf && row_dct1) {
2272 int row_dct1_pages = amd64_csrow_nr_pages(pvt, 1, i);
2273
2274 csrow->channels[1]->dimm->nr_pages = row_dct1_pages;
2275 nr_pages += row_dct1_pages;
2276 }
2277
2278 mtype = amd64_determine_memory_type(pvt, i);
2279
2280 edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages);
2281
2282 /*
2283 * determine whether CHIPKILL or JUST ECC or NO ECC is operating
2284 */
2285 if (pvt->nbcfg & NBCFG_ECC_ENABLE)
2286 edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL) ?
2287 EDAC_S4ECD4ED : EDAC_SECDED;
2288 else
2289 edac_mode = EDAC_NONE;
2290
2291 for (j = 0; j < pvt->channel_count; j++) {
2292 dimm = csrow->channels[j]->dimm;
2293 dimm->mtype = mtype;
2294 dimm->edac_mode = edac_mode;
2295 }
2296 }
2297
2298 return empty;
2299 }
2300
2301 /* get all cores on this DCT */
2302 static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, u16 nid)
2303 {
2304 int cpu;
2305
2306 for_each_online_cpu(cpu)
2307 if (amd_get_nb_id(cpu) == nid)
2308 cpumask_set_cpu(cpu, mask);
2309 }
2310
2311 /* check MCG_CTL on all the cpus on this node */
2312 static bool amd64_nb_mce_bank_enabled_on_node(u16 nid)
2313 {
2314 cpumask_var_t mask;
2315 int cpu, nbe;
2316 bool ret = false;
2317
2318 if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
2319 amd64_warn("%s: Error allocating mask\n", __func__);
2320 return false;
2321 }
2322
2323 get_cpus_on_this_dct_cpumask(mask, nid);
2324
2325 rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
2326
2327 for_each_cpu(cpu, mask) {
2328 struct msr *reg = per_cpu_ptr(msrs, cpu);
2329 nbe = reg->l & MSR_MCGCTL_NBE;
2330
2331 edac_dbg(0, "core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
2332 cpu, reg->q,
2333 (nbe ? "enabled" : "disabled"));
2334
2335 if (!nbe)
2336 goto out;
2337 }
2338 ret = true;
2339
2340 out:
2341 free_cpumask_var(mask);
2342 return ret;
2343 }
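
/*
 * Illustrative check from userspace (assumes the msr module and msr-tools
 * are available): MSR_IA32_MCG_CTL is MSR 0x017b and MSR_MCGCTL_NBE is bit
 * 4, so the per-core enable this function aggregates can be inspected with:
 *
 *	rdmsr -p 0 0x17b	(bit 4 set => NB MCE bank enabled on core 0)
 */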
2344
2345 static int toggle_ecc_err_reporting(struct ecc_settings *s, u16 nid, bool on)
2346 {
2347 cpumask_var_t cmask;
2348 int cpu;
2349
2350 if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
2351 amd64_warn("%s: error allocating mask\n", __func__);
2352 return -ENOMEM;
2353 }
2354
2355 get_cpus_on_this_dct_cpumask(cmask, nid);
2356
2357 rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
2358
2359 for_each_cpu(cpu, cmask) {
2360
2361 struct msr *reg = per_cpu_ptr(msrs, cpu);
2362
2363 if (on) {
2364 if (reg->l & MSR_MCGCTL_NBE)
2365 s->flags.nb_mce_enable = 1;
2366
2367 reg->l |= MSR_MCGCTL_NBE;
2368 } else {
2369 /*
2370 * Turn off NB MCE reporting only when it was off before
2371 */
2372 if (!s->flags.nb_mce_enable)
2373 reg->l &= ~MSR_MCGCTL_NBE;
2374 }
2375 }
2376 wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
2377
2378 free_cpumask_var(cmask);
2379
2380 return 0;
2381 }
2382
2383 static bool enable_ecc_error_reporting(struct ecc_settings *s, u16 nid,
2384 struct pci_dev *F3)
2385 {
2386 bool ret = true;
2387 u32 value, mask = 0x3; /* UECC/CECC enable */
2388
2389 if (toggle_ecc_err_reporting(s, nid, ON)) {
2390 amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
2391 return false;
2392 }
2393
2394 amd64_read_pci_cfg(F3, NBCTL, &value);
2395
2396 s->old_nbctl = value & mask;
2397 s->nbctl_valid = true;
2398
2399 value |= mask;
2400 amd64_write_pci_cfg(F3, NBCTL, value);
2401
2402 amd64_read_pci_cfg(F3, NBCFG, &value);
2403
2404 edac_dbg(0, "1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
2405 nid, value, !!(value & NBCFG_ECC_ENABLE));
2406
2407 if (!(value & NBCFG_ECC_ENABLE)) {
2408 amd64_warn("DRAM ECC disabled on this node, enabling...\n");
2409
2410 s->flags.nb_ecc_prev = 0;
2411
2412 /* Attempt to turn on DRAM ECC Enable */
2413 value |= NBCFG_ECC_ENABLE;
2414 amd64_write_pci_cfg(F3, NBCFG, value);
2415
2416 amd64_read_pci_cfg(F3, NBCFG, &value);
2417
2418 if (!(value & NBCFG_ECC_ENABLE)) {
2419 amd64_warn("Hardware rejected DRAM ECC enable,"
2420 "check memory DIMM configuration.\n");
2421 ret = false;
2422 } else {
2423 amd64_info("Hardware accepted DRAM ECC Enable\n");
2424 }
2425 } else {
2426 s->flags.nb_ecc_prev = 1;
2427 }
2428
2429 edac_dbg(0, "2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
2430 nid, value, !!(value & NBCFG_ECC_ENABLE));
2431
2432 return ret;
2433 }
2434
2435 static void restore_ecc_error_reporting(struct ecc_settings *s, u16 nid,
2436 struct pci_dev *F3)
2437 {
2438 u32 value, mask = 0x3; /* UECC/CECC enable */
2439
2440
2441 if (!s->nbctl_valid)
2442 return;
2443
2444 amd64_read_pci_cfg(F3, NBCTL, &value);
2445 value &= ~mask;
2446 value |= s->old_nbctl;
2447
2448 amd64_write_pci_cfg(F3, NBCTL, value);
2449
2450 /* restore previous BIOS DRAM ECC "off" setting we force-enabled */
2451 if (!s->flags.nb_ecc_prev) {
2452 amd64_read_pci_cfg(F3, NBCFG, &value);
2453 value &= ~NBCFG_ECC_ENABLE;
2454 amd64_write_pci_cfg(F3, NBCFG, value);
2455 }
2456
2457 /* restore the NB Enable MCGCTL bit */
2458 if (toggle_ecc_err_reporting(s, nid, OFF))
2459 amd64_warn("Error restoring NB MCGCTL settings!\n");
2460 }
2461
2462 /*
2463 * EDAC requires that the BIOS have ECC enabled before
2464 * taking over the processing of ECC errors. A command line
2465 * option can be used to force-enable hardware ECC later in
2466 * enable_ecc_error_reporting().
2467 */
2468 static const char *ecc_msg =
2469 "ECC disabled in the BIOS or no ECC capability, module will not load.\n"
2470 " Either enable ECC checking or force module loading by setting "
2471 "'ecc_enable_override'.\n"
2472 " (Note that use of the override may cause unknown side effects.)\n";
2473
2474 static bool ecc_enabled(struct pci_dev *F3, u16 nid)
2475 {
2476 u32 value;
2477 u8 ecc_en = 0;
2478 bool nb_mce_en = false;
2479
2480 amd64_read_pci_cfg(F3, NBCFG, &value);
2481
2482 ecc_en = !!(value & NBCFG_ECC_ENABLE);
2483 amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled"));
2484
2485 nb_mce_en = amd64_nb_mce_bank_enabled_on_node(nid);
2486 if (!nb_mce_en)
2487 amd64_notice("NB MCE bank disabled, set MSR "
2488 "0x%08x[4] on node %d to enable.\n",
2489 MSR_IA32_MCG_CTL, nid);
2490
2491 if (!ecc_en || !nb_mce_en) {
2492 amd64_notice("%s", ecc_msg);
2493 return false;
2494 }
2495 return true;
2496 }
2497
2498 static int set_mc_sysfs_attrs(struct mem_ctl_info *mci)
2499 {
2500 struct amd64_pvt *pvt = mci->pvt_info;
2501 int rc;
2502
2503 rc = amd64_create_sysfs_dbg_files(mci);
2504 if (rc < 0)
2505 return rc;
2506
2507 if (pvt->fam >= 0x10) {
2508 rc = amd64_create_sysfs_inject_files(mci);
2509 if (rc < 0)
2510 return rc;
2511 }
2512
2513 return 0;
2514 }
2515
2516 static void del_mc_sysfs_attrs(struct mem_ctl_info *mci)
2517 {
2518 struct amd64_pvt *pvt = mci->pvt_info;
2519
2520 amd64_remove_sysfs_dbg_files(mci);
2521
2522 if (pvt->fam >= 0x10)
2523 amd64_remove_sysfs_inject_files(mci);
2524 }
2525
2526 static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
2527 struct amd64_family_type *fam)
2528 {
2529 struct amd64_pvt *pvt = mci->pvt_info;
2530
2531 mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
2532 mci->edac_ctl_cap = EDAC_FLAG_NONE;
2533
2534 if (pvt->nbcap & NBCAP_SECDED)
2535 mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
2536
2537 if (pvt->nbcap & NBCAP_CHIPKILL)
2538 mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
2539
2540 mci->edac_cap = amd64_determine_edac_cap(pvt);
2541 mci->mod_name = EDAC_MOD_STR;
2542 mci->mod_ver = EDAC_AMD64_VERSION;
2543 mci->ctl_name = fam->ctl_name;
2544 mci->dev_name = pci_name(pvt->F2);
2545 mci->ctl_page_to_phys = NULL;
2546
2547 /* memory scrubber interface */
2548 mci->set_sdram_scrub_rate = amd64_set_scrub_rate;
2549 mci->get_sdram_scrub_rate = amd64_get_scrub_rate;
2550 }
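
/*
 * The two hooks above expose the hardware scrubber through EDAC's generic
 * sysfs attribute; on a typical system (path assumed, it can vary by kernel
 * version) the rate can be queried or set with e.g.:
 *
 *	cat /sys/devices/system/edac/mc/mc0/sdram_scrub_rate
 *	echo 400000000 > /sys/devices/system/edac/mc/mc0/sdram_scrub_rate
 */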
2551
2552 /*
2553 * returns a pointer to the family descriptor on success, NULL otherwise.
2554 */
2555 static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt)
2556 {
2557 struct amd64_family_type *fam_type = NULL;
2558
2559 pvt->ext_model = boot_cpu_data.x86_model >> 4;
2560 pvt->stepping = boot_cpu_data.x86_mask;
2561 pvt->model = boot_cpu_data.x86_model;
2562 pvt->fam = boot_cpu_data.x86;
2563
2564 switch (pvt->fam) {
2565 case 0xf:
2566 fam_type = &amd64_family_types[K8_CPUS];
2567 pvt->ops = &amd64_family_types[K8_CPUS].ops;
2568 break;
2569
2570 case 0x10:
2571 fam_type = &amd64_family_types[F10_CPUS];
2572 pvt->ops = &amd64_family_types[F10_CPUS].ops;
2573 break;
2574
2575 case 0x15:
2576 if (pvt->model == 0x30) {
2577 fam_type = &amd64_family_types[F15_M30H_CPUS];
2578 pvt->ops = &amd64_family_types[F15_M30H_CPUS].ops;
2579 break;
2580 }
2581
2582 fam_type = &amd64_family_types[F15_CPUS];
2583 pvt->ops = &amd64_family_types[F15_CPUS].ops;
2584 break;
2585
2586 case 0x16:
2587 fam_type = &amd64_family_types[F16_CPUS];
2588 pvt->ops = &amd64_family_types[F16_CPUS].ops;
2589 break;
2590
2591 default:
2592 amd64_err("Unsupported family!\n");
2593 return NULL;
2594 }
2595
2596 amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name,
2597 (pvt->fam == 0xf ?
2598 (pvt->ext_model >= K8_REV_F ? "revF or later "
2599 : "revE or earlier ")
2600 : ""), pvt->mc_node_id);
2601 return fam_type;
2602 }
2603
2604 static int amd64_init_one_instance(struct pci_dev *F2)
2605 {
2606 struct amd64_pvt *pvt = NULL;
2607 struct amd64_family_type *fam_type = NULL;
2608 struct mem_ctl_info *mci = NULL;
2609 struct edac_mc_layer layers[2];
2610 int err = 0, ret;
2611 u16 nid = amd_get_node_id(F2);
2612
2613 ret = -ENOMEM;
2614 pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
2615 if (!pvt)
2616 goto err_ret;
2617
2618 pvt->mc_node_id = nid;
2619 pvt->F2 = F2;
2620
2621 ret = -EINVAL;
2622 fam_type = amd64_per_family_init(pvt);
2623 if (!fam_type)
2624 goto err_free;
2625
2626 ret = -ENODEV;
2627 err = reserve_mc_sibling_devs(pvt, fam_type->f1_id, fam_type->f3_id);
2628 if (err)
2629 goto err_free;
2630
2631 read_mc_regs(pvt);
2632
2633 /*
2634 * We need to determine how many memory channels there are. Then use
2635 * that information for calculating the size of the dynamic instance
2636 * tables in the 'mci' structure.
2637 */
2638 ret = -EINVAL;
2639 pvt->channel_count = pvt->ops->early_channel_count(pvt);
2640 if (pvt->channel_count < 0)
2641 goto err_siblings;
2642
2643 ret = -ENOMEM;
2644 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
2645 layers[0].size = pvt->csels[0].b_cnt;
2646 layers[0].is_virt_csrow = true;
2647 layers[1].type = EDAC_MC_LAYER_CHANNEL;
2648
2649 /*
2650 * Always allocate two channels since we can have setups with DIMMs on
2651 * only one channel. Also, this simplifies handling later for the price
2652 * of a couple of KBs tops.
2653 */
2654 layers[1].size = 2;
2655 layers[1].is_virt_csrow = false;
2656
2657 mci = edac_mc_alloc(nid, ARRAY_SIZE(layers), layers, 0);
2658 if (!mci)
2659 goto err_siblings;
2660
2661 mci->pvt_info = pvt;
2662 mci->pdev = &pvt->F2->dev;
2663
2664 setup_mci_misc_attrs(mci, fam_type);
2665
2666 if (init_csrows(mci))
2667 mci->edac_cap = EDAC_FLAG_NONE;
2668
2669 ret = -ENODEV;
2670 if (edac_mc_add_mc(mci)) {
2671 edac_dbg(1, "failed edac_mc_add_mc()\n");
2672 goto err_add_mc;
2673 }
2674 if (set_mc_sysfs_attrs(mci)) {
2675 edac_dbg(1, "failed edac_mc_add_mc()\n");
2676 goto err_add_sysfs;
2677 }
2678
2679 /* register this instance with the EDAC MCE decoder */
2680 if (report_gart_errors)
2681 amd_report_gart_errors(true);
2682
2683 amd_register_ecc_decoder(amd64_decode_bus_error);
2684
2685 mcis[nid] = mci;
2686
2687 atomic_inc(&drv_instances);
2688
2689 return 0;
2690
2691 err_add_sysfs:
2692 edac_mc_del_mc(mci->pdev);
2693 err_add_mc:
2694 edac_mc_free(mci);
2695
2696 err_siblings:
2697 free_mc_sibling_devs(pvt);
2698
2699 err_free:
2700 kfree(pvt);
2701
2702 err_ret:
2703 return ret;
2704 }
2705
2706 static int amd64_probe_one_instance(struct pci_dev *pdev,
2707 const struct pci_device_id *mc_type)
2708 {
2709 u16 nid = amd_get_node_id(pdev);
2710 struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
2711 struct ecc_settings *s;
2712 int ret = 0;
2713
2714 ret = pci_enable_device(pdev);
2715 if (ret < 0) {
2716 edac_dbg(0, "ret=%d\n", ret);
2717 return -EIO;
2718 }
2719
2720 ret = -ENOMEM;
2721 s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL);
2722 if (!s)
2723 goto err_out;
2724
2725 ecc_stngs[nid] = s;
2726
2727 if (!ecc_enabled(F3, nid)) {
2728 ret = -ENODEV;
2729
2730 if (!ecc_enable_override)
2731 goto err_enable;
2732
2733 amd64_warn("Forcing ECC on!\n");
2734
2735 if (!enable_ecc_error_reporting(s, nid, F3))
2736 goto err_enable;
2737 }
2738
2739 ret = amd64_init_one_instance(pdev);
2740 if (ret < 0) {
2741 amd64_err("Error probing instance: %d\n", nid);
2742 restore_ecc_error_reporting(s, nid, F3);
2743 }
2744
2745 return ret;
2746
2747 err_enable:
2748 kfree(s);
2749 ecc_stngs[nid] = NULL;
2750
2751 err_out:
2752 return ret;
2753 }
2754
2755 static void amd64_remove_one_instance(struct pci_dev *pdev)
2756 {
2757 struct mem_ctl_info *mci;
2758 struct amd64_pvt *pvt;
2759 u16 nid = amd_get_node_id(pdev);
2760 struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
2761 struct ecc_settings *s = ecc_stngs[nid];
2762
2763 mci = find_mci_by_dev(&pdev->dev);
2764 WARN_ON(!mci);
2765
2766 del_mc_sysfs_attrs(mci);
2767 /* Remove from EDAC CORE tracking list */
2768 mci = edac_mc_del_mc(&pdev->dev);
2769 if (!mci)
2770 return;
2771
2772 pvt = mci->pvt_info;
2773
2774 restore_ecc_error_reporting(s, nid, F3);
2775
2776 free_mc_sibling_devs(pvt);
2777
2778 /* unregister from EDAC MCE */
2779 amd_report_gart_errors(false);
2780 amd_unregister_ecc_decoder(amd64_decode_bus_error);
2781
2782 kfree(ecc_stngs[nid]);
2783 ecc_stngs[nid] = NULL;
2784
2785 /* Free the EDAC CORE resources */
2786 mci->pvt_info = NULL;
2787 mcis[nid] = NULL;
2788
2789 kfree(pvt);
2790 edac_mc_free(mci);
2791 }
2792
2793 /*
2794 * This table is part of the interface for loading drivers for PCI devices. The
2795 * PCI core identifies what devices are on a system during boot, and then
2796 * consults this table to see whether this driver handles a given device.
2797 */
2798 static DEFINE_PCI_DEVICE_TABLE(amd64_pci_table) = {
2799 {
2800 .vendor = PCI_VENDOR_ID_AMD,
2801 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
2802 .subvendor = PCI_ANY_ID,
2803 .subdevice = PCI_ANY_ID,
2804 .class = 0,
2805 .class_mask = 0,
2806 },
2807 {
2808 .vendor = PCI_VENDOR_ID_AMD,
2809 .device = PCI_DEVICE_ID_AMD_10H_NB_DRAM,
2810 .subvendor = PCI_ANY_ID,
2811 .subdevice = PCI_ANY_ID,
2812 .class = 0,
2813 .class_mask = 0,
2814 },
2815 {
2816 .vendor = PCI_VENDOR_ID_AMD,
2817 .device = PCI_DEVICE_ID_AMD_15H_NB_F2,
2818 .subvendor = PCI_ANY_ID,
2819 .subdevice = PCI_ANY_ID,
2820 .class = 0,
2821 .class_mask = 0,
2822 },
2823 {
2824 .vendor = PCI_VENDOR_ID_AMD,
2825 .device = PCI_DEVICE_ID_AMD_15H_M30H_NB_F2,
2826 .subvendor = PCI_ANY_ID,
2827 .subdevice = PCI_ANY_ID,
2828 .class = 0,
2829 .class_mask = 0,
2830 },
2831 {
2832 .vendor = PCI_VENDOR_ID_AMD,
2833 .device = PCI_DEVICE_ID_AMD_16H_NB_F2,
2834 .subvendor = PCI_ANY_ID,
2835 .subdevice = PCI_ANY_ID,
2836 .class = 0,
2837 .class_mask = 0,
2838 },
2839
2840 {0, }
2841 };
2842 MODULE_DEVICE_TABLE(pci, amd64_pci_table);
2843
2844 static struct pci_driver amd64_pci_driver = {
2845 .name = EDAC_MOD_STR,
2846 .probe = amd64_probe_one_instance,
2847 .remove = amd64_remove_one_instance,
2848 .id_table = amd64_pci_table,
2849 };
2850
2851 static void setup_pci_device(void)
2852 {
2853 struct mem_ctl_info *mci;
2854 struct amd64_pvt *pvt;
2855
2856 if (amd64_ctl_pci)
2857 return;
2858
2859 mci = mcis[0];
2860 if (mci) {
2861
2862 pvt = mci->pvt_info;
2863 amd64_ctl_pci =
2864 edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);
2865
2866 if (!amd64_ctl_pci) {
2867 pr_warning("%s(): Unable to create PCI control\n",
2868 __func__);
2869
2870 pr_warning("%s(): PCI error report via EDAC not set\n",
2871 __func__);
2872 }
2873 }
2874 }
2875
2876 static int __init amd64_edac_init(void)
2877 {
2878 int err = -ENODEV;
2879
2880 printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION);
2881
2882 opstate_init();
2883
2884 if (amd_cache_northbridges() < 0)
2885 goto err_ret;
2886
2887 err = -ENOMEM;
2888 mcis = kcalloc(amd_nb_num(), sizeof(mcis[0]), GFP_KERNEL);
2889 ecc_stngs = kcalloc(amd_nb_num(), sizeof(ecc_stngs[0]), GFP_KERNEL);
2890 if (!(mcis && ecc_stngs))
2891 goto err_free;
2892
2893 msrs = msrs_alloc();
2894 if (!msrs)
2895 goto err_free;
2896
2897 err = pci_register_driver(&amd64_pci_driver);
2898 if (err)
2899 goto err_pci;
2900
2901 err = -ENODEV;
2902 if (!atomic_read(&drv_instances))
2903 goto err_no_instances;
2904
2905 setup_pci_device();
2906 return 0;
2907
2908 err_no_instances:
2909 pci_unregister_driver(&amd64_pci_driver);
2910
2911 err_pci:
2912 msrs_free(msrs);
2913 msrs = NULL;
2914
2915 err_free:
2916 kfree(mcis);
2917 mcis = NULL;
2918
2919 kfree(ecc_stngs);
2920 ecc_stngs = NULL;
2921
2922 err_ret:
2923 return err;
2924 }
2925
2926 static void __exit amd64_edac_exit(void)
2927 {
2928 if (amd64_ctl_pci)
2929 edac_pci_release_generic_ctl(amd64_ctl_pci);
2930
2931 pci_unregister_driver(&amd64_pci_driver);
2932
2933 kfree(ecc_stngs);
2934 ecc_stngs = NULL;
2935
2936 kfree(mcis);
2937 mcis = NULL;
2938
2939 msrs_free(msrs);
2940 msrs = NULL;
2941 }
2942
2943 module_init(amd64_edac_init);
2944 module_exit(amd64_edac_exit);
2945
2946 MODULE_LICENSE("GPL");
2947 MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
2948 "Dave Peterson, Thayne Harbaugh");
2949 MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
2950 EDAC_AMD64_VERSION);
2951
2952 module_param(edac_op_state, int, 0444);
2953 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
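
/*
 * Illustrative usage of the parameter above, explicitly selecting polled
 * operation at load time:
 *
 *	modprobe amd64_edac edac_op_state=0
 */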