#include "amd64_edac.h"
#include <asm/amd_nb.h>

static struct edac_pci_ctl_info *pci_ctl;

static int report_gart_errors;
module_param(report_gart_errors, int, 0644);

/*
 * Set by command line parameter. If BIOS has enabled the ECC, this override is
 * cleared to prevent re-enabling the hardware by this driver.
 */
static int ecc_enable_override;
module_param(ecc_enable_override, int, 0644);

static struct msr __percpu *msrs;

/*
 * count successfully initialized driver instances for setup_pci_device()
 */
static atomic_t drv_instances = ATOMIC_INIT(0);

/* Per-node driver instances */
static struct mem_ctl_info **mcis;
static struct ecc_settings **ecc_stngs;

/*
 * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
 * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
 * or higher value'.
 *
 * FIXME: Produce a better mapping/linearisation.
 */
static const struct scrubrate {
	u32 scrubval;		/* bit pattern for scrub rate */
	u32 bandwidth;		/* bandwidth consumed (bytes/sec) */
} scrubrates[] = {
	{ 0x01, 1600000000UL},
	{ 0x02, 800000000UL},
	{ 0x03, 400000000UL},
	{ 0x04, 200000000UL},
	{ 0x05, 100000000UL},
	{ 0x06, 50000000UL},
	{ 0x07, 25000000UL},
	{ 0x08, 12284069UL},
	{ 0x09, 6274509UL},
	{ 0x0A, 3121951UL},
	{ 0x0B, 1560975UL},
	{ 0x0C, 781440UL},
	{ 0x0D, 390720UL},
	{ 0x0E, 195300UL},
	{ 0x0F, 97650UL},
	{ 0x10, 48854UL},
	{ 0x11, 24427UL},
	{ 0x12, 12213UL},
	{ 0x13, 6101UL},
	{ 0x14, 3051UL},
	{ 0x15, 1523UL},
	{ 0x16, 761UL},
	{ 0x00, 0UL},		/* scrubbing off */
};
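
/*
 * Editor's illustration of the lookup (the request values are made up, not
 * from any BKDG): __set_scrub_rate() below walks this table top-down and
 * programs the first entry whose bandwidth is <= the requested rate. E.g. a
 * request of 60000000 bytes/sec skips past 1600000000..100000000 and lands
 * on { 0x06, 50000000UL }, so scrubval 0x06 is written. A request smaller
 * than 761 bytes/sec matches nothing and falls through to the terminating
 * { 0x00, 0UL } entry, which turns scrubbing off.
 */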

int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
			       u32 *val, const char *func)
{
	int err = 0;

	err = pci_read_config_dword(pdev, offset, val);
	if (err)
		amd64_warn("%s: error reading F%dx%03x.\n",
			   func, PCI_FUNC(pdev->devfn), offset);

	return err;
}

int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
				u32 val, const char *func)
{
	int err = 0;

	err = pci_write_config_dword(pdev, offset, val);
	if (err)
		amd64_warn("%s: error writing to F%dx%03x.\n",
			   func, PCI_FUNC(pdev->devfn), offset);

	return err;
}

/*
 * Select DCT to which PCI cfg accesses are routed
 */
static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
{
	u32 reg = 0;

	amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
	reg &= (pvt->model == 0x30) ? ~3 : ~1;
	reg |= dct;
	amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
}

/*
 *
 * Depending on the family, F2 DCT reads need special handling:
 *
 * K8: has a single DCT only and no address offsets >= 0x100
 *
 * F10h: each DCT has its own set of regs
 *	DCT0 -> F2x040..
 *	DCT1 -> F2x140..
 *
 * F16h: has only 1 DCT
 *
 * F15h: we select which DCT we access using F1x10C[DctCfgSel]
 */
static inline int amd64_read_dct_pci_cfg(struct amd64_pvt *pvt, u8 dct,
					 int offset, u32 *val)
{
	switch (pvt->fam) {
	case 0xf:
		if (dct || offset >= 0x100)
			return -EINVAL;
		break;

	case 0x10:
		if (dct) {
			/*
			 * Note: If ganging is enabled, barring the regs
			 * F2x[1,0]98 and F2x[1,0]9C; reads to F2x1xx
			 * return 0. (cf. Section 2.8.1 F10h BKDG)
			 */
			if (dct_ganging_enabled(pvt))
				return 0;

			offset += 0x100;
		}
		break;

	case 0x15:
		/*
		 * F15h: F2x1xx addresses do not map explicitly to DCT1.
		 * We should select which DCT we access using F1x10C[DctCfgSel]
		 */
		dct = (dct && pvt->model == 0x30) ? 3 : dct;
		f15h_select_dct(pvt, dct);
		break;

	case 0x16:
		if (dct)
			return -EINVAL;
		break;

	default:
		break;
	}
	return amd64_read_pci_cfg(pvt->F2, offset, val);
}

/*
 * Memory scrubber control interface. For K8, memory scrubbing is handled by
 * hardware and can involve L2 cache, dcache as well as the main memory. With
 * F10, this is extended to L3 cache scrubbing on CPU models sporting that
 * functionality.
 *
 * This causes the "units" for the scrubbing speed to vary from 64 byte blocks
 * (dram) over to cache lines. This is nasty, so we will use bandwidth in
 * bytes/sec for the setting.
 *
 * Currently, we only do dram scrubbing. If the scrubbing is done in software on
 * other archs, we might not have access to the caches directly.
 */

/*
 * scan the scrub rate mapping table for a close or matching bandwidth value to
 * issue. If requested is too big, then use last maximum value found.
 */
static int __set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
{
	u32 scrubval;
	int i;

	/*
	 * map the configured rate (new_bw) to a value specific to the AMD64
	 * memory controller and apply to register. Search for the first
	 * bandwidth entry that is greater or equal than the setting requested
	 * and program that. If at last entry, turn off DRAM scrubbing.
	 *
	 * If no suitable bandwidth is found, turn off DRAM scrubbing entirely
	 * by falling back to the last element in scrubrates[].
	 */
	for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
		/*
		 * skip scrub rates which aren't recommended
		 * (see F10 BKDG, F3x58)
		 */
		if (scrubrates[i].scrubval < min_rate)
			continue;

		if (scrubrates[i].bandwidth <= new_bw)
			break;
	}

	scrubval = scrubrates[i].scrubval;

	pci_write_bits32(ctl, SCRCTRL, scrubval, 0x001F);

	if (scrubval)
		return scrubrates[i].bandwidth;

	return 0;
}

static int set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u32 min_scrubrate = 0x5;

	if (pvt->fam == 0xf)
		min_scrubrate = 0x0;

	/* Erratum #505 */
	if (pvt->fam == 0x15 && pvt->model < 0x10)
		f15h_select_dct(pvt, 0);

	return __set_scrub_rate(pvt->F3, bw, min_scrubrate);
}

static int get_scrub_rate(struct mem_ctl_info *mci)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u32 scrubval = 0;
	int i, retval = -EINVAL;

	/* Erratum #505 */
	if (pvt->fam == 0x15 && pvt->model < 0x10)
		f15h_select_dct(pvt, 0);

	amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);

	scrubval = scrubval & 0x001F;

	for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
		if (scrubrates[i].scrubval == scrubval) {
			retval = scrubrates[i].bandwidth;
			break;
		}
	}
	return retval;
}

/*
 * returns true if the SysAddr given by sys_addr matches the
 * DRAM base/limit associated with node_id
 */
static bool base_limit_match(struct amd64_pvt *pvt, u64 sys_addr, u8 nid)
{
	u64 addr;

	/* The K8 treats this as a 40-bit value. However, bits 63-40 will be
	 * all ones if the most significant implemented address bit is 1.
	 * Here we discard bits 63-40. See section 3.4.2 of AMD publication
	 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
	 * Application Programming.
	 */
	addr = sys_addr & 0x000000ffffffffffull;

	return ((addr >= get_dram_base(pvt, nid)) &&
		(addr <= get_dram_limit(pvt, nid)));
}

/*
 * Attempt to map a SysAddr to a node. On success, return a pointer to the
 * mem_ctl_info structure for the node that the SysAddr maps to.
 *
 * On failure, return NULL.
 */
static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
						u64 sys_addr)
{
	struct amd64_pvt *pvt;
	u8 node_id;
	u32 intlv_en, bits;

	/*
	 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
	 * 3.4.4.2) registers to map the SysAddr to a node ID.
	 */
	pvt = mci->pvt_info;

	/*
	 * The value of this field should be the same for all DRAM Base
	 * registers. Therefore we arbitrarily choose to read it from the
	 * register for node 0.
	 */
	intlv_en = dram_intlv_en(pvt, 0);

	if (intlv_en == 0) {
		for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
			if (base_limit_match(pvt, sys_addr, node_id))
				goto found;
		}
		goto err_no_match;
	}

	if (unlikely((intlv_en != 0x01) &&
		     (intlv_en != 0x03) &&
		     (intlv_en != 0x07))) {
		amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en);
		return NULL;
	}

	bits = (((u32) sys_addr) >> 12) & intlv_en;

	for (node_id = 0; ; ) {
		if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits)
			break;	/* intlv_sel field matches */

		if (++node_id >= DRAM_RANGES)
			goto err_no_match;
	}

	/* sanity test for sys_addr */
	if (unlikely(!base_limit_match(pvt, sys_addr, node_id))) {
		amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address "
			   "range for node %d with node interleaving enabled.\n",
			   __func__, sys_addr, node_id);
		return NULL;
	}

found:
	return edac_mc_find((int)node_id);

err_no_match:
	edac_dbg(2, "sys_addr 0x%lx doesn't match any node\n",
		 (unsigned long)sys_addr);

	return NULL;
}

/*
 * compute the CS base address of the @csrow on the DRAM controller @dct.
 * For details see F2x[5C:40] in the processor's BKDG
 */
static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
				 u64 *base, u64 *mask)
{
	u64 csbase, csmask, base_bits, mask_bits;
	u8 addr_shift;

	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
		csbase		= pvt->csels[dct].csbases[csrow];
		csmask		= pvt->csels[dct].csmasks[csrow];
		base_bits	= GENMASK_ULL(31, 21) | GENMASK_ULL(15, 9);
		mask_bits	= GENMASK_ULL(29, 21) | GENMASK_ULL(15, 9);
		addr_shift	= 4;

	/*
	 * F16h and F15h, models 30h and later need two addr_shift values:
	 * 8 for high and 6 for low (cf. F16h BKDG).
	 */
	} else if (pvt->fam == 0x16 ||
		  (pvt->fam == 0x15 && pvt->model >= 0x30)) {
		csbase		= pvt->csels[dct].csbases[csrow];
		csmask		= pvt->csels[dct].csmasks[csrow >> 1];

		*base  = (csbase & GENMASK_ULL(15, 5)) << 6;
		*base |= (csbase & GENMASK_ULL(30, 19)) << 8;

		*mask = ~0ULL;
		/* poke holes for the csmask */
		*mask &= ~((GENMASK_ULL(15, 5) << 6) |
			   (GENMASK_ULL(30, 19) << 8));

		*mask |= (csmask & GENMASK_ULL(15, 5)) << 6;
		*mask |= (csmask & GENMASK_ULL(30, 19)) << 8;

		return;
	} else {
		csbase		= pvt->csels[dct].csbases[csrow];
		csmask		= pvt->csels[dct].csmasks[csrow >> 1];
		addr_shift	= 8;

		if (pvt->fam == 0x15)
			base_bits = mask_bits =
				GENMASK_ULL(30, 19) | GENMASK_ULL(13, 5);
		else
			base_bits = mask_bits =
				GENMASK_ULL(28, 19) | GENMASK_ULL(13, 5);
	}

	*base  = (csbase & base_bits) << addr_shift;

	*mask  = ~0ULL;
	/* poke holes for the csmask */
	*mask &= ~(mask_bits << addr_shift);
	/* OR them in */
	*mask |= (csmask & mask_bits) << addr_shift;
}

#define for_each_chip_select(i, dct, pvt) \
	for (i = 0; i < pvt->csels[dct].b_cnt; i++)

#define chip_select_base(i, dct, pvt) \
	pvt->csels[dct].csbases[i]

#define for_each_chip_select_mask(i, dct, pvt) \
	for (i = 0; i < pvt->csels[dct].m_cnt; i++)

/*
 * @input_addr is an InputAddr associated with the node given by mci. Return the
 * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
 */
static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
{
	struct amd64_pvt *pvt;
	int csrow;
	u64 base, mask;

	pvt = mci->pvt_info;

	for_each_chip_select(csrow, 0, pvt) {
		if (!csrow_enabled(csrow, 0, pvt))
			continue;

		get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);

		mask = ~mask;

		if ((input_addr & mask) == (base & mask)) {
			edac_dbg(2, "InputAddr 0x%lx matches csrow %d (node %d)\n",
				 (unsigned long)input_addr, csrow,
				 pvt->mc_node_id);

			return csrow;
		}
	}
	edac_dbg(2, "no matching csrow for InputAddr 0x%lx (MC node %d)\n",
		 (unsigned long)input_addr, pvt->mc_node_id);

	return -1;
}
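
/*
 * Editor's illustration of the hit test above (hypothetical numbers, not
 * taken from a real register dump): if get_cs_base_and_mask() yields
 * base = 0x08000000 and mask = 0x07ffffff, then ~mask = ...f8000000 and any
 * input_addr in [0x08000000, 0x0fffffff] satisfies
 * (input_addr & ~mask) == (base & ~mask), i.e. that csrow claims the
 * address; the masked-out low bits are "don't care" bits.
 */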

/*
 * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
 * for the node represented by mci. Info is passed back in *hole_base,
 * *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if
 * info is invalid. Info may be invalid for either of the following reasons:
 *
 * - The revision of the node is not E or greater. In this case, the DRAM Hole
 *   Address Register does not exist.
 *
 * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
 *   indicating that its contents are not valid.
 *
 * The values passed back in *hole_base, *hole_offset, and *hole_size are
 * complete 32-bit values despite the fact that the bitfields in the DHAR
 * only represent bits 31-24 of the base and offset values.
 */
int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
			     u64 *hole_offset, u64 *hole_size)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	/* only revE and later have the DRAM Hole Address Register */
	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_E) {
		edac_dbg(1, "  revision %d for node %d does not support DHAR\n",
			 pvt->ext_model, pvt->mc_node_id);
		return 1;
	}

	/* valid for Fam10h and above */
	if (pvt->fam >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
		edac_dbg(1, "  Dram Memory Hoisting is DISABLED on this system\n");
		return 1;
	}

	if (!dhar_valid(pvt)) {
		edac_dbg(1, "  Dram Memory Hoisting is DISABLED on this node %d\n",
			 pvt->mc_node_id);
		return 1;
	}

	/* This node has Memory Hoisting */

	/* +------------------+--------------------+--------------------+-----
	 * | memory           | DRAM hole          | relocated          |
	 * | [0, (x - 1)]     | [x, 0xffffffff]    | addresses from     |
	 * |                  |                    | DRAM hole          |
	 * |                  |                    | [0x100000000,      |
	 * |                  |                    |  (0x100000000+     |
	 * |                  |                    |   (0xffffffff-x))] |
	 * +------------------+--------------------+--------------------+-----
	 *
	 * Above is a diagram of physical memory showing the DRAM hole and the
	 * relocated addresses from the DRAM hole. As shown, the DRAM hole
	 * starts at address x (the base address) and extends through address
	 * 0xffffffff. The DRAM Hole Address Register (DHAR) relocates the
	 * addresses in the hole so that they start at 0x100000000.
	 */

	*hole_base = dhar_base(pvt);
	*hole_size = (1ULL << 32) - *hole_base;

	*hole_offset = (pvt->fam > 0xf) ? f10_dhar_offset(pvt)
					: k8_dhar_offset(pvt);

	edac_dbg(1, "  DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
		 pvt->mc_node_id, (unsigned long)*hole_base,
		 (unsigned long)*hole_offset, (unsigned long)*hole_size);

	return 0;
}
EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
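
/*
 * Editor's worked example (hypothetical DHAR contents): with a hole base of
 * x = 0xc0000000, the hole spans [0xc0000000, 0xffffffff], so
 * hole_size = 0x100000000 - 0xc0000000 = 0x40000000 (1 GB). The hoisted
 * copy of that range lives at [0x100000000, 0x13fffffff]; a SysAddr such as
 * 0x120000000 has the DHAR offset subtracted from it (see
 * sys_addr_to_dram_addr() below) to recover the DramAddr the controller
 * actually decodes.
 */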

/*
 * Return the DramAddr that the SysAddr given by @sys_addr maps to. It is
 * assumed that sys_addr maps to the node given by mci.
 *
 * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
 * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
 * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
 * then it is also involved in translating a SysAddr to a DramAddr. Sections
 * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
 * These parts of the documentation are unclear. I interpret them as follows:
 *
 * When node n receives a SysAddr, it processes the SysAddr as follows:
 *
 * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
 *    Limit registers for node n. If the SysAddr is not within the range
 *    specified by the base and limit values, then node n ignores the SysAddr
 *    (since it does not map to node n). Otherwise continue to step 2 below.
 *
 * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
 *    disabled so skip to step 3 below. Otherwise see if the SysAddr is within
 *    the range of relocated addresses (starting at 0x100000000) from the DRAM
 *    hole. If not, skip to step 3 below. Else get the value of the
 *    DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
 *    offset defined by this value from the SysAddr.
 *
 * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
 *    Base register for node n. To obtain the DramAddr, subtract the base
 *    address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
 */
static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
	int ret;

	dram_base = get_dram_base(pvt, pvt->mc_node_id);

	ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
				       &hole_size);
	if (!ret) {
		if ((sys_addr >= (1ULL << 32)) &&
		    (sys_addr < ((1ULL << 32) + hole_size))) {
			/* use DHAR to translate SysAddr to DramAddr */
			dram_addr = sys_addr - hole_offset;

			edac_dbg(2, "using DHAR to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
				 (unsigned long)sys_addr,
				 (unsigned long)dram_addr);

			return dram_addr;
		}
	}

	/*
	 * Translate the SysAddr to a DramAddr as shown near the start of
	 * section 3.4.4 (p. 70). Although sys_addr is a 64-bit value, the k8
	 * only deals with 40-bit values. Therefore we discard bits 63-40 of
	 * sys_addr below. If bit 39 of sys_addr is 1 then the bits we
	 * discard are all 1s. Otherwise the bits we discard are all 0s. See
	 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
	 * Programmer's Manual Volume 1 Application Programming.
	 */
	dram_addr = (sys_addr & GENMASK_ULL(39, 0)) - dram_base;

	edac_dbg(2, "using DRAM Base register to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
		 (unsigned long)sys_addr, (unsigned long)dram_addr);
	return dram_addr;
}
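
/*
 * Editor's worked example of step 3 above (hypothetical register values):
 * for a node whose DRAM Base is 0x100000000 and a SysAddr that misses the
 * hoisted-hole window, sys_addr = 0x123456789 gives
 * dram_addr = (0x123456789 & GENMASK_ULL(39, 0)) - 0x100000000
 *           = 0x23456789.
 */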

/*
 * @intlv_en is the value of the IntlvEn field from a DRAM Base register
 * (section 3.4.4.1). Return the number of bits from a SysAddr that are used
 * for node interleaving.
 */
static int num_node_interleave_bits(unsigned intlv_en)
{
	static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
	int n;

	BUG_ON(intlv_en > 7);
	n = intlv_shift_table[intlv_en];
	return n;
}
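
/*
 * Editor's note on the table above: IntlvEn is a mask of the SysAddr bits
 * compared for node interleaving, so only the values 0b001, 0b011 and 0b111
 * are meaningful (cf. the sanity check in find_mc_by_sys_addr()) - they
 * interleave across 2, 4 and 8 nodes and thus consume 1, 2 and 3 address
 * bits respectively; all other table indices map to 0.
 */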

/* Translate the DramAddr given by @dram_addr to an InputAddr. */
static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
{
	struct amd64_pvt *pvt;
	int intlv_shift;
	u64 input_addr;

	pvt = mci->pvt_info;

	/*
	 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
	 * concerning translating a DramAddr to an InputAddr.
	 */
	intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
	input_addr = ((dram_addr >> intlv_shift) & GENMASK_ULL(35, 12)) +
		      (dram_addr & 0xfff);

	edac_dbg(2, "  Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
		 intlv_shift, (unsigned long)dram_addr,
		 (unsigned long)input_addr);

	return input_addr;
}

/*
 * Translate the SysAddr represented by @sys_addr to an InputAddr. It is
 * assumed that @sys_addr maps to the node given by mci.
 */
static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
{
	u64 input_addr;

	input_addr =
	    dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));

	edac_dbg(2, "SysAddr 0x%lx translates to InputAddr 0x%lx\n",
		 (unsigned long)sys_addr, (unsigned long)input_addr);

	return input_addr;
}

/* Map the Error address to a PAGE and PAGE OFFSET. */
static inline void error_address_to_page_and_offset(u64 error_address,
						    struct err_info *err)
{
	err->page = (u32) (error_address >> PAGE_SHIFT);
	err->offset = ((u32) error_address) & ~PAGE_MASK;
}
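
/*
 * Editor's illustration (assuming the usual 4 KB PAGE_SIZE): an
 * error_address of 0x12345678 splits into page 0x12345 and offset 0x678,
 * which is the (page, offset) pair the EDAC core expects.
 */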

/*
 * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
 * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
 * of a node that detected an ECC memory error. mci represents the node that
 * the error address maps to (possibly different from the node that detected
 * the error). Return the number of the csrow that sys_addr maps to, or -1 on
 * error.
 */
static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
{
	int csrow;

	csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));

	if (csrow == -1)
		amd64_mc_err(mci, "Failed to translate InputAddr to csrow for "
				  "address 0x%lx\n", (unsigned long)sys_addr);
	return csrow;
}

static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);

/*
 * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
 * are ECC capable.
 */
static unsigned long determine_edac_cap(struct amd64_pvt *pvt)
{
	u8 bit;
	unsigned long edac_cap = EDAC_FLAG_NONE;

	bit = (pvt->fam > 0xf || pvt->ext_model >= K8_REV_F)
		? 19
		: 17;

	if (pvt->dclr0 & BIT(bit))
		edac_cap = EDAC_FLAG_SECDED;

	return edac_cap;
}

static void debug_display_dimm_sizes(struct amd64_pvt *, u8);

static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
{
	edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);

	edac_dbg(1, "  DIMM type: %sbuffered; all DIMMs support ECC: %s\n",
		 (dclr & BIT(16)) ? "un" : "",
		 (dclr & BIT(19)) ? "yes" : "no");

	edac_dbg(1, "  PAR/ERR parity: %s\n",
		 (dclr & BIT(8)) ? "enabled" : "disabled");

	if (pvt->fam == 0x10)
		edac_dbg(1, "  DCT 128bit mode width: %s\n",
			 (dclr & BIT(11)) ? "128b" : "64b");

	edac_dbg(1, "  x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
		 (dclr & BIT(12)) ? "yes" : "no",
		 (dclr & BIT(13)) ? "yes" : "no",
		 (dclr & BIT(14)) ? "yes" : "no",
		 (dclr & BIT(15)) ? "yes" : "no");
}

/* Display and decode various NB registers for debug purposes. */
static void dump_misc_regs(struct amd64_pvt *pvt)
{
	edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);

	edac_dbg(1, "  NB two channel DRAM capable: %s\n",
		 (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");

	edac_dbg(1, "  ECC capable: %s, ChipKill ECC capable: %s\n",
		 (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
		 (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");

	debug_dump_dramcfg_low(pvt, pvt->dclr0, 0);

	edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);

	edac_dbg(1, "F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, offset: 0x%08x\n",
		 pvt->dhar, dhar_base(pvt),
		 (pvt->fam == 0xf) ? k8_dhar_offset(pvt)
				   : f10_dhar_offset(pvt));

	edac_dbg(1, "  DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");

	debug_display_dimm_sizes(pvt, 0);

	/* everything below this point is Fam10h and above */
	if (pvt->fam == 0xf)
		return;

	debug_display_dimm_sizes(pvt, 1);

	amd64_info("using %s syndromes.\n", ((pvt->ecc_sym_sz == 8) ? "x8" : "x4"));

	/* Only if NOT ganged does dclr1 have valid info */
	if (!dct_ganging_enabled(pvt))
		debug_dump_dramcfg_low(pvt, pvt->dclr1, 1);
}

/*
 * See BKDG, F2x[1,0][5C:40], F2[1,0][6C:60]
 */
static void prep_chip_selects(struct amd64_pvt *pvt)
{
	if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
	} else if (pvt->fam == 0x15 && pvt->model >= 0x30) {
		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 4;
		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 2;
	} else {
		pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
		pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
	}
}

/*
 * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
 */
static void read_dct_base_mask(struct amd64_pvt *pvt)
{
	int cs;

	prep_chip_selects(pvt);

	for_each_chip_select(cs, 0, pvt) {
		int reg0   = DCSB0 + (cs * 4);
		int reg1   = DCSB1 + (cs * 4);
		u32 *base0 = &pvt->csels[0].csbases[cs];
		u32 *base1 = &pvt->csels[1].csbases[cs];

		if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0))
			edac_dbg(0, "  DCSB0[%d]=0x%08x reg: F2x%x\n",
				 cs, *base0, reg0);

		if (pvt->fam == 0xf)
			continue;

		if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, base1))
			edac_dbg(0, "  DCSB1[%d]=0x%08x reg: F2x%x\n",
				 cs, *base1, (pvt->fam == 0x10) ? reg1
								: reg0);
	}

	for_each_chip_select_mask(cs, 0, pvt) {
		int reg0   = DCSM0 + (cs * 4);
		int reg1   = DCSM1 + (cs * 4);
		u32 *mask0 = &pvt->csels[0].csmasks[cs];
		u32 *mask1 = &pvt->csels[1].csmasks[cs];

		if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0))
			edac_dbg(0, "  DCSM0[%d]=0x%08x reg: F2x%x\n",
				 cs, *mask0, reg0);

		if (pvt->fam == 0xf)
			continue;

		if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, mask1))
			edac_dbg(0, "  DCSM1[%d]=0x%08x reg: F2x%x\n",
				 cs, *mask1, (pvt->fam == 0x10) ? reg1
								: reg0);
	}
}

static enum mem_type determine_memory_type(struct amd64_pvt *pvt, int cs)
{
	enum mem_type type;

	/* F15h supports only DDR3 */
	if (pvt->fam >= 0x15)
		type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
	else if (pvt->fam == 0x10 || pvt->ext_model >= K8_REV_F) {
		if (pvt->dchr0 & DDR3_MODE)
			type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
		else
			type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
	} else {
		type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
	}

	amd64_info("CS%d: %s\n", cs, edac_mem_types[type]);

	return type;
}

/* Get the number of DCT channels the memory controller is using. */
static int k8_early_channel_count(struct amd64_pvt *pvt)
{
	int flag;

	if (pvt->ext_model >= K8_REV_F)
		/* RevF (NPT) and later */
		flag = pvt->dclr0 & WIDTH_128;
	else
		/* RevE and earlier */
		flag = pvt->dclr0 & REVE_WIDTH_128;

	/* not used */
	pvt->dclr1 = 0;

	return (flag) ? 2 : 1;
}

/* On F10h and later ErrAddr is MC4_ADDR[47:1] */
static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m)
{
	u64 addr;
	u8 start_bit = 1;
	u8 end_bit   = 47;

	if (pvt->fam == 0xf) {
		start_bit = 3;
		end_bit   = 39;
	}

	addr = m->addr & GENMASK_ULL(end_bit, start_bit);

	/*
	 * Erratum 637 workaround
	 */
	if (pvt->fam == 0x15) {
		struct amd64_pvt *pvt;
		u64 cc6_base, tmp_addr;
		u32 tmp;
		u16 mce_nid;
		u8 intlv_en;

		if ((addr & GENMASK_ULL(47, 24)) >> 24 != 0x00fdf7)
			return addr;

		mce_nid	= amd_get_nb_id(m->extcpu);
		pvt	= mcis[mce_nid]->pvt_info;

		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_LIM, &tmp);
		intlv_en = tmp >> 21 & 0x7;

		/* add [47:27] + 3 trailing bits */
		cc6_base  = (tmp & GENMASK_ULL(20, 0)) << 3;

		/* reverse and add DramIntlvEn */
		cc6_base |= intlv_en ^ 0x7;

		/* pin at [47:24] */
		cc6_base <<= 24;

		if (!intlv_en)
			return cc6_base | (addr & GENMASK_ULL(23, 0));

		amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_BASE, &tmp);

		/* faster log2 */
		tmp_addr  = (addr & GENMASK_ULL(23, 12)) << __fls(intlv_en + 1);

		/* OR DramIntlvSel into bits [14:12] */
		tmp_addr |= (tmp & GENMASK_ULL(23, 21)) >> 9;

		/* add remaining [11:0] bits from original MC4_ADDR */
		tmp_addr |= addr & GENMASK_ULL(11, 0);

		return cc6_base | tmp_addr;
	}

	return addr;
}
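
/*
 * Editor's note on the masking above: on K8 (fam 0xf) the window is
 * GENMASK_ULL(39, 3), matching a 40-bit, 8-byte-aligned reported address;
 * from F10h onwards it widens to GENMASK_ULL(47, 1). A hypothetical m->addr
 * of 0xffff00001234567f on F10h would thus be trimmed to 0x1234567e before
 * any further decoding.
 */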

static struct pci_dev *pci_get_related_function(unsigned int vendor,
						unsigned int device,
						struct pci_dev *related)
{
	struct pci_dev *dev = NULL;

	while ((dev = pci_get_device(vendor, device, dev))) {
		if (pci_domain_nr(dev->bus) == pci_domain_nr(related->bus) &&
		    (dev->bus->number == related->bus->number) &&
		    (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
			break;
	}

	return dev;
}

static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
{
	struct amd_northbridge *nb;
	struct pci_dev *f1 = NULL;
	unsigned int pci_func;
	int off = range << 3;
	u32 llim;

	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off,  &pvt->ranges[range].base.lo);
	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);

	if (pvt->fam == 0xf)
		return;

	if (!dram_rw(pvt, range))
		return;

	amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off,  &pvt->ranges[range].base.hi);
	amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);

	/* F15h: factor in CC6 save area by reading dst node's limit reg */
	if (pvt->fam != 0x15)
		return;

	nb = node_to_amd_nb(dram_dst_node(pvt, range));
	if (WARN_ON(!nb))
		return;

	pci_func = (pvt->model == 0x30) ? PCI_DEVICE_ID_AMD_15H_M30H_NB_F1
					: PCI_DEVICE_ID_AMD_15H_NB_F1;

	f1 = pci_get_related_function(nb->misc->vendor, pci_func, nb->misc);
	if (WARN_ON(!f1))
		return;

	amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);

	pvt->ranges[range].lim.lo &= GENMASK_ULL(15, 0);

	/* {[39:27],111b} */
	pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;

	pvt->ranges[range].lim.hi &= GENMASK_ULL(7, 0);

	/* [47:40] */
	pvt->ranges[range].lim.hi |= llim >> 13;

	pci_dev_put(f1);
}


static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
				    struct err_info *err)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	error_address_to_page_and_offset(sys_addr, err);

	/*
	 * Find out which node the error address belongs to. This may be
	 * different from the node that detected the error.
	 */
	err->src_mci = find_mc_by_sys_addr(mci, sys_addr);
	if (!err->src_mci) {
		amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
			     (unsigned long)sys_addr);
		err->err_code = ERR_NODE;
		return;
	}

	/* Now map the sys_addr to a CSROW */
	err->csrow = sys_addr_to_csrow(err->src_mci, sys_addr);
	if (err->csrow < 0) {
		err->err_code = ERR_CSROW;
		return;
	}

	/* CHIPKILL enabled */
	if (pvt->nbcfg & NBCFG_CHIPKILL) {
		err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
		if (err->channel < 0) {
			/*
			 * Syndrome didn't map, so we don't know which of the
			 * 2 DIMMs is in error. So we need to ID 'both' of them
			 * as suspect.
			 */
			amd64_mc_warn(err->src_mci, "unknown syndrome 0x%04x - "
				      "possible error reporting race\n",
				      err->syndrome);
			err->err_code = ERR_CHANNEL;
			return;
		}
	} else {
		/*
		 * non-chipkill ecc mode
		 *
		 * The k8 documentation is unclear about how to determine the
		 * channel number when using non-chipkill memory. This method
		 * was obtained from email communication with someone at AMD.
		 * (Wish the email was placed in this comment - norsk)
		 */
		err->channel = ((sys_addr & BIT(3)) != 0);
	}
}


static int ddr2_cs_size(unsigned i, bool dct_width)
{
	unsigned shift = 0;

	if (i <= 2)
		shift = i;
	else if (!(i & 0x1))
		shift = i >> 1;
	else
		shift = (i + 1) >> 1;

	return 128 << (shift + !!dct_width);
}

static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				  unsigned cs_mode)
{
	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;

	if (pvt->ext_model >= K8_REV_F) {
		WARN_ON(cs_mode > 11);
		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
	}
	else if (pvt->ext_model >= K8_REV_D) {
		unsigned diff;
		WARN_ON(cs_mode > 10);

		/*
		 * the below calculation, besides trying to win an obfuscated C
		 * contest, maps cs_mode values to DIMM chip select sizes. The
		 * mappings are:
		 *
		 * cs_mode	CS size (mb)
		 * =======	============
		 * 0		32
		 * 1		64
		 * 2		128
		 * 3		128
		 * 4		256
		 * 5		512
		 * 6		256
		 * 7		512
		 * 8		1024
		 * 9		1024
		 * 10		2048
		 *
		 * Basically, it calculates a value with which to shift the
		 * smallest CS size of 32MB.
		 *
		 * ddr[23]_cs_size have a similar purpose.
		 */
		diff = cs_mode/3 + (unsigned)(cs_mode > 5);

		return 32 << (cs_mode - diff);
	}
	else {
		WARN_ON(cs_mode > 6);
		return 32 << cs_mode;
	}
}
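
/*
 * Editor's spot check of the revD..revE formula above: cs_mode = 7 gives
 * diff = 7/3 + 1 = 3, so 32 << (7 - 3) = 512 MB; cs_mode = 4 gives
 * diff = 4/3 + 0 = 1, so 32 << 3 = 256 MB - both agreeing with the table
 * in the comment.
 */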

/*
 * Get the number of DCT channels in use.
 *
 * Return:
 *	number of Memory Channels in operation
 * Pass back:
 *	contents of the DCL0_LOW register
 */
static int f1x_early_channel_count(struct amd64_pvt *pvt)
{
	int i, j, channels = 0;

	/* On F10h, if we are in 128 bit mode, then we are using 2 channels */
	if (pvt->fam == 0x10 && (pvt->dclr0 & WIDTH_128))
		return 2;

	/*
	 * Need to check if in unganged mode: In such, there are 2 channels,
	 * but they are not in 128 bit mode and thus the above 'dclr0' status
	 * bit will be OFF.
	 *
	 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
	 * their CSEnable bit on. If so, then SINGLE DIMM case.
	 */
	edac_dbg(0, "Data width is not 128 bits - need more decoding\n");

	/*
	 * Check DRAM Bank Address Mapping values for each DIMM to see if there
	 * is more than just one DIMM present in unganged mode. Need to check
	 * both controllers since DIMMs can be placed in either one.
	 */
	for (i = 0; i < 2; i++) {
		u32 dbam = (i ? pvt->dbam1 : pvt->dbam0);

		for (j = 0; j < 4; j++) {
			if (DBAM_DIMM(j, dbam) > 0) {
				channels++;
				break;
			}
		}
	}

	if (channels > 2)
		channels = 2;

	amd64_info("MCT channel count: %d\n", channels);

	return channels;
}

static int ddr3_cs_size(unsigned i, bool dct_width)
{
	unsigned shift = 0;
	int cs_size = 0;

	if (i == 0 || i == 3 || i == 4)
		cs_size = -1;
	else if (i <= 2)
		shift = i;
	else if (i == 12)
		shift = 7;
	else if (!(i & 0x1))
		shift = i >> 1;
	else
		shift = (i + 1) >> 1;

	if (cs_size != -1)
		cs_size = (128 * (1 << !!dct_width)) << shift;

	return cs_size;
}

static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				   unsigned cs_mode)
{
	u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;

	WARN_ON(cs_mode > 11);

	if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
		return ddr3_cs_size(cs_mode, dclr & WIDTH_128);
	else
		return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
}

/*
 * F15h supports only 64bit DCT interfaces
 */
static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				   unsigned cs_mode)
{
	WARN_ON(cs_mode > 12);

	return ddr3_cs_size(cs_mode, false);
}

/*
 * F16h and F15h model 30h have only limited cs_modes.
 */
static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
				   unsigned cs_mode)
{
	WARN_ON(cs_mode > 12);

	if (cs_mode == 6 || cs_mode == 8 ||
	    cs_mode == 9 || cs_mode == 12)
		return -1;
	else
		return ddr3_cs_size(cs_mode, false);
}

static void read_dram_ctl_register(struct amd64_pvt *pvt)
{

	if (pvt->fam == 0xf)
		return;

	if (!amd64_read_pci_cfg(pvt->F2, DCT_SEL_LO, &pvt->dct_sel_lo)) {
		edac_dbg(0, "F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
			 pvt->dct_sel_lo, dct_sel_baseaddr(pvt));

		edac_dbg(0, "  DCTs operate in %s mode\n",
			 (dct_ganging_enabled(pvt) ? "ganged" : "unganged"));

		if (!dct_ganging_enabled(pvt))
			edac_dbg(0, "  Address range split per DCT: %s\n",
				 (dct_high_range_enabled(pvt) ? "yes" : "no"));

		edac_dbg(0, "  data interleave for ECC: %s, DRAM cleared since last warm reset: %s\n",
			 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
			 (dct_memory_cleared(pvt) ? "yes" : "no"));

		edac_dbg(0, "  channel interleave: %s, "
			 "interleave bits selector: 0x%x\n",
			 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
			 dct_sel_interleave_addr(pvt));
	}

	amd64_read_pci_cfg(pvt->F2, DCT_SEL_HI, &pvt->dct_sel_hi);
}

/*
 * Determine channel (DCT) based on the interleaving mode (see F15h M30h BKDG,
 * 2.10.12 Memory Interleaving Modes).
 */
static u8 f15_m30h_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
				     u8 intlv_en, int num_dcts_intlv,
				     u32 dct_sel)
{
	u8 channel = 0;
	u8 select;

	if (!(intlv_en))
		return (u8)(dct_sel);

	if (num_dcts_intlv == 2) {
		select = (sys_addr >> 8) & 0x3;
		channel = select ? 0x3 : 0;
	} else if (num_dcts_intlv == 4) {
		u8 intlv_addr = dct_sel_interleave_addr(pvt);
		switch (intlv_addr) {
		case 0x4:
			channel = (sys_addr >> 8) & 0x3;
			break;
		case 0x5:
			channel = (sys_addr >> 9) & 0x3;
			break;
		}
	}
	return channel;
}

/*
 * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9 Memory
 * Interleaving Modes.
 */
static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
				bool hi_range_sel, u8 intlv_en)
{
	u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1;

	if (dct_ganging_enabled(pvt))
		return 0;

	if (hi_range_sel)
		return dct_sel_high;

	/*
	 * see F2x110[DctSelIntLvAddr] - channel interleave mode
	 */
	if (dct_interleave_enabled(pvt)) {
		u8 intlv_addr = dct_sel_interleave_addr(pvt);

		/* return DCT select function: 0=DCT0, 1=DCT1 */
		if (!intlv_addr)
			return sys_addr >> 6 & 1;

		if (intlv_addr & 0x2) {
			u8 shift = intlv_addr & 0x1 ? 9 : 6;
			u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2;

			return ((sys_addr >> shift) & 1) ^ temp;
		}

		return (sys_addr >> (12 + hweight8(intlv_en))) & 1;
	}

	if (dct_high_range_enabled(pvt))
		return ~dct_sel_high & 1;

	return 0;
}
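
/*
 * Editor's illustration of the intlv_addr == 0 case above (hypothetical
 * address): with channel interleaving on address bit A6, sys_addr = 0x1040
 * has bit 6 set and routes to DCT1, while sys_addr = 0x1000 (bit 6 clear)
 * routes to DCT0 - consecutive 64-byte lines alternate between the two DCTs.
 */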

/* Convert the sys_addr to the normalized DCT address */
static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
				 u64 sys_addr, bool hi_rng,
				 u32 dct_sel_base_addr)
{
	u64 chan_off;
	u64 dram_base		= get_dram_base(pvt, range);
	u64 hole_off		= f10_dhar_offset(pvt);
	u64 dct_sel_base_off	= (pvt->dct_sel_hi & 0xFFFFFC00) << 16;

	if (hi_rng) {
		/*
		 * if
		 * base address of high range is below 4Gb
		 * (bits [47:27] at [31:11])
		 * DRAM address space on this DCT is hoisted above 4Gb	&&
		 * sys_addr > 4Gb
		 *
		 *	remove hole offset from sys_addr
		 * else
		 *	remove high range offset from sys_addr
		 */
		if ((!(dct_sel_base_addr >> 16) ||
		     dct_sel_base_addr < dhar_base(pvt)) &&
		    dhar_valid(pvt) &&
		    (sys_addr >= BIT_64(32)))
			chan_off = hole_off;
		else
			chan_off = dct_sel_base_off;
	} else {
		/*
		 * if
		 * we have a valid hole		&&
		 * sys_addr > 4Gb
		 *
		 *	remove hole
		 * else
		 *	remove dram base to normalize to DCT address
		 */
		if (dhar_valid(pvt) && (sys_addr >= BIT_64(32)))
			chan_off = hole_off;
		else
			chan_off = dram_base;
	}

	return (sys_addr & GENMASK_ULL(47, 6)) - (chan_off & GENMASK_ULL(47, 23));
}
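
/*
 * Editor's worked example of the final subtraction (hypothetical values):
 * with no hole and dram_base = 0x100000000, sys_addr = 0x100003040
 * normalizes to (0x100003040 & GENMASK_ULL(47, 6)) - 0x100000000 = 0x3040,
 * i.e. the DCT sees the address with the range base stripped and bits [5:0]
 * zeroed.
 */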

/*
 * checks if the csrow passed in is marked as SPARED, if so returns the new
 * spare row
 */
static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
{
	int tmp_cs;

	if (online_spare_swap_done(pvt, dct) &&
	    csrow == online_spare_bad_dramcs(pvt, dct)) {

		for_each_chip_select(tmp_cs, dct, pvt) {
			if (chip_select_base(tmp_cs, dct, pvt) & 0x2) {
				csrow = tmp_cs;
				break;
			}
		}
	}
	return csrow;
}

/*
 * Iterate over the DRAM DCT "base" and "mask" registers looking for a
 * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
 *
 * Return:
 *	-EINVAL:  NOT FOUND
 *	0..csrow = Chip-Select Row
 */
static int f1x_lookup_addr_in_dct(u64 in_addr, u8 nid, u8 dct)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;
	u64 cs_base, cs_mask;
	int cs_found = -EINVAL;
	int csrow;

	mci = mcis[nid];
	if (!mci)
		return cs_found;

	pvt = mci->pvt_info;

	edac_dbg(1, "input addr: 0x%llx, DCT: %d\n", in_addr, dct);

	for_each_chip_select(csrow, dct, pvt) {
		if (!csrow_enabled(csrow, dct, pvt))
			continue;

		get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);

		edac_dbg(1, "    CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
			 csrow, cs_base, cs_mask);

		cs_mask = ~cs_mask;

		edac_dbg(1, "    (InputAddr & ~CSMask)=0x%llx (CSBase & ~CSMask)=0x%llx\n",
			 (in_addr & cs_mask), (cs_base & cs_mask));

		if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
			if (pvt->fam == 0x15 && pvt->model >= 0x30) {
				cs_found = csrow;
				break;
			}
			cs_found = f10_process_possible_spare(pvt, dct, csrow);

			edac_dbg(1, " MATCH csrow=%d\n", cs_found);
			break;
		}
	}
	return cs_found;
}

/*
 * See F2x10C. Non-interleaved graphics framebuffer memory under the 16G is
 * swapped with a region located at the bottom of memory so that the GPU can use
 * the interleaved region and thus two channels.
 */
static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
{
	u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr;

	if (pvt->fam == 0x10) {
		/* only revC3 and revE have that feature */
		if (pvt->model < 4 || (pvt->model < 0xa && pvt->stepping < 3))
			return sys_addr;
	}

	amd64_read_pci_cfg(pvt->F2, SWAP_INTLV_REG, &swap_reg);

	if (!(swap_reg & 0x1))
		return sys_addr;

	swap_base	= (swap_reg >> 3) & 0x7f;
	swap_limit	= (swap_reg >> 11) & 0x7f;
	rgn_size	= (swap_reg >> 20) & 0x7f;
	tmp_addr	= sys_addr >> 27;

	if (!(sys_addr >> 34) &&
	    (((tmp_addr >= swap_base) &&
	     (tmp_addr <= swap_limit)) ||
	     (tmp_addr < rgn_size)))
		return sys_addr ^ (u64)swap_base << 27;

	return sys_addr;
}
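
/*
 * Editor's note on the XOR trick above: the swap works in 128 MB (1 << 27)
 * granules. With a hypothetical swap_base of 0x10, an address in granule
 * 0x10 XORs down to granule 0x00 and an address in granule 0x00 XORs up to
 * 0x10, so the framebuffer region and the bottom-of-memory region trade
 * places without any table lookup.
 */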

/* For a given @dram_range, check if @sys_addr falls within it. */
static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
				  u64 sys_addr, int *chan_sel)
{
	int cs_found = -EINVAL;
	u64 chan_addr;
	u32 dct_sel_base;
	u8 channel;
	bool high_range = false;

	u8 node_id    = dram_dst_node(pvt, range);
	u8 intlv_en   = dram_intlv_en(pvt, range);
	u32 intlv_sel = dram_intlv_sel(pvt, range);

	edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
		 range, sys_addr, get_dram_limit(pvt, range));

	if (dhar_valid(pvt) &&
	    dhar_base(pvt) <= sys_addr &&
	    sys_addr < BIT_64(32)) {
		amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
			    sys_addr);
		return -EINVAL;
	}

	if (intlv_en && (intlv_sel != ((sys_addr >> 12) & intlv_en)))
		return -EINVAL;

	sys_addr = f1x_swap_interleaved_region(pvt, sys_addr);

	dct_sel_base = dct_sel_baseaddr(pvt);

	/*
	 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
	 * select between DCT0 and DCT1.
	 */
	if (dct_high_range_enabled(pvt) &&
	   !dct_ganging_enabled(pvt) &&
	   ((sys_addr >> 27) >= (dct_sel_base >> 11)))
		high_range = true;

	channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en);

	chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr,
					  high_range, dct_sel_base);

	/* Remove node interleaving, see F1x120 */
	if (intlv_en)
		chan_addr = ((chan_addr >> (12 + hweight8(intlv_en))) << 12) |
			    (chan_addr & 0xfff);

	/* remove channel interleave */
	if (dct_interleave_enabled(pvt) &&
	   !dct_high_range_enabled(pvt) &&
	   !dct_ganging_enabled(pvt)) {

		if (dct_sel_interleave_addr(pvt) != 1) {
			if (dct_sel_interleave_addr(pvt) == 0x3)
				/* hash 9 */
				chan_addr = ((chan_addr >> 10) << 9) |
					     (chan_addr & 0x1ff);
			else
				/* A[6] or hash 6 */
				chan_addr = ((chan_addr >> 7) << 6) |
					     (chan_addr & 0x3f);
		} else
			/* A[12] */
			chan_addr = ((chan_addr >> 13) << 12) |
				     (chan_addr & 0xfff);
	}

	edac_dbg(1, "   Normalized DCT addr: 0x%llx\n", chan_addr);

	cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel);

	if (cs_found >= 0)
		*chan_sel = channel;

	return cs_found;
}

static int f15_m30h_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
					u64 sys_addr, int *chan_sel)
{
	int cs_found = -EINVAL;
	int num_dcts_intlv = 0;
	u64 chan_addr, chan_offset;
	u64 dct_base, dct_limit;
	u32 dct_cont_base_reg, dct_cont_limit_reg, tmp;
	u8 channel, alias_channel, leg_mmio_hole, dct_sel, dct_offset_en;

	u64 dhar_offset		= f10_dhar_offset(pvt);
	u8 intlv_addr		= dct_sel_interleave_addr(pvt);
	u8 node_id		= dram_dst_node(pvt, range);
	u8 intlv_en		= dram_intlv_en(pvt, range);

	amd64_read_pci_cfg(pvt->F1, DRAM_CONT_BASE, &dct_cont_base_reg);
	amd64_read_pci_cfg(pvt->F1, DRAM_CONT_LIMIT, &dct_cont_limit_reg);

	dct_offset_en		= (u8) ((dct_cont_base_reg >> 3) & BIT(0));
	dct_sel			= (u8) ((dct_cont_base_reg >> 4) & 0x7);

	edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
		 range, sys_addr, get_dram_limit(pvt, range));

	if (!(get_dram_base(pvt, range)  <= sys_addr) &&
	    !(get_dram_limit(pvt, range) >= sys_addr))
		return -EINVAL;

	if (dhar_valid(pvt) &&
	    dhar_base(pvt) <= sys_addr &&
	    sys_addr < BIT_64(32)) {
		amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
			    sys_addr);
		return -EINVAL;
	}

	/* Verify sys_addr is within DCT Range. */
	dct_base  = (u64) dct_sel_baseaddr(pvt);
	dct_limit = (dct_cont_limit_reg >> 11) & 0x1FFF;

	if (!(dct_cont_base_reg & BIT(0)) &&
	    !(dct_base <= (sys_addr >> 27) &&
	      dct_limit >= (sys_addr >> 27)))
		return -EINVAL;

	/* Verify number of dct's that participate in channel interleaving. */
	num_dcts_intlv = (int) hweight8(intlv_en);

	if (!(num_dcts_intlv % 2 == 0) || (num_dcts_intlv > 4))
		return -EINVAL;

	channel = f15_m30h_determine_channel(pvt, sys_addr, intlv_en,
					     num_dcts_intlv, dct_sel);

	/* Verify we stay within the MAX number of channels allowed */
	if (channel > 3)
		return -EINVAL;

	leg_mmio_hole = (u8) (dct_cont_base_reg >> 1 & BIT(0));

	/* Get normalized DCT addr */
	if (leg_mmio_hole && (sys_addr >= BIT_64(32)))
		chan_offset = dhar_offset;
	else
		chan_offset = dct_base << 27;

	chan_addr = sys_addr - chan_offset;

	/* remove channel interleave */
	if (num_dcts_intlv == 2) {
		if (intlv_addr == 0x4)
			chan_addr = ((chan_addr >> 9) << 8) |
						(chan_addr & 0xff);
		else if (intlv_addr == 0x5)
			chan_addr = ((chan_addr >> 10) << 9) |
						(chan_addr & 0x1ff);
		else
			return -EINVAL;

	} else if (num_dcts_intlv == 4) {
		if (intlv_addr == 0x4)
			chan_addr = ((chan_addr >> 10) << 8) |
							(chan_addr & 0xff);
		else if (intlv_addr == 0x5)
			chan_addr = ((chan_addr >> 11) << 9) |
							(chan_addr & 0x1ff);
		else
			return -EINVAL;
	}

	if (dct_offset_en) {
		amd64_read_pci_cfg(pvt->F1,
				   DRAM_CONT_HIGH_OFF + (int) channel * 4,
				   &tmp);
		chan_addr += (u64) ((tmp >> 11) & 0xfff) << 27;
	}

	f15h_select_dct(pvt, channel);

	edac_dbg(1, "   Normalized DCT addr: 0x%llx\n", chan_addr);

	/*
	 * Find Chip select:
	 * if channel = 3, then alias it to 1. This is because, in F15 M30h,
	 * there is support for 4 DCT's, but only 2 are currently functional.
	 * They are DCT0 and DCT3. But we have read all registers of DCT3 into
	 * pvt->csels[1]. So we need to use '1' here to get correct info.
	 * Refer F15 M30h BKDG Section 2.10 and 2.10.3 for clarifications.
	 */
	alias_channel = (channel == 3) ? 1 : channel;

	cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, alias_channel);

	if (cs_found >= 0)
		*chan_sel = alias_channel;

	return cs_found;
}

static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt,
					u64 sys_addr,
					int *chan_sel)
{
	int cs_found = -EINVAL;
	unsigned range;

	for (range = 0; range < DRAM_RANGES; range++) {
		if (!dram_rw(pvt, range))
			continue;

		if (pvt->fam == 0x15 && pvt->model >= 0x30)
			cs_found = f15_m30h_match_to_this_node(pvt, range,
							       sys_addr,
							       chan_sel);

		else if ((get_dram_base(pvt, range)  <= sys_addr) &&
			 (get_dram_limit(pvt, range) >= sys_addr)) {
			cs_found = f1x_match_to_this_node(pvt, range,
							  sys_addr, chan_sel);
			if (cs_found >= 0)
				break;
		}
	}
	return cs_found;
}
1692
1693/*
bdc30a0c
BP
1694 * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
1695 * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
f71d0a05 1696 *
bdc30a0c
BP
1697 * The @sys_addr is usually an error address received from the hardware
1698 * (MCX_ADDR).
f71d0a05 1699 */
b15f0fca 1700static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
33ca0643 1701 struct err_info *err)
f71d0a05
DT
1702{
1703 struct amd64_pvt *pvt = mci->pvt_info;
f71d0a05 1704
33ca0643 1705 error_address_to_page_and_offset(sys_addr, err);
ab5a503c 1706
33ca0643
BP
1707 err->csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &err->channel);
1708 if (err->csrow < 0) {
1709 err->err_code = ERR_CSROW;
1710 return;
1711 }
1712
1713 /*
1714 * We need the syndromes for channel detection only when we're
1715 * ganged. Otherwise @chan should already contain the channel at
1716 * this point.
1717 */
a97fa68e 1718 if (dct_ganging_enabled(pvt))
33ca0643 1719 err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
1720}
1721
f71d0a05 1722/*
 * Debug routine to display the memory sizes of all logical DIMMs and their
 * CSROWs
f71d0a05 1725 */
d1ea71cd 1726static void debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
f71d0a05 1727{
bb89f5a0 1728 int dimm, size0, size1;
1729 u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
1730 u32 dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
f71d0a05 1731
a4b4bedc 1732 if (pvt->fam == 0xf) {
8566c4df 1733 /* K8 families < revF not supported yet */
1433eb99 1734 if (pvt->ext_model < K8_REV_F)
1735 return;
1736 else
1737 WARN_ON(ctrl != 0);
1738 }
1739
1740 if (pvt->fam == 0x10) {
1741 dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1
1742 : pvt->dbam0;
1743 dcsb = (ctrl && !dct_ganging_enabled(pvt)) ?
1744 pvt->csels[1].csbases :
1745 pvt->csels[0].csbases;
1746 } else if (ctrl) {
1747 dbam = pvt->dbam0;
1748 dcsb = pvt->csels[1].csbases;
1749 }
1750 edac_dbg(1, "F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
1751 ctrl, dbam);
f71d0a05 1752
1753 edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
1754
1755 /* Dump memory sizes for DIMM and its CSROWs */
1756 for (dimm = 0; dimm < 4; dimm++) {
1757
1758 size0 = 0;
11c75ead 1759 if (dcsb[dimm*2] & DCSB_CS_ENABLE)
1760 size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
1761 DBAM_DIMM(dimm, dbam));
1762
1763 size1 = 0;
11c75ead 1764 if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE)
1765 size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
1766 DBAM_DIMM(dimm, dbam));
f71d0a05 1767
24f9a7fe 1768 amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
1769 dimm * 2, size0,
1770 dimm * 2 + 1, size1);
1771 }
1772}
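/*
 * Example of the decode done above, using a hypothetical DBAM value: a
 * DBAM of 0x3210 carries cs_mode 0 for DIMM0, 1 for DIMM1, 2 for DIMM2
 * and 3 for DIMM3, i.e. DBAM_DIMM(1, 0x3210) == 1; dbam_to_cs() then
 * maps each cs_mode to a chip select size in MB per the family's BKDG
 * table.
 */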
1773
d1ea71cd 1774static struct amd64_family_type family_types[] = {
4d37607a 1775 [K8_CPUS] = {
0092b20d 1776 .ctl_name = "K8",
1777 .f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
1778 .f3_id = PCI_DEVICE_ID_AMD_K8_NB_MISC,
4d37607a 1779 .ops = {
1433eb99 1780 .early_channel_count = k8_early_channel_count,
1781 .map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow,
1782 .dbam_to_cs = k8_dbam_to_chip_select,
1783 }
1784 },
1785 [F10_CPUS] = {
0092b20d 1786 .ctl_name = "F10h",
1787 .f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
1788 .f3_id = PCI_DEVICE_ID_AMD_10H_NB_MISC,
4d37607a 1789 .ops = {
7d20d14d 1790 .early_channel_count = f1x_early_channel_count,
b15f0fca 1791 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
1433eb99 1792 .dbam_to_cs = f10_dbam_to_chip_select,
1793 }
1794 },
1795 [F15_CPUS] = {
1796 .ctl_name = "F15h",
1797 .f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1,
1798 .f3_id = PCI_DEVICE_ID_AMD_15H_NB_F3,
b2b0c605 1799 .ops = {
7d20d14d 1800 .early_channel_count = f1x_early_channel_count,
b15f0fca 1801 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
41d8bfab 1802 .dbam_to_cs = f15_dbam_to_chip_select,
1803 }
1804 },
1805 [F15_M30H_CPUS] = {
1806 .ctl_name = "F15h_M30h",
1807 .f1_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1,
1808 .f3_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F3,
1809 .ops = {
1810 .early_channel_count = f1x_early_channel_count,
1811 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
1812 .dbam_to_cs = f16_dbam_to_chip_select,
1813 }
1814 },
1815 [F16_CPUS] = {
1816 .ctl_name = "F16h",
1817 .f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1,
1818 .f3_id = PCI_DEVICE_ID_AMD_16H_NB_F3,
1819 .ops = {
1820 .early_channel_count = f1x_early_channel_count,
1821 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
1822 .dbam_to_cs = f16_dbam_to_chip_select,
1823 }
1824 },
1825 [F16_M30H_CPUS] = {
1826 .ctl_name = "F16h_M30h",
1827 .f1_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F1,
1828 .f3_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F3,
1829 .ops = {
1830 .early_channel_count = f1x_early_channel_count,
1831 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
1832 .dbam_to_cs = f16_dbam_to_chip_select,
1833 }
1834 },
1835};
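/*
 * The ops in this table let the shared code paths stay family-agnostic,
 * e.g. (sketch):
 *
 *	pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, &err);
 *	size = pvt->ops->dbam_to_cs(pvt, dct, cs_mode);
 *
 * per_family_init() below hooks pvt->ops up to the matching entry.
 */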
1836
b1289d6f 1837/*
1838 * These are tables of eigenvectors (one per line) which can be used for the
1839 * construction of the syndrome tables. The modified syndrome search algorithm
 * uses them to find the symbol in error and thus the DIMM.
b1289d6f 1841 *
bfc04aec 1842 * Algorithm courtesy of Ross LaFetra from AMD.
b1289d6f 1843 */
c7e5301a 1844static const u16 x4_vectors[] = {
1845 0x2f57, 0x1afe, 0x66cc, 0xdd88,
1846 0x11eb, 0x3396, 0x7f4c, 0xeac8,
1847 0x0001, 0x0002, 0x0004, 0x0008,
1848 0x1013, 0x3032, 0x4044, 0x8088,
1849 0x106b, 0x30d6, 0x70fc, 0xe0a8,
1850 0x4857, 0xc4fe, 0x13cc, 0x3288,
1851 0x1ac5, 0x2f4a, 0x5394, 0xa1e8,
1852 0x1f39, 0x251e, 0xbd6c, 0x6bd8,
1853 0x15c1, 0x2a42, 0x89ac, 0x4758,
1854 0x2b03, 0x1602, 0x4f0c, 0xca08,
1855 0x1f07, 0x3a0e, 0x6b04, 0xbd08,
1856 0x8ba7, 0x465e, 0x244c, 0x1cc8,
1857 0x2b87, 0x164e, 0x642c, 0xdc18,
1858 0x40b9, 0x80de, 0x1094, 0x20e8,
1859 0x27db, 0x1eb6, 0x9dac, 0x7b58,
1860 0x11c1, 0x2242, 0x84ac, 0x4c58,
1861 0x1be5, 0x2d7a, 0x5e34, 0xa718,
1862 0x4b39, 0x8d1e, 0x14b4, 0x28d8,
1863 0x4c97, 0xc87e, 0x11fc, 0x33a8,
1864 0x8e97, 0x497e, 0x2ffc, 0x1aa8,
1865 0x16b3, 0x3d62, 0x4f34, 0x8518,
1866 0x1e2f, 0x391a, 0x5cac, 0xf858,
1867 0x1d9f, 0x3b7a, 0x572c, 0xfe18,
1868 0x15f5, 0x2a5a, 0x5264, 0xa3b8,
1869 0x1dbb, 0x3b66, 0x715c, 0xe3f8,
1870 0x4397, 0xc27e, 0x17fc, 0x3ea8,
1871 0x1617, 0x3d3e, 0x6464, 0xb8b8,
1872 0x23ff, 0x12aa, 0xab6c, 0x56d8,
1873 0x2dfb, 0x1ba6, 0x913c, 0x7328,
1874 0x185d, 0x2ca6, 0x7914, 0x9e28,
1875 0x171b, 0x3e36, 0x7d7c, 0xebe8,
1876 0x4199, 0x82ee, 0x19f4, 0x2e58,
1877 0x4807, 0xc40e, 0x130c, 0x3208,
1878 0x1905, 0x2e0a, 0x5804, 0xac08,
1879 0x213f, 0x132a, 0xadfc, 0x5ba8,
1880 0x19a9, 0x2efe, 0xb5cc, 0x6f88,
1881};
1882
c7e5301a 1883static const u16 x8_vectors[] = {
1884 0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
1885 0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
1886 0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
1887 0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80,
1888 0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780,
1889 0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080,
1890 0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080,
1891 0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080,
1892 0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80,
1893 0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580,
1894 0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880,
1895 0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280,
1896 0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180,
1897 0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580,
1898 0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280,
1899 0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180,
1900 0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080,
1901 0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
1902 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
1903};
1904
c7e5301a 1905static int decode_syndrome(u16 syndrome, const u16 *vectors, unsigned num_vecs,
d34a6ecd 1906 unsigned v_dim)
b1289d6f 1907{
1908 unsigned int i, err_sym;
1909
1910 for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
1911 u16 s = syndrome;
1912 unsigned v_idx = err_sym * v_dim;
1913 unsigned v_end = (err_sym + 1) * v_dim;
1914
1915 /* walk over all 16 bits of the syndrome */
1916 for (i = 1; i < (1U << 16); i <<= 1) {
1917
1918 /* if bit is set in that eigenvector... */
1919 if (v_idx < v_end && vectors[v_idx] & i) {
1920 u16 ev_comp = vectors[v_idx++];
1921
1922 /* ... and bit set in the modified syndrome, */
1923 if (s & i) {
1924 /* remove it. */
1925 s ^= ev_comp;
4d37607a 1926
1927 if (!s)
1928 return err_sym;
1929 }
b1289d6f 1930
1931 } else if (s & i)
1932 /* can't get to zero, move to next symbol */
1933 break;
1934 }
1935 }
1936
956b9ba1 1937 edac_dbg(0, "syndrome(%x) not found\n", syndrome);
1938 return -1;
1939}
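/*
 * Worked example (x4 symbols): syndrome 0x0001 equals the first
 * eigenvector of symbol 2 (x4_vectors[8] == 0x0001), so a single XOR
 * cancels it and the search returns err_sym 2:
 *
 *	decode_syndrome(0x0001, x4_vectors, ARRAY_SIZE(x4_vectors), 4) == 2
 *
 * A syndrome that no symbol's eigenvectors can cancel yields -1.
 */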
d27bf6fa 1940
1941static int map_err_sym_to_channel(int err_sym, int sym_size)
1942{
	if (sym_size == 4)
		switch (err_sym) {
		case 0x20:
		case 0x21:
			return 0;
		case 0x22:
		case 0x23:
			return 1;
		default:
			return err_sym >> 4;
		}
	/* x8 symbols */
	else
		switch (err_sym) {
		/* imaginary bits not in a DIMM */
		case 0x10:
			WARN(1, "Invalid error symbol: 0x%x\n", err_sym);
			return -1;
		case 0x11:
			return 0;
		case 0x12:
			return 1;
		default:
			return err_sym >> 3;
		}
	return -1;
1978}
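/*
 * E.g. with x4 symbols, the ECC check symbols 0x20/0x21 map to channel 0
 * and 0x22/0x23 to channel 1, while an ordinary data symbol uses its high
 * nibble: map_err_sym_to_channel(0x12, 4) == 1.
 */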
1979
1980static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
1981{
1982 struct amd64_pvt *pvt = mci->pvt_info;
1983 int err_sym = -1;
1984
a3b7db09 1985 if (pvt->ecc_sym_sz == 8)
1986 err_sym = decode_syndrome(syndrome, x8_vectors,
1987 ARRAY_SIZE(x8_vectors),
1988 pvt->ecc_sym_sz);
1989 else if (pvt->ecc_sym_sz == 4)
1990 err_sym = decode_syndrome(syndrome, x4_vectors,
1991 ARRAY_SIZE(x4_vectors),
a3b7db09 1992 pvt->ecc_sym_sz);
ad6a32e9 1993 else {
a3b7db09 1994 amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz);
ad6a32e9 1995 return err_sym;
bfc04aec 1996 }
ad6a32e9 1997
a3b7db09 1998 return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz);
1999}
2000
2001static void __log_bus_error(struct mem_ctl_info *mci, struct err_info *err,
2002 u8 ecc_type)
d27bf6fa 2003{
2004 enum hw_event_mc_err_type err_type;
2005 const char *string;
d27bf6fa 2006
2007 if (ecc_type == 2)
2008 err_type = HW_EVENT_ERR_CORRECTED;
2009 else if (ecc_type == 1)
2010 err_type = HW_EVENT_ERR_UNCORRECTED;
2011 else {
2012 WARN(1, "Something is rotten in the state of Denmark.\n");
2013 return;
2014 }
2015
2016 switch (err->err_code) {
2017 case DECODE_OK:
2018 string = "";
2019 break;
2020 case ERR_NODE:
2021 string = "Failed to map error addr to a node";
2022 break;
2023 case ERR_CSROW:
2024 string = "Failed to map error addr to a csrow";
2025 break;
2026 case ERR_CHANNEL:
2027 string = "unknown syndrome - possible error reporting race";
2028 break;
2029 default:
2030 string = "WTF error";
2031 break;
d27bf6fa 2032 }
2033
2034 edac_mc_handle_error(err_type, mci, 1,
2035 err->page, err->offset, err->syndrome,
2036 err->csrow, err->channel, -1,
2037 string, "");
2038}
2039
df781d03 2040static inline void decode_bus_error(int node_id, struct mce *m)
d27bf6fa 2041{
df781d03 2042 struct mem_ctl_info *mci = mcis[node_id];
33ca0643 2043 struct amd64_pvt *pvt = mci->pvt_info;
f192c7b1 2044 u8 ecc_type = (m->status >> 45) & 0x3;
2045 u8 xec = XEC(m->status, 0x1f);
2046 u16 ec = EC(m->status);
2047 u64 sys_addr;
2048 struct err_info err;
d27bf6fa 2049
66fed2d4 2050 /* Bail out early if this was an 'observed' error */
5980bb9c 2051 if (PP(ec) == NBSL_PP_OBS)
b70ef010 2052 return;
d27bf6fa 2053
2054 /* Do only ECC errors */
2055 if (xec && xec != F10_NBSL_EXT_ERR_ECC)
d27bf6fa 2056 return;
d27bf6fa 2057
2058 memset(&err, 0, sizeof(err));
2059
a4b4bedc 2060 sys_addr = get_error_address(pvt, m);
33ca0643 2061
ecaf5606 2062 if (ecc_type == 2)
2063 err.syndrome = extract_syndrome(m->status);
2064
2065 pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, &err);
2066
2067 __log_bus_error(mci, &err, ecc_type);
2068}
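/*
 * For reference, the MC4_STATUS fields consumed above (sketch; see the
 * NB MCA register layout in the BKDG):
 *
 *	ecc_type = (m->status >> 45) & 0x3;	// 2 == corrected (CECC),
 *						// 1 == uncorrected (UECC)
 *	xec	 = XEC(m->status, 0x1f);	// extended error code
 *	ec	 = EC(m->status);		// primary error code
 */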
2069
0ec449ee 2070/*
8d5b5d9c 2071 * Use pvt->F2 which contains the F2 CPU PCI device to get the related
bbd0c1f6 2072 * F1 (AddrMap) and F3 (Misc) devices. Return negative value on error.
0ec449ee 2073 */
360b7f3c 2074static int reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 f1_id, u16 f3_id)
0ec449ee 2075{
0ec449ee 2076 /* Reserve the ADDRESS MAP Device */
2077 pvt->F1 = pci_get_related_function(pvt->F2->vendor, f1_id, pvt->F2);
2078 if (!pvt->F1) {
		amd64_err("F1 (address map) device not found: "
			  "vendor 0x%x device 0x%x (broken BIOS?)\n",
			  PCI_VENDOR_ID_AMD, f1_id);
bbd0c1f6 2082 return -ENODEV;
2083 }
2084
2085 /* Reserve the MISC Device */
2086 pvt->F3 = pci_get_related_function(pvt->F2->vendor, f3_id, pvt->F2);
2087 if (!pvt->F3) {
2088 pci_dev_put(pvt->F1);
2089 pvt->F1 = NULL;
0ec449ee 2090
		amd64_err("F3 (misc) device not found: "
			  "vendor 0x%x device 0x%x (broken BIOS?)\n",
			  PCI_VENDOR_ID_AMD, f3_id);
0ec449ee 2094
bbd0c1f6 2095 return -ENODEV;
0ec449ee 2096 }
2097 edac_dbg(1, "F1: %s\n", pci_name(pvt->F1));
2098 edac_dbg(1, "F2: %s\n", pci_name(pvt->F2));
2099 edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
2100
2101 return 0;
2102}
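/*
 * On these CPUs the northbridge appears as one PCI device per function
 * at the same slot, typically device 18h plus the node id, e.g.:
 *
 *	00:18.1 F1 (address map)   00:18.2 F2 (DRAM)   00:18.3 F3 (misc)
 *
 * which is why F1 and F3 can be derived from the F2 device we probe on.
 */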
2103
360b7f3c 2104static void free_mc_sibling_devs(struct amd64_pvt *pvt)
0ec449ee 2105{
2106 pci_dev_put(pvt->F1);
2107 pci_dev_put(pvt->F3);
2108}
2109
2110/*
2111 * Retrieve the hardware registers of the memory controller (this includes the
2112 * 'Address Map' and 'Misc' device regs)
2113 */
360b7f3c 2114static void read_mc_regs(struct amd64_pvt *pvt)
0ec449ee 2115{
a4b4bedc 2116 unsigned range;
0ec449ee 2117 u64 msr_val;
ad6a32e9 2118 u32 tmp;
2119
2120 /*
2121 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
2122 * those are Read-As-Zero
2123 */
e97f8bb8 2124 rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
956b9ba1 2125 edac_dbg(0, " TOP_MEM: 0x%016llx\n", pvt->top_mem);
2126
2127 /* check first whether TOP_MEM2 is enabled */
2128 rdmsrl(MSR_K8_SYSCFG, msr_val);
2129 if (msr_val & (1U << 21)) {
e97f8bb8 2130 rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
956b9ba1 2131 edac_dbg(0, " TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
0ec449ee 2132 } else
956b9ba1 2133 edac_dbg(0, " TOP_MEM2 disabled\n");
0ec449ee 2134
5980bb9c 2135 amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);
0ec449ee 2136
5a5d2371 2137 read_dram_ctl_register(pvt);
0ec449ee 2138
2139 for (range = 0; range < DRAM_RANGES; range++) {
2140 u8 rw;
0ec449ee 2141
2142 /* read settings for this DRAM range */
2143 read_dram_base_limit_regs(pvt, range);
2144
2145 rw = dram_rw(pvt, range);
2146 if (!rw)
2147 continue;
2148
2149 edac_dbg(1, " DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
2150 range,
2151 get_dram_base(pvt, range),
2152 get_dram_limit(pvt, range));
7f19bf75 2153
2154 edac_dbg(1, " IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
2155 dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
2156 (rw & 0x1) ? "R" : "-",
2157 (rw & 0x2) ? "W" : "-",
2158 dram_intlv_sel(pvt, range),
2159 dram_dst_node(pvt, range));
2160 }
2161
b2b0c605 2162 read_dct_base_mask(pvt);
0ec449ee 2163
bc21fa57 2164 amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
7981a28f 2165 amd64_read_dct_pci_cfg(pvt, 0, DBAM0, &pvt->dbam0);
0ec449ee 2166
8d5b5d9c 2167 amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);
0ec449ee 2168
2169 amd64_read_dct_pci_cfg(pvt, 0, DCLR0, &pvt->dclr0);
2170 amd64_read_dct_pci_cfg(pvt, 0, DCHR0, &pvt->dchr0);
0ec449ee 2171
78da121e 2172 if (!dct_ganging_enabled(pvt)) {
2173 amd64_read_dct_pci_cfg(pvt, 1, DCLR0, &pvt->dclr1);
2174 amd64_read_dct_pci_cfg(pvt, 1, DCHR0, &pvt->dchr1);
0ec449ee 2175 }
ad6a32e9 2176
2177 pvt->ecc_sym_sz = 4;
2178
a4b4bedc 2179 if (pvt->fam >= 0x10) {
b2b0c605 2180 amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
7981a28f 2181 /* F16h has only DCT0, so no need to read dbam1 */
a4b4bedc 2182 if (pvt->fam != 0x16)
7981a28f 2183 amd64_read_dct_pci_cfg(pvt, 1, DBAM0, &pvt->dbam1);
ad6a32e9 2184
a3b7db09 2185 /* F10h, revD and later can do x8 ECC too */
a4b4bedc 2186 if ((pvt->fam > 0x10 || pvt->model > 7) && tmp & BIT(25))
2187 pvt->ecc_sym_sz = 8;
2188 }
b2b0c605 2189 dump_misc_regs(pvt);
2190}
2191
2192/*
2193 * NOTE: CPU Revision Dependent code
2194 *
2195 * Input:
11c75ead 2196 * @csrow_nr ChipSelect Row Number (0..NUM_CHIPSELECTS-1)
 * @pvt: private data providing the DRAM Bank Address Mapping (DBAM)
 *	 register, the node id and the DCL register where
 *	 dual_channel_active is flagged
 *
 * The DBAM register consists of four sets of 4 bits each:
2203 *
2204 * Bits: CSROWs
2205 * 0-3 CSROWs 0 and 1
2206 * 4-7 CSROWs 2 and 3
2207 * 8-11 CSROWs 4 and 5
2208 * 12-15 CSROWs 6 and 7
2209 *
 * Values range from 0 to 15.
 * The meaning of a value depends on CPU revision and dual-channel state;
 * see the relevant BKDG for more info.
2213 *
 * The memory controller provides for a total of only 8 CSROWs in its
 * current architecture. Each "pair" of CSROWs normally represents just
 * one DIMM in single-channel mode or two (2) DIMMs in dual-channel mode.
2217 *
2218 * The following code logic collapses the various tables for CSROW based on CPU
2219 * revision.
2220 *
2221 * Returns:
 * The number of PAGE_SIZE pages that the specified CSROW encompasses
2224 *
2225 */
d1ea71cd 2226static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
0ec449ee 2227{
1433eb99 2228 u32 cs_mode, nr_pages;
f92cae45 2229 u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;
0ec449ee 2230
10de6497 2231
2232 /*
 * The math on this doesn't look right on the surface because x/2*4 can
 * be simplified to x*2, but this expression makes use of the fact that
 * it is integral math where 1/2 = 0: dividing csrow_nr by two collapses
 * each CSROW pair onto one DIMM, which selects the proper 4-bit CSROW
 * field in the DBAM register.
2238 */
0a5dfc31 2239 cs_mode = DBAM_DIMM(csrow_nr / 2, dbam);
0ec449ee 2240
41d8bfab 2241 nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode) << (20 - PAGE_SHIFT);
0ec449ee 2242
2243 edac_dbg(0, "csrow: %d, channel: %d, DBAM idx: %d\n",
2244 csrow_nr, dct, cs_mode);
2245 edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);
2246
2247 return nr_pages;
2248}
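/*
 * Numeric example: a cs_mode which dbam_to_cs() resolves to 2048 MB
 * yields 2048 << (20 - PAGE_SHIFT) == 524288 pages for that chip select,
 * assuming the usual PAGE_SHIFT of 12 (4 KB pages).
 */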
2249
2250/*
2251 * Initialize the array of csrow attribute instances, based on the values
2252 * from pci config hardware registers.
2253 */
360b7f3c 2254static int init_csrows(struct mem_ctl_info *mci)
0ec449ee 2255{
10de6497 2256 struct amd64_pvt *pvt = mci->pvt_info;
0ec449ee 2257 struct csrow_info *csrow;
de3910eb 2258 struct dimm_info *dimm;
084a4fcc 2259 enum edac_type edac_mode;
2260 enum mem_type mtype;
2261 int i, j, empty = 1;
a895bf8b 2262 int nr_pages = 0;
10de6497 2263 u32 val;
0ec449ee 2264
a97fa68e 2265 amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
0ec449ee 2266
2299ef71 2267 pvt->nbcfg = val;
0ec449ee 2268
2269 edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
2270 pvt->mc_node_id, val,
2271 !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
0ec449ee 2272
2273 /*
2274 * We iterate over DCT0 here but we look at DCT1 in parallel, if needed.
2275 */
11c75ead 2276 for_each_chip_select(i, 0, pvt) {
2277 bool row_dct0 = !!csrow_enabled(i, 0, pvt);
2278 bool row_dct1 = false;
0ec449ee 2279
a4b4bedc 2280 if (pvt->fam != 0xf)
2281 row_dct1 = !!csrow_enabled(i, 1, pvt);
2282
2283 if (!row_dct0 && !row_dct1)
0ec449ee 2284 continue;
0ec449ee 2285
10de6497 2286 csrow = mci->csrows[i];
0ec449ee 2287 empty = 0;
2288
2289 edac_dbg(1, "MC node: %d, csrow: %d\n",
2290 pvt->mc_node_id, i);
2291
1eef1282 2292 if (row_dct0) {
d1ea71cd 2293 nr_pages = get_csrow_nr_pages(pvt, 0, i);
2294 csrow->channels[0]->dimm->nr_pages = nr_pages;
2295 }
11c75ead 2296
10de6497 2297 /* K8 has only one DCT */
a4b4bedc 2298 if (pvt->fam != 0xf && row_dct1) {
d1ea71cd 2299 int row_dct1_pages = get_csrow_nr_pages(pvt, 1, i);
2300
2301 csrow->channels[1]->dimm->nr_pages = row_dct1_pages;
2302 nr_pages += row_dct1_pages;
2303 }
0ec449ee 2304
d1ea71cd 2305 mtype = determine_memory_type(pvt, i);
0ec449ee 2306
10de6497 2307 edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages);
2308
2309 /*
2310 * determine whether CHIPKILL or JUST ECC or NO ECC is operating
2311 */
a97fa68e 2312 if (pvt->nbcfg & NBCFG_ECC_ENABLE)
2313 edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL) ?
2314 EDAC_S4ECD4ED : EDAC_SECDED;
0ec449ee 2315 else
2316 edac_mode = EDAC_NONE;
2317
2318 for (j = 0; j < pvt->channel_count; j++) {
2319 dimm = csrow->channels[j]->dimm;
2320 dimm->mtype = mtype;
2321 dimm->edac_mode = edac_mode;
084a4fcc 2322 }
2323 }
2324
2325 return empty;
2326}
d27bf6fa 2327
f6d6ae96 2328/* get all cores on this DCT */
8b84c8df 2329static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, u16 nid)
2330{
2331 int cpu;
2332
2333 for_each_online_cpu(cpu)
2334 if (amd_get_nb_id(cpu) == nid)
2335 cpumask_set_cpu(cpu, mask);
2336}
2337
2338/* check MCG_CTL on all the cpus on this node */
d1ea71cd 2339static bool nb_mce_bank_enabled_on_node(u16 nid)
2340{
2341 cpumask_var_t mask;
50542251 2342 int cpu, nbe;
2343 bool ret = false;
2344
2345 if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
24f9a7fe 2346 amd64_warn("%s: Error allocating mask\n", __func__);
2347 return false;
2348 }
2349
2350 get_cpus_on_this_dct_cpumask(mask, nid);
2351
2352 rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
2353
2354 for_each_cpu(cpu, mask) {
50542251 2355 struct msr *reg = per_cpu_ptr(msrs, cpu);
5980bb9c 2356 nbe = reg->l & MSR_MCGCTL_NBE;
f6d6ae96 2357
2358 edac_dbg(0, "core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
2359 cpu, reg->q,
2360 (nbe ? "enabled" : "disabled"));
2361
2362 if (!nbe)
2363 goto out;
2364 }
2365 ret = true;
2366
2367out:
2368 free_cpumask_var(mask);
2369 return ret;
2370}
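/*
 * Per-core equivalent of the check above (sketch): NB MC errors are only
 * reported when MCG_CTL[4], the enable bit for MCA bank 4 (NB), is set:
 *
 *	rdmsrl(MSR_IA32_MCG_CTL, val);
 *	nbe = !!(val & MSR_MCGCTL_NBE);		// bit 4: NB bank enable
 */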
2371
c7e5301a 2372static int toggle_ecc_err_reporting(struct ecc_settings *s, u16 nid, bool on)
2373{
2374 cpumask_var_t cmask;
50542251 2375 int cpu;
2376
2377 if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
24f9a7fe 2378 amd64_warn("%s: error allocating mask\n", __func__);
2379 return false;
2380 }
2381
ae7bb7c6 2382 get_cpus_on_this_dct_cpumask(cmask, nid);
f6d6ae96 2383
2384 rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
2385
2386 for_each_cpu(cpu, cmask) {
2387
2388 struct msr *reg = per_cpu_ptr(msrs, cpu);
2389
f6d6ae96 2390 if (on) {
5980bb9c 2391 if (reg->l & MSR_MCGCTL_NBE)
ae7bb7c6 2392 s->flags.nb_mce_enable = 1;
f6d6ae96 2393
5980bb9c 2394 reg->l |= MSR_MCGCTL_NBE;
2395 } else {
2396 /*
d95cf4de 2397 * Turn off NB MCE reporting only when it was off before
f6d6ae96 2398 */
ae7bb7c6 2399 if (!s->flags.nb_mce_enable)
5980bb9c 2400 reg->l &= ~MSR_MCGCTL_NBE;
f6d6ae96 2401 }
2402 }
2403 wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
2404
2405 free_cpumask_var(cmask);
2406
2407 return 0;
2408}
2409
c7e5301a 2410static bool enable_ecc_error_reporting(struct ecc_settings *s, u16 nid,
2299ef71 2411 struct pci_dev *F3)
f9431992 2412{
2299ef71 2413 bool ret = true;
c9f4f26e 2414 u32 value, mask = 0x3; /* UECC/CECC enable */
f9431992 2415
2416 if (toggle_ecc_err_reporting(s, nid, ON)) {
2417 amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
2418 return false;
2419 }
2420
c9f4f26e 2421 amd64_read_pci_cfg(F3, NBCTL, &value);
f9431992 2422
2423 s->old_nbctl = value & mask;
2424 s->nbctl_valid = true;
2425
2426 value |= mask;
c9f4f26e 2427 amd64_write_pci_cfg(F3, NBCTL, value);
f9431992 2428
a97fa68e 2429 amd64_read_pci_cfg(F3, NBCFG, &value);
f9431992 2430
2431 edac_dbg(0, "1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
2432 nid, value, !!(value & NBCFG_ECC_ENABLE));
f9431992 2433
a97fa68e 2434 if (!(value & NBCFG_ECC_ENABLE)) {
24f9a7fe 2435 amd64_warn("DRAM ECC disabled on this node, enabling...\n");
f9431992 2436
ae7bb7c6 2437 s->flags.nb_ecc_prev = 0;
d95cf4de 2438
f9431992 2439 /* Attempt to turn on DRAM ECC Enable */
2440 value |= NBCFG_ECC_ENABLE;
2441 amd64_write_pci_cfg(F3, NBCFG, value);
f9431992 2442
a97fa68e 2443 amd64_read_pci_cfg(F3, NBCFG, &value);
f9431992 2444
a97fa68e 2445 if (!(value & NBCFG_ECC_ENABLE)) {
			amd64_warn("Hardware rejected DRAM ECC enable, "
				   "check memory DIMM configuration.\n");
2299ef71 2448 ret = false;
f9431992 2449 } else {
24f9a7fe 2450 amd64_info("Hardware accepted DRAM ECC Enable\n");
f9431992 2451 }
d95cf4de 2452 } else {
ae7bb7c6 2453 s->flags.nb_ecc_prev = 1;
f9431992 2454 }
d95cf4de 2455
2456 edac_dbg(0, "2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
2457 nid, value, !!(value & NBCFG_ECC_ENABLE));
f9431992 2458
2299ef71 2459 return ret;
2460}
2461
c7e5301a 2462static void restore_ecc_error_reporting(struct ecc_settings *s, u16 nid,
360b7f3c 2463 struct pci_dev *F3)
f9431992 2464{
2465 u32 value, mask = 0x3; /* UECC/CECC enable */
2466
f9431992 2467
ae7bb7c6 2468 if (!s->nbctl_valid)
2469 return;
2470
c9f4f26e 2471 amd64_read_pci_cfg(F3, NBCTL, &value);
f9431992 2472 value &= ~mask;
ae7bb7c6 2473 value |= s->old_nbctl;
f9431992 2474
c9f4f26e 2475 amd64_write_pci_cfg(F3, NBCTL, value);
f9431992 2476
	/* Restore the BIOS's DRAM ECC "off" setting if we had force-enabled it */
2478 if (!s->flags.nb_ecc_prev) {
2479 amd64_read_pci_cfg(F3, NBCFG, &value);
2480 value &= ~NBCFG_ECC_ENABLE;
2481 amd64_write_pci_cfg(F3, NBCFG, value);
2482 }
2483
2484 /* restore the NB Enable MCGCTL bit */
2299ef71 2485 if (toggle_ecc_err_reporting(s, nid, OFF))
24f9a7fe 2486 amd64_warn("Error restoring NB MCGCTL settings!\n");
2487}
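/*
 * The enable/restore pair above is a plain read-modify-write of the two
 * NBCTL ECC reporting bits (sketch):
 *
 *	enable:  s->old_nbctl = NBCTL & 0x3;  NBCTL |= 0x3;
 *	restore: NBCTL = (NBCTL & ~0x3) | s->old_nbctl;
 *
 * plus undoing NBCFG[DramEccEn] if we were the ones who set it.
 */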
2488
2489/*
2490 * EDAC requires that the BIOS have ECC enabled before
2491 * taking over the processing of ECC errors. A command line
2492 * option allows to force-enable hardware ECC later in
2493 * enable_ecc_error_reporting().
f9431992 2494 */
2495static const char *ecc_msg =
2496 "ECC disabled in the BIOS or no ECC capability, module will not load.\n"
2497 " Either enable ECC checking or force module loading by setting "
2498 "'ecc_enable_override'.\n"
2499 " (Note that use of the override may cause unknown side effects.)\n";
be3468e8 2500
c7e5301a 2501static bool ecc_enabled(struct pci_dev *F3, u16 nid)
2502{
2503 u32 value;
2299ef71 2504 u8 ecc_en = 0;
06724535 2505 bool nb_mce_en = false;
f9431992 2506
a97fa68e 2507 amd64_read_pci_cfg(F3, NBCFG, &value);
f9431992 2508
a97fa68e 2509 ecc_en = !!(value & NBCFG_ECC_ENABLE);
2299ef71 2510 amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled"));
f9431992 2511
d1ea71cd 2512 nb_mce_en = nb_mce_bank_enabled_on_node(nid);
06724535 2513 if (!nb_mce_en)
2514 amd64_notice("NB MCE bank disabled, set MSR "
2515 "0x%08x[4] on node %d to enable.\n",
2516 MSR_IA32_MCG_CTL, nid);
f9431992 2517
2518 if (!ecc_en || !nb_mce_en) {
2519 amd64_notice("%s", ecc_msg);
2520 return false;
2521 }
2522 return true;
2523}
2524
c5608759 2525static int set_mc_sysfs_attrs(struct mem_ctl_info *mci)
7d6034d3 2526{
a4b4bedc 2527 struct amd64_pvt *pvt = mci->pvt_info;
c5608759 2528 int rc;
7d6034d3 2529
2530 rc = amd64_create_sysfs_dbg_files(mci);
2531 if (rc < 0)
2532 return rc;
7d6034d3 2533
a4b4bedc 2534 if (pvt->fam >= 0x10) {
2535 rc = amd64_create_sysfs_inject_files(mci);
2536 if (rc < 0)
2537 return rc;
2538 }
2539
2540 return 0;
2541}
7d6034d3 2542
2543static void del_mc_sysfs_attrs(struct mem_ctl_info *mci)
2544{
2545 struct amd64_pvt *pvt = mci->pvt_info;
2546
c5608759 2547 amd64_remove_sysfs_dbg_files(mci);
7d6034d3 2548
a4b4bedc 2549 if (pvt->fam >= 0x10)
c5608759 2550 amd64_remove_sysfs_inject_files(mci);
2551}
2552
2553static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
2554 struct amd64_family_type *fam)
2555{
2556 struct amd64_pvt *pvt = mci->pvt_info;
2557
2558 mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
2559 mci->edac_ctl_cap = EDAC_FLAG_NONE;
7d6034d3 2560
5980bb9c 2561 if (pvt->nbcap & NBCAP_SECDED)
2562 mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
2563
5980bb9c 2564 if (pvt->nbcap & NBCAP_CHIPKILL)
2565 mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
2566
d1ea71cd 2567 mci->edac_cap = determine_edac_cap(pvt);
2568 mci->mod_name = EDAC_MOD_STR;
2569 mci->mod_ver = EDAC_AMD64_VERSION;
df71a053 2570 mci->ctl_name = fam->ctl_name;
8d5b5d9c 2571 mci->dev_name = pci_name(pvt->F2);
2572 mci->ctl_page_to_phys = NULL;
2573
7d6034d3 2574 /* memory scrubber interface */
2575 mci->set_sdram_scrub_rate = set_scrub_rate;
2576 mci->get_sdram_scrub_rate = get_scrub_rate;
2577}
2578
2579/*
2580 * returns a pointer to the family descriptor on success, NULL otherwise.
2581 */
d1ea71cd 2582static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
395ae783 2583{
2584 struct amd64_family_type *fam_type = NULL;
2585
18b94f66 2586 pvt->ext_model = boot_cpu_data.x86_model >> 4;
a4b4bedc 2587 pvt->stepping = boot_cpu_data.x86_mask;
2588 pvt->model = boot_cpu_data.x86_model;
2589 pvt->fam = boot_cpu_data.x86;
2590
2591 switch (pvt->fam) {
395ae783 2592 case 0xf:
2593 fam_type = &family_types[K8_CPUS];
2594 pvt->ops = &family_types[K8_CPUS].ops;
395ae783 2595 break;
df71a053 2596
395ae783 2597 case 0x10:
2598 fam_type = &family_types[F10_CPUS];
2599 pvt->ops = &family_types[F10_CPUS].ops;
2600 break;
2601
2602 case 0x15:
18b94f66 2603 if (pvt->model == 0x30) {
2604 fam_type = &family_types[F15_M30H_CPUS];
2605 pvt->ops = &family_types[F15_M30H_CPUS].ops;
2606 break;
2607 }
2608
2609 fam_type = &family_types[F15_CPUS];
2610 pvt->ops = &family_types[F15_CPUS].ops;
2611 break;
2612
94c1acf2 2613 case 0x16:
2614 if (pvt->model == 0x30) {
2615 fam_type = &family_types[F16_M30H_CPUS];
2616 pvt->ops = &family_types[F16_M30H_CPUS].ops;
2617 break;
2618 }
2619 fam_type = &family_types[F16_CPUS];
2620 pvt->ops = &family_types[F16_CPUS].ops;
2621 break;
2622
395ae783 2623 default:
24f9a7fe 2624 amd64_err("Unsupported family!\n");
0092b20d 2625 return NULL;
395ae783 2626 }
0092b20d 2627
df71a053 2628 amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name,
18b94f66 2629 (pvt->fam == 0xf ?
2630 (pvt->ext_model >= K8_REV_F ? "revF or later "
2631 : "revE or earlier ")
2632 : ""), pvt->mc_node_id);
0092b20d 2633 return fam_type;
2634}
2635
d1ea71cd 2636static int init_one_instance(struct pci_dev *F2)
2637{
2638 struct amd64_pvt *pvt = NULL;
0092b20d 2639 struct amd64_family_type *fam_type = NULL;
360b7f3c 2640 struct mem_ctl_info *mci = NULL;
ab5a503c 2641 struct edac_mc_layer layers[2];
7d6034d3 2642 int err = 0, ret;
772c3ff3 2643 u16 nid = amd_get_node_id(F2);
2644
2645 ret = -ENOMEM;
2646 pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
2647 if (!pvt)
360b7f3c 2648 goto err_ret;
7d6034d3 2649
360b7f3c 2650 pvt->mc_node_id = nid;
8d5b5d9c 2651 pvt->F2 = F2;
7d6034d3 2652
395ae783 2653 ret = -EINVAL;
d1ea71cd 2654 fam_type = per_family_init(pvt);
0092b20d 2655 if (!fam_type)
2656 goto err_free;
2657
7d6034d3 2658 ret = -ENODEV;
360b7f3c 2659 err = reserve_mc_sibling_devs(pvt, fam_type->f1_id, fam_type->f3_id);
2660 if (err)
2661 goto err_free;
2662
360b7f3c 2663 read_mc_regs(pvt);
7d6034d3 2664
2665 /*
2666 * We need to determine how many memory channels there are. Then use
2667 * that information for calculating the size of the dynamic instance
360b7f3c 2668 * tables in the 'mci' structure.
7d6034d3 2669 */
360b7f3c 2670 ret = -EINVAL;
2671 pvt->channel_count = pvt->ops->early_channel_count(pvt);
2672 if (pvt->channel_count < 0)
360b7f3c 2673 goto err_siblings;
2674
2675 ret = -ENOMEM;
2676 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
2677 layers[0].size = pvt->csels[0].b_cnt;
2678 layers[0].is_virt_csrow = true;
2679 layers[1].type = EDAC_MC_LAYER_CHANNEL;
2680
2681 /*
2682 * Always allocate two channels since we can have setups with DIMMs on
2683 * only one channel. Also, this simplifies handling later for the price
2684 * of a couple of KBs tops.
2685 */
2686 layers[1].size = 2;
ab5a503c 2687 layers[1].is_virt_csrow = false;
f0a56c48 2688
ca0907b9 2689 mci = edac_mc_alloc(nid, ARRAY_SIZE(layers), layers, 0);
7d6034d3 2690 if (!mci)
360b7f3c 2691 goto err_siblings;
2692
2693 mci->pvt_info = pvt;
fd687502 2694 mci->pdev = &pvt->F2->dev;
7d6034d3 2695
df71a053 2696 setup_mci_misc_attrs(mci, fam_type);
2697
2698 if (init_csrows(mci))
2699 mci->edac_cap = EDAC_FLAG_NONE;
2700
2701 ret = -ENODEV;
2702 if (edac_mc_add_mc(mci)) {
956b9ba1 2703 edac_dbg(1, "failed edac_mc_add_mc()\n");
2704 goto err_add_mc;
2705 }
c5608759 2706 if (set_mc_sysfs_attrs(mci)) {
		edac_dbg(1, "failed set_mc_sysfs_attrs()\n");
2708 goto err_add_sysfs;
2709 }
7d6034d3 2710
2711 /* register stuff with EDAC MCE */
2712 if (report_gart_errors)
2713 amd_report_gart_errors(true);
2714
df781d03 2715 amd_register_ecc_decoder(decode_bus_error);
549d042d 2716
2717 mcis[nid] = mci;
2718
2719 atomic_inc(&drv_instances);
2720
2721 return 0;
2722
2723err_add_sysfs:
2724 edac_mc_del_mc(mci->pdev);
2725err_add_mc:
2726 edac_mc_free(mci);
2727
2728err_siblings:
2729 free_mc_sibling_devs(pvt);
7d6034d3 2730
2731err_free:
2732 kfree(pvt);
7d6034d3 2733
360b7f3c 2734err_ret:
2735 return ret;
2736}
2737
2738static int probe_one_instance(struct pci_dev *pdev,
2739 const struct pci_device_id *mc_type)
7d6034d3 2740{
772c3ff3 2741 u16 nid = amd_get_node_id(pdev);
2299ef71 2742 struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
ae7bb7c6 2743 struct ecc_settings *s;
2299ef71 2744 int ret = 0;
7d6034d3 2745
7d6034d3 2746 ret = pci_enable_device(pdev);
b8cfa02f 2747 if (ret < 0) {
956b9ba1 2748 edac_dbg(0, "ret=%d\n", ret);
2749 return -EIO;
2750 }
7d6034d3 2751
2752 ret = -ENOMEM;
2753 s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL);
2754 if (!s)
2299ef71 2755 goto err_out;
2756
2757 ecc_stngs[nid] = s;
2758
2759 if (!ecc_enabled(F3, nid)) {
2760 ret = -ENODEV;
2761
2762 if (!ecc_enable_override)
2763 goto err_enable;
2764
2765 amd64_warn("Forcing ECC on!\n");
2766
2767 if (!enable_ecc_error_reporting(s, nid, F3))
2768 goto err_enable;
2769 }
2770
d1ea71cd 2771 ret = init_one_instance(pdev);
360b7f3c 2772 if (ret < 0) {
ae7bb7c6 2773 amd64_err("Error probing instance: %d\n", nid);
2774 restore_ecc_error_reporting(s, nid, F3);
2775 }
2776
2777 return ret;
2778
2779err_enable:
2780 kfree(s);
2781 ecc_stngs[nid] = NULL;
2782
2783err_out:
2784 return ret;
2785}
2786
d1ea71cd 2787static void remove_one_instance(struct pci_dev *pdev)
2788{
2789 struct mem_ctl_info *mci;
2790 struct amd64_pvt *pvt;
772c3ff3 2791 u16 nid = amd_get_node_id(pdev);
2792 struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
2793 struct ecc_settings *s = ecc_stngs[nid];
7d6034d3 2794
c5608759 2795 mci = find_mci_by_dev(&pdev->dev);
2796 WARN_ON(!mci);
2797
c5608759 2798 del_mc_sysfs_attrs(mci);
2799 /* Remove from EDAC CORE tracking list */
2800 mci = edac_mc_del_mc(&pdev->dev);
2801 if (!mci)
2802 return;
2803
2804 pvt = mci->pvt_info;
2805
360b7f3c 2806 restore_ecc_error_reporting(s, nid, F3);
7d6034d3 2807
360b7f3c 2808 free_mc_sibling_devs(pvt);
7d6034d3 2809
2810 /* unregister from EDAC MCE */
2811 amd_report_gart_errors(false);
df781d03 2812 amd_unregister_ecc_decoder(decode_bus_error);
549d042d 2813
2814 kfree(ecc_stngs[nid]);
2815 ecc_stngs[nid] = NULL;
ae7bb7c6 2816
7d6034d3 2817 /* Free the EDAC CORE resources */
8f68ed97 2818 mci->pvt_info = NULL;
360b7f3c 2819 mcis[nid] = NULL;
2820
2821 kfree(pvt);
2822 edac_mc_free(mci);
2823}
2824
2825/*
2826 * This table is part of the interface for loading drivers for PCI devices. The
2827 * PCI core identifies what devices are on a system during boot, and then
 * queries this table to see whether this driver handles a given device.
2829 */
ba935f40 2830static const struct pci_device_id amd64_pci_table[] = {
2831 {
2832 .vendor = PCI_VENDOR_ID_AMD,
2833 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
2834 .subvendor = PCI_ANY_ID,
2835 .subdevice = PCI_ANY_ID,
2836 .class = 0,
2837 .class_mask = 0,
2838 },
2839 {
2840 .vendor = PCI_VENDOR_ID_AMD,
2841 .device = PCI_DEVICE_ID_AMD_10H_NB_DRAM,
2842 .subvendor = PCI_ANY_ID,
2843 .subdevice = PCI_ANY_ID,
2844 .class = 0,
2845 .class_mask = 0,
7d6034d3 2846 },
2847 {
2848 .vendor = PCI_VENDOR_ID_AMD,
2849 .device = PCI_DEVICE_ID_AMD_15H_NB_F2,
2850 .subvendor = PCI_ANY_ID,
2851 .subdevice = PCI_ANY_ID,
2852 .class = 0,
2853 .class_mask = 0,
2854 },
2855 {
2856 .vendor = PCI_VENDOR_ID_AMD,
2857 .device = PCI_DEVICE_ID_AMD_15H_M30H_NB_F2,
2858 .subvendor = PCI_ANY_ID,
2859 .subdevice = PCI_ANY_ID,
2860 .class = 0,
2861 .class_mask = 0,
2862 },
2863 {
2864 .vendor = PCI_VENDOR_ID_AMD,
2865 .device = PCI_DEVICE_ID_AMD_16H_NB_F2,
2866 .subvendor = PCI_ANY_ID,
2867 .subdevice = PCI_ANY_ID,
2868 .class = 0,
2869 .class_mask = 0,
2870 },
2871 {
2872 .vendor = PCI_VENDOR_ID_AMD,
2873 .device = PCI_DEVICE_ID_AMD_16H_M30H_NB_F2,
2874 .subvendor = PCI_ANY_ID,
2875 .subdevice = PCI_ANY_ID,
2876 .class = 0,
2877 .class_mask = 0,
2878 },
df71a053 2879
2880 {0, }
2881};
2882MODULE_DEVICE_TABLE(pci, amd64_pci_table);
2883
2884static struct pci_driver amd64_pci_driver = {
2885 .name = EDAC_MOD_STR,
2886 .probe = probe_one_instance,
2887 .remove = remove_one_instance,
2888 .id_table = amd64_pci_table,
2889};
2890
360b7f3c 2891static void setup_pci_device(void)
2892{
2893 struct mem_ctl_info *mci;
2894 struct amd64_pvt *pvt;
2895
d1ea71cd 2896 if (pci_ctl)
2897 return;
2898
cc4d8860 2899 mci = mcis[0];
2900 if (!mci)
2901 return;
7d6034d3 2902
2903 pvt = mci->pvt_info;
2904 pci_ctl = edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);
2905 if (!pci_ctl) {
2906 pr_warn("%s(): Unable to create PCI control\n", __func__);
		pr_warn("%s(): PCI error reporting via EDAC will not be available\n", __func__);
2908 }
2909}
2910
2911static int __init amd64_edac_init(void)
2912{
360b7f3c 2913 int err = -ENODEV;
7d6034d3 2914
df71a053 2915 printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION);
2916
2917 opstate_init();
2918
9653a5c7 2919 if (amd_cache_northbridges() < 0)
56b34b91 2920 goto err_ret;
7d6034d3 2921
cc4d8860 2922 err = -ENOMEM;
	mcis	  = kcalloc(amd_nb_num(), sizeof(mcis[0]), GFP_KERNEL);
	ecc_stngs = kcalloc(amd_nb_num(), sizeof(ecc_stngs[0]), GFP_KERNEL);
360b7f3c 2925 if (!(mcis && ecc_stngs))
a9f0fbe2 2926 goto err_free;
cc4d8860 2927
50542251 2928 msrs = msrs_alloc();
56b34b91 2929 if (!msrs)
360b7f3c 2930 goto err_free;
50542251 2931
2932 err = pci_register_driver(&amd64_pci_driver);
2933 if (err)
56b34b91 2934 goto err_pci;
7d6034d3 2935
56b34b91 2936 err = -ENODEV;
2937 if (!atomic_read(&drv_instances))
2938 goto err_no_instances;
7d6034d3 2939
2940 setup_pci_device();
2941 return 0;
7d6034d3 2942
360b7f3c 2943err_no_instances:
7d6034d3 2944 pci_unregister_driver(&amd64_pci_driver);
cc4d8860 2945
2946err_pci:
2947 msrs_free(msrs);
2948 msrs = NULL;
cc4d8860 2949
2950err_free:
2951 kfree(mcis);
2952 mcis = NULL;
2953
2954 kfree(ecc_stngs);
2955 ecc_stngs = NULL;
2956
56b34b91 2957err_ret:
2958 return err;
2959}
2960
2961static void __exit amd64_edac_exit(void)
2962{
2963 if (pci_ctl)
2964 edac_pci_release_generic_ctl(pci_ctl);
2965
2966 pci_unregister_driver(&amd64_pci_driver);
50542251 2967
2968 kfree(ecc_stngs);
2969 ecc_stngs = NULL;
2970
2971 kfree(mcis);
2972 mcis = NULL;
2973
2974 msrs_free(msrs);
2975 msrs = NULL;
2976}
2977
2978module_init(amd64_edac_init);
2979module_exit(amd64_edac_exit);
2980
2981MODULE_LICENSE("GPL");
2982MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
2983 "Dave Peterson, Thayne Harbaugh");
2984MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
2985 EDAC_AMD64_VERSION);
2986
2987module_param(edac_op_state, int, 0444);
2988MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");