Commit | Line | Data |
---|---|---|
2bc65418 | 1 | #include "amd64_edac.h" |
7d6034d3 | 2 | #include <asm/k8.h> |
2bc65418 DT |
3 | |
4 | static struct edac_pci_ctl_info *amd64_ctl_pci; | |
5 | ||
6 | static int report_gart_errors; | |
7 | module_param(report_gart_errors, int, 0644); | |
8 | ||
9 | /* | |
10 | * Set by command line parameter. If BIOS has enabled the ECC, this override is | |
11 | * cleared to prevent re-enabling the hardware by this driver. | |
12 | */ | |
13 | static int ecc_enable_override; | |
14 | module_param(ecc_enable_override, int, 0644); | |
15 | ||
a29d8b8e | 16 | static struct msr __percpu *msrs; |
50542251 | 17 | |
2bc65418 DT |
18 | /* Lookup table for all possible MC control instances */ |
19 | struct amd64_pvt; | |
3011b20d BP |
20 | static struct mem_ctl_info *mci_lookup[EDAC_MAX_NUMNODES]; |
21 | static struct amd64_pvt *pvt_lookup[EDAC_MAX_NUMNODES]; | |
2bc65418 | 22 | |
b70ef010 | 23 | /* |
1433eb99 BP |
24 | * Address to DRAM bank mapping: see F2x80 for K8 and F2x[1,0]80 for Fam10 and |
25 | * later. | |
b70ef010 | 26 | */ |
1433eb99 BP |
27 | static int ddr2_dbam_revCG[] = { |
28 | [0] = 32, | |
29 | [1] = 64, | |
30 | [2] = 128, | |
31 | [3] = 256, | |
32 | [4] = 512, | |
33 | [5] = 1024, | |
34 | [6] = 2048, | |
35 | }; | |
36 | ||
37 | static int ddr2_dbam_revD[] = { | |
38 | [0] = 32, | |
39 | [1] = 64, | |
40 | [2 ... 3] = 128, | |
41 | [4] = 256, | |
42 | [5] = 512, | |
43 | [6] = 256, | |
44 | [7] = 512, | |
45 | [8 ... 9] = 1024, | |
46 | [10] = 2048, | |
47 | }; | |
48 | ||
49 | static int ddr2_dbam[] = { [0] = 128, | |
50 | [1] = 256, | |
51 | [2 ... 4] = 512, | |
52 | [5 ... 6] = 1024, | |
53 | [7 ... 8] = 2048, | |
54 | [9 ... 10] = 4096, | |
55 | [11] = 8192, | |
56 | }; | |
57 | ||
58 | static int ddr3_dbam[] = { [0] = -1, | |
59 | [1] = 256, | |
60 | [2] = 512, | |
61 | [3 ... 4] = -1, | |
62 | [5 ... 6] = 1024, | |
63 | [7 ... 8] = 2048, | |
64 | [9 ... 10] = 4096, | |
65 | [11] = 8192, | |
b70ef010 BP |
66 | }; |
67 | ||
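To make these lookup tables concrete: each chip select gets a 4-bit field in the DBAM register, and that nibble (the cs_mode) indexes one of the arrays above. The user-space sketch below copies the ddr2_dbam contents; the nibble extraction, the sample register value and the array name are assumptions for illustration only.

```c
#include <stdio.h>

static const int ddr2_dbam_example[] = {
	[0] = 128, [1] = 256, [2 ... 4] = 512,
	[5 ... 6] = 1024, [7 ... 8] = 2048,
	[9 ... 10] = 4096, [11] = 8192,
};

int main(void)
{
	unsigned int dbam = 0x00000052;	/* assumed DBAM register value */
	int dimm;

	for (dimm = 0; dimm < 2; dimm++) {
		/* one 4-bit cs_mode field per DIMM, low nibble first */
		unsigned int cs_mode = (dbam >> (dimm * 4)) & 0xF;

		printf("DIMM %d: cs_mode %u -> table entry %d\n",
		       dimm, cs_mode, ddr2_dbam_example[cs_mode]);
	}
	return 0;	/* cs_mode 2 -> 512, cs_mode 5 -> 1024 */
}
```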
68 | /* | |
69 | * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing | |
70 | * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching- | |
71 | * or higher value'. | |
72 | * | |
73 | * FIXME: Produce a better mapping/linearisation. | |
74 | */ | |
75 | ||
76 | struct scrubrate scrubrates[] = { | |
77 | { 0x01, 1600000000UL}, | |
78 | { 0x02, 800000000UL}, | |
79 | { 0x03, 400000000UL}, | |
80 | { 0x04, 200000000UL}, | |
81 | { 0x05, 100000000UL}, | |
82 | { 0x06, 50000000UL}, | |
83 | { 0x07, 25000000UL}, | |
84 | { 0x08, 12284069UL}, | |
85 | { 0x09, 6274509UL}, | |
86 | { 0x0A, 3121951UL}, | |
87 | { 0x0B, 1560975UL}, | |
88 | { 0x0C, 781440UL}, | |
89 | { 0x0D, 390720UL}, | |
90 | { 0x0E, 195300UL}, | |
91 | { 0x0F, 97650UL}, | |
92 | { 0x10, 48854UL}, | |
93 | { 0x11, 24427UL}, | |
94 | { 0x12, 12213UL}, | |
95 | { 0x13, 6101UL}, | |
96 | { 0x14, 3051UL}, | |
97 | { 0x15, 1523UL}, | |
98 | { 0x16, 761UL}, | |
99 | { 0x00, 0UL}, /* scrubbing off */ | |
100 | }; | |
101 | ||
2bc65418 DT |
102 | /* |
103 | * Memory scrubber control interface. For K8, memory scrubbing is handled by | |
104 | * hardware and can involve L2 cache, dcache as well as the main memory. With | |
105 | * F10, this is extended to L3 cache scrubbing on CPU models sporting that | |
106 | * functionality. | |
107 | * | |
108 | * This causes the "units" for the scrubbing speed to vary from 64 byte blocks | |
109 | * (dram) to cache lines. This is nasty, so we will use bandwidth in | |
110 | * bytes/sec for the setting. | |
111 | * | |
112 | * Currently, we only do dram scrubbing. If the scrubbing is done in software on | |
113 | * other archs, we might not have access to the caches directly. | |
114 | */ | |
115 | ||
116 | /* | |
117 | * Scan the scrub rate mapping table for a close or matching bandwidth value to | |
118 | * issue. If the requested rate is too high, use the last maximum value found. | |
119 | */ | |
120 | static int amd64_search_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, | |
121 | u32 min_scrubrate) | |
122 | { | |
123 | u32 scrubval; | |
124 | int i; | |
125 | ||
126 | /* | |
127 | * map the configured rate (new_bw) to a value specific to the AMD64 | |
128 | * memory controller and apply to register. Search for the first | |
129 | * bandwidth entry that is greater than or equal to the setting requested | |
130 | * and program that. If at last entry, turn off DRAM scrubbing. | |
131 | */ | |
132 | for (i = 0; i < ARRAY_SIZE(scrubrates); i++) { | |
133 | /* | |
134 | * skip scrub rates which aren't recommended | |
135 | * (see F10 BKDG, F3x58) | |
136 | */ | |
137 | if (scrubrates[i].scrubval < min_scrubrate) | |
138 | continue; | |
139 | ||
140 | if (scrubrates[i].bandwidth <= new_bw) | |
141 | break; | |
142 | ||
143 | /* | |
144 | * if no suitable bandwidth found, turn off DRAM scrubbing | |
145 | * entirely by falling back to the last element in the | |
146 | * scrubrates array. | |
147 | */ | |
148 | } | |
149 | ||
150 | scrubval = scrubrates[i].scrubval; | |
151 | if (scrubval) | |
152 | edac_printk(KERN_DEBUG, EDAC_MC, | |
153 | "Setting scrub rate bandwidth: %u\n", | |
154 | scrubrates[i].bandwidth); | |
155 | else | |
156 | edac_printk(KERN_DEBUG, EDAC_MC, "Turning scrubbing off.\n"); | |
157 | ||
158 | pci_write_bits32(ctl, K8_SCRCTRL, scrubval, 0x001F); | |
159 | ||
160 | return 0; | |
161 | } | |
162 | ||
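As a concrete illustration of the search above, here is a minimal user-space sketch of the same "first bandwidth at or below the request" walk over a trimmed copy of the scrubrates table. The requested rate is invented and the min_scrubrate filtering is omitted, so this models only the selection logic and is not driver code.

```c
#include <stdio.h>

struct scrubrate { unsigned int scrubval; unsigned long bandwidth; };

static const struct scrubrate rates[] = {
	{ 0x01, 1600000000UL }, { 0x02, 800000000UL }, { 0x03, 400000000UL },
	{ 0x04,  200000000UL }, { 0x05, 100000000UL }, { 0x06,  50000000UL },
	{ 0x00, 0UL },				/* scrubbing off */
};

int main(void)
{
	unsigned long requested = 150000000UL;	/* assumed request: ~150 MB/s */
	size_t i;

	/* table is sorted by descending bandwidth; stop at the first
	 * entry that does not exceed the request */
	for (i = 0; i < sizeof(rates) / sizeof(rates[0]); i++)
		if (rates[i].bandwidth <= requested)
			break;

	printf("scrubval 0x%02x -> %lu bytes/s\n",
	       rates[i].scrubval, rates[i].bandwidth);	/* 0x05 -> 100000000 */
	return 0;
}
```

The terminating { 0x00, 0 } entry plays the same role as in the driver: if nothing else matches, scrubbing is switched off.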
eba042a8 | 163 | static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bandwidth) |
2bc65418 DT |
164 | { |
165 | struct amd64_pvt *pvt = mci->pvt_info; | |
166 | u32 min_scrubrate = 0x0; | |
167 | ||
168 | switch (boot_cpu_data.x86) { | |
169 | case 0xf: | |
170 | min_scrubrate = K8_MIN_SCRUB_RATE_BITS; | |
171 | break; | |
172 | case 0x10: | |
173 | min_scrubrate = F10_MIN_SCRUB_RATE_BITS; | |
174 | break; | |
175 | case 0x11: | |
176 | min_scrubrate = F11_MIN_SCRUB_RATE_BITS; | |
177 | break; | |
178 | ||
179 | default: | |
180 | amd64_printk(KERN_ERR, "Unsupported family!\n"); | |
bc571178 | 181 | return -EINVAL; |
2bc65418 | 182 | } |
eba042a8 BP |
183 | return amd64_search_set_scrub_rate(pvt->misc_f3_ctl, bandwidth, |
184 | min_scrubrate); | |
2bc65418 DT |
185 | } |
186 | ||
187 | static int amd64_get_scrub_rate(struct mem_ctl_info *mci, u32 *bw) | |
188 | { | |
189 | struct amd64_pvt *pvt = mci->pvt_info; | |
190 | u32 scrubval = 0; | |
6ba5dcdc | 191 | int status = -1, i; |
2bc65418 | 192 | |
6ba5dcdc | 193 | amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_SCRCTRL, &scrubval); |
2bc65418 DT |
194 | |
195 | scrubval = scrubval & 0x001F; | |
196 | ||
197 | edac_printk(KERN_DEBUG, EDAC_MC, | |
198 | "pci-read, sdram scrub control value: %d \n", scrubval); | |
199 | ||
926311fd | 200 | for (i = 0; i < ARRAY_SIZE(scrubrates); i++) { |
2bc65418 DT |
201 | if (scrubrates[i].scrubval == scrubval) { |
202 | *bw = scrubrates[i].bandwidth; | |
203 | status = 0; | |
204 | break; | |
205 | } | |
206 | } | |
207 | ||
208 | return status; | |
209 | } | |
210 | ||
6775763a DT |
211 | /* Map from a CSROW entry to the mask entry that operates on it */ |
212 | static inline u32 amd64_map_to_dcs_mask(struct amd64_pvt *pvt, int csrow) | |
213 | { | |
1433eb99 | 214 | if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) |
9d858bb1 BP |
215 | return csrow; |
216 | else | |
217 | return csrow >> 1; | |
6775763a DT |
218 | } |
219 | ||
220 | /* return the 'base' address the i'th CS entry of the 'dct' DRAM controller */ | |
221 | static u32 amd64_get_dct_base(struct amd64_pvt *pvt, int dct, int csrow) | |
222 | { | |
223 | if (dct == 0) | |
224 | return pvt->dcsb0[csrow]; | |
225 | else | |
226 | return pvt->dcsb1[csrow]; | |
227 | } | |
228 | ||
229 | /* | |
230 | * Return the 'mask' address for the i'th CS entry. This function is needed | |
231 | * because the number of DCSM registers on Rev E and prior differs from that on | |
232 | * Rev F and later. | |
233 | */ | |
234 | static u32 amd64_get_dct_mask(struct amd64_pvt *pvt, int dct, int csrow) | |
235 | { | |
236 | if (dct == 0) | |
237 | return pvt->dcsm0[amd64_map_to_dcs_mask(pvt, csrow)]; | |
238 | else | |
239 | return pvt->dcsm1[amd64_map_to_dcs_mask(pvt, csrow)]; | |
240 | } | |
241 | ||
242 | ||
243 | /* | |
244 | * In *base and *limit, pass back the full 40-bit base and limit physical | |
245 | * addresses for the node given by node_id. This information is obtained from | |
246 | * DRAM Base (section 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers. The | |
247 | * base and limit addresses are of type SysAddr, as defined at the start of | |
248 | * section 3.4.4 (p. 70). They are the lowest and highest physical addresses | |
249 | * in the address range they represent. | |
250 | */ | |
251 | static void amd64_get_base_and_limit(struct amd64_pvt *pvt, int node_id, | |
252 | u64 *base, u64 *limit) | |
253 | { | |
254 | *base = pvt->dram_base[node_id]; | |
255 | *limit = pvt->dram_limit[node_id]; | |
256 | } | |
257 | ||
258 | /* | |
259 | * Return 1 if the SysAddr given by sys_addr matches the base/limit associated | |
260 | * with node_id | |
261 | */ | |
262 | static int amd64_base_limit_match(struct amd64_pvt *pvt, | |
263 | u64 sys_addr, int node_id) | |
264 | { | |
265 | u64 base, limit, addr; | |
266 | ||
267 | amd64_get_base_and_limit(pvt, node_id, &base, &limit); | |
268 | ||
269 | /* The K8 treats this as a 40-bit value. However, bits 63-40 will be | |
270 | * all ones if the most significant implemented address bit is 1. | |
271 | * Here we discard bits 63-40. See section 3.4.2 of AMD publication | |
272 | * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1 | |
273 | * Application Programming. | |
274 | */ | |
275 | addr = sys_addr & 0x000000ffffffffffull; | |
276 | ||
277 | return (addr >= base) && (addr <= limit); | |
278 | } | |
279 | ||
280 | /* | |
281 | * Attempt to map a SysAddr to a node. On success, return a pointer to the | |
282 | * mem_ctl_info structure for the node that the SysAddr maps to. | |
283 | * | |
284 | * On failure, return NULL. | |
285 | */ | |
286 | static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci, | |
287 | u64 sys_addr) | |
288 | { | |
289 | struct amd64_pvt *pvt; | |
290 | int node_id; | |
291 | u32 intlv_en, bits; | |
292 | ||
293 | /* | |
294 | * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section | |
295 | * 3.4.4.2) registers to map the SysAddr to a node ID. | |
296 | */ | |
297 | pvt = mci->pvt_info; | |
298 | ||
299 | /* | |
300 | * The value of this field should be the same for all DRAM Base | |
301 | * registers. Therefore we arbitrarily choose to read it from the | |
302 | * register for node 0. | |
303 | */ | |
304 | intlv_en = pvt->dram_IntlvEn[0]; | |
305 | ||
306 | if (intlv_en == 0) { | |
8edc5445 | 307 | for (node_id = 0; node_id < DRAM_REG_COUNT; node_id++) { |
6775763a | 308 | if (amd64_base_limit_match(pvt, sys_addr, node_id)) |
8edc5445 | 309 | goto found; |
6775763a | 310 | } |
8edc5445 | 311 | goto err_no_match; |
6775763a DT |
312 | } |
313 | ||
72f158fe BP |
314 | if (unlikely((intlv_en != 0x01) && |
315 | (intlv_en != 0x03) && | |
316 | (intlv_en != 0x07))) { | |
6775763a DT |
317 | amd64_printk(KERN_WARNING, "junk value of 0x%x extracted from " |
318 | "IntlvEn field of DRAM Base Register for node 0: " | |
72f158fe | 319 | "this probably indicates a BIOS bug.\n", intlv_en); |
6775763a DT |
320 | return NULL; |
321 | } | |
322 | ||
323 | bits = (((u32) sys_addr) >> 12) & intlv_en; | |
324 | ||
325 | for (node_id = 0; ; ) { | |
8edc5445 | 326 | if ((pvt->dram_IntlvSel[node_id] & intlv_en) == bits) |
6775763a DT |
327 | break; /* intlv_sel field matches */ |
328 | ||
329 | if (++node_id >= DRAM_REG_COUNT) | |
330 | goto err_no_match; | |
331 | } | |
332 | ||
333 | /* sanity test for sys_addr */ | |
334 | if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) { | |
335 | amd64_printk(KERN_WARNING, | |
8edc5445 BP |
336 | "%s(): sys_addr 0x%llx falls outside base/limit " |
337 | "address range for node %d with node interleaving " | |
338 | "enabled.\n", | |
339 | __func__, sys_addr, node_id); | |
6775763a DT |
340 | return NULL; |
341 | } | |
342 | ||
343 | found: | |
344 | return edac_mc_find(node_id); | |
345 | ||
346 | err_no_match: | |
347 | debugf2("sys_addr 0x%lx doesn't match any node\n", | |
348 | (unsigned long)sys_addr); | |
349 | ||
350 | return NULL; | |
351 | } | |
e2ce7255 DT |
352 | |
353 | /* | |
354 | * Extract the DRAM CS base address from selected csrow register. | |
355 | */ | |
356 | static u64 base_from_dct_base(struct amd64_pvt *pvt, int csrow) | |
357 | { | |
358 | return ((u64) (amd64_get_dct_base(pvt, 0, csrow) & pvt->dcsb_base)) << | |
359 | pvt->dcs_shift; | |
360 | } | |
361 | ||
362 | /* | |
363 | * Extract the mask from the dcsb0[csrow] entry in a CPU revision-specific way. | |
364 | */ | |
365 | static u64 mask_from_dct_mask(struct amd64_pvt *pvt, int csrow) | |
366 | { | |
367 | u64 dcsm_bits, other_bits; | |
368 | u64 mask; | |
369 | ||
370 | /* Extract bits from DRAM CS Mask. */ | |
371 | dcsm_bits = amd64_get_dct_mask(pvt, 0, csrow) & pvt->dcsm_mask; | |
372 | ||
373 | other_bits = pvt->dcsm_mask; | |
374 | other_bits = ~(other_bits << pvt->dcs_shift); | |
375 | ||
376 | /* | |
377 | * The extracted bits from DCSM belong in the spaces represented by | |
378 | * the cleared bits in other_bits. | |
379 | */ | |
380 | mask = (dcsm_bits << pvt->dcs_shift) | other_bits; | |
381 | ||
382 | return mask; | |
383 | } | |
384 | ||
385 | /* | |
386 | * @input_addr is an InputAddr associated with the node given by mci. Return the | |
387 | * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr). | |
388 | */ | |
389 | static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr) | |
390 | { | |
391 | struct amd64_pvt *pvt; | |
392 | int csrow; | |
393 | u64 base, mask; | |
394 | ||
395 | pvt = mci->pvt_info; | |
396 | ||
397 | /* | |
398 | * Here we use the DRAM CS Base and DRAM CS Mask registers. For each CS | |
399 | * base/mask register pair, test the condition shown near the start of | |
400 | * section 3.5.4 (p. 84, BKDG #26094, K8, revA-E). | |
401 | */ | |
9d858bb1 | 402 | for (csrow = 0; csrow < pvt->cs_count; csrow++) { |
e2ce7255 DT |
403 | |
404 | /* This DRAM chip select is disabled on this node */ | |
405 | if ((pvt->dcsb0[csrow] & K8_DCSB_CS_ENABLE) == 0) | |
406 | continue; | |
407 | ||
408 | base = base_from_dct_base(pvt, csrow); | |
409 | mask = ~mask_from_dct_mask(pvt, csrow); | |
410 | ||
411 | if ((input_addr & mask) == (base & mask)) { | |
412 | debugf2("InputAddr 0x%lx matches csrow %d (node %d)\n", | |
413 | (unsigned long)input_addr, csrow, | |
414 | pvt->mc_node_id); | |
415 | ||
416 | return csrow; | |
417 | } | |
418 | } | |
419 | ||
420 | debugf2("no matching csrow for InputAddr 0x%lx (MC node %d)\n", | |
421 | (unsigned long)input_addr, pvt->mc_node_id); | |
422 | ||
423 | return -1; | |
424 | } | |
425 | ||
426 | /* | |
427 | * Return the base value defined by the DRAM Base register for the node | |
428 | * represented by mci. This function returns the full 40-bit value despite the | |
429 | * fact that the register only stores bits 39-24 of the value. See section | |
430 | * 3.4.4.1 (BKDG #26094, K8, revA-E) | |
431 | */ | |
432 | static inline u64 get_dram_base(struct mem_ctl_info *mci) | |
433 | { | |
434 | struct amd64_pvt *pvt = mci->pvt_info; | |
435 | ||
436 | return pvt->dram_base[pvt->mc_node_id]; | |
437 | } | |
438 | ||
439 | /* | |
440 | * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094) | |
441 | * for the node represented by mci. Info is passed back in *hole_base, | |
442 | * *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if | |
443 | * info is invalid. Info may be invalid for either of the following reasons: | |
444 | * | |
445 | * - The revision of the node is not E or greater. In this case, the DRAM Hole | |
446 | * Address Register does not exist. | |
447 | * | |
448 | * - The DramHoleValid bit is cleared in the DRAM Hole Address Register, | |
449 | * indicating that its contents are not valid. | |
450 | * | |
451 | * The values passed back in *hole_base, *hole_offset, and *hole_size are | |
452 | * complete 32-bit values despite the fact that the bitfields in the DHAR | |
453 | * only represent bits 31-24 of the base and offset values. | |
454 | */ | |
455 | int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base, | |
456 | u64 *hole_offset, u64 *hole_size) | |
457 | { | |
458 | struct amd64_pvt *pvt = mci->pvt_info; | |
459 | u64 base; | |
460 | ||
461 | /* only revE and later have the DRAM Hole Address Register */ | |
1433eb99 | 462 | if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_E) { |
e2ce7255 DT |
463 | debugf1(" revision %d for node %d does not support DHAR\n", |
464 | pvt->ext_model, pvt->mc_node_id); | |
465 | return 1; | |
466 | } | |
467 | ||
468 | /* only valid for Fam10h */ | |
469 | if (boot_cpu_data.x86 == 0x10 && | |
470 | (pvt->dhar & F10_DRAM_MEM_HOIST_VALID) == 0) { | |
471 | debugf1(" Dram Memory Hoisting is DISABLED on this system\n"); | |
472 | return 1; | |
473 | } | |
474 | ||
475 | if ((pvt->dhar & DHAR_VALID) == 0) { | |
476 | debugf1(" Dram Memory Hoisting is DISABLED on this node %d\n", | |
477 | pvt->mc_node_id); | |
478 | return 1; | |
479 | } | |
480 | ||
481 | /* This node has Memory Hoisting */ | |
482 | ||
483 | /* +------------------+--------------------+--------------------+----- | |
484 | * | memory | DRAM hole | relocated | | |
485 | * | [0, (x - 1)] | [x, 0xffffffff] | addresses from | | |
486 | * | | | DRAM hole | | |
487 | * | | | [0x100000000, | | |
488 | * | | | (0x100000000+ | | |
489 | * | | | (0xffffffff-x))] | | |
490 | * +------------------+--------------------+--------------------+----- | |
491 | * | |
492 | * Above is a diagram of physical memory showing the DRAM hole and the | |
493 | * relocated addresses from the DRAM hole. As shown, the DRAM hole | |
494 | * starts at address x (the base address) and extends through address | |
495 | * 0xffffffff. The DRAM Hole Address Register (DHAR) relocates the | |
496 | * addresses in the hole so that they start at 0x100000000. | |
497 | */ | |
498 | ||
499 | base = dhar_base(pvt->dhar); | |
500 | ||
501 | *hole_base = base; | |
502 | *hole_size = (0x1ull << 32) - base; | |
503 | ||
504 | if (boot_cpu_data.x86 > 0xf) | |
505 | *hole_offset = f10_dhar_offset(pvt->dhar); | |
506 | else | |
507 | *hole_offset = k8_dhar_offset(pvt->dhar); | |
508 | ||
509 | debugf1(" DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n", | |
510 | pvt->mc_node_id, (unsigned long)*hole_base, | |
511 | (unsigned long)*hole_offset, (unsigned long)*hole_size); | |
512 | ||
513 | return 0; | |
514 | } | |
515 | EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info); | |
516 | ||
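A small numeric sketch of what this helper reports, assuming the hole starts at 3 GiB: hole_size follows the "4 GiB minus base" arithmetic above, and the window test mirrors the hoisted-address check used by sys_addr_to_dram_addr() further down. The base and the sample address are assumptions for illustration.

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t hole_base = 0xC0000000ULL;		/* assumed: hole at 3 GiB    */
	uint64_t hole_size = (1ULL << 32) - hole_base;	/* 0x40000000 = 1 GiB        */
	uint64_t sys_addr  = 0x100200000ULL;		/* assumed: just above 4 GiB */

	/* SysAddrs in [4 GiB, 4 GiB + hole_size) are hoisted DRAM-hole
	 * addresses and get translated via the DHAR offset */
	if (sys_addr >= (1ULL << 32) && sys_addr < (1ULL << 32) + hole_size)
		printf("0x%llx lies in the hoisted window (hole size 0x%llx)\n",
		       (unsigned long long)sys_addr,
		       (unsigned long long)hole_size);
	return 0;
}
```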
93c2df58 DT |
517 | /* |
518 | * Return the DramAddr that the SysAddr given by @sys_addr maps to. It is | |
519 | * assumed that sys_addr maps to the node given by mci. | |
520 | * | |
521 | * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section | |
522 | * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a | |
523 | * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled, | |
524 | * then it is also involved in translating a SysAddr to a DramAddr. Sections | |
525 | * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting. | |
526 | * These parts of the documentation are unclear. I interpret them as follows: | |
527 | * | |
528 | * When node n receives a SysAddr, it processes the SysAddr as follows: | |
529 | * | |
530 | * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM | |
531 | * Limit registers for node n. If the SysAddr is not within the range | |
532 | * specified by the base and limit values, then node n ignores the Sysaddr | |
533 | * (since it does not map to node n). Otherwise continue to step 2 below. | |
534 | * | |
535 | * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is | |
536 | * disabled so skip to step 3 below. Otherwise see if the SysAddr is within | |
537 | * the range of relocated addresses (starting at 0x100000000) from the DRAM | |
538 | * hole. If not, skip to step 3 below. Else get the value of the | |
539 | * DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the | |
540 | * offset defined by this value from the SysAddr. | |
541 | * | |
542 | * 3. Obtain the base address for node n from the DRAMBase field of the DRAM | |
543 | * Base register for node n. To obtain the DramAddr, subtract the base | |
544 | * address from the SysAddr, as shown near the start of section 3.4.4 (p.70). | |
545 | */ | |
546 | static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr) | |
547 | { | |
548 | u64 dram_base, hole_base, hole_offset, hole_size, dram_addr; | |
549 | int ret = 0; | |
550 | ||
551 | dram_base = get_dram_base(mci); | |
552 | ||
553 | ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset, | |
554 | &hole_size); | |
555 | if (!ret) { | |
556 | if ((sys_addr >= (1ull << 32)) && | |
557 | (sys_addr < ((1ull << 32) + hole_size))) { | |
558 | /* use DHAR to translate SysAddr to DramAddr */ | |
559 | dram_addr = sys_addr - hole_offset; | |
560 | ||
561 | debugf2("using DHAR to translate SysAddr 0x%lx to " | |
562 | "DramAddr 0x%lx\n", | |
563 | (unsigned long)sys_addr, | |
564 | (unsigned long)dram_addr); | |
565 | ||
566 | return dram_addr; | |
567 | } | |
568 | } | |
569 | ||
570 | /* | |
571 | * Translate the SysAddr to a DramAddr as shown near the start of | |
572 | * section 3.4.4 (p. 70). Although sys_addr is a 64-bit value, the k8 | |
573 | * only deals with 40-bit values. Therefore we discard bits 63-40 of | |
574 | * sys_addr below. If bit 39 of sys_addr is 1 then the bits we | |
575 | * discard are all 1s. Otherwise the bits we discard are all 0s. See | |
576 | * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture | |
577 | * Programmer's Manual Volume 1 Application Programming. | |
578 | */ | |
579 | dram_addr = (sys_addr & 0xffffffffffull) - dram_base; | |
580 | ||
581 | debugf2("using DRAM Base register to translate SysAddr 0x%lx to " | |
582 | "DramAddr 0x%lx\n", (unsigned long)sys_addr, | |
583 | (unsigned long)dram_addr); | |
584 | return dram_addr; | |
585 | } | |
586 | ||
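Step 3 of the translation described above reduces to "truncate to 40 bits, then subtract the node's DRAM base". The sketch below runs that arithmetic on an assumed base and error address; both values are invented for illustration.

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t dram_base = 0x100000000ULL;	/* assumed node base: 4 GiB */
	uint64_t sys_addr  = 0x123456789ULL;	/* assumed error address    */

	/* same expression as the driver: keep bits 39:0, subtract the base */
	uint64_t dram_addr = (sys_addr & 0xffffffffffULL) - dram_base;

	printf("SysAddr 0x%llx -> DramAddr 0x%llx\n",
	       (unsigned long long)sys_addr,
	       (unsigned long long)dram_addr);	/* 0x23456789 */
	return 0;
}
```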
587 | /* | |
588 | * @intlv_en is the value of the IntlvEn field from a DRAM Base register | |
589 | * (section 3.4.4.1). Return the number of bits from a SysAddr that are used | |
590 | * for node interleaving. | |
591 | */ | |
592 | static int num_node_interleave_bits(unsigned intlv_en) | |
593 | { | |
594 | static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 }; | |
595 | int n; | |
596 | ||
597 | BUG_ON(intlv_en > 7); | |
598 | n = intlv_shift_table[intlv_en]; | |
599 | return n; | |
600 | } | |
601 | ||
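The shift table encodes the only architecturally valid IntlvEn encodings: 0, 1, 3 and 7 select 0, 1, 2 or 3 node-interleave bits. A quick, purely illustrative user-space check of the same mapping:

```c
#include <stdio.h>

static int num_node_interleave_bits(unsigned int intlv_en)
{
	/* same lookup as the driver; invalid encodings map to 0 */
	static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };

	return intlv_shift_table[intlv_en & 7];
}

int main(void)
{
	unsigned int v;

	for (v = 0; v < 8; v++)
		printf("IntlvEn=%u -> %d interleave bit(s)\n",
		       v, num_node_interleave_bits(v));
	return 0;	/* e.g. IntlvEn=3 -> 2 bits, IntlvEn=7 -> 3 bits */
}
```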
602 | /* Translate the DramAddr given by @dram_addr to an InputAddr. */ | |
603 | static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr) | |
604 | { | |
605 | struct amd64_pvt *pvt; | |
606 | int intlv_shift; | |
607 | u64 input_addr; | |
608 | ||
609 | pvt = mci->pvt_info; | |
610 | ||
611 | /* | |
612 | * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E) | |
613 | * concerning translating a DramAddr to an InputAddr. | |
614 | */ | |
615 | intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]); | |
616 | input_addr = ((dram_addr >> intlv_shift) & 0xffffff000ull) + | |
617 | (dram_addr & 0xfff); | |
618 | ||
619 | debugf2(" Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n", | |
620 | intlv_shift, (unsigned long)dram_addr, | |
621 | (unsigned long)input_addr); | |
622 | ||
623 | return input_addr; | |
624 | } | |
625 | ||
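In other words, the interleave bit(s) sitting just above the 4 KiB page offset are squeezed out of the DramAddr. A standalone sketch with an assumed DramAddr and a one-bit (two-node) interleave:

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int intlv_shift = 1;			/* assumed: 2-node interleave */
	uint64_t dram_addr = 0x12345678ULL;	/* assumed DramAddr           */

	/* drop the interleave bit(s) above bit 11, keep the low 12 bits */
	uint64_t input_addr = ((dram_addr >> intlv_shift) & 0xffffff000ULL) +
			      (dram_addr & 0xfff);

	printf("DramAddr 0x%llx -> InputAddr 0x%llx\n",
	       (unsigned long long)dram_addr,
	       (unsigned long long)input_addr);	/* 0x91a2678 */
	return 0;
}
```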
626 | /* | |
627 | * Translate the SysAddr represented by @sys_addr to an InputAddr. It is | |
628 | * assumed that @sys_addr maps to the node given by mci. | |
629 | */ | |
630 | static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr) | |
631 | { | |
632 | u64 input_addr; | |
633 | ||
634 | input_addr = | |
635 | dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr)); | |
636 | ||
637 | debugf2("SysAddr 0x%lx translates to InputAddr 0x%lx\n", | |
638 | (unsigned long)sys_addr, (unsigned long)input_addr); | |
639 | ||
640 | return input_addr; | |
641 | } | |
642 | ||
643 | ||
644 | /* | |
645 | * @input_addr is an InputAddr associated with the node represented by mci. | |
646 | * Translate @input_addr to a DramAddr and return the result. | |
647 | */ | |
648 | static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr) | |
649 | { | |
650 | struct amd64_pvt *pvt; | |
651 | int node_id, intlv_shift; | |
652 | u64 bits, dram_addr; | |
653 | u32 intlv_sel; | |
654 | ||
655 | /* | |
656 | * The text near the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E) | |
657 | * shows how to translate a DramAddr to an InputAddr. Here we reverse | |
658 | * this procedure. When translating from a DramAddr to an InputAddr, the | |
659 | * bits used for node interleaving are discarded. Here we recover these | |
660 | * bits from the IntlvSel field of the DRAM Limit register (section | |
661 | * 3.4.4.2) for the node that input_addr is associated with. | |
662 | */ | |
663 | pvt = mci->pvt_info; | |
664 | node_id = pvt->mc_node_id; | |
665 | BUG_ON((node_id < 0) || (node_id > 7)); | |
666 | ||
667 | intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]); | |
668 | ||
669 | if (intlv_shift == 0) { | |
670 | debugf1(" InputAddr 0x%lx translates to DramAddr of " | |
671 | "same value\n", (unsigned long)input_addr); | |
672 | ||
673 | return input_addr; | |
674 | } | |
675 | ||
676 | bits = ((input_addr & 0xffffff000ull) << intlv_shift) + | |
677 | (input_addr & 0xfff); | |
678 | ||
679 | intlv_sel = pvt->dram_IntlvSel[node_id] & ((1 << intlv_shift) - 1); | |
680 | dram_addr = bits + (intlv_sel << 12); | |
681 | ||
682 | debugf1("InputAddr 0x%lx translates to DramAddr 0x%lx " | |
683 | "(%d node interleave bits)\n", (unsigned long)input_addr, | |
684 | (unsigned long)dram_addr, intlv_shift); | |
685 | ||
686 | return dram_addr; | |
687 | } | |
688 | ||
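Continuing the DramAddr-to-InputAddr sketch from earlier, the reverse step shifts the upper bits back up and re-inserts the node's IntlvSel bit(s) at bit 12. With the same assumed numbers (and IntlvSel = 1) the round trip lands back on the original DramAddr:

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int intlv_shift = 1;			/* assumed: 2-node interleave      */
	uint32_t intlv_sel = 1;			/* assumed IntlvSel for this node  */
	uint64_t input_addr = 0x91a2678ULL;	/* InputAddr from the sketch above */

	/* shift bits 12 and up back by intlv_shift, then splice the
	 * interleave select bit(s) back in at bit 12 */
	uint64_t dram_addr = ((input_addr & 0xffffff000ULL) << intlv_shift) +
			     (input_addr & 0xfff) +
			     ((uint64_t)intlv_sel << 12);

	printf("InputAddr 0x%llx -> DramAddr 0x%llx\n",
	       (unsigned long long)input_addr,
	       (unsigned long long)dram_addr);	/* 0x12345678 again */
	return 0;
}
```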
689 | /* | |
690 | * @dram_addr is a DramAddr that maps to the node represented by mci. Convert | |
691 | * @dram_addr to a SysAddr. | |
692 | */ | |
693 | static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr) | |
694 | { | |
695 | struct amd64_pvt *pvt = mci->pvt_info; | |
696 | u64 hole_base, hole_offset, hole_size, base, limit, sys_addr; | |
697 | int ret = 0; | |
698 | ||
699 | ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset, | |
700 | &hole_size); | |
701 | if (!ret) { | |
702 | if ((dram_addr >= hole_base) && | |
703 | (dram_addr < (hole_base + hole_size))) { | |
704 | sys_addr = dram_addr + hole_offset; | |
705 | ||
706 | debugf1("using DHAR to translate DramAddr 0x%lx to " | |
707 | "SysAddr 0x%lx\n", (unsigned long)dram_addr, | |
708 | (unsigned long)sys_addr); | |
709 | ||
710 | return sys_addr; | |
711 | } | |
712 | } | |
713 | ||
714 | amd64_get_base_and_limit(pvt, pvt->mc_node_id, &base, &limit); | |
715 | sys_addr = dram_addr + base; | |
716 | ||
717 | /* | |
718 | * The sys_addr we have computed up to this point is a 40-bit value | |
719 | * because the k8 deals with 40-bit values. However, the value we are | |
720 | * supposed to return is a full 64-bit physical address. The AMD | |
721 | * x86-64 architecture specifies that the most significant implemented | |
722 | * address bit through bit 63 of a physical address must be either all | |
723 | * 0s or all 1s. Therefore we sign-extend the 40-bit sys_addr to a | |
724 | * 64-bit value below. See section 3.4.2 of AMD publication 24592: | |
725 | * AMD x86-64 Architecture Programmer's Manual Volume 1 Application | |
726 | * Programming. | |
727 | */ | |
728 | sys_addr |= ~((sys_addr & (1ull << 39)) - 1); | |
729 | ||
730 | debugf1(" Node %d, DramAddr 0x%lx to SysAddr 0x%lx\n", | |
731 | pvt->mc_node_id, (unsigned long)dram_addr, | |
732 | (unsigned long)sys_addr); | |
733 | ||
734 | return sys_addr; | |
735 | } | |
736 | ||
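The final OR above is a branchless sign extension of bit 39 into bits 63:40: when bit 39 is set, ~((sys_addr & (1ull << 39)) - 1) has ones from bit 39 upward; when it is clear, the expression evaluates to zero and the address is left alone. A standalone check with an assumed 40-bit address:

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* assumed 40-bit DRAM-derived address with bit 39 set */
	uint64_t sys_addr = 0x8012345678ULL;

	/* same sign-extension expression as the driver */
	sys_addr |= ~((sys_addr & (1ULL << 39)) - 1);

	printf("sign-extended: 0x%llx\n",
	       (unsigned long long)sys_addr);	/* 0xffffff8012345678 */
	return 0;
}
```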
737 | /* | |
738 | * @input_addr is an InputAddr associated with the node given by mci. Translate | |
739 | * @input_addr to a SysAddr. | |
740 | */ | |
741 | static inline u64 input_addr_to_sys_addr(struct mem_ctl_info *mci, | |
742 | u64 input_addr) | |
743 | { | |
744 | return dram_addr_to_sys_addr(mci, | |
745 | input_addr_to_dram_addr(mci, input_addr)); | |
746 | } | |
747 | ||
748 | /* | |
749 | * Find the minimum and maximum InputAddr values that map to the given @csrow. | |
750 | * Pass back these values in *input_addr_min and *input_addr_max. | |
751 | */ | |
752 | static void find_csrow_limits(struct mem_ctl_info *mci, int csrow, | |
753 | u64 *input_addr_min, u64 *input_addr_max) | |
754 | { | |
755 | struct amd64_pvt *pvt; | |
756 | u64 base, mask; | |
757 | ||
758 | pvt = mci->pvt_info; | |
9d858bb1 | 759 | BUG_ON((csrow < 0) || (csrow >= pvt->cs_count)); |
93c2df58 DT |
760 | |
761 | base = base_from_dct_base(pvt, csrow); | |
762 | mask = mask_from_dct_mask(pvt, csrow); | |
763 | ||
764 | *input_addr_min = base & ~mask; | |
765 | *input_addr_max = base | mask | pvt->dcs_mask_notused; | |
766 | } | |
767 | ||
93c2df58 DT |
768 | /* Map the Error address to a PAGE and PAGE OFFSET. */ |
769 | static inline void error_address_to_page_and_offset(u64 error_address, | |
770 | u32 *page, u32 *offset) | |
771 | { | |
772 | *page = (u32) (error_address >> PAGE_SHIFT); | |
773 | *offset = ((u32) error_address) & ~PAGE_MASK; | |
774 | } | |
775 | ||
776 | /* | |
777 | * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address | |
778 | * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers | |
779 | * of a node that detected an ECC memory error. mci represents the node that | |
780 | * the error address maps to (possibly different from the node that detected | |
781 | * the error). Return the number of the csrow that sys_addr maps to, or -1 on | |
782 | * error. | |
783 | */ | |
784 | static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr) | |
785 | { | |
786 | int csrow; | |
787 | ||
788 | csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr)); | |
789 | ||
790 | if (csrow == -1) | |
791 | amd64_mc_printk(mci, KERN_ERR, | |
792 | "Failed to translate InputAddr to csrow for " | |
793 | "address 0x%lx\n", (unsigned long)sys_addr); | |
794 | return csrow; | |
795 | } | |
e2ce7255 | 796 | |
bfc04aec | 797 | static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16); |
2da11654 | 798 | |
ad6a32e9 BP |
799 | static u16 extract_syndrome(struct err_regs *err) |
800 | { | |
801 | return ((err->nbsh >> 15) & 0xff) | ((err->nbsl >> 16) & 0xff00); | |
802 | } | |
803 | ||
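For reference, the packing above takes the low syndrome byte from NBSH bits 22:15 and the high byte from NBSL bits 31:24 (the right shift by 16 leaves it aligned at bits 15:8 of the result). A standalone sketch with invented register values:

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t nbsh = 0xABu << 15;	/* assume low syndrome byte 0xAB  */
	uint32_t nbsl = 0xCDu << 24;	/* assume high syndrome byte 0xCD */

	/* same expression as extract_syndrome() */
	uint16_t syndrome = ((nbsh >> 15) & 0xff) | ((nbsl >> 16) & 0xff00);

	printf("syndrome = 0x%04x\n", syndrome);	/* 0xcdab */
	return 0;
}
```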
2da11654 DT |
804 | static void amd64_cpu_display_info(struct amd64_pvt *pvt) |
805 | { | |
806 | if (boot_cpu_data.x86 == 0x11) | |
807 | edac_printk(KERN_DEBUG, EDAC_MC, "F11h CPU detected\n"); | |
808 | else if (boot_cpu_data.x86 == 0x10) | |
809 | edac_printk(KERN_DEBUG, EDAC_MC, "F10h CPU detected\n"); | |
810 | else if (boot_cpu_data.x86 == 0xf) | |
811 | edac_printk(KERN_DEBUG, EDAC_MC, "%s detected\n", | |
1433eb99 | 812 | (pvt->ext_model >= K8_REV_F) ? |
2da11654 DT |
813 | "Rev F or later" : "Rev E or earlier"); |
814 | else | |
815 | /* we'll hardly ever get here */ | |
816 | edac_printk(KERN_ERR, EDAC_MC, "Unknown cpu!\n"); | |
817 | } | |
818 | ||
819 | /* | |
820 | * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs | |
821 | * are ECC capable. | |
822 | */ | |
823 | static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt) | |
824 | { | |
825 | int bit; | |
584fcff4 | 826 | enum edac_type edac_cap = EDAC_FLAG_NONE; |
2da11654 | 827 | |
1433eb99 | 828 | bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= K8_REV_F) |
2da11654 DT |
829 | ? 19 |
830 | : 17; | |
831 | ||
584fcff4 | 832 | if (pvt->dclr0 & BIT(bit)) |
2da11654 DT |
833 | edac_cap = EDAC_FLAG_SECDED; |
834 | ||
835 | return edac_cap; | |
836 | } | |
837 | ||
838 | ||
8566c4df | 839 | static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt); |
2da11654 | 840 | |
68798e17 BP |
841 | static void amd64_dump_dramcfg_low(u32 dclr, int chan) |
842 | { | |
843 | debugf1("F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr); | |
844 | ||
845 | debugf1(" DIMM type: %sbuffered; all DIMMs support ECC: %s\n", | |
846 | (dclr & BIT(16)) ? "un" : "", | |
847 | (dclr & BIT(19)) ? "yes" : "no"); | |
848 | ||
849 | debugf1(" PAR/ERR parity: %s\n", | |
850 | (dclr & BIT(8)) ? "enabled" : "disabled"); | |
851 | ||
852 | debugf1(" DCT 128bit mode width: %s\n", | |
853 | (dclr & BIT(11)) ? "128b" : "64b"); | |
854 | ||
855 | debugf1(" x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n", | |
856 | (dclr & BIT(12)) ? "yes" : "no", | |
857 | (dclr & BIT(13)) ? "yes" : "no", | |
858 | (dclr & BIT(14)) ? "yes" : "no", | |
859 | (dclr & BIT(15)) ? "yes" : "no"); | |
860 | } | |
861 | ||
2da11654 DT |
862 | /* Display and decode various NB registers for debug purposes. */ |
863 | static void amd64_dump_misc_regs(struct amd64_pvt *pvt) | |
864 | { | |
865 | int ganged; | |
866 | ||
68798e17 BP |
867 | debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap); |
868 | ||
869 | debugf1(" NB two channel DRAM capable: %s\n", | |
870 | (pvt->nbcap & K8_NBCAP_DCT_DUAL) ? "yes" : "no"); | |
2da11654 | 871 | |
68798e17 BP |
872 | debugf1(" ECC capable: %s, ChipKill ECC capable: %s\n", |
873 | (pvt->nbcap & K8_NBCAP_SECDED) ? "yes" : "no", | |
874 | (pvt->nbcap & K8_NBCAP_CHIPKILL) ? "yes" : "no"); | |
875 | ||
876 | amd64_dump_dramcfg_low(pvt->dclr0, 0); | |
2da11654 | 877 | |
8de1d91e | 878 | debugf1("F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare); |
2da11654 | 879 | |
8de1d91e BP |
880 | debugf1("F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, " |
881 | "offset: 0x%08x\n", | |
882 | pvt->dhar, | |
883 | dhar_base(pvt->dhar), | |
884 | (boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt->dhar) | |
885 | : f10_dhar_offset(pvt->dhar)); | |
2da11654 | 886 | |
8de1d91e BP |
887 | debugf1(" DramHoleValid: %s\n", |
888 | (pvt->dhar & DHAR_VALID) ? "yes" : "no"); | |
2da11654 | 889 | |
8de1d91e | 890 | /* everything below this point is Fam10h and above */ |
8566c4df BP |
891 | if (boot_cpu_data.x86 == 0xf) { |
892 | amd64_debug_display_dimm_sizes(0, pvt); | |
2da11654 | 893 | return; |
8566c4df | 894 | } |
2da11654 | 895 | |
ad6a32e9 BP |
896 | amd64_printk(KERN_INFO, "using %s syndromes.\n", |
897 | ((pvt->syn_type == 8) ? "x8" : "x4")); | |
898 | ||
8de1d91e | 899 | /* Only if NOT ganged does dclr1 have valid info */ |
68798e17 BP |
900 | if (!dct_ganging_enabled(pvt)) |
901 | amd64_dump_dramcfg_low(pvt->dclr1, 1); | |
2da11654 DT |
902 | |
903 | /* | |
904 | * Determine if ganged and then dump memory sizes for first controller, | |
905 | * and if NOT ganged dump info for 2nd controller. | |
906 | */ | |
907 | ganged = dct_ganging_enabled(pvt); | |
908 | ||
8566c4df | 909 | amd64_debug_display_dimm_sizes(0, pvt); |
2da11654 DT |
910 | |
911 | if (!ganged) | |
8566c4df | 912 | amd64_debug_display_dimm_sizes(1, pvt); |
2da11654 DT |
913 | } |
914 | ||
915 | /* Read in both of DBAM registers */ | |
916 | static void amd64_read_dbam_reg(struct amd64_pvt *pvt) | |
917 | { | |
6ba5dcdc | 918 | amd64_read_pci_cfg(pvt->dram_f2_ctl, DBAM0, &pvt->dbam0); |
2da11654 | 919 | |
6ba5dcdc BP |
920 | if (boot_cpu_data.x86 >= 0x10) |
921 | amd64_read_pci_cfg(pvt->dram_f2_ctl, DBAM1, &pvt->dbam1); | |
2da11654 DT |
922 | } |
923 | ||
94be4bff DT |
924 | /* |
925 | * NOTE: CPU Revision Dependent code: Rev E and Rev F | |
926 | * | |
927 | * Set the DCSB and DCSM mask values depending on the CPU revision value. Also | |
928 | * set the shift factor for the DCSB and DCSM values. | |
929 | * | |
930 | * ->dcs_mask_notused, RevE: | |
931 | * | |
932 | * To find the max InputAddr for the csrow, start with the base address and set | |
933 | * all bits that are "don't care" bits in the test at the start of section | |
934 | * 3.5.4 (p. 84). | |
935 | * | |
936 | * The "don't care" bits are all set bits in the mask and all bits in the gaps | |
937 | * between bit ranges [35:25] and [19:13]. The value REV_E_DCS_NOTUSED_BITS | |
938 | * represents bits [24:20] and [12:0], which are all bits in the above-mentioned | |
939 | * gaps. | |
940 | * | |
941 | * ->dcs_mask_notused, RevF and later: | |
942 | * | |
943 | * To find the max InputAddr for the csrow, start with the base address and set | |
944 | * all bits that are "don't care" bits in the test at the start of NPT section | |
945 | * 4.5.4 (p. 87). | |
946 | * | |
947 | * The "don't care" bits are all set bits in the mask and all bits in the gaps | |
948 | * between bit ranges [36:27] and [21:13]. | |
949 | * | |
950 | * The value REV_F_F1Xh_DCS_NOTUSED_BITS represents bits [26:22] and [12:0], | |
951 | * which are all bits in the above-mentioned gaps. | |
952 | */ | |
953 | static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt) | |
954 | { | |
9d858bb1 | 955 | |
1433eb99 | 956 | if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) { |
9d858bb1 BP |
957 | pvt->dcsb_base = REV_E_DCSB_BASE_BITS; |
958 | pvt->dcsm_mask = REV_E_DCSM_MASK_BITS; | |
959 | pvt->dcs_mask_notused = REV_E_DCS_NOTUSED_BITS; | |
960 | pvt->dcs_shift = REV_E_DCS_SHIFT; | |
961 | pvt->cs_count = 8; | |
962 | pvt->num_dcsm = 8; | |
963 | } else { | |
94be4bff DT |
964 | pvt->dcsb_base = REV_F_F1Xh_DCSB_BASE_BITS; |
965 | pvt->dcsm_mask = REV_F_F1Xh_DCSM_MASK_BITS; | |
966 | pvt->dcs_mask_notused = REV_F_F1Xh_DCS_NOTUSED_BITS; | |
967 | pvt->dcs_shift = REV_F_F1Xh_DCS_SHIFT; | |
968 | ||
9d858bb1 BP |
969 | if (boot_cpu_data.x86 == 0x11) { |
970 | pvt->cs_count = 4; | |
971 | pvt->num_dcsm = 2; | |
972 | } else { | |
973 | pvt->cs_count = 8; | |
974 | pvt->num_dcsm = 4; | |
94be4bff | 975 | } |
94be4bff DT |
976 | } |
977 | } | |
978 | ||
979 | /* | |
980 | * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask hw registers | |
981 | */ | |
982 | static void amd64_read_dct_base_mask(struct amd64_pvt *pvt) | |
983 | { | |
6ba5dcdc | 984 | int cs, reg; |
94be4bff DT |
985 | |
986 | amd64_set_dct_base_and_mask(pvt); | |
987 | ||
9d858bb1 | 988 | for (cs = 0; cs < pvt->cs_count; cs++) { |
94be4bff | 989 | reg = K8_DCSB0 + (cs * 4); |
6ba5dcdc | 990 | if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg, &pvt->dcsb0[cs])) |
94be4bff DT |
991 | debugf0(" DCSB0[%d]=0x%08x reg: F2x%x\n", |
992 | cs, pvt->dcsb0[cs], reg); | |
993 | ||
994 | /* If DCT are NOT ganged, then read in DCT1's base */ | |
995 | if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) { | |
996 | reg = F10_DCSB1 + (cs * 4); | |
6ba5dcdc BP |
997 | if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg, |
998 | &pvt->dcsb1[cs])) | |
94be4bff DT |
999 | debugf0(" DCSB1[%d]=0x%08x reg: F2x%x\n", |
1000 | cs, pvt->dcsb1[cs], reg); | |
1001 | } else { | |
1002 | pvt->dcsb1[cs] = 0; | |
1003 | } | |
1004 | } | |
1005 | ||
1006 | for (cs = 0; cs < pvt->num_dcsm; cs++) { | |
4afcd2dc | 1007 | reg = K8_DCSM0 + (cs * 4); |
6ba5dcdc | 1008 | if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg, &pvt->dcsm0[cs])) |
94be4bff DT |
1009 | debugf0(" DCSM0[%d]=0x%08x reg: F2x%x\n", |
1010 | cs, pvt->dcsm0[cs], reg); | |
1011 | ||
1012 | /* If DCT are NOT ganged, then read in DCT1's mask */ | |
1013 | if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) { | |
1014 | reg = F10_DCSM1 + (cs * 4); | |
6ba5dcdc BP |
1015 | if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg, |
1016 | &pvt->dcsm1[cs])) | |
94be4bff DT |
1017 | debugf0(" DCSM1[%d]=0x%08x reg: F2x%x\n", |
1018 | cs, pvt->dcsm1[cs], reg); | |
6ba5dcdc | 1019 | } else { |
94be4bff | 1020 | pvt->dcsm1[cs] = 0; |
6ba5dcdc | 1021 | } |
94be4bff DT |
1022 | } |
1023 | } | |
1024 | ||
1025 | static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt) | |
1026 | { | |
1027 | enum mem_type type; | |
1028 | ||
1433eb99 | 1029 | if (boot_cpu_data.x86 >= 0x10 || pvt->ext_model >= K8_REV_F) { |
6b4c0bde BP |
1030 | if (pvt->dchr0 & DDR3_MODE) |
1031 | type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3; | |
1032 | else | |
1033 | type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2; | |
94be4bff | 1034 | } else { |
94be4bff DT |
1035 | type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR; |
1036 | } | |
1037 | ||
239642fe | 1038 | debugf1(" Memory type is: %s\n", edac_mem_types[type]); |
94be4bff DT |
1039 | |
1040 | return type; | |
1041 | } | |
1042 | ||
ddff876d DT |
1043 | /* |
1044 | * Read the DRAM Configuration Low register. It differs between CG, D & E revs | |
1045 | * and the later RevF memory controllers (DDR vs DDR2) | |
1046 | * | |
1047 | * Return: | |
1048 | * number of memory channels in operation | |
1049 | * Pass back: | |
1050 | * contents of the DCL0_LOW register | |
1051 | */ | |
1052 | static int k8_early_channel_count(struct amd64_pvt *pvt) | |
1053 | { | |
1054 | int flag, err = 0; | |
1055 | ||
6ba5dcdc | 1056 | err = amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0); |
ddff876d DT |
1057 | if (err) |
1058 | return err; | |
1059 | ||
1433eb99 | 1060 | if ((boot_cpu_data.x86_model >> 4) >= K8_REV_F) { |
ddff876d DT |
1061 | /* RevF (NPT) and later */ |
1062 | flag = pvt->dclr0 & F10_WIDTH_128; | |
1063 | } else { | |
1064 | /* RevE and earlier */ | |
1065 | flag = pvt->dclr0 & REVE_WIDTH_128; | |
1066 | } | |
1067 | ||
1068 | /* not used */ | |
1069 | pvt->dclr1 = 0; | |
1070 | ||
1071 | return (flag) ? 2 : 1; | |
1072 | } | |
1073 | ||
1074 | /* extract the ERROR ADDRESS for the K8 CPUs */ | |
1075 | static u64 k8_get_error_address(struct mem_ctl_info *mci, | |
ef44cc4c | 1076 | struct err_regs *info) |
ddff876d DT |
1077 | { |
1078 | return (((u64) (info->nbeah & 0xff)) << 32) + | |
1079 | (info->nbeal & ~0x03); | |
1080 | } | |
1081 | ||
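So on K8 the error address is assembled with NBEAH[7:0] supplying bits 39:32 and NBEAL supplying bits 31:2; the two lowest bits are masked off. A standalone sketch with invented register contents:

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t nbeah = 0x12;		/* assumed MCA NB Address High */
	uint32_t nbeal = 0x3456789B;	/* assumed MCA NB Address Low  */

	/* same assembly as k8_get_error_address() */
	uint64_t addr = (((uint64_t)(nbeah & 0xff)) << 32) + (nbeal & ~0x03u);

	printf("error address = 0x%llx\n",
	       (unsigned long long)addr);	/* 0x1234567898 */
	return 0;
}
```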
1082 | /* | |
1083 | * Read the Base and Limit registers for K8 based Memory controllers; extract | |
1084 | * fields from the 'raw' reg into separate data fields | |
1085 | * | |
1086 | * Isolates: BASE, LIMIT, IntlvEn, IntlvSel, RW_EN | |
1087 | */ | |
1088 | static void k8_read_dram_base_limit(struct amd64_pvt *pvt, int dram) | |
1089 | { | |
1090 | u32 low; | |
1091 | u32 off = dram << 3; /* 8 bytes between DRAM entries */ | |
ddff876d | 1092 | |
6ba5dcdc | 1093 | amd64_read_pci_cfg(pvt->addr_f1_ctl, K8_DRAM_BASE_LOW + off, &low); |
ddff876d DT |
1094 | |
1095 | /* Extract parts into separate data entries */ | |
4997811e | 1096 | pvt->dram_base[dram] = ((u64) low & 0xFFFF0000) << 8; |
ddff876d DT |
1097 | pvt->dram_IntlvEn[dram] = (low >> 8) & 0x7; |
1098 | pvt->dram_rw_en[dram] = (low & 0x3); | |
1099 | ||
6ba5dcdc | 1100 | amd64_read_pci_cfg(pvt->addr_f1_ctl, K8_DRAM_LIMIT_LOW + off, &low); |
ddff876d DT |
1101 | |
1102 | /* | |
1103 | * Extract parts into separate data entries. Limit is the HIGHEST memory | |
1104 | * location of the region, so lower 24 bits need to be all ones | |
1105 | */ | |
4997811e | 1106 | pvt->dram_limit[dram] = (((u64) low & 0xFFFF0000) << 8) | 0x00FFFFFF; |
ddff876d DT |
1107 | pvt->dram_IntlvSel[dram] = (low >> 8) & 0x7; |
1108 | pvt->dram_DstNode[dram] = (low & 0x7); | |
1109 | } | |
1110 | ||
1111 | static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, | |
ad6a32e9 | 1112 | struct err_regs *err_info, u64 sys_addr) |
ddff876d DT |
1113 | { |
1114 | struct mem_ctl_info *src_mci; | |
ddff876d DT |
1115 | int channel, csrow; |
1116 | u32 page, offset; | |
ad6a32e9 | 1117 | u16 syndrome; |
ddff876d | 1118 | |
ad6a32e9 | 1119 | syndrome = extract_syndrome(err_info); |
ddff876d DT |
1120 | |
1121 | /* CHIPKILL enabled */ | |
ad6a32e9 | 1122 | if (err_info->nbcfg & K8_NBCFG_CHIPKILL) { |
bfc04aec | 1123 | channel = get_channel_from_ecc_syndrome(mci, syndrome); |
ddff876d DT |
1124 | if (channel < 0) { |
1125 | /* | |
1126 | * Syndrome didn't map, so we don't know which of the | |
1127 | * 2 DIMMs is in error. So we need to ID 'both' of them | |
1128 | * as suspect. | |
1129 | */ | |
1130 | amd64_mc_printk(mci, KERN_WARNING, | |
ad6a32e9 BP |
1131 | "unknown syndrome 0x%04x - possible " |
1132 | "error reporting race\n", syndrome); | |
ddff876d DT |
1133 | edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR); |
1134 | return; | |
1135 | } | |
1136 | } else { | |
1137 | /* | |
1138 | * non-chipkill ecc mode | |
1139 | * | |
1140 | * The k8 documentation is unclear about how to determine the | |
1141 | * channel number when using non-chipkill memory. This method | |
1142 | * was obtained from email communication with someone at AMD. | |
1143 | * (Wish the email was placed in this comment - norsk) | |
1144 | */ | |
44e9e2ee | 1145 | channel = ((sys_addr & BIT(3)) != 0); |
ddff876d DT |
1146 | } |
1147 | ||
1148 | /* | |
1149 | * Find out which node the error address belongs to. This may be | |
1150 | * different from the node that detected the error. | |
1151 | */ | |
44e9e2ee | 1152 | src_mci = find_mc_by_sys_addr(mci, sys_addr); |
2cff18c2 | 1153 | if (!src_mci) { |
ddff876d DT |
1154 | amd64_mc_printk(mci, KERN_ERR, |
1155 | "failed to map error address 0x%lx to a node\n", | |
44e9e2ee | 1156 | (unsigned long)sys_addr); |
ddff876d DT |
1157 | edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR); |
1158 | return; | |
1159 | } | |
1160 | ||
44e9e2ee BP |
1161 | /* Now map the sys_addr to a CSROW */ |
1162 | csrow = sys_addr_to_csrow(src_mci, sys_addr); | |
ddff876d DT |
1163 | if (csrow < 0) { |
1164 | edac_mc_handle_ce_no_info(src_mci, EDAC_MOD_STR); | |
1165 | } else { | |
44e9e2ee | 1166 | error_address_to_page_and_offset(sys_addr, &page, &offset); |
ddff876d DT |
1167 | |
1168 | edac_mc_handle_ce(src_mci, page, offset, syndrome, csrow, | |
1169 | channel, EDAC_MOD_STR); | |
1170 | } | |
1171 | } | |
1172 | ||
1433eb99 | 1173 | static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode) |
ddff876d | 1174 | { |
1433eb99 | 1175 | int *dbam_map; |
ddff876d | 1176 | |
1433eb99 BP |
1177 | if (pvt->ext_model >= K8_REV_F) |
1178 | dbam_map = ddr2_dbam; | |
1179 | else if (pvt->ext_model >= K8_REV_D) | |
1180 | dbam_map = ddr2_dbam_revD; | |
1181 | else | |
1182 | dbam_map = ddr2_dbam_revCG; | |
ddff876d | 1183 | |
1433eb99 | 1184 | return dbam_map[cs_mode]; |
ddff876d DT |
1185 | } |
1186 | ||
1afd3c98 DT |
1187 | /* |
1188 | * Get the number of DCT channels in use. | |
1189 | * | |
1190 | * Return: | |
1191 | * number of Memory Channels in operation | |
1192 | * Pass back: | |
1193 | * contents of the DCL0_LOW register | |
1194 | */ | |
1195 | static int f10_early_channel_count(struct amd64_pvt *pvt) | |
1196 | { | |
57a30854 | 1197 | int dbams[] = { DBAM0, DBAM1 }; |
6ba5dcdc | 1198 | int i, j, channels = 0; |
1afd3c98 DT |
1199 | u32 dbam; |
1200 | ||
1afd3c98 DT |
1201 | /* If we are in 128 bit mode, then we are using 2 channels */ |
1202 | if (pvt->dclr0 & F10_WIDTH_128) { | |
1afd3c98 DT |
1203 | channels = 2; |
1204 | return channels; | |
1205 | } | |
1206 | ||
1207 | /* | |
d16149e8 BP |
1208 | * Need to check if in unganged mode: in that case, there are 2 channels, |
1209 | * but they are not in 128 bit mode and thus the above 'dclr0' status | |
1210 | * bit will be OFF. | |
1afd3c98 DT |
1211 | * |
1212 | * Need to check DCT0[0] and DCT1[0] to see if only one of them has | |
1213 | * their CSEnable bit on. If so, then SINGLE DIMM case. | |
1214 | */ | |
d16149e8 | 1215 | debugf0("Data width is not 128 bits - need more decoding\n"); |
ddff876d | 1216 | |
1afd3c98 DT |
1217 | /* |
1218 | * Check DRAM Bank Address Mapping values for each DIMM to see if there | |
1219 | * is more than just one DIMM present in unganged mode. Need to check | |
1220 | * both controllers since DIMMs can be placed in either one. | |
1221 | */ | |
57a30854 | 1222 | for (i = 0; i < ARRAY_SIZE(dbams); i++) { |
6ba5dcdc | 1223 | if (amd64_read_pci_cfg(pvt->dram_f2_ctl, dbams[i], &dbam)) |
1afd3c98 DT |
1224 | goto err_reg; |
1225 | ||
57a30854 WW |
1226 | for (j = 0; j < 4; j++) { |
1227 | if (DBAM_DIMM(j, dbam) > 0) { | |
1228 | channels++; | |
1229 | break; | |
1230 | } | |
1231 | } | |
1afd3c98 DT |
1232 | } |
1233 | ||
d16149e8 BP |
1234 | if (channels > 2) |
1235 | channels = 2; | |
1236 | ||
37da0450 | 1237 | debugf0("MCT channel count: %d\n", channels); |
1afd3c98 DT |
1238 | |
1239 | return channels; | |
1240 | ||
1241 | err_reg: | |
1242 | return -1; | |
1243 | ||
1244 | } | |
1245 | ||
1433eb99 | 1246 | static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode) |
1afd3c98 | 1247 | { |
1433eb99 BP |
1248 | int *dbam_map; |
1249 | ||
1250 | if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE) | |
1251 | dbam_map = ddr3_dbam; | |
1252 | else | |
1253 | dbam_map = ddr2_dbam; | |
1254 | ||
1255 | return dbam_map[cs_mode]; | |
1afd3c98 DT |
1256 | } |
1257 | ||
1258 | /* Enable extended configuration access via 0xCF8 feature */ | |
1259 | static void amd64_setup(struct amd64_pvt *pvt) | |
1260 | { | |
1261 | u32 reg; | |
1262 | ||
6ba5dcdc | 1263 | amd64_read_pci_cfg(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, ®); |
1afd3c98 DT |
1264 | |
1265 | pvt->flags.cf8_extcfg = !!(reg & F10_NB_CFG_LOW_ENABLE_EXT_CFG); | |
1266 | reg |= F10_NB_CFG_LOW_ENABLE_EXT_CFG; | |
1267 | pci_write_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, reg); | |
1268 | } | |
1269 | ||
1270 | /* Restore the extended configuration access via 0xCF8 feature */ | |
1271 | static void amd64_teardown(struct amd64_pvt *pvt) | |
1272 | { | |
1273 | u32 reg; | |
1274 | ||
6ba5dcdc | 1275 | amd64_read_pci_cfg(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, ®); |
1afd3c98 DT |
1276 | |
1277 | reg &= ~F10_NB_CFG_LOW_ENABLE_EXT_CFG; | |
1278 | if (pvt->flags.cf8_extcfg) | |
1279 | reg |= F10_NB_CFG_LOW_ENABLE_EXT_CFG; | |
1280 | pci_write_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, reg); | |
1281 | } | |
1282 | ||
1283 | static u64 f10_get_error_address(struct mem_ctl_info *mci, | |
ef44cc4c | 1284 | struct err_regs *info) |
1afd3c98 DT |
1285 | { |
1286 | return (((u64) (info->nbeah & 0xffff)) << 32) + | |
1287 | (info->nbeal & ~0x01); | |
1288 | } | |
1289 | ||
1290 | /* | |
1291 | * Read the Base and Limit registers for F10 based Memory controllers. Extract | |
1292 | * fields from the 'raw' reg into separate data fields. | |
1293 | * | |
1294 | * Isolates: BASE, LIMIT, IntlvEn, IntlvSel, RW_EN. | |
1295 | */ | |
1296 | static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram) | |
1297 | { | |
1298 | u32 high_offset, low_offset, high_base, low_base, high_limit, low_limit; | |
1299 | ||
1300 | low_offset = K8_DRAM_BASE_LOW + (dram << 3); | |
1301 | high_offset = F10_DRAM_BASE_HIGH + (dram << 3); | |
1302 | ||
1303 | /* read the 'raw' DRAM BASE Address register */ | |
6ba5dcdc | 1304 | amd64_read_pci_cfg(pvt->addr_f1_ctl, low_offset, &low_base); |
1afd3c98 DT |
1305 | |
1306 | /* Read from the ECS data register */ | |
6ba5dcdc | 1307 | amd64_read_pci_cfg(pvt->addr_f1_ctl, high_offset, &high_base); |
1afd3c98 DT |
1308 | |
1309 | /* Extract parts into separate data entries */ | |
1310 | pvt->dram_rw_en[dram] = (low_base & 0x3); | |
1311 | ||
1312 | if (pvt->dram_rw_en[dram] == 0) | |
1313 | return; | |
1314 | ||
1315 | pvt->dram_IntlvEn[dram] = (low_base >> 8) & 0x7; | |
1316 | ||
66216a7a | 1317 | pvt->dram_base[dram] = (((u64)high_base & 0x000000FF) << 40) | |
4997811e | 1318 | (((u64)low_base & 0xFFFF0000) << 8); |
1afd3c98 DT |
1319 | |
1320 | low_offset = K8_DRAM_LIMIT_LOW + (dram << 3); | |
1321 | high_offset = F10_DRAM_LIMIT_HIGH + (dram << 3); | |
1322 | ||
1323 | /* read the 'raw' LIMIT registers */ | |
6ba5dcdc | 1324 | amd64_read_pci_cfg(pvt->addr_f1_ctl, low_offset, &low_limit); |
1afd3c98 DT |
1325 | |
1326 | /* Read from the ECS data register for the HIGH portion */ | |
6ba5dcdc | 1327 | amd64_read_pci_cfg(pvt->addr_f1_ctl, high_offset, &high_limit); |
1afd3c98 | 1328 | |
1afd3c98 DT |
1329 | pvt->dram_DstNode[dram] = (low_limit & 0x7); |
1330 | pvt->dram_IntlvSel[dram] = (low_limit >> 8) & 0x7; | |
1331 | ||
1332 | /* | |
1333 | * Extract address values and form a LIMIT address. Limit is the HIGHEST | |
1334 | * memory location of the region, so low 24 bits need to be all ones. | |
1335 | */ | |
66216a7a | 1336 | pvt->dram_limit[dram] = (((u64)high_limit & 0x000000FF) << 40) | |
4997811e | 1337 | (((u64) low_limit & 0xFFFF0000) << 8) | |
66216a7a | 1338 | 0x00FFFFFF; |
1afd3c98 | 1339 | } |
6163b5d4 DT |
1340 | |
1341 | static void f10_read_dram_ctl_register(struct amd64_pvt *pvt) | |
1342 | { | |
6163b5d4 | 1343 | |
6ba5dcdc BP |
1344 | if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCTL_SEL_LOW, |
1345 | &pvt->dram_ctl_select_low)) { | |
72381bd5 BP |
1346 | debugf0("F2x110 (DCTL Sel. Low): 0x%08x, " |
1347 | "High range addresses at: 0x%x\n", | |
1348 | pvt->dram_ctl_select_low, | |
1349 | dct_sel_baseaddr(pvt)); | |
1350 | ||
1351 | debugf0(" DCT mode: %s, All DCTs on: %s\n", | |
1352 | (dct_ganging_enabled(pvt) ? "ganged" : "unganged"), | |
1353 | (dct_dram_enabled(pvt) ? "yes" : "no")); | |
1354 | ||
1355 | if (!dct_ganging_enabled(pvt)) | |
1356 | debugf0(" Address range split per DCT: %s\n", | |
1357 | (dct_high_range_enabled(pvt) ? "yes" : "no")); | |
1358 | ||
1359 | debugf0(" DCT data interleave for ECC: %s, " | |
1360 | "DRAM cleared since last warm reset: %s\n", | |
1361 | (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"), | |
1362 | (dct_memory_cleared(pvt) ? "yes" : "no")); | |
1363 | ||
1364 | debugf0(" DCT channel interleave: %s, " | |
1365 | "DCT interleave bits selector: 0x%x\n", | |
1366 | (dct_interleave_enabled(pvt) ? "enabled" : "disabled"), | |
6163b5d4 DT |
1367 | dct_sel_interleave_addr(pvt)); |
1368 | } | |
1369 | ||
6ba5dcdc BP |
1370 | amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCTL_SEL_HIGH, |
1371 | &pvt->dram_ctl_select_high); | |
6163b5d4 DT |
1372 | } |
1373 | ||
f71d0a05 DT |
1374 | /* |
1375 | * determine channel based on the interleaving mode: F10h BKDG, 2.8.9 Memory | |
1376 | * Interleaving Modes. | |
1377 | */ | |
6163b5d4 DT |
1378 | static u32 f10_determine_channel(struct amd64_pvt *pvt, u64 sys_addr, |
1379 | int hi_range_sel, u32 intlv_en) | |
1380 | { | |
1381 | u32 cs, temp, dct_sel_high = (pvt->dram_ctl_select_low >> 1) & 1; | |
1382 | ||
1383 | if (dct_ganging_enabled(pvt)) | |
1384 | cs = 0; | |
1385 | else if (hi_range_sel) | |
1386 | cs = dct_sel_high; | |
1387 | else if (dct_interleave_enabled(pvt)) { | |
f71d0a05 DT |
1388 | /* |
1389 | * see F2x110[DctSelIntLvAddr] - channel interleave mode | |
1390 | */ | |
6163b5d4 DT |
1391 | if (dct_sel_interleave_addr(pvt) == 0) |
1392 | cs = sys_addr >> 6 & 1; | |
1393 | else if ((dct_sel_interleave_addr(pvt) >> 1) & 1) { | |
1394 | temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2; | |
1395 | ||
1396 | if (dct_sel_interleave_addr(pvt) & 1) | |
1397 | cs = (sys_addr >> 9 & 1) ^ temp; | |
1398 | else | |
1399 | cs = (sys_addr >> 6 & 1) ^ temp; | |
1400 | } else if (intlv_en & 4) | |
1401 | cs = sys_addr >> 15 & 1; | |
1402 | else if (intlv_en & 2) | |
1403 | cs = sys_addr >> 14 & 1; | |
1404 | else if (intlv_en & 1) | |
1405 | cs = sys_addr >> 13 & 1; | |
1406 | else | |
1407 | cs = sys_addr >> 12 & 1; | |
1408 | } else if (dct_high_range_enabled(pvt) && !dct_ganging_enabled(pvt)) | |
1409 | cs = ~dct_sel_high & 1; | |
1410 | else | |
1411 | cs = 0; | |
1412 | ||
1413 | return cs; | |
1414 | } | |
1415 | ||
1416 | static inline u32 f10_map_intlv_en_to_shift(u32 intlv_en) | |
1417 | { | |
1418 | if (intlv_en == 1) | |
1419 | return 1; | |
1420 | else if (intlv_en == 3) | |
1421 | return 2; | |
1422 | else if (intlv_en == 7) | |
1423 | return 3; | |
1424 | ||
1425 | return 0; | |
1426 | } | |
1427 | ||
f71d0a05 DT |
1428 | /* See F10h BKDG, 2.8.10.2 DctSelBaseOffset Programming */ |
1429 | static inline u64 f10_get_base_addr_offset(u64 sys_addr, int hi_range_sel, | |
6163b5d4 DT |
1430 | u32 dct_sel_base_addr, |
1431 | u64 dct_sel_base_off, | |
f71d0a05 | 1432 | u32 hole_valid, u32 hole_off, |
6163b5d4 DT |
1433 | u64 dram_base) |
1434 | { | |
1435 | u64 chan_off; | |
1436 | ||
1437 | if (hi_range_sel) { | |
9975a5f2 | 1438 | if (!(dct_sel_base_addr & 0xFFFF0000) && |
f71d0a05 | 1439 | hole_valid && (sys_addr >= 0x100000000ULL)) |
6163b5d4 DT |
1440 | chan_off = hole_off << 16; |
1441 | else | |
1442 | chan_off = dct_sel_base_off; | |
1443 | } else { | |
f71d0a05 | 1444 | if (hole_valid && (sys_addr >= 0x100000000ULL)) |
6163b5d4 DT |
1445 | chan_off = hole_off << 16; |
1446 | else | |
1447 | chan_off = dram_base & 0xFFFFF8000000ULL; | |
1448 | } | |
1449 | ||
1450 | return (sys_addr & 0x0000FFFFFFFFFFC0ULL) - | |
1451 | (chan_off & 0x0000FFFFFF800000ULL); | |
1452 | } | |
1453 | ||
1454 | /* Hack for the time being - Can we get this from BIOS?? */ | |
1455 | #define CH0SPARE_RANK 0 | |
1456 | #define CH1SPARE_RANK 1 | |
1457 | ||
1458 | /* | |
1459 | * checks if the csrow passed in is marked as SPARED; if so, returns the new | |
1460 | * spare row | |
1461 | */ | |
1462 | static inline int f10_process_possible_spare(int csrow, | |
1463 | u32 cs, struct amd64_pvt *pvt) | |
1464 | { | |
1465 | u32 swap_done; | |
1466 | u32 bad_dram_cs; | |
1467 | ||
1468 | /* Depending on channel, isolate respective SPARING info */ | |
1469 | if (cs) { | |
1470 | swap_done = F10_ONLINE_SPARE_SWAPDONE1(pvt->online_spare); | |
1471 | bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS1(pvt->online_spare); | |
1472 | if (swap_done && (csrow == bad_dram_cs)) | |
1473 | csrow = CH1SPARE_RANK; | |
1474 | } else { | |
1475 | swap_done = F10_ONLINE_SPARE_SWAPDONE0(pvt->online_spare); | |
1476 | bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS0(pvt->online_spare); | |
1477 | if (swap_done && (csrow == bad_dram_cs)) | |
1478 | csrow = CH0SPARE_RANK; | |
1479 | } | |
1480 | return csrow; | |
1481 | } | |
1482 | ||
1483 | /* | |
1484 | * Iterate over the DRAM DCT "base" and "mask" registers looking for a | |
1485 | * SystemAddr match on the specified 'ChannelSelect' and 'NodeID' | |
1486 | * | |
1487 | * Return: | |
1488 | * -EINVAL: NOT FOUND | |
1489 | * 0..csrow = Chip-Select Row | |
1490 | */ | |
1491 | static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs) | |
1492 | { | |
1493 | struct mem_ctl_info *mci; | |
1494 | struct amd64_pvt *pvt; | |
1495 | u32 cs_base, cs_mask; | |
1496 | int cs_found = -EINVAL; | |
1497 | int csrow; | |
1498 | ||
1499 | mci = mci_lookup[nid]; | |
1500 | if (!mci) | |
1501 | return cs_found; | |
1502 | ||
1503 | pvt = mci->pvt_info; | |
1504 | ||
1505 | debugf1("InputAddr=0x%x channelselect=%d\n", in_addr, cs); | |
1506 | ||
9d858bb1 | 1507 | for (csrow = 0; csrow < pvt->cs_count; csrow++) { |
6163b5d4 DT |
1508 | |
1509 | cs_base = amd64_get_dct_base(pvt, cs, csrow); | |
1510 | if (!(cs_base & K8_DCSB_CS_ENABLE)) | |
1511 | continue; | |
1512 | ||
1513 | /* | |
1514 | * We have an ENABLED CSROW, Isolate just the MASK bits of the | |
1515 | * target: [28:19] and [13:5], which map to [36:27] and [21:13] | |
1516 | * of the actual address. | |
1517 | */ | |
1518 | cs_base &= REV_F_F1Xh_DCSB_BASE_BITS; | |
1519 | ||
1520 | /* | |
1521 | * Get the DCT Mask, and ENABLE the reserved bits: [18:16] and | |
1522 | * [4:0] to become ON. Then mask off bits [28:0] ([36:8]) | |
1523 | */ | |
1524 | cs_mask = amd64_get_dct_mask(pvt, cs, csrow); | |
1525 | ||
1526 | debugf1(" CSROW=%d CSBase=0x%x RAW CSMask=0x%x\n", | |
1527 | csrow, cs_base, cs_mask); | |
1528 | ||
1529 | cs_mask = (cs_mask | 0x0007C01F) & 0x1FFFFFFF; | |
1530 | ||
1531 | debugf1(" Final CSMask=0x%x\n", cs_mask); | |
1532 | debugf1(" (InputAddr & ~CSMask)=0x%x " | |
1533 | "(CSBase & ~CSMask)=0x%x\n", | |
1534 | (in_addr & ~cs_mask), (cs_base & ~cs_mask)); | |
1535 | ||
1536 | if ((in_addr & ~cs_mask) == (cs_base & ~cs_mask)) { | |
1537 | cs_found = f10_process_possible_spare(csrow, cs, pvt); | |
1538 | ||
1539 | debugf1(" MATCH csrow=%d\n", cs_found); | |
1540 | break; | |
1541 | } | |
1542 | } | |
1543 | return cs_found; | |
1544 | } | |
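/*
 * Illustrative, standalone sketch (not part of the driver) of the DCSB/DCSM
 * match test used above: an InputAddr hits a chip select when the address and
 * the DCS base agree on every bit that is *not* masked. The base/mask values
 * below are made up.
 */
#include <stdint.h>
#include <stdio.h>

static int cs_matches(uint32_t in_addr, uint32_t cs_base, uint32_t cs_mask)
{
	/* force the reserved mask bits on and clip to [28:0], as in the driver */
	cs_mask = (cs_mask | 0x0007C01F) & 0x1FFFFFFF;

	return (in_addr & ~cs_mask) == (cs_base & ~cs_mask);
}

int main(void)
{
	uint32_t base = 0x10000000, mask = 0x0FF00000;	/* hypothetical csrow */

	printf("%d %d\n",
	       cs_matches(0x10F0001F, base, mask),	/* differs only in masked bits: 1 */
	       cs_matches(0x10080000, base, mask));	/* differs in unmasked bit 19:  0 */
	return 0;
}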
1545 | ||
f71d0a05 DT |
1546 | /* For a given @dram_range, check if @sys_addr falls within it. */ |
1547 | static int f10_match_to_this_node(struct amd64_pvt *pvt, int dram_range, | |
1548 | u64 sys_addr, int *nid, int *chan_sel) | |
1549 | { | |
1550 | int node_id, cs_found = -EINVAL, high_range = 0; | |
1551 | u32 intlv_en, intlv_sel, intlv_shift, hole_off; | |
1552 | u32 hole_valid, tmp, dct_sel_base, channel; | |
1553 | u64 dram_base, chan_addr, dct_sel_base_off; | |
1554 | ||
1555 | dram_base = pvt->dram_base[dram_range]; | |
1556 | intlv_en = pvt->dram_IntlvEn[dram_range]; | |
1557 | ||
1558 | node_id = pvt->dram_DstNode[dram_range]; | |
1559 | intlv_sel = pvt->dram_IntlvSel[dram_range]; | |
1560 | ||
1561 | debugf1("(dram=%d) Base=0x%llx SystemAddr= 0x%llx Limit=0x%llx\n", | |
1562 | dram_range, dram_base, sys_addr, pvt->dram_limit[dram_range]); | |
1563 | ||
1564 | /* | |
1565 | * This assumes that one node's DHAR is the same as all the other | |
1566 | * nodes' DHAR. | |
1567 | */ | |
1568 | hole_off = (pvt->dhar & 0x0000FF80); | |
1569 | hole_valid = (pvt->dhar & 0x1); | |
1570 | dct_sel_base_off = (pvt->dram_ctl_select_high & 0xFFFFFC00) << 16; | |
1571 | ||
1572 | debugf1(" HoleOffset=0x%x HoleValid=0x%x IntlvSel=0x%x\n", | |
1573 | hole_off, hole_valid, intlv_sel); | |
1574 | ||
1575 | if (intlv_en && | |
1576 | (intlv_sel != ((sys_addr >> 12) & intlv_en))) | |
1577 | return -EINVAL; | |
1578 | ||
1579 | dct_sel_base = dct_sel_baseaddr(pvt); | |
1580 | ||
1581 | /* | |
1582 | * check whether addresses >= DctSelBaseAddr[47:27] are to be used to | |
1583 | * select between DCT0 and DCT1. | |
1584 | */ | |
1585 | if (dct_high_range_enabled(pvt) && | |
1586 | !dct_ganging_enabled(pvt) && | |
1587 | ((sys_addr >> 27) >= (dct_sel_base >> 11))) | |
1588 | high_range = 1; | |
1589 | ||
1590 | channel = f10_determine_channel(pvt, sys_addr, high_range, intlv_en); | |
1591 | ||
1592 | chan_addr = f10_get_base_addr_offset(sys_addr, high_range, dct_sel_base, | |
1593 | dct_sel_base_off, hole_valid, | |
1594 | hole_off, dram_base); | |
1595 | ||
1596 | intlv_shift = f10_map_intlv_en_to_shift(intlv_en); | |
1597 | ||
1598 | /* remove Node ID (in case of memory interleaving) */ | |
1599 | tmp = chan_addr & 0xFC0; | |
1600 | ||
1601 | chan_addr = ((chan_addr >> intlv_shift) & 0xFFFFFFFFF000ULL) | tmp; | |
1602 | ||
1603 | /* remove channel interleave and hash */ | |
1604 | if (dct_interleave_enabled(pvt) && | |
1605 | !dct_high_range_enabled(pvt) && | |
1606 | !dct_ganging_enabled(pvt)) { | |
1607 | if (dct_sel_interleave_addr(pvt) != 1) | |
1608 | chan_addr = (chan_addr >> 1) & 0xFFFFFFFFFFFFFFC0ULL; | |
1609 | else { | |
1610 | tmp = chan_addr & 0xFC0; | |
1611 | chan_addr = ((chan_addr & 0xFFFFFFFFFFFFC000ULL) >> 1) | |
1612 | | tmp; | |
1613 | } | |
1614 | } | |
1615 | ||
1616 | debugf1(" (ChannelAddrLong=0x%llx) >> 8 becomes InputAddr=0x%x\n", | |
1617 | chan_addr, (u32)(chan_addr >> 8)); | |
1618 | ||
1619 | cs_found = f10_lookup_addr_in_dct(chan_addr >> 8, node_id, channel); | |
1620 | ||
1621 | if (cs_found >= 0) { | |
1622 | *nid = node_id; | |
1623 | *chan_sel = channel; | |
1624 | } | |
1625 | return cs_found; | |
1626 | } | |
1627 | ||
1628 | static int f10_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr, | |
1629 | int *node, int *chan_sel) | |
1630 | { | |
1631 | int dram_range, cs_found = -EINVAL; | |
1632 | u64 dram_base, dram_limit; | |
1633 | ||
1634 | for (dram_range = 0; dram_range < DRAM_REG_COUNT; dram_range++) { | |
1635 | ||
1636 | if (!pvt->dram_rw_en[dram_range]) | |
1637 | continue; | |
1638 | ||
1639 | dram_base = pvt->dram_base[dram_range]; | |
1640 | dram_limit = pvt->dram_limit[dram_range]; | |
1641 | ||
1642 | if ((dram_base <= sys_addr) && (sys_addr <= dram_limit)) { | |
1643 | ||
1644 | cs_found = f10_match_to_this_node(pvt, dram_range, | |
1645 | sys_addr, node, | |
1646 | chan_sel); | |
1647 | if (cs_found >= 0) | |
1648 | break; | |
1649 | } | |
1650 | } | |
1651 | return cs_found; | |
1652 | } | |
1653 | ||
1654 | /* | |
bdc30a0c BP |
1655 | * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps |
1656 | * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW). | |
f71d0a05 | 1657 | * |
bdc30a0c BP |
1658 | * The @sys_addr is usually an error address received from the hardware |
1659 | * (MCX_ADDR). | |
f71d0a05 DT |
1660 | */ |
1661 | static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci, | |
ad6a32e9 | 1662 | struct err_regs *err_info, |
f71d0a05 DT |
1663 | u64 sys_addr) |
1664 | { | |
1665 | struct amd64_pvt *pvt = mci->pvt_info; | |
1666 | u32 page, offset; | |
f71d0a05 | 1667 | int nid, csrow, chan = 0; |
ad6a32e9 | 1668 | u16 syndrome; |
f71d0a05 DT |
1669 | |
1670 | csrow = f10_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan); | |
1671 | ||
bdc30a0c BP |
1672 | if (csrow < 0) { |
1673 | edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR); | |
1674 | return; | |
1675 | } | |
1676 | ||
1677 | error_address_to_page_and_offset(sys_addr, &page, &offset); | |
f71d0a05 | 1678 | |
ad6a32e9 | 1679 | syndrome = extract_syndrome(err_info); |
bdc30a0c BP |
1680 | |
1681 | /* | |
1682 | * We need the syndromes for channel detection only when we're | |
1683 | * ganged. Otherwise @chan should already contain the channel at | |
1684 | * this point. | |
1685 | */ | |
962b70a1 | 1686 | if (dct_ganging_enabled(pvt) && (pvt->nbcfg & K8_NBCFG_CHIPKILL)) |
bdc30a0c | 1687 | chan = get_channel_from_ecc_syndrome(mci, syndrome); |
f71d0a05 | 1688 | |
bdc30a0c BP |
1689 | if (chan >= 0) |
1690 | edac_mc_handle_ce(mci, page, offset, syndrome, csrow, chan, | |
1691 | EDAC_MOD_STR); | |
1692 | else | |
f71d0a05 | 1693 | /* |
bdc30a0c | 1694 | * Channel unknown, report all channels on this CSROW as failed. |
f71d0a05 | 1695 | */ |
bdc30a0c | 1696 | for (chan = 0; chan < mci->csrows[csrow].nr_channels; chan++) |
f71d0a05 | 1697 | edac_mc_handle_ce(mci, page, offset, syndrome, |
bdc30a0c | 1698 | csrow, chan, EDAC_MOD_STR); |
f71d0a05 DT |
1699 | } |
1700 | ||
f71d0a05 | 1701 | /* |
8566c4df | 1702 | * debug routine to display the memory sizes of all logical DIMMs and its |
f71d0a05 DT |
1703 | * CSROWs as well |
1704 | */ | |
8566c4df | 1705 | static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt) |
f71d0a05 | 1706 | { |
603adaf6 | 1707 | int dimm, size0, size1, factor = 0; |
f71d0a05 DT |
1708 | u32 dbam; |
1709 | u32 *dcsb; | |
1710 | ||
8566c4df | 1711 | if (boot_cpu_data.x86 == 0xf) { |
603adaf6 BP |
1712 | if (pvt->dclr0 & F10_WIDTH_128) |
1713 | factor = 1; | |
1714 | ||
8566c4df | 1715 | /* K8 families < revF not supported yet */ |
1433eb99 | 1716 | if (pvt->ext_model < K8_REV_F) |
8566c4df BP |
1717 | return; |
1718 | else | |
1719 | WARN_ON(ctrl != 0); | |
1720 | } | |
1721 | ||
1722 | debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n", | |
1723 | ctrl, ctrl ? pvt->dbam1 : pvt->dbam0); | |
f71d0a05 DT |
1724 | |
1725 | dbam = ctrl ? pvt->dbam1 : pvt->dbam0; | |
1726 | dcsb = ctrl ? pvt->dcsb1 : pvt->dcsb0; | |
1727 | ||
8566c4df BP |
1728 | edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl); |
1729 | ||
f71d0a05 DT |
1730 | /* Dump memory sizes for DIMM and its CSROWs */ |
1731 | for (dimm = 0; dimm < 4; dimm++) { | |
1732 | ||
1733 | size0 = 0; | |
1734 | if (dcsb[dimm*2] & K8_DCSB_CS_ENABLE) | |
1433eb99 | 1735 | size0 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam)); |
f71d0a05 DT |
1736 | |
1737 | size1 = 0; | |
1738 | if (dcsb[dimm*2 + 1] & K8_DCSB_CS_ENABLE) | |
1433eb99 | 1739 | size1 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam)); |
f71d0a05 | 1740 | |
8566c4df | 1741 | edac_printk(KERN_DEBUG, EDAC_MC, " %d: %5dMB %d: %5dMB\n", |
603adaf6 BP |
1742 | dimm * 2, size0 << factor, |
1743 | dimm * 2 + 1, size1 << factor); | |
f71d0a05 DT |
1744 | } |
1745 | } | |
1746 | ||
4d37607a DT |
1747 | /* |
1748 | * There are currently 3 types of MC devices for AMD Athlon/Opterons | |
1749 | * (as per PCI DEVICE_IDs): | |
1750 | * | |
1751 | * Family K8: That is the Athlon64 and Opteron CPUs. They all have the same PCI | |
1752 | * DEVICE ID, even though there are differences between the different Revisions | |
1753 | * (CG,D,E,F). | |
1754 | * | |
1755 | * Family F10h and F11h. | |
1756 | * | |
1757 | */ | |
1758 | static struct amd64_family_type amd64_family_types[] = { | |
1759 | [K8_CPUS] = { | |
1760 | .ctl_name = "RevF", | |
1761 | .addr_f1_ctl = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP, | |
1762 | .misc_f3_ctl = PCI_DEVICE_ID_AMD_K8_NB_MISC, | |
1763 | .ops = { | |
1433eb99 BP |
1764 | .early_channel_count = k8_early_channel_count, |
1765 | .get_error_address = k8_get_error_address, | |
1766 | .read_dram_base_limit = k8_read_dram_base_limit, | |
1767 | .map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow, | |
1768 | .dbam_to_cs = k8_dbam_to_chip_select, | |
4d37607a DT |
1769 | } |
1770 | }, | |
1771 | [F10_CPUS] = { | |
1772 | .ctl_name = "Family 10h", | |
1773 | .addr_f1_ctl = PCI_DEVICE_ID_AMD_10H_NB_MAP, | |
1774 | .misc_f3_ctl = PCI_DEVICE_ID_AMD_10H_NB_MISC, | |
1775 | .ops = { | |
1433eb99 BP |
1776 | .early_channel_count = f10_early_channel_count, |
1777 | .get_error_address = f10_get_error_address, | |
1778 | .read_dram_base_limit = f10_read_dram_base_limit, | |
1779 | .read_dram_ctl_register = f10_read_dram_ctl_register, | |
1780 | .map_sysaddr_to_csrow = f10_map_sysaddr_to_csrow, | |
1781 | .dbam_to_cs = f10_dbam_to_chip_select, | |
4d37607a DT |
1782 | } |
1783 | }, | |
1784 | [F11_CPUS] = { | |
1785 | .ctl_name = "Family 11h", | |
1786 | .addr_f1_ctl = PCI_DEVICE_ID_AMD_11H_NB_MAP, | |
1787 | .misc_f3_ctl = PCI_DEVICE_ID_AMD_11H_NB_MISC, | |
1788 | .ops = { | |
1433eb99 BP |
1789 | .early_channel_count = f10_early_channel_count, |
1790 | .get_error_address = f10_get_error_address, | |
1791 | .read_dram_base_limit = f10_read_dram_base_limit, | |
1792 | .read_dram_ctl_register = f10_read_dram_ctl_register, | |
1793 | .map_sysaddr_to_csrow = f10_map_sysaddr_to_csrow, | |
1794 | .dbam_to_cs = f10_dbam_to_chip_select, | |
4d37607a DT |
1795 | } |
1796 | }, | |
1797 | }; | |
1798 | ||
1799 | static struct pci_dev *pci_get_related_function(unsigned int vendor, | |
1800 | unsigned int device, | |
1801 | struct pci_dev *related) | |
1802 | { | |
1803 | struct pci_dev *dev = NULL; | |
1804 | ||
1805 | dev = pci_get_device(vendor, device, dev); | |
1806 | while (dev) { | |
1807 | if ((dev->bus->number == related->bus->number) && | |
1808 | (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn))) | |
1809 | break; | |
1810 | dev = pci_get_device(vendor, device, dev); | |
1811 | } | |
1812 | ||
1813 | return dev; | |
1814 | } | |
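/*
 * Usage note (standalone sketch, not part of the driver): the address-map (F1)
 * and misc (F3) devices sit at the same PCI bus/slot as the DRAM (F2) device
 * passed in; only the function number differs, which is what the slot
 * comparison above relies on. For node 0 these are typically 00:18.1, 00:18.2
 * and 00:18.3. The devfn values and helper macros below are made up.
 */
#include <stdio.h>

#define EX_PCI_SLOT(devfn)	(((devfn) >> 3) & 0x1f)
#define EX_PCI_FUNC(devfn)	((devfn) & 0x07)

int main(void)
{
	unsigned int f1 = 0xc1, f2 = 0xc2, f3 = 0xc3;	/* 0x18.1, 0x18.2, 0x18.3 */

	printf("slot 0x%x: functions %u, %u and %u\n",
	       EX_PCI_SLOT(f2), EX_PCI_FUNC(f1), EX_PCI_FUNC(f2), EX_PCI_FUNC(f3));
	return 0;
}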
1815 | ||
b1289d6f | 1816 | /* |
bfc04aec BP |
1817 | * These are tables of eigenvectors (one per line) which can be used for the |
1818 | * construction of the syndrome tables. The modified syndrome search algorithm | |
1819 | * uses those to find the symbol in error and thus the DIMM. | |
b1289d6f | 1820 | * |
bfc04aec | 1821 | * Algorithm courtesy of Ross LaFetra from AMD. |
b1289d6f | 1822 | */ |
bfc04aec BP |
1823 | static u16 x4_vectors[] = { |
1824 | 0x2f57, 0x1afe, 0x66cc, 0xdd88, | |
1825 | 0x11eb, 0x3396, 0x7f4c, 0xeac8, | |
1826 | 0x0001, 0x0002, 0x0004, 0x0008, | |
1827 | 0x1013, 0x3032, 0x4044, 0x8088, | |
1828 | 0x106b, 0x30d6, 0x70fc, 0xe0a8, | |
1829 | 0x4857, 0xc4fe, 0x13cc, 0x3288, | |
1830 | 0x1ac5, 0x2f4a, 0x5394, 0xa1e8, | |
1831 | 0x1f39, 0x251e, 0xbd6c, 0x6bd8, | |
1832 | 0x15c1, 0x2a42, 0x89ac, 0x4758, | |
1833 | 0x2b03, 0x1602, 0x4f0c, 0xca08, | |
1834 | 0x1f07, 0x3a0e, 0x6b04, 0xbd08, | |
1835 | 0x8ba7, 0x465e, 0x244c, 0x1cc8, | |
1836 | 0x2b87, 0x164e, 0x642c, 0xdc18, | |
1837 | 0x40b9, 0x80de, 0x1094, 0x20e8, | |
1838 | 0x27db, 0x1eb6, 0x9dac, 0x7b58, | |
1839 | 0x11c1, 0x2242, 0x84ac, 0x4c58, | |
1840 | 0x1be5, 0x2d7a, 0x5e34, 0xa718, | |
1841 | 0x4b39, 0x8d1e, 0x14b4, 0x28d8, | |
1842 | 0x4c97, 0xc87e, 0x11fc, 0x33a8, | |
1843 | 0x8e97, 0x497e, 0x2ffc, 0x1aa8, | |
1844 | 0x16b3, 0x3d62, 0x4f34, 0x8518, | |
1845 | 0x1e2f, 0x391a, 0x5cac, 0xf858, | |
1846 | 0x1d9f, 0x3b7a, 0x572c, 0xfe18, | |
1847 | 0x15f5, 0x2a5a, 0x5264, 0xa3b8, | |
1848 | 0x1dbb, 0x3b66, 0x715c, 0xe3f8, | |
1849 | 0x4397, 0xc27e, 0x17fc, 0x3ea8, | |
1850 | 0x1617, 0x3d3e, 0x6464, 0xb8b8, | |
1851 | 0x23ff, 0x12aa, 0xab6c, 0x56d8, | |
1852 | 0x2dfb, 0x1ba6, 0x913c, 0x7328, | |
1853 | 0x185d, 0x2ca6, 0x7914, 0x9e28, | |
1854 | 0x171b, 0x3e36, 0x7d7c, 0xebe8, | |
1855 | 0x4199, 0x82ee, 0x19f4, 0x2e58, | |
1856 | 0x4807, 0xc40e, 0x130c, 0x3208, | |
1857 | 0x1905, 0x2e0a, 0x5804, 0xac08, | |
1858 | 0x213f, 0x132a, 0xadfc, 0x5ba8, | |
1859 | 0x19a9, 0x2efe, 0xb5cc, 0x6f88, | |
b1289d6f DT |
1860 | }; |
1861 | ||
bfc04aec BP |
1862 | static u16 x8_vectors[] = { |
1863 | 0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480, | |
1864 | 0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80, | |
1865 | 0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80, | |
1866 | 0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80, | |
1867 | 0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780, | |
1868 | 0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080, | |
1869 | 0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080, | |
1870 | 0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080, | |
1871 | 0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80, | |
1872 | 0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580, | |
1873 | 0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880, | |
1874 | 0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280, | |
1875 | 0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180, | |
1876 | 0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580, | |
1877 | 0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280, | |
1878 | 0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180, | |
1879 | 0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080, | |
1880 | 0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080, | |
1881 | 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000, | |
1882 | }; | |
1883 | ||
1884 | static int decode_syndrome(u16 syndrome, u16 *vectors, int num_vecs, | |
ad6a32e9 | 1885 | int v_dim) |
b1289d6f | 1886 | { |
bfc04aec BP |
1887 | unsigned int i, err_sym; |
1888 | ||
1889 | for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) { | |
1890 | u16 s = syndrome; | |
1891 | int v_idx = err_sym * v_dim; | |
1892 | int v_end = (err_sym + 1) * v_dim; | |
1893 | ||
1894 | /* walk over all 16 bits of the syndrome */ | |
1895 | for (i = 1; i < (1U << 16); i <<= 1) { | |
1896 | ||
1897 | /* if bit is set in that eigenvector... */ | |
1898 | if (v_idx < v_end && vectors[v_idx] & i) { | |
1899 | u16 ev_comp = vectors[v_idx++]; | |
1900 | ||
1901 | /* ... and bit set in the modified syndrome, */ | |
1902 | if (s & i) { | |
1903 | /* remove it. */ | |
1904 | s ^= ev_comp; | |
4d37607a | 1905 | |
bfc04aec BP |
1906 | if (!s) |
1907 | return err_sym; | |
1908 | } | |
b1289d6f | 1909 | |
bfc04aec BP |
1910 | } else if (s & i) |
1911 | /* can't get to zero, move to next symbol */ | |
1912 | break; | |
1913 | } | |
b1289d6f DT |
1914 | } |
1915 | ||
1916 | debugf0("syndrome(%x) not found\n", syndrome); | |
1917 | return -1; | |
1918 | } | |
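/*
 * Worked example (sketch, based on the table above): the third row of
 * x4_vectors is {0x0001, 0x0002, 0x0004, 0x0008}, so a modified syndrome of
 * 0x0001 cannot be cancelled by rows 0 or 1 but is cancelled exactly by row 2,
 * i.e.
 *
 *	decode_syndrome(0x0001, x4_vectors, ARRAY_SIZE(x4_vectors), 4) == 2
 *
 * map_err_sym_to_channel(2, 4) below then maps symbol 2 to channel 0 (2 >> 4).
 */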
d27bf6fa | 1919 | |
bfc04aec BP |
1920 | static int map_err_sym_to_channel(int err_sym, int sym_size) |
1921 | { | |
1922 | if (sym_size == 4) | |
1923 | switch (err_sym) { | |
1924 | case 0x20: | |
1925 | case 0x21: | |
1926 | return 0; | |
1927 | break; | |
1928 | case 0x22: | |
1929 | case 0x23: | |
1930 | return 1; | |
1931 | break; | |
1932 | default: | |
1933 | return err_sym >> 4; | |
1934 | break; | |
1935 | } | |
1936 | /* x8 symbols */ | |
1937 | else | |
1938 | switch (err_sym) { | |
1939 | /* imaginary bits not in a DIMM */ | |
1940 | case 0x10: | |
1941 | WARN(1, KERN_ERR "Invalid error symbol: 0x%x\n", | |
1942 | err_sym); | |
1943 | return -1; | |
1944 | break; | |
1945 | ||
1946 | case 0x11: | |
1947 | return 0; | |
1948 | break; | |
1949 | case 0x12: | |
1950 | return 1; | |
1951 | break; | |
1952 | default: | |
1953 | return err_sym >> 3; | |
1954 | break; | |
1955 | } | |
1956 | return -1; | |
1957 | } | |
1958 | ||
1959 | static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome) | |
1960 | { | |
1961 | struct amd64_pvt *pvt = mci->pvt_info; | |
ad6a32e9 BP |
1962 | int err_sym = -1; |
1963 | ||
1964 | if (pvt->syn_type == 8) | |
1965 | err_sym = decode_syndrome(syndrome, x8_vectors, | |
1966 | ARRAY_SIZE(x8_vectors), | |
1967 | pvt->syn_type); | |
1968 | else if (pvt->syn_type == 4) | |
1969 | err_sym = decode_syndrome(syndrome, x4_vectors, | |
1970 | ARRAY_SIZE(x4_vectors), | |
1971 | pvt->syn_type); | |
1972 | else { | |
1973 | amd64_printk(KERN_WARNING, "%s: Illegal syndrome type: %u\n", | |
1974 | __func__, pvt->syn_type); | |
1975 | return err_sym; | |
bfc04aec | 1976 | } |
ad6a32e9 BP |
1977 | |
1978 | return map_err_sym_to_channel(err_sym, pvt->syn_type); | |
bfc04aec BP |
1979 | } |
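/*
 * Illustrative, standalone sketch (not part of the driver): how a decoded x4
 * error symbol selects the DCT channel. Symbols 0x20-0x23 (the last four rows
 * of x4_vectors) are special-cased; for all other symbols the channel is
 * simply err_sym >> 4. The example symbol values are made up.
 */
#include <stdio.h>

static int x4_sym_to_channel(int err_sym)
{
	switch (err_sym) {
	case 0x20:
	case 0x21:
		return 0;
	case 0x22:
	case 0x23:
		return 1;
	default:
		return err_sym >> 4;
	}
}

int main(void)
{
	printf("%d %d %d\n",
	       x4_sym_to_channel(0x02),		/* -> 0 */
	       x4_sym_to_channel(0x13),		/* -> 1 */
	       x4_sym_to_channel(0x22));	/* special-cased -> 1 */
	return 0;
}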
1980 | ||
d27bf6fa DT |
1981 | /* |
1982 | * Handle any Correctable Errors (CEs) that have occurred. Check for valid ERROR | |
1983 | * ADDRESS and process. | |
1984 | */ | |
1985 | static void amd64_handle_ce(struct mem_ctl_info *mci, | |
ef44cc4c | 1986 | struct err_regs *info) |
d27bf6fa DT |
1987 | { |
1988 | struct amd64_pvt *pvt = mci->pvt_info; | |
44e9e2ee | 1989 | u64 sys_addr; |
d27bf6fa DT |
1990 | |
1991 | /* Ensure that the Error Address is VALID */ | |
1992 | if ((info->nbsh & K8_NBSH_VALID_ERROR_ADDR) == 0) { | |
1993 | amd64_mc_printk(mci, KERN_ERR, | |
1994 | "HW has no ERROR_ADDRESS available\n"); | |
1995 | edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR); | |
1996 | return; | |
1997 | } | |
1998 | ||
1f6bcee7 | 1999 | sys_addr = pvt->ops->get_error_address(mci, info); |
d27bf6fa DT |
2000 | |
2001 | amd64_mc_printk(mci, KERN_ERR, | |
44e9e2ee | 2002 | "CE ERROR_ADDRESS= 0x%llx\n", sys_addr); |
d27bf6fa | 2003 | |
44e9e2ee | 2004 | pvt->ops->map_sysaddr_to_csrow(mci, info, sys_addr); |
d27bf6fa DT |
2005 | } |
2006 | ||
2007 | /* Handle any Un-correctable Errors (UEs) */ | |
2008 | static void amd64_handle_ue(struct mem_ctl_info *mci, | |
ef44cc4c | 2009 | struct err_regs *info) |
d27bf6fa | 2010 | { |
1f6bcee7 BP |
2011 | struct amd64_pvt *pvt = mci->pvt_info; |
2012 | struct mem_ctl_info *log_mci, *src_mci = NULL; | |
d27bf6fa | 2013 | int csrow; |
44e9e2ee | 2014 | u64 sys_addr; |
d27bf6fa | 2015 | u32 page, offset; |
d27bf6fa DT |
2016 | |
2017 | log_mci = mci; | |
2018 | ||
2019 | if ((info->nbsh & K8_NBSH_VALID_ERROR_ADDR) == 0) { | |
2020 | amd64_mc_printk(mci, KERN_CRIT, | |
2021 | "HW has no ERROR_ADDRESS available\n"); | |
2022 | edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR); | |
2023 | return; | |
2024 | } | |
2025 | ||
1f6bcee7 | 2026 | sys_addr = pvt->ops->get_error_address(mci, info); |
d27bf6fa DT |
2027 | |
2028 | /* | |
2029 | * Find out which node the error address belongs to. This may be | |
2030 | * different from the node that detected the error. | |
2031 | */ | |
44e9e2ee | 2032 | src_mci = find_mc_by_sys_addr(mci, sys_addr); |
d27bf6fa DT |
2033 | if (!src_mci) { |
2034 | amd64_mc_printk(mci, KERN_CRIT, | |
2035 | "ERROR ADDRESS (0x%lx) value NOT mapped to a MC\n", | |
44e9e2ee | 2036 | (unsigned long)sys_addr); |
d27bf6fa DT |
2037 | edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR); |
2038 | return; | |
2039 | } | |
2040 | ||
2041 | log_mci = src_mci; | |
2042 | ||
44e9e2ee | 2043 | csrow = sys_addr_to_csrow(log_mci, sys_addr); |
d27bf6fa DT |
2044 | if (csrow < 0) { |
2045 | amd64_mc_printk(mci, KERN_CRIT, | |
2046 | "ERROR_ADDRESS (0x%lx) value NOT mapped to 'csrow'\n", | |
44e9e2ee | 2047 | (unsigned long)sys_addr); |
d27bf6fa DT |
2048 | edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR); |
2049 | } else { | |
44e9e2ee | 2050 | error_address_to_page_and_offset(sys_addr, &page, &offset); |
d27bf6fa DT |
2051 | edac_mc_handle_ue(log_mci, page, offset, csrow, EDAC_MOD_STR); |
2052 | } | |
2053 | } | |
2054 | ||
549d042d | 2055 | static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci, |
b69b29de | 2056 | struct err_regs *info) |
d27bf6fa | 2057 | { |
b70ef010 BP |
2058 | u32 ec = ERROR_CODE(info->nbsl); |
2059 | u32 xec = EXT_ERROR_CODE(info->nbsl); | |
17adea01 | 2060 | int ecc_type = (info->nbsh >> 13) & 0x3; |
d27bf6fa | 2061 | |
b70ef010 BP |
2062 | /* Bail early out if this was an 'observed' error */ |
2063 | if (PP(ec) == K8_NBSL_PP_OBS) | |
2064 | return; | |
d27bf6fa | 2065 | |
ecaf5606 BP |
2066 | /* Do only ECC errors */ |
2067 | if (xec && xec != F10_NBSL_EXT_ERR_ECC) | |
d27bf6fa | 2068 | return; |
d27bf6fa | 2069 | |
ecaf5606 | 2070 | if (ecc_type == 2) |
d27bf6fa | 2071 | amd64_handle_ce(mci, info); |
ecaf5606 | 2072 | else if (ecc_type == 1) |
d27bf6fa | 2073 | amd64_handle_ue(mci, info); |
d27bf6fa DT |
2074 | } |
2075 | ||
b69b29de | 2076 | void amd64_decode_bus_error(int node_id, struct err_regs *regs) |
d27bf6fa | 2077 | { |
549d042d | 2078 | struct mem_ctl_info *mci = mci_lookup[node_id]; |
d27bf6fa | 2079 | |
b69b29de | 2080 | __amd64_decode_bus_error(mci, regs); |
d27bf6fa | 2081 | |
d27bf6fa DT |
2082 | /* |
2083 | * Check the UE bit of the NB status high register, if set generate some | |
2084 | * logs. If NOT a GART error, then process the event as a NO-INFO event. | |
2085 | * If it was a GART error, skip that process. | |
549d042d BP |
2086 | * |
2087 | * FIXME: this should go somewhere else, if at all. | |
d27bf6fa | 2088 | */ |
5110dbde BP |
2089 | if (regs->nbsh & K8_NBSH_UC_ERR && !report_gart_errors) |
2090 | edac_mc_handle_ue_no_info(mci, "UE bit is set"); | |
549d042d | 2091 | |
d27bf6fa | 2092 | } |
d27bf6fa | 2093 | |
0ec449ee DT |
2094 | /* |
2095 | * Input: | |
2096 | * 1) struct amd64_pvt which contains pvt->dram_f2_ctl pointer | |
2097 | * 2) AMD Family index value | |
2098 | * | |
2099 | * Output: | |
2100 | * Upon return of 0, the following filled in: | |
2101 | * | |
2102 | * struct pvt->addr_f1_ctl | |
2103 | * struct pvt->misc_f3_ctl | |
2104 | * | |
2105 | * Filled in with the related device functions of 'dram_f2_ctl'. | |
2106 | * These devices are "reserved" via the pci_get_device() | |
2107 | * | |
2108 | * Upon return of 1 (error status): | |
2109 | * | |
2110 | * Nothing reserved | |
2111 | */ | |
2112 | static int amd64_reserve_mc_sibling_devices(struct amd64_pvt *pvt, int mc_idx) | |
2113 | { | |
2114 | const struct amd64_family_type *amd64_dev = &amd64_family_types[mc_idx]; | |
2115 | ||
2116 | /* Reserve the ADDRESS MAP Device */ | |
2117 | pvt->addr_f1_ctl = pci_get_related_function(pvt->dram_f2_ctl->vendor, | |
2118 | amd64_dev->addr_f1_ctl, | |
2119 | pvt->dram_f2_ctl); | |
2120 | ||
2121 | if (!pvt->addr_f1_ctl) { | |
2122 | amd64_printk(KERN_ERR, "error address map device not found: " | |
2123 | "vendor %x device 0x%x (broken BIOS?)\n", | |
2124 | PCI_VENDOR_ID_AMD, amd64_dev->addr_f1_ctl); | |
2125 | return 1; | |
2126 | } | |
2127 | ||
2128 | /* Reserve the MISC Device */ | |
2129 | pvt->misc_f3_ctl = pci_get_related_function(pvt->dram_f2_ctl->vendor, | |
2130 | amd64_dev->misc_f3_ctl, | |
2131 | pvt->dram_f2_ctl); | |
2132 | ||
2133 | if (!pvt->misc_f3_ctl) { | |
2134 | pci_dev_put(pvt->addr_f1_ctl); | |
2135 | pvt->addr_f1_ctl = NULL; | |
2136 | ||
2137 | amd64_printk(KERN_ERR, "error miscellaneous device not found: " | |
2138 | "vendor %x device 0x%x (broken BIOS?)\n", | |
2139 | PCI_VENDOR_ID_AMD, amd64_dev->misc_f3_ctl); | |
2140 | return 1; | |
2141 | } | |
2142 | ||
2143 | debugf1(" Addr Map device PCI Bus ID:\t%s\n", | |
2144 | pci_name(pvt->addr_f1_ctl)); | |
2145 | debugf1(" DRAM MEM-CTL PCI Bus ID:\t%s\n", | |
2146 | pci_name(pvt->dram_f2_ctl)); | |
2147 | debugf1(" Misc device PCI Bus ID:\t%s\n", | |
2148 | pci_name(pvt->misc_f3_ctl)); | |
2149 | ||
2150 | return 0; | |
2151 | } | |
2152 | ||
2153 | static void amd64_free_mc_sibling_devices(struct amd64_pvt *pvt) | |
2154 | { | |
2155 | pci_dev_put(pvt->addr_f1_ctl); | |
2156 | pci_dev_put(pvt->misc_f3_ctl); | |
2157 | } | |
2158 | ||
2159 | /* | |
2160 | * Retrieve the hardware registers of the memory controller (this includes the | |
2161 | * 'Address Map' and 'Misc' device regs) | |
2162 | */ | |
2163 | static void amd64_read_mc_registers(struct amd64_pvt *pvt) | |
2164 | { | |
2165 | u64 msr_val; | |
ad6a32e9 | 2166 | u32 tmp; |
6ba5dcdc | 2167 | int dram; |
0ec449ee DT |
2168 | |
2169 | /* | |
2170 | * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since | |
2171 | * those are Read-As-Zero | |
2172 | */ | |
e97f8bb8 BP |
2173 | rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem); |
2174 | debugf0(" TOP_MEM: 0x%016llx\n", pvt->top_mem); | |
0ec449ee DT |
2175 | |
2176 | /* check first whether TOP_MEM2 is enabled */ | |
2177 | rdmsrl(MSR_K8_SYSCFG, msr_val); | |
2178 | if (msr_val & (1U << 21)) { | |
e97f8bb8 BP |
2179 | rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2); |
2180 | debugf0(" TOP_MEM2: 0x%016llx\n", pvt->top_mem2); | |
0ec449ee DT |
2181 | } else |
2182 | debugf0(" TOP_MEM2 disabled.\n"); | |
2183 | ||
2184 | amd64_cpu_display_info(pvt); | |
2185 | ||
6ba5dcdc | 2186 | amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCAP, &pvt->nbcap); |
0ec449ee DT |
2187 | |
2188 | if (pvt->ops->read_dram_ctl_register) | |
2189 | pvt->ops->read_dram_ctl_register(pvt); | |
2190 | ||
2191 | for (dram = 0; dram < DRAM_REG_COUNT; dram++) { | |
2192 | /* | |
2193 | * Call CPU specific READ function to get the DRAM Base and | |
2194 | * Limit values from the DCT. | |
2195 | */ | |
2196 | pvt->ops->read_dram_base_limit(pvt, dram); | |
2197 | ||
2198 | /* | |
2199 | * Only print out debug info on rows with both R and W Enabled. | |
2200 | * Normal processing, compiler should optimize this whole 'if' | |
2201 | * debug output block away. | |
2202 | */ | |
2203 | if (pvt->dram_rw_en[dram] != 0) { | |
e97f8bb8 BP |
2204 | debugf1(" DRAM-BASE[%d]: 0x%016llx " |
2205 | "DRAM-LIMIT: 0x%016llx\n", | |
0ec449ee | 2206 | dram, |
e97f8bb8 BP |
2207 | pvt->dram_base[dram], |
2208 | pvt->dram_limit[dram]); | |
2209 | ||
0ec449ee DT |
2210 | debugf1(" IntlvEn=%s %s %s " |
2211 | "IntlvSel=%d DstNode=%d\n", | |
2212 | pvt->dram_IntlvEn[dram] ? | |
2213 | "Enabled" : "Disabled", | |
2214 | (pvt->dram_rw_en[dram] & 0x2) ? "W" : "!W", | |
2215 | (pvt->dram_rw_en[dram] & 0x1) ? "R" : "!R", | |
2216 | pvt->dram_IntlvSel[dram], | |
2217 | pvt->dram_DstNode[dram]); | |
2218 | } | |
2219 | } | |
2220 | ||
2221 | amd64_read_dct_base_mask(pvt); | |
2222 | ||
6ba5dcdc | 2223 | amd64_read_pci_cfg(pvt->addr_f1_ctl, K8_DHAR, &pvt->dhar); |
0ec449ee DT |
2224 | amd64_read_dbam_reg(pvt); |
2225 | ||
6ba5dcdc BP |
2226 | amd64_read_pci_cfg(pvt->misc_f3_ctl, |
2227 | F10_ONLINE_SPARE, &pvt->online_spare); | |
0ec449ee | 2228 | |
6ba5dcdc BP |
2229 | amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0); |
2230 | amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCHR_0, &pvt->dchr0); | |
0ec449ee | 2231 | |
ad6a32e9 BP |
2232 | if (boot_cpu_data.x86 >= 0x10) { |
2233 | if (!dct_ganging_enabled(pvt)) { | |
2234 | amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_1, &pvt->dclr1); | |
2235 | amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCHR_1, &pvt->dchr1); | |
2236 | } | |
2237 | amd64_read_pci_cfg(pvt->misc_f3_ctl, EXT_NB_MCA_CFG, &tmp); | |
0ec449ee | 2238 | } |
ad6a32e9 BP |
2239 | |
2240 | if (boot_cpu_data.x86 == 0x10 && | |
2241 | boot_cpu_data.x86_model > 7 && | |
2242 | /* F3x180[EccSymbolSize]=1 => x8 symbols */ | |
2243 | tmp & BIT(25)) | |
2244 | pvt->syn_type = 8; | |
2245 | else | |
2246 | pvt->syn_type = 4; | |
2247 | ||
0ec449ee | 2248 | amd64_dump_misc_regs(pvt); |
0ec449ee DT |
2249 | } |
2250 | ||
2251 | /* | |
2252 | * NOTE: CPU Revision Dependent code | |
2253 | * | |
2254 | * Input: | |
9d858bb1 | 2255 | * @csrow_nr ChipSelect Row Number (0..pvt->cs_count-1) |
0ec449ee DT |
2256 | * k8 private pointer to --> |
2257 | * DRAM Bank Address mapping register | |
2258 | * node_id | |
2259 | * DCL register where dual_channel_active is | |
2260 | * | |
2261 | * The DBAM register consists of 4 sets of 4 bits each definitions: | |
2262 | * | |
2263 | * Bits: CSROWs | |
2264 | * 0-3 CSROWs 0 and 1 | |
2265 | * 4-7 CSROWs 2 and 3 | |
2266 | * 8-11 CSROWs 4 and 5 | |
2267 | * 12-15 CSROWs 6 and 7 | |
2268 | * | |
2269 | * Values range from: 0 to 15 | |
2270 | * The meaning of the values depends on CPU revision and dual-channel state, | |
2271 | * see the relevant BKDG for more info. | |
2272 | * | |
2273 | * The memory controller provides for a total of only 8 CSROWs in its current | |
2274 | * architecture. Each "pair" of CSROWs normally represents just one DIMM in | |
2275 | * single channel or two (2) DIMMs in dual channel mode. | |
2276 | * | |
2277 | * The following code logic collapses the various tables for CSROW based on CPU | |
2278 | * revision. | |
2279 | * | |
2280 | * Returns: | |
2281 | * The number of PAGE_SIZE pages on the specified CSROW number it | |
2282 | * encompasses | |
2283 | * | |
2284 | */ | |
2285 | static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt) | |
2286 | { | |
1433eb99 | 2287 | u32 cs_mode, nr_pages; |
0ec449ee DT |
2288 | |
2289 | /* | |
2290 | * The math on this doesn't look right on the surface because x/2*4 can | |
2291 | * be simplified to x*2, but this expression relies on integer division, | |
2292 | * where 1/2 == 0. This intermediate value becomes the | |
2293 | * number of bits to shift the DBAM register to extract the proper CSROW | |
2294 | * field. | |
2295 | */ | |
1433eb99 | 2296 | cs_mode = (pvt->dbam0 >> ((csrow_nr / 2) * 4)) & 0xF; |
0ec449ee | 2297 | |
1433eb99 | 2298 | nr_pages = pvt->ops->dbam_to_cs(pvt, cs_mode) << (20 - PAGE_SHIFT); |
0ec449ee DT |
2299 | |
2300 | /* | |
2301 | * If dual channel then double the memory size of single channel. | |
2302 | * Channel count is 1 or 2 | |
2303 | */ | |
2304 | nr_pages <<= (pvt->channel_count - 1); | |
2305 | ||
1433eb99 | 2306 | debugf0(" (csrow=%d) DBAM map index= %d\n", csrow_nr, cs_mode); |
0ec449ee DT |
2307 | debugf0(" nr_pages= %u channel-count = %d\n", |
2308 | nr_pages, pvt->channel_count); | |
2309 | ||
2310 | return nr_pages; | |
2311 | } | |
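/*
 * Illustrative, standalone sketch (not part of the driver) of the DBAM field
 * extraction above: csrow pairs (0,1), (2,3), ... share one 4-bit DBAM field,
 * hence the (csrow_nr / 2) * 4 shift. The dbam value below is made up.
 */
#include <stdio.h>

int main(void)
{
	unsigned int dbam = 0x00005732;	/* hypothetical F2x80 contents */
	int csrow;

	for (csrow = 0; csrow < 8; csrow += 2)
		printf("csrows %d/%d: cs_mode = 0x%x\n",
		       csrow, csrow + 1, (dbam >> ((csrow / 2) * 4)) & 0xF);

	/* prints cs_mode 0x2, 0x3, 0x7 and 0x5 for the four csrow pairs */
	return 0;
}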
2312 | ||
2313 | /* | |
2314 | * Initialize the array of csrow attribute instances, based on the values | |
2315 | * from pci config hardware registers. | |
2316 | */ | |
2317 | static int amd64_init_csrows(struct mem_ctl_info *mci) | |
2318 | { | |
2319 | struct csrow_info *csrow; | |
2320 | struct amd64_pvt *pvt; | |
2321 | u64 input_addr_min, input_addr_max, sys_addr; | |
6ba5dcdc | 2322 | int i, empty = 1; |
0ec449ee DT |
2323 | |
2324 | pvt = mci->pvt_info; | |
2325 | ||
6ba5dcdc | 2326 | amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &pvt->nbcfg); |
0ec449ee DT |
2327 | |
2328 | debugf0("NBCFG= 0x%x CHIPKILL= %s DRAM ECC= %s\n", pvt->nbcfg, | |
2329 | (pvt->nbcfg & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled", | |
2330 | (pvt->nbcfg & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled" | |
2331 | ); | |
2332 | ||
9d858bb1 | 2333 | for (i = 0; i < pvt->cs_count; i++) { |
0ec449ee DT |
2334 | csrow = &mci->csrows[i]; |
2335 | ||
2336 | if ((pvt->dcsb0[i] & K8_DCSB_CS_ENABLE) == 0) { | |
2337 | debugf1("----CSROW %d EMPTY for node %d\n", i, | |
2338 | pvt->mc_node_id); | |
2339 | continue; | |
2340 | } | |
2341 | ||
2342 | debugf1("----CSROW %d VALID for MC node %d\n", | |
2343 | i, pvt->mc_node_id); | |
2344 | ||
2345 | empty = 0; | |
2346 | csrow->nr_pages = amd64_csrow_nr_pages(i, pvt); | |
2347 | find_csrow_limits(mci, i, &input_addr_min, &input_addr_max); | |
2348 | sys_addr = input_addr_to_sys_addr(mci, input_addr_min); | |
2349 | csrow->first_page = (u32) (sys_addr >> PAGE_SHIFT); | |
2350 | sys_addr = input_addr_to_sys_addr(mci, input_addr_max); | |
2351 | csrow->last_page = (u32) (sys_addr >> PAGE_SHIFT); | |
2352 | csrow->page_mask = ~mask_from_dct_mask(pvt, i); | |
2353 | /* 8 bytes of resolution */ | |
2354 | ||
2355 | csrow->mtype = amd64_determine_memory_type(pvt); | |
2356 | ||
2357 | debugf1(" for MC node %d csrow %d:\n", pvt->mc_node_id, i); | |
2358 | debugf1(" input_addr_min: 0x%lx input_addr_max: 0x%lx\n", | |
2359 | (unsigned long)input_addr_min, | |
2360 | (unsigned long)input_addr_max); | |
2361 | debugf1(" sys_addr: 0x%lx page_mask: 0x%lx\n", | |
2362 | (unsigned long)sys_addr, csrow->page_mask); | |
2363 | debugf1(" nr_pages: %u first_page: 0x%lx " | |
2364 | "last_page: 0x%lx\n", | |
2365 | (unsigned)csrow->nr_pages, | |
2366 | csrow->first_page, csrow->last_page); | |
2367 | ||
2368 | /* | |
2369 | * determine whether CHIPKILL or JUST ECC or NO ECC is operating | |
2370 | */ | |
2371 | if (pvt->nbcfg & K8_NBCFG_ECC_ENABLE) | |
2372 | csrow->edac_mode = | |
2373 | (pvt->nbcfg & K8_NBCFG_CHIPKILL) ? | |
2374 | EDAC_S4ECD4ED : EDAC_SECDED; | |
2375 | else | |
2376 | csrow->edac_mode = EDAC_NONE; | |
2377 | } | |
2378 | ||
2379 | return empty; | |
2380 | } | |
d27bf6fa | 2381 | |
f6d6ae96 BP |
2382 | /* get all cores on this DCT */ |
2383 | static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid) | |
2384 | { | |
2385 | int cpu; | |
2386 | ||
2387 | for_each_online_cpu(cpu) | |
2388 | if (amd_get_nb_id(cpu) == nid) | |
2389 | cpumask_set_cpu(cpu, mask); | |
2390 | } | |
2391 | ||
2392 | /* check MCG_CTL on all the cpus on this node */ | |
2393 | static bool amd64_nb_mce_bank_enabled_on_node(int nid) | |
2394 | { | |
2395 | cpumask_var_t mask; | |
50542251 | 2396 | int cpu, nbe; |
f6d6ae96 BP |
2397 | bool ret = false; |
2398 | ||
2399 | if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) { | |
2400 | amd64_printk(KERN_WARNING, "%s: error allocating mask\n", | |
2401 | __func__); | |
2402 | return false; | |
2403 | } | |
2404 | ||
2405 | get_cpus_on_this_dct_cpumask(mask, nid); | |
2406 | ||
f6d6ae96 BP |
2407 | rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs); |
2408 | ||
2409 | for_each_cpu(cpu, mask) { | |
50542251 BP |
2410 | struct msr *reg = per_cpu_ptr(msrs, cpu); |
2411 | nbe = reg->l & K8_MSR_MCGCTL_NBE; | |
f6d6ae96 BP |
2412 | |
2413 | debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n", | |
50542251 | 2414 | cpu, reg->q, |
f6d6ae96 BP |
2415 | (nbe ? "enabled" : "disabled")); |
2416 | ||
2417 | if (!nbe) | |
2418 | goto out; | |
f6d6ae96 BP |
2419 | } |
2420 | ret = true; | |
2421 | ||
2422 | out: | |
f6d6ae96 BP |
2423 | free_cpumask_var(mask); |
2424 | return ret; | |
2425 | } | |
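/*
 * Illustrative, standalone sketch (not part of the driver): the check above
 * boils down to testing bit 4 (NBE) of MSR_IA32_MCG_CTL on every core of the
 * node; compare the "set MSR 0x%08x[4]" hint printed by
 * amd64_check_ecc_enabled() below. The raw MSR value here is made up.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_MCGCTL_NBE	(1ULL << 4)	/* bit 4, cf. K8_MSR_MCGCTL_NBE */

int main(void)
{
	uint64_t mcg_ctl = 0x1f;	/* hypothetical per-core MCG_CTL contents */

	printf("NB MCE bank %s\n",
	       (mcg_ctl & EX_MCGCTL_NBE) ? "enabled" : "disabled");
	return 0;
}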
2426 | ||
2427 | static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on) | |
2428 | { | |
2429 | cpumask_var_t cmask; | |
50542251 | 2430 | int cpu; |
f6d6ae96 BP |
2431 | |
2432 | if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) { | |
2433 | amd64_printk(KERN_WARNING, "%s: error allocating mask\n", | |
2434 | __func__); | |
2435 | return false; | |
2436 | } | |
2437 | ||
2438 | get_cpus_on_this_dct_cpumask(cmask, pvt->mc_node_id); | |
2439 | ||
f6d6ae96 BP |
2440 | rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs); |
2441 | ||
2442 | for_each_cpu(cpu, cmask) { | |
2443 | ||
50542251 BP |
2444 | struct msr *reg = per_cpu_ptr(msrs, cpu); |
2445 | ||
f6d6ae96 | 2446 | if (on) { |
50542251 | 2447 | if (reg->l & K8_MSR_MCGCTL_NBE) |
d95cf4de | 2448 | pvt->flags.nb_mce_enable = 1; |
f6d6ae96 | 2449 | |
50542251 | 2450 | reg->l |= K8_MSR_MCGCTL_NBE; |
f6d6ae96 BP |
2451 | } else { |
2452 | /* | |
d95cf4de | 2453 | * Turn off NB MCE reporting only when it was off before |
f6d6ae96 | 2454 | */ |
d95cf4de | 2455 | if (!pvt->flags.nb_mce_enable) |
50542251 | 2456 | reg->l &= ~K8_MSR_MCGCTL_NBE; |
f6d6ae96 | 2457 | } |
f6d6ae96 BP |
2458 | } |
2459 | wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs); | |
2460 | ||
f6d6ae96 BP |
2461 | free_cpumask_var(cmask); |
2462 | ||
2463 | return 0; | |
2464 | } | |
2465 | ||
f9431992 DT |
2466 | static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci) |
2467 | { | |
2468 | struct amd64_pvt *pvt = mci->pvt_info; | |
f6d6ae96 | 2469 | u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn; |
f9431992 | 2470 | |
6ba5dcdc | 2471 | amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCTL, &value); |
f9431992 DT |
2472 | |
2473 | /* turn on UECCn and CECCEn bits */ | |
2474 | pvt->old_nbctl = value & mask; | |
2475 | pvt->nbctl_mcgctl_saved = 1; | |
2476 | ||
2477 | value |= mask; | |
2478 | pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value); | |
2479 | ||
f6d6ae96 BP |
2480 | if (amd64_toggle_ecc_err_reporting(pvt, ON)) |
2481 | amd64_printk(KERN_WARNING, "Error enabling ECC reporting over " | |
2482 | "MCGCTL!\n"); | |
f9431992 | 2483 | |
6ba5dcdc | 2484 | amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value); |
f9431992 DT |
2485 | |
2486 | debugf0("NBCFG(1)= 0x%x CHIPKILL= %s ECC_ENABLE= %s\n", value, | |
2487 | (value & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled", | |
2488 | (value & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled"); | |
2489 | ||
2490 | if (!(value & K8_NBCFG_ECC_ENABLE)) { | |
2491 | amd64_printk(KERN_WARNING, | |
2492 | "This node reports that DRAM ECC is " | |
2493 | "currently Disabled; ENABLING now\n"); | |
2494 | ||
d95cf4de BP |
2495 | pvt->flags.nb_ecc_prev = 0; |
2496 | ||
f9431992 DT |
2497 | /* Attempt to turn on DRAM ECC Enable */ |
2498 | value |= K8_NBCFG_ECC_ENABLE; | |
2499 | pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCFG, value); | |
2500 | ||
6ba5dcdc | 2501 | amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value); |
f9431992 DT |
2502 | |
2503 | if (!(value & K8_NBCFG_ECC_ENABLE)) { | |
2504 | amd64_printk(KERN_WARNING, | |
2505 | "Hardware rejects Enabling DRAM ECC checking\n" | |
2506 | "Check memory DIMM configuration\n"); | |
2507 | } else { | |
2508 | amd64_printk(KERN_DEBUG, | |
2509 | "Hardware accepted DRAM ECC Enable\n"); | |
2510 | } | |
d95cf4de BP |
2511 | } else { |
2512 | pvt->flags.nb_ecc_prev = 1; | |
f9431992 | 2513 | } |
d95cf4de | 2514 | |
f9431992 DT |
2515 | debugf0("NBCFG(2)= 0x%x CHIPKILL= %s ECC_ENABLE= %s\n", value, |
2516 | (value & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled", | |
2517 | (value & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled"); | |
2518 | ||
2519 | pvt->ctl_error_info.nbcfg = value; | |
2520 | } | |
2521 | ||
2522 | static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt) | |
2523 | { | |
f6d6ae96 | 2524 | u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn; |
f9431992 DT |
2525 | |
2526 | if (!pvt->nbctl_mcgctl_saved) | |
2527 | return; | |
2528 | ||
6ba5dcdc | 2529 | amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCTL, &value); |
f9431992 DT |
2530 | value &= ~mask; |
2531 | value |= pvt->old_nbctl; | |
2532 | ||
f9431992 DT |
2533 | pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value); |
2534 | ||
d95cf4de BP |
2535 | /* restore previous BIOS DRAM ECC "off" setting which we force-enabled */ |
2536 | if (!pvt->flags.nb_ecc_prev) { | |
2537 | amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value); | |
2538 | value &= ~K8_NBCFG_ECC_ENABLE; | |
2539 | pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCFG, value); | |
2540 | } | |
2541 | ||
2542 | /* restore the NB Enable MCGCTL bit */ | |
f6d6ae96 | 2543 | if (amd64_toggle_ecc_err_reporting(pvt, OFF)) |
d95cf4de | 2544 | amd64_printk(KERN_WARNING, "Error restoring NB MCGCTL settings!\n"); |
f9431992 DT |
2545 | } |
2546 | ||
2547 | /* | |
2548 | * EDAC requires that the BIOS have ECC enabled before taking over the | |
2549 | * processing of ECC errors. This is because the BIOS can properly initialize | |
2550 | * the memory system completely. A command line option allows force-enabling the | |
2551 | * hardware ECC later in amd64_enable_ecc_error_reporting(). | |
2552 | */ | |
cab4d277 BP |
2553 | static const char *ecc_msg = |
2554 | "ECC disabled in the BIOS or no ECC capability, module will not load.\n" | |
2555 | " Either enable ECC checking or force module loading by setting " | |
2556 | "'ecc_enable_override'.\n" | |
2557 | " (Note that use of the override may cause unknown side effects.)\n"; | |
be3468e8 | 2558 | |
f9431992 DT |
2559 | static int amd64_check_ecc_enabled(struct amd64_pvt *pvt) |
2560 | { | |
2561 | u32 value; | |
06724535 BP |
2562 | u8 ecc_enabled = 0; |
2563 | bool nb_mce_en = false; | |
f9431992 | 2564 | |
6ba5dcdc | 2565 | amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value); |
f9431992 DT |
2566 | |
2567 | ecc_enabled = !!(value & K8_NBCFG_ECC_ENABLE); | |
be3468e8 | 2568 | if (!ecc_enabled) |
cab4d277 | 2569 | amd64_printk(KERN_NOTICE, "This node reports that Memory ECC " |
be3468e8 BP |
2570 | "is currently disabled, set F3x%x[22] (%s).\n", |
2571 | K8_NBCFG, pci_name(pvt->misc_f3_ctl)); | |
2572 | else | |
2573 | amd64_printk(KERN_INFO, "ECC is enabled by BIOS.\n"); | |
f9431992 | 2574 | |
06724535 BP |
2575 | nb_mce_en = amd64_nb_mce_bank_enabled_on_node(pvt->mc_node_id); |
2576 | if (!nb_mce_en) | |
cab4d277 | 2577 | amd64_printk(KERN_NOTICE, "NB MCE bank disabled, set MSR " |
be3468e8 BP |
2578 | "0x%08x[4] on node %d to enable.\n", |
2579 | MSR_IA32_MCG_CTL, pvt->mc_node_id); | |
f9431992 | 2580 | |
06724535 | 2581 | if (!ecc_enabled || !nb_mce_en) { |
f9431992 | 2582 | if (!ecc_enable_override) { |
cab4d277 | 2583 | amd64_printk(KERN_NOTICE, "%s", ecc_msg); |
be3468e8 | 2584 | return -ENODEV; |
d95cf4de BP |
2585 | } else { |
2586 | amd64_printk(KERN_WARNING, "Forcing ECC checking on!\n"); | |
be3468e8 | 2587 | } |
43f5e687 | 2588 | } |
f9431992 | 2589 | |
be3468e8 | 2590 | return 0; |
f9431992 DT |
2591 | } |
2592 | ||
7d6034d3 DT |
2593 | struct mcidev_sysfs_attribute sysfs_attrs[ARRAY_SIZE(amd64_dbg_attrs) + |
2594 | ARRAY_SIZE(amd64_inj_attrs) + | |
2595 | 1]; | |
2596 | ||
2597 | struct mcidev_sysfs_attribute terminator = { .attr = { .name = NULL } }; | |
2598 | ||
2599 | static void amd64_set_mc_sysfs_attributes(struct mem_ctl_info *mci) | |
2600 | { | |
2601 | unsigned int i = 0, j = 0; | |
2602 | ||
2603 | for (; i < ARRAY_SIZE(amd64_dbg_attrs); i++) | |
2604 | sysfs_attrs[i] = amd64_dbg_attrs[i]; | |
2605 | ||
2606 | for (j = 0; j < ARRAY_SIZE(amd64_inj_attrs); j++, i++) | |
2607 | sysfs_attrs[i] = amd64_inj_attrs[j]; | |
2608 | ||
2609 | sysfs_attrs[i] = terminator; | |
2610 | ||
2611 | mci->mc_driver_sysfs_attributes = sysfs_attrs; | |
2612 | } | |
2613 | ||
2614 | static void amd64_setup_mci_misc_attributes(struct mem_ctl_info *mci) | |
2615 | { | |
2616 | struct amd64_pvt *pvt = mci->pvt_info; | |
2617 | ||
2618 | mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2; | |
2619 | mci->edac_ctl_cap = EDAC_FLAG_NONE; | |
7d6034d3 DT |
2620 | |
2621 | if (pvt->nbcap & K8_NBCAP_SECDED) | |
2622 | mci->edac_ctl_cap |= EDAC_FLAG_SECDED; | |
2623 | ||
2624 | if (pvt->nbcap & K8_NBCAP_CHIPKILL) | |
2625 | mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED; | |
2626 | ||
2627 | mci->edac_cap = amd64_determine_edac_cap(pvt); | |
2628 | mci->mod_name = EDAC_MOD_STR; | |
2629 | mci->mod_ver = EDAC_AMD64_VERSION; | |
2630 | mci->ctl_name = get_amd_family_name(pvt->mc_type_index); | |
2631 | mci->dev_name = pci_name(pvt->dram_f2_ctl); | |
2632 | mci->ctl_page_to_phys = NULL; | |
2633 | ||
7d6034d3 DT |
2634 | /* memory scrubber interface */ |
2635 | mci->set_sdram_scrub_rate = amd64_set_scrub_rate; | |
2636 | mci->get_sdram_scrub_rate = amd64_get_scrub_rate; | |
2637 | } | |
2638 | ||
2639 | /* | |
2640 | * Init stuff for this DRAM Controller device. | |
2641 | * | |
2642 | * Due to a hardware feature on Fam10h CPUs, the Enable Extended Configuration | |
2643 | * Space feature MUST be enabled on ALL Processors prior to actually reading | |
2644 | * from the ECS registers. The module can be loaded on any 'core', and a core | |
2645 | * does not 'see' the other processors' ECS data unless ECS access is enabled | |
2646 | * on them too. Our solution is to first enable ECS access in this routine on | |
2647 | * all processors, gather some data into an amd64_pvt structure and later come | |
2648 | * back in a finish-setup function to perform the final initialization. See | |
2649 | * also amd64_init_2nd_stage() for that. | |
2650 | */ | |
2651 | static int amd64_probe_one_instance(struct pci_dev *dram_f2_ctl, | |
2652 | int mc_type_index) | |
2653 | { | |
2654 | struct amd64_pvt *pvt = NULL; | |
2655 | int err = 0, ret; | |
2656 | ||
2657 | ret = -ENOMEM; | |
2658 | pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL); | |
2659 | if (!pvt) | |
2660 | goto err_exit; | |
2661 | ||
37da0450 | 2662 | pvt->mc_node_id = get_node_id(dram_f2_ctl); |
7d6034d3 DT |
2663 | |
2664 | pvt->dram_f2_ctl = dram_f2_ctl; | |
2665 | pvt->ext_model = boot_cpu_data.x86_model >> 4; | |
2666 | pvt->mc_type_index = mc_type_index; | |
2667 | pvt->ops = family_ops(mc_type_index); | |
7d6034d3 DT |
2668 | |
2669 | /* | |
2670 | * We have the dram_f2_ctl device as an argument, now go reserve its | |
2671 | * sibling devices from the PCI system. | |
2672 | */ | |
2673 | ret = -ENODEV; | |
2674 | err = amd64_reserve_mc_sibling_devices(pvt, mc_type_index); | |
2675 | if (err) | |
2676 | goto err_free; | |
2677 | ||
2678 | ret = -EINVAL; | |
2679 | err = amd64_check_ecc_enabled(pvt); | |
2680 | if (err) | |
2681 | goto err_put; | |
2682 | ||
2683 | /* | |
2684 | * Key operation here: setup of HW prior to performing ops on it. Some | |
2685 | * setup is required to access ECS data. After this is performed, the | |
2686 | * 'teardown' function must be called upon error and normal exit paths. | |
2687 | */ | |
2688 | if (boot_cpu_data.x86 >= 0x10) | |
2689 | amd64_setup(pvt); | |
2690 | ||
2691 | /* | |
2692 | * Save the pointer to the private data for use in 2nd initialization | |
2693 | * stage | |
2694 | */ | |
2695 | pvt_lookup[pvt->mc_node_id] = pvt; | |
2696 | ||
2697 | return 0; | |
2698 | ||
2699 | err_put: | |
2700 | amd64_free_mc_sibling_devices(pvt); | |
2701 | ||
2702 | err_free: | |
2703 | kfree(pvt); | |
2704 | ||
2705 | err_exit: | |
2706 | return ret; | |
2707 | } | |
2708 | ||
2709 | /* | |
2710 | * This is the finishing stage of the init code. Needs to be performed after all | |
2711 | * MCs' hardware have been prepped for accessing extended config space. | |
2712 | */ | |
2713 | static int amd64_init_2nd_stage(struct amd64_pvt *pvt) | |
2714 | { | |
2715 | int node_id = pvt->mc_node_id; | |
2716 | struct mem_ctl_info *mci; | |
18ba54ac | 2717 | int ret = -ENODEV; |
7d6034d3 DT |
2718 | |
2719 | amd64_read_mc_registers(pvt); | |
2720 | ||
7d6034d3 DT |
2721 | /* |
2722 | * We need to determine how many memory channels there are. Then use | |
2723 | * that information for calculating the size of the dynamic instance | |
2724 | * tables in the 'mci' structure | |
2725 | */ | |
2726 | pvt->channel_count = pvt->ops->early_channel_count(pvt); | |
2727 | if (pvt->channel_count < 0) | |
2728 | goto err_exit; | |
2729 | ||
2730 | ret = -ENOMEM; | |
9d858bb1 | 2731 | mci = edac_mc_alloc(0, pvt->cs_count, pvt->channel_count, node_id); |
7d6034d3 DT |
2732 | if (!mci) |
2733 | goto err_exit; | |
2734 | ||
2735 | mci->pvt_info = pvt; | |
2736 | ||
2737 | mci->dev = &pvt->dram_f2_ctl->dev; | |
2738 | amd64_setup_mci_misc_attributes(mci); | |
2739 | ||
2740 | if (amd64_init_csrows(mci)) | |
2741 | mci->edac_cap = EDAC_FLAG_NONE; | |
2742 | ||
2743 | amd64_enable_ecc_error_reporting(mci); | |
2744 | amd64_set_mc_sysfs_attributes(mci); | |
2745 | ||
2746 | ret = -ENODEV; | |
2747 | if (edac_mc_add_mc(mci)) { | |
2748 | debugf1("failed edac_mc_add_mc()\n"); | |
2749 | goto err_add_mc; | |
2750 | } | |
2751 | ||
2752 | mci_lookup[node_id] = mci; | |
2753 | pvt_lookup[node_id] = NULL; | |
549d042d BP |
2754 | |
2755 | /* register stuff with EDAC MCE */ | |
2756 | if (report_gart_errors) | |
2757 | amd_report_gart_errors(true); | |
2758 | ||
2759 | amd_register_ecc_decoder(amd64_decode_bus_error); | |
2760 | ||
7d6034d3 DT |
2761 | return 0; |
2762 | ||
2763 | err_add_mc: | |
2764 | edac_mc_free(mci); | |
2765 | ||
2766 | err_exit: | |
2767 | debugf0("failure to init 2nd stage: ret=%d\n", ret); | |
2768 | ||
2769 | amd64_restore_ecc_error_reporting(pvt); | |
2770 | ||
2771 | if (boot_cpu_data.x86 > 0xf) | |
2772 | amd64_teardown(pvt); | |
2773 | ||
2774 | amd64_free_mc_sibling_devices(pvt); | |
2775 | ||
2776 | kfree(pvt_lookup[pvt->mc_node_id]); | |
2777 | pvt_lookup[node_id] = NULL; | |
2778 | ||
2779 | return ret; | |
2780 | } | |
2781 | ||
2782 | ||
2783 | static int __devinit amd64_init_one_instance(struct pci_dev *pdev, | |
2784 | const struct pci_device_id *mc_type) | |
2785 | { | |
2786 | int ret = 0; | |
2787 | ||
37da0450 | 2788 | debugf0("(MC node=%d,mc_type='%s')\n", get_node_id(pdev), |
7d6034d3 DT |
2789 | get_amd_family_name(mc_type->driver_data)); |
2790 | ||
2791 | ret = pci_enable_device(pdev); | |
2792 | if (ret < 0) | |
2793 | ret = -EIO; | |
2794 | else | |
2795 | ret = amd64_probe_one_instance(pdev, mc_type->driver_data); | |
2796 | ||
2797 | if (ret < 0) | |
2798 | debugf0("ret=%d\n", ret); | |
2799 | ||
2800 | return ret; | |
2801 | } | |
2802 | ||
2803 | static void __devexit amd64_remove_one_instance(struct pci_dev *pdev) | |
2804 | { | |
2805 | struct mem_ctl_info *mci; | |
2806 | struct amd64_pvt *pvt; | |
2807 | ||
2808 | /* Remove from EDAC CORE tracking list */ | |
2809 | mci = edac_mc_del_mc(&pdev->dev); | |
2810 | if (!mci) | |
2811 | return; | |
2812 | ||
2813 | pvt = mci->pvt_info; | |
2814 | ||
2815 | amd64_restore_ecc_error_reporting(pvt); | |
2816 | ||
2817 | if (boot_cpu_data.x86 > 0xf) | |
2818 | amd64_teardown(pvt); | |
2819 | ||
2820 | amd64_free_mc_sibling_devices(pvt); | |
2821 | ||
549d042d BP |
2822 | /* unregister from EDAC MCE */ |
2823 | amd_report_gart_errors(false); | |
2824 | amd_unregister_ecc_decoder(amd64_decode_bus_error); | |
2825 | ||
7d6034d3 | 2826 | /* Free the EDAC CORE resources */ |
8f68ed97 BP |
2827 | mci->pvt_info = NULL; |
2828 | mci_lookup[pvt->mc_node_id] = NULL; | |
2829 | ||
2830 | kfree(pvt); | |
7d6034d3 DT |
2831 | edac_mc_free(mci); |
2832 | } | |
2833 | ||
2834 | /* | |
2835 | * This table is part of the interface for loading drivers for PCI devices. The | |
2836 | * PCI core identifies what devices are on a system during boot, and then | |
2837 | * queries this table to see whether this driver is for a given device found. | |
2838 | */ | |
2839 | static const struct pci_device_id amd64_pci_table[] __devinitdata = { | |
2840 | { | |
2841 | .vendor = PCI_VENDOR_ID_AMD, | |
2842 | .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL, | |
2843 | .subvendor = PCI_ANY_ID, | |
2844 | .subdevice = PCI_ANY_ID, | |
2845 | .class = 0, | |
2846 | .class_mask = 0, | |
2847 | .driver_data = K8_CPUS | |
2848 | }, | |
2849 | { | |
2850 | .vendor = PCI_VENDOR_ID_AMD, | |
2851 | .device = PCI_DEVICE_ID_AMD_10H_NB_DRAM, | |
2852 | .subvendor = PCI_ANY_ID, | |
2853 | .subdevice = PCI_ANY_ID, | |
2854 | .class = 0, | |
2855 | .class_mask = 0, | |
2856 | .driver_data = F10_CPUS | |
2857 | }, | |
2858 | { | |
2859 | .vendor = PCI_VENDOR_ID_AMD, | |
2860 | .device = PCI_DEVICE_ID_AMD_11H_NB_DRAM, | |
2861 | .subvendor = PCI_ANY_ID, | |
2862 | .subdevice = PCI_ANY_ID, | |
2863 | .class = 0, | |
2864 | .class_mask = 0, | |
2865 | .driver_data = F11_CPUS | |
2866 | }, | |
2867 | {0, } | |
2868 | }; | |
2869 | MODULE_DEVICE_TABLE(pci, amd64_pci_table); | |
2870 | ||
2871 | static struct pci_driver amd64_pci_driver = { | |
2872 | .name = EDAC_MOD_STR, | |
2873 | .probe = amd64_init_one_instance, | |
2874 | .remove = __devexit_p(amd64_remove_one_instance), | |
2875 | .id_table = amd64_pci_table, | |
2876 | }; | |
2877 | ||
2878 | static void amd64_setup_pci_device(void) | |
2879 | { | |
2880 | struct mem_ctl_info *mci; | |
2881 | struct amd64_pvt *pvt; | |
2882 | ||
2883 | if (amd64_ctl_pci) | |
2884 | return; | |
2885 | ||
2886 | mci = mci_lookup[0]; | |
2887 | if (mci) { | |
2888 | ||
2889 | pvt = mci->pvt_info; | |
2890 | amd64_ctl_pci = | |
2891 | edac_pci_create_generic_ctl(&pvt->dram_f2_ctl->dev, | |
2892 | EDAC_MOD_STR); | |
2893 | ||
2894 | if (!amd64_ctl_pci) { | |
2895 | pr_warning("%s(): Unable to create PCI control\n", | |
2896 | __func__); | |
2897 | ||
2898 | pr_warning("%s(): PCI error report via EDAC not set\n", | |
2899 | __func__); | |
2900 | } | |
2901 | } | |
2902 | } | |
2903 | ||
2904 | static int __init amd64_edac_init(void) | |
2905 | { | |
2906 | int nb, err = -ENODEV; | |
56b34b91 | 2907 | bool load_ok = false; |
7d6034d3 DT |
2908 | |
2909 | edac_printk(KERN_INFO, EDAC_MOD_STR, EDAC_AMD64_VERSION "\n"); | |
2910 | ||
2911 | opstate_init(); | |
2912 | ||
2913 | if (cache_k8_northbridges() < 0) | |
56b34b91 | 2914 | goto err_ret; |
7d6034d3 | 2915 | |
50542251 | 2916 | msrs = msrs_alloc(); |
56b34b91 BP |
2917 | if (!msrs) |
2918 | goto err_ret; | |
50542251 | 2919 | |
7d6034d3 DT |
2920 | err = pci_register_driver(&amd64_pci_driver); |
2921 | if (err) | |
56b34b91 | 2922 | goto err_pci; |
7d6034d3 DT |
2923 | |
2924 | /* | |
2925 | * At this point, the array 'pvt_lookup[]' contains pointers to alloc'd | |
2926 | * amd64_pvt structs. These will be used in the 2nd stage init function | |
2927 | * to finish initialization of the MC instances. | |
2928 | */ | |
56b34b91 | 2929 | err = -ENODEV; |
7d6034d3 DT |
2930 | for (nb = 0; nb < num_k8_northbridges; nb++) { |
2931 | if (!pvt_lookup[nb]) | |
2932 | continue; | |
2933 | ||
2934 | err = amd64_init_2nd_stage(pvt_lookup[nb]); | |
2935 | if (err) | |
37da0450 | 2936 | goto err_2nd_stage; |
7d6034d3 | 2937 | |
56b34b91 BP |
2938 | load_ok = true; |
2939 | } | |
7d6034d3 | 2940 | |
56b34b91 BP |
2941 | if (load_ok) { |
2942 | amd64_setup_pci_device(); | |
2943 | return 0; | |
2944 | } | |
7d6034d3 | 2945 | |
37da0450 | 2946 | err_2nd_stage: |
7d6034d3 | 2947 | pci_unregister_driver(&amd64_pci_driver); |
56b34b91 BP |
2948 | err_pci: |
2949 | msrs_free(msrs); | |
2950 | msrs = NULL; | |
2951 | err_ret: | |
7d6034d3 DT |
2952 | return err; |
2953 | } | |
2954 | ||
2955 | static void __exit amd64_edac_exit(void) | |
2956 | { | |
2957 | if (amd64_ctl_pci) | |
2958 | edac_pci_release_generic_ctl(amd64_ctl_pci); | |
2959 | ||
2960 | pci_unregister_driver(&amd64_pci_driver); | |
50542251 BP |
2961 | |
2962 | msrs_free(msrs); | |
2963 | msrs = NULL; | |
7d6034d3 DT |
2964 | } |
2965 | ||
2966 | module_init(amd64_edac_init); | |
2967 | module_exit(amd64_edac_exit); | |
2968 | ||
2969 | MODULE_LICENSE("GPL"); | |
2970 | MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, " | |
2971 | "Dave Peterson, Thayne Harbaugh"); | |
2972 | MODULE_DESCRIPTION("MC support for AMD64 memory controllers - " | |
2973 | EDAC_AMD64_VERSION); | |
2974 | ||
2975 | module_param(edac_op_state, int, 0444); | |
2976 | MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); |