/*
 * Intel D82875P Memory Controller kernel module
 * (C) 2003 Linux Networx (http://lnxi.com)
 * This file may be distributed under the terms of the
 * GNU General Public License.
 *
 * Written by Thayne Harbaugh
 * Contributors:
 *	Wang Zhenyu at intel.com
 *
 * $Id: edac_i82875p.c,v 1.5.2.11 2005/10/05 00:43:44 dsp_llnl Exp $
 *
 * Note: E7210 appears same as D82875P - zhenyu.z.wang at intel.com
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/edac.h>
#include "edac_module.h"

#define EDAC_MOD_STR		"i82875p_edac"

#define i82875p_printk(level, fmt, arg...) \
	edac_printk(level, "i82875p", fmt, ##arg)

#define i82875p_mc_printk(mci, level, fmt, arg...) \
	edac_mc_chipset_printk(mci, level, "i82875p", fmt, ##arg)

#ifndef PCI_DEVICE_ID_INTEL_82875_0
#define PCI_DEVICE_ID_INTEL_82875_0	0x2578
#endif	/* PCI_DEVICE_ID_INTEL_82875_0 */

#ifndef PCI_DEVICE_ID_INTEL_82875_6
#define PCI_DEVICE_ID_INTEL_82875_6	0x257e
#endif	/* PCI_DEVICE_ID_INTEL_82875_6 */

/* four csrows in dual channel, eight in single channel */
#define I82875P_NR_DIMMS		8
#define I82875P_NR_CSROWS(nr_chans)	(I82875P_NR_DIMMS / (nr_chans))

/* Intel 82875p register addresses - device 0 function 0 - DRAM Controller */
#define I82875P_EAP		0x58	/* Error Address Pointer (32b)
					 *
					 * 31:12 block address
					 * 11:0  reserved
					 */

#define I82875P_DERRSYN		0x5c	/* DRAM Error Syndrome (8b)
					 *
					 *  7:0  DRAM ECC Syndrome
					 */

#define I82875P_DES		0x5d	/* DRAM Error Status (8b)
					 *
					 *  7:1  reserved
					 *  0    Error channel 0/1
					 */

#define I82875P_ERRSTS		0xc8	/* Error Status Register (16b)
					 *
					 * 15:10 reserved
					 *  9    non-DRAM lock error (ndlock)
					 *  8    Sftwr Generated SMI
					 *  7    ECC UE
					 *  6    reserved
					 *  5    MCH detects unimplemented cycle
					 *  4    AGP access outside GA
					 *  3    Invalid AGP access
					 *  2    Invalid GA translation table
					 *  1    Unsupported AGP command
					 *  0    ECC CE
					 */

#define I82875P_ERRCMD		0xca	/* Error Command (16b)
					 *
					 * 15:10 reserved
					 *  9    SERR on non-DRAM lock
					 *  8    SERR on ECC UE
					 *  7    SERR on ECC CE
					 *  6    target abort on high exception
					 *  5    detect unimplemented cyc
					 *  4    AGP access outside of GA
					 *  3    SERR on invalid AGP access
					 *  2    invalid translation table
					 *  1    SERR on unsupported AGP command
					 *  0    reserved
					 */

/* Intel 82875p register addresses - device 6 function 0 - DRAM Controller */
#define I82875P_PCICMD6		0x04	/* PCI Command Register (16b)
					 *
					 * 15:10 reserved
					 *  9    fast back-to-back - ro 0
					 *  8    SERR enable - ro 0
					 *  7    addr/data stepping - ro 0
					 *  6    parity err enable - ro 0
					 *  5    VGA palette snoop - ro 0
					 *  4    mem wr & invalidate - ro 0
					 *  3    special cycle - ro 0
					 *  2    bus master - ro 0
					 *  1    mem access dev6 - 0(dis),1(en)
					 *  0    IO access dev3 - 0(dis),1(en)
					 */

#define I82875P_BAR6		0x10	/* Mem Delays Base ADDR Reg (32b)
					 *
					 * 31:12 mem base addr [31:12]
					 * 11:4  address mask - ro 0
					 *  3    prefetchable - ro 0(non),1(pre)
					 *  2:1  mem type - ro 0
					 *  0    mem space - ro 0
					 */

/* Intel 82875p MMIO register space - device 0 function 0 - MMR space */

#define I82875P_DRB_SHIFT 26	/* 64MiB grain */
#define I82875P_DRB		0x00	/* DRAM Row Boundary (8b x 8)
					 *
					 *  7    reserved
					 *  6:0  64MiB row boundary addr
					 */

#define I82875P_DRA		0x10	/* DRAM Row Attribute (4b x 8)
					 *
					 *  7    reserved
					 *  6:4  row attr row 1
					 *  3    reserved
					 *  2:0  row attr row 0
					 *
					 * 000 =  4KiB
					 * 001 =  8KiB
					 * 010 = 16KiB
					 * 011 = 32KiB
					 */

#define I82875P_DRC		0x68	/* DRAM Controller Mode (32b)
					 *
					 * 31:30 reserved
					 * 29    init complete
					 * 28:23 reserved
					 * 22:21 nr chan 00=1,01=2
					 * 20    reserved
					 * 19:18 Data Integ Mode 00=none,01=ecc
					 * 17:11 reserved
					 * 10:8  refresh mode
					 *  7    reserved
					 *  6:4  mode select
					 *  3:2  reserved
					 *  1:0  DRAM type 01=DDR
					 */

enum i82875p_chips {
	I82875P = 0,
};

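/*
 * Per-controller private data: the hidden "overflow" device (device 6,
 * function 0) and its ioremapped MMIO window, which provides access to the
 * DRB/DRA/DRC registers used below.
 */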
struct i82875p_pvt {
	struct pci_dev *ovrfl_pdev;
	void __iomem *ovrfl_window;
};

struct i82875p_dev_info {
	const char *ctl_name;
};

struct i82875p_error_info {
	u16 errsts;
	u32 eap;
	u8 des;
	u8 derrsyn;
	u16 errsts2;
};

static const struct i82875p_dev_info i82875p_devs[] = {
	[I82875P] = {
		.ctl_name = "i82875p"},
};

static struct pci_dev *mci_pdev;	/* init dev: in case the AGP code
					 * has already registered the driver
					 */

static struct edac_pci_ctl_info *i82875p_pci;

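/*
 * Snapshot the error registers into @info and clear the sticky CE/UE status
 * bits so the next error can be latched by the hardware.
 */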
static void i82875p_get_error_info(struct mem_ctl_info *mci,
				   struct i82875p_error_info *info)
{
	struct pci_dev *pdev;

	pdev = to_pci_dev(mci->pdev);

	/*
	 * This is a mess because there is no atomic way to read all the
	 * registers at once, and the registers can change under us when a
	 * CE is overwritten by a UE.
	 */
	pci_read_config_word(pdev, I82875P_ERRSTS, &info->errsts);

	if (!(info->errsts & 0x0081))
		return;

	pci_read_config_dword(pdev, I82875P_EAP, &info->eap);
	pci_read_config_byte(pdev, I82875P_DES, &info->des);
	pci_read_config_byte(pdev, I82875P_DERRSYN, &info->derrsyn);
	pci_read_config_word(pdev, I82875P_ERRSTS, &info->errsts2);

	/*
	 * If the error is the same for both reads then the first set of
	 * reads is valid.  If there is a change then there is a CE with no
	 * info and the second set of reads is valid and should be UE info.
	 */
	if ((info->errsts ^ info->errsts2) & 0x0081) {
		pci_read_config_dword(pdev, I82875P_EAP, &info->eap);
		pci_read_config_byte(pdev, I82875P_DES, &info->des);
		pci_read_config_byte(pdev, I82875P_DERRSYN, &info->derrsyn);
	}

	pci_write_bits16(pdev, I82875P_ERRSTS, 0x0081, 0x0081);
}

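/*
 * Decode a latched error record and report it via edac_mc_handle_error().
 * Returns 1 if an error was found (whether or not it was handled), else 0.
 */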
static int i82875p_process_error_info(struct mem_ctl_info *mci,
				      struct i82875p_error_info *info,
				      int handle_errors)
{
	int row, multi_chan;

	multi_chan = mci->csrows[0]->nr_channels - 1;

	if (!(info->errsts & 0x0081))
		return 0;

	if (!handle_errors)
		return 1;

	if ((info->errsts ^ info->errsts2) & 0x0081) {
		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
				     -1, -1, -1,
				     "UE overwrote CE", "");
		info->errsts = info->errsts2;
	}

	info->eap >>= PAGE_SHIFT;
	row = edac_mc_find_csrow_by_page(mci, info->eap);

	if (info->errsts & 0x0080)
		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
				     info->eap, 0, 0,
				     row, -1, -1,
				     "i82875p UE", "");
	else
		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
				     info->eap, 0, info->derrsyn,
				     row, multi_chan ? (info->des & 0x1) : 0,
				     -1, "i82875p CE", "");

	return 1;
}

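/* Polling callback invoked by the EDAC core: latch and report any errors. */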
static void i82875p_check(struct mem_ctl_info *mci)
{
	struct i82875p_error_info info;

	edac_dbg(1, "MC%d\n", mci->mc_idx);
	i82875p_get_error_info(mci, &info);
	i82875p_process_error_info(mci, &info, 1);
}

/* Return 0 on success or 1 on failure. */
static int i82875p_setup_overfl_dev(struct pci_dev *pdev,
				    struct pci_dev **ovrfl_pdev,
				    void __iomem **ovrfl_window)
{
	struct pci_dev *dev;
	void __iomem *window;

	*ovrfl_pdev = NULL;
	*ovrfl_window = NULL;
	dev = pci_get_device(PCI_VEND_DEV(INTEL, 82875_6), NULL);

	if (dev == NULL) {
		/* Intel tells BIOS developers to hide device 6, the
		 * overflow device that contains the DRBs - unhide it here.
		 * http://www.x86-secret.com/articles/tweak/pat/patsecrets-2.htm
		 */
		pci_write_bits8(pdev, 0xf4, 0x2, 0x2);
		dev = pci_scan_single_device(pdev->bus, PCI_DEVFN(6, 0));

		if (dev == NULL)
			return 1;

		pci_bus_assign_resources(dev->bus);
		pci_bus_add_device(dev);
	}

	*ovrfl_pdev = dev;

	if (pci_enable_device(dev)) {
		i82875p_printk(KERN_ERR, "%s(): Failed to enable overflow "
			       "device\n", __func__);
		return 1;
	}

	if (pci_request_regions(dev, pci_name(dev))) {
#ifdef CORRECT_BIOS
		goto fail0;
#endif
	}

	/* cache is irrelevant for PCI bus reads/writes */
	window = pci_ioremap_bar(dev, 0);
	if (window == NULL) {
		i82875p_printk(KERN_ERR, "%s(): Failed to ioremap bar6\n",
			       __func__);
		goto fail1;
	}

	*ovrfl_window = window;
	return 0;

fail1:
	pci_release_regions(dev);

#ifdef CORRECT_BIOS
fail0:
	pci_disable_device(dev);
#endif
	/* NOTE: the ovrfl proc entry and pci_dev are intentionally left */
	return 1;
}

/* Return 1 if dual channel mode is active.  Else return 0. */
static inline int dual_channel_active(u32 drc)
{
	return (drc >> 21) & 0x1;
}

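/*
 * Walk the DRB registers in the overflow device's MMIO window and fill in
 * the csrow/dimm information (page ranges, grain, memory type, EDAC mode).
 */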
static void i82875p_init_csrows(struct mem_ctl_info *mci,
				struct pci_dev *pdev,
				void __iomem *ovrfl_window, u32 drc)
{
	struct csrow_info *csrow;
	struct dimm_info *dimm;
	unsigned nr_chans = dual_channel_active(drc) + 1;
	unsigned long last_cumul_size;
	u8 value;
	u32 drc_ddim;		/* DRAM Data Integrity Mode 0=none, 1=ECC */
	u32 cumul_size, nr_pages;
	int index, j;

	drc_ddim = (drc >> 18) & 0x1;
	last_cumul_size = 0;

	/* The dram row boundary (DRB) reg values are boundary address
	 * for each DRAM row with a granularity of 32 or 64MB (single/dual
	 * channel operation).  DRB regs are cumulative; therefore DRB7 will
	 * contain the total memory contained in all eight rows.
	 */

	for (index = 0; index < mci->nr_csrows; index++) {
		csrow = mci->csrows[index];

		value = readb(ovrfl_window + I82875P_DRB + index);
		cumul_size = value << (I82875P_DRB_SHIFT - PAGE_SHIFT);
		edac_dbg(3, "(%d) cumul_size 0x%x\n", index, cumul_size);
		if (cumul_size == last_cumul_size)
			continue;	/* not populated */

		csrow->first_page = last_cumul_size;
		csrow->last_page = cumul_size - 1;
		nr_pages = cumul_size - last_cumul_size;
		last_cumul_size = cumul_size;

		for (j = 0; j < nr_chans; j++) {
			dimm = csrow->channels[j]->dimm;

			dimm->nr_pages = nr_pages / nr_chans;
			dimm->grain = 1 << 12;	/* I82875P_EAP has 4KiB resolution */
			dimm->mtype = MEM_DDR;
			dimm->dtype = DEV_UNKNOWN;
			dimm->edac_mode = drc_ddim ? EDAC_SECDED : EDAC_NONE;
		}
	}
}

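/*
 * Do the real probe work: expose the overflow device, allocate and fill in
 * the mem_ctl_info, register it with the EDAC core and set up the generic
 * PCI error control.
 */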
static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
{
	int rc = -ENODEV;
	struct mem_ctl_info *mci;
	struct edac_mc_layer layers[2];
	struct i82875p_pvt *pvt;
	struct pci_dev *ovrfl_pdev;
	void __iomem *ovrfl_window;
	u32 drc;
	u32 nr_chans;
	struct i82875p_error_info discard;

	edac_dbg(0, "\n");

	if (i82875p_setup_overfl_dev(pdev, &ovrfl_pdev, &ovrfl_window))
		return -ENODEV;
	drc = readl(ovrfl_window + I82875P_DRC);
	nr_chans = dual_channel_active(drc) + 1;

	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
	layers[0].size = I82875P_NR_CSROWS(nr_chans);
	layers[0].is_virt_csrow = true;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;
	layers[1].size = nr_chans;
	layers[1].is_virt_csrow = false;
	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
	if (!mci) {
		rc = -ENOMEM;
		goto fail0;
	}

	edac_dbg(3, "init mci\n");
	mci->pdev = &pdev->dev;
	mci->mtype_cap = MEM_FLAG_DDR;
	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
	mci->edac_cap = EDAC_FLAG_UNKNOWN;
	mci->mod_name = EDAC_MOD_STR;
	mci->ctl_name = i82875p_devs[dev_idx].ctl_name;
	mci->dev_name = pci_name(pdev);
	mci->edac_check = i82875p_check;
	mci->ctl_page_to_phys = NULL;
	edac_dbg(3, "init pvt\n");
	pvt = (struct i82875p_pvt *)mci->pvt_info;
	pvt->ovrfl_pdev = ovrfl_pdev;
	pvt->ovrfl_window = ovrfl_window;
	i82875p_init_csrows(mci, pdev, ovrfl_window, drc);
	i82875p_get_error_info(mci, &discard);	/* clear counters */

	/* Here we assume that we will never see multiple instances of this
	 * type of memory controller.  The ID is therefore hardcoded to 0.
	 */
	if (edac_mc_add_mc(mci)) {
		edac_dbg(3, "failed edac_mc_add_mc()\n");
		goto fail1;
	}

	/* allocating generic PCI control info */
	i82875p_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
	if (!i82875p_pci) {
		printk(KERN_WARNING
			"%s(): Unable to create PCI control\n",
			__func__);
		printk(KERN_WARNING
			"%s(): PCI error report via EDAC not setup\n",
			__func__);
	}

	/* get this far and it's successful */
	edac_dbg(3, "success\n");
	return 0;

fail1:
	edac_mc_free(mci);

fail0:
	iounmap(ovrfl_window);
	pci_release_regions(ovrfl_pdev);

	pci_disable_device(ovrfl_pdev);
	/* NOTE: the ovrfl proc entry and pci_dev are intentionally left */
	return rc;
}

/* Returns 0 on success, or a negative value on error. */
static int i82875p_init_one(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
{
	int rc;

	edac_dbg(0, "\n");
	i82875p_printk(KERN_INFO, "i82875p init one\n");

	if (pci_enable_device(pdev) < 0)
		return -EIO;

	rc = i82875p_probe1(pdev, ent->driver_data);

	if (mci_pdev == NULL)
		mci_pdev = pci_dev_get(pdev);

	return rc;
}

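/* Undo i82875p_probe1(): unregister the MC and release the overflow device. */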
static void i82875p_remove_one(struct pci_dev *pdev)
{
	struct mem_ctl_info *mci;
	struct i82875p_pvt *pvt = NULL;

	edac_dbg(0, "\n");

	if (i82875p_pci)
		edac_pci_release_generic_ctl(i82875p_pci);

	if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
		return;

	pvt = (struct i82875p_pvt *)mci->pvt_info;

	if (pvt->ovrfl_window)
		iounmap(pvt->ovrfl_window);

	if (pvt->ovrfl_pdev) {
#ifdef CORRECT_BIOS
		pci_release_regions(pvt->ovrfl_pdev);
#endif	/* CORRECT_BIOS */
		pci_disable_device(pvt->ovrfl_pdev);
		pci_dev_put(pvt->ovrfl_pdev);
	}

	edac_mc_free(mci);
}

static const struct pci_device_id i82875p_pci_tbl[] = {
	{
	 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	 I82875P},
	{
	 0,
	 }			/* 0 terminated list. */
};

MODULE_DEVICE_TABLE(pci, i82875p_pci_tbl);

static struct pci_driver i82875p_driver = {
	.name = EDAC_MOD_STR,
	.probe = i82875p_init_one,
	.remove = i82875p_remove_one,
	.id_table = i82875p_pci_tbl,
};

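/*
 * Module init: register the PCI driver; if the bridge device was already
 * claimed by another driver (e.g. the AGP code), look it up and probe it
 * directly so the memory controller is still registered.
 */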
static int __init i82875p_init(void)
{
	int pci_rc;

	edac_dbg(3, "\n");

	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
	opstate_init();

	pci_rc = pci_register_driver(&i82875p_driver);

	if (pci_rc < 0)
		goto fail0;

	if (mci_pdev == NULL) {
		mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
					  PCI_DEVICE_ID_INTEL_82875_0, NULL);

		if (!mci_pdev) {
			edac_dbg(0, "875p pci_get_device fail\n");
			pci_rc = -ENODEV;
			goto fail1;
		}

		pci_rc = i82875p_init_one(mci_pdev, i82875p_pci_tbl);

		if (pci_rc < 0) {
			edac_dbg(0, "875p init fail\n");
			pci_rc = -ENODEV;
			goto fail1;
		}
	}

	return 0;

fail1:
	pci_unregister_driver(&i82875p_driver);

fail0:
	pci_dev_put(mci_pdev);
	return pci_rc;
}

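/* Module exit: tear down the directly-probed device and unregister the driver. */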
static void __exit i82875p_exit(void)
{
	edac_dbg(3, "\n");

	i82875p_remove_one(mci_pdev);
	pci_dev_put(mci_pdev);

	pci_unregister_driver(&i82875p_driver);
}

module_init(i82875p_init);
module_exit(i82875p_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh");
MODULE_DESCRIPTION("MC support for Intel 82875 memory hub controllers");

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");