drivers/edac/thunderx_edac.c
1 /*
2 * Cavium ThunderX memory controller kernel module
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright Cavium, Inc. (C) 2015-2017. All rights reserved.
9 *
10 */
11
12 #include <linux/module.h>
13 #include <linux/pci.h>
14 #include <linux/edac.h>
15 #include <linux/interrupt.h>
16 #include <linux/string.h>
17 #include <linux/stop_machine.h>
18 #include <linux/delay.h>
19 #include <linux/sizes.h>
20 #include <linux/atomic.h>
21 #include <linux/bitfield.h>
22 #include <linux/circ_buf.h>
23
24 #include <asm/page.h>
25
26 #include "edac_module.h"
27
28 #define phys_to_pfn(phys) (PFN_DOWN(phys))
29
30 #define THUNDERX_NODE GENMASK(45, 44)
31
32 enum {
33 ERR_CORRECTED = 1,
34 ERR_UNCORRECTED = 2,
35 ERR_UNKNOWN = 3,
36 };
37
38 #define MAX_SYNDROME_REGS 4
39
40 struct error_syndrome {
41 u64 reg[MAX_SYNDROME_REGS];
42 };
43
44 struct error_descr {
45 int type;
46 u64 mask;
47 char *descr;
48 };
49
50 static void decode_register(char *str, size_t size,
51 const struct error_descr *descr,
52 const uint64_t reg)
53 {
54 int ret = 0;
55
56 while (descr->type && descr->mask && descr->descr) {
57 if (reg & descr->mask) {
58 ret = snprintf(str, size, "\n\t%s, %s",
59 descr->type == ERR_CORRECTED ?
60 "Corrected" : "Uncorrected",
61 descr->descr);
62 str += ret;
63 size -= ret;
64 }
65 descr++;
66 }
67 }
68
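/*
 * get_bits() returns the "width"-bit field of "data" starting at bit
 * position "pos", right-aligned. A worked example (illustrative values
 * only): get_bits(0xABCD, 4, 8) == 0xBC, i.e. bits [11:4] of 0xABCD.
 */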
69 static unsigned long get_bits(unsigned long data, int pos, int width)
70 {
71 return (data >> pos) & ((1 << width) - 1);
72 }
73
74 #define L2C_CTL 0x87E080800000
75 #define L2C_CTL_DISIDXALIAS BIT(0)
76
77 #define PCI_DEVICE_ID_THUNDER_LMC 0xa022
78
79 #define LMC_FADR 0x20
80 #define LMC_FADR_FDIMM(x) ((x >> 37) & 0x1)
81 #define LMC_FADR_FBUNK(x) ((x >> 36) & 0x1)
82 #define LMC_FADR_FBANK(x) ((x >> 32) & 0xf)
83 #define LMC_FADR_FROW(x) ((x >> 14) & 0xffff)
84 #define LMC_FADR_FCOL(x) ((x >> 0) & 0x1fff)
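/*
 * LMC_FADR field layout, as implied by the extractors above: bit 37 selects
 * the DIMM, bit 36 the rank ("bunk"), bits [35:32] the bank, bits [29:14]
 * the row and bits [12:0] the column of the faulting access.
 */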
85
86 #define LMC_NXM_FADR 0x28
87 #define LMC_ECC_SYND 0x38
88
89 #define LMC_ECC_PARITY_TEST 0x108
90
91 #define LMC_INT_W1S 0x150
92
93 #define LMC_INT_ENA_W1C 0x158
94 #define LMC_INT_ENA_W1S 0x160
95
96 #define LMC_CONFIG 0x188
97
98 #define LMC_CONFIG_BG2 BIT(62)
99 #define LMC_CONFIG_RANK_ENA BIT(42)
100 #define LMC_CONFIG_PBANK_LSB(x) (((x) >> 5) & 0xF)
101 #define LMC_CONFIG_ROW_LSB(x) (((x) >> 2) & 0x7)
102
103 #define LMC_CONTROL 0x190
104 #define LMC_CONTROL_XOR_BANK BIT(16)
105
106 #define LMC_INT 0x1F0
107
108 #define LMC_INT_DDR_ERR BIT(11)
109 #define LMC_INT_DED_ERR (0xFUL << 5)
110 #define LMC_INT_SEC_ERR (0xFUL << 1)
111 #define LMC_INT_NXM_WR_MASK BIT(0)
112
113 #define LMC_DDR_PLL_CTL 0x258
114 #define LMC_DDR_PLL_CTL_DDR4 BIT(29)
115
116 #define LMC_FADR_SCRAMBLED 0x330
117
118 #define LMC_INT_UE (LMC_INT_DDR_ERR | LMC_INT_DED_ERR | \
119 LMC_INT_NXM_WR_MASK)
120
121 #define LMC_INT_CE (LMC_INT_SEC_ERR)
122
123 static const struct error_descr lmc_errors[] = {
124 {
125 .type = ERR_CORRECTED,
126 .mask = LMC_INT_SEC_ERR,
127 .descr = "Single-bit ECC error",
128 },
129 {
130 .type = ERR_UNCORRECTED,
131 .mask = LMC_INT_DDR_ERR,
132 .descr = "DDR chip error",
133 },
134 {
135 .type = ERR_UNCORRECTED,
136 .mask = LMC_INT_DED_ERR,
137 .descr = "Double-bit ECC error",
138 },
139 {
140 .type = ERR_UNCORRECTED,
141 .mask = LMC_INT_NXM_WR_MASK,
142 .descr = "Non-existent memory write",
143 },
144 {0, 0, NULL},
145 };
146
147 #define LMC_INT_EN_DDR_ERROR_ALERT_ENA BIT(5)
148 #define LMC_INT_EN_DLCRAM_DED_ERR BIT(4)
149 #define LMC_INT_EN_DLCRAM_SEC_ERR BIT(3)
150 #define LMC_INT_INTR_DED_ENA BIT(2)
151 #define LMC_INT_INTR_SEC_ENA BIT(1)
152 #define LMC_INT_INTR_NXM_WR_ENA BIT(0)
153
154 #define LMC_INT_ENA_ALL GENMASK(5, 0)
155
156 #define LMC_DDR_PLL_CTL 0x258
157 #define LMC_DDR_PLL_CTL_DDR4 BIT(29)
158
159 #define LMC_CONTROL 0x190
160 #define LMC_CONTROL_RDIMM BIT(0)
161
162 #define LMC_SCRAM_FADR 0x330
163
164 #define LMC_CHAR_MASK0 0x228
165 #define LMC_CHAR_MASK2 0x238
166
167 #define RING_ENTRIES 8
168
169 struct debugfs_entry {
170 const char *name;
171 umode_t mode;
172 const struct file_operations fops;
173 };
174
175 struct lmc_err_ctx {
176 u64 reg_int;
177 u64 reg_fadr;
178 u64 reg_nxm_fadr;
179 u64 reg_scram_fadr;
180 u64 reg_ecc_synd;
181 };
182
183 struct thunderx_lmc {
184 void __iomem *regs;
185 struct pci_dev *pdev;
186 struct msix_entry msix_ent;
187
188 atomic_t ecc_int;
189
190 u64 mask0;
191 u64 mask2;
192 u64 parity_test;
193 u64 node;
194
195 int xbits;
196 int bank_width;
197 int pbank_lsb;
198 int dimm_lsb;
199 int rank_lsb;
200 int bank_lsb;
201 int row_lsb;
202 int col_hi_lsb;
203
204 int xor_bank;
205 int l2c_alias;
206
207 struct page *mem;
208
209 struct lmc_err_ctx err_ctx[RING_ENTRIES];
210 unsigned long ring_head;
211 unsigned long ring_tail;
212 };
213
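/*
 * ring_pos() relies on RING_ENTRIES (and hence "size") being a power of two:
 * masking with (size - 1) is then equivalent to pos % size. For example,
 * ring_pos(9, 8) == 1, so the head/tail counters can grow without bound while
 * the index wraps around the 8-entry ring.
 */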
214 #define ring_pos(pos, size) ((pos) & (size - 1))
215
216 #define DEBUGFS_STRUCT(_name, _mode, _write, _read) \
217 static struct debugfs_entry debugfs_##_name = { \
218 .name = __stringify(_name), \
219 .mode = VERIFY_OCTAL_PERMISSIONS(_mode), \
220 .fops = { \
221 .open = simple_open, \
222 .write = _write, \
223 .read = _read, \
224 .llseek = generic_file_llseek, \
225 }, \
226 }
227
228 #define DEBUGFS_FIELD_ATTR(_type, _field) \
229 static ssize_t thunderx_##_type##_##_field##_read(struct file *file, \
230 char __user *data, \
231 size_t count, loff_t *ppos) \
232 { \
233 struct thunderx_##_type *pdata = file->private_data; \
234 char buf[20]; \
235 \
236 snprintf(buf, sizeof(buf), "0x%016llx", pdata->_field); \
237 return simple_read_from_buffer(data, count, ppos, \
238 buf, sizeof(buf)); \
239 } \
240 \
241 static ssize_t thunderx_##_type##_##_field##_write(struct file *file, \
242 const char __user *data, \
243 size_t count, loff_t *ppos) \
244 { \
245 struct thunderx_##_type *pdata = file->private_data; \
246 int res; \
247 \
248 res = kstrtoull_from_user(data, count, 0, &pdata->_field); \
249 \
250 return res ? res : count; \
251 } \
252 \
253 DEBUGFS_STRUCT(_field, 0600, \
254 thunderx_##_type##_##_field##_write, \
255 thunderx_##_type##_##_field##_read) \
256
257 #define DEBUGFS_REG_ATTR(_type, _name, _reg) \
258 static ssize_t thunderx_##_type##_##_name##_read(struct file *file, \
259 char __user *data, \
260 size_t count, loff_t *ppos) \
261 { \
262 struct thunderx_##_type *pdata = file->private_data; \
263 char buf[20]; \
264 \
265 sprintf(buf, "0x%016llx", readq(pdata->regs + _reg)); \
266 return simple_read_from_buffer(data, count, ppos, \
267 buf, sizeof(buf)); \
268 } \
269 \
270 static ssize_t thunderx_##_type##_##_name##_write(struct file *file, \
271 const char __user *data, \
272 size_t count, loff_t *ppos) \
273 { \
274 struct thunderx_##_type *pdata = file->private_data; \
275 u64 val; \
276 int res; \
277 \
278 res = kstrtoull_from_user(data, count, 0, &val); \
279 \
280 if (!res) { \
281 writeq(val, pdata->regs + _reg); \
282 res = count; \
283 } \
284 \
285 return res; \
286 } \
287 \
288 DEBUGFS_STRUCT(_name, 0600, \
289 thunderx_##_type##_##_name##_write, \
290 thunderx_##_type##_##_name##_read)
291
292 #define LMC_DEBUGFS_ENT(_field) DEBUGFS_FIELD_ATTR(lmc, _field)
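/*
 * For reference, LMC_DEBUGFS_ENT(mask0) expands (via DEBUGFS_FIELD_ATTR and
 * DEBUGFS_STRUCT) into thunderx_lmc_mask0_read()/thunderx_lmc_mask0_write()
 * plus a "debugfs_mask0" entry descriptor, i.e. a debugfs file backed by the
 * mask0 member of struct thunderx_lmc.
 */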
293
294 /*
295 * To inject an ECC error, the following steps are needed:
296 * - Setup the ECC injection by writing the appropriate parameters:
297 * echo <bit mask value> > /sys/kernel/debug/<device number>/ecc_mask0
298 * echo <bit mask value> > /sys/kernel/debug/<device number>/ecc_mask2
299 * echo 0x802 > /sys/kernel/debug/<device number>/ecc_parity_test
300 * - Do the actual injection:
301 * echo 1 > /sys/kernel/debug/<device number>/inject_ecc
302 */
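/*
 * The same sequence as a minimal user-space sketch (illustrative only; the
 * "<device number>" directory name depends on the EDAC instance, and the
 * mask values are placeholders for the data bits to corrupt):
 *
 *	#include <stdio.h>
 *
 *	static void wr(const char *name, const char *val)
 *	{
 *		char path[256];
 *		FILE *f;
 *
 *		snprintf(path, sizeof(path),
 *			 "/sys/kernel/debug/<device number>/%s", name);
 *		f = fopen(path, "w");
 *		if (f) {
 *			fputs(val, f);
 *			fclose(f);
 *		}
 *	}
 *
 *	int main(void)
 *	{
 *		wr("ecc_mask0", "0x1");
 *		wr("ecc_mask2", "0x0");
 *		wr("ecc_parity_test", "0x802");
 *		wr("inject_ecc", "1");
 *		return 0;
 *	}
 */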
303 static ssize_t thunderx_lmc_inject_int_write(struct file *file,
304 const char __user *data,
305 size_t count, loff_t *ppos)
306 {
307 struct thunderx_lmc *lmc = file->private_data;
308 u64 val;
309 int res;
310
311 res = kstrtoull_from_user(data, count, 0, &val);
312
313 if (!res) {
314 /* Trigger the interrupt */
315 writeq(val, lmc->regs + LMC_INT_W1S);
316 res = count;
317 }
318
319 return res;
320 }
321
322 static ssize_t thunderx_lmc_int_read(struct file *file,
323 char __user *data,
324 size_t count, loff_t *ppos)
325 {
326 struct thunderx_lmc *lmc = file->private_data;
327 char buf[20];
328 u64 lmc_int = readq(lmc->regs + LMC_INT);
329
330 snprintf(buf, sizeof(buf), "0x%016llx", lmc_int);
331 return simple_read_from_buffer(data, count, ppos, buf, sizeof(buf));
332 }
333
334 #define TEST_PATTERN 0xa5
335
336 static int inject_ecc_fn(void *arg)
337 {
338 struct thunderx_lmc *lmc = arg;
339 uintptr_t addr, phys;
340 unsigned int cline_size = cache_line_size();
341 const unsigned int lines = PAGE_SIZE / cline_size;
342 unsigned int i, cl_idx;
343
344 addr = (uintptr_t)page_address(lmc->mem);
345 phys = (uintptr_t)page_to_phys(lmc->mem);
346
347 cl_idx = (phys & 0x7f) >> 4;
348 lmc->parity_test &= ~(7ULL << 8);
349 lmc->parity_test |= (cl_idx << 8);
350
351 writeq(lmc->mask0, lmc->regs + LMC_CHAR_MASK0);
352 writeq(lmc->mask2, lmc->regs + LMC_CHAR_MASK2);
353 writeq(lmc->parity_test, lmc->regs + LMC_ECC_PARITY_TEST);
354
355 readq(lmc->regs + LMC_CHAR_MASK0);
356 readq(lmc->regs + LMC_CHAR_MASK2);
357 readq(lmc->regs + LMC_ECC_PARITY_TEST);
358
359 for (i = 0; i < lines; i++) {
360 memset((void *)addr, TEST_PATTERN, cline_size);
361 barrier();
362
363 /*
364 * Flush L1 cachelines to the PoC (L2).
365 * This will cause cacheline eviction to the L2.
366 */
367 asm volatile("dc civac, %0\n"
368 "dsb sy\n"
369 : : "r"(addr + i * cline_size));
370 }
371
372 for (i = 0; i < lines; i++) {
373 /*
374 * Flush L2 cachelines to the DRAM.
375 * This will cause cacheline eviction to the DRAM
376 * and ECC corruption according to the masks set.
377 */
378 __asm__ volatile("sys #0,c11,C1,#2, %0\n"
379 : : "r"(phys + i * cline_size));
380 }
381
382 for (i = 0; i < lines; i++) {
383 /*
384 * Invalidate L2 cachelines.
385 * The subsequent load will cause cacheline fetch
386 * from the DRAM and an error interrupt
387 */
388 __asm__ volatile("sys #0,c11,C1,#1, %0"
389 : : "r"(phys + i * cline_size));
390 }
391
392 for (i = 0; i < lines; i++) {
393 /*
394 * Invalidate L1 cachelines.
395 * The subsequent load will cause cacheline fetch
396 * from the L2 and/or DRAM
397 */
398 asm volatile("dc ivac, %0\n"
399 "dsb sy\n"
400 : : "r"(addr + i * cline_size));
401 }
402
403 return 0;
404 }
405
406 static ssize_t thunderx_lmc_inject_ecc_write(struct file *file,
407 const char __user *data,
408 size_t count, loff_t *ppos)
409 {
410 struct thunderx_lmc *lmc = file->private_data;
411
412 unsigned int cline_size = cache_line_size();
413
414 u8 tmp[cline_size];
415 void __iomem *addr;
416 unsigned int offs, timeout = 100000;
417
418 atomic_set(&lmc->ecc_int, 0);
419
420 lmc->mem = alloc_pages_node(lmc->node, GFP_KERNEL, 0);
421
422 if (!lmc->mem)
423 return -ENOMEM;
424
425 addr = page_address(lmc->mem);
426
427 while (!atomic_read(&lmc->ecc_int) && timeout--) {
428 stop_machine(inject_ecc_fn, lmc, NULL);
429
430 for (offs = 0; offs < PAGE_SIZE; offs += sizeof(tmp)) {
431 /*
432 * Do a load from the previously rigged location.
433 * This should generate an error interrupt.
434 */
435 memcpy(tmp, addr + offs, cline_size);
436 asm volatile("dsb ld\n");
437 }
438 }
439
440 __free_pages(lmc->mem, 0);
441
442 return count;
443 }
444
445 LMC_DEBUGFS_ENT(mask0);
446 LMC_DEBUGFS_ENT(mask2);
447 LMC_DEBUGFS_ENT(parity_test);
448
449 DEBUGFS_STRUCT(inject_int, 0200, thunderx_lmc_inject_int_write, NULL);
450 DEBUGFS_STRUCT(inject_ecc, 0200, thunderx_lmc_inject_ecc_write, NULL);
451 DEBUGFS_STRUCT(int_w1c, 0400, NULL, thunderx_lmc_int_read);
452
453 struct debugfs_entry *lmc_dfs_ents[] = {
454 &debugfs_mask0,
455 &debugfs_mask2,
456 &debugfs_parity_test,
457 &debugfs_inject_ecc,
458 &debugfs_inject_int,
459 &debugfs_int_w1c,
460 };
461
462 static int thunderx_create_debugfs_nodes(struct dentry *parent,
463 struct debugfs_entry *attrs[],
464 void *data,
465 size_t num)
466 {
467 int i;
468 struct dentry *ent;
469
470 if (!IS_ENABLED(CONFIG_EDAC_DEBUG))
471 return 0;
472
473 if (!parent)
474 return -ENOENT;
475
476 for (i = 0; i < num; i++) {
477 ent = edac_debugfs_create_file(attrs[i]->name, attrs[i]->mode,
478 parent, data, &attrs[i]->fops);
479
480 if (!ent)
481 break;
482 }
483
484 return i;
485 }
486
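/*
 * Rebuild a CPU physical address from the fault-address (FADR) fields, using
 * the DIMM/rank/bank/row/column bit positions computed in thunderx_lmc_probe()
 * and undoing the optional bank XOR and L2C index-aliasing hashes.
 */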
487 static phys_addr_t thunderx_faddr_to_phys(u64 faddr, struct thunderx_lmc *lmc)
488 {
489 phys_addr_t addr = 0;
490 int bank, xbits;
491
492 addr |= lmc->node << 40;
493 addr |= LMC_FADR_FDIMM(faddr) << lmc->dimm_lsb;
494 addr |= LMC_FADR_FBUNK(faddr) << lmc->rank_lsb;
495 addr |= LMC_FADR_FROW(faddr) << lmc->row_lsb;
496 addr |= (LMC_FADR_FCOL(faddr) >> 4) << lmc->col_hi_lsb;
497
498 bank = LMC_FADR_FBANK(faddr) << lmc->bank_lsb;
499
500 if (lmc->xor_bank)
501 bank ^= get_bits(addr, 12 + lmc->xbits, lmc->bank_width);
502
503 addr |= bank << lmc->bank_lsb;
504
505 xbits = PCI_FUNC(lmc->pdev->devfn);
506
507 if (lmc->l2c_alias)
508 xbits ^= get_bits(addr, 20, lmc->xbits) ^
509 get_bits(addr, 12, lmc->xbits);
510
511 addr |= xbits << 7;
512
513 return addr;
514 }
515
516 static unsigned int thunderx_get_num_lmcs(unsigned int node)
517 {
518 unsigned int number = 0;
519 struct pci_dev *pdev = NULL;
520
521 do {
522 pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
523 PCI_DEVICE_ID_THUNDER_LMC,
524 pdev);
525 if (pdev) {
526 #ifdef CONFIG_NUMA
527 if (pdev->dev.numa_node == node)
528 number++;
529 #else
530 number++;
531 #endif
532 }
533 } while (pdev);
534
535 return number;
536 }
537
538 #define LMC_MESSAGE_SIZE 120
539 #define LMC_OTHER_SIZE (50 * ARRAY_SIZE(lmc_errors))
540
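/*
 * Error handling is split in two: the hard IRQ handler below snapshots the
 * LMC error registers into a free slot of the err_ctx ring and acknowledges
 * the interrupt, while the threaded handler drains the ring and reports each
 * entry through edac_mc_handle_error() from sleepable context.
 */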
541 static irqreturn_t thunderx_lmc_err_isr(int irq, void *dev_id)
542 {
543 struct mem_ctl_info *mci = dev_id;
544 struct thunderx_lmc *lmc = mci->pvt_info;
545
546 unsigned long head = ring_pos(lmc->ring_head, ARRAY_SIZE(lmc->err_ctx));
547 struct lmc_err_ctx *ctx = &lmc->err_ctx[head];
548
549 writeq(0, lmc->regs + LMC_CHAR_MASK0);
550 writeq(0, lmc->regs + LMC_CHAR_MASK2);
551 writeq(0x2, lmc->regs + LMC_ECC_PARITY_TEST);
552
553 ctx->reg_int = readq(lmc->regs + LMC_INT);
554 ctx->reg_fadr = readq(lmc->regs + LMC_FADR);
555 ctx->reg_nxm_fadr = readq(lmc->regs + LMC_NXM_FADR);
556 ctx->reg_scram_fadr = readq(lmc->regs + LMC_SCRAM_FADR);
557 ctx->reg_ecc_synd = readq(lmc->regs + LMC_ECC_SYND);
558
559 lmc->ring_head++;
560
561 atomic_set(&lmc->ecc_int, 1);
562
563 /* Clear the interrupt */
564 writeq(ctx->reg_int, lmc->regs + LMC_INT);
565
566 return IRQ_WAKE_THREAD;
567 }
568
569 static irqreturn_t thunderx_lmc_threaded_isr(int irq, void *dev_id)
570 {
571 struct mem_ctl_info *mci = dev_id;
572 struct thunderx_lmc *lmc = mci->pvt_info;
573 phys_addr_t phys_addr;
574
575 unsigned long tail;
576 struct lmc_err_ctx *ctx;
577
578 irqreturn_t ret = IRQ_NONE;
579
580 char *msg;
581 char *other;
582
583 msg = kmalloc(LMC_MESSAGE_SIZE, GFP_KERNEL);
584 other = kmalloc(LMC_OTHER_SIZE, GFP_KERNEL);
585
586 if (!msg || !other)
587 goto err_free;
588
589 while (CIRC_CNT(lmc->ring_head, lmc->ring_tail,
590 ARRAY_SIZE(lmc->err_ctx))) {
591 tail = ring_pos(lmc->ring_tail, ARRAY_SIZE(lmc->err_ctx));
592
593 ctx = &lmc->err_ctx[tail];
594
595 dev_dbg(&lmc->pdev->dev, "LMC_INT: %016llx\n",
596 ctx->reg_int);
597 dev_dbg(&lmc->pdev->dev, "LMC_FADR: %016llx\n",
598 ctx->reg_fadr);
599 dev_dbg(&lmc->pdev->dev, "LMC_NXM_FADR: %016llx\n",
600 ctx->reg_nxm_fadr);
601 dev_dbg(&lmc->pdev->dev, "LMC_SCRAM_FADR: %016llx\n",
602 ctx->reg_scram_fadr);
603 dev_dbg(&lmc->pdev->dev, "LMC_ECC_SYND: %016llx\n",
604 ctx->reg_ecc_synd);
605
606 snprintf(msg, LMC_MESSAGE_SIZE,
607 "DIMM %lld rank %lld bank %lld row %lld col %lld",
608 LMC_FADR_FDIMM(ctx->reg_scram_fadr),
609 LMC_FADR_FBUNK(ctx->reg_scram_fadr),
610 LMC_FADR_FBANK(ctx->reg_scram_fadr),
611 LMC_FADR_FROW(ctx->reg_scram_fadr),
612 LMC_FADR_FCOL(ctx->reg_scram_fadr));
613
614 decode_register(other, LMC_OTHER_SIZE, lmc_errors,
615 ctx->reg_int);
616
617 phys_addr = thunderx_faddr_to_phys(ctx->reg_fadr, lmc);
618
619 if (ctx->reg_int & LMC_INT_UE)
620 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
621 phys_to_pfn(phys_addr),
622 offset_in_page(phys_addr),
623 0, -1, -1, -1, msg, other);
624 else if (ctx->reg_int & LMC_INT_CE)
625 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
626 phys_to_pfn(phys_addr),
627 offset_in_page(phys_addr),
628 0, -1, -1, -1, msg, other);
629
630 lmc->ring_tail++;
631 }
632
633 ret = IRQ_HANDLED;
634
635 err_free:
636 kfree(msg);
637 kfree(other);
638
639 return ret;
640 }
641
642 #ifdef CONFIG_PM
643 static int thunderx_lmc_suspend(struct pci_dev *pdev, pm_message_t state)
644 {
645 pci_save_state(pdev);
646 pci_disable_device(pdev);
647
648 pci_set_power_state(pdev, pci_choose_state(pdev, state));
649
650 return 0;
651 }
652
653 static int thunderx_lmc_resume(struct pci_dev *pdev)
654 {
655 pci_set_power_state(pdev, PCI_D0);
656 pci_enable_wake(pdev, PCI_D0, 0);
657 pci_restore_state(pdev);
658
659 return 0;
660 }
661 #endif
662
663 static const struct pci_device_id thunderx_lmc_pci_tbl[] = {
664 { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_LMC) },
665 { 0, },
666 };
667
668 static inline int pci_dev_to_mc_idx(struct pci_dev *pdev)
669 {
670 int node = dev_to_node(&pdev->dev);
671 int ret = PCI_FUNC(pdev->devfn);
672
673 ret += max(node, 0) << 3;
674
675 return ret;
676 }
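/*
 * The MC index packs the NUMA node above the PCI function number, e.g. LMC
 * function 2 on node 1 gets index (1 << 3) + 2 == 10 (illustrative values).
 */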
677
678 static int thunderx_lmc_probe(struct pci_dev *pdev,
679 const struct pci_device_id *id)
680 {
681 struct thunderx_lmc *lmc;
682 struct edac_mc_layer layer;
683 struct mem_ctl_info *mci;
684 u64 lmc_control, lmc_ddr_pll_ctl, lmc_config;
685 int ret;
686 u64 lmc_int;
687 void *l2c_ioaddr;
688
689 layer.type = EDAC_MC_LAYER_SLOT;
690 layer.size = 2;
691 layer.is_virt_csrow = false;
692
693 ret = pcim_enable_device(pdev);
694 if (ret) {
695 dev_err(&pdev->dev, "Cannot enable PCI device: %d\n", ret);
696 return ret;
697 }
698
699 ret = pcim_iomap_regions(pdev, BIT(0), "thunderx_lmc");
700 if (ret) {
701 dev_err(&pdev->dev, "Cannot map PCI resources: %d\n", ret);
702 return ret;
703 }
704
705 mci = edac_mc_alloc(pci_dev_to_mc_idx(pdev), 1, &layer,
706 sizeof(struct thunderx_lmc));
707 if (!mci)
708 return -ENOMEM;
709
710 mci->pdev = &pdev->dev;
711 lmc = mci->pvt_info;
712
713 pci_set_drvdata(pdev, mci);
714
715 lmc->regs = pcim_iomap_table(pdev)[0];
716
717 lmc_control = readq(lmc->regs + LMC_CONTROL);
718 lmc_ddr_pll_ctl = readq(lmc->regs + LMC_DDR_PLL_CTL);
719 lmc_config = readq(lmc->regs + LMC_CONFIG);
720
721 if (lmc_control & LMC_CONTROL_RDIMM) {
722 mci->mtype_cap = FIELD_GET(LMC_DDR_PLL_CTL_DDR4,
723 lmc_ddr_pll_ctl) ?
724 MEM_RDDR4 : MEM_RDDR3;
725 } else {
726 mci->mtype_cap = FIELD_GET(LMC_DDR_PLL_CTL_DDR4,
727 lmc_ddr_pll_ctl) ?
728 MEM_DDR4 : MEM_DDR3;
729 }
730
731 mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
732 mci->edac_cap = EDAC_FLAG_SECDED;
733
734 mci->mod_name = "thunderx-lmc";
735 mci->mod_ver = "1";
736 mci->ctl_name = "thunderx-lmc";
737 mci->dev_name = dev_name(&pdev->dev);
738 mci->scrub_mode = SCRUB_NONE;
739
740 lmc->pdev = pdev;
741 lmc->msix_ent.entry = 0;
742
743 lmc->ring_head = 0;
744 lmc->ring_tail = 0;
745
746 ret = pci_enable_msix_exact(pdev, &lmc->msix_ent, 1);
747 if (ret) {
748 dev_err(&pdev->dev, "Cannot enable interrupt: %d\n", ret);
749 goto err_free;
750 }
751
752 ret = devm_request_threaded_irq(&pdev->dev, lmc->msix_ent.vector,
753 thunderx_lmc_err_isr,
754 thunderx_lmc_threaded_isr, 0,
755 "[EDAC] ThunderX LMC", mci);
756 if (ret) {
757 dev_err(&pdev->dev, "Cannot set ISR: %d\n", ret);
758 goto err_free;
759 }
760
761 lmc->node = FIELD_GET(THUNDERX_NODE, pci_resource_start(pdev, 0));
762
763 lmc->xbits = thunderx_get_num_lmcs(lmc->node) >> 1;
764 lmc->bank_width = (FIELD_GET(LMC_DDR_PLL_CTL_DDR4, lmc_ddr_pll_ctl) &&
765 FIELD_GET(LMC_CONFIG_BG2, lmc_config)) ? 4 : 3;
766
767 lmc->pbank_lsb = (lmc_config >> 5) & 0xf;
768 lmc->dimm_lsb = 28 + lmc->pbank_lsb + lmc->xbits;
769 lmc->rank_lsb = lmc->dimm_lsb;
770 lmc->rank_lsb -= FIELD_GET(LMC_CONFIG_RANK_ENA, lmc_config) ? 1 : 0;
771 lmc->bank_lsb = 7 + lmc->xbits;
772 lmc->row_lsb = 14 + LMC_CONFIG_ROW_LSB(lmc_config) + lmc->xbits;
773
774 lmc->col_hi_lsb = lmc->bank_lsb + lmc->bank_width;
775
776 lmc->xor_bank = lmc_control & LMC_CONTROL_XOR_BANK;
777
778 l2c_ioaddr = ioremap(L2C_CTL | FIELD_PREP(THUNDERX_NODE, lmc->node),
779 PAGE_SIZE);
780
781 if (!l2c_ioaddr) {
782 dev_err(&pdev->dev, "Cannot map L2C_CTL\n");
783 ret = -ENOMEM;
goto err_free;
784 }
785
786 lmc->l2c_alias = !(readq(l2c_ioaddr) & L2C_CTL_DISIDXALIAS);
787
788 iounmap(l2c_ioaddr);
789
790 ret = edac_mc_add_mc(mci);
791 if (ret) {
792 dev_err(&pdev->dev, "Cannot add the MC: %d\n", ret);
793 goto err_free;
794 }
795
796 lmc_int = readq(lmc->regs + LMC_INT);
797 writeq(lmc_int, lmc->regs + LMC_INT);
798
799 writeq(LMC_INT_ENA_ALL, lmc->regs + LMC_INT_ENA_W1S);
800
801 if (IS_ENABLED(CONFIG_EDAC_DEBUG)) {
802 ret = thunderx_create_debugfs_nodes(mci->debugfs,
803 lmc_dfs_ents,
804 lmc,
805 ARRAY_SIZE(lmc_dfs_ents));
806
807 if (ret != ARRAY_SIZE(lmc_dfs_ents)) {
808 dev_warn(&pdev->dev, "Error creating debugfs entries: %d%s\n",
809 ret, ret >= 0 ? " created" : "");
810 }
811 }
812
813 return 0;
814
815 err_free:
816 pci_set_drvdata(pdev, NULL);
817 edac_mc_free(mci);
818
819 return ret;
820 }
821
822 static void thunderx_lmc_remove(struct pci_dev *pdev)
823 {
824 struct mem_ctl_info *mci = pci_get_drvdata(pdev);
825 struct thunderx_lmc *lmc = mci->pvt_info;
826
827 writeq(LMC_INT_ENA_ALL, lmc->regs + LMC_INT_ENA_W1C);
828
829 edac_mc_del_mc(&pdev->dev);
830 edac_mc_free(mci);
831 }
832
833 MODULE_DEVICE_TABLE(pci, thunderx_lmc_pci_tbl);
834
835 static struct pci_driver thunderx_lmc_driver = {
836 .name = "thunderx_lmc_edac",
837 .probe = thunderx_lmc_probe,
838 .remove = thunderx_lmc_remove,
839 #ifdef CONFIG_PM
840 .suspend = thunderx_lmc_suspend,
841 .resume = thunderx_lmc_resume,
842 #endif
843 .id_table = thunderx_lmc_pci_tbl,
844 };
845
846 /*---------------------- OCX driver ---------------------------------*/
847
848 #define PCI_DEVICE_ID_THUNDER_OCX 0xa013
849
850 #define OCX_LINK_INTS 3
851 #define OCX_INTS (OCX_LINK_INTS + 1)
852 #define OCX_RX_LANES 24
853 #define OCX_RX_LANE_STATS 15
854
855 #define OCX_COM_INT 0x100
856 #define OCX_COM_INT_W1S 0x108
857 #define OCX_COM_INT_ENA_W1S 0x110
858 #define OCX_COM_INT_ENA_W1C 0x118
859
860 #define OCX_COM_IO_BADID BIT(54)
861 #define OCX_COM_MEM_BADID BIT(53)
862 #define OCX_COM_COPR_BADID BIT(52)
863 #define OCX_COM_WIN_REQ_BADID BIT(51)
864 #define OCX_COM_WIN_REQ_TOUT BIT(50)
865 #define OCX_COM_RX_LANE GENMASK(23, 0)
866
867 #define OCX_COM_INT_CE (OCX_COM_IO_BADID | \
868 OCX_COM_MEM_BADID | \
869 OCX_COM_COPR_BADID | \
870 OCX_COM_WIN_REQ_BADID | \
871 OCX_COM_WIN_REQ_TOUT)
872
873 static const struct error_descr ocx_com_errors[] = {
874 {
875 .type = ERR_CORRECTED,
876 .mask = OCX_COM_IO_BADID,
877 .descr = "Invalid IO transaction node ID",
878 },
879 {
880 .type = ERR_CORRECTED,
881 .mask = OCX_COM_MEM_BADID,
882 .descr = "Invalid memory transaction node ID",
883 },
884 {
885 .type = ERR_CORRECTED,
886 .mask = OCX_COM_COPR_BADID,
887 .descr = "Invalid coprocessor transaction node ID",
888 },
889 {
890 .type = ERR_CORRECTED,
891 .mask = OCX_COM_WIN_REQ_BADID,
892 .descr = "Invalid SLI transaction node ID",
893 },
894 {
895 .type = ERR_CORRECTED,
896 .mask = OCX_COM_WIN_REQ_TOUT,
897 .descr = "Window/core request timeout",
898 },
899 {0, 0, NULL},
900 };
901
902 #define OCX_COM_LINKX_INT(x) (0x120 + (x) * 8)
903 #define OCX_COM_LINKX_INT_W1S(x) (0x140 + (x) * 8)
904 #define OCX_COM_LINKX_INT_ENA_W1S(x) (0x160 + (x) * 8)
905 #define OCX_COM_LINKX_INT_ENA_W1C(x) (0x180 + (x) * 8)
906
907 #define OCX_COM_LINK_BAD_WORD BIT(13)
908 #define OCX_COM_LINK_ALIGN_FAIL BIT(12)
909 #define OCX_COM_LINK_ALIGN_DONE BIT(11)
910 #define OCX_COM_LINK_UP BIT(10)
911 #define OCX_COM_LINK_STOP BIT(9)
912 #define OCX_COM_LINK_BLK_ERR BIT(8)
913 #define OCX_COM_LINK_REINIT BIT(7)
914 #define OCX_COM_LINK_LNK_DATA BIT(6)
915 #define OCX_COM_LINK_RXFIFO_DBE BIT(5)
916 #define OCX_COM_LINK_RXFIFO_SBE BIT(4)
917 #define OCX_COM_LINK_TXFIFO_DBE BIT(3)
918 #define OCX_COM_LINK_TXFIFO_SBE BIT(2)
919 #define OCX_COM_LINK_REPLAY_DBE BIT(1)
920 #define OCX_COM_LINK_REPLAY_SBE BIT(0)
921
922 static const struct error_descr ocx_com_link_errors[] = {
923 {
924 .type = ERR_CORRECTED,
925 .mask = OCX_COM_LINK_REPLAY_SBE,
926 .descr = "Replay buffer single-bit error",
927 },
928 {
929 .type = ERR_CORRECTED,
930 .mask = OCX_COM_LINK_TXFIFO_SBE,
931 .descr = "TX FIFO single-bit error",
932 },
933 {
934 .type = ERR_CORRECTED,
935 .mask = OCX_COM_LINK_RXFIFO_SBE,
936 .descr = "RX FIFO single-bit error",
937 },
938 {
939 .type = ERR_CORRECTED,
940 .mask = OCX_COM_LINK_BLK_ERR,
941 .descr = "Block code error",
942 },
943 {
944 .type = ERR_CORRECTED,
945 .mask = OCX_COM_LINK_ALIGN_FAIL,
946 .descr = "Link alignment failure",
947 },
948 {
949 .type = ERR_CORRECTED,
950 .mask = OCX_COM_LINK_BAD_WORD,
951 .descr = "Bad code word",
952 },
953 {
954 .type = ERR_UNCORRECTED,
955 .mask = OCX_COM_LINK_REPLAY_DBE,
956 .descr = "Replay buffer double-bit error",
957 },
958 {
959 .type = ERR_UNCORRECTED,
960 .mask = OCX_COM_LINK_TXFIFO_DBE,
961 .descr = "TX FIFO double-bit error",
962 },
963 {
964 .type = ERR_UNCORRECTED,
965 .mask = OCX_COM_LINK_RXFIFO_DBE,
966 .descr = "RX FIFO double-bit error",
967 },
968 {
969 .type = ERR_UNCORRECTED,
970 .mask = OCX_COM_LINK_STOP,
971 .descr = "Link stopped",
972 },
973 {0, 0, NULL},
974 };
975
976 #define OCX_COM_LINK_INT_UE (OCX_COM_LINK_REPLAY_DBE | \
977 OCX_COM_LINK_TXFIFO_DBE | \
978 OCX_COM_LINK_RXFIFO_DBE | \
979 OCX_COM_LINK_STOP)
980
981 #define OCX_COM_LINK_INT_CE (OCX_COM_LINK_REPLAY_SBE | \
982 OCX_COM_LINK_TXFIFO_SBE | \
983 OCX_COM_LINK_RXFIFO_SBE | \
984 OCX_COM_LINK_BLK_ERR | \
985 OCX_COM_LINK_ALIGN_FAIL | \
986 OCX_COM_LINK_BAD_WORD)
987
988 #define OCX_LNE_INT(x) (0x8018 + (x) * 0x100)
989 #define OCX_LNE_INT_EN(x) (0x8020 + (x) * 0x100)
990 #define OCX_LNE_BAD_CNT(x) (0x8028 + (x) * 0x100)
991 #define OCX_LNE_CFG(x) (0x8000 + (x) * 0x100)
992 #define OCX_LNE_STAT(x, y) (0x8040 + (x) * 0x100 + (y) * 8)
993
994 #define OCX_LNE_CFG_RX_BDRY_LOCK_DIS BIT(8)
995 #define OCX_LNE_CFG_RX_STAT_WRAP_DIS BIT(2)
996 #define OCX_LNE_CFG_RX_STAT_RDCLR BIT(1)
997 #define OCX_LNE_CFG_RX_STAT_ENA BIT(0)
998
999
1000 #define OCX_LANE_BAD_64B67B BIT(8)
1001 #define OCX_LANE_DSKEW_FIFO_OVFL BIT(5)
1002 #define OCX_LANE_SCRM_SYNC_LOSS BIT(4)
1003 #define OCX_LANE_UKWN_CNTL_WORD BIT(3)
1004 #define OCX_LANE_CRC32_ERR BIT(2)
1005 #define OCX_LANE_BDRY_SYNC_LOSS BIT(1)
1006 #define OCX_LANE_SERDES_LOCK_LOSS BIT(0)
1007
1008 #define OCX_COM_LANE_INT_UE (0)
1009 #define OCX_COM_LANE_INT_CE (OCX_LANE_SERDES_LOCK_LOSS | \
1010 OCX_LANE_BDRY_SYNC_LOSS | \
1011 OCX_LANE_CRC32_ERR | \
1012 OCX_LANE_UKWN_CNTL_WORD | \
1013 OCX_LANE_SCRM_SYNC_LOSS | \
1014 OCX_LANE_DSKEW_FIFO_OVFL | \
1015 OCX_LANE_BAD_64B67B)
1016
1017 static const struct error_descr ocx_lane_errors[] = {
1018 {
1019 .type = ERR_CORRECTED,
1020 .mask = OCX_LANE_SERDES_LOCK_LOSS,
1021 .descr = "RX SerDes lock lost",
1022 },
1023 {
1024 .type = ERR_CORRECTED,
1025 .mask = OCX_LANE_BDRY_SYNC_LOSS,
1026 .descr = "RX word boundary lost",
1027 },
1028 {
1029 .type = ERR_CORRECTED,
1030 .mask = OCX_LANE_CRC32_ERR,
1031 .descr = "CRC32 error",
1032 },
1033 {
1034 .type = ERR_CORRECTED,
1035 .mask = OCX_LANE_UKWN_CNTL_WORD,
1036 .descr = "Unknown control word",
1037 },
1038 {
1039 .type = ERR_CORRECTED,
1040 .mask = OCX_LANE_SCRM_SYNC_LOSS,
1041 .descr = "Scrambler synchronization lost",
1042 },
1043 {
1044 .type = ERR_CORRECTED,
1045 .mask = OCX_LANE_DSKEW_FIFO_OVFL,
1046 .descr = "RX deskew FIFO overflow",
1047 },
1048 {
1049 .type = ERR_CORRECTED,
1050 .mask = OCX_LANE_BAD_64B67B,
1051 .descr = "Bad 64B/67B codeword",
1052 },
1053 {0, 0, NULL},
1054 };
1055
1056 #define OCX_LNE_INT_ENA_ALL (GENMASK(9, 8) | GENMASK(6, 0))
1057 #define OCX_COM_INT_ENA_ALL (GENMASK(54, 50) | GENMASK(23, 0))
1058 #define OCX_COM_LINKX_INT_ENA_ALL (GENMASK(13, 12) | \
1059 GENMASK(9, 7) | GENMASK(5, 0))
1060
1061 #define OCX_TLKX_ECC_CTL(x) (0x10018 + (x) * 0x2000)
1062 #define OCX_RLKX_ECC_CTL(x) (0x18018 + (x) * 0x2000)
1063
1064 struct ocx_com_err_ctx {
1065 u64 reg_com_int;
1066 u64 reg_lane_int[OCX_RX_LANES];
1067 u64 reg_lane_stat11[OCX_RX_LANES];
1068 };
1069
1070 struct ocx_link_err_ctx {
1071 u64 reg_com_link_int;
1072 int link;
1073 };
1074
1075 struct thunderx_ocx {
1076 void __iomem *regs;
1077 int com_link;
1078 struct pci_dev *pdev;
1079 struct edac_device_ctl_info *edac_dev;
1080
1081 struct dentry *debugfs;
1082 struct msix_entry msix_ent[OCX_INTS];
1083
1084 struct ocx_com_err_ctx com_err_ctx[RING_ENTRIES];
1085 struct ocx_link_err_ctx link_err_ctx[RING_ENTRIES];
1086
1087 unsigned long com_ring_head;
1088 unsigned long com_ring_tail;
1089
1090 unsigned long link_ring_head;
1091 unsigned long link_ring_tail;
1092 };
1093
1094 #define OCX_MESSAGE_SIZE SZ_1K
1095 #define OCX_OTHER_SIZE (50 * ARRAY_SIZE(ocx_com_link_errors))
1096
1097 /* Hard IRQ handler: snapshot the OCX error state, then wake the threaded handler */
1098 static irqreturn_t thunderx_ocx_com_isr(int irq, void *irq_id)
1099 {
1100 struct msix_entry *msix = irq_id;
1101 struct thunderx_ocx *ocx = container_of(msix, struct thunderx_ocx,
1102 msix_ent[msix->entry]);
1103
1104 int lane;
1105 unsigned long head = ring_pos(ocx->com_ring_head,
1106 ARRAY_SIZE(ocx->com_err_ctx));
1107 struct ocx_com_err_ctx *ctx = &ocx->com_err_ctx[head];
1108
1109 ctx->reg_com_int = readq(ocx->regs + OCX_COM_INT);
1110
1111 for (lane = 0; lane < OCX_RX_LANES; lane++) {
1112 ctx->reg_lane_int[lane] =
1113 readq(ocx->regs + OCX_LNE_INT(lane));
1114 ctx->reg_lane_stat11[lane] =
1115 readq(ocx->regs + OCX_LNE_STAT(lane, 11));
1116
1117 writeq(ctx->reg_lane_int[lane], ocx->regs + OCX_LNE_INT(lane));
1118 }
1119
1120 writeq(ctx->reg_com_int, ocx->regs + OCX_COM_INT);
1121
1122 ocx->com_ring_head++;
1123
1124 return IRQ_WAKE_THREAD;
1125 }
1126
1127 static irqreturn_t thunderx_ocx_com_threaded_isr(int irq, void *irq_id)
1128 {
1129 struct msix_entry *msix = irq_id;
1130 struct thunderx_ocx *ocx = container_of(msix, struct thunderx_ocx,
1131 msix_ent[msix->entry]);
1132
1133 irqreturn_t ret = IRQ_NONE;
1134
1135 unsigned long tail;
1136 struct ocx_com_err_ctx *ctx;
1137 int lane;
1138 char *msg;
1139 char *other;
1140
1141 msg = kmalloc(OCX_MESSAGE_SIZE, GFP_KERNEL);
1142 other = kmalloc(OCX_OTHER_SIZE, GFP_KERNEL);
1143
1144 if (!msg || !other)
1145 goto err_free;
1146
1147 while (CIRC_CNT(ocx->com_ring_head, ocx->com_ring_tail,
1148 ARRAY_SIZE(ocx->com_err_ctx))) {
1149 tail = ring_pos(ocx->com_ring_tail,
1150 ARRAY_SIZE(ocx->com_err_ctx));
1151 ctx = &ocx->com_err_ctx[tail];
1152
1153 snprintf(msg, OCX_MESSAGE_SIZE, "%s: OCX_COM_INT: %016llx",
1154 ocx->edac_dev->ctl_name, ctx->reg_com_int);
1155
1156 decode_register(other, OCX_OTHER_SIZE,
1157 ocx_com_errors, ctx->reg_com_int);
1158
1159 strncat(msg, other, OCX_MESSAGE_SIZE);
1160
1161 for (lane = 0; lane < OCX_RX_LANES; lane++)
1162 if (ctx->reg_com_int & BIT(lane)) {
1163 snprintf(other, OCX_OTHER_SIZE,
1164 "\n\tOCX_LNE_INT[%02d]: %016llx OCX_LNE_STAT11[%02d]: %016llx",
1165 lane, ctx->reg_lane_int[lane],
1166 lane, ctx->reg_lane_stat11[lane]);
1167
1168 strncat(msg, other, OCX_MESSAGE_SIZE);
1169
1170 decode_register(other, OCX_OTHER_SIZE,
1171 ocx_lane_errors,
1172 ctx->reg_lane_int[lane]);
1173 strncat(msg, other, OCX_MESSAGE_SIZE);
1174 }
1175
1176 if (ctx->reg_com_int & OCX_COM_INT_CE)
1177 edac_device_handle_ce(ocx->edac_dev, 0, 0, msg);
1178
1179 ocx->com_ring_tail++;
1180 }
1181
1182 ret = IRQ_HANDLED;
1183
1184 err_free:
1185 kfree(other);
1186 kfree(msg);
1187
1188 return ret;
1189 }
1190
1191 static irqreturn_t thunderx_ocx_lnk_isr(int irq, void *irq_id)
1192 {
1193 struct msix_entry *msix = irq_id;
1194 struct thunderx_ocx *ocx = container_of(msix, struct thunderx_ocx,
1195 msix_ent[msix->entry]);
1196 unsigned long head = ring_pos(ocx->link_ring_head,
1197 ARRAY_SIZE(ocx->link_err_ctx));
1198 struct ocx_link_err_ctx *ctx = &ocx->link_err_ctx[head];
1199
1200 ctx->link = msix->entry;
1201 ctx->reg_com_link_int = readq(ocx->regs + OCX_COM_LINKX_INT(ctx->link));
1202
1203 writeq(ctx->reg_com_link_int, ocx->regs + OCX_COM_LINKX_INT(ctx->link));
1204
1205 ocx->link_ring_head++;
1206
1207 return IRQ_WAKE_THREAD;
1208 }
1209
1210 static irqreturn_t thunderx_ocx_lnk_threaded_isr(int irq, void *irq_id)
1211 {
1212 struct msix_entry *msix = irq_id;
1213 struct thunderx_ocx *ocx = container_of(msix, struct thunderx_ocx,
1214 msix_ent[msix->entry]);
1215 irqreturn_t ret = IRQ_NONE;
1216 unsigned long tail;
1217 struct ocx_link_err_ctx *ctx;
1218
1219 char *msg;
1220 char *other;
1221
1222 msg = kmalloc(OCX_MESSAGE_SIZE, GFP_KERNEL);
1223 other = kmalloc(OCX_OTHER_SIZE, GFP_KERNEL);
1224
1225 if (!msg || !other)
1226 goto err_free;
1227
1228 while (CIRC_CNT(ocx->link_ring_head, ocx->link_ring_tail,
1229 ARRAY_SIZE(ocx->link_err_ctx))) {
1230 tail = ring_pos(ocx->link_ring_tail,
1231 ARRAY_SIZE(ocx->link_err_ctx));
1232
1233 ctx = &ocx->link_err_ctx[tail];
1234
1235 snprintf(msg, OCX_MESSAGE_SIZE,
1236 "%s: OCX_COM_LINK_INT[%d]: %016llx",
1237 ocx->edac_dev->ctl_name,
1238 ctx->link, ctx->reg_com_link_int);
1239
1240 decode_register(other, OCX_OTHER_SIZE,
1241 ocx_com_link_errors, ctx->reg_com_link_int);
1242
1243 strncat(msg, other, OCX_MESSAGE_SIZE);
1244
1245 if (ctx->reg_com_link_int & OCX_COM_LINK_INT_UE)
1246 edac_device_handle_ue(ocx->edac_dev, 0, 0, msg);
1247 else if (ctx->reg_com_link_int & OCX_COM_LINK_INT_CE)
1248 edac_device_handle_ce(ocx->edac_dev, 0, 0, msg);
1249
1250 ocx->link_ring_tail++;
1251 }
1252
1253 ret = IRQ_HANDLED;
1254 err_free:
1255 kfree(other);
1256 kfree(msg);
1257
1258 return ret;
1259 }
1260
1261 #define OCX_DEBUGFS_ATTR(_name, _reg) DEBUGFS_REG_ATTR(ocx, _name, _reg)
1262
1263 OCX_DEBUGFS_ATTR(tlk0_ecc_ctl, OCX_TLKX_ECC_CTL(0));
1264 OCX_DEBUGFS_ATTR(tlk1_ecc_ctl, OCX_TLKX_ECC_CTL(1));
1265 OCX_DEBUGFS_ATTR(tlk2_ecc_ctl, OCX_TLKX_ECC_CTL(2));
1266
1267 OCX_DEBUGFS_ATTR(rlk0_ecc_ctl, OCX_RLKX_ECC_CTL(0));
1268 OCX_DEBUGFS_ATTR(rlk1_ecc_ctl, OCX_RLKX_ECC_CTL(1));
1269 OCX_DEBUGFS_ATTR(rlk2_ecc_ctl, OCX_RLKX_ECC_CTL(2));
1270
1271 OCX_DEBUGFS_ATTR(com_link0_int, OCX_COM_LINKX_INT_W1S(0));
1272 OCX_DEBUGFS_ATTR(com_link1_int, OCX_COM_LINKX_INT_W1S(1));
1273 OCX_DEBUGFS_ATTR(com_link2_int, OCX_COM_LINKX_INT_W1S(2));
1274
1275 OCX_DEBUGFS_ATTR(lne00_badcnt, OCX_LNE_BAD_CNT(0));
1276 OCX_DEBUGFS_ATTR(lne01_badcnt, OCX_LNE_BAD_CNT(1));
1277 OCX_DEBUGFS_ATTR(lne02_badcnt, OCX_LNE_BAD_CNT(2));
1278 OCX_DEBUGFS_ATTR(lne03_badcnt, OCX_LNE_BAD_CNT(3));
1279 OCX_DEBUGFS_ATTR(lne04_badcnt, OCX_LNE_BAD_CNT(4));
1280 OCX_DEBUGFS_ATTR(lne05_badcnt, OCX_LNE_BAD_CNT(5));
1281 OCX_DEBUGFS_ATTR(lne06_badcnt, OCX_LNE_BAD_CNT(6));
1282 OCX_DEBUGFS_ATTR(lne07_badcnt, OCX_LNE_BAD_CNT(7));
1283
1284 OCX_DEBUGFS_ATTR(lne08_badcnt, OCX_LNE_BAD_CNT(8));
1285 OCX_DEBUGFS_ATTR(lne09_badcnt, OCX_LNE_BAD_CNT(9));
1286 OCX_DEBUGFS_ATTR(lne10_badcnt, OCX_LNE_BAD_CNT(10));
1287 OCX_DEBUGFS_ATTR(lne11_badcnt, OCX_LNE_BAD_CNT(11));
1288 OCX_DEBUGFS_ATTR(lne12_badcnt, OCX_LNE_BAD_CNT(12));
1289 OCX_DEBUGFS_ATTR(lne13_badcnt, OCX_LNE_BAD_CNT(13));
1290 OCX_DEBUGFS_ATTR(lne14_badcnt, OCX_LNE_BAD_CNT(14));
1291 OCX_DEBUGFS_ATTR(lne15_badcnt, OCX_LNE_BAD_CNT(15));
1292
1293 OCX_DEBUGFS_ATTR(lne16_badcnt, OCX_LNE_BAD_CNT(16));
1294 OCX_DEBUGFS_ATTR(lne17_badcnt, OCX_LNE_BAD_CNT(17));
1295 OCX_DEBUGFS_ATTR(lne18_badcnt, OCX_LNE_BAD_CNT(18));
1296 OCX_DEBUGFS_ATTR(lne19_badcnt, OCX_LNE_BAD_CNT(19));
1297 OCX_DEBUGFS_ATTR(lne20_badcnt, OCX_LNE_BAD_CNT(20));
1298 OCX_DEBUGFS_ATTR(lne21_badcnt, OCX_LNE_BAD_CNT(21));
1299 OCX_DEBUGFS_ATTR(lne22_badcnt, OCX_LNE_BAD_CNT(22));
1300 OCX_DEBUGFS_ATTR(lne23_badcnt, OCX_LNE_BAD_CNT(23));
1301
1302 OCX_DEBUGFS_ATTR(com_int, OCX_COM_INT_W1S);
1303
1304 struct debugfs_entry *ocx_dfs_ents[] = {
1305 &debugfs_tlk0_ecc_ctl,
1306 &debugfs_tlk1_ecc_ctl,
1307 &debugfs_tlk2_ecc_ctl,
1308
1309 &debugfs_rlk0_ecc_ctl,
1310 &debugfs_rlk1_ecc_ctl,
1311 &debugfs_rlk2_ecc_ctl,
1312
1313 &debugfs_com_link0_int,
1314 &debugfs_com_link1_int,
1315 &debugfs_com_link2_int,
1316
1317 &debugfs_lne00_badcnt,
1318 &debugfs_lne01_badcnt,
1319 &debugfs_lne02_badcnt,
1320 &debugfs_lne03_badcnt,
1321 &debugfs_lne04_badcnt,
1322 &debugfs_lne05_badcnt,
1323 &debugfs_lne06_badcnt,
1324 &debugfs_lne07_badcnt,
1325 &debugfs_lne08_badcnt,
1326 &debugfs_lne09_badcnt,
1327 &debugfs_lne10_badcnt,
1328 &debugfs_lne11_badcnt,
1329 &debugfs_lne12_badcnt,
1330 &debugfs_lne13_badcnt,
1331 &debugfs_lne14_badcnt,
1332 &debugfs_lne15_badcnt,
1333 &debugfs_lne16_badcnt,
1334 &debugfs_lne17_badcnt,
1335 &debugfs_lne18_badcnt,
1336 &debugfs_lne19_badcnt,
1337 &debugfs_lne20_badcnt,
1338 &debugfs_lne21_badcnt,
1339 &debugfs_lne22_badcnt,
1340 &debugfs_lne23_badcnt,
1341
1342 &debugfs_com_int,
1343 };
1344
1345 static const struct pci_device_id thunderx_ocx_pci_tbl[] = {
1346 { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_OCX) },
1347 { 0, },
1348 };
1349
1350 static void thunderx_ocx_clearstats(struct thunderx_ocx *ocx)
1351 {
1352 int lane, stat, cfg;
1353
1354 for (lane = 0; lane < OCX_RX_LANES; lane++) {
1355 cfg = readq(ocx->regs + OCX_LNE_CFG(lane));
1356 cfg |= OCX_LNE_CFG_RX_STAT_RDCLR;
1357 cfg &= ~OCX_LNE_CFG_RX_STAT_ENA;
1358 writeq(cfg, ocx->regs + OCX_LNE_CFG(lane));
1359
1360 for (stat = 0; stat < OCX_RX_LANE_STATS; stat++)
1361 readq(ocx->regs + OCX_LNE_STAT(lane, stat));
1362 }
1363 }
1364
1365 static int thunderx_ocx_probe(struct pci_dev *pdev,
1366 const struct pci_device_id *id)
1367 {
1368 struct thunderx_ocx *ocx;
1369 struct edac_device_ctl_info *edac_dev;
1370 char name[32];
1371 int idx;
1372 int i;
1373 int ret;
1374 u64 reg;
1375
1376 ret = pcim_enable_device(pdev);
1377 if (ret) {
1378 dev_err(&pdev->dev, "Cannot enable PCI device: %d\n", ret);
1379 return ret;
1380 }
1381
1382 ret = pcim_iomap_regions(pdev, BIT(0), "thunderx_ocx");
1383 if (ret) {
1384 dev_err(&pdev->dev, "Cannot map PCI resources: %d\n", ret);
1385 return ret;
1386 }
1387
1388 idx = edac_device_alloc_index();
1389 snprintf(name, sizeof(name), "OCX%d", idx);
1390 edac_dev = edac_device_alloc_ctl_info(sizeof(struct thunderx_ocx),
1391 name, 1, "CCPI", 1,
1392 0, NULL, 0, idx);
1393 if (!edac_dev) {
1394 dev_err(&pdev->dev, "Cannot allocate EDAC device\n");
1395 return -ENOMEM;
1396 }
1397 ocx = edac_dev->pvt_info;
1398 ocx->edac_dev = edac_dev;
1399 ocx->com_ring_head = 0;
1400 ocx->com_ring_tail = 0;
1401 ocx->link_ring_head = 0;
1402 ocx->link_ring_tail = 0;
1403
1404 ocx->regs = pcim_iomap_table(pdev)[0];
1405 if (!ocx->regs) {
1406 dev_err(&pdev->dev, "Cannot map PCI resources\n");
1407 ret = -ENODEV;
1408 goto err_free;
1409 }
1410
1411 ocx->pdev = pdev;
1412
1413 for (i = 0; i < OCX_INTS; i++) {
1414 ocx->msix_ent[i].entry = i;
1415 ocx->msix_ent[i].vector = 0;
1416 }
1417
1418 ret = pci_enable_msix_exact(pdev, ocx->msix_ent, OCX_INTS);
1419 if (ret) {
1420 dev_err(&pdev->dev, "Cannot enable interrupt: %d\n", ret);
1421 goto err_free;
1422 }
1423
1424 for (i = 0; i < OCX_INTS; i++) {
1425 ret = devm_request_threaded_irq(&pdev->dev,
1426 ocx->msix_ent[i].vector,
1427 (i == 3) ?
1428 thunderx_ocx_com_isr :
1429 thunderx_ocx_lnk_isr,
1430 (i == 3) ?
1431 thunderx_ocx_com_threaded_isr :
1432 thunderx_ocx_lnk_threaded_isr,
1433 0, "[EDAC] ThunderX OCX",
1434 &ocx->msix_ent[i]);
1435 if (ret)
1436 goto err_free;
1437 }
1438
1439 edac_dev->dev = &pdev->dev;
1440 edac_dev->dev_name = dev_name(&pdev->dev);
1441 edac_dev->mod_name = "thunderx-ocx";
1442 edac_dev->ctl_name = "thunderx-ocx";
1443
1444 ret = edac_device_add_device(edac_dev);
1445 if (ret) {
1446 dev_err(&pdev->dev, "Cannot add EDAC device: %d\n", ret);
1447 goto err_free;
1448 }
1449
1450 if (IS_ENABLED(CONFIG_EDAC_DEBUG)) {
1451 ocx->debugfs = edac_debugfs_create_dir(pdev->dev.kobj.name);
1452
1453 ret = thunderx_create_debugfs_nodes(ocx->debugfs,
1454 ocx_dfs_ents,
1455 ocx,
1456 ARRAY_SIZE(ocx_dfs_ents));
1457 if (ret != ARRAY_SIZE(ocx_dfs_ents)) {
1458 dev_warn(&pdev->dev, "Error creating debugfs entries: %d%s\n",
1459 ret, ret >= 0 ? " created" : "");
1460 }
1461 }
1462
1463 pci_set_drvdata(pdev, edac_dev);
1464
1465 thunderx_ocx_clearstats(ocx);
1466
1467 for (i = 0; i < OCX_RX_LANES; i++) {
1468 writeq(OCX_LNE_INT_ENA_ALL,
1469 ocx->regs + OCX_LNE_INT_EN(i));
1470
1471 reg = readq(ocx->regs + OCX_LNE_INT(i));
1472 writeq(reg, ocx->regs + OCX_LNE_INT(i));
1473
1474 }
1475
1476 for (i = 0; i < OCX_LINK_INTS; i++) {
1477 reg = readq(ocx->regs + OCX_COM_LINKX_INT(i));
1478 writeq(reg, ocx->regs + OCX_COM_LINKX_INT(i));
1479
1480 writeq(OCX_COM_LINKX_INT_ENA_ALL,
1481 ocx->regs + OCX_COM_LINKX_INT_ENA_W1S(i));
1482 }
1483
1484 reg = readq(ocx->regs + OCX_COM_INT);
1485 writeq(reg, ocx->regs + OCX_COM_INT);
1486
1487 writeq(OCX_COM_INT_ENA_ALL, ocx->regs + OCX_COM_INT_ENA_W1S);
1488
1489 return 0;
1490 err_free:
1491 edac_device_free_ctl_info(edac_dev);
1492
1493 return ret;
1494 }
1495
1496 static void thunderx_ocx_remove(struct pci_dev *pdev)
1497 {
1498 struct edac_device_ctl_info *edac_dev = pci_get_drvdata(pdev);
1499 struct thunderx_ocx *ocx = edac_dev->pvt_info;
1500 int i;
1501
1502 writeq(OCX_COM_INT_ENA_ALL, ocx->regs + OCX_COM_INT_ENA_W1C);
1503
1504 for (i = 0; i < OCX_INTS; i++) {
1505 writeq(OCX_COM_LINKX_INT_ENA_ALL,
1506 ocx->regs + OCX_COM_LINKX_INT_ENA_W1C(i));
1507 }
1508
1509 edac_debugfs_remove_recursive(ocx->debugfs);
1510
1511 edac_device_del_device(&pdev->dev);
1512 edac_device_free_ctl_info(edac_dev);
1513 }
1514
1515 MODULE_DEVICE_TABLE(pci, thunderx_ocx_pci_tbl);
1516
1517 static struct pci_driver thunderx_ocx_driver = {
1518 .name = "thunderx_ocx_edac",
1519 .probe = thunderx_ocx_probe,
1520 .remove = thunderx_ocx_remove,
1521 .id_table = thunderx_ocx_pci_tbl,
1522 };
1523
1524 /*---------------------- L2C driver ---------------------------------*/
1525
1526 #define PCI_DEVICE_ID_THUNDER_L2C_TAD 0xa02e
1527 #define PCI_DEVICE_ID_THUNDER_L2C_CBC 0xa02f
1528 #define PCI_DEVICE_ID_THUNDER_L2C_MCI 0xa030
1529
1530 #define L2C_TAD_INT_W1C 0x40000
1531 #define L2C_TAD_INT_W1S 0x40008
1532
1533 #define L2C_TAD_INT_ENA_W1C 0x40020
1534 #define L2C_TAD_INT_ENA_W1S 0x40028
1535
1536
1537 #define L2C_TAD_INT_L2DDBE BIT(1)
1538 #define L2C_TAD_INT_SBFSBE BIT(2)
1539 #define L2C_TAD_INT_SBFDBE BIT(3)
1540 #define L2C_TAD_INT_FBFSBE BIT(4)
1541 #define L2C_TAD_INT_FBFDBE BIT(5)
1542 #define L2C_TAD_INT_TAGDBE BIT(9)
1543 #define L2C_TAD_INT_RDDISLMC BIT(15)
1544 #define L2C_TAD_INT_WRDISLMC BIT(16)
1545 #define L2C_TAD_INT_LFBTO BIT(17)
1546 #define L2C_TAD_INT_GSYNCTO BIT(18)
1547 #define L2C_TAD_INT_RTGSBE BIT(32)
1548 #define L2C_TAD_INT_RTGDBE BIT(33)
1549 #define L2C_TAD_INT_RDDISOCI BIT(34)
1550 #define L2C_TAD_INT_WRDISOCI BIT(35)
1551
1552 #define L2C_TAD_INT_ECC (L2C_TAD_INT_L2DDBE | \
1553 L2C_TAD_INT_SBFSBE | L2C_TAD_INT_SBFDBE | \
1554 L2C_TAD_INT_FBFSBE | L2C_TAD_INT_FBFDBE)
1555
1556 #define L2C_TAD_INT_CE (L2C_TAD_INT_SBFSBE | \
1557 L2C_TAD_INT_FBFSBE)
1558
1559 #define L2C_TAD_INT_UE (L2C_TAD_INT_L2DDBE | \
1560 L2C_TAD_INT_SBFDBE | \
1561 L2C_TAD_INT_FBFDBE | \
1562 L2C_TAD_INT_TAGDBE | \
1563 L2C_TAD_INT_RTGDBE | \
1564 L2C_TAD_INT_WRDISOCI | \
1565 L2C_TAD_INT_RDDISOCI | \
1566 L2C_TAD_INT_WRDISLMC | \
1567 L2C_TAD_INT_RDDISLMC | \
1568 L2C_TAD_INT_LFBTO | \
1569 L2C_TAD_INT_GSYNCTO)
1570
1571 static const struct error_descr l2_tad_errors[] = {
1572 {
1573 .type = ERR_CORRECTED,
1574 .mask = L2C_TAD_INT_SBFSBE,
1575 .descr = "SBF single-bit error",
1576 },
1577 {
1578 .type = ERR_CORRECTED,
1579 .mask = L2C_TAD_INT_FBFSBE,
1580 .descr = "FBF single-bit error",
1581 },
1582 {
1583 .type = ERR_UNCORRECTED,
1584 .mask = L2C_TAD_INT_L2DDBE,
1585 .descr = "L2D double-bit error",
1586 },
1587 {
1588 .type = ERR_UNCORRECTED,
1589 .mask = L2C_TAD_INT_SBFDBE,
1590 .descr = "SBF double-bit error",
1591 },
1592 {
1593 .type = ERR_UNCORRECTED,
1594 .mask = L2C_TAD_INT_FBFDBE,
1595 .descr = "FBF double-bit error",
1596 },
1597 {
1598 .type = ERR_UNCORRECTED,
1599 .mask = L2C_TAD_INT_TAGDBE,
1600 .descr = "TAG double-bit error",
1601 },
1602 {
1603 .type = ERR_UNCORRECTED,
1604 .mask = L2C_TAD_INT_RTGDBE,
1605 .descr = "RTG double-bit error",
1606 },
1607 {
1608 .type = ERR_UNCORRECTED,
1609 .mask = L2C_TAD_INT_WRDISOCI,
1610 .descr = "Write to a disabled CCPI",
1611 },
1612 {
1613 .type = ERR_UNCORRECTED,
1614 .mask = L2C_TAD_INT_RDDISOCI,
1615 .descr = "Read from a disabled CCPI",
1616 },
1617 {
1618 .type = ERR_UNCORRECTED,
1619 .mask = L2C_TAD_INT_WRDISLMC,
1620 .descr = "Write to a disabled LMC",
1621 },
1622 {
1623 .type = ERR_UNCORRECTED,
1624 .mask = L2C_TAD_INT_RDDISLMC,
1625 .descr = "Read from a disabled LMC",
1626 },
1627 {
1628 .type = ERR_UNCORRECTED,
1629 .mask = L2C_TAD_INT_LFBTO,
1630 .descr = "LFB entry timeout",
1631 },
1632 {
1633 .type = ERR_UNCORRECTED,
1634 .mask = L2C_TAD_INT_GSYNCTO,
1635 .descr = "Global sync CCPI timeout",
1636 },
1637 {0, 0, NULL},
1638 };
1639
1640 #define L2C_TAD_INT_TAG (L2C_TAD_INT_TAGDBE)
1641
1642 #define L2C_TAD_INT_RTG (L2C_TAD_INT_RTGDBE)
1643
1644 #define L2C_TAD_INT_DISLMC (L2C_TAD_INT_WRDISLMC | L2C_TAD_INT_RDDISLMC)
1645
1646 #define L2C_TAD_INT_DISOCI (L2C_TAD_INT_WRDISOCI | L2C_TAD_INT_RDDISOCI)
1647
1648 #define L2C_TAD_INT_ENA_ALL (L2C_TAD_INT_ECC | L2C_TAD_INT_TAG | \
1649 L2C_TAD_INT_RTG | \
1650 L2C_TAD_INT_DISLMC | L2C_TAD_INT_DISOCI | \
1651 L2C_TAD_INT_LFBTO)
1652
1653 #define L2C_TAD_TIMETWO 0x50000
1654 #define L2C_TAD_TIMEOUT 0x50100
1655 #define L2C_TAD_ERR 0x60000
1656 #define L2C_TAD_TQD_ERR 0x60100
1657 #define L2C_TAD_TTG_ERR 0x60200
1658
1659
1660 #define L2C_CBC_INT_W1C 0x60000
1661
1662 #define L2C_CBC_INT_RSDSBE BIT(0)
1663 #define L2C_CBC_INT_RSDDBE BIT(1)
1664
1665 #define L2C_CBC_INT_RSD (L2C_CBC_INT_RSDSBE | L2C_CBC_INT_RSDDBE)
1666
1667 #define L2C_CBC_INT_MIBSBE BIT(4)
1668 #define L2C_CBC_INT_MIBDBE BIT(5)
1669
1670 #define L2C_CBC_INT_MIB (L2C_CBC_INT_MIBSBE | L2C_CBC_INT_MIBDBE)
1671
1672 #define L2C_CBC_INT_IORDDISOCI BIT(6)
1673 #define L2C_CBC_INT_IOWRDISOCI BIT(7)
1674
1675 #define L2C_CBC_INT_IODISOCI (L2C_CBC_INT_IORDDISOCI | \
1676 L2C_CBC_INT_IOWRDISOCI)
1677
1678 #define L2C_CBC_INT_CE (L2C_CBC_INT_RSDSBE | L2C_CBC_INT_MIBSBE)
1679 #define L2C_CBC_INT_UE (L2C_CBC_INT_RSDDBE | L2C_CBC_INT_MIBDBE)
1680
1681
1682 static const struct error_descr l2_cbc_errors[] = {
1683 {
1684 .type = ERR_CORRECTED,
1685 .mask = L2C_CBC_INT_RSDSBE,
1686 .descr = "RSD single-bit error",
1687 },
1688 {
1689 .type = ERR_CORRECTED,
1690 .mask = L2C_CBC_INT_MIBSBE,
1691 .descr = "MIB single-bit error",
1692 },
1693 {
1694 .type = ERR_UNCORRECTED,
1695 .mask = L2C_CBC_INT_RSDDBE,
1696 .descr = "RSD double-bit error",
1697 },
1698 {
1699 .type = ERR_UNCORRECTED,
1700 .mask = L2C_CBC_INT_MIBDBE,
1701 .descr = "MIB double-bit error",
1702 },
1703 {
1704 .type = ERR_UNCORRECTED,
1705 .mask = L2C_CBC_INT_IORDDISOCI,
1706 .descr = "Read from a disabled CCPI",
1707 },
1708 {
1709 .type = ERR_UNCORRECTED,
1710 .mask = L2C_CBC_INT_IOWRDISOCI,
1711 .descr = "Write to a disabled CCPI",
1712 },
1713 {0, 0, NULL},
1714 };
1715
1716 #define L2C_CBC_INT_W1S 0x60008
1717 #define L2C_CBC_INT_ENA_W1C 0x60020
1718
1719 #define L2C_CBC_INT_ENA_ALL (L2C_CBC_INT_RSD | L2C_CBC_INT_MIB | \
1720 L2C_CBC_INT_IODISOCI)
1721
1722 #define L2C_CBC_INT_ENA_W1S 0x60028
1723
1724 #define L2C_CBC_IODISOCIERR 0x80008
1725 #define L2C_CBC_IOCERR 0x80010
1726 #define L2C_CBC_RSDERR 0x80018
1727 #define L2C_CBC_MIBERR 0x80020
1728
1729
1730 #define L2C_MCI_INT_W1C 0x0
1731
1732 #define L2C_MCI_INT_VBFSBE BIT(0)
1733 #define L2C_MCI_INT_VBFDBE BIT(1)
1734
1735 static const struct error_descr l2_mci_errors[] = {
1736 {
1737 .type = ERR_CORRECTED,
1738 .mask = L2C_MCI_INT_VBFSBE,
1739 .descr = "VBF single-bit error",
1740 },
1741 {
1742 .type = ERR_UNCORRECTED,
1743 .mask = L2C_MCI_INT_VBFDBE,
1744 .descr = "VBF double-bit error",
1745 },
1746 {0, 0, NULL},
1747 };
1748
1749 #define L2C_MCI_INT_W1S 0x8
1750 #define L2C_MCI_INT_ENA_W1C 0x20
1751
1752 #define L2C_MCI_INT_ENA_ALL (L2C_MCI_INT_VBFSBE | L2C_MCI_INT_VBFDBE)
1753
1754 #define L2C_MCI_INT_ENA_W1S 0x28
1755
1756 #define L2C_MCI_ERR 0x10000
1757
1758 #define L2C_MESSAGE_SIZE SZ_1K
1759 #define L2C_OTHER_SIZE (50 * ARRAY_SIZE(l2_tad_errors))
1760
1761 struct l2c_err_ctx {
1762 char *reg_ext_name;
1763 u64 reg_int;
1764 u64 reg_ext;
1765 };
1766
1767 struct thunderx_l2c {
1768 void __iomem *regs;
1769 struct pci_dev *pdev;
1770 struct edac_device_ctl_info *edac_dev;
1771
1772 struct dentry *debugfs;
1773
1774 int index;
1775
1776 struct msix_entry msix_ent;
1777
1778 struct l2c_err_ctx err_ctx[RING_ENTRIES];
1779 unsigned long ring_head;
1780 unsigned long ring_tail;
1781 };
1782
1783 static irqreturn_t thunderx_l2c_tad_isr(int irq, void *irq_id)
1784 {
1785 struct msix_entry *msix = irq_id;
1786 struct thunderx_l2c *tad = container_of(msix, struct thunderx_l2c,
1787 msix_ent);
1788
1789 unsigned long head = ring_pos(tad->ring_head, ARRAY_SIZE(tad->err_ctx));
1790 struct l2c_err_ctx *ctx = &tad->err_ctx[head];
1791
1792 ctx->reg_int = readq(tad->regs + L2C_TAD_INT_W1C);
1793
1794 if (ctx->reg_int & L2C_TAD_INT_ECC) {
1795 ctx->reg_ext_name = "TQD_ERR";
1796 ctx->reg_ext = readq(tad->regs + L2C_TAD_TQD_ERR);
1797 } else if (ctx->reg_int & L2C_TAD_INT_TAG) {
1798 ctx->reg_ext_name = "TTG_ERR";
1799 ctx->reg_ext = readq(tad->regs + L2C_TAD_TTG_ERR);
1800 } else if (ctx->reg_int & L2C_TAD_INT_LFBTO) {
1801 ctx->reg_ext_name = "TIMEOUT";
1802 ctx->reg_ext = readq(tad->regs + L2C_TAD_TIMEOUT);
1803 } else if (ctx->reg_int & L2C_TAD_INT_DISOCI) {
1804 ctx->reg_ext_name = "ERR";
1805 ctx->reg_ext = readq(tad->regs + L2C_TAD_ERR);
1806 }
1807
1808 writeq(ctx->reg_int, tad->regs + L2C_TAD_INT_W1C);
1809
1810 tad->ring_head++;
1811
1812 return IRQ_WAKE_THREAD;
1813 }
1814
1815 static irqreturn_t thunderx_l2c_cbc_isr(int irq, void *irq_id)
1816 {
1817 struct msix_entry *msix = irq_id;
1818 struct thunderx_l2c *cbc = container_of(msix, struct thunderx_l2c,
1819 msix_ent);
1820
1821 unsigned long head = ring_pos(cbc->ring_head, ARRAY_SIZE(cbc->err_ctx));
1822 struct l2c_err_ctx *ctx = &cbc->err_ctx[head];
1823
1824 ctx->reg_int = readq(cbc->regs + L2C_CBC_INT_W1C);
1825
1826 if (ctx->reg_int & L2C_CBC_INT_RSD) {
1827 ctx->reg_ext_name = "RSDERR";
1828 ctx->reg_ext = readq(cbc->regs + L2C_CBC_RSDERR);
1829 } else if (ctx->reg_int & L2C_CBC_INT_MIB) {
1830 ctx->reg_ext_name = "MIBERR";
1831 ctx->reg_ext = readq(cbc->regs + L2C_CBC_MIBERR);
1832 } else if (ctx->reg_int & L2C_CBC_INT_IODISOCI) {
1833 ctx->reg_ext_name = "IODISOCIERR";
1834 ctx->reg_ext = readq(cbc->regs + L2C_CBC_IODISOCIERR);
1835 }
1836
1837 writeq(ctx->reg_int, cbc->regs + L2C_CBC_INT_W1C);
1838
1839 cbc->ring_head++;
1840
1841 return IRQ_WAKE_THREAD;
1842 }
1843
1844 static irqreturn_t thunderx_l2c_mci_isr(int irq, void *irq_id)
1845 {
1846 struct msix_entry *msix = irq_id;
1847 struct thunderx_l2c *mci = container_of(msix, struct thunderx_l2c,
1848 msix_ent);
1849
1850 unsigned long head = ring_pos(mci->ring_head, ARRAY_SIZE(mci->err_ctx));
1851 struct l2c_err_ctx *ctx = &mci->err_ctx[head];
1852
1853 ctx->reg_int = readq(mci->regs + L2C_MCI_INT_W1C);
1854 ctx->reg_ext = readq(mci->regs + L2C_MCI_ERR);
1855
1856 writeq(ctx->reg_int, mci->regs + L2C_MCI_INT_W1C);
1857
1858 ctx->reg_ext_name = "ERR";
1859
1860 mci->ring_head++;
1861
1862 return IRQ_WAKE_THREAD;
1863 }
1864
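/*
 * A single threaded handler serves all three L2C block types; the PCI device
 * ID of the owning function selects the matching interrupt-register name,
 * CE/UE masks and error-description table before the ring is drained.
 */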
1865 static irqreturn_t thunderx_l2c_threaded_isr(int irq, void *irq_id)
1866 {
1867 struct msix_entry *msix = irq_id;
1868 struct thunderx_l2c *l2c = container_of(msix, struct thunderx_l2c,
1869 msix_ent);
1870
1871 unsigned long tail = ring_pos(l2c->ring_tail, ARRAY_SIZE(l2c->err_ctx));
1872 struct l2c_err_ctx *ctx = &l2c->err_ctx[tail];
1873 irqreturn_t ret = IRQ_NONE;
1874
1875 u64 mask_ue, mask_ce;
1876 const struct error_descr *l2_errors;
1877 char *reg_int_name;
1878
1879 char *msg;
1880 char *other;
1881
1882 msg = kmalloc(L2C_MESSAGE_SIZE, GFP_KERNEL);
1883 other = kmalloc(L2C_OTHER_SIZE, GFP_KERNEL);
1884
1885 if (!msg || !other)
1886 goto err_free;
1887
1888 switch (l2c->pdev->device) {
1889 case PCI_DEVICE_ID_THUNDER_L2C_TAD:
1890 reg_int_name = "L2C_TAD_INT";
1891 mask_ue = L2C_TAD_INT_UE;
1892 mask_ce = L2C_TAD_INT_CE;
1893 l2_errors = l2_tad_errors;
1894 break;
1895 case PCI_DEVICE_ID_THUNDER_L2C_CBC:
1896 reg_int_name = "L2C_CBC_INT";
1897 mask_ue = L2C_CBC_INT_UE;
1898 mask_ce = L2C_CBC_INT_CE;
1899 l2_errors = l2_cbc_errors;
1900 break;
1901 case PCI_DEVICE_ID_THUNDER_L2C_MCI:
1902 reg_int_name = "L2C_MCI_INT";
1903 mask_ue = L2C_MCI_INT_VBFDBE;
1904 mask_ce = L2C_MCI_INT_VBFSBE;
1905 l2_errors = l2_mci_errors;
1906 break;
1907 default:
1908 dev_err(&l2c->pdev->dev, "Unsupported device: %04x\n",
1909 l2c->pdev->device);
1910 return IRQ_NONE;
1911 }
1912
1913 while (CIRC_CNT(l2c->ring_head, l2c->ring_tail,
1914 ARRAY_SIZE(l2c->err_ctx))) {
1915 snprintf(msg, L2C_MESSAGE_SIZE,
1916 "%s: %s: %016llx, %s: %016llx",
1917 l2c->edac_dev->ctl_name, reg_int_name, ctx->reg_int,
1918 ctx->reg_ext_name, ctx->reg_ext);
1919
1920 decode_register(other, L2C_OTHER_SIZE, l2_errors, ctx->reg_int);
1921
1922 strncat(msg, other, L2C_MESSAGE_SIZE);
1923
1924 if (ctx->reg_int & mask_ue)
1925 edac_device_handle_ue(l2c->edac_dev, 0, 0, msg);
1926 else if (ctx->reg_int & mask_ce)
1927 edac_device_handle_ce(l2c->edac_dev, 0, 0, msg);
1928
1929 l2c->ring_tail++;
1930 }
1931
1932 return IRQ_HANDLED;
1933
1934 err_free:
1935 kfree(other);
1936 kfree(msg);
1937
1938 return ret;
1939 }
1940
1941 #define L2C_DEBUGFS_ATTR(_name, _reg) DEBUGFS_REG_ATTR(l2c, _name, _reg)
1942
1943 L2C_DEBUGFS_ATTR(tad_int, L2C_TAD_INT_W1S);
1944
1945 struct debugfs_entry *l2c_tad_dfs_ents[] = {
1946 &debugfs_tad_int,
1947 };
1948
1949 L2C_DEBUGFS_ATTR(cbc_int, L2C_CBC_INT_W1S);
1950
1951 struct debugfs_entry *l2c_cbc_dfs_ents[] = {
1952 &debugfs_cbc_int,
1953 };
1954
1955 L2C_DEBUGFS_ATTR(mci_int, L2C_MCI_INT_W1S);
1956
1957 struct debugfs_entry *l2c_mci_dfs_ents[] = {
1958 &debugfs_mci_int,
1959 };
1960
1961 static const struct pci_device_id thunderx_l2c_pci_tbl[] = {
1962 { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_L2C_TAD), },
1963 { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_L2C_CBC), },
1964 { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_L2C_MCI), },
1965 { 0, },
1966 };
1967
1968 static int thunderx_l2c_probe(struct pci_dev *pdev,
1969 const struct pci_device_id *id)
1970 {
1971 struct thunderx_l2c *l2c;
1972 struct edac_device_ctl_info *edac_dev;
1973 struct debugfs_entry **l2c_devattr;
1974 size_t dfs_entries;
1975 irqreturn_t (*thunderx_l2c_isr)(int, void *) = NULL;
1976 char name[32];
1977 const char *fmt;
1978 u64 reg_en_offs, reg_en_mask;
1979 int idx;
1980 int ret;
1981
1982 ret = pcim_enable_device(pdev);
1983 if (ret) {
1984 dev_err(&pdev->dev, "Cannot enable PCI device: %d\n", ret);
1985 return ret;
1986 }
1987
1988 ret = pcim_iomap_regions(pdev, BIT(0), "thunderx_l2c");
1989 if (ret) {
1990 dev_err(&pdev->dev, "Cannot map PCI resources: %d\n", ret);
1991 return ret;
1992 }
1993
1994 switch (pdev->device) {
1995 case PCI_DEVICE_ID_THUNDER_L2C_TAD:
1996 thunderx_l2c_isr = thunderx_l2c_tad_isr;
1997 l2c_devattr = l2c_tad_dfs_ents;
1998 dfs_entries = ARRAY_SIZE(l2c_tad_dfs_ents);
1999 fmt = "L2C-TAD%d";
2000 reg_en_offs = L2C_TAD_INT_ENA_W1S;
2001 reg_en_mask = L2C_TAD_INT_ENA_ALL;
2002 break;
2003 case PCI_DEVICE_ID_THUNDER_L2C_CBC:
2004 thunderx_l2c_isr = thunderx_l2c_cbc_isr;
2005 l2c_devattr = l2c_cbc_dfs_ents;
2006 dfs_entries = ARRAY_SIZE(l2c_cbc_dfs_ents);
2007 fmt = "L2C-CBC%d";
2008 reg_en_offs = L2C_CBC_INT_ENA_W1S;
2009 reg_en_mask = L2C_CBC_INT_ENA_ALL;
2010 break;
2011 case PCI_DEVICE_ID_THUNDER_L2C_MCI:
2012 thunderx_l2c_isr = thunderx_l2c_mci_isr;
2013 l2c_devattr = l2c_mci_dfs_ents;
2014 dfs_entries = ARRAY_SIZE(l2c_mci_dfs_ents);
2015 fmt = "L2C-MCI%d";
2016 reg_en_offs = L2C_MCI_INT_ENA_W1S;
2017 reg_en_mask = L2C_MCI_INT_ENA_ALL;
2018 break;
2019 default:
2020 /* Should never ever get here */
2021 dev_err(&pdev->dev, "Unsupported PCI device: %04x\n",
2022 pdev->device);
2023 return -EINVAL;
2024 }
2025
2026 idx = edac_device_alloc_index();
2027 snprintf(name, sizeof(name), fmt, idx);
2028
2029 edac_dev = edac_device_alloc_ctl_info(sizeof(struct thunderx_l2c),
2030 name, 1, "L2C", 1, 0,
2031 NULL, 0, idx);
2032 if (!edac_dev) {
2033 dev_err(&pdev->dev, "Cannot allocate EDAC device\n");
2034 return -ENOMEM;
2035 }
2036
2037 l2c = edac_dev->pvt_info;
2038 l2c->edac_dev = edac_dev;
2039
2040 l2c->regs = pcim_iomap_table(pdev)[0];
2041 if (!l2c->regs) {
2042 dev_err(&pdev->dev, "Cannot map PCI resources\n");
2043 ret = -ENODEV;
2044 goto err_free;
2045 }
2046
2047 l2c->pdev = pdev;
2048
2049 l2c->ring_head = 0;
2050 l2c->ring_tail = 0;
2051
2052 l2c->msix_ent.entry = 0;
2053 l2c->msix_ent.vector = 0;
2054
2055 ret = pci_enable_msix_exact(pdev, &l2c->msix_ent, 1);
2056 if (ret) {
2057 dev_err(&pdev->dev, "Cannot enable interrupt: %d\n", ret);
2058 goto err_free;
2059 }
2060
2061 ret = devm_request_threaded_irq(&pdev->dev, l2c->msix_ent.vector,
2062 thunderx_l2c_isr,
2063 thunderx_l2c_threaded_isr,
2064 0, "[EDAC] ThunderX L2C",
2065 &l2c->msix_ent);
2066 if (ret)
2067 goto err_free;
2068
2069 edac_dev->dev = &pdev->dev;
2070 edac_dev->dev_name = dev_name(&pdev->dev);
2071 edac_dev->mod_name = "thunderx-l2c";
2072 edac_dev->ctl_name = "thunderx-l2c";
2073
2074 ret = edac_device_add_device(edac_dev);
2075 if (ret) {
2076 dev_err(&pdev->dev, "Cannot add EDAC device: %d\n", ret);
2077 goto err_free;
2078 }
2079
2080 if (IS_ENABLED(CONFIG_EDAC_DEBUG)) {
2081 l2c->debugfs = edac_debugfs_create_dir(pdev->dev.kobj.name);
2082
2083 ret = thunderx_create_debugfs_nodes(l2c->debugfs, l2c_devattr,
2084 l2c, dfs_entries);
2085
2086 if (ret != dfs_entries) {
2087 dev_warn(&pdev->dev, "Error creating debugfs entries: %d%s\n",
2088 ret, ret >= 0 ? " created" : "");
2089 }
2090 }
2091
2092 pci_set_drvdata(pdev, edac_dev);
2093
2094 writeq(reg_en_mask, l2c->regs + reg_en_offs);
2095
2096 return 0;
2097
2098 err_free:
2099 edac_device_free_ctl_info(edac_dev);
2100
2101 return ret;
2102 }
2103
2104 static void thunderx_l2c_remove(struct pci_dev *pdev)
2105 {
2106 struct edac_device_ctl_info *edac_dev = pci_get_drvdata(pdev);
2107 struct thunderx_l2c *l2c = edac_dev->pvt_info;
2108
2109 switch (pdev->device) {
2110 case PCI_DEVICE_ID_THUNDER_L2C_TAD:
2111 writeq(L2C_TAD_INT_ENA_ALL, l2c->regs + L2C_TAD_INT_ENA_W1C);
2112 break;
2113 case PCI_DEVICE_ID_THUNDER_L2C_CBC:
2114 writeq(L2C_CBC_INT_ENA_ALL, l2c->regs + L2C_CBC_INT_ENA_W1C);
2115 break;
2116 case PCI_DEVICE_ID_THUNDER_L2C_MCI:
2117 writeq(L2C_MCI_INT_ENA_ALL, l2c->regs + L2C_MCI_INT_ENA_W1C);
2118 break;
2119 }
2120
2121 edac_debugfs_remove_recursive(l2c->debugfs);
2122
2123 edac_device_del_device(&pdev->dev);
2124 edac_device_free_ctl_info(edac_dev);
2125 }
2126
2127 MODULE_DEVICE_TABLE(pci, thunderx_l2c_pci_tbl);
2128
2129 static struct pci_driver thunderx_l2c_driver = {
2130 .name = "thunderx_l2c_edac",
2131 .probe = thunderx_l2c_probe,
2132 .remove = thunderx_l2c_remove,
2133 .id_table = thunderx_l2c_pci_tbl,
2134 };
2135
2136 static int __init thunderx_edac_init(void)
2137 {
2138 int rc = 0;
2139
2140 rc = pci_register_driver(&thunderx_lmc_driver);
2141 if (rc)
2142 return rc;
2143
2144 rc = pci_register_driver(&thunderx_ocx_driver);
2145 if (rc)
2146 goto err_lmc;
2147
2148 rc = pci_register_driver(&thunderx_l2c_driver);
2149 if (rc)
2150 goto err_ocx;
2151
2152 return rc;
2153 err_ocx:
2154 pci_unregister_driver(&thunderx_ocx_driver);
2155 err_lmc:
2156 pci_unregister_driver(&thunderx_lmc_driver);
2157
2158 return rc;
2159 }
2160
2161 static void __exit thunderx_edac_exit(void)
2162 {
2163 pci_unregister_driver(&thunderx_l2c_driver);
2164 pci_unregister_driver(&thunderx_ocx_driver);
2165 pci_unregister_driver(&thunderx_lmc_driver);
2166
2167 }
2168
2169 module_init(thunderx_edac_init);
2170 module_exit(thunderx_edac_exit);
2171
2172 MODULE_LICENSE("GPL v2");
2173 MODULE_AUTHOR("Cavium, Inc.");
2174 MODULE_DESCRIPTION("EDAC Driver for Cavium ThunderX");