/* drivers/edac/pnd2_edac.c — from mirror_ubuntu-artful-kernel.git (git.proxmox.com), i2c/for-4.13 merge */
1 /*
2 * Driver for Pondicherry2 memory controller.
3 *
4 * Copyright (c) 2016, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * [Derived from sb_edac.c]
16 *
17 * Translation of system physical addresses to DIMM addresses
18 * is a two stage process:
19 *
20 * First the Pondicherry 2 memory controller handles slice and channel interleaving
21 * in "sys2pmi()". This is (almost) completley common between platforms.
22 *
23 * Then a platform specific dunit (DIMM unit) completes the process to provide DIMM,
24 * rank, bank, row and column using the appropriate "dunit_ops" functions/parameters.
25 */
26
27 #include <linux/module.h>
28 #include <linux/init.h>
29 #include <linux/pci.h>
30 #include <linux/pci_ids.h>
31 #include <linux/slab.h>
32 #include <linux/delay.h>
33 #include <linux/edac.h>
34 #include <linux/mmzone.h>
35 #include <linux/smp.h>
36 #include <linux/bitmap.h>
37 #include <linux/math64.h>
38 #include <linux/mod_devicetable.h>
39 #include <asm/cpu_device_id.h>
40 #include <asm/intel-family.h>
41 #include <asm/processor.h>
42 #include <asm/mce.h>
43
44 #include "edac_mc.h"
45 #include "edac_module.h"
46 #include "pnd2_edac.h"
47
48 #define APL_NUM_CHANNELS 4
49 #define DNV_NUM_CHANNELS 2
50 #define DNV_MAX_DIMMS 2 /* Max DIMMs per channel */
51
/* Supported platform flavours. */
enum type {
	APL,
	DNV, /* All requests go to PMI CH0 on each slice (CH1 disabled) */
};

/* Fully decoded DRAM location for one error address. */
struct dram_addr {
	int chan;	/* DIMM channel (after pmiidx >> pmiidx_shift) */
	int dimm;	/* DIMM within the channel */
	int rank;
	int bank;
	int row;
	int col;
};

/* Per-MC private data hung off mem_ctl_info::pvt_info. */
struct pnd2_pvt {
	int dimm_geom[APL_NUM_CHANNELS];	/* per-channel index into the dimms[] geometry table */
	u64 tolm, tohm;	/* by name: top of low/high memory — set outside this chunk, confirm */
};

/*
 * System address space is divided into multiple regions with
 * different interleave rules in each. The as0/as1 regions
 * have no interleaving at all. The as2 region is interleaved
 * between two channels. The mot region is magic and may overlap
 * other regions, with its interleave rules taking precedence.
 * Addresses not in any of these regions are interleaved across
 * all four channels.
 */
static struct region {
	u64 base;
	u64 limit;	/* inclusive upper bound */
	u8 enabled;
} mot, as0, as1, as2;

/* Platform-specific operations; set once at init to the APL or DNV table. */
static struct dunit_ops {
	char *name;
	enum type type;
	int pmiaddr_shift;	/* applied to pmiaddr before second-stage decode */
	int pmiidx_shift;	/* converts PMI channel index to DIMM channel index */
	int channels;
	int dimms_per_channel;
	int (*rd_reg)(int port, int off, int op, void *data, size_t sz, char *name);
	int (*get_registers)(void);
	int (*check_ecc)(void);
	void (*mk_region)(char *name, struct region *rp, void *asym);
	void (*get_dimm_config)(struct mem_ctl_info *mci);
	int (*pmi2mem)(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
				   struct dram_addr *daddr, char *msg);
} *ops;
101
102 static struct mem_ctl_info *pnd2_mci;
103
104 #define PND2_MSG_SIZE 256
105
106 /* Debug macros */
107 #define pnd2_printk(level, fmt, arg...) \
108 edac_printk(level, "pnd2", fmt, ##arg)
109
110 #define pnd2_mc_printk(mci, level, fmt, arg...) \
111 edac_mc_chipset_printk(mci, level, "pnd2", fmt, ##arg)
112
113 #define MOT_CHAN_INTLV_BIT_1SLC_2CH 12
114 #define MOT_CHAN_INTLV_BIT_2SLC_2CH 13
115 #define SELECTOR_DISABLED (-1)
116 #define _4GB (1ul << 32)
117
118 #define PMI_ADDRESS_WIDTH 31
119 #define PND_MAX_PHYS_BIT 39
120
121 #define APL_ASYMSHIFT 28
122 #define DNV_ASYMSHIFT 31
123 #define CH_HASH_MASK_LSB 6
124 #define SLICE_HASH_MASK_LSB 6
125 #define MOT_SLC_INTLV_BIT 12
126 #define LOG2_PMI_ADDR_GRANULARITY 5
127 #define MOT_SHIFT 24
128
129 #define GET_BITFIELD(v, lo, hi) (((v) & GENMASK_ULL(hi, lo)) >> (lo))
130 #define U64_LSHIFT(val, s) ((u64)(val) << (s))
131
#ifdef CONFIG_X86_INTEL_SBI_APL
#include "linux/platform_data/sbi_apl.h"
/*
 * Issue one 32-bit transaction on the Apollo Lake sideband interface.
 * Opcodes 0, 4 and 6 are reads (result copied back into *data); any
 * other opcode is a write taking its payload from *data.  Returns 0 on
 * success, else the commit error or the message's non-zero status.
 */
static int sbi_send(int port, int off, int op, u32 *data)
{
	struct sbi_apl_message sbi_arg;
	int ret, read = 0;

	memset(&sbi_arg, 0, sizeof(sbi_arg));

	if (op == 0 || op == 4 || op == 6)
		read = 1;
	else
		sbi_arg.data = *data;

	sbi_arg.opcode = op;
	sbi_arg.port_address = port;
	sbi_arg.register_offset = off;
	ret = sbi_apl_commit(&sbi_arg);
	if (ret || sbi_arg.status)
		edac_dbg(2, "sbi_send status=%d ret=%d data=%x\n",
				 sbi_arg.status, ret, sbi_arg.data);

	/* A successful commit may still carry a failing per-message status. */
	if (ret == 0)
		ret = sbi_arg.status;

	if (ret == 0 && read)
		*data = sbi_arg.data;

	return ret;
}
#else
/* Sideband access library not configured in: "protocol driver not attached". */
static int sbi_send(int port, int off, int op, u32 *data)
{
	return -EUNATCH;
}
#endif
168
/*
 * Read a 4- or 8-byte register via the Apollo Lake sideband interface.
 * An 8-byte read is issued as two 4-byte transactions, high dword first
 * (case 8 deliberately falls through into case 4).  @data must point to
 * at least @sz writable bytes.  Returns 0 on success, non-zero if either
 * sideband transaction failed; sizes other than 4/8 return 0 untouched.
 */
static int apl_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
{
	int ret = 0;

	edac_dbg(2, "Read %s port=%x off=%x op=%x\n", name, port, off, op);
	switch (sz) {
	case 8:
		ret = sbi_send(port, off + 4, op, (u32 *)(data + 4));
		/* fall through */
	case 4:
		ret |= sbi_send(port, off, op, (u32 *)data);
		pnd2_printk(KERN_DEBUG, "%s=%x%08x ret=%d\n", name,
					sz == 8 ? *((u32 *)(data + 4)) : 0, *((u32 *)data), ret);
		break;
	}

	return ret;
}
187
/*
 * Fetch the memory controller hub MMIO window base from config registers
 * 0x48/0x4c of PCI device 8086:1980.  Returns 0 when the device is absent
 * or the window is not enabled.
 */
static u64 get_mem_ctrl_hub_base_addr(void)
{
	struct b_cr_mchbar_lo_pci lo;
	struct b_cr_mchbar_hi_pci hi;
	struct pci_dev *pdev;

	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
	if (pdev) {
		pci_read_config_dword(pdev, 0x48, (u32 *)&lo);
		pci_read_config_dword(pdev, 0x4c, (u32 *)&hi);
		pci_dev_put(pdev);
	} else {
		return 0;
	}

	if (!lo.enable) {
		edac_dbg(2, "MMIO via memory controller hub base address is disabled!\n");
		return 0;
	}

	/* hi.base supplies bits 63:32, lo.base bits 31:15 of the window base. */
	return U64_LSHIFT(hi.base, 32) | U64_LSHIFT(lo.base, 15);
}
210
/*
 * Fetch the sideband register MMIO base from BAR registers 0x10/0x14 of
 * PCI device 8086:19dd (presumably the P2SB bridge — confirm against the
 * platform datasheet).  Falls back to the fixed default 0xfd000000 when
 * the device is not visible.
 */
static u64 get_sideband_reg_base_addr(void)
{
	struct pci_dev *pdev;
	u32 hi, lo;

	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x19dd, NULL);
	if (pdev) {
		pci_read_config_dword(pdev, 0x10, &lo);
		pci_read_config_dword(pdev, 0x14, &hi);
		pci_dev_put(pdev);
		return (U64_LSHIFT(hi, 32) | U64_LSHIFT(lo, 0));
	} else {
		return 0xfd000000;
	}
}
226
/*
 * Read a Denverton register.  op == 4 reads @off from the config space
 * of PCI device 8086:1980; any other op reads via MMIO — either through
 * the memory controller hub window (op == 0, port 0x4c) or through the
 * per-port sideband register window.  Returns 0 on success.
 *
 * NOTE(review): @base is a plain char * dereferenced after ioremap();
 * strictly this should be void __iomem * accessed with readl() — verify
 * against current upstream before changing.
 */
static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
{
	struct pci_dev *pdev;
	char *base;
	u64 addr;

	if (op == 4) {
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
		if (!pdev)
			return -ENODEV;

		pci_read_config_dword(pdev, off, data);
		pci_dev_put(pdev);
	} else {
		/* MMIO via memory controller hub base address */
		if (op == 0 && port == 0x4c) {
			addr = get_mem_ctrl_hub_base_addr();
			if (!addr)
				return -ENODEV;
		} else {
			/* MMIO via sideband register base address */
			addr = get_sideband_reg_base_addr();
			if (!addr)
				return -ENODEV;
			addr += (port << 16);
		}

		base = ioremap((resource_size_t)addr, 0x10000);
		if (!base)
			return -ENODEV;

		/* 8-byte registers are read as two 32-bit halves, high half first. */
		if (sz == 8)
			*(u32 *)(data + 4) = *(u32 *)(base + off + 4);
		*(u32 *)data = *(u32 *)(base + off);

		iounmap(base);
	}

	edac_dbg(2, "Read %s=%.8x_%.8x\n", name,
			 (sz == 8) ? *(u32 *)(data + 4) : 0, *(u32 *)data);

	return 0;
}
270
271 #define RD_REGP(regp, regname, port) \
272 ops->rd_reg(port, \
273 regname##_offset, \
274 regname##_r_opcode, \
275 regp, sizeof(struct regname), \
276 #regname)
277
278 #define RD_REG(regp, regname) \
279 ops->rd_reg(regname ## _port, \
280 regname##_offset, \
281 regname##_r_opcode, \
282 regp, sizeof(struct regname), \
283 #regname)
284
285 static u64 top_lm, top_hm;
286 static bool two_slices;
287 static bool two_channels; /* Both PMI channels in one slice enabled */
288
289 static u8 sym_chan_mask;
290 static u8 asym_chan_mask;
291 static u8 chan_mask;
292
293 static int slice_selector = -1;
294 static int chan_selector = -1;
295 static u64 slice_hash_mask;
296 static u64 chan_hash_mask;
297
298 static void mk_region(char *name, struct region *rp, u64 base, u64 limit)
299 {
300 rp->enabled = 1;
301 rp->base = base;
302 rp->limit = limit;
303 edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, limit);
304 }
305
/*
 * Build a region from a base + mask pair (used for the MOT region).
 * The mask must be a contiguous run of 1s from its lowest set bit up to
 * PND_MAX_PHYS_BIT, and the base must be aligned to it; bogus firmware
 * values are reported (FW_BUG) and the region is left disabled.
 */
static void mk_region_mask(char *name, struct region *rp, u64 base, u64 mask)
{
	if (mask == 0) {
		pr_info(FW_BUG "MOT mask cannot be zero\n");
		return;
	}
	if (mask != GENMASK_ULL(PND_MAX_PHYS_BIT, __ffs(mask))) {
		pr_info(FW_BUG "MOT mask not power of two\n");
		return;
	}
	if (base & ~mask) {
		pr_info(FW_BUG "MOT region base/mask alignment error\n");
		return;
	}
	rp->base = base;
	/* Inclusive limit: fill the unmasked low bits, clamp to max phys bit. */
	rp->limit = (base | ~mask) & GENMASK_ULL(PND_MAX_PHYS_BIT, 0);
	rp->enabled = 1;
	edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, rp->limit);
}
325
/* Return true when @addr lies inside an enabled region (bounds inclusive). */
static bool in_region(struct region *rp, u64 addr)
{
	return rp->enabled && addr >= rp->base && addr <= rp->limit;
}
333
334 static int gen_sym_mask(struct b_cr_slice_channel_hash *p)
335 {
336 int mask = 0;
337
338 if (!p->slice_0_mem_disabled)
339 mask |= p->sym_slice0_channel_enabled;
340
341 if (!p->slice_1_disabled)
342 mask |= p->sym_slice1_channel_enabled << 2;
343
344 if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
345 mask &= 0x5;
346
347 return mask;
348 }
349
350 static int gen_asym_mask(struct b_cr_slice_channel_hash *p,
351 struct b_cr_asym_mem_region0_mchbar *as0,
352 struct b_cr_asym_mem_region1_mchbar *as1,
353 struct b_cr_asym_2way_mem_region_mchbar *as2way)
354 {
355 const int intlv[] = { 0x5, 0xA, 0x3, 0xC };
356 int mask = 0;
357
358 if (as2way->asym_2way_interleave_enable)
359 mask = intlv[as2way->asym_2way_intlv_mode];
360 if (as0->slice0_asym_enable)
361 mask |= (1 << as0->slice0_asym_channel_select);
362 if (as1->slice1_asym_enable)
363 mask |= (4 << as1->slice1_asym_channel_select);
364 if (p->slice_0_mem_disabled)
365 mask &= 0xc;
366 if (p->slice_1_disabled)
367 mask &= 0x3;
368 if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
369 mask &= 0x5;
370
371 return mask;
372 }
373
374 static struct b_cr_tolud_pci tolud;
375 static struct b_cr_touud_lo_pci touud_lo;
376 static struct b_cr_touud_hi_pci touud_hi;
377 static struct b_cr_asym_mem_region0_mchbar asym0;
378 static struct b_cr_asym_mem_region1_mchbar asym1;
379 static struct b_cr_asym_2way_mem_region_mchbar asym_2way;
380 static struct b_cr_mot_out_base_mchbar mot_base;
381 static struct b_cr_mot_out_mask_mchbar mot_mask;
382 static struct b_cr_slice_channel_hash chash;
383
384 /* Apollo Lake dunit */
385 /*
386 * Validated on board with just two DIMMs in the [0] and [2] positions
387 * in this array. Other port number matches documentation, but caution
388 * advised.
389 */
390 static const int apl_dports[APL_NUM_CHANNELS] = { 0x18, 0x10, 0x11, 0x19 };
391 static struct d_cr_drp0 drp0[APL_NUM_CHANNELS];
392
393 /* Denverton dunit */
394 static const int dnv_dports[DNV_NUM_CHANNELS] = { 0x10, 0x12 };
395 static struct d_cr_dsch dsch;
396 static struct d_cr_ecc_ctrl ecc_ctrl[DNV_NUM_CHANNELS];
397 static struct d_cr_drp drp[DNV_NUM_CHANNELS];
398 static struct d_cr_dmap dmap[DNV_NUM_CHANNELS];
399 static struct d_cr_dmap1 dmap1[DNV_NUM_CHANNELS];
400 static struct d_cr_dmap2 dmap2[DNV_NUM_CHANNELS];
401 static struct d_cr_dmap3 dmap3[DNV_NUM_CHANNELS];
402 static struct d_cr_dmap4 dmap4[DNV_NUM_CHANNELS];
403 static struct d_cr_dmap5 dmap5[DNV_NUM_CHANNELS];
404
/* Build an Apollo Lake asymmetric-region descriptor from its MCHBAR fields. */
static void apl_mk_region(char *name, struct region *rp, void *asym)
{
	struct b_cr_asym_mem_region0_mchbar *a = asym;

	/* Limit field is inclusive: extend by 2^APL_ASYMSHIFT - 1. */
	mk_region(name, rp,
			  U64_LSHIFT(a->slice0_asym_base, APL_ASYMSHIFT),
			  U64_LSHIFT(a->slice0_asym_limit, APL_ASYMSHIFT) +
			  GENMASK_ULL(APL_ASYMSHIFT - 1, 0));
}
414
/* Build a Denverton asymmetric-region descriptor from its register fields. */
static void dnv_mk_region(char *name, struct region *rp, void *asym)
{
	struct b_cr_asym_mem_region_denverton *a = asym;

	/* Limit field is inclusive: extend by 2^DNV_ASYMSHIFT - 1. */
	mk_region(name, rp,
			  U64_LSHIFT(a->slice_asym_base, DNV_ASYMSHIFT),
			  U64_LSHIFT(a->slice_asym_limit, DNV_ASYMSHIFT) +
			  GENMASK_ULL(DNV_ASYMSHIFT - 1, 0));
}
424
/*
 * Read the Apollo Lake specific registers: the 2-way asymmetric region
 * descriptor plus the per-channel drp0 DIMM parameters.  Succeeds when
 * at least one channel's drp0 read works.
 */
static int apl_get_registers(void)
{
	int ret = -ENODEV;
	int i;

	if (RD_REG(&asym_2way, b_cr_asym_2way_mem_region_mchbar))
		return -ENODEV;

	/*
	 * RD_REGP() will fail for unpopulated or non-existent
	 * DIMM slots. Return success if we find at least one DIMM.
	 */
	for (i = 0; i < APL_NUM_CHANNELS; i++)
		if (!RD_REGP(&drp0[i], d_cr_drp0, apl_dports[i]))
			ret = 0;

	return ret;
}
443
/*
 * Read the Denverton specific registers: the scheduler config plus, for
 * every channel, the ECC control, DIMM parameters and all six address
 * map (dmap*) registers.  Unlike the APL path, every read must succeed.
 */
static int dnv_get_registers(void)
{
	int i;

	if (RD_REG(&dsch, d_cr_dsch))
		return -ENODEV;

	for (i = 0; i < DNV_NUM_CHANNELS; i++)
		if (RD_REGP(&ecc_ctrl[i], d_cr_ecc_ctrl, dnv_dports[i]) ||
			RD_REGP(&drp[i], d_cr_drp, dnv_dports[i]) ||
			RD_REGP(&dmap[i], d_cr_dmap, dnv_dports[i]) ||
			RD_REGP(&dmap1[i], d_cr_dmap1, dnv_dports[i]) ||
			RD_REGP(&dmap2[i], d_cr_dmap2, dnv_dports[i]) ||
			RD_REGP(&dmap3[i], d_cr_dmap3, dnv_dports[i]) ||
			RD_REGP(&dmap4[i], d_cr_dmap4, dnv_dports[i]) ||
			RD_REGP(&dmap5[i], d_cr_dmap5, dnv_dports[i]))
			return -ENODEV;

	return 0;
}
464
/*
 * Read all the h/w config registers once here (they don't
 * change at run time. Figure out which address ranges have
 * which interleave characteristics.
 */
static int get_registers(void)
{
	/* Interleave selector bit per chash.interleave_mode (non-hvm case). */
	const int intlv[] = { 10, 11, 12, 12 };

	if (RD_REG(&tolud, b_cr_tolud_pci) ||
		RD_REG(&touud_lo, b_cr_touud_lo_pci) ||
		RD_REG(&touud_hi, b_cr_touud_hi_pci) ||
		RD_REG(&asym0, b_cr_asym_mem_region0_mchbar) ||
		RD_REG(&asym1, b_cr_asym_mem_region1_mchbar) ||
		RD_REG(&mot_base, b_cr_mot_out_base_mchbar) ||
		RD_REG(&mot_mask, b_cr_mot_out_mask_mchbar) ||
		RD_REG(&chash, b_cr_slice_channel_hash))
		return -ENODEV;

	if (ops->get_registers())
		return -ENODEV;

	if (ops->type == DNV) {
		/* PMI channel idx (always 0) for asymmetric region */
		asym0.slice0_asym_channel_select = 0;
		asym1.slice1_asym_channel_select = 0;
		/* PMI channel bitmap (always 1) for symmetric region */
		chash.sym_slice0_channel_enabled = 0x1;
		chash.sym_slice1_channel_enabled = 0x1;
	}

	/* Describe the enabled asymmetric regions and the magic MOT region. */
	if (asym0.slice0_asym_enable)
		ops->mk_region("as0", &as0, &asym0);

	if (asym1.slice1_asym_enable)
		ops->mk_region("as1", &as1, &asym1);

	if (asym_2way.asym_2way_interleave_enable) {
		mk_region("as2way", &as2,
			U64_LSHIFT(asym_2way.asym_2way_base, APL_ASYMSHIFT),
			U64_LSHIFT(asym_2way.asym_2way_limit, APL_ASYMSHIFT) +
			GENMASK_ULL(APL_ASYMSHIFT - 1, 0));
	}

	if (mot_base.imr_en) {
		mk_region_mask("mot", &mot,
			U64_LSHIFT(mot_base.mot_out_base, MOT_SHIFT),
			U64_LSHIFT(mot_mask.mot_out_mask, MOT_SHIFT));
	}

	/* Top of low memory (below 4GB MMIO gap) and top of high memory. */
	top_lm = U64_LSHIFT(tolud.tolud, 20);
	top_hm = U64_LSHIFT(touud_hi.touud, 32) | U64_LSHIFT(touud_lo.touud, 20);

	two_slices = !chash.slice_1_disabled &&
		!chash.slice_0_mem_disabled &&
		(chash.sym_slice0_channel_enabled != 0) &&
		(chash.sym_slice1_channel_enabled != 0);
	two_channels = !chash.ch_1_disabled &&
		!chash.enable_pmi_dual_data_mode &&
		((chash.sym_slice0_channel_enabled == 3) ||
		(chash.sym_slice1_channel_enabled == 3));

	sym_chan_mask = gen_sym_mask(&chash);
	asym_chan_mask = gen_asym_mask(&chash, &asym0, &asym1, &asym_2way);
	chan_mask = sym_chan_mask | asym_chan_mask;

	/*
	 * Pick the address bit used to select slice and/or channel:
	 * fixed bits 29/30 in hvm_mode, otherwise per interleave_mode.
	 */
	if (two_slices && !two_channels) {
		if (chash.hvm_mode)
			slice_selector = 29;
		else
			slice_selector = intlv[chash.interleave_mode];
	} else if (!two_slices && two_channels) {
		if (chash.hvm_mode)
			chan_selector = 29;
		else
			chan_selector = intlv[chash.interleave_mode];
	} else if (two_slices && two_channels) {
		if (chash.hvm_mode) {
			slice_selector = 29;
			chan_selector = 30;
		} else {
			slice_selector = intlv[chash.interleave_mode];
			chan_selector = intlv[chash.interleave_mode] + 1;
		}
	}

	/* Hash masks used by hash_by_mask() to choose slice/channel. */
	if (two_slices) {
		if (!chash.hvm_mode)
			slice_hash_mask = chash.slice_hash_mask << SLICE_HASH_MASK_LSB;
		if (!two_channels)
			slice_hash_mask |= BIT_ULL(slice_selector);
	}

	if (two_channels) {
		if (!chash.hvm_mode)
			chan_hash_mask = chash.ch_hash_mask << CH_HASH_MASK_LSB;
		if (!two_slices)
			chan_hash_mask |= BIT_ULL(chan_selector);
	}

	return 0;
}
567
568 /* Get a contiguous memory address (remove the MMIO gap) */
569 static u64 remove_mmio_gap(u64 sys)
570 {
571 return (sys < _4GB) ? sys : sys - (_4GB - top_lm);
572 }
573
/* Squeeze out one address bit: keep bits below @bitidx, shift the rest down. */
static void remove_addr_bit(u64 *addr, int bitidx)
{
	u64 keep;

	/* -1 means "no selector in use": leave the address untouched. */
	if (bitidx == -1)
		return;

	keep = (1ull << bitidx) - 1;
	*addr = ((*addr >> 1) & ~keep) | (*addr & keep);
}
585
/* XOR-fold the address bits selected by @mask down to a single parity bit. */
static int hash_by_mask(u64 addr, u64 mask)
{
	u64 v = addr & mask;
	int shift;

	for (shift = 32; shift >= 1; shift >>= 1)
		v ^= v >> shift;

	return (int)(v & 1);
}
600
/*
 * First stage decode. Take the system address and figure out which
 * second stage will deal with it based on interleave modes.
 * On success returns 0 with the PMI channel in *pmiidx and the PMI
 * address in *pmiaddr; on failure returns -EINVAL with a reason in @msg.
 */
static int sys2pmi(const u64 addr, u32 *pmiidx, u64 *pmiaddr, char *msg)
{
	u64 contig_addr, contig_base, contig_offset, contig_base_adj;
	int mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH :
						MOT_CHAN_INTLV_BIT_1SLC_2CH;
	int slice_intlv_bit_rm = SELECTOR_DISABLED;
	int chan_intlv_bit_rm = SELECTOR_DISABLED;
	/* Determine if address is in the MOT region. */
	bool mot_hit = in_region(&mot, addr);
	/* Calculate the number of symmetric regions enabled. */
	int sym_channels = hweight8(sym_chan_mask);

	/*
	 * The amount we need to shift the asym base can be determined by the
	 * number of enabled symmetric channels.
	 * NOTE: This can only work because symmetric memory is not supposed
	 * to do a 3-way interleave.
	 */
	int sym_chan_shift = sym_channels >> 1;

	/* Give up if address is out of range, or in MMIO gap */
	if (addr >= (1ul << PND_MAX_PHYS_BIT) ||
	   (addr >= top_lm && addr < _4GB) || addr >= top_hm) {
		snprintf(msg, PND2_MSG_SIZE, "Error address 0x%llx is not DRAM", addr);
		return -EINVAL;
	}

	/* Get a contiguous memory address (remove the MMIO gap) */
	contig_addr = remove_mmio_gap(addr);

	if (in_region(&as0, addr)) {
		/* Non-interleaved region on slice 0. */
		*pmiidx = asym0.slice0_asym_channel_select;

		contig_base = remove_mmio_gap(as0.base);
		contig_offset = contig_addr - contig_base;
		contig_base_adj = (contig_base >> sym_chan_shift) *
						  ((chash.sym_slice0_channel_enabled >> (*pmiidx & 1)) & 1);
		contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull);
	} else if (in_region(&as1, addr)) {
		/* Non-interleaved region on slice 1 (PMI channels 2/3). */
		*pmiidx = 2u + asym1.slice1_asym_channel_select;

		contig_base = remove_mmio_gap(as1.base);
		contig_offset = contig_addr - contig_base;
		contig_base_adj = (contig_base >> sym_chan_shift) *
						  ((chash.sym_slice1_channel_enabled >> (*pmiidx & 1)) & 1);
		contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull);
	} else if (in_region(&as2, addr) && (asym_2way.asym_2way_intlv_mode == 0x3ul)) {
		/* 2-way interleaved asymmetric region. */
		bool channel1;

		mot_intlv_bit = MOT_CHAN_INTLV_BIT_1SLC_2CH;
		*pmiidx = (asym_2way.asym_2way_intlv_mode & 1) << 1;
		channel1 = mot_hit ? ((bool)((addr >> mot_intlv_bit) & 1)) :
			hash_by_mask(contig_addr, chan_hash_mask);
		*pmiidx |= (u32)channel1;

		contig_base = remove_mmio_gap(as2.base);
		chan_intlv_bit_rm = mot_hit ? mot_intlv_bit : chan_selector;
		contig_offset = contig_addr - contig_base;
		remove_addr_bit(&contig_offset, chan_intlv_bit_rm);
		contig_addr = (contig_base >> sym_chan_shift) + contig_offset;
	} else {
		/* Otherwise we're in normal, boring symmetric mode. */
		*pmiidx = 0u;

		if (two_slices) {
			bool slice1;

			if (mot_hit) {
				slice_intlv_bit_rm = MOT_SLC_INTLV_BIT;
				slice1 = (addr >> MOT_SLC_INTLV_BIT) & 1;
			} else {
				slice_intlv_bit_rm = slice_selector;
				slice1 = hash_by_mask(addr, slice_hash_mask);
			}

			*pmiidx = (u32)slice1 << 1;
		}

		if (two_channels) {
			bool channel1;

			mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH :
							MOT_CHAN_INTLV_BIT_1SLC_2CH;

			if (mot_hit) {
				chan_intlv_bit_rm = mot_intlv_bit;
				channel1 = (addr >> mot_intlv_bit) & 1;
			} else {
				chan_intlv_bit_rm = chan_selector;
				channel1 = hash_by_mask(contig_addr, chan_hash_mask);
			}

			*pmiidx |= (u32)channel1;
		}
	}

	/* Remove the chan_selector bit first */
	remove_addr_bit(&contig_addr, chan_intlv_bit_rm);
	/* Remove the slice bit (we remove it second because it must be lower */
	remove_addr_bit(&contig_addr, slice_intlv_bit_rm);
	*pmiaddr = contig_addr;

	return 0;
}
709
710 /* Translate PMI address to memory (rank, row, bank, column) */
711 #define C(n) (0x10 | (n)) /* column */
712 #define B(n) (0x20 | (n)) /* bank */
713 #define R(n) (0x40 | (n)) /* row */
714 #define RS (0x80) /* rank */
715
716 /* addrdec values */
717 #define AMAP_1KB 0
718 #define AMAP_2KB 1
719 #define AMAP_4KB 2
720 #define AMAP_RSVD 3
721
722 /* dden values */
723 #define DEN_4Gb 0
724 #define DEN_8Gb 2
725
726 /* dwid values */
727 #define X8 0
728 #define X16 1
729
/*
 * Geometry table: maps each PMI address bit to its role — column C(n),
 * bank B(n), row R(n) or rank select RS — for every supported
 * combination of address map (1K/2K/4K page), device density (4/8 Gb)
 * and device width (x8/x16).  Zero entries mark bits beyond the end of
 * the address for that geometry.
 */
static struct dimm_geometry {
	u8 addrdec;
	u8 dden;
	u8 dwid;
	u8 rowbits, colbits;
	u16 bits[PMI_ADDRESS_WIDTH];
} dimms[] = {
	{
		.addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X16,
		.rowbits = 15, .colbits = 10,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0),
			R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9),
			R(10), C(7), C(8), C(9), R(11), RS, R(12), R(13), R(14),
			0, 0, 0, 0
		}
	},
	{
		.addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X8,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0),
			R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9),
			R(10), C(7), C(8), C(9), R(11), RS, R(12), R(13), R(14),
			R(15), 0, 0, 0
		}
	},
	{
		.addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X16,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0),
			R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9),
			R(10), C(7), C(8), C(9), R(11), RS, R(12), R(13), R(14),
			R(15), 0, 0, 0
		}
	},
	{
		.addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X8,
		.rowbits = 16, .colbits = 11,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0),
			R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9),
			R(10), C(7), C(8), C(9), R(11), RS, C(11), R(12), R(13),
			R(14), R(15), 0, 0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X16,
		.rowbits = 15, .colbits = 10,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2),
			R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8),
			R(9), R(10), C(8), C(9), R(11), RS, R(12), R(13), R(14),
			0, 0, 0, 0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X8,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2),
			R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8),
			R(9), R(10), C(8), C(9), R(11), RS, R(12), R(13), R(14),
			R(15), 0, 0, 0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X16,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2),
			R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8),
			R(9), R(10), C(8), C(9), R(11), RS, R(12), R(13), R(14),
			R(15), 0, 0, 0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X8,
		.rowbits = 16, .colbits = 11,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2),
			R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8),
			R(9), R(10), C(8), C(9), R(11), RS, C(11), R(12), R(13),
			R(14), R(15), 0, 0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X16,
		.rowbits = 15, .colbits = 10,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1),
			B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7),
			R(8), R(9), R(10), C(9), R(11), RS, R(12), R(13), R(14),
			0, 0, 0, 0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X8,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1),
			B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7),
			R(8), R(9), R(10), C(9), R(11), RS, R(12), R(13), R(14),
			R(15), 0, 0, 0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X16,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1),
			B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7),
			R(8), R(9), R(10), C(9), R(11), RS, R(12), R(13), R(14),
			R(15), 0, 0, 0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X8,
		.rowbits = 16, .colbits = 11,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1),
			B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7),
			R(8), R(9), R(10), C(9), R(11), RS, C(11), R(12), R(13),
			R(14), R(15), 0, 0
		}
	}
};
858
/*
 * Bank-address hashing: contribution of bank bit @idx, with @shft
 * compensating for the address-map page size (addrdec).
 */
static int bank_hash(u64 pmiaddr, int idx, int shft)
{
	int bhash = 0;

	if (idx == 0) {
		bhash = ((pmiaddr >> (12 + shft)) ^ (pmiaddr >> (9 + shft))) & 1;
	} else if (idx == 1) {
		bhash = (((pmiaddr >> (10 + shft)) ^ (pmiaddr >> (8 + shft))) & 1) << 1;
		bhash ^= ((pmiaddr >> 22) & 1) << 1;
	} else if (idx == 2) {
		bhash = (((pmiaddr >> (13 + shft)) ^ (pmiaddr >> (11 + shft))) & 1) << 2;
	}

	return bhash;
}
878
/* Rank-select hashing: parity of PMI address bits 16 and 10. */
static int rank_hash(u64 pmiaddr)
{
	u64 x = pmiaddr >> 10;

	return (int)(((x >> 6) ^ x) & 1);
}
883
/* Second stage decode. Compute rank, bank, row & column. */
static int apl_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
					   struct dram_addr *daddr, char *msg)
{
	struct d_cr_drp0 *cr_drp0 = &drp0[pmiidx];
	struct pnd2_pvt *pvt = mci->pvt_info;
	int g = pvt->dimm_geom[pmiidx];
	struct dimm_geometry *d = &dimms[g];
	int column = 0, bank = 0, row = 0, rank = 0;
	int i, idx, type, skiprs = 0;

	/* Walk the PMI address LSB-first, routing each bit per the geometry table. */
	for (i = 0; i < PMI_ADDRESS_WIDTH; i++) {
		int bit = (pmiaddr >> i) & 1;

		if (i + skiprs >= PMI_ADDRESS_WIDTH) {
			snprintf(msg, PND2_MSG_SIZE, "Bad dimm_geometry[] table\n");
			return -EINVAL;
		}

		type = d->bits[i + skiprs] & ~0xf;
		idx = d->bits[i + skiprs] & 0xf;

		/*
		 * On single rank DIMMs ignore the rank select bit
		 * and shift remainder of "bits[]" down one place.
		 */
		if (type == RS && (cr_drp0->rken0 + cr_drp0->rken1) == 1) {
			skiprs = 1;
			type = d->bits[i + skiprs] & ~0xf;
			idx = d->bits[i + skiprs] & 0xf;
		}

		switch (type) {
		case C(0):
			column |= (bit << idx);
			break;
		case B(0):
			bank |= (bit << idx);
			if (cr_drp0->bahen)
				bank ^= bank_hash(pmiaddr, idx, d->addrdec);
			break;
		case R(0):
			row |= (bit << idx);
			break;
		case RS:
			rank = bit;
			if (cr_drp0->rsien)
				rank ^= rank_hash(pmiaddr);
			break;
		default:
			/* Zero entry: past the last mapped bit for this geometry. */
			if (bit) {
				snprintf(msg, PND2_MSG_SIZE, "Bad translation\n");
				return -EINVAL;
			}
			goto done;
		}
	}

done:
	daddr->col = column;
	daddr->bank = bank;
	daddr->row = row;
	daddr->rank = rank;
	daddr->dimm = 0;

	return 0;
}
951
/* Pluck bit "in" from pmiaddr and return value shifted to bit "out" */
#define dnv_get_bit(pmi, in, out) ((int)(((pmi) >> (in)) & 1u) << (out))

/*
 * Second stage decode for Denverton: assemble rank/dimm/bank/row/column
 * from the PMI address using the per-channel dmap* mapping registers.
 * Row fields of 31 and a ca11 field of 0x3f mean "bit not mapped".
 * Always returns 0.
 */
static int dnv_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
					   struct dram_addr *daddr, char *msg)
{
	/* Rank 0 or 1 */
	daddr->rank = dnv_get_bit(pmiaddr, dmap[pmiidx].rs0 + 13, 0);
	/* Rank 2 or 3 */
	daddr->rank |= dnv_get_bit(pmiaddr, dmap[pmiidx].rs1 + 13, 1);

	/*
	 * Normally ranks 0,1 are DIMM0, and 2,3 are DIMM1, but we
	 * flip them if DIMM1 is larger than DIMM0.
	 */
	daddr->dimm = (daddr->rank >= 2) ^ drp[pmiidx].dimmflip;

	daddr->bank = dnv_get_bit(pmiaddr, dmap[pmiidx].ba0 + 6, 0);
	daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].ba1 + 6, 1);
	daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg0 + 6, 2);
	if (dsch.ddr4en)
		daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg1 + 6, 3);
	/* Optional bank-address XOR hashing with row/column address bits. */
	if (dmap1[pmiidx].bxor) {
		if (dsch.ddr4en) {
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 0);
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 1);
			if (dsch.chan_width == 0)
				/* 64/72 bit dram channel width */
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2);
			else
				/* 32/40 bit dram channel width */
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2);
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 3);
		} else {
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 0);
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 1);
			if (dsch.chan_width == 0)
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2);
			else
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2);
		}
	}

	daddr->row = dnv_get_bit(pmiaddr, dmap2[pmiidx].row0 + 6, 0);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row1 + 6, 1);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 2);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row3 + 6, 3);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row4 + 6, 4);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row5 + 6, 5);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 6);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 7);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row8 + 6, 8);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row9 + 6, 9);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row10 + 6, 10);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row11 + 6, 11);
	daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row12 + 6, 12);
	daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row13 + 6, 13);
	/* Row bits 14-17 are only present on larger devices (31 = unmapped). */
	if (dmap4[pmiidx].row14 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row14 + 6, 14);
	if (dmap4[pmiidx].row15 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row15 + 6, 15);
	if (dmap4[pmiidx].row16 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row16 + 6, 16);
	if (dmap4[pmiidx].row17 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row17 + 6, 17);

	daddr->col = dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 3);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 4);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca5 + 6, 5);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca6 + 6, 6);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca7 + 6, 7);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca8 + 6, 8);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca9 + 6, 9);
	if (!dsch.ddr4en && dmap1[pmiidx].ca11 != 0x3f)
		daddr->col |= dnv_get_bit(pmiaddr, dmap1[pmiidx].ca11 + 13, 11);

	return 0;
}
1030
1031 static int check_channel(int ch)
1032 {
1033 if (drp0[ch].dramtype != 0) {
1034 pnd2_printk(KERN_INFO, "Unsupported DIMM in channel %d\n", ch);
1035 return 1;
1036 } else if (drp0[ch].eccen == 0) {
1037 pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch);
1038 return 1;
1039 }
1040 return 0;
1041 }
1042
1043 static int apl_check_ecc_active(void)
1044 {
1045 int i, ret = 0;
1046
1047 /* Check dramtype and ECC mode for each present DIMM */
1048 for (i = 0; i < APL_NUM_CHANNELS; i++)
1049 if (chan_mask & BIT(i))
1050 ret += check_channel(i);
1051 return ret ? -EINVAL : 0;
1052 }
1053
1054 #define DIMMS_PRESENT(d) ((d)->rken0 + (d)->rken1 + (d)->rken2 + (d)->rken3)
1055
1056 static int check_unit(int ch)
1057 {
1058 struct d_cr_drp *d = &drp[ch];
1059
1060 if (DIMMS_PRESENT(d) && !ecc_ctrl[ch].eccen) {
1061 pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch);
1062 return 1;
1063 }
1064 return 0;
1065 }
1066
1067 static int dnv_check_ecc_active(void)
1068 {
1069 int i, ret = 0;
1070
1071 for (i = 0; i < DNV_NUM_CHANNELS; i++)
1072 ret += check_unit(i);
1073 return ret ? -EINVAL : 0;
1074 }
1075
/*
 * Translate a system physical address into a DRAM address
 * (channel/DIMM/rank/bank/row/column), in two stages:
 *
 *  1. sys2pmi() undoes slice/channel interleaving, producing a PMI
 *     channel index and PMI address (common across platforms).
 *  2. The platform-specific ops->pmi2mem() finishes the decode.
 *
 * Returns 0 on success. On failure a non-zero value is returned and
 * @msg holds a description of the error.
 */
static int get_memory_error_data(struct mem_ctl_info *mci, u64 addr,
				 struct dram_addr *daddr, char *msg)
{
	u64 pmiaddr;
	u32 pmiidx;
	int ret;

	ret = sys2pmi(addr, &pmiidx, &pmiaddr, msg);
	if (ret)
		return ret;

	/* Drop the platform's address-granularity bits */
	pmiaddr >>= ops->pmiaddr_shift;
	/* pmi channel idx to dimm channel idx */
	pmiidx >>= ops->pmiidx_shift;
	daddr->chan = pmiidx;

	ret = ops->pmi2mem(mci, pmiaddr, pmiidx, daddr, msg);
	if (ret)
		return ret;

	edac_dbg(0, "SysAddr=%llx PmiAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n",
		 addr, pmiaddr, daddr->chan, daddr->dimm, daddr->rank, daddr->bank, daddr->row, daddr->col);

	return 0;
}
1101
/*
 * Decode one memory-related machine check record and report it to the
 * EDAC core via edac_mc_handle_error(). On a successful address decode
 * @daddr is filled in; records without a valid address (ADDRV clear)
 * are silently dropped.
 */
static void pnd2_mce_output_error(struct mem_ctl_info *mci, const struct mce *m,
				  struct dram_addr *daddr)
{
	enum hw_event_mc_err_type tp_event;
	char *optype, msg[PND2_MSG_SIZE];
	bool ripv = m->mcgstatus & MCG_STATUS_RIPV;
	bool overflow = m->status & MCI_STATUS_OVER;
	bool uc_err = m->status & MCI_STATUS_UC;
	bool recov = m->status & MCI_STATUS_S;
	u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
	u32 mscod = GET_BITFIELD(m->status, 16, 31);
	u32 errcode = GET_BITFIELD(m->status, 0, 15);
	u32 optypenum = GET_BITFIELD(m->status, 4, 6);
	int rc;

	/* Severity: corrected unless UC is set; UC + RIPV is treated as fatal */
	tp_event = uc_err ? (ripv ? HW_EVENT_ERR_FATAL : HW_EVENT_ERR_UNCORRECTED) :
						 HW_EVENT_ERR_CORRECTED;

	/*
	 * According with Table 15-9 of the Intel Architecture spec vol 3A,
	 * memory errors should fit in this mask:
	 *	000f 0000 1mmm cccc (binary)
	 * where:
	 *	f = Correction Report Filtering Bit. If 1, subsequent errors
	 *	    won't be shown
	 *	mmm = error type
	 *	cccc = channel
	 * If the mask doesn't match, report an error to the parsing logic
	 */
	if (!((errcode & 0xef80) == 0x80)) {
		optype = "Can't parse: it is not a mem";
	} else {
		/* mmm field (bits 4-6) selects the memory operation type */
		switch (optypenum) {
		case 0:
			optype = "generic undef request error";
			break;
		case 1:
			optype = "memory read error";
			break;
		case 2:
			optype = "memory write error";
			break;
		case 3:
			optype = "addr/cmd error";
			break;
		case 4:
			optype = "memory scrubbing error";
			break;
		default:
			optype = "reserved";
			break;
		}
	}

	/* Only decode errors with a valid address (ADDRV) */
	if (!(m->status & MCI_STATUS_ADDRV))
		return;

	rc = get_memory_error_data(mci, m->addr, daddr, msg);
	if (rc)
		goto address_error;

	snprintf(msg, sizeof(msg),
		"%s%s err_code:%04x:%04x channel:%d DIMM:%d rank:%d row:%d bank:%d col:%d",
		 overflow ? " OVERFLOW" : "", (uc_err && recov) ? " recoverable" : "", mscod,
		 errcode, daddr->chan, daddr->dimm, daddr->rank, daddr->row, daddr->bank, daddr->col);

	edac_dbg(0, "%s\n", msg);

	/* Call the helper to output message */
	edac_mc_handle_error(tp_event, mci, core_err_cnt, m->addr >> PAGE_SHIFT,
						 m->addr & ~PAGE_MASK, 0, daddr->chan, daddr->dimm, -1, optype, msg);

	return;

address_error:
	/* Decode failed: report with msg from get_memory_error_data(), no location */
	edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0, -1, -1, -1, msg, "");
}
1180
/*
 * Populate EDAC dimm_info (size, type, label) for each populated APL
 * channel, by matching the channel's DRP0 geometry against the known
 * DIMM table. Also caches the matched geometry index in pvt->dimm_geom
 * for later address decode.
 */
static void apl_get_dimm_config(struct mem_ctl_info *mci)
{
	struct pnd2_pvt *pvt = mci->pvt_info;
	struct dimm_info *dimm;
	struct d_cr_drp0 *d;
	u64 capacity;
	int i, g;

	for (i = 0; i < APL_NUM_CHANNELS; i++) {
		/* Skip channels with no DIMM present */
		if (!(chan_mask & BIT(i)))
			continue;

		dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, 0, 0);
		if (!dimm) {
			edac_dbg(0, "No allocated DIMM for channel %d\n", i);
			continue;
		}

		/* Find the geometry table entry matching this channel's DRP0 */
		d = &drp0[i];
		for (g = 0; g < ARRAY_SIZE(dimms); g++)
			if (dimms[g].addrdec == d->addrdec &&
			    dimms[g].dden == d->dden &&
			    dimms[g].dwid == d->dwid)
				break;

		if (g == ARRAY_SIZE(dimms)) {
			edac_dbg(0, "Channel %d: unrecognized DIMM\n", i);
			continue;
		}

		pvt->dimm_geom[i] = g;
		/*
		 * ranks * 8 * rows * cols; the ">> (20 - 3)" conversion to MiB
		 * below implies each counted unit is an 8-byte quantity --
		 * TODO confirm against the platform memory map docs.
		 */
		capacity = (d->rken0 + d->rken1) * 8 * (1ul << dimms[g].rowbits) *
			(1ul << dimms[g].colbits);
		edac_dbg(0, "Channel %d: %lld MByte DIMM\n", i, capacity >> (20 - 3));
		dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
		dimm->grain = 32;
		/* dwid == 0 denotes x8 devices, otherwise x16 */
		dimm->dtype = (d->dwid == 0) ? DEV_X8 : DEV_X16;
		dimm->mtype = MEM_DDR3;
		dimm->edac_mode = EDAC_SECDED;
		snprintf(dimm->label, sizeof(dimm->label), "Slice#%d_Chan#%d", i / 2, i % 2);
	}
}
1223
/* Map the dimmdwid register encoding to an EDAC device-width type */
static const int dnv_dtypes[] = {
	DEV_X8, DEV_X4, DEV_X16, DEV_UNKNOWN
};
1227
/*
 * Populate EDAC dimm_info (size, type, label) for each populated DNV
 * channel/DIMM slot. Row and column widths are inferred from the DMAP
 * registers; DDR4 geometry is fixed (16 banks, 10 column bits).
 */
static void dnv_get_dimm_config(struct mem_ctl_info *mci)
{
	int i, j, ranks_of_dimm[DNV_MAX_DIMMS], banks, rowbits, colbits, memtype;
	struct dimm_info *dimm;
	struct d_cr_drp *d;
	u64 capacity;

	if (dsch.ddr4en) {
		memtype = MEM_DDR4;
		banks = 16;
		colbits = 10;
	} else {
		memtype = MEM_DDR3;
		banks = 8;
		/* DDR3 colbits is determined per channel in the loop below */
	}

	for (i = 0; i < DNV_NUM_CHANNELS; i++) {
		/*
		 * A rowN map value of 31 marks the first unused row bit
		 * (matches the "!= 31" guards in the pmi2mem decode above).
		 */
		if (dmap4[i].row14 == 31)
			rowbits = 14;
		else if (dmap4[i].row15 == 31)
			rowbits = 15;
		else if (dmap4[i].row16 == 31)
			rowbits = 16;
		else if (dmap4[i].row17 == 31)
			rowbits = 17;
		else
			rowbits = 18;

		if (memtype == MEM_DDR3) {
			/* ca11 == 0x3f means column address bit 11 is unused */
			if (dmap1[i].ca11 != 0x3f)
				colbits = 12;
			else
				colbits = 10;
		}

		d = &drp[i];
		/* DIMM0 is present if rank0 and/or rank1 is enabled */
		ranks_of_dimm[0] = d->rken0 + d->rken1;
		/* DIMM1 is present if rank2 and/or rank3 is enabled */
		ranks_of_dimm[1] = d->rken2 + d->rken3;

		for (j = 0; j < DNV_MAX_DIMMS; j++) {
			if (!ranks_of_dimm[j])
				continue;

			dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, j, 0);
			if (!dimm) {
				edac_dbg(0, "No allocated DIMM for channel %d DIMM %d\n", i, j);
				continue;
			}

			/*
			 * ranks * banks * rows * cols; ">> (20 - 3)" converts to
			 * MiB, implying 8-byte units -- TODO confirm.
			 */
			capacity = ranks_of_dimm[j] * banks * (1ul << rowbits) * (1ul << colbits);
			edac_dbg(0, "Channel %d DIMM %d: %lld MByte DIMM\n", i, j, capacity >> (20 - 3));
			dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
			dimm->grain = 32;
			/*
			 * NOTE(review): this index looks inverted -- j == 0 is
			 * DIMM0 yet it reads dimmdwid1 (and vice versa). Verify
			 * against the DRP register definition before changing.
			 */
			dimm->dtype = dnv_dtypes[j ? d->dimmdwid0 : d->dimmdwid1];
			dimm->mtype = memtype;
			dimm->edac_mode = EDAC_SECDED;
			snprintf(dimm->label, sizeof(dimm->label), "Chan#%d_DIMM#%d", i, j);
		}
	}
}
1290
1291 static int pnd2_register_mci(struct mem_ctl_info **ppmci)
1292 {
1293 struct edac_mc_layer layers[2];
1294 struct mem_ctl_info *mci;
1295 struct pnd2_pvt *pvt;
1296 int rc;
1297
1298 rc = ops->check_ecc();
1299 if (rc < 0)
1300 return rc;
1301
1302 /* Allocate a new MC control structure */
1303 layers[0].type = EDAC_MC_LAYER_CHANNEL;
1304 layers[0].size = ops->channels;
1305 layers[0].is_virt_csrow = false;
1306 layers[1].type = EDAC_MC_LAYER_SLOT;
1307 layers[1].size = ops->dimms_per_channel;
1308 layers[1].is_virt_csrow = true;
1309 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
1310 if (!mci)
1311 return -ENOMEM;
1312
1313 pvt = mci->pvt_info;
1314 memset(pvt, 0, sizeof(*pvt));
1315
1316 mci->mod_name = "pnd2_edac.c";
1317 mci->dev_name = ops->name;
1318 mci->ctl_name = "Pondicherry2";
1319
1320 /* Get dimm basic config and the memory layout */
1321 ops->get_dimm_config(mci);
1322
1323 if (edac_mc_add_mc(mci)) {
1324 edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
1325 edac_mc_free(mci);
1326 return -EINVAL;
1327 }
1328
1329 *ppmci = mci;
1330
1331 return 0;
1332 }
1333
1334 static void pnd2_unregister_mci(struct mem_ctl_info *mci)
1335 {
1336 if (unlikely(!mci || !mci->pvt_info)) {
1337 pnd2_printk(KERN_ERR, "Couldn't find mci handler\n");
1338 return;
1339 }
1340
1341 /* Remove MC sysfs nodes */
1342 edac_mc_del_mc(NULL);
1343 edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
1344 edac_mc_free(mci);
1345 }
1346
/*
 * Callback function registered with core kernel mce code.
 * Called once for each logged error.
 *
 * Returns NOTIFY_DONE to pass the record on to other consumers, or
 * NOTIFY_STOP once the error has been decoded and reported here.
 */
static int pnd2_mce_check_error(struct notifier_block *nb, unsigned long val, void *data)
{
	struct mce *mce = (struct mce *)data;
	struct mem_ctl_info *mci;
	struct dram_addr daddr;
	char *type;

	if (edac_get_report_status() == EDAC_REPORTING_DISABLED)
		return NOTIFY_DONE;

	mci = pnd2_mci;
	if (!mci)
		return NOTIFY_DONE;

	/*
	 * Just let mcelog handle it if the error is
	 * outside the memory controller. A memory error
	 * is indicated by bit 7 = 1 and bits = 8-11,13-15 = 0.
	 * bit 12 has an special meaning.
	 */
	if ((mce->status & 0xefff) >> 7 != 1)
		return NOTIFY_DONE;

	/* MCIP set means this record came from a machine-check exception */
	if (mce->mcgstatus & MCG_STATUS_MCIP)
		type = "Exception";
	else
		type = "Event";

	pnd2_mc_printk(mci, KERN_INFO, "HANDLING MCE MEMORY ERROR\n");
	pnd2_mc_printk(mci, KERN_INFO, "CPU %u: Machine Check %s: %llx Bank %u: %llx\n",
				   mce->extcpu, type, mce->mcgstatus, mce->bank, mce->status);
	pnd2_mc_printk(mci, KERN_INFO, "TSC %llx ", mce->tsc);
	pnd2_mc_printk(mci, KERN_INFO, "ADDR %llx ", mce->addr);
	pnd2_mc_printk(mci, KERN_INFO, "MISC %llx ", mce->misc);
	pnd2_mc_printk(mci, KERN_INFO, "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x\n",
				   mce->cpuvendor, mce->cpuid, mce->time, mce->socketid, mce->apicid);

	pnd2_mce_output_error(mci, mce, &daddr);

	/* Advise mcelog that the error was handled */
	return NOTIFY_STOP;
}
1393
/* Hooked into the MCE decode chain by mce_register_decode_chain() */
static struct notifier_block pnd2_mce_dec = {
	.notifier_call = pnd2_mce_check_error,
};
1397
#ifdef CONFIG_EDAC_DEBUG
/*
 * Write an address to this file to exercise the address decode
 * logic in this driver.
 */
static u64 pnd2_fake_addr;
#define PND2_BLOB_SIZE 1024
/* Formatted decode results, exposed read-only via a debugfs blob */
static char pnd2_result[PND2_BLOB_SIZE];
static struct dentry *pnd2_test;
static struct debugfs_blob_wrapper pnd2_blob = {
	.data = pnd2_result,
	.size = 0
};
1411
1412 static int debugfs_u64_set(void *data, u64 val)
1413 {
1414 struct dram_addr daddr;
1415 struct mce m;
1416
1417 *(u64 *)data = val;
1418 m.mcgstatus = 0;
1419 /* ADDRV + MemRd + Unknown channel */
1420 m.status = MCI_STATUS_ADDRV + 0x9f;
1421 m.addr = val;
1422 pnd2_mce_output_error(pnd2_mci, &m, &daddr);
1423 snprintf(pnd2_blob.data, PND2_BLOB_SIZE,
1424 "SysAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n",
1425 m.addr, daddr.chan, daddr.dimm, daddr.rank, daddr.bank, daddr.row, daddr.col);
1426 pnd2_blob.size = strlen(pnd2_blob.data);
1427
1428 return 0;
1429 }
1430 DEFINE_DEBUGFS_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");
1431
/* Create the pnd2_test debugfs dir: a write-only decode trigger plus a result blob. */
static void setup_pnd2_debug(void)
{
	pnd2_test = edac_debugfs_create_dir("pnd2_test");
	edac_debugfs_create_file("pnd2_debug_addr", 0200, pnd2_test,
							 &pnd2_fake_addr, &fops_u64_wo);
	debugfs_create_blob("pnd2_debug_results", 0400, pnd2_test, &pnd2_blob);
}
1439
/* Remove the debugfs tree created by setup_pnd2_debug(). */
static void teardown_pnd2_debug(void)
{
	debugfs_remove_recursive(pnd2_test);
}
#else
/* No-op stubs when EDAC debugfs support is compiled out */
static void setup_pnd2_debug(void) {}
static void teardown_pnd2_debug(void) {}
#endif /* CONFIG_EDAC_DEBUG */
1448
1449
1450 static int pnd2_probe(void)
1451 {
1452 int rc;
1453
1454 edac_dbg(2, "\n");
1455 rc = get_registers();
1456 if (rc)
1457 return rc;
1458
1459 return pnd2_register_mci(&pnd2_mci);
1460 }
1461
1462 static void pnd2_remove(void)
1463 {
1464 edac_dbg(0, "\n");
1465 pnd2_unregister_mci(pnd2_mci);
1466 }
1467
/* Apollo Lake dunit: 4 channels, one DIMM per channel */
static struct dunit_ops apl_ops = {
	.name = "pnd2/apl",
	.type = APL,
	.pmiaddr_shift = LOG2_PMI_ADDR_GRANULARITY,
	.pmiidx_shift = 0,
	.channels = APL_NUM_CHANNELS,
	.dimms_per_channel = 1,
	.rd_reg = apl_rd_reg,
	.get_registers = apl_get_registers,
	.check_ecc = apl_check_ecc_active,
	.mk_region = apl_mk_region,
	.get_dimm_config = apl_get_dimm_config,
	.pmi2mem = apl_pmi2mem,
};
1482
/* Denverton dunit: 2 channels, up to two DIMMs per channel */
static struct dunit_ops dnv_ops = {
	.name = "pnd2/dnv",
	.type = DNV,
	.pmiaddr_shift = 0,
	.pmiidx_shift = 1,
	.channels = DNV_NUM_CHANNELS,
	.dimms_per_channel = 2,
	.rd_reg = dnv_rd_reg,
	.get_registers = dnv_get_registers,
	.check_ecc = dnv_check_ecc_active,
	.mk_region = dnv_mk_region,
	.get_dimm_config = dnv_get_dimm_config,
	.pmi2mem = dnv_pmi2mem,
};
1497
/* Supported CPU models; driver_data selects the matching dunit ops */
static const struct x86_cpu_id pnd2_cpuids[] = {
	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT, 0, (kernel_ulong_t)&apl_ops },
	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_DENVERTON, 0, (kernel_ulong_t)&dnv_ops },
	{ }
};
MODULE_DEVICE_TABLE(x86cpu, pnd2_cpuids);
1504
1505 static int __init pnd2_init(void)
1506 {
1507 const struct x86_cpu_id *id;
1508 int rc;
1509
1510 edac_dbg(2, "\n");
1511
1512 id = x86_match_cpu(pnd2_cpuids);
1513 if (!id)
1514 return -ENODEV;
1515
1516 ops = (struct dunit_ops *)id->driver_data;
1517
1518 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
1519 opstate_init();
1520
1521 rc = pnd2_probe();
1522 if (rc < 0) {
1523 pnd2_printk(KERN_ERR, "Failed to register device with error %d.\n", rc);
1524 return rc;
1525 }
1526
1527 if (!pnd2_mci)
1528 return -ENODEV;
1529
1530 mce_register_decode_chain(&pnd2_mce_dec);
1531 setup_pnd2_debug();
1532
1533 return 0;
1534 }
1535
1536 static void __exit pnd2_exit(void)
1537 {
1538 edac_dbg(2, "\n");
1539 teardown_pnd2_debug();
1540 mce_unregister_decode_chain(&pnd2_mce_dec);
1541 pnd2_remove();
1542 }
1543
module_init(pnd2_init);
module_exit(pnd2_exit);

/* edac_op_state selects polled vs NMI reporting; consumed by opstate_init() */
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Tony Luck");
MODULE_DESCRIPTION("MC Driver for Intel SoC using Pondicherry memory controller");