/*
 * Driver for Pondicherry2 memory controller.
 *
 * Copyright (c) 2016, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * [Derived from sb_edac.c]
 *
 * Translation of system physical addresses to DIMM addresses
 * is a two stage process:
 *
 * First the Pondicherry 2 memory controller handles slice and channel interleaving
 * in "sys2pmi()". This is (almost) completely common between platforms.
 *
 * Then a platform specific dunit (DIMM unit) completes the process to provide DIMM,
 * rank, bank, row and column using the appropriate "dunit_ops" functions/parameters.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/edac.h>
#include <linux/mmzone.h>
#include <linux/smp.h>
#include <linux/bitmap.h>
#include <linux/math64.h>
#include <linux/mod_devicetable.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/processor.h>
#include <asm/mce.h>

#include "edac_mc.h"
#include "edac_module.h"
#include "pnd2_edac.h"

#define APL_NUM_CHANNELS	4
#define DNV_NUM_CHANNELS	2
#define DNV_MAX_DIMMS		2 /* Max DIMMs per channel */

enum type {
	APL,
	DNV, /* All requests go to PMI CH0 on each slice (CH1 disabled) */
};

struct dram_addr {
	int chan;
	int dimm;
	int rank;
	int bank;
	int row;
	int col;
};

struct pnd2_pvt {
	int dimm_geom[APL_NUM_CHANNELS];
	u64 tolm, tohm;
};

/*
 * System address space is divided into multiple regions with
 * different interleave rules in each. The as0/as1 regions
 * have no interleaving at all. The as2 region is interleaved
 * between two channels. The mot region is magic and may overlap
 * other regions, with its interleave rules taking precedence.
 * Addresses not in any of these regions are interleaved across
 * all four channels.
 */
static struct region {
	u64 base;
	u64 limit;
	u8 enabled;
} mot, as0, as1, as2;

static struct dunit_ops {
	char *name;
	enum type type;
	int pmiaddr_shift;
	int pmiidx_shift;
	int channels;
	int dimms_per_channel;
	int (*rd_reg)(int port, int off, int op, void *data, size_t sz, char *name);
	int (*get_registers)(void);
	int (*check_ecc)(void);
	void (*mk_region)(char *name, struct region *rp, void *asym);
	void (*get_dimm_config)(struct mem_ctl_info *mci);
	int (*pmi2mem)(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
		       struct dram_addr *daddr, char *msg);
} *ops;

static struct mem_ctl_info *pnd2_mci;

#define PND2_MSG_SIZE	256

/* Debug macros */
#define pnd2_printk(level, fmt, arg...)	\
	edac_printk(level, "pnd2", fmt, ##arg)

#define pnd2_mc_printk(mci, level, fmt, arg...)	\
	edac_mc_chipset_printk(mci, level, "pnd2", fmt, ##arg)

#define MOT_CHAN_INTLV_BIT_1SLC_2CH 12
#define MOT_CHAN_INTLV_BIT_2SLC_2CH 13
#define SELECTOR_DISABLED (-1)
#define _4GB (1ul << 32)

#define PMI_ADDRESS_WIDTH	31
#define PND_MAX_PHYS_BIT	39

#define APL_ASYMSHIFT		28
#define DNV_ASYMSHIFT		31
#define CH_HASH_MASK_LSB	6
#define SLICE_HASH_MASK_LSB	6
#define MOT_SLC_INTLV_BIT	12
#define LOG2_PMI_ADDR_GRANULARITY	5
#define MOT_SHIFT		24

#define GET_BITFIELD(v, lo, hi)	(((v) & GENMASK_ULL(hi, lo)) >> (lo))
#define U64_LSHIFT(val, s)	((u64)(val) << (s))
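/*
 * e.g. GET_BITFIELD(0xabcd, 4, 7) masks with GENMASK_ULL(7, 4) = 0xf0 and
 * shifts right by 4, yielding 0xc.
 */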

/*
 * On Apollo Lake we access memory controller registers via a
 * side-band mailbox style interface in a hidden PCI device
 * configuration space.
 */
static struct pci_bus *p2sb_bus;
#define P2SB_DEVFN	PCI_DEVFN(0xd, 0)
#define P2SB_ADDR_OFF	0xd0
#define P2SB_DATA_OFF	0xd4
#define P2SB_STAT_OFF	0xd8
#define P2SB_ROUT_OFF	0xda
#define P2SB_EADD_OFF	0xdc
#define P2SB_HIDE_OFF	0xe1

#define P2SB_BUSY	1

#define P2SB_READ(size, off, ptr) \
	pci_bus_read_config_##size(p2sb_bus, P2SB_DEVFN, off, ptr)
#define P2SB_WRITE(size, off, val) \
	pci_bus_write_config_##size(p2sb_bus, P2SB_DEVFN, off, val)

static bool p2sb_is_busy(u16 *status)
{
	P2SB_READ(word, P2SB_STAT_OFF, status);

	return !!(*status & P2SB_BUSY);
}

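/*
 * One 32-bit sideband read: unhide the P2SB device, post the target
 * port/offset and opcode to the mailbox registers, poll the status
 * register until the busy bit clears, read back the data register,
 * then hide the device again. The two status bits above the busy bit
 * form the completion code returned to the caller.
 */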
static int _apl_rd_reg(int port, int off, int op, u32 *data)
{
	int retries = 0xff, ret;
	u16 status;

	P2SB_WRITE(byte, P2SB_HIDE_OFF, 0);

	if (p2sb_is_busy(&status)) {
		ret = -EAGAIN;
		goto out;
	}

	P2SB_WRITE(dword, P2SB_ADDR_OFF, (port << 24) | off);
	P2SB_WRITE(dword, P2SB_DATA_OFF, 0);
	P2SB_WRITE(dword, P2SB_EADD_OFF, 0);
	P2SB_WRITE(word, P2SB_ROUT_OFF, 0);
	P2SB_WRITE(word, P2SB_STAT_OFF, (op << 8) | P2SB_BUSY);

	while (p2sb_is_busy(&status)) {
		if (retries-- == 0) {
			ret = -EBUSY;
			goto out;
		}
	}

	P2SB_READ(dword, P2SB_DATA_OFF, data);
	ret = (status >> 1) & 0x3;
out:
	P2SB_WRITE(byte, P2SB_HIDE_OFF, 1);

	return ret;
}

static int apl_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
{
	int ret = 0;

	edac_dbg(2, "Read %s port=%x off=%x op=%x\n", name, port, off, op);
	switch (sz) {
	case 8:
		ret = _apl_rd_reg(port, off + 4, op, (u32 *)(data + 4));
		/* fall through */
	case 4:
		ret |= _apl_rd_reg(port, off, op, (u32 *)data);
		pnd2_printk(KERN_DEBUG, "%s=%x%08x ret=%d\n", name,
			    sz == 8 ? *((u32 *)(data + 4)) : 0, *((u32 *)data), ret);
		break;
	}

	return ret;
}

static u64 get_mem_ctrl_hub_base_addr(void)
{
	struct b_cr_mchbar_lo_pci lo;
	struct b_cr_mchbar_hi_pci hi;
	struct pci_dev *pdev;

	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
	if (pdev) {
		pci_read_config_dword(pdev, 0x48, (u32 *)&lo);
		pci_read_config_dword(pdev, 0x4c, (u32 *)&hi);
		pci_dev_put(pdev);
	} else {
		return 0;
	}

	if (!lo.enable) {
		edac_dbg(2, "MMIO via memory controller hub base address is disabled!\n");
		return 0;
	}

	return U64_LSHIFT(hi.base, 32) | U64_LSHIFT(lo.base, 15);
}

static u64 get_sideband_reg_base_addr(void)
{
	struct pci_dev *pdev;
	u32 hi, lo;

	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x19dd, NULL);
	if (pdev) {
		pci_read_config_dword(pdev, 0x10, &lo);
		pci_read_config_dword(pdev, 0x14, &hi);
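		/* Mask off the lower four bits of the BAR (flag bits, not address) */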
		lo &= 0xfffffff0;
		pci_dev_put(pdev);
		return (U64_LSHIFT(hi, 32) | U64_LSHIFT(lo, 0));
	} else {
		return 0xfd000000;
	}
}

static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
{
	struct pci_dev *pdev;
	char *base;
	u64 addr;

	if (op == 4) {
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
		if (!pdev)
			return -ENODEV;

		pci_read_config_dword(pdev, off, data);
		pci_dev_put(pdev);
	} else {
		/* MMIO via memory controller hub base address */
		if (op == 0 && port == 0x4c) {
			addr = get_mem_ctrl_hub_base_addr();
			if (!addr)
				return -ENODEV;
		} else {
			/* MMIO via sideband register base address */
			addr = get_sideband_reg_base_addr();
			if (!addr)
				return -ENODEV;
			addr += (port << 16);
		}

		base = ioremap((resource_size_t)addr, 0x10000);
		if (!base)
			return -ENODEV;

		if (sz == 8)
			*(u32 *)(data + 4) = *(u32 *)(base + off + 4);
		*(u32 *)data = *(u32 *)(base + off);

		iounmap(base);
	}

	edac_dbg(2, "Read %s=%.8x_%.8x\n", name,
		 (sz == 8) ? *(u32 *)(data + 4) : 0, *(u32 *)data);

	return 0;
}

#define RD_REGP(regp, regname, port)	\
	ops->rd_reg(port,		\
		regname##_offset,	\
		regname##_r_opcode,	\
		regp, sizeof(struct regname),	\
		#regname)

#define RD_REG(regp, regname)		\
	ops->rd_reg(regname ## _port,	\
		regname##_offset,	\
		regname##_r_opcode,	\
		regp, sizeof(struct regname),	\
		#regname)

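/*
 * e.g. RD_REG(&tolud, b_cr_tolud_pci) expands to
 *	ops->rd_reg(b_cr_tolud_pci_port, b_cr_tolud_pci_offset,
 *		    b_cr_tolud_pci_r_opcode, &tolud,
 *		    sizeof(struct b_cr_tolud_pci), "b_cr_tolud_pci");
 * the per-register _port/_offset/_r_opcode constants are expected to
 * come from pnd2_edac.h.
 */
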
static u64 top_lm, top_hm;
static bool two_slices;
static bool two_channels; /* Both PMI channels in one slice enabled */

static u8 sym_chan_mask;
static u8 asym_chan_mask;
static u8 chan_mask;
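/* In each channel mask: bits 0/1 = slice 0 channels 0/1, bits 2/3 = slice 1 channels 0/1 */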

static int slice_selector = -1;
static int chan_selector = -1;
static u64 slice_hash_mask;
static u64 chan_hash_mask;

static void mk_region(char *name, struct region *rp, u64 base, u64 limit)
{
	rp->enabled = 1;
	rp->base = base;
	rp->limit = limit;
	edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, limit);
}

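/*
 * MOT regions are given as base + mask rather than base + limit. The mask
 * must be contiguous ones reaching up to the top physical bit (39), e.g.
 * base 0x3000000 with mask 0xffff000000 gives [0x3000000, 0x3ffffff].
 */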
static void mk_region_mask(char *name, struct region *rp, u64 base, u64 mask)
{
	if (mask == 0) {
		pr_info(FW_BUG "MOT mask cannot be zero\n");
		return;
	}
	if (mask != GENMASK_ULL(PND_MAX_PHYS_BIT, __ffs(mask))) {
		pr_info(FW_BUG "MOT mask not power of two\n");
		return;
	}
	if (base & ~mask) {
		pr_info(FW_BUG "MOT region base/mask alignment error\n");
		return;
	}
	rp->base = base;
	rp->limit = (base | ~mask) & GENMASK_ULL(PND_MAX_PHYS_BIT, 0);
	rp->enabled = 1;
	edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, rp->limit);
}

static bool in_region(struct region *rp, u64 addr)
{
	if (!rp->enabled)
		return false;

	return rp->base <= addr && addr <= rp->limit;
}

static int gen_sym_mask(struct b_cr_slice_channel_hash *p)
{
	int mask = 0;

	if (!p->slice_0_mem_disabled)
		mask |= p->sym_slice0_channel_enabled;

	if (!p->slice_1_disabled)
		mask |= p->sym_slice1_channel_enabled << 2;

	if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
		mask &= 0x5;

	return mask;
}

static int gen_asym_mask(struct b_cr_slice_channel_hash *p,
			 struct b_cr_asym_mem_region0_mchbar *as0,
			 struct b_cr_asym_mem_region1_mchbar *as1,
			 struct b_cr_asym_2way_mem_region_mchbar *as2way)
{
	const int intlv[] = { 0x5, 0xA, 0x3, 0xC };
	int mask = 0;

	if (as2way->asym_2way_interleave_enable)
		mask = intlv[as2way->asym_2way_intlv_mode];
	if (as0->slice0_asym_enable)
		mask |= (1 << as0->slice0_asym_channel_select);
	if (as1->slice1_asym_enable)
		mask |= (4 << as1->slice1_asym_channel_select);
	if (p->slice_0_mem_disabled)
		mask &= 0xc;
	if (p->slice_1_disabled)
		mask &= 0x3;
	if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
		mask &= 0x5;

	return mask;
}

static struct b_cr_tolud_pci tolud;
static struct b_cr_touud_lo_pci touud_lo;
static struct b_cr_touud_hi_pci touud_hi;
static struct b_cr_asym_mem_region0_mchbar asym0;
static struct b_cr_asym_mem_region1_mchbar asym1;
static struct b_cr_asym_2way_mem_region_mchbar asym_2way;
static struct b_cr_mot_out_base_mchbar mot_base;
static struct b_cr_mot_out_mask_mchbar mot_mask;
static struct b_cr_slice_channel_hash chash;

/* Apollo Lake dunit */
/*
 * Validated on a board with just two DIMMs in the [0] and [2] positions
 * in this array. The other port numbers match the documentation, but
 * caution is advised.
 */
static const int apl_dports[APL_NUM_CHANNELS] = { 0x18, 0x10, 0x11, 0x19 };
static struct d_cr_drp0 drp0[APL_NUM_CHANNELS];

/* Denverton dunit */
static const int dnv_dports[DNV_NUM_CHANNELS] = { 0x10, 0x12 };
static struct d_cr_dsch dsch;
static struct d_cr_ecc_ctrl ecc_ctrl[DNV_NUM_CHANNELS];
static struct d_cr_drp drp[DNV_NUM_CHANNELS];
static struct d_cr_dmap dmap[DNV_NUM_CHANNELS];
static struct d_cr_dmap1 dmap1[DNV_NUM_CHANNELS];
static struct d_cr_dmap2 dmap2[DNV_NUM_CHANNELS];
static struct d_cr_dmap3 dmap3[DNV_NUM_CHANNELS];
static struct d_cr_dmap4 dmap4[DNV_NUM_CHANNELS];
static struct d_cr_dmap5 dmap5[DNV_NUM_CHANNELS];

static void apl_mk_region(char *name, struct region *rp, void *asym)
{
	struct b_cr_asym_mem_region0_mchbar *a = asym;

	mk_region(name, rp,
		  U64_LSHIFT(a->slice0_asym_base, APL_ASYMSHIFT),
		  U64_LSHIFT(a->slice0_asym_limit, APL_ASYMSHIFT) +
		  GENMASK_ULL(APL_ASYMSHIFT - 1, 0));
}

static void dnv_mk_region(char *name, struct region *rp, void *asym)
{
	struct b_cr_asym_mem_region_denverton *a = asym;

	mk_region(name, rp,
		  U64_LSHIFT(a->slice_asym_base, DNV_ASYMSHIFT),
		  U64_LSHIFT(a->slice_asym_limit, DNV_ASYMSHIFT) +
		  GENMASK_ULL(DNV_ASYMSHIFT - 1, 0));
}

static int apl_get_registers(void)
{
	int ret = -ENODEV;
	int i;

	if (RD_REG(&asym_2way, b_cr_asym_2way_mem_region_mchbar))
		return -ENODEV;

	/*
	 * RD_REGP() will fail for unpopulated or non-existent
	 * DIMM slots. Return success if we find at least one DIMM.
	 */
	for (i = 0; i < APL_NUM_CHANNELS; i++)
		if (!RD_REGP(&drp0[i], d_cr_drp0, apl_dports[i]))
			ret = 0;

	return ret;
}

static int dnv_get_registers(void)
{
	int i;

	if (RD_REG(&dsch, d_cr_dsch))
		return -ENODEV;

	for (i = 0; i < DNV_NUM_CHANNELS; i++)
		if (RD_REGP(&ecc_ctrl[i], d_cr_ecc_ctrl, dnv_dports[i]) ||
		    RD_REGP(&drp[i], d_cr_drp, dnv_dports[i]) ||
		    RD_REGP(&dmap[i], d_cr_dmap, dnv_dports[i]) ||
		    RD_REGP(&dmap1[i], d_cr_dmap1, dnv_dports[i]) ||
		    RD_REGP(&dmap2[i], d_cr_dmap2, dnv_dports[i]) ||
		    RD_REGP(&dmap3[i], d_cr_dmap3, dnv_dports[i]) ||
		    RD_REGP(&dmap4[i], d_cr_dmap4, dnv_dports[i]) ||
		    RD_REGP(&dmap5[i], d_cr_dmap5, dnv_dports[i]))
			return -ENODEV;

	return 0;
}

/*
 * Read all the h/w config registers once here (they don't
 * change at run time). Figure out which address ranges have
 * which interleave characteristics.
 */
static int get_registers(void)
{
	const int intlv[] = { 10, 11, 12, 12 };

	if (RD_REG(&tolud, b_cr_tolud_pci) ||
	    RD_REG(&touud_lo, b_cr_touud_lo_pci) ||
	    RD_REG(&touud_hi, b_cr_touud_hi_pci) ||
	    RD_REG(&asym0, b_cr_asym_mem_region0_mchbar) ||
	    RD_REG(&asym1, b_cr_asym_mem_region1_mchbar) ||
	    RD_REG(&mot_base, b_cr_mot_out_base_mchbar) ||
	    RD_REG(&mot_mask, b_cr_mot_out_mask_mchbar) ||
	    RD_REG(&chash, b_cr_slice_channel_hash))
		return -ENODEV;

	if (ops->get_registers())
		return -ENODEV;

	if (ops->type == DNV) {
		/* PMI channel idx (always 0) for asymmetric region */
		asym0.slice0_asym_channel_select = 0;
		asym1.slice1_asym_channel_select = 0;
		/* PMI channel bitmap (always 1) for symmetric region */
		chash.sym_slice0_channel_enabled = 0x1;
		chash.sym_slice1_channel_enabled = 0x1;
	}

	if (asym0.slice0_asym_enable)
		ops->mk_region("as0", &as0, &asym0);

	if (asym1.slice1_asym_enable)
		ops->mk_region("as1", &as1, &asym1);

	if (asym_2way.asym_2way_interleave_enable) {
		mk_region("as2way", &as2,
			  U64_LSHIFT(asym_2way.asym_2way_base, APL_ASYMSHIFT),
			  U64_LSHIFT(asym_2way.asym_2way_limit, APL_ASYMSHIFT) +
			  GENMASK_ULL(APL_ASYMSHIFT - 1, 0));
	}

	if (mot_base.imr_en) {
		mk_region_mask("mot", &mot,
			       U64_LSHIFT(mot_base.mot_out_base, MOT_SHIFT),
			       U64_LSHIFT(mot_mask.mot_out_mask, MOT_SHIFT));
	}

	top_lm = U64_LSHIFT(tolud.tolud, 20);
	top_hm = U64_LSHIFT(touud_hi.touud, 32) | U64_LSHIFT(touud_lo.touud, 20);

	two_slices = !chash.slice_1_disabled &&
		     !chash.slice_0_mem_disabled &&
		     (chash.sym_slice0_channel_enabled != 0) &&
		     (chash.sym_slice1_channel_enabled != 0);
	two_channels = !chash.ch_1_disabled &&
		       !chash.enable_pmi_dual_data_mode &&
		       ((chash.sym_slice0_channel_enabled == 3) ||
			(chash.sym_slice1_channel_enabled == 3));

	sym_chan_mask = gen_sym_mask(&chash);
	asym_chan_mask = gen_asym_mask(&chash, &asym0, &asym1, &asym_2way);
	chan_mask = sym_chan_mask | asym_chan_mask;

	if (two_slices && !two_channels) {
		if (chash.hvm_mode)
			slice_selector = 29;
		else
			slice_selector = intlv[chash.interleave_mode];
	} else if (!two_slices && two_channels) {
		if (chash.hvm_mode)
			chan_selector = 29;
		else
			chan_selector = intlv[chash.interleave_mode];
	} else if (two_slices && two_channels) {
		if (chash.hvm_mode) {
			slice_selector = 29;
			chan_selector = 30;
		} else {
			slice_selector = intlv[chash.interleave_mode];
			chan_selector = intlv[chash.interleave_mode] + 1;
		}
	}

	if (two_slices) {
		if (!chash.hvm_mode)
			slice_hash_mask = chash.slice_hash_mask << SLICE_HASH_MASK_LSB;
		if (!two_channels)
			slice_hash_mask |= BIT_ULL(slice_selector);
	}

	if (two_channels) {
		if (!chash.hvm_mode)
			chan_hash_mask = chash.ch_hash_mask << CH_HASH_MASK_LSB;
		if (!two_slices)
			chan_hash_mask |= BIT_ULL(chan_selector);
	}

	return 0;
}

/* Get a contiguous memory address (remove the MMIO gap) */
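/* Addresses below 4GB pass through; higher ones drop the hole between top_lm and 4GB. */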
static u64 remove_mmio_gap(u64 sys)
{
	return (sys < _4GB) ? sys : sys - (_4GB - top_lm);
}

/* Squeeze out one address bit, shift upper part down to fill gap */
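/* e.g. bitidx == 2: 0b10110 -> 0b1010 (bit 2 dropped, higher bits shift down one place) */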
static void remove_addr_bit(u64 *addr, int bitidx)
{
	u64 mask;

	if (bitidx == -1)
		return;

	mask = (1ull << bitidx) - 1;
	*addr = ((*addr >> 1) & ~mask) | (*addr & mask);
}

/* XOR all the bits from addr specified in mask */
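/* i.e. return the parity of (addr & mask); used for the slice/channel hash decisions */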
static int hash_by_mask(u64 addr, u64 mask)
{
	u64 result = addr & mask;

	result = (result >> 32) ^ result;
	result = (result >> 16) ^ result;
	result = (result >> 8) ^ result;
	result = (result >> 4) ^ result;
	result = (result >> 2) ^ result;
	result = (result >> 1) ^ result;

	return (int)result & 1;
}

/*
 * First stage decode. Take the system address and figure out which
 * second stage will deal with it based on interleave modes.
 */
static int sys2pmi(const u64 addr, u32 *pmiidx, u64 *pmiaddr, char *msg)
{
	u64 contig_addr, contig_base, contig_offset, contig_base_adj;
	int mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH :
					 MOT_CHAN_INTLV_BIT_1SLC_2CH;
	int slice_intlv_bit_rm = SELECTOR_DISABLED;
	int chan_intlv_bit_rm = SELECTOR_DISABLED;
	/* Determine if address is in the MOT region. */
	bool mot_hit = in_region(&mot, addr);
	/* Calculate the number of symmetric regions enabled. */
	int sym_channels = hweight8(sym_chan_mask);

	/*
	 * The amount we need to shift the asym base can be determined by the
	 * number of enabled symmetric channels.
	 * NOTE: This can only work because symmetric memory is not supposed
	 * to do a 3-way interleave.
	 */
	int sym_chan_shift = sym_channels >> 1;
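	/* 1, 2 or 4 symmetric channels give a shift of 0, 1 or 2 respectively */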

	/* Give up if address is out of range, or in MMIO gap */
	if (addr >= (1ul << PND_MAX_PHYS_BIT) ||
	    (addr >= top_lm && addr < _4GB) || addr >= top_hm) {
		snprintf(msg, PND2_MSG_SIZE, "Error address 0x%llx is not DRAM", addr);
		return -EINVAL;
	}

	/* Get a contiguous memory address (remove the MMIO gap) */
	contig_addr = remove_mmio_gap(addr);

	if (in_region(&as0, addr)) {
		*pmiidx = asym0.slice0_asym_channel_select;

		contig_base = remove_mmio_gap(as0.base);
		contig_offset = contig_addr - contig_base;
		contig_base_adj = (contig_base >> sym_chan_shift) *
				  ((chash.sym_slice0_channel_enabled >> (*pmiidx & 1)) & 1);
		contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull);
	} else if (in_region(&as1, addr)) {
		*pmiidx = 2u + asym1.slice1_asym_channel_select;

		contig_base = remove_mmio_gap(as1.base);
		contig_offset = contig_addr - contig_base;
		contig_base_adj = (contig_base >> sym_chan_shift) *
				  ((chash.sym_slice1_channel_enabled >> (*pmiidx & 1)) & 1);
		contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull);
	} else if (in_region(&as2, addr) && (asym_2way.asym_2way_intlv_mode == 0x3ul)) {
		bool channel1;

		mot_intlv_bit = MOT_CHAN_INTLV_BIT_1SLC_2CH;
		*pmiidx = (asym_2way.asym_2way_intlv_mode & 1) << 1;
		channel1 = mot_hit ? ((bool)((addr >> mot_intlv_bit) & 1)) :
			hash_by_mask(contig_addr, chan_hash_mask);
		*pmiidx |= (u32)channel1;

		contig_base = remove_mmio_gap(as2.base);
		chan_intlv_bit_rm = mot_hit ? mot_intlv_bit : chan_selector;
		contig_offset = contig_addr - contig_base;
		remove_addr_bit(&contig_offset, chan_intlv_bit_rm);
		contig_addr = (contig_base >> sym_chan_shift) + contig_offset;
	} else {
		/* Otherwise we're in normal, boring symmetric mode. */
		*pmiidx = 0u;

		if (two_slices) {
			bool slice1;

			if (mot_hit) {
				slice_intlv_bit_rm = MOT_SLC_INTLV_BIT;
				slice1 = (addr >> MOT_SLC_INTLV_BIT) & 1;
			} else {
				slice_intlv_bit_rm = slice_selector;
				slice1 = hash_by_mask(addr, slice_hash_mask);
			}

			*pmiidx = (u32)slice1 << 1;
		}

		if (two_channels) {
			bool channel1;

			mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH :
						     MOT_CHAN_INTLV_BIT_1SLC_2CH;

			if (mot_hit) {
				chan_intlv_bit_rm = mot_intlv_bit;
				channel1 = (addr >> mot_intlv_bit) & 1;
			} else {
				chan_intlv_bit_rm = chan_selector;
				channel1 = hash_by_mask(contig_addr, chan_hash_mask);
			}

			*pmiidx |= (u32)channel1;
		}
	}

	/* Remove the chan_selector bit first */
	remove_addr_bit(&contig_addr, chan_intlv_bit_rm);
	/* Remove the slice bit (we remove it second because it must be lower) */
	remove_addr_bit(&contig_addr, slice_intlv_bit_rm);
	*pmiaddr = contig_addr;

	return 0;
}

/* Translate PMI address to memory (rank, row, bank, column) */
#define C(n) (0x10 | (n))	/* column */
#define B(n) (0x20 | (n))	/* bank */
#define R(n) (0x40 | (n))	/* row */
#define RS   (0x80)		/* rank */

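/*
 * Each bits[] entry in the geometry tables below names the DRAM address
 * component fed by that PMI address bit: e.g. C(7) is column bit 7, B(1)
 * is bank bit 1, R(12) is row bit 12, RS is the rank select. A zero
 * entry marks a PMI bit beyond the DIMM's range (it must decode as 0).
 */
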
/* addrdec values */
#define AMAP_1KB	0
#define AMAP_2KB	1
#define AMAP_4KB	2
#define AMAP_RSVD	3

/* dden values */
#define DEN_4Gb		0
#define DEN_8Gb		2

/* dwid values */
#define X8		0
#define X16		1

static struct dimm_geometry {
	u8 addrdec;
	u8 dden;
	u8 dwid;
	u8 rowbits, colbits;
	u16 bits[PMI_ADDRESS_WIDTH];
} dimms[] = {
	{
		.addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X16,
		.rowbits = 15, .colbits = 10,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0),
			R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9),
			R(10), C(7), C(8), C(9), R(11), RS, R(12), R(13), R(14),
			0, 0, 0, 0
		}
	},
	{
		.addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X8,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0),
			R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9),
			R(10), C(7), C(8), C(9), R(11), RS, R(12), R(13), R(14),
			R(15), 0, 0, 0
		}
	},
	{
		.addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X16,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0),
			R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9),
			R(10), C(7), C(8), C(9), R(11), RS, R(12), R(13), R(14),
			R(15), 0, 0, 0
		}
	},
	{
		.addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X8,
		.rowbits = 16, .colbits = 11,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0),
			R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9),
			R(10), C(7), C(8), C(9), R(11), RS, C(11), R(12), R(13),
			R(14), R(15), 0, 0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X16,
		.rowbits = 15, .colbits = 10,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2),
			R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8),
			R(9), R(10), C(8), C(9), R(11), RS, R(12), R(13), R(14),
			0, 0, 0, 0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X8,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2),
			R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8),
			R(9), R(10), C(8), C(9), R(11), RS, R(12), R(13), R(14),
			R(15), 0, 0, 0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X16,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2),
			R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8),
			R(9), R(10), C(8), C(9), R(11), RS, R(12), R(13), R(14),
			R(15), 0, 0, 0
		}
	},
	{
		.addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X8,
		.rowbits = 16, .colbits = 11,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2),
			R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8),
			R(9), R(10), C(8), C(9), R(11), RS, C(11), R(12), R(13),
			R(14), R(15), 0, 0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X16,
		.rowbits = 15, .colbits = 10,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1),
			B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7),
			R(8), R(9), R(10), C(9), R(11), RS, R(12), R(13), R(14),
			0, 0, 0, 0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X8,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1),
			B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7),
			R(8), R(9), R(10), C(9), R(11), RS, R(12), R(13), R(14),
			R(15), 0, 0, 0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X16,
		.rowbits = 16, .colbits = 10,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1),
			B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7),
			R(8), R(9), R(10), C(9), R(11), RS, R(12), R(13), R(14),
			R(15), 0, 0, 0
		}
	},
	{
		.addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X8,
		.rowbits = 16, .colbits = 11,
		.bits = {
			C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1),
			B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7),
			R(8), R(9), R(10), C(9), R(11), RS, C(11), R(12), R(13),
			R(14), R(15), 0, 0
		}
	}
};

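/*
 * Bank address hashing (enabled by drp0.bahen): each bank bit is XORed
 * with a pair of higher PMI address bits (plus bit 22 for bank bit 1),
 * offset by the address-map shift "shft" (0/1/2 for 1KB/2KB/4KB addrdec).
 */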
static int bank_hash(u64 pmiaddr, int idx, int shft)
{
	int bhash = 0;

	switch (idx) {
	case 0:
		bhash ^= ((pmiaddr >> (12 + shft)) ^ (pmiaddr >> (9 + shft))) & 1;
		break;
	case 1:
		bhash ^= (((pmiaddr >> (10 + shft)) ^ (pmiaddr >> (8 + shft))) & 1) << 1;
		bhash ^= ((pmiaddr >> 22) & 1) << 1;
		break;
	case 2:
		bhash ^= (((pmiaddr >> (13 + shft)) ^ (pmiaddr >> (11 + shft))) & 1) << 2;
		break;
	}

	return bhash;
}

static int rank_hash(u64 pmiaddr)
{
	return ((pmiaddr >> 16) ^ (pmiaddr >> 10)) & 1;
}

/* Second stage decode. Compute rank, bank, row & column. */
static int apl_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
		       struct dram_addr *daddr, char *msg)
{
	struct d_cr_drp0 *cr_drp0 = &drp0[pmiidx];
	struct pnd2_pvt *pvt = mci->pvt_info;
	int g = pvt->dimm_geom[pmiidx];
	struct dimm_geometry *d = &dimms[g];
	int column = 0, bank = 0, row = 0, rank = 0;
	int i, idx, type, skiprs = 0;

	for (i = 0; i < PMI_ADDRESS_WIDTH; i++) {
		int bit = (pmiaddr >> i) & 1;

		if (i + skiprs >= PMI_ADDRESS_WIDTH) {
			snprintf(msg, PND2_MSG_SIZE, "Bad dimm_geometry[] table\n");
			return -EINVAL;
		}

		type = d->bits[i + skiprs] & ~0xf;
		idx = d->bits[i + skiprs] & 0xf;

		/*
		 * On single rank DIMMs ignore the rank select bit
		 * and shift remainder of "bits[]" down one place.
		 */
		if (type == RS && (cr_drp0->rken0 + cr_drp0->rken1) == 1) {
			skiprs = 1;
			type = d->bits[i + skiprs] & ~0xf;
			idx = d->bits[i + skiprs] & 0xf;
		}

		switch (type) {
		case C(0):
			column |= (bit << idx);
			break;
		case B(0):
			bank |= (bit << idx);
			if (cr_drp0->bahen)
				bank ^= bank_hash(pmiaddr, idx, d->addrdec);
			break;
		case R(0):
			row |= (bit << idx);
			break;
		case RS:
			rank = bit;
			if (cr_drp0->rsien)
				rank ^= rank_hash(pmiaddr);
			break;
		default:
			if (bit) {
				snprintf(msg, PND2_MSG_SIZE, "Bad translation\n");
				return -EINVAL;
			}
			goto done;
		}
	}

done:
	daddr->col = column;
	daddr->bank = bank;
	daddr->row = row;
	daddr->rank = rank;
	daddr->dimm = 0;

	return 0;
}

/* Pluck bit "in" from pmiaddr and return value shifted to bit "out" */
#define dnv_get_bit(pmi, in, out) ((int)(((pmi) >> (in)) & 1u) << (out))
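/* e.g. dnv_get_bit(0x20, 5, 1) == 0x2: bit 5 of pmi, re-homed at bit position 1 */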

static int dnv_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
		       struct dram_addr *daddr, char *msg)
{
	/* Rank 0 or 1 */
	daddr->rank = dnv_get_bit(pmiaddr, dmap[pmiidx].rs0 + 13, 0);
	/* Rank 2 or 3 */
	daddr->rank |= dnv_get_bit(pmiaddr, dmap[pmiidx].rs1 + 13, 1);

	/*
	 * Normally ranks 0,1 are DIMM0, and 2,3 are DIMM1, but we
	 * flip them if DIMM1 is larger than DIMM0.
	 */
	daddr->dimm = (daddr->rank >= 2) ^ drp[pmiidx].dimmflip;

	daddr->bank = dnv_get_bit(pmiaddr, dmap[pmiidx].ba0 + 6, 0);
	daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].ba1 + 6, 1);
	daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg0 + 6, 2);
	if (dsch.ddr4en)
		daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg1 + 6, 3);
	if (dmap1[pmiidx].bxor) {
		if (dsch.ddr4en) {
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 0);
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 1);
			if (dsch.chan_width == 0)
				/* 64/72 bit dram channel width */
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2);
			else
				/* 32/40 bit dram channel width */
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2);
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 3);
		} else {
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 0);
			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 1);
			if (dsch.chan_width == 0)
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2);
			else
				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2);
		}
	}

	daddr->row = dnv_get_bit(pmiaddr, dmap2[pmiidx].row0 + 6, 0);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row1 + 6, 1);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 2);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row3 + 6, 3);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row4 + 6, 4);
	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row5 + 6, 5);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 6);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 7);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row8 + 6, 8);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row9 + 6, 9);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row10 + 6, 10);
	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row11 + 6, 11);
	daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row12 + 6, 12);
	daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row13 + 6, 13);
	if (dmap4[pmiidx].row14 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row14 + 6, 14);
	if (dmap4[pmiidx].row15 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row15 + 6, 15);
	if (dmap4[pmiidx].row16 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row16 + 6, 16);
	if (dmap4[pmiidx].row17 != 31)
		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row17 + 6, 17);

	daddr->col = dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 3);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 4);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca5 + 6, 5);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca6 + 6, 6);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca7 + 6, 7);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca8 + 6, 8);
	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca9 + 6, 9);
	if (!dsch.ddr4en && dmap1[pmiidx].ca11 != 0x3f)
		daddr->col |= dnv_get_bit(pmiaddr, dmap1[pmiidx].ca11 + 13, 11);

	return 0;
}

static int check_channel(int ch)
{
	if (drp0[ch].dramtype != 0) {
		pnd2_printk(KERN_INFO, "Unsupported DIMM in channel %d\n", ch);
		return 1;
	} else if (drp0[ch].eccen == 0) {
		pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch);
		return 1;
	}
	return 0;
}

static int apl_check_ecc_active(void)
{
	int i, ret = 0;

	/* Check dramtype and ECC mode for each present DIMM */
	for (i = 0; i < APL_NUM_CHANNELS; i++)
		if (chan_mask & BIT(i))
			ret += check_channel(i);
	return ret ? -EINVAL : 0;
}

#define DIMMS_PRESENT(d) ((d)->rken0 + (d)->rken1 + (d)->rken2 + (d)->rken3)

static int check_unit(int ch)
{
	struct d_cr_drp *d = &drp[ch];

	if (DIMMS_PRESENT(d) && !ecc_ctrl[ch].eccen) {
		pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch);
		return 1;
	}
	return 0;
}

static int dnv_check_ecc_active(void)
{
	int i, ret = 0;

	for (i = 0; i < DNV_NUM_CHANNELS; i++)
		ret += check_unit(i);
	return ret ? -EINVAL : 0;
}

static int get_memory_error_data(struct mem_ctl_info *mci, u64 addr,
				 struct dram_addr *daddr, char *msg)
{
	u64 pmiaddr;
	u32 pmiidx;
	int ret;

	ret = sys2pmi(addr, &pmiidx, &pmiaddr, msg);
	if (ret)
		return ret;

	pmiaddr >>= ops->pmiaddr_shift;
	/* pmi channel idx to dimm channel idx */
	pmiidx >>= ops->pmiidx_shift;
	daddr->chan = pmiidx;

	ret = ops->pmi2mem(mci, pmiaddr, pmiidx, daddr, msg);
	if (ret)
		return ret;

	edac_dbg(0, "SysAddr=%llx PmiAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n",
		 addr, pmiaddr, daddr->chan, daddr->dimm, daddr->rank, daddr->bank, daddr->row, daddr->col);

	return 0;
}

static void pnd2_mce_output_error(struct mem_ctl_info *mci, const struct mce *m,
				  struct dram_addr *daddr)
{
	enum hw_event_mc_err_type tp_event;
	char *optype, msg[PND2_MSG_SIZE];
	bool ripv = m->mcgstatus & MCG_STATUS_RIPV;
	bool overflow = m->status & MCI_STATUS_OVER;
	bool uc_err = m->status & MCI_STATUS_UC;
	bool recov = m->status & MCI_STATUS_S;
	u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
	u32 mscod = GET_BITFIELD(m->status, 16, 31);
	u32 errcode = GET_BITFIELD(m->status, 0, 15);
	u32 optypenum = GET_BITFIELD(m->status, 4, 6);
	int rc;

	tp_event = uc_err ? (ripv ? HW_EVENT_ERR_FATAL : HW_EVENT_ERR_UNCORRECTED) :
			    HW_EVENT_ERR_CORRECTED;

	/*
	 * According to Table 15-9 of the Intel Architecture spec vol 3A,
	 * memory errors should fit in this mask:
	 *	000f 0000 1mmm cccc (binary)
	 * where:
	 *	f = Correction Report Filtering Bit. If 1, subsequent errors
	 *	    won't be shown
	 *	mmm = error type
	 *	cccc = channel
	 * If the mask doesn't match, report an error to the parsing logic.
	 */
	if (!((errcode & 0xef80) == 0x80)) {
		optype = "Can't parse: it is not a mem";
	} else {
		switch (optypenum) {
		case 0:
			optype = "generic undef request error";
			break;
		case 1:
			optype = "memory read error";
			break;
		case 2:
			optype = "memory write error";
			break;
		case 3:
			optype = "addr/cmd error";
			break;
		case 4:
			optype = "memory scrubbing error";
			break;
		default:
			optype = "reserved";
			break;
		}
	}

	/* Only decode errors with a valid address (ADDRV) */
	if (!(m->status & MCI_STATUS_ADDRV))
		return;

	rc = get_memory_error_data(mci, m->addr, daddr, msg);
	if (rc)
		goto address_error;

	snprintf(msg, sizeof(msg),
		 "%s%s err_code:%04x:%04x channel:%d DIMM:%d rank:%d row:%d bank:%d col:%d",
		 overflow ? " OVERFLOW" : "", (uc_err && recov) ? " recoverable" : "", mscod,
		 errcode, daddr->chan, daddr->dimm, daddr->rank, daddr->row, daddr->bank, daddr->col);

	edac_dbg(0, "%s\n", msg);

	/* Call the helper to output message */
	edac_mc_handle_error(tp_event, mci, core_err_cnt, m->addr >> PAGE_SHIFT,
			     m->addr & ~PAGE_MASK, 0, daddr->chan, daddr->dimm, -1, optype, msg);

	return;

address_error:
	edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0, -1, -1, -1, msg, "");
}

static void apl_get_dimm_config(struct mem_ctl_info *mci)
{
	struct pnd2_pvt *pvt = mci->pvt_info;
	struct dimm_info *dimm;
	struct d_cr_drp0 *d;
	u64 capacity;
	int i, g;

	for (i = 0; i < APL_NUM_CHANNELS; i++) {
		if (!(chan_mask & BIT(i)))
			continue;

		dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, 0, 0);
		if (!dimm) {
			edac_dbg(0, "No allocated DIMM for channel %d\n", i);
			continue;
		}

		d = &drp0[i];
		for (g = 0; g < ARRAY_SIZE(dimms); g++)
			if (dimms[g].addrdec == d->addrdec &&
			    dimms[g].dden == d->dden &&
			    dimms[g].dwid == d->dwid)
				break;

		if (g == ARRAY_SIZE(dimms)) {
			edac_dbg(0, "Channel %d: unrecognized DIMM\n", i);
			continue;
		}

		pvt->dimm_geom[i] = g;
		capacity = (d->rken0 + d->rken1) * 8 * (1ul << dimms[g].rowbits) *
			   (1ul << dimms[g].colbits);
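		/* capacity appears to be counted in 8-byte units, hence the (20 - 3) MiB shift */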
		edac_dbg(0, "Channel %d: %lld MByte DIMM\n", i, capacity >> (20 - 3));
		dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
		dimm->grain = 32;
		dimm->dtype = (d->dwid == 0) ? DEV_X8 : DEV_X16;
		dimm->mtype = MEM_DDR3;
		dimm->edac_mode = EDAC_SECDED;
		snprintf(dimm->label, sizeof(dimm->label), "Slice#%d_Chan#%d", i / 2, i % 2);
	}
}

static const int dnv_dtypes[] = {
	DEV_X8, DEV_X4, DEV_X16, DEV_UNKNOWN
};

static void dnv_get_dimm_config(struct mem_ctl_info *mci)
{
	int i, j, ranks_of_dimm[DNV_MAX_DIMMS], banks, rowbits, colbits, memtype;
	struct dimm_info *dimm;
	struct d_cr_drp *d;
	u64 capacity;

	if (dsch.ddr4en) {
		memtype = MEM_DDR4;
		banks = 16;
		colbits = 10;
	} else {
		memtype = MEM_DDR3;
		banks = 8;
	}

	for (i = 0; i < DNV_NUM_CHANNELS; i++) {
		if (dmap4[i].row14 == 31)
			rowbits = 14;
		else if (dmap4[i].row15 == 31)
			rowbits = 15;
		else if (dmap4[i].row16 == 31)
			rowbits = 16;
		else if (dmap4[i].row17 == 31)
			rowbits = 17;
		else
			rowbits = 18;

		if (memtype == MEM_DDR3) {
			if (dmap1[i].ca11 != 0x3f)
				colbits = 12;
			else
				colbits = 10;
		}

		d = &drp[i];
		/* DIMM0 is present if rank0 and/or rank1 is enabled */
		ranks_of_dimm[0] = d->rken0 + d->rken1;
		/* DIMM1 is present if rank2 and/or rank3 is enabled */
		ranks_of_dimm[1] = d->rken2 + d->rken3;

		for (j = 0; j < DNV_MAX_DIMMS; j++) {
			if (!ranks_of_dimm[j])
				continue;

			dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, j, 0);
			if (!dimm) {
				edac_dbg(0, "No allocated DIMM for channel %d DIMM %d\n", i, j);
				continue;
			}

			capacity = ranks_of_dimm[j] * banks * (1ul << rowbits) * (1ul << colbits);
			edac_dbg(0, "Channel %d DIMM %d: %lld MByte DIMM\n", i, j, capacity >> (20 - 3));
			dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
			dimm->grain = 32;
			dimm->dtype = dnv_dtypes[j ? d->dimmdwid0 : d->dimmdwid1];
			dimm->mtype = memtype;
			dimm->edac_mode = EDAC_SECDED;
			snprintf(dimm->label, sizeof(dimm->label), "Chan#%d_DIMM#%d", i, j);
		}
	}
}

static int pnd2_register_mci(struct mem_ctl_info **ppmci)
{
	struct edac_mc_layer layers[2];
	struct mem_ctl_info *mci;
	struct pnd2_pvt *pvt;
	int rc;

	rc = ops->check_ecc();
	if (rc < 0)
		return rc;

	/* Allocate a new MC control structure */
	layers[0].type = EDAC_MC_LAYER_CHANNEL;
	layers[0].size = ops->channels;
	layers[0].is_virt_csrow = false;
	layers[1].type = EDAC_MC_LAYER_SLOT;
	layers[1].size = ops->dimms_per_channel;
	layers[1].is_virt_csrow = true;
	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
	if (!mci)
		return -ENOMEM;

	pvt = mci->pvt_info;
	memset(pvt, 0, sizeof(*pvt));

	mci->mod_name = "pnd2_edac.c";
	mci->dev_name = ops->name;
	mci->ctl_name = "Pondicherry2";

	/* Get dimm basic config and the memory layout */
	ops->get_dimm_config(mci);

	if (edac_mc_add_mc(mci)) {
		edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
		edac_mc_free(mci);
		return -EINVAL;
	}

	*ppmci = mci;

	return 0;
}

static void pnd2_unregister_mci(struct mem_ctl_info *mci)
{
	if (unlikely(!mci || !mci->pvt_info)) {
		pnd2_printk(KERN_ERR, "Couldn't find mci handler\n");
		return;
	}

	/* Remove MC sysfs nodes */
	edac_mc_del_mc(NULL);
	edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
	edac_mc_free(mci);
}

/*
 * Callback function registered with core kernel mce code.
 * Called once for each logged error.
 */
static int pnd2_mce_check_error(struct notifier_block *nb, unsigned long val, void *data)
{
	struct mce *mce = (struct mce *)data;
	struct mem_ctl_info *mci;
	struct dram_addr daddr;
	char *type;

	if (edac_get_report_status() == EDAC_REPORTING_DISABLED)
		return NOTIFY_DONE;

	mci = pnd2_mci;
	if (!mci)
		return NOTIFY_DONE;

	/*
	 * Just let mcelog handle it if the error is
	 * outside the memory controller. A memory error
	 * is indicated by bit 7 = 1 and bits 8-11,13-15 = 0.
	 * Bit 12 has a special meaning.
	 */
	if ((mce->status & 0xefff) >> 7 != 1)
		return NOTIFY_DONE;

	if (mce->mcgstatus & MCG_STATUS_MCIP)
		type = "Exception";
	else
		type = "Event";

	pnd2_mc_printk(mci, KERN_INFO, "HANDLING MCE MEMORY ERROR\n");
	pnd2_mc_printk(mci, KERN_INFO, "CPU %u: Machine Check %s: %llx Bank %u: %llx\n",
		       mce->extcpu, type, mce->mcgstatus, mce->bank, mce->status);
	pnd2_mc_printk(mci, KERN_INFO, "TSC %llx ", mce->tsc);
	pnd2_mc_printk(mci, KERN_INFO, "ADDR %llx ", mce->addr);
	pnd2_mc_printk(mci, KERN_INFO, "MISC %llx ", mce->misc);
	pnd2_mc_printk(mci, KERN_INFO, "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x\n",
		       mce->cpuvendor, mce->cpuid, mce->time, mce->socketid, mce->apicid);

	pnd2_mce_output_error(mci, mce, &daddr);

	/* Advise mcelog that the error was handled */
	return NOTIFY_STOP;
}

static struct notifier_block pnd2_mce_dec = {
	.notifier_call = pnd2_mce_check_error,
};

#ifdef CONFIG_EDAC_DEBUG
/*
 * Write an address to this file to exercise the address decode
 * logic in this driver.
 */
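/*
 * e.g. (assuming debugfs is mounted at /sys/kernel/debug):
 *	echo 0x12345678 > /sys/kernel/debug/edac/pnd2_test/pnd2_debug_addr
 *	cat /sys/kernel/debug/edac/pnd2_test/pnd2_debug_results
 */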
static u64 pnd2_fake_addr;
#define PND2_BLOB_SIZE 1024
static char pnd2_result[PND2_BLOB_SIZE];
static struct dentry *pnd2_test;
static struct debugfs_blob_wrapper pnd2_blob = {
	.data = pnd2_result,
	.size = 0
};

static int debugfs_u64_set(void *data, u64 val)
{
	struct dram_addr daddr;
	struct mce m;

	*(u64 *)data = val;
	m.mcgstatus = 0;
	/* ADDRV + MemRd + Unknown channel */
	m.status = MCI_STATUS_ADDRV + 0x9f;
	m.addr = val;
	pnd2_mce_output_error(pnd2_mci, &m, &daddr);
	snprintf(pnd2_blob.data, PND2_BLOB_SIZE,
		 "SysAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n",
		 m.addr, daddr.chan, daddr.dimm, daddr.rank, daddr.bank, daddr.row, daddr.col);
	pnd2_blob.size = strlen(pnd2_blob.data);

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");

static void setup_pnd2_debug(void)
{
	pnd2_test = edac_debugfs_create_dir("pnd2_test");
	edac_debugfs_create_file("pnd2_debug_addr", 0200, pnd2_test,
				 &pnd2_fake_addr, &fops_u64_wo);
	debugfs_create_blob("pnd2_debug_results", 0400, pnd2_test, &pnd2_blob);
}

static void teardown_pnd2_debug(void)
{
	debugfs_remove_recursive(pnd2_test);
}
#else
static void setup_pnd2_debug(void) {}
static void teardown_pnd2_debug(void) {}
#endif /* CONFIG_EDAC_DEBUG */

static int pnd2_probe(void)
{
	int rc;

	edac_dbg(2, "\n");
	rc = get_registers();
	if (rc)
		return rc;

	return pnd2_register_mci(&pnd2_mci);
}

static void pnd2_remove(void)
{
	edac_dbg(0, "\n");
	pnd2_unregister_mci(pnd2_mci);
}

static struct dunit_ops apl_ops = {
	.name = "pnd2/apl",
	.type = APL,
	.pmiaddr_shift = LOG2_PMI_ADDR_GRANULARITY,
	.pmiidx_shift = 0,
	.channels = APL_NUM_CHANNELS,
	.dimms_per_channel = 1,
	.rd_reg = apl_rd_reg,
	.get_registers = apl_get_registers,
	.check_ecc = apl_check_ecc_active,
	.mk_region = apl_mk_region,
	.get_dimm_config = apl_get_dimm_config,
	.pmi2mem = apl_pmi2mem,
};

static struct dunit_ops dnv_ops = {
	.name = "pnd2/dnv",
	.type = DNV,
	.pmiaddr_shift = 0,
	.pmiidx_shift = 1,
	.channels = DNV_NUM_CHANNELS,
	.dimms_per_channel = 2,
	.rd_reg = dnv_rd_reg,
	.get_registers = dnv_get_registers,
	.check_ecc = dnv_check_ecc_active,
	.mk_region = dnv_mk_region,
	.get_dimm_config = dnv_get_dimm_config,
	.pmi2mem = dnv_pmi2mem,
};

static const struct x86_cpu_id pnd2_cpuids[] = {
	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT, 0, (kernel_ulong_t)&apl_ops },
	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_DENVERTON, 0, (kernel_ulong_t)&dnv_ops },
	{ }
};
MODULE_DEVICE_TABLE(x86cpu, pnd2_cpuids);

static int __init pnd2_init(void)
{
	const struct x86_cpu_id *id;
	int rc;

	edac_dbg(2, "\n");

	id = x86_match_cpu(pnd2_cpuids);
	if (!id)
		return -ENODEV;

	ops = (struct dunit_ops *)id->driver_data;

	if (ops->type == APL) {
		p2sb_bus = pci_find_bus(0, 0);
		if (!p2sb_bus)
			return -ENODEV;
	}

	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
	opstate_init();

	rc = pnd2_probe();
	if (rc < 0) {
		pnd2_printk(KERN_ERR, "Failed to register device with error %d.\n", rc);
		return rc;
	}

	if (!pnd2_mci)
		return -ENODEV;

	mce_register_decode_chain(&pnd2_mce_dec);
	setup_pnd2_debug();

	return 0;
}

static void __exit pnd2_exit(void)
{
	edac_dbg(2, "\n");
	teardown_pnd2_debug();
	mce_unregister_decode_chain(&pnd2_mce_dec);
	pnd2_remove();
}

module_init(pnd2_init);
module_exit(pnd2_exit);

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Tony Luck");
MODULE_DESCRIPTION("MC Driver for Intel SoC using Pondicherry memory controller");