/* Intel i7 core/Nehalem Memory Controller kernel module
 *
 * This driver supports the memory controllers found on the Intel
 * processor families i7core, i7core 7xx/8xx, i5core, Xeon 35xx,
 * Xeon 55xx and Xeon 56xx also known as Nehalem, Nehalem-EP, Lynnfield
 * and Westmere-EP.
 *
 * This file may be distributed under the terms of the
 * GNU General Public License version 2 only.
 *
 * Copyright (c) 2009-2010 by:
 *	 Mauro Carvalho Chehab <mchehab@redhat.com>
 *
 * Red Hat Inc. http://www.redhat.com
 *
 * Forked and adapted from the i5400_edac driver
 *
 * Based on the following public Intel datasheets:
 * Intel Core i7 Processor Extreme Edition and Intel Core i7 Processor
 * Datasheet, Volume 2:
 *	http://download.intel.com/design/processor/datashts/320835.pdf
 * Intel Xeon Processor 5500 Series Datasheet Volume 2
 *	http://www.intel.com/Assets/PDF/datasheet/321322.pdf
 * also available at:
 * http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/edac.h>
#include <linux/mmzone.h>
#include <linux/smp.h>
#include <asm/mce.h>
#include <asm/processor.h>
#include <asm/div64.h>

#include "edac_core.h"

/* Static vars */
static LIST_HEAD(i7core_edac_list);
static DEFINE_MUTEX(i7core_edac_lock);
static int probed;

static int use_pci_fixup;
module_param(use_pci_fixup, int, 0444);
MODULE_PARM_DESC(use_pci_fixup, "Enable PCI fixup to seek for hidden devices");
/*
 * This is used for Nehalem-EP and Nehalem-EX devices, where the non-core
 * registers start at bus 255 and are not reported by the BIOS.
 * We currently only find devices with 2 sockets. To support more QPI
 * (Quick Path Interconnect) sockets, just increment this number.
 */
#define MAX_SOCKET_BUSES	2


/*
 * Alter this version for the module when modifications are made
 */
#define I7CORE_REVISION    " Ver: 1.0.0"
#define EDAC_MOD_STR       "i7core_edac"

/*
 * Debug macros
 */
#define i7core_printk(level, fmt, arg...)			\
	edac_printk(level, "i7core", fmt, ##arg)

#define i7core_mc_printk(mci, level, fmt, arg...)		\
	edac_mc_chipset_printk(mci, level, "i7core", fmt, ##arg)

/*
 * i7core Memory Controller Registers
 */

	/* OFFSETS for Device 0 Function 0 */

#define MC_CFG_CONTROL	0x90
  #define MC_CFG_UNLOCK		0x02
  #define MC_CFG_LOCK		0x00

	/* OFFSETS for Device 3 Function 0 */

#define MC_CONTROL	0x48
#define MC_STATUS	0x4c
#define MC_MAX_DOD	0x64

/*
 * OFFSETS for Device 3 Function 4, as indicated in the Xeon 5500 datasheet:
 * http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
 */

#define MC_TEST_ERR_RCV1	0x60
  #define DIMM2_COR_ERR(r)			((r) & 0x7fff)

#define MC_TEST_ERR_RCV0	0x64
  #define DIMM1_COR_ERR(r)			(((r) >> 16) & 0x7fff)
  #define DIMM0_COR_ERR(r)			((r) & 0x7fff)

/* OFFSETS for Device 3 Function 2, as indicated in the Xeon 5500 datasheet */
#define MC_SSRCONTROL		0x48
  #define SSR_MODE_DISABLE	0x00
  #define SSR_MODE_ENABLE	0x01
  #define SSR_MODE_MASK		0x03

#define MC_SCRUB_CONTROL	0x4c
  #define STARTSCRUB		(1 << 24)
  #define SCRUBINTERVAL_MASK	0xffffff

#define MC_COR_ECC_CNT_0	0x80
#define MC_COR_ECC_CNT_1	0x84
#define MC_COR_ECC_CNT_2	0x88
#define MC_COR_ECC_CNT_3	0x8c
#define MC_COR_ECC_CNT_4	0x90
#define MC_COR_ECC_CNT_5	0x94

#define DIMM_TOP_COR_ERR(r)	(((r) >> 16) & 0x7fff)
#define DIMM_BOT_COR_ERR(r)	((r) & 0x7fff)


	/* OFFSETS for Devices 4,5 and 6 Function 0 */

#define MC_CHANNEL_DIMM_INIT_PARAMS	0x58
  #define THREE_DIMMS_PRESENT		(1 << 24)
  #define SINGLE_QUAD_RANK_PRESENT	(1 << 23)
  #define QUAD_RANK_PRESENT		(1 << 22)
  #define REGISTERED_DIMM		(1 << 15)

#define MC_CHANNEL_MAPPER	0x60
  #define RDLCH(r, ch)		((((r) >> (3 + (ch * 6))) & 0x07) - 1)
  #define WRLCH(r, ch)		((((r) >> (ch * 6)) & 0x07) - 1)

#define MC_CHANNEL_RANK_PRESENT	0x7c
  #define RANK_PRESENT_MASK	0xffff

#define MC_CHANNEL_ADDR_MATCH	0xf0
#define MC_CHANNEL_ERROR_MASK	0xf8
#define MC_CHANNEL_ERROR_INJECT	0xfc
  #define INJECT_ADDR_PARITY	0x10
  #define INJECT_ECC		0x08
  #define MASK_CACHELINE	0x06
  #define MASK_FULL_CACHELINE	0x06
  #define MASK_MSB32_CACHELINE	0x04
  #define MASK_LSB32_CACHELINE	0x02
  #define NO_MASK_CACHELINE	0x00
  #define REPEAT_EN		0x01

	/* OFFSETS for Devices 4,5 and 6 Function 1 */

#define MC_DOD_CH_DIMM0		0x48
#define MC_DOD_CH_DIMM1		0x4c
#define MC_DOD_CH_DIMM2		0x50
  #define RANKOFFSET_MASK	((1 << 12) | (1 << 11) | (1 << 10))
  #define RANKOFFSET(x)		((x & RANKOFFSET_MASK) >> 10)
  #define DIMM_PRESENT_MASK	(1 << 9)
  #define DIMM_PRESENT(x)	(((x) & DIMM_PRESENT_MASK) >> 9)
  #define MC_DOD_NUMBANK_MASK	((1 << 8) | (1 << 7))
  #define MC_DOD_NUMBANK(x)	(((x) & MC_DOD_NUMBANK_MASK) >> 7)
  #define MC_DOD_NUMRANK_MASK	((1 << 6) | (1 << 5))
  #define MC_DOD_NUMRANK(x)	(((x) & MC_DOD_NUMRANK_MASK) >> 5)
  #define MC_DOD_NUMROW_MASK	((1 << 4) | (1 << 3) | (1 << 2))
  #define MC_DOD_NUMROW(x)	(((x) & MC_DOD_NUMROW_MASK) >> 2)
  #define MC_DOD_NUMCOL_MASK	3
  #define MC_DOD_NUMCOL(x)	((x) & MC_DOD_NUMCOL_MASK)

#define MC_RANK_PRESENT		0x7c

#define MC_SAG_CH_0	0x80
#define MC_SAG_CH_1	0x84
#define MC_SAG_CH_2	0x88
#define MC_SAG_CH_3	0x8c
#define MC_SAG_CH_4	0x90
#define MC_SAG_CH_5	0x94
#define MC_SAG_CH_6	0x98
#define MC_SAG_CH_7	0x9c

#define MC_RIR_LIMIT_CH_0	0x40
#define MC_RIR_LIMIT_CH_1	0x44
#define MC_RIR_LIMIT_CH_2	0x48
#define MC_RIR_LIMIT_CH_3	0x4C
#define MC_RIR_LIMIT_CH_4	0x50
#define MC_RIR_LIMIT_CH_5	0x54
#define MC_RIR_LIMIT_CH_6	0x58
#define MC_RIR_LIMIT_CH_7	0x5C
#define MC_RIR_LIMIT_MASK	((1 << 10) - 1)

#define MC_RIR_WAY_CH		0x80
  #define MC_RIR_WAY_OFFSET_MASK	(((1 << 14) - 1) & ~0x7)
  #define MC_RIR_WAY_RANK_MASK		0x7

/*
 * i7core structs
 */

#define NUM_CHANS 3
#define MAX_DIMMS 3		/* Max DIMMS per channel */
#define MAX_MCR_FUNC  4
#define MAX_CHAN_FUNC 3

struct i7core_info {
	u32	mc_control;
	u32	mc_status;
	u32	max_dod;
	u32	ch_map;
};


struct i7core_inject {
	int	enable;

	u32	section;
	u32	type;
	u32	eccmask;

	/* Error address mask */
	int channel, dimm, rank, bank, page, col;
};

struct i7core_channel {
	u32	ranks;
	u32	dimms;
};

struct pci_id_descr {
	int		dev;
	int		func;
	int		dev_id;
	int		optional;
};

struct pci_id_table {
	const struct pci_id_descr	*descr;
	int				n_devs;
};

struct i7core_dev {
	struct list_head	list;
	u8			socket;
	struct pci_dev		**pdev;
	int			n_devs;
	struct mem_ctl_info	*mci;
};

struct i7core_pvt {
	struct pci_dev	*pci_noncore;
	struct pci_dev	*pci_mcr[MAX_MCR_FUNC + 1];
	struct pci_dev	*pci_ch[NUM_CHANS][MAX_CHAN_FUNC + 1];

	struct i7core_dev *i7core_dev;

	struct i7core_info	info;
	struct i7core_inject	inject;
	struct i7core_channel	channel[NUM_CHANS];

	int		ce_count_available;
	int		csrow_map[NUM_CHANS][MAX_DIMMS];

	/* ECC corrected errors counts per udimm */
	unsigned long	udimm_ce_count[MAX_DIMMS];
	int		udimm_last_ce_count[MAX_DIMMS];
	/* ECC corrected errors counts per rdimm */
	unsigned long	rdimm_ce_count[NUM_CHANS][MAX_DIMMS];
	int		rdimm_last_ce_count[NUM_CHANS][MAX_DIMMS];

	bool		is_registered, enable_scrub;

	/* FIFO double buffers */
	struct mce	mce_entry[MCE_LOG_LEN];
	struct mce	mce_outentry[MCE_LOG_LEN];

	/* FIFO in/out counters */
	unsigned	mce_in, mce_out;

	/* Count of errors that were lost because the FIFO overran */
	unsigned	mce_overrun;

	/* DCLK Frequency used for computing scrub rate */
	int		dclk_freq;

	/* Struct to control EDAC polling */
	struct edac_pci_ctl_info *i7core_pci;
};

#define PCI_DESCR(device, function, device_id)	\
	.dev = (device),			\
	.func = (function),			\
	.dev_id = (device_id)

static const struct pci_id_descr pci_dev_descr_i7core_nehalem[] = {
		/* Memory controller */
	{ PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_I7_MCR) },
	{ PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_I7_MC_TAD) },
		/* Exists only for RDIMM */
	{ PCI_DESCR(3, 2, PCI_DEVICE_ID_INTEL_I7_MC_RAS), .optional = 1 },
	{ PCI_DESCR(3, 4, PCI_DEVICE_ID_INTEL_I7_MC_TEST) },

		/* Channel 0 */
	{ PCI_DESCR(4, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH0_CTRL) },
	{ PCI_DESCR(4, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH0_ADDR) },
	{ PCI_DESCR(4, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH0_RANK) },
	{ PCI_DESCR(4, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH0_TC) },

		/* Channel 1 */
	{ PCI_DESCR(5, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH1_CTRL) },
	{ PCI_DESCR(5, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH1_ADDR) },
	{ PCI_DESCR(5, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH1_RANK) },
	{ PCI_DESCR(5, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH1_TC) },

		/* Channel 2 */
	{ PCI_DESCR(6, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH2_CTRL) },
	{ PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH2_ADDR) },
	{ PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH2_RANK) },
	{ PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH2_TC) },

		/* Generic Non-core registers */
	/*
	 * This is the PCI device on i7core and on Xeon 35xx (8086:2c41)
	 * On Xeon 55xx, however, it has a different id (8086:2c40). So,
	 * the probing code needs to test for the other address in case of
	 * failure of this one
	 */
	{ PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_I7_NONCORE) },

};

static const struct pci_id_descr pci_dev_descr_lynnfield[] = {
	{ PCI_DESCR( 3, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR) },
	{ PCI_DESCR( 3, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD) },
	{ PCI_DESCR( 3, 4, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST) },

	{ PCI_DESCR( 4, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL) },
	{ PCI_DESCR( 4, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR) },
	{ PCI_DESCR( 4, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK) },
	{ PCI_DESCR( 4, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC) },

	{ PCI_DESCR( 5, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL) },
	{ PCI_DESCR( 5, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR) },
	{ PCI_DESCR( 5, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK) },
	{ PCI_DESCR( 5, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC) },

	/*
	 * This PCI device has an alternate address on some
	 * processors, like the Core i7 860.
	 */
	{ PCI_DESCR( 0, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE) },
};

static const struct pci_id_descr pci_dev_descr_i7core_westmere[] = {
		/* Memory controller */
	{ PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR_REV2) },
	{ PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD_REV2) },
		/* Exists only for RDIMM */
	{ PCI_DESCR(3, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_RAS_REV2), .optional = 1 },
	{ PCI_DESCR(3, 4, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST_REV2) },

		/* Channel 0 */
	{ PCI_DESCR(4, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL_REV2) },
	{ PCI_DESCR(4, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR_REV2) },
	{ PCI_DESCR(4, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK_REV2) },
	{ PCI_DESCR(4, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC_REV2) },

		/* Channel 1 */
	{ PCI_DESCR(5, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL_REV2) },
	{ PCI_DESCR(5, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR_REV2) },
	{ PCI_DESCR(5, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK_REV2) },
	{ PCI_DESCR(5, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC_REV2) },

		/* Channel 2 */
	{ PCI_DESCR(6, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_CTRL_REV2) },
	{ PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_ADDR_REV2) },
	{ PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_RANK_REV2) },
	{ PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_TC_REV2) },

		/* Generic Non-core registers */
	{ PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_REV2) },

};

#define PCI_ID_TABLE_ENTRY(A) { .descr=A, .n_devs = ARRAY_SIZE(A) }
static const struct pci_id_table pci_dev_table[] = {
	PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_nehalem),
	PCI_ID_TABLE_ENTRY(pci_dev_descr_lynnfield),
	PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_westmere),
	{0,}			/* 0 terminated list. */
};

/*
 *	pci_device_id	table for which devices we are looking for
 */
static DEFINE_PCI_DEVICE_TABLE(i7core_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
	{0,}			/* 0 terminated list. */
};

/****************************************************************************
			Ancillary status routines
 ****************************************************************************/

	/* MC_CONTROL bits */
#define CH_ACTIVE(pvt, ch)	((pvt)->info.mc_control & (1 << (8 + ch)))
#define ECCx8(pvt)		((pvt)->info.mc_control & (1 << 1))

	/* MC_STATUS bits */
#define ECC_ENABLED(pvt)	((pvt)->info.mc_status & (1 << 4))
#define CH_DISABLED(pvt, ch)	((pvt)->info.mc_status & (1 << ch))

	/* MC_MAX_DOD read functions */
static inline int numdimms(u32 dimms)
{
	return (dimms & 0x3) + 1;
}

static inline int numrank(u32 rank)
{
	static int ranks[4] = { 1, 2, 4, -EINVAL };

	return ranks[rank & 0x3];
}

static inline int numbank(u32 bank)
{
	static int banks[4] = { 4, 8, 16, -EINVAL };

	return banks[bank & 0x3];
}

static inline int numrow(u32 row)
{
	static int rows[8] = {
		1 << 12, 1 << 13, 1 << 14, 1 << 15,
		1 << 16, -EINVAL, -EINVAL, -EINVAL,
	};

	return rows[row & 0x7];
}

static inline int numcol(u32 col)
{
	static int cols[8] = {
		1 << 10, 1 << 11, 1 << 12, -EINVAL,
	};
	return cols[col & 0x3];
}

static struct i7core_dev *get_i7core_dev(u8 socket)
{
	struct i7core_dev *i7core_dev;

	list_for_each_entry(i7core_dev, &i7core_edac_list, list) {
		if (i7core_dev->socket == socket)
			return i7core_dev;
	}

	return NULL;
}

static struct i7core_dev *alloc_i7core_dev(u8 socket,
					   const struct pci_id_table *table)
{
	struct i7core_dev *i7core_dev;

	i7core_dev = kzalloc(sizeof(*i7core_dev), GFP_KERNEL);
	if (!i7core_dev)
		return NULL;

	i7core_dev->pdev = kzalloc(sizeof(*i7core_dev->pdev) * table->n_devs,
				   GFP_KERNEL);
	if (!i7core_dev->pdev) {
		kfree(i7core_dev);
		return NULL;
	}

	i7core_dev->socket = socket;
	i7core_dev->n_devs = table->n_devs;
	list_add_tail(&i7core_dev->list, &i7core_edac_list);

	return i7core_dev;
}

static void free_i7core_dev(struct i7core_dev *i7core_dev)
{
	list_del(&i7core_dev->list);
	kfree(i7core_dev->pdev);
	kfree(i7core_dev);
}

/****************************************************************************
			Memory check routines
 ****************************************************************************/
static struct pci_dev *get_pdev_slot_func(u8 socket, unsigned slot,
					  unsigned func)
{
	struct i7core_dev *i7core_dev = get_i7core_dev(socket);
	int i;

	if (!i7core_dev)
		return NULL;

	for (i = 0; i < i7core_dev->n_devs; i++) {
		if (!i7core_dev->pdev[i])
			continue;

		if (PCI_SLOT(i7core_dev->pdev[i]->devfn) == slot &&
		    PCI_FUNC(i7core_dev->pdev[i]->devfn) == func) {
			return i7core_dev->pdev[i];
		}
	}

	return NULL;
}

/**
 * i7core_get_active_channels() - gets the number of channels and csrows
 * @socket:	Quick Path Interconnect socket
 * @channels:	Number of channels that will be returned
 * @csrows:	Number of csrows found
 *
 * Since the EDAC core needs to know in advance the number of available
 * channels and csrows in order to allocate memory for them, probing is done
 * in two similar steps. The first step, implemented by this function,
 * counts the csrows/channels present on one socket, so that the mci
 * components can be sized properly.
 *
 * Note that none of the currently available datasheets explain, or even
 * mention, how csrows are seen by the memory controller, so a fake
 * description is needed: this driver maps one DIMM to one csrow.
 */
static int i7core_get_active_channels(const u8 socket, unsigned *channels,
				      unsigned *csrows)
{
	struct pci_dev *pdev = NULL;
	int i, j;
	u32 status, control;

	*channels = 0;
	*csrows = 0;

	pdev = get_pdev_slot_func(socket, 3, 0);
	if (!pdev) {
		i7core_printk(KERN_ERR, "Couldn't find socket %d fn 3.0!!!\n",
			      socket);
		return -ENODEV;
	}

	/* Device 3 function 0 reads */
	pci_read_config_dword(pdev, MC_STATUS, &status);
	pci_read_config_dword(pdev, MC_CONTROL, &control);

	for (i = 0; i < NUM_CHANS; i++) {
		u32 dimm_dod[3];
		/* Check if the channel is active */
		if (!(control & (1 << (8 + i))))
			continue;

		/* Check if the channel is disabled */
		if (status & (1 << i))
			continue;

		pdev = get_pdev_slot_func(socket, i + 4, 1);
		if (!pdev) {
			i7core_printk(KERN_ERR, "Couldn't find socket %d "
						"fn %d.%d!!!\n",
						socket, i + 4, 1);
			return -ENODEV;
		}
		/* Devices 4-6 function 1 */
		pci_read_config_dword(pdev,
				MC_DOD_CH_DIMM0, &dimm_dod[0]);
		pci_read_config_dword(pdev,
				MC_DOD_CH_DIMM1, &dimm_dod[1]);
		pci_read_config_dword(pdev,
				MC_DOD_CH_DIMM2, &dimm_dod[2]);

		(*channels)++;

		for (j = 0; j < 3; j++) {
			if (!DIMM_PRESENT(dimm_dod[j]))
				continue;
			(*csrows)++;
		}
	}

	debugf0("Number of active channels on socket %d: %d\n",
		socket, *channels);

	return 0;
}

static int get_dimm_config(struct mem_ctl_info *mci)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	struct csrow_info *csr;
	struct pci_dev *pdev;
	int i, j;
	int csrow = 0;
	enum edac_type mode;
	enum mem_type mtype;
	struct dimm_info *dimm;

	/* Get data from the MC register, function 0 */
	pdev = pvt->pci_mcr[0];
	if (!pdev)
		return -ENODEV;

	/* Device 3 function 0 reads */
	pci_read_config_dword(pdev, MC_CONTROL, &pvt->info.mc_control);
	pci_read_config_dword(pdev, MC_STATUS, &pvt->info.mc_status);
	pci_read_config_dword(pdev, MC_MAX_DOD, &pvt->info.max_dod);
	pci_read_config_dword(pdev, MC_CHANNEL_MAPPER, &pvt->info.ch_map);

	debugf0("QPI %d control=0x%08x status=0x%08x dod=0x%08x map=0x%08x\n",
		pvt->i7core_dev->socket, pvt->info.mc_control, pvt->info.mc_status,
		pvt->info.max_dod, pvt->info.ch_map);

	if (ECC_ENABLED(pvt)) {
		debugf0("ECC enabled with x%d SDCC\n", ECCx8(pvt) ? 8 : 4);
		if (ECCx8(pvt))
			mode = EDAC_S8ECD8ED;
		else
			mode = EDAC_S4ECD4ED;
	} else {
		debugf0("ECC disabled\n");
		mode = EDAC_NONE;
	}

	/* FIXME: need to handle the error codes */
	debugf0("DOD Max limits: DIMMS: %d, %d-ranked, %d-banked "
		"x%x x 0x%x\n",
		numdimms(pvt->info.max_dod),
		numrank(pvt->info.max_dod >> 2),
		numbank(pvt->info.max_dod >> 4),
		numrow(pvt->info.max_dod >> 6),
		numcol(pvt->info.max_dod >> 9));

	for (i = 0; i < NUM_CHANS; i++) {
		u32 data, dimm_dod[3], value[8];

		if (!pvt->pci_ch[i][0])
			continue;

		if (!CH_ACTIVE(pvt, i)) {
			debugf0("Channel %i is not active\n", i);
			continue;
		}
		if (CH_DISABLED(pvt, i)) {
			debugf0("Channel %i is disabled\n", i);
			continue;
		}

		/* Devices 4-6 function 0 */
		pci_read_config_dword(pvt->pci_ch[i][0],
				MC_CHANNEL_DIMM_INIT_PARAMS, &data);

		pvt->channel[i].ranks = (data & QUAD_RANK_PRESENT) ?
						4 : 2;

		if (data & REGISTERED_DIMM)
			mtype = MEM_RDDR3;
		else
			mtype = MEM_DDR3;
#if 0
		if (data & THREE_DIMMS_PRESENT)
			pvt->channel[i].dimms = 3;
		else if (data & SINGLE_QUAD_RANK_PRESENT)
			pvt->channel[i].dimms = 1;
		else
			pvt->channel[i].dimms = 2;
#endif

		/* Devices 4-6 function 1 */
		pci_read_config_dword(pvt->pci_ch[i][1],
				MC_DOD_CH_DIMM0, &dimm_dod[0]);
		pci_read_config_dword(pvt->pci_ch[i][1],
				MC_DOD_CH_DIMM1, &dimm_dod[1]);
		pci_read_config_dword(pvt->pci_ch[i][1],
				MC_DOD_CH_DIMM2, &dimm_dod[2]);

		debugf0("Ch%d phy rd%d, wr%d (0x%08x): "
			"%d ranks, %cDIMMs\n",
			i,
			RDLCH(pvt->info.ch_map, i), WRLCH(pvt->info.ch_map, i),
			data,
			pvt->channel[i].ranks,
			(data & REGISTERED_DIMM) ? 'R' : 'U');

		for (j = 0; j < 3; j++) {
			u32 banks, ranks, rows, cols;
			u32 size, npages;

			if (!DIMM_PRESENT(dimm_dod[j]))
				continue;

			banks = numbank(MC_DOD_NUMBANK(dimm_dod[j]));
			ranks = numrank(MC_DOD_NUMRANK(dimm_dod[j]));
			rows = numrow(MC_DOD_NUMROW(dimm_dod[j]));
			cols = numcol(MC_DOD_NUMCOL(dimm_dod[j]));

			/* DDR3 has 8 I/O banks */
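			/*
			 * Size arithmetic (a reading of the line below, not
			 * from the datasheet): each row/column/bank/rank
			 * address holds 64 bits (8 bytes) of data, so the
			 * size in bytes is the product below times 8, and
			 * shifting by (20 - 3) converts that straight to MiB.
			 */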
			size = (rows * cols * banks * ranks) >> (20 - 3);

			pvt->channel[i].dimms++;

			debugf0("\tdimm %d %d Mb offset: %x, "
				"bank: %d, rank: %d, row: %#x, col: %#x\n",
				j, size,
				RANKOFFSET(dimm_dod[j]),
				banks, ranks, rows, cols);

			npages = MiB_TO_PAGES(size);

			csr = &mci->csrows[csrow];

			pvt->csrow_map[i][j] = csrow;

			dimm = csr->channels[0].dimm;
			dimm->nr_pages = npages;

			switch (banks) {
			case 4:
				dimm->dtype = DEV_X4;
				break;
			case 8:
				dimm->dtype = DEV_X8;
				break;
			case 16:
				dimm->dtype = DEV_X16;
				break;
			default:
				dimm->dtype = DEV_UNKNOWN;
			}

			snprintf(dimm->label, sizeof(dimm->label),
				 "CPU#%uChannel#%u_DIMM#%u",
				 pvt->i7core_dev->socket, i, j);
			dimm->grain = 8;
			dimm->edac_mode = mode;
			dimm->mtype = mtype;
			csrow++;
		}

		pci_read_config_dword(pdev, MC_SAG_CH_0, &value[0]);
		pci_read_config_dword(pdev, MC_SAG_CH_1, &value[1]);
		pci_read_config_dword(pdev, MC_SAG_CH_2, &value[2]);
		pci_read_config_dword(pdev, MC_SAG_CH_3, &value[3]);
		pci_read_config_dword(pdev, MC_SAG_CH_4, &value[4]);
		pci_read_config_dword(pdev, MC_SAG_CH_5, &value[5]);
		pci_read_config_dword(pdev, MC_SAG_CH_6, &value[6]);
		pci_read_config_dword(pdev, MC_SAG_CH_7, &value[7]);
		debugf1("\t[%i] DIVBY3\tREMOVED\tOFFSET\n", i);
		for (j = 0; j < 8; j++)
			debugf1("\t\t%#x\t%#x\t%#x\n",
				(value[j] >> 27) & 0x1,
				(value[j] >> 24) & 0x7,
				(value[j] & ((1 << 24) - 1)));
	}

	return 0;
}

/****************************************************************************
			Error insertion routines
 ****************************************************************************/

/* The i7core has independent error injection features per channel.
   However, to keep the code simpler, we don't allow enabling error injection
   on more than one channel.
   Also, since a change to an inject parameter is only applied on enable,
   we disable error injection on all write calls to the sysfs nodes that
   control error code injection.
 */
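/*
 * Usage sketch for the injection nodes below (the sysfs path is an
 * assumption - the actual EDAC mount point and mc instance number depend
 * on the system):
 *	echo 2 >/sys/devices/system/edac/mc/mc0/inject_type
 *	echo 0x2 >/sys/devices/system/edac/mc/mc0/inject_eccmask
 *	echo any >/sys/devices/system/edac/mc/mc0/inject_addrmatch/channel
 *	echo 1 >/sys/devices/system/edac/mc/mc0/inject_enable
 */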
static int disable_inject(const struct mem_ctl_info *mci)
{
	struct i7core_pvt *pvt = mci->pvt_info;

	pvt->inject.enable = 0;

	if (!pvt->pci_ch[pvt->inject.channel][0])
		return -ENODEV;

	pci_write_config_dword(pvt->pci_ch[pvt->inject.channel][0],
				MC_CHANNEL_ERROR_INJECT, 0);

	return 0;
}

/*
 * i7core inject inject.section
 *
 *	accept and store error injection inject.section value
 *	bit 0 - refers to the lower 32-byte half cacheline
 *	bit 1 - refers to the upper 32-byte half cacheline
 */
static ssize_t i7core_inject_section_store(struct mem_ctl_info *mci,
					   const char *data, size_t count)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	unsigned long value;
	int rc;

	if (pvt->inject.enable)
		disable_inject(mci);

	rc = strict_strtoul(data, 10, &value);
	if ((rc < 0) || (value > 3))
		return -EIO;

	pvt->inject.section = (u32) value;
	return count;
}

static ssize_t i7core_inject_section_show(struct mem_ctl_info *mci,
					  char *data)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	return sprintf(data, "0x%08x\n", pvt->inject.section);
}

/*
 * i7core inject.type
 *
 *	accept and store error injection inject.type value
 *	bit 0 - repeat enable - Enable error repetition
 *	bit 1 - inject ECC error
 *	bit 2 - inject parity error
 */
static ssize_t i7core_inject_type_store(struct mem_ctl_info *mci,
					const char *data, size_t count)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	unsigned long value;
	int rc;

	if (pvt->inject.enable)
		disable_inject(mci);

	rc = strict_strtoul(data, 10, &value);
	if ((rc < 0) || (value > 7))
		return -EIO;

	pvt->inject.type = (u32) value;
	return count;
}

static ssize_t i7core_inject_type_show(struct mem_ctl_info *mci,
				       char *data)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	return sprintf(data, "0x%08x\n", pvt->inject.type);
}

/*
 * i7core_inject_eccmask_store
 *
 * The type of error (UE/CE) will depend on the inject.eccmask value:
 *   Any bits set to a 1 will flip the corresponding ECC bit
 *   Correctable errors can be injected by flipping 1 bit or the bits within
 *   a symbol pair (2 consecutive aligned 8-bit pairs - i.e. 7:0 and 15:8 or
 *   23:16 and 31:24). Flipping bits in two symbol pairs will cause an
 *   uncorrectable error to be injected.
 */
static ssize_t i7core_inject_eccmask_store(struct mem_ctl_info *mci,
					   const char *data, size_t count)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	unsigned long value;
	int rc;

	if (pvt->inject.enable)
		disable_inject(mci);

	rc = strict_strtoul(data, 10, &value);
	if (rc < 0)
		return -EIO;

	pvt->inject.eccmask = (u32) value;
	return count;
}

static ssize_t i7core_inject_eccmask_show(struct mem_ctl_info *mci,
					  char *data)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	return sprintf(data, "0x%08x\n", pvt->inject.eccmask);
}

/*
 * i7core_addrmatch
 *
 * Sysfs nodes for the address-match criteria used by error injection.
 * Each node (channel, dimm, rank, bank, page, col) restricts injection to
 * addresses matching that value; writing "any" (-1) makes the memory
 * controller ignore that field when matching.
 */

#define DECLARE_ADDR_MATCH(param, limit)			\
static ssize_t i7core_inject_store_##param(			\
	struct mem_ctl_info *mci,				\
	const char *data, size_t count)				\
{								\
	struct i7core_pvt *pvt;					\
	long value;						\
	int rc;							\
								\
	debugf1("%s()\n", __func__);				\
	pvt = mci->pvt_info;					\
								\
	if (pvt->inject.enable)					\
		disable_inject(mci);				\
								\
	if (!strcasecmp(data, "any") || !strcasecmp(data, "any\n"))\
		value = -1;					\
	else {							\
		rc = strict_strtoul(data, 10, &value);		\
		if ((rc < 0) || (value >= limit))		\
			return -EIO;				\
	}							\
								\
	pvt->inject.param = value;				\
								\
	return count;						\
}								\
								\
static ssize_t i7core_inject_show_##param(			\
	struct mem_ctl_info *mci,				\
	char *data)						\
{								\
	struct i7core_pvt *pvt;					\
								\
	pvt = mci->pvt_info;					\
	debugf1("%s() pvt=%p\n", __func__, pvt);		\
	if (pvt->inject.param < 0)				\
		return sprintf(data, "any\n");			\
	else							\
		return sprintf(data, "%d\n", pvt->inject.param);\
}

#define ATTR_ADDR_MATCH(param)				\
	{						\
		.attr = {				\
			.name = #param,			\
			.mode = (S_IRUGO | S_IWUSR)	\
		},					\
		.show  = i7core_inject_show_##param,	\
		.store = i7core_inject_store_##param,	\
	}

DECLARE_ADDR_MATCH(channel, 3);
DECLARE_ADDR_MATCH(dimm, 3);
DECLARE_ADDR_MATCH(rank, 4);
DECLARE_ADDR_MATCH(bank, 32);
DECLARE_ADDR_MATCH(page, 0x10000);
DECLARE_ADDR_MATCH(col, 0x4000);

static int write_and_test(struct pci_dev *dev, const int where, const u32 val)
{
	u32 read;
	int count;

	debugf0("setting pci %02x:%02x.%x reg=%02x value=%08x\n",
		dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
		where, val);

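	/*
	 * Retry the write up to 10 times, 100 ms apart, until the value
	 * reads back, as some of these registers may not latch immediately.
	 */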
	for (count = 0; count < 10; count++) {
		if (count)
			msleep(100);
		pci_write_config_dword(dev, where, val);
		pci_read_config_dword(dev, where, &read);

		if (read == val)
			return 0;
	}

	i7core_printk(KERN_ERR, "Error during set pci %02x:%02x.%x reg=%02x "
		"write=%08x. Read=%08x\n",
		dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
		where, val, read);

	return -EINVAL;
}

/*
 * This routine prepares the Memory Controller for error injection.
 * The error will be injected when some process tries to write to the
 * memory that matches the given criteria.
 * The criteria can be set in terms of a mask where dimm, rank, bank, page
 * and col can be specified.
 * A -1 value for any of the mask items will make the MCU ignore that
 * matching criterion for error injection.
 *
 * It should be noticed that the error will only happen after a write
 * operation on a memory address that matches the condition. If REPEAT_EN is
 * not enabled in the inject mask, it will produce just one error.
 * Otherwise, it will repeat until the inject mask is cleared.
 *
 * FIXME: This routine assumes that the MAXNUMDIMMS value of MC_MAX_DOD
 * is reliable enough to check whether the MC is using all
 * three channels. However, this is not clear in the datasheet.
 */
static ssize_t i7core_inject_enable_store(struct mem_ctl_info *mci,
					  const char *data, size_t count)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	u32 injectmask;
	u64 mask = 0;
	int  rc;
	long enable;

	if (!pvt->pci_ch[pvt->inject.channel][0])
		return 0;

	rc = strict_strtoul(data, 10, &enable);
	if ((rc < 0))
		return 0;

	if (enable) {
		pvt->inject.enable = 1;
	} else {
		disable_inject(mci);
		return count;
	}

	/* Sets pvt->inject.dimm mask */
	if (pvt->inject.dimm < 0)
		mask |= 1LL << 41;
	else {
		if (pvt->channel[pvt->inject.channel].dimms > 2)
			mask |= (pvt->inject.dimm & 0x3LL) << 35;
		else
			mask |= (pvt->inject.dimm & 0x1LL) << 36;
	}

	/* Sets pvt->inject.rank mask */
	if (pvt->inject.rank < 0)
		mask |= 1LL << 40;
	else {
		if (pvt->channel[pvt->inject.channel].dimms > 2)
			mask |= (pvt->inject.rank & 0x1LL) << 34;
		else
			mask |= (pvt->inject.rank & 0x3LL) << 34;
	}

	/* Sets pvt->inject.bank mask */
	if (pvt->inject.bank < 0)
		mask |= 1LL << 39;
	else
		mask |= (pvt->inject.bank & 0x15LL) << 30;

	/* Sets pvt->inject.page mask */
	if (pvt->inject.page < 0)
		mask |= 1LL << 38;
	else
		mask |= (pvt->inject.page & 0xffff) << 14;

	/* Sets pvt->inject.column mask */
	if (pvt->inject.col < 0)
		mask |= 1LL << 37;
	else
		mask |= (pvt->inject.col & 0x3fff);

	/*
	 * bit    0: REPEAT_EN
	 * bits 1-2: MASK_HALF_CACHELINE
	 * bit    3: INJECT_ECC
	 * bit    4: INJECT_ADDR_PARITY
	 */
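	/*
	 * Worked example: inject.type = 0x2 (inject ECC error) with
	 * inject.section = 0x3 (both half cachelines) yields
	 * injectmask = 0x0e: REPEAT_EN clear, both section bits set,
	 * INJECT_ECC set.
	 */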

	injectmask = (pvt->inject.type & 1) |
		     (pvt->inject.section & 0x3) << 1 |
		     (pvt->inject.type & 0x6) << (3 - 1);

	/* Unlock writes to registers - this register is write only */
	pci_write_config_dword(pvt->pci_noncore,
			       MC_CFG_CONTROL, 0x2);

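	/*
	 * The address-match mask is 64 bits wide but config space is
	 * written 32 bits at a time, hence the two writes below.
	 */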
	write_and_test(pvt->pci_ch[pvt->inject.channel][0],
		       MC_CHANNEL_ADDR_MATCH, mask);
	write_and_test(pvt->pci_ch[pvt->inject.channel][0],
		       MC_CHANNEL_ADDR_MATCH + 4, mask >> 32L);

	write_and_test(pvt->pci_ch[pvt->inject.channel][0],
		       MC_CHANNEL_ERROR_MASK, pvt->inject.eccmask);

	write_and_test(pvt->pci_ch[pvt->inject.channel][0],
		       MC_CHANNEL_ERROR_INJECT, injectmask);

	/*
	 * This is something undocumented, based on my tests
	 * Without writing 8 to this register, errors aren't injected. Not sure
	 * why.
	 */
	pci_write_config_dword(pvt->pci_noncore,
			       MC_CFG_CONTROL, 8);

	debugf0("Error inject addr match 0x%016llx, ecc 0x%08x,"
		" inject 0x%08x\n",
		mask, pvt->inject.eccmask, injectmask);


	return count;
}

static ssize_t i7core_inject_enable_show(struct mem_ctl_info *mci,
					 char *data)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	u32 injectmask;

	if (!pvt->pci_ch[pvt->inject.channel][0])
		return 0;

	pci_read_config_dword(pvt->pci_ch[pvt->inject.channel][0],
			      MC_CHANNEL_ERROR_INJECT, &injectmask);

	debugf0("Inject error read: 0x%018x\n", injectmask);

	if (injectmask & 0x0c)
		pvt->inject.enable = 1;

	return sprintf(data, "%d\n", pvt->inject.enable);
}

#define DECLARE_COUNTER(param)					\
static ssize_t i7core_show_counter_##param(			\
	struct mem_ctl_info *mci,				\
	char *data)						\
{								\
	struct i7core_pvt *pvt = mci->pvt_info;		\
								\
	debugf1("%s()\n", __func__);				\
	if (!pvt->ce_count_available || (pvt->is_registered))	\
		return sprintf(data, "data unavailable\n");	\
	return sprintf(data, "%lu\n",				\
			pvt->udimm_ce_count[param]);		\
}

#define ATTR_COUNTER(param)					\
	{							\
		.attr = {					\
			.name = __stringify(udimm##param),	\
			.mode = (S_IRUGO | S_IWUSR)		\
		},						\
		.show  = i7core_show_counter_##param		\
	}

DECLARE_COUNTER(0);
DECLARE_COUNTER(1);
DECLARE_COUNTER(2);

/*
 * Sysfs struct
 */

static const struct mcidev_sysfs_attribute i7core_addrmatch_attrs[] = {
	ATTR_ADDR_MATCH(channel),
	ATTR_ADDR_MATCH(dimm),
	ATTR_ADDR_MATCH(rank),
	ATTR_ADDR_MATCH(bank),
	ATTR_ADDR_MATCH(page),
	ATTR_ADDR_MATCH(col),
	{ } /* End of list */
};

static const struct mcidev_sysfs_group i7core_inject_addrmatch = {
	.name  = "inject_addrmatch",
	.mcidev_attr = i7core_addrmatch_attrs,
};

static const struct mcidev_sysfs_attribute i7core_udimm_counters_attrs[] = {
	ATTR_COUNTER(0),
	ATTR_COUNTER(1),
	ATTR_COUNTER(2),
	{ .attr = { .name = NULL } }
};

static const struct mcidev_sysfs_group i7core_udimm_counters = {
	.name  = "all_channel_counts",
	.mcidev_attr = i7core_udimm_counters_attrs,
};

static const struct mcidev_sysfs_attribute i7core_sysfs_rdimm_attrs[] = {
	{
		.attr = {
			.name = "inject_section",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show  = i7core_inject_section_show,
		.store = i7core_inject_section_store,
	}, {
		.attr = {
			.name = "inject_type",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show  = i7core_inject_type_show,
		.store = i7core_inject_type_store,
	}, {
		.attr = {
			.name = "inject_eccmask",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show  = i7core_inject_eccmask_show,
		.store = i7core_inject_eccmask_store,
	}, {
		.grp = &i7core_inject_addrmatch,
	}, {
		.attr = {
			.name = "inject_enable",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show  = i7core_inject_enable_show,
		.store = i7core_inject_enable_store,
	},
	{ }	/* End of list */
};

static const struct mcidev_sysfs_attribute i7core_sysfs_udimm_attrs[] = {
	{
		.attr = {
			.name = "inject_section",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show  = i7core_inject_section_show,
		.store = i7core_inject_section_store,
	}, {
		.attr = {
			.name = "inject_type",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show  = i7core_inject_type_show,
		.store = i7core_inject_type_store,
	}, {
		.attr = {
			.name = "inject_eccmask",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show  = i7core_inject_eccmask_show,
		.store = i7core_inject_eccmask_store,
	}, {
		.grp = &i7core_inject_addrmatch,
	}, {
		.attr = {
			.name = "inject_enable",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show  = i7core_inject_enable_show,
		.store = i7core_inject_enable_store,
	}, {
		.grp = &i7core_udimm_counters,
	},
	{ }	/* End of list */
};

/****************************************************************************
	Device initialization routines: put/get, init/exit
 ****************************************************************************/

/*
 * i7core_put_devices	'put' all the devices that we have
 *			reserved via 'get'
 */
static void i7core_put_devices(struct i7core_dev *i7core_dev)
{
	int i;

	debugf0(__FILE__ ": %s()\n", __func__);
	for (i = 0; i < i7core_dev->n_devs; i++) {
		struct pci_dev *pdev = i7core_dev->pdev[i];
		if (!pdev)
			continue;
		debugf0("Removing dev %02x:%02x.%d\n",
			pdev->bus->number,
			PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
		pci_dev_put(pdev);
	}
}

static void i7core_put_all_devices(void)
{
	struct i7core_dev *i7core_dev, *tmp;

	list_for_each_entry_safe(i7core_dev, tmp, &i7core_edac_list, list) {
		i7core_put_devices(i7core_dev);
		free_i7core_dev(i7core_dev);
	}
}

static void __init i7core_xeon_pci_fixup(const struct pci_id_table *table)
{
	struct pci_dev *pdev = NULL;
	int i;

	/*
	 * On Xeon 55xx, the Intel QuickPath generic non-core PCI buses
	 * aren't announced by ACPI, so we need a legacy scan probe
	 * to detect them.
	 */
	while (table && table->descr) {
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL, table->descr[0].dev_id, NULL);
		if (unlikely(!pdev)) {
			for (i = 0; i < MAX_SOCKET_BUSES; i++)
				pcibios_scan_specific_bus(255-i);
		}
		pci_dev_put(pdev);
		table++;
	}
}

static unsigned i7core_pci_lastbus(void)
{
	int last_bus = 0, bus;
	struct pci_bus *b = NULL;

	while ((b = pci_find_next_bus(b)) != NULL) {
		bus = b->number;
		debugf0("Found bus %d\n", bus);
		if (bus > last_bus)
			last_bus = bus;
	}

	debugf0("Last bus %d\n", last_bus);

	return last_bus;
}

/*
 * i7core_get_onedevice()	Find and perform 'get' on one of the MCH's
 *				device/functions we want to reference for
 *				this driver.
 */
static int i7core_get_onedevice(struct pci_dev **prev,
				const struct pci_id_table *table,
				const unsigned devno,
				const unsigned last_bus)
{
	struct i7core_dev *i7core_dev;
	const struct pci_id_descr *dev_descr = &table->descr[devno];

	struct pci_dev *pdev = NULL;
	u8 bus = 0;
	u8 socket = 0;

	pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
			      dev_descr->dev_id, *prev);

	/*
	 * On Xeon 55xx, the Intel QuickPath generic non-core regs
	 * are at addr 8086:2c40, instead of 8086:2c41, so we need
	 * to probe for the alternate address in case of failure.
	 */
	if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_I7_NONCORE && !pdev)
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
				      PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT, *prev);

	if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE && !pdev)
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
				      PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT,
				      *prev);

	if (!pdev) {
		if (*prev) {
			*prev = pdev;
			return 0;
		}

		if (dev_descr->optional)
			return 0;

		if (devno == 0)
			return -ENODEV;

		i7core_printk(KERN_INFO,
			"Device not found: dev %02x.%d PCI ID %04x:%04x\n",
			dev_descr->dev, dev_descr->func,
			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);

		/* End of list, leave */
		return -ENODEV;
	}
	bus = pdev->bus->number;

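	/*
	 * The non-core devices of each socket sit on their own PCI bus,
	 * counting down from the last bus, so the distance from the last
	 * bus identifies the socket.
	 */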
	socket = last_bus - bus;

	i7core_dev = get_i7core_dev(socket);
	if (!i7core_dev) {
		i7core_dev = alloc_i7core_dev(socket, table);
		if (!i7core_dev) {
			pci_dev_put(pdev);
			return -ENOMEM;
		}
	}

	if (i7core_dev->pdev[devno]) {
		i7core_printk(KERN_ERR,
			"Duplicated device for "
			"dev %02x:%02x.%d PCI ID %04x:%04x\n",
			bus, dev_descr->dev, dev_descr->func,
			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
		pci_dev_put(pdev);
		return -ENODEV;
	}

	i7core_dev->pdev[devno] = pdev;

	/* Sanity check */
	if (unlikely(PCI_SLOT(pdev->devfn) != dev_descr->dev ||
			PCI_FUNC(pdev->devfn) != dev_descr->func)) {
		i7core_printk(KERN_ERR,
			"Device PCI ID %04x:%04x "
			"has dev %02x:%02x.%d instead of dev %02x:%02x.%d\n",
			PCI_VENDOR_ID_INTEL, dev_descr->dev_id,
			bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
			bus, dev_descr->dev, dev_descr->func);
		return -ENODEV;
	}

	/* Be sure that the device is enabled */
	if (unlikely(pci_enable_device(pdev) < 0)) {
		i7core_printk(KERN_ERR,
			"Couldn't enable "
			"dev %02x:%02x.%d PCI ID %04x:%04x\n",
			bus, dev_descr->dev, dev_descr->func,
			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
		return -ENODEV;
	}

	debugf0("Detected socket %d dev %02x:%02x.%d PCI ID %04x:%04x\n",
		socket, bus, dev_descr->dev,
		dev_descr->func,
		PCI_VENDOR_ID_INTEL, dev_descr->dev_id);

	/*
	 * As stated on drivers/pci/search.c, the reference count for
	 * @from is always decremented if it is not %NULL. So, as we need
	 * to get all devices up to null, we need to do a get for the device
	 */
	pci_dev_get(pdev);

	*prev = pdev;

	return 0;
}

static int i7core_get_all_devices(void)
{
	int i, rc, last_bus;
	struct pci_dev *pdev = NULL;
	const struct pci_id_table *table = pci_dev_table;

	last_bus = i7core_pci_lastbus();

	while (table && table->descr) {
		for (i = 0; i < table->n_devs; i++) {
			pdev = NULL;
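			/*
			 * i7core_get_onedevice() returns the next matching
			 * device on each call; keep looping until it stops
			 * finding new ones, i.e. one pass per socket.
			 */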
			do {
				rc = i7core_get_onedevice(&pdev, table, i,
							  last_bus);
				if (rc < 0) {
					if (i == 0) {
						i = table->n_devs;
						break;
					}
					i7core_put_all_devices();
					return -ENODEV;
				}
			} while (pdev);
		}
		table++;
	}

	return 0;
}

static int mci_bind_devs(struct mem_ctl_info *mci,
			 struct i7core_dev *i7core_dev)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	struct pci_dev *pdev;
	int i, func, slot;
	char *family;

	pvt->is_registered = false;
	pvt->enable_scrub = false;
	for (i = 0; i < i7core_dev->n_devs; i++) {
		pdev = i7core_dev->pdev[i];
		if (!pdev)
			continue;

		func = PCI_FUNC(pdev->devfn);
		slot = PCI_SLOT(pdev->devfn);
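		/*
		 * Route each function into the private struct: slot 3 holds
		 * the MCR functions, slots 4-6 the per-channel functions,
		 * and 0.0 is the non-core device.
		 */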
		if (slot == 3) {
			if (unlikely(func > MAX_MCR_FUNC))
				goto error;
			pvt->pci_mcr[func] = pdev;
		} else if (likely(slot >= 4 && slot < 4 + NUM_CHANS)) {
			if (unlikely(func > MAX_CHAN_FUNC))
				goto error;
			pvt->pci_ch[slot - 4][func] = pdev;
		} else if (!slot && !func) {
			pvt->pci_noncore = pdev;

			/* Detect the processor family */
			switch (pdev->device) {
			case PCI_DEVICE_ID_INTEL_I7_NONCORE:
				family = "Xeon 35xx/ i7core";
				pvt->enable_scrub = false;
				break;
			case PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT:
				family = "i7-800/i5-700";
				pvt->enable_scrub = false;
				break;
			case PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE:
				family = "Xeon 34xx";
				pvt->enable_scrub = false;
				break;
			case PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT:
				family = "Xeon 55xx";
				pvt->enable_scrub = true;
				break;
			case PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_REV2:
				family = "Xeon 56xx / i7-900";
				pvt->enable_scrub = true;
				break;
			default:
				family = "unknown";
				pvt->enable_scrub = false;
			}
			debugf0("Detected a processor type %s\n", family);
		} else
			goto error;

		debugf0("Associated fn %d.%d, dev = %p, socket %d\n",
			PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
			pdev, i7core_dev->socket);

		if (PCI_SLOT(pdev->devfn) == 3 &&
			PCI_FUNC(pdev->devfn) == 2)
			pvt->is_registered = true;
	}

	return 0;

error:
	i7core_printk(KERN_ERR, "Device %d, function %d "
		      "is out of the expected range\n",
		      slot, func);
	return -EINVAL;
}

/****************************************************************************
			Error check routines
 ****************************************************************************/
static void i7core_rdimm_update_csrow(struct mem_ctl_info *mci,
				      const int chan,
				      const int dimm,
				      const int add)
{
	char *msg;
	struct i7core_pvt *pvt = mci->pvt_info;
	int row = pvt->csrow_map[chan][dimm], i;

	for (i = 0; i < add; i++) {
		msg = kasprintf(GFP_KERNEL, "Corrected error "
				"(Socket=%d channel=%d dimm=%d)",
				pvt->i7core_dev->socket, chan, dimm);

		edac_mc_handle_fbd_ce(mci, row, 0, msg);
		kfree(msg);
	}
}

static void i7core_rdimm_update_ce_count(struct mem_ctl_info *mci,
					 const int chan,
					 const int new0,
					 const int new1,
					 const int new2)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	int add0 = 0, add1 = 0, add2 = 0;
	/* Updates CE counters if it is not the first time here */
	if (pvt->ce_count_available) {
		/* Updates CE counters */

		add2 = new2 - pvt->rdimm_last_ce_count[chan][2];
		add1 = new1 - pvt->rdimm_last_ce_count[chan][1];
		add0 = new0 - pvt->rdimm_last_ce_count[chan][0];

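		/*
		 * The hardware counters are only 15 bits wide, so a reading
		 * smaller than the previous one means the counter wrapped;
		 * bias the delta back into range.
		 */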
		if (add2 < 0)
			add2 += 0x7fff;
		pvt->rdimm_ce_count[chan][2] += add2;

		if (add1 < 0)
			add1 += 0x7fff;
		pvt->rdimm_ce_count[chan][1] += add1;

		if (add0 < 0)
			add0 += 0x7fff;
		pvt->rdimm_ce_count[chan][0] += add0;
	} else
		pvt->ce_count_available = 1;

	/* Store the new values */
	pvt->rdimm_last_ce_count[chan][2] = new2;
	pvt->rdimm_last_ce_count[chan][1] = new1;
	pvt->rdimm_last_ce_count[chan][0] = new0;

	/* Update the EDAC core counters */
	if (add0 != 0)
		i7core_rdimm_update_csrow(mci, chan, 0, add0);
	if (add1 != 0)
		i7core_rdimm_update_csrow(mci, chan, 1, add1);
	if (add2 != 0)
		i7core_rdimm_update_csrow(mci, chan, 2, add2);

}

static void i7core_rdimm_check_mc_ecc_err(struct mem_ctl_info *mci)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	u32 rcv[3][2];
	int i, new0, new1, new2;

	/* Read DEV 3: FUN 2: MC_COR_ECC_CNT regs directly */
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_0,
								&rcv[0][0]);
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_1,
								&rcv[0][1]);
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_2,
								&rcv[1][0]);
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_3,
								&rcv[1][1]);
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_4,
								&rcv[2][0]);
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_5,
								&rcv[2][1]);
	for (i = 0 ; i < 3; i++) {
		debugf3("MC_COR_ECC_CNT%d = 0x%x; MC_COR_ECC_CNT%d = 0x%x\n",
			(i * 2), rcv[i][0], (i * 2) + 1, rcv[i][1]);
		/* If the channel has 3 dimms */
		if (pvt->channel[i].dimms > 2) {
			new0 = DIMM_BOT_COR_ERR(rcv[i][0]);
			new1 = DIMM_TOP_COR_ERR(rcv[i][0]);
			new2 = DIMM_BOT_COR_ERR(rcv[i][1]);
		} else {
			new0 = DIMM_TOP_COR_ERR(rcv[i][0]) +
				DIMM_BOT_COR_ERR(rcv[i][0]);
			new1 = DIMM_TOP_COR_ERR(rcv[i][1]) +
				DIMM_BOT_COR_ERR(rcv[i][1]);
			new2 = 0;
		}

		i7core_rdimm_update_ce_count(mci, i, new0, new1, new2);
	}
}

/* This function is based on the device 3 function 4 registers as described on:
 * Intel Xeon Processor 5500 Series Datasheet Volume 2
 *	http://www.intel.com/Assets/PDF/datasheet/321322.pdf
 * also available at:
 *	http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
 */
static void i7core_udimm_check_mc_ecc_err(struct mem_ctl_info *mci)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	u32 rcv1, rcv0;
	int new0, new1, new2;

	if (!pvt->pci_mcr[4]) {
		debugf0("%s MCR registers not found\n", __func__);
		return;
	}

	/* Corrected test errors */
	pci_read_config_dword(pvt->pci_mcr[4], MC_TEST_ERR_RCV1, &rcv1);
	pci_read_config_dword(pvt->pci_mcr[4], MC_TEST_ERR_RCV0, &rcv0);

	/* Store the new values */
	new2 = DIMM2_COR_ERR(rcv1);
	new1 = DIMM1_COR_ERR(rcv0);
	new0 = DIMM0_COR_ERR(rcv0);

	/* Updates CE counters if it is not the first time here */
	if (pvt->ce_count_available) {
		/* Updates CE counters */
		int add0, add1, add2;

		add2 = new2 - pvt->udimm_last_ce_count[2];
		add1 = new1 - pvt->udimm_last_ce_count[1];
		add0 = new0 - pvt->udimm_last_ce_count[0];

		if (add2 < 0)
			add2 += 0x7fff;
		pvt->udimm_ce_count[2] += add2;

		if (add1 < 0)
			add1 += 0x7fff;
		pvt->udimm_ce_count[1] += add1;

		if (add0 < 0)
			add0 += 0x7fff;
		pvt->udimm_ce_count[0] += add0;

		if (add0 | add1 | add2)
			i7core_printk(KERN_ERR, "New Corrected error(s): "
				      "dimm0: +%d, dimm1: +%d, dimm2 +%d\n",
				      add0, add1, add2);
	} else
		pvt->ce_count_available = 1;

	/* Store the new values */
	pvt->udimm_last_ce_count[2] = new2;
	pvt->udimm_last_ce_count[1] = new1;
	pvt->udimm_last_ce_count[0] = new0;
}

/*
 * According to tables E-11 and E-12 of chapter E.3.3 of the Intel 64 and
 * IA-32 Architectures Software Developer's Manual Volume 3B,
 * Nehalem is defined as family 0x06, model 0x1a.
 *
 * The MCA registers used here are the following ones:
 *     struct mce field	MCA Register
 *     m->status	MSR_IA32_MC8_STATUS
 *     m->addr		MSR_IA32_MC8_ADDR
 *     m->misc		MSR_IA32_MC8_MISC
 * In the case of Nehalem, the error information is encoded in the
 * .status and .misc fields.
 */
static void i7core_mce_output_error(struct mem_ctl_info *mci,
				    const struct mce *m)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	char *type, *optype, *err, *msg;
	unsigned long error = m->status & 0x1ff0000l;
	u32 optypenum = (m->status >> 4) & 0x07;
	u32 core_err_cnt = (m->status >> 38) & 0x7fff;
	u32 dimm = (m->misc >> 16) & 0x3;
	u32 channel = (m->misc >> 18) & 0x3;
	u32 syndrome = m->misc >> 32;
	u32 errnum = find_first_bit(&error, 32);
	int csrow;

	if (m->mcgstatus & 1)
		type = "FATAL";
	else
		type = "NON_FATAL";

	switch (optypenum) {
	case 0:
		optype = "generic undef request";
		break;
	case 1:
		optype = "read error";
		break;
	case 2:
		optype = "write error";
		break;
	case 3:
		optype = "addr/cmd error";
		break;
	case 4:
		optype = "scrubbing error";
		break;
	default:
		optype = "reserved";
		break;
	}

	switch (errnum) {
	case 16:
		err = "read ECC error";
		break;
	case 17:
		err = "RAS ECC error";
		break;
	case 18:
		err = "write parity error";
		break;
	case 19:
		err = "redundancy loss";
		break;
	case 20:
		err = "reserved";
		break;
	case 21:
		err = "memory range error";
		break;
	case 22:
		err = "RTID out of range";
		break;
	case 23:
		err = "address parity error";
		break;
	case 24:
		err = "byte enable parity error";
		break;
	default:
		err = "unknown";
	}

	/* FIXME: should convert addr into bank and rank information */
	msg = kasprintf(GFP_ATOMIC,
		"%s (addr = 0x%08llx, cpu=%d, Dimm=%d, Channel=%d, "
		"syndrome=0x%08x, count=%d, Err=%08llx:%08llx (%s: %s))\n",
		type, (long long) m->addr, m->cpu, dimm, channel,
		syndrome, core_err_cnt, (long long)m->status,
		(long long)m->misc, optype, err);

	debugf0("%s", msg);

	csrow = pvt->csrow_map[channel][dimm];

	/* Call the helper to output message */
	if (m->mcgstatus & 1)
		edac_mc_handle_fbd_ue(mci, csrow, 0,
				0 /* FIXME: should be channel here */, msg);
	else if (!pvt->is_registered)
		edac_mc_handle_fbd_ce(mci, csrow,
				0 /* FIXME: should be channel here */, msg);

	kfree(msg);
}

/*
 * i7core_check_error	Retrieve and process errors reported by the
 *			hardware. Called by the Core module.
 */
static void i7core_check_error(struct mem_ctl_info *mci)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	int i;
	unsigned count = 0;
	struct mce *m;

	/*
	 * MCE first step: Copy all mce errors into a temporary buffer
	 * We use a double buffering here, to reduce the risk of
	 * losing an error.
	 */
	smp_rmb();
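	/*
	 * mce_out is advanced by the NMI-time producer
	 * (i7core_mce_check_error) and mce_in by this consumer, so the
	 * modular difference below is the number of queued, not yet
	 * parsed, entries in the ring.
	 */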
	count = (pvt->mce_out + MCE_LOG_LEN - pvt->mce_in)
		% MCE_LOG_LEN;
	if (!count)
		goto check_ce_error;

	m = pvt->mce_outentry;
	if (pvt->mce_in + count > MCE_LOG_LEN) {
		unsigned l = MCE_LOG_LEN - pvt->mce_in;

		memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * l);
		smp_wmb();
		pvt->mce_in = 0;
		count -= l;
		m += l;
	}
	memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * count);
	smp_wmb();
	pvt->mce_in += count;

	smp_rmb();
	if (pvt->mce_overrun) {
		i7core_printk(KERN_ERR, "Lost %d memory errors\n",
			      pvt->mce_overrun);
		smp_wmb();
		pvt->mce_overrun = 0;
	}

	/*
	 * MCE second step: parse errors and display
	 */
	for (i = 0; i < count; i++)
		i7core_mce_output_error(mci, &pvt->mce_outentry[i]);

	/*
	 * Now, let's increment CE error counts
	 */
check_ce_error:
	if (!pvt->is_registered)
		i7core_udimm_check_mc_ecc_err(mci);
	else
		i7core_rdimm_check_mc_ecc_err(mci);
}

/*
 * i7core_mce_check_error	Replicates the mcelog routine to get errors.
 *				This routine simply queues mce errors and
 *				returns; the error itself is handled later
 *				by i7core_check_error.
 * WARNING: As this routine should be called at NMI time, extra care should
 * be taken to avoid deadlocks, and to be as fast as possible.
 */
1899static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val,
1900 void *data)
1901{
1902 struct mce *mce = (struct mce *)data;
1903 struct i7core_dev *i7_dev;
1904 struct mem_ctl_info *mci;
1905 struct i7core_pvt *pvt;
1906
1907 i7_dev = get_i7core_dev(mce->socketid);
1908 if (!i7_dev)
1909 return NOTIFY_BAD;
1910
1911 mci = i7_dev->mci;
1912 pvt = mci->pvt_info;
1913
1914 /*
1915 * Just let mcelog handle it if the error is
1916 * outside the memory controller
1917 */
1918 if (((mce->status & 0xffff) >> 7) != 1)
1919 return NOTIFY_DONE;
1920
1921 /* Bank 8 registers are the only ones that we know how to handle */
1922 if (mce->bank != 8)
1923 return NOTIFY_DONE;
1924
1925#ifdef CONFIG_SMP
1926 /* Only handle if it is the right mc controller */
1927 if (mce->socketid != pvt->i7core_dev->socket)
1928 return NOTIFY_DONE;
1929#endif
1930
1931 smp_rmb();
1932 if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) {
1933 smp_wmb();
1934 pvt->mce_overrun++;
1935 return NOTIFY_DONE;
1936 }
1937
1938 /* Copy memory error at the ringbuffer */
1939 memcpy(&pvt->mce_entry[pvt->mce_out], mce, sizeof(*mce));
1940 smp_wmb();
1941 pvt->mce_out = (pvt->mce_out + 1) % MCE_LOG_LEN;
1942
1943 /* Handle fatal errors immediately */
1944 if (mce->mcgstatus & 1)
1945 i7core_check_error(mci);
1946
1947 /* Advise mcelog that the errors were handled */
1948 return NOTIFY_STOP;
1949}
1950
1951static struct notifier_block i7_mce_dec = {
1952 .notifier_call = i7core_mce_check_error,
1953};
1954
1955struct memdev_dmi_entry {
1956 u8 type;
1957 u8 length;
1958 u16 handle;
1959 u16 phys_mem_array_handle;
1960 u16 mem_err_info_handle;
1961 u16 total_width;
1962 u16 data_width;
1963 u16 size;
1964 u8 form;
1965 u8 device_set;
1966 u8 device_locator;
1967 u8 bank_locator;
1968 u8 memory_type;
1969 u16 type_detail;
1970 u16 speed;
1971 u8 manufacturer;
1972 u8 serial_number;
1973 u8 asset_tag;
1974 u8 part_number;
1975 u8 attributes;
1976 u32 extended_size;
1977 u16 conf_mem_clk_speed;
1978} __attribute__((__packed__));
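
/*
 * This mirrors the SMBIOS type 17 (Memory Device) record layout; the
 * packed attribute keeps the C field offsets identical to the byte
 * offsets defined by the SMBIOS specification, which is what the
 * length checks in decode_dclk() below rely on.
 */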
1979
1980
1981/*
1982 * Decode the DRAM Clock Frequency, be paranoid, make sure that all
1983 * memory devices show the same speed, and if they don't then consider
1984 * all speeds to be invalid.
1985 */
1986static void decode_dclk(const struct dmi_header *dh, void *_dclk_freq)
1987{
1988 int *dclk_freq = _dclk_freq;
1989 u16 dmi_mem_clk_speed;
1990
1991 if (*dclk_freq == -1)
1992 return;
1993
1994 if (dh->type == DMI_ENTRY_MEM_DEVICE) {
1995 struct memdev_dmi_entry *memdev_dmi_entry =
1996 (struct memdev_dmi_entry *)dh;
1997 unsigned long conf_mem_clk_speed_offset =
1998 (unsigned long)&memdev_dmi_entry->conf_mem_clk_speed -
1999 (unsigned long)&memdev_dmi_entry->type;
2000 unsigned long speed_offset =
2001 (unsigned long)&memdev_dmi_entry->speed -
2002 (unsigned long)&memdev_dmi_entry->type;
2003
2004 /* Check that a DIMM is present */
2005 if (memdev_dmi_entry->size == 0)
2006 return;
2007
2008 /*
2009		 * Pick the configured speed if it's available; otherwise fall back
2010		 * to the DIMM speed. If neither is present, we don't have a speed.
2011 */
2012 if (memdev_dmi_entry->length > conf_mem_clk_speed_offset) {
2013 dmi_mem_clk_speed =
2014 memdev_dmi_entry->conf_mem_clk_speed;
2015 } else if (memdev_dmi_entry->length > speed_offset) {
2016 dmi_mem_clk_speed = memdev_dmi_entry->speed;
2017 } else {
2018 *dclk_freq = -1;
2019 return;
2020 }
2021
2022 if (*dclk_freq == 0) {
2023 /* First pass, speed was 0 */
2024 if (dmi_mem_clk_speed > 0) {
2025 /* Set speed if a valid speed is read */
2026 *dclk_freq = dmi_mem_clk_speed;
2027 } else {
2028 /* Otherwise we don't have a valid speed */
2029 *dclk_freq = -1;
2030 }
2031 } else if (*dclk_freq > 0 &&
2032 *dclk_freq != dmi_mem_clk_speed) {
2033 /*
2034			 * If we already have a speed, check that all DIMMs run at
2035			 * the same speed; otherwise mark the speed as invalid.
2036 */
2037 *dclk_freq = -1;
2038 }
2039 }
2040}
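
/*
 * The manual pointer arithmetic above just computes field offsets from
 * the start of the entry; a minimal equivalent sketch using offsetof()
 * would be:
 *
 *	if (memdev_dmi_entry->length >
 *	    offsetof(struct memdev_dmi_entry, conf_mem_clk_speed))
 *		dmi_mem_clk_speed = memdev_dmi_entry->conf_mem_clk_speed;
 *
 * SMBIOS type 17 records have grown over the spec revisions, so the
 * record length is what gates which fields are safe to read.
 */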
2041
2042/*
2043 * The default DCLK frequency is used as a fallback if we
2044 * fail to find anything reliable in the DMI. The value
2045 * is taken straight from the datasheet.
2046 */
2047#define DEFAULT_DCLK_FREQ 800
2048
2049static int get_dclk_freq(void)
2050{
2051 int dclk_freq = 0;
2052
2053 dmi_walk(decode_dclk, (void *)&dclk_freq);
2054
2055 if (dclk_freq < 1)
2056 return DEFAULT_DCLK_FREQ;
2057
2058 return dclk_freq;
2059}
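
/*
 * After the dmi_walk() above, dclk_freq is either -1 (conflicting or
 * invalid DMI speeds), 0 (no populated DIMM reported a speed), or a
 * positive speed in MHz; anything below 1 falls back to
 * DEFAULT_DCLK_FREQ.
 */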
2060
2061/*
2062 * set_sdram_scrub_rate	This routine sets the byte/sec bandwidth scrub
2063 *			rate in hardware according to the SCRUBINTERVAL
2064 *			formula found in the datasheet.
2065 */
2066static int set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 new_bw)
2067{
2068 struct i7core_pvt *pvt = mci->pvt_info;
2069 struct pci_dev *pdev;
2070 u32 dw_scrub;
2071 u32 dw_ssr;
2072
2073 /* Get data from the MC register, function 2 */
2074 pdev = pvt->pci_mcr[2];
2075 if (!pdev)
2076 return -ENODEV;
2077
2078 pci_read_config_dword(pdev, MC_SCRUB_CONTROL, &dw_scrub);
2079
2080 if (new_bw == 0) {
2081		/* Prepare to disable patrol scrubbing */
2082 dw_scrub &= ~STARTSCRUB;
2083 /* Stop the patrol scrub engine */
2084 write_and_test(pdev, MC_SCRUB_CONTROL,
2085 dw_scrub & ~SCRUBINTERVAL_MASK);
2086
2087		/* Get the current scrub control status and set the disable mode */
2088 pci_read_config_dword(pdev, MC_SSRCONTROL, &dw_ssr);
2089 dw_ssr &= ~SSR_MODE_MASK;
2090 dw_ssr |= SSR_MODE_DISABLE;
2091 } else {
2092 const int cache_line_size = 64;
2093 const u32 freq_dclk_mhz = pvt->dclk_freq;
2094 unsigned long long scrub_interval;
2095 /*
2096		 * Translate the desired scrub rate to a register value and
2097		 * program that value into the corresponding register.
2098 */
2099 scrub_interval = (unsigned long long)freq_dclk_mhz *
2100 cache_line_size * 1000000;
2101 do_div(scrub_interval, new_bw);
2102
2103 if (!scrub_interval || scrub_interval > SCRUBINTERVAL_MASK)
2104 return -EINVAL;
2105
2106 dw_scrub = SCRUBINTERVAL_MASK & scrub_interval;
2107
2108 /* Start the patrol scrub engine */
2109 pci_write_config_dword(pdev, MC_SCRUB_CONTROL,
2110 STARTSCRUB | dw_scrub);
2111
2112		/* Get the current scrub control status and set the enable mode */
2113 pci_read_config_dword(pdev, MC_SSRCONTROL, &dw_ssr);
2114 dw_ssr &= ~SSR_MODE_MASK;
2115 dw_ssr |= SSR_MODE_ENABLE;
2116 }
2117 /* Disable or enable scrubbing */
2118 pci_write_config_dword(pdev, MC_SSRCONTROL, dw_ssr);
2119
2120 return new_bw;
2121}
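
/*
 * Worked example of the SCRUBINTERVAL formula (illustrative values
 * only): with the default 800 MHz DCLK and 64-byte cache lines, asking
 * for a 5 MB/s scrub bandwidth gives
 *
 *	scrub_interval = 800 * 64 * 1000000 / 5000000 = 10240
 *
 * which fits in SCRUBINTERVAL_MASK and is programmed together with
 * STARTSCRUB to start the patrol scrub engine.
 */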
2122
2123/*
2124 * get_sdram_scrub_rate	This routine converts the current scrub rate value
2125 *			into byte/sec bandwidth according to the
2126 *			SCRUBINTERVAL formula found in the datasheet.
2127 */
2128static int get_sdram_scrub_rate(struct mem_ctl_info *mci)
2129{
2130 struct i7core_pvt *pvt = mci->pvt_info;
2131 struct pci_dev *pdev;
2132 const u32 cache_line_size = 64;
2133 const u32 freq_dclk_mhz = pvt->dclk_freq;
2134 unsigned long long scrub_rate;
2135 u32 scrubval;
2136
2137 /* Get data from the MC register, function 2 */
2138 pdev = pvt->pci_mcr[2];
2139 if (!pdev)
2140 return -ENODEV;
2141
2142 /* Get current scrub control data */
2143 pci_read_config_dword(pdev, MC_SCRUB_CONTROL, &scrubval);
2144
2145	/* Mask the highest 8 bits to 0 */
2146 scrubval &= SCRUBINTERVAL_MASK;
2147 if (!scrubval)
2148 return 0;
2149
2150	/* Convert the scrub rate value into byte/sec bandwidth */
2151 scrub_rate = (unsigned long long)freq_dclk_mhz *
2152 1000000 * cache_line_size;
2153 do_div(scrub_rate, scrubval);
2154 return (int)scrub_rate;
2155}
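
/*
 * This is the inverse of set_sdram_scrub_rate(): reading back the
 * scrubval 10240 from the example above with an 800 MHz DCLK yields
 * 800 * 1000000 * 64 / 10240 = 5000000 bytes/s, i.e. the bandwidth
 * that was programmed.
 */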
2156
2157static void enable_sdram_scrub_setting(struct mem_ctl_info *mci)
2158{
2159 struct i7core_pvt *pvt = mci->pvt_info;
2160 u32 pci_lock;
2161
2162 /* Unlock writes to pci registers */
2163 pci_read_config_dword(pvt->pci_noncore, MC_CFG_CONTROL, &pci_lock);
2164 pci_lock &= ~0x3;
2165 pci_write_config_dword(pvt->pci_noncore, MC_CFG_CONTROL,
2166 pci_lock | MC_CFG_UNLOCK);
2167
2168 mci->set_sdram_scrub_rate = set_sdram_scrub_rate;
2169 mci->get_sdram_scrub_rate = get_sdram_scrub_rate;
2170}
2171
2172static void disable_sdram_scrub_setting(struct mem_ctl_info *mci)
2173{
2174 struct i7core_pvt *pvt = mci->pvt_info;
2175 u32 pci_lock;
2176
2177 /* Lock writes to pci registers */
2178 pci_read_config_dword(pvt->pci_noncore, MC_CFG_CONTROL, &pci_lock);
2179 pci_lock &= ~0x3;
2180 pci_write_config_dword(pvt->pci_noncore, MC_CFG_CONTROL,
2181 pci_lock | MC_CFG_LOCK);
2182}
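
/*
 * The two helpers above bracket scrub-rate support: the enable path
 * unlocks MC register writes via MC_CFG_CONTROL (MC_CFG_UNLOCK) and
 * installs the {set,get}_sdram_scrub_rate callbacks, while the disable
 * path re-locks the registers with MC_CFG_LOCK.
 */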
2183
2184static void i7core_pci_ctl_create(struct i7core_pvt *pvt)
2185{
2186 pvt->i7core_pci = edac_pci_create_generic_ctl(
2187 &pvt->i7core_dev->pdev[0]->dev,
2188 EDAC_MOD_STR);
2189 if (unlikely(!pvt->i7core_pci))
2190 i7core_printk(KERN_WARNING,
2191 "Unable to setup PCI error report via EDAC\n");
2192}
2193
2194static void i7core_pci_ctl_release(struct i7core_pvt *pvt)
2195{
2196 if (likely(pvt->i7core_pci))
2197 edac_pci_release_generic_ctl(pvt->i7core_pci);
2198 else
2199 i7core_printk(KERN_ERR,
2200 "Couldn't find mem_ctl_info for socket %d\n",
2201 pvt->i7core_dev->socket);
2202 pvt->i7core_pci = NULL;
2203}
2204
2205static void i7core_unregister_mci(struct i7core_dev *i7core_dev)
2206{
2207 struct mem_ctl_info *mci = i7core_dev->mci;
2208 struct i7core_pvt *pvt;
2209
2210 if (unlikely(!mci || !mci->pvt_info)) {
2211 debugf0("MC: " __FILE__ ": %s(): dev = %p\n",
2212 __func__, &i7core_dev->pdev[0]->dev);
2213
2214 i7core_printk(KERN_ERR, "Couldn't find mci handler\n");
2215 return;
2216 }
2217
2218 pvt = mci->pvt_info;
2219
2220 debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n",
2221 __func__, mci, &i7core_dev->pdev[0]->dev);
2222
2223	/* Disable scrub rate setting */
2224 if (pvt->enable_scrub)
2225 disable_sdram_scrub_setting(mci);
2226
2227 mce_unregister_decode_chain(&i7_mce_dec);
2228
2229 /* Disable EDAC polling */
2230 i7core_pci_ctl_release(pvt);
2231
2232 /* Remove MC sysfs nodes */
2233 edac_mc_del_mc(mci->dev);
2234
2235 debugf1("%s: free mci struct\n", mci->ctl_name);
2236 kfree(mci->ctl_name);
2237 edac_mc_free(mci);
2238 i7core_dev->mci = NULL;
2239}
2240
2241static int i7core_register_mci(struct i7core_dev *i7core_dev)
2242{
2243 struct mem_ctl_info *mci;
2244 struct i7core_pvt *pvt;
2245 int rc, channels, csrows;
2246
2247	/* Check the number of active, non-disabled channels */
2248 rc = i7core_get_active_channels(i7core_dev->socket, &channels, &csrows);
2249 if (unlikely(rc < 0))
2250 return rc;
2251
2252 /* allocate a new MC control structure */
2253 mci = edac_mc_alloc(sizeof(*pvt), csrows, channels, i7core_dev->socket);
2254 if (unlikely(!mci))
2255 return -ENOMEM;
2256
2257 debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n",
2258 __func__, mci, &i7core_dev->pdev[0]->dev);
2259
2260 pvt = mci->pvt_info;
2261 memset(pvt, 0, sizeof(*pvt));
2262
2263	/* Associate i7core_dev and mci for later use */
2264 pvt->i7core_dev = i7core_dev;
2265 i7core_dev->mci = mci;
2266
2267 /*
2268	 * FIXME: how to handle RDDR3 at MCI level? It is possible to have
2269	 * mixed RDDR3/UDDR3 with Nehalem, provided that they are on different
2270	 * memory channels.
2271 */
2272 mci->mtype_cap = MEM_FLAG_DDR3;
2273 mci->edac_ctl_cap = EDAC_FLAG_NONE;
2274 mci->edac_cap = EDAC_FLAG_NONE;
2275 mci->mod_name = "i7core_edac.c";
2276 mci->mod_ver = I7CORE_REVISION;
2277 mci->ctl_name = kasprintf(GFP_KERNEL, "i7 core #%d",
2278 i7core_dev->socket);
2279 mci->dev_name = pci_name(i7core_dev->pdev[0]);
2280 mci->ctl_page_to_phys = NULL;
2281
2282 /* Store pci devices at mci for faster access */
2283 rc = mci_bind_devs(mci, i7core_dev);
2284 if (unlikely(rc < 0))
2285 goto fail0;
2286
2287 if (pvt->is_registered)
2288 mci->mc_driver_sysfs_attributes = i7core_sysfs_rdimm_attrs;
2289 else
2290 mci->mc_driver_sysfs_attributes = i7core_sysfs_udimm_attrs;
2291
2292 /* Get dimm basic config */
2293 get_dimm_config(mci);
2294 /* record ptr to the generic device */
2295 mci->dev = &i7core_dev->pdev[0]->dev;
2296 /* Set the function pointer to an actual operation function */
2297 mci->edac_check = i7core_check_error;
2298
2299	/* Enable scrub rate setting */
2300 if (pvt->enable_scrub)
2301 enable_sdram_scrub_setting(mci);
2302
2303 /* add this new MC control structure to EDAC's list of MCs */
2304 if (unlikely(edac_mc_add_mc(mci))) {
2305 debugf0("MC: " __FILE__
2306 ": %s(): failed edac_mc_add_mc()\n", __func__);
2307 /* FIXME: perhaps some code should go here that disables error
2308 * reporting if we just enabled it
2309 */
2310
2311 rc = -EINVAL;
2312 goto fail0;
2313 }
2314
2315 /* Default error mask is any memory */
2316 pvt->inject.channel = 0;
2317 pvt->inject.dimm = -1;
2318 pvt->inject.rank = -1;
2319 pvt->inject.bank = -1;
2320 pvt->inject.page = -1;
2321 pvt->inject.col = -1;
2322
2323 /* allocating generic PCI control info */
2324 i7core_pci_ctl_create(pvt);
2325
2326 /* DCLK for scrub rate setting */
2327 pvt->dclk_freq = get_dclk_freq();
2328
2329 mce_register_decode_chain(&i7_mce_dec);
2330
2331 return 0;
2332
2333fail0:
2334 kfree(mci->ctl_name);
2335 edac_mc_free(mci);
2336 i7core_dev->mci = NULL;
2337 return rc;
2338}
2339
2340/*
2341 * i7core_probe	Probe for ONE instance of the device to see if it is
2342 *		present.
2343 * return:
2344 *	0 if a device was found
2345 *	< 0 on error
2346 */
2347
2348static int __devinit i7core_probe(struct pci_dev *pdev,
2349 const struct pci_device_id *id)
2350{
2351 int rc, count = 0;
2352 struct i7core_dev *i7core_dev;
2353
2354 /* get the pci devices we want to reserve for our use */
2355 mutex_lock(&i7core_edac_lock);
2356
2357 /*
2358	 * All memory controllers are allocated on the first pass.
2359 */
2360 if (unlikely(probed >= 1)) {
2361 mutex_unlock(&i7core_edac_lock);
2362 return -ENODEV;
2363 }
2364 probed++;
2365
2366 rc = i7core_get_all_devices();
2367 if (unlikely(rc < 0))
2368 goto fail0;
2369
2370 list_for_each_entry(i7core_dev, &i7core_edac_list, list) {
2371 count++;
2372 rc = i7core_register_mci(i7core_dev);
2373 if (unlikely(rc < 0))
2374 goto fail1;
2375 }
2376
2377 /*
2378	 * Nehalem-EX uses a different memory controller. However, as the
2379	 * memory controller is not visible on some Nehalem/Nehalem-EP, we
2380	 * need to probe indirectly via an X58 PCI device. The same devices
2381	 * are found on (some) Nehalem-EX, so on those machines the
2382	 * probe routine needs to return -ENODEV, as the actual memory
2383	 * controller registers won't be detected.
2384 */
2385 if (!count) {
2386 rc = -ENODEV;
2387 goto fail1;
2388 }
2389
2390 i7core_printk(KERN_INFO,
2391 "Driver loaded, %d memory controller(s) found.\n",
2392 count);
2393
2394 mutex_unlock(&i7core_edac_lock);
2395 return 0;
2396
2397fail1:
2398 list_for_each_entry(i7core_dev, &i7core_edac_list, list)
2399 i7core_unregister_mci(i7core_dev);
2400
2401 i7core_put_all_devices();
2402fail0:
2403 mutex_unlock(&i7core_edac_lock);
2404 return rc;
2405}
2406
2407/*
2408 * i7core_remove	destructor for one instance of the device
2410 */
2411static void __devexit i7core_remove(struct pci_dev *pdev)
2412{
2413 struct i7core_dev *i7core_dev;
2414
2415 debugf0(__FILE__ ": %s()\n", __func__);
2416
2417 /*
2418	 * There is a problem here: the pdev value passed in for removal will
2419	 * be wrong, since it points to the X58 device used to detect that the
2420	 * machine is a Nehalem or newer design. However, due to the way several
2421	 * PCI devices are grouped together to provide MC functionality, we
2422	 * need to use a different method for releasing the devices.
2423 */
2424
2425 mutex_lock(&i7core_edac_lock);
2426
2427 if (unlikely(!probed)) {
2428 mutex_unlock(&i7core_edac_lock);
2429 return;
2430 }
2431
2432 list_for_each_entry(i7core_dev, &i7core_edac_list, list)
2433 i7core_unregister_mci(i7core_dev);
2434
2435 /* Release PCI resources */
2436 i7core_put_all_devices();
2437
2438 probed--;
2439
2440 mutex_unlock(&i7core_edac_lock);
2441}
2442
2443MODULE_DEVICE_TABLE(pci, i7core_pci_tbl);
2444
2445/*
2446 * i7core_driver pci_driver structure for this module
2447 *
2448 */
2449static struct pci_driver i7core_driver = {
2450 .name = "i7core_edac",
2451 .probe = i7core_probe,
2452 .remove = __devexit_p(i7core_remove),
2453 .id_table = i7core_pci_tbl,
2454};
2455
2456/*
2457 * i7core_init Module entry function
2458 * Try to initialize this module for its devices
2459 */
2460static int __init i7core_init(void)
2461{
2462 int pci_rc;
2463
2464 debugf2("MC: " __FILE__ ": %s()\n", __func__);
2465
2466 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
2467 opstate_init();
2468
2469 if (use_pci_fixup)
2470 i7core_xeon_pci_fixup(pci_dev_table);
2471
2472 pci_rc = pci_register_driver(&i7core_driver);
2473
2474 if (pci_rc >= 0)
2475 return 0;
2476
2477 i7core_printk(KERN_ERR, "Failed to register device with error %d.\n",
2478 pci_rc);
2479
2480 return pci_rc;
2481}
2482
2483/*
2484 * i7core_exit() Module exit function
2485 * Unregister the driver
2486 */
2487static void __exit i7core_exit(void)
2488{
2489 debugf2("MC: " __FILE__ ": %s()\n", __func__);
2490 pci_unregister_driver(&i7core_driver);
2491}
2492
2493module_init(i7core_init);
2494module_exit(i7core_exit);
2495
2496MODULE_LICENSE("GPL");
2497MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
2498MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
2499MODULE_DESCRIPTION("MC Driver for Intel i7 Core memory controllers - "
2500 I7CORE_REVISION);
2501
2502module_param(edac_op_state, int, 0444);
2503MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");