1 /* Intel i7 core/Nehalem Memory Controller kernel module
2 *
3 * This driver supports the memory controllers found on the Intel
4 * processor families i7core, i7core 7xx/8xx, i5core, Xeon 35xx,
5 * Xeon 55xx and Xeon 56xx also known as Nehalem, Nehalem-EP, Lynnfield
6 * and Westmere-EP.
7 *
8 * This file may be distributed under the terms of the
9 * GNU General Public License version 2 only.
10 *
11 * Copyright (c) 2009-2010 by:
12 * Mauro Carvalho Chehab <mchehab@redhat.com>
13 *
14 * Red Hat Inc. http://www.redhat.com
15 *
16 * Forked and adapted from the i5400_edac driver
17 *
18 * Based on the following public Intel datasheets:
19 * Intel Core i7 Processor Extreme Edition and Intel Core i7 Processor
20 * Datasheet, Volume 2:
21 * http://download.intel.com/design/processor/datashts/320835.pdf
22 * Intel Xeon Processor 5500 Series Datasheet Volume 2
23 * http://www.intel.com/Assets/PDF/datasheet/321322.pdf
24 * also available at:
25 * http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
26 */
27
28 #include <linux/module.h>
29 #include <linux/init.h>
30 #include <linux/pci.h>
31 #include <linux/pci_ids.h>
32 #include <linux/slab.h>
33 #include <linux/delay.h>
34 #include <linux/dmi.h>
35 #include <linux/edac.h>
36 #include <linux/mmzone.h>
37 #include <linux/smp.h>
38 #include <asm/mce.h>
39 #include <asm/processor.h>
40 #include <asm/div64.h>
41
42 #include "edac_core.h"
43
44 /* Static vars */
45 static LIST_HEAD(i7core_edac_list);
46 static DEFINE_MUTEX(i7core_edac_lock);
47 static int probed;
48
49 static int use_pci_fixup;
50 module_param(use_pci_fixup, int, 0444);
51 MODULE_PARM_DESC(use_pci_fixup, "Enable PCI fixup to seek for hidden devices");
52 /*
53 * This is used for Nehalem-EP and Nehalem-EX devices, where the non-core
54 * registers start at bus 255, and are not reported by BIOS.
55 * We currently only find devices with up to 2 sockets. In order to support
56 * more QPI (Quick Path Interconnect) buses, just increment this number.
57 */
58 #define MAX_SOCKET_BUSES 2
59
60
61 /*
62 * Alter this version for the module when modifications are made
63 */
64 #define I7CORE_REVISION " Ver: 1.0.0"
65 #define EDAC_MOD_STR "i7core_edac"
66
67 /*
68 * Debug macros
69 */
70 #define i7core_printk(level, fmt, arg...) \
71 edac_printk(level, "i7core", fmt, ##arg)
72
73 #define i7core_mc_printk(mci, level, fmt, arg...) \
74 edac_mc_chipset_printk(mci, level, "i7core", fmt, ##arg)
75
76 /*
77 * i7core Memory Controller Registers
78 */
79
80 /* OFFSETS for Device 0 Function 0 */
81
82 #define MC_CFG_CONTROL 0x90
83 #define MC_CFG_UNLOCK 0x02
84 #define MC_CFG_LOCK 0x00
85
86 /* OFFSETS for Device 3 Function 0 */
87
88 #define MC_CONTROL 0x48
89 #define MC_STATUS 0x4c
90 #define MC_MAX_DOD 0x64
91
92 /*
93 * OFFSETS for Device 3 Function 4, as indicated in the Xeon 5500 datasheet:
94 * http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
95 */
96
97 #define MC_TEST_ERR_RCV1 0x60
98 #define DIMM2_COR_ERR(r) ((r) & 0x7fff)
99
100 #define MC_TEST_ERR_RCV0 0x64
101 #define DIMM1_COR_ERR(r) (((r) >> 16) & 0x7fff)
102 #define DIMM0_COR_ERR(r) ((r) & 0x7fff)
103
104 /* OFFSETS for Device 3 Function 2, as indicated in the Xeon 5500 datasheet */
105 #define MC_SSRCONTROL 0x48
106 #define SSR_MODE_DISABLE 0x00
107 #define SSR_MODE_ENABLE 0x01
108 #define SSR_MODE_MASK 0x03
109
110 #define MC_SCRUB_CONTROL 0x4c
111 #define STARTSCRUB (1 << 24)
112 #define SCRUBINTERVAL_MASK 0xffffff
113
114 #define MC_COR_ECC_CNT_0 0x80
115 #define MC_COR_ECC_CNT_1 0x84
116 #define MC_COR_ECC_CNT_2 0x88
117 #define MC_COR_ECC_CNT_3 0x8c
118 #define MC_COR_ECC_CNT_4 0x90
119 #define MC_COR_ECC_CNT_5 0x94
120
121 #define DIMM_TOP_COR_ERR(r) (((r) >> 16) & 0x7fff)
122 #define DIMM_BOT_COR_ERR(r) ((r) & 0x7fff)
123
124
125 /* OFFSETS for Devices 4,5 and 6 Function 0 */
126
127 #define MC_CHANNEL_DIMM_INIT_PARAMS 0x58
128 #define THREE_DIMMS_PRESENT (1 << 24)
129 #define SINGLE_QUAD_RANK_PRESENT (1 << 23)
130 #define QUAD_RANK_PRESENT (1 << 22)
131 #define REGISTERED_DIMM (1 << 15)
132
133 #define MC_CHANNEL_MAPPER 0x60
134 #define RDLCH(r, ch) ((((r) >> (3 + (ch * 6))) & 0x07) - 1)
135 #define WRLCH(r, ch) ((((r) >> (ch * 6)) & 0x07) - 1)
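/*
 * RDLCH()/WRLCH() extract the 3-bit read/write logical-channel field for
 * physical channel ch from MC_CHANNEL_MAPPER and convert it to a 0-based
 * index by subtracting 1 (a field value of 0 therefore yields -1, which
 * presumably means "not mapped"). For example, RDLCH(r, 1) looks at
 * bits 11:9 of r, and WRLCH(r, 1) at bits 8:6.
 */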
136
137 #define MC_CHANNEL_RANK_PRESENT 0x7c
138 #define RANK_PRESENT_MASK 0xffff
139
140 #define MC_CHANNEL_ADDR_MATCH 0xf0
141 #define MC_CHANNEL_ERROR_MASK 0xf8
142 #define MC_CHANNEL_ERROR_INJECT 0xfc
143 #define INJECT_ADDR_PARITY 0x10
144 #define INJECT_ECC 0x08
145 #define MASK_CACHELINE 0x06
146 #define MASK_FULL_CACHELINE 0x06
147 #define MASK_MSB32_CACHELINE 0x04
148 #define MASK_LSB32_CACHELINE 0x02
149 #define NO_MASK_CACHELINE 0x00
150 #define REPEAT_EN 0x01
151
152 /* OFFSETS for Devices 4,5 and 6 Function 1 */
153
154 #define MC_DOD_CH_DIMM0 0x48
155 #define MC_DOD_CH_DIMM1 0x4c
156 #define MC_DOD_CH_DIMM2 0x50
157 #define RANKOFFSET_MASK ((1 << 12) | (1 << 11) | (1 << 10))
158 #define RANKOFFSET(x) ((x & RANKOFFSET_MASK) >> 10)
159 #define DIMM_PRESENT_MASK (1 << 9)
160 #define DIMM_PRESENT(x) (((x) & DIMM_PRESENT_MASK) >> 9)
161 #define MC_DOD_NUMBANK_MASK ((1 << 8) | (1 << 7))
162 #define MC_DOD_NUMBANK(x) (((x) & MC_DOD_NUMBANK_MASK) >> 7)
163 #define MC_DOD_NUMRANK_MASK ((1 << 6) | (1 << 5))
164 #define MC_DOD_NUMRANK(x) (((x) & MC_DOD_NUMRANK_MASK) >> 5)
165 #define MC_DOD_NUMROW_MASK ((1 << 4) | (1 << 3) | (1 << 2))
166 #define MC_DOD_NUMROW(x) (((x) & MC_DOD_NUMROW_MASK) >> 2)
167 #define MC_DOD_NUMCOL_MASK 3
168 #define MC_DOD_NUMCOL(x) ((x) & MC_DOD_NUMCOL_MASK)
169
170 #define MC_RANK_PRESENT 0x7c
171
172 #define MC_SAG_CH_0 0x80
173 #define MC_SAG_CH_1 0x84
174 #define MC_SAG_CH_2 0x88
175 #define MC_SAG_CH_3 0x8c
176 #define MC_SAG_CH_4 0x90
177 #define MC_SAG_CH_5 0x94
178 #define MC_SAG_CH_6 0x98
179 #define MC_SAG_CH_7 0x9c
180
181 #define MC_RIR_LIMIT_CH_0 0x40
182 #define MC_RIR_LIMIT_CH_1 0x44
183 #define MC_RIR_LIMIT_CH_2 0x48
184 #define MC_RIR_LIMIT_CH_3 0x4C
185 #define MC_RIR_LIMIT_CH_4 0x50
186 #define MC_RIR_LIMIT_CH_5 0x54
187 #define MC_RIR_LIMIT_CH_6 0x58
188 #define MC_RIR_LIMIT_CH_7 0x5C
189 #define MC_RIR_LIMIT_MASK ((1 << 10) - 1)
190
191 #define MC_RIR_WAY_CH 0x80
192 #define MC_RIR_WAY_OFFSET_MASK (((1 << 14) - 1) & ~0x7)
193 #define MC_RIR_WAY_RANK_MASK 0x7
194
195 /*
196 * i7core structs
197 */
198
199 #define NUM_CHANS 3
200 #define MAX_DIMMS 3 /* Max DIMMS per channel */
201 #define MAX_MCR_FUNC 4
202 #define MAX_CHAN_FUNC 3
203
204 struct i7core_info {
205 u32 mc_control;
206 u32 mc_status;
207 u32 max_dod;
208 u32 ch_map;
209 };
210
211
212 struct i7core_inject {
213 int enable;
214
215 u32 section;
216 u32 type;
217 u32 eccmask;
218
219 /* Error address mask */
220 int channel, dimm, rank, bank, page, col;
221 };
222
223 struct i7core_channel {
224 u32 ranks;
225 u32 dimms;
226 };
227
228 struct pci_id_descr {
229 int dev;
230 int func;
231 int dev_id;
232 int optional;
233 };
234
235 struct pci_id_table {
236 const struct pci_id_descr *descr;
237 int n_devs;
238 };
239
240 struct i7core_dev {
241 struct list_head list;
242 u8 socket;
243 struct pci_dev **pdev;
244 int n_devs;
245 struct mem_ctl_info *mci;
246 };
247
248 struct i7core_pvt {
249 struct pci_dev *pci_noncore;
250 struct pci_dev *pci_mcr[MAX_MCR_FUNC + 1];
251 struct pci_dev *pci_ch[NUM_CHANS][MAX_CHAN_FUNC + 1];
252
253 struct i7core_dev *i7core_dev;
254
255 struct i7core_info info;
256 struct i7core_inject inject;
257 struct i7core_channel channel[NUM_CHANS];
258
259 int ce_count_available;
260 int csrow_map[NUM_CHANS][MAX_DIMMS];
261
262 /* ECC corrected errors counts per udimm */
263 unsigned long udimm_ce_count[MAX_DIMMS];
264 int udimm_last_ce_count[MAX_DIMMS];
265 /* ECC corrected errors counts per rdimm */
266 unsigned long rdimm_ce_count[NUM_CHANS][MAX_DIMMS];
267 int rdimm_last_ce_count[NUM_CHANS][MAX_DIMMS];
268
269 bool is_registered, enable_scrub;
270
271 /* Fifo double buffers */
272 struct mce mce_entry[MCE_LOG_LEN];
273 struct mce mce_outentry[MCE_LOG_LEN];
274
275 /* Fifo in/out counters */
276 unsigned mce_in, mce_out;
277
278 /* Count of errors that could not be fetched (ring buffer overrun) */
279 unsigned mce_overrun;
280
281 /* DCLK Frequency used for computing scrub rate */
282 int dclk_freq;
283
284 /* Struct to control EDAC polling */
285 struct edac_pci_ctl_info *i7core_pci;
286 };
287
288 #define PCI_DESCR(device, function, device_id) \
289 .dev = (device), \
290 .func = (function), \
291 .dev_id = (device_id)
292
293 static const struct pci_id_descr pci_dev_descr_i7core_nehalem[] = {
294 /* Memory controller */
295 { PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_I7_MCR) },
296 { PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_I7_MC_TAD) },
297 /* Exists only for RDIMM */
298 { PCI_DESCR(3, 2, PCI_DEVICE_ID_INTEL_I7_MC_RAS), .optional = 1 },
299 { PCI_DESCR(3, 4, PCI_DEVICE_ID_INTEL_I7_MC_TEST) },
300
301 /* Channel 0 */
302 { PCI_DESCR(4, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH0_CTRL) },
303 { PCI_DESCR(4, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH0_ADDR) },
304 { PCI_DESCR(4, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH0_RANK) },
305 { PCI_DESCR(4, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH0_TC) },
306
307 /* Channel 1 */
308 { PCI_DESCR(5, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH1_CTRL) },
309 { PCI_DESCR(5, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH1_ADDR) },
310 { PCI_DESCR(5, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH1_RANK) },
311 { PCI_DESCR(5, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH1_TC) },
312
313 /* Channel 2 */
314 { PCI_DESCR(6, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH2_CTRL) },
315 { PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH2_ADDR) },
316 { PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH2_RANK) },
317 { PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH2_TC) },
318
319 /* Generic Non-core registers */
320 /*
321 * This is the PCI device on i7core and on Xeon 35xx (8086:2c41).
322 * On Xeon 55xx, however, it has a different ID (8086:2c40), so
323 * the probing code needs to test for the other address in case
324 * this one fails.
325 */
326 { PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_I7_NONCORE) },
327
328 };
329
330 static const struct pci_id_descr pci_dev_descr_lynnfield[] = {
331 { PCI_DESCR( 3, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR) },
332 { PCI_DESCR( 3, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD) },
333 { PCI_DESCR( 3, 4, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST) },
334
335 { PCI_DESCR( 4, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL) },
336 { PCI_DESCR( 4, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR) },
337 { PCI_DESCR( 4, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK) },
338 { PCI_DESCR( 4, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC) },
339
340 { PCI_DESCR( 5, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL) },
341 { PCI_DESCR( 5, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR) },
342 { PCI_DESCR( 5, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK) },
343 { PCI_DESCR( 5, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC) },
344
345 /*
346 * This is the PCI device that has an alternate address on some
347 * processors, like the Core i7 860.
348 */
349 { PCI_DESCR( 0, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE) },
350 };
351
352 static const struct pci_id_descr pci_dev_descr_i7core_westmere[] = {
353 /* Memory controller */
354 { PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR_REV2) },
355 { PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD_REV2) },
356 /* Exists only for RDIMM */
357 { PCI_DESCR(3, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_RAS_REV2), .optional = 1 },
358 { PCI_DESCR(3, 4, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST_REV2) },
359
360 /* Channel 0 */
361 { PCI_DESCR(4, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL_REV2) },
362 { PCI_DESCR(4, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR_REV2) },
363 { PCI_DESCR(4, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK_REV2) },
364 { PCI_DESCR(4, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC_REV2) },
365
366 /* Channel 1 */
367 { PCI_DESCR(5, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL_REV2) },
368 { PCI_DESCR(5, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR_REV2) },
369 { PCI_DESCR(5, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK_REV2) },
370 { PCI_DESCR(5, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC_REV2) },
371
372 /* Channel 2 */
373 { PCI_DESCR(6, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_CTRL_REV2) },
374 { PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_ADDR_REV2) },
375 { PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_RANK_REV2) },
376 { PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_TC_REV2) },
377
378 /* Generic Non-core registers */
379 { PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_REV2) },
380
381 };
382
383 #define PCI_ID_TABLE_ENTRY(A) { .descr=A, .n_devs = ARRAY_SIZE(A) }
384 static const struct pci_id_table pci_dev_table[] = {
385 PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_nehalem),
386 PCI_ID_TABLE_ENTRY(pci_dev_descr_lynnfield),
387 PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_westmere),
388 {0,} /* 0 terminated list. */
389 };
390
391 /*
392 * pci_device_id table for which devices we are looking for
393 */
394 static DEFINE_PCI_DEVICE_TABLE(i7core_pci_tbl) = {
395 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
396 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
397 {0,} /* 0 terminated list. */
398 };
399
400 /****************************************************************************
401 Ancillary status routines
402 ****************************************************************************/
403
404 /* MC_CONTROL bits */
405 #define CH_ACTIVE(pvt, ch) ((pvt)->info.mc_control & (1 << (8 + ch)))
406 #define ECCx8(pvt) ((pvt)->info.mc_control & (1 << 1))
407
408 /* MC_STATUS bits */
409 #define ECC_ENABLED(pvt) ((pvt)->info.mc_status & (1 << 4))
410 #define CH_DISABLED(pvt, ch) ((pvt)->info.mc_status & (1 << ch))
411
412 /* MC_MAX_DOD read functions */
413 static inline int numdimms(u32 dimms)
414 {
415 return (dimms & 0x3) + 1;
416 }
417
418 static inline int numrank(u32 rank)
419 {
420 static int ranks[4] = { 1, 2, 4, -EINVAL };
421
422 return ranks[rank & 0x3];
423 }
424
425 static inline int numbank(u32 bank)
426 {
427 static int banks[4] = { 4, 8, 16, -EINVAL };
428
429 return banks[bank & 0x3];
430 }
431
432 static inline int numrow(u32 row)
433 {
434 static int rows[8] = {
435 1 << 12, 1 << 13, 1 << 14, 1 << 15,
436 1 << 16, -EINVAL, -EINVAL, -EINVAL,
437 };
438
439 return rows[row & 0x7];
440 }
441
442 static inline int numcol(u32 col)
443 {
444 static int cols[8] = {
445 1 << 10, 1 << 11, 1 << 12, -EINVAL,
446 };
447 return cols[col & 0x3];
448 }
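/*
 * The helpers above decode the packed geometry fields of MC_MAX_DOD and of
 * the per-DIMM MC_DOD_CH_DIMM* registers (see the MC_DOD_NUMBANK/NUMRANK/
 * NUMROW/NUMCOL macros); get_dimm_config() uses them to compute each DIMM's
 * geometry. -EINVAL marks encodings that appear to be reserved.
 */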
449
450 static struct i7core_dev *get_i7core_dev(u8 socket)
451 {
452 struct i7core_dev *i7core_dev;
453
454 list_for_each_entry(i7core_dev, &i7core_edac_list, list) {
455 if (i7core_dev->socket == socket)
456 return i7core_dev;
457 }
458
459 return NULL;
460 }
461
462 static struct i7core_dev *alloc_i7core_dev(u8 socket,
463 const struct pci_id_table *table)
464 {
465 struct i7core_dev *i7core_dev;
466
467 i7core_dev = kzalloc(sizeof(*i7core_dev), GFP_KERNEL);
468 if (!i7core_dev)
469 return NULL;
470
471 i7core_dev->pdev = kzalloc(sizeof(*i7core_dev->pdev) * table->n_devs,
472 GFP_KERNEL);
473 if (!i7core_dev->pdev) {
474 kfree(i7core_dev);
475 return NULL;
476 }
477
478 i7core_dev->socket = socket;
479 i7core_dev->n_devs = table->n_devs;
480 list_add_tail(&i7core_dev->list, &i7core_edac_list);
481
482 return i7core_dev;
483 }
484
485 static void free_i7core_dev(struct i7core_dev *i7core_dev)
486 {
487 list_del(&i7core_dev->list);
488 kfree(i7core_dev->pdev);
489 kfree(i7core_dev);
490 }
491
492 /****************************************************************************
493 Memory check routines
494 ****************************************************************************/
495 static struct pci_dev *get_pdev_slot_func(u8 socket, unsigned slot,
496 unsigned func)
497 {
498 struct i7core_dev *i7core_dev = get_i7core_dev(socket);
499 int i;
500
501 if (!i7core_dev)
502 return NULL;
503
504 for (i = 0; i < i7core_dev->n_devs; i++) {
505 if (!i7core_dev->pdev[i])
506 continue;
507
508 if (PCI_SLOT(i7core_dev->pdev[i]->devfn) == slot &&
509 PCI_FUNC(i7core_dev->pdev[i]->devfn) == func) {
510 return i7core_dev->pdev[i];
511 }
512 }
513
514 return NULL;
515 }
516
517 /**
518 * i7core_get_active_channels() - gets the number of channels and csrows
519 * @socket: Quick Path Interconnect socket
520 * @channels: Number of channels that will be returned
521 * @csrows: Number of csrows found
522 *
523 * Since the EDAC core needs to know in advance the number of available
524 * channels and csrows, in order to allocate memory for them, it needs
525 * to run two similar steps. The first step, implemented in this function,
526 * counts the number of csrows/channels present on one socket;
527 * this is used in order to properly size the mci components.
528 *
529 * It should be noted that none of the currently available datasheets explain
530 * or even mention how csrows are seen by the memory controller, so we need
531 * to add a fake description for csrows:
532 * this driver maps one DIMM to one csrow.
533 */
534 static int i7core_get_active_channels(const u8 socket, unsigned *channels,
535 unsigned *csrows)
536 {
537 struct pci_dev *pdev = NULL;
538 int i, j;
539 u32 status, control;
540
541 *channels = 0;
542 *csrows = 0;
543
544 pdev = get_pdev_slot_func(socket, 3, 0);
545 if (!pdev) {
546 i7core_printk(KERN_ERR, "Couldn't find socket %d fn 3.0!!!\n",
547 socket);
548 return -ENODEV;
549 }
550
551 /* Device 3 function 0 reads */
552 pci_read_config_dword(pdev, MC_STATUS, &status);
553 pci_read_config_dword(pdev, MC_CONTROL, &control);
554
555 for (i = 0; i < NUM_CHANS; i++) {
556 u32 dimm_dod[3];
557 /* Check if the channel is active */
558 if (!(control & (1 << (8 + i))))
559 continue;
560
561 /* Check if the channel is disabled */
562 if (status & (1 << i))
563 continue;
564
565 pdev = get_pdev_slot_func(socket, i + 4, 1);
566 if (!pdev) {
567 i7core_printk(KERN_ERR, "Couldn't find socket %d "
568 "fn %d.%d!!!\n",
569 socket, i + 4, 1);
570 return -ENODEV;
571 }
572 /* Devices 4-6 function 1 */
573 pci_read_config_dword(pdev,
574 MC_DOD_CH_DIMM0, &dimm_dod[0]);
575 pci_read_config_dword(pdev,
576 MC_DOD_CH_DIMM1, &dimm_dod[1]);
577 pci_read_config_dword(pdev,
578 MC_DOD_CH_DIMM2, &dimm_dod[2]);
579
580 (*channels)++;
581
582 for (j = 0; j < 3; j++) {
583 if (!DIMM_PRESENT(dimm_dod[j]))
584 continue;
585 (*csrows)++;
586 }
587 }
588
589 debugf0("Number of active channels on socket %d: %d\n",
590 socket, *channels);
591
592 return 0;
593 }
594
595 static int get_dimm_config(struct mem_ctl_info *mci)
596 {
597 struct i7core_pvt *pvt = mci->pvt_info;
598 struct csrow_info *csr;
599 struct pci_dev *pdev;
600 int i, j;
601 int csrow = 0;
602 enum edac_type mode;
603 enum mem_type mtype;
604 struct dimm_info *dimm;
605
606 /* Get data from the MC register, function 0 */
607 pdev = pvt->pci_mcr[0];
608 if (!pdev)
609 return -ENODEV;
610
611 /* Device 3 function 0 reads */
612 pci_read_config_dword(pdev, MC_CONTROL, &pvt->info.mc_control);
613 pci_read_config_dword(pdev, MC_STATUS, &pvt->info.mc_status);
614 pci_read_config_dword(pdev, MC_MAX_DOD, &pvt->info.max_dod);
615 pci_read_config_dword(pdev, MC_CHANNEL_MAPPER, &pvt->info.ch_map);
616
617 debugf0("QPI %d control=0x%08x status=0x%08x dod=0x%08x map=0x%08x\n",
618 pvt->i7core_dev->socket, pvt->info.mc_control, pvt->info.mc_status,
619 pvt->info.max_dod, pvt->info.ch_map);
620
621 if (ECC_ENABLED(pvt)) {
622 debugf0("ECC enabled with x%d SDCC\n", ECCx8(pvt) ? 8 : 4);
623 if (ECCx8(pvt))
624 mode = EDAC_S8ECD8ED;
625 else
626 mode = EDAC_S4ECD4ED;
627 } else {
628 debugf0("ECC disabled\n");
629 mode = EDAC_NONE;
630 }
631
632 /* FIXME: need to handle the error codes */
633 debugf0("DOD Max limits: DIMMS: %d, %d-ranked, %d-banked "
634 "x%x x 0x%x\n",
635 numdimms(pvt->info.max_dod),
636 numrank(pvt->info.max_dod >> 2),
637 numbank(pvt->info.max_dod >> 4),
638 numrow(pvt->info.max_dod >> 6),
639 numcol(pvt->info.max_dod >> 9));
640
641 for (i = 0; i < NUM_CHANS; i++) {
642 u32 data, dimm_dod[3], value[8];
643
644 if (!pvt->pci_ch[i][0])
645 continue;
646
647 if (!CH_ACTIVE(pvt, i)) {
648 debugf0("Channel %i is not active\n", i);
649 continue;
650 }
651 if (CH_DISABLED(pvt, i)) {
652 debugf0("Channel %i is disabled\n", i);
653 continue;
654 }
655
656 /* Devices 4-6 function 0 */
657 pci_read_config_dword(pvt->pci_ch[i][0],
658 MC_CHANNEL_DIMM_INIT_PARAMS, &data);
659
660 pvt->channel[i].ranks = (data & QUAD_RANK_PRESENT) ?
661 4 : 2;
662
663 if (data & REGISTERED_DIMM)
664 mtype = MEM_RDDR3;
665 else
666 mtype = MEM_DDR3;
667 #if 0
668 if (data & THREE_DIMMS_PRESENT)
669 pvt->channel[i].dimms = 3;
670 else if (data & SINGLE_QUAD_RANK_PRESENT)
671 pvt->channel[i].dimms = 1;
672 else
673 pvt->channel[i].dimms = 2;
674 #endif
675
676 /* Devices 4-6 function 1 */
677 pci_read_config_dword(pvt->pci_ch[i][1],
678 MC_DOD_CH_DIMM0, &dimm_dod[0]);
679 pci_read_config_dword(pvt->pci_ch[i][1],
680 MC_DOD_CH_DIMM1, &dimm_dod[1]);
681 pci_read_config_dword(pvt->pci_ch[i][1],
682 MC_DOD_CH_DIMM2, &dimm_dod[2]);
683
684 debugf0("Ch%d phy rd%d, wr%d (0x%08x): "
685 "%d ranks, %cDIMMs\n",
686 i,
687 RDLCH(pvt->info.ch_map, i), WRLCH(pvt->info.ch_map, i),
688 data,
689 pvt->channel[i].ranks,
690 (data & REGISTERED_DIMM) ? 'R' : 'U');
691
692 for (j = 0; j < 3; j++) {
693 u32 banks, ranks, rows, cols;
694 u32 size, npages;
695
696 if (!DIMM_PRESENT(dimm_dod[j]))
697 continue;
698
699 banks = numbank(MC_DOD_NUMBANK(dimm_dod[j]));
700 ranks = numrank(MC_DOD_NUMRANK(dimm_dod[j]));
701 rows = numrow(MC_DOD_NUMROW(dimm_dod[j]));
702 cols = numcol(MC_DOD_NUMCOL(dimm_dod[j]));
703
704 /* DDR3 has 8 I/O banks */
705 size = (rows * cols * banks * ranks) >> (20 - 3);
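/*
 * Worked example (illustrative values): a DIMM with 2^14 rows,
 * 2^10 cols, 8 banks and 2 ranks gives 16384 * 1024 * 8 * 2 = 2^28
 * accesses; at 8 bytes per access that is 2 GiB, and the
 * >> (20 - 3) shift above yields 2048 MiB.
 */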
706
707 pvt->channel[i].dimms++;
708
709 debugf0("\tdimm %d %d Mb offset: %x, "
710 "bank: %d, rank: %d, row: %#x, col: %#x\n",
711 j, size,
712 RANKOFFSET(dimm_dod[j]),
713 banks, ranks, rows, cols);
714
715 npages = MiB_TO_PAGES(size);
716
717 csr = &mci->csrows[csrow];
718 csr->nr_pages = npages;
719
720 csr->csrow_idx = csrow;
721 csr->nr_channels = 1;
722
723 csr->channels[0].chan_idx = i;
724 csr->channels[0].ce_count = 0;
725
726 pvt->csrow_map[i][j] = csrow;
727
728 dimm = csr->channels[0].dimm;
729 switch (banks) {
730 case 4:
731 dimm->dtype = DEV_X4;
732 break;
733 case 8:
734 dimm->dtype = DEV_X8;
735 break;
736 case 16:
737 dimm->dtype = DEV_X16;
738 break;
739 default:
740 dimm->dtype = DEV_UNKNOWN;
741 }
742
743 snprintf(dimm->label, sizeof(dimm->label),
744 "CPU#%uChannel#%u_DIMM#%u",
745 pvt->i7core_dev->socket, i, j);
746 dimm->grain = 8;
747 dimm->edac_mode = mode;
748 dimm->mtype = mtype;
749 }
750
751 pci_read_config_dword(pdev, MC_SAG_CH_0, &value[0]);
752 pci_read_config_dword(pdev, MC_SAG_CH_1, &value[1]);
753 pci_read_config_dword(pdev, MC_SAG_CH_2, &value[2]);
754 pci_read_config_dword(pdev, MC_SAG_CH_3, &value[3]);
755 pci_read_config_dword(pdev, MC_SAG_CH_4, &value[4]);
756 pci_read_config_dword(pdev, MC_SAG_CH_5, &value[5]);
757 pci_read_config_dword(pdev, MC_SAG_CH_6, &value[6]);
758 pci_read_config_dword(pdev, MC_SAG_CH_7, &value[7]);
759 debugf1("\t[%i] DIVBY3\tREMOVED\tOFFSET\n", i);
760 for (j = 0; j < 8; j++)
761 debugf1("\t\t%#x\t%#x\t%#x\n",
762 (value[j] >> 27) & 0x1,
763 (value[j] >> 24) & 0x7,
764 (value[j] & ((1 << 24) - 1)));
765 }
766
767 return 0;
768 }
769
770 /****************************************************************************
771 Error insertion routines
772 ****************************************************************************/
773
774 /* The i7core has independent error injection features per channel.
775 However, to have a simpler code, we don't allow enabling error injection
776 on more than one channel.
777 Also, since a change at an inject parameter will be applied only at enable,
778 we're disabling error injection on all write calls to the sysfs nodes that
779 controls the error code injection.
780 */
781 static int disable_inject(const struct mem_ctl_info *mci)
782 {
783 struct i7core_pvt *pvt = mci->pvt_info;
784
785 pvt->inject.enable = 0;
786
787 if (!pvt->pci_ch[pvt->inject.channel][0])
788 return -ENODEV;
789
790 pci_write_config_dword(pvt->pci_ch[pvt->inject.channel][0],
791 MC_CHANNEL_ERROR_INJECT, 0);
792
793 return 0;
794 }
795
796 /*
797 * i7core inject.section
798 *
799 * Accept and store the error injection inject.section value:
800 * bit 0 - refers to the lower 32-byte half cacheline
801 * bit 1 - refers to the upper 32-byte half cacheline
802 */
803 static ssize_t i7core_inject_section_store(struct mem_ctl_info *mci,
804 const char *data, size_t count)
805 {
806 struct i7core_pvt *pvt = mci->pvt_info;
807 unsigned long value;
808 int rc;
809
810 if (pvt->inject.enable)
811 disable_inject(mci);
812
813 rc = strict_strtoul(data, 10, &value);
814 if ((rc < 0) || (value > 3))
815 return -EIO;
816
817 pvt->inject.section = (u32) value;
818 return count;
819 }
820
821 static ssize_t i7core_inject_section_show(struct mem_ctl_info *mci,
822 char *data)
823 {
824 struct i7core_pvt *pvt = mci->pvt_info;
825 return sprintf(data, "0x%08x\n", pvt->inject.section);
826 }
827
828 /*
829 * i7core inject.type
830 *
831 * Accept and store the error injection inject.type value:
832 * bit 0 - repeat enable - Enable error repetition
833 * bit 1 - inject ECC error
834 * bit 2 - inject parity error
835 */
836 static ssize_t i7core_inject_type_store(struct mem_ctl_info *mci,
837 const char *data, size_t count)
838 {
839 struct i7core_pvt *pvt = mci->pvt_info;
840 unsigned long value;
841 int rc;
842
843 if (pvt->inject.enable)
844 disable_inject(mci);
845
846 rc = strict_strtoul(data, 10, &value);
847 if ((rc < 0) || (value > 7))
848 return -EIO;
849
850 pvt->inject.type = (u32) value;
851 return count;
852 }
853
854 static ssize_t i7core_inject_type_show(struct mem_ctl_info *mci,
855 char *data)
856 {
857 struct i7core_pvt *pvt = mci->pvt_info;
858 return sprintf(data, "0x%08x\n", pvt->inject.type);
859 }
860
861 /*
862 * i7core_inject_inject.eccmask_store
863 *
864 * The type of error (UE/CE) will depend on the inject.eccmask value:
865 * Any bits set to a 1 will flip the corresponding ECC bit
866 * Correctable errors can be injected by flipping 1 bit or the bits within
867 * a symbol pair (2 consecutive aligned 8-bit pairs - i.e. 7:0 and 15:8 or
868 * 23:16 and 31:24). Flipping bits in two symbol pairs will cause an
869 * uncorrectable error to be injected.
870 */
871 static ssize_t i7core_inject_eccmask_store(struct mem_ctl_info *mci,
872 const char *data, size_t count)
873 {
874 struct i7core_pvt *pvt = mci->pvt_info;
875 unsigned long value;
876 int rc;
877
878 if (pvt->inject.enable)
879 disable_inject(mci);
880
881 rc = strict_strtoul(data, 10, &value);
882 if (rc < 0)
883 return -EIO;
884
885 pvt->inject.eccmask = (u32) value;
886 return count;
887 }
888
889 static ssize_t i7core_inject_eccmask_show(struct mem_ctl_info *mci,
890 char *data)
891 {
892 struct i7core_pvt *pvt = mci->pvt_info;
893 return sprintf(data, "0x%08x\n", pvt->inject.eccmask);
894 }
895
896 /*
897 * i7core_addrmatch
898 *
899 * Sysfs nodes that set the address-match criteria used when injecting an
900 * error. The criteria are channel, dimm, rank, bank, page and col; they
901 * are handled by the DECLARE_ADDR_MATCH() show/store helpers below.
902 * Writing "any" (stored internally as -1) makes the memory controller
903 * ignore that particular criterion when matching the address for
904 * error injection.
905 */
906
907 #define DECLARE_ADDR_MATCH(param, limit) \
908 static ssize_t i7core_inject_store_##param( \
909 struct mem_ctl_info *mci, \
910 const char *data, size_t count) \
911 { \
912 struct i7core_pvt *pvt; \
913 long value; \
914 int rc; \
915 \
916 debugf1("%s()\n", __func__); \
917 pvt = mci->pvt_info; \
918 \
919 if (pvt->inject.enable) \
920 disable_inject(mci); \
921 \
922 if (!strcasecmp(data, "any") || !strcasecmp(data, "any\n"))\
923 value = -1; \
924 else { \
925 rc = strict_strtoul(data, 10, &value); \
926 if ((rc < 0) || (value >= limit)) \
927 return -EIO; \
928 } \
929 \
930 pvt->inject.param = value; \
931 \
932 return count; \
933 } \
934 \
935 static ssize_t i7core_inject_show_##param( \
936 struct mem_ctl_info *mci, \
937 char *data) \
938 { \
939 struct i7core_pvt *pvt; \
940 \
941 pvt = mci->pvt_info; \
942 debugf1("%s() pvt=%p\n", __func__, pvt); \
943 if (pvt->inject.param < 0) \
944 return sprintf(data, "any\n"); \
945 else \
946 return sprintf(data, "%d\n", pvt->inject.param);\
947 }
948
949 #define ATTR_ADDR_MATCH(param) \
950 { \
951 .attr = { \
952 .name = #param, \
953 .mode = (S_IRUGO | S_IWUSR) \
954 }, \
955 .show = i7core_inject_show_##param, \
956 .store = i7core_inject_store_##param, \
957 }
958
959 DECLARE_ADDR_MATCH(channel, 3);
960 DECLARE_ADDR_MATCH(dimm, 3);
961 DECLARE_ADDR_MATCH(rank, 4);
962 DECLARE_ADDR_MATCH(bank, 32);
963 DECLARE_ADDR_MATCH(page, 0x10000);
964 DECLARE_ADDR_MATCH(col, 0x4000);
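/*
 * The limit passed to each DECLARE_ADDR_MATCH() above is an exclusive upper
 * bound on the value accepted from sysfs: 3 channels, 3 DIMMs per channel,
 * 4 ranks, 32 banks, 2^16 pages and 2^14 columns. Writing "any" stores -1,
 * which disables that criterion.
 */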
965
966 static int write_and_test(struct pci_dev *dev, const int where, const u32 val)
967 {
968 u32 read;
969 int count;
970
971 debugf0("setting pci %02x:%02x.%x reg=%02x value=%08x\n",
972 dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
973 where, val);
974
975 for (count = 0; count < 10; count++) {
976 if (count)
977 msleep(100);
978 pci_write_config_dword(dev, where, val);
979 pci_read_config_dword(dev, where, &read);
980
981 if (read == val)
982 return 0;
983 }
984
985 i7core_printk(KERN_ERR, "Error during set pci %02x:%02x.%x reg=%02x "
986 "write=%08x. Read=%08x\n",
987 dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
988 where, val, read);
989
990 return -EINVAL;
991 }
992
993 /*
994 * This routine prepares the Memory Controller for error injection.
995 * The error will be injected when some process tries to write to
996 * memory that matches the given criteria (a usage sketch is given after
997 * this function). The criteria are set as a mask where dimm, rank,
998 * bank, page and col can be specified.
999 * A -1 value for any of the mask items makes the MCU ignore
1000 * that matching criterion for error injection.
1001 *
1002 * Note that the error will only happen after a write operation to
1003 * memory that matches the condition. If REPEAT_EN is not enabled in the
1004 * inject mask, it will produce just one error; otherwise, it will repeat
1005 * until the inject mask is cleared.
1006 *
1007 * FIXME: This routine assumes that the MAXNUMDIMMS value of MC_MAX_DOD
1008 * is reliable enough to check whether the MC is using all
1009 * three channels. However, this is not clear from the datasheet.
1010 */
1011 static ssize_t i7core_inject_enable_store(struct mem_ctl_info *mci,
1012 const char *data, size_t count)
1013 {
1014 struct i7core_pvt *pvt = mci->pvt_info;
1015 u32 injectmask;
1016 u64 mask = 0;
1017 int rc;
1018 long enable;
1019
1020 if (!pvt->pci_ch[pvt->inject.channel][0])
1021 return 0;
1022
1023 rc = strict_strtoul(data, 10, &enable);
1024 if ((rc < 0))
1025 return 0;
1026
1027 if (enable) {
1028 pvt->inject.enable = 1;
1029 } else {
1030 disable_inject(mci);
1031 return count;
1032 }
1033
1034 /* Sets pvt->inject.dimm mask */
1035 if (pvt->inject.dimm < 0)
1036 mask |= 1LL << 41;
1037 else {
1038 if (pvt->channel[pvt->inject.channel].dimms > 2)
1039 mask |= (pvt->inject.dimm & 0x3LL) << 35;
1040 else
1041 mask |= (pvt->inject.dimm & 0x1LL) << 36;
1042 }
1043
1044 /* Sets pvt->inject.rank mask */
1045 if (pvt->inject.rank < 0)
1046 mask |= 1LL << 40;
1047 else {
1048 if (pvt->channel[pvt->inject.channel].dimms > 2)
1049 mask |= (pvt->inject.rank & 0x1LL) << 34;
1050 else
1051 mask |= (pvt->inject.rank & 0x3LL) << 34;
1052 }
1053
1054 /* Sets pvt->inject.bank mask */
1055 if (pvt->inject.bank < 0)
1056 mask |= 1LL << 39;
1057 else
1058 mask |= (pvt->inject.bank & 0x15LL) << 30;
1059
1060 /* Sets pvt->inject.page mask */
1061 if (pvt->inject.page < 0)
1062 mask |= 1LL << 38;
1063 else
1064 mask |= (pvt->inject.page & 0xffff) << 14;
1065
1066 /* Sets pvt->inject.column mask */
1067 if (pvt->inject.col < 0)
1068 mask |= 1LL << 37;
1069 else
1070 mask |= (pvt->inject.col & 0x3fff);
1071
1072 /*
1073 * bit 0: REPEAT_EN
1074 * bits 1-2: MASK_HALF_CACHELINE
1075 * bit 3: INJECT_ECC
1076 * bit 4: INJECT_ADDR_PARITY
1077 */
1078
1079 injectmask = (pvt->inject.type & 1) |
1080 (pvt->inject.section & 0x3) << 1 |
1081 (pvt->inject.type & 0x6) << (3 - 1);
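/*
 * Worked example (illustrative values): inject.type = 3 (repeat + ECC)
 * and inject.section = 1 (lower half cacheline) give
 * injectmask = 1 | (1 << 1) | (0x2 << 2) = 0x0b.
 */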
1082
1083 /* Unlock writes to registers - this register is write only */
1084 pci_write_config_dword(pvt->pci_noncore,
1085 MC_CFG_CONTROL, 0x2);
1086
1087 write_and_test(pvt->pci_ch[pvt->inject.channel][0],
1088 MC_CHANNEL_ADDR_MATCH, mask);
1089 write_and_test(pvt->pci_ch[pvt->inject.channel][0],
1090 MC_CHANNEL_ADDR_MATCH + 4, mask >> 32L);
1091
1092 write_and_test(pvt->pci_ch[pvt->inject.channel][0],
1093 MC_CHANNEL_ERROR_MASK, pvt->inject.eccmask);
1094
1095 write_and_test(pvt->pci_ch[pvt->inject.channel][0],
1096 MC_CHANNEL_ERROR_INJECT, injectmask);
1097
1098 /*
1099 * This is something undocumented, based on my tests
1100 * Without writing 8 to this register, errors aren't injected. Not sure
1101 * why.
1102 */
1103 pci_write_config_dword(pvt->pci_noncore,
1104 MC_CFG_CONTROL, 8);
1105
1106 debugf0("Error inject addr match 0x%016llx, ecc 0x%08x,"
1107 " inject 0x%08x\n",
1108 mask, pvt->inject.eccmask, injectmask);
1109
1110
1111 return count;
1112 }
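/*
 * Usage sketch for the injection interface above. Paths and values are
 * illustrative only and assume the controller is registered as mc0:
 *
 *	echo 1   > /sys/.../mc0/inject_addrmatch/channel
 *	echo any > /sys/.../mc0/inject_addrmatch/page
 *	echo 0x1 > /sys/.../mc0/inject_eccmask	(flip one ECC bit -> CE)
 *	echo 3   > /sys/.../mc0/inject_type	(repeat + inject ECC error)
 *	echo 1   > /sys/.../mc0/inject_enable
 *
 * The error is then injected on the next write that matches the criteria.
 */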
1113
1114 static ssize_t i7core_inject_enable_show(struct mem_ctl_info *mci,
1115 char *data)
1116 {
1117 struct i7core_pvt *pvt = mci->pvt_info;
1118 u32 injectmask;
1119
1120 if (!pvt->pci_ch[pvt->inject.channel][0])
1121 return 0;
1122
1123 pci_read_config_dword(pvt->pci_ch[pvt->inject.channel][0],
1124 MC_CHANNEL_ERROR_INJECT, &injectmask);
1125
1126 debugf0("Inject error read: 0x%018x\n", injectmask);
1127
1128 if (injectmask & 0x0c)
1129 pvt->inject.enable = 1;
1130
1131 return sprintf(data, "%d\n", pvt->inject.enable);
1132 }
1133
1134 #define DECLARE_COUNTER(param) \
1135 static ssize_t i7core_show_counter_##param( \
1136 struct mem_ctl_info *mci, \
1137 char *data) \
1138 { \
1139 struct i7core_pvt *pvt = mci->pvt_info; \
1140 \
1141 debugf1("%s() \n", __func__); \
1142 if (!pvt->ce_count_available || (pvt->is_registered)) \
1143 return sprintf(data, "data unavailable\n"); \
1144 return sprintf(data, "%lu\n", \
1145 pvt->udimm_ce_count[param]); \
1146 }
1147
1148 #define ATTR_COUNTER(param) \
1149 { \
1150 .attr = { \
1151 .name = __stringify(udimm##param), \
1152 .mode = (S_IRUGO | S_IWUSR) \
1153 }, \
1154 .show = i7core_show_counter_##param \
1155 }
1156
1157 DECLARE_COUNTER(0);
1158 DECLARE_COUNTER(1);
1159 DECLARE_COUNTER(2);
1160
1161 /*
1162 * Sysfs struct
1163 */
1164
1165 static const struct mcidev_sysfs_attribute i7core_addrmatch_attrs[] = {
1166 ATTR_ADDR_MATCH(channel),
1167 ATTR_ADDR_MATCH(dimm),
1168 ATTR_ADDR_MATCH(rank),
1169 ATTR_ADDR_MATCH(bank),
1170 ATTR_ADDR_MATCH(page),
1171 ATTR_ADDR_MATCH(col),
1172 { } /* End of list */
1173 };
1174
1175 static const struct mcidev_sysfs_group i7core_inject_addrmatch = {
1176 .name = "inject_addrmatch",
1177 .mcidev_attr = i7core_addrmatch_attrs,
1178 };
1179
1180 static const struct mcidev_sysfs_attribute i7core_udimm_counters_attrs[] = {
1181 ATTR_COUNTER(0),
1182 ATTR_COUNTER(1),
1183 ATTR_COUNTER(2),
1184 { .attr = { .name = NULL } }
1185 };
1186
1187 static const struct mcidev_sysfs_group i7core_udimm_counters = {
1188 .name = "all_channel_counts",
1189 .mcidev_attr = i7core_udimm_counters_attrs,
1190 };
1191
1192 static const struct mcidev_sysfs_attribute i7core_sysfs_rdimm_attrs[] = {
1193 {
1194 .attr = {
1195 .name = "inject_section",
1196 .mode = (S_IRUGO | S_IWUSR)
1197 },
1198 .show = i7core_inject_section_show,
1199 .store = i7core_inject_section_store,
1200 }, {
1201 .attr = {
1202 .name = "inject_type",
1203 .mode = (S_IRUGO | S_IWUSR)
1204 },
1205 .show = i7core_inject_type_show,
1206 .store = i7core_inject_type_store,
1207 }, {
1208 .attr = {
1209 .name = "inject_eccmask",
1210 .mode = (S_IRUGO | S_IWUSR)
1211 },
1212 .show = i7core_inject_eccmask_show,
1213 .store = i7core_inject_eccmask_store,
1214 }, {
1215 .grp = &i7core_inject_addrmatch,
1216 }, {
1217 .attr = {
1218 .name = "inject_enable",
1219 .mode = (S_IRUGO | S_IWUSR)
1220 },
1221 .show = i7core_inject_enable_show,
1222 .store = i7core_inject_enable_store,
1223 },
1224 { } /* End of list */
1225 };
1226
1227 static const struct mcidev_sysfs_attribute i7core_sysfs_udimm_attrs[] = {
1228 {
1229 .attr = {
1230 .name = "inject_section",
1231 .mode = (S_IRUGO | S_IWUSR)
1232 },
1233 .show = i7core_inject_section_show,
1234 .store = i7core_inject_section_store,
1235 }, {
1236 .attr = {
1237 .name = "inject_type",
1238 .mode = (S_IRUGO | S_IWUSR)
1239 },
1240 .show = i7core_inject_type_show,
1241 .store = i7core_inject_type_store,
1242 }, {
1243 .attr = {
1244 .name = "inject_eccmask",
1245 .mode = (S_IRUGO | S_IWUSR)
1246 },
1247 .show = i7core_inject_eccmask_show,
1248 .store = i7core_inject_eccmask_store,
1249 }, {
1250 .grp = &i7core_inject_addrmatch,
1251 }, {
1252 .attr = {
1253 .name = "inject_enable",
1254 .mode = (S_IRUGO | S_IWUSR)
1255 },
1256 .show = i7core_inject_enable_show,
1257 .store = i7core_inject_enable_store,
1258 }, {
1259 .grp = &i7core_udimm_counters,
1260 },
1261 { } /* End of list */
1262 };
1263
1264 /****************************************************************************
1265 Device initialization routines: put/get, init/exit
1266 ****************************************************************************/
1267
1268 /*
1269 * i7core_put_devices 'put' all the devices of one socket that we have
1270 * reserved via 'get'
1271 */
1272 static void i7core_put_devices(struct i7core_dev *i7core_dev)
1273 {
1274 int i;
1275
1276 debugf0(__FILE__ ": %s()\n", __func__);
1277 for (i = 0; i < i7core_dev->n_devs; i++) {
1278 struct pci_dev *pdev = i7core_dev->pdev[i];
1279 if (!pdev)
1280 continue;
1281 debugf0("Removing dev %02x:%02x.%d\n",
1282 pdev->bus->number,
1283 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
1284 pci_dev_put(pdev);
1285 }
1286 }
1287
1288 static void i7core_put_all_devices(void)
1289 {
1290 struct i7core_dev *i7core_dev, *tmp;
1291
1292 list_for_each_entry_safe(i7core_dev, tmp, &i7core_edac_list, list) {
1293 i7core_put_devices(i7core_dev);
1294 free_i7core_dev(i7core_dev);
1295 }
1296 }
1297
1298 static void __init i7core_xeon_pci_fixup(const struct pci_id_table *table)
1299 {
1300 struct pci_dev *pdev = NULL;
1301 int i;
1302
1303 /*
1304 * On Xeon 55xx, the Intel Quick Path Arch Generic Non-core PCI buses
1305 * aren't announced by ACPI, so we need to use a legacy bus scan
1306 * to detect them.
1307 */
1308 while (table && table->descr) {
1309 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, table->descr[0].dev_id, NULL);
1310 if (unlikely(!pdev)) {
1311 for (i = 0; i < MAX_SOCKET_BUSES; i++)
1312 pcibios_scan_specific_bus(255-i);
1313 }
1314 pci_dev_put(pdev);
1315 table++;
1316 }
1317 }
1318
1319 static unsigned i7core_pci_lastbus(void)
1320 {
1321 int last_bus = 0, bus;
1322 struct pci_bus *b = NULL;
1323
1324 while ((b = pci_find_next_bus(b)) != NULL) {
1325 bus = b->number;
1326 debugf0("Found bus %d\n", bus);
1327 if (bus > last_bus)
1328 last_bus = bus;
1329 }
1330
1331 debugf0("Last bus %d\n", last_bus);
1332
1333 return last_bus;
1334 }
1335
1336 /*
1337 * i7core_get_all_devices Find and perform 'get' operation on the MCH's
1338 * device/functions we want to reference for this driver
1339 *
1340 * Need to 'get' device 16 func 1 and func 2
1341 */
1342 static int i7core_get_onedevice(struct pci_dev **prev,
1343 const struct pci_id_table *table,
1344 const unsigned devno,
1345 const unsigned last_bus)
1346 {
1347 struct i7core_dev *i7core_dev;
1348 const struct pci_id_descr *dev_descr = &table->descr[devno];
1349
1350 struct pci_dev *pdev = NULL;
1351 u8 bus = 0;
1352 u8 socket = 0;
1353
1354 pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
1355 dev_descr->dev_id, *prev);
1356
1357 /*
1358 * On Xeon 55xx, the Intel QuickPath Arch Generic Non-core registers
1359 * are at ID 8086:2c40, instead of 8086:2c41. So, we need
1360 * to probe for the alternate ID in case of failure.
1361 */
1362 if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_I7_NONCORE && !pdev)
1363 pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
1364 PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT, *prev);
1365
1366 if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE && !pdev)
1367 pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
1368 PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT,
1369 *prev);
1370
1371 if (!pdev) {
1372 if (*prev) {
1373 *prev = pdev;
1374 return 0;
1375 }
1376
1377 if (dev_descr->optional)
1378 return 0;
1379
1380 if (devno == 0)
1381 return -ENODEV;
1382
1383 i7core_printk(KERN_INFO,
1384 "Device not found: dev %02x.%d PCI ID %04x:%04x\n",
1385 dev_descr->dev, dev_descr->func,
1386 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
1387
1388 /* End of list, leave */
1389 return -ENODEV;
1390 }
1391 bus = pdev->bus->number;
1392
1393 socket = last_bus - bus;
1394
1395 i7core_dev = get_i7core_dev(socket);
1396 if (!i7core_dev) {
1397 i7core_dev = alloc_i7core_dev(socket, table);
1398 if (!i7core_dev) {
1399 pci_dev_put(pdev);
1400 return -ENOMEM;
1401 }
1402 }
1403
1404 if (i7core_dev->pdev[devno]) {
1405 i7core_printk(KERN_ERR,
1406 "Duplicated device for "
1407 "dev %02x:%02x.%d PCI ID %04x:%04x\n",
1408 bus, dev_descr->dev, dev_descr->func,
1409 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
1410 pci_dev_put(pdev);
1411 return -ENODEV;
1412 }
1413
1414 i7core_dev->pdev[devno] = pdev;
1415
1416 /* Sanity check */
1417 if (unlikely(PCI_SLOT(pdev->devfn) != dev_descr->dev ||
1418 PCI_FUNC(pdev->devfn) != dev_descr->func)) {
1419 i7core_printk(KERN_ERR,
1420 "Device PCI ID %04x:%04x "
1421 "has dev %02x:%02x.%d instead of dev %02x:%02x.%d\n",
1422 PCI_VENDOR_ID_INTEL, dev_descr->dev_id,
1423 bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
1424 bus, dev_descr->dev, dev_descr->func);
1425 return -ENODEV;
1426 }
1427
1428 /* Be sure that the device is enabled */
1429 if (unlikely(pci_enable_device(pdev) < 0)) {
1430 i7core_printk(KERN_ERR,
1431 "Couldn't enable "
1432 "dev %02x:%02x.%d PCI ID %04x:%04x\n",
1433 bus, dev_descr->dev, dev_descr->func,
1434 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
1435 return -ENODEV;
1436 }
1437
1438 debugf0("Detected socket %d dev %02x:%02x.%d PCI ID %04x:%04x\n",
1439 socket, bus, dev_descr->dev,
1440 dev_descr->func,
1441 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
1442
1443 /*
1444 * As stated in drivers/pci/search.c, the reference count for
1445 * @from is always decremented if it is not %NULL. Since we want to keep
1446 * holding every device we find, take an extra reference on it here.
1447 */
1448 pci_dev_get(pdev);
1449
1450 *prev = pdev;
1451
1452 return 0;
1453 }
1454
1455 static int i7core_get_all_devices(void)
1456 {
1457 int i, rc, last_bus;
1458 struct pci_dev *pdev = NULL;
1459 const struct pci_id_table *table = pci_dev_table;
1460
1461 last_bus = i7core_pci_lastbus();
1462
1463 while (table && table->descr) {
1464 for (i = 0; i < table->n_devs; i++) {
1465 pdev = NULL;
1466 do {
1467 rc = i7core_get_onedevice(&pdev, table, i,
1468 last_bus);
1469 if (rc < 0) {
1470 if (i == 0) {
1471 i = table->n_devs;
1472 break;
1473 }
1474 i7core_put_all_devices();
1475 return -ENODEV;
1476 }
1477 } while (pdev);
1478 }
1479 table++;
1480 }
1481
1482 return 0;
1483 }
1484
1485 static int mci_bind_devs(struct mem_ctl_info *mci,
1486 struct i7core_dev *i7core_dev)
1487 {
1488 struct i7core_pvt *pvt = mci->pvt_info;
1489 struct pci_dev *pdev;
1490 int i, func, slot;
1491 char *family;
1492
1493 pvt->is_registered = false;
1494 pvt->enable_scrub = false;
1495 for (i = 0; i < i7core_dev->n_devs; i++) {
1496 pdev = i7core_dev->pdev[i];
1497 if (!pdev)
1498 continue;
1499
1500 func = PCI_FUNC(pdev->devfn);
1501 slot = PCI_SLOT(pdev->devfn);
1502 if (slot == 3) {
1503 if (unlikely(func > MAX_MCR_FUNC))
1504 goto error;
1505 pvt->pci_mcr[func] = pdev;
1506 } else if (likely(slot >= 4 && slot < 4 + NUM_CHANS)) {
1507 if (unlikely(func > MAX_CHAN_FUNC))
1508 goto error;
1509 pvt->pci_ch[slot - 4][func] = pdev;
1510 } else if (!slot && !func) {
1511 pvt->pci_noncore = pdev;
1512
1513 /* Detect the processor family */
1514 switch (pdev->device) {
1515 case PCI_DEVICE_ID_INTEL_I7_NONCORE:
1516 family = "Xeon 35xx/ i7core";
1517 pvt->enable_scrub = false;
1518 break;
1519 case PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT:
1520 family = "i7-800/i5-700";
1521 pvt->enable_scrub = false;
1522 break;
1523 case PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE:
1524 family = "Xeon 34xx";
1525 pvt->enable_scrub = false;
1526 break;
1527 case PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT:
1528 family = "Xeon 55xx";
1529 pvt->enable_scrub = true;
1530 break;
1531 case PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_REV2:
1532 family = "Xeon 56xx / i7-900";
1533 pvt->enable_scrub = true;
1534 break;
1535 default:
1536 family = "unknown";
1537 pvt->enable_scrub = false;
1538 }
1539 debugf0("Detected a processor type %s\n", family);
1540 } else
1541 goto error;
1542
1543 debugf0("Associated fn %d.%d, dev = %p, socket %d\n",
1544 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
1545 pdev, i7core_dev->socket);
1546
1547 if (PCI_SLOT(pdev->devfn) == 3 &&
1548 PCI_FUNC(pdev->devfn) == 2)
1549 pvt->is_registered = true;
1550 }
1551
1552 return 0;
1553
1554 error:
1555 i7core_printk(KERN_ERR, "Device %d, function %d "
1556 "is out of the expected range\n",
1557 slot, func);
1558 return -EINVAL;
1559 }
1560
1561 /****************************************************************************
1562 Error check routines
1563 ****************************************************************************/
1564 static void i7core_rdimm_update_csrow(struct mem_ctl_info *mci,
1565 const int chan,
1566 const int dimm,
1567 const int add)
1568 {
1569 char *msg;
1570 struct i7core_pvt *pvt = mci->pvt_info;
1571 int row = pvt->csrow_map[chan][dimm], i;
1572
1573 for (i = 0; i < add; i++) {
1574 msg = kasprintf(GFP_KERNEL, "Corrected error "
1575 "(Socket=%d channel=%d dimm=%d)",
1576 pvt->i7core_dev->socket, chan, dimm);
1577
1578 edac_mc_handle_fbd_ce(mci, row, 0, msg);
1579 kfree(msg);
1580 }
1581 }
1582
1583 static void i7core_rdimm_update_ce_count(struct mem_ctl_info *mci,
1584 const int chan,
1585 const int new0,
1586 const int new1,
1587 const int new2)
1588 {
1589 struct i7core_pvt *pvt = mci->pvt_info;
1590 int add0 = 0, add1 = 0, add2 = 0;
1591 /* Updates CE counters if it is not the first time here */
1592 if (pvt->ce_count_available) {
1593 /* Updates CE counters */
1594
1595 add2 = new2 - pvt->rdimm_last_ce_count[chan][2];
1596 add1 = new1 - pvt->rdimm_last_ce_count[chan][1];
1597 add0 = new0 - pvt->rdimm_last_ce_count[chan][0];
1598
1599 if (add2 < 0)
1600 add2 += 0x7fff;
1601 pvt->rdimm_ce_count[chan][2] += add2;
1602
1603 if (add1 < 0)
1604 add1 += 0x7fff;
1605 pvt->rdimm_ce_count[chan][1] += add1;
1606
1607 if (add0 < 0)
1608 add0 += 0x7fff;
1609 pvt->rdimm_ce_count[chan][0] += add0;
1610 } else
1611 pvt->ce_count_available = 1;
1612
1613 /* Store the new values */
1614 pvt->rdimm_last_ce_count[chan][2] = new2;
1615 pvt->rdimm_last_ce_count[chan][1] = new1;
1616 pvt->rdimm_last_ce_count[chan][0] = new0;
1617
1618 /* Update the EDAC core */
1619 if (add0 != 0)
1620 i7core_rdimm_update_csrow(mci, chan, 0, add0);
1621 if (add1 != 0)
1622 i7core_rdimm_update_csrow(mci, chan, 1, add1);
1623 if (add2 != 0)
1624 i7core_rdimm_update_csrow(mci, chan, 2, add2);
1625
1626 }
1627
1628 static void i7core_rdimm_check_mc_ecc_err(struct mem_ctl_info *mci)
1629 {
1630 struct i7core_pvt *pvt = mci->pvt_info;
1631 u32 rcv[3][2];
1632 int i, new0, new1, new2;
1633
1634 /* Read DEV 3: FUN 2: MC_COR_ECC_CNT regs directly */
1635 pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_0,
1636 &rcv[0][0]);
1637 pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_1,
1638 &rcv[0][1]);
1639 pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_2,
1640 &rcv[1][0]);
1641 pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_3,
1642 &rcv[1][1]);
1643 pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_4,
1644 &rcv[2][0]);
1645 pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_5,
1646 &rcv[2][1]);
1647 for (i = 0 ; i < 3; i++) {
1648 debugf3("MC_COR_ECC_CNT%d = 0x%x; MC_COR_ECC_CNT%d = 0x%x\n",
1649 (i * 2), rcv[i][0], (i * 2) + 1, rcv[i][1]);
1650 /* If the channel has 3 DIMMs */
1651 if (pvt->channel[i].dimms > 2) {
1652 new0 = DIMM_BOT_COR_ERR(rcv[i][0]);
1653 new1 = DIMM_TOP_COR_ERR(rcv[i][0]);
1654 new2 = DIMM_BOT_COR_ERR(rcv[i][1]);
1655 } else {
1656 new0 = DIMM_TOP_COR_ERR(rcv[i][0]) +
1657 DIMM_BOT_COR_ERR(rcv[i][0]);
1658 new1 = DIMM_TOP_COR_ERR(rcv[i][1]) +
1659 DIMM_BOT_COR_ERR(rcv[i][1]);
1660 new2 = 0;
1661 }
1662
1663 i7core_rdimm_update_ce_count(mci, i, new0, new1, new2);
1664 }
1665 }
1666
1667 /* This function is based on the device 3 function 4 registers as described on:
1668 * Intel Xeon Processor 5500 Series Datasheet Volume 2
1669 * http://www.intel.com/Assets/PDF/datasheet/321322.pdf
1670 * also available at:
1671 * http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
1672 */
1673 static void i7core_udimm_check_mc_ecc_err(struct mem_ctl_info *mci)
1674 {
1675 struct i7core_pvt *pvt = mci->pvt_info;
1676 u32 rcv1, rcv0;
1677 int new0, new1, new2;
1678
1679 if (!pvt->pci_mcr[4]) {
1680 debugf0("%s MCR registers not found\n", __func__);
1681 return;
1682 }
1683
1684 /* Corrected test errors */
1685 pci_read_config_dword(pvt->pci_mcr[4], MC_TEST_ERR_RCV1, &rcv1);
1686 pci_read_config_dword(pvt->pci_mcr[4], MC_TEST_ERR_RCV0, &rcv0);
1687
1688 /* Store the new values */
1689 new2 = DIMM2_COR_ERR(rcv1);
1690 new1 = DIMM1_COR_ERR(rcv0);
1691 new0 = DIMM0_COR_ERR(rcv0);
1692
1693 /* Updates CE counters if it is not the first time here */
1694 if (pvt->ce_count_available) {
1695 /* Updates CE counters */
1696 int add0, add1, add2;
1697
1698 add2 = new2 - pvt->udimm_last_ce_count[2];
1699 add1 = new1 - pvt->udimm_last_ce_count[1];
1700 add0 = new0 - pvt->udimm_last_ce_count[0];
1701
1702 if (add2 < 0)
1703 add2 += 0x7fff;
1704 pvt->udimm_ce_count[2] += add2;
1705
1706 if (add1 < 0)
1707 add1 += 0x7fff;
1708 pvt->udimm_ce_count[1] += add1;
1709
1710 if (add0 < 0)
1711 add0 += 0x7fff;
1712 pvt->udimm_ce_count[0] += add0;
1713
1714 if (add0 | add1 | add2)
1715 i7core_printk(KERN_ERR, "New Corrected error(s): "
1716 "dimm0: +%d, dimm1: +%d, dimm2 +%d\n",
1717 add0, add1, add2);
1718 } else
1719 pvt->ce_count_available = 1;
1720
1721 /* Store the new values */
1722 pvt->udimm_last_ce_count[2] = new2;
1723 pvt->udimm_last_ce_count[1] = new1;
1724 pvt->udimm_last_ce_count[0] = new0;
1725 }
1726
1727 /*
1728 * According to tables E-11 and E-12 of chapter E.3.3 of the Intel 64 and IA-32
1729 * Architectures Software Developer’s Manual Volume 3B,
1730 * Nehalem is defined as family 0x06, model 0x1a.
1731 *
1732 * The MCA registers used here are the following ones:
1733 * struct mce field MCA Register
1734 * m->status MSR_IA32_MC8_STATUS
1735 * m->addr MSR_IA32_MC8_ADDR
1736 * m->misc MSR_IA32_MC8_MISC
1737 * In the case of Nehalem, the error information is carried in the .status and
1738 * .misc fields.
1739 */
1740 static void i7core_mce_output_error(struct mem_ctl_info *mci,
1741 const struct mce *m)
1742 {
1743 struct i7core_pvt *pvt = mci->pvt_info;
1744 char *type, *optype, *err, *msg;
1745 unsigned long error = m->status & 0x1ff0000l;
1746 u32 optypenum = (m->status >> 4) & 0x07;
1747 u32 core_err_cnt = (m->status >> 38) & 0x7fff;
1748 u32 dimm = (m->misc >> 16) & 0x3;
1749 u32 channel = (m->misc >> 18) & 0x3;
1750 u32 syndrome = m->misc >> 32;
1751 u32 errnum = find_first_bit(&error, 32);
1752 int csrow;
1753
1754 if (m->mcgstatus & 1)
1755 type = "FATAL";
1756 else
1757 type = "NON_FATAL";
1758
1759 switch (optypenum) {
1760 case 0:
1761 optype = "generic undef request";
1762 break;
1763 case 1:
1764 optype = "read error";
1765 break;
1766 case 2:
1767 optype = "write error";
1768 break;
1769 case 3:
1770 optype = "addr/cmd error";
1771 break;
1772 case 4:
1773 optype = "scrubbing error";
1774 break;
1775 default:
1776 optype = "reserved";
1777 break;
1778 }
1779
1780 switch (errnum) {
1781 case 16:
1782 err = "read ECC error";
1783 break;
1784 case 17:
1785 err = "RAS ECC error";
1786 break;
1787 case 18:
1788 err = "write parity error";
1789 break;
1790 case 19:
1791 err = "redundancy loss";
1792 break;
1793 case 20:
1794 err = "reserved";
1795 break;
1796 case 21:
1797 err = "memory range error";
1798 break;
1799 case 22:
1800 err = "RTID out of range";
1801 break;
1802 case 23:
1803 err = "address parity error";
1804 break;
1805 case 24:
1806 err = "byte enable parity error";
1807 break;
1808 default:
1809 err = "unknown";
1810 }
1811
1812 /* FIXME: should convert addr into bank and rank information */
1813 msg = kasprintf(GFP_ATOMIC,
1814 "%s (addr = 0x%08llx, cpu=%d, Dimm=%d, Channel=%d, "
1815 "syndrome=0x%08x, count=%d, Err=%08llx:%08llx (%s: %s))\n",
1816 type, (long long) m->addr, m->cpu, dimm, channel,
1817 syndrome, core_err_cnt, (long long)m->status,
1818 (long long)m->misc, optype, err);
1819
1820 debugf0("%s", msg);
1821
1822 csrow = pvt->csrow_map[channel][dimm];
1823
1824 /* Call the helper to output message */
1825 if (m->mcgstatus & 1)
1826 edac_mc_handle_fbd_ue(mci, csrow, 0,
1827 0 /* FIXME: should be channel here */, msg);
1828 else if (!pvt->is_registered)
1829 edac_mc_handle_fbd_ce(mci, csrow,
1830 0 /* FIXME: should be channel here */, msg);
1831
1832 kfree(msg);
1833 }
1834
1835 /*
1836 * i7core_check_error Retrieve and process errors reported by the
1837 * hardware. Called by the Core module.
1838 */
1839 static void i7core_check_error(struct mem_ctl_info *mci)
1840 {
1841 struct i7core_pvt *pvt = mci->pvt_info;
1842 int i;
1843 unsigned count = 0;
1844 struct mce *m;
1845
1846 /*
1847 * MCE first step: Copy all mce errors into a temporary buffer
1848 * We use a double buffering here, to reduce the risk of
1849 * losing an error.
1850 */
1851 smp_rmb();
1852 count = (pvt->mce_out + MCE_LOG_LEN - pvt->mce_in)
1853 % MCE_LOG_LEN;
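/*
 * mce_out is the producer index (advanced by i7core_mce_check_error())
 * and mce_in the consumer index; their modular difference is the number
 * of queued entries. One slot is always kept free so that
 * "out + 1 == in" can signal a full ring in the producer.
 */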
1854 if (!count)
1855 goto check_ce_error;
1856
1857 m = pvt->mce_outentry;
1858 if (pvt->mce_in + count > MCE_LOG_LEN) {
1859 unsigned l = MCE_LOG_LEN - pvt->mce_in;
1860
1861 memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * l);
1862 smp_wmb();
1863 pvt->mce_in = 0;
1864 count -= l;
1865 m += l;
1866 }
1867 memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * count);
1868 smp_wmb();
1869 pvt->mce_in += count;
1870
1871 smp_rmb();
1872 if (pvt->mce_overrun) {
1873 i7core_printk(KERN_ERR, "Lost %d memory errors\n",
1874 pvt->mce_overrun);
1875 smp_wmb();
1876 pvt->mce_overrun = 0;
1877 }
1878
1879 /*
1880 * MCE second step: parse errors and display
1881 */
1882 for (i = 0; i < count; i++)
1883 i7core_mce_output_error(mci, &pvt->mce_outentry[i]);
1884
1885 /*
1886 * Now, let's increment CE error counts
1887 */
1888 check_ce_error:
1889 if (!pvt->is_registered)
1890 i7core_udimm_check_mc_ecc_err(mci);
1891 else
1892 i7core_rdimm_check_mc_ecc_err(mci);
1893 }
1894
1895 /*
1896 * i7core_mce_check_error Replicates mcelog routine to get errors
1897 * This routine simply queues mcelog errors, and
1898 * returns. The error itself should be handled later
1899 * by i7core_check_error.
1900 * WARNING: As this routine should be called at NMI time, extra care should
1901 * be taken to avoid deadlocks, and to be as fast as possible.
1902 */
1903 static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val,
1904 void *data)
1905 {
1906 struct mce *mce = (struct mce *)data;
1907 struct i7core_dev *i7_dev;
1908 struct mem_ctl_info *mci;
1909 struct i7core_pvt *pvt;
1910
1911 i7_dev = get_i7core_dev(mce->socketid);
1912 if (!i7_dev)
1913 return NOTIFY_BAD;
1914
1915 mci = i7_dev->mci;
1916 pvt = mci->pvt_info;
1917
1918 /*
1919 * Just let mcelog handle it if the error is
1920 * outside the memory controller
1921 */
1922 if (((mce->status & 0xffff) >> 7) != 1)
1923 return NOTIFY_DONE;
1924
1925 /* Bank 8 registers are the only ones that we know how to handle */
1926 if (mce->bank != 8)
1927 return NOTIFY_DONE;
1928
1929 #ifdef CONFIG_SMP
1930 /* Only handle if it is the right mc controller */
1931 if (mce->socketid != pvt->i7core_dev->socket)
1932 return NOTIFY_DONE;
1933 #endif
1934
1935 smp_rmb();
1936 if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) {
1937 smp_wmb();
1938 pvt->mce_overrun++;
1939 return NOTIFY_DONE;
1940 }
1941
1942 /* Copy memory error at the ringbuffer */
1943 memcpy(&pvt->mce_entry[pvt->mce_out], mce, sizeof(*mce));
1944 smp_wmb();
1945 pvt->mce_out = (pvt->mce_out + 1) % MCE_LOG_LEN;
1946
1947 /* Handle fatal errors immediately */
1948 if (mce->mcgstatus & 1)
1949 i7core_check_error(mci);
1950
1951 /* Advise mcelog that the errors were handled */
1952 return NOTIFY_STOP;
1953 }
1954
1955 static struct notifier_block i7_mce_dec = {
1956 .notifier_call = i7core_mce_check_error,
1957 };
1958
1959 struct memdev_dmi_entry {
1960 u8 type;
1961 u8 length;
1962 u16 handle;
1963 u16 phys_mem_array_handle;
1964 u16 mem_err_info_handle;
1965 u16 total_width;
1966 u16 data_width;
1967 u16 size;
1968 u8 form;
1969 u8 device_set;
1970 u8 device_locator;
1971 u8 bank_locator;
1972 u8 memory_type;
1973 u16 type_detail;
1974 u16 speed;
1975 u8 manufacturer;
1976 u8 serial_number;
1977 u8 asset_tag;
1978 u8 part_number;
1979 u8 attributes;
1980 u32 extended_size;
1981 u16 conf_mem_clk_speed;
1982 } __attribute__((__packed__));
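/*
 * Note: this mirrors the layout of an SMBIOS/DMI Type 17 "Memory Device"
 * record. decode_dclk() below compares the entry's length against field
 * offsets to cope with older, shorter table entries that lack the later
 * fields such as conf_mem_clk_speed.
 */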
1983
1984
1985 /*
1986 * Decode the DRAM Clock Frequency. Be paranoid: make sure that all
1987 * memory devices report the same speed, and if they don't, consider
1988 * all speeds to be invalid.
1989 */
1990 static void decode_dclk(const struct dmi_header *dh, void *_dclk_freq)
1991 {
1992 int *dclk_freq = _dclk_freq;
1993 u16 dmi_mem_clk_speed;
1994
1995 if (*dclk_freq == -1)
1996 return;
1997
1998 if (dh->type == DMI_ENTRY_MEM_DEVICE) {
1999 struct memdev_dmi_entry *memdev_dmi_entry =
2000 (struct memdev_dmi_entry *)dh;
2001 unsigned long conf_mem_clk_speed_offset =
2002 (unsigned long)&memdev_dmi_entry->conf_mem_clk_speed -
2003 (unsigned long)&memdev_dmi_entry->type;
2004 unsigned long speed_offset =
2005 (unsigned long)&memdev_dmi_entry->speed -
2006 (unsigned long)&memdev_dmi_entry->type;
2007
2008 /* Check that a DIMM is present */
2009 if (memdev_dmi_entry->size == 0)
2010 return;
2011
2012 /*
2013 * Pick the configured speed if it's available; otherwise fall back
2014 * to the DIMM speed. If neither is present, we don't have a speed.
2015 */
2016 if (memdev_dmi_entry->length > conf_mem_clk_speed_offset) {
2017 dmi_mem_clk_speed =
2018 memdev_dmi_entry->conf_mem_clk_speed;
2019 } else if (memdev_dmi_entry->length > speed_offset) {
2020 dmi_mem_clk_speed = memdev_dmi_entry->speed;
2021 } else {
2022 *dclk_freq = -1;
2023 return;
2024 }
2025
2026 if (*dclk_freq == 0) {
2027 /* First pass, speed was 0 */
2028 if (dmi_mem_clk_speed > 0) {
2029 /* Set speed if a valid speed is read */
2030 *dclk_freq = dmi_mem_clk_speed;
2031 } else {
2032 /* Otherwise we don't have a valid speed */
2033 *dclk_freq = -1;
2034 }
2035 } else if (*dclk_freq > 0 &&
2036 *dclk_freq != dmi_mem_clk_speed) {
2037 /*
2038 * If we have a speed, check that all DIMMs are the same
2039 * speed; otherwise mark the speed as invalid.
2040 */
2041 *dclk_freq = -1;
2042 }
2043 }
2044 }
2045
2046 /*
2047 * The default DCLK frequency is used as a fallback if we
2048 * fail to find anything reliable in the DMI. The value
2049 * is taken straight from the datasheet.
2050 */
2051 #define DEFAULT_DCLK_FREQ 800
2052
2053 static int get_dclk_freq(void)
2054 {
2055 int dclk_freq = 0;
2056
2057 dmi_walk(decode_dclk, (void *)&dclk_freq);
2058
2059 if (dclk_freq < 1)
2060 return DEFAULT_DCLK_FREQ;
2061
2062 return dclk_freq;
2063 }
2064
2065 /*
2066 * set_sdram_scrub_rate This routine sets the byte/sec bandwidth scrub rate
2067 * in hardware according to the SCRUBINTERVAL formula
2068 * found in the datasheet.
2069 */
2070 static int set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 new_bw)
2071 {
2072 struct i7core_pvt *pvt = mci->pvt_info;
2073 struct pci_dev *pdev;
2074 u32 dw_scrub;
2075 u32 dw_ssr;
2076
2077 /* Get data from the MC register, function 2 */
2078 pdev = pvt->pci_mcr[2];
2079 if (!pdev)
2080 return -ENODEV;
2081
2082 pci_read_config_dword(pdev, MC_SCRUB_CONTROL, &dw_scrub);
2083
2084 if (new_bw == 0) {
2085 /* Prepare to disable patrol scrub */
2086 dw_scrub &= ~STARTSCRUB;
2087 /* Stop the patrol scrub engine */
2088 write_and_test(pdev, MC_SCRUB_CONTROL,
2089 dw_scrub & ~SCRUBINTERVAL_MASK);
2090
2091 /* Get current status of scrub rate and set bit to disable */
2092 pci_read_config_dword(pdev, MC_SSRCONTROL, &dw_ssr);
2093 dw_ssr &= ~SSR_MODE_MASK;
2094 dw_ssr |= SSR_MODE_DISABLE;
2095 } else {
2096 const int cache_line_size = 64;
2097 const u32 freq_dclk_mhz = pvt->dclk_freq;
2098 unsigned long long scrub_interval;
2099 /*
2100 * Translate the desired scrub rate to a register value and
2101 * program that value into the corresponding register.
2102 */
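/*
 * Worked example with illustrative numbers (not from the datasheet):
 * with an 800 MHz dclk, a 64-byte cache line and a requested bandwidth
 * of 5,120,000 bytes/sec, scrub_interval = 800 * 64 * 1000000 / 5120000
 * = 10000, which is then programmed below (assuming it fits within
 * SCRUBINTERVAL_MASK).
 */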
2103 scrub_interval = (unsigned long long)freq_dclk_mhz *
2104 cache_line_size * 1000000;
2105 do_div(scrub_interval, new_bw);
2106
2107 if (!scrub_interval || scrub_interval > SCRUBINTERVAL_MASK)
2108 return -EINVAL;
2109
2110 dw_scrub = SCRUBINTERVAL_MASK & scrub_interval;
2111
2112 /* Start the patrol scrub engine */
2113 pci_write_config_dword(pdev, MC_SCRUB_CONTROL,
2114 STARTSCRUB | dw_scrub);
2115
2116 /* Get current status of scrub rate and set bit to enable */
2117 pci_read_config_dword(pdev, MC_SSRCONTROL, &dw_ssr);
2118 dw_ssr &= ~SSR_MODE_MASK;
2119 dw_ssr |= SSR_MODE_ENABLE;
2120 }
2121 /* Disable or enable scrubbing */
2122 pci_write_config_dword(pdev, MC_SSRCONTROL, dw_ssr);
2123
2124 return new_bw;
2125 }
2126
2127 /*
2128 * get_sdram_scrub_rate This routine converts the current scrub rate value
2129 * into byte/sec bandwidth according to the
2130 * SCRUBINTERVAL formula found in the datasheet.
2131 */
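/*
 * This is the inverse of set_sdram_scrub_rate(): with the illustrative
 * numbers used there (800 MHz dclk, 64-byte cache line, scrubval 10000),
 * the computed bandwidth is 800 * 1000000 * 64 / 10000 = 5,120,000
 * bytes/sec.
 */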
2132 static int get_sdram_scrub_rate(struct mem_ctl_info *mci)
2133 {
2134 struct i7core_pvt *pvt = mci->pvt_info;
2135 struct pci_dev *pdev;
2136 const u32 cache_line_size = 64;
2137 const u32 freq_dclk_mhz = pvt->dclk_freq;
2138 unsigned long long scrub_rate;
2139 u32 scrubval;
2140
2141 /* Get data from the MC register, function 2 */
2142 pdev = pvt->pci_mcr[2];
2143 if (!pdev)
2144 return -ENODEV;
2145
2146 /* Get current scrub control data */
2147 pci_read_config_dword(pdev, MC_SCRUB_CONTROL, &scrubval);
2148
2149 /* Mask the highest 8 bits to 0 */
2150 scrubval &= SCRUBINTERVAL_MASK;
2151 if (!scrubval)
2152 return 0;
2153
2154 /* Convert the scrub rate value into byte/sec bandwidth */
2155 scrub_rate = (unsigned long long)freq_dclk_mhz *
2156 1000000 * cache_line_size;
2157 do_div(scrub_rate, scrubval);
2158 return (int)scrub_rate;
2159 }
2160
2161 static void enable_sdram_scrub_setting(struct mem_ctl_info *mci)
2162 {
2163 struct i7core_pvt *pvt = mci->pvt_info;
2164 u32 pci_lock;
2165
2166 /* Unlock writes to pci registers */
2167 pci_read_config_dword(pvt->pci_noncore, MC_CFG_CONTROL, &pci_lock);
2168 pci_lock &= ~0x3;
2169 pci_write_config_dword(pvt->pci_noncore, MC_CFG_CONTROL,
2170 pci_lock | MC_CFG_UNLOCK);
2171
2172 mci->set_sdram_scrub_rate = set_sdram_scrub_rate;
2173 mci->get_sdram_scrub_rate = get_sdram_scrub_rate;
2174 }
2175
2176 static void disable_sdram_scrub_setting(struct mem_ctl_info *mci)
2177 {
2178 struct i7core_pvt *pvt = mci->pvt_info;
2179 u32 pci_lock;
2180
2181 /* Lock writes to pci registers */
2182 pci_read_config_dword(pvt->pci_noncore, MC_CFG_CONTROL, &pci_lock);
2183 pci_lock &= ~0x3;
2184 pci_write_config_dword(pvt->pci_noncore, MC_CFG_CONTROL,
2185 pci_lock | MC_CFG_LOCK);
2186 }
2187
2188 static void i7core_pci_ctl_create(struct i7core_pvt *pvt)
2189 {
2190 pvt->i7core_pci = edac_pci_create_generic_ctl(
2191 &pvt->i7core_dev->pdev[0]->dev,
2192 EDAC_MOD_STR);
2193 if (unlikely(!pvt->i7core_pci))
2194 i7core_printk(KERN_WARNING,
2195 "Unable to setup PCI error report via EDAC\n");
2196 }
2197
2198 static void i7core_pci_ctl_release(struct i7core_pvt *pvt)
2199 {
2200 if (likely(pvt->i7core_pci))
2201 edac_pci_release_generic_ctl(pvt->i7core_pci);
2202 else
2203 i7core_printk(KERN_ERR,
2204 "Couldn't find mem_ctl_info for socket %d\n",
2205 pvt->i7core_dev->socket);
2206 pvt->i7core_pci = NULL;
2207 }
2208
2209 static void i7core_unregister_mci(struct i7core_dev *i7core_dev)
2210 {
2211 struct mem_ctl_info *mci = i7core_dev->mci;
2212 struct i7core_pvt *pvt;
2213
2214 if (unlikely(!mci || !mci->pvt_info)) {
2215 debugf0("MC: " __FILE__ ": %s(): dev = %p\n",
2216 __func__, &i7core_dev->pdev[0]->dev);
2217
2218 i7core_printk(KERN_ERR, "Couldn't find mci handler\n");
2219 return;
2220 }
2221
2222 pvt = mci->pvt_info;
2223
2224 debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n",
2225 __func__, mci, &i7core_dev->pdev[0]->dev);
2226
2227 /* Disable scrubrate setting */
2228 if (pvt->enable_scrub)
2229 disable_sdram_scrub_setting(mci);
2230
2231 mce_unregister_decode_chain(&i7_mce_dec);
2232
2233 /* Disable EDAC polling */
2234 i7core_pci_ctl_release(pvt);
2235
2236 /* Remove MC sysfs nodes */
2237 edac_mc_del_mc(mci->dev);
2238
2239 debugf1("%s: free mci struct\n", mci->ctl_name);
2240 kfree(mci->ctl_name);
2241 edac_mc_free(mci);
2242 i7core_dev->mci = NULL;
2243 }
2244
2245 static int i7core_register_mci(struct i7core_dev *i7core_dev)
2246 {
2247 struct mem_ctl_info *mci;
2248 struct i7core_pvt *pvt;
2249 int rc, channels, csrows;
2250
2251 /* Check the number of active, non-disabled channels */
2252 rc = i7core_get_active_channels(i7core_dev->socket, &channels, &csrows);
2253 if (unlikely(rc < 0))
2254 return rc;
2255
2256 /* allocate a new MC control structure */
2257 mci = edac_mc_alloc(sizeof(*pvt), csrows, channels, i7core_dev->socket);
2258 if (unlikely(!mci))
2259 return -ENOMEM;
2260
2261 debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n",
2262 __func__, mci, &i7core_dev->pdev[0]->dev);
2263
2264 pvt = mci->pvt_info;
2265 memset(pvt, 0, sizeof(*pvt));
2266
2267 /* Associate i7core_dev and mci for future use */
2268 pvt->i7core_dev = i7core_dev;
2269 i7core_dev->mci = mci;
2270
2271 /*
2272 * FIXME: how to handle RDDR3 at MCI level? It is possible to have
2273 * mixed RDDR3/UDDR3 with Nehalem, provided that they are on different
2274 * memory channels
2275 */
2276 mci->mtype_cap = MEM_FLAG_DDR3;
2277 mci->edac_ctl_cap = EDAC_FLAG_NONE;
2278 mci->edac_cap = EDAC_FLAG_NONE;
2279 mci->mod_name = "i7core_edac.c";
2280 mci->mod_ver = I7CORE_REVISION;
2281 mci->ctl_name = kasprintf(GFP_KERNEL, "i7 core #%d",
2282 i7core_dev->socket);
2283 mci->dev_name = pci_name(i7core_dev->pdev[0]);
2284 mci->ctl_page_to_phys = NULL;
2285
2286 /* Store pci devices at mci for faster access */
2287 rc = mci_bind_devs(mci, i7core_dev);
2288 if (unlikely(rc < 0))
2289 goto fail0;
2290
2291 if (pvt->is_registered)
2292 mci->mc_driver_sysfs_attributes = i7core_sysfs_rdimm_attrs;
2293 else
2294 mci->mc_driver_sysfs_attributes = i7core_sysfs_udimm_attrs;
2295
2296 /* Get the basic DIMM configuration */
2297 get_dimm_config(mci);
2298 /* record ptr to the generic device */
2299 mci->dev = &i7core_dev->pdev[0]->dev;
2300 /* Set the function pointer to an actual operation function */
2301 mci->edac_check = i7core_check_error;
2302
2303 /* Enable scrubrate setting */
2304 if (pvt->enable_scrub)
2305 enable_sdram_scrub_setting(mci);
2306
2307 /* add this new MC control structure to EDAC's list of MCs */
2308 if (unlikely(edac_mc_add_mc(mci))) {
2309 debugf0("MC: " __FILE__
2310 ": %s(): failed edac_mc_add_mc()\n", __func__);
2311 /* FIXME: perhaps some code should go here that disables error
2312 * reporting if we just enabled it
2313 */
2314
2315 rc = -EINVAL;
2316 goto fail0;
2317 }
2318
2319 /* Default error mask is any memory */
2320 pvt->inject.channel = 0;
2321 pvt->inject.dimm = -1;
2322 pvt->inject.rank = -1;
2323 pvt->inject.bank = -1;
2324 pvt->inject.page = -1;
2325 pvt->inject.col = -1;
2326
2327 /* allocate generic PCI control info */
2328 i7core_pci_ctl_create(pvt);
2329
2330 /* DCLK for scrub rate setting */
2331 pvt->dclk_freq = get_dclk_freq();
2332
2333 mce_register_decode_chain(&i7_mce_dec);
2334
2335 return 0;
2336
2337 fail0:
2338 kfree(mci->ctl_name);
2339 edac_mc_free(mci);
2340 i7core_dev->mci = NULL;
2341 return rc;
2342 }
2343
2344 /*
2345 * i7core_probe Probe for ONE instance of device to see if it is
2346 * present.
2347 * return:
2348 * 0 if a device was found
2349 * < 0 for an error code
2350 */
2351
2352 static int __devinit i7core_probe(struct pci_dev *pdev,
2353 const struct pci_device_id *id)
2354 {
2355 int rc, count = 0;
2356 struct i7core_dev *i7core_dev;
2357
2358 /* get the pci devices we want to reserve for our use */
2359 mutex_lock(&i7core_edac_lock);
2360
2361 /*
2362 * All memory controllers are allocated at the first pass.
2363 */
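/*
 * Note: "probed" guards the whole enumeration; only the first matching
 * PCI device triggers i7core_get_all_devices(), and any later probe()
 * call returns -ENODEV immediately (see also i7core_remove(), which
 * decrements the counter after releasing everything).
 */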
2364 if (unlikely(probed >= 1)) {
2365 mutex_unlock(&i7core_edac_lock);
2366 return -ENODEV;
2367 }
2368 probed++;
2369
2370 rc = i7core_get_all_devices();
2371 if (unlikely(rc < 0))
2372 goto fail0;
2373
2374 list_for_each_entry(i7core_dev, &i7core_edac_list, list) {
2375 count++;
2376 rc = i7core_register_mci(i7core_dev);
2377 if (unlikely(rc < 0))
2378 goto fail1;
2379 }
2380
2381 /*
2382 * Nehalem-EX uses a different memory controller. However, as the
2383 * memory controller is not visible on some Nehalem/Nehalem-EP, we
2384 * need to indirectly probe via an X58 PCI device. The same devices
2385 * are found on (some) Nehalem-EX. So, on those machines, the
2386 * probe routine needs to return -ENODEV, as the actual Memory
2387 * Controller registers won't be detected.
2388 */
2389 if (!count) {
2390 rc = -ENODEV;
2391 goto fail1;
2392 }
2393
2394 i7core_printk(KERN_INFO,
2395 "Driver loaded, %d memory controller(s) found.\n",
2396 count);
2397
2398 mutex_unlock(&i7core_edac_lock);
2399 return 0;
2400
2401 fail1:
2402 list_for_each_entry(i7core_dev, &i7core_edac_list, list)
2403 i7core_unregister_mci(i7core_dev);
2404
2405 i7core_put_all_devices();
2406 fail0:
2407 mutex_unlock(&i7core_edac_lock);
2408 return rc;
2409 }
2410
2411 /*
2412 * i7core_remove destructor for one instance of device
2413 *
2414 */
2415 static void __devexit i7core_remove(struct pci_dev *pdev)
2416 {
2417 struct i7core_dev *i7core_dev;
2418
2419 debugf0(__FILE__ ": %s()\n", __func__);
2420
2421 /*
2422 * There is a problem here: the pdev value for removal will be wrong, since
2423 * it will point to the X58 register used to detect that the machine
2424 * is a Nehalem or newer design. However, due to the way several PCI
2425 * devices are grouped together to provide MC functionality, we need
2426 * to use a different method for releasing the devices
2427 */
2428
2429 mutex_lock(&i7core_edac_lock);
2430
2431 if (unlikely(!probed)) {
2432 mutex_unlock(&i7core_edac_lock);
2433 return;
2434 }
2435
2436 list_for_each_entry(i7core_dev, &i7core_edac_list, list)
2437 i7core_unregister_mci(i7core_dev);
2438
2439 /* Release PCI resources */
2440 i7core_put_all_devices();
2441
2442 probed--;
2443
2444 mutex_unlock(&i7core_edac_lock);
2445 }
2446
2447 MODULE_DEVICE_TABLE(pci, i7core_pci_tbl);
2448
2449 /*
2450 * i7core_driver pci_driver structure for this module
2451 *
2452 */
2453 static struct pci_driver i7core_driver = {
2454 .name = "i7core_edac",
2455 .probe = i7core_probe,
2456 .remove = __devexit_p(i7core_remove),
2457 .id_table = i7core_pci_tbl,
2458 };
2459
2460 /*
2461 * i7core_init Module entry function
2462 * Try to initialize this module for its devices
2463 */
2464 static int __init i7core_init(void)
2465 {
2466 int pci_rc;
2467
2468 debugf2("MC: " __FILE__ ": %s()\n", __func__);
2469
2470 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
2471 opstate_init();
2472
2473 if (use_pci_fixup)
2474 i7core_xeon_pci_fixup(pci_dev_table);
2475
2476 pci_rc = pci_register_driver(&i7core_driver);
2477
2478 if (pci_rc >= 0)
2479 return 0;
2480
2481 i7core_printk(KERN_ERR, "Failed to register device with error %d.\n",
2482 pci_rc);
2483
2484 return pci_rc;
2485 }
2486
2487 /*
2488 * i7core_exit() Module exit function
2489 * Unregister the driver
2490 */
2491 static void __exit i7core_exit(void)
2492 {
2493 debugf2("MC: " __FILE__ ": %s()\n", __func__);
2494 pci_unregister_driver(&i7core_driver);
2495 }
2496
2497 module_init(i7core_init);
2498 module_exit(i7core_exit);
2499
2500 MODULE_LICENSE("GPL");
2501 MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
2502 MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
2503 MODULE_DESCRIPTION("MC Driver for Intel i7 Core memory controllers - "
2504 I7CORE_REVISION);
2505
2506 module_param(edac_op_state, int, 0444);
2507 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");