/* Intel i7 core/Nehalem Memory Controller kernel module
 *
 * This driver supports the memory controllers found on the Intel
 * processor families i7core, i7core 7xx/8xx, i5core, Xeon 35xx,
 * Xeon 55xx and Xeon 56xx also known as Nehalem, Nehalem-EP, Lynnfield
 * and Westmere-EP.
 *
 * This file may be distributed under the terms of the
 * GNU General Public License version 2 only.
 *
 * Copyright (c) 2009-2010 by:
 *	 Mauro Carvalho Chehab <mchehab@redhat.com>
 *
 * Red Hat Inc. http://www.redhat.com
 *
 * Forked and adapted from the i5400_edac driver
 *
 * Based on the following public Intel datasheets:
 * Intel Core i7 Processor Extreme Edition and Intel Core i7 Processor
 * Datasheet, Volume 2:
 *	http://download.intel.com/design/processor/datashts/320835.pdf
 * Intel Xeon Processor 5500 Series Datasheet Volume 2
 *	http://www.intel.com/Assets/PDF/datasheet/321322.pdf
 * also available at:
 *	http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/edac.h>
#include <linux/mmzone.h>
#include <linux/smp.h>
#include <asm/mce.h>
#include <asm/processor.h>
#include <asm/div64.h>

#include "edac_core.h"

/* Static vars */
static LIST_HEAD(i7core_edac_list);
static DEFINE_MUTEX(i7core_edac_lock);
static int probed;

static int use_pci_fixup;
module_param(use_pci_fixup, int, 0444);
MODULE_PARM_DESC(use_pci_fixup, "Enable PCI fixup to seek for hidden devices");
/*
 * This is used for Nehalem-EP and Nehalem-EX devices, where the non-core
 * registers start at bus 255, and are not reported by BIOS.
 * We currently find devices with only 2 sockets. In order to support more
 * QPIs (Quick Path Interconnects), just increment this number.
 */
#define MAX_SOCKET_BUSES	2


/*
 * Alter this version for the module when modifications are made
 */
#define I7CORE_REVISION    " Ver: 1.0.0"
#define EDAC_MOD_STR      "i7core_edac"

/*
 * Debug macros
 */
#define i7core_printk(level, fmt, arg...)			\
	edac_printk(level, "i7core", fmt, ##arg)

#define i7core_mc_printk(mci, level, fmt, arg...)		\
	edac_mc_chipset_printk(mci, level, "i7core", fmt, ##arg)

/*
 * i7core Memory Controller Registers
 */

	/* OFFSETS for Device 0 Function 0 */

#define MC_CFG_CONTROL	0x90
  #define MC_CFG_UNLOCK		0x02
  #define MC_CFG_LOCK		0x00

	/* OFFSETS for Device 3 Function 0 */

#define MC_CONTROL	0x48
#define MC_STATUS	0x4c
#define MC_MAX_DOD	0x64

/*
 * OFFSETS for Device 3 Function 4, as indicated on Xeon 5500 datasheet:
 *	http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
 */

#define MC_TEST_ERR_RCV1	0x60
  #define DIMM2_COR_ERR(r)			((r) & 0x7fff)

#define MC_TEST_ERR_RCV0	0x64
  #define DIMM1_COR_ERR(r)			(((r) >> 16) & 0x7fff)
  #define DIMM0_COR_ERR(r)			((r) & 0x7fff)

/* OFFSETS for Device 3 Function 2, as indicated on Xeon 5500 datasheet */
#define MC_SSRCONTROL		0x48
  #define SSR_MODE_DISABLE	0x00
  #define SSR_MODE_ENABLE	0x01
  #define SSR_MODE_MASK		0x03

#define MC_SCRUB_CONTROL	0x4c
  #define STARTSCRUB		(1 << 24)
  #define SCRUBINTERVAL_MASK    0xffffff

#define MC_COR_ECC_CNT_0	0x80
#define MC_COR_ECC_CNT_1	0x84
#define MC_COR_ECC_CNT_2	0x88
#define MC_COR_ECC_CNT_3	0x8c
#define MC_COR_ECC_CNT_4	0x90
#define MC_COR_ECC_CNT_5	0x94

#define DIMM_TOP_COR_ERR(r)			(((r) >> 16) & 0x7fff)
#define DIMM_BOT_COR_ERR(r)			((r) & 0x7fff)


	/* OFFSETS for Devices 4,5 and 6 Function 0 */

#define MC_CHANNEL_DIMM_INIT_PARAMS 0x58
  #define THREE_DIMMS_PRESENT		(1 << 24)
  #define SINGLE_QUAD_RANK_PRESENT	(1 << 23)
  #define QUAD_RANK_PRESENT		(1 << 22)
  #define REGISTERED_DIMM		(1 << 15)

#define MC_CHANNEL_MAPPER	0x60
  #define RDLCH(r, ch)		((((r) >> (3 + (ch * 6))) & 0x07) - 1)
  #define WRLCH(r, ch)		((((r) >> (ch * 6)) & 0x07) - 1)

#define MC_CHANNEL_RANK_PRESENT 0x7c
  #define RANK_PRESENT_MASK		0xffff

#define MC_CHANNEL_ADDR_MATCH	0xf0
#define MC_CHANNEL_ERROR_MASK	0xf8
#define MC_CHANNEL_ERROR_INJECT	0xfc
  #define INJECT_ADDR_PARITY	0x10
  #define INJECT_ECC		0x08
  #define MASK_CACHELINE	0x06
  #define MASK_FULL_CACHELINE	0x06
  #define MASK_MSB32_CACHELINE	0x04
  #define MASK_LSB32_CACHELINE	0x02
  #define NO_MASK_CACHELINE	0x00
  #define REPEAT_EN		0x01

	/* OFFSETS for Devices 4,5 and 6 Function 1 */

#define MC_DOD_CH_DIMM0		0x48
#define MC_DOD_CH_DIMM1		0x4c
#define MC_DOD_CH_DIMM2		0x50
  #define RANKOFFSET_MASK	((1 << 12) | (1 << 11) | (1 << 10))
  #define RANKOFFSET(x)		((x & RANKOFFSET_MASK) >> 10)
  #define DIMM_PRESENT_MASK	(1 << 9)
  #define DIMM_PRESENT(x)	(((x) & DIMM_PRESENT_MASK) >> 9)
  #define MC_DOD_NUMBANK_MASK		((1 << 8) | (1 << 7))
  #define MC_DOD_NUMBANK(x)		(((x) & MC_DOD_NUMBANK_MASK) >> 7)
  #define MC_DOD_NUMRANK_MASK		((1 << 6) | (1 << 5))
  #define MC_DOD_NUMRANK(x)		(((x) & MC_DOD_NUMRANK_MASK) >> 5)
  #define MC_DOD_NUMROW_MASK		((1 << 4) | (1 << 3) | (1 << 2))
  #define MC_DOD_NUMROW(x)		(((x) & MC_DOD_NUMROW_MASK) >> 2)
  #define MC_DOD_NUMCOL_MASK		3
  #define MC_DOD_NUMCOL(x)		((x) & MC_DOD_NUMCOL_MASK)

#define MC_RANK_PRESENT		0x7c

#define MC_SAG_CH_0	0x80
#define MC_SAG_CH_1	0x84
#define MC_SAG_CH_2	0x88
#define MC_SAG_CH_3	0x8c
#define MC_SAG_CH_4	0x90
#define MC_SAG_CH_5	0x94
#define MC_SAG_CH_6	0x98
#define MC_SAG_CH_7	0x9c

#define MC_RIR_LIMIT_CH_0	0x40
#define MC_RIR_LIMIT_CH_1	0x44
#define MC_RIR_LIMIT_CH_2	0x48
#define MC_RIR_LIMIT_CH_3	0x4C
#define MC_RIR_LIMIT_CH_4	0x50
#define MC_RIR_LIMIT_CH_5	0x54
#define MC_RIR_LIMIT_CH_6	0x58
#define MC_RIR_LIMIT_CH_7	0x5C
#define MC_RIR_LIMIT_MASK	((1 << 10) - 1)

#define MC_RIR_WAY_CH		0x80
  #define MC_RIR_WAY_OFFSET_MASK	(((1 << 14) - 1) & ~0x7)
  #define MC_RIR_WAY_RANK_MASK		0x7

/*
 * i7core structs
 */

#define NUM_CHANS 3
#define MAX_DIMMS 3		/* Max DIMMS per channel */
#define MAX_MCR_FUNC  4
#define MAX_CHAN_FUNC 3

struct i7core_info {
	u32	mc_control;
	u32	mc_status;
	u32	max_dod;
	u32	ch_map;
};


struct i7core_inject {
	int	enable;

	u32	section;
	u32	type;
	u32	eccmask;

	/* Error address mask */
	int channel, dimm, rank, bank, page, col;
};

struct i7core_channel {
	u32	ranks;
	u32	dimms;
};

struct pci_id_descr {
	int		dev;
	int		func;
	int		dev_id;
	int		optional;
};

struct pci_id_table {
	const struct pci_id_descr	*descr;
	int				n_devs;
};

struct i7core_dev {
	struct list_head	list;
	u8			socket;
	struct pci_dev		**pdev;
	int			n_devs;
	struct mem_ctl_info	*mci;
};

struct i7core_pvt {
	struct pci_dev	*pci_noncore;
	struct pci_dev	*pci_mcr[MAX_MCR_FUNC + 1];
	struct pci_dev	*pci_ch[NUM_CHANS][MAX_CHAN_FUNC + 1];

	struct i7core_dev *i7core_dev;

	struct i7core_info	info;
	struct i7core_inject	inject;
	struct i7core_channel	channel[NUM_CHANS];

	int		ce_count_available;
	int		csrow_map[NUM_CHANS][MAX_DIMMS];

	/* ECC corrected errors counts per udimm */
	unsigned long	udimm_ce_count[MAX_DIMMS];
	int		udimm_last_ce_count[MAX_DIMMS];
	/* ECC corrected errors counts per rdimm */
	unsigned long	rdimm_ce_count[NUM_CHANS][MAX_DIMMS];
	int		rdimm_last_ce_count[NUM_CHANS][MAX_DIMMS];

	bool		is_registered, enable_scrub;

	/* Fifo double buffers */
	struct mce	mce_entry[MCE_LOG_LEN];
	struct mce	mce_outentry[MCE_LOG_LEN];

	/* Fifo in/out counters */
	unsigned	mce_in, mce_out;

	/* Count of errors that could not be retrieved (fifo overrun) */
	unsigned	mce_overrun;

	/* DCLK Frequency used for computing scrub rate */
	int		dclk_freq;

	/* Struct to control EDAC polling */
	struct edac_pci_ctl_info *i7core_pci;
};

#define PCI_DESCR(device, function, device_id)	\
	.dev = (device),			\
	.func = (function),			\
	.dev_id = (device_id)

static const struct pci_id_descr pci_dev_descr_i7core_nehalem[] = {
		/* Memory controller */
	{ PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_I7_MCR)     },
	{ PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_I7_MC_TAD)  },
			/* Exists only for RDIMM */
	{ PCI_DESCR(3, 2, PCI_DEVICE_ID_INTEL_I7_MC_RAS), .optional = 1  },
	{ PCI_DESCR(3, 4, PCI_DEVICE_ID_INTEL_I7_MC_TEST) },

		/* Channel 0 */
	{ PCI_DESCR(4, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH0_CTRL) },
	{ PCI_DESCR(4, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH0_ADDR) },
	{ PCI_DESCR(4, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH0_RANK) },
	{ PCI_DESCR(4, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH0_TC)   },

		/* Channel 1 */
	{ PCI_DESCR(5, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH1_CTRL) },
	{ PCI_DESCR(5, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH1_ADDR) },
	{ PCI_DESCR(5, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH1_RANK) },
	{ PCI_DESCR(5, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH1_TC)   },

		/* Channel 2 */
	{ PCI_DESCR(6, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH2_CTRL) },
	{ PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH2_ADDR) },
	{ PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH2_RANK) },
	{ PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH2_TC)   },

		/* Generic Non-core registers */
	/*
	 * This is the PCI device on i7core and on Xeon 35xx (8086:2c41).
	 * On Xeon 55xx, however, it has a different id (8086:2c40). So,
	 * the probing code needs to test for the other address in case of
	 * failure of this one.
	 */
	{ PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_I7_NONCORE)  },

};

static const struct pci_id_descr pci_dev_descr_lynnfield[] = {
	{ PCI_DESCR( 3, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR)         },
	{ PCI_DESCR( 3, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD)      },
	{ PCI_DESCR( 3, 4, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST)     },

	{ PCI_DESCR( 4, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL) },
	{ PCI_DESCR( 4, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR) },
	{ PCI_DESCR( 4, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK) },
	{ PCI_DESCR( 4, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC)   },

	{ PCI_DESCR( 5, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL) },
	{ PCI_DESCR( 5, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR) },
	{ PCI_DESCR( 5, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK) },
	{ PCI_DESCR( 5, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC)   },

	/*
	 * This PCI device has an alternate address on some
	 * processors, like the Core i7 860.
	 */
	{ PCI_DESCR( 0, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE)     },
};

static const struct pci_id_descr pci_dev_descr_i7core_westmere[] = {
		/* Memory controller */
	{ PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR_REV2)     },
	{ PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD_REV2)  },
			/* Exists only for RDIMM */
	{ PCI_DESCR(3, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_RAS_REV2), .optional = 1  },
	{ PCI_DESCR(3, 4, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST_REV2) },

		/* Channel 0 */
	{ PCI_DESCR(4, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL_REV2) },
	{ PCI_DESCR(4, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR_REV2) },
	{ PCI_DESCR(4, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK_REV2) },
	{ PCI_DESCR(4, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC_REV2)   },

		/* Channel 1 */
	{ PCI_DESCR(5, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL_REV2) },
	{ PCI_DESCR(5, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR_REV2) },
	{ PCI_DESCR(5, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK_REV2) },
	{ PCI_DESCR(5, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC_REV2)   },

		/* Channel 2 */
	{ PCI_DESCR(6, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_CTRL_REV2) },
	{ PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_ADDR_REV2) },
	{ PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_RANK_REV2) },
	{ PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_TC_REV2)   },

		/* Generic Non-core registers */
	{ PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_REV2)  },

};

#define PCI_ID_TABLE_ENTRY(A) { .descr=A, .n_devs = ARRAY_SIZE(A) }
static const struct pci_id_table pci_dev_table[] = {
	PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_nehalem),
	PCI_ID_TABLE_ENTRY(pci_dev_descr_lynnfield),
	PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_westmere),
	{0,}			/* 0 terminated list. */
};

/*
 *	pci_device_id table for which devices we are looking for
 */
static DEFINE_PCI_DEVICE_TABLE(i7core_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
	{0,}			/* 0 terminated list. */
};

/****************************************************************************
			Ancillary status routines
 ****************************************************************************/

	/* MC_CONTROL bits */
#define CH_ACTIVE(pvt, ch)	((pvt)->info.mc_control & (1 << (8 + ch)))
#define ECCx8(pvt)		((pvt)->info.mc_control & (1 << 1))

	/* MC_STATUS bits */
#define ECC_ENABLED(pvt)	((pvt)->info.mc_status & (1 << 4))
#define CH_DISABLED(pvt, ch)	((pvt)->info.mc_status & (1 << ch))

	/* MC_MAX_DOD read functions */
static inline int numdimms(u32 dimms)
{
	return (dimms & 0x3) + 1;
}

static inline int numrank(u32 rank)
{
	static int ranks[4] = { 1, 2, 4, -EINVAL };

	return ranks[rank & 0x3];
}

static inline int numbank(u32 bank)
{
	static int banks[4] = { 4, 8, 16, -EINVAL };

	return banks[bank & 0x3];
}

static inline int numrow(u32 row)
{
	static int rows[8] = {
		1 << 12, 1 << 13, 1 << 14, 1 << 15,
		1 << 16, -EINVAL, -EINVAL, -EINVAL,
	};

	return rows[row & 0x7];
}

static inline int numcol(u32 col)
{
	static int cols[8] = {
		1 << 10, 1 << 11, 1 << 12, -EINVAL,
	};
	return cols[col & 0x3];
}
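
/*
 * Illustrative decode using the helpers above (the value is made up for
 * the example): a MC_DOD_CH_DIMMx read of 0x2a8 has DIMM_PRESENT = 1,
 * MC_DOD_NUMBANK = 1 -> numbank() = 8 banks, MC_DOD_NUMRANK = 1 ->
 * numrank() = 2 ranks, MC_DOD_NUMROW = 2 -> numrow() = 1 << 14 rows and
 * MC_DOD_NUMCOL = 0 -> numcol() = 1 << 10 columns.
 */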

static struct i7core_dev *get_i7core_dev(u8 socket)
{
	struct i7core_dev *i7core_dev;

	list_for_each_entry(i7core_dev, &i7core_edac_list, list) {
		if (i7core_dev->socket == socket)
			return i7core_dev;
	}

	return NULL;
}

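/*
 * Allocate the per-socket control struct, size its pdev array from the
 * probed pci_id_table and add it to the global i7core_edac_list.
 */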
static struct i7core_dev *alloc_i7core_dev(u8 socket,
					   const struct pci_id_table *table)
{
	struct i7core_dev *i7core_dev;

	i7core_dev = kzalloc(sizeof(*i7core_dev), GFP_KERNEL);
	if (!i7core_dev)
		return NULL;

	i7core_dev->pdev = kzalloc(sizeof(*i7core_dev->pdev) * table->n_devs,
				   GFP_KERNEL);
	if (!i7core_dev->pdev) {
		kfree(i7core_dev);
		return NULL;
	}

	i7core_dev->socket = socket;
	i7core_dev->n_devs = table->n_devs;
	list_add_tail(&i7core_dev->list, &i7core_edac_list);

	return i7core_dev;
}

static void free_i7core_dev(struct i7core_dev *i7core_dev)
{
	list_del(&i7core_dev->list);
	kfree(i7core_dev->pdev);
	kfree(i7core_dev);
}

/****************************************************************************
			Memory check routines
 ****************************************************************************/
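/* Find the pci_dev of a socket, given its PCI slot and function numbers. */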
static struct pci_dev *get_pdev_slot_func(u8 socket, unsigned slot,
					  unsigned func)
{
	struct i7core_dev *i7core_dev = get_i7core_dev(socket);
	int i;

	if (!i7core_dev)
		return NULL;

	for (i = 0; i < i7core_dev->n_devs; i++) {
		if (!i7core_dev->pdev[i])
			continue;

		if (PCI_SLOT(i7core_dev->pdev[i]->devfn) == slot &&
		    PCI_FUNC(i7core_dev->pdev[i]->devfn) == func) {
			return i7core_dev->pdev[i];
		}
	}

	return NULL;
}

/**
 * i7core_get_active_channels() - gets the number of channels and csrows
 * @socket:	Quick Path Interconnect socket
 * @channels:	Number of channels that will be returned
 * @csrows:	Number of csrows found
 *
 * Since the EDAC core needs to know in advance the number of available
 * channels and csrows in order to allocate memory for them, probing has to
 * run in two similar steps. The first step, implemented by this function,
 * counts the csrows/channels present on one socket, which is then used to
 * properly size the mci components.
 *
 * It should be noticed that none of the currently available datasheets
 * explain, or even mention, how csrows are seen by the memory controller,
 * so we need to add a fake description for csrows: this driver attributes
 * one DIMM memory to one csrow.
 */
static int i7core_get_active_channels(const u8 socket, unsigned *channels,
				      unsigned *csrows)
{
	struct pci_dev *pdev = NULL;
	int i, j;
	u32 status, control;

	*channels = 0;
	*csrows = 0;

	pdev = get_pdev_slot_func(socket, 3, 0);
	if (!pdev) {
		i7core_printk(KERN_ERR, "Couldn't find socket %d fn 3.0!!!\n",
			      socket);
		return -ENODEV;
	}

	/* Device 3 function 0 reads */
	pci_read_config_dword(pdev, MC_STATUS, &status);
	pci_read_config_dword(pdev, MC_CONTROL, &control);

	for (i = 0; i < NUM_CHANS; i++) {
		u32 dimm_dod[3];
		/* Check if the channel is active */
		if (!(control & (1 << (8 + i))))
			continue;

		/* Check if the channel is disabled */
		if (status & (1 << i))
			continue;

		pdev = get_pdev_slot_func(socket, i + 4, 1);
		if (!pdev) {
			i7core_printk(KERN_ERR, "Couldn't find socket %d "
						"fn %d.%d!!!\n",
						socket, i + 4, 1);
			return -ENODEV;
		}
		/* Devices 4-6 function 1 */
		pci_read_config_dword(pdev,
				MC_DOD_CH_DIMM0, &dimm_dod[0]);
		pci_read_config_dword(pdev,
				MC_DOD_CH_DIMM1, &dimm_dod[1]);
		pci_read_config_dword(pdev,
				MC_DOD_CH_DIMM2, &dimm_dod[2]);

		(*channels)++;

		for (j = 0; j < 3; j++) {
			if (!DIMM_PRESENT(dimm_dod[j]))
				continue;
			(*csrows)++;
		}
	}

	debugf0("Number of active channels on socket %d: %d\n",
		socket, *channels);

	return 0;
}

static int get_dimm_config(struct mem_ctl_info *mci)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	struct csrow_info *csr;
	struct pci_dev *pdev;
	int i, j;
	int csrow = 0;
	enum edac_type mode;
	enum mem_type mtype;
	struct dimm_info *dimm;

	/* Get data from the MC register, function 0 */
	pdev = pvt->pci_mcr[0];
	if (!pdev)
		return -ENODEV;

	/* Device 3 function 0 reads */
	pci_read_config_dword(pdev, MC_CONTROL, &pvt->info.mc_control);
	pci_read_config_dword(pdev, MC_STATUS, &pvt->info.mc_status);
	pci_read_config_dword(pdev, MC_MAX_DOD, &pvt->info.max_dod);
	pci_read_config_dword(pdev, MC_CHANNEL_MAPPER, &pvt->info.ch_map);

	debugf0("QPI %d control=0x%08x status=0x%08x dod=0x%08x map=0x%08x\n",
		pvt->i7core_dev->socket, pvt->info.mc_control, pvt->info.mc_status,
		pvt->info.max_dod, pvt->info.ch_map);

	if (ECC_ENABLED(pvt)) {
		debugf0("ECC enabled with x%d SDCC\n", ECCx8(pvt) ? 8 : 4);
		if (ECCx8(pvt))
			mode = EDAC_S8ECD8ED;
		else
			mode = EDAC_S4ECD4ED;
	} else {
		debugf0("ECC disabled\n");
		mode = EDAC_NONE;
	}

	/* FIXME: need to handle the error codes */
	debugf0("DOD Max limits: DIMMS: %d, %d-ranked, %d-banked "
		"x%x x 0x%x\n",
		numdimms(pvt->info.max_dod),
		numrank(pvt->info.max_dod >> 2),
		numbank(pvt->info.max_dod >> 4),
		numrow(pvt->info.max_dod >> 6),
		numcol(pvt->info.max_dod >> 9));

	for (i = 0; i < NUM_CHANS; i++) {
		u32 data, dimm_dod[3], value[8];

		if (!pvt->pci_ch[i][0])
			continue;

		if (!CH_ACTIVE(pvt, i)) {
			debugf0("Channel %i is not active\n", i);
			continue;
		}
		if (CH_DISABLED(pvt, i)) {
			debugf0("Channel %i is disabled\n", i);
			continue;
		}

		/* Devices 4-6 function 0 */
		pci_read_config_dword(pvt->pci_ch[i][0],
				MC_CHANNEL_DIMM_INIT_PARAMS, &data);

		pvt->channel[i].ranks = (data & QUAD_RANK_PRESENT) ?
						4 : 2;

		if (data & REGISTERED_DIMM)
			mtype = MEM_RDDR3;
		else
			mtype = MEM_DDR3;
#if 0
		if (data & THREE_DIMMS_PRESENT)
			pvt->channel[i].dimms = 3;
		else if (data & SINGLE_QUAD_RANK_PRESENT)
			pvt->channel[i].dimms = 1;
		else
			pvt->channel[i].dimms = 2;
#endif

		/* Devices 4-6 function 1 */
		pci_read_config_dword(pvt->pci_ch[i][1],
				MC_DOD_CH_DIMM0, &dimm_dod[0]);
		pci_read_config_dword(pvt->pci_ch[i][1],
				MC_DOD_CH_DIMM1, &dimm_dod[1]);
		pci_read_config_dword(pvt->pci_ch[i][1],
				MC_DOD_CH_DIMM2, &dimm_dod[2]);

		debugf0("Ch%d phy rd%d, wr%d (0x%08x): "
			"%d ranks, %cDIMMs\n",
			i,
			RDLCH(pvt->info.ch_map, i), WRLCH(pvt->info.ch_map, i),
			data,
			pvt->channel[i].ranks,
			(data & REGISTERED_DIMM) ? 'R' : 'U');

		for (j = 0; j < 3; j++) {
			u32 banks, ranks, rows, cols;
			u32 size, npages;

			if (!DIMM_PRESENT(dimm_dod[j]))
				continue;

			banks = numbank(MC_DOD_NUMBANK(dimm_dod[j]));
			ranks = numrank(MC_DOD_NUMRANK(dimm_dod[j]));
			rows = numrow(MC_DOD_NUMROW(dimm_dod[j]));
			cols = numcol(MC_DOD_NUMCOL(dimm_dod[j]));

			/* DDR3 has 8 I/O banks */
			size = (rows * cols * banks * ranks) >> (20 - 3);
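			/*
			 * Worked example: rows = 1 << 14, cols = 1 << 10,
			 * banks = 8 and ranks = 2 give 1 << 28 locations,
			 * and (1 << 28) >> (20 - 3) = 2048 MiB.
			 */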
706
f4742949 707 pvt->channel[i].dimms++;
854d3349 708
17cb7b0c
MCC
709 debugf0("\tdimm %d %d Mb offset: %x, "
710 "bank: %d, rank: %d, row: %#x, col: %#x\n",
711 j, size,
854d3349
MCC
712 RANKOFFSET(dimm_dod[j]),
713 banks, ranks, rows, cols);
714
e9144601 715 npages = MiB_TO_PAGES(size);
5566cb7c 716
2e5185f7 717 csr = &mci->csrows[csrow];
854d3349 718
2e5185f7 719 pvt->csrow_map[i][j] = csrow;
b4e8f0b6 720
084a4fcc 721 dimm = csr->channels[0].dimm;
a895bf8b
MCC
722 dimm->nr_pages = npages;
723
854d3349
MCC
724 switch (banks) {
725 case 4:
084a4fcc 726 dimm->dtype = DEV_X4;
854d3349
MCC
727 break;
728 case 8:
084a4fcc 729 dimm->dtype = DEV_X8;
854d3349
MCC
730 break;
731 case 16:
084a4fcc 732 dimm->dtype = DEV_X16;
854d3349
MCC
733 break;
734 default:
084a4fcc 735 dimm->dtype = DEV_UNKNOWN;
854d3349
MCC
736 }
737
084a4fcc
MCC
738 snprintf(dimm->label, sizeof(dimm->label),
739 "CPU#%uChannel#%u_DIMM#%u",
740 pvt->i7core_dev->socket, i, j);
741 dimm->grain = 8;
742 dimm->edac_mode = mode;
743 dimm->mtype = mtype;
a895bf8b 744 csrow++;
854d3349 745 }
1c6fed80 746
854d3349
MCC
747 pci_read_config_dword(pdev, MC_SAG_CH_0, &value[0]);
748 pci_read_config_dword(pdev, MC_SAG_CH_1, &value[1]);
749 pci_read_config_dword(pdev, MC_SAG_CH_2, &value[2]);
750 pci_read_config_dword(pdev, MC_SAG_CH_3, &value[3]);
751 pci_read_config_dword(pdev, MC_SAG_CH_4, &value[4]);
752 pci_read_config_dword(pdev, MC_SAG_CH_5, &value[5]);
753 pci_read_config_dword(pdev, MC_SAG_CH_6, &value[6]);
754 pci_read_config_dword(pdev, MC_SAG_CH_7, &value[7]);
17cb7b0c 755 debugf1("\t[%i] DIVBY3\tREMOVED\tOFFSET\n", i);
854d3349 756 for (j = 0; j < 8; j++)
17cb7b0c 757 debugf1("\t\t%#x\t%#x\t%#x\n",
854d3349
MCC
758 (value[j] >> 27) & 0x1,
759 (value[j] >> 24) & 0x7,
80b8ce89 760 (value[j] & ((1 << 24) - 1)));
0b2b7b7e
MCC
761 }
762
a0c36a1f
MCC
763 return 0;
764}
765
/****************************************************************************
			Error insertion routines
 ****************************************************************************/

/* The i7core has independent error injection features per channel.
   However, to keep the code simpler, we don't allow enabling error injection
   on more than one channel.
   Also, since a change to an inject parameter is only applied at enable time,
   we disable error injection on all writes to the sysfs nodes that control
   error injection.
 */
static int disable_inject(const struct mem_ctl_info *mci)
{
	struct i7core_pvt *pvt = mci->pvt_info;

	pvt->inject.enable = 0;

	if (!pvt->pci_ch[pvt->inject.channel][0])
		return -ENODEV;

	pci_write_config_dword(pvt->pci_ch[pvt->inject.channel][0],
				MC_CHANNEL_ERROR_INJECT, 0);

	return 0;
}

/*
 * i7core inject inject.section
 *
 *	accept and store error injection inject.section value
 *	bit 0 - refers to the lower 32-byte half cacheline
 *	bit 1 - refers to the upper 32-byte half cacheline
 */
static ssize_t i7core_inject_section_store(struct mem_ctl_info *mci,
					   const char *data, size_t count)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	unsigned long value;
	int rc;

	if (pvt->inject.enable)
		disable_inject(mci);

	rc = strict_strtoul(data, 10, &value);
	if ((rc < 0) || (value > 3))
		return -EIO;

	pvt->inject.section = (u32) value;
	return count;
}

static ssize_t i7core_inject_section_show(struct mem_ctl_info *mci,
					  char *data)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	return sprintf(data, "0x%08x\n", pvt->inject.section);
}

/*
 * i7core inject.type
 *
 *	accept and store error injection inject.type value
 *	bit 0 - repeat enable - Enable error repetition
 *	bit 1 - inject ECC error
 *	bit 2 - inject parity error
 */
static ssize_t i7core_inject_type_store(struct mem_ctl_info *mci,
					const char *data, size_t count)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	unsigned long value;
	int rc;

	if (pvt->inject.enable)
		disable_inject(mci);

	rc = strict_strtoul(data, 10, &value);
	if ((rc < 0) || (value > 7))
		return -EIO;

	pvt->inject.type = (u32) value;
	return count;
}

static ssize_t i7core_inject_type_show(struct mem_ctl_info *mci,
				       char *data)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	return sprintf(data, "0x%08x\n", pvt->inject.type);
}

/*
 * i7core_inject_inject.eccmask_store
 *
 * The type of error (UE/CE) will depend on the inject.eccmask value:
 *   Any bits set to a 1 will flip the corresponding ECC bit
 *   Correctable errors can be injected by flipping 1 bit or the bits within
 *   a symbol pair (2 consecutive aligned 8-bit pairs - i.e. 7:0 and 15:8 or
 *   23:16 and 31:24). Flipping bits in two symbol pairs will cause an
 *   uncorrectable error to be injected.
 */
static ssize_t i7core_inject_eccmask_store(struct mem_ctl_info *mci,
					   const char *data, size_t count)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	unsigned long value;
	int rc;

	if (pvt->inject.enable)
		disable_inject(mci);

	rc = strict_strtoul(data, 10, &value);
	if (rc < 0)
		return -EIO;

	pvt->inject.eccmask = (u32) value;
	return count;
}

static ssize_t i7core_inject_eccmask_show(struct mem_ctl_info *mci,
					  char *data)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	return sprintf(data, "0x%08x\n", pvt->inject.eccmask);
}

/*
 * i7core_addrmatch
 *
 * Sets the matching criteria used when injecting an error: only writes
 * whose channel, dimm, rank, bank, page and column match the stored values
 * will trigger the injection. Any field set to "any" (-1) is ignored.
 */

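/*
 * DECLARE_ADDR_MATCH() generates the sysfs store/show pair for one
 * inject_addrmatch field. Writing "any" (stored as -1) makes the error
 * injection logic ignore that field when matching addresses.
 */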
#define DECLARE_ADDR_MATCH(param, limit)			\
static ssize_t i7core_inject_store_##param(			\
	struct mem_ctl_info *mci,				\
	const char *data, size_t count)				\
{								\
	struct i7core_pvt *pvt;					\
	long value;						\
	int rc;							\
								\
	debugf1("%s()\n", __func__);				\
	pvt = mci->pvt_info;					\
								\
	if (pvt->inject.enable)					\
		disable_inject(mci);				\
								\
	if (!strcasecmp(data, "any") || !strcasecmp(data, "any\n"))\
		value = -1;					\
	else {							\
		rc = strict_strtoul(data, 10, &value);		\
		if ((rc < 0) || (value >= limit))		\
			return -EIO;				\
	}							\
								\
	pvt->inject.param = value;				\
								\
	return count;						\
}								\
								\
static ssize_t i7core_inject_show_##param(			\
	struct mem_ctl_info *mci,				\
	char *data)						\
{								\
	struct i7core_pvt *pvt;					\
								\
	pvt = mci->pvt_info;					\
	debugf1("%s() pvt=%p\n", __func__, pvt);		\
	if (pvt->inject.param < 0)				\
		return sprintf(data, "any\n");			\
	else							\
		return sprintf(data, "%d\n", pvt->inject.param);\
}

#define ATTR_ADDR_MATCH(param)				\
	{						\
		.attr = {				\
			.name = #param,			\
			.mode = (S_IRUGO | S_IWUSR)	\
		},					\
		.show  = i7core_inject_show_##param,	\
		.store = i7core_inject_store_##param,	\
	}

DECLARE_ADDR_MATCH(channel, 3);
DECLARE_ADDR_MATCH(dimm, 3);
DECLARE_ADDR_MATCH(rank, 4);
DECLARE_ADDR_MATCH(bank, 32);
DECLARE_ADDR_MATCH(page, 0x10000);
DECLARE_ADDR_MATCH(col, 0x4000);

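/*
 * Write a config register and read it back, retrying up to 10 times with a
 * 100 ms delay between attempts; apparently some of these registers do not
 * always latch the value on the first write.
 */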
static int write_and_test(struct pci_dev *dev, const int where, const u32 val)
{
	u32 read;
	int count;

	debugf0("setting pci %02x:%02x.%x reg=%02x value=%08x\n",
		dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
		where, val);

	for (count = 0; count < 10; count++) {
		if (count)
			msleep(100);
		pci_write_config_dword(dev, where, val);
		pci_read_config_dword(dev, where, &read);

		if (read == val)
			return 0;
	}

	i7core_printk(KERN_ERR, "Error during set pci %02x:%02x.%x reg=%02x "
		"write=%08x. Read=%08x\n",
		dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
		where, val, read);

	return -EINVAL;
}

/*
 * This routine prepares the Memory Controller for error injection.
 * The error will be injected when some process tries to write to the
 * memory that matches the given criteria.
 * The criteria can be set in terms of a mask where dimm, rank, bank, page
 * and col can be specified.
 * A -1 value for any of the mask items will make the MCU ignore
 * that matching criterion for error injection.
 *
 * It should be noticed that the error will only happen after a write
 * operation on a memory address that matches the condition. If REPEAT_EN is
 * not enabled in the inject mask, it will produce just one error; otherwise,
 * it will repeat until the inject mask is cleared.
 *
 * FIXME: This routine assumes that the MAXNUMDIMMS value of MC_MAX_DOD
 *    is reliable enough to check if the MC is using the
 *    three channels. However, this is not clear in the datasheet.
 */
static ssize_t i7core_inject_enable_store(struct mem_ctl_info *mci,
					  const char *data, size_t count)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	u32 injectmask;
	u64 mask = 0;
	int  rc;
	long enable;

	if (!pvt->pci_ch[pvt->inject.channel][0])
		return 0;

	rc = strict_strtoul(data, 10, &enable);
	if ((rc < 0))
		return 0;

	if (enable) {
		pvt->inject.enable = 1;
	} else {
		disable_inject(mci);
		return count;
	}

	/* Sets pvt->inject.dimm mask */
	if (pvt->inject.dimm < 0)
		mask |= 1LL << 41;
	else {
		if (pvt->channel[pvt->inject.channel].dimms > 2)
			mask |= (pvt->inject.dimm & 0x3LL) << 35;
		else
			mask |= (pvt->inject.dimm & 0x1LL) << 36;
	}

	/* Sets pvt->inject.rank mask */
	if (pvt->inject.rank < 0)
		mask |= 1LL << 40;
	else {
		if (pvt->channel[pvt->inject.channel].dimms > 2)
			mask |= (pvt->inject.rank & 0x1LL) << 34;
		else
			mask |= (pvt->inject.rank & 0x3LL) << 34;
	}

	/* Sets pvt->inject.bank mask */
	if (pvt->inject.bank < 0)
		mask |= 1LL << 39;
	else
		mask |= (pvt->inject.bank & 0x15LL) << 30;

	/* Sets pvt->inject.page mask */
	if (pvt->inject.page < 0)
		mask |= 1LL << 38;
	else
		mask |= (pvt->inject.page & 0xffff) << 14;

	/* Sets pvt->inject.column mask */
	if (pvt->inject.col < 0)
		mask |= 1LL << 37;
	else
		mask |= (pvt->inject.col & 0x3fff);

	/*
	 * bit    0: REPEAT_EN
	 * bits 1-2: MASK_HALF_CACHELINE
	 * bit    3: INJECT_ECC
	 * bit    4: INJECT_ADDR_PARITY
	 */

	injectmask = (pvt->inject.type & 1) |
		     (pvt->inject.section & 0x3) << 1 |
		     (pvt->inject.type & 0x6) << (3 - 1);
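	/*
	 * For example, inject.type = 5 (repeat + parity) with
	 * inject.section = 1 yields injectmask = 1 | (1 << 1) | (4 << 2)
	 * = 0x13.
	 */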

	/* Unlock writes to registers - this register is write only */
	pci_write_config_dword(pvt->pci_noncore,
			       MC_CFG_CONTROL, 0x2);

	write_and_test(pvt->pci_ch[pvt->inject.channel][0],
			       MC_CHANNEL_ADDR_MATCH, mask);
	write_and_test(pvt->pci_ch[pvt->inject.channel][0],
			       MC_CHANNEL_ADDR_MATCH + 4, mask >> 32L);

	write_and_test(pvt->pci_ch[pvt->inject.channel][0],
			       MC_CHANNEL_ERROR_MASK, pvt->inject.eccmask);

	write_and_test(pvt->pci_ch[pvt->inject.channel][0],
			       MC_CHANNEL_ERROR_INJECT, injectmask);

	/*
	 * This is something undocumented, based on my tests
	 * Without writing 8 to this register, errors aren't injected. Not sure
	 * why.
	 */
	pci_write_config_dword(pvt->pci_noncore,
			       MC_CFG_CONTROL, 8);

	debugf0("Error inject addr match 0x%016llx, ecc 0x%08x,"
		" inject 0x%08x\n",
		mask, pvt->inject.eccmask, injectmask);


	return count;
}

static ssize_t i7core_inject_enable_show(struct mem_ctl_info *mci,
					 char *data)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	u32 injectmask;

	if (!pvt->pci_ch[pvt->inject.channel][0])
		return 0;

	pci_read_config_dword(pvt->pci_ch[pvt->inject.channel][0],
			       MC_CHANNEL_ERROR_INJECT, &injectmask);

	debugf0("Inject error read: 0x%018x\n", injectmask);

	if (injectmask & 0x0c)
		pvt->inject.enable = 1;

	return sprintf(data, "%d\n", pvt->inject.enable);
}

#define DECLARE_COUNTER(param)			\
static ssize_t i7core_show_counter_##param(	\
	struct mem_ctl_info *mci,		\
	char *data)				\
{						\
	struct i7core_pvt *pvt = mci->pvt_info;	\
						\
	debugf1("%s()\n", __func__);		\
	if (!pvt->ce_count_available || (pvt->is_registered)) \
		return sprintf(data, "data unavailable\n"); \
	return sprintf(data, "%lu\n",		\
			pvt->udimm_ce_count[param]); \
}

#define ATTR_COUNTER(param)			\
	{					\
		.attr = {			\
			.name = __stringify(udimm##param),	\
			.mode = (S_IRUGO | S_IWUSR)		\
		},				\
		.show  = i7core_show_counter_##param		\
	}

DECLARE_COUNTER(0);
DECLARE_COUNTER(1);
DECLARE_COUNTER(2);

/*
 * Sysfs struct
 */

static const struct mcidev_sysfs_attribute i7core_addrmatch_attrs[] = {
	ATTR_ADDR_MATCH(channel),
	ATTR_ADDR_MATCH(dimm),
	ATTR_ADDR_MATCH(rank),
	ATTR_ADDR_MATCH(bank),
	ATTR_ADDR_MATCH(page),
	ATTR_ADDR_MATCH(col),
	{ } /* End of list */
};

static const struct mcidev_sysfs_group i7core_inject_addrmatch = {
	.name  = "inject_addrmatch",
	.mcidev_attr = i7core_addrmatch_attrs,
};
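
/*
 * The group above exposes the matching criteria as individual sysfs nodes,
 * e.g. inject_addrmatch/channel and inject_addrmatch/col, presumably under
 * the corresponding mci directory.
 */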

static const struct mcidev_sysfs_attribute i7core_udimm_counters_attrs[] = {
	ATTR_COUNTER(0),
	ATTR_COUNTER(1),
	ATTR_COUNTER(2),
	{ .attr = { .name = NULL } }
};

static const struct mcidev_sysfs_group i7core_udimm_counters = {
	.name  = "all_channel_counts",
	.mcidev_attr = i7core_udimm_counters_attrs,
};

static const struct mcidev_sysfs_attribute i7core_sysfs_rdimm_attrs[] = {
	{
		.attr = {
			.name = "inject_section",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show  = i7core_inject_section_show,
		.store = i7core_inject_section_store,
	}, {
		.attr = {
			.name = "inject_type",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show  = i7core_inject_type_show,
		.store = i7core_inject_type_store,
	}, {
		.attr = {
			.name = "inject_eccmask",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show  = i7core_inject_eccmask_show,
		.store = i7core_inject_eccmask_store,
	}, {
		.grp = &i7core_inject_addrmatch,
	}, {
		.attr = {
			.name = "inject_enable",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show  = i7core_inject_enable_show,
		.store = i7core_inject_enable_store,
	},
	{ }	/* End of list */
};

static const struct mcidev_sysfs_attribute i7core_sysfs_udimm_attrs[] = {
	{
		.attr = {
			.name = "inject_section",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show  = i7core_inject_section_show,
		.store = i7core_inject_section_store,
	}, {
		.attr = {
			.name = "inject_type",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show  = i7core_inject_type_show,
		.store = i7core_inject_type_store,
	}, {
		.attr = {
			.name = "inject_eccmask",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show  = i7core_inject_eccmask_show,
		.store = i7core_inject_eccmask_store,
	}, {
		.grp = &i7core_inject_addrmatch,
	}, {
		.attr = {
			.name = "inject_enable",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show  = i7core_inject_enable_show,
		.store = i7core_inject_enable_store,
	}, {
		.grp = &i7core_udimm_counters,
	},
	{ }	/* End of list */
};

/****************************************************************************
	Device initialization routines: put/get, init/exit
 ****************************************************************************/

/*
 *	i7core_put_all_devices	'put' all the devices that we have
 *				reserved via 'get'
 */
static void i7core_put_devices(struct i7core_dev *i7core_dev)
{
	int i;

	debugf0(__FILE__ ": %s()\n", __func__);
	for (i = 0; i < i7core_dev->n_devs; i++) {
		struct pci_dev *pdev = i7core_dev->pdev[i];
		if (!pdev)
			continue;
		debugf0("Removing dev %02x:%02x.%d\n",
			pdev->bus->number,
			PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
		pci_dev_put(pdev);
	}
}

static void i7core_put_all_devices(void)
{
	struct i7core_dev *i7core_dev, *tmp;

	list_for_each_entry_safe(i7core_dev, tmp, &i7core_edac_list, list) {
		i7core_put_devices(i7core_dev);
		free_i7core_dev(i7core_dev);
	}
}

static void __init i7core_xeon_pci_fixup(const struct pci_id_table *table)
{
	struct pci_dev *pdev = NULL;
	int i;

	/*
	 * On Xeon 55xx, the Intel Quick Path Arch Generic Non-core pci buses
	 * aren't announced by ACPI, so we need to use a legacy scan probe
	 * to detect them.
	 */
	while (table && table->descr) {
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL, table->descr[0].dev_id, NULL);
		if (unlikely(!pdev)) {
			for (i = 0; i < MAX_SOCKET_BUSES; i++)
				pcibios_scan_specific_bus(255-i);
		}
		pci_dev_put(pdev);
		table++;
	}
}

static unsigned i7core_pci_lastbus(void)
{
	int last_bus = 0, bus;
	struct pci_bus *b = NULL;

	while ((b = pci_find_next_bus(b)) != NULL) {
		bus = b->number;
		debugf0("Found bus %d\n", bus);
		if (bus > last_bus)
			last_bus = bus;
	}

	debugf0("Last bus %d\n", last_bus);

	return last_bus;
}

/*
 *	i7core_get_all_devices	Find and perform 'get' operation on the MCH's
 *			device/functions we want to reference for this driver
 *
 *			Need to 'get' device 16 func 1 and func 2
 */
static int i7core_get_onedevice(struct pci_dev **prev,
				const struct pci_id_table *table,
				const unsigned devno,
				const unsigned last_bus)
{
	struct i7core_dev *i7core_dev;
	const struct pci_id_descr *dev_descr = &table->descr[devno];

	struct pci_dev *pdev = NULL;
	u8 bus = 0;
	u8 socket = 0;

	pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
			      dev_descr->dev_id, *prev);

	/*
	 * On Xeon 55xx, the Intel Quickpath Arch Generic Non-core regs
	 * are at addr 8086:2c40, instead of 8086:2c41. So, we need
	 * to probe for the alternate address in case of failure.
	 */
	if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_I7_NONCORE && !pdev)
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
				      PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT, *prev);

	if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE && !pdev)
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
				      PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT,
				      *prev);

	if (!pdev) {
		if (*prev) {
			*prev = pdev;
			return 0;
		}

		if (dev_descr->optional)
			return 0;

		if (devno == 0)
			return -ENODEV;

		i7core_printk(KERN_INFO,
			"Device not found: dev %02x.%d PCI ID %04x:%04x\n",
			dev_descr->dev, dev_descr->func,
			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);

		/* End of list, leave */
		return -ENODEV;
	}
	bus = pdev->bus->number;

	socket = last_bus - bus;

	i7core_dev = get_i7core_dev(socket);
	if (!i7core_dev) {
		i7core_dev = alloc_i7core_dev(socket, table);
		if (!i7core_dev) {
			pci_dev_put(pdev);
			return -ENOMEM;
		}
	}

	if (i7core_dev->pdev[devno]) {
		i7core_printk(KERN_ERR,
			"Duplicated device for "
			"dev %02x:%02x.%d PCI ID %04x:%04x\n",
			bus, dev_descr->dev, dev_descr->func,
			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
		pci_dev_put(pdev);
		return -ENODEV;
	}

	i7core_dev->pdev[devno] = pdev;

	/* Sanity check */
	if (unlikely(PCI_SLOT(pdev->devfn) != dev_descr->dev ||
		     PCI_FUNC(pdev->devfn) != dev_descr->func)) {
		i7core_printk(KERN_ERR,
			"Device PCI ID %04x:%04x "
			"has dev %02x:%02x.%d instead of dev %02x:%02x.%d\n",
			PCI_VENDOR_ID_INTEL, dev_descr->dev_id,
			bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
			bus, dev_descr->dev, dev_descr->func);
		return -ENODEV;
	}

	/* Be sure that the device is enabled */
	if (unlikely(pci_enable_device(pdev) < 0)) {
		i7core_printk(KERN_ERR,
			"Couldn't enable "
			"dev %02x:%02x.%d PCI ID %04x:%04x\n",
			bus, dev_descr->dev, dev_descr->func,
			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
		return -ENODEV;
	}

	debugf0("Detected socket %d dev %02x:%02x.%d PCI ID %04x:%04x\n",
		socket, bus, dev_descr->dev,
		dev_descr->func,
		PCI_VENDOR_ID_INTEL, dev_descr->dev_id);

	/*
	 * As stated in drivers/pci/search.c, the reference count for
	 * @from is always decremented if it is not %NULL. So, as we need
	 * to get all devices up to NULL, we need to do a get for the device.
	 */
	pci_dev_get(pdev);

	*prev = pdev;

	return 0;
}

static int i7core_get_all_devices(void)
{
	int i, rc, last_bus;
	struct pci_dev *pdev = NULL;
	const struct pci_id_table *table = pci_dev_table;

	last_bus = i7core_pci_lastbus();

	while (table && table->descr) {
		for (i = 0; i < table->n_devs; i++) {
			pdev = NULL;
			do {
				rc = i7core_get_onedevice(&pdev, table, i,
							  last_bus);
				if (rc < 0) {
					if (i == 0) {
						i = table->n_devs;
						break;
					}
					i7core_put_all_devices();
					return -ENODEV;
				}
			} while (pdev);
		}
		table++;
	}

	return 0;
}

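/*
 * Associate the PCI functions collected for one socket with the mci private
 * pointers (pci_mcr[], pci_ch[][] and pci_noncore), and detect the processor
 * family from the non-core device id.
 */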
static int mci_bind_devs(struct mem_ctl_info *mci,
			 struct i7core_dev *i7core_dev)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	struct pci_dev *pdev;
	int i, func, slot;
	char *family;

	pvt->is_registered = false;
	pvt->enable_scrub = false;
	for (i = 0; i < i7core_dev->n_devs; i++) {
		pdev = i7core_dev->pdev[i];
		if (!pdev)
			continue;

		func = PCI_FUNC(pdev->devfn);
		slot = PCI_SLOT(pdev->devfn);
		if (slot == 3) {
			if (unlikely(func > MAX_MCR_FUNC))
				goto error;
			pvt->pci_mcr[func] = pdev;
		} else if (likely(slot >= 4 && slot < 4 + NUM_CHANS)) {
			if (unlikely(func > MAX_CHAN_FUNC))
				goto error;
			pvt->pci_ch[slot - 4][func] = pdev;
		} else if (!slot && !func) {
			pvt->pci_noncore = pdev;

			/* Detect the processor family */
			switch (pdev->device) {
			case PCI_DEVICE_ID_INTEL_I7_NONCORE:
				family = "Xeon 35xx/ i7core";
				pvt->enable_scrub = false;
				break;
			case PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT:
				family = "i7-800/i5-700";
				pvt->enable_scrub = false;
				break;
			case PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE:
				family = "Xeon 34xx";
				pvt->enable_scrub = false;
				break;
			case PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT:
				family = "Xeon 55xx";
				pvt->enable_scrub = true;
				break;
			case PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_REV2:
				family = "Xeon 56xx / i7-900";
				pvt->enable_scrub = true;
				break;
			default:
				family = "unknown";
				pvt->enable_scrub = false;
			}
			debugf0("Detected a processor type %s\n", family);
		} else
			goto error;

		debugf0("Associated fn %d.%d, dev = %p, socket %d\n",
			PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
			pdev, i7core_dev->socket);

		if (PCI_SLOT(pdev->devfn) == 3 &&
			PCI_FUNC(pdev->devfn) == 2)
			pvt->is_registered = true;
	}

	return 0;

error:
	i7core_printk(KERN_ERR, "Device %d, function %d "
		      "is out of the expected range\n",
		      slot, func);
	return -EINVAL;
}

/****************************************************************************
			Error check routines
 ****************************************************************************/
static void i7core_rdimm_update_csrow(struct mem_ctl_info *mci,
				      const int chan,
				      const int dimm,
				      const int add)
{
	char *msg;
	struct i7core_pvt *pvt = mci->pvt_info;
	int row = pvt->csrow_map[chan][dimm], i;

	for (i = 0; i < add; i++) {
		msg = kasprintf(GFP_KERNEL, "Corrected error "
				"(Socket=%d channel=%d dimm=%d)",
				pvt->i7core_dev->socket, chan, dimm);

		edac_mc_handle_fbd_ce(mci, row, 0, msg);
		kfree(msg);
	}
}

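/*
 * The hardware CE counters are 15 bits wide (see the DIMM*_COR_ERR masks),
 * so a negative delta means the counter wrapped since the last read and is
 * re-biased before being accumulated.
 */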
static void i7core_rdimm_update_ce_count(struct mem_ctl_info *mci,
					 const int chan,
					 const int new0,
					 const int new1,
					 const int new2)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	int add0 = 0, add1 = 0, add2 = 0;
	/* Updates CE counters if it is not the first time here */
	if (pvt->ce_count_available) {
		/* Updates CE counters */

		add2 = new2 - pvt->rdimm_last_ce_count[chan][2];
		add1 = new1 - pvt->rdimm_last_ce_count[chan][1];
		add0 = new0 - pvt->rdimm_last_ce_count[chan][0];

		if (add2 < 0)
			add2 += 0x7fff;
		pvt->rdimm_ce_count[chan][2] += add2;

		if (add1 < 0)
			add1 += 0x7fff;
		pvt->rdimm_ce_count[chan][1] += add1;

		if (add0 < 0)
			add0 += 0x7fff;
		pvt->rdimm_ce_count[chan][0] += add0;
	} else
		pvt->ce_count_available = 1;

	/* Store the new values */
	pvt->rdimm_last_ce_count[chan][2] = new2;
	pvt->rdimm_last_ce_count[chan][1] = new1;
	pvt->rdimm_last_ce_count[chan][0] = new0;

	/* Update the edac core */
	if (add0 != 0)
		i7core_rdimm_update_csrow(mci, chan, 0, add0);
	if (add1 != 0)
		i7core_rdimm_update_csrow(mci, chan, 1, add1);
	if (add2 != 0)
		i7core_rdimm_update_csrow(mci, chan, 2, add2);

}

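/*
 * Poll the six MC_COR_ECC_CNT registers (device 3, function 2) and fold the
 * per-DIMM deltas into the RDIMM corrected-error counts.
 */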
static void i7core_rdimm_check_mc_ecc_err(struct mem_ctl_info *mci)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	u32 rcv[3][2];
	int i, new0, new1, new2;

	/* Read DEV 3: FUN 2: MC_COR_ECC_CNT regs directly */
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_0,
			      &rcv[0][0]);
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_1,
			      &rcv[0][1]);
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_2,
			      &rcv[1][0]);
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_3,
			      &rcv[1][1]);
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_4,
			      &rcv[2][0]);
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_5,
			      &rcv[2][1]);
	for (i = 0 ; i < 3; i++) {
		debugf3("MC_COR_ECC_CNT%d = 0x%x; MC_COR_ECC_CNT%d = 0x%x\n",
			(i * 2), rcv[i][0], (i * 2) + 1, rcv[i][1]);
		/* If the channel has 3 dimms */
		if (pvt->channel[i].dimms > 2) {
			new0 = DIMM_BOT_COR_ERR(rcv[i][0]);
			new1 = DIMM_TOP_COR_ERR(rcv[i][0]);
			new2 = DIMM_BOT_COR_ERR(rcv[i][1]);
		} else {
			new0 = DIMM_TOP_COR_ERR(rcv[i][0]) +
				DIMM_BOT_COR_ERR(rcv[i][0]);
			new1 = DIMM_TOP_COR_ERR(rcv[i][1]) +
				DIMM_BOT_COR_ERR(rcv[i][1]);
			new2 = 0;
		}

		i7core_rdimm_update_ce_count(mci, i, new0, new1, new2);
	}
}

1663/* This function is based on the device 3 function 4 registers as described on:
1664 * Intel Xeon Processor 5500 Series Datasheet Volume 2
1665 * http://www.intel.com/Assets/PDF/datasheet/321322.pdf
1666 * also available at:
1667 * http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
1668 */
f4742949 1669static void i7core_udimm_check_mc_ecc_err(struct mem_ctl_info *mci)
442305b1
MCC
1670{
1671 struct i7core_pvt *pvt = mci->pvt_info;
1672 u32 rcv1, rcv0;
1673 int new0, new1, new2;
1674
f4742949 1675 if (!pvt->pci_mcr[4]) {
b990538a 1676 debugf0("%s MCR registers not found\n", __func__);
442305b1
MCC
1677 return;
1678 }
1679
b4e8f0b6 1680 /* Corrected test errors */
f4742949
MCC
1681 pci_read_config_dword(pvt->pci_mcr[4], MC_TEST_ERR_RCV1, &rcv1);
1682 pci_read_config_dword(pvt->pci_mcr[4], MC_TEST_ERR_RCV0, &rcv0);
442305b1
MCC
1683
1684 /* Store the new values */
1685 new2 = DIMM2_COR_ERR(rcv1);
1686 new1 = DIMM1_COR_ERR(rcv0);
1687 new0 = DIMM0_COR_ERR(rcv0);
1688
	/* Update CE counters, unless this is the first time here */
	if (pvt->ce_count_available) {
		/* Update CE counters */
		int add0, add1, add2;

		add2 = new2 - pvt->udimm_last_ce_count[2];
		add1 = new1 - pvt->udimm_last_ce_count[1];
		add0 = new0 - pvt->udimm_last_ce_count[0];

		if (add2 < 0)
			add2 += 0x7fff;
		pvt->udimm_ce_count[2] += add2;

		if (add1 < 0)
			add1 += 0x7fff;
		pvt->udimm_ce_count[1] += add1;

		if (add0 < 0)
			add0 += 0x7fff;
		pvt->udimm_ce_count[0] += add0;

		if (add0 | add1 | add2)
			i7core_printk(KERN_ERR, "New Corrected error(s): "
				      "dimm0: +%d, dimm1: +%d, dimm2: +%d\n",
				      add0, add1, add2);
	} else
		pvt->ce_count_available = 1;

	/* Store the new values */
	pvt->udimm_last_ce_count[2] = new2;
	pvt->udimm_last_ce_count[1] = new1;
	pvt->udimm_last_ce_count[0] = new0;
}

/*
 * According to Tables E-11 and E-12 of chapter E.3.3 of the Intel 64 and
 * IA-32 Architectures Software Developer's Manual Volume 3B, Nehalem parts
 * are identified as family 0x06, model 0x1a.
 *
 * The MCA registers used here are the following ones:
 *     struct mce field	MCA Register
 *     m->status	MSR_IA32_MC8_STATUS
 *     m->addr		MSR_IA32_MC8_ADDR
 *     m->misc		MSR_IA32_MC8_MISC
 * In the case of Nehalem, the error information is encoded in the .status
 * and .misc fields.
 */
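/*
 * Editor's note: the field extraction below implies this bit layout,
 * recovered from the shifts and masks rather than restated from the
 * datasheet:
 *
 *	m->status[6:4]		operation type ("optype")
 *	m->status[24:16]	error code (masked via 0x1ff0000)
 *	m->status[52:38]	15-bit corrected error count
 *	m->misc[17:16]		DIMM
 *	m->misc[19:18]		channel
 *	m->misc[63:32]		syndrome
 */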
static void i7core_mce_output_error(struct mem_ctl_info *mci,
				    const struct mce *m)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	char *type, *optype, *err, *msg;
	unsigned long error = m->status & 0x1ff0000l;
	u32 optypenum = (m->status >> 4) & 0x07;
	u32 core_err_cnt = (m->status >> 38) & 0x7fff;
	u32 dimm = (m->misc >> 16) & 0x3;
	u32 channel = (m->misc >> 18) & 0x3;
	u32 syndrome = m->misc >> 32;
	u32 errnum = find_first_bit(&error, 32);
	int csrow;

	if (m->mcgstatus & 1)
		type = "FATAL";
	else
		type = "NON_FATAL";

	switch (optypenum) {
	case 0:
		optype = "generic undef request";
		break;
	case 1:
		optype = "read error";
		break;
	case 2:
		optype = "write error";
		break;
	case 3:
		optype = "addr/cmd error";
		break;
	case 4:
		optype = "scrubbing error";
		break;
	default:
		optype = "reserved";
		break;
	}

	switch (errnum) {
	case 16:
		err = "read ECC error";
		break;
	case 17:
		err = "RAS ECC error";
		break;
	case 18:
		err = "write parity error";
		break;
	case 19:
		err = "redundancy loss";
		break;
	case 20:
		err = "reserved";
		break;
	case 21:
		err = "memory range error";
		break;
	case 22:
		err = "RTID out of range";
		break;
	case 23:
		err = "address parity error";
		break;
	case 24:
		err = "byte enable parity error";
		break;
	default:
		err = "unknown";
	}

	/* FIXME: should convert addr into bank and rank information */
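	/*
	 * Editor's note: GFP_ATOMIC is used below on the assumption that
	 * this path can also run from the MCE notifier for fatal errors
	 * (see i7core_mce_check_error()), where sleeping allocations are
	 * not allowed.
	 */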
	msg = kasprintf(GFP_ATOMIC,
		"%s (addr = 0x%08llx, cpu=%d, Dimm=%d, Channel=%d, "
		"syndrome=0x%08x, count=%d, Err=%08llx:%08llx (%s: %s))\n",
		type, (long long) m->addr, m->cpu, dimm, channel,
		syndrome, core_err_cnt, (long long)m->status,
		(long long)m->misc, optype, err);

	debugf0("%s", msg);

	csrow = pvt->csrow_map[channel][dimm];

	/* Call the helper to output message */
	if (m->mcgstatus & 1)
		edac_mc_handle_fbd_ue(mci, csrow, 0,
				0 /* FIXME: should be channel here */, msg);
	else if (!pvt->is_registered)
		edac_mc_handle_fbd_ce(mci, csrow,
				0 /* FIXME: should be channel here */, msg);

	kfree(msg);
}

/*
 * i7core_check_error	Retrieve and process errors reported by the
 *			hardware. Called by the Core module.
 */
static void i7core_check_error(struct mem_ctl_info *mci)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	int i;
	unsigned count = 0;
	struct mce *m;

	/*
	 * MCE first step: Copy all mce errors into a temporary buffer.
	 * We use double buffering here, to reduce the risk of
	 * losing an error.
	 */
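	/*
	 * Editor's note: mce_entry[] is a lock-free single-producer/
	 * single-consumer ring: the notifier advances mce_out (producer),
	 * this poll routine advances mce_in (consumer), and the
	 * smp_rmb()/smp_wmb() pairs order the index updates against the
	 * data copies. Pending entries are
	 *	count = (mce_out + MCE_LOG_LEN - mce_in) % MCE_LOG_LEN;
	 * e.g. with MCE_LOG_LEN = 32, mce_in = 30 and mce_out = 2, count
	 * is (2 + 32 - 30) % 32 = 4, copied in two chunks below because
	 * the live region wraps past the end of the array.
	 */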
	smp_rmb();
	count = (pvt->mce_out + MCE_LOG_LEN - pvt->mce_in)
		% MCE_LOG_LEN;
	if (!count)
		goto check_ce_error;

	m = pvt->mce_outentry;
	if (pvt->mce_in + count > MCE_LOG_LEN) {
		unsigned l = MCE_LOG_LEN - pvt->mce_in;

		memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * l);
		smp_wmb();
		pvt->mce_in = 0;
		count -= l;
		m += l;
	}
	memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * count);
	smp_wmb();
	pvt->mce_in += count;

	smp_rmb();
	if (pvt->mce_overrun) {
		i7core_printk(KERN_ERR, "Lost %d memory errors\n",
			      pvt->mce_overrun);
		smp_wmb();
		pvt->mce_overrun = 0;
	}

	/*
	 * MCE second step: parse errors and display
	 */
	for (i = 0; i < count; i++)
		i7core_mce_output_error(mci, &pvt->mce_outentry[i]);

	/*
	 * Now, let's increment CE error counts
	 */
check_ce_error:
	if (!pvt->is_registered)
		i7core_udimm_check_mc_ecc_err(mci);
	else
		i7core_rdimm_check_mc_ecc_err(mci);
}

/*
 * i7core_mce_check_error	Replicates the mcelog routine to get errors.
 *				This routine simply queues mce errors and
 *				returns; the error itself is handled later
 *				by i7core_check_error.
 * WARNING: As this routine may be called at NMI time, extra care should
 * be taken to avoid deadlocks, and to be as fast as possible.
 */
static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val,
				  void *data)
{
	struct mce *mce = (struct mce *)data;
	struct i7core_dev *i7_dev;
	struct mem_ctl_info *mci;
	struct i7core_pvt *pvt;

	i7_dev = get_i7core_dev(mce->socketid);
	if (!i7_dev)
		return NOTIFY_BAD;

	mci = i7_dev->mci;
	pvt = mci->pvt_info;

	/*
	 * Just let mcelog handle it if the error is
	 * outside the memory controller
	 */
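	/*
	 * Editor's note on the check below: the low 16 bits of the status
	 * register hold the MCA error code, and shifting right by 7 and
	 * comparing with 1 selects codes of the form 0b0000_0000_1xxx_xxxx
	 * (0x0080-0x00ff), i.e. the memory-controller error code range this
	 * driver knows how to decode.
	 */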
	if (((mce->status & 0xffff) >> 7) != 1)
		return NOTIFY_DONE;

	/* Bank 8 registers are the only ones that we know how to handle */
	if (mce->bank != 8)
		return NOTIFY_DONE;

#ifdef CONFIG_SMP
	/* Only handle it if it comes from the right MC controller */
	if (mce->socketid != pvt->i7core_dev->socket)
		return NOTIFY_DONE;
#endif

	smp_rmb();
	if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) {
		smp_wmb();
		pvt->mce_overrun++;
		return NOTIFY_DONE;
	}

	/* Copy the memory error into the ringbuffer */
	memcpy(&pvt->mce_entry[pvt->mce_out], mce, sizeof(*mce));
	smp_wmb();
	pvt->mce_out = (pvt->mce_out + 1) % MCE_LOG_LEN;

	/* Handle fatal errors immediately */
	if (mce->mcgstatus & 1)
		i7core_check_error(mci);

	/* Advise mcelog that the errors were handled */
	return NOTIFY_STOP;
}

static struct notifier_block i7_mce_dec = {
	.notifier_call	= i7core_mce_check_error,
};

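/*
 * Editor's note: i7_mce_dec is hooked into the MCE decode chain via
 * mce_register_decode_chain() in i7core_register_mci() below, and removed
 * again with mce_unregister_decode_chain() in i7core_unregister_mci().
 */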
struct memdev_dmi_entry {
	u8 type;
	u8 length;
	u16 handle;
	u16 phys_mem_array_handle;
	u16 mem_err_info_handle;
	u16 total_width;
	u16 data_width;
	u16 size;
	u8 form;
	u8 device_set;
	u8 device_locator;
	u8 bank_locator;
	u8 memory_type;
	u16 type_detail;
	u16 speed;
	u8 manufacturer;
	u8 serial_number;
	u8 asset_tag;
	u8 part_number;
	u8 attributes;
	u32 extended_size;
	u16 conf_mem_clk_speed;
} __attribute__((__packed__));

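/*
 * Editor's note: decode_dclk() below computes field offsets by pointer
 * arithmetic against &entry->type; that is equivalent to
 * offsetof(struct memdev_dmi_entry, conf_mem_clk_speed) and is compared
 * against dh->length so that fields added by later SMBIOS versions are
 * only read when the BIOS actually provides them.
 */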
/*
 * Decode the DRAM Clock Frequency. Be paranoid: make sure that all
 * memory devices report the same speed, and if they don't, consider
 * all speeds to be invalid.
 */
static void decode_dclk(const struct dmi_header *dh, void *_dclk_freq)
{
	int *dclk_freq = _dclk_freq;
	u16 dmi_mem_clk_speed;

	if (*dclk_freq == -1)
		return;

	if (dh->type == DMI_ENTRY_MEM_DEVICE) {
		struct memdev_dmi_entry *memdev_dmi_entry =
			(struct memdev_dmi_entry *)dh;
		unsigned long conf_mem_clk_speed_offset =
			(unsigned long)&memdev_dmi_entry->conf_mem_clk_speed -
			(unsigned long)&memdev_dmi_entry->type;
		unsigned long speed_offset =
			(unsigned long)&memdev_dmi_entry->speed -
			(unsigned long)&memdev_dmi_entry->type;

		/* Check that a DIMM is present */
		if (memdev_dmi_entry->size == 0)
			return;

		/*
		 * Pick the configured speed if it's available, otherwise
		 * pick the DIMM speed; failing both, we don't have a speed.
		 */
		if (memdev_dmi_entry->length > conf_mem_clk_speed_offset) {
			dmi_mem_clk_speed =
				memdev_dmi_entry->conf_mem_clk_speed;
		} else if (memdev_dmi_entry->length > speed_offset) {
			dmi_mem_clk_speed = memdev_dmi_entry->speed;
		} else {
			*dclk_freq = -1;
			return;
		}

		if (*dclk_freq == 0) {
			/* First pass, speed was 0 */
			if (dmi_mem_clk_speed > 0) {
				/* Set speed if a valid speed is read */
				*dclk_freq = dmi_mem_clk_speed;
			} else {
				/* Otherwise we don't have a valid speed */
				*dclk_freq = -1;
			}
		} else if (*dclk_freq > 0 &&
			   *dclk_freq != dmi_mem_clk_speed) {
			/*
			 * If we have a speed, check that all DIMMs are the
			 * same speed, otherwise set the speed as invalid.
			 */
			*dclk_freq = -1;
		}
	}
}

/*
 * The default DCLK frequency is used as a fallback if we
 * fail to find anything reliable in the DMI. The value
 * is taken straight from the datasheet.
 */
#define DEFAULT_DCLK_FREQ 800

static int get_dclk_freq(void)
{
	int dclk_freq = 0;

	dmi_walk(decode_dclk, (void *)&dclk_freq);

	if (dclk_freq < 1)
		return DEFAULT_DCLK_FREQ;

	return dclk_freq;
}

/*
 * set_sdram_scrub_rate		This routine sets the byte/sec bandwidth
 *				scrub rate in hardware, according to the
 *				SCRUBINTERVAL formula found in the datasheet.
 */
static int set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 new_bw)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	struct pci_dev *pdev;
	u32 dw_scrub;
	u32 dw_ssr;

	/* Get data from the MC register, function 2 */
	pdev = pvt->pci_mcr[2];
	if (!pdev)
		return -ENODEV;

	pci_read_config_dword(pdev, MC_SCRUB_CONTROL, &dw_scrub);

	if (new_bw == 0) {
		/* Prepare to disable patrol scrub */
		dw_scrub &= ~STARTSCRUB;
		/* Stop the patrol scrub engine */
		write_and_test(pdev, MC_SCRUB_CONTROL,
			       dw_scrub & ~SCRUBINTERVAL_MASK);

		/* Get current status of scrub rate and set bit to disable */
		pci_read_config_dword(pdev, MC_SSRCONTROL, &dw_ssr);
		dw_ssr &= ~SSR_MODE_MASK;
		dw_ssr |= SSR_MODE_DISABLE;
	} else {
		const int cache_line_size = 64;
		const u32 freq_dclk_mhz = pvt->dclk_freq;
		unsigned long long scrub_interval;
		/*
		 * Translate the desired scrub rate to a register value and
		 * program the corresponding register value.
		 */
		scrub_interval = (unsigned long long)freq_dclk_mhz *
			cache_line_size * 1000000;
		do_div(scrub_interval, new_bw);
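		/*
		 * Editor's note, worked example (assuming the default
		 * 800 MHz DCLK): scrub_interval is the number of DCLK
		 * cycles between two scrubbed cache lines, so asking for
		 * new_bw = 5,120,000 bytes/sec gives
		 *	800 * 64 * 1000000 / 5120000 = 10000 cycles.
		 * The inverse computation in get_sdram_scrub_rate() maps
		 * that register value back to 5,120,000 bytes/sec.
		 */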

		if (!scrub_interval || scrub_interval > SCRUBINTERVAL_MASK)
			return -EINVAL;

		dw_scrub = SCRUBINTERVAL_MASK & scrub_interval;

		/* Start the patrol scrub engine */
		pci_write_config_dword(pdev, MC_SCRUB_CONTROL,
				       STARTSCRUB | dw_scrub);

		/* Get current status of scrub rate and set bit to enable */
		pci_read_config_dword(pdev, MC_SSRCONTROL, &dw_ssr);
		dw_ssr &= ~SSR_MODE_MASK;
		dw_ssr |= SSR_MODE_ENABLE;
	}
	/* Disable or enable scrubbing */
	pci_write_config_dword(pdev, MC_SSRCONTROL, dw_ssr);

	return new_bw;
}

/*
 * get_sdram_scrub_rate		This routine converts the current scrub rate
 *				value into byte/sec bandwidth, according to
 *				the SCRUBINTERVAL formula found in the
 *				datasheet.
 */
static int get_sdram_scrub_rate(struct mem_ctl_info *mci)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	struct pci_dev *pdev;
	const u32 cache_line_size = 64;
	const u32 freq_dclk_mhz = pvt->dclk_freq;
	unsigned long long scrub_rate;
	u32 scrubval;

	/* Get data from the MC register, function 2 */
	pdev = pvt->pci_mcr[2];
	if (!pdev)
		return -ENODEV;

	/* Get current scrub control data */
	pci_read_config_dword(pdev, MC_SCRUB_CONTROL, &scrubval);

	/* Mask highest 8-bits to 0 */
	scrubval &= SCRUBINTERVAL_MASK;
	if (!scrubval)
		return 0;

	/* Calculate scrub rate value into byte/sec bandwidth */
	scrub_rate = (unsigned long long)freq_dclk_mhz *
		1000000 * cache_line_size;
	do_div(scrub_rate, scrubval);
	return (int)scrub_rate;
}

static void enable_sdram_scrub_setting(struct mem_ctl_info *mci)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	u32 pci_lock;

	/* Unlock writes to pci registers */
	pci_read_config_dword(pvt->pci_noncore, MC_CFG_CONTROL, &pci_lock);
	pci_lock &= ~0x3;
	pci_write_config_dword(pvt->pci_noncore, MC_CFG_CONTROL,
			       pci_lock | MC_CFG_UNLOCK);

	mci->set_sdram_scrub_rate = set_sdram_scrub_rate;
	mci->get_sdram_scrub_rate = get_sdram_scrub_rate;
}

static void disable_sdram_scrub_setting(struct mem_ctl_info *mci)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	u32 pci_lock;

	/* Lock writes to pci registers */
	pci_read_config_dword(pvt->pci_noncore, MC_CFG_CONTROL, &pci_lock);
	pci_lock &= ~0x3;
	pci_write_config_dword(pvt->pci_noncore, MC_CFG_CONTROL,
			       pci_lock | MC_CFG_LOCK);
}

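/*
 * Editor's note: the two helpers above duplicate the same MC_CFG_CONTROL
 * read-modify-write sequence. A minimal sketch of how that could be
 * factored out (this helper is illustrative and not part of the original
 * driver; "mode" would be MC_CFG_UNLOCK or MC_CFG_LOCK):
 *
 *	static void i7core_set_cfg_lock(struct i7core_pvt *pvt, u32 mode)
 *	{
 *		u32 pci_lock;
 *
 *		pci_read_config_dword(pvt->pci_noncore, MC_CFG_CONTROL,
 *				      &pci_lock);
 *		pci_lock &= ~0x3;
 *		pci_write_config_dword(pvt->pci_noncore, MC_CFG_CONTROL,
 *				       pci_lock | mode);
 *	}
 */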
static void i7core_pci_ctl_create(struct i7core_pvt *pvt)
{
	pvt->i7core_pci = edac_pci_create_generic_ctl(
						&pvt->i7core_dev->pdev[0]->dev,
						EDAC_MOD_STR);
	if (unlikely(!pvt->i7core_pci))
		i7core_printk(KERN_WARNING,
			      "Unable to setup PCI error report via EDAC\n");
}

static void i7core_pci_ctl_release(struct i7core_pvt *pvt)
{
	if (likely(pvt->i7core_pci))
		edac_pci_release_generic_ctl(pvt->i7core_pci);
	else
		i7core_printk(KERN_ERR,
			      "Couldn't find mem_ctl_info for socket %d\n",
			      pvt->i7core_dev->socket);
	pvt->i7core_pci = NULL;
}

static void i7core_unregister_mci(struct i7core_dev *i7core_dev)
{
	struct mem_ctl_info *mci = i7core_dev->mci;
	struct i7core_pvt *pvt;

	if (unlikely(!mci || !mci->pvt_info)) {
		debugf0("MC: " __FILE__ ": %s(): dev = %p\n",
			__func__, &i7core_dev->pdev[0]->dev);

		i7core_printk(KERN_ERR, "Couldn't find mci handler\n");
		return;
	}

	pvt = mci->pvt_info;

	debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n",
		__func__, mci, &i7core_dev->pdev[0]->dev);

	/* Disable scrubrate setting */
	if (pvt->enable_scrub)
		disable_sdram_scrub_setting(mci);

	mce_unregister_decode_chain(&i7_mce_dec);

	/* Disable EDAC polling */
	i7core_pci_ctl_release(pvt);

	/* Remove MC sysfs nodes */
	edac_mc_del_mc(mci->dev);

	debugf1("%s: free mci struct\n", mci->ctl_name);
	kfree(mci->ctl_name);
	edac_mc_free(mci);
	i7core_dev->mci = NULL;
}

static int i7core_register_mci(struct i7core_dev *i7core_dev)
{
	struct mem_ctl_info *mci;
	struct i7core_pvt *pvt;
	int rc, channels, csrows;

	/* Check the number of active and not disabled channels */
	rc = i7core_get_active_channels(i7core_dev->socket, &channels, &csrows);
	if (unlikely(rc < 0))
		return rc;

	/* allocate a new MC control structure */
	mci = edac_mc_alloc(sizeof(*pvt), csrows, channels, i7core_dev->socket);
	if (unlikely(!mci))
		return -ENOMEM;

	debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n",
		__func__, mci, &i7core_dev->pdev[0]->dev);

	pvt = mci->pvt_info;
	memset(pvt, 0, sizeof(*pvt));

	/* Associate i7core_dev and mci for future usage */
	pvt->i7core_dev = i7core_dev;
	i7core_dev->mci = mci;

	/*
	 * FIXME: how to handle RDDR3 at MCI level? It is possible to have
	 * mixed RDDR3/UDDR3 with Nehalem, provided that they are on different
	 * memory channels.
	 */
	mci->mtype_cap = MEM_FLAG_DDR3;
	mci->edac_ctl_cap = EDAC_FLAG_NONE;
	mci->edac_cap = EDAC_FLAG_NONE;
	mci->mod_name = "i7core_edac.c";
	mci->mod_ver = I7CORE_REVISION;
	mci->ctl_name = kasprintf(GFP_KERNEL, "i7 core #%d",
				  i7core_dev->socket);
	mci->dev_name = pci_name(i7core_dev->pdev[0]);
	mci->ctl_page_to_phys = NULL;

	/* Store pci devices at mci for faster access */
	rc = mci_bind_devs(mci, i7core_dev);
	if (unlikely(rc < 0))
		goto fail0;

	if (pvt->is_registered)
		mci->mc_driver_sysfs_attributes = i7core_sysfs_rdimm_attrs;
	else
		mci->mc_driver_sysfs_attributes = i7core_sysfs_udimm_attrs;

	/* Get dimm basic config */
	get_dimm_config(mci);
	/* record ptr to the generic device */
	mci->dev = &i7core_dev->pdev[0]->dev;
	/* Set the function pointer to an actual operation function */
	mci->edac_check = i7core_check_error;

	/* Enable scrubrate setting */
	if (pvt->enable_scrub)
		enable_sdram_scrub_setting(mci);

	/* add this new MC control structure to EDAC's list of MCs */
	if (unlikely(edac_mc_add_mc(mci))) {
		debugf0("MC: " __FILE__
			": %s(): failed edac_mc_add_mc()\n", __func__);
		/* FIXME: perhaps some code should go here that disables error
		 * reporting if we just enabled it
		 */

		rc = -EINVAL;
		goto fail0;
	}

	/* Default error mask is any memory */
	pvt->inject.channel = 0;
	pvt->inject.dimm = -1;
	pvt->inject.rank = -1;
	pvt->inject.bank = -1;
	pvt->inject.page = -1;
	pvt->inject.col = -1;

	/* allocate generic PCI control info */
	i7core_pci_ctl_create(pvt);

	/* DCLK for scrub rate setting */
	pvt->dclk_freq = get_dclk_freq();

	mce_register_decode_chain(&i7_mce_dec);

	return 0;

fail0:
	kfree(mci->ctl_name);
	edac_mc_free(mci);
	i7core_dev->mci = NULL;
	return rc;
}

/*
 * i7core_probe	Probe for ONE instance of the device to see if it is
 *		present.
 * return:
 *	0 for FOUND a device
 *	< 0 for error code
 */

static int __devinit i7core_probe(struct pci_dev *pdev,
				  const struct pci_device_id *id)
{
	int rc, count = 0;
	struct i7core_dev *i7core_dev;

	/* get the pci devices we want to reserve for our use */
	mutex_lock(&i7core_edac_lock);

	/*
	 * All memory controllers are allocated at the first pass.
	 */
	if (unlikely(probed >= 1)) {
		mutex_unlock(&i7core_edac_lock);
		return -ENODEV;
	}
	probed++;

	rc = i7core_get_all_devices();
	if (unlikely(rc < 0))
		goto fail0;

	list_for_each_entry(i7core_dev, &i7core_edac_list, list) {
		count++;
		rc = i7core_register_mci(i7core_dev);
		if (unlikely(rc < 0))
			goto fail1;
	}

	/*
	 * Nehalem-EX uses a different memory controller. However, as the
	 * memory controller is not visible on some Nehalem/Nehalem-EP, we
	 * need to indirectly probe via an X58 PCI device. The same devices
	 * are found on (some) Nehalem-EX. So, on those machines, the
	 * probe routine needs to return -ENODEV, as the actual Memory
	 * Controller registers won't be detected.
	 */
	if (!count) {
		rc = -ENODEV;
		goto fail1;
	}

	i7core_printk(KERN_INFO,
		      "Driver loaded, %d memory controller(s) found.\n",
		      count);

	mutex_unlock(&i7core_edac_lock);
	return 0;

fail1:
	list_for_each_entry(i7core_dev, &i7core_edac_list, list)
		i7core_unregister_mci(i7core_dev);

	i7core_put_all_devices();
fail0:
	mutex_unlock(&i7core_edac_lock);
	return rc;
}

/*
 * i7core_remove	destructor for one instance of device
 *
 */
static void __devexit i7core_remove(struct pci_dev *pdev)
{
	struct i7core_dev *i7core_dev;

	debugf0(__FILE__ ": %s()\n", __func__);

	/*
	 * There is a subtlety here: the pdev value for removal will be
	 * wrong, since it will point to the X58 register used to detect
	 * that the machine is a Nehalem or newer design. However, due to
	 * the way several PCI devices are grouped together to provide MC
	 * functionality, we need to use a different method for releasing
	 * the devices.
	 */

	mutex_lock(&i7core_edac_lock);

	if (unlikely(!probed)) {
		mutex_unlock(&i7core_edac_lock);
		return;
	}

	list_for_each_entry(i7core_dev, &i7core_edac_list, list)
		i7core_unregister_mci(i7core_dev);

	/* Release PCI resources */
	i7core_put_all_devices();

	probed--;

	mutex_unlock(&i7core_edac_lock);
}

MODULE_DEVICE_TABLE(pci, i7core_pci_tbl);

/*
 * i7core_driver	pci_driver structure for this module
 *
 */
static struct pci_driver i7core_driver = {
	.name     = "i7core_edac",
	.probe    = i7core_probe,
	.remove   = __devexit_p(i7core_remove),
	.id_table = i7core_pci_tbl,
};

/*
 * i7core_init		Module entry function
 *			Try to initialize this module for its devices
 */
static int __init i7core_init(void)
{
	int pci_rc;

	debugf2("MC: " __FILE__ ": %s()\n", __func__);

	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
	opstate_init();

	if (use_pci_fixup)
		i7core_xeon_pci_fixup(pci_dev_table);

	pci_rc = pci_register_driver(&i7core_driver);

	if (pci_rc >= 0)
		return 0;

	i7core_printk(KERN_ERR, "Failed to register device with error %d.\n",
		      pci_rc);

	return pci_rc;
}

/*
 * i7core_exit()	Module exit function
 *			Unregister the driver
 */
static void __exit i7core_exit(void)
{
	debugf2("MC: " __FILE__ ": %s()\n", __func__);
	pci_unregister_driver(&i7core_driver);
}

module_init(i7core_init);
module_exit(i7core_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
MODULE_DESCRIPTION("MC Driver for Intel i7 Core memory controllers - "
		   I7CORE_REVISION);

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");