i7core_edac: Fix order of lines in i7core_register_mci
[GitHub/LineageOS/android_kernel_samsung_universal7580.git] / drivers / edac / i7core_edac.c
CommitLineData
52707f91
MCC
1/* Intel i7 core/Nehalem Memory Controller kernel module
2 *
 * This driver supports the memory controllers found on the Intel
4 * processor families i7core, i7core 7xx/8xx, i5core, Xeon 35xx,
5 * Xeon 55xx and Xeon 56xx also known as Nehalem, Nehalem-EP, Lynnfield
6 * and Westmere-EP.
a0c36a1f
MCC
7 *
8 * This file may be distributed under the terms of the
9 * GNU General Public License version 2 only.
10 *
52707f91 11 * Copyright (c) 2009-2010 by:
a0c36a1f
MCC
12 * Mauro Carvalho Chehab <mchehab@redhat.com>
13 *
14 * Red Hat Inc. http://www.redhat.com
15 *
16 * Forked and adapted from the i5400_edac driver
17 *
18 * Based on the following public Intel datasheets:
19 * Intel Core i7 Processor Extreme Edition and Intel Core i7 Processor
20 * Datasheet, Volume 2:
21 * http://download.intel.com/design/processor/datashts/320835.pdf
22 * Intel Xeon Processor 5500 Series Datasheet Volume 2
23 * http://www.intel.com/Assets/PDF/datasheet/321322.pdf
24 * also available at:
25 * http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
26 */
27
a0c36a1f
MCC
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/pci.h>
31#include <linux/pci_ids.h>
32#include <linux/slab.h>
3b918c12 33#include <linux/delay.h>
a0c36a1f
MCC
34#include <linux/edac.h>
35#include <linux/mmzone.h>
d5381642 36#include <linux/edac_mce.h>
f4742949 37#include <linux/smp.h>
14d2c083 38#include <asm/processor.h>
a0c36a1f
MCC
39
40#include "edac_core.h"
41
18c29002
MCC
42/* Static vars */
43static LIST_HEAD(i7core_edac_list);
44static DEFINE_MUTEX(i7core_edac_lock);
45static int probed;
46
54a08ab1
MCC
47static int use_pci_fixup;
48module_param(use_pci_fixup, int, 0444);
49MODULE_PARM_DESC(use_pci_fixup, "Enable PCI fixup to seek for hidden devices");
f4742949
MCC
50/*
51 * This is used for Nehalem-EP and Nehalem-EX devices, where the non-core
52 * registers start at bus 255, and are not reported by BIOS.
53 * We currently find devices with only 2 sockets. In order to support more QPI
54 * Quick Path Interconnect, just increment this number.
55 */
56#define MAX_SOCKET_BUSES 2
57
58
a0c36a1f
MCC
59/*
60 * Alter this version for the module when modifications are made
61 */
62#define I7CORE_REVISION " Ver: 1.0.0 " __DATE__
63#define EDAC_MOD_STR "i7core_edac"
64
a0c36a1f
MCC
65/*
66 * Debug macros
67 */
68#define i7core_printk(level, fmt, arg...) \
69 edac_printk(level, "i7core", fmt, ##arg)
70
71#define i7core_mc_printk(mci, level, fmt, arg...) \
72 edac_mc_chipset_printk(mci, level, "i7core", fmt, ##arg)
73
74/*
75 * i7core Memory Controller Registers
76 */
77
e9bd2e73
MCC
78 /* OFFSETS for Device 0 Function 0 */
79
80#define MC_CFG_CONTROL 0x90
81
a0c36a1f
MCC
82 /* OFFSETS for Device 3 Function 0 */
83
84#define MC_CONTROL 0x48
85#define MC_STATUS 0x4c
86#define MC_MAX_DOD 0x64
87
442305b1
MCC
88/*
 * OFFSETS for Device 3 Function 4, as indicated on Xeon 5500 datasheet:
90 * http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
91 */
92
93#define MC_TEST_ERR_RCV1 0x60
94 #define DIMM2_COR_ERR(r) ((r) & 0x7fff)
95
96#define MC_TEST_ERR_RCV0 0x64
97 #define DIMM1_COR_ERR(r) (((r) >> 16) & 0x7fff)
98 #define DIMM0_COR_ERR(r) ((r) & 0x7fff)
99
b4e8f0b6
MCC
/* OFFSETS for Device 3 Function 2, as indicated on Xeon 5500 datasheet */
101#define MC_COR_ECC_CNT_0 0x80
102#define MC_COR_ECC_CNT_1 0x84
103#define MC_COR_ECC_CNT_2 0x88
104#define MC_COR_ECC_CNT_3 0x8c
105#define MC_COR_ECC_CNT_4 0x90
106#define MC_COR_ECC_CNT_5 0x94
107
108#define DIMM_TOP_COR_ERR(r) (((r) >> 16) & 0x7fff)
109#define DIMM_BOT_COR_ERR(r) ((r) & 0x7fff)
110
111
a0c36a1f
MCC
112 /* OFFSETS for Devices 4,5 and 6 Function 0 */
113
0b2b7b7e
MCC
114#define MC_CHANNEL_DIMM_INIT_PARAMS 0x58
115 #define THREE_DIMMS_PRESENT (1 << 24)
116 #define SINGLE_QUAD_RANK_PRESENT (1 << 23)
117 #define QUAD_RANK_PRESENT (1 << 22)
118 #define REGISTERED_DIMM (1 << 15)
119
f122a892
MCC
120#define MC_CHANNEL_MAPPER 0x60
121 #define RDLCH(r, ch) ((((r) >> (3 + (ch * 6))) & 0x07) - 1)
122 #define WRLCH(r, ch) ((((r) >> (ch * 6)) & 0x07) - 1)
123
0b2b7b7e
MCC
124#define MC_CHANNEL_RANK_PRESENT 0x7c
125 #define RANK_PRESENT_MASK 0xffff
126
a0c36a1f 127#define MC_CHANNEL_ADDR_MATCH 0xf0
194a40fe
MCC
128#define MC_CHANNEL_ERROR_MASK 0xf8
129#define MC_CHANNEL_ERROR_INJECT 0xfc
130 #define INJECT_ADDR_PARITY 0x10
131 #define INJECT_ECC 0x08
132 #define MASK_CACHELINE 0x06
133 #define MASK_FULL_CACHELINE 0x06
134 #define MASK_MSB32_CACHELINE 0x04
135 #define MASK_LSB32_CACHELINE 0x02
136 #define NO_MASK_CACHELINE 0x00
137 #define REPEAT_EN 0x01
a0c36a1f 138
0b2b7b7e 139 /* OFFSETS for Devices 4,5 and 6 Function 1 */
b990538a 140
0b2b7b7e
MCC
141#define MC_DOD_CH_DIMM0 0x48
142#define MC_DOD_CH_DIMM1 0x4c
143#define MC_DOD_CH_DIMM2 0x50
144 #define RANKOFFSET_MASK ((1 << 12) | (1 << 11) | (1 << 10))
145 #define RANKOFFSET(x) ((x & RANKOFFSET_MASK) >> 10)
146 #define DIMM_PRESENT_MASK (1 << 9)
147 #define DIMM_PRESENT(x) (((x) & DIMM_PRESENT_MASK) >> 9)
854d3349
MCC
148 #define MC_DOD_NUMBANK_MASK ((1 << 8) | (1 << 7))
149 #define MC_DOD_NUMBANK(x) (((x) & MC_DOD_NUMBANK_MASK) >> 7)
150 #define MC_DOD_NUMRANK_MASK ((1 << 6) | (1 << 5))
151 #define MC_DOD_NUMRANK(x) (((x) & MC_DOD_NUMRANK_MASK) >> 5)
41fcb7fe 152 #define MC_DOD_NUMROW_MASK ((1 << 4) | (1 << 3) | (1 << 2))
5566cb7c 153 #define MC_DOD_NUMROW(x) (((x) & MC_DOD_NUMROW_MASK) >> 2)
854d3349
MCC
154 #define MC_DOD_NUMCOL_MASK 3
155 #define MC_DOD_NUMCOL(x) ((x) & MC_DOD_NUMCOL_MASK)
0b2b7b7e 156
f122a892
MCC
157#define MC_RANK_PRESENT 0x7c
158
0b2b7b7e
MCC
159#define MC_SAG_CH_0 0x80
160#define MC_SAG_CH_1 0x84
161#define MC_SAG_CH_2 0x88
162#define MC_SAG_CH_3 0x8c
163#define MC_SAG_CH_4 0x90
164#define MC_SAG_CH_5 0x94
165#define MC_SAG_CH_6 0x98
166#define MC_SAG_CH_7 0x9c
167
168#define MC_RIR_LIMIT_CH_0 0x40
169#define MC_RIR_LIMIT_CH_1 0x44
170#define MC_RIR_LIMIT_CH_2 0x48
171#define MC_RIR_LIMIT_CH_3 0x4C
172#define MC_RIR_LIMIT_CH_4 0x50
173#define MC_RIR_LIMIT_CH_5 0x54
174#define MC_RIR_LIMIT_CH_6 0x58
175#define MC_RIR_LIMIT_CH_7 0x5C
176#define MC_RIR_LIMIT_MASK ((1 << 10) - 1)
177
178#define MC_RIR_WAY_CH 0x80
179 #define MC_RIR_WAY_OFFSET_MASK (((1 << 14) - 1) & ~0x7)
180 #define MC_RIR_WAY_RANK_MASK 0x7
181
a0c36a1f
MCC
182/*
183 * i7core structs
184 */
185
186#define NUM_CHANS 3
442305b1
MCC
187#define MAX_DIMMS 3 /* Max DIMMS per channel */
188#define MAX_MCR_FUNC 4
189#define MAX_CHAN_FUNC 3
a0c36a1f
MCC
190
191struct i7core_info {
192 u32 mc_control;
193 u32 mc_status;
194 u32 max_dod;
f122a892 195 u32 ch_map;
a0c36a1f
MCC
196};
197
194a40fe
MCC
198
199struct i7core_inject {
200 int enable;
201
202 u32 section;
203 u32 type;
204 u32 eccmask;
205
206 /* Error address mask */
207 int channel, dimm, rank, bank, page, col;
208};
209
0b2b7b7e 210struct i7core_channel {
442305b1
MCC
211 u32 ranks;
212 u32 dimms;
0b2b7b7e
MCC
213};
214
8f331907 215struct pci_id_descr {
66607706
MCC
216 int dev;
217 int func;
218 int dev_id;
de06eeef 219 int optional;
8f331907
MCC
220};
221
bd9e19ca 222struct pci_id_table {
1288c18f
MCC
223 const struct pci_id_descr *descr;
224 int n_devs;
bd9e19ca
VM
225};
226
f4742949
MCC
227struct i7core_dev {
228 struct list_head list;
229 u8 socket;
230 struct pci_dev **pdev;
de06eeef 231 int n_devs;
f4742949
MCC
232 struct mem_ctl_info *mci;
233};
234
a0c36a1f 235struct i7core_pvt {
f4742949
MCC
236 struct pci_dev *pci_noncore;
237 struct pci_dev *pci_mcr[MAX_MCR_FUNC + 1];
238 struct pci_dev *pci_ch[NUM_CHANS][MAX_CHAN_FUNC + 1];
239
240 struct i7core_dev *i7core_dev;
67166af4 241
a0c36a1f 242 struct i7core_info info;
194a40fe 243 struct i7core_inject inject;
f4742949 244 struct i7core_channel channel[NUM_CHANS];
67166af4 245
f4742949 246 int channels; /* Number of active channels */
442305b1 247
f4742949
MCC
248 int ce_count_available;
249 int csrow_map[NUM_CHANS][MAX_DIMMS];
b4e8f0b6
MCC
250
251 /* ECC corrected errors counts per udimm */
f4742949
MCC
252 unsigned long udimm_ce_count[MAX_DIMMS];
253 int udimm_last_ce_count[MAX_DIMMS];
b4e8f0b6 254 /* ECC corrected errors counts per rdimm */
f4742949
MCC
255 unsigned long rdimm_ce_count[NUM_CHANS][MAX_DIMMS];
256 int rdimm_last_ce_count[NUM_CHANS][MAX_DIMMS];
442305b1 257
f4742949 258 unsigned int is_registered;
14d2c083 259
d5381642
MCC
260 /* mcelog glue */
261 struct edac_mce edac_mce;
ca9c90ba
MCC
262
263 /* Fifo double buffers */
d5381642 264 struct mce mce_entry[MCE_LOG_LEN];
ca9c90ba
MCC
265 struct mce mce_outentry[MCE_LOG_LEN];
266
267 /* Fifo in/out counters */
268 unsigned mce_in, mce_out;
269
270 /* Count indicator to show errors not got */
271 unsigned mce_overrun;
939747bd
MCC
272
273 /* Struct to control EDAC polling */
274 struct edac_pci_ctl_info *i7core_pci;
a0c36a1f
MCC
275};
276
8f331907
MCC
277#define PCI_DESCR(device, function, device_id) \
278 .dev = (device), \
279 .func = (function), \
280 .dev_id = (device_id)
281
1288c18f 282static const struct pci_id_descr pci_dev_descr_i7core_nehalem[] = {
8f331907
MCC
283 /* Memory controller */
284 { PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_I7_MCR) },
285 { PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_I7_MC_TAD) },
de06eeef
MCC
286 /* Exists only for RDIMM */
287 { PCI_DESCR(3, 2, PCI_DEVICE_ID_INTEL_I7_MC_RAS), .optional = 1 },
8f331907
MCC
288 { PCI_DESCR(3, 4, PCI_DEVICE_ID_INTEL_I7_MC_TEST) },
289
290 /* Channel 0 */
291 { PCI_DESCR(4, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH0_CTRL) },
292 { PCI_DESCR(4, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH0_ADDR) },
293 { PCI_DESCR(4, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH0_RANK) },
294 { PCI_DESCR(4, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH0_TC) },
295
296 /* Channel 1 */
297 { PCI_DESCR(5, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH1_CTRL) },
298 { PCI_DESCR(5, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH1_ADDR) },
299 { PCI_DESCR(5, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH1_RANK) },
300 { PCI_DESCR(5, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH1_TC) },
301
302 /* Channel 2 */
303 { PCI_DESCR(6, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH2_CTRL) },
304 { PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH2_ADDR) },
305 { PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH2_RANK) },
306 { PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH2_TC) },
310cbb72
MCC
307
308 /* Generic Non-core registers */
309 /*
310 * This is the PCI device on i7core and on Xeon 35xx (8086:2c41)
311 * On Xeon 55xx, however, it has a different id (8086:2c40). So,
312 * the probing code needs to test for the other address in case of
313 * failure of this one
314 */
fd382654 315 { PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_I7_NONCORE) },
310cbb72 316
a0c36a1f 317};
8f331907 318
1288c18f 319static const struct pci_id_descr pci_dev_descr_lynnfield[] = {
52a2e4fc
MCC
320 { PCI_DESCR( 3, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR) },
321 { PCI_DESCR( 3, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD) },
322 { PCI_DESCR( 3, 4, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST) },
323
324 { PCI_DESCR( 4, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL) },
325 { PCI_DESCR( 4, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR) },
326 { PCI_DESCR( 4, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK) },
327 { PCI_DESCR( 4, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC) },
328
508fa179
MCC
329 { PCI_DESCR( 5, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL) },
330 { PCI_DESCR( 5, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR) },
331 { PCI_DESCR( 5, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK) },
332 { PCI_DESCR( 5, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC) },
52a2e4fc 333
f05da2f7
MCC
334 /*
335 * This is the PCI device has an alternate address on some
336 * processors like Core i7 860
337 */
52a2e4fc
MCC
338 { PCI_DESCR( 0, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE) },
339};
340
1288c18f 341static const struct pci_id_descr pci_dev_descr_i7core_westmere[] = {
bd9e19ca
VM
342 /* Memory controller */
343 { PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR_REV2) },
344 { PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD_REV2) },
345 /* Exists only for RDIMM */
346 { PCI_DESCR(3, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_RAS_REV2), .optional = 1 },
347 { PCI_DESCR(3, 4, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST_REV2) },
348
349 /* Channel 0 */
350 { PCI_DESCR(4, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL_REV2) },
351 { PCI_DESCR(4, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR_REV2) },
352 { PCI_DESCR(4, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK_REV2) },
353 { PCI_DESCR(4, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC_REV2) },
354
355 /* Channel 1 */
356 { PCI_DESCR(5, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL_REV2) },
357 { PCI_DESCR(5, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR_REV2) },
358 { PCI_DESCR(5, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK_REV2) },
359 { PCI_DESCR(5, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC_REV2) },
360
361 /* Channel 2 */
362 { PCI_DESCR(6, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_CTRL_REV2) },
363 { PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_ADDR_REV2) },
364 { PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_RANK_REV2) },
365 { PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_TC_REV2) },
366
367 /* Generic Non-core registers */
368 { PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_REV2) },
369
370};
371
1288c18f
MCC
372#define PCI_ID_TABLE_ENTRY(A) { .descr=A, .n_devs = ARRAY_SIZE(A) }
373static const struct pci_id_table pci_dev_table[] = {
bd9e19ca
VM
374 PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_nehalem),
375 PCI_ID_TABLE_ENTRY(pci_dev_descr_lynnfield),
376 PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_westmere),
377};
378
8f331907
MCC
379/*
380 * pci_device_id table for which devices we are looking for
8f331907
MCC
381 */
382static const struct pci_device_id i7core_pci_tbl[] __devinitdata = {
d1fd4fb6 383 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
f05da2f7 384 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
8f331907
MCC
385 {0,} /* 0 terminated list. */
386};
387
a0c36a1f
MCC
388/****************************************************************************
389 Anciliary status routines
390 ****************************************************************************/
391
392 /* MC_CONTROL bits */
ef708b53
MCC
393#define CH_ACTIVE(pvt, ch) ((pvt)->info.mc_control & (1 << (8 + ch)))
394#define ECCx8(pvt) ((pvt)->info.mc_control & (1 << 1))
a0c36a1f
MCC
395
396 /* MC_STATUS bits */
61053fde 397#define ECC_ENABLED(pvt) ((pvt)->info.mc_status & (1 << 4))
ef708b53 398#define CH_DISABLED(pvt, ch) ((pvt)->info.mc_status & (1 << ch))
a0c36a1f
MCC
399
400 /* MC_MAX_DOD read functions */
854d3349 401static inline int numdimms(u32 dimms)
a0c36a1f 402{
854d3349 403 return (dimms & 0x3) + 1;
a0c36a1f
MCC
404}
405
854d3349 406static inline int numrank(u32 rank)
a0c36a1f
MCC
407{
408 static int ranks[4] = { 1, 2, 4, -EINVAL };
409
854d3349 410 return ranks[rank & 0x3];
a0c36a1f
MCC
411}
412
854d3349 413static inline int numbank(u32 bank)
a0c36a1f
MCC
414{
415 static int banks[4] = { 4, 8, 16, -EINVAL };
416
854d3349 417 return banks[bank & 0x3];
a0c36a1f
MCC
418}
419
854d3349 420static inline int numrow(u32 row)
a0c36a1f
MCC
421{
422 static int rows[8] = {
423 1 << 12, 1 << 13, 1 << 14, 1 << 15,
424 1 << 16, -EINVAL, -EINVAL, -EINVAL,
425 };
426
854d3349 427 return rows[row & 0x7];
a0c36a1f
MCC
428}
429
854d3349 430static inline int numcol(u32 col)
a0c36a1f
MCC
431{
432 static int cols[8] = {
433 1 << 10, 1 << 11, 1 << 12, -EINVAL,
434 };
854d3349 435 return cols[col & 0x3];
a0c36a1f
MCC
436}
437
f4742949 438static struct i7core_dev *get_i7core_dev(u8 socket)
66607706
MCC
439{
440 struct i7core_dev *i7core_dev;
441
442 list_for_each_entry(i7core_dev, &i7core_edac_list, list) {
443 if (i7core_dev->socket == socket)
444 return i7core_dev;
445 }
446
447 return NULL;
448}
449
848b2f7e
HS
450static struct i7core_dev *alloc_i7core_dev(u8 socket,
451 const struct pci_id_table *table)
452{
453 struct i7core_dev *i7core_dev;
454
455 i7core_dev = kzalloc(sizeof(*i7core_dev), GFP_KERNEL);
456 if (!i7core_dev)
457 return NULL;
458
459 i7core_dev->pdev = kzalloc(sizeof(*i7core_dev->pdev) * table->n_devs,
460 GFP_KERNEL);
461 if (!i7core_dev->pdev) {
462 kfree(i7core_dev);
463 return NULL;
464 }
465
466 i7core_dev->socket = socket;
467 i7core_dev->n_devs = table->n_devs;
468 list_add_tail(&i7core_dev->list, &i7core_edac_list);
469
470 return i7core_dev;
471}
472
2aa9be44
HS
/* Unlink @i7core_dev from the global list and release its memory. */
static void free_i7core_dev(struct i7core_dev *i7core_dev)
{
	list_del(&i7core_dev->list);
	kfree(i7core_dev->pdev);
	kfree(i7core_dev);
}
479
a0c36a1f
MCC
480/****************************************************************************
481 Memory check routines
482 ****************************************************************************/
67166af4
MCC
483static struct pci_dev *get_pdev_slot_func(u8 socket, unsigned slot,
484 unsigned func)
ef708b53 485{
66607706 486 struct i7core_dev *i7core_dev = get_i7core_dev(socket);
ef708b53 487 int i;
ef708b53 488
66607706
MCC
489 if (!i7core_dev)
490 return NULL;
491
de06eeef 492 for (i = 0; i < i7core_dev->n_devs; i++) {
66607706 493 if (!i7core_dev->pdev[i])
ef708b53
MCC
494 continue;
495
66607706
MCC
496 if (PCI_SLOT(i7core_dev->pdev[i]->devfn) == slot &&
497 PCI_FUNC(i7core_dev->pdev[i]->devfn) == func) {
498 return i7core_dev->pdev[i];
ef708b53
MCC
499 }
500 }
501
eb94fc40
MCC
502 return NULL;
503}
504
ec6df24c
MCC
/**
 * i7core_get_active_channels() - gets the number of channels and csrows
 * @socket:	Quick Path Interconnect socket
 * @channels:	Number of channels that will be returned
 * @csrows:	Number of csrows found
 *
 * Since EDAC core needs to know in advance the number of available channels
 * and csrows, in order to allocate memory for csrows/channels, it is needed
 * to run two similar steps. At the first step, implemented on this function,
 * it checks the number of csrows/channels present at one socket.
 * this is used in order to properly allocate the size of mci components.
 *
 * It should be noticed that none of the current available datasheets explain
 * or even mention how csrows are seen by the memory controller. So, we need
 * to add a fake description for csrows.
 * So, this driver is attributing one DIMM memory for one csrow.
 *
 * Returns 0 on success, -ENODEV if an expected PCI function is missing.
 */
static int i7core_get_active_channels(const u8 socket, unsigned *channels,
				      unsigned *csrows)
{
	struct pci_dev *pdev = NULL;
	int i, j;
	u32 status, control;

	*channels = 0;
	*csrows = 0;

	/* Device 3 function 0 holds the MC status/control registers */
	pdev = get_pdev_slot_func(socket, 3, 0);
	if (!pdev) {
		i7core_printk(KERN_ERR, "Couldn't find socket %d fn 3.0!!!\n",
			      socket);
		return -ENODEV;
	}

	/* Device 3 function 0 reads */
	pci_read_config_dword(pdev, MC_STATUS, &status);
	pci_read_config_dword(pdev, MC_CONTROL, &control);

	for (i = 0; i < NUM_CHANS; i++) {
		u32 dimm_dod[3];
		/* Check if the channel is active */
		if (!(control & (1 << (8 + i))))
			continue;

		/* Check if the channel is disabled */
		if (status & (1 << i))
			continue;

		/* DIMM presence lives in devices 4-6 function 1 */
		pdev = get_pdev_slot_func(socket, i + 4, 1);
		if (!pdev) {
			i7core_printk(KERN_ERR, "Couldn't find socket %d "
						"fn %d.%d!!!\n",
					socket, i + 4, 1);
			return -ENODEV;
		}
		/* Devices 4-6 function 1 */
		pci_read_config_dword(pdev,
				MC_DOD_CH_DIMM0, &dimm_dod[0]);
		pci_read_config_dword(pdev,
				MC_DOD_CH_DIMM1, &dimm_dod[1]);
		pci_read_config_dword(pdev,
				MC_DOD_CH_DIMM2, &dimm_dod[2]);

		(*channels)++;

		/* Each present DIMM is exported as one (fake) csrow */
		for (j = 0; j < 3; j++) {
			if (!DIMM_PRESENT(dimm_dod[j]))
				continue;
			(*csrows)++;
		}
	}

	debugf0("Number of active channels on socket %d: %d\n",
		socket, *channels);

	return 0;
}
582
1288c18f 583static int get_dimm_config(const struct mem_ctl_info *mci, int *csrow)
a0c36a1f
MCC
584{
585 struct i7core_pvt *pvt = mci->pvt_info;
1c6fed80 586 struct csrow_info *csr;
854d3349 587 struct pci_dev *pdev;
ba6c5c62 588 int i, j;
5566cb7c 589 unsigned long last_page = 0;
1c6fed80 590 enum edac_type mode;
854d3349 591 enum mem_type mtype;
a0c36a1f 592
854d3349 593 /* Get data from the MC register, function 0 */
f4742949 594 pdev = pvt->pci_mcr[0];
7dd6953c 595 if (!pdev)
8f331907
MCC
596 return -ENODEV;
597
f122a892 598 /* Device 3 function 0 reads */
7dd6953c
MCC
599 pci_read_config_dword(pdev, MC_CONTROL, &pvt->info.mc_control);
600 pci_read_config_dword(pdev, MC_STATUS, &pvt->info.mc_status);
601 pci_read_config_dword(pdev, MC_MAX_DOD, &pvt->info.max_dod);
602 pci_read_config_dword(pdev, MC_CHANNEL_MAPPER, &pvt->info.ch_map);
f122a892 603
17cb7b0c 604 debugf0("QPI %d control=0x%08x status=0x%08x dod=0x%08x map=0x%08x\n",
4af91889 605 pvt->i7core_dev->socket, pvt->info.mc_control, pvt->info.mc_status,
f122a892 606 pvt->info.max_dod, pvt->info.ch_map);
a0c36a1f 607
1c6fed80 608 if (ECC_ENABLED(pvt)) {
41fcb7fe 609 debugf0("ECC enabled with x%d SDCC\n", ECCx8(pvt) ? 8 : 4);
1c6fed80
MCC
610 if (ECCx8(pvt))
611 mode = EDAC_S8ECD8ED;
612 else
613 mode = EDAC_S4ECD4ED;
614 } else {
a0c36a1f 615 debugf0("ECC disabled\n");
1c6fed80
MCC
616 mode = EDAC_NONE;
617 }
a0c36a1f
MCC
618
619 /* FIXME: need to handle the error codes */
17cb7b0c
MCC
620 debugf0("DOD Max limits: DIMMS: %d, %d-ranked, %d-banked "
621 "x%x x 0x%x\n",
854d3349
MCC
622 numdimms(pvt->info.max_dod),
623 numrank(pvt->info.max_dod >> 2),
276b824c 624 numbank(pvt->info.max_dod >> 4),
854d3349
MCC
625 numrow(pvt->info.max_dod >> 6),
626 numcol(pvt->info.max_dod >> 9));
a0c36a1f 627
0b2b7b7e 628 for (i = 0; i < NUM_CHANS; i++) {
854d3349 629 u32 data, dimm_dod[3], value[8];
0b2b7b7e 630
52a2e4fc
MCC
631 if (!pvt->pci_ch[i][0])
632 continue;
633
0b2b7b7e
MCC
634 if (!CH_ACTIVE(pvt, i)) {
635 debugf0("Channel %i is not active\n", i);
636 continue;
637 }
638 if (CH_DISABLED(pvt, i)) {
639 debugf0("Channel %i is disabled\n", i);
640 continue;
641 }
642
f122a892 643 /* Devices 4-6 function 0 */
f4742949 644 pci_read_config_dword(pvt->pci_ch[i][0],
0b2b7b7e
MCC
645 MC_CHANNEL_DIMM_INIT_PARAMS, &data);
646
f4742949 647 pvt->channel[i].ranks = (data & QUAD_RANK_PRESENT) ?
67166af4 648 4 : 2;
0b2b7b7e 649
854d3349
MCC
650 if (data & REGISTERED_DIMM)
651 mtype = MEM_RDDR3;
14d2c083 652 else
854d3349
MCC
653 mtype = MEM_DDR3;
654#if 0
0b2b7b7e
MCC
655 if (data & THREE_DIMMS_PRESENT)
656 pvt->channel[i].dimms = 3;
657 else if (data & SINGLE_QUAD_RANK_PRESENT)
658 pvt->channel[i].dimms = 1;
659 else
660 pvt->channel[i].dimms = 2;
854d3349
MCC
661#endif
662
663 /* Devices 4-6 function 1 */
f4742949 664 pci_read_config_dword(pvt->pci_ch[i][1],
854d3349 665 MC_DOD_CH_DIMM0, &dimm_dod[0]);
f4742949 666 pci_read_config_dword(pvt->pci_ch[i][1],
854d3349 667 MC_DOD_CH_DIMM1, &dimm_dod[1]);
f4742949 668 pci_read_config_dword(pvt->pci_ch[i][1],
854d3349 669 MC_DOD_CH_DIMM2, &dimm_dod[2]);
0b2b7b7e 670
1c6fed80 671 debugf0("Ch%d phy rd%d, wr%d (0x%08x): "
854d3349 672 "%d ranks, %cDIMMs\n",
1c6fed80
MCC
673 i,
674 RDLCH(pvt->info.ch_map, i), WRLCH(pvt->info.ch_map, i),
675 data,
f4742949 676 pvt->channel[i].ranks,
41fcb7fe 677 (data & REGISTERED_DIMM) ? 'R' : 'U');
854d3349
MCC
678
679 for (j = 0; j < 3; j++) {
680 u32 banks, ranks, rows, cols;
5566cb7c 681 u32 size, npages;
854d3349
MCC
682
683 if (!DIMM_PRESENT(dimm_dod[j]))
684 continue;
685
686 banks = numbank(MC_DOD_NUMBANK(dimm_dod[j]));
687 ranks = numrank(MC_DOD_NUMRANK(dimm_dod[j]));
688 rows = numrow(MC_DOD_NUMROW(dimm_dod[j]));
689 cols = numcol(MC_DOD_NUMCOL(dimm_dod[j]));
690
5566cb7c
MCC
691 /* DDR3 has 8 I/O banks */
692 size = (rows * cols * banks * ranks) >> (20 - 3);
693
f4742949 694 pvt->channel[i].dimms++;
854d3349 695
17cb7b0c
MCC
696 debugf0("\tdimm %d %d Mb offset: %x, "
697 "bank: %d, rank: %d, row: %#x, col: %#x\n",
698 j, size,
854d3349
MCC
699 RANKOFFSET(dimm_dod[j]),
700 banks, ranks, rows, cols);
701
e9144601 702 npages = MiB_TO_PAGES(size);
5566cb7c 703
ba6c5c62 704 csr = &mci->csrows[*csrow];
5566cb7c
MCC
705 csr->first_page = last_page + 1;
706 last_page += npages;
707 csr->last_page = last_page;
708 csr->nr_pages = npages;
709
854d3349 710 csr->page_mask = 0;
eb94fc40 711 csr->grain = 8;
ba6c5c62 712 csr->csrow_idx = *csrow;
eb94fc40
MCC
713 csr->nr_channels = 1;
714
715 csr->channels[0].chan_idx = i;
716 csr->channels[0].ce_count = 0;
854d3349 717
f4742949 718 pvt->csrow_map[i][j] = *csrow;
b4e8f0b6 719
854d3349
MCC
720 switch (banks) {
721 case 4:
722 csr->dtype = DEV_X4;
723 break;
724 case 8:
725 csr->dtype = DEV_X8;
726 break;
727 case 16:
728 csr->dtype = DEV_X16;
729 break;
730 default:
731 csr->dtype = DEV_UNKNOWN;
732 }
733
734 csr->edac_mode = mode;
735 csr->mtype = mtype;
736
ba6c5c62 737 (*csrow)++;
854d3349 738 }
1c6fed80 739
854d3349
MCC
740 pci_read_config_dword(pdev, MC_SAG_CH_0, &value[0]);
741 pci_read_config_dword(pdev, MC_SAG_CH_1, &value[1]);
742 pci_read_config_dword(pdev, MC_SAG_CH_2, &value[2]);
743 pci_read_config_dword(pdev, MC_SAG_CH_3, &value[3]);
744 pci_read_config_dword(pdev, MC_SAG_CH_4, &value[4]);
745 pci_read_config_dword(pdev, MC_SAG_CH_5, &value[5]);
746 pci_read_config_dword(pdev, MC_SAG_CH_6, &value[6]);
747 pci_read_config_dword(pdev, MC_SAG_CH_7, &value[7]);
17cb7b0c 748 debugf1("\t[%i] DIVBY3\tREMOVED\tOFFSET\n", i);
854d3349 749 for (j = 0; j < 8; j++)
17cb7b0c 750 debugf1("\t\t%#x\t%#x\t%#x\n",
854d3349
MCC
751 (value[j] >> 27) & 0x1,
752 (value[j] >> 24) & 0x7,
753 (value[j] && ((1 << 24) - 1)));
0b2b7b7e
MCC
754 }
755
a0c36a1f
MCC
756 return 0;
757}
758
194a40fe
MCC
759/****************************************************************************
760 Error insertion routines
761 ****************************************************************************/
762
763/* The i7core has independent error injection features per channel.
764 However, to have a simpler code, we don't allow enabling error injection
765 on more than one channel.
766 Also, since a change at an inject parameter will be applied only at enable,
767 we're disabling error injection on all write calls to the sysfs nodes that
768 controls the error code injection.
769 */
1288c18f 770static int disable_inject(const struct mem_ctl_info *mci)
194a40fe
MCC
771{
772 struct i7core_pvt *pvt = mci->pvt_info;
773
774 pvt->inject.enable = 0;
775
f4742949 776 if (!pvt->pci_ch[pvt->inject.channel][0])
8f331907
MCC
777 return -ENODEV;
778
f4742949 779 pci_write_config_dword(pvt->pci_ch[pvt->inject.channel][0],
4157d9f5 780 MC_CHANNEL_ERROR_INJECT, 0);
8f331907
MCC
781
782 return 0;
194a40fe
MCC
783}
784
785/*
786 * i7core inject inject.section
787 *
788 * accept and store error injection inject.section value
789 * bit 0 - refers to the lower 32-byte half cacheline
790 * bit 1 - refers to the upper 32-byte half cacheline
791 */
792static ssize_t i7core_inject_section_store(struct mem_ctl_info *mci,
793 const char *data, size_t count)
794{
795 struct i7core_pvt *pvt = mci->pvt_info;
796 unsigned long value;
797 int rc;
798
799 if (pvt->inject.enable)
41fcb7fe 800 disable_inject(mci);
194a40fe
MCC
801
802 rc = strict_strtoul(data, 10, &value);
803 if ((rc < 0) || (value > 3))
2068def5 804 return -EIO;
194a40fe
MCC
805
806 pvt->inject.section = (u32) value;
807 return count;
808}
809
810static ssize_t i7core_inject_section_show(struct mem_ctl_info *mci,
811 char *data)
812{
813 struct i7core_pvt *pvt = mci->pvt_info;
814 return sprintf(data, "0x%08x\n", pvt->inject.section);
815}
816
817/*
818 * i7core inject.type
819 *
 * accept and store error injection inject.type value
821 * bit 0 - repeat enable - Enable error repetition
822 * bit 1 - inject ECC error
823 * bit 2 - inject parity error
824 */
825static ssize_t i7core_inject_type_store(struct mem_ctl_info *mci,
826 const char *data, size_t count)
827{
828 struct i7core_pvt *pvt = mci->pvt_info;
829 unsigned long value;
830 int rc;
831
832 if (pvt->inject.enable)
41fcb7fe 833 disable_inject(mci);
194a40fe
MCC
834
835 rc = strict_strtoul(data, 10, &value);
836 if ((rc < 0) || (value > 7))
2068def5 837 return -EIO;
194a40fe
MCC
838
839 pvt->inject.type = (u32) value;
840 return count;
841}
842
843static ssize_t i7core_inject_type_show(struct mem_ctl_info *mci,
844 char *data)
845{
846 struct i7core_pvt *pvt = mci->pvt_info;
847 return sprintf(data, "0x%08x\n", pvt->inject.type);
848}
849
850/*
851 * i7core_inject_inject.eccmask_store
852 *
853 * The type of error (UE/CE) will depend on the inject.eccmask value:
854 * Any bits set to a 1 will flip the corresponding ECC bit
855 * Correctable errors can be injected by flipping 1 bit or the bits within
856 * a symbol pair (2 consecutive aligned 8-bit pairs - i.e. 7:0 and 15:8 or
857 * 23:16 and 31:24). Flipping bits in two symbol pairs will cause an
858 * uncorrectable error to be injected.
859 */
860static ssize_t i7core_inject_eccmask_store(struct mem_ctl_info *mci,
861 const char *data, size_t count)
862{
863 struct i7core_pvt *pvt = mci->pvt_info;
864 unsigned long value;
865 int rc;
866
867 if (pvt->inject.enable)
41fcb7fe 868 disable_inject(mci);
194a40fe
MCC
869
870 rc = strict_strtoul(data, 10, &value);
871 if (rc < 0)
2068def5 872 return -EIO;
194a40fe
MCC
873
874 pvt->inject.eccmask = (u32) value;
875 return count;
876}
877
878static ssize_t i7core_inject_eccmask_show(struct mem_ctl_info *mci,
879 char *data)
880{
881 struct i7core_pvt *pvt = mci->pvt_info;
882 return sprintf(data, "0x%08x\n", pvt->inject.eccmask);
883}
884
885/*
886 * i7core_addrmatch
887 *
888 * The type of error (UE/CE) will depend on the inject.eccmask value:
889 * Any bits set to a 1 will flip the corresponding ECC bit
890 * Correctable errors can be injected by flipping 1 bit or the bits within
891 * a symbol pair (2 consecutive aligned 8-bit pairs - i.e. 7:0 and 15:8 or
892 * 23:16 and 31:24). Flipping bits in two symbol pairs will cause an
893 * uncorrectable error to be injected.
894 */
194a40fe 895
a5538e53
MCC
/*
 * Generate a sysfs store/show pair for one address-match field of the
 * injection mask. The store accepts "any" (stored as -1, meaning "don't
 * match on this field") or a decimal value below @limit; any pending
 * injection is disabled before the field is changed.
 */
#define DECLARE_ADDR_MATCH(param, limit) \
static ssize_t i7core_inject_store_##param( \
	struct mem_ctl_info *mci, \
	const char *data, size_t count) \
{ \
	struct i7core_pvt *pvt; \
	long value; \
	int rc; \
	\
	debugf1("%s()\n", __func__); \
	pvt = mci->pvt_info; \
	\
	if (pvt->inject.enable) \
		disable_inject(mci); \
	\
	if (!strcasecmp(data, "any") || !strcasecmp(data, "any\n"))\
		value = -1; \
	else { \
		rc = strict_strtoul(data, 10, &value); \
		if ((rc < 0) || (value >= limit)) \
			return -EIO; \
	} \
	\
	pvt->inject.param = value; \
	\
	return count; \
} \
 \
static ssize_t i7core_inject_show_##param( \
	struct mem_ctl_info *mci, \
	char *data) \
{ \
	struct i7core_pvt *pvt; \
	\
	pvt = mci->pvt_info; \
	debugf1("%s() pvt=%p\n", __func__, pvt); \
	if (pvt->inject.param < 0) \
		return sprintf(data, "any\n"); \
	else \
		return sprintf(data, "%d\n", pvt->inject.param);\
}

/* Build one mcidev_sysfs_attribute entry for a DECLARE_ADDR_MATCH pair. */
#define ATTR_ADDR_MATCH(param) \
	{ \
		.attr = { \
			.name = #param, \
			.mode = (S_IRUGO | S_IWUSR) \
		}, \
		.show = i7core_inject_show_##param, \
		.store = i7core_inject_store_##param, \
	}

/* The address-match fields and their exclusive upper limits. */
DECLARE_ADDR_MATCH(channel, 3);
DECLARE_ADDR_MATCH(dimm, 3);
DECLARE_ADDR_MATCH(rank, 4);
DECLARE_ADDR_MATCH(bank, 32);
DECLARE_ADDR_MATCH(page, 0x10000);
DECLARE_ADDR_MATCH(col, 0x4000);
194a40fe 954
1288c18f 955static int write_and_test(struct pci_dev *dev, const int where, const u32 val)
276b824c
MCC
956{
957 u32 read;
958 int count;
959
4157d9f5
MCC
960 debugf0("setting pci %02x:%02x.%x reg=%02x value=%08x\n",
961 dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
962 where, val);
963
276b824c
MCC
964 for (count = 0; count < 10; count++) {
965 if (count)
b990538a 966 msleep(100);
276b824c
MCC
967 pci_write_config_dword(dev, where, val);
968 pci_read_config_dword(dev, where, &read);
969
970 if (read == val)
971 return 0;
972 }
973
4157d9f5
MCC
974 i7core_printk(KERN_ERR, "Error during set pci %02x:%02x.%x reg=%02x "
975 "write=%08x. Read=%08x\n",
976 dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
977 where, val, read);
276b824c
MCC
978
979 return -EINVAL;
980}
981
194a40fe
MCC
/*
 * This routine prepares the Memory Controller for error injection.
 * The error will be injected when some process tries to write to the
 * memory that matches the given criteria.
 * The criteria can be set in terms of a mask where dimm, rank, bank, page
 * and col can be specified.
 * A -1 value for any of the mask items will make the MCU to ignore
 * that matching criteria for error injection.
 *
 * It should be noticed that the error will only happen after a write operation
 * on a memory that matches the condition. if REPEAT_EN is not enabled at
 * inject mask, then it will produce just one error. Otherwise, it will repeat
 * until the injectmask would be cleaned.
 *
 * FIXME: This routine assumes that MAXNUMDIMMS value of MC_MAX_DOD
 *    is reliable enough to check if the MC is using the
 *    three channels. However, this is not clear at the datasheet.
 */
static ssize_t i7core_inject_enable_store(struct mem_ctl_info *mci,
				       const char *data, size_t count)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	u32 injectmask;
	u64 mask = 0;
	int  rc;
	long enable;

	/* Nothing to do if the target channel's config device is absent */
	if (!pvt->pci_ch[pvt->inject.channel][0])
		return 0;

	rc = strict_strtoul(data, 10, &enable);
	if ((rc < 0))
		return 0;

	if (enable) {
		pvt->inject.enable = 1;
	} else {
		/* Writing 0 disarms injection and clears the registers */
		disable_inject(mci);
		return count;
	}

	/*
	 * Build the MC_CHANNEL_ADDR_MATCH mask. For each criterion, a
	 * negative value sets the corresponding "ignore" bit; otherwise the
	 * value is placed at the field position. Bit positions for dimm/rank
	 * depend on whether the channel runs 3 dimms (different DOD layout).
	 */

	/* Sets pvt->inject.dimm mask */
	if (pvt->inject.dimm < 0)
		mask |= 1LL << 41;
	else {
		if (pvt->channel[pvt->inject.channel].dimms > 2)
			mask |= (pvt->inject.dimm & 0x3LL) << 35;
		else
			mask |= (pvt->inject.dimm & 0x1LL) << 36;
	}

	/* Sets pvt->inject.rank mask */
	if (pvt->inject.rank < 0)
		mask |= 1LL << 40;
	else {
		if (pvt->channel[pvt->inject.channel].dimms > 2)
			mask |= (pvt->inject.rank & 0x1LL) << 34;
		else
			mask |= (pvt->inject.rank & 0x3LL) << 34;
	}

	/* Sets pvt->inject.bank mask */
	if (pvt->inject.bank < 0)
		mask |= 1LL << 39;
	else
		/* NOTE(review): 0x15 keeps only bits 0, 2 and 4 of the bank;
		 * since bank accepts values up to 31, a contiguous 0x1f mask
		 * would be expected here — confirm against the datasheet. */
		mask |= (pvt->inject.bank & 0x15LL) << 30;

	/* Sets pvt->inject.page mask */
	if (pvt->inject.page < 0)
		mask |= 1LL << 38;
	else
		mask |= (pvt->inject.page & 0xffff) << 14;

	/* Sets pvt->inject.column mask */
	if (pvt->inject.col < 0)
		mask |= 1LL << 37;
	else
		mask |= (pvt->inject.col & 0x3fff);

	/*
	 * bit    0: REPEAT_EN
	 * bits 1-2: MASK_HALF_CACHELINE
	 * bit    3: INJECT_ECC
	 * bit    4: INJECT_ADDR_PARITY
	 */

	/* type bit 0 -> REPEAT_EN; section -> bits 1-2; type bits 1-2 ->
	 * bits 3-4 (ECC / address-parity selection) */
	injectmask = (pvt->inject.type & 1) |
		     (pvt->inject.section & 0x3) << 1 |
		     (pvt->inject.type & 0x6) << (3 - 1);

	/* Unlock writes to registers - this register is write only */
	pci_write_config_dword(pvt->pci_noncore,
			       MC_CFG_CONTROL, 0x2);

	/* Program the 64-bit address match mask as two dwords */
	write_and_test(pvt->pci_ch[pvt->inject.channel][0],
			       MC_CHANNEL_ADDR_MATCH, mask);
	write_and_test(pvt->pci_ch[pvt->inject.channel][0],
			       MC_CHANNEL_ADDR_MATCH + 4, mask >> 32L);

	write_and_test(pvt->pci_ch[pvt->inject.channel][0],
			       MC_CHANNEL_ERROR_MASK, pvt->inject.eccmask);

	/* Arming the injection must happen after the match/mask setup */
	write_and_test(pvt->pci_ch[pvt->inject.channel][0],
			       MC_CHANNEL_ERROR_INJECT, injectmask);

	/*
	 * This is something undocumented, based on my tests
	 * Without writing 8 to this register, errors aren't injected. Not sure
	 * why.
	 */
	pci_write_config_dword(pvt->pci_noncore,
			       MC_CFG_CONTROL, 8);

	debugf0("Error inject addr match 0x%016llx, ecc 0x%08x,"
		" inject 0x%08x\n",
		mask, pvt->inject.eccmask, injectmask);


	return count;
}
1102
1103static ssize_t i7core_inject_enable_show(struct mem_ctl_info *mci,
1104 char *data)
1105{
1106 struct i7core_pvt *pvt = mci->pvt_info;
7b029d03
MCC
1107 u32 injectmask;
1108
52a2e4fc
MCC
1109 if (!pvt->pci_ch[pvt->inject.channel][0])
1110 return 0;
1111
f4742949 1112 pci_read_config_dword(pvt->pci_ch[pvt->inject.channel][0],
4157d9f5 1113 MC_CHANNEL_ERROR_INJECT, &injectmask);
7b029d03
MCC
1114
1115 debugf0("Inject error read: 0x%018x\n", injectmask);
1116
1117 if (injectmask & 0x0c)
1118 pvt->inject.enable = 1;
1119
194a40fe
MCC
1120 return sprintf(data, "%d\n", pvt->inject.enable);
1121}
1122
f338d736
MCC
/*
 * Generates a read-only sysfs handler exposing the driver's corrected
 * error counter for one uDIMM slot. The counters are only meaningful
 * for non-registered DIMMs when at least one read has happened
 * (ce_count_available); otherwise "data unavailable" is printed.
 */
#define DECLARE_COUNTER(param)				\
static ssize_t i7core_show_counter_##param(		\
		struct mem_ctl_info *mci,		\
		char *data)				\
{							\
	struct i7core_pvt *pvt = mci->pvt_info;		\
							\
	debugf1("%s() \n", __func__);			\
	if (!pvt->ce_count_available || (pvt->is_registered)) \
		return sprintf(data, "data unavailable\n"); \
	return sprintf(data, "%lu\n",			\
			pvt->udimm_ce_count[param]);	\
}

/* Read-only attribute entry named "udimm<param>" for the counter above */
#define ATTR_COUNTER(param)				\
	{						\
		.attr = {				\
			.name = __stringify(udimm##param),	\
			.mode = (S_IRUGO | S_IWUSR)	\
		},					\
		.show  = i7core_show_counter_##param	\
	}

/* One counter per uDIMM slot (0..2) */
DECLARE_COUNTER(0);
DECLARE_COUNTER(1);
DECLARE_COUNTER(2);
442305b1 1149
194a40fe
MCC
1150/*
1151 * Sysfs struct
1152 */
a5538e53 1153
1288c18f 1154static const struct mcidev_sysfs_attribute i7core_addrmatch_attrs[] = {
a5538e53
MCC
1155 ATTR_ADDR_MATCH(channel),
1156 ATTR_ADDR_MATCH(dimm),
1157 ATTR_ADDR_MATCH(rank),
1158 ATTR_ADDR_MATCH(bank),
1159 ATTR_ADDR_MATCH(page),
1160 ATTR_ADDR_MATCH(col),
1288c18f 1161 { } /* End of list */
a5538e53
MCC
1162};
1163
1288c18f 1164static const struct mcidev_sysfs_group i7core_inject_addrmatch = {
a5538e53
MCC
1165 .name = "inject_addrmatch",
1166 .mcidev_attr = i7core_addrmatch_attrs,
1167};
1168
1288c18f 1169static const struct mcidev_sysfs_attribute i7core_udimm_counters_attrs[] = {
f338d736
MCC
1170 ATTR_COUNTER(0),
1171 ATTR_COUNTER(1),
1172 ATTR_COUNTER(2),
64aab720 1173 { .attr = { .name = NULL } }
f338d736
MCC
1174};
1175
1288c18f 1176static const struct mcidev_sysfs_group i7core_udimm_counters = {
f338d736
MCC
1177 .name = "all_channel_counts",
1178 .mcidev_attr = i7core_udimm_counters_attrs,
1179};
1180
/*
 * Top-level injection sysfs attributes for registered-DIMM setups.
 */
static const struct mcidev_sysfs_attribute i7core_sysfs_rdimm_attrs[] = {
	{
		.attr = {
			.name = "inject_section",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show  = i7core_inject_section_show,
		.store = i7core_inject_section_store,
	}, {
		.attr = {
			.name = "inject_type",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show  = i7core_inject_type_show,
		.store = i7core_inject_type_store,
	}, {
		.attr = {
			.name = "inject_eccmask",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show  = i7core_inject_eccmask_show,
		.store = i7core_inject_eccmask_store,
	}, {
		.grp = &i7core_inject_addrmatch,
	}, {
		.attr = {
			.name = "inject_enable",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show  = i7core_inject_enable_show,
		.store = i7core_inject_enable_store,
	},
	{ }	/* End of list */
};

/*
 * Same attribute set for unbuffered-DIMM setups, plus the
 * all_channel_counts group (hardware CE counters are only usable
 * there).
 */
static const struct mcidev_sysfs_attribute i7core_sysfs_udimm_attrs[] = {
	{
		.attr = {
			.name = "inject_section",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show  = i7core_inject_section_show,
		.store = i7core_inject_section_store,
	}, {
		.attr = {
			.name = "inject_type",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show  = i7core_inject_type_show,
		.store = i7core_inject_type_store,
	}, {
		.attr = {
			.name = "inject_eccmask",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show  = i7core_inject_eccmask_show,
		.store = i7core_inject_eccmask_store,
	}, {
		.grp = &i7core_inject_addrmatch,
	}, {
		.attr = {
			.name = "inject_enable",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show  = i7core_inject_enable_show,
		.store = i7core_inject_enable_store,
	}, {
		.grp = &i7core_udimm_counters,
	},
	{ }	/* End of list */
};
1252
a0c36a1f
MCC
1253/****************************************************************************
1254 Device initialization routines: put/get, init/exit
1255 ****************************************************************************/
1256
1257/*
64c10f6e 1258 * i7core_put_all_devices 'put' all the devices that we have
a0c36a1f
MCC
1259 * reserved via 'get'
1260 */
13d6e9b6 1261static void i7core_put_devices(struct i7core_dev *i7core_dev)
a0c36a1f 1262{
13d6e9b6 1263 int i;
a0c36a1f 1264
22e6bcbd 1265 debugf0(__FILE__ ": %s()\n", __func__);
de06eeef 1266 for (i = 0; i < i7core_dev->n_devs; i++) {
22e6bcbd
MCC
1267 struct pci_dev *pdev = i7core_dev->pdev[i];
1268 if (!pdev)
1269 continue;
1270 debugf0("Removing dev %02x:%02x.%d\n",
1271 pdev->bus->number,
1272 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
1273 pci_dev_put(pdev);
1274 }
13d6e9b6 1275}
66607706 1276
13d6e9b6
MCC
1277static void i7core_put_all_devices(void)
1278{
42538680 1279 struct i7core_dev *i7core_dev, *tmp;
13d6e9b6 1280
39300e71 1281 list_for_each_entry_safe(i7core_dev, tmp, &i7core_edac_list, list) {
13d6e9b6 1282 i7core_put_devices(i7core_dev);
2aa9be44 1283 free_i7core_dev(i7core_dev);
39300e71 1284 }
a0c36a1f
MCC
1285}
1286
/*
 * Forces a legacy scan of the top PCI buses when the first device of a
 * table is not visible, so hidden non-core devices get enumerated.
 * Only called at init when the use_pci_fixup module parameter asks for it.
 */
static void __init i7core_xeon_pci_fixup(const struct pci_id_table *table)
{
	struct pci_dev *pdev = NULL;
	int i;

	/*
	 * On Xeon 55xx, the Intel Quckpath Arch Generic Non-core pci buses
	 * aren't announced by acpi. So, we need to use a legacy scan probing
	 * to detect them
	 */
	while (table && table->descr) {
		/* Probe for the table's first device as a visibility check */
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL, table->descr[0].dev_id, NULL);
		if (unlikely(!pdev)) {
			/* Not found: scan the topmost buses (255 downward) */
			for (i = 0; i < MAX_SOCKET_BUSES; i++)
				pcibios_scan_specific_bus(255-i);
		}
		/* pci_dev_put(NULL) is a no-op, so no need to test pdev */
		pci_dev_put(pdev);
		table++;
	}
}
1307
bda14289
MCC
1308static unsigned i7core_pci_lastbus(void)
1309{
1310 int last_bus = 0, bus;
1311 struct pci_bus *b = NULL;
1312
1313 while ((b = pci_find_next_bus(b)) != NULL) {
1314 bus = b->number;
1315 debugf0("Found bus %d\n", bus);
1316 if (bus > last_bus)
1317 last_bus = bus;
1318 }
1319
1320 debugf0("Last bus %d\n", last_bus);
1321
1322 return last_bus;
1323}
1324
/*
 * i7core_get_onedevice	Find and perform 'get' operation on one of the MCH's
 *			device/functions we want to reference for this driver.
 *
 * Grabs the next matching PCI device for entry 'devno' of 'table',
 * attaches it to the per-socket i7core_dev (allocating one on first
 * use) and enables it. *prev carries the iteration cursor so the caller
 * can loop over multiple sockets exposing the same device id.
 *
 * Returns 0 on success or when an optional/already-exhausted device is
 * absent; -ENODEV on a required device missing or a sanity failure;
 * -ENOMEM if the socket descriptor cannot be allocated.
 */
static int i7core_get_onedevice(struct pci_dev **prev,
				const struct pci_id_table *table,
				const unsigned devno,
				const unsigned last_bus)
{
	struct i7core_dev *i7core_dev;
	const struct pci_id_descr *dev_descr = &table->descr[devno];

	struct pci_dev *pdev = NULL;
	u8 bus = 0;
	u8 socket = 0;

	pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
			      dev_descr->dev_id, *prev);

	/*
	 * On Xeon 55xx, the Intel Quckpath Arch Generic Non-core regs
	 * is at addr 8086:2c40, instead of 8086:2c41. So, we need
	 * to probe for the alternate address in case of failure
	 */
	if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_I7_NONCORE && !pdev)
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
				      PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT, *prev);

	if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE && !pdev)
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
				      PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT,
				      *prev);

	if (!pdev) {
		/* End of the multi-socket iteration: not an error */
		if (*prev) {
			*prev = pdev;
			return 0;
		}

		if (dev_descr->optional)
			return 0;

		/* devno 0 missing means the whole table does not apply */
		if (devno == 0)
			return -ENODEV;

		i7core_printk(KERN_INFO,
			"Device not found: dev %02x.%d PCI ID %04x:%04x\n",
			dev_descr->dev, dev_descr->func,
			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);

		/* End of list, leave */
		return -ENODEV;
	}
	bus = pdev->bus->number;

	/* Sockets are numbered from the last bus downward */
	socket = last_bus - bus;

	i7core_dev = get_i7core_dev(socket);
	if (!i7core_dev) {
		i7core_dev = alloc_i7core_dev(socket, table);
		if (!i7core_dev)
			return -ENOMEM;
	}

	if (i7core_dev->pdev[devno]) {
		i7core_printk(KERN_ERR,
			"Duplicated device for "
			"dev %02x:%02x.%d PCI ID %04x:%04x\n",
			bus, dev_descr->dev, dev_descr->func,
			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
		pci_dev_put(pdev);
		return -ENODEV;
	}

	i7core_dev->pdev[devno] = pdev;

	/* Sanity check */
	if (unlikely(PCI_SLOT(pdev->devfn) != dev_descr->dev ||
		     PCI_FUNC(pdev->devfn) != dev_descr->func)) {
		i7core_printk(KERN_ERR,
			"Device PCI ID %04x:%04x "
			"has dev %02x:%02x.%d instead of dev %02x:%02x.%d\n",
			PCI_VENDOR_ID_INTEL, dev_descr->dev_id,
			bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
			bus, dev_descr->dev, dev_descr->func);
		return -ENODEV;
	}

	/* Be sure that the device is enabled */
	if (unlikely(pci_enable_device(pdev) < 0)) {
		i7core_printk(KERN_ERR,
			"Couldn't enable "
			"dev %02x:%02x.%d PCI ID %04x:%04x\n",
			bus, dev_descr->dev, dev_descr->func,
			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
		return -ENODEV;
	}

	debugf0("Detected socket %d dev %02x:%02x.%d PCI ID %04x:%04x\n",
		socket, bus, dev_descr->dev,
		dev_descr->func,
		PCI_VENDOR_ID_INTEL, dev_descr->dev_id);

	*prev = pdev;

	return 0;
}
a0c36a1f 1434
/*
 * Walks every known PCI id table and grabs all matching devices on all
 * sockets. On a hard failure everything already acquired is released.
 * Returns 0 on success, -ENODEV otherwise.
 */
static int i7core_get_all_devices(void)
{
	int i, j, rc, last_bus;
	struct pci_dev *pdev = NULL;
	const struct pci_id_table *table;

	last_bus = i7core_pci_lastbus();

	for (j = 0; j < ARRAY_SIZE(pci_dev_table); j++) {
		table = &pci_dev_table[j];
		for (i = 0; i < table->n_devs; i++) {
			pdev = NULL;
			/* Inner loop gathers the same device on every socket;
			 * i7core_get_onedevice() clears pdev at the end. */
			do {
				rc = i7core_get_onedevice(&pdev, table, i,
							  last_bus);
				if (rc < 0) {
					/* Failing on the first entry means the
					 * table doesn't apply to this CPU:
					 * skip the whole table, not fatal. */
					if (i == 0) {
						i = table->n_devs;
						break;
					}
					i7core_put_all_devices();
					return -ENODEV;
				}
			} while (pdev);
		}
	}

	return 0;
}
1464
f4742949
MCC
/*
 * Caches the socket's PCI devices in pvt for fast access, keyed by PCI
 * slot/function: slot 3 -> pci_mcr[], slots 4..4+NUM_CHANS-1 ->
 * pci_ch[][], slot 0 func 0 -> pci_noncore. Also detects registered
 * DIMMs by the presence of dev 3 func 2.
 * Returns 0 on success, -EINVAL if a device is outside the known layout.
 */
static int mci_bind_devs(struct mem_ctl_info *mci,
			 struct i7core_dev *i7core_dev)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	struct pci_dev *pdev;
	int i, func, slot;

	/* Associates i7core_dev and mci for future usage */
	pvt->i7core_dev = i7core_dev;
	i7core_dev->mci = mci;

	pvt->is_registered = 0;
	for (i = 0; i < i7core_dev->n_devs; i++) {
		pdev = i7core_dev->pdev[i];
		if (!pdev)
			continue;

		func = PCI_FUNC(pdev->devfn);
		slot = PCI_SLOT(pdev->devfn);
		if (slot == 3) {
			if (unlikely(func > MAX_MCR_FUNC))
				goto error;
			pvt->pci_mcr[func] = pdev;
		} else if (likely(slot >= 4 && slot < 4 + NUM_CHANS)) {
			if (unlikely(func > MAX_CHAN_FUNC))
				goto error;
			pvt->pci_ch[slot - 4][func] = pdev;
		} else if (!slot && !func)
			pvt->pci_noncore = pdev;
		else
			goto error;

		debugf0("Associated fn %d.%d, dev = %p, socket %d\n",
			PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
			pdev, i7core_dev->socket);

		/* Dev 3 func 2 only exists on registered-DIMM machines */
		if (PCI_SLOT(pdev->devfn) == 3 &&
			PCI_FUNC(pdev->devfn) == 2)
			pvt->is_registered = 1;
	}

	return 0;

error:
	i7core_printk(KERN_ERR, "Device %d, function %d "
		      "is out of the expected range\n",
		      slot, func);
	return -EINVAL;
}
1514
442305b1
MCC
1515/****************************************************************************
1516 Error check routines
1517 ****************************************************************************/
f4742949 1518static void i7core_rdimm_update_csrow(struct mem_ctl_info *mci,
1288c18f
MCC
1519 const int chan,
1520 const int dimm,
1521 const int add)
b4e8f0b6
MCC
1522{
1523 char *msg;
1524 struct i7core_pvt *pvt = mci->pvt_info;
f4742949 1525 int row = pvt->csrow_map[chan][dimm], i;
b4e8f0b6
MCC
1526
1527 for (i = 0; i < add; i++) {
1528 msg = kasprintf(GFP_KERNEL, "Corrected error "
f4742949
MCC
1529 "(Socket=%d channel=%d dimm=%d)",
1530 pvt->i7core_dev->socket, chan, dimm);
b4e8f0b6
MCC
1531
1532 edac_mc_handle_fbd_ce(mci, row, 0, msg);
1533 kfree (msg);
1534 }
1535}
1536
/*
 * Updates the running corrected-error counters for one channel from the
 * freshly read per-dimm hardware values (new0..new2), compensating for
 * counter wrap-around, and forwards each delta to the EDAC core.
 * The first call only primes the "last seen" values.
 */
static void i7core_rdimm_update_ce_count(struct mem_ctl_info *mci,
					 const int chan,
					 const int new0,
					 const int new1,
					 const int new2)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	int add0 = 0, add1 = 0, add2 = 0;
	/* Updates CE counters if it is not the first time here */
	if (pvt->ce_count_available) {
		/* Updates CE counters */

		add2 = new2 - pvt->rdimm_last_ce_count[chan][2];
		add1 = new1 - pvt->rdimm_last_ce_count[chan][1];
		add0 = new0 - pvt->rdimm_last_ce_count[chan][0];

		/* Negative delta means the hardware counter wrapped.
		 * NOTE(review): 0x7fff suggests a 15-bit counter; whether
		 * the correct modulus is 0x7fff or 0x8000 should be
		 * confirmed against the datasheet. */
		if (add2 < 0)
			add2 += 0x7fff;
		pvt->rdimm_ce_count[chan][2] += add2;

		if (add1 < 0)
			add1 += 0x7fff;
		pvt->rdimm_ce_count[chan][1] += add1;

		if (add0 < 0)
			add0 += 0x7fff;
		pvt->rdimm_ce_count[chan][0] += add0;
	} else
		pvt->ce_count_available = 1;

	/* Store the new values */
	pvt->rdimm_last_ce_count[chan][2] = new2;
	pvt->rdimm_last_ce_count[chan][1] = new1;
	pvt->rdimm_last_ce_count[chan][0] = new0;

	/*updated the edac core */
	if (add0 != 0)
		i7core_rdimm_update_csrow(mci, chan, 0, add0);
	if (add1 != 0)
		i7core_rdimm_update_csrow(mci, chan, 1, add1);
	if (add2 != 0)
		i7core_rdimm_update_csrow(mci, chan, 2, add2);

}
1581
/*
 * Polls the six MC_COR_ECC_CNT registers (dev 3 func 2) and splits each
 * 32-bit value into per-dimm corrected error counts for the three
 * channels; layout differs when a channel carries 3 dimms.
 */
static void i7core_rdimm_check_mc_ecc_err(struct mem_ctl_info *mci)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	u32 rcv[3][2];
	int i, new0, new1, new2;

	/*Read DEV 3: FUN 2:  MC_COR_ECC_CNT regs directly*/
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_0,
								&rcv[0][0]);
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_1,
								&rcv[0][1]);
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_2,
								&rcv[1][0]);
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_3,
								&rcv[1][1]);
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_4,
								&rcv[2][0]);
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_5,
								&rcv[2][1]);
	for (i = 0 ; i < 3; i++) {
		debugf3("MC_COR_ECC_CNT%d = 0x%x; MC_COR_ECC_CNT%d = 0x%x\n",
			(i * 2), rcv[i][0], (i * 2) + 1, rcv[i][1]);
		/*if the channel has 3 dimms*/
		if (pvt->channel[i].dimms > 2) {
			new0 = DIMM_BOT_COR_ERR(rcv[i][0]);
			new1 = DIMM_TOP_COR_ERR(rcv[i][0]);
			new2 = DIMM_BOT_COR_ERR(rcv[i][1]);
		} else {
			/* 1-2 dimms: each register covers one dimm, both
			 * halves summed; no third dimm */
			new0 = DIMM_TOP_COR_ERR(rcv[i][0]) +
				DIMM_BOT_COR_ERR(rcv[i][0]);
			new1 = DIMM_TOP_COR_ERR(rcv[i][1]) +
				DIMM_BOT_COR_ERR(rcv[i][1]);
			new2 = 0;
		}

		i7core_rdimm_update_ce_count(mci, i, new0, new1, new2);
	}
}
442305b1
MCC
1620
1621/* This function is based on the device 3 function 4 registers as described on:
1622 * Intel Xeon Processor 5500 Series Datasheet Volume 2
1623 * http://www.intel.com/Assets/PDF/datasheet/321322.pdf
1624 * also available at:
1625 * http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
1626 */
f4742949 1627static void i7core_udimm_check_mc_ecc_err(struct mem_ctl_info *mci)
442305b1
MCC
1628{
1629 struct i7core_pvt *pvt = mci->pvt_info;
1630 u32 rcv1, rcv0;
1631 int new0, new1, new2;
1632
f4742949 1633 if (!pvt->pci_mcr[4]) {
b990538a 1634 debugf0("%s MCR registers not found\n", __func__);
442305b1
MCC
1635 return;
1636 }
1637
b4e8f0b6 1638 /* Corrected test errors */
f4742949
MCC
1639 pci_read_config_dword(pvt->pci_mcr[4], MC_TEST_ERR_RCV1, &rcv1);
1640 pci_read_config_dword(pvt->pci_mcr[4], MC_TEST_ERR_RCV0, &rcv0);
442305b1
MCC
1641
1642 /* Store the new values */
1643 new2 = DIMM2_COR_ERR(rcv1);
1644 new1 = DIMM1_COR_ERR(rcv0);
1645 new0 = DIMM0_COR_ERR(rcv0);
1646
442305b1 1647 /* Updates CE counters if it is not the first time here */
f4742949 1648 if (pvt->ce_count_available) {
442305b1
MCC
1649 /* Updates CE counters */
1650 int add0, add1, add2;
1651
f4742949
MCC
1652 add2 = new2 - pvt->udimm_last_ce_count[2];
1653 add1 = new1 - pvt->udimm_last_ce_count[1];
1654 add0 = new0 - pvt->udimm_last_ce_count[0];
442305b1
MCC
1655
1656 if (add2 < 0)
1657 add2 += 0x7fff;
f4742949 1658 pvt->udimm_ce_count[2] += add2;
442305b1
MCC
1659
1660 if (add1 < 0)
1661 add1 += 0x7fff;
f4742949 1662 pvt->udimm_ce_count[1] += add1;
442305b1
MCC
1663
1664 if (add0 < 0)
1665 add0 += 0x7fff;
f4742949 1666 pvt->udimm_ce_count[0] += add0;
b4e8f0b6
MCC
1667
1668 if (add0 | add1 | add2)
1669 i7core_printk(KERN_ERR, "New Corrected error(s): "
1670 "dimm0: +%d, dimm1: +%d, dimm2 +%d\n",
1671 add0, add1, add2);
442305b1 1672 } else
f4742949 1673 pvt->ce_count_available = 1;
442305b1
MCC
1674
1675 /* Store the new values */
f4742949
MCC
1676 pvt->udimm_last_ce_count[2] = new2;
1677 pvt->udimm_last_ce_count[1] = new1;
1678 pvt->udimm_last_ce_count[0] = new0;
442305b1
MCC
1679}
1680
8a2f118e
MCC
1681/*
1682 * According with tables E-11 and E-12 of chapter E.3.3 of Intel 64 and IA-32
1683 * Architectures Software Developer’s Manual Volume 3B.
f237fcf2
MCC
1684 * Nehalem are defined as family 0x06, model 0x1a
1685 *
1686 * The MCA registers used here are the following ones:
8a2f118e 1687 * struct mce field MCA Register
f237fcf2
MCC
1688 * m->status MSR_IA32_MC8_STATUS
1689 * m->addr MSR_IA32_MC8_ADDR
1690 * m->misc MSR_IA32_MC8_MISC
8a2f118e
MCC
1691 * In the case of Nehalem, the error information is masked at .status and .misc
1692 * fields
1693 */
d5381642 1694static void i7core_mce_output_error(struct mem_ctl_info *mci,
1288c18f 1695 const struct mce *m)
d5381642 1696{
b4e8f0b6 1697 struct i7core_pvt *pvt = mci->pvt_info;
a639539f 1698 char *type, *optype, *err, *msg;
8a2f118e 1699 unsigned long error = m->status & 0x1ff0000l;
a639539f 1700 u32 optypenum = (m->status >> 4) & 0x07;
8a2f118e
MCC
1701 u32 core_err_cnt = (m->status >> 38) && 0x7fff;
1702 u32 dimm = (m->misc >> 16) & 0x3;
1703 u32 channel = (m->misc >> 18) & 0x3;
1704 u32 syndrome = m->misc >> 32;
1705 u32 errnum = find_first_bit(&error, 32);
b4e8f0b6 1706 int csrow;
8a2f118e 1707
c5d34528
MCC
1708 if (m->mcgstatus & 1)
1709 type = "FATAL";
1710 else
1711 type = "NON_FATAL";
1712
a639539f 1713 switch (optypenum) {
b990538a
MCC
1714 case 0:
1715 optype = "generic undef request";
1716 break;
1717 case 1:
1718 optype = "read error";
1719 break;
1720 case 2:
1721 optype = "write error";
1722 break;
1723 case 3:
1724 optype = "addr/cmd error";
1725 break;
1726 case 4:
1727 optype = "scrubbing error";
1728 break;
1729 default:
1730 optype = "reserved";
1731 break;
a639539f
MCC
1732 }
1733
8a2f118e
MCC
1734 switch (errnum) {
1735 case 16:
1736 err = "read ECC error";
1737 break;
1738 case 17:
1739 err = "RAS ECC error";
1740 break;
1741 case 18:
1742 err = "write parity error";
1743 break;
1744 case 19:
1745 err = "redundacy loss";
1746 break;
1747 case 20:
1748 err = "reserved";
1749 break;
1750 case 21:
1751 err = "memory range error";
1752 break;
1753 case 22:
1754 err = "RTID out of range";
1755 break;
1756 case 23:
1757 err = "address parity error";
1758 break;
1759 case 24:
1760 err = "byte enable parity error";
1761 break;
1762 default:
1763 err = "unknown";
d5381642 1764 }
d5381642 1765
f237fcf2 1766 /* FIXME: should convert addr into bank and rank information */
8a2f118e 1767 msg = kasprintf(GFP_ATOMIC,
f4742949 1768 "%s (addr = 0x%08llx, cpu=%d, Dimm=%d, Channel=%d, "
a639539f 1769 "syndrome=0x%08x, count=%d, Err=%08llx:%08llx (%s: %s))\n",
f4742949 1770 type, (long long) m->addr, m->cpu, dimm, channel,
a639539f
MCC
1771 syndrome, core_err_cnt, (long long)m->status,
1772 (long long)m->misc, optype, err);
8a2f118e
MCC
1773
1774 debugf0("%s", msg);
d5381642 1775
f4742949 1776 csrow = pvt->csrow_map[channel][dimm];
b4e8f0b6 1777
d5381642 1778 /* Call the helper to output message */
b4e8f0b6
MCC
1779 if (m->mcgstatus & 1)
1780 edac_mc_handle_fbd_ue(mci, csrow, 0,
1781 0 /* FIXME: should be channel here */, msg);
f4742949 1782 else if (!pvt->is_registered)
b4e8f0b6
MCC
1783 edac_mc_handle_fbd_ce(mci, csrow,
1784 0 /* FIXME: should be channel here */, msg);
8a2f118e
MCC
1785
1786 kfree(msg);
d5381642
MCC
1787}
1788
87d1d272
MCC
/*
 * i7core_check_error	Retrieve and process errors reported by the
 *			hardware. Called by the Core module.
 *
 * Drains the lock-free MCE ring buffer filled by
 * i7core_mce_check_error() (possibly at NMI time) into mce_outentry,
 * decodes each entry, then polls the corrected-error counters.
 * The smp_rmb/smp_wmb pairs order the index updates against the data
 * copies with respect to the producer.
 */
static void i7core_check_error(struct mem_ctl_info *mci)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	int i;
	unsigned count = 0;
	struct mce *m;

	/*
	 * MCE first step: Copy all mce errors into a temporary buffer
	 * We use a double buffering here, to reduce the risk of
	 * loosing an error.
	 */
	smp_rmb();
	count = (pvt->mce_out + MCE_LOG_LEN - pvt->mce_in)
		% MCE_LOG_LEN;
	if (!count)
		goto check_ce_error;

	m = pvt->mce_outentry;
	/* Ring wraps: copy the tail segment first, then restart at 0 */
	if (pvt->mce_in + count > MCE_LOG_LEN) {
		unsigned l = MCE_LOG_LEN - pvt->mce_in;

		memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * l);
		smp_wmb();
		pvt->mce_in = 0;
		count -= l;
		m += l;
	}
	memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * count);
	smp_wmb();
	pvt->mce_in += count;

	smp_rmb();
	if (pvt->mce_overrun) {
		i7core_printk(KERN_ERR, "Lost %d memory errors\n",
			      pvt->mce_overrun);
		smp_wmb();
		pvt->mce_overrun = 0;
	}

	/*
	 * MCE second step: parse errors and display
	 */
	for (i = 0; i < count; i++)
		i7core_mce_output_error(mci, &pvt->mce_outentry[i]);

	/*
	 * Now, let's increment CE error counts
	 */
check_ce_error:
	if (!pvt->is_registered)
		i7core_udimm_check_mc_ecc_err(mci);
	else
		i7core_rdimm_check_mc_ecc_err(mci);
}
1848
d5381642
MCC
/*
 * i7core_mce_check_error	Replicates mcelog routine to get errors
 *				This routine simply queues mcelog errors, and
 *				return. The error itself should be handled later
 *				by i7core_check_error.
 * WARNING: As this routine should be called at NMI time, extra care should
 * be taken to avoid deadlocks, and to be as fast as possible.
 *
 * Returns 1 when the error was queued (telling mcelog it is handled),
 * 0 when the event is not for this memory controller.
 */
static int i7core_mce_check_error(void *priv, struct mce *mce)
{
	struct mem_ctl_info *mci = priv;
	struct i7core_pvt *pvt = mci->pvt_info;

	/*
	 * Just let mcelog handle it if the error is
	 * outside the memory controller
	 */
	if (((mce->status & 0xffff) >> 7) != 1)
		return 0;

	/* Bank 8 registers are the only ones that we know how to handle */
	if (mce->bank != 8)
		return 0;

#ifdef CONFIG_SMP
	/* Only handle if it is the right mc controller */
	if (cpu_data(mce->cpu).phys_proc_id != pvt->i7core_dev->socket)
		return 0;
#endif

	/* Producer side of the lock-free ring: bail out (and count the
	 * loss) when the buffer is full rather than blocking in NMI. */
	smp_rmb();
	if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) {
		smp_wmb();
		pvt->mce_overrun++;
		return 0;
	}

	/* Copy memory error at the ringbuffer */
	memcpy(&pvt->mce_entry[pvt->mce_out], mce, sizeof(*mce));
	smp_wmb();
	pvt->mce_out = (pvt->mce_out + 1) % MCE_LOG_LEN;

	/* Handle fatal errors immediately */
	if (mce->mcgstatus & 1)
		i7core_check_error(mci);

	/* Advice mcelog that the error were handled */
	return 1;
}
1898
a3aa0a4a
HS
1899static void i7core_pci_ctl_create(struct i7core_pvt *pvt)
1900{
1901 pvt->i7core_pci = edac_pci_create_generic_ctl(
1902 &pvt->i7core_dev->pdev[0]->dev,
1903 EDAC_MOD_STR);
1904 if (unlikely(!pvt->i7core_pci))
1905 pr_warn("Unable to setup PCI error report via EDAC\n");
1906}
1907
1908static void i7core_pci_ctl_release(struct i7core_pvt *pvt)
1909{
1910 if (likely(pvt->i7core_pci))
1911 edac_pci_release_generic_ctl(pvt->i7core_pci);
1912 else
1913 i7core_printk(KERN_ERR,
1914 "Couldn't find mem_ctl_info for socket %d\n",
1915 pvt->i7core_dev->socket);
1916 pvt->i7core_pci = NULL;
1917}
1918
/*
 * i7core_register_mci() - allocate, configure and register one EDAC MC
 * @i7core_dev:	  per-socket group of PCI devices found at probe time
 * @num_channels: number of active memory channels on this socket
 * @num_csrows:	  number of chip-select rows to allocate
 *
 * Returns 0 on success or a negative errno.  Ordering below is deliberate:
 * mci_bind_devs() fills pvt (including pvt->is_registered) and therefore
 * runs before the sysfs attribute choice; mci->dev and mci->edac_check are
 * set before edac_mc_add_mc() registers the controller with the EDAC core.
 */
static int i7core_register_mci(struct i7core_dev *i7core_dev,
			       const int num_channels, const int num_csrows)
{
	struct mem_ctl_info *mci;
	struct i7core_pvt *pvt;
	int csrow = 0;
	int rc;

	/* allocate a new MC control structure */
	mci = edac_mc_alloc(sizeof(*pvt), num_csrows, num_channels,
			    i7core_dev->socket);
	if (unlikely(!mci))
		return -ENOMEM;

	debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n",
		__func__, mci, &i7core_dev->pdev[0]->dev);

	/* pvt lives inside the edac_mc_alloc()ed area; start from a clean slate */
	pvt = mci->pvt_info;
	memset(pvt, 0, sizeof(*pvt));

	/*
	 * FIXME: how to handle RDDR3 at MCI level? It is possible to have
	 * Mixed RDDR3/UDDR3 with Nehalem, provided that they are on different
	 * memory channels
	 */
	mci->mtype_cap = MEM_FLAG_DDR3;
	mci->edac_ctl_cap = EDAC_FLAG_NONE;
	mci->edac_cap = EDAC_FLAG_NONE;
	mci->mod_name = "i7core_edac.c";
	mci->mod_ver = I7CORE_REVISION;
	mci->ctl_name = kasprintf(GFP_KERNEL, "i7 core #%d",
				  i7core_dev->socket);
	mci->dev_name = pci_name(i7core_dev->pdev[0]);
	mci->ctl_page_to_phys = NULL;

	/* Store pci devices at mci for faster access */
	rc = mci_bind_devs(mci, i7core_dev);
	if (unlikely(rc < 0))
		goto fail;

	/*
	 * pvt->is_registered was zeroed above; mci_bind_devs() is the only
	 * call since, so it determines which sysfs attribute set (rdimm vs
	 * udimm, presumably registered vs unbuffered DIMMs) is exposed.
	 */
	if (pvt->is_registered)
		mci->mc_driver_sysfs_attributes = i7core_sysfs_rdimm_attrs;
	else
		mci->mc_driver_sysfs_attributes = i7core_sysfs_udimm_attrs;

	/* Get dimm basic config */
	get_dimm_config(mci, &csrow);
	/* record ptr to the generic device */
	mci->dev = &i7core_dev->pdev[0]->dev;
	/* Set the function pointer to an actual operation function */
	mci->edac_check = i7core_check_error;

	/* add this new MC control structure to EDAC's list of MCs */
	if (unlikely(edac_mc_add_mc(mci))) {
		debugf0("MC: " __FILE__
			": %s(): failed edac_mc_add_mc()\n", __func__);
		/* FIXME: perhaps some code should go here that disables error
		 * reporting if we just enabled it
		 */

		rc = -EINVAL;
		goto fail;
	}

	/* Default error mask is any memory */
	pvt->inject.channel = 0;
	pvt->inject.dimm = -1;
	pvt->inject.rank = -1;
	pvt->inject.bank = -1;
	pvt->inject.page = -1;
	pvt->inject.col = -1;

	/* allocating generic PCI control info */
	i7core_pci_ctl_create(pvt);

	/* Registers on edac_mce in order to receive memory errors */
	pvt->edac_mce.priv = mci;
	pvt->edac_mce.check_error = i7core_mce_check_error;
	rc = edac_mce_register(&pvt->edac_mce);
	if (unlikely(rc < 0)) {
		debugf0("MC: " __FILE__
			": %s(): failed edac_mce_register()\n", __func__);
	}

fail:
	/*
	 * NOTE(review): if edac_mce_register() fails after edac_mc_add_mc()
	 * succeeded, the mci is freed without a matching edac_mc_del_mc()
	 * and the control from i7core_pci_ctl_create() is never released —
	 * TODO confirm and unwind those resources on this path.
	 */
	if (rc < 0)
		edac_mc_free(mci);
	return rc;
}
2008
2009/*
2010 * i7core_probe Probe for ONE instance of device to see if it is
2011 * present.
2012 * return:
2013 * 0 for FOUND a device
2014 * < 0 for error code
2015 */
2d95d815 2016
f4742949
MCC
2017static int __devinit i7core_probe(struct pci_dev *pdev,
2018 const struct pci_device_id *id)
2019{
f4742949
MCC
2020 int rc;
2021 struct i7core_dev *i7core_dev;
2022
2d95d815
MCC
2023 /* get the pci devices we want to reserve for our use */
2024 mutex_lock(&i7core_edac_lock);
2025
f4742949 2026 /*
d4c27795 2027 * All memory controllers are allocated at the first pass.
f4742949 2028 */
2d95d815
MCC
2029 if (unlikely(probed >= 1)) {
2030 mutex_unlock(&i7core_edac_lock);
f4742949 2031 return -EINVAL;
2d95d815
MCC
2032 }
2033 probed++;
de06eeef 2034
64c10f6e 2035 rc = i7core_get_all_devices();
f4742949
MCC
2036 if (unlikely(rc < 0))
2037 goto fail0;
2038
2039 list_for_each_entry(i7core_dev, &i7core_edac_list, list) {
2040 int channels;
2041 int csrows;
2042
2043 /* Check the number of active and not disabled channels */
2044 rc = i7core_get_active_channels(i7core_dev->socket,
2045 &channels, &csrows);
2046 if (unlikely(rc < 0))
2047 goto fail1;
2048
d4c27795
MCC
2049 rc = i7core_register_mci(i7core_dev, channels, csrows);
2050 if (unlikely(rc < 0))
2051 goto fail1;
d5381642
MCC
2052 }
2053
ef708b53 2054 i7core_printk(KERN_INFO, "Driver loaded.\n");
8f331907 2055
66607706 2056 mutex_unlock(&i7core_edac_lock);
a0c36a1f
MCC
2057 return 0;
2058
66607706 2059fail1:
13d6e9b6 2060 i7core_put_all_devices();
66607706
MCC
2061fail0:
2062 mutex_unlock(&i7core_edac_lock);
b7c76151 2063 return rc;
a0c36a1f
MCC
2064}
2065
2066/*
2067 * i7core_remove destructor for one instance of device
2068 *
2069 */
2070static void __devexit i7core_remove(struct pci_dev *pdev)
2071{
2072 struct mem_ctl_info *mci;
64c10f6e 2073 struct i7core_dev *i7core_dev;
939747bd 2074 struct i7core_pvt *pvt;
a0c36a1f
MCC
2075
2076 debugf0(__FILE__ ": %s()\n", __func__);
2077
22e6bcbd
MCC
2078 /*
2079 * we have a trouble here: pdev value for removal will be wrong, since
2080 * it will point to the X58 register used to detect that the machine
2081 * is a Nehalem or upper design. However, due to the way several PCI
2082 * devices are grouped together to provide MC functionality, we need
2083 * to use a different method for releasing the devices
2084 */
87d1d272 2085
66607706 2086 mutex_lock(&i7core_edac_lock);
64c10f6e 2087 list_for_each_entry(i7core_dev, &i7core_edac_list, list) {
939747bd
MCC
2088 mci = find_mci_by_dev(&i7core_dev->pdev[0]->dev);
2089 if (unlikely(!mci || !mci->pvt_info)) {
3cfd0146
MCC
2090 debugf0("MC: " __FILE__ ": %s(): dev = %p\n",
2091 __func__, &i7core_dev->pdev[0]->dev);
2092
2093 i7core_printk(KERN_ERR,
939747bd
MCC
2094 "Couldn't find mci hanler\n");
2095 } else {
2096 pvt = mci->pvt_info;
22e6bcbd 2097 i7core_dev = pvt->i7core_dev;
939747bd 2098
3cfd0146
MCC
2099 debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n",
2100 __func__, mci, &i7core_dev->pdev[0]->dev);
2101
41ba6c10
MCC
2102 /* Disable MCE NMI handler */
2103 edac_mce_unregister(&pvt->edac_mce);
2104
2105 /* Disable EDAC polling */
a3aa0a4a 2106 i7core_pci_ctl_release(pvt);
939747bd 2107
41ba6c10 2108 /* Remove MC sysfs nodes */
939747bd
MCC
2109 edac_mc_del_mc(&i7core_dev->pdev[0]->dev);
2110
accf74ff 2111 debugf1("%s: free mci struct\n", mci->ctl_name);
22e6bcbd
MCC
2112 kfree(mci->ctl_name);
2113 edac_mc_free(mci);
22e6bcbd
MCC
2114 }
2115 }
64c10f6e
HS
2116
2117 /* Release PCI resources */
2118 i7core_put_all_devices();
2119
2d95d815
MCC
2120 probed--;
2121
66607706 2122 mutex_unlock(&i7core_edac_lock);
a0c36a1f
MCC
2123}
2124
a0c36a1f
MCC
/* Export the PCI ID table so module autoloading can match the hardware */
MODULE_DEVICE_TABLE(pci, i7core_pci_tbl);

/*
 * i7core_driver	pci_driver structure for this module
 *			Binds i7core_probe()/i7core_remove() to the devices
 *			listed in i7core_pci_tbl.
 */
static struct pci_driver i7core_driver = {
	.name     = "i7core_edac",
	.probe    = i7core_probe,
	.remove   = __devexit_p(i7core_remove),
	.id_table = i7core_pci_tbl,
};
2137
2138/*
2139 * i7core_init Module entry function
2140 * Try to initialize this module for its devices
2141 */
2142static int __init i7core_init(void)
2143{
2144 int pci_rc;
2145
2146 debugf2("MC: " __FILE__ ": %s()\n", __func__);
2147
2148 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
2149 opstate_init();
2150
54a08ab1
MCC
2151 if (use_pci_fixup)
2152 i7core_xeon_pci_fixup(pci_dev_table);
bc2d7245 2153
a0c36a1f
MCC
2154 pci_rc = pci_register_driver(&i7core_driver);
2155
3ef288a9
MCC
2156 if (pci_rc >= 0)
2157 return 0;
2158
2159 i7core_printk(KERN_ERR, "Failed to register device with error %d.\n",
2160 pci_rc);
2161
2162 return pci_rc;
a0c36a1f
MCC
2163}
2164
2165/*
2166 * i7core_exit() Module exit function
2167 * Unregister the driver
2168 */
static void __exit i7core_exit(void)
{
	debugf2("MC: " __FILE__ ": %s()\n", __func__);
	/* The PCI core will invoke i7core_remove() for each bound device */
	pci_unregister_driver(&i7core_driver);
}
2174
/* Module plumbing: entry/exit points and metadata */
module_init(i7core_init);
module_exit(i7core_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
MODULE_DESCRIPTION("MC Driver for Intel i7 Core memory controllers - "
		   I7CORE_REVISION);

/* Error-reporting mode, consumed via opstate_init() at load time */
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");