i7core_edac: Call pci_dev_put() when alloc_i7core_dev() failed
[GitHub/LineageOS/android_kernel_samsung_universal7580.git] / drivers / edac / i7core_edac.c
CommitLineData
52707f91
MCC
1/* Intel i7 core/Nehalem Memory Controller kernel module
2 *
 3 * This driver supports the memory controllers found on the Intel
4 * processor families i7core, i7core 7xx/8xx, i5core, Xeon 35xx,
5 * Xeon 55xx and Xeon 56xx also known as Nehalem, Nehalem-EP, Lynnfield
6 * and Westmere-EP.
a0c36a1f
MCC
7 *
8 * This file may be distributed under the terms of the
9 * GNU General Public License version 2 only.
10 *
52707f91 11 * Copyright (c) 2009-2010 by:
a0c36a1f
MCC
12 * Mauro Carvalho Chehab <mchehab@redhat.com>
13 *
14 * Red Hat Inc. http://www.redhat.com
15 *
16 * Forked and adapted from the i5400_edac driver
17 *
18 * Based on the following public Intel datasheets:
19 * Intel Core i7 Processor Extreme Edition and Intel Core i7 Processor
20 * Datasheet, Volume 2:
21 * http://download.intel.com/design/processor/datashts/320835.pdf
22 * Intel Xeon Processor 5500 Series Datasheet Volume 2
23 * http://www.intel.com/Assets/PDF/datasheet/321322.pdf
24 * also available at:
25 * http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
26 */
27
a0c36a1f
MCC
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/pci.h>
31#include <linux/pci_ids.h>
32#include <linux/slab.h>
3b918c12 33#include <linux/delay.h>
a0c36a1f
MCC
34#include <linux/edac.h>
35#include <linux/mmzone.h>
d5381642 36#include <linux/edac_mce.h>
f4742949 37#include <linux/smp.h>
14d2c083 38#include <asm/processor.h>
a0c36a1f
MCC
39
40#include "edac_core.h"
41
18c29002
MCC
42/* Static vars */
43static LIST_HEAD(i7core_edac_list);
44static DEFINE_MUTEX(i7core_edac_lock);
45static int probed;
46
54a08ab1
MCC
47static int use_pci_fixup;
48module_param(use_pci_fixup, int, 0444);
49MODULE_PARM_DESC(use_pci_fixup, "Enable PCI fixup to seek for hidden devices");
f4742949
MCC
50/*
51 * This is used for Nehalem-EP and Nehalem-EX devices, where the non-core
52 * registers start at bus 255, and are not reported by BIOS.
53 * We currently find devices with only 2 sockets. In order to support more QPI
54 * Quick Path Interconnect, just increment this number.
55 */
56#define MAX_SOCKET_BUSES 2
57
58
a0c36a1f
MCC
59/*
60 * Alter this version for the module when modifications are made
61 */
62#define I7CORE_REVISION " Ver: 1.0.0 " __DATE__
63#define EDAC_MOD_STR "i7core_edac"
64
a0c36a1f
MCC
65/*
66 * Debug macros
67 */
68#define i7core_printk(level, fmt, arg...) \
69 edac_printk(level, "i7core", fmt, ##arg)
70
71#define i7core_mc_printk(mci, level, fmt, arg...) \
72 edac_mc_chipset_printk(mci, level, "i7core", fmt, ##arg)
73
74/*
75 * i7core Memory Controller Registers
76 */
77
e9bd2e73
MCC
78 /* OFFSETS for Device 0 Function 0 */
79
80#define MC_CFG_CONTROL 0x90
81
a0c36a1f
MCC
82 /* OFFSETS for Device 3 Function 0 */
83
84#define MC_CONTROL 0x48
85#define MC_STATUS 0x4c
86#define MC_MAX_DOD 0x64
87
442305b1
MCC
88/*
 89 * OFFSETS for Device 3 Function 4, as indicated on Xeon 5500 datasheet:
90 * http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
91 */
92
93#define MC_TEST_ERR_RCV1 0x60
94 #define DIMM2_COR_ERR(r) ((r) & 0x7fff)
95
96#define MC_TEST_ERR_RCV0 0x64
97 #define DIMM1_COR_ERR(r) (((r) >> 16) & 0x7fff)
98 #define DIMM0_COR_ERR(r) ((r) & 0x7fff)
99
b4e8f0b6
MCC
100/* OFFSETS for Device 3 Function 2, as indicated on Xeon 5500 datasheet */
101#define MC_COR_ECC_CNT_0 0x80
102#define MC_COR_ECC_CNT_1 0x84
103#define MC_COR_ECC_CNT_2 0x88
104#define MC_COR_ECC_CNT_3 0x8c
105#define MC_COR_ECC_CNT_4 0x90
106#define MC_COR_ECC_CNT_5 0x94
107
108#define DIMM_TOP_COR_ERR(r) (((r) >> 16) & 0x7fff)
109#define DIMM_BOT_COR_ERR(r) ((r) & 0x7fff)
110
111
a0c36a1f
MCC
112 /* OFFSETS for Devices 4,5 and 6 Function 0 */
113
0b2b7b7e
MCC
114#define MC_CHANNEL_DIMM_INIT_PARAMS 0x58
115 #define THREE_DIMMS_PRESENT (1 << 24)
116 #define SINGLE_QUAD_RANK_PRESENT (1 << 23)
117 #define QUAD_RANK_PRESENT (1 << 22)
118 #define REGISTERED_DIMM (1 << 15)
119
f122a892
MCC
120#define MC_CHANNEL_MAPPER 0x60
121 #define RDLCH(r, ch) ((((r) >> (3 + (ch * 6))) & 0x07) - 1)
122 #define WRLCH(r, ch) ((((r) >> (ch * 6)) & 0x07) - 1)
123
0b2b7b7e
MCC
124#define MC_CHANNEL_RANK_PRESENT 0x7c
125 #define RANK_PRESENT_MASK 0xffff
126
a0c36a1f 127#define MC_CHANNEL_ADDR_MATCH 0xf0
194a40fe
MCC
128#define MC_CHANNEL_ERROR_MASK 0xf8
129#define MC_CHANNEL_ERROR_INJECT 0xfc
130 #define INJECT_ADDR_PARITY 0x10
131 #define INJECT_ECC 0x08
132 #define MASK_CACHELINE 0x06
133 #define MASK_FULL_CACHELINE 0x06
134 #define MASK_MSB32_CACHELINE 0x04
135 #define MASK_LSB32_CACHELINE 0x02
136 #define NO_MASK_CACHELINE 0x00
137 #define REPEAT_EN 0x01
a0c36a1f 138
0b2b7b7e 139 /* OFFSETS for Devices 4,5 and 6 Function 1 */
b990538a 140
0b2b7b7e
MCC
141#define MC_DOD_CH_DIMM0 0x48
142#define MC_DOD_CH_DIMM1 0x4c
143#define MC_DOD_CH_DIMM2 0x50
144 #define RANKOFFSET_MASK ((1 << 12) | (1 << 11) | (1 << 10))
145 #define RANKOFFSET(x) ((x & RANKOFFSET_MASK) >> 10)
146 #define DIMM_PRESENT_MASK (1 << 9)
147 #define DIMM_PRESENT(x) (((x) & DIMM_PRESENT_MASK) >> 9)
854d3349
MCC
148 #define MC_DOD_NUMBANK_MASK ((1 << 8) | (1 << 7))
149 #define MC_DOD_NUMBANK(x) (((x) & MC_DOD_NUMBANK_MASK) >> 7)
150 #define MC_DOD_NUMRANK_MASK ((1 << 6) | (1 << 5))
151 #define MC_DOD_NUMRANK(x) (((x) & MC_DOD_NUMRANK_MASK) >> 5)
41fcb7fe 152 #define MC_DOD_NUMROW_MASK ((1 << 4) | (1 << 3) | (1 << 2))
5566cb7c 153 #define MC_DOD_NUMROW(x) (((x) & MC_DOD_NUMROW_MASK) >> 2)
854d3349
MCC
154 #define MC_DOD_NUMCOL_MASK 3
155 #define MC_DOD_NUMCOL(x) ((x) & MC_DOD_NUMCOL_MASK)
0b2b7b7e 156
f122a892
MCC
157#define MC_RANK_PRESENT 0x7c
158
0b2b7b7e
MCC
159#define MC_SAG_CH_0 0x80
160#define MC_SAG_CH_1 0x84
161#define MC_SAG_CH_2 0x88
162#define MC_SAG_CH_3 0x8c
163#define MC_SAG_CH_4 0x90
164#define MC_SAG_CH_5 0x94
165#define MC_SAG_CH_6 0x98
166#define MC_SAG_CH_7 0x9c
167
168#define MC_RIR_LIMIT_CH_0 0x40
169#define MC_RIR_LIMIT_CH_1 0x44
170#define MC_RIR_LIMIT_CH_2 0x48
171#define MC_RIR_LIMIT_CH_3 0x4C
172#define MC_RIR_LIMIT_CH_4 0x50
173#define MC_RIR_LIMIT_CH_5 0x54
174#define MC_RIR_LIMIT_CH_6 0x58
175#define MC_RIR_LIMIT_CH_7 0x5C
176#define MC_RIR_LIMIT_MASK ((1 << 10) - 1)
177
178#define MC_RIR_WAY_CH 0x80
179 #define MC_RIR_WAY_OFFSET_MASK (((1 << 14) - 1) & ~0x7)
180 #define MC_RIR_WAY_RANK_MASK 0x7
181
a0c36a1f
MCC
182/*
183 * i7core structs
184 */
185
186#define NUM_CHANS 3
442305b1
MCC
187#define MAX_DIMMS 3 /* Max DIMMS per channel */
188#define MAX_MCR_FUNC 4
189#define MAX_CHAN_FUNC 3
a0c36a1f
MCC
190
191struct i7core_info {
192 u32 mc_control;
193 u32 mc_status;
194 u32 max_dod;
f122a892 195 u32 ch_map;
a0c36a1f
MCC
196};
197
194a40fe
MCC
198
199struct i7core_inject {
200 int enable;
201
202 u32 section;
203 u32 type;
204 u32 eccmask;
205
206 /* Error address mask */
207 int channel, dimm, rank, bank, page, col;
208};
209
0b2b7b7e 210struct i7core_channel {
442305b1
MCC
211 u32 ranks;
212 u32 dimms;
0b2b7b7e
MCC
213};
214
8f331907 215struct pci_id_descr {
66607706
MCC
216 int dev;
217 int func;
218 int dev_id;
de06eeef 219 int optional;
8f331907
MCC
220};
221
bd9e19ca 222struct pci_id_table {
1288c18f
MCC
223 const struct pci_id_descr *descr;
224 int n_devs;
bd9e19ca
VM
225};
226
f4742949
MCC
227struct i7core_dev {
228 struct list_head list;
229 u8 socket;
230 struct pci_dev **pdev;
de06eeef 231 int n_devs;
f4742949
MCC
232 struct mem_ctl_info *mci;
233};
234
a0c36a1f 235struct i7core_pvt {
f4742949
MCC
236 struct pci_dev *pci_noncore;
237 struct pci_dev *pci_mcr[MAX_MCR_FUNC + 1];
238 struct pci_dev *pci_ch[NUM_CHANS][MAX_CHAN_FUNC + 1];
239
240 struct i7core_dev *i7core_dev;
67166af4 241
a0c36a1f 242 struct i7core_info info;
194a40fe 243 struct i7core_inject inject;
f4742949 244 struct i7core_channel channel[NUM_CHANS];
67166af4 245
f4742949 246 int channels; /* Number of active channels */
442305b1 247
f4742949
MCC
248 int ce_count_available;
249 int csrow_map[NUM_CHANS][MAX_DIMMS];
b4e8f0b6
MCC
250
251 /* ECC corrected errors counts per udimm */
f4742949
MCC
252 unsigned long udimm_ce_count[MAX_DIMMS];
253 int udimm_last_ce_count[MAX_DIMMS];
b4e8f0b6 254 /* ECC corrected errors counts per rdimm */
f4742949
MCC
255 unsigned long rdimm_ce_count[NUM_CHANS][MAX_DIMMS];
256 int rdimm_last_ce_count[NUM_CHANS][MAX_DIMMS];
442305b1 257
f4742949 258 unsigned int is_registered;
14d2c083 259
d5381642
MCC
260 /* mcelog glue */
261 struct edac_mce edac_mce;
ca9c90ba
MCC
262
263 /* Fifo double buffers */
d5381642 264 struct mce mce_entry[MCE_LOG_LEN];
ca9c90ba
MCC
265 struct mce mce_outentry[MCE_LOG_LEN];
266
267 /* Fifo in/out counters */
268 unsigned mce_in, mce_out;
269
270 /* Count indicator to show errors not got */
271 unsigned mce_overrun;
939747bd
MCC
272
273 /* Struct to control EDAC polling */
274 struct edac_pci_ctl_info *i7core_pci;
a0c36a1f
MCC
275};
276
8f331907
MCC
277#define PCI_DESCR(device, function, device_id) \
278 .dev = (device), \
279 .func = (function), \
280 .dev_id = (device_id)
281
1288c18f 282static const struct pci_id_descr pci_dev_descr_i7core_nehalem[] = {
8f331907
MCC
283 /* Memory controller */
284 { PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_I7_MCR) },
285 { PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_I7_MC_TAD) },
de06eeef
MCC
286 /* Exists only for RDIMM */
287 { PCI_DESCR(3, 2, PCI_DEVICE_ID_INTEL_I7_MC_RAS), .optional = 1 },
8f331907
MCC
288 { PCI_DESCR(3, 4, PCI_DEVICE_ID_INTEL_I7_MC_TEST) },
289
290 /* Channel 0 */
291 { PCI_DESCR(4, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH0_CTRL) },
292 { PCI_DESCR(4, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH0_ADDR) },
293 { PCI_DESCR(4, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH0_RANK) },
294 { PCI_DESCR(4, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH0_TC) },
295
296 /* Channel 1 */
297 { PCI_DESCR(5, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH1_CTRL) },
298 { PCI_DESCR(5, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH1_ADDR) },
299 { PCI_DESCR(5, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH1_RANK) },
300 { PCI_DESCR(5, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH1_TC) },
301
302 /* Channel 2 */
303 { PCI_DESCR(6, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH2_CTRL) },
304 { PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH2_ADDR) },
305 { PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH2_RANK) },
306 { PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH2_TC) },
310cbb72
MCC
307
308 /* Generic Non-core registers */
309 /*
310 * This is the PCI device on i7core and on Xeon 35xx (8086:2c41)
311 * On Xeon 55xx, however, it has a different id (8086:2c40). So,
312 * the probing code needs to test for the other address in case of
313 * failure of this one
314 */
fd382654 315 { PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_I7_NONCORE) },
310cbb72 316
a0c36a1f 317};
8f331907 318
1288c18f 319static const struct pci_id_descr pci_dev_descr_lynnfield[] = {
52a2e4fc
MCC
320 { PCI_DESCR( 3, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR) },
321 { PCI_DESCR( 3, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD) },
322 { PCI_DESCR( 3, 4, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST) },
323
324 { PCI_DESCR( 4, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL) },
325 { PCI_DESCR( 4, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR) },
326 { PCI_DESCR( 4, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK) },
327 { PCI_DESCR( 4, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC) },
328
508fa179
MCC
329 { PCI_DESCR( 5, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL) },
330 { PCI_DESCR( 5, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR) },
331 { PCI_DESCR( 5, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK) },
332 { PCI_DESCR( 5, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC) },
52a2e4fc 333
f05da2f7
MCC
334 /*
335 * This is the PCI device has an alternate address on some
336 * processors like Core i7 860
337 */
52a2e4fc
MCC
338 { PCI_DESCR( 0, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE) },
339};
340
1288c18f 341static const struct pci_id_descr pci_dev_descr_i7core_westmere[] = {
bd9e19ca
VM
342 /* Memory controller */
343 { PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR_REV2) },
344 { PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD_REV2) },
345 /* Exists only for RDIMM */
346 { PCI_DESCR(3, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_RAS_REV2), .optional = 1 },
347 { PCI_DESCR(3, 4, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST_REV2) },
348
349 /* Channel 0 */
350 { PCI_DESCR(4, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL_REV2) },
351 { PCI_DESCR(4, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR_REV2) },
352 { PCI_DESCR(4, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK_REV2) },
353 { PCI_DESCR(4, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC_REV2) },
354
355 /* Channel 1 */
356 { PCI_DESCR(5, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL_REV2) },
357 { PCI_DESCR(5, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR_REV2) },
358 { PCI_DESCR(5, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK_REV2) },
359 { PCI_DESCR(5, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC_REV2) },
360
361 /* Channel 2 */
362 { PCI_DESCR(6, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_CTRL_REV2) },
363 { PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_ADDR_REV2) },
364 { PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_RANK_REV2) },
365 { PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_TC_REV2) },
366
367 /* Generic Non-core registers */
368 { PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_REV2) },
369
370};
371
1288c18f
MCC
372#define PCI_ID_TABLE_ENTRY(A) { .descr=A, .n_devs = ARRAY_SIZE(A) }
373static const struct pci_id_table pci_dev_table[] = {
bd9e19ca
VM
374 PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_nehalem),
375 PCI_ID_TABLE_ENTRY(pci_dev_descr_lynnfield),
376 PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_westmere),
377};
378
8f331907
MCC
379/*
380 * pci_device_id table for which devices we are looking for
8f331907
MCC
381 */
382static const struct pci_device_id i7core_pci_tbl[] __devinitdata = {
d1fd4fb6 383 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
f05da2f7 384 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
8f331907
MCC
385 {0,} /* 0 terminated list. */
386};
387
a0c36a1f
MCC
388/****************************************************************************
389 Anciliary status routines
390 ****************************************************************************/
391
392 /* MC_CONTROL bits */
ef708b53
MCC
393#define CH_ACTIVE(pvt, ch) ((pvt)->info.mc_control & (1 << (8 + ch)))
394#define ECCx8(pvt) ((pvt)->info.mc_control & (1 << 1))
a0c36a1f
MCC
395
396 /* MC_STATUS bits */
61053fde 397#define ECC_ENABLED(pvt) ((pvt)->info.mc_status & (1 << 4))
ef708b53 398#define CH_DISABLED(pvt, ch) ((pvt)->info.mc_status & (1 << ch))
a0c36a1f
MCC
399
400 /* MC_MAX_DOD read functions */
854d3349 401static inline int numdimms(u32 dimms)
a0c36a1f 402{
854d3349 403 return (dimms & 0x3) + 1;
a0c36a1f
MCC
404}
405
854d3349 406static inline int numrank(u32 rank)
a0c36a1f
MCC
407{
408 static int ranks[4] = { 1, 2, 4, -EINVAL };
409
854d3349 410 return ranks[rank & 0x3];
a0c36a1f
MCC
411}
412
854d3349 413static inline int numbank(u32 bank)
a0c36a1f
MCC
414{
415 static int banks[4] = { 4, 8, 16, -EINVAL };
416
854d3349 417 return banks[bank & 0x3];
a0c36a1f
MCC
418}
419
854d3349 420static inline int numrow(u32 row)
a0c36a1f
MCC
421{
422 static int rows[8] = {
423 1 << 12, 1 << 13, 1 << 14, 1 << 15,
424 1 << 16, -EINVAL, -EINVAL, -EINVAL,
425 };
426
854d3349 427 return rows[row & 0x7];
a0c36a1f
MCC
428}
429
854d3349 430static inline int numcol(u32 col)
a0c36a1f
MCC
431{
432 static int cols[8] = {
433 1 << 10, 1 << 11, 1 << 12, -EINVAL,
434 };
854d3349 435 return cols[col & 0x3];
a0c36a1f
MCC
436}
437
f4742949 438static struct i7core_dev *get_i7core_dev(u8 socket)
66607706
MCC
439{
440 struct i7core_dev *i7core_dev;
441
442 list_for_each_entry(i7core_dev, &i7core_edac_list, list) {
443 if (i7core_dev->socket == socket)
444 return i7core_dev;
445 }
446
447 return NULL;
448}
449
848b2f7e
HS
450static struct i7core_dev *alloc_i7core_dev(u8 socket,
451 const struct pci_id_table *table)
452{
453 struct i7core_dev *i7core_dev;
454
455 i7core_dev = kzalloc(sizeof(*i7core_dev), GFP_KERNEL);
456 if (!i7core_dev)
457 return NULL;
458
459 i7core_dev->pdev = kzalloc(sizeof(*i7core_dev->pdev) * table->n_devs,
460 GFP_KERNEL);
461 if (!i7core_dev->pdev) {
462 kfree(i7core_dev);
463 return NULL;
464 }
465
466 i7core_dev->socket = socket;
467 i7core_dev->n_devs = table->n_devs;
468 list_add_tail(&i7core_dev->list, &i7core_edac_list);
469
470 return i7core_dev;
471}
472
2aa9be44
HS
473static void free_i7core_dev(struct i7core_dev *i7core_dev)
474{
475 list_del(&i7core_dev->list);
476 kfree(i7core_dev->pdev);
477 kfree(i7core_dev);
478}
479
a0c36a1f
MCC
480/****************************************************************************
481 Memory check routines
482 ****************************************************************************/
67166af4
MCC
483static struct pci_dev *get_pdev_slot_func(u8 socket, unsigned slot,
484 unsigned func)
ef708b53 485{
66607706 486 struct i7core_dev *i7core_dev = get_i7core_dev(socket);
ef708b53 487 int i;
ef708b53 488
66607706
MCC
489 if (!i7core_dev)
490 return NULL;
491
de06eeef 492 for (i = 0; i < i7core_dev->n_devs; i++) {
66607706 493 if (!i7core_dev->pdev[i])
ef708b53
MCC
494 continue;
495
66607706
MCC
496 if (PCI_SLOT(i7core_dev->pdev[i]->devfn) == slot &&
497 PCI_FUNC(i7core_dev->pdev[i]->devfn) == func) {
498 return i7core_dev->pdev[i];
ef708b53
MCC
499 }
500 }
501
eb94fc40
MCC
502 return NULL;
503}
504
ec6df24c
MCC
505/**
506 * i7core_get_active_channels() - gets the number of channels and csrows
507 * @socket: Quick Path Interconnect socket
508 * @channels: Number of channels that will be returned
509 * @csrows: Number of csrows found
510 *
511 * Since EDAC core needs to know in advance the number of available channels
512 * and csrows, in order to allocate memory for csrows/channels, it is needed
513 * to run two similar steps. At the first step, implemented on this function,
514 * it checks the number of csrows/channels present at one socket.
515 * this is used in order to properly allocate the size of mci components.
516 *
517 * It should be noticed that none of the current available datasheets explain
518 * or even mention how csrows are seen by the memory controller. So, we need
519 * to add a fake description for csrows.
520 * So, this driver is attributing one DIMM memory for one csrow.
521 */
1288c18f 522static int i7core_get_active_channels(const u8 socket, unsigned *channels,
67166af4 523 unsigned *csrows)
eb94fc40
MCC
524{
525 struct pci_dev *pdev = NULL;
526 int i, j;
527 u32 status, control;
528
529 *channels = 0;
530 *csrows = 0;
531
67166af4 532 pdev = get_pdev_slot_func(socket, 3, 0);
b7c76151 533 if (!pdev) {
67166af4
MCC
534 i7core_printk(KERN_ERR, "Couldn't find socket %d fn 3.0!!!\n",
535 socket);
ef708b53 536 return -ENODEV;
b7c76151 537 }
ef708b53
MCC
538
539 /* Device 3 function 0 reads */
540 pci_read_config_dword(pdev, MC_STATUS, &status);
541 pci_read_config_dword(pdev, MC_CONTROL, &control);
542
543 for (i = 0; i < NUM_CHANS; i++) {
eb94fc40 544 u32 dimm_dod[3];
ef708b53
MCC
545 /* Check if the channel is active */
546 if (!(control & (1 << (8 + i))))
547 continue;
548
549 /* Check if the channel is disabled */
41fcb7fe 550 if (status & (1 << i))
ef708b53 551 continue;
ef708b53 552
67166af4 553 pdev = get_pdev_slot_func(socket, i + 4, 1);
eb94fc40 554 if (!pdev) {
67166af4
MCC
555 i7core_printk(KERN_ERR, "Couldn't find socket %d "
556 "fn %d.%d!!!\n",
557 socket, i + 4, 1);
eb94fc40
MCC
558 return -ENODEV;
559 }
560 /* Devices 4-6 function 1 */
561 pci_read_config_dword(pdev,
562 MC_DOD_CH_DIMM0, &dimm_dod[0]);
563 pci_read_config_dword(pdev,
564 MC_DOD_CH_DIMM1, &dimm_dod[1]);
565 pci_read_config_dword(pdev,
566 MC_DOD_CH_DIMM2, &dimm_dod[2]);
567
ef708b53 568 (*channels)++;
eb94fc40
MCC
569
570 for (j = 0; j < 3; j++) {
571 if (!DIMM_PRESENT(dimm_dod[j]))
572 continue;
573 (*csrows)++;
574 }
ef708b53
MCC
575 }
576
c77720b9 577 debugf0("Number of active channels on socket %d: %d\n",
67166af4 578 socket, *channels);
1c6fed80 579
ef708b53
MCC
580 return 0;
581}
582
1288c18f 583static int get_dimm_config(const struct mem_ctl_info *mci, int *csrow)
a0c36a1f
MCC
584{
585 struct i7core_pvt *pvt = mci->pvt_info;
1c6fed80 586 struct csrow_info *csr;
854d3349 587 struct pci_dev *pdev;
ba6c5c62 588 int i, j;
5566cb7c 589 unsigned long last_page = 0;
1c6fed80 590 enum edac_type mode;
854d3349 591 enum mem_type mtype;
a0c36a1f 592
854d3349 593 /* Get data from the MC register, function 0 */
f4742949 594 pdev = pvt->pci_mcr[0];
7dd6953c 595 if (!pdev)
8f331907
MCC
596 return -ENODEV;
597
f122a892 598 /* Device 3 function 0 reads */
7dd6953c
MCC
599 pci_read_config_dword(pdev, MC_CONTROL, &pvt->info.mc_control);
600 pci_read_config_dword(pdev, MC_STATUS, &pvt->info.mc_status);
601 pci_read_config_dword(pdev, MC_MAX_DOD, &pvt->info.max_dod);
602 pci_read_config_dword(pdev, MC_CHANNEL_MAPPER, &pvt->info.ch_map);
f122a892 603
17cb7b0c 604 debugf0("QPI %d control=0x%08x status=0x%08x dod=0x%08x map=0x%08x\n",
4af91889 605 pvt->i7core_dev->socket, pvt->info.mc_control, pvt->info.mc_status,
f122a892 606 pvt->info.max_dod, pvt->info.ch_map);
a0c36a1f 607
1c6fed80 608 if (ECC_ENABLED(pvt)) {
41fcb7fe 609 debugf0("ECC enabled with x%d SDCC\n", ECCx8(pvt) ? 8 : 4);
1c6fed80
MCC
610 if (ECCx8(pvt))
611 mode = EDAC_S8ECD8ED;
612 else
613 mode = EDAC_S4ECD4ED;
614 } else {
a0c36a1f 615 debugf0("ECC disabled\n");
1c6fed80
MCC
616 mode = EDAC_NONE;
617 }
a0c36a1f
MCC
618
619 /* FIXME: need to handle the error codes */
17cb7b0c
MCC
620 debugf0("DOD Max limits: DIMMS: %d, %d-ranked, %d-banked "
621 "x%x x 0x%x\n",
854d3349
MCC
622 numdimms(pvt->info.max_dod),
623 numrank(pvt->info.max_dod >> 2),
276b824c 624 numbank(pvt->info.max_dod >> 4),
854d3349
MCC
625 numrow(pvt->info.max_dod >> 6),
626 numcol(pvt->info.max_dod >> 9));
a0c36a1f 627
0b2b7b7e 628 for (i = 0; i < NUM_CHANS; i++) {
854d3349 629 u32 data, dimm_dod[3], value[8];
0b2b7b7e 630
52a2e4fc
MCC
631 if (!pvt->pci_ch[i][0])
632 continue;
633
0b2b7b7e
MCC
634 if (!CH_ACTIVE(pvt, i)) {
635 debugf0("Channel %i is not active\n", i);
636 continue;
637 }
638 if (CH_DISABLED(pvt, i)) {
639 debugf0("Channel %i is disabled\n", i);
640 continue;
641 }
642
f122a892 643 /* Devices 4-6 function 0 */
f4742949 644 pci_read_config_dword(pvt->pci_ch[i][0],
0b2b7b7e
MCC
645 MC_CHANNEL_DIMM_INIT_PARAMS, &data);
646
f4742949 647 pvt->channel[i].ranks = (data & QUAD_RANK_PRESENT) ?
67166af4 648 4 : 2;
0b2b7b7e 649
854d3349
MCC
650 if (data & REGISTERED_DIMM)
651 mtype = MEM_RDDR3;
14d2c083 652 else
854d3349
MCC
653 mtype = MEM_DDR3;
654#if 0
0b2b7b7e
MCC
655 if (data & THREE_DIMMS_PRESENT)
656 pvt->channel[i].dimms = 3;
657 else if (data & SINGLE_QUAD_RANK_PRESENT)
658 pvt->channel[i].dimms = 1;
659 else
660 pvt->channel[i].dimms = 2;
854d3349
MCC
661#endif
662
663 /* Devices 4-6 function 1 */
f4742949 664 pci_read_config_dword(pvt->pci_ch[i][1],
854d3349 665 MC_DOD_CH_DIMM0, &dimm_dod[0]);
f4742949 666 pci_read_config_dword(pvt->pci_ch[i][1],
854d3349 667 MC_DOD_CH_DIMM1, &dimm_dod[1]);
f4742949 668 pci_read_config_dword(pvt->pci_ch[i][1],
854d3349 669 MC_DOD_CH_DIMM2, &dimm_dod[2]);
0b2b7b7e 670
1c6fed80 671 debugf0("Ch%d phy rd%d, wr%d (0x%08x): "
854d3349 672 "%d ranks, %cDIMMs\n",
1c6fed80
MCC
673 i,
674 RDLCH(pvt->info.ch_map, i), WRLCH(pvt->info.ch_map, i),
675 data,
f4742949 676 pvt->channel[i].ranks,
41fcb7fe 677 (data & REGISTERED_DIMM) ? 'R' : 'U');
854d3349
MCC
678
679 for (j = 0; j < 3; j++) {
680 u32 banks, ranks, rows, cols;
5566cb7c 681 u32 size, npages;
854d3349
MCC
682
683 if (!DIMM_PRESENT(dimm_dod[j]))
684 continue;
685
686 banks = numbank(MC_DOD_NUMBANK(dimm_dod[j]));
687 ranks = numrank(MC_DOD_NUMRANK(dimm_dod[j]));
688 rows = numrow(MC_DOD_NUMROW(dimm_dod[j]));
689 cols = numcol(MC_DOD_NUMCOL(dimm_dod[j]));
690
5566cb7c
MCC
691 /* DDR3 has 8 I/O banks */
692 size = (rows * cols * banks * ranks) >> (20 - 3);
693
f4742949 694 pvt->channel[i].dimms++;
854d3349 695
17cb7b0c
MCC
696 debugf0("\tdimm %d %d Mb offset: %x, "
697 "bank: %d, rank: %d, row: %#x, col: %#x\n",
698 j, size,
854d3349
MCC
699 RANKOFFSET(dimm_dod[j]),
700 banks, ranks, rows, cols);
701
e9144601 702 npages = MiB_TO_PAGES(size);
5566cb7c 703
ba6c5c62 704 csr = &mci->csrows[*csrow];
5566cb7c
MCC
705 csr->first_page = last_page + 1;
706 last_page += npages;
707 csr->last_page = last_page;
708 csr->nr_pages = npages;
709
854d3349 710 csr->page_mask = 0;
eb94fc40 711 csr->grain = 8;
ba6c5c62 712 csr->csrow_idx = *csrow;
eb94fc40
MCC
713 csr->nr_channels = 1;
714
715 csr->channels[0].chan_idx = i;
716 csr->channels[0].ce_count = 0;
854d3349 717
f4742949 718 pvt->csrow_map[i][j] = *csrow;
b4e8f0b6 719
854d3349
MCC
720 switch (banks) {
721 case 4:
722 csr->dtype = DEV_X4;
723 break;
724 case 8:
725 csr->dtype = DEV_X8;
726 break;
727 case 16:
728 csr->dtype = DEV_X16;
729 break;
730 default:
731 csr->dtype = DEV_UNKNOWN;
732 }
733
734 csr->edac_mode = mode;
735 csr->mtype = mtype;
736
ba6c5c62 737 (*csrow)++;
854d3349 738 }
1c6fed80 739
854d3349
MCC
740 pci_read_config_dword(pdev, MC_SAG_CH_0, &value[0]);
741 pci_read_config_dword(pdev, MC_SAG_CH_1, &value[1]);
742 pci_read_config_dword(pdev, MC_SAG_CH_2, &value[2]);
743 pci_read_config_dword(pdev, MC_SAG_CH_3, &value[3]);
744 pci_read_config_dword(pdev, MC_SAG_CH_4, &value[4]);
745 pci_read_config_dword(pdev, MC_SAG_CH_5, &value[5]);
746 pci_read_config_dword(pdev, MC_SAG_CH_6, &value[6]);
747 pci_read_config_dword(pdev, MC_SAG_CH_7, &value[7]);
17cb7b0c 748 debugf1("\t[%i] DIVBY3\tREMOVED\tOFFSET\n", i);
854d3349 749 for (j = 0; j < 8; j++)
17cb7b0c 750 debugf1("\t\t%#x\t%#x\t%#x\n",
854d3349
MCC
751 (value[j] >> 27) & 0x1,
752 (value[j] >> 24) & 0x7,
753 (value[j] && ((1 << 24) - 1)));
0b2b7b7e
MCC
754 }
755
a0c36a1f
MCC
756 return 0;
757}
758
194a40fe
MCC
759/****************************************************************************
760 Error insertion routines
761 ****************************************************************************/
762
763/* The i7core has independent error injection features per channel.
764 However, to have a simpler code, we don't allow enabling error injection
765 on more than one channel.
766 Also, since a change at an inject parameter will be applied only at enable,
767 we're disabling error injection on all write calls to the sysfs nodes that
768 controls the error code injection.
769 */
1288c18f 770static int disable_inject(const struct mem_ctl_info *mci)
194a40fe
MCC
771{
772 struct i7core_pvt *pvt = mci->pvt_info;
773
774 pvt->inject.enable = 0;
775
f4742949 776 if (!pvt->pci_ch[pvt->inject.channel][0])
8f331907
MCC
777 return -ENODEV;
778
f4742949 779 pci_write_config_dword(pvt->pci_ch[pvt->inject.channel][0],
4157d9f5 780 MC_CHANNEL_ERROR_INJECT, 0);
8f331907
MCC
781
782 return 0;
194a40fe
MCC
783}
784
785/*
786 * i7core inject inject.section
787 *
788 * accept and store error injection inject.section value
789 * bit 0 - refers to the lower 32-byte half cacheline
790 * bit 1 - refers to the upper 32-byte half cacheline
791 */
792static ssize_t i7core_inject_section_store(struct mem_ctl_info *mci,
793 const char *data, size_t count)
794{
795 struct i7core_pvt *pvt = mci->pvt_info;
796 unsigned long value;
797 int rc;
798
799 if (pvt->inject.enable)
41fcb7fe 800 disable_inject(mci);
194a40fe
MCC
801
802 rc = strict_strtoul(data, 10, &value);
803 if ((rc < 0) || (value > 3))
2068def5 804 return -EIO;
194a40fe
MCC
805
806 pvt->inject.section = (u32) value;
807 return count;
808}
809
810static ssize_t i7core_inject_section_show(struct mem_ctl_info *mci,
811 char *data)
812{
813 struct i7core_pvt *pvt = mci->pvt_info;
814 return sprintf(data, "0x%08x\n", pvt->inject.section);
815}
816
817/*
818 * i7core inject.type
819 *
820 * accept and store error injection inject.section value
821 * bit 0 - repeat enable - Enable error repetition
822 * bit 1 - inject ECC error
823 * bit 2 - inject parity error
824 */
825static ssize_t i7core_inject_type_store(struct mem_ctl_info *mci,
826 const char *data, size_t count)
827{
828 struct i7core_pvt *pvt = mci->pvt_info;
829 unsigned long value;
830 int rc;
831
832 if (pvt->inject.enable)
41fcb7fe 833 disable_inject(mci);
194a40fe
MCC
834
835 rc = strict_strtoul(data, 10, &value);
836 if ((rc < 0) || (value > 7))
2068def5 837 return -EIO;
194a40fe
MCC
838
839 pvt->inject.type = (u32) value;
840 return count;
841}
842
843static ssize_t i7core_inject_type_show(struct mem_ctl_info *mci,
844 char *data)
845{
846 struct i7core_pvt *pvt = mci->pvt_info;
847 return sprintf(data, "0x%08x\n", pvt->inject.type);
848}
849
850/*
851 * i7core_inject_inject.eccmask_store
852 *
853 * The type of error (UE/CE) will depend on the inject.eccmask value:
854 * Any bits set to a 1 will flip the corresponding ECC bit
855 * Correctable errors can be injected by flipping 1 bit or the bits within
856 * a symbol pair (2 consecutive aligned 8-bit pairs - i.e. 7:0 and 15:8 or
857 * 23:16 and 31:24). Flipping bits in two symbol pairs will cause an
858 * uncorrectable error to be injected.
859 */
860static ssize_t i7core_inject_eccmask_store(struct mem_ctl_info *mci,
861 const char *data, size_t count)
862{
863 struct i7core_pvt *pvt = mci->pvt_info;
864 unsigned long value;
865 int rc;
866
867 if (pvt->inject.enable)
41fcb7fe 868 disable_inject(mci);
194a40fe
MCC
869
870 rc = strict_strtoul(data, 10, &value);
871 if (rc < 0)
2068def5 872 return -EIO;
194a40fe
MCC
873
874 pvt->inject.eccmask = (u32) value;
875 return count;
876}
877
878static ssize_t i7core_inject_eccmask_show(struct mem_ctl_info *mci,
879 char *data)
880{
881 struct i7core_pvt *pvt = mci->pvt_info;
882 return sprintf(data, "0x%08x\n", pvt->inject.eccmask);
883}
884
885/*
886 * i7core_addrmatch
887 *
888 * The type of error (UE/CE) will depend on the inject.eccmask value:
889 * Any bits set to a 1 will flip the corresponding ECC bit
890 * Correctable errors can be injected by flipping 1 bit or the bits within
891 * a symbol pair (2 consecutive aligned 8-bit pairs - i.e. 7:0 and 15:8 or
892 * 23:16 and 31:24). Flipping bits in two symbol pairs will cause an
893 * uncorrectable error to be injected.
894 */
194a40fe 895
a5538e53
MCC
/*
 * Generate the sysfs store/show pair for one address-match field of the
 * injection mask. "any" (or "any\n") stores -1, meaning "do not match on
 * this field"; otherwise a decimal value strictly below @limit is
 * accepted.
 * NOTE(review): 'value' is declared long but passed to strict_strtoul(),
 * which takes unsigned long * — kept as-is to preserve behavior; confirm
 * against the kernel version in use.
 */
#define DECLARE_ADDR_MATCH(param, limit)			\
static ssize_t i7core_inject_store_##param(			\
	struct mem_ctl_info *mci,				\
	const char *data, size_t count)				\
{								\
	struct i7core_pvt *pvt;					\
	long value;						\
	int rc;							\
								\
	debugf1("%s()\n", __func__);				\
	pvt = mci->pvt_info;					\
								\
	if (pvt->inject.enable)					\
		disable_inject(mci);				\
								\
	if (!strcasecmp(data, "any") || !strcasecmp(data, "any\n"))\
		value = -1;					\
	else {							\
		rc = strict_strtoul(data, 10, &value);		\
		if ((rc < 0) || (value >= limit))		\
			return -EIO;				\
	}							\
								\
	pvt->inject.param = value;				\
								\
	return count;						\
}								\
								\
static ssize_t i7core_inject_show_##param(			\
	struct mem_ctl_info *mci,				\
	char *data)						\
{								\
	struct i7core_pvt *pvt;					\
								\
	pvt = mci->pvt_info;					\
	debugf1("%s() pvt=%p\n", __func__, pvt);		\
	if (pvt->inject.param < 0)				\
		return sprintf(data, "any\n");			\
	else							\
		return sprintf(data, "%d\n", pvt->inject.param);\
}
937
a5538e53
MCC
938#define ATTR_ADDR_MATCH(param) \
939 { \
940 .attr = { \
941 .name = #param, \
942 .mode = (S_IRUGO | S_IWUSR) \
943 }, \
944 .show = i7core_inject_show_##param, \
945 .store = i7core_inject_store_##param, \
946 }
194a40fe 947
a5538e53
MCC
948DECLARE_ADDR_MATCH(channel, 3);
949DECLARE_ADDR_MATCH(dimm, 3);
950DECLARE_ADDR_MATCH(rank, 4);
951DECLARE_ADDR_MATCH(bank, 32);
952DECLARE_ADDR_MATCH(page, 0x10000);
953DECLARE_ADDR_MATCH(col, 0x4000);
194a40fe 954
1288c18f 955static int write_and_test(struct pci_dev *dev, const int where, const u32 val)
276b824c
MCC
956{
957 u32 read;
958 int count;
959
4157d9f5
MCC
960 debugf0("setting pci %02x:%02x.%x reg=%02x value=%08x\n",
961 dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
962 where, val);
963
276b824c
MCC
964 for (count = 0; count < 10; count++) {
965 if (count)
b990538a 966 msleep(100);
276b824c
MCC
967 pci_write_config_dword(dev, where, val);
968 pci_read_config_dword(dev, where, &read);
969
970 if (read == val)
971 return 0;
972 }
973
4157d9f5
MCC
974 i7core_printk(KERN_ERR, "Error during set pci %02x:%02x.%x reg=%02x "
975 "write=%08x. Read=%08x\n",
976 dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
977 where, val, read);
276b824c
MCC
978
979 return -EINVAL;
980}
981
194a40fe
MCC
982/*
983 * This routine prepares the Memory Controller for error injection.
984 * The error will be injected when some process tries to write to the
985 * memory that matches the given criteria.
986 * The criteria can be set in terms of a mask where dimm, rank, bank, page
987 * and col can be specified.
988 * A -1 value for any of the mask items will make the MCU to ignore
989 * that matching criteria for error injection.
990 *
991 * It should be noticed that the error will only happen after a write operation
992 * on a memory that matches the condition. if REPEAT_EN is not enabled at
993 * inject mask, then it will produce just one error. Otherwise, it will repeat
994 * until the injectmask would be cleaned.
995 *
996 * FIXME: This routine assumes that MAXNUMDIMMS value of MC_MAX_DOD
997 * is reliable enough to check if the MC is using the
998 * three channels. However, this is not clear at the datasheet.
999 */
1000static ssize_t i7core_inject_enable_store(struct mem_ctl_info *mci,
1001 const char *data, size_t count)
1002{
1003 struct i7core_pvt *pvt = mci->pvt_info;
1004 u32 injectmask;
1005 u64 mask = 0;
1006 int rc;
1007 long enable;
1008
f4742949 1009 if (!pvt->pci_ch[pvt->inject.channel][0])
8f331907
MCC
1010 return 0;
1011
194a40fe
MCC
1012 rc = strict_strtoul(data, 10, &enable);
1013 if ((rc < 0))
1014 return 0;
1015
1016 if (enable) {
1017 pvt->inject.enable = 1;
1018 } else {
1019 disable_inject(mci);
1020 return count;
1021 }
1022
1023 /* Sets pvt->inject.dimm mask */
1024 if (pvt->inject.dimm < 0)
486dd09f 1025 mask |= 1LL << 41;
194a40fe 1026 else {
f4742949 1027 if (pvt->channel[pvt->inject.channel].dimms > 2)
486dd09f 1028 mask |= (pvt->inject.dimm & 0x3LL) << 35;
194a40fe 1029 else
486dd09f 1030 mask |= (pvt->inject.dimm & 0x1LL) << 36;
194a40fe
MCC
1031 }
1032
1033 /* Sets pvt->inject.rank mask */
1034 if (pvt->inject.rank < 0)
486dd09f 1035 mask |= 1LL << 40;
194a40fe 1036 else {
f4742949 1037 if (pvt->channel[pvt->inject.channel].dimms > 2)
486dd09f 1038 mask |= (pvt->inject.rank & 0x1LL) << 34;
194a40fe 1039 else
486dd09f 1040 mask |= (pvt->inject.rank & 0x3LL) << 34;
194a40fe
MCC
1041 }
1042
1043 /* Sets pvt->inject.bank mask */
1044 if (pvt->inject.bank < 0)
486dd09f 1045 mask |= 1LL << 39;
194a40fe 1046 else
486dd09f 1047 mask |= (pvt->inject.bank & 0x15LL) << 30;
194a40fe
MCC
1048
1049 /* Sets pvt->inject.page mask */
1050 if (pvt->inject.page < 0)
486dd09f 1051 mask |= 1LL << 38;
194a40fe 1052 else
486dd09f 1053 mask |= (pvt->inject.page & 0xffff) << 14;
194a40fe
MCC
1054
1055 /* Sets pvt->inject.column mask */
1056 if (pvt->inject.col < 0)
486dd09f 1057 mask |= 1LL << 37;
194a40fe 1058 else
486dd09f 1059 mask |= (pvt->inject.col & 0x3fff);
194a40fe 1060
276b824c
MCC
1061 /*
1062 * bit 0: REPEAT_EN
1063 * bits 1-2: MASK_HALF_CACHELINE
1064 * bit 3: INJECT_ECC
1065 * bit 4: INJECT_ADDR_PARITY
1066 */
1067
1068 injectmask = (pvt->inject.type & 1) |
1069 (pvt->inject.section & 0x3) << 1 |
1070 (pvt->inject.type & 0x6) << (3 - 1);
1071
1072 /* Unlock writes to registers - this register is write only */
f4742949 1073 pci_write_config_dword(pvt->pci_noncore,
67166af4 1074 MC_CFG_CONTROL, 0x2);
e9bd2e73 1075
f4742949 1076 write_and_test(pvt->pci_ch[pvt->inject.channel][0],
194a40fe 1077 MC_CHANNEL_ADDR_MATCH, mask);
f4742949 1078 write_and_test(pvt->pci_ch[pvt->inject.channel][0],
7b029d03 1079 MC_CHANNEL_ADDR_MATCH + 4, mask >> 32L);
7b029d03 1080
f4742949 1081 write_and_test(pvt->pci_ch[pvt->inject.channel][0],
194a40fe
MCC
1082 MC_CHANNEL_ERROR_MASK, pvt->inject.eccmask);
1083
f4742949 1084 write_and_test(pvt->pci_ch[pvt->inject.channel][0],
4157d9f5 1085 MC_CHANNEL_ERROR_INJECT, injectmask);
276b824c 1086
194a40fe 1087 /*
276b824c
MCC
1088 * This is something undocumented, based on my tests
1089 * Without writing 8 to this register, errors aren't injected. Not sure
1090 * why.
194a40fe 1091 */
f4742949 1092 pci_write_config_dword(pvt->pci_noncore,
276b824c 1093 MC_CFG_CONTROL, 8);
194a40fe 1094
41fcb7fe
MCC
1095 debugf0("Error inject addr match 0x%016llx, ecc 0x%08x,"
1096 " inject 0x%08x\n",
194a40fe
MCC
1097 mask, pvt->inject.eccmask, injectmask);
1098
7b029d03 1099
194a40fe
MCC
1100 return count;
1101}
1102
1103static ssize_t i7core_inject_enable_show(struct mem_ctl_info *mci,
1104 char *data)
1105{
1106 struct i7core_pvt *pvt = mci->pvt_info;
7b029d03
MCC
1107 u32 injectmask;
1108
52a2e4fc
MCC
1109 if (!pvt->pci_ch[pvt->inject.channel][0])
1110 return 0;
1111
f4742949 1112 pci_read_config_dword(pvt->pci_ch[pvt->inject.channel][0],
4157d9f5 1113 MC_CHANNEL_ERROR_INJECT, &injectmask);
7b029d03
MCC
1114
1115 debugf0("Inject error read: 0x%018x\n", injectmask);
1116
1117 if (injectmask & 0x0c)
1118 pvt->inject.enable = 1;
1119
194a40fe
MCC
1120 return sprintf(data, "%d\n", pvt->inject.enable);
1121}
1122
f338d736
MCC
/*
 * DECLARE_COUNTER(param) - generate a sysfs show handler that prints the
 * UDIMM corrected-error counter for dimm slot @param.
 *
 * Prints "data unavailable" until the first counter sweep has run
 * (ce_count_available) and on RDIMM setups (is_registered), where the
 * per-channel RDIMM counters are used instead.
 */
#define DECLARE_COUNTER(param)			\
static ssize_t i7core_show_counter_##param(	\
	struct mem_ctl_info *mci,		\
	char *data)				\
{						\
	struct i7core_pvt *pvt = mci->pvt_info;	\
						\
	debugf1("%s() \n", __func__);		\
	if (!pvt->ce_count_available || (pvt->is_registered)) \
		return sprintf(data, "data unavailable\n"); \
	return sprintf(data, "%lu\n",		\
			pvt->udimm_ce_count[param]); \
}
442305b1 1136
f338d736
MCC
/*
 * ATTR_COUNTER(param) - read-only sysfs attribute "udimm<param>" backed
 * by the handler generated by DECLARE_COUNTER(param).
 */
#define ATTR_COUNTER(param)				\
	{						\
		.attr = {				\
			.name = __stringify(udimm##param),	\
			.mode = (S_IRUGO | S_IWUSR)	\
		},					\
		.show  = i7core_show_counter_##param	\
	}

/* One counter attribute per UDIMM slot (0-2). */
DECLARE_COUNTER(0);
DECLARE_COUNTER(1);
DECLARE_COUNTER(2);
442305b1 1149
194a40fe
MCC
/*
 * Sysfs struct
 */

/* Attributes exposed inside the inject_addrmatch/ sysfs group. */
static const struct mcidev_sysfs_attribute i7core_addrmatch_attrs[] = {
	ATTR_ADDR_MATCH(channel),
	ATTR_ADDR_MATCH(dimm),
	ATTR_ADDR_MATCH(rank),
	ATTR_ADDR_MATCH(bank),
	ATTR_ADDR_MATCH(page),
	ATTR_ADDR_MATCH(col),
	{ } /* End of list */
};
1163
/* Groups the address-match attributes under the "inject_addrmatch" dir. */
static const struct mcidev_sysfs_group i7core_inject_addrmatch = {
	.name  = "inject_addrmatch",
	.mcidev_attr = i7core_addrmatch_attrs,
};
1168
/* Per-dimm corrected-error counter attributes (UDIMM setups only). */
static const struct mcidev_sysfs_attribute i7core_udimm_counters_attrs[] = {
	ATTR_COUNTER(0),
	ATTR_COUNTER(1),
	ATTR_COUNTER(2),
	{ .attr = { .name = NULL } }	/* End of list */
};
1175
/* Groups the UDIMM counters under the "all_channel_counts" dir. */
static const struct mcidev_sysfs_group i7core_udimm_counters = {
	.name = "all_channel_counts",
	.mcidev_attr = i7core_udimm_counters_attrs,
};
1180
/*
 * Top-level sysfs attributes used on RDIMM setups: the error-injection
 * controls, but no UDIMM counter group.
 */
static const struct mcidev_sysfs_attribute i7core_sysfs_rdimm_attrs[] = {
	{
		.attr = {
			.name = "inject_section",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show  = i7core_inject_section_show,
		.store = i7core_inject_section_store,
	}, {
		.attr = {
			.name = "inject_type",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show  = i7core_inject_type_show,
		.store = i7core_inject_type_store,
	}, {
		.attr = {
			.name = "inject_eccmask",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show  = i7core_inject_eccmask_show,
		.store = i7core_inject_eccmask_store,
	}, {
		.grp = &i7core_inject_addrmatch,
	}, {
		.attr = {
			.name = "inject_enable",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show  = i7core_inject_enable_show,
		.store = i7core_inject_enable_store,
	},
	{ }	/* End of list */
};
1215
/*
 * Top-level sysfs attributes used on UDIMM setups: same injection
 * controls as the RDIMM table, plus the all_channel_counts group.
 */
static const struct mcidev_sysfs_attribute i7core_sysfs_udimm_attrs[] = {
	{
		.attr = {
			.name = "inject_section",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show  = i7core_inject_section_show,
		.store = i7core_inject_section_store,
	}, {
		.attr = {
			.name = "inject_type",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show  = i7core_inject_type_show,
		.store = i7core_inject_type_store,
	}, {
		.attr = {
			.name = "inject_eccmask",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show  = i7core_inject_eccmask_show,
		.store = i7core_inject_eccmask_store,
	}, {
		.grp = &i7core_inject_addrmatch,
	}, {
		.attr = {
			.name = "inject_enable",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show  = i7core_inject_enable_show,
		.store = i7core_inject_enable_store,
	}, {
		.grp = &i7core_udimm_counters,
	},
	{ }	/* End of list */
};
1252
a0c36a1f
MCC
1253/****************************************************************************
1254 Device initialization routines: put/get, init/exit
1255 ****************************************************************************/
1256
1257/*
64c10f6e 1258 * i7core_put_all_devices 'put' all the devices that we have
a0c36a1f
MCC
1259 * reserved via 'get'
1260 */
13d6e9b6 1261static void i7core_put_devices(struct i7core_dev *i7core_dev)
a0c36a1f 1262{
13d6e9b6 1263 int i;
a0c36a1f 1264
22e6bcbd 1265 debugf0(__FILE__ ": %s()\n", __func__);
de06eeef 1266 for (i = 0; i < i7core_dev->n_devs; i++) {
22e6bcbd
MCC
1267 struct pci_dev *pdev = i7core_dev->pdev[i];
1268 if (!pdev)
1269 continue;
1270 debugf0("Removing dev %02x:%02x.%d\n",
1271 pdev->bus->number,
1272 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
1273 pci_dev_put(pdev);
1274 }
13d6e9b6 1275}
66607706 1276
13d6e9b6
MCC
1277static void i7core_put_all_devices(void)
1278{
42538680 1279 struct i7core_dev *i7core_dev, *tmp;
13d6e9b6 1280
39300e71 1281 list_for_each_entry_safe(i7core_dev, tmp, &i7core_edac_list, list) {
13d6e9b6 1282 i7core_put_devices(i7core_dev);
2aa9be44 1283 free_i7core_dev(i7core_dev);
39300e71 1284 }
a0c36a1f
MCC
1285}
1286
1288c18f 1287static void __init i7core_xeon_pci_fixup(const struct pci_id_table *table)
bc2d7245
KM
1288{
1289 struct pci_dev *pdev = NULL;
1290 int i;
54a08ab1 1291
bc2d7245
KM
1292 /*
1293 * On Xeon 55xx, the Intel Quckpath Arch Generic Non-core pci buses
1294 * aren't announced by acpi. So, we need to use a legacy scan probing
1295 * to detect them
1296 */
bd9e19ca
VM
1297 while (table && table->descr) {
1298 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, table->descr[0].dev_id, NULL);
1299 if (unlikely(!pdev)) {
1300 for (i = 0; i < MAX_SOCKET_BUSES; i++)
1301 pcibios_scan_specific_bus(255-i);
1302 }
bda14289 1303 pci_dev_put(pdev);
bd9e19ca 1304 table++;
bc2d7245
KM
1305 }
1306}
1307
bda14289
MCC
1308static unsigned i7core_pci_lastbus(void)
1309{
1310 int last_bus = 0, bus;
1311 struct pci_bus *b = NULL;
1312
1313 while ((b = pci_find_next_bus(b)) != NULL) {
1314 bus = b->number;
1315 debugf0("Found bus %d\n", bus);
1316 if (bus > last_bus)
1317 last_bus = bus;
1318 }
1319
1320 debugf0("Last bus %d\n", last_bus);
1321
1322 return last_bus;
1323}
1324
/*
 * i7core_get_all_devices	Find and perform 'get' operation on the MCH's
 *			device/functions we want to reference for this driver
 *
 * Need to 'get' device 16 func 1 and func 2
 */

/*
 * i7core_get_onedevice() - 'get' the next instance of one PCI device
 * from @table (entry @devno) and register it in the per-socket
 * i7core_dev->pdev[] array.
 *
 * @prev is an iteration cursor: on entry it is the device returned by
 * the previous call (NULL to start), on exit the device just found
 * (NULL when the id is exhausted).  Callers loop until *prev is NULL,
 * so multi-socket systems pick up one device per socket.
 *
 * Returns 0 on success or when an optional/iterated device is absent;
 * -ENODEV on a fatal lookup problem, -ENOMEM on allocation failure.
 */
static int i7core_get_onedevice(struct pci_dev **prev,
				const struct pci_id_table *table,
				const unsigned devno,
				const unsigned last_bus)
{
	struct i7core_dev *i7core_dev;
	const struct pci_id_descr *dev_descr = &table->descr[devno];

	struct pci_dev *pdev = NULL;
	u8 bus = 0;
	u8 socket = 0;

	pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
			      dev_descr->dev_id, *prev);

	/*
	 * On Xeon 55xx, the Intel Quckpath Arch Generic Non-core regs
	 * is at addr 8086:2c40, instead of 8086:2c41. So, we need
	 * to probe for the alternate address in case of failure
	 */
	if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_I7_NONCORE && !pdev)
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
				      PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT, *prev);

	/* Lynnfield has the same alternate-id quirk as the i7 non-core */
	if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE && !pdev)
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
				      PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT,
				      *prev);

	if (!pdev) {
		/* End of the per-socket iteration: not an error */
		if (*prev) {
			*prev = pdev;
			return 0;
		}

		if (dev_descr->optional)
			return 0;

		/* Missing first device of a table: try the next table */
		if (devno == 0)
			return -ENODEV;

		i7core_printk(KERN_INFO,
			"Device not found: dev %02x.%d PCI ID %04x:%04x\n",
			dev_descr->dev, dev_descr->func,
			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);

		/* End of list, leave */
		return -ENODEV;
	}
	bus = pdev->bus->number;

	/* The uncore buses count down from last_bus, one per socket */
	socket = last_bus - bus;

	i7core_dev = get_i7core_dev(socket);
	if (!i7core_dev) {
		i7core_dev = alloc_i7core_dev(socket, table);
		if (!i7core_dev) {
			/* Drop the reference taken by pci_get_device() */
			pci_dev_put(pdev);
			return -ENOMEM;
		}
	}

	if (i7core_dev->pdev[devno]) {
		i7core_printk(KERN_ERR,
			"Duplicated device for "
			"dev %02x:%02x.%d PCI ID %04x:%04x\n",
			bus, dev_descr->dev, dev_descr->func,
			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
		pci_dev_put(pdev);
		return -ENODEV;
	}

	/*
	 * The reference is stored here; error paths below rely on
	 * i7core_put_all_devices() to release it.
	 */
	i7core_dev->pdev[devno] = pdev;

	/* Sanity check */
	if (unlikely(PCI_SLOT(pdev->devfn) != dev_descr->dev ||
		     PCI_FUNC(pdev->devfn) != dev_descr->func)) {
		i7core_printk(KERN_ERR,
			"Device PCI ID %04x:%04x "
			"has dev %02x:%02x.%d instead of dev %02x:%02x.%d\n",
			PCI_VENDOR_ID_INTEL, dev_descr->dev_id,
			bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
			bus, dev_descr->dev, dev_descr->func);
		return -ENODEV;
	}

	/* Be sure that the device is enabled */
	if (unlikely(pci_enable_device(pdev) < 0)) {
		i7core_printk(KERN_ERR,
			"Couldn't enable "
			"dev %02x:%02x.%d PCI ID %04x:%04x\n",
			bus, dev_descr->dev, dev_descr->func,
			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
		return -ENODEV;
	}

	debugf0("Detected socket %d dev %02x:%02x.%d PCI ID %04x:%04x\n",
		socket, bus, dev_descr->dev,
		dev_descr->func,
		PCI_VENDOR_ID_INTEL, dev_descr->dev_id);

	*prev = pdev;

	return 0;
}
a0c36a1f 1436
/*
 * i7core_get_all_devices() - walk every known pci_id_table and 'get'
 * each device of each table on every socket.
 *
 * Returns 0 on success, -ENODEV after releasing everything on failure.
 */
static int i7core_get_all_devices(void)
{
	int i, j, rc, last_bus;
	struct pci_dev *pdev = NULL;
	const struct pci_id_table *table;

	last_bus = i7core_pci_lastbus();

	for (j = 0; j < ARRAY_SIZE(pci_dev_table); j++) {
		table = &pci_dev_table[j];
		for (i = 0; i < table->n_devs; i++) {
			pdev = NULL;
			/* Inner loop collects one instance per socket */
			do {
				rc = i7core_get_onedevice(&pdev, table, i,
							  last_bus);
				if (rc < 0) {
					/*
					 * Failure on the table's first
					 * device means "wrong CPU family":
					 * skip to the next table instead of
					 * failing the probe.
					 */
					if (i == 0) {
						i = table->n_devs;
						break;
					}
					i7core_put_all_devices();
					return -ENODEV;
				}
			} while (pdev);
		}
	}

	return 0;
}
1466
f4742949
MCC
/*
 * mci_bind_devs() - cache the socket's PCI devices into the pvt struct
 * for fast access, keyed by PCI slot/function:
 *   slot 3            -> pvt->pci_mcr[func]  (memory controller regs)
 *   slots 4..4+NCHANS -> pvt->pci_ch[chan][func]  (per-channel regs)
 *   slot 0 func 0     -> pvt->pci_noncore
 * The presence of dev 3 func 2 marks a registered-DIMM (RDIMM) setup.
 *
 * Returns 0 on success, -EINVAL for an unexpected slot/function.
 */
static int mci_bind_devs(struct mem_ctl_info *mci,
			 struct i7core_dev *i7core_dev)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	struct pci_dev *pdev;
	int i, func, slot;

	/* Associates i7core_dev and mci for future usage */
	pvt->i7core_dev = i7core_dev;
	i7core_dev->mci = mci;

	pvt->is_registered = 0;
	for (i = 0; i < i7core_dev->n_devs; i++) {
		pdev = i7core_dev->pdev[i];
		if (!pdev)
			continue;

		func = PCI_FUNC(pdev->devfn);
		slot = PCI_SLOT(pdev->devfn);
		if (slot == 3) {
			if (unlikely(func > MAX_MCR_FUNC))
				goto error;
			pvt->pci_mcr[func] = pdev;
		} else if (likely(slot >= 4 && slot < 4 + NUM_CHANS)) {
			if (unlikely(func > MAX_CHAN_FUNC))
				goto error;
			pvt->pci_ch[slot - 4][func] = pdev;
		} else if (!slot && !func)
			pvt->pci_noncore = pdev;
		else
			goto error;

		debugf0("Associated fn %d.%d, dev = %p, socket %d\n",
			PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
			pdev, i7core_dev->socket);

		/* Dev 3 func 2 only exists when RDIMMs are in use */
		if (PCI_SLOT(pdev->devfn) == 3 &&
			PCI_FUNC(pdev->devfn) == 2)
			pvt->is_registered = 1;
	}

	return 0;

error:
	i7core_printk(KERN_ERR, "Device %d, function %d "
		      "is out of the expected range\n",
		      slot, func);
	return -EINVAL;
}
1516
442305b1
MCC
1517/****************************************************************************
1518 Error check routines
1519 ****************************************************************************/
f4742949 1520static void i7core_rdimm_update_csrow(struct mem_ctl_info *mci,
1288c18f
MCC
1521 const int chan,
1522 const int dimm,
1523 const int add)
b4e8f0b6
MCC
1524{
1525 char *msg;
1526 struct i7core_pvt *pvt = mci->pvt_info;
f4742949 1527 int row = pvt->csrow_map[chan][dimm], i;
b4e8f0b6
MCC
1528
1529 for (i = 0; i < add; i++) {
1530 msg = kasprintf(GFP_KERNEL, "Corrected error "
f4742949
MCC
1531 "(Socket=%d channel=%d dimm=%d)",
1532 pvt->i7core_dev->socket, chan, dimm);
b4e8f0b6
MCC
1533
1534 edac_mc_handle_fbd_ce(mci, row, 0, msg);
1535 kfree (msg);
1536 }
1537}
1538
1539static void i7core_rdimm_update_ce_count(struct mem_ctl_info *mci,
1288c18f
MCC
1540 const int chan,
1541 const int new0,
1542 const int new1,
1543 const int new2)
b4e8f0b6
MCC
1544{
1545 struct i7core_pvt *pvt = mci->pvt_info;
1546 int add0 = 0, add1 = 0, add2 = 0;
1547 /* Updates CE counters if it is not the first time here */
f4742949 1548 if (pvt->ce_count_available) {
b4e8f0b6
MCC
1549 /* Updates CE counters */
1550
f4742949
MCC
1551 add2 = new2 - pvt->rdimm_last_ce_count[chan][2];
1552 add1 = new1 - pvt->rdimm_last_ce_count[chan][1];
1553 add0 = new0 - pvt->rdimm_last_ce_count[chan][0];
b4e8f0b6
MCC
1554
1555 if (add2 < 0)
1556 add2 += 0x7fff;
f4742949 1557 pvt->rdimm_ce_count[chan][2] += add2;
b4e8f0b6
MCC
1558
1559 if (add1 < 0)
1560 add1 += 0x7fff;
f4742949 1561 pvt->rdimm_ce_count[chan][1] += add1;
b4e8f0b6
MCC
1562
1563 if (add0 < 0)
1564 add0 += 0x7fff;
f4742949 1565 pvt->rdimm_ce_count[chan][0] += add0;
b4e8f0b6 1566 } else
f4742949 1567 pvt->ce_count_available = 1;
b4e8f0b6
MCC
1568
1569 /* Store the new values */
f4742949
MCC
1570 pvt->rdimm_last_ce_count[chan][2] = new2;
1571 pvt->rdimm_last_ce_count[chan][1] = new1;
1572 pvt->rdimm_last_ce_count[chan][0] = new0;
b4e8f0b6
MCC
1573
1574 /*updated the edac core */
1575 if (add0 != 0)
f4742949 1576 i7core_rdimm_update_csrow(mci, chan, 0, add0);
b4e8f0b6 1577 if (add1 != 0)
f4742949 1578 i7core_rdimm_update_csrow(mci, chan, 1, add1);
b4e8f0b6 1579 if (add2 != 0)
f4742949 1580 i7core_rdimm_update_csrow(mci, chan, 2, add2);
b4e8f0b6
MCC
1581
1582}
1583
/*
 * i7core_rdimm_check_mc_ecc_err() - poll the RDIMM corrected-error
 * counter registers (dev 3 func 2, MC_COR_ECC_CNT_0..5) for all three
 * channels and feed the per-dimm counts to
 * i7core_rdimm_update_ce_count().
 *
 * Each register pair holds the counters of one channel; how the fields
 * map to dimms depends on whether the channel has more than two dimms.
 */
static void i7core_rdimm_check_mc_ecc_err(struct mem_ctl_info *mci)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	u32 rcv[3][2];
	int i, new0, new1, new2;

	/*Read DEV 3: FUN 2:  MC_COR_ECC_CNT regs directly*/
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_0,
								&rcv[0][0]);
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_1,
								&rcv[0][1]);
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_2,
								&rcv[1][0]);
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_3,
								&rcv[1][1]);
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_4,
								&rcv[2][0]);
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_5,
								&rcv[2][1]);
	for (i = 0 ; i < 3; i++) {
		debugf3("MC_COR_ECC_CNT%d = 0x%x; MC_COR_ECC_CNT%d = 0x%x\n",
			(i * 2), rcv[i][0], (i * 2) + 1, rcv[i][1]);
		/*if the channel has 3 dimms*/
		if (pvt->channel[i].dimms > 2) {
			new0 = DIMM_BOT_COR_ERR(rcv[i][0]);
			new1 = DIMM_TOP_COR_ERR(rcv[i][0]);
			new2 = DIMM_BOT_COR_ERR(rcv[i][1]);
		} else {
			new0 = DIMM_TOP_COR_ERR(rcv[i][0]) +
					DIMM_BOT_COR_ERR(rcv[i][0]);
			new1 = DIMM_TOP_COR_ERR(rcv[i][1]) +
					DIMM_BOT_COR_ERR(rcv[i][1]);
			new2 = 0;
		}

		i7core_rdimm_update_ce_count(mci, i, new0, new1, new2);
	}
}
442305b1
MCC
1622
1623/* This function is based on the device 3 function 4 registers as described on:
1624 * Intel Xeon Processor 5500 Series Datasheet Volume 2
1625 * http://www.intel.com/Assets/PDF/datasheet/321322.pdf
1626 * also available at:
1627 * http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
1628 */
f4742949 1629static void i7core_udimm_check_mc_ecc_err(struct mem_ctl_info *mci)
442305b1
MCC
1630{
1631 struct i7core_pvt *pvt = mci->pvt_info;
1632 u32 rcv1, rcv0;
1633 int new0, new1, new2;
1634
f4742949 1635 if (!pvt->pci_mcr[4]) {
b990538a 1636 debugf0("%s MCR registers not found\n", __func__);
442305b1
MCC
1637 return;
1638 }
1639
b4e8f0b6 1640 /* Corrected test errors */
f4742949
MCC
1641 pci_read_config_dword(pvt->pci_mcr[4], MC_TEST_ERR_RCV1, &rcv1);
1642 pci_read_config_dword(pvt->pci_mcr[4], MC_TEST_ERR_RCV0, &rcv0);
442305b1
MCC
1643
1644 /* Store the new values */
1645 new2 = DIMM2_COR_ERR(rcv1);
1646 new1 = DIMM1_COR_ERR(rcv0);
1647 new0 = DIMM0_COR_ERR(rcv0);
1648
442305b1 1649 /* Updates CE counters if it is not the first time here */
f4742949 1650 if (pvt->ce_count_available) {
442305b1
MCC
1651 /* Updates CE counters */
1652 int add0, add1, add2;
1653
f4742949
MCC
1654 add2 = new2 - pvt->udimm_last_ce_count[2];
1655 add1 = new1 - pvt->udimm_last_ce_count[1];
1656 add0 = new0 - pvt->udimm_last_ce_count[0];
442305b1
MCC
1657
1658 if (add2 < 0)
1659 add2 += 0x7fff;
f4742949 1660 pvt->udimm_ce_count[2] += add2;
442305b1
MCC
1661
1662 if (add1 < 0)
1663 add1 += 0x7fff;
f4742949 1664 pvt->udimm_ce_count[1] += add1;
442305b1
MCC
1665
1666 if (add0 < 0)
1667 add0 += 0x7fff;
f4742949 1668 pvt->udimm_ce_count[0] += add0;
b4e8f0b6
MCC
1669
1670 if (add0 | add1 | add2)
1671 i7core_printk(KERN_ERR, "New Corrected error(s): "
1672 "dimm0: +%d, dimm1: +%d, dimm2 +%d\n",
1673 add0, add1, add2);
442305b1 1674 } else
f4742949 1675 pvt->ce_count_available = 1;
442305b1
MCC
1676
1677 /* Store the new values */
f4742949
MCC
1678 pvt->udimm_last_ce_count[2] = new2;
1679 pvt->udimm_last_ce_count[1] = new1;
1680 pvt->udimm_last_ce_count[0] = new0;
442305b1
MCC
1681}
1682
8a2f118e
MCC
1683/*
1684 * According with tables E-11 and E-12 of chapter E.3.3 of Intel 64 and IA-32
1685 * Architectures Software Developer’s Manual Volume 3B.
f237fcf2
MCC
1686 * Nehalem are defined as family 0x06, model 0x1a
1687 *
1688 * The MCA registers used here are the following ones:
8a2f118e 1689 * struct mce field MCA Register
f237fcf2
MCC
1690 * m->status MSR_IA32_MC8_STATUS
1691 * m->addr MSR_IA32_MC8_ADDR
1692 * m->misc MSR_IA32_MC8_MISC
8a2f118e
MCC
1693 * In the case of Nehalem, the error information is masked at .status and .misc
1694 * fields
1695 */
d5381642 1696static void i7core_mce_output_error(struct mem_ctl_info *mci,
1288c18f 1697 const struct mce *m)
d5381642 1698{
b4e8f0b6 1699 struct i7core_pvt *pvt = mci->pvt_info;
a639539f 1700 char *type, *optype, *err, *msg;
8a2f118e 1701 unsigned long error = m->status & 0x1ff0000l;
a639539f 1702 u32 optypenum = (m->status >> 4) & 0x07;
8a2f118e
MCC
1703 u32 core_err_cnt = (m->status >> 38) && 0x7fff;
1704 u32 dimm = (m->misc >> 16) & 0x3;
1705 u32 channel = (m->misc >> 18) & 0x3;
1706 u32 syndrome = m->misc >> 32;
1707 u32 errnum = find_first_bit(&error, 32);
b4e8f0b6 1708 int csrow;
8a2f118e 1709
c5d34528
MCC
1710 if (m->mcgstatus & 1)
1711 type = "FATAL";
1712 else
1713 type = "NON_FATAL";
1714
a639539f 1715 switch (optypenum) {
b990538a
MCC
1716 case 0:
1717 optype = "generic undef request";
1718 break;
1719 case 1:
1720 optype = "read error";
1721 break;
1722 case 2:
1723 optype = "write error";
1724 break;
1725 case 3:
1726 optype = "addr/cmd error";
1727 break;
1728 case 4:
1729 optype = "scrubbing error";
1730 break;
1731 default:
1732 optype = "reserved";
1733 break;
a639539f
MCC
1734 }
1735
8a2f118e
MCC
1736 switch (errnum) {
1737 case 16:
1738 err = "read ECC error";
1739 break;
1740 case 17:
1741 err = "RAS ECC error";
1742 break;
1743 case 18:
1744 err = "write parity error";
1745 break;
1746 case 19:
1747 err = "redundacy loss";
1748 break;
1749 case 20:
1750 err = "reserved";
1751 break;
1752 case 21:
1753 err = "memory range error";
1754 break;
1755 case 22:
1756 err = "RTID out of range";
1757 break;
1758 case 23:
1759 err = "address parity error";
1760 break;
1761 case 24:
1762 err = "byte enable parity error";
1763 break;
1764 default:
1765 err = "unknown";
d5381642 1766 }
d5381642 1767
f237fcf2 1768 /* FIXME: should convert addr into bank and rank information */
8a2f118e 1769 msg = kasprintf(GFP_ATOMIC,
f4742949 1770 "%s (addr = 0x%08llx, cpu=%d, Dimm=%d, Channel=%d, "
a639539f 1771 "syndrome=0x%08x, count=%d, Err=%08llx:%08llx (%s: %s))\n",
f4742949 1772 type, (long long) m->addr, m->cpu, dimm, channel,
a639539f
MCC
1773 syndrome, core_err_cnt, (long long)m->status,
1774 (long long)m->misc, optype, err);
8a2f118e
MCC
1775
1776 debugf0("%s", msg);
d5381642 1777
f4742949 1778 csrow = pvt->csrow_map[channel][dimm];
b4e8f0b6 1779
d5381642 1780 /* Call the helper to output message */
b4e8f0b6
MCC
1781 if (m->mcgstatus & 1)
1782 edac_mc_handle_fbd_ue(mci, csrow, 0,
1783 0 /* FIXME: should be channel here */, msg);
f4742949 1784 else if (!pvt->is_registered)
b4e8f0b6
MCC
1785 edac_mc_handle_fbd_ce(mci, csrow,
1786 0 /* FIXME: should be channel here */, msg);
8a2f118e
MCC
1787
1788 kfree(msg);
d5381642
MCC
1789}
1790
87d1d272
MCC
/*
 * i7core_check_error	Retrieve and process errors reported by the
 *			hardware. Called by the Core module.
 *
 * Drains the lock-free MCE ring filled by i7core_mce_check_error()
 * (producer side runs at NMI time) into mce_outentry, decodes each
 * entry, then refreshes the corrected-error counters.
 */
static void i7core_check_error(struct mem_ctl_info *mci)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	int i;
	unsigned count = 0;
	struct mce *m;

	/*
	 * MCE first step: Copy all mce errors into a temporary buffer
	 * We use a double buffering here, to reduce the risk of
	 * loosing an error.
	 */
	smp_rmb();
	count = (pvt->mce_out + MCE_LOG_LEN - pvt->mce_in)
		% MCE_LOG_LEN;
	if (!count)
		goto check_ce_error;

	m = pvt->mce_outentry;
	/* Ring wraps: copy the tail segment first, then restart at 0 */
	if (pvt->mce_in + count > MCE_LOG_LEN) {
		unsigned l = MCE_LOG_LEN - pvt->mce_in;

		memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * l);
		smp_wmb();
		pvt->mce_in = 0;
		count -= l;
		m += l;
	}
	memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * count);
	smp_wmb();
	pvt->mce_in += count;

	smp_rmb();
	if (pvt->mce_overrun) {
		i7core_printk(KERN_ERR, "Lost %d memory errors\n",
			      pvt->mce_overrun);
		smp_wmb();
		pvt->mce_overrun = 0;
	}

	/*
	 * MCE second step: parse errors and display
	 */
	for (i = 0; i < count; i++)
		i7core_mce_output_error(mci, &pvt->mce_outentry[i]);

	/*
	 * Now, let's increment CE error counts
	 */
check_ce_error:
	if (!pvt->is_registered)
		i7core_udimm_check_mc_ecc_err(mci);
	else
		i7core_rdimm_check_mc_ecc_err(mci);
}
1850
d5381642
MCC
/*
 * i7core_mce_check_error	Replicates mcelog routine to get errors
 *			This routine simply queues mcelog errors, and
 *			return. The error itself should be handled later
 *			by i7core_check_error.
 * WARNING: As this routine should be called at NMI time, extra care should
 * be taken to avoid deadlocks, and to be as fast as possible.
 *
 * Returns 1 when the error was queued (telling mcelog it was handled),
 * 0 when the event is not ours or the ring is full.
 */
static int i7core_mce_check_error(void *priv, struct mce *mce)
{
	struct mem_ctl_info *mci = priv;
	struct i7core_pvt *pvt = mci->pvt_info;

	/*
	 * Just let mcelog handle it if the error is
	 * outside the memory controller
	 */
	if (((mce->status & 0xffff) >> 7) != 1)
		return 0;

	/* Bank 8 registers are the only ones that we know how to handle */
	if (mce->bank != 8)
		return 0;

#ifdef CONFIG_SMP
	/* Only handle if it is the right mc controller */
	if (cpu_data(mce->cpu).phys_proc_id != pvt->i7core_dev->socket)
		return 0;
#endif

	smp_rmb();
	/* Ring full: count the overrun instead of overwriting entries */
	if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) {
		smp_wmb();
		pvt->mce_overrun++;
		return 0;
	}

	/* Copy memory error at the ringbuffer */
	memcpy(&pvt->mce_entry[pvt->mce_out], mce, sizeof(*mce));
	smp_wmb();
	pvt->mce_out = (pvt->mce_out + 1) % MCE_LOG_LEN;

	/* Handle fatal errors immediately */
	if (mce->mcgstatus & 1)
		i7core_check_error(mci);

	/* Advice mcelog that the error were handled */
	return 1;
}
1900
a3aa0a4a
HS
1901static void i7core_pci_ctl_create(struct i7core_pvt *pvt)
1902{
1903 pvt->i7core_pci = edac_pci_create_generic_ctl(
1904 &pvt->i7core_dev->pdev[0]->dev,
1905 EDAC_MOD_STR);
1906 if (unlikely(!pvt->i7core_pci))
1907 pr_warn("Unable to setup PCI error report via EDAC\n");
1908}
1909
1910static void i7core_pci_ctl_release(struct i7core_pvt *pvt)
1911{
1912 if (likely(pvt->i7core_pci))
1913 edac_pci_release_generic_ctl(pvt->i7core_pci);
1914 else
1915 i7core_printk(KERN_ERR,
1916 "Couldn't find mem_ctl_info for socket %d\n",
1917 pvt->i7core_dev->socket);
1918 pvt->i7core_pci = NULL;
1919}
1920
/*
 * i7core_register_mci() - allocate, configure and register one EDAC MC
 * @i7core_dev:   per-socket group of PCI devices (pdev[0] is the MC device)
 * @num_channels: number of active memory channels on this socket
 * @num_csrows:   number of chip-select rows
 *
 * Returns 0 on success or a negative errno.  On failure, everything
 * acquired so far is torn down in reverse order through the fail labels.
 */
static int i7core_register_mci(struct i7core_dev *i7core_dev,
			       const int num_channels, const int num_csrows)
{
	struct mem_ctl_info *mci;
	struct i7core_pvt *pvt;
	int csrow = 0;
	int rc;

	/* allocate a new MC control structure (pvt is embedded in mci) */
	mci = edac_mc_alloc(sizeof(*pvt), num_csrows, num_channels,
			    i7core_dev->socket);
	if (unlikely(!mci))
		return -ENOMEM;

	debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n",
		__func__, mci, &i7core_dev->pdev[0]->dev);

	pvt = mci->pvt_info;
	memset(pvt, 0, sizeof(*pvt));

	/*
	 * FIXME: how to handle RDDR3 at MCI level? It is possible to have
	 * Mixed RDDR3/UDDR3 with Nehalem, provided that they are on different
	 * memory channels
	 */
	mci->mtype_cap = MEM_FLAG_DDR3;
	mci->edac_ctl_cap = EDAC_FLAG_NONE;
	mci->edac_cap = EDAC_FLAG_NONE;
	mci->mod_name = "i7core_edac.c";
	mci->mod_ver = I7CORE_REVISION;
	/* NOTE(review): kasprintf() result is not checked; on OOM ctl_name
	 * stays NULL (kfree(NULL) in the fail paths is safe, but sysfs/log
	 * output would show a NULL name). */
	mci->ctl_name = kasprintf(GFP_KERNEL, "i7 core #%d",
				  i7core_dev->socket);
	mci->dev_name = pci_name(i7core_dev->pdev[0]);
	mci->ctl_page_to_phys = NULL;

	/* Store pci devices at mci for faster access */
	rc = mci_bind_devs(mci, i7core_dev);
	if (unlikely(rc < 0))
		goto fail0;

	/* Select the sysfs attribute set: RDIMM vs UDIMM injection knobs.
	 * pvt->is_registered was filled in by mci_bind_devs() above. */
	if (pvt->is_registered)
		mci->mc_driver_sysfs_attributes = i7core_sysfs_rdimm_attrs;
	else
		mci->mc_driver_sysfs_attributes = i7core_sysfs_udimm_attrs;

	/* Get dimm basic config */
	get_dimm_config(mci, &csrow);
	/* record ptr to the generic device */
	mci->dev = &i7core_dev->pdev[0]->dev;
	/* Set the function pointer to an actual operation function */
	mci->edac_check = i7core_check_error;

	/* add this new MC control structure to EDAC's list of MCs */
	if (unlikely(edac_mc_add_mc(mci))) {
		debugf0("MC: " __FILE__
			": %s(): failed edac_mc_add_mc()\n", __func__);
		/* FIXME: perhaps some code should go here that disables error
		 * reporting if we just enabled it
		 */

		rc = -EINVAL;
		goto fail0;
	}

	/* Default error mask is any memory: -1 means "match any value" */
	pvt->inject.channel = 0;
	pvt->inject.dimm = -1;
	pvt->inject.rank = -1;
	pvt->inject.bank = -1;
	pvt->inject.page = -1;
	pvt->inject.col = -1;

	/* allocating generic PCI control info */
	i7core_pci_ctl_create(pvt);

	/* Registers on edac_mce in order to receive memory errors */
	pvt->edac_mce.priv = mci;
	pvt->edac_mce.check_error = i7core_mce_check_error;
	rc = edac_mce_register(&pvt->edac_mce);
	if (unlikely(rc < 0)) {
		debugf0("MC: " __FILE__
			": %s(): failed edac_mce_register()\n", __func__);
		goto fail1;
	}

	return 0;

	/* Unwind in strict reverse order of acquisition */
fail1:
	i7core_pci_ctl_release(pvt);
	edac_mc_del_mc(mci->dev);
fail0:
	kfree(mci->ctl_name);
	edac_mc_free(mci);
	return rc;
}
2016
/*
 * i7core_probe	Probe for ONE instance of device to see if it is
 *		present.
 *	return:
 *		0 for FOUND a device
 *		< 0 for error code
 *
 * Note: the probe runs once for the whole system — every socket's memory
 * controller is discovered and registered on the first call; subsequent
 * calls are rejected via the 'probed' guard.
 */

static int __devinit i7core_probe(struct pci_dev *pdev,
				  const struct pci_device_id *id)
{
	int rc;
	struct i7core_dev *i7core_dev;

	/* get the pci devices we want to reserve for our use */
	mutex_lock(&i7core_edac_lock);

	/*
	 * All memory controllers are allocated at the first pass.
	 */
	if (unlikely(probed >= 1)) {
		mutex_unlock(&i7core_edac_lock);
		return -EINVAL;
	}
	/* NOTE(review): 'probed' is not decremented on the failure paths
	 * below, so a failed probe blocks any retry until remove() runs —
	 * confirm this is intended. */
	probed++;

	/* Reserve every PCI device belonging to all sockets' MCs */
	rc = i7core_get_all_devices();
	if (unlikely(rc < 0))
		goto fail0;

	/* Register one EDAC MC per discovered socket */
	list_for_each_entry(i7core_dev, &i7core_edac_list, list) {
		int channels;
		int csrows;

		/* Check the number of active and not disabled channels */
		rc = i7core_get_active_channels(i7core_dev->socket,
						&channels, &csrows);
		if (unlikely(rc < 0))
			goto fail1;

		rc = i7core_register_mci(i7core_dev, channels, csrows);
		if (unlikely(rc < 0))
			goto fail1;
	}

	i7core_printk(KERN_INFO, "Driver loaded.\n");

	mutex_unlock(&i7core_edac_lock);
	return 0;

fail1:
	/* Drop every PCI device reserved by i7core_get_all_devices() */
	i7core_put_all_devices();
fail0:
	mutex_unlock(&i7core_edac_lock);
	return rc;
}
2073
2074/*
2075 * i7core_remove destructor for one instance of device
2076 *
2077 */
2078static void __devexit i7core_remove(struct pci_dev *pdev)
2079{
2080 struct mem_ctl_info *mci;
64c10f6e 2081 struct i7core_dev *i7core_dev;
939747bd 2082 struct i7core_pvt *pvt;
a0c36a1f
MCC
2083
2084 debugf0(__FILE__ ": %s()\n", __func__);
2085
22e6bcbd
MCC
2086 /*
2087 * we have a trouble here: pdev value for removal will be wrong, since
2088 * it will point to the X58 register used to detect that the machine
2089 * is a Nehalem or upper design. However, due to the way several PCI
2090 * devices are grouped together to provide MC functionality, we need
2091 * to use a different method for releasing the devices
2092 */
87d1d272 2093
66607706 2094 mutex_lock(&i7core_edac_lock);
64c10f6e 2095 list_for_each_entry(i7core_dev, &i7core_edac_list, list) {
939747bd
MCC
2096 mci = find_mci_by_dev(&i7core_dev->pdev[0]->dev);
2097 if (unlikely(!mci || !mci->pvt_info)) {
3cfd0146
MCC
2098 debugf0("MC: " __FILE__ ": %s(): dev = %p\n",
2099 __func__, &i7core_dev->pdev[0]->dev);
2100
2101 i7core_printk(KERN_ERR,
939747bd
MCC
2102 "Couldn't find mci hanler\n");
2103 } else {
2104 pvt = mci->pvt_info;
22e6bcbd 2105 i7core_dev = pvt->i7core_dev;
939747bd 2106
3cfd0146
MCC
2107 debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n",
2108 __func__, mci, &i7core_dev->pdev[0]->dev);
2109
41ba6c10
MCC
2110 /* Disable MCE NMI handler */
2111 edac_mce_unregister(&pvt->edac_mce);
2112
2113 /* Disable EDAC polling */
a3aa0a4a 2114 i7core_pci_ctl_release(pvt);
939747bd 2115
41ba6c10 2116 /* Remove MC sysfs nodes */
939747bd
MCC
2117 edac_mc_del_mc(&i7core_dev->pdev[0]->dev);
2118
accf74ff 2119 debugf1("%s: free mci struct\n", mci->ctl_name);
22e6bcbd
MCC
2120 kfree(mci->ctl_name);
2121 edac_mc_free(mci);
22e6bcbd
MCC
2122 }
2123 }
64c10f6e
HS
2124
2125 /* Release PCI resources */
2126 i7core_put_all_devices();
2127
2d95d815
MCC
2128 probed--;
2129
66607706 2130 mutex_unlock(&i7core_edac_lock);
a0c36a1f
MCC
2131}
2132
a0c36a1f
MCC
MODULE_DEVICE_TABLE(pci, i7core_pci_tbl);

/*
 * i7core_driver	pci_driver structure for this module
 *
 * Only probe/remove are provided; the probe binds once system-wide and
 * reserves the remaining PCI functions itself (see i7core_probe()).
 */
static struct pci_driver i7core_driver = {
	.name     = "i7core_edac",
	.probe    = i7core_probe,
	.remove   = __devexit_p(i7core_remove),
	.id_table = i7core_pci_tbl,
};
2145
2146/*
2147 * i7core_init Module entry function
2148 * Try to initialize this module for its devices
2149 */
2150static int __init i7core_init(void)
2151{
2152 int pci_rc;
2153
2154 debugf2("MC: " __FILE__ ": %s()\n", __func__);
2155
2156 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
2157 opstate_init();
2158
54a08ab1
MCC
2159 if (use_pci_fixup)
2160 i7core_xeon_pci_fixup(pci_dev_table);
bc2d7245 2161
a0c36a1f
MCC
2162 pci_rc = pci_register_driver(&i7core_driver);
2163
3ef288a9
MCC
2164 if (pci_rc >= 0)
2165 return 0;
2166
2167 i7core_printk(KERN_ERR, "Failed to register device with error %d.\n",
2168 pci_rc);
2169
2170 return pci_rc;
a0c36a1f
MCC
2171}
2172
/*
 * i7core_exit()	Module exit function
 *			Unregister the driver
 *
 * pci_unregister_driver() triggers i7core_remove(), which releases all
 * per-socket resources before the module text goes away.
 */
static void __exit i7core_exit(void)
{
	debugf2("MC: " __FILE__ ": %s()\n", __func__);
	pci_unregister_driver(&i7core_driver);
}
2182
module_init(i7core_init);
module_exit(i7core_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
MODULE_DESCRIPTION("MC Driver for Intel i7 Core memory controllers - "
		   I7CORE_REVISION);

/* Read-only (0444) parameter; consumed by opstate_init() at module load */
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");