i7core_edac: scrubbing fixups
[GitHub/LineageOS/android_kernel_samsung_universal7580.git] / drivers / edac / i7core_edac.c
CommitLineData
52707f91
MCC
1/* Intel i7 core/Nehalem Memory Controller kernel module
2 *
e7bf068a 3 * This driver supports the memory controllers found on the Intel
52707f91
MCC
4 * processor families i7core, i7core 7xx/8xx, i5core, Xeon 35xx,
5 * Xeon 55xx and Xeon 56xx also known as Nehalem, Nehalem-EP, Lynnfield
6 * and Westmere-EP.
a0c36a1f
MCC
7 *
8 * This file may be distributed under the terms of the
9 * GNU General Public License version 2 only.
10 *
52707f91 11 * Copyright (c) 2009-2010 by:
a0c36a1f
MCC
12 * Mauro Carvalho Chehab <mchehab@redhat.com>
13 *
14 * Red Hat Inc. http://www.redhat.com
15 *
16 * Forked and adapted from the i5400_edac driver
17 *
18 * Based on the following public Intel datasheets:
19 * Intel Core i7 Processor Extreme Edition and Intel Core i7 Processor
20 * Datasheet, Volume 2:
21 * http://download.intel.com/design/processor/datashts/320835.pdf
22 * Intel Xeon Processor 5500 Series Datasheet Volume 2
23 * http://www.intel.com/Assets/PDF/datasheet/321322.pdf
24 * also available at:
25 * http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
26 */
27
a0c36a1f
MCC
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/pci.h>
31#include <linux/pci_ids.h>
32#include <linux/slab.h>
3b918c12 33#include <linux/delay.h>
535e9c78 34#include <linux/dmi.h>
a0c36a1f
MCC
35#include <linux/edac.h>
36#include <linux/mmzone.h>
f4742949 37#include <linux/smp.h>
4140c542 38#include <asm/mce.h>
14d2c083 39#include <asm/processor.h>
a0c36a1f
MCC
40
41#include "edac_core.h"
42
18c29002
MCC
43/* Static vars */
44static LIST_HEAD(i7core_edac_list);
45static DEFINE_MUTEX(i7core_edac_lock);
46static int probed;
47
54a08ab1
MCC
48static int use_pci_fixup;
49module_param(use_pci_fixup, int, 0444);
50MODULE_PARM_DESC(use_pci_fixup, "Enable PCI fixup to seek for hidden devices");
f4742949
MCC
51/*
52 * This is used for Nehalem-EP and Nehalem-EX devices, where the non-core
53 * registers start at bus 255, and are not reported by BIOS.
54 * We currently find devices with only 2 sockets. In order to support more QPI
55 * Quick Path Interconnect, just increment this number.
56 */
57#define MAX_SOCKET_BUSES 2
58
59
a0c36a1f
MCC
60/*
61 * Alter this version for the module when modifications are made
62 */
152ba394 63#define I7CORE_REVISION " Ver: 1.0.0"
a0c36a1f
MCC
64#define EDAC_MOD_STR "i7core_edac"
65
a0c36a1f
MCC
66/*
67 * Debug macros
68 */
69#define i7core_printk(level, fmt, arg...) \
70 edac_printk(level, "i7core", fmt, ##arg)
71
72#define i7core_mc_printk(mci, level, fmt, arg...) \
73 edac_mc_chipset_printk(mci, level, "i7core", fmt, ##arg)
74
75/*
76 * i7core Memory Controller Registers
77 */
78
e9bd2e73
MCC
79 /* OFFSETS for Device 0 Function 0 */
80
81#define MC_CFG_CONTROL 0x90
e8b6a127
SG
82 #define MC_CFG_UNLOCK 0x02
83 #define MC_CFG_LOCK 0x00
e9bd2e73 84
a0c36a1f
MCC
85 /* OFFSETS for Device 3 Function 0 */
86
87#define MC_CONTROL 0x48
88#define MC_STATUS 0x4c
89#define MC_MAX_DOD 0x64
90
442305b1
MCC
91/*
92 * OFFSETS for Device 3 Function 4, as indicated on Xeon 5500 datasheet:
93 * http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
94 */
95
96#define MC_TEST_ERR_RCV1 0x60
97 #define DIMM2_COR_ERR(r) ((r) & 0x7fff)
98
99#define MC_TEST_ERR_RCV0 0x64
100 #define DIMM1_COR_ERR(r) (((r) >> 16) & 0x7fff)
101 #define DIMM0_COR_ERR(r) ((r) & 0x7fff)
102
b4e8f0b6 103/* OFFSETS for Device 3 Function 2, as indicated on Xeon 5500 datasheet */
e8b6a127
SG
104#define MC_SSRCONTROL 0x48
105 #define SSR_MODE_DISABLE 0x00
106 #define SSR_MODE_ENABLE 0x01
107 #define SSR_MODE_MASK 0x03
108
109#define MC_SCRUB_CONTROL 0x4c
110 #define STARTSCRUB (1 << 24)
535e9c78 111 #define SCRUBINTERVAL_MASK 0xffffff
e8b6a127 112
b4e8f0b6
MCC
113#define MC_COR_ECC_CNT_0 0x80
114#define MC_COR_ECC_CNT_1 0x84
115#define MC_COR_ECC_CNT_2 0x88
116#define MC_COR_ECC_CNT_3 0x8c
117#define MC_COR_ECC_CNT_4 0x90
118#define MC_COR_ECC_CNT_5 0x94
119
120#define DIMM_TOP_COR_ERR(r) (((r) >> 16) & 0x7fff)
121#define DIMM_BOT_COR_ERR(r) ((r) & 0x7fff)
122
123
a0c36a1f
MCC
124 /* OFFSETS for Devices 4,5 and 6 Function 0 */
125
0b2b7b7e
MCC
126#define MC_CHANNEL_DIMM_INIT_PARAMS 0x58
127 #define THREE_DIMMS_PRESENT (1 << 24)
128 #define SINGLE_QUAD_RANK_PRESENT (1 << 23)
129 #define QUAD_RANK_PRESENT (1 << 22)
130 #define REGISTERED_DIMM (1 << 15)
131
f122a892
MCC
132#define MC_CHANNEL_MAPPER 0x60
133 #define RDLCH(r, ch) ((((r) >> (3 + (ch * 6))) & 0x07) - 1)
134 #define WRLCH(r, ch) ((((r) >> (ch * 6)) & 0x07) - 1)
135
0b2b7b7e
MCC
136#define MC_CHANNEL_RANK_PRESENT 0x7c
137 #define RANK_PRESENT_MASK 0xffff
138
a0c36a1f 139#define MC_CHANNEL_ADDR_MATCH 0xf0
194a40fe
MCC
140#define MC_CHANNEL_ERROR_MASK 0xf8
141#define MC_CHANNEL_ERROR_INJECT 0xfc
142 #define INJECT_ADDR_PARITY 0x10
143 #define INJECT_ECC 0x08
144 #define MASK_CACHELINE 0x06
145 #define MASK_FULL_CACHELINE 0x06
146 #define MASK_MSB32_CACHELINE 0x04
147 #define MASK_LSB32_CACHELINE 0x02
148 #define NO_MASK_CACHELINE 0x00
149 #define REPEAT_EN 0x01
a0c36a1f 150
0b2b7b7e 151 /* OFFSETS for Devices 4,5 and 6 Function 1 */
b990538a 152
0b2b7b7e
MCC
153#define MC_DOD_CH_DIMM0 0x48
154#define MC_DOD_CH_DIMM1 0x4c
155#define MC_DOD_CH_DIMM2 0x50
156 #define RANKOFFSET_MASK ((1 << 12) | (1 << 11) | (1 << 10))
157 #define RANKOFFSET(x) ((x & RANKOFFSET_MASK) >> 10)
158 #define DIMM_PRESENT_MASK (1 << 9)
159 #define DIMM_PRESENT(x) (((x) & DIMM_PRESENT_MASK) >> 9)
854d3349
MCC
160 #define MC_DOD_NUMBANK_MASK ((1 << 8) | (1 << 7))
161 #define MC_DOD_NUMBANK(x) (((x) & MC_DOD_NUMBANK_MASK) >> 7)
162 #define MC_DOD_NUMRANK_MASK ((1 << 6) | (1 << 5))
163 #define MC_DOD_NUMRANK(x) (((x) & MC_DOD_NUMRANK_MASK) >> 5)
41fcb7fe 164 #define MC_DOD_NUMROW_MASK ((1 << 4) | (1 << 3) | (1 << 2))
5566cb7c 165 #define MC_DOD_NUMROW(x) (((x) & MC_DOD_NUMROW_MASK) >> 2)
854d3349
MCC
166 #define MC_DOD_NUMCOL_MASK 3
167 #define MC_DOD_NUMCOL(x) ((x) & MC_DOD_NUMCOL_MASK)
0b2b7b7e 168
f122a892
MCC
169#define MC_RANK_PRESENT 0x7c
170
0b2b7b7e
MCC
171#define MC_SAG_CH_0 0x80
172#define MC_SAG_CH_1 0x84
173#define MC_SAG_CH_2 0x88
174#define MC_SAG_CH_3 0x8c
175#define MC_SAG_CH_4 0x90
176#define MC_SAG_CH_5 0x94
177#define MC_SAG_CH_6 0x98
178#define MC_SAG_CH_7 0x9c
179
180#define MC_RIR_LIMIT_CH_0 0x40
181#define MC_RIR_LIMIT_CH_1 0x44
182#define MC_RIR_LIMIT_CH_2 0x48
183#define MC_RIR_LIMIT_CH_3 0x4C
184#define MC_RIR_LIMIT_CH_4 0x50
185#define MC_RIR_LIMIT_CH_5 0x54
186#define MC_RIR_LIMIT_CH_6 0x58
187#define MC_RIR_LIMIT_CH_7 0x5C
188#define MC_RIR_LIMIT_MASK ((1 << 10) - 1)
189
190#define MC_RIR_WAY_CH 0x80
191 #define MC_RIR_WAY_OFFSET_MASK (((1 << 14) - 1) & ~0x7)
192 #define MC_RIR_WAY_RANK_MASK 0x7
193
a0c36a1f
MCC
194/*
195 * i7core structs
196 */
197
198#define NUM_CHANS 3
442305b1
MCC
199#define MAX_DIMMS 3 /* Max DIMMS per channel */
200#define MAX_MCR_FUNC 4
201#define MAX_CHAN_FUNC 3
a0c36a1f
MCC
202
203struct i7core_info {
204 u32 mc_control;
205 u32 mc_status;
206 u32 max_dod;
f122a892 207 u32 ch_map;
a0c36a1f
MCC
208};
209
194a40fe
MCC
210
211struct i7core_inject {
212 int enable;
213
214 u32 section;
215 u32 type;
216 u32 eccmask;
217
218 /* Error address mask */
219 int channel, dimm, rank, bank, page, col;
220};
221
0b2b7b7e 222struct i7core_channel {
442305b1
MCC
223 u32 ranks;
224 u32 dimms;
0b2b7b7e
MCC
225};
226
8f331907 227struct pci_id_descr {
66607706
MCC
228 int dev;
229 int func;
230 int dev_id;
de06eeef 231 int optional;
8f331907
MCC
232};
233
bd9e19ca 234struct pci_id_table {
1288c18f
MCC
235 const struct pci_id_descr *descr;
236 int n_devs;
bd9e19ca
VM
237};
238
f4742949
MCC
239struct i7core_dev {
240 struct list_head list;
241 u8 socket;
242 struct pci_dev **pdev;
de06eeef 243 int n_devs;
f4742949
MCC
244 struct mem_ctl_info *mci;
245};
246
a0c36a1f 247struct i7core_pvt {
f4742949
MCC
248 struct pci_dev *pci_noncore;
249 struct pci_dev *pci_mcr[MAX_MCR_FUNC + 1];
250 struct pci_dev *pci_ch[NUM_CHANS][MAX_CHAN_FUNC + 1];
251
252 struct i7core_dev *i7core_dev;
67166af4 253
a0c36a1f 254 struct i7core_info info;
194a40fe 255 struct i7core_inject inject;
f4742949 256 struct i7core_channel channel[NUM_CHANS];
67166af4 257
f4742949
MCC
258 int ce_count_available;
259 int csrow_map[NUM_CHANS][MAX_DIMMS];
b4e8f0b6
MCC
260
261 /* ECC corrected errors counts per udimm */
f4742949
MCC
262 unsigned long udimm_ce_count[MAX_DIMMS];
263 int udimm_last_ce_count[MAX_DIMMS];
b4e8f0b6 264 /* ECC corrected errors counts per rdimm */
f4742949
MCC
265 unsigned long rdimm_ce_count[NUM_CHANS][MAX_DIMMS];
266 int rdimm_last_ce_count[NUM_CHANS][MAX_DIMMS];
442305b1 267
27100db0 268 bool is_registered, enable_scrub;
14d2c083 269
ca9c90ba 270 /* Fifo double buffers */
d5381642 271 struct mce mce_entry[MCE_LOG_LEN];
ca9c90ba
MCC
272 struct mce mce_outentry[MCE_LOG_LEN];
273
274 /* Fifo in/out counters */
275 unsigned mce_in, mce_out;
276
277 /* Count indicator to show errors not got */
278 unsigned mce_overrun;
939747bd 279
535e9c78
NC
280 /* DCLK Frequency used for computing scrub rate */
281 int dclk_freq;
282
939747bd
MCC
283 /* Struct to control EDAC polling */
284 struct edac_pci_ctl_info *i7core_pci;
a0c36a1f
MCC
285};
286
8f331907
MCC
287#define PCI_DESCR(device, function, device_id) \
288 .dev = (device), \
289 .func = (function), \
290 .dev_id = (device_id)
291
1288c18f 292static const struct pci_id_descr pci_dev_descr_i7core_nehalem[] = {
8f331907
MCC
293 /* Memory controller */
294 { PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_I7_MCR) },
295 { PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_I7_MC_TAD) },
224e871f 296 /* Exists only for RDIMM */
de06eeef 297 { PCI_DESCR(3, 2, PCI_DEVICE_ID_INTEL_I7_MC_RAS), .optional = 1 },
8f331907
MCC
298 { PCI_DESCR(3, 4, PCI_DEVICE_ID_INTEL_I7_MC_TEST) },
299
300 /* Channel 0 */
301 { PCI_DESCR(4, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH0_CTRL) },
302 { PCI_DESCR(4, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH0_ADDR) },
303 { PCI_DESCR(4, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH0_RANK) },
304 { PCI_DESCR(4, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH0_TC) },
305
306 /* Channel 1 */
307 { PCI_DESCR(5, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH1_CTRL) },
308 { PCI_DESCR(5, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH1_ADDR) },
309 { PCI_DESCR(5, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH1_RANK) },
310 { PCI_DESCR(5, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH1_TC) },
311
312 /* Channel 2 */
313 { PCI_DESCR(6, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH2_CTRL) },
314 { PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH2_ADDR) },
315 { PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH2_RANK) },
316 { PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH2_TC) },
224e871f
MCC
317
318 /* Generic Non-core registers */
319 /*
320 * This is the PCI device on i7core and on Xeon 35xx (8086:2c41)
321 * On Xeon 55xx, however, it has a different id (8086:2c40). So,
322 * the probing code needs to test for the other address in case of
323 * failure of this one
324 */
325 { PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_I7_NONCORE) },
326
a0c36a1f 327};
8f331907 328
1288c18f 329static const struct pci_id_descr pci_dev_descr_lynnfield[] = {
52a2e4fc
MCC
330 { PCI_DESCR( 3, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR) },
331 { PCI_DESCR( 3, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD) },
332 { PCI_DESCR( 3, 4, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST) },
333
334 { PCI_DESCR( 4, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL) },
335 { PCI_DESCR( 4, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR) },
336 { PCI_DESCR( 4, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK) },
337 { PCI_DESCR( 4, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC) },
338
508fa179
MCC
339 { PCI_DESCR( 5, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL) },
340 { PCI_DESCR( 5, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR) },
341 { PCI_DESCR( 5, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK) },
342 { PCI_DESCR( 5, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC) },
224e871f
MCC
343
344 /*
345 * This is the PCI device has an alternate address on some
346 * processors like Core i7 860
347 */
348 { PCI_DESCR( 0, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE) },
52a2e4fc
MCC
349};
350
1288c18f 351static const struct pci_id_descr pci_dev_descr_i7core_westmere[] = {
bd9e19ca
VM
352 /* Memory controller */
353 { PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR_REV2) },
354 { PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD_REV2) },
355 /* Exists only for RDIMM */
356 { PCI_DESCR(3, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_RAS_REV2), .optional = 1 },
357 { PCI_DESCR(3, 4, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST_REV2) },
358
359 /* Channel 0 */
360 { PCI_DESCR(4, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL_REV2) },
361 { PCI_DESCR(4, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR_REV2) },
362 { PCI_DESCR(4, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK_REV2) },
363 { PCI_DESCR(4, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC_REV2) },
364
365 /* Channel 1 */
366 { PCI_DESCR(5, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL_REV2) },
367 { PCI_DESCR(5, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR_REV2) },
368 { PCI_DESCR(5, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK_REV2) },
369 { PCI_DESCR(5, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC_REV2) },
370
371 /* Channel 2 */
372 { PCI_DESCR(6, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_CTRL_REV2) },
373 { PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_ADDR_REV2) },
374 { PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_RANK_REV2) },
375 { PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_TC_REV2) },
224e871f
MCC
376
377 /* Generic Non-core registers */
378 { PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_REV2) },
379
bd9e19ca
VM
380};
381
1288c18f
MCC
382#define PCI_ID_TABLE_ENTRY(A) { .descr=A, .n_devs = ARRAY_SIZE(A) }
383static const struct pci_id_table pci_dev_table[] = {
bd9e19ca
VM
384 PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_nehalem),
385 PCI_ID_TABLE_ENTRY(pci_dev_descr_lynnfield),
386 PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_westmere),
3c52cc57 387 {0,} /* 0 terminated list. */
bd9e19ca
VM
388};
389
8f331907
MCC
390/*
391 * pci_device_id table for which devices we are looking for
8f331907
MCC
392 */
393static const struct pci_device_id i7core_pci_tbl[] __devinitdata = {
d1fd4fb6 394 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
f05da2f7 395 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
8f331907
MCC
396 {0,} /* 0 terminated list. */
397};
398
a0c36a1f
MCC
399/****************************************************************************
400 Ancillary status routines
401 ****************************************************************************/
402
403 /* MC_CONTROL bits */
ef708b53
MCC
404#define CH_ACTIVE(pvt, ch) ((pvt)->info.mc_control & (1 << (8 + ch)))
405#define ECCx8(pvt) ((pvt)->info.mc_control & (1 << 1))
a0c36a1f
MCC
406
407 /* MC_STATUS bits */
61053fde 408#define ECC_ENABLED(pvt) ((pvt)->info.mc_status & (1 << 4))
ef708b53 409#define CH_DISABLED(pvt, ch) ((pvt)->info.mc_status & (1 << ch))
a0c36a1f
MCC
410
411 /* MC_MAX_DOD read functions */
854d3349 412static inline int numdimms(u32 dimms)
a0c36a1f 413{
854d3349 414 return (dimms & 0x3) + 1;
a0c36a1f
MCC
415}
416
854d3349 417static inline int numrank(u32 rank)
a0c36a1f
MCC
418{
419 static int ranks[4] = { 1, 2, 4, -EINVAL };
420
854d3349 421 return ranks[rank & 0x3];
a0c36a1f
MCC
422}
423
854d3349 424static inline int numbank(u32 bank)
a0c36a1f
MCC
425{
426 static int banks[4] = { 4, 8, 16, -EINVAL };
427
854d3349 428 return banks[bank & 0x3];
a0c36a1f
MCC
429}
430
854d3349 431static inline int numrow(u32 row)
a0c36a1f
MCC
432{
433 static int rows[8] = {
434 1 << 12, 1 << 13, 1 << 14, 1 << 15,
435 1 << 16, -EINVAL, -EINVAL, -EINVAL,
436 };
437
854d3349 438 return rows[row & 0x7];
a0c36a1f
MCC
439}
440
854d3349 441static inline int numcol(u32 col)
a0c36a1f
MCC
442{
443 static int cols[8] = {
444 1 << 10, 1 << 11, 1 << 12, -EINVAL,
445 };
854d3349 446 return cols[col & 0x3];
a0c36a1f
MCC
447}
448
f4742949 449static struct i7core_dev *get_i7core_dev(u8 socket)
66607706
MCC
450{
451 struct i7core_dev *i7core_dev;
452
453 list_for_each_entry(i7core_dev, &i7core_edac_list, list) {
454 if (i7core_dev->socket == socket)
455 return i7core_dev;
456 }
457
458 return NULL;
459}
460
848b2f7e
HS
461static struct i7core_dev *alloc_i7core_dev(u8 socket,
462 const struct pci_id_table *table)
463{
464 struct i7core_dev *i7core_dev;
465
466 i7core_dev = kzalloc(sizeof(*i7core_dev), GFP_KERNEL);
467 if (!i7core_dev)
468 return NULL;
469
470 i7core_dev->pdev = kzalloc(sizeof(*i7core_dev->pdev) * table->n_devs,
471 GFP_KERNEL);
472 if (!i7core_dev->pdev) {
473 kfree(i7core_dev);
474 return NULL;
475 }
476
477 i7core_dev->socket = socket;
478 i7core_dev->n_devs = table->n_devs;
479 list_add_tail(&i7core_dev->list, &i7core_edac_list);
480
481 return i7core_dev;
482}
483
2aa9be44
HS
484static void free_i7core_dev(struct i7core_dev *i7core_dev)
485{
486 list_del(&i7core_dev->list);
487 kfree(i7core_dev->pdev);
488 kfree(i7core_dev);
489}
490
a0c36a1f
MCC
491/****************************************************************************
492 Memory check routines
493 ****************************************************************************/
67166af4
MCC
494static struct pci_dev *get_pdev_slot_func(u8 socket, unsigned slot,
495 unsigned func)
ef708b53 496{
66607706 497 struct i7core_dev *i7core_dev = get_i7core_dev(socket);
ef708b53 498 int i;
ef708b53 499
66607706
MCC
500 if (!i7core_dev)
501 return NULL;
502
de06eeef 503 for (i = 0; i < i7core_dev->n_devs; i++) {
66607706 504 if (!i7core_dev->pdev[i])
ef708b53
MCC
505 continue;
506
66607706
MCC
507 if (PCI_SLOT(i7core_dev->pdev[i]->devfn) == slot &&
508 PCI_FUNC(i7core_dev->pdev[i]->devfn) == func) {
509 return i7core_dev->pdev[i];
ef708b53
MCC
510 }
511 }
512
eb94fc40
MCC
513 return NULL;
514}
515
ec6df24c
MCC
516/**
517 * i7core_get_active_channels() - gets the number of channels and csrows
518 * @socket: Quick Path Interconnect socket
519 * @channels: Number of channels that will be returned
520 * @csrows: Number of csrows found
521 *
522 * Since EDAC core needs to know in advance the number of available channels
523 * and csrows, in order to allocate memory for csrows/channels, it is needed
524 * to run two similar steps. At the first step, implemented on this function,
525 * it checks the number of csrows/channels present at one socket.
526 * this is used in order to properly allocate the size of mci components.
527 *
528 * It should be noticed that none of the current available datasheets explain
529 * or even mention how csrows are seen by the memory controller. So, we need
530 * to add a fake description for csrows.
531 * So, this driver is attributing one DIMM memory for one csrow.
532 */
1288c18f 533static int i7core_get_active_channels(const u8 socket, unsigned *channels,
67166af4 534 unsigned *csrows)
eb94fc40
MCC
535{
536 struct pci_dev *pdev = NULL;
537 int i, j;
538 u32 status, control;
539
540 *channels = 0;
541 *csrows = 0;
542
67166af4 543 pdev = get_pdev_slot_func(socket, 3, 0);
b7c76151 544 if (!pdev) {
67166af4
MCC
545 i7core_printk(KERN_ERR, "Couldn't find socket %d fn 3.0!!!\n",
546 socket);
ef708b53 547 return -ENODEV;
b7c76151 548 }
ef708b53
MCC
549
550 /* Device 3 function 0 reads */
551 pci_read_config_dword(pdev, MC_STATUS, &status);
552 pci_read_config_dword(pdev, MC_CONTROL, &control);
553
554 for (i = 0; i < NUM_CHANS; i++) {
eb94fc40 555 u32 dimm_dod[3];
ef708b53
MCC
556 /* Check if the channel is active */
557 if (!(control & (1 << (8 + i))))
558 continue;
559
560 /* Check if the channel is disabled */
41fcb7fe 561 if (status & (1 << i))
ef708b53 562 continue;
ef708b53 563
67166af4 564 pdev = get_pdev_slot_func(socket, i + 4, 1);
eb94fc40 565 if (!pdev) {
67166af4
MCC
566 i7core_printk(KERN_ERR, "Couldn't find socket %d "
567 "fn %d.%d!!!\n",
568 socket, i + 4, 1);
eb94fc40
MCC
569 return -ENODEV;
570 }
571 /* Devices 4-6 function 1 */
572 pci_read_config_dword(pdev,
573 MC_DOD_CH_DIMM0, &dimm_dod[0]);
574 pci_read_config_dword(pdev,
575 MC_DOD_CH_DIMM1, &dimm_dod[1]);
576 pci_read_config_dword(pdev,
577 MC_DOD_CH_DIMM2, &dimm_dod[2]);
578
ef708b53 579 (*channels)++;
eb94fc40
MCC
580
581 for (j = 0; j < 3; j++) {
582 if (!DIMM_PRESENT(dimm_dod[j]))
583 continue;
584 (*csrows)++;
585 }
ef708b53
MCC
586 }
587
c77720b9 588 debugf0("Number of active channels on socket %d: %d\n",
67166af4 589 socket, *channels);
1c6fed80 590
ef708b53
MCC
591 return 0;
592}
593
2e5185f7 594static int get_dimm_config(const struct mem_ctl_info *mci)
a0c36a1f
MCC
595{
596 struct i7core_pvt *pvt = mci->pvt_info;
1c6fed80 597 struct csrow_info *csr;
854d3349 598 struct pci_dev *pdev;
ba6c5c62 599 int i, j;
2e5185f7 600 int csrow = 0;
5566cb7c 601 unsigned long last_page = 0;
1c6fed80 602 enum edac_type mode;
854d3349 603 enum mem_type mtype;
a0c36a1f 604
854d3349 605 /* Get data from the MC register, function 0 */
f4742949 606 pdev = pvt->pci_mcr[0];
7dd6953c 607 if (!pdev)
8f331907
MCC
608 return -ENODEV;
609
f122a892 610 /* Device 3 function 0 reads */
7dd6953c
MCC
611 pci_read_config_dword(pdev, MC_CONTROL, &pvt->info.mc_control);
612 pci_read_config_dword(pdev, MC_STATUS, &pvt->info.mc_status);
613 pci_read_config_dword(pdev, MC_MAX_DOD, &pvt->info.max_dod);
614 pci_read_config_dword(pdev, MC_CHANNEL_MAPPER, &pvt->info.ch_map);
f122a892 615
17cb7b0c 616 debugf0("QPI %d control=0x%08x status=0x%08x dod=0x%08x map=0x%08x\n",
4af91889 617 pvt->i7core_dev->socket, pvt->info.mc_control, pvt->info.mc_status,
f122a892 618 pvt->info.max_dod, pvt->info.ch_map);
a0c36a1f 619
1c6fed80 620 if (ECC_ENABLED(pvt)) {
41fcb7fe 621 debugf0("ECC enabled with x%d SDCC\n", ECCx8(pvt) ? 8 : 4);
1c6fed80
MCC
622 if (ECCx8(pvt))
623 mode = EDAC_S8ECD8ED;
624 else
625 mode = EDAC_S4ECD4ED;
626 } else {
a0c36a1f 627 debugf0("ECC disabled\n");
1c6fed80
MCC
628 mode = EDAC_NONE;
629 }
a0c36a1f
MCC
630
631 /* FIXME: need to handle the error codes */
17cb7b0c
MCC
632 debugf0("DOD Max limits: DIMMS: %d, %d-ranked, %d-banked "
633 "x%x x 0x%x\n",
854d3349
MCC
634 numdimms(pvt->info.max_dod),
635 numrank(pvt->info.max_dod >> 2),
276b824c 636 numbank(pvt->info.max_dod >> 4),
854d3349
MCC
637 numrow(pvt->info.max_dod >> 6),
638 numcol(pvt->info.max_dod >> 9));
a0c36a1f 639
0b2b7b7e 640 for (i = 0; i < NUM_CHANS; i++) {
854d3349 641 u32 data, dimm_dod[3], value[8];
0b2b7b7e 642
52a2e4fc
MCC
643 if (!pvt->pci_ch[i][0])
644 continue;
645
0b2b7b7e
MCC
646 if (!CH_ACTIVE(pvt, i)) {
647 debugf0("Channel %i is not active\n", i);
648 continue;
649 }
650 if (CH_DISABLED(pvt, i)) {
651 debugf0("Channel %i is disabled\n", i);
652 continue;
653 }
654
f122a892 655 /* Devices 4-6 function 0 */
f4742949 656 pci_read_config_dword(pvt->pci_ch[i][0],
0b2b7b7e
MCC
657 MC_CHANNEL_DIMM_INIT_PARAMS, &data);
658
f4742949 659 pvt->channel[i].ranks = (data & QUAD_RANK_PRESENT) ?
67166af4 660 4 : 2;
0b2b7b7e 661
854d3349
MCC
662 if (data & REGISTERED_DIMM)
663 mtype = MEM_RDDR3;
14d2c083 664 else
854d3349
MCC
665 mtype = MEM_DDR3;
666#if 0
0b2b7b7e
MCC
667 if (data & THREE_DIMMS_PRESENT)
668 pvt->channel[i].dimms = 3;
669 else if (data & SINGLE_QUAD_RANK_PRESENT)
670 pvt->channel[i].dimms = 1;
671 else
672 pvt->channel[i].dimms = 2;
854d3349
MCC
673#endif
674
675 /* Devices 4-6 function 1 */
f4742949 676 pci_read_config_dword(pvt->pci_ch[i][1],
854d3349 677 MC_DOD_CH_DIMM0, &dimm_dod[0]);
f4742949 678 pci_read_config_dword(pvt->pci_ch[i][1],
854d3349 679 MC_DOD_CH_DIMM1, &dimm_dod[1]);
f4742949 680 pci_read_config_dword(pvt->pci_ch[i][1],
854d3349 681 MC_DOD_CH_DIMM2, &dimm_dod[2]);
0b2b7b7e 682
1c6fed80 683 debugf0("Ch%d phy rd%d, wr%d (0x%08x): "
854d3349 684 "%d ranks, %cDIMMs\n",
1c6fed80
MCC
685 i,
686 RDLCH(pvt->info.ch_map, i), WRLCH(pvt->info.ch_map, i),
687 data,
f4742949 688 pvt->channel[i].ranks,
41fcb7fe 689 (data & REGISTERED_DIMM) ? 'R' : 'U');
854d3349
MCC
690
691 for (j = 0; j < 3; j++) {
692 u32 banks, ranks, rows, cols;
5566cb7c 693 u32 size, npages;
854d3349
MCC
694
695 if (!DIMM_PRESENT(dimm_dod[j]))
696 continue;
697
698 banks = numbank(MC_DOD_NUMBANK(dimm_dod[j]));
699 ranks = numrank(MC_DOD_NUMRANK(dimm_dod[j]));
700 rows = numrow(MC_DOD_NUMROW(dimm_dod[j]));
701 cols = numcol(MC_DOD_NUMCOL(dimm_dod[j]));
702
5566cb7c
MCC
703 /* DDR3 has 8 I/O banks */
704 size = (rows * cols * banks * ranks) >> (20 - 3);
705
f4742949 706 pvt->channel[i].dimms++;
854d3349 707
17cb7b0c
MCC
708 debugf0("\tdimm %d %d Mb offset: %x, "
709 "bank: %d, rank: %d, row: %#x, col: %#x\n",
710 j, size,
854d3349
MCC
711 RANKOFFSET(dimm_dod[j]),
712 banks, ranks, rows, cols);
713
e9144601 714 npages = MiB_TO_PAGES(size);
5566cb7c 715
2e5185f7 716 csr = &mci->csrows[csrow];
5566cb7c
MCC
717 csr->first_page = last_page + 1;
718 last_page += npages;
719 csr->last_page = last_page;
720 csr->nr_pages = npages;
721
854d3349 722 csr->page_mask = 0;
eb94fc40 723 csr->grain = 8;
2e5185f7 724 csr->csrow_idx = csrow;
eb94fc40
MCC
725 csr->nr_channels = 1;
726
727 csr->channels[0].chan_idx = i;
728 csr->channels[0].ce_count = 0;
854d3349 729
2e5185f7 730 pvt->csrow_map[i][j] = csrow;
b4e8f0b6 731
854d3349
MCC
732 switch (banks) {
733 case 4:
734 csr->dtype = DEV_X4;
735 break;
736 case 8:
737 csr->dtype = DEV_X8;
738 break;
739 case 16:
740 csr->dtype = DEV_X16;
741 break;
742 default:
743 csr->dtype = DEV_UNKNOWN;
744 }
745
746 csr->edac_mode = mode;
747 csr->mtype = mtype;
748
2e5185f7 749 csrow++;
854d3349 750 }
1c6fed80 751
854d3349
MCC
752 pci_read_config_dword(pdev, MC_SAG_CH_0, &value[0]);
753 pci_read_config_dword(pdev, MC_SAG_CH_1, &value[1]);
754 pci_read_config_dword(pdev, MC_SAG_CH_2, &value[2]);
755 pci_read_config_dword(pdev, MC_SAG_CH_3, &value[3]);
756 pci_read_config_dword(pdev, MC_SAG_CH_4, &value[4]);
757 pci_read_config_dword(pdev, MC_SAG_CH_5, &value[5]);
758 pci_read_config_dword(pdev, MC_SAG_CH_6, &value[6]);
759 pci_read_config_dword(pdev, MC_SAG_CH_7, &value[7]);
17cb7b0c 760 debugf1("\t[%i] DIVBY3\tREMOVED\tOFFSET\n", i);
854d3349 761 for (j = 0; j < 8; j++)
17cb7b0c 762 debugf1("\t\t%#x\t%#x\t%#x\n",
854d3349
MCC
763 (value[j] >> 27) & 0x1,
764 (value[j] >> 24) & 0x7,
80b8ce89 765 (value[j] & ((1 << 24) - 1)));
0b2b7b7e
MCC
766 }
767
a0c36a1f
MCC
768 return 0;
769}
770
194a40fe
MCC
771/****************************************************************************
772 Error insertion routines
773 ****************************************************************************/
774
775/* The i7core has independent error injection features per channel.
776 However, to have a simpler code, we don't allow enabling error injection
777 on more than one channel.
778 Also, since a change at an inject parameter will be applied only at enable,
779 we're disabling error injection on all write calls to the sysfs nodes that
780 controls the error code injection.
781 */
1288c18f 782static int disable_inject(const struct mem_ctl_info *mci)
194a40fe
MCC
783{
784 struct i7core_pvt *pvt = mci->pvt_info;
785
786 pvt->inject.enable = 0;
787
f4742949 788 if (!pvt->pci_ch[pvt->inject.channel][0])
8f331907
MCC
789 return -ENODEV;
790
f4742949 791 pci_write_config_dword(pvt->pci_ch[pvt->inject.channel][0],
4157d9f5 792 MC_CHANNEL_ERROR_INJECT, 0);
8f331907
MCC
793
794 return 0;
194a40fe
MCC
795}
796
797/*
798 * i7core inject inject.section
799 *
800 * accept and store error injection inject.section value
801 * bit 0 - refers to the lower 32-byte half cacheline
802 * bit 1 - refers to the upper 32-byte half cacheline
803 */
804static ssize_t i7core_inject_section_store(struct mem_ctl_info *mci,
805 const char *data, size_t count)
806{
807 struct i7core_pvt *pvt = mci->pvt_info;
808 unsigned long value;
809 int rc;
810
811 if (pvt->inject.enable)
41fcb7fe 812 disable_inject(mci);
194a40fe
MCC
813
814 rc = strict_strtoul(data, 10, &value);
815 if ((rc < 0) || (value > 3))
2068def5 816 return -EIO;
194a40fe
MCC
817
818 pvt->inject.section = (u32) value;
819 return count;
820}
821
822static ssize_t i7core_inject_section_show(struct mem_ctl_info *mci,
823 char *data)
824{
825 struct i7core_pvt *pvt = mci->pvt_info;
826 return sprintf(data, "0x%08x\n", pvt->inject.section);
827}
828
829/*
830 * i7core inject.type
831 *
832 * accept and store error injection inject.section value
833 * bit 0 - repeat enable - Enable error repetition
834 * bit 1 - inject ECC error
835 * bit 2 - inject parity error
836 */
837static ssize_t i7core_inject_type_store(struct mem_ctl_info *mci,
838 const char *data, size_t count)
839{
840 struct i7core_pvt *pvt = mci->pvt_info;
841 unsigned long value;
842 int rc;
843
844 if (pvt->inject.enable)
41fcb7fe 845 disable_inject(mci);
194a40fe
MCC
846
847 rc = strict_strtoul(data, 10, &value);
848 if ((rc < 0) || (value > 7))
2068def5 849 return -EIO;
194a40fe
MCC
850
851 pvt->inject.type = (u32) value;
852 return count;
853}
854
855static ssize_t i7core_inject_type_show(struct mem_ctl_info *mci,
856 char *data)
857{
858 struct i7core_pvt *pvt = mci->pvt_info;
859 return sprintf(data, "0x%08x\n", pvt->inject.type);
860}
861
862/*
863 * i7core_inject_inject.eccmask_store
864 *
865 * The type of error (UE/CE) will depend on the inject.eccmask value:
866 * Any bits set to a 1 will flip the corresponding ECC bit
867 * Correctable errors can be injected by flipping 1 bit or the bits within
868 * a symbol pair (2 consecutive aligned 8-bit pairs - i.e. 7:0 and 15:8 or
869 * 23:16 and 31:24). Flipping bits in two symbol pairs will cause an
870 * uncorrectable error to be injected.
871 */
872static ssize_t i7core_inject_eccmask_store(struct mem_ctl_info *mci,
873 const char *data, size_t count)
874{
875 struct i7core_pvt *pvt = mci->pvt_info;
876 unsigned long value;
877 int rc;
878
879 if (pvt->inject.enable)
41fcb7fe 880 disable_inject(mci);
194a40fe
MCC
881
882 rc = strict_strtoul(data, 10, &value);
883 if (rc < 0)
2068def5 884 return -EIO;
194a40fe
MCC
885
886 pvt->inject.eccmask = (u32) value;
887 return count;
888}
889
890static ssize_t i7core_inject_eccmask_show(struct mem_ctl_info *mci,
891 char *data)
892{
893 struct i7core_pvt *pvt = mci->pvt_info;
894 return sprintf(data, "0x%08x\n", pvt->inject.eccmask);
895}
896
897/*
898 * i7core_addrmatch
899 *
900 * The type of error (UE/CE) will depend on the inject.eccmask value:
901 * Any bits set to a 1 will flip the corresponding ECC bit
902 * Correctable errors can be injected by flipping 1 bit or the bits within
903 * a symbol pair (2 consecutive aligned 8-bit pairs - i.e. 7:0 and 15:8 or
904 * 23:16 and 31:24). Flipping bits in two symbol pairs will cause an
905 * uncorrectable error to be injected.
906 */
194a40fe 907
a5538e53
MCC
/*
 * Builds the sysfs store/show pair for one address-match field of the
 * error-injection mask.  Writing "any" (stored as -1) disables matching
 * on that field; otherwise the value must be below @limit.
 *
 * Fix: strict_strtoul() takes an unsigned long *, but the original
 * declared "long value" and passed &value — an incompatible pointer
 * type.  Parse into an unsigned long and assign the field directly on
 * each branch; behavior is unchanged (strtoul already rejected negative
 * input, and -1 still means "any").
 */
#define DECLARE_ADDR_MATCH(param, limit)			\
static ssize_t i7core_inject_store_##param(			\
	struct mem_ctl_info *mci,				\
	const char *data, size_t count)				\
{								\
	struct i7core_pvt *pvt;					\
	unsigned long value;					\
	int rc;							\
								\
	debugf1("%s()\n", __func__);				\
	pvt = mci->pvt_info;					\
								\
	if (pvt->inject.enable)					\
		disable_inject(mci);				\
								\
	if (!strcasecmp(data, "any") || !strcasecmp(data, "any\n"))\
		pvt->inject.param = -1;				\
	else {							\
		rc = strict_strtoul(data, 10, &value);		\
		if ((rc < 0) || (value >= limit))		\
			return -EIO;				\
		pvt->inject.param = value;			\
	}							\
								\
	return count;						\
}								\
								\
static ssize_t i7core_inject_show_##param(			\
	struct mem_ctl_info *mci,				\
	char *data)						\
{								\
	struct i7core_pvt *pvt;					\
								\
	pvt = mci->pvt_info;					\
	debugf1("%s() pvt=%p\n", __func__, pvt);		\
	if (pvt->inject.param < 0)				\
		return sprintf(data, "any\n");			\
	else							\
		return sprintf(data, "%d\n", pvt->inject.param);\
}
949
a5538e53
MCC
950#define ATTR_ADDR_MATCH(param) \
951 { \
952 .attr = { \
953 .name = #param, \
954 .mode = (S_IRUGO | S_IWUSR) \
955 }, \
956 .show = i7core_inject_show_##param, \
957 .store = i7core_inject_store_##param, \
958 }
194a40fe 959
a5538e53
MCC
/*
 * Address-match knobs exposed under inject_addrmatch/ in sysfs.
 * The second argument is the exclusive upper bound accepted from
 * userspace for that criterion.
 */
DECLARE_ADDR_MATCH(channel, 3);
DECLARE_ADDR_MATCH(dimm, 3);
DECLARE_ADDR_MATCH(rank, 4);
DECLARE_ADDR_MATCH(bank, 32);
DECLARE_ADDR_MATCH(page, 0x10000);
DECLARE_ADDR_MATCH(col, 0x4000);
194a40fe 966
1288c18f 967static int write_and_test(struct pci_dev *dev, const int where, const u32 val)
276b824c
MCC
968{
969 u32 read;
970 int count;
971
4157d9f5
MCC
972 debugf0("setting pci %02x:%02x.%x reg=%02x value=%08x\n",
973 dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
974 where, val);
975
276b824c
MCC
976 for (count = 0; count < 10; count++) {
977 if (count)
b990538a 978 msleep(100);
276b824c
MCC
979 pci_write_config_dword(dev, where, val);
980 pci_read_config_dword(dev, where, &read);
981
982 if (read == val)
983 return 0;
984 }
985
4157d9f5
MCC
986 i7core_printk(KERN_ERR, "Error during set pci %02x:%02x.%x reg=%02x "
987 "write=%08x. Read=%08x\n",
988 dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
989 where, val, read);
276b824c
MCC
990
991 return -EINVAL;
992}
993
194a40fe
MCC
994/*
995 * This routine prepares the Memory Controller for error injection.
996 * The error will be injected when some process tries to write to the
997 * memory that matches the given criteria.
998 * The criteria can be set in terms of a mask where dimm, rank, bank, page
999 * and col can be specified.
1000 * A -1 value for any of the mask items will make the MCU to ignore
1001 * that matching criteria for error injection.
1002 *
1003 * It should be noticed that the error will only happen after a write operation
1004 * on a memory that matches the condition. if REPEAT_EN is not enabled at
1005 * inject mask, then it will produce just one error. Otherwise, it will repeat
1006 * until the injectmask would be cleaned.
1007 *
1008 * FIXME: This routine assumes that MAXNUMDIMMS value of MC_MAX_DOD
1009 * is reliable enough to check if the MC is using the
1010 * three channels. However, this is not clear at the datasheet.
1011 */
1012static ssize_t i7core_inject_enable_store(struct mem_ctl_info *mci,
1013 const char *data, size_t count)
1014{
1015 struct i7core_pvt *pvt = mci->pvt_info;
1016 u32 injectmask;
1017 u64 mask = 0;
1018 int rc;
1019 long enable;
1020
f4742949 1021 if (!pvt->pci_ch[pvt->inject.channel][0])
8f331907
MCC
1022 return 0;
1023
194a40fe
MCC
1024 rc = strict_strtoul(data, 10, &enable);
1025 if ((rc < 0))
1026 return 0;
1027
1028 if (enable) {
1029 pvt->inject.enable = 1;
1030 } else {
1031 disable_inject(mci);
1032 return count;
1033 }
1034
1035 /* Sets pvt->inject.dimm mask */
1036 if (pvt->inject.dimm < 0)
486dd09f 1037 mask |= 1LL << 41;
194a40fe 1038 else {
f4742949 1039 if (pvt->channel[pvt->inject.channel].dimms > 2)
486dd09f 1040 mask |= (pvt->inject.dimm & 0x3LL) << 35;
194a40fe 1041 else
486dd09f 1042 mask |= (pvt->inject.dimm & 0x1LL) << 36;
194a40fe
MCC
1043 }
1044
1045 /* Sets pvt->inject.rank mask */
1046 if (pvt->inject.rank < 0)
486dd09f 1047 mask |= 1LL << 40;
194a40fe 1048 else {
f4742949 1049 if (pvt->channel[pvt->inject.channel].dimms > 2)
486dd09f 1050 mask |= (pvt->inject.rank & 0x1LL) << 34;
194a40fe 1051 else
486dd09f 1052 mask |= (pvt->inject.rank & 0x3LL) << 34;
194a40fe
MCC
1053 }
1054
1055 /* Sets pvt->inject.bank mask */
1056 if (pvt->inject.bank < 0)
486dd09f 1057 mask |= 1LL << 39;
194a40fe 1058 else
486dd09f 1059 mask |= (pvt->inject.bank & 0x15LL) << 30;
194a40fe
MCC
1060
1061 /* Sets pvt->inject.page mask */
1062 if (pvt->inject.page < 0)
486dd09f 1063 mask |= 1LL << 38;
194a40fe 1064 else
486dd09f 1065 mask |= (pvt->inject.page & 0xffff) << 14;
194a40fe
MCC
1066
1067 /* Sets pvt->inject.column mask */
1068 if (pvt->inject.col < 0)
486dd09f 1069 mask |= 1LL << 37;
194a40fe 1070 else
486dd09f 1071 mask |= (pvt->inject.col & 0x3fff);
194a40fe 1072
276b824c
MCC
1073 /*
1074 * bit 0: REPEAT_EN
1075 * bits 1-2: MASK_HALF_CACHELINE
1076 * bit 3: INJECT_ECC
1077 * bit 4: INJECT_ADDR_PARITY
1078 */
1079
1080 injectmask = (pvt->inject.type & 1) |
1081 (pvt->inject.section & 0x3) << 1 |
1082 (pvt->inject.type & 0x6) << (3 - 1);
1083
1084 /* Unlock writes to registers - this register is write only */
f4742949 1085 pci_write_config_dword(pvt->pci_noncore,
67166af4 1086 MC_CFG_CONTROL, 0x2);
e9bd2e73 1087
f4742949 1088 write_and_test(pvt->pci_ch[pvt->inject.channel][0],
194a40fe 1089 MC_CHANNEL_ADDR_MATCH, mask);
f4742949 1090 write_and_test(pvt->pci_ch[pvt->inject.channel][0],
7b029d03 1091 MC_CHANNEL_ADDR_MATCH + 4, mask >> 32L);
7b029d03 1092
f4742949 1093 write_and_test(pvt->pci_ch[pvt->inject.channel][0],
194a40fe
MCC
1094 MC_CHANNEL_ERROR_MASK, pvt->inject.eccmask);
1095
f4742949 1096 write_and_test(pvt->pci_ch[pvt->inject.channel][0],
4157d9f5 1097 MC_CHANNEL_ERROR_INJECT, injectmask);
276b824c 1098
194a40fe 1099 /*
276b824c
MCC
1100 * This is something undocumented, based on my tests
1101 * Without writing 8 to this register, errors aren't injected. Not sure
1102 * why.
194a40fe 1103 */
f4742949 1104 pci_write_config_dword(pvt->pci_noncore,
276b824c 1105 MC_CFG_CONTROL, 8);
194a40fe 1106
41fcb7fe
MCC
1107 debugf0("Error inject addr match 0x%016llx, ecc 0x%08x,"
1108 " inject 0x%08x\n",
194a40fe
MCC
1109 mask, pvt->inject.eccmask, injectmask);
1110
7b029d03 1111
194a40fe
MCC
1112 return count;
1113}
1114
1115static ssize_t i7core_inject_enable_show(struct mem_ctl_info *mci,
1116 char *data)
1117{
1118 struct i7core_pvt *pvt = mci->pvt_info;
7b029d03
MCC
1119 u32 injectmask;
1120
52a2e4fc
MCC
1121 if (!pvt->pci_ch[pvt->inject.channel][0])
1122 return 0;
1123
f4742949 1124 pci_read_config_dword(pvt->pci_ch[pvt->inject.channel][0],
4157d9f5 1125 MC_CHANNEL_ERROR_INJECT, &injectmask);
7b029d03
MCC
1126
1127 debugf0("Inject error read: 0x%018x\n", injectmask);
1128
1129 if (injectmask & 0x0c)
1130 pvt->inject.enable = 1;
1131
194a40fe
MCC
1132 return sprintf(data, "%d\n", pvt->inject.enable);
1133}
1134
/*
 * DECLARE_COUNTER - generate a sysfs "show" handler reporting the
 * accumulated corrected-error count for uDIMM slot @param.  The value
 * is only meaningful on unregistered (uDIMM) memory after at least one
 * counter reading was taken; otherwise "data unavailable" is printed.
 */
#define DECLARE_COUNTER(param)					\
static ssize_t i7core_show_counter_##param(			\
	struct mem_ctl_info *mci,				\
	char *data)						\
{								\
	struct i7core_pvt *pvt = mci->pvt_info;			\
								\
	debugf1("%s() \n", __func__);				\
	if (!pvt->ce_count_available || (pvt->is_registered))	\
		return sprintf(data, "data unavailable\n");	\
	return sprintf(data, "%lu\n",				\
			pvt->udimm_ce_count[param]);		\
}
442305b1 1148
/*
 * ATTR_COUNTER - sysfs attribute entry named "udimm<param>" bound to the
 * show handler generated by DECLARE_COUNTER().  There is no .store
 * handler, so the counter is effectively read-only.
 */
#define ATTR_COUNTER(param)					\
	{							\
		.attr = {					\
			.name = __stringify(udimm##param),	\
			.mode = (S_IRUGO | S_IWUSR)		\
		},						\
		.show  = i7core_show_counter_##param		\
	}

/* One counter handler per uDIMM slot (0-2) */
DECLARE_COUNTER(0);
DECLARE_COUNTER(1);
DECLARE_COUNTER(2);
442305b1 1161
194a40fe
MCC
1162/*
1163 * Sysfs struct
1164 */
a5538e53 1165
1288c18f 1166static const struct mcidev_sysfs_attribute i7core_addrmatch_attrs[] = {
a5538e53
MCC
1167 ATTR_ADDR_MATCH(channel),
1168 ATTR_ADDR_MATCH(dimm),
1169 ATTR_ADDR_MATCH(rank),
1170 ATTR_ADDR_MATCH(bank),
1171 ATTR_ADDR_MATCH(page),
1172 ATTR_ADDR_MATCH(col),
1288c18f 1173 { } /* End of list */
a5538e53
MCC
1174};
1175
1288c18f 1176static const struct mcidev_sysfs_group i7core_inject_addrmatch = {
a5538e53
MCC
1177 .name = "inject_addrmatch",
1178 .mcidev_attr = i7core_addrmatch_attrs,
1179};
1180
1288c18f 1181static const struct mcidev_sysfs_attribute i7core_udimm_counters_attrs[] = {
f338d736
MCC
1182 ATTR_COUNTER(0),
1183 ATTR_COUNTER(1),
1184 ATTR_COUNTER(2),
64aab720 1185 { .attr = { .name = NULL } }
f338d736
MCC
1186};
1187
1288c18f 1188static const struct mcidev_sysfs_group i7core_udimm_counters = {
f338d736
MCC
1189 .name = "all_channel_counts",
1190 .mcidev_attr = i7core_udimm_counters_attrs,
1191};
1192
/*
 * sysfs entries exposed for registered (rDIMM) memory: error-injection
 * knobs only; CE counts for rDIMMs are read from the MC_COR_ECC_CNT
 * registers instead of the udimm counter attributes.
 */
static const struct mcidev_sysfs_attribute i7core_sysfs_rdimm_attrs[] = {
	{
		.attr = {
			.name = "inject_section",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show  = i7core_inject_section_show,
		.store = i7core_inject_section_store,
	}, {
		.attr = {
			.name = "inject_type",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show  = i7core_inject_type_show,
		.store = i7core_inject_type_store,
	}, {
		.attr = {
			.name = "inject_eccmask",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show  = i7core_inject_eccmask_show,
		.store = i7core_inject_eccmask_store,
	}, {
		.grp = &i7core_inject_addrmatch,
	}, {
		.attr = {
			.name = "inject_enable",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show  = i7core_inject_enable_show,
		.store = i7core_inject_enable_store,
	},
	{ }	/* End of list */
};

/*
 * sysfs entries for unregistered (uDIMM) memory: the same injection
 * knobs plus the all_channel_counts group with per-uDIMM CE counters.
 */
static const struct mcidev_sysfs_attribute i7core_sysfs_udimm_attrs[] = {
	{
		.attr = {
			.name = "inject_section",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show  = i7core_inject_section_show,
		.store = i7core_inject_section_store,
	}, {
		.attr = {
			.name = "inject_type",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show  = i7core_inject_type_show,
		.store = i7core_inject_type_store,
	}, {
		.attr = {
			.name = "inject_eccmask",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show  = i7core_inject_eccmask_show,
		.store = i7core_inject_eccmask_store,
	}, {
		.grp = &i7core_inject_addrmatch,
	}, {
		.attr = {
			.name = "inject_enable",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show  = i7core_inject_enable_show,
		.store = i7core_inject_enable_store,
	}, {
		.grp = &i7core_udimm_counters,
	},
	{ }	/* End of list */
};
1264
a0c36a1f
MCC
1265/****************************************************************************
1266 Device initialization routines: put/get, init/exit
1267 ****************************************************************************/
1268
/*
 * i7core_put_all_devices	'put' all the devices that we have
 *				reserved via 'get'
 */
static void i7core_put_devices(struct i7core_dev *i7core_dev)
{
	int i;

	debugf0(__FILE__ ": %s()\n", __func__);
	for (i = 0; i < i7core_dev->n_devs; i++) {
		struct pci_dev *pdev = i7core_dev->pdev[i];

		/* Slots may be sparse: optional devices can be absent */
		if (!pdev)
			continue;
		debugf0("Removing dev %02x:%02x.%d\n",
			pdev->bus->number,
			PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
		pci_dev_put(pdev);
	}
}

/* Drop references and free every per-socket device structure collected */
static void i7core_put_all_devices(void)
{
	struct i7core_dev *i7core_dev, *tmp;

	/* _safe variant: free_i7core_dev() removes the entry we iterate */
	list_for_each_entry_safe(i7core_dev, tmp, &i7core_edac_list, list) {
		i7core_put_devices(i7core_dev);
		free_i7core_dev(i7core_dev);
	}
}
1298
1288c18f 1299static void __init i7core_xeon_pci_fixup(const struct pci_id_table *table)
bc2d7245
KM
1300{
1301 struct pci_dev *pdev = NULL;
1302 int i;
54a08ab1 1303
bc2d7245 1304 /*
e7bf068a 1305 * On Xeon 55xx, the Intel Quick Path Arch Generic Non-core pci buses
bc2d7245
KM
1306 * aren't announced by acpi. So, we need to use a legacy scan probing
1307 * to detect them
1308 */
bd9e19ca
VM
1309 while (table && table->descr) {
1310 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, table->descr[0].dev_id, NULL);
1311 if (unlikely(!pdev)) {
1312 for (i = 0; i < MAX_SOCKET_BUSES; i++)
1313 pcibios_scan_specific_bus(255-i);
1314 }
bda14289 1315 pci_dev_put(pdev);
bd9e19ca 1316 table++;
bc2d7245
KM
1317 }
1318}
1319
bda14289
MCC
1320static unsigned i7core_pci_lastbus(void)
1321{
1322 int last_bus = 0, bus;
1323 struct pci_bus *b = NULL;
1324
1325 while ((b = pci_find_next_bus(b)) != NULL) {
1326 bus = b->number;
1327 debugf0("Found bus %d\n", bus);
1328 if (bus > last_bus)
1329 last_bus = bus;
1330 }
1331
1332 debugf0("Last bus %d\n", last_bus);
1333
1334 return last_bus;
1335}
1336
/*
 * i7core_get_all_devices	Find and perform 'get' operation on the MCH's
 *				device/functions we want to reference for this
 *				driver.
 *
 * Need to 'get' device 16 func 1 and func 2.
 *
 * i7core_get_onedevice - grab one PCI device described by
 * table->descr[devno], attach it to the per-socket i7core_dev, and
 * advance *prev so the caller can iterate over multiple instances
 * (one per socket) of the same device ID.
 */
static int i7core_get_onedevice(struct pci_dev **prev,
				const struct pci_id_table *table,
				const unsigned devno,
				const unsigned last_bus)
{
	struct i7core_dev *i7core_dev;
	const struct pci_id_descr *dev_descr = &table->descr[devno];

	struct pci_dev *pdev = NULL;
	u8 bus = 0;
	u8 socket = 0;

	pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
			      dev_descr->dev_id, *prev);

	/*
	 * On Xeon 55xx, the Intel Quckpath Arch Generic Non-core regs
	 * is at addr 8086:2c40, instead of 8086:2c41. So, we need
	 * to probe for the alternate address in case of failure
	 */
	if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_I7_NONCORE && !pdev)
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
				      PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT, *prev);

	if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE && !pdev)
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
				      PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT,
				      *prev);

	if (!pdev) {
		/* Exhausted all instances of this ID: end the iteration */
		if (*prev) {
			*prev = pdev;
			return 0;
		}

		if (dev_descr->optional)
			return 0;

		/* devno 0 missing: whole (optional) table is absent */
		if (devno == 0)
			return -ENODEV;

		i7core_printk(KERN_INFO,
			"Device not found: dev %02x.%d PCI ID %04x:%04x\n",
			dev_descr->dev, dev_descr->func,
			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);

		/* End of list, leave */
		return -ENODEV;
	}
	bus = pdev->bus->number;

	/* Socket number is the distance from the last bus in the system */
	socket = last_bus - bus;

	i7core_dev = get_i7core_dev(socket);
	if (!i7core_dev) {
		i7core_dev = alloc_i7core_dev(socket, table);
		if (!i7core_dev) {
			pci_dev_put(pdev);
			return -ENOMEM;
		}
	}

	if (i7core_dev->pdev[devno]) {
		i7core_printk(KERN_ERR,
			"Duplicated device for "
			"dev %02x:%02x.%d PCI ID %04x:%04x\n",
			bus, dev_descr->dev, dev_descr->func,
			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
		pci_dev_put(pdev);
		return -ENODEV;
	}

	i7core_dev->pdev[devno] = pdev;

	/* Sanity check */
	if (unlikely(PCI_SLOT(pdev->devfn) != dev_descr->dev ||
			PCI_FUNC(pdev->devfn) != dev_descr->func)) {
		i7core_printk(KERN_ERR,
			"Device PCI ID %04x:%04x "
			"has dev %02x:%02x.%d instead of dev %02x:%02x.%d\n",
			PCI_VENDOR_ID_INTEL, dev_descr->dev_id,
			bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
			bus, dev_descr->dev, dev_descr->func);
		return -ENODEV;
	}

	/* Be sure that the device is enabled */
	if (unlikely(pci_enable_device(pdev) < 0)) {
		i7core_printk(KERN_ERR,
			"Couldn't enable "
			"dev %02x:%02x.%d PCI ID %04x:%04x\n",
			bus, dev_descr->dev, dev_descr->func,
			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
		return -ENODEV;
	}

	debugf0("Detected socket %d dev %02x:%02x.%d PCI ID %04x:%04x\n",
		socket, bus, dev_descr->dev,
		dev_descr->func,
		PCI_VENDOR_ID_INTEL, dev_descr->dev_id);

	/*
	 * As stated on drivers/pci/search.c, the reference count for
	 * @from is always decremented if it is not %NULL. So, as we need
	 * to get all devices up to null, we need to do a get for the device
	 */
	pci_dev_get(pdev);

	*prev = pdev;

	return 0;
}
a0c36a1f 1455
/*
 * i7core_get_all_devices - walk every ID table and 'get' each device,
 * iterating over all sockets.  On failure, all references taken so far
 * are dropped.
 */
static int i7core_get_all_devices(void)
{
	int i, rc, last_bus;
	struct pci_dev *pdev = NULL;
	const struct pci_id_table *table = pci_dev_table;

	last_bus = i7core_pci_lastbus();

	while (table && table->descr) {
		for (i = 0; i < table->n_devs; i++) {
			pdev = NULL;
			do {
				rc = i7core_get_onedevice(&pdev, table, i,
							  last_bus);
				if (rc < 0) {
					/*
					 * Failure on the very first device
					 * means this whole table doesn't
					 * apply to the hardware: skip it by
					 * forcing the for-loop to end.
					 */
					if (i == 0) {
						i = table->n_devs;
						break;
					}
					i7core_put_all_devices();
					return -ENODEV;
				}
			} while (pdev);	/* one pass per socket instance */
		}
		table++;
	}

	return 0;
}
1485
/*
 * mci_bind_devs - sort the gathered PCI devices of one socket into the
 * driver's private structure:
 *   slot 3            -> pvt->pci_mcr[func]  (memory controller registers)
 *   slots 4..4+chans  -> pvt->pci_ch[chan][func]  (per-channel devices)
 *   slot 0 func 0     -> pvt->pci_noncore (also identifies the CPU family
 *                        and whether scrubbing can be enabled)
 * Device 3 function 2 being present marks registered (rDIMM) memory.
 * Returns 0 on success or -EINVAL if a device is outside those ranges.
 */
static int mci_bind_devs(struct mem_ctl_info *mci,
			 struct i7core_dev *i7core_dev)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	struct pci_dev *pdev;
	int i, func, slot;
	char *family;

	pvt->is_registered = false;
	pvt->enable_scrub  = false;
	for (i = 0; i < i7core_dev->n_devs; i++) {
		pdev = i7core_dev->pdev[i];
		if (!pdev)
			continue;

		func = PCI_FUNC(pdev->devfn);
		slot = PCI_SLOT(pdev->devfn);
		if (slot == 3) {
			if (unlikely(func > MAX_MCR_FUNC))
				goto error;
			pvt->pci_mcr[func] = pdev;
		} else if (likely(slot >= 4 && slot < 4 + NUM_CHANS)) {
			if (unlikely(func > MAX_CHAN_FUNC))
				goto error;
			pvt->pci_ch[slot - 4][func] = pdev;
		} else if (!slot && !func) {
			pvt->pci_noncore = pdev;

			/*
			 * Detect the processor family.  Scrubbing is only
			 * enabled on the Xeon 55xx and 56xx / i7-900 parts.
			 */
			switch (pdev->device) {
			case PCI_DEVICE_ID_INTEL_I7_NONCORE:
				family = "Xeon 35xx/ i7core";
				pvt->enable_scrub = false;
				break;
			case PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT:
				family = "i7-800/i5-700";
				pvt->enable_scrub = false;
				break;
			case PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE:
				family = "Xeon 34xx";
				pvt->enable_scrub = false;
				break;
			case PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT:
				family = "Xeon 55xx";
				pvt->enable_scrub = true;
				break;
			case PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_REV2:
				family = "Xeon 56xx / i7-900";
				pvt->enable_scrub = true;
				break;
			default:
				family = "unknown";
				pvt->enable_scrub = false;
			}
			debugf0("Detected a processor type %s\n", family);
		} else
			goto error;

		debugf0("Associated fn %d.%d, dev = %p, socket %d\n",
			PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
			pdev, i7core_dev->socket);

		/* Device 3 function 2 only exists on registered memory */
		if (PCI_SLOT(pdev->devfn) == 3 &&
			PCI_FUNC(pdev->devfn) == 2)
			pvt->is_registered = true;
	}

	return 0;

error:
	i7core_printk(KERN_ERR, "Device %d, function %d "
		      "is out of the expected range\n",
		      slot, func);
	return -EINVAL;
}
1561
442305b1
MCC
1562/****************************************************************************
1563 Error check routines
1564 ****************************************************************************/
f4742949 1565static void i7core_rdimm_update_csrow(struct mem_ctl_info *mci,
1288c18f
MCC
1566 const int chan,
1567 const int dimm,
1568 const int add)
b4e8f0b6
MCC
1569{
1570 char *msg;
1571 struct i7core_pvt *pvt = mci->pvt_info;
f4742949 1572 int row = pvt->csrow_map[chan][dimm], i;
b4e8f0b6
MCC
1573
1574 for (i = 0; i < add; i++) {
1575 msg = kasprintf(GFP_KERNEL, "Corrected error "
f4742949
MCC
1576 "(Socket=%d channel=%d dimm=%d)",
1577 pvt->i7core_dev->socket, chan, dimm);
b4e8f0b6
MCC
1578
1579 edac_mc_handle_fbd_ce(mci, row, 0, msg);
1580 kfree (msg);
1581 }
1582}
1583
/*
 * i7core_rdimm_update_ce_count - fold fresh hardware CE counter readings
 * for one channel into the running totals.
 *
 * The hardware counters are 15 bits wide, so a reading smaller than the
 * previous one means the counter wrapped; compensate by adding 0x7fff.
 * Each positive delta is then forwarded to the EDAC core via
 * i7core_rdimm_update_csrow().
 */
static void i7core_rdimm_update_ce_count(struct mem_ctl_info *mci,
					 const int chan,
					 const int new0,
					 const int new1,
					 const int new2)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	int add0 = 0, add1 = 0, add2 = 0;
	/* Updates CE counters if it is not the first time here */
	if (pvt->ce_count_available) {
		/* Updates CE counters */

		add2 = new2 - pvt->rdimm_last_ce_count[chan][2];
		add1 = new1 - pvt->rdimm_last_ce_count[chan][1];
		add0 = new0 - pvt->rdimm_last_ce_count[chan][0];

		if (add2 < 0)
			add2 += 0x7fff;	/* 15-bit counter wrapped */
		pvt->rdimm_ce_count[chan][2] += add2;

		if (add1 < 0)
			add1 += 0x7fff;	/* 15-bit counter wrapped */
		pvt->rdimm_ce_count[chan][1] += add1;

		if (add0 < 0)
			add0 += 0x7fff;	/* 15-bit counter wrapped */
		pvt->rdimm_ce_count[chan][0] += add0;
	} else
		pvt->ce_count_available = 1;

	/* Store the new values */
	pvt->rdimm_last_ce_count[chan][2] = new2;
	pvt->rdimm_last_ce_count[chan][1] = new1;
	pvt->rdimm_last_ce_count[chan][0] = new0;

	/*updated the edac core */
	if (add0 != 0)
		i7core_rdimm_update_csrow(mci, chan, 0, add0);
	if (add1 != 0)
		i7core_rdimm_update_csrow(mci, chan, 1, add1);
	if (add2 != 0)
		i7core_rdimm_update_csrow(mci, chan, 2, add2);

}
1628
/*
 * i7core_rdimm_check_mc_ecc_err - poll the six MC_COR_ECC_CNT registers
 * (device 3 function 2) and update the per-channel CE counts.
 *
 * Each 32-bit register holds a top and a bottom DIMM counter; how the
 * six registers map onto DIMM slots depends on whether a channel has
 * more than two DIMMs populated.
 */
static void i7core_rdimm_check_mc_ecc_err(struct mem_ctl_info *mci)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	u32 rcv[3][2];
	int i, new0, new1, new2;

	/*Read DEV 3: FUN 2:  MC_COR_ECC_CNT regs directly*/
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_0,
			      &rcv[0][0]);
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_1,
			      &rcv[0][1]);
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_2,
			      &rcv[1][0]);
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_3,
			      &rcv[1][1]);
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_4,
			      &rcv[2][0]);
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_5,
			      &rcv[2][1]);
	for (i = 0 ; i < 3; i++) {
		debugf3("MC_COR_ECC_CNT%d = 0x%x; MC_COR_ECC_CNT%d = 0x%x\n",
			(i * 2), rcv[i][0], (i * 2) + 1, rcv[i][1]);
		/*if the channel has 3 dimms*/
		if (pvt->channel[i].dimms > 2) {
			new0 = DIMM_BOT_COR_ERR(rcv[i][0]);
			new1 = DIMM_TOP_COR_ERR(rcv[i][0]);
			new2 = DIMM_BOT_COR_ERR(rcv[i][1]);
		} else {
			/* <= 2 DIMMs: each DIMM owns a whole register */
			new0 = DIMM_TOP_COR_ERR(rcv[i][0]) +
				DIMM_BOT_COR_ERR(rcv[i][0]);
			new1 = DIMM_TOP_COR_ERR(rcv[i][1]) +
				DIMM_BOT_COR_ERR(rcv[i][1]);
			new2 = 0;
		}

		i7core_rdimm_update_ce_count(mci, i, new0, new1, new2);
	}
}
/* This function is based on the device 3 function 4 registers as described on:
 * Intel Xeon Processor 5500 Series Datasheet Volume 2
 *	http://www.intel.com/Assets/PDF/datasheet/321322.pdf
 * also available at:
 * 	http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
 */
static void i7core_udimm_check_mc_ecc_err(struct mem_ctl_info *mci)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	u32 rcv1, rcv0;
	int new0, new1, new2;

	if (!pvt->pci_mcr[4]) {
		debugf0("%s MCR registers not found\n", __func__);
		return;
	}

	/* Corrected test errors */
	pci_read_config_dword(pvt->pci_mcr[4], MC_TEST_ERR_RCV1, &rcv1);
	pci_read_config_dword(pvt->pci_mcr[4], MC_TEST_ERR_RCV0, &rcv0);

	/* Store the new values */
	new2 = DIMM2_COR_ERR(rcv1);
	new1 = DIMM1_COR_ERR(rcv0);
	new0 = DIMM0_COR_ERR(rcv0);

	/* Updates CE counters if it is not the first time here */
	if (pvt->ce_count_available) {
		/* Updates CE counters */
		int add0, add1, add2;

		add2 = new2 - pvt->udimm_last_ce_count[2];
		add1 = new1 - pvt->udimm_last_ce_count[1];
		add0 = new0 - pvt->udimm_last_ce_count[0];

		/* counters are 15 bits wide: compensate for wraparound */
		if (add2 < 0)
			add2 += 0x7fff;
		pvt->udimm_ce_count[2] += add2;

		if (add1 < 0)
			add1 += 0x7fff;
		pvt->udimm_ce_count[1] += add1;

		if (add0 < 0)
			add0 += 0x7fff;
		pvt->udimm_ce_count[0] += add0;

		if (add0 | add1 | add2)
			i7core_printk(KERN_ERR, "New Corrected error(s): "
				      "dimm0: +%d, dimm1: +%d, dimm2 +%d\n",
				      add0, add1, add2);
	} else
		pvt->ce_count_available = 1;

	/* Store the new values */
	pvt->udimm_last_ce_count[2] = new2;
	pvt->udimm_last_ce_count[1] = new1;
	pvt->udimm_last_ce_count[0] = new0;
}
1727
8a2f118e
MCC
1728/*
1729 * According with tables E-11 and E-12 of chapter E.3.3 of Intel 64 and IA-32
1730 * Architectures Software Developer’s Manual Volume 3B.
f237fcf2
MCC
1731 * Nehalem are defined as family 0x06, model 0x1a
1732 *
1733 * The MCA registers used here are the following ones:
8a2f118e 1734 * struct mce field MCA Register
f237fcf2
MCC
1735 * m->status MSR_IA32_MC8_STATUS
1736 * m->addr MSR_IA32_MC8_ADDR
1737 * m->misc MSR_IA32_MC8_MISC
8a2f118e
MCC
1738 * In the case of Nehalem, the error information is masked at .status and .misc
1739 * fields
1740 */
d5381642 1741static void i7core_mce_output_error(struct mem_ctl_info *mci,
1288c18f 1742 const struct mce *m)
d5381642 1743{
b4e8f0b6 1744 struct i7core_pvt *pvt = mci->pvt_info;
a639539f 1745 char *type, *optype, *err, *msg;
8a2f118e 1746 unsigned long error = m->status & 0x1ff0000l;
a639539f 1747 u32 optypenum = (m->status >> 4) & 0x07;
8cf2d239 1748 u32 core_err_cnt = (m->status >> 38) & 0x7fff;
8a2f118e
MCC
1749 u32 dimm = (m->misc >> 16) & 0x3;
1750 u32 channel = (m->misc >> 18) & 0x3;
1751 u32 syndrome = m->misc >> 32;
1752 u32 errnum = find_first_bit(&error, 32);
b4e8f0b6 1753 int csrow;
8a2f118e 1754
c5d34528
MCC
1755 if (m->mcgstatus & 1)
1756 type = "FATAL";
1757 else
1758 type = "NON_FATAL";
1759
a639539f 1760 switch (optypenum) {
b990538a
MCC
1761 case 0:
1762 optype = "generic undef request";
1763 break;
1764 case 1:
1765 optype = "read error";
1766 break;
1767 case 2:
1768 optype = "write error";
1769 break;
1770 case 3:
1771 optype = "addr/cmd error";
1772 break;
1773 case 4:
1774 optype = "scrubbing error";
1775 break;
1776 default:
1777 optype = "reserved";
1778 break;
a639539f
MCC
1779 }
1780
8a2f118e
MCC
1781 switch (errnum) {
1782 case 16:
1783 err = "read ECC error";
1784 break;
1785 case 17:
1786 err = "RAS ECC error";
1787 break;
1788 case 18:
1789 err = "write parity error";
1790 break;
1791 case 19:
1792 err = "redundacy loss";
1793 break;
1794 case 20:
1795 err = "reserved";
1796 break;
1797 case 21:
1798 err = "memory range error";
1799 break;
1800 case 22:
1801 err = "RTID out of range";
1802 break;
1803 case 23:
1804 err = "address parity error";
1805 break;
1806 case 24:
1807 err = "byte enable parity error";
1808 break;
1809 default:
1810 err = "unknown";
d5381642 1811 }
d5381642 1812
f237fcf2 1813 /* FIXME: should convert addr into bank and rank information */
8a2f118e 1814 msg = kasprintf(GFP_ATOMIC,
f4742949 1815 "%s (addr = 0x%08llx, cpu=%d, Dimm=%d, Channel=%d, "
a639539f 1816 "syndrome=0x%08x, count=%d, Err=%08llx:%08llx (%s: %s))\n",
f4742949 1817 type, (long long) m->addr, m->cpu, dimm, channel,
a639539f
MCC
1818 syndrome, core_err_cnt, (long long)m->status,
1819 (long long)m->misc, optype, err);
8a2f118e
MCC
1820
1821 debugf0("%s", msg);
d5381642 1822
f4742949 1823 csrow = pvt->csrow_map[channel][dimm];
b4e8f0b6 1824
d5381642 1825 /* Call the helper to output message */
b4e8f0b6
MCC
1826 if (m->mcgstatus & 1)
1827 edac_mc_handle_fbd_ue(mci, csrow, 0,
1828 0 /* FIXME: should be channel here */, msg);
f4742949 1829 else if (!pvt->is_registered)
b4e8f0b6
MCC
1830 edac_mc_handle_fbd_ce(mci, csrow,
1831 0 /* FIXME: should be channel here */, msg);
8a2f118e
MCC
1832
1833 kfree(msg);
d5381642
MCC
1834}
1835
/*
 * i7core_check_error	Retrieve and process errors reported by the
 *			hardware. Called by the Core module.
 */
static void i7core_check_error(struct mem_ctl_info *mci)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	int i;
	unsigned count = 0;
	struct mce *m;

	/*
	 * MCE first step: Copy all mce errors into a temporary buffer
	 * We use a double buffering here, to reduce the risk of
	 * losing an error.
	 */
	smp_rmb();
	count = (pvt->mce_out + MCE_LOG_LEN - pvt->mce_in)
		% MCE_LOG_LEN;
	if (!count)
		goto check_ce_error;

	m = pvt->mce_outentry;
	/* Ring wrapped: copy the tail segment first, then restart at 0 */
	if (pvt->mce_in + count > MCE_LOG_LEN) {
		unsigned l = MCE_LOG_LEN - pvt->mce_in;

		memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * l);
		smp_wmb();
		pvt->mce_in = 0;
		count -= l;
		m += l;
	}
	memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * count);
	smp_wmb();
	pvt->mce_in += count;

	smp_rmb();
	if (pvt->mce_overrun) {
		i7core_printk(KERN_ERR, "Lost %d memory errors\n",
			      pvt->mce_overrun);
		smp_wmb();
		pvt->mce_overrun = 0;
	}

	/*
	 * MCE second step: parse errors and display
	 */
	for (i = 0; i < count; i++)
		i7core_mce_output_error(mci, &pvt->mce_outentry[i]);

	/*
	 * Now, let's increment CE error counts
	 */
check_ce_error:
	if (!pvt->is_registered)
		i7core_udimm_check_mc_ecc_err(mci);
	else
		i7core_rdimm_check_mc_ecc_err(mci);
}
1895
d5381642
MCC
1896/*
1897 * i7core_mce_check_error Replicates mcelog routine to get errors
1898 * This routine simply queues mcelog errors, and
1899 * return. The error itself should be handled later
1900 * by i7core_check_error.
6e103be1
MCC
1901 * WARNING: As this routine should be called at NMI time, extra care should
1902 * be taken to avoid deadlocks, and to be as fast as possible.
d5381642 1903 */
4140c542
BP
1904static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val,
1905 void *data)
d5381642 1906{
4140c542
BP
1907 struct mce *mce = (struct mce *)data;
1908 struct i7core_dev *i7_dev;
1909 struct mem_ctl_info *mci;
1910 struct i7core_pvt *pvt;
1911
1912 i7_dev = get_i7core_dev(mce->socketid);
1913 if (!i7_dev)
1914 return NOTIFY_BAD;
1915
1916 mci = i7_dev->mci;
1917 pvt = mci->pvt_info;
d5381642 1918
8a2f118e
MCC
1919 /*
1920 * Just let mcelog handle it if the error is
1921 * outside the memory controller
1922 */
1923 if (((mce->status & 0xffff) >> 7) != 1)
4140c542 1924 return NOTIFY_DONE;
8a2f118e 1925
f237fcf2
MCC
1926 /* Bank 8 registers are the only ones that we know how to handle */
1927 if (mce->bank != 8)
4140c542 1928 return NOTIFY_DONE;
f237fcf2 1929
3b918c12 1930#ifdef CONFIG_SMP
f4742949 1931 /* Only handle if it is the right mc controller */
5034086b 1932 if (mce->socketid != pvt->i7core_dev->socket)
4140c542 1933 return NOTIFY_DONE;
3b918c12 1934#endif
f4742949 1935
ca9c90ba 1936 smp_rmb();
321ece4d 1937 if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) {
ca9c90ba
MCC
1938 smp_wmb();
1939 pvt->mce_overrun++;
4140c542 1940 return NOTIFY_DONE;
d5381642 1941 }
6e103be1
MCC
1942
1943 /* Copy memory error at the ringbuffer */
1944 memcpy(&pvt->mce_entry[pvt->mce_out], mce, sizeof(*mce));
ca9c90ba 1945 smp_wmb();
321ece4d 1946 pvt->mce_out = (pvt->mce_out + 1) % MCE_LOG_LEN;
d5381642 1947
c5d34528
MCC
1948 /* Handle fatal errors immediately */
1949 if (mce->mcgstatus & 1)
1950 i7core_check_error(mci);
1951
e7bf068a 1952 /* Advise mcelog that the errors were handled */
4140c542 1953 return NOTIFY_STOP;
d5381642
MCC
1954}
1955
4140c542
BP
/* Hooked into the x86 MCE decoder chain; see i7core_mce_check_error() */
static struct notifier_block i7_mce_dec = {
	.notifier_call	= i7core_mce_check_error,
};
1959
535e9c78
NC
/*
 * Layout of a DMI/SMBIOS "Memory Device" (Type 17) entry, walked by
 * decode_dclk() to read the memory clock speed.
 * NOTE(review): field layout assumed to match the SMBIOS Type 17 record —
 * confirm against the SMBIOS specification before relying on fields other
 * than length/size/speed/conf_mem_clk_speed.
 */
struct memdev_dmi_entry {
	u8 type;
	u8 length;		/* length of this entry, in bytes */
	u16 handle;
	u16 phys_mem_array_handle;
	u16 mem_err_info_handle;
	u16 total_width;
	u16 data_width;
	u16 size;		/* 0: no DIMM present in this slot */
	u8 form;
	u8 device_set;
	u8 device_locator;
	u8 bank_locator;
	u8 memory_type;
	u16 type_detail;
	u16 speed;		/* DIMM rated speed */
	u8 manufacturer;
	u8 serial_number;
	u8 asset_tag;
	u8 part_number;
	u8 attributes;
	u32 extended_size;
	u16 conf_mem_clk_speed;	/* configured (running) clock speed */
} __attribute__((__packed__));
1984
1985
1986/*
1987 * Decode the DRAM Clock Frequency, be paranoid, make sure that all
1988 * memory devices show the same speed, and if they don't then consider
1989 * all speeds to be invalid.
1990 */
1991static void decode_dclk(const struct dmi_header *dh, void *_dclk_freq)
1992{
1993 int *dclk_freq = _dclk_freq;
1994 u16 dmi_mem_clk_speed;
1995
1996 if (*dclk_freq == -1)
1997 return;
1998
1999 if (dh->type == DMI_ENTRY_MEM_DEVICE) {
2000 struct memdev_dmi_entry *memdev_dmi_entry =
2001 (struct memdev_dmi_entry *)dh;
2002 unsigned long conf_mem_clk_speed_offset =
2003 (unsigned long)&memdev_dmi_entry->conf_mem_clk_speed -
2004 (unsigned long)&memdev_dmi_entry->type;
2005 unsigned long speed_offset =
2006 (unsigned long)&memdev_dmi_entry->speed -
2007 (unsigned long)&memdev_dmi_entry->type;
2008
2009 /* Check that a DIMM is present */
2010 if (memdev_dmi_entry->size == 0)
2011 return;
2012
2013 /*
2014 * Pick the configured speed if it's available, otherwise
2015 * pick the DIMM speed, or we don't have a speed.
2016 */
2017 if (memdev_dmi_entry->length > conf_mem_clk_speed_offset) {
2018 dmi_mem_clk_speed =
2019 memdev_dmi_entry->conf_mem_clk_speed;
2020 } else if (memdev_dmi_entry->length > speed_offset) {
2021 dmi_mem_clk_speed = memdev_dmi_entry->speed;
2022 } else {
2023 *dclk_freq = -1;
2024 return;
2025 }
2026
2027 if (*dclk_freq == 0) {
2028 /* First pass, speed was 0 */
2029 if (dmi_mem_clk_speed > 0) {
2030 /* Set speed if a valid speed is read */
2031 *dclk_freq = dmi_mem_clk_speed;
2032 } else {
2033 /* Otherwise we don't have a valid speed */
2034 *dclk_freq = -1;
2035 }
2036 } else if (*dclk_freq > 0 &&
2037 *dclk_freq != dmi_mem_clk_speed) {
2038 /*
2039 * If we have a speed, check that all DIMMS are the same
2040 * speed, otherwise set the speed as invalid.
2041 */
2042 *dclk_freq = -1;
2043 }
2044 }
2045}
2046
2047/*
2048 * The default DCLK frequency is used as a fallback if we
2049 * fail to find anything reliable in the DMI. The value
2050 * is taken straight from the datasheet.
2051 */
2052#define DEFAULT_DCLK_FREQ 800
2053
2054static int get_dclk_freq(void)
2055{
2056 int dclk_freq = 0;
2057
2058 dmi_walk(decode_dclk, (void *)&dclk_freq);
2059
2060 if (dclk_freq < 1)
2061 return DEFAULT_DCLK_FREQ;
2062
2063 return dclk_freq;
2064}
2065
e8b6a127
SG
2066/*
2067 * set_sdram_scrub_rate This routine sets byte/sec bandwidth scrub rate
2068 * to hardware according to SCRUBINTERVAL formula
2069 * found in datasheet.
2070 */
2071static int set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 new_bw)
2072{
2073 struct i7core_pvt *pvt = mci->pvt_info;
2074 struct pci_dev *pdev;
e8b6a127
SG
2075 u32 dw_scrub;
2076 u32 dw_ssr;
2077
2078 /* Get data from the MC register, function 2 */
2079 pdev = pvt->pci_mcr[2];
2080 if (!pdev)
2081 return -ENODEV;
2082
2083 pci_read_config_dword(pdev, MC_SCRUB_CONTROL, &dw_scrub);
2084
2085 if (new_bw == 0) {
2086 /* Prepare to disable petrol scrub */
2087 dw_scrub &= ~STARTSCRUB;
2088 /* Stop the patrol scrub engine */
535e9c78
NC
2089 write_and_test(pdev, MC_SCRUB_CONTROL,
2090 dw_scrub & ~SCRUBINTERVAL_MASK);
e8b6a127
SG
2091
2092 /* Get current status of scrub rate and set bit to disable */
2093 pci_read_config_dword(pdev, MC_SSRCONTROL, &dw_ssr);
2094 dw_ssr &= ~SSR_MODE_MASK;
2095 dw_ssr |= SSR_MODE_DISABLE;
2096 } else {
535e9c78
NC
2097 const int cache_line_size = 64;
2098 const u32 freq_dclk_mhz = pvt->dclk_freq;
2099 unsigned long long scrub_interval;
e8b6a127
SG
2100 /*
2101 * Translate the desired scrub rate to a register value and
535e9c78 2102 * program the corresponding register value.
e8b6a127 2103 */
535e9c78
NC
2104 scrub_interval = (unsigned long long)freq_dclk_mhz *
2105 cache_line_size * 1000000 / new_bw;
2106
2107 if (!scrub_interval || scrub_interval > SCRUBINTERVAL_MASK)
2108 return -EINVAL;
2109
2110 dw_scrub = SCRUBINTERVAL_MASK & scrub_interval;
e8b6a127
SG
2111
2112 /* Start the patrol scrub engine */
2113 pci_write_config_dword(pdev, MC_SCRUB_CONTROL,
2114 STARTSCRUB | dw_scrub);
2115
2116 /* Get current status of scrub rate and set bit to enable */
2117 pci_read_config_dword(pdev, MC_SSRCONTROL, &dw_ssr);
2118 dw_ssr &= ~SSR_MODE_MASK;
2119 dw_ssr |= SSR_MODE_ENABLE;
2120 }
2121 /* Disable or enable scrubbing */
2122 pci_write_config_dword(pdev, MC_SSRCONTROL, dw_ssr);
2123
2124 return new_bw;
2125}
2126
2127/*
2128 * get_sdram_scrub_rate This routine convert current scrub rate value
2129 * into byte/sec bandwidth accourding to
2130 * SCRUBINTERVAL formula found in datasheet.
2131 */
2132static int get_sdram_scrub_rate(struct mem_ctl_info *mci)
2133{
2134 struct i7core_pvt *pvt = mci->pvt_info;
2135 struct pci_dev *pdev;
2136 const u32 cache_line_size = 64;
535e9c78
NC
2137 const u32 freq_dclk_mhz = pvt->dclk_freq;
2138 unsigned long long scrub_rate;
e8b6a127
SG
2139 u32 scrubval;
2140
2141 /* Get data from the MC register, function 2 */
2142 pdev = pvt->pci_mcr[2];
2143 if (!pdev)
2144 return -ENODEV;
2145
2146 /* Get current scrub control data */
2147 pci_read_config_dword(pdev, MC_SCRUB_CONTROL, &scrubval);
2148
2149 /* Mask highest 8-bits to 0 */
535e9c78 2150 scrubval &= SCRUBINTERVAL_MASK;
e8b6a127
SG
2151 if (!scrubval)
2152 return 0;
2153
2154 /* Calculate scrub rate value into byte/sec bandwidth */
535e9c78
NC
2155 scrub_rate = (unsigned long long)freq_dclk_mhz *
2156 1000000 * cache_line_size / scrubval;
2157 return (int)scrub_rate;
e8b6a127
SG
2158}
2159
2160static void enable_sdram_scrub_setting(struct mem_ctl_info *mci)
2161{
2162 struct i7core_pvt *pvt = mci->pvt_info;
2163 u32 pci_lock;
2164
2165 /* Unlock writes to pci registers */
2166 pci_read_config_dword(pvt->pci_noncore, MC_CFG_CONTROL, &pci_lock);
2167 pci_lock &= ~0x3;
2168 pci_write_config_dword(pvt->pci_noncore, MC_CFG_CONTROL,
2169 pci_lock | MC_CFG_UNLOCK);
2170
2171 mci->set_sdram_scrub_rate = set_sdram_scrub_rate;
2172 mci->get_sdram_scrub_rate = get_sdram_scrub_rate;
2173}
2174
2175static void disable_sdram_scrub_setting(struct mem_ctl_info *mci)
2176{
2177 struct i7core_pvt *pvt = mci->pvt_info;
2178 u32 pci_lock;
2179
2180 /* Lock writes to pci registers */
2181 pci_read_config_dword(pvt->pci_noncore, MC_CFG_CONTROL, &pci_lock);
2182 pci_lock &= ~0x3;
2183 pci_write_config_dword(pvt->pci_noncore, MC_CFG_CONTROL,
2184 pci_lock | MC_CFG_LOCK);
2185}
2186
a3aa0a4a
HS
2187static void i7core_pci_ctl_create(struct i7core_pvt *pvt)
2188{
2189 pvt->i7core_pci = edac_pci_create_generic_ctl(
2190 &pvt->i7core_dev->pdev[0]->dev,
2191 EDAC_MOD_STR);
2192 if (unlikely(!pvt->i7core_pci))
f9902f24
MCC
2193 i7core_printk(KERN_WARNING,
2194 "Unable to setup PCI error report via EDAC\n");
a3aa0a4a
HS
2195}
2196
2197static void i7core_pci_ctl_release(struct i7core_pvt *pvt)
2198{
2199 if (likely(pvt->i7core_pci))
2200 edac_pci_release_generic_ctl(pvt->i7core_pci);
2201 else
2202 i7core_printk(KERN_ERR,
2203 "Couldn't find mem_ctl_info for socket %d\n",
2204 pvt->i7core_dev->socket);
2205 pvt->i7core_pci = NULL;
2206}
2207
1c6edbbe
HS
2208static void i7core_unregister_mci(struct i7core_dev *i7core_dev)
2209{
2210 struct mem_ctl_info *mci = i7core_dev->mci;
2211 struct i7core_pvt *pvt;
2212
2213 if (unlikely(!mci || !mci->pvt_info)) {
2214 debugf0("MC: " __FILE__ ": %s(): dev = %p\n",
2215 __func__, &i7core_dev->pdev[0]->dev);
2216
2217 i7core_printk(KERN_ERR, "Couldn't find mci handler\n");
2218 return;
2219 }
2220
2221 pvt = mci->pvt_info;
2222
2223 debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n",
2224 __func__, mci, &i7core_dev->pdev[0]->dev);
2225
e8b6a127 2226 /* Disable scrubrate setting */
27100db0
MCC
2227 if (pvt->enable_scrub)
2228 disable_sdram_scrub_setting(mci);
e8b6a127 2229
4140c542 2230 atomic_notifier_chain_unregister(&x86_mce_decoder_chain, &i7_mce_dec);
1c6edbbe
HS
2231
2232 /* Disable EDAC polling */
2233 i7core_pci_ctl_release(pvt);
2234
2235 /* Remove MC sysfs nodes */
2236 edac_mc_del_mc(mci->dev);
2237
2238 debugf1("%s: free mci struct\n", mci->ctl_name);
2239 kfree(mci->ctl_name);
2240 edac_mc_free(mci);
2241 i7core_dev->mci = NULL;
2242}
2243
aace4283 2244static int i7core_register_mci(struct i7core_dev *i7core_dev)
a0c36a1f
MCC
2245{
2246 struct mem_ctl_info *mci;
2247 struct i7core_pvt *pvt;
aace4283
HS
2248 int rc, channels, csrows;
2249
2250 /* Check the number of active and not disabled channels */
2251 rc = i7core_get_active_channels(i7core_dev->socket, &channels, &csrows);
2252 if (unlikely(rc < 0))
2253 return rc;
a0c36a1f 2254
a0c36a1f 2255 /* allocate a new MC control structure */
aace4283 2256 mci = edac_mc_alloc(sizeof(*pvt), csrows, channels, i7core_dev->socket);
f4742949
MCC
2257 if (unlikely(!mci))
2258 return -ENOMEM;
a0c36a1f 2259
3cfd0146
MCC
2260 debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n",
2261 __func__, mci, &i7core_dev->pdev[0]->dev);
a0c36a1f 2262
a0c36a1f 2263 pvt = mci->pvt_info;
ef708b53 2264 memset(pvt, 0, sizeof(*pvt));
67166af4 2265
6d37d240
MCC
2266 /* Associates i7core_dev and mci for future usage */
2267 pvt->i7core_dev = i7core_dev;
2268 i7core_dev->mci = mci;
2269
41fcb7fe
MCC
2270 /*
2271 * FIXME: how to handle RDDR3 at MCI level? It is possible to have
2272 * Mixed RDDR3/UDDR3 with Nehalem, provided that they are on different
2273 * memory channels
2274 */
2275 mci->mtype_cap = MEM_FLAG_DDR3;
a0c36a1f
MCC
2276 mci->edac_ctl_cap = EDAC_FLAG_NONE;
2277 mci->edac_cap = EDAC_FLAG_NONE;
2278 mci->mod_name = "i7core_edac.c";
2279 mci->mod_ver = I7CORE_REVISION;
f4742949
MCC
2280 mci->ctl_name = kasprintf(GFP_KERNEL, "i7 core #%d",
2281 i7core_dev->socket);
2282 mci->dev_name = pci_name(i7core_dev->pdev[0]);
a0c36a1f 2283 mci->ctl_page_to_phys = NULL;
1288c18f 2284
ef708b53 2285 /* Store pci devices at mci for faster access */
f4742949 2286 rc = mci_bind_devs(mci, i7core_dev);
41fcb7fe 2287 if (unlikely(rc < 0))
628c5ddf 2288 goto fail0;
ef708b53 2289
5939813b
HS
2290 if (pvt->is_registered)
2291 mci->mc_driver_sysfs_attributes = i7core_sysfs_rdimm_attrs;
2292 else
2293 mci->mc_driver_sysfs_attributes = i7core_sysfs_udimm_attrs;
2294
ef708b53 2295 /* Get dimm basic config */
2e5185f7 2296 get_dimm_config(mci);
5939813b
HS
2297 /* record ptr to the generic device */
2298 mci->dev = &i7core_dev->pdev[0]->dev;
2299 /* Set the function pointer to an actual operation function */
2300 mci->edac_check = i7core_check_error;
ef708b53 2301
e8b6a127 2302 /* Enable scrubrate setting */
27100db0
MCC
2303 if (pvt->enable_scrub)
2304 enable_sdram_scrub_setting(mci);
e8b6a127 2305
a0c36a1f 2306 /* add this new MC control structure to EDAC's list of MCs */
b7c76151 2307 if (unlikely(edac_mc_add_mc(mci))) {
a0c36a1f
MCC
2308 debugf0("MC: " __FILE__
2309 ": %s(): failed edac_mc_add_mc()\n", __func__);
2310 /* FIXME: perhaps some code should go here that disables error
2311 * reporting if we just enabled it
2312 */
b7c76151
MCC
2313
2314 rc = -EINVAL;
628c5ddf 2315 goto fail0;
a0c36a1f
MCC
2316 }
2317
194a40fe 2318 /* Default error mask is any memory */
ef708b53 2319 pvt->inject.channel = 0;
194a40fe
MCC
2320 pvt->inject.dimm = -1;
2321 pvt->inject.rank = -1;
2322 pvt->inject.bank = -1;
2323 pvt->inject.page = -1;
2324 pvt->inject.col = -1;
2325
a3aa0a4a
HS
2326 /* allocating generic PCI control info */
2327 i7core_pci_ctl_create(pvt);
2328
535e9c78
NC
2329 /* DCLK for scrub rate setting */
2330 pvt->dclk_freq = get_dclk_freq();
2331
4140c542 2332 atomic_notifier_chain_register(&x86_mce_decoder_chain, &i7_mce_dec);
f4742949 2333
628c5ddf
HS
2334 return 0;
2335
628c5ddf
HS
2336fail0:
2337 kfree(mci->ctl_name);
2338 edac_mc_free(mci);
1c6edbbe 2339 i7core_dev->mci = NULL;
f4742949
MCC
2340 return rc;
2341}
2342
2343/*
2344 * i7core_probe Probe for ONE instance of device to see if it is
2345 * present.
2346 * return:
2347 * 0 for FOUND a device
2348 * < 0 for error code
2349 */
2d95d815 2350
f4742949
MCC
2351static int __devinit i7core_probe(struct pci_dev *pdev,
2352 const struct pci_device_id *id)
2353{
40557591 2354 int rc, count = 0;
f4742949
MCC
2355 struct i7core_dev *i7core_dev;
2356
2d95d815
MCC
2357 /* get the pci devices we want to reserve for our use */
2358 mutex_lock(&i7core_edac_lock);
2359
f4742949 2360 /*
d4c27795 2361 * All memory controllers are allocated at the first pass.
f4742949 2362 */
2d95d815
MCC
2363 if (unlikely(probed >= 1)) {
2364 mutex_unlock(&i7core_edac_lock);
76a7bd81 2365 return -ENODEV;
2d95d815
MCC
2366 }
2367 probed++;
de06eeef 2368
64c10f6e 2369 rc = i7core_get_all_devices();
f4742949
MCC
2370 if (unlikely(rc < 0))
2371 goto fail0;
2372
2373 list_for_each_entry(i7core_dev, &i7core_edac_list, list) {
40557591 2374 count++;
aace4283 2375 rc = i7core_register_mci(i7core_dev);
d4c27795
MCC
2376 if (unlikely(rc < 0))
2377 goto fail1;
d5381642
MCC
2378 }
2379
40557591
MCC
2380 /*
2381 * Nehalem-EX uses a different memory controller. However, as the
2382 * memory controller is not visible on some Nehalem/Nehalem-EP, we
2383 * need to indirectly probe via a X58 PCI device. The same devices
2384 * are found on (some) Nehalem-EX. So, on those machines, the
2385 * probe routine needs to return -ENODEV, as the actual Memory
2386 * Controller registers won't be detected.
2387 */
2388 if (!count) {
2389 rc = -ENODEV;
2390 goto fail1;
2391 }
2392
2393 i7core_printk(KERN_INFO,
2394 "Driver loaded, %d memory controller(s) found.\n",
2395 count);
8f331907 2396
66607706 2397 mutex_unlock(&i7core_edac_lock);
a0c36a1f
MCC
2398 return 0;
2399
66607706 2400fail1:
88ef5ea9
MCC
2401 list_for_each_entry(i7core_dev, &i7core_edac_list, list)
2402 i7core_unregister_mci(i7core_dev);
2403
13d6e9b6 2404 i7core_put_all_devices();
66607706
MCC
2405fail0:
2406 mutex_unlock(&i7core_edac_lock);
b7c76151 2407 return rc;
a0c36a1f
MCC
2408}
2409
2410/*
2411 * i7core_remove destructor for one instance of device
2412 *
2413 */
2414static void __devexit i7core_remove(struct pci_dev *pdev)
2415{
64c10f6e 2416 struct i7core_dev *i7core_dev;
a0c36a1f
MCC
2417
2418 debugf0(__FILE__ ": %s()\n", __func__);
2419
22e6bcbd
MCC
2420 /*
2421 * we have a trouble here: pdev value for removal will be wrong, since
2422 * it will point to the X58 register used to detect that the machine
2423 * is a Nehalem or upper design. However, due to the way several PCI
2424 * devices are grouped together to provide MC functionality, we need
2425 * to use a different method for releasing the devices
2426 */
87d1d272 2427
66607706 2428 mutex_lock(&i7core_edac_lock);
71fe0170
HS
2429
2430 if (unlikely(!probed)) {
2431 mutex_unlock(&i7core_edac_lock);
2432 return;
2433 }
2434
88ef5ea9
MCC
2435 list_for_each_entry(i7core_dev, &i7core_edac_list, list)
2436 i7core_unregister_mci(i7core_dev);
64c10f6e
HS
2437
2438 /* Release PCI resources */
2439 i7core_put_all_devices();
2440
2d95d815
MCC
2441 probed--;
2442
66607706 2443 mutex_unlock(&i7core_edac_lock);
a0c36a1f
MCC
2444}
2445
a0c36a1f
MCC
MODULE_DEVICE_TABLE(pci, i7core_pci_tbl);

/*
 * i7core_driver	pci_driver structure for this module
 *			(probe/remove keyed off the i7core_pci_tbl IDs)
 */
static struct pci_driver i7core_driver = {
	.name = "i7core_edac",
	.probe = i7core_probe,
	.remove = __devexit_p(i7core_remove),
	.id_table = i7core_pci_tbl,
};
2458
2459/*
2460 * i7core_init Module entry function
2461 * Try to initialize this module for its devices
2462 */
2463static int __init i7core_init(void)
2464{
2465 int pci_rc;
2466
2467 debugf2("MC: " __FILE__ ": %s()\n", __func__);
2468
2469 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
2470 opstate_init();
2471
54a08ab1
MCC
2472 if (use_pci_fixup)
2473 i7core_xeon_pci_fixup(pci_dev_table);
bc2d7245 2474
a0c36a1f
MCC
2475 pci_rc = pci_register_driver(&i7core_driver);
2476
3ef288a9
MCC
2477 if (pci_rc >= 0)
2478 return 0;
2479
2480 i7core_printk(KERN_ERR, "Failed to register device with error %d.\n",
2481 pci_rc);
2482
2483 return pci_rc;
a0c36a1f
MCC
2484}
2485
2486/*
2487 * i7core_exit() Module exit function
2488 * Unregister the driver
2489 */
2490static void __exit i7core_exit(void)
2491{
2492 debugf2("MC: " __FILE__ ": %s()\n", __func__);
2493 pci_unregister_driver(&i7core_driver);
2494}
2495
module_init(i7core_init);
module_exit(i7core_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
MODULE_DESCRIPTION("MC Driver for Intel i7 Core memory controllers - "
		   I7CORE_REVISION);

/* EDAC operation mode, read-only after load (0444) */
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");