/* Intel i7 core/Nehalem Memory Controller kernel module
 *
 * This driver supports the memory controllers found on the Intel
 * processor families i7core, i7core 7xx/8xx, i5core, Xeon 35xx,
 * Xeon 55xx and Xeon 56xx also known as Nehalem, Nehalem-EP, Lynnfield
 * and Westmere-EP.
 *
 * This file may be distributed under the terms of the
 * GNU General Public License version 2 only.
 *
 * Copyright (c) 2009-2010 by:
 *	 Mauro Carvalho Chehab <mchehab@redhat.com>
 *
 * Red Hat Inc. http://www.redhat.com
 *
 * Forked and adapted from the i5400_edac driver
 *
 * Based on the following public Intel datasheets:
 * Intel Core i7 Processor Extreme Edition and Intel Core i7 Processor
 * Datasheet, Volume 2:
 *	http://download.intel.com/design/processor/datashts/320835.pdf
 * Intel Xeon Processor 5500 Series Datasheet Volume 2
 *	http://www.intel.com/Assets/PDF/datasheet/321322.pdf
 * also available at:
 *	http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/edac.h>
#include <linux/mmzone.h>
#include <linux/smp.h>
#include <asm/mce.h>
#include <asm/processor.h>
#include <asm/div64.h>

#include "edac_core.h"

/* Static vars */
static LIST_HEAD(i7core_edac_list);
static DEFINE_MUTEX(i7core_edac_lock);
static int probed;

static int use_pci_fixup;
module_param(use_pci_fixup, int, 0444);
MODULE_PARM_DESC(use_pci_fixup, "Enable PCI fixup to seek for hidden devices");
/*
 * This is used for Nehalem-EP and Nehalem-EX devices, where the non-core
 * registers start at bus 255, and are not reported by BIOS.
 * We currently find devices with only 2 sockets. In order to support more
 * sockets over QPI (Quick Path Interconnect), just increment this number.
 */
#define MAX_SOCKET_BUSES	2


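/*
 * A sketch of the resulting mapping (assuming the usual case where the
 * hidden buses are 255 and 254): the non-core devices of socket 0 sit on
 * the last PCI bus found and those of socket 1 on the bus below it, which
 * is why the probe code below computes socket = last_bus - bus.
 */
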
/*
 * Alter this version for the module when modifications are made
 */
#define I7CORE_REVISION    " Ver: 1.0.0"
#define EDAC_MOD_STR	"i7core_edac"

/*
 * Debug macros
 */
#define i7core_printk(level, fmt, arg...)			\
	edac_printk(level, "i7core", fmt, ##arg)

#define i7core_mc_printk(mci, level, fmt, arg...)		\
	edac_mc_chipset_printk(mci, level, "i7core", fmt, ##arg)

/*
 * i7core Memory Controller Registers
 */

	/* OFFSETS for Device 0 Function 0 */

#define MC_CFG_CONTROL	0x90
  #define MC_CFG_UNLOCK		0x02
  #define MC_CFG_LOCK		0x00

	/* OFFSETS for Device 3 Function 0 */

#define MC_CONTROL	0x48
#define MC_STATUS	0x4c
#define MC_MAX_DOD	0x64

/*
 * OFFSETS for Device 3 Function 4, as indicated on the Xeon 5500 datasheet:
 * http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
 */

#define MC_TEST_ERR_RCV1	0x60
  #define DIMM2_COR_ERR(r)			((r) & 0x7fff)

#define MC_TEST_ERR_RCV0	0x64
  #define DIMM1_COR_ERR(r)			(((r) >> 16) & 0x7fff)
  #define DIMM0_COR_ERR(r)			((r) & 0x7fff)

/* OFFSETS for Device 3 Function 2, as indicated on the Xeon 5500 datasheet */
#define MC_SSRCONTROL		0x48
  #define SSR_MODE_DISABLE	0x00
  #define SSR_MODE_ENABLE	0x01
  #define SSR_MODE_MASK		0x03

#define MC_SCRUB_CONTROL	0x4c
  #define STARTSCRUB		(1 << 24)
  #define SCRUBINTERVAL_MASK	0xffffff

#define MC_COR_ECC_CNT_0	0x80
#define MC_COR_ECC_CNT_1	0x84
#define MC_COR_ECC_CNT_2	0x88
#define MC_COR_ECC_CNT_3	0x8c
#define MC_COR_ECC_CNT_4	0x90
#define MC_COR_ECC_CNT_5	0x94

#define DIMM_TOP_COR_ERR(r)			(((r) >> 16) & 0x7fff)
#define DIMM_BOT_COR_ERR(r)			((r) & 0x7fff)


	/* OFFSETS for Devices 4,5 and 6 Function 0 */

#define MC_CHANNEL_DIMM_INIT_PARAMS 0x58
  #define THREE_DIMMS_PRESENT		(1 << 24)
  #define SINGLE_QUAD_RANK_PRESENT	(1 << 23)
  #define QUAD_RANK_PRESENT		(1 << 22)
  #define REGISTERED_DIMM		(1 << 15)

#define MC_CHANNEL_MAPPER	0x60
  #define RDLCH(r, ch)		((((r) >> (3 + (ch * 6))) & 0x07) - 1)
  #define WRLCH(r, ch)		((((r) >> (ch * 6)) & 0x07) - 1)

#define MC_CHANNEL_RANK_PRESENT 0x7c
  #define RANK_PRESENT_MASK		0xffff

#define MC_CHANNEL_ADDR_MATCH	0xf0
#define MC_CHANNEL_ERROR_MASK	0xf8
#define MC_CHANNEL_ERROR_INJECT	0xfc
  #define INJECT_ADDR_PARITY	0x10
  #define INJECT_ECC		0x08
  #define MASK_CACHELINE	0x06
  #define MASK_FULL_CACHELINE	0x06
  #define MASK_MSB32_CACHELINE	0x04
  #define MASK_LSB32_CACHELINE	0x02
  #define NO_MASK_CACHELINE	0x00
  #define REPEAT_EN		0x01

	/* OFFSETS for Devices 4,5 and 6 Function 1 */

#define MC_DOD_CH_DIMM0		0x48
#define MC_DOD_CH_DIMM1		0x4c
#define MC_DOD_CH_DIMM2		0x50
  #define RANKOFFSET_MASK	((1 << 12) | (1 << 11) | (1 << 10))
  #define RANKOFFSET(x)		((x & RANKOFFSET_MASK) >> 10)
  #define DIMM_PRESENT_MASK	(1 << 9)
  #define DIMM_PRESENT(x)	(((x) & DIMM_PRESENT_MASK) >> 9)
  #define MC_DOD_NUMBANK_MASK		((1 << 8) | (1 << 7))
  #define MC_DOD_NUMBANK(x)		(((x) & MC_DOD_NUMBANK_MASK) >> 7)
  #define MC_DOD_NUMRANK_MASK		((1 << 6) | (1 << 5))
  #define MC_DOD_NUMRANK(x)		(((x) & MC_DOD_NUMRANK_MASK) >> 5)
  #define MC_DOD_NUMROW_MASK		((1 << 4) | (1 << 3) | (1 << 2))
  #define MC_DOD_NUMROW(x)		(((x) & MC_DOD_NUMROW_MASK) >> 2)
  #define MC_DOD_NUMCOL_MASK		3
  #define MC_DOD_NUMCOL(x)		((x) & MC_DOD_NUMCOL_MASK)

#define MC_RANK_PRESENT		0x7c

#define MC_SAG_CH_0	0x80
#define MC_SAG_CH_1	0x84
#define MC_SAG_CH_2	0x88
#define MC_SAG_CH_3	0x8c
#define MC_SAG_CH_4	0x90
#define MC_SAG_CH_5	0x94
#define MC_SAG_CH_6	0x98
#define MC_SAG_CH_7	0x9c

#define MC_RIR_LIMIT_CH_0	0x40
#define MC_RIR_LIMIT_CH_1	0x44
#define MC_RIR_LIMIT_CH_2	0x48
#define MC_RIR_LIMIT_CH_3	0x4C
#define MC_RIR_LIMIT_CH_4	0x50
#define MC_RIR_LIMIT_CH_5	0x54
#define MC_RIR_LIMIT_CH_6	0x58
#define MC_RIR_LIMIT_CH_7	0x5C
#define MC_RIR_LIMIT_MASK	((1 << 10) - 1)

#define MC_RIR_WAY_CH		0x80
  #define MC_RIR_WAY_OFFSET_MASK	(((1 << 14) - 1) & ~0x7)
  #define MC_RIR_WAY_RANK_MASK		0x7

/*
 * i7core structs
 */

#define NUM_CHANS 3
#define MAX_DIMMS 3		/* Max DIMMS per channel */
#define MAX_MCR_FUNC  4
#define MAX_CHAN_FUNC 3

struct i7core_info {
	u32	mc_control;
	u32	mc_status;
	u32	max_dod;
	u32	ch_map;
};


struct i7core_inject {
	int	enable;

	u32	section;
	u32	type;
	u32	eccmask;

	/* Error address mask */
	int channel, dimm, rank, bank, page, col;
};

struct i7core_channel {
	u32		ranks;
	u32		dimms;
};

struct pci_id_descr {
	int			dev;
	int			func;
	int			dev_id;
	int			optional;
};

struct pci_id_table {
	const struct pci_id_descr	*descr;
	int				n_devs;
};

struct i7core_dev {
	struct list_head	list;
	u8			socket;
	struct pci_dev		**pdev;
	int			n_devs;
	struct mem_ctl_info	*mci;
};

struct i7core_pvt {
	struct pci_dev	*pci_noncore;
	struct pci_dev	*pci_mcr[MAX_MCR_FUNC + 1];
	struct pci_dev	*pci_ch[NUM_CHANS][MAX_CHAN_FUNC + 1];

	struct i7core_dev *i7core_dev;

	struct i7core_info	info;
	struct i7core_inject	inject;
	struct i7core_channel	channel[NUM_CHANS];

	int		ce_count_available;
	int		csrow_map[NUM_CHANS][MAX_DIMMS];

	/* ECC corrected errors counts per udimm */
	unsigned long	udimm_ce_count[MAX_DIMMS];
	int		udimm_last_ce_count[MAX_DIMMS];
	/* ECC corrected errors counts per rdimm */
	unsigned long	rdimm_ce_count[NUM_CHANS][MAX_DIMMS];
	int		rdimm_last_ce_count[NUM_CHANS][MAX_DIMMS];

	bool		is_registered, enable_scrub;

	/* Fifo double buffers */
	struct mce	mce_entry[MCE_LOG_LEN];
	struct mce	mce_outentry[MCE_LOG_LEN];

	/* Fifo in/out counters */
	unsigned	mce_in, mce_out;

	/* Count of errors that could not be retrieved */
	unsigned	mce_overrun;

	/* DCLK Frequency used for computing scrub rate */
	int		dclk_freq;

	/* Struct to control EDAC polling */
	struct edac_pci_ctl_info *i7core_pci;
};

#define PCI_DESCR(device, function, device_id)	\
	.dev = (device),			\
	.func = (function),			\
	.dev_id = (device_id)

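/*
 * For instance, { PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_I7_MCR) } in the
 * tables below expands to
 * { .dev = 3, .func = 0, .dev_id = PCI_DEVICE_ID_INTEL_I7_MCR }.
 */
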
static const struct pci_id_descr pci_dev_descr_i7core_nehalem[] = {
	/* Memory controller */
	{ PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_I7_MCR)     },
	{ PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_I7_MC_TAD)  },
	/* Exists only for RDIMM */
	{ PCI_DESCR(3, 2, PCI_DEVICE_ID_INTEL_I7_MC_RAS), .optional = 1  },
	{ PCI_DESCR(3, 4, PCI_DEVICE_ID_INTEL_I7_MC_TEST) },

	/* Channel 0 */
	{ PCI_DESCR(4, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH0_CTRL) },
	{ PCI_DESCR(4, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH0_ADDR) },
	{ PCI_DESCR(4, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH0_RANK) },
	{ PCI_DESCR(4, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH0_TC)   },

	/* Channel 1 */
	{ PCI_DESCR(5, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH1_CTRL) },
	{ PCI_DESCR(5, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH1_ADDR) },
	{ PCI_DESCR(5, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH1_RANK) },
	{ PCI_DESCR(5, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH1_TC)   },

	/* Channel 2 */
	{ PCI_DESCR(6, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH2_CTRL) },
	{ PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH2_ADDR) },
	{ PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH2_RANK) },
	{ PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH2_TC)   },

	/* Generic Non-core registers */
	/*
	 * This is the PCI device on i7core and on Xeon 35xx (8086:2c41).
	 * On Xeon 55xx, however, it has a different id (8086:2c40), so
	 * the probing code needs to test for the other address in case of
	 * failure of this one.
	 */
	{ PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_I7_NONCORE) },

};

static const struct pci_id_descr pci_dev_descr_lynnfield[] = {
	{ PCI_DESCR( 3, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR)         },
	{ PCI_DESCR( 3, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD)      },
	{ PCI_DESCR( 3, 4, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST)     },

	{ PCI_DESCR( 4, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL) },
	{ PCI_DESCR( 4, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR) },
	{ PCI_DESCR( 4, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK) },
	{ PCI_DESCR( 4, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC)   },

	{ PCI_DESCR( 5, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL) },
	{ PCI_DESCR( 5, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR) },
	{ PCI_DESCR( 5, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK) },
	{ PCI_DESCR( 5, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC)   },

	/*
	 * This PCI device has an alternate address on some
	 * processors, such as the Core i7 860.
	 */
	{ PCI_DESCR( 0, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE) },
};

static const struct pci_id_descr pci_dev_descr_i7core_westmere[] = {
	/* Memory controller */
	{ PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR_REV2)     },
	{ PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD_REV2)  },
	/* Exists only for RDIMM */
	{ PCI_DESCR(3, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_RAS_REV2), .optional = 1  },
	{ PCI_DESCR(3, 4, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST_REV2) },

	/* Channel 0 */
	{ PCI_DESCR(4, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL_REV2) },
	{ PCI_DESCR(4, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR_REV2) },
	{ PCI_DESCR(4, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK_REV2) },
	{ PCI_DESCR(4, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC_REV2)   },

	/* Channel 1 */
	{ PCI_DESCR(5, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL_REV2) },
	{ PCI_DESCR(5, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR_REV2) },
	{ PCI_DESCR(5, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK_REV2) },
	{ PCI_DESCR(5, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC_REV2)   },

	/* Channel 2 */
	{ PCI_DESCR(6, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_CTRL_REV2) },
	{ PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_ADDR_REV2) },
	{ PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_RANK_REV2) },
	{ PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_TC_REV2)   },

	/* Generic Non-core registers */
	{ PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_REV2) },

};

#define PCI_ID_TABLE_ENTRY(A) { .descr=A, .n_devs = ARRAY_SIZE(A) }
static const struct pci_id_table pci_dev_table[] = {
	PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_nehalem),
	PCI_ID_TABLE_ENTRY(pci_dev_descr_lynnfield),
	PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_westmere),
	{0,}			/* 0 terminated list. */
};

/*
 * pci_device_id table for which devices we are looking for
 */
static const struct pci_device_id i7core_pci_tbl[] __devinitdata = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
	{0,}			/* 0 terminated list. */
};

/****************************************************************************
			Ancillary status routines
 ****************************************************************************/

	/* MC_CONTROL bits */
#define CH_ACTIVE(pvt, ch)	((pvt)->info.mc_control & (1 << (8 + ch)))
#define ECCx8(pvt)		((pvt)->info.mc_control & (1 << 1))

	/* MC_STATUS bits */
#define ECC_ENABLED(pvt)	((pvt)->info.mc_status & (1 << 4))
#define CH_DISABLED(pvt, ch)	((pvt)->info.mc_status & (1 << ch))

	/* MC_MAX_DOD read functions */
static inline int numdimms(u32 dimms)
{
	return (dimms & 0x3) + 1;
}

static inline int numrank(u32 rank)
{
	static int ranks[4] = { 1, 2, 4, -EINVAL };

	return ranks[rank & 0x3];
}

static inline int numbank(u32 bank)
{
	static int banks[4] = { 4, 8, 16, -EINVAL };

	return banks[bank & 0x3];
}

static inline int numrow(u32 row)
{
	static int rows[8] = {
		1 << 12, 1 << 13, 1 << 14, 1 << 15,
		1 << 16, -EINVAL, -EINVAL, -EINVAL,
	};

	return rows[row & 0x7];
}

static inline int numcol(u32 col)
{
	static int cols[8] = {
		1 << 10, 1 << 11, 1 << 12, -EINVAL,
	};
	return cols[col & 0x3];
}

static struct i7core_dev *get_i7core_dev(u8 socket)
{
	struct i7core_dev *i7core_dev;

	list_for_each_entry(i7core_dev, &i7core_edac_list, list) {
		if (i7core_dev->socket == socket)
			return i7core_dev;
	}

	return NULL;
}

static struct i7core_dev *alloc_i7core_dev(u8 socket,
					   const struct pci_id_table *table)
{
	struct i7core_dev *i7core_dev;

	i7core_dev = kzalloc(sizeof(*i7core_dev), GFP_KERNEL);
	if (!i7core_dev)
		return NULL;

	i7core_dev->pdev = kzalloc(sizeof(*i7core_dev->pdev) * table->n_devs,
				   GFP_KERNEL);
	if (!i7core_dev->pdev) {
		kfree(i7core_dev);
		return NULL;
	}

	i7core_dev->socket = socket;
	i7core_dev->n_devs = table->n_devs;
	list_add_tail(&i7core_dev->list, &i7core_edac_list);

	return i7core_dev;
}

static void free_i7core_dev(struct i7core_dev *i7core_dev)
{
	list_del(&i7core_dev->list);
	kfree(i7core_dev->pdev);
	kfree(i7core_dev);
}

/****************************************************************************
			Memory check routines
 ****************************************************************************/
static struct pci_dev *get_pdev_slot_func(u8 socket, unsigned slot,
					  unsigned func)
{
	struct i7core_dev *i7core_dev = get_i7core_dev(socket);
	int i;

	if (!i7core_dev)
		return NULL;

	for (i = 0; i < i7core_dev->n_devs; i++) {
		if (!i7core_dev->pdev[i])
			continue;

		if (PCI_SLOT(i7core_dev->pdev[i]->devfn) == slot &&
		    PCI_FUNC(i7core_dev->pdev[i]->devfn) == func) {
			return i7core_dev->pdev[i];
		}
	}

	return NULL;
}

/**
 * i7core_get_active_channels() - gets the number of channels and csrows
 * @socket:	Quick Path Interconnect socket
 * @channels:	Number of channels that will be returned
 * @csrows:	Number of csrows found
 *
 * Since the EDAC core needs to know in advance the number of available
 * channels and csrows, in order to allocate memory for them, this function
 * implements the first of two similar steps: it counts the number of
 * csrows/channels present on one socket, so that the mci components can be
 * properly sized.
 *
 * It should be noted that none of the currently available datasheets
 * explains, or even mentions, how csrows are seen by the memory controller,
 * so we need to add a fake description for csrows: this driver attributes
 * one DIMM of memory to one csrow.
 */
static int i7core_get_active_channels(const u8 socket, unsigned *channels,
				      unsigned *csrows)
{
	struct pci_dev *pdev = NULL;
	int i, j;
	u32 status, control;

	*channels = 0;
	*csrows = 0;

	pdev = get_pdev_slot_func(socket, 3, 0);
	if (!pdev) {
		i7core_printk(KERN_ERR, "Couldn't find socket %d fn 3.0!!!\n",
			      socket);
		return -ENODEV;
	}

	/* Device 3 function 0 reads */
	pci_read_config_dword(pdev, MC_STATUS, &status);
	pci_read_config_dword(pdev, MC_CONTROL, &control);

	for (i = 0; i < NUM_CHANS; i++) {
		u32 dimm_dod[3];
		/* Check if the channel is active */
		if (!(control & (1 << (8 + i))))
			continue;

		/* Check if the channel is disabled */
		if (status & (1 << i))
			continue;

		pdev = get_pdev_slot_func(socket, i + 4, 1);
		if (!pdev) {
			i7core_printk(KERN_ERR, "Couldn't find socket %d "
						"fn %d.%d!!!\n",
						socket, i + 4, 1);
			return -ENODEV;
		}
		/* Devices 4-6 function 1 */
		pci_read_config_dword(pdev,
				MC_DOD_CH_DIMM0, &dimm_dod[0]);
		pci_read_config_dword(pdev,
				MC_DOD_CH_DIMM1, &dimm_dod[1]);
		pci_read_config_dword(pdev,
				MC_DOD_CH_DIMM2, &dimm_dod[2]);

		(*channels)++;

		for (j = 0; j < 3; j++) {
			if (!DIMM_PRESENT(dimm_dod[j]))
				continue;
			(*csrows)++;
		}
	}

	debugf0("Number of active channels on socket %d: %d\n",
		socket, *channels);

	return 0;
}

static int get_dimm_config(const struct mem_ctl_info *mci)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	struct csrow_info *csr;
	struct pci_dev *pdev;
	int i, j;
	int csrow = 0;
	unsigned long last_page = 0;
	enum edac_type mode;
	enum mem_type mtype;

	/* Get data from the MC register, function 0 */
	pdev = pvt->pci_mcr[0];
	if (!pdev)
		return -ENODEV;

	/* Device 3 function 0 reads */
	pci_read_config_dword(pdev, MC_CONTROL, &pvt->info.mc_control);
	pci_read_config_dword(pdev, MC_STATUS, &pvt->info.mc_status);
	pci_read_config_dword(pdev, MC_MAX_DOD, &pvt->info.max_dod);
	pci_read_config_dword(pdev, MC_CHANNEL_MAPPER, &pvt->info.ch_map);

	debugf0("QPI %d control=0x%08x status=0x%08x dod=0x%08x map=0x%08x\n",
		pvt->i7core_dev->socket, pvt->info.mc_control, pvt->info.mc_status,
		pvt->info.max_dod, pvt->info.ch_map);

	if (ECC_ENABLED(pvt)) {
		debugf0("ECC enabled with x%d SDCC\n", ECCx8(pvt) ? 8 : 4);
		if (ECCx8(pvt))
			mode = EDAC_S8ECD8ED;
		else
			mode = EDAC_S4ECD4ED;
	} else {
		debugf0("ECC disabled\n");
		mode = EDAC_NONE;
	}

	/* FIXME: need to handle the error codes */
	debugf0("DOD Max limits: DIMMS: %d, %d-ranked, %d-banked "
		"x%x x 0x%x\n",
		numdimms(pvt->info.max_dod),
		numrank(pvt->info.max_dod >> 2),
		numbank(pvt->info.max_dod >> 4),
		numrow(pvt->info.max_dod >> 6),
		numcol(pvt->info.max_dod >> 9));

	for (i = 0; i < NUM_CHANS; i++) {
		u32 data, dimm_dod[3], value[8];

		if (!pvt->pci_ch[i][0])
			continue;

		if (!CH_ACTIVE(pvt, i)) {
			debugf0("Channel %i is not active\n", i);
			continue;
		}
		if (CH_DISABLED(pvt, i)) {
			debugf0("Channel %i is disabled\n", i);
			continue;
		}

		/* Devices 4-6 function 0 */
		pci_read_config_dword(pvt->pci_ch[i][0],
				MC_CHANNEL_DIMM_INIT_PARAMS, &data);

		pvt->channel[i].ranks = (data & QUAD_RANK_PRESENT) ?
						4 : 2;

		if (data & REGISTERED_DIMM)
			mtype = MEM_RDDR3;
		else
			mtype = MEM_DDR3;
#if 0
		if (data & THREE_DIMMS_PRESENT)
			pvt->channel[i].dimms = 3;
		else if (data & SINGLE_QUAD_RANK_PRESENT)
			pvt->channel[i].dimms = 1;
		else
			pvt->channel[i].dimms = 2;
#endif

		/* Devices 4-6 function 1 */
		pci_read_config_dword(pvt->pci_ch[i][1],
				MC_DOD_CH_DIMM0, &dimm_dod[0]);
		pci_read_config_dword(pvt->pci_ch[i][1],
				MC_DOD_CH_DIMM1, &dimm_dod[1]);
		pci_read_config_dword(pvt->pci_ch[i][1],
				MC_DOD_CH_DIMM2, &dimm_dod[2]);

		debugf0("Ch%d phy rd%d, wr%d (0x%08x): "
			"%d ranks, %cDIMMs\n",
			i,
			RDLCH(pvt->info.ch_map, i), WRLCH(pvt->info.ch_map, i),
			data,
			pvt->channel[i].ranks,
			(data & REGISTERED_DIMM) ? 'R' : 'U');

		for (j = 0; j < 3; j++) {
			u32 banks, ranks, rows, cols;
			u32 size, npages;

			if (!DIMM_PRESENT(dimm_dod[j]))
				continue;

			banks = numbank(MC_DOD_NUMBANK(dimm_dod[j]));
			ranks = numrank(MC_DOD_NUMRANK(dimm_dod[j]));
			rows = numrow(MC_DOD_NUMROW(dimm_dod[j]));
			cols = numcol(MC_DOD_NUMCOL(dimm_dod[j]));

			/* DDR3 has 8 I/O banks */
			size = (rows * cols * banks * ranks) >> (20 - 3);
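			/*
			 * A worked example of the line above (a sketch;
			 * reading the "- 3" as the 8 bytes per 64-bit
			 * memory location is an assumption):
			 * rows = 1 << 14, cols = 1 << 10, banks = 8,
			 * ranks = 2 gives 1 << 28 locations; shifting by
			 * (20 - 3) = 17 folds in the factor of 8 and the
			 * MiB conversion: (1 << 28) >> 17 = 2048 MiB.
			 */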

			pvt->channel[i].dimms++;

			debugf0("\tdimm %d %d Mb offset: %x, "
				"bank: %d, rank: %d, row: %#x, col: %#x\n",
				j, size,
				RANKOFFSET(dimm_dod[j]),
				banks, ranks, rows, cols);

			npages = MiB_TO_PAGES(size);

			csr = &mci->csrows[csrow];
			csr->first_page = last_page + 1;
			last_page += npages;
			csr->last_page = last_page;
			csr->nr_pages = npages;

			csr->page_mask = 0;
			csr->grain = 8;
			csr->csrow_idx = csrow;
			csr->nr_channels = 1;

			csr->channels[0].chan_idx = i;
			csr->channels[0].ce_count = 0;

			pvt->csrow_map[i][j] = csrow;

			switch (banks) {
			case 4:
				csr->dtype = DEV_X4;
				break;
			case 8:
				csr->dtype = DEV_X8;
				break;
			case 16:
				csr->dtype = DEV_X16;
				break;
			default:
				csr->dtype = DEV_UNKNOWN;
			}

			csr->edac_mode = mode;
			csr->mtype = mtype;

			csrow++;
		}

		pci_read_config_dword(pdev, MC_SAG_CH_0, &value[0]);
		pci_read_config_dword(pdev, MC_SAG_CH_1, &value[1]);
		pci_read_config_dword(pdev, MC_SAG_CH_2, &value[2]);
		pci_read_config_dword(pdev, MC_SAG_CH_3, &value[3]);
		pci_read_config_dword(pdev, MC_SAG_CH_4, &value[4]);
		pci_read_config_dword(pdev, MC_SAG_CH_5, &value[5]);
		pci_read_config_dword(pdev, MC_SAG_CH_6, &value[6]);
		pci_read_config_dword(pdev, MC_SAG_CH_7, &value[7]);
		debugf1("\t[%i] DIVBY3\tREMOVED\tOFFSET\n", i);
		for (j = 0; j < 8; j++)
			debugf1("\t\t%#x\t%#x\t%#x\n",
				(value[j] >> 27) & 0x1,
				(value[j] >> 24) & 0x7,
				(value[j] & ((1 << 24) - 1)));
	}

	return 0;
}

/****************************************************************************
			Error insertion routines
 ****************************************************************************/

/* The i7core has independent error injection features per channel.
   However, to keep the code simpler, we don't allow enabling error injection
   on more than one channel at a time.
   Also, since a change to an inject parameter is applied only at enable time,
   we disable error injection on every write to the sysfs nodes that control
   the error code injection.
 */
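
/*
 * A hypothetical usage sketch of the injection nodes declared below
 * (assuming the controller was registered as mc0 under the standard EDAC
 * sysfs root):
 *
 *	cd /sys/devices/system/edac/mc/mc0
 *	echo 2 > inject_type		# inject an ECC error, no repeat
 *	echo 1 > inject_eccmask		# flip ECC bit 0
 *	echo 0 > inject_addrmatch/channel
 *	echo any > inject_addrmatch/dimm	# don't match on the DIMM
 *	echo 1 > inject_enable
 */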
static int disable_inject(const struct mem_ctl_info *mci)
{
	struct i7core_pvt *pvt = mci->pvt_info;

	pvt->inject.enable = 0;

	if (!pvt->pci_ch[pvt->inject.channel][0])
		return -ENODEV;

	pci_write_config_dword(pvt->pci_ch[pvt->inject.channel][0],
				MC_CHANNEL_ERROR_INJECT, 0);

	return 0;
}

/*
 * i7core inject.section
 *
 * accept and store error injection inject.section value
 * bit 0 - refers to the lower 32-byte half cacheline
 * bit 1 - refers to the upper 32-byte half cacheline
 */
static ssize_t i7core_inject_section_store(struct mem_ctl_info *mci,
					   const char *data, size_t count)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	unsigned long value;
	int rc;

	if (pvt->inject.enable)
		disable_inject(mci);

	rc = strict_strtoul(data, 10, &value);
	if ((rc < 0) || (value > 3))
		return -EIO;

	pvt->inject.section = (u32) value;
	return count;
}

static ssize_t i7core_inject_section_show(struct mem_ctl_info *mci,
					  char *data)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	return sprintf(data, "0x%08x\n", pvt->inject.section);
}

/*
 * i7core inject.type
 *
 * accept and store error injection inject.type value
 * bit 0 - repeat enable - Enable error repetition
 * bit 1 - inject ECC error
 * bit 2 - inject parity error
 */
static ssize_t i7core_inject_type_store(struct mem_ctl_info *mci,
					const char *data, size_t count)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	unsigned long value;
	int rc;

	if (pvt->inject.enable)
		disable_inject(mci);

	rc = strict_strtoul(data, 10, &value);
	if ((rc < 0) || (value > 7))
		return -EIO;

	pvt->inject.type = (u32) value;
	return count;
}

static ssize_t i7core_inject_type_show(struct mem_ctl_info *mci,
				       char *data)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	return sprintf(data, "0x%08x\n", pvt->inject.type);
}

/*
 * i7core_inject_eccmask_store
 *
 * The type of error (UE/CE) will depend on the inject.eccmask value:
 *   Any bits set to a 1 will flip the corresponding ECC bit
 *   Correctable errors can be injected by flipping 1 bit or the bits within
 *   a symbol pair (2 consecutive aligned 8-bit pairs - i.e. 7:0 and 15:8 or
 *   23:16 and 31:24). Flipping bits in two symbol pairs will cause an
 *   uncorrectable error to be injected.
 */
static ssize_t i7core_inject_eccmask_store(struct mem_ctl_info *mci,
					   const char *data, size_t count)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	unsigned long value;
	int rc;

	if (pvt->inject.enable)
		disable_inject(mci);

	rc = strict_strtoul(data, 10, &value);
	if (rc < 0)
		return -EIO;

	pvt->inject.eccmask = (u32) value;
	return count;
}

static ssize_t i7core_inject_eccmask_show(struct mem_ctl_info *mci,
					  char *data)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	return sprintf(data, "0x%08x\n", pvt->inject.eccmask);
}

/*
 * i7core_addrmatch
 *
 * Sysfs nodes that control the address-match mask used for error injection:
 * channel, dimm, rank, bank, page and col. Writing "any" (stored as -1)
 * makes the MCU ignore that field when matching the injection address.
 */

#define DECLARE_ADDR_MATCH(param, limit)			\
static ssize_t i7core_inject_store_##param(			\
	struct mem_ctl_info *mci,				\
	const char *data, size_t count)				\
{								\
	struct i7core_pvt *pvt;					\
	long value;						\
	int rc;							\
								\
	debugf1("%s()\n", __func__);				\
	pvt = mci->pvt_info;					\
								\
	if (pvt->inject.enable)					\
		disable_inject(mci);				\
								\
	if (!strcasecmp(data, "any") || !strcasecmp(data, "any\n"))\
		value = -1;					\
	else {							\
		rc = strict_strtoul(data, 10, &value);		\
		if ((rc < 0) || (value >= limit))		\
			return -EIO;				\
	}							\
								\
	pvt->inject.param = value;				\
								\
	return count;						\
}								\
								\
static ssize_t i7core_inject_show_##param(			\
	struct mem_ctl_info *mci,				\
	char *data)						\
{								\
	struct i7core_pvt *pvt;					\
								\
	pvt = mci->pvt_info;					\
	debugf1("%s() pvt=%p\n", __func__, pvt);		\
	if (pvt->inject.param < 0)				\
		return sprintf(data, "any\n");			\
	else							\
		return sprintf(data, "%d\n", pvt->inject.param);\
}

#define ATTR_ADDR_MATCH(param)					\
	{							\
		.attr = {					\
			.name = #param,				\
			.mode = (S_IRUGO | S_IWUSR)		\
		},						\
		.show  = i7core_inject_show_##param,		\
		.store = i7core_inject_store_##param,		\
	}

DECLARE_ADDR_MATCH(channel, 3);
DECLARE_ADDR_MATCH(dimm, 3);
DECLARE_ADDR_MATCH(rank, 4);
DECLARE_ADDR_MATCH(bank, 32);
DECLARE_ADDR_MATCH(page, 0x10000);
DECLARE_ADDR_MATCH(col, 0x4000);

static int write_and_test(struct pci_dev *dev, const int where, const u32 val)
{
	u32 read;
	int count;

	debugf0("setting pci %02x:%02x.%x reg=%02x value=%08x\n",
		dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
		where, val);

	for (count = 0; count < 10; count++) {
		if (count)
			msleep(100);
		pci_write_config_dword(dev, where, val);
		pci_read_config_dword(dev, where, &read);

		if (read == val)
			return 0;
	}

	i7core_printk(KERN_ERR, "Error during set pci %02x:%02x.%x reg=%02x "
		"write=%08x. Read=%08x\n",
		dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
		where, val, read);

	return -EINVAL;
}

/*
 * This routine prepares the Memory Controller for error injection.
 * The error will be injected when some process tries to write to the
 * memory that matches the given criteria.
 * The criteria can be set in terms of a mask where dimm, rank, bank, page
 * and col can be specified.
 * A -1 value for any of the mask items will make the MCU ignore
 * that matching criterion for error injection.
 *
 * It should be noted that the error will only happen after a write operation
 * to a memory address that matches the condition. If REPEAT_EN is not set in
 * the inject mask, it will produce just one error. Otherwise, it will repeat
 * until the inject mask is cleared.
 *
 * FIXME: This routine assumes that the MAXNUMDIMMS value of MC_MAX_DOD
 *	  is reliable enough to check if the MC is using the
 *	  three channels. However, this is not clear from the datasheet.
 */
static ssize_t i7core_inject_enable_store(struct mem_ctl_info *mci,
				       const char *data, size_t count)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	u32 injectmask;
	u64 mask = 0;
	int  rc;
	long enable;

	if (!pvt->pci_ch[pvt->inject.channel][0])
		return 0;

	rc = strict_strtoul(data, 10, &enable);
	if ((rc < 0))
		return 0;

	if (enable) {
		pvt->inject.enable = 1;
	} else {
		disable_inject(mci);
		return count;
	}

	/* Sets pvt->inject.dimm mask */
	if (pvt->inject.dimm < 0)
		mask |= 1LL << 41;
	else {
		if (pvt->channel[pvt->inject.channel].dimms > 2)
			mask |= (pvt->inject.dimm & 0x3LL) << 35;
		else
			mask |= (pvt->inject.dimm & 0x1LL) << 36;
	}

	/* Sets pvt->inject.rank mask */
	if (pvt->inject.rank < 0)
		mask |= 1LL << 40;
	else {
		if (pvt->channel[pvt->inject.channel].dimms > 2)
			mask |= (pvt->inject.rank & 0x1LL) << 34;
		else
			mask |= (pvt->inject.rank & 0x3LL) << 34;
	}

	/* Sets pvt->inject.bank mask */
	if (pvt->inject.bank < 0)
		mask |= 1LL << 39;
	else
		mask |= (pvt->inject.bank & 0x15LL) << 30;

	/* Sets pvt->inject.page mask */
	if (pvt->inject.page < 0)
		mask |= 1LL << 38;
	else
		mask |= (pvt->inject.page & 0xffff) << 14;

	/* Sets pvt->inject.column mask */
	if (pvt->inject.col < 0)
		mask |= 1LL << 37;
	else
		mask |= (pvt->inject.col & 0x3fff);

	/*
	 * bit    0: REPEAT_EN
	 * bits 1-2: MASK_HALF_CACHELINE
	 * bit    3: INJECT_ECC
	 * bit    4: INJECT_ADDR_PARITY
	 */

	injectmask = (pvt->inject.type & 1) |
		     (pvt->inject.section & 0x3) << 1 |
		     (pvt->inject.type & 0x6) << (3 - 1);
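	/*
	 * For instance (a worked example of the encoding above):
	 * inject.type = 2 (inject ECC, no repeat) and inject.section = 0
	 * yield injectmask = (2 & 0x6) << 2 = 0x08, i.e. INJECT_ECC.
	 */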

	/* Unlock writes to registers - this register is write only */
	pci_write_config_dword(pvt->pci_noncore,
			       MC_CFG_CONTROL, 0x2);

	write_and_test(pvt->pci_ch[pvt->inject.channel][0],
			       MC_CHANNEL_ADDR_MATCH, mask);
	write_and_test(pvt->pci_ch[pvt->inject.channel][0],
			       MC_CHANNEL_ADDR_MATCH + 4, mask >> 32L);

	write_and_test(pvt->pci_ch[pvt->inject.channel][0],
			       MC_CHANNEL_ERROR_MASK, pvt->inject.eccmask);

	write_and_test(pvt->pci_ch[pvt->inject.channel][0],
			       MC_CHANNEL_ERROR_INJECT, injectmask);

	/*
	 * This is something undocumented, based on my tests:
	 * without writing 8 to this register, errors aren't injected.
	 * Not sure why.
	 */
	pci_write_config_dword(pvt->pci_noncore,
			       MC_CFG_CONTROL, 8);

	debugf0("Error inject addr match 0x%016llx, ecc 0x%08x,"
		" inject 0x%08x\n",
		mask, pvt->inject.eccmask, injectmask);


	return count;
}

static ssize_t i7core_inject_enable_show(struct mem_ctl_info *mci,
					char *data)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	u32 injectmask;

	if (!pvt->pci_ch[pvt->inject.channel][0])
		return 0;

	pci_read_config_dword(pvt->pci_ch[pvt->inject.channel][0],
			       MC_CHANNEL_ERROR_INJECT, &injectmask);

	debugf0("Inject error read: 0x%018x\n", injectmask);

	if (injectmask & 0x0c)
		pvt->inject.enable = 1;

	return sprintf(data, "%d\n", pvt->inject.enable);
}

#define DECLARE_COUNTER(param)					\
static ssize_t i7core_show_counter_##param(			\
	struct mem_ctl_info *mci,				\
	char *data)						\
{								\
	struct i7core_pvt *pvt = mci->pvt_info;			\
								\
	debugf1("%s()\n", __func__);				\
	if (!pvt->ce_count_available || (pvt->is_registered))	\
		return sprintf(data, "data unavailable\n");	\
	return sprintf(data, "%lu\n",				\
			pvt->udimm_ce_count[param]);		\
}

#define ATTR_COUNTER(param)					\
	{							\
		.attr = {					\
			.name = __stringify(udimm##param),	\
			.mode = (S_IRUGO | S_IWUSR)		\
		},						\
		.show  = i7core_show_counter_##param		\
	}

DECLARE_COUNTER(0);
DECLARE_COUNTER(1);
DECLARE_COUNTER(2);

/*
 * Sysfs struct
 */

static const struct mcidev_sysfs_attribute i7core_addrmatch_attrs[] = {
	ATTR_ADDR_MATCH(channel),
	ATTR_ADDR_MATCH(dimm),
	ATTR_ADDR_MATCH(rank),
	ATTR_ADDR_MATCH(bank),
	ATTR_ADDR_MATCH(page),
	ATTR_ADDR_MATCH(col),
	{ } /* End of list */
};

static const struct mcidev_sysfs_group i7core_inject_addrmatch = {
	.name  = "inject_addrmatch",
	.mcidev_attr = i7core_addrmatch_attrs,
};

static const struct mcidev_sysfs_attribute i7core_udimm_counters_attrs[] = {
	ATTR_COUNTER(0),
	ATTR_COUNTER(1),
	ATTR_COUNTER(2),
	{ .attr = { .name = NULL } }
};

static const struct mcidev_sysfs_group i7core_udimm_counters = {
	.name  = "all_channel_counts",
	.mcidev_attr = i7core_udimm_counters_attrs,
};

static const struct mcidev_sysfs_attribute i7core_sysfs_rdimm_attrs[] = {
	{
		.attr = {
			.name = "inject_section",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show  = i7core_inject_section_show,
		.store = i7core_inject_section_store,
	}, {
		.attr = {
			.name = "inject_type",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show  = i7core_inject_type_show,
		.store = i7core_inject_type_store,
	}, {
		.attr = {
			.name = "inject_eccmask",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show  = i7core_inject_eccmask_show,
		.store = i7core_inject_eccmask_store,
	}, {
		.grp = &i7core_inject_addrmatch,
	}, {
		.attr = {
			.name = "inject_enable",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show  = i7core_inject_enable_show,
		.store = i7core_inject_enable_store,
	},
	{ }	/* End of list */
};

static const struct mcidev_sysfs_attribute i7core_sysfs_udimm_attrs[] = {
	{
		.attr = {
			.name = "inject_section",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show  = i7core_inject_section_show,
		.store = i7core_inject_section_store,
	}, {
		.attr = {
			.name = "inject_type",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show  = i7core_inject_type_show,
		.store = i7core_inject_type_store,
	}, {
		.attr = {
			.name = "inject_eccmask",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show  = i7core_inject_eccmask_show,
		.store = i7core_inject_eccmask_store,
	}, {
		.grp = &i7core_inject_addrmatch,
	}, {
		.attr = {
			.name = "inject_enable",
			.mode = (S_IRUGO | S_IWUSR)
		},
		.show  = i7core_inject_enable_show,
		.store = i7core_inject_enable_store,
	}, {
		.grp = &i7core_udimm_counters,
	},
	{ }	/* End of list */
};

/****************************************************************************
	Device initialization routines: put/get, init/exit
 ****************************************************************************/

/*
 *	i7core_put_all_devices	'put' all the devices that we have
 *				reserved via 'get'
 */
static void i7core_put_devices(struct i7core_dev *i7core_dev)
{
	int i;

	debugf0(__FILE__ ": %s()\n", __func__);
	for (i = 0; i < i7core_dev->n_devs; i++) {
		struct pci_dev *pdev = i7core_dev->pdev[i];
		if (!pdev)
			continue;
		debugf0("Removing dev %02x:%02x.%d\n",
			pdev->bus->number,
			PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
		pci_dev_put(pdev);
	}
}

static void i7core_put_all_devices(void)
{
	struct i7core_dev *i7core_dev, *tmp;

	list_for_each_entry_safe(i7core_dev, tmp, &i7core_edac_list, list) {
		i7core_put_devices(i7core_dev);
		free_i7core_dev(i7core_dev);
	}
}

static void __init i7core_xeon_pci_fixup(const struct pci_id_table *table)
{
	struct pci_dev *pdev = NULL;
	int i;

	/*
	 * On Xeon 55xx, the Intel Quick Path Arch Generic Non-core pci buses
	 * aren't announced by ACPI. So, we need to use a legacy scan probing
	 * to detect them.
	 */
	while (table && table->descr) {
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL, table->descr[0].dev_id, NULL);
		if (unlikely(!pdev)) {
			for (i = 0; i < MAX_SOCKET_BUSES; i++)
				pcibios_scan_specific_bus(255-i);
		}
		pci_dev_put(pdev);
		table++;
	}
}

static unsigned i7core_pci_lastbus(void)
{
	int last_bus = 0, bus;
	struct pci_bus *b = NULL;

	while ((b = pci_find_next_bus(b)) != NULL) {
		bus = b->number;
		debugf0("Found bus %d\n", bus);
		if (bus > last_bus)
			last_bus = bus;
	}

	debugf0("Last bus %d\n", last_bus);

	return last_bus;
}

/*
 *	i7core_get_all_devices	Find and perform 'get' operation on the MCH's
 *			device/functions we want to reference for this driver
 *
 *			Need to 'get' device 16 func 1 and func 2
 */
static int i7core_get_onedevice(struct pci_dev **prev,
				const struct pci_id_table *table,
				const unsigned devno,
				const unsigned last_bus)
{
	struct i7core_dev *i7core_dev;
	const struct pci_id_descr *dev_descr = &table->descr[devno];

	struct pci_dev *pdev = NULL;
	u8 bus = 0;
	u8 socket = 0;

	pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
			      dev_descr->dev_id, *prev);

	/*
	 * On Xeon 55xx, the Intel QuickPath Arch Generic Non-core regs
	 * are at addr 8086:2c40, instead of 8086:2c41. So, we need
	 * to probe for the alternate address in case of failure.
	 */
	if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_I7_NONCORE && !pdev)
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
				      PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT, *prev);

	if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE && !pdev)
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
				      PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT,
				      *prev);

	if (!pdev) {
		if (*prev) {
			*prev = pdev;
			return 0;
		}

		if (dev_descr->optional)
			return 0;

		if (devno == 0)
			return -ENODEV;

		i7core_printk(KERN_INFO,
			"Device not found: dev %02x.%d PCI ID %04x:%04x\n",
			dev_descr->dev, dev_descr->func,
			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);

		/* End of list, leave */
		return -ENODEV;
	}
	bus = pdev->bus->number;

	socket = last_bus - bus;

	i7core_dev = get_i7core_dev(socket);
	if (!i7core_dev) {
		i7core_dev = alloc_i7core_dev(socket, table);
		if (!i7core_dev) {
			pci_dev_put(pdev);
			return -ENOMEM;
		}
	}

	if (i7core_dev->pdev[devno]) {
		i7core_printk(KERN_ERR,
			"Duplicated device for "
			"dev %02x:%02x.%d PCI ID %04x:%04x\n",
			bus, dev_descr->dev, dev_descr->func,
			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
		pci_dev_put(pdev);
		return -ENODEV;
	}

	i7core_dev->pdev[devno] = pdev;

	/* Sanity check */
	if (unlikely(PCI_SLOT(pdev->devfn) != dev_descr->dev ||
			PCI_FUNC(pdev->devfn) != dev_descr->func)) {
		i7core_printk(KERN_ERR,
			"Device PCI ID %04x:%04x "
			"has dev %02x:%02x.%d instead of dev %02x:%02x.%d\n",
			PCI_VENDOR_ID_INTEL, dev_descr->dev_id,
			bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
			bus, dev_descr->dev, dev_descr->func);
		return -ENODEV;
	}

	/* Be sure that the device is enabled */
	if (unlikely(pci_enable_device(pdev) < 0)) {
		i7core_printk(KERN_ERR,
			"Couldn't enable "
			"dev %02x:%02x.%d PCI ID %04x:%04x\n",
			bus, dev_descr->dev, dev_descr->func,
			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
		return -ENODEV;
	}

	debugf0("Detected socket %d dev %02x:%02x.%d PCI ID %04x:%04x\n",
		socket, bus, dev_descr->dev,
		dev_descr->func,
		PCI_VENDOR_ID_INTEL, dev_descr->dev_id);

	/*
	 * As stated in drivers/pci/search.c, the reference count for
	 * @from is always decremented if it is not %NULL. So, since we need
	 * to iterate all devices up to NULL, we need to do a get on each
	 * device.
	 */
	pci_dev_get(pdev);

	*prev = pdev;

	return 0;
}

static int i7core_get_all_devices(void)
{
	int i, rc, last_bus;
	struct pci_dev *pdev = NULL;
	const struct pci_id_table *table = pci_dev_table;

	last_bus = i7core_pci_lastbus();

	while (table && table->descr) {
		for (i = 0; i < table->n_devs; i++) {
			pdev = NULL;
			do {
				rc = i7core_get_onedevice(&pdev, table, i,
							  last_bus);
				if (rc < 0) {
					if (i == 0) {
						i = table->n_devs;
						break;
					}
					i7core_put_all_devices();
					return -ENODEV;
				}
			} while (pdev);
		}
		table++;
	}

	return 0;
}

static int mci_bind_devs(struct mem_ctl_info *mci,
			 struct i7core_dev *i7core_dev)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	struct pci_dev *pdev;
	int i, func, slot;
	char *family;

	pvt->is_registered = false;
	pvt->enable_scrub  = false;
	for (i = 0; i < i7core_dev->n_devs; i++) {
		pdev = i7core_dev->pdev[i];
		if (!pdev)
			continue;

		func = PCI_FUNC(pdev->devfn);
		slot = PCI_SLOT(pdev->devfn);
		if (slot == 3) {
			if (unlikely(func > MAX_MCR_FUNC))
				goto error;
			pvt->pci_mcr[func] = pdev;
		} else if (likely(slot >= 4 && slot < 4 + NUM_CHANS)) {
			if (unlikely(func > MAX_CHAN_FUNC))
				goto error;
			pvt->pci_ch[slot - 4][func] = pdev;
		} else if (!slot && !func) {
			pvt->pci_noncore = pdev;

			/* Detect the processor family */
			switch (pdev->device) {
			case PCI_DEVICE_ID_INTEL_I7_NONCORE:
				family = "Xeon 35xx/ i7core";
				pvt->enable_scrub = false;
				break;
			case PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT:
				family = "i7-800/i5-700";
				pvt->enable_scrub = false;
				break;
			case PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE:
				family = "Xeon 34xx";
				pvt->enable_scrub = false;
				break;
			case PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT:
				family = "Xeon 55xx";
				pvt->enable_scrub = true;
				break;
			case PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_REV2:
				family = "Xeon 56xx / i7-900";
				pvt->enable_scrub = true;
				break;
			default:
				family = "unknown";
				pvt->enable_scrub = false;
			}
			debugf0("Detected a processor type %s\n", family);
		} else
			goto error;

		debugf0("Associated fn %d.%d, dev = %p, socket %d\n",
			PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
			pdev, i7core_dev->socket);

		if (PCI_SLOT(pdev->devfn) == 3 &&
			PCI_FUNC(pdev->devfn) == 2)
			pvt->is_registered = true;
	}

	return 0;

error:
	i7core_printk(KERN_ERR, "Device %d, function %d "
		      "is out of the expected range\n",
		      slot, func);
	return -EINVAL;
}

/****************************************************************************
			Error check routines
 ****************************************************************************/
static void i7core_rdimm_update_csrow(struct mem_ctl_info *mci,
				      const int chan,
				      const int dimm,
				      const int add)
{
	char *msg;
	struct i7core_pvt *pvt = mci->pvt_info;
	int row = pvt->csrow_map[chan][dimm], i;

	for (i = 0; i < add; i++) {
		msg = kasprintf(GFP_KERNEL, "Corrected error "
				"(Socket=%d channel=%d dimm=%d)",
				pvt->i7core_dev->socket, chan, dimm);

		edac_mc_handle_fbd_ce(mci, row, 0, msg);
		kfree(msg);
	}
}

static void i7core_rdimm_update_ce_count(struct mem_ctl_info *mci,
					 const int chan,
					 const int new0,
					 const int new1,
					 const int new2)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	int add0 = 0, add1 = 0, add2 = 0;
	/* Updates CE counters if it is not the first time here */
	if (pvt->ce_count_available) {
		/* Updates CE counters */

		add2 = new2 - pvt->rdimm_last_ce_count[chan][2];
		add1 = new1 - pvt->rdimm_last_ce_count[chan][1];
		add0 = new0 - pvt->rdimm_last_ce_count[chan][0];

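		/*
		 * The hardware CE counters are 15 bits wide (see the
		 * DIMM_{TOP,BOT}_COR_ERR masks), so a negative delta
		 * presumably means the counter wrapped around since the
		 * last read; the code adds back 0x7fff to compensate.
		 */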
		if (add2 < 0)
			add2 += 0x7fff;
		pvt->rdimm_ce_count[chan][2] += add2;

		if (add1 < 0)
			add1 += 0x7fff;
		pvt->rdimm_ce_count[chan][1] += add1;

		if (add0 < 0)
			add0 += 0x7fff;
		pvt->rdimm_ce_count[chan][0] += add0;
	} else
		pvt->ce_count_available = 1;

	/* Store the new values */
	pvt->rdimm_last_ce_count[chan][2] = new2;
	pvt->rdimm_last_ce_count[chan][1] = new1;
	pvt->rdimm_last_ce_count[chan][0] = new0;

	/* Update the EDAC core */
	if (add0 != 0)
		i7core_rdimm_update_csrow(mci, chan, 0, add0);
	if (add1 != 0)
		i7core_rdimm_update_csrow(mci, chan, 1, add1);
	if (add2 != 0)
		i7core_rdimm_update_csrow(mci, chan, 2, add2);

}

static void i7core_rdimm_check_mc_ecc_err(struct mem_ctl_info *mci)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	u32 rcv[3][2];
	int i, new0, new1, new2;

	/* Read DEV 3: FUN 2: MC_COR_ECC_CNT regs directly */
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_0,
								&rcv[0][0]);
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_1,
								&rcv[0][1]);
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_2,
								&rcv[1][0]);
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_3,
								&rcv[1][1]);
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_4,
								&rcv[2][0]);
	pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_5,
								&rcv[2][1]);
	for (i = 0 ; i < 3; i++) {
		debugf3("MC_COR_ECC_CNT%d = 0x%x; MC_COR_ECC_CNT%d = 0x%x\n",
			(i * 2), rcv[i][0], (i * 2) + 1, rcv[i][1]);
		/* if the channel has 3 dimms */
		if (pvt->channel[i].dimms > 2) {
			new0 = DIMM_BOT_COR_ERR(rcv[i][0]);
			new1 = DIMM_TOP_COR_ERR(rcv[i][0]);
			new2 = DIMM_BOT_COR_ERR(rcv[i][1]);
		} else {
			new0 = DIMM_TOP_COR_ERR(rcv[i][0]) +
				DIMM_BOT_COR_ERR(rcv[i][0]);
			new1 = DIMM_TOP_COR_ERR(rcv[i][1]) +
				DIMM_BOT_COR_ERR(rcv[i][1]);
			new2 = 0;
		}

		i7core_rdimm_update_ce_count(mci, i, new0, new1, new2);
	}
}

/* This function is based on the device 3 function 4 registers as described in:
 * Intel Xeon Processor 5500 Series Datasheet Volume 2
 *	http://www.intel.com/Assets/PDF/datasheet/321322.pdf
 * also available at:
 *	http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
 */
static void i7core_udimm_check_mc_ecc_err(struct mem_ctl_info *mci)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	u32 rcv1, rcv0;
	int new0, new1, new2;

	if (!pvt->pci_mcr[4]) {
		debugf0("%s MCR registers not found\n", __func__);
		return;
	}

	/* Corrected test errors */
	pci_read_config_dword(pvt->pci_mcr[4], MC_TEST_ERR_RCV1, &rcv1);
	pci_read_config_dword(pvt->pci_mcr[4], MC_TEST_ERR_RCV0, &rcv0);

	/* Store the new values */
	new2 = DIMM2_COR_ERR(rcv1);
	new1 = DIMM1_COR_ERR(rcv0);
	new0 = DIMM0_COR_ERR(rcv0);

	/* Updates CE counters if it is not the first time here */
	if (pvt->ce_count_available) {
		/* Updates CE counters */
		int add0, add1, add2;

		add2 = new2 - pvt->udimm_last_ce_count[2];
		add1 = new1 - pvt->udimm_last_ce_count[1];
		add0 = new0 - pvt->udimm_last_ce_count[0];

		if (add2 < 0)
			add2 += 0x7fff;
		pvt->udimm_ce_count[2] += add2;

		if (add1 < 0)
			add1 += 0x7fff;
		pvt->udimm_ce_count[1] += add1;

		if (add0 < 0)
			add0 += 0x7fff;
		pvt->udimm_ce_count[0] += add0;

		if (add0 | add1 | add2)
			i7core_printk(KERN_ERR, "New Corrected error(s): "
				      "dimm0: +%d, dimm1: +%d, dimm2 +%d\n",
				      add0, add1, add2);
	} else
		pvt->ce_count_available = 1;

	/* Store the new values */
	pvt->udimm_last_ce_count[2] = new2;
	pvt->udimm_last_ce_count[1] = new1;
	pvt->udimm_last_ce_count[0] = new0;
}
1728
/*
 * According to tables E-11 and E-12 of chapter E.3.3 of the Intel 64 and IA-32
 * Architectures Software Developer's Manual Volume 3B,
 * Nehalem is defined as family 0x06, model 0x1a.
 *
 * The MCA registers used here are the following ones:
 *     struct mce field	MCA Register
 *     m->status	MSR_IA32_MC8_STATUS
 *     m->addr		MSR_IA32_MC8_ADDR
 *     m->misc		MSR_IA32_MC8_MISC
 * In the case of Nehalem, the error information is masked at the .status and
 * .misc fields.
 */
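/*
 * Example decode (illustrative values only): with m->misc = 0x00090000,
 * dimm = (0x00090000 >> 16) & 0x3 = 1 and channel = (0x00090000 >> 18) & 0x3
 * = 2, i.e. the error hit DIMM 1 of channel 2; the upper 32 bits of m->misc
 * carry the ECC syndrome.
 */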
static void i7core_mce_output_error(struct mem_ctl_info *mci,
				    const struct mce *m)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	char *type, *optype, *err, *msg;
	unsigned long error = m->status & 0x1ff0000l;
	u32 optypenum = (m->status >> 4) & 0x07;
	u32 core_err_cnt = (m->status >> 38) & 0x7fff;
	u32 dimm = (m->misc >> 16) & 0x3;
	u32 channel = (m->misc >> 18) & 0x3;
	u32 syndrome = m->misc >> 32;
	u32 errnum = find_first_bit(&error, 32);
	int csrow;

	if (m->mcgstatus & 1)
		type = "FATAL";
	else
		type = "NON_FATAL";

	switch (optypenum) {
	case 0:
		optype = "generic undef request";
		break;
	case 1:
		optype = "read error";
		break;
	case 2:
		optype = "write error";
		break;
	case 3:
		optype = "addr/cmd error";
		break;
	case 4:
		optype = "scrubbing error";
		break;
	default:
		optype = "reserved";
		break;
	}

	switch (errnum) {
	case 16:
		err = "read ECC error";
		break;
	case 17:
		err = "RAS ECC error";
		break;
	case 18:
		err = "write parity error";
		break;
	case 19:
		err = "redundancy loss";
		break;
	case 20:
		err = "reserved";
		break;
	case 21:
		err = "memory range error";
		break;
	case 22:
		err = "RTID out of range";
		break;
	case 23:
		err = "address parity error";
		break;
	case 24:
		err = "byte enable parity error";
		break;
	default:
		err = "unknown";
	}

	/* FIXME: should convert addr into bank and rank information */
	msg = kasprintf(GFP_ATOMIC,
		"%s (addr = 0x%08llx, cpu=%d, Dimm=%d, Channel=%d, "
		"syndrome=0x%08x, count=%d, Err=%08llx:%08llx (%s: %s))\n",
		type, (long long) m->addr, m->cpu, dimm, channel,
		syndrome, core_err_cnt, (long long)m->status,
		(long long)m->misc, optype, err);

	debugf0("%s", msg);

	csrow = pvt->csrow_map[channel][dimm];

	/* Call the helper to output message */
	if (m->mcgstatus & 1)
		edac_mc_handle_fbd_ue(mci, csrow, 0,
				0 /* FIXME: should be channel here */, msg);
	else if (!pvt->is_registered)
		edac_mc_handle_fbd_ce(mci, csrow,
				0 /* FIXME: should be channel here */, msg);

	kfree(msg);
}

/*
 * i7core_check_error	Retrieve and process errors reported by the
 *			hardware. Called by the Core module.
 */
static void i7core_check_error(struct mem_ctl_info *mci)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	int i;
	unsigned count = 0;
	struct mce *m;

	/*
	 * MCE first step: copy all mce errors into a temporary buffer.
	 * We use double buffering here, to reduce the risk of
	 * losing an error.
	 */
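	/*
	 * mce_in and mce_out index a circular buffer of MCE_LOG_LEN entries;
	 * the pending-entry count below is the usual ring arithmetic.
	 * Illustrative example: with MCE_LOG_LEN = 32, mce_in = 30 and
	 * mce_out = 3, count = (3 + 32 - 30) % 32 = 5 entries to drain,
	 * copied in two chunks because they wrap past the end of the array.
	 */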
	smp_rmb();
	count = (pvt->mce_out + MCE_LOG_LEN - pvt->mce_in)
		% MCE_LOG_LEN;
	if (!count)
		goto check_ce_error;

	m = pvt->mce_outentry;
	if (pvt->mce_in + count > MCE_LOG_LEN) {
		unsigned l = MCE_LOG_LEN - pvt->mce_in;

		memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * l);
		smp_wmb();
		pvt->mce_in = 0;
		count -= l;
		m += l;
	}
	memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * count);
	smp_wmb();
	pvt->mce_in += count;

	smp_rmb();
	if (pvt->mce_overrun) {
		i7core_printk(KERN_ERR, "Lost %d memory errors\n",
			      pvt->mce_overrun);
		smp_wmb();
		pvt->mce_overrun = 0;
	}

	/*
	 * MCE second step: parse errors and display
	 */
	for (i = 0; i < count; i++)
		i7core_mce_output_error(mci, &pvt->mce_outentry[i]);

	/*
	 * Now, let's increment CE error counts
	 */
check_ce_error:
	if (!pvt->is_registered)
		i7core_udimm_check_mc_ecc_err(mci);
	else
		i7core_rdimm_check_mc_ecc_err(mci);
}

/*
 * i7core_mce_check_error	Replicates mcelog routine to get errors
 *				This routine simply queues mcelog errors, and
 *				returns. The error itself should be handled
 *				later by i7core_check_error.
 * WARNING: As this routine should be called at NMI time, extra care should
 * be taken to avoid deadlocks, and to be as fast as possible.
 */
static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val,
				  void *data)
{
	struct mce *mce = (struct mce *)data;
	struct i7core_dev *i7_dev;
	struct mem_ctl_info *mci;
	struct i7core_pvt *pvt;

	i7_dev = get_i7core_dev(mce->socketid);
	if (!i7_dev)
		return NOTIFY_BAD;

	mci = i7_dev->mci;
	pvt = mci->pvt_info;

	/*
	 * Just let mcelog handle it if the error is
	 * outside the memory controller
	 */
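	/*
	 * Memory controller errors use the compound MCACOD format
	 * 0000 0000 1MMM CCCC in the low 16 bits of the status register
	 * (per the Intel SDM error-code tables), so bits 15:7 must decode
	 * to exactly 1 for a memory error.
	 */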
	if (((mce->status & 0xffff) >> 7) != 1)
		return NOTIFY_DONE;

	/* Bank 8 registers are the only ones that we know how to handle */
	if (mce->bank != 8)
		return NOTIFY_DONE;

#ifdef CONFIG_SMP
	/* Only handle if it is the right mc controller */
	if (mce->socketid != pvt->i7core_dev->socket)
		return NOTIFY_DONE;
#endif

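	/*
	 * The ring buffer is deliberately lockless: this NMI-time producer
	 * only ever advances mce_out, while the polling consumer in
	 * i7core_check_error() only advances mce_in; the smp_wmb()/smp_rmb()
	 * pairs order the entry writes against the index updates.
	 */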
	smp_rmb();
	if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) {
		smp_wmb();
		pvt->mce_overrun++;
		return NOTIFY_DONE;
	}

	/* Copy memory error at the ringbuffer */
	memcpy(&pvt->mce_entry[pvt->mce_out], mce, sizeof(*mce));
	smp_wmb();
	pvt->mce_out = (pvt->mce_out + 1) % MCE_LOG_LEN;

	/* Handle fatal errors immediately */
	if (mce->mcgstatus & 1)
		i7core_check_error(mci);

	/* Advise mcelog that the errors were handled */
	return NOTIFY_STOP;
}

static struct notifier_block i7_mce_dec = {
	.notifier_call	= i7core_mce_check_error,
};

struct memdev_dmi_entry {
	u8 type;
	u8 length;
	u16 handle;
	u16 phys_mem_array_handle;
	u16 mem_err_info_handle;
	u16 total_width;
	u16 data_width;
	u16 size;
	u8 form;
	u8 device_set;
	u8 device_locator;
	u8 bank_locator;
	u8 memory_type;
	u16 type_detail;
	u16 speed;
	u8 manufacturer;
	u8 serial_number;
	u8 asset_tag;
	u8 part_number;
	u8 attributes;
	u32 extended_size;
	u16 conf_mem_clk_speed;
} __attribute__((__packed__));


/*
 * Decode the DRAM Clock Frequency; be paranoid: make sure that all
 * memory devices show the same speed, and if they don't, then consider
 * all speeds to be invalid.
 */
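/*
 * Note: the SMBIOS type 17 (Memory Device) record grew over successive
 * SMBIOS revisions, which is why dh->length is compared against each
 * field's offset before the field is read below: an older BIOS may not
 * provide conf_mem_clk_speed (or even speed) at all, and reading past
 * dh->length would return garbage.
 */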
static void decode_dclk(const struct dmi_header *dh, void *_dclk_freq)
{
	int *dclk_freq = _dclk_freq;
	u16 dmi_mem_clk_speed;

	if (*dclk_freq == -1)
		return;

	if (dh->type == DMI_ENTRY_MEM_DEVICE) {
		struct memdev_dmi_entry *memdev_dmi_entry =
			(struct memdev_dmi_entry *)dh;
		unsigned long conf_mem_clk_speed_offset =
			(unsigned long)&memdev_dmi_entry->conf_mem_clk_speed -
			(unsigned long)&memdev_dmi_entry->type;
		unsigned long speed_offset =
			(unsigned long)&memdev_dmi_entry->speed -
			(unsigned long)&memdev_dmi_entry->type;

		/* Check that a DIMM is present */
		if (memdev_dmi_entry->size == 0)
			return;

		/*
		 * Pick the configured speed if it's available, otherwise
		 * pick the DIMM speed, or we don't have a speed.
		 */
		if (memdev_dmi_entry->length > conf_mem_clk_speed_offset) {
			dmi_mem_clk_speed =
				memdev_dmi_entry->conf_mem_clk_speed;
		} else if (memdev_dmi_entry->length > speed_offset) {
			dmi_mem_clk_speed = memdev_dmi_entry->speed;
		} else {
			*dclk_freq = -1;
			return;
		}

		if (*dclk_freq == 0) {
			/* First pass, speed was 0 */
			if (dmi_mem_clk_speed > 0) {
				/* Set speed if a valid speed is read */
				*dclk_freq = dmi_mem_clk_speed;
			} else {
				/* Otherwise we don't have a valid speed */
				*dclk_freq = -1;
			}
		} else if (*dclk_freq > 0 &&
			   *dclk_freq != dmi_mem_clk_speed) {
			/*
			 * If we have a speed, check that all DIMMS are the
			 * same speed, otherwise set the speed as invalid.
			 */
			*dclk_freq = -1;
		}
	}
}

/*
 * The default DCLK frequency is used as a fallback if we
 * fail to find anything reliable in the DMI. The value
 * is taken straight from the datasheet.
 */
#define DEFAULT_DCLK_FREQ 800

static int get_dclk_freq(void)
{
	int dclk_freq = 0;

	dmi_walk(decode_dclk, (void *)&dclk_freq);

	if (dclk_freq < 1)
		return DEFAULT_DCLK_FREQ;

	return dclk_freq;
}

/*
 * set_sdram_scrub_rate		This routine sets byte/sec bandwidth scrub
 *				rate to hardware according to the
 *				SCRUBINTERVAL formula found in the datasheet.
 */
static int set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 new_bw)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	struct pci_dev *pdev;
	u32 dw_scrub;
	u32 dw_ssr;

	/* Get data from the MC register, function 2 */
	pdev = pvt->pci_mcr[2];
	if (!pdev)
		return -ENODEV;

	pci_read_config_dword(pdev, MC_SCRUB_CONTROL, &dw_scrub);

	if (new_bw == 0) {
		/* Prepare to disable patrol scrub */
		dw_scrub &= ~STARTSCRUB;
		/* Stop the patrol scrub engine */
		write_and_test(pdev, MC_SCRUB_CONTROL,
			       dw_scrub & ~SCRUBINTERVAL_MASK);

		/* Get current status of scrub rate and set bit to disable */
		pci_read_config_dword(pdev, MC_SSRCONTROL, &dw_ssr);
		dw_ssr &= ~SSR_MODE_MASK;
		dw_ssr |= SSR_MODE_DISABLE;
	} else {
		const int cache_line_size = 64;
		const u32 freq_dclk_mhz = pvt->dclk_freq;
		unsigned long long scrub_interval;
		/*
		 * Translate the desired scrub rate to a register value and
		 * program the corresponding register value.
		 */
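		/*
		 * SCRUBINTERVAL is the number of DCLK cycles between two
		 * scrubbed cache lines. Illustrative example: with the
		 * default 800 MHz DCLK and a requested bandwidth of
		 * 10^9 bytes/sec, 800 * 64 * 10^6 / 10^9 = 51 cycles
		 * (truncated) per 64-byte line.
		 */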
		scrub_interval = (unsigned long long)freq_dclk_mhz *
			cache_line_size * 1000000;
		do_div(scrub_interval, new_bw);

		if (!scrub_interval || scrub_interval > SCRUBINTERVAL_MASK)
			return -EINVAL;

		dw_scrub = SCRUBINTERVAL_MASK & scrub_interval;

		/* Start the patrol scrub engine */
		pci_write_config_dword(pdev, MC_SCRUB_CONTROL,
				       STARTSCRUB | dw_scrub);

		/* Get current status of scrub rate and set bit to enable */
		pci_read_config_dword(pdev, MC_SSRCONTROL, &dw_ssr);
		dw_ssr &= ~SSR_MODE_MASK;
		dw_ssr |= SSR_MODE_ENABLE;
	}
	/* Disable or enable scrubbing */
	pci_write_config_dword(pdev, MC_SSRCONTROL, dw_ssr);

	return new_bw;
}

/*
 * get_sdram_scrub_rate		This routine converts the current scrub rate
 *				value into byte/sec bandwidth according to
 *				the SCRUBINTERVAL formula found in the
 *				datasheet.
 */
static int get_sdram_scrub_rate(struct mem_ctl_info *mci)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	struct pci_dev *pdev;
	const u32 cache_line_size = 64;
	const u32 freq_dclk_mhz = pvt->dclk_freq;
	unsigned long long scrub_rate;
	u32 scrubval;

	/* Get data from the MC register, function 2 */
	pdev = pvt->pci_mcr[2];
	if (!pdev)
		return -ENODEV;

	/* Get current scrub control data */
	pci_read_config_dword(pdev, MC_SCRUB_CONTROL, &scrubval);

	/* Mask highest 8-bits to 0 */
	scrubval &= SCRUBINTERVAL_MASK;
	if (!scrubval)
		return 0;

	/* Calculate scrub rate value into byte/sec bandwidth */
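	/*
	 * This inverts the SCRUBINTERVAL computation in
	 * set_sdram_scrub_rate(): bandwidth = dclk_mhz * 10^6 * 64 /
	 * scrubval. Illustrative example: scrubval = 51 at 800 MHz gives
	 * 800 * 10^6 * 64 / 51, roughly 10^9 bytes/sec.
	 */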
	scrub_rate = (unsigned long long)freq_dclk_mhz *
		1000000 * cache_line_size;
	do_div(scrub_rate, scrubval);
	return (int)scrub_rate;
}

static void enable_sdram_scrub_setting(struct mem_ctl_info *mci)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	u32 pci_lock;

	/* Unlock writes to pci registers */
	pci_read_config_dword(pvt->pci_noncore, MC_CFG_CONTROL, &pci_lock);
	pci_lock &= ~0x3;
	pci_write_config_dword(pvt->pci_noncore, MC_CFG_CONTROL,
			       pci_lock | MC_CFG_UNLOCK);

	mci->set_sdram_scrub_rate = set_sdram_scrub_rate;
	mci->get_sdram_scrub_rate = get_sdram_scrub_rate;
}

static void disable_sdram_scrub_setting(struct mem_ctl_info *mci)
{
	struct i7core_pvt *pvt = mci->pvt_info;
	u32 pci_lock;

	/* Lock writes to pci registers */
	pci_read_config_dword(pvt->pci_noncore, MC_CFG_CONTROL, &pci_lock);
	pci_lock &= ~0x3;
	pci_write_config_dword(pvt->pci_noncore, MC_CFG_CONTROL,
			       pci_lock | MC_CFG_LOCK);
}

static void i7core_pci_ctl_create(struct i7core_pvt *pvt)
{
	pvt->i7core_pci = edac_pci_create_generic_ctl(
						&pvt->i7core_dev->pdev[0]->dev,
						EDAC_MOD_STR);
	if (unlikely(!pvt->i7core_pci))
		i7core_printk(KERN_WARNING,
			      "Unable to setup PCI error report via EDAC\n");
}

static void i7core_pci_ctl_release(struct i7core_pvt *pvt)
{
	if (likely(pvt->i7core_pci))
		edac_pci_release_generic_ctl(pvt->i7core_pci);
	else
		i7core_printk(KERN_ERR,
			      "Couldn't find mem_ctl_info for socket %d\n",
			      pvt->i7core_dev->socket);
	pvt->i7core_pci = NULL;
}

static void i7core_unregister_mci(struct i7core_dev *i7core_dev)
{
	struct mem_ctl_info *mci = i7core_dev->mci;
	struct i7core_pvt *pvt;

	if (unlikely(!mci || !mci->pvt_info)) {
		debugf0("MC: " __FILE__ ": %s(): dev = %p\n",
			__func__, &i7core_dev->pdev[0]->dev);

		i7core_printk(KERN_ERR, "Couldn't find mci handler\n");
		return;
	}

	pvt = mci->pvt_info;

	debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n",
		__func__, mci, &i7core_dev->pdev[0]->dev);

	/* Disable scrubrate setting */
	if (pvt->enable_scrub)
		disable_sdram_scrub_setting(mci);

	atomic_notifier_chain_unregister(&x86_mce_decoder_chain, &i7_mce_dec);

	/* Disable EDAC polling */
	i7core_pci_ctl_release(pvt);

	/* Remove MC sysfs nodes */
	edac_mc_del_mc(mci->dev);

	debugf1("%s: free mci struct\n", mci->ctl_name);
	kfree(mci->ctl_name);
	edac_mc_free(mci);
	i7core_dev->mci = NULL;
}

static int i7core_register_mci(struct i7core_dev *i7core_dev)
{
	struct mem_ctl_info *mci;
	struct i7core_pvt *pvt;
	int rc, channels, csrows;

	/* Check the number of active and not disabled channels */
	rc = i7core_get_active_channels(i7core_dev->socket, &channels, &csrows);
	if (unlikely(rc < 0))
		return rc;

	/* allocate a new MC control structure */
	mci = edac_mc_alloc(sizeof(*pvt), csrows, channels, i7core_dev->socket);
	if (unlikely(!mci))
		return -ENOMEM;

	debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n",
		__func__, mci, &i7core_dev->pdev[0]->dev);

	pvt = mci->pvt_info;
	memset(pvt, 0, sizeof(*pvt));

	/* Associates i7core_dev and mci for future usage */
	pvt->i7core_dev = i7core_dev;
	i7core_dev->mci = mci;

	/*
	 * FIXME: how to handle RDDR3 at MCI level? It is possible to have
	 * Mixed RDDR3/UDDR3 with Nehalem, provided that they are on different
	 * memory channels
	 */
	mci->mtype_cap = MEM_FLAG_DDR3;
	mci->edac_ctl_cap = EDAC_FLAG_NONE;
	mci->edac_cap = EDAC_FLAG_NONE;
	mci->mod_name = "i7core_edac.c";
	mci->mod_ver = I7CORE_REVISION;
	mci->ctl_name = kasprintf(GFP_KERNEL, "i7 core #%d",
				  i7core_dev->socket);
	mci->dev_name = pci_name(i7core_dev->pdev[0]);
	mci->ctl_page_to_phys = NULL;

	/* Store pci devices at mci for faster access */
	rc = mci_bind_devs(mci, i7core_dev);
	if (unlikely(rc < 0))
		goto fail0;

	if (pvt->is_registered)
		mci->mc_driver_sysfs_attributes = i7core_sysfs_rdimm_attrs;
	else
		mci->mc_driver_sysfs_attributes = i7core_sysfs_udimm_attrs;

	/* Get dimm basic config */
	get_dimm_config(mci);
	/* record ptr to the generic device */
	mci->dev = &i7core_dev->pdev[0]->dev;
	/* Set the function pointer to an actual operation function */
	mci->edac_check = i7core_check_error;

	/* Enable scrubrate setting */
	if (pvt->enable_scrub)
		enable_sdram_scrub_setting(mci);

	/* add this new MC control structure to EDAC's list of MCs */
	if (unlikely(edac_mc_add_mc(mci))) {
		debugf0("MC: " __FILE__
			": %s(): failed edac_mc_add_mc()\n", __func__);
		/* FIXME: perhaps some code should go here that disables error
		 * reporting if we just enabled it
		 */

		rc = -EINVAL;
		goto fail0;
	}

	/* Default error mask is any memory */
	pvt->inject.channel = 0;
	pvt->inject.dimm = -1;
	pvt->inject.rank = -1;
	pvt->inject.bank = -1;
	pvt->inject.page = -1;
	pvt->inject.col = -1;

	/* allocating generic PCI control info */
	i7core_pci_ctl_create(pvt);

	/* DCLK for scrub rate setting */
	pvt->dclk_freq = get_dclk_freq();

	atomic_notifier_chain_register(&x86_mce_decoder_chain, &i7_mce_dec);

	return 0;

fail0:
	kfree(mci->ctl_name);
	edac_mc_free(mci);
	i7core_dev->mci = NULL;
	return rc;
}

/*
 *	i7core_probe	Probe for ONE instance of the device to see if it is
 *			present.
 *	return:
 *		0 if a device was found
 *		< 0 on error
 */
static int __devinit i7core_probe(struct pci_dev *pdev,
				  const struct pci_device_id *id)
{
	int rc, count = 0;
	struct i7core_dev *i7core_dev;

	/* get the pci devices we want to reserve for our use */
	mutex_lock(&i7core_edac_lock);

	/*
	 * All memory controllers are allocated at the first pass.
	 */
	if (unlikely(probed >= 1)) {
		mutex_unlock(&i7core_edac_lock);
		return -ENODEV;
	}
	probed++;

	rc = i7core_get_all_devices();
	if (unlikely(rc < 0))
		goto fail0;

	list_for_each_entry(i7core_dev, &i7core_edac_list, list) {
		count++;
		rc = i7core_register_mci(i7core_dev);
		if (unlikely(rc < 0))
			goto fail1;
	}

	/*
	 * Nehalem-EX uses a different memory controller. However, as the
	 * memory controller is not visible on some Nehalem/Nehalem-EP, we
	 * need to indirectly probe via an X58 PCI device. The same devices
	 * are found on (some) Nehalem-EX. So, on those machines, the
	 * probe routine needs to return -ENODEV, as the actual Memory
	 * Controller registers won't be detected.
	 */
	if (!count) {
		rc = -ENODEV;
		goto fail1;
	}

	i7core_printk(KERN_INFO,
		      "Driver loaded, %d memory controller(s) found.\n",
		      count);

	mutex_unlock(&i7core_edac_lock);
	return 0;

fail1:
	list_for_each_entry(i7core_dev, &i7core_edac_list, list)
		i7core_unregister_mci(i7core_dev);

	i7core_put_all_devices();
fail0:
	mutex_unlock(&i7core_edac_lock);
	return rc;
}

/*
 *	i7core_remove	destructor for one instance of device
 *
 */
static void __devexit i7core_remove(struct pci_dev *pdev)
{
	struct i7core_dev *i7core_dev;

	debugf0(__FILE__ ": %s()\n", __func__);

	/*
	 * There is a problem here: the pdev value for removal will be wrong,
	 * since it will point to the X58 register used to detect that the
	 * machine is a Nehalem or later design. However, due to the way
	 * several PCI devices are grouped together to provide MC
	 * functionality, we need to use a different method for releasing
	 * the devices.
	 */
	mutex_lock(&i7core_edac_lock);

	if (unlikely(!probed)) {
		mutex_unlock(&i7core_edac_lock);
		return;
	}

	list_for_each_entry(i7core_dev, &i7core_edac_list, list)
		i7core_unregister_mci(i7core_dev);

	/* Release PCI resources */
	i7core_put_all_devices();

	probed--;

	mutex_unlock(&i7core_edac_lock);
}

MODULE_DEVICE_TABLE(pci, i7core_pci_tbl);

/*
 *	i7core_driver	pci_driver structure for this module
 *
 */
static struct pci_driver i7core_driver = {
	.name     = "i7core_edac",
	.probe    = i7core_probe,
	.remove   = __devexit_p(i7core_remove),
	.id_table = i7core_pci_tbl,
};

/*
 *	i7core_init		Module entry function
 *			Try to initialize this module for its devices
 */
static int __init i7core_init(void)
{
	int pci_rc;

	debugf2("MC: " __FILE__ ": %s()\n", __func__);

	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
	opstate_init();

	if (use_pci_fixup)
		i7core_xeon_pci_fixup(pci_dev_table);

	pci_rc = pci_register_driver(&i7core_driver);

	if (pci_rc >= 0)
		return 0;

	i7core_printk(KERN_ERR, "Failed to register device with error %d.\n",
		      pci_rc);

	return pci_rc;
}

/*
 *	i7core_exit()	Module exit function
 *			Unregister the driver
 */
static void __exit i7core_exit(void)
{
	debugf2("MC: " __FILE__ ": %s()\n", __func__);
	pci_unregister_driver(&i7core_driver);
}

module_init(i7core_init);
module_exit(i7core_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
MODULE_DESCRIPTION("MC Driver for Intel i7 Core memory controllers - "
		   I7CORE_REVISION);

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");