edac: Use more normal debugging macro style

drivers/edac/edac_mc.c
/*
 * edac_mc kernel module
 * (C) 2005, 2006 Linux Networx (http://lnxi.com)
 * This file may be distributed under the terms of the
 * GNU General Public License.
 *
 * Written by Thayne Harbaugh
 * Based on work by Dan Hollis <goemon at anime dot net> and others.
 *	http://www.anime.net/~goemon/linux-ecc/
 *
 * Modified by Dave Peterson and Doug Thompson
 *
 */

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/ctype.h>
#include <linux/edac.h>
#include <linux/bitops.h>
#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/edac.h>
#include "edac_core.h"
#include "edac_module.h"

#define CREATE_TRACE_POINTS
#define TRACE_INCLUDE_PATH ../../include/ras
#include <ras/ras_event.h>

/* lock to memory controller's control array */
static DEFINE_MUTEX(mem_ctls_mutex);
static LIST_HEAD(mc_devices);

#ifdef CONFIG_EDAC_DEBUG

static void edac_mc_dump_channel(struct rank_info *chan)
{
	debugf4("\tchannel = %p\n", chan);
	debugf4("\tchannel->chan_idx = %d\n", chan->chan_idx);
	debugf4("\tchannel->csrow = %p\n\n", chan->csrow);
	debugf4("\tchannel->dimm = %p\n", chan->dimm);
}

static void edac_mc_dump_dimm(struct dimm_info *dimm)
{
	int i;

	debugf4("\tdimm = %p\n", dimm);
	debugf4("\tdimm->label = '%s'\n", dimm->label);
	debugf4("\tdimm->nr_pages = 0x%x\n", dimm->nr_pages);
	debugf4("\tdimm location ");
	for (i = 0; i < dimm->mci->n_layers; i++) {
		printk(KERN_CONT "%d", dimm->location[i]);
		if (i < dimm->mci->n_layers - 1)
			printk(KERN_CONT ".");
	}
	printk(KERN_CONT "\n");
	debugf4("\tdimm->grain = %d\n", dimm->grain);
70 debugf4("\tdimm->nr_pages = 0x%x\n", dimm->nr_pages);
da9bb1d2
AC
71}

static void edac_mc_dump_csrow(struct csrow_info *csrow)
{
	debugf4("\tcsrow = %p\n", csrow);
	debugf4("\tcsrow->csrow_idx = %d\n", csrow->csrow_idx);
	debugf4("\tcsrow->first_page = 0x%lx\n", csrow->first_page);
	debugf4("\tcsrow->last_page = 0x%lx\n", csrow->last_page);
	debugf4("\tcsrow->page_mask = 0x%lx\n", csrow->page_mask);
	debugf4("\tcsrow->nr_channels = %d\n", csrow->nr_channels);
	debugf4("\tcsrow->channels = %p\n", csrow->channels);
	debugf4("\tcsrow->mci = %p\n\n", csrow->mci);
}

static void edac_mc_dump_mci(struct mem_ctl_info *mci)
{
	debugf3("\tmci = %p\n", mci);
	debugf3("\tmci->mtype_cap = %lx\n", mci->mtype_cap);
	debugf3("\tmci->edac_ctl_cap = %lx\n", mci->edac_ctl_cap);
	debugf3("\tmci->edac_cap = %lx\n", mci->edac_cap);
	debugf4("\tmci->edac_check = %p\n", mci->edac_check);
	debugf3("\tmci->nr_csrows = %d, csrows = %p\n",
		mci->nr_csrows, mci->csrows);
	debugf3("\tmci->nr_dimms = %d, dimms = %p\n",
		mci->tot_dimms, mci->dimms);
	debugf3("\tdev = %p\n", mci->pdev);
	debugf3("\tmod_name:ctl_name = %s:%s\n", mci->mod_name, mci->ctl_name);
	debugf3("\tpvt_info = %p\n\n", mci->pvt_info);
}

#endif				/* CONFIG_EDAC_DEBUG */

/*
 * keep those in sync with the enum mem_type
 */
const char *edac_mem_types[] = {
	"Empty csrow",
	"Reserved csrow type",
	"Unknown csrow type",
	"Fast page mode RAM",
	"Extended data out RAM",
	"Burst Extended data out RAM",
	"Single data rate SDRAM",
	"Registered single data rate SDRAM",
	"Double data rate SDRAM",
	"Registered Double data rate SDRAM",
	"Rambus DRAM",
	"Unbuffered DDR2 RAM",
	"Fully buffered DDR2",
	"Registered DDR2 RAM",
	"Rambus XDR",
	"Unbuffered DDR3 RAM",
	"Registered DDR3 RAM",
};
EXPORT_SYMBOL_GPL(edac_mem_types);

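/*
 * Illustrative note (not part of the original file): edac_mem_types is
 * indexed by enum mem_type from <linux/edac.h>, so a driver can turn a
 * detected memory type into a human-readable string, e.g.:
 *
 *	const char *name = edac_mem_types[MEM_DDR3];
 *
 * which yields "Unbuffered DDR3 RAM" with the table above.
 */
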
/**
 * edac_align_ptr - Prepares the pointer offsets for a single-shot allocation
 * @p:		pointer to a pointer with the memory offset to be used. At
 *		return, this will be incremented to point to the next offset
 * @size:	Size of the data structure to be reserved
 * @n_elems:	Number of elements that should be reserved
 *
 * If 'size' is a constant, the compiler will optimize this whole function
 * down to either a no-op or the addition of a constant to the value of '*p'.
 *
 * The 'p' pointer is absolutely needed to keep the proper advancing
 * further in memory to the proper offsets when allocating the struct along
 * with its embedded structs, as edac_mc_alloc() does it below, for example.
 *
 * At return, the pointer 'p' will be incremented to be used on a next call
 * to this function.
 */
void *edac_align_ptr(void **p, unsigned size, int n_elems)
{
	unsigned align, r;
	void *ptr = *p;

	*p += size * n_elems;

	/*
	 * 'p' can possibly be an unaligned item X such that sizeof(X) is
	 * 'size'.  Adjust 'p' so that its alignment is at least as
	 * stringent as what the compiler would provide for X and return
	 * the aligned result.
	 * Here we assume that the alignment of a "long long" is the most
	 * stringent alignment that the compiler will ever provide by default.
	 * As far as I know, this is a reasonable assumption.
	 */
	if (size > sizeof(long))
		align = sizeof(long long);
	else if (size > sizeof(int))
		align = sizeof(long);
	else if (size > sizeof(short))
		align = sizeof(int);
	else if (size > sizeof(char))
		align = sizeof(short);
	else
		return (char *)ptr;

	r = (unsigned long)ptr % align;

	if (r == 0)
		return (char *)ptr;

	*p += align - r;

	return (void *)(((unsigned long)ptr) + align - r);
}

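/*
 * Worked example (illustrative, not part of the original file): two calls
 * laying out a struct followed by an array of u32 counters, starting from
 * offset 0:
 *
 *	void *ptr = NULL;
 *	struct mem_ctl_info *mci;
 *	u32 *counters;
 *
 *	mci = edac_align_ptr(&ptr, sizeof(*mci), 1);
 *	counters = edac_align_ptr(&ptr, sizeof(u32), 8);
 *
 * If sizeof(*mci) leaves *p at an offset that is not a multiple of 4, the
 * second call rounds the returned offset up to the alignment of u32. The
 * returned values are offsets, not usable pointers: they only become valid
 * addresses once rebased onto the kzalloc()ed block, as edac_mc_alloc()
 * does below.
 */
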
/**
 * edac_mc_alloc: Allocate and partially fill a struct mem_ctl_info structure
 * @mc_num:	Memory controller number
 * @n_layers:	Number of MC hierarchy layers
 * @layers:	Describes each layer as seen by the Memory Controller
 * @sz_pvt:	size of private storage needed
 *
 * Everything is kmalloc'ed as one big chunk - more efficient.
 * It can only be used if all structures have the same lifetime - otherwise
 * you have to allocate and initialize your own structures.
 *
 * Use edac_mc_free() to free mc structures allocated by this function.
 *
 * NOTE: drivers handle multi-rank memories in different ways: in some
 * drivers, one multi-rank memory stick is mapped as one entry, while, in
 * others, a single multi-rank memory stick would be mapped into several
 * entries. Currently, this function will allocate multiple struct dimm_info
 * on such scenarios, as grouping the multiple ranks would require changes
 * in the drivers.
 *
 * Returns:
 *	On failure: NULL
 *	On success: struct mem_ctl_info pointer
 */
struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
				   unsigned n_layers,
				   struct edac_mc_layer *layers,
				   unsigned sz_pvt)
{
	struct mem_ctl_info *mci;
	struct edac_mc_layer *layer;
	struct csrow_info *csr;
	struct rank_info *chan;
	struct dimm_info *dimm;
	u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS];
	unsigned pos[EDAC_MAX_LAYERS];
	unsigned size, tot_dimms = 1, count = 1;
	unsigned tot_csrows = 1, tot_channels = 1, tot_errcount = 0;
	void *pvt, *p, *ptr = NULL;
	int i, j, row, chn, n, len, off;
	bool per_rank = false;

	BUG_ON(n_layers > EDAC_MAX_LAYERS || n_layers == 0);
	/*
	 * Calculate the total amount of dimms and csrows/cschannels while
	 * in the old API emulation mode
	 */
	for (i = 0; i < n_layers; i++) {
		tot_dimms *= layers[i].size;
		if (layers[i].is_virt_csrow)
			tot_csrows *= layers[i].size;
		else
			tot_channels *= layers[i].size;

		if (layers[i].type == EDAC_MC_LAYER_CHIP_SELECT)
			per_rank = true;
	}

	/* Figure out the offsets of the various items from the start of an mc
	 * structure.  We want the alignment of each item to be at least as
	 * stringent as what the compiler would provide if we could simply
	 * hardcode everything into a single struct.
	 */
	mci = edac_align_ptr(&ptr, sizeof(*mci), 1);
	layer = edac_align_ptr(&ptr, sizeof(*layer), n_layers);
	for (i = 0; i < n_layers; i++) {
		count *= layers[i].size;
		debugf4("errcount layer %d size %d\n", i, count);
		ce_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count);
		ue_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count);
		tot_errcount += 2 * count;
	}

	debugf4("allocating %d error counters\n", tot_errcount);
	pvt = edac_align_ptr(&ptr, sz_pvt, 1);
	size = ((unsigned long)pvt) + sz_pvt;

	debugf1("allocating %u bytes for mci data (%d %s, %d csrows/channels)\n",
		size,
		tot_dimms,
		per_rank ? "ranks" : "dimms",
		tot_csrows * tot_channels);

	mci = kzalloc(size, GFP_KERNEL);
	if (mci == NULL)
		return NULL;

	/* Adjust pointers so they point within the memory we just allocated
	 * rather than an imaginary chunk of memory located at address 0.
	 */
	layer = (struct edac_mc_layer *)(((char *)mci) + ((unsigned long)layer));
	for (i = 0; i < n_layers; i++) {
		mci->ce_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ce_per_layer[i]));
		mci->ue_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ue_per_layer[i]));
	}
	pvt = sz_pvt ? (((char *)mci) + ((unsigned long)pvt)) : NULL;

	/* setup index and various internal pointers */
	mci->mc_idx = mc_num;
	mci->tot_dimms = tot_dimms;
	mci->pvt_info = pvt;
	mci->n_layers = n_layers;
	mci->layers = layer;
	memcpy(mci->layers, layers, sizeof(*layer) * n_layers);
	mci->nr_csrows = tot_csrows;
	mci->num_cschannel = tot_channels;
	mci->mem_is_per_rank = per_rank;

	/*
	 * Allocate and fill the csrow/channels structs
	 */
	mci->csrows = kcalloc(tot_csrows, sizeof(*mci->csrows), GFP_KERNEL);
	if (!mci->csrows)
		goto error;
	for (row = 0; row < tot_csrows; row++) {
		csr = kzalloc(sizeof(**mci->csrows), GFP_KERNEL);
		if (!csr)
			goto error;
		mci->csrows[row] = csr;
		csr->csrow_idx = row;
		csr->mci = mci;
		csr->nr_channels = tot_channels;
		csr->channels = kcalloc(tot_channels, sizeof(*csr->channels),
					GFP_KERNEL);
		if (!csr->channels)
			goto error;

		for (chn = 0; chn < tot_channels; chn++) {
			chan = kzalloc(sizeof(**csr->channels), GFP_KERNEL);
			if (!chan)
				goto error;
			csr->channels[chn] = chan;
			chan->chan_idx = chn;
			chan->csrow = csr;
		}
	}

	/*
	 * Allocate and fill the dimm structs
	 */
	mci->dimms = kcalloc(tot_dimms, sizeof(*mci->dimms), GFP_KERNEL);
	if (!mci->dimms)
		goto error;

	memset(&pos, 0, sizeof(pos));
	row = 0;
	chn = 0;
	debugf4("initializing %d %s\n", tot_dimms,
		per_rank ? "ranks" : "dimms");
	for (i = 0; i < tot_dimms; i++) {
		chan = mci->csrows[row]->channels[chn];
		off = EDAC_DIMM_OFF(layer, n_layers, pos[0], pos[1], pos[2]);
		if (off < 0 || off >= tot_dimms) {
			edac_mc_printk(mci, KERN_ERR, "EDAC core bug: EDAC_DIMM_OFF is trying to do an illegal data access\n");
			goto error;
		}

		dimm = kzalloc(sizeof(**mci->dimms), GFP_KERNEL);
		if (!dimm)
			goto error;
		mci->dimms[off] = dimm;
		dimm->mci = mci;

		debugf2("%d: %s%i (%d:%d:%d): row %d, chan %d\n", i,
			per_rank ? "rank" : "dimm", off,
			pos[0], pos[1], pos[2], row, chn);

		/*
		 * Copy DIMM location and initialize it.
		 */
		len = sizeof(dimm->label);
		p = dimm->label;
		n = snprintf(p, len, "mc#%u", mc_num);
		p += n;
		len -= n;
		for (j = 0; j < n_layers; j++) {
			n = snprintf(p, len, "%s#%u",
				     edac_layer_name[layers[j].type],
				     pos[j]);
			p += n;
			len -= n;
			dimm->location[j] = pos[j];

			if (len <= 0)
				break;
		}

		/* Link it to the csrows old API data */
		chan->dimm = dimm;
		dimm->csrow = row;
		dimm->cschannel = chn;

		/* Increment csrow location */
		row++;
		if (row == tot_csrows) {
			row = 0;
			chn++;
		}

		/* Increment dimm location */
		for (j = n_layers - 1; j >= 0; j--) {
			pos[j]++;
			if (pos[j] < layers[j].size)
				break;
			pos[j] = 0;
		}
	}

	mci->op_state = OP_ALLOC;

	/* at this point, the root kobj is valid, and in order to
	 * 'free' the object, then the function:
	 * edac_mc_unregister_sysfs_main_kobj() must be called
	 * which will perform kobj unregistration and the actual free
	 * will occur during the kobject callback operation
	 */

	return mci;

error:
	if (mci->dimms) {
		for (i = 0; i < tot_dimms; i++)
			kfree(mci->dimms[i]);
		kfree(mci->dimms);
	}
	if (mci->csrows) {
		for (row = 0; row < tot_csrows; row++) {
			csr = mci->csrows[row];
			if (csr) {
				for (chn = 0; chn < tot_channels; chn++)
					kfree(csr->channels[chn]);
				kfree(csr);
			}
		}
		kfree(mci->csrows);
	}
	kfree(mci);

	return NULL;
}
EXPORT_SYMBOL_GPL(edac_mc_alloc);

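/*
 * Usage sketch (illustrative, not part of the original file): a hypothetical
 * driver for a controller with 4 chip-select rows and 2 channels could
 * allocate its mci like this (struct hypothetical_pvt is an assumption):
 *
 *	struct edac_mc_layer layers[2];
 *	struct mem_ctl_info *mci;
 *
 *	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
 *	layers[0].size = 4;
 *	layers[0].is_virt_csrow = true;
 *	layers[1].type = EDAC_MC_LAYER_CHANNEL;
 *	layers[1].size = 2;
 *	layers[1].is_virt_csrow = false;
 *
 *	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
 *			    sizeof(struct hypothetical_pvt));
 *	if (!mci)
 *		return -ENOMEM;
 */
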
/**
 * edac_mc_free
 *	'Free' a previously allocated 'mci' structure
 * @mci: pointer to a struct mem_ctl_info structure
 */
void edac_mc_free(struct mem_ctl_info *mci)
{
	debugf1("\n");

	/* the mci instance is freed here, when the sysfs object is dropped */
	edac_unregister_sysfs(mci);
}
EXPORT_SYMBOL_GPL(edac_mc_free);

/**
 * find_mci_by_dev
 *
 *	scan list of controllers looking for the one that manages
 *	the 'dev' device
 * @dev: pointer to a struct device related with the MCI
 */
struct mem_ctl_info *find_mci_by_dev(struct device *dev)
{
	struct mem_ctl_info *mci;
	struct list_head *item;

	debugf3("\n");

	list_for_each(item, &mc_devices) {
		mci = list_entry(item, struct mem_ctl_info, link);

		if (mci->pdev == dev)
			return mci;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(find_mci_by_dev);

/*
 * handler for EDAC to check if NMI type handler has asserted interrupt
 */
static int edac_mc_assert_error_check_and_clear(void)
{
	int old_state;

	if (edac_op_state == EDAC_OPSTATE_POLL)
		return 1;

	old_state = edac_err_assert;
	edac_err_assert = 0;

	return old_state;
}

/*
 * edac_mc_workq_function
 *	performs the operation scheduled by a workq request
 */
static void edac_mc_workq_function(struct work_struct *work_req)
{
	struct delayed_work *d_work = to_delayed_work(work_req);
	struct mem_ctl_info *mci = to_edac_mem_ctl_work(d_work);

	mutex_lock(&mem_ctls_mutex);

	/* if this control struct has moved to offline state, we are done */
	if (mci->op_state == OP_OFFLINE) {
		mutex_unlock(&mem_ctls_mutex);
		return;
	}

	/* Only poll controllers that are running polled and have a check */
	if (edac_mc_assert_error_check_and_clear() && (mci->edac_check != NULL))
		mci->edac_check(mci);

	mutex_unlock(&mem_ctls_mutex);

	/* Reschedule */
	queue_delayed_work(edac_workqueue, &mci->work,
			   msecs_to_jiffies(edac_mc_get_poll_msec()));
}

/*
 * edac_mc_workq_setup
 *	initialize a workq item for this mci
 *	passing in the new delay period in msec
 *
 *	locking model:
 *
 *		called with the mem_ctls_mutex held
 */
static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec)
{
	debugf0("\n");

	/* if this instance is not in the POLL state, then simply return */
	if (mci->op_state != OP_RUNNING_POLL)
		return;

	INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
	queue_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec));
}

/*
 * edac_mc_workq_teardown
 *	stop the workq processing on this mci
 *
 *	locking model:
 *
 *		called WITHOUT lock held
 */
static void edac_mc_workq_teardown(struct mem_ctl_info *mci)
{
	int status;

	if (mci->op_state != OP_RUNNING_POLL)
		return;

	status = cancel_delayed_work(&mci->work);
	if (status == 0) {
		debugf0("not canceled, flush the queue\n");

		/* workq instance might be running, wait for it */
		flush_workqueue(edac_workqueue);
	}
}

/*
 * edac_mc_reset_delay_period(int value)
 *
 *	user space has updated our poll period value, need to
 *	reset our workq delays
 */
void edac_mc_reset_delay_period(int value)
{
	struct mem_ctl_info *mci;
	struct list_head *item;

	mutex_lock(&mem_ctls_mutex);

	/* scan the list and turn off all workq timers, doing so under lock
	 */
	list_for_each(item, &mc_devices) {
		mci = list_entry(item, struct mem_ctl_info, link);

		if (mci->op_state == OP_RUNNING_POLL)
			cancel_delayed_work(&mci->work);
	}

	mutex_unlock(&mem_ctls_mutex);

	/* re-walk the list, and reset the poll delay */
	mutex_lock(&mem_ctls_mutex);

	list_for_each(item, &mc_devices) {
		mci = list_entry(item, struct mem_ctl_info, link);

		edac_mc_workq_setup(mci, (unsigned long) value);
	}

	mutex_unlock(&mem_ctls_mutex);
}

/* Return 0 on success, 1 on failure.
 * Before calling this function, caller must
 * assign a unique value to mci->mc_idx.
 *
 *	locking model:
 *
 *		called with the mem_ctls_mutex lock held
 */
static int add_mc_to_global_list(struct mem_ctl_info *mci)
{
	struct list_head *item, *insert_before;
	struct mem_ctl_info *p;

	insert_before = &mc_devices;

	p = find_mci_by_dev(mci->pdev);
	if (unlikely(p != NULL))
		goto fail0;

	list_for_each(item, &mc_devices) {
		p = list_entry(item, struct mem_ctl_info, link);

		if (p->mc_idx >= mci->mc_idx) {
			if (unlikely(p->mc_idx == mci->mc_idx))
				goto fail1;

			insert_before = item;
			break;
		}
	}

	list_add_tail_rcu(&mci->link, insert_before);
	atomic_inc(&edac_handlers);
	return 0;

fail0:
	edac_printk(KERN_WARNING, EDAC_MC,
		"%s (%s) %s %s already assigned %d\n", dev_name(p->pdev),
		edac_dev_name(mci), p->mod_name, p->ctl_name, p->mc_idx);
	return 1;

fail1:
	edac_printk(KERN_WARNING, EDAC_MC,
		"bug in low-level driver: attempt to assign\n"
		"    duplicate mc_idx %d in %s()\n", p->mc_idx, __func__);
	return 1;
}

static void del_mc_from_global_list(struct mem_ctl_info *mci)
{
	atomic_dec(&edac_handlers);
	list_del_rcu(&mci->link);

	/* these are for safe removal of devices from global list while
	 * NMI handlers may be traversing list
	 */
	synchronize_rcu();
	INIT_LIST_HEAD(&mci->link);
}

/**
 * edac_mc_find: Search for a mem_ctl_info structure whose index is 'idx'.
 *
 * If found, return a pointer to the structure.
 * Else return NULL.
 *
 * Caller must hold mem_ctls_mutex.
 */
struct mem_ctl_info *edac_mc_find(int idx)
{
	struct list_head *item;
	struct mem_ctl_info *mci;

	list_for_each(item, &mc_devices) {
		mci = list_entry(item, struct mem_ctl_info, link);

		if (mci->mc_idx >= idx) {
			if (mci->mc_idx == idx)
				return mci;

			break;
		}
	}

	return NULL;
}
EXPORT_SYMBOL(edac_mc_find);

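/*
 * Usage sketch (illustrative, not part of the original file): the list is
 * only stable while mem_ctls_mutex is held, so a lookup from inside this
 * file would be wrapped like:
 *
 *	struct mem_ctl_info *mci;
 *
 *	mutex_lock(&mem_ctls_mutex);
 *	mci = edac_mc_find(0);
 *	if (mci)
 *		edac_mc_printk(mci, KERN_INFO, "found MC #0\n");
 *	mutex_unlock(&mem_ctls_mutex);
 */
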
/**
 * edac_mc_add_mc: Insert the 'mci' structure into the mci global list and
 *	create sysfs entries associated with mci structure
 * @mci: pointer to the mci structure to be added to the list
 *
 * Return:
 *	0	Success
 *	!0	Failure
 */

/* FIXME - should a warning be printed if no error detection? correction? */
int edac_mc_add_mc(struct mem_ctl_info *mci)
{
	debugf0("\n");

#ifdef CONFIG_EDAC_DEBUG
	if (edac_debug_level >= 3)
		edac_mc_dump_mci(mci);

	if (edac_debug_level >= 4) {
		int i;

		for (i = 0; i < mci->nr_csrows; i++) {
			int j;

			edac_mc_dump_csrow(mci->csrows[i]);
			for (j = 0; j < mci->csrows[i]->nr_channels; j++)
				edac_mc_dump_channel(mci->csrows[i]->channels[j]);
		}
		for (i = 0; i < mci->tot_dimms; i++)
			edac_mc_dump_dimm(mci->dimms[i]);
	}
#endif
	mutex_lock(&mem_ctls_mutex);

	if (add_mc_to_global_list(mci))
		goto fail0;

	/* set load time so that error rate can be tracked */
	mci->start_time = jiffies;

	if (edac_create_sysfs_mci_device(mci)) {
		edac_mc_printk(mci, KERN_WARNING,
			"failed to create sysfs device\n");
		goto fail1;
	}

	/* If there IS a check routine, then we are running POLLED */
	if (mci->edac_check != NULL) {
		/* This instance is NOW RUNNING */
		mci->op_state = OP_RUNNING_POLL;

		edac_mc_workq_setup(mci, edac_mc_get_poll_msec());
	} else {
		mci->op_state = OP_RUNNING_INTERRUPT;
	}

	/* Report action taken */
	edac_mc_printk(mci, KERN_INFO, "Giving out device to '%s' '%s':"
		" DEV %s\n", mci->mod_name, mci->ctl_name, edac_dev_name(mci));

	mutex_unlock(&mem_ctls_mutex);
	return 0;

fail1:
	del_mc_from_global_list(mci);

fail0:
	mutex_unlock(&mem_ctls_mutex);
	return 1;
}
EXPORT_SYMBOL_GPL(edac_mc_add_mc);

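/*
 * Usage sketch (illustrative, not part of the original file): a typical
 * probe sequence pairs edac_mc_alloc() with this function; the "pdev",
 * layer setup and names below are assumptions:
 *
 *	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
 *	if (!mci)
 *		return -ENOMEM;
 *	mci->pdev = &pdev->dev;
 *	mci->mtype_cap = MEM_FLAG_DDR3;
 *	mci->edac_ctl_cap = EDAC_FLAG_SECDED;
 *	mci->mod_name = "example_edac";
 *	mci->ctl_name = "example_ctl";
 *	if (edac_mc_add_mc(mci))
 *		goto err_free;
 *
 * On removal, edac_mc_del_mc(&pdev->dev) followed by edac_mc_free(mci)
 * undoes this registration.
 */
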
/**
 * edac_mc_del_mc: Remove sysfs entries for specified mci structure and
 *	remove mci structure from global list
 * @dev: Pointer to 'struct device' representing mci structure to remove.
 *
 * Return pointer to removed mci structure, or NULL if device not found.
 */
struct mem_ctl_info *edac_mc_del_mc(struct device *dev)
{
	struct mem_ctl_info *mci;

	debugf0("\n");

	mutex_lock(&mem_ctls_mutex);

	/* find the requested mci struct in the global list */
	mci = find_mci_by_dev(dev);
	if (mci == NULL) {
		mutex_unlock(&mem_ctls_mutex);
		return NULL;
	}

	del_mc_from_global_list(mci);
	mutex_unlock(&mem_ctls_mutex);

	/* flush workq processes */
	edac_mc_workq_teardown(mci);

	/* marking MCI offline */
	mci->op_state = OP_OFFLINE;

	/* remove from sysfs */
	edac_remove_sysfs_mci_device(mci);

	edac_printk(KERN_INFO, EDAC_MC,
		"Removed device %d for %s %s: DEV %s\n", mci->mc_idx,
		mci->mod_name, mci->ctl_name, edac_dev_name(mci));

	return mci;
}
EXPORT_SYMBOL_GPL(edac_mc_del_mc);

static void edac_mc_scrub_block(unsigned long page, unsigned long offset,
				u32 size)
{
	struct page *pg;
	void *virt_addr;
	unsigned long flags = 0;

	debugf3("\n");

	/* ECC error page was not in our memory. Ignore it. */
	if (!pfn_valid(page))
		return;

	/* Find the actual page structure then map it and fix */
	pg = pfn_to_page(page);

	if (PageHighMem(pg))
		local_irq_save(flags);

	virt_addr = kmap_atomic(pg);

	/* Perform architecture specific atomic scrub operation */
	atomic_scrub(virt_addr + offset, size);

	/* Unmap and complete */
	kunmap_atomic(virt_addr);

	if (PageHighMem(pg))
		local_irq_restore(flags);
}

/* FIXME - should return -1 */
int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page)
{
	struct csrow_info **csrows = mci->csrows;
	int row, i, j, n;

	debugf1("MC%d: 0x%lx\n", mci->mc_idx, page);
	row = -1;

	for (i = 0; i < mci->nr_csrows; i++) {
		struct csrow_info *csrow = csrows[i];
		n = 0;
		for (j = 0; j < csrow->nr_channels; j++) {
			struct dimm_info *dimm = csrow->channels[j]->dimm;
			n += dimm->nr_pages;
		}
		if (n == 0)
			continue;

		debugf3("MC%d: first(0x%lx) page(0x%lx) last(0x%lx) "
			"mask(0x%lx)\n", mci->mc_idx,
			csrow->first_page, page, csrow->last_page,
			csrow->page_mask);

		if ((page >= csrow->first_page) &&
		    (page <= csrow->last_page) &&
		    ((page & csrow->page_mask) ==
		     (csrow->first_page & csrow->page_mask))) {
			row = i;
			break;
		}
	}

	if (row == -1)
		edac_mc_printk(mci, KERN_ERR,
			"could not look up page error address %lx\n",
			(unsigned long)page);

	return row;
}
EXPORT_SYMBOL_GPL(edac_mc_find_csrow_by_page);

const char *edac_layer_name[] = {
	[EDAC_MC_LAYER_BRANCH] = "branch",
	[EDAC_MC_LAYER_CHANNEL] = "channel",
	[EDAC_MC_LAYER_SLOT] = "slot",
	[EDAC_MC_LAYER_CHIP_SELECT] = "csrow",
};
EXPORT_SYMBOL_GPL(edac_layer_name);

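/*
 * Illustrative note (not part of the original file): this table is indexed
 * by enum edac_mc_layer_type, and edac_mc_handle_error() below uses it to
 * build the textual error location. For a two-layer csrow/channel MC with
 * pos = {2, 1}, the resulting location string is:
 *
 *	"csrow:2 channel:1"
 */
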
static void edac_inc_ce_error(struct mem_ctl_info *mci,
			      bool enable_per_layer_report,
			      const int pos[EDAC_MAX_LAYERS])
{
	int i, index = 0;

	mci->ce_mc++;

	if (!enable_per_layer_report) {
		mci->ce_noinfo_count++;
		return;
	}

	for (i = 0; i < mci->n_layers; i++) {
		if (pos[i] < 0)
			break;
		index += pos[i];
		mci->ce_per_layer[i][index]++;

		if (i < mci->n_layers - 1)
			index *= mci->layers[i + 1].size;
	}
}

static void edac_inc_ue_error(struct mem_ctl_info *mci,
			      bool enable_per_layer_report,
			      const int pos[EDAC_MAX_LAYERS])
{
	int i, index = 0;

	mci->ue_mc++;

	if (!enable_per_layer_report) {
		mci->ue_noinfo_count++;
		return;
	}

	for (i = 0; i < mci->n_layers; i++) {
		if (pos[i] < 0)
			break;
		index += pos[i];
		mci->ue_per_layer[i][index]++;

		if (i < mci->n_layers - 1)
			index *= mci->layers[i + 1].size;
	}
}

static void edac_ce_error(struct mem_ctl_info *mci,
			  const int pos[EDAC_MAX_LAYERS],
			  const char *msg,
			  const char *location,
			  const char *label,
			  const char *detail,
			  const char *other_detail,
			  const bool enable_per_layer_report,
			  const unsigned long page_frame_number,
			  const unsigned long offset_in_page,
			  long grain)
{
	unsigned long remapped_page;

	if (edac_mc_get_log_ce()) {
		if (other_detail && *other_detail)
			edac_mc_printk(mci, KERN_WARNING,
				       "CE %s on %s (%s %s - %s)\n",
				       msg, label, location,
				       detail, other_detail);
		else
			edac_mc_printk(mci, KERN_WARNING,
				       "CE %s on %s (%s %s)\n",
				       msg, label, location,
				       detail);
	}
	edac_inc_ce_error(mci, enable_per_layer_report, pos);

	if (mci->scrub_mode & SCRUB_SW_SRC) {
		/*
		 * Some memory controllers (called MCs below) can remap
		 * memory so that it is still available at a different
		 * address when PCI devices map into memory.
		 * MCs that can't do this lose the memory where PCI
		 * devices are mapped. This mapping is MC-dependent
		 * and so we call back into the MC driver for it to
		 * map the MC page to a physical (CPU) page which can
		 * then be mapped to a virtual page - which can then
		 * be scrubbed.
		 */
		remapped_page = mci->ctl_page_to_phys ?
			mci->ctl_page_to_phys(mci, page_frame_number) :
			page_frame_number;

		edac_mc_scrub_block(remapped_page,
				    offset_in_page, grain);
	}
}

static void edac_ue_error(struct mem_ctl_info *mci,
			  const int pos[EDAC_MAX_LAYERS],
			  const char *msg,
			  const char *location,
			  const char *label,
			  const char *detail,
			  const char *other_detail,
			  const bool enable_per_layer_report)
{
	if (edac_mc_get_log_ue()) {
		if (other_detail && *other_detail)
			edac_mc_printk(mci, KERN_WARNING,
				       "UE %s on %s (%s %s - %s)\n",
				       msg, label, location, detail,
				       other_detail);
		else
			edac_mc_printk(mci, KERN_WARNING,
				       "UE %s on %s (%s %s)\n",
				       msg, label, location, detail);
	}

	if (edac_mc_get_panic_on_ue()) {
		if (other_detail && *other_detail)
			panic("UE %s on %s (%s %s - %s)\n",
			      msg, label, location, detail, other_detail);
		else
			panic("UE %s on %s (%s %s)\n",
			      msg, label, location, detail);
	}

	edac_inc_ue_error(mci, enable_per_layer_report, pos);
}

#define OTHER_LABEL " or "

/**
 * edac_mc_handle_error - reports a memory event to userspace
 *
 * @type:			severity of the error (CE/UE/Fatal)
 * @mci:			a struct mem_ctl_info pointer
 * @page_frame_number:		mem page where the error occurred
 * @offset_in_page:		offset of the error inside the page
 * @syndrome:			ECC syndrome
 * @top_layer:			Memory layer[0] position
 * @mid_layer:			Memory layer[1] position
 * @low_layer:			Memory layer[2] position
 * @msg:			Message meaningful to the end users that
 *				explains the event
 * @other_detail:		Technical details about the event that
 *				may help hardware manufacturers and
 *				EDAC developers to analyse the event
 * @arch_log:			Architecture-specific struct that can
 *				be used to add extended information to the
 *				tracepoint, like dumping MCE registers.
 */
void edac_mc_handle_error(const enum hw_event_mc_err_type type,
			  struct mem_ctl_info *mci,
			  const unsigned long page_frame_number,
			  const unsigned long offset_in_page,
			  const unsigned long syndrome,
			  const int top_layer,
			  const int mid_layer,
			  const int low_layer,
			  const char *msg,
			  const char *other_detail,
			  const void *arch_log)
{
	/* FIXME: too much for stack: move it to some pre-allocated area */
	char detail[80], location[80];
	char label[(EDAC_MC_LABEL_LEN + 1 + sizeof(OTHER_LABEL)) * mci->tot_dimms];
	char *p;
	int row = -1, chan = -1;
	int pos[EDAC_MAX_LAYERS] = { top_layer, mid_layer, low_layer };
	int i;
	long grain;
	bool enable_per_layer_report = false;
	u16 error_count;	/* FIXME: make it a parameter */
	u8 grain_bits;

	debugf3("MC%d\n", mci->mc_idx);

	/*
	 * Check if the event report is consistent and if the memory
	 * location is known. If it is known, enable_per_layer_report will be
	 * true, the DIMM(s) label info will be filled and the per-layer
	 * error counters will be incremented.
	 */
	for (i = 0; i < mci->n_layers; i++) {
		if (pos[i] >= (int)mci->layers[i].size) {
			if (type == HW_EVENT_ERR_CORRECTED)
				p = "CE";
			else
				p = "UE";

			edac_mc_printk(mci, KERN_ERR,
				       "INTERNAL ERROR: %s value is out of range (%d >= %d)\n",
				       edac_layer_name[mci->layers[i].type],
				       pos[i], mci->layers[i].size);
			/*
			 * Instead of just returning, let's use what's
			 * known about the error. The increment routines and
			 * the DIMM filter logic will do the right thing by
			 * pointing the likely damaged DIMMs.
			 */
			pos[i] = -1;
		}
		if (pos[i] >= 0)
			enable_per_layer_report = true;
	}

	/*
	 * Get the dimm label/grain that applies to the match criteria.
	 * As the error algorithm may not be able to point to just one memory
	 * stick, the logic here will get all possible labels that could
	 * potentially be affected by the error.
	 * On FB-DIMM memory controllers, for uncorrected errors, it is common
	 * to have only the MC channel and the MC dimm (also called "branch")
	 * but the channel is not known, as the memory is arranged in pairs,
	 * where each memory belongs to a separate channel within the same
	 * branch.
	 */
	grain = 0;
	p = label;
	*p = '\0';
	for (i = 0; i < mci->tot_dimms; i++) {
		struct dimm_info *dimm = mci->dimms[i];

		if (top_layer >= 0 && top_layer != dimm->location[0])
			continue;
		if (mid_layer >= 0 && mid_layer != dimm->location[1])
			continue;
		if (low_layer >= 0 && low_layer != dimm->location[2])
			continue;

		/* get the max grain, over the error match range */
		if (dimm->grain > grain)
			grain = dimm->grain;

		/*
		 * If the error is memory-controller wide, there's no need to
		 * seek for the affected DIMMs because the whole
		 * channel/memory controller/... may be affected.
		 * Also, don't show errors for empty DIMM slots.
		 */
		if (enable_per_layer_report && dimm->nr_pages) {
			if (p != label) {
				strcpy(p, OTHER_LABEL);
				p += strlen(OTHER_LABEL);
			}
			strcpy(p, dimm->label);
			p += strlen(p);
			*p = '\0';

			/*
			 * get csrow/channel of the DIMM, in order to allow
			 * incrementing the compat API counters
			 */
			debugf4("%s csrows map: (%d,%d)\n",
				mci->mem_is_per_rank ? "rank" : "dimm",
				dimm->csrow, dimm->cschannel);

			if (row == -1)
				row = dimm->csrow;
			else if (row >= 0 && row != dimm->csrow)
				row = -2;

			if (chan == -1)
				chan = dimm->cschannel;
			else if (chan >= 0 && chan != dimm->cschannel)
				chan = -2;
		}
	}

	if (!enable_per_layer_report) {
		strcpy(label, "any memory");
	} else {
		debugf4("csrow/channel to increment: (%d,%d)\n",
			row, chan);
		if (p == label)
			strcpy(label, "unknown memory");
		if (type == HW_EVENT_ERR_CORRECTED) {
			if (row >= 0) {
				mci->csrows[row]->ce_count++;
				if (chan >= 0)
					mci->csrows[row]->channels[chan]->ce_count++;
			}
		} else
			if (row >= 0)
				mci->csrows[row]->ue_count++;
	}

	/* Fill the RAM location data */
	p = location;
	for (i = 0; i < mci->n_layers; i++) {
		if (pos[i] < 0)
			continue;

		p += sprintf(p, "%s:%d ",
			     edac_layer_name[mci->layers[i].type],
			     pos[i]);
	}
	if (p > location)
		*(p - 1) = '\0';

	/* Report the error via the trace interface */

	error_count = 1;	/* FIXME: allow changing it */
	grain_bits = fls_long(grain) + 1;
	trace_mc_event(type, msg, label, error_count,
		       mci->mc_idx, top_layer, mid_layer, low_layer,
		       PAGES_TO_MiB(page_frame_number) | offset_in_page,
		       grain_bits, syndrome, other_detail);

	/* Memory type dependent details about the error */
	if (type == HW_EVENT_ERR_CORRECTED) {
		snprintf(detail, sizeof(detail),
			 "page:0x%lx offset:0x%lx grain:%ld syndrome:0x%lx",
			 page_frame_number, offset_in_page,
			 grain, syndrome);
		edac_ce_error(mci, pos, msg, location, label, detail,
			      other_detail, enable_per_layer_report,
			      page_frame_number, offset_in_page, grain);
	} else {
		snprintf(detail, sizeof(detail),
			 "page:0x%lx offset:0x%lx grain:%ld",
			 page_frame_number, offset_in_page, grain);

		edac_ue_error(mci, pos, msg, location, label, detail,
			      other_detail, enable_per_layer_report);
	}
}
EXPORT_SYMBOL_GPL(edac_mc_handle_error);
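
/*
 * Usage sketch (illustrative, not part of the original file): a driver that
 * decoded a corrected error at csrow 2, channel 1 could report it like this
 * (the page/offset/syndrome values are placeholders):
 *
 *	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
 *			     page_frame_number, offset_in_page, syndrome,
 *			     2, 1, -1,
 *			     "ECC error", "", NULL);
 *
 * Layer positions the hardware cannot resolve are passed as -1, which makes
 * the label and counter logic above fall back to coarser accounting.
 */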