drivers/edac/edac_mc.c
1 /*
2 * edac_mc kernel module
3 * (C) 2005, 2006 Linux Networx (http://lnxi.com)
4 * This file may be distributed under the terms of the
5 * GNU General Public License.
6 *
7 * Written by Thayne Harbaugh
8 * Based on work by Dan Hollis <goemon at anime dot net> and others.
9 * http://www.anime.net/~goemon/linux-ecc/
10 *
11 * Modified by Dave Peterson and Doug Thompson
12 *
13 */
14
15 #include <linux/module.h>
16 #include <linux/proc_fs.h>
17 #include <linux/kernel.h>
18 #include <linux/types.h>
19 #include <linux/smp.h>
20 #include <linux/init.h>
21 #include <linux/sysctl.h>
22 #include <linux/highmem.h>
23 #include <linux/timer.h>
24 #include <linux/slab.h>
25 #include <linux/jiffies.h>
26 #include <linux/spinlock.h>
27 #include <linux/list.h>
28 #include <linux/ctype.h>
29 #include <linux/edac.h>
30 #include <linux/bitops.h>
31 #include <asm/uaccess.h>
32 #include <asm/page.h>
33 #include <asm/edac.h>
34 #include "edac_core.h"
35 #include "edac_module.h"
36
37 #define CREATE_TRACE_POINTS
38 #define TRACE_INCLUDE_PATH ../../include/ras
39 #include <ras/ras_event.h>
40
41 /* lock to memory controller's control array */
42 static DEFINE_MUTEX(mem_ctls_mutex);
43 static LIST_HEAD(mc_devices);
44
45 unsigned edac_dimm_info_location(struct dimm_info *dimm, char *buf,
46 unsigned len)
47 {
48 struct mem_ctl_info *mci = dimm->mci;
49 int i, n, count = 0;
50 char *p = buf;
51
52 for (i = 0; i < mci->n_layers; i++) {
53 n = snprintf(p, len, "%s %d ",
54 edac_layer_name[mci->layers[i].type],
55 dimm->location[i]);
56 p += n;
57 len -= n;
58 count += n;
59 if (!len)
60 break;
61 }
62
63 return count;
64 }
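/*
 * Example output (illustrative only): for a controller described with a
 * csrow layer and a channel layer, a DIMM at location {1, 0} is rendered
 * by the function above as the string "csrow 1 channel 0 ", i.e. one
 * "<layer name> <index> " pair per layer, trailing space included.
 */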
65
66 #ifdef CONFIG_EDAC_DEBUG
67
68 static void edac_mc_dump_channel(struct rank_info *chan)
69 {
70 edac_dbg(4, " channel->chan_idx = %d\n", chan->chan_idx);
71 edac_dbg(4, " channel = %p\n", chan);
72 edac_dbg(4, " channel->csrow = %p\n", chan->csrow);
73 edac_dbg(4, " channel->dimm = %p\n", chan->dimm);
74 }
75
76 static void edac_mc_dump_dimm(struct dimm_info *dimm, int number)
77 {
78 char location[80];
79
80 edac_dimm_info_location(dimm, location, sizeof(location));
81
82 edac_dbg(4, "%s%i: %smapped as virtual row %d, chan %d\n",
83 dimm->mci->mem_is_per_rank ? "rank" : "dimm",
84 number, location, dimm->csrow, dimm->cschannel);
85 edac_dbg(4, " dimm = %p\n", dimm);
86 edac_dbg(4, " dimm->label = '%s'\n", dimm->label);
87 edac_dbg(4, " dimm->nr_pages = 0x%x\n", dimm->nr_pages);
88 edac_dbg(4, " dimm->grain = %d\n", dimm->grain);
90 }
91
92 static void edac_mc_dump_csrow(struct csrow_info *csrow)
93 {
94 edac_dbg(4, "csrow->csrow_idx = %d\n", csrow->csrow_idx);
95 edac_dbg(4, " csrow = %p\n", csrow);
96 edac_dbg(4, " csrow->first_page = 0x%lx\n", csrow->first_page);
97 edac_dbg(4, " csrow->last_page = 0x%lx\n", csrow->last_page);
98 edac_dbg(4, " csrow->page_mask = 0x%lx\n", csrow->page_mask);
99 edac_dbg(4, " csrow->nr_channels = %d\n", csrow->nr_channels);
100 edac_dbg(4, " csrow->channels = %p\n", csrow->channels);
101 edac_dbg(4, " csrow->mci = %p\n", csrow->mci);
102 }
103
104 static void edac_mc_dump_mci(struct mem_ctl_info *mci)
105 {
106 edac_dbg(3, "\tmci = %p\n", mci);
107 edac_dbg(3, "\tmci->mtype_cap = %lx\n", mci->mtype_cap);
108 edac_dbg(3, "\tmci->edac_ctl_cap = %lx\n", mci->edac_ctl_cap);
109 edac_dbg(3, "\tmci->edac_cap = %lx\n", mci->edac_cap);
110 edac_dbg(4, "\tmci->edac_check = %p\n", mci->edac_check);
111 edac_dbg(3, "\tmci->nr_csrows = %d, csrows = %p\n",
112 mci->nr_csrows, mci->csrows);
113 edac_dbg(3, "\tmci->nr_dimms = %d, dimms = %p\n",
114 mci->tot_dimms, mci->dimms);
115 edac_dbg(3, "\tdev = %p\n", mci->pdev);
116 edac_dbg(3, "\tmod_name:ctl_name = %s:%s\n",
117 mci->mod_name, mci->ctl_name);
118 edac_dbg(3, "\tpvt_info = %p\n\n", mci->pvt_info);
119 }
120
121 #endif /* CONFIG_EDAC_DEBUG */
122
123 /*
124 * keep those in sync with the enum mem_type
125 */
126 const char *edac_mem_types[] = {
127 "Empty csrow",
128 "Reserved csrow type",
129 "Unknown csrow type",
130 "Fast page mode RAM",
131 "Extended data out RAM",
132 "Burst Extended data out RAM",
133 "Single data rate SDRAM",
134 "Registered single data rate SDRAM",
135 "Double data rate SDRAM",
136 "Registered Double data rate SDRAM",
137 "Rambus DRAM",
138 "Unbuffered DDR2 RAM",
139 "Fully buffered DDR2",
140 "Registered DDR2 RAM",
141 "Rambus XDR",
142 "Unbuffered DDR3 RAM",
143 "Registered DDR3 RAM",
144 };
145 EXPORT_SYMBOL_GPL(edac_mem_types);
146
147 /**
148 * edac_align_ptr - Prepares the pointer offsets for a single-shot allocation
149 * @p: pointer to a pointer with the memory offset to be used. At
150 * return, this will be incremented to point to the next offset
151 * @size: Size of the data structure to be reserved
152 * @n_elems: Number of elements that should be reserved
153 *
154 * If 'size' is a constant, the compiler will optimize this whole function
155 * down to either a no-op or the addition of a constant to the value of '*p'.
156 *
157 * The 'p' pointer is needed so that successive calls keep advancing
158 * to the proper offsets when a structure is allocated together with
159 * its embedded structs, as edac_mc_alloc() below does,
160 * for example.
161 *
162 * At return, the pointer 'p' will be incremented to be used on a next call
163 * to this function.
164 */
165 void *edac_align_ptr(void **p, unsigned size, int n_elems)
166 {
167 unsigned align, r;
168 void *ptr = *p;
169
170 *p += size * n_elems;
171
172 /*
173 * 'p' can possibly be an unaligned item X such that sizeof(X) is
174 * 'size'. Adjust 'p' so that its alignment is at least as
175 * stringent as what the compiler would provide for X and return
176 * the aligned result.
177 * Here we assume that the alignment of a "long long" is the most
178 * stringent alignment that the compiler will ever provide by default.
179 * As far as I know, this is a reasonable assumption.
180 */
181 if (size > sizeof(long))
182 align = sizeof(long long);
183 else if (size > sizeof(int))
184 align = sizeof(long);
185 else if (size > sizeof(short))
186 align = sizeof(int);
187 else if (size > sizeof(char))
188 align = sizeof(short);
189 else
190 return (char *)ptr;
191
192 	r = (unsigned long)ptr % align;
193
194 if (r == 0)
195 return (char *)ptr;
196
197 *p += align - r;
198
199 return (void *)(((unsigned long)ptr) + align - r);
200 }
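/*
 * Typical use (a minimal sketch of what edac_mc_alloc() below does): run a
 * first pass with ptr = NULL so that each edac_align_ptr() call returns an
 * aligned offset rather than a real address, kzalloc() the accumulated size
 * in one shot, then rebase every saved offset onto the allocated block:
 *
 *	void *ptr = NULL;
 *	mci   = edac_align_ptr(&ptr, sizeof(*mci), 1);
 *	layer = edac_align_ptr(&ptr, sizeof(*layer), n_layers);
 *	...
 *	mci = kzalloc(size, GFP_KERNEL);
 *	layer = (struct edac_mc_layer *)((char *)mci + (unsigned long)layer);
 */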
201
202 /**
203 * edac_mc_alloc: Allocate and partially fill a struct mem_ctl_info structure
204 * @mc_num: Memory controller number
205 * @n_layers: Number of MC hierarchy layers
206 * @layers: Describes each layer as seen by the Memory Controller
207 * @sz_pvt: Size of private storage needed
208 *
209 *
210 * Everything is kmalloc'ed as one big chunk - more efficient.
211 * Only can be used if all structures have the same lifetime - otherwise
212 * you have to allocate and initialize your own structures.
213 *
214 * Use edac_mc_free() to free mc structures allocated by this function.
215 *
216 * NOTE: drivers handle multi-rank memories in different ways: in some
217 * drivers, one multi-rank memory stick is mapped as one entry, while, in
218 * others, a single multi-rank memory stick would be mapped into several
219 * entries. Currently, this function allocates multiple struct dimm_info
220 * in such scenarios, as grouping the multiple ranks would require driver changes.
221 *
222 * Returns:
223 * On failure: NULL
224 * On success: struct mem_ctl_info pointer
225 */
226 struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
227 unsigned n_layers,
228 struct edac_mc_layer *layers,
229 unsigned sz_pvt)
230 {
231 struct mem_ctl_info *mci;
232 struct edac_mc_layer *layer;
233 struct csrow_info *csr;
234 struct rank_info *chan;
235 struct dimm_info *dimm;
236 u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS];
237 unsigned pos[EDAC_MAX_LAYERS];
238 unsigned size, tot_dimms = 1, count = 1;
239 unsigned tot_csrows = 1, tot_channels = 1, tot_errcount = 0;
240 void *pvt, *p, *ptr = NULL;
241 int i, j, row, chn, n, len, off;
242 bool per_rank = false;
243
244 BUG_ON(n_layers > EDAC_MAX_LAYERS || n_layers == 0);
245 /*
246 * Calculate the total amount of dimms and csrows/cschannels while
247 * in the old API emulation mode
248 */
249 for (i = 0; i < n_layers; i++) {
250 tot_dimms *= layers[i].size;
251 if (layers[i].is_virt_csrow)
252 tot_csrows *= layers[i].size;
253 else
254 tot_channels *= layers[i].size;
255
256 if (layers[i].type == EDAC_MC_LAYER_CHIP_SELECT)
257 per_rank = true;
258 }
259
260 /* Figure out the offsets of the various items from the start of an mc
261 * structure. We want the alignment of each item to be at least as
262 * stringent as what the compiler would provide if we could simply
263 * hardcode everything into a single struct.
264 */
265 mci = edac_align_ptr(&ptr, sizeof(*mci), 1);
266 layer = edac_align_ptr(&ptr, sizeof(*layer), n_layers);
267 for (i = 0; i < n_layers; i++) {
268 count *= layers[i].size;
269 edac_dbg(4, "errcount layer %d size %d\n", i, count);
270 ce_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count);
271 ue_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count);
272 tot_errcount += 2 * count;
273 }
274
275 edac_dbg(4, "allocating %d error counters\n", tot_errcount);
276 pvt = edac_align_ptr(&ptr, sz_pvt, 1);
277 size = ((unsigned long)pvt) + sz_pvt;
278
279 edac_dbg(1, "allocating %u bytes for mci data (%d %s, %d csrows/channels)\n",
280 size,
281 tot_dimms,
282 per_rank ? "ranks" : "dimms",
283 tot_csrows * tot_channels);
284
285 mci = kzalloc(size, GFP_KERNEL);
286 if (mci == NULL)
287 return NULL;
288
289 /* Adjust pointers so they point within the memory we just allocated
290 * rather than an imaginary chunk of memory located at address 0.
291 */
292 layer = (struct edac_mc_layer *)(((char *)mci) + ((unsigned long)layer));
293 for (i = 0; i < n_layers; i++) {
294 mci->ce_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ce_per_layer[i]));
295 mci->ue_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ue_per_layer[i]));
296 }
297 pvt = sz_pvt ? (((char *)mci) + ((unsigned long)pvt)) : NULL;
298
299 /* setup index and various internal pointers */
300 mci->mc_idx = mc_num;
301 mci->tot_dimms = tot_dimms;
302 mci->pvt_info = pvt;
303 mci->n_layers = n_layers;
304 mci->layers = layer;
305 memcpy(mci->layers, layers, sizeof(*layer) * n_layers);
306 mci->nr_csrows = tot_csrows;
307 mci->num_cschannel = tot_channels;
308 mci->mem_is_per_rank = per_rank;
309
310 /*
311 * Allocate and fill the csrow/channels structs
312 */
313 	mci->csrows = kcalloc(tot_csrows, sizeof(*mci->csrows), GFP_KERNEL);
314 if (!mci->csrows)
315 goto error;
316 for (row = 0; row < tot_csrows; row++) {
317 csr = kzalloc(sizeof(**mci->csrows), GFP_KERNEL);
318 if (!csr)
319 goto error;
320 mci->csrows[row] = csr;
321 csr->csrow_idx = row;
322 csr->mci = mci;
323 csr->nr_channels = tot_channels;
324 		csr->channels = kcalloc(tot_channels, sizeof(*csr->channels),
325 GFP_KERNEL);
326 if (!csr->channels)
327 goto error;
328
329 for (chn = 0; chn < tot_channels; chn++) {
330 chan = kzalloc(sizeof(**csr->channels), GFP_KERNEL);
331 if (!chan)
332 goto error;
333 csr->channels[chn] = chan;
334 chan->chan_idx = chn;
335 chan->csrow = csr;
336 }
337 }
338
339 /*
340 * Allocate and fill the dimm structs
341 */
342 	mci->dimms = kcalloc(tot_dimms, sizeof(*mci->dimms), GFP_KERNEL);
343 if (!mci->dimms)
344 goto error;
345
346 memset(&pos, 0, sizeof(pos));
347 row = 0;
348 chn = 0;
349 for (i = 0; i < tot_dimms; i++) {
350 chan = mci->csrows[row]->channels[chn];
351 off = EDAC_DIMM_OFF(layer, n_layers, pos[0], pos[1], pos[2]);
352 if (off < 0 || off >= tot_dimms) {
353 edac_mc_printk(mci, KERN_ERR, "EDAC core bug: EDAC_DIMM_OFF is trying to do an illegal data access\n");
354 goto error;
355 }
356
357 dimm = kzalloc(sizeof(**mci->dimms), GFP_KERNEL);
358 if (!dimm)
359 goto error;
360 mci->dimms[off] = dimm;
361 dimm->mci = mci;
362
363 /*
364 * Copy DIMM location and initialize it.
365 */
366 len = sizeof(dimm->label);
367 p = dimm->label;
368 n = snprintf(p, len, "mc#%u", mc_num);
369 p += n;
370 len -= n;
371 for (j = 0; j < n_layers; j++) {
372 n = snprintf(p, len, "%s#%u",
373 edac_layer_name[layers[j].type],
374 pos[j]);
375 p += n;
376 len -= n;
377 dimm->location[j] = pos[j];
378
379 if (len <= 0)
380 break;
381 }
382
383 /* Link it to the csrows old API data */
384 chan->dimm = dimm;
385 dimm->csrow = row;
386 dimm->cschannel = chn;
387
388 /* Increment csrow location */
389 row++;
390 if (row == tot_csrows) {
391 row = 0;
392 chn++;
393 }
394
395 /* Increment dimm location */
396 for (j = n_layers - 1; j >= 0; j--) {
397 pos[j]++;
398 if (pos[j] < layers[j].size)
399 break;
400 pos[j] = 0;
401 }
402 }
403
404 mci->op_state = OP_ALLOC;
405
406 /* at this point, the root kobj is valid, and in order to
407 * 'free' the object, then the function:
408 * edac_mc_unregister_sysfs_main_kobj() must be called
409 * which will perform kobj unregistration and the actual free
410 * will occur during the kobject callback operation
411 */
412
413 return mci;
414
415 error:
416 if (mci->dimms) {
417 for (i = 0; i < tot_dimms; i++)
418 kfree(mci->dimms[i]);
419 kfree(mci->dimms);
420 }
421 if (mci->csrows) {
422 for (row = 0; row < tot_csrows; row++) {
423 csr = mci->csrows[row];
424 if (csr) {
425 if (csr->channels) {
426 for (chn = 0; chn < tot_channels; chn++)
427 kfree(csr->channels[chn]);
428 kfree(csr->channels);
429 }
430 kfree(csr);
431 }
432 }
433 kfree(mci->csrows);
434 }
435 kfree(mci);
436
437 return NULL;
438 }
439 EXPORT_SYMBOL_GPL(edac_mc_alloc);
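/*
 * Minimal usage sketch (driver specific, shown only for illustration): a
 * driver that sees its memory as csrows x channels could allocate its mci
 * like this; nr_csrows, nr_channels and struct my_pvt are placeholders:
 *
 *	struct edac_mc_layer layers[2];
 *	struct mem_ctl_info *mci;
 *
 *	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
 *	layers[0].size = nr_csrows;
 *	layers[0].is_virt_csrow = true;
 *	layers[1].type = EDAC_MC_LAYER_CHANNEL;
 *	layers[1].size = nr_channels;
 *	layers[1].is_virt_csrow = false;
 *	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
 *			    sizeof(struct my_pvt));
 *	if (!mci)
 *		return -ENOMEM;
 */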
440
441 /**
442 * edac_mc_free
443 * 'Free' a previously allocated 'mci' structure
444 * @mci: pointer to a struct mem_ctl_info structure
445 */
446 void edac_mc_free(struct mem_ctl_info *mci)
447 {
448 edac_dbg(1, "\n");
449
450 /* the mci instance is freed here, when the sysfs object is dropped */
451 edac_unregister_sysfs(mci);
452 }
453 EXPORT_SYMBOL_GPL(edac_mc_free);
454
455
456 /**
457 * find_mci_by_dev
458 *
459 * scan list of controllers looking for the one that manages
460 * the 'dev' device
461 * @dev: pointer to a struct device related with the MCI
462 */
463 struct mem_ctl_info *find_mci_by_dev(struct device *dev)
464 {
465 struct mem_ctl_info *mci;
466 struct list_head *item;
467
468 edac_dbg(3, "\n");
469
470 list_for_each(item, &mc_devices) {
471 mci = list_entry(item, struct mem_ctl_info, link);
472
473 if (mci->pdev == dev)
474 return mci;
475 }
476
477 return NULL;
478 }
479 EXPORT_SYMBOL_GPL(find_mci_by_dev);
480
481 /*
482 * handler for EDAC to check if NMI type handler has asserted interrupt
483 */
484 static int edac_mc_assert_error_check_and_clear(void)
485 {
486 int old_state;
487
488 if (edac_op_state == EDAC_OPSTATE_POLL)
489 return 1;
490
491 old_state = edac_err_assert;
492 edac_err_assert = 0;
493
494 return old_state;
495 }
496
497 /*
498 * edac_mc_workq_function
499 * performs the operation scheduled by a workq request
500 */
501 static void edac_mc_workq_function(struct work_struct *work_req)
502 {
503 struct delayed_work *d_work = to_delayed_work(work_req);
504 struct mem_ctl_info *mci = to_edac_mem_ctl_work(d_work);
505
506 mutex_lock(&mem_ctls_mutex);
507
508 	/* if this control struct has moved to offline state, we are done */
509 if (mci->op_state == OP_OFFLINE) {
510 mutex_unlock(&mem_ctls_mutex);
511 return;
512 }
513
514 /* Only poll controllers that are running polled and have a check */
515 if (edac_mc_assert_error_check_and_clear() && (mci->edac_check != NULL))
516 mci->edac_check(mci);
517
518 mutex_unlock(&mem_ctls_mutex);
519
520 /* Reschedule */
521 queue_delayed_work(edac_workqueue, &mci->work,
522 msecs_to_jiffies(edac_mc_get_poll_msec()));
523 }
524
525 /*
526 * edac_mc_workq_setup
527 * initialize a workq item for this mci
528 * passing in the new delay period in msec
529 *
530 * locking model:
531 *
532 * called with the mem_ctls_mutex held
533 */
534 static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec)
535 {
536 edac_dbg(0, "\n");
537
538 /* if this instance is not in the POLL state, then simply return */
539 if (mci->op_state != OP_RUNNING_POLL)
540 return;
541
542 INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
543 queue_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec));
544 }
545
546 /*
547 * edac_mc_workq_teardown
548 * stop the workq processing on this mci
549 *
550 * locking model:
551 *
552 * called WITHOUT lock held
553 */
554 static void edac_mc_workq_teardown(struct mem_ctl_info *mci)
555 {
556 int status;
557
558 if (mci->op_state != OP_RUNNING_POLL)
559 return;
560
561 status = cancel_delayed_work(&mci->work);
562 if (status == 0) {
563 edac_dbg(0, "not canceled, flush the queue\n");
564
565 /* workq instance might be running, wait for it */
566 flush_workqueue(edac_workqueue);
567 }
568 }
569
570 /*
571 * edac_mc_reset_delay_period(int value)
572 *
573 * user space has updated our poll period value, need to
574 * reset our workq delays
575 */
576 void edac_mc_reset_delay_period(int value)
577 {
578 struct mem_ctl_info *mci;
579 struct list_head *item;
580
581 mutex_lock(&mem_ctls_mutex);
582
583 /* scan the list and turn off all workq timers, doing so under lock
584 */
585 list_for_each(item, &mc_devices) {
586 mci = list_entry(item, struct mem_ctl_info, link);
587
588 if (mci->op_state == OP_RUNNING_POLL)
589 cancel_delayed_work(&mci->work);
590 }
591
592 mutex_unlock(&mem_ctls_mutex);
593
594
595 /* re-walk the list, and reset the poll delay */
596 mutex_lock(&mem_ctls_mutex);
597
598 list_for_each(item, &mc_devices) {
599 mci = list_entry(item, struct mem_ctl_info, link);
600
601 edac_mc_workq_setup(mci, (unsigned long) value);
602 }
603
604 mutex_unlock(&mem_ctls_mutex);
605 }
606
607
608
609 /* Return 0 on success, 1 on failure.
610 * Before calling this function, caller must
611 * assign a unique value to mci->mc_idx.
612 *
613 * locking model:
614 *
615 * called with the mem_ctls_mutex lock held
616 */
617 static int add_mc_to_global_list(struct mem_ctl_info *mci)
618 {
619 struct list_head *item, *insert_before;
620 struct mem_ctl_info *p;
621
622 insert_before = &mc_devices;
623
624 p = find_mci_by_dev(mci->pdev);
625 if (unlikely(p != NULL))
626 goto fail0;
627
628 list_for_each(item, &mc_devices) {
629 p = list_entry(item, struct mem_ctl_info, link);
630
631 if (p->mc_idx >= mci->mc_idx) {
632 if (unlikely(p->mc_idx == mci->mc_idx))
633 goto fail1;
634
635 insert_before = item;
636 break;
637 }
638 }
639
640 list_add_tail_rcu(&mci->link, insert_before);
641 atomic_inc(&edac_handlers);
642 return 0;
643
644 fail0:
645 edac_printk(KERN_WARNING, EDAC_MC,
646 "%s (%s) %s %s already assigned %d\n", dev_name(p->pdev),
647 edac_dev_name(mci), p->mod_name, p->ctl_name, p->mc_idx);
648 return 1;
649
650 fail1:
651 edac_printk(KERN_WARNING, EDAC_MC,
652 "bug in low-level driver: attempt to assign\n"
653 " duplicate mc_idx %d in %s()\n", p->mc_idx, __func__);
654 return 1;
655 }
656
657 static void del_mc_from_global_list(struct mem_ctl_info *mci)
658 {
659 atomic_dec(&edac_handlers);
660 list_del_rcu(&mci->link);
661
662 /* these are for safe removal of devices from global list while
663 * NMI handlers may be traversing list
664 */
665 synchronize_rcu();
666 INIT_LIST_HEAD(&mci->link);
667 }
668
669 /**
670 * edac_mc_find: Search for a mem_ctl_info structure whose index is 'idx'.
671 *
672 * If found, return a pointer to the structure.
673 * Else return NULL.
674 *
675 * Caller must hold mem_ctls_mutex.
676 */
677 struct mem_ctl_info *edac_mc_find(int idx)
678 {
679 struct list_head *item;
680 struct mem_ctl_info *mci;
681
682 list_for_each(item, &mc_devices) {
683 mci = list_entry(item, struct mem_ctl_info, link);
684
685 if (mci->mc_idx >= idx) {
686 if (mci->mc_idx == idx)
687 return mci;
688
689 break;
690 }
691 }
692
693 return NULL;
694 }
695 EXPORT_SYMBOL(edac_mc_find);
696
697 /**
698 * edac_mc_add_mc: Insert the 'mci' structure into the mci global list and
699 * create sysfs entries associated with mci structure
700 * @mci: pointer to the mci structure to be added to the list
701 *
702 * Return:
703 * 0 Success
704 * !0 Failure
705 */
706
707 /* FIXME - should a warning be printed if no error detection? correction? */
708 int edac_mc_add_mc(struct mem_ctl_info *mci)
709 {
710 edac_dbg(0, "\n");
711
712 #ifdef CONFIG_EDAC_DEBUG
713 if (edac_debug_level >= 3)
714 edac_mc_dump_mci(mci);
715
716 if (edac_debug_level >= 4) {
717 int i;
718
719 for (i = 0; i < mci->nr_csrows; i++) {
720 struct csrow_info *csrow = mci->csrows[i];
721 u32 nr_pages = 0;
722 int j;
723
724 for (j = 0; j < csrow->nr_channels; j++)
725 nr_pages += csrow->channels[j]->dimm->nr_pages;
726 if (!nr_pages)
727 continue;
728 edac_mc_dump_csrow(csrow);
729 for (j = 0; j < csrow->nr_channels; j++)
730 if (csrow->channels[j]->dimm->nr_pages)
731 edac_mc_dump_channel(csrow->channels[j]);
732 }
733 for (i = 0; i < mci->tot_dimms; i++)
734 if (mci->dimms[i]->nr_pages)
735 edac_mc_dump_dimm(mci->dimms[i], i);
736 }
737 #endif
738 mutex_lock(&mem_ctls_mutex);
739
740 if (add_mc_to_global_list(mci))
741 goto fail0;
742
743 /* set load time so that error rate can be tracked */
744 mci->start_time = jiffies;
745
746 if (edac_create_sysfs_mci_device(mci)) {
747 edac_mc_printk(mci, KERN_WARNING,
748 "failed to create sysfs device\n");
749 goto fail1;
750 }
751
752 /* If there IS a check routine, then we are running POLLED */
753 if (mci->edac_check != NULL) {
754 /* This instance is NOW RUNNING */
755 mci->op_state = OP_RUNNING_POLL;
756
757 edac_mc_workq_setup(mci, edac_mc_get_poll_msec());
758 } else {
759 mci->op_state = OP_RUNNING_INTERRUPT;
760 }
761
762 /* Report action taken */
763 edac_mc_printk(mci, KERN_INFO, "Giving out device to '%s' '%s':"
764 " DEV %s\n", mci->mod_name, mci->ctl_name, edac_dev_name(mci));
765
766 mutex_unlock(&mem_ctls_mutex);
767 return 0;
768
769 fail1:
770 del_mc_from_global_list(mci);
771
772 fail0:
773 mutex_unlock(&mem_ctls_mutex);
774 return 1;
775 }
776 EXPORT_SYMBOL_GPL(edac_mc_add_mc);
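/*
 * Probe/remove pairing (an illustrative sketch only; error handling is
 * abbreviated and the return values are placeholders):
 *
 *	probe:
 *		mci = edac_mc_alloc(...);
 *		... fill mci->pdev, csrow/dimm data, mci->edac_check, etc. ...
 *		if (edac_mc_add_mc(mci)) {
 *			edac_mc_free(mci);
 *			return -ENODEV;
 *		}
 *
 *	remove:
 *		mci = edac_mc_del_mc(&pdev->dev);
 *		if (mci)
 *			edac_mc_free(mci);
 */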
777
778 /**
779 * edac_mc_del_mc: Remove sysfs entries for specified mci structure and
780 * remove mci structure from global list
781 * @dev: Pointer to 'struct device' representing mci structure to remove.
782 *
783 * Return pointer to removed mci structure, or NULL if device not found.
784 */
785 struct mem_ctl_info *edac_mc_del_mc(struct device *dev)
786 {
787 struct mem_ctl_info *mci;
788
789 edac_dbg(0, "\n");
790
791 mutex_lock(&mem_ctls_mutex);
792
793 /* find the requested mci struct in the global list */
794 mci = find_mci_by_dev(dev);
795 if (mci == NULL) {
796 mutex_unlock(&mem_ctls_mutex);
797 return NULL;
798 }
799
800 del_mc_from_global_list(mci);
801 mutex_unlock(&mem_ctls_mutex);
802
803 /* flush workq processes */
804 edac_mc_workq_teardown(mci);
805
806 /* marking MCI offline */
807 mci->op_state = OP_OFFLINE;
808
809 /* remove from sysfs */
810 edac_remove_sysfs_mci_device(mci);
811
812 edac_printk(KERN_INFO, EDAC_MC,
813 "Removed device %d for %s %s: DEV %s\n", mci->mc_idx,
814 mci->mod_name, mci->ctl_name, edac_dev_name(mci));
815
816 return mci;
817 }
818 EXPORT_SYMBOL_GPL(edac_mc_del_mc);
819
820 static void edac_mc_scrub_block(unsigned long page, unsigned long offset,
821 u32 size)
822 {
823 struct page *pg;
824 void *virt_addr;
825 unsigned long flags = 0;
826
827 edac_dbg(3, "\n");
828
829 /* ECC error page was not in our memory. Ignore it. */
830 if (!pfn_valid(page))
831 return;
832
833 /* Find the actual page structure then map it and fix */
834 pg = pfn_to_page(page);
835
836 if (PageHighMem(pg))
837 local_irq_save(flags);
838
839 virt_addr = kmap_atomic(pg);
840
841 /* Perform architecture specific atomic scrub operation */
842 atomic_scrub(virt_addr + offset, size);
843
844 /* Unmap and complete */
845 kunmap_atomic(virt_addr);
846
847 if (PageHighMem(pg))
848 local_irq_restore(flags);
849 }
850
851 /* FIXME - should return -1 */
852 int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page)
853 {
854 struct csrow_info **csrows = mci->csrows;
855 int row, i, j, n;
856
857 edac_dbg(1, "MC%d: 0x%lx\n", mci->mc_idx, page);
858 row = -1;
859
860 for (i = 0; i < mci->nr_csrows; i++) {
861 struct csrow_info *csrow = csrows[i];
862 n = 0;
863 for (j = 0; j < csrow->nr_channels; j++) {
864 struct dimm_info *dimm = csrow->channels[j]->dimm;
865 n += dimm->nr_pages;
866 }
867 if (n == 0)
868 continue;
869
870 edac_dbg(3, "MC%d: first(0x%lx) page(0x%lx) last(0x%lx) mask(0x%lx)\n",
871 mci->mc_idx,
872 csrow->first_page, page, csrow->last_page,
873 csrow->page_mask);
874
875 if ((page >= csrow->first_page) &&
876 (page <= csrow->last_page) &&
877 ((page & csrow->page_mask) ==
878 (csrow->first_page & csrow->page_mask))) {
879 row = i;
880 break;
881 }
882 }
883
884 if (row == -1)
885 edac_mc_printk(mci, KERN_ERR,
886 "could not look up page error address %lx\n",
887 (unsigned long)page);
888
889 return row;
890 }
891 EXPORT_SYMBOL_GPL(edac_mc_find_csrow_by_page);
892
893 const char *edac_layer_name[] = {
894 [EDAC_MC_LAYER_BRANCH] = "branch",
895 [EDAC_MC_LAYER_CHANNEL] = "channel",
896 [EDAC_MC_LAYER_SLOT] = "slot",
897 [EDAC_MC_LAYER_CHIP_SELECT] = "csrow",
898 };
899 EXPORT_SYMBOL_GPL(edac_layer_name);
900
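/*
 * The two helpers below walk the layer positions and build a row-major
 * linear index into the ce_per_layer[]/ue_per_layer[] counter arrays:
 * at each level the accumulated index is scaled by the size of the next
 * layer before the next position is added.
 */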
901 static void edac_inc_ce_error(struct mem_ctl_info *mci,
902 bool enable_per_layer_report,
903 const int pos[EDAC_MAX_LAYERS],
904 const u16 count)
905 {
906 int i, index = 0;
907
908 mci->ce_mc += count;
909
910 if (!enable_per_layer_report) {
911 mci->ce_noinfo_count += count;
912 return;
913 }
914
915 for (i = 0; i < mci->n_layers; i++) {
916 if (pos[i] < 0)
917 break;
918 index += pos[i];
919 mci->ce_per_layer[i][index] += count;
920
921 if (i < mci->n_layers - 1)
922 index *= mci->layers[i + 1].size;
923 }
924 }
925
926 static void edac_inc_ue_error(struct mem_ctl_info *mci,
927 bool enable_per_layer_report,
928 const int pos[EDAC_MAX_LAYERS],
929 const u16 count)
930 {
931 int i, index = 0;
932
933 mci->ue_mc += count;
934
935 if (!enable_per_layer_report) {
936 		mci->ue_noinfo_count += count;
937 return;
938 }
939
940 for (i = 0; i < mci->n_layers; i++) {
941 if (pos[i] < 0)
942 break;
943 index += pos[i];
944 mci->ue_per_layer[i][index] += count;
945
946 if (i < mci->n_layers - 1)
947 index *= mci->layers[i + 1].size;
948 }
949 }
950
951 static void edac_ce_error(struct mem_ctl_info *mci,
952 const u16 error_count,
953 const int pos[EDAC_MAX_LAYERS],
954 const char *msg,
955 const char *location,
956 const char *label,
957 const char *detail,
958 const char *other_detail,
959 const bool enable_per_layer_report,
960 const unsigned long page_frame_number,
961 const unsigned long offset_in_page,
962 long grain)
963 {
964 unsigned long remapped_page;
965
966 if (edac_mc_get_log_ce()) {
967 if (other_detail && *other_detail)
968 edac_mc_printk(mci, KERN_WARNING,
969 "%d CE %s on %s (%s %s - %s)\n",
970 error_count,
971 msg, label, location,
972 detail, other_detail);
973 else
974 edac_mc_printk(mci, KERN_WARNING,
975 "%d CE %s on %s (%s %s)\n",
976 error_count,
977 msg, label, location,
978 detail);
979 }
980 edac_inc_ce_error(mci, enable_per_layer_report, pos, error_count);
981
982 if (mci->scrub_mode & SCRUB_SW_SRC) {
983 /*
984 * Some memory controllers (called MCs below) can remap
985 * memory so that it is still available at a different
986 * address when PCI devices map into memory.
987 * MC's that can't do this, lose the memory where PCI
988 * devices are mapped. This mapping is MC-dependent
989 * and so we call back into the MC driver for it to
990 * map the MC page to a physical (CPU) page which can
991 * then be mapped to a virtual page - which can then
992 * be scrubbed.
993 */
994 remapped_page = mci->ctl_page_to_phys ?
995 mci->ctl_page_to_phys(mci, page_frame_number) :
996 page_frame_number;
997
998 edac_mc_scrub_block(remapped_page,
999 offset_in_page, grain);
1000 }
1001 }
1002
1003 static void edac_ue_error(struct mem_ctl_info *mci,
1004 const u16 error_count,
1005 const int pos[EDAC_MAX_LAYERS],
1006 const char *msg,
1007 const char *location,
1008 const char *label,
1009 const char *detail,
1010 const char *other_detail,
1011 const bool enable_per_layer_report)
1012 {
1013 if (edac_mc_get_log_ue()) {
1014 if (other_detail && *other_detail)
1015 edac_mc_printk(mci, KERN_WARNING,
1016 "%d UE %s on %s (%s %s - %s)\n",
1017 error_count,
1018 msg, label, location, detail,
1019 other_detail);
1020 else
1021 edac_mc_printk(mci, KERN_WARNING,
1022 "%d UE %s on %s (%s %s)\n",
1023 error_count,
1024 msg, label, location, detail);
1025 }
1026
1027 if (edac_mc_get_panic_on_ue()) {
1028 if (other_detail && *other_detail)
1029 panic("UE %s on %s (%s%s - %s)\n",
1030 msg, label, location, detail, other_detail);
1031 else
1032 panic("UE %s on %s (%s%s)\n",
1033 msg, label, location, detail);
1034 }
1035
1036 edac_inc_ue_error(mci, enable_per_layer_report, pos, error_count);
1037 }
1038
1039 #define OTHER_LABEL " or "
1040
1041 /**
1042 * edac_mc_handle_error - reports a memory event to userspace
1043 *
1044 * @type: severity of the error (CE/UE/Fatal)
1045 * @mci: a struct mem_ctl_info pointer
1046 * @error_count: Number of errors of the same type
1047 * @page_frame_number: mem page where the error occurred
1048 * @offset_in_page: offset of the error inside the page
1049 * @syndrome: ECC syndrome
1050 * @top_layer: Memory layer[0] position
1051 * @mid_layer: Memory layer[1] position
1052 * @low_layer: Memory layer[2] position
1053 * @msg: Message meaningful to the end users that
1054 * explains the event
1055 * @other_detail: Technical details about the event that
1056 * may help hardware manufacturers and
1057 * EDAC developers to analyse the event
1058 */
1059 void edac_mc_handle_error(const enum hw_event_mc_err_type type,
1060 struct mem_ctl_info *mci,
1061 const u16 error_count,
1062 const unsigned long page_frame_number,
1063 const unsigned long offset_in_page,
1064 const unsigned long syndrome,
1065 const int top_layer,
1066 const int mid_layer,
1067 const int low_layer,
1068 const char *msg,
1069 const char *other_detail)
1070 {
1071 	/* FIXME: too much for stack: move it to some pre-allocated area */
1072 char detail[80], location[80];
1073 char label[(EDAC_MC_LABEL_LEN + 1 + sizeof(OTHER_LABEL)) * mci->tot_dimms];
1074 char *p;
1075 int row = -1, chan = -1;
1076 int pos[EDAC_MAX_LAYERS] = { top_layer, mid_layer, low_layer };
1077 int i;
1078 long grain;
1079 bool enable_per_layer_report = false;
1080 u8 grain_bits;
1081
1082 edac_dbg(3, "MC%d\n", mci->mc_idx);
1083
1084 /*
1085 * Check if the event report is consistent and if the memory
1086 * location is known. If it is known, enable_per_layer_report will be
1087 * true, the DIMM(s) label info will be filled and the per-layer
1088 * error counters will be incremented.
1089 */
1090 for (i = 0; i < mci->n_layers; i++) {
1091 if (pos[i] >= (int)mci->layers[i].size) {
1092 if (type == HW_EVENT_ERR_CORRECTED)
1093 p = "CE";
1094 else
1095 p = "UE";
1096
1097 edac_mc_printk(mci, KERN_ERR,
1098 "INTERNAL ERROR: %s value is out of range (%d >= %d)\n",
1099 edac_layer_name[mci->layers[i].type],
1100 pos[i], mci->layers[i].size);
1101 /*
1102 * Instead of just returning it, let's use what's
1103 * known about the error. The increment routines and
1104 * the DIMM filter logic will do the right thing by
1105 * pointing the likely damaged DIMMs.
1106 */
1107 pos[i] = -1;
1108 }
1109 if (pos[i] >= 0)
1110 enable_per_layer_report = true;
1111 }
1112
1113 /*
1114 * Get the dimm label/grain that applies to the match criteria.
1115 * As the error algorithm may not be able to point to just one memory
1116 * stick, the logic here will get all possible labels that could
1117 	 * potentially be affected by the error.
1118 * On FB-DIMM memory controllers, for uncorrected errors, it is common
1119 * to have only the MC channel and the MC dimm (also called "branch")
1120 * but the channel is not known, as the memory is arranged in pairs,
1121 * where each memory belongs to a separate channel within the same
1122 * branch.
1123 */
1124 grain = 0;
1125 p = label;
1126 *p = '\0';
1127 for (i = 0; i < mci->tot_dimms; i++) {
1128 struct dimm_info *dimm = mci->dimms[i];
1129
1130 if (top_layer >= 0 && top_layer != dimm->location[0])
1131 continue;
1132 if (mid_layer >= 0 && mid_layer != dimm->location[1])
1133 continue;
1134 if (low_layer >= 0 && low_layer != dimm->location[2])
1135 continue;
1136
1137 /* get the max grain, over the error match range */
1138 if (dimm->grain > grain)
1139 grain = dimm->grain;
1140
1141 /*
1142 * If the error is memory-controller wide, there's no need to
1143 * seek for the affected DIMMs because the whole
1144 * channel/memory controller/... may be affected.
1145 * Also, don't show errors for empty DIMM slots.
1146 */
1147 if (enable_per_layer_report && dimm->nr_pages) {
1148 if (p != label) {
1149 strcpy(p, OTHER_LABEL);
1150 p += strlen(OTHER_LABEL);
1151 }
1152 strcpy(p, dimm->label);
1153 p += strlen(p);
1154 *p = '\0';
1155
1156 /*
1157 * get csrow/channel of the DIMM, in order to allow
1158 * incrementing the compat API counters
1159 */
1160 edac_dbg(4, "%s csrows map: (%d,%d)\n",
1161 mci->mem_is_per_rank ? "rank" : "dimm",
1162 dimm->csrow, dimm->cschannel);
1163 if (row == -1)
1164 row = dimm->csrow;
1165 else if (row >= 0 && row != dimm->csrow)
1166 row = -2;
1167
1168 if (chan == -1)
1169 chan = dimm->cschannel;
1170 else if (chan >= 0 && chan != dimm->cschannel)
1171 chan = -2;
1172 }
1173 }
1174
1175 if (!enable_per_layer_report) {
1176 strcpy(label, "any memory");
1177 } else {
1178 edac_dbg(4, "csrow/channel to increment: (%d,%d)\n", row, chan);
1179 if (p == label)
1180 strcpy(label, "unknown memory");
1181 if (type == HW_EVENT_ERR_CORRECTED) {
1182 if (row >= 0) {
1183 mci->csrows[row]->ce_count += error_count;
1184 if (chan >= 0)
1185 mci->csrows[row]->channels[chan]->ce_count += error_count;
1186 }
1187 } else
1188 if (row >= 0)
1189 mci->csrows[row]->ue_count += error_count;
1190 }
1191
1192 /* Fill the RAM location data */
1193 p = location;
1194 for (i = 0; i < mci->n_layers; i++) {
1195 if (pos[i] < 0)
1196 continue;
1197
1198 p += sprintf(p, "%s:%d ",
1199 edac_layer_name[mci->layers[i].type],
1200 pos[i]);
1201 }
1202 if (p > location)
1203 *(p - 1) = '\0';
1204
1205 /* Report the error via the trace interface */
1206
1207 grain_bits = fls_long(grain) + 1;
1208 trace_mc_event(type, msg, label, error_count,
1209 mci->mc_idx, top_layer, mid_layer, low_layer,
1210 PAGES_TO_MiB(page_frame_number) | offset_in_page,
1211 grain_bits, syndrome, other_detail);
1212
1213 /* Memory type dependent details about the error */
1214 if (type == HW_EVENT_ERR_CORRECTED) {
1215 snprintf(detail, sizeof(detail),
1216 "page:0x%lx offset:0x%lx grain:%ld syndrome:0x%lx",
1217 page_frame_number, offset_in_page,
1218 grain, syndrome);
1219 edac_ce_error(mci, error_count, pos, msg, location, label,
1220 detail, other_detail, enable_per_layer_report,
1221 page_frame_number, offset_in_page, grain);
1222 } else {
1223 snprintf(detail, sizeof(detail),
1224 "page:0x%lx offset:0x%lx grain:%ld",
1225 page_frame_number, offset_in_page, grain);
1226
1227 edac_ue_error(mci, error_count, pos, msg, location, label,
1228 detail, other_detail, enable_per_layer_report);
1229 }
1230 }
1231 EXPORT_SYMBOL_GPL(edac_mc_handle_error);
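/*
 * Illustrative call (a sketch only, all values are placeholders): a driver
 * that decoded one corrected error down to a csrow/channel pair would
 * report it as
 *
 *	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
 *			     page_frame_number, offset_in_page, syndrome,
 *			     csrow, channel, -1,
 *			     "memory read error", "");
 *
 * with -1 marking any layer the hardware could not resolve.
 */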