drivers/edac/edac_mc.c
1 /*
2 * edac_mc kernel module
3 * (C) 2005 Linux Networx (http://lnxi.com)
4 * This file may be distributed under the terms of the
5 * GNU General Public License.
6 *
7 * Written by Thayne Harbaugh
8 * Based on work by Dan Hollis <goemon at anime dot net> and others.
9 * http://www.anime.net/~goemon/linux-ecc/
10 *
11 * Modified by Dave Peterson and Doug Thompson
12 *
13 */
14
15 #include <linux/config.h>
16 #include <linux/module.h>
17 #include <linux/proc_fs.h>
18 #include <linux/kernel.h>
19 #include <linux/types.h>
20 #include <linux/smp.h>
21 #include <linux/init.h>
22 #include <linux/sysctl.h>
23 #include <linux/highmem.h>
24 #include <linux/timer.h>
25 #include <linux/slab.h>
26 #include <linux/jiffies.h>
27 #include <linux/spinlock.h>
28 #include <linux/list.h>
29 #include <linux/sysdev.h>
30 #include <linux/ctype.h>
31 #include <linux/kthread.h>
32 #include <asm/uaccess.h>
33 #include <asm/page.h>
34 #include <asm/edac.h>
35 #include "edac_mc.h"
36
37 #define EDAC_MC_VERSION "Ver: 2.0.0 " __DATE__
38
39 /* For now, disable the EDAC sysfs code. The sysfs interface that EDAC
40 * presents to user space needs more thought, and is likely to change
41 * substantially.
42 */
43 #define DISABLE_EDAC_SYSFS
44
45 #ifdef CONFIG_EDAC_DEBUG
46 /* Values of 0 to 4 will generate output */
47 int edac_debug_level = 1;
48 EXPORT_SYMBOL_GPL(edac_debug_level);
49 #endif
50
51 /* EDAC controls, settable by module parameter and sysfs */
52 static int log_ue = 1;
53 static int log_ce = 1;
54 static int panic_on_ue;
55 static int poll_msec = 1000;
56
57 static int check_pci_parity = 0; /* default NO check PCI parity */
58 static int panic_on_pci_parity; /* default no panic on PCI Parity */
59 static atomic_t pci_parity_count = ATOMIC_INIT(0);
60
61 /* lock to memory controller's control array */
62 static DECLARE_MUTEX(mem_ctls_mutex);
63 static struct list_head mc_devices = LIST_HEAD_INIT(mc_devices);
64
65 static struct task_struct *edac_thread;
66
67 /* Structure of the whitelist and blacklist arrays */
68 struct edac_pci_device_list {
69 unsigned int vendor; /* Vendor ID */
70 unsigned int device; /* Device ID */
71 };
72
73 #define MAX_LISTED_PCI_DEVICES 32
74
75 /* List of PCI devices (vendor-id:device-id) that should be skipped */
76 static struct edac_pci_device_list pci_blacklist[MAX_LISTED_PCI_DEVICES];
77 static int pci_blacklist_count;
78
79 /* List of PCI devices (vendor-id:device-id) that should be scanned */
80 static struct edac_pci_device_list pci_whitelist[MAX_LISTED_PCI_DEVICES];
81 static int pci_whitelist_count;
82
83 /* START sysfs data and methods */
84
85 #ifndef DISABLE_EDAC_SYSFS
86
87 static const char *mem_types[] = {
88 [MEM_EMPTY] = "Empty",
89 [MEM_RESERVED] = "Reserved",
90 [MEM_UNKNOWN] = "Unknown",
91 [MEM_FPM] = "FPM",
92 [MEM_EDO] = "EDO",
93 [MEM_BEDO] = "BEDO",
94 [MEM_SDR] = "Unbuffered-SDR",
95 [MEM_RDR] = "Registered-SDR",
96 [MEM_DDR] = "Unbuffered-DDR",
97 [MEM_RDDR] = "Registered-DDR",
98 [MEM_RMBS] = "RMBS"
99 };
100
101 static const char *dev_types[] = {
102 [DEV_UNKNOWN] = "Unknown",
103 [DEV_X1] = "x1",
104 [DEV_X2] = "x2",
105 [DEV_X4] = "x4",
106 [DEV_X8] = "x8",
107 [DEV_X16] = "x16",
108 [DEV_X32] = "x32",
109 [DEV_X64] = "x64"
110 };
111
112 static const char *edac_caps[] = {
113 [EDAC_UNKNOWN] = "Unknown",
114 [EDAC_NONE] = "None",
115 [EDAC_RESERVED] = "Reserved",
116 [EDAC_PARITY] = "PARITY",
117 [EDAC_EC] = "EC",
118 [EDAC_SECDED] = "SECDED",
119 [EDAC_S2ECD2ED] = "S2ECD2ED",
120 [EDAC_S4ECD4ED] = "S4ECD4ED",
121 [EDAC_S8ECD8ED] = "S8ECD8ED",
122 [EDAC_S16ECD16ED] = "S16ECD16ED"
123 };
124
125 /* sysfs object: /sys/devices/system/edac */
126 static struct sysdev_class edac_class = {
127 set_kset_name("edac"),
128 };
129
130 /* sysfs objects:
131 * /sys/devices/system/edac/mc
132 * /sys/devices/system/edac/pci
133 */
134 static struct kobject edac_memctrl_kobj;
135 static struct kobject edac_pci_kobj;
136
137 /* We use these to wait for the reference counts on edac_memctrl_kobj and
138 * edac_pci_kobj to reach 0.
139 */
140 static struct completion edac_memctrl_kobj_complete;
141 static struct completion edac_pci_kobj_complete;
142
143 /*
144 * /sys/devices/system/edac/mc;
145 * data structures and methods
146 */
147 #if 0
148 static ssize_t memctrl_string_show(void *ptr, char *buffer)
149 {
150 char *value = (char*) ptr;
151 return sprintf(buffer, "%s\n", value);
152 }
153 #endif
154
155 static ssize_t memctrl_int_show(void *ptr, char *buffer)
156 {
157 int *value = (int*) ptr;
158 return sprintf(buffer, "%d\n", *value);
159 }
160
161 static ssize_t memctrl_int_store(void *ptr, const char *buffer, size_t count)
162 {
163 int *value = (int*) ptr;
164
165 if (isdigit(*buffer))
166 *value = simple_strtoul(buffer, NULL, 0);
167
168 return count;
169 }
170
171 struct memctrl_dev_attribute {
172 struct attribute attr;
173 void *value;
174 ssize_t (*show)(void *,char *);
175 ssize_t (*store)(void *, const char *, size_t);
176 };
177
178 /* Set of show/store abstract level functions for memory control object */
179 static ssize_t memctrl_dev_show(struct kobject *kobj,
180 struct attribute *attr, char *buffer)
181 {
182 struct memctrl_dev_attribute *memctrl_dev;
183 memctrl_dev = (struct memctrl_dev_attribute*)attr;
184
185 if (memctrl_dev->show)
186 return memctrl_dev->show(memctrl_dev->value, buffer);
187
188 return -EIO;
189 }
190
191 static ssize_t memctrl_dev_store(struct kobject *kobj, struct attribute *attr,
192 const char *buffer, size_t count)
193 {
194 struct memctrl_dev_attribute *memctrl_dev;
195 memctrl_dev = (struct memctrl_dev_attribute*)attr;
196
197 if (memctrl_dev->store)
198 return memctrl_dev->store(memctrl_dev->value, buffer, count);
199
200 return -EIO;
201 }
202
203 static struct sysfs_ops memctrlfs_ops = {
204 .show = memctrl_dev_show,
205 .store = memctrl_dev_store
206 };
207
208 #define MEMCTRL_ATTR(_name,_mode,_show,_store) \
209 struct memctrl_dev_attribute attr_##_name = { \
210 .attr = {.name = __stringify(_name), .mode = _mode }, \
211 .value = &_name, \
212 .show = _show, \
213 .store = _store, \
214 };
215
216 #define MEMCTRL_STRING_ATTR(_name,_data,_mode,_show,_store) \
217 struct memctrl_dev_attribute attr_##_name = { \
218 .attr = {.name = __stringify(_name), .mode = _mode }, \
219 .value = _data, \
220 .show = _show, \
221 .store = _store, \
222 };
223
224 /* mc_version string attribute (currently disabled) */
225 #if 0
226 MEMCTRL_STRING_ATTR(mc_version,EDAC_MC_VERSION,S_IRUGO,memctrl_string_show,NULL);
227 #endif
228
229 /* memory controller control files */
230 MEMCTRL_ATTR(panic_on_ue,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store);
231 MEMCTRL_ATTR(log_ue,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store);
232 MEMCTRL_ATTR(log_ce,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store);
233 MEMCTRL_ATTR(poll_msec,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store);
234
235 /* Base Attributes of the memory ECC object */
236 static struct memctrl_dev_attribute *memctrl_attr[] = {
237 &attr_panic_on_ue,
238 &attr_log_ue,
239 &attr_log_ce,
240 &attr_poll_msec,
241 NULL,
242 };
243
244 /* Main MC kobject release() function */
245 static void edac_memctrl_master_release(struct kobject *kobj)
246 {
247 debugf1("%s()\n", __func__);
248 complete(&edac_memctrl_kobj_complete);
249 }
250
251 static struct kobj_type ktype_memctrl = {
252 .release = edac_memctrl_master_release,
253 .sysfs_ops = &memctrlfs_ops,
254 .default_attrs = (struct attribute **) memctrl_attr,
255 };
256
257 #endif /* DISABLE_EDAC_SYSFS */
258
259 /* Initialize the main sysfs entries for edac:
260 * /sys/devices/system/edac
261 *
262 * and children
263 *
264 * Return: 0 SUCCESS
265 * !0 FAILURE
266 */
267 static int edac_sysfs_memctrl_setup(void)
268 #ifdef DISABLE_EDAC_SYSFS
269 {
270 return 0;
271 }
272 #else
273 {
274 int err=0;
275
276 debugf1("%s()\n", __func__);
277
278 /* create the /sys/devices/system/edac directory */
279 err = sysdev_class_register(&edac_class);
280
281 if (!err) {
282 /* Init the MC's kobject */
283 memset(&edac_memctrl_kobj, 0, sizeof (edac_memctrl_kobj));
284 edac_memctrl_kobj.parent = &edac_class.kset.kobj;
285 edac_memctrl_kobj.ktype = &ktype_memctrl;
286
287 /* generate sysfs "..../edac/mc" */
288 err = kobject_set_name(&edac_memctrl_kobj,"mc");
289
290 if (!err) {
291 /* FIXME: maybe new sysdev_create_subdir() */
292 err = kobject_register(&edac_memctrl_kobj);
293
294 if (err)
295 debugf1("Failed to register '.../edac/mc'\n");
296 else
297 debugf1("Registered '.../edac/mc' kobject\n");
298 }
299 } else
300 debugf1("%s() error=%d\n", __func__, err);
301
302 return err;
303 }
304 #endif /* DISABLE_EDAC_SYSFS */
305
306 /*
307 * MC teardown:
308 * the '..../edac/mc' kobject followed by '..../edac' itself
309 */
310 static void edac_sysfs_memctrl_teardown(void)
311 {
312 #ifndef DISABLE_EDAC_SYSFS
313 debugf0("MC: " __FILE__ ": %s()\n", __func__);
314
315 /* Unregister the MC's kobject and wait for reference count to reach
316 * 0.
317 */
318 init_completion(&edac_memctrl_kobj_complete);
319 kobject_unregister(&edac_memctrl_kobj);
320 wait_for_completion(&edac_memctrl_kobj_complete);
321
322 /* Unregister the 'edac' object */
323 sysdev_class_unregister(&edac_class);
324 #endif /* DISABLE_EDAC_SYSFS */
325 }
326
327 #ifndef DISABLE_EDAC_SYSFS
328
329 /*
330 * /sys/devices/system/edac/pci;
331 * data structures and methods
332 */
333
334 struct list_control {
335 struct edac_pci_device_list *list;
336 int *count;
337 };
338
339 #if 0
340 /* Output the list as: vendor_id:device_id<,vendor_id:device_id> */
341 static ssize_t edac_pci_list_string_show(void *ptr, char *buffer)
342 {
343 struct list_control *listctl;
344 struct edac_pci_device_list *list;
345 char *p = buffer;
346 int len=0;
347 int i;
348
349 listctl = ptr;
350 list = listctl->list;
351
352 for (i = 0; i < *(listctl->count); i++, list++ ) {
353 if (len > 0)
354 len += snprintf(p + len, (PAGE_SIZE-len), ",");
355
356 len += snprintf(p + len,
357 (PAGE_SIZE-len),
358 "%x:%x",
359 list->vendor,list->device);
360 }
361
362 len += snprintf(p + len,(PAGE_SIZE-len), "\n");
363 return (ssize_t) len;
364 }
365
366 /**
367 *
368 * Scan string from **s to **e looking for one 'vendor:device' tuple
369 * where each field is a hex value
370 *
371 * return 0 if an entry is NOT found
372 * return 1 if an entry is found
373 * fill in *vendor_id and *device_id with values found
374 *
375 * In both cases, make sure *s has been moved forward toward *e
376 */
377 static int parse_one_device(const char **s,const char **e,
378 unsigned int *vendor_id, unsigned int *device_id)
379 {
380 const char *runner, *p;
381
382 /* if null byte, we are done */
383 if (!**s) {
384 (*s)++; /* keep *s moving */
385 return 0;
386 }
387
388 /* skip over newlines & whitespace */
389 if ((**s == '\n') || isspace(**s)) {
390 (*s)++;
391 return 0;
392 }
393
394 if (!isxdigit(**s)) {
395 (*s)++;
396 return 0;
397 }
398
399 /* parse vendor_id */
400 runner = *s;
401
402 while (runner < *e) {
403 /* scan for vendor:device delimiter */
404 if (*runner == ':') {
405 *vendor_id = simple_strtol((char*) *s, (char**) &p, 16);
406 runner = p + 1;
407 break;
408 }
409
410 runner++;
411 }
412
413 if (!isxdigit(*runner)) {
414 *s = ++runner;
415 return 0;
416 }
417
418 /* parse device_id */
419 if (runner < *e) {
420 *device_id = simple_strtol((char*)runner, (char**)&p, 16);
421 runner = p;
422 }
423
424 *s = runner;
425 return 1;
426 }
427
428 static ssize_t edac_pci_list_string_store(void *ptr, const char *buffer,
429 size_t count)
430 {
431 struct list_control *listctl;
432 struct edac_pci_device_list *list;
433 unsigned int vendor_id, device_id;
434 const char *s, *e;
435 int *index;
436
437 s = (char*)buffer;
438 e = s + count;
439 listctl = ptr;
440 list = listctl->list;
441 index = listctl->count;
442 *index = 0;
443
444 while (*index < MAX_LISTED_PCI_DEVICES) {
445 if (parse_one_device(&s,&e,&vendor_id,&device_id)) {
446 list[ *index ].vendor = vendor_id;
447 list[ *index ].device = device_id;
448 (*index)++;
449 }
450
451 /* check for all data consumed */
452 if (s >= e)
453 break;
454 }
455
456 return count;
457 }
458
459 #endif
460 static ssize_t edac_pci_int_show(void *ptr, char *buffer)
461 {
462 int *value = ptr;
463 return sprintf(buffer,"%d\n",*value);
464 }
465
466 static ssize_t edac_pci_int_store(void *ptr, const char *buffer, size_t count)
467 {
468 int *value = ptr;
469
470 if (isdigit(*buffer))
471 *value = simple_strtoul(buffer,NULL,0);
472
473 return count;
474 }
475
476 struct edac_pci_dev_attribute {
477 struct attribute attr;
478 void *value;
479 ssize_t (*show)(void *,char *);
480 ssize_t (*store)(void *, const char *,size_t);
481 };
482
483 /* Set of show/store abstract level functions for PCI Parity object */
484 static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
485 char *buffer)
486 {
487 struct edac_pci_dev_attribute *edac_pci_dev;
488 edac_pci_dev= (struct edac_pci_dev_attribute*)attr;
489
490 if (edac_pci_dev->show)
491 return edac_pci_dev->show(edac_pci_dev->value, buffer);
492 return -EIO;
493 }
494
495 static ssize_t edac_pci_dev_store(struct kobject *kobj,
496 struct attribute *attr, const char *buffer, size_t count)
497 {
498 struct edac_pci_dev_attribute *edac_pci_dev;
499 edac_pci_dev= (struct edac_pci_dev_attribute*)attr;
500
501 if (edac_pci_dev->store)
502 return edac_pci_dev->store(edac_pci_dev->value, buffer, count);
503 return -EIO;
504 }
505
506 static struct sysfs_ops edac_pci_sysfs_ops = {
507 .show = edac_pci_dev_show,
508 .store = edac_pci_dev_store
509 };
510
511 #define EDAC_PCI_ATTR(_name,_mode,_show,_store) \
512 struct edac_pci_dev_attribute edac_pci_attr_##_name = { \
513 .attr = {.name = __stringify(_name), .mode = _mode }, \
514 .value = &_name, \
515 .show = _show, \
516 .store = _store, \
517 };
518
519 #define EDAC_PCI_STRING_ATTR(_name,_data,_mode,_show,_store) \
520 struct edac_pci_dev_attribute edac_pci_attr_##_name = { \
521 .attr = {.name = __stringify(_name), .mode = _mode }, \
522 .value = _data, \
523 .show = _show, \
524 .store = _store, \
525 };
526
527 #if 0
528 static struct list_control pci_whitelist_control = {
529 .list = pci_whitelist,
530 .count = &pci_whitelist_count
531 };
532
533 static struct list_control pci_blacklist_control = {
534 .list = pci_blacklist,
535 .count = &pci_blacklist_count
536 };
537
538 /* whitelist attribute */
539 EDAC_PCI_STRING_ATTR(pci_parity_whitelist,
540 &pci_whitelist_control,
541 S_IRUGO|S_IWUSR,
542 edac_pci_list_string_show,
543 edac_pci_list_string_store);
544
545 EDAC_PCI_STRING_ATTR(pci_parity_blacklist,
546 &pci_blacklist_control,
547 S_IRUGO|S_IWUSR,
548 edac_pci_list_string_show,
549 edac_pci_list_string_store);
550 #endif
551
552 /* PCI Parity control files */
553 EDAC_PCI_ATTR(check_pci_parity, S_IRUGO|S_IWUSR, edac_pci_int_show,
554 edac_pci_int_store);
555 EDAC_PCI_ATTR(panic_on_pci_parity, S_IRUGO|S_IWUSR, edac_pci_int_show,
556 edac_pci_int_store);
557 EDAC_PCI_ATTR(pci_parity_count, S_IRUGO, edac_pci_int_show, NULL);
558
559 /* Base Attributes of the PCI parity object */
560 static struct edac_pci_dev_attribute *edac_pci_attr[] = {
561 &edac_pci_attr_check_pci_parity,
562 &edac_pci_attr_panic_on_pci_parity,
563 &edac_pci_attr_pci_parity_count,
564 NULL,
565 };
566
567 /* No memory to release */
568 static void edac_pci_release(struct kobject *kobj)
569 {
570 debugf1("%s()\n", __func__);
571 complete(&edac_pci_kobj_complete);
572 }
573
574 static struct kobj_type ktype_edac_pci = {
575 .release = edac_pci_release,
576 .sysfs_ops = &edac_pci_sysfs_ops,
577 .default_attrs = (struct attribute **) edac_pci_attr,
578 };
579
580 #endif /* DISABLE_EDAC_SYSFS */
581
582 /*
583 * edac_sysfs_pci_setup()
584 * Create the '..../edac/pci' kobject for the PCI parity controls
585 */
586 static int edac_sysfs_pci_setup(void)
587 #ifdef DISABLE_EDAC_SYSFS
588 {
589 return 0;
590 }
591 #else
592 {
593 int err;
594
595 debugf1("%s()\n", __func__);
596
597 memset(&edac_pci_kobj, 0, sizeof(edac_pci_kobj));
598 edac_pci_kobj.parent = &edac_class.kset.kobj;
599 edac_pci_kobj.ktype = &ktype_edac_pci;
600 err = kobject_set_name(&edac_pci_kobj, "pci");
601
602 if (!err) {
603 /* Instantiate the pci kobject */
604 /* FIXME: maybe new sysdev_create_subdir() */
605 err = kobject_register(&edac_pci_kobj);
606
607 if (err)
608 debugf1("Failed to register '.../edac/pci'\n");
609 else
610 debugf1("Registered '.../edac/pci' kobject\n");
611 }
612
613 return err;
614 }
615 #endif /* DISABLE_EDAC_SYSFS */
616
617 static void edac_sysfs_pci_teardown(void)
618 {
619 #ifndef DISABLE_EDAC_SYSFS
620 debugf0("%s()\n", __func__);
621 init_completion(&edac_pci_kobj_complete);
622 kobject_unregister(&edac_pci_kobj);
623 wait_for_completion(&edac_pci_kobj_complete);
624 #endif
625 }
626
627 #ifndef DISABLE_EDAC_SYSFS
628
629 /* EDAC sysfs CSROW data structures and methods */
630
631 /* Set of more detailed csrow<id> attribute show/store functions */
632 static ssize_t csrow_ch0_dimm_label_show(struct csrow_info *csrow, char *data)
633 {
634 ssize_t size = 0;
635
636 if (csrow->nr_channels > 0) {
637 size = snprintf(data, EDAC_MC_LABEL_LEN,"%s\n",
638 csrow->channels[0].label);
639 }
640
641 return size;
642 }
643
644 static ssize_t csrow_ch1_dimm_label_show(struct csrow_info *csrow, char *data)
645 {
646 ssize_t size = 0;
647
648 if (csrow->nr_channels > 1) {
649 size = snprintf(data, EDAC_MC_LABEL_LEN, "%s\n",
650 csrow->channels[1].label);
651 }
652
653 return size;
654 }
655
656 static ssize_t csrow_ch0_dimm_label_store(struct csrow_info *csrow,
657 const char *data, size_t size)
658 {
659 ssize_t max_size = 0;
660
661 if (csrow->nr_channels > 0) {
662 max_size = min((ssize_t)size,(ssize_t)EDAC_MC_LABEL_LEN-1);
663 strncpy(csrow->channels[0].label, data, max_size);
664 csrow->channels[0].label[max_size] = '\0';
665 }
666
667 return size;
668 }
669
670 static ssize_t csrow_ch1_dimm_label_store(struct csrow_info *csrow,
671 const char *data, size_t size)
672 {
673 ssize_t max_size = 0;
674
675 if (csrow->nr_channels > 1) {
676 max_size = min((ssize_t)size,(ssize_t)EDAC_MC_LABEL_LEN-1);
677 strncpy(csrow->channels[1].label, data, max_size);
678 csrow->channels[1].label[max_size] = '\0';
679 }
680
681 return max_size;
682 }
683
684 static ssize_t csrow_ue_count_show(struct csrow_info *csrow, char *data)
685 {
686 return sprintf(data,"%u\n", csrow->ue_count);
687 }
688
689 static ssize_t csrow_ce_count_show(struct csrow_info *csrow, char *data)
690 {
691 return sprintf(data,"%u\n", csrow->ce_count);
692 }
693
694 static ssize_t csrow_ch0_ce_count_show(struct csrow_info *csrow, char *data)
695 {
696 ssize_t size = 0;
697
698 if (csrow->nr_channels > 0) {
699 size = sprintf(data,"%u\n", csrow->channels[0].ce_count);
700 }
701
702 return size;
703 }
704
705 static ssize_t csrow_ch1_ce_count_show(struct csrow_info *csrow, char *data)
706 {
707 ssize_t size = 0;
708
709 if (csrow->nr_channels > 1) {
710 size = sprintf(data,"%u\n", csrow->channels[1].ce_count);
711 }
712
713 return size;
714 }
715
716 static ssize_t csrow_size_show(struct csrow_info *csrow, char *data)
717 {
718 return sprintf(data,"%u\n", PAGES_TO_MiB(csrow->nr_pages));
719 }
720
721 static ssize_t csrow_mem_type_show(struct csrow_info *csrow, char *data)
722 {
723 return sprintf(data,"%s\n", mem_types[csrow->mtype]);
724 }
725
726 static ssize_t csrow_dev_type_show(struct csrow_info *csrow, char *data)
727 {
728 return sprintf(data,"%s\n", dev_types[csrow->dtype]);
729 }
730
731 static ssize_t csrow_edac_mode_show(struct csrow_info *csrow, char *data)
732 {
733 return sprintf(data,"%s\n", edac_caps[csrow->edac_mode]);
734 }
735
736 struct csrowdev_attribute {
737 struct attribute attr;
738 ssize_t (*show)(struct csrow_info *,char *);
739 ssize_t (*store)(struct csrow_info *, const char *,size_t);
740 };
741
742 #define to_csrow(k) container_of(k, struct csrow_info, kobj)
743 #define to_csrowdev_attr(a) container_of(a, struct csrowdev_attribute, attr)
744
745 /* Set of show/store higher level functions for csrow objects */
746 static ssize_t csrowdev_show(struct kobject *kobj, struct attribute *attr,
747 char *buffer)
748 {
749 struct csrow_info *csrow = to_csrow(kobj);
750 struct csrowdev_attribute *csrowdev_attr = to_csrowdev_attr(attr);
751
752 if (csrowdev_attr->show)
753 return csrowdev_attr->show(csrow, buffer);
754
755 return -EIO;
756 }
757
758 static ssize_t csrowdev_store(struct kobject *kobj, struct attribute *attr,
759 const char *buffer, size_t count)
760 {
761 struct csrow_info *csrow = to_csrow(kobj);
762 struct csrowdev_attribute * csrowdev_attr = to_csrowdev_attr(attr);
763
764 if (csrowdev_attr->store)
765 return csrowdev_attr->store(csrow, buffer, count);
766
767 return -EIO;
768 }
769
770 static struct sysfs_ops csrowfs_ops = {
771 .show = csrowdev_show,
772 .store = csrowdev_store
773 };
774
775 #define CSROWDEV_ATTR(_name,_mode,_show,_store) \
776 struct csrowdev_attribute attr_##_name = { \
777 .attr = {.name = __stringify(_name), .mode = _mode }, \
778 .show = _show, \
779 .store = _store, \
780 };
781
782 /* csrow<id> attribute files */
783 CSROWDEV_ATTR(size_mb,S_IRUGO,csrow_size_show,NULL);
784 CSROWDEV_ATTR(dev_type,S_IRUGO,csrow_dev_type_show,NULL);
785 CSROWDEV_ATTR(mem_type,S_IRUGO,csrow_mem_type_show,NULL);
786 CSROWDEV_ATTR(edac_mode,S_IRUGO,csrow_edac_mode_show,NULL);
787 CSROWDEV_ATTR(ue_count,S_IRUGO,csrow_ue_count_show,NULL);
788 CSROWDEV_ATTR(ce_count,S_IRUGO,csrow_ce_count_show,NULL);
789 CSROWDEV_ATTR(ch0_ce_count,S_IRUGO,csrow_ch0_ce_count_show,NULL);
790 CSROWDEV_ATTR(ch1_ce_count,S_IRUGO,csrow_ch1_ce_count_show,NULL);
791
792 /* control/attribute files */
793 CSROWDEV_ATTR(ch0_dimm_label,S_IRUGO|S_IWUSR,
794 csrow_ch0_dimm_label_show,
795 csrow_ch0_dimm_label_store);
796 CSROWDEV_ATTR(ch1_dimm_label,S_IRUGO|S_IWUSR,
797 csrow_ch1_dimm_label_show,
798 csrow_ch1_dimm_label_store);
799
800 /* Attributes of the CSROW<id> object */
801 static struct csrowdev_attribute *csrow_attr[] = {
802 &attr_dev_type,
803 &attr_mem_type,
804 &attr_edac_mode,
805 &attr_size_mb,
806 &attr_ue_count,
807 &attr_ce_count,
808 &attr_ch0_ce_count,
809 &attr_ch1_ce_count,
810 &attr_ch0_dimm_label,
811 &attr_ch1_dimm_label,
812 NULL,
813 };
814
815 /* No memory to release */
816 static void edac_csrow_instance_release(struct kobject *kobj)
817 {
818 struct csrow_info *cs;
819
820 debugf1("%s()\n", __func__);
821 cs = container_of(kobj, struct csrow_info, kobj);
822 complete(&cs->kobj_complete);
823 }
824
825 static struct kobj_type ktype_csrow = {
826 .release = edac_csrow_instance_release,
827 .sysfs_ops = &csrowfs_ops,
828 .default_attrs = (struct attribute **) csrow_attr,
829 };
830
831 /* Create a CSROW object under the specified edac_mc device */
832 static int edac_create_csrow_object(struct kobject *edac_mci_kobj,
833 struct csrow_info *csrow, int index)
834 {
835 int err = 0;
836
837 debugf0("%s()\n", __func__);
838 memset(&csrow->kobj, 0, sizeof(csrow->kobj));
839
840 /* generate ..../edac/mc/mc<id>/csrow<index> */
841
842 csrow->kobj.parent = edac_mci_kobj;
843 csrow->kobj.ktype = &ktype_csrow;
844
845 /* name this instance of csrow<id> */
846 err = kobject_set_name(&csrow->kobj,"csrow%d",index);
847
848 if (!err) {
849 /* Instantiate the csrow object */
850 err = kobject_register(&csrow->kobj);
851
852 if (err)
853 debugf0("Failed to register CSROW%d\n",index);
854 else
855 debugf0("Registered CSROW%d\n",index);
856 }
857
858 return err;
859 }
860
861 /* sysfs data structures and methods for the MCI kobjects */
862
863 static ssize_t mci_reset_counters_store(struct mem_ctl_info *mci,
864 const char *data, size_t count)
865 {
866 int row, chan;
867
868 mci->ue_noinfo_count = 0;
869 mci->ce_noinfo_count = 0;
870 mci->ue_count = 0;
871 mci->ce_count = 0;
872
873 for (row = 0; row < mci->nr_csrows; row++) {
874 struct csrow_info *ri = &mci->csrows[row];
875
876 ri->ue_count = 0;
877 ri->ce_count = 0;
878
879 for (chan = 0; chan < ri->nr_channels; chan++)
880 ri->channels[chan].ce_count = 0;
881 }
882
883 mci->start_time = jiffies;
884 return count;
885 }
886
887 static ssize_t mci_ue_count_show(struct mem_ctl_info *mci, char *data)
888 {
889 return sprintf(data,"%d\n", mci->ue_count);
890 }
891
892 static ssize_t mci_ce_count_show(struct mem_ctl_info *mci, char *data)
893 {
894 return sprintf(data,"%d\n", mci->ce_count);
895 }
896
897 static ssize_t mci_ce_noinfo_show(struct mem_ctl_info *mci, char *data)
898 {
899 return sprintf(data,"%d\n", mci->ce_noinfo_count);
900 }
901
902 static ssize_t mci_ue_noinfo_show(struct mem_ctl_info *mci, char *data)
903 {
904 return sprintf(data,"%d\n", mci->ue_noinfo_count);
905 }
906
907 static ssize_t mci_seconds_show(struct mem_ctl_info *mci, char *data)
908 {
909 return sprintf(data,"%ld\n", (jiffies - mci->start_time) / HZ);
910 }
911
912 static ssize_t mci_mod_name_show(struct mem_ctl_info *mci, char *data)
913 {
914 return sprintf(data,"%s %s\n", mci->mod_name, mci->mod_ver);
915 }
916
917 static ssize_t mci_ctl_name_show(struct mem_ctl_info *mci, char *data)
918 {
919 return sprintf(data,"%s\n", mci->ctl_name);
920 }
921
922 static int mci_output_edac_cap(char *buf, unsigned long edac_cap)
923 {
924 char *p = buf;
925 int bit_idx;
926
927 for (bit_idx = 0; bit_idx < 8 * sizeof(edac_cap); bit_idx++) {
928 if ((edac_cap >> bit_idx) & 0x1)
929 p += sprintf(p, "%s ", edac_caps[bit_idx]);
930 }
931
932 return p - buf;
933 }
934
935 static ssize_t mci_edac_capability_show(struct mem_ctl_info *mci, char *data)
936 {
937 char *p = data;
938
939 p += mci_output_edac_cap(p,mci->edac_ctl_cap);
940 p += sprintf(p, "\n");
941 return p - data;
942 }
943
944 static ssize_t mci_edac_current_capability_show(struct mem_ctl_info *mci,
945 char *data)
946 {
947 char *p = data;
948
949 p += mci_output_edac_cap(p,mci->edac_cap);
950 p += sprintf(p, "\n");
951 return p - data;
952 }
953
954 static int mci_output_mtype_cap(char *buf, unsigned long mtype_cap)
955 {
956 char *p = buf;
957 int bit_idx;
958
959 for (bit_idx = 0; bit_idx < 8 * sizeof(mtype_cap); bit_idx++) {
960 if ((mtype_cap >> bit_idx) & 0x1)
961 p += sprintf(p, "%s ", mem_types[bit_idx]);
962 }
963
964 return p - buf;
965 }
966
967 static ssize_t mci_supported_mem_type_show(struct mem_ctl_info *mci,
968 char *data)
969 {
970 char *p = data;
971
972 p += mci_output_mtype_cap(p,mci->mtype_cap);
973 p += sprintf(p, "\n");
974 return p - data;
975 }
976
977 static ssize_t mci_size_mb_show(struct mem_ctl_info *mci, char *data)
978 {
979 int total_pages, csrow_idx;
980
981 for (total_pages = csrow_idx = 0; csrow_idx < mci->nr_csrows;
982 csrow_idx++) {
983 struct csrow_info *csrow = &mci->csrows[csrow_idx];
984
985 if (!csrow->nr_pages)
986 continue;
987
988 total_pages += csrow->nr_pages;
989 }
990
991 return sprintf(data,"%u\n", PAGES_TO_MiB(total_pages));
992 }
993
994 struct mcidev_attribute {
995 struct attribute attr;
996 ssize_t (*show)(struct mem_ctl_info *,char *);
997 ssize_t (*store)(struct mem_ctl_info *, const char *,size_t);
998 };
999
1000 #define to_mci(k) container_of(k, struct mem_ctl_info, edac_mci_kobj)
1001 #define to_mcidev_attr(a) container_of(a, struct mcidev_attribute, attr)
1002
1003 static ssize_t mcidev_show(struct kobject *kobj, struct attribute *attr,
1004 char *buffer)
1005 {
1006 struct mem_ctl_info *mem_ctl_info = to_mci(kobj);
1007 struct mcidev_attribute * mcidev_attr = to_mcidev_attr(attr);
1008
1009 if (mcidev_attr->show)
1010 return mcidev_attr->show(mem_ctl_info, buffer);
1011
1012 return -EIO;
1013 }
1014
1015 static ssize_t mcidev_store(struct kobject *kobj, struct attribute *attr,
1016 const char *buffer, size_t count)
1017 {
1018 struct mem_ctl_info *mem_ctl_info = to_mci(kobj);
1019 struct mcidev_attribute * mcidev_attr = to_mcidev_attr(attr);
1020
1021 if (mcidev_attr->store)
1022 return mcidev_attr->store(mem_ctl_info, buffer, count);
1023
1024 return -EIO;
1025 }
1026
1027 static struct sysfs_ops mci_ops = {
1028 .show = mcidev_show,
1029 .store = mcidev_store
1030 };
1031
1032 #define MCIDEV_ATTR(_name,_mode,_show,_store) \
1033 struct mcidev_attribute mci_attr_##_name = { \
1034 .attr = {.name = __stringify(_name), .mode = _mode }, \
1035 .show = _show, \
1036 .store = _store, \
1037 };
1038
1039 /* Control file */
1040 MCIDEV_ATTR(reset_counters,S_IWUSR,NULL,mci_reset_counters_store);
1041
1042 /* Attribute files */
1043 MCIDEV_ATTR(mc_name,S_IRUGO,mci_ctl_name_show,NULL);
1044 MCIDEV_ATTR(module_name,S_IRUGO,mci_mod_name_show,NULL);
1045 MCIDEV_ATTR(edac_capability,S_IRUGO,mci_edac_capability_show,NULL);
1046 MCIDEV_ATTR(size_mb,S_IRUGO,mci_size_mb_show,NULL);
1047 MCIDEV_ATTR(seconds_since_reset,S_IRUGO,mci_seconds_show,NULL);
1048 MCIDEV_ATTR(ue_noinfo_count,S_IRUGO,mci_ue_noinfo_show,NULL);
1049 MCIDEV_ATTR(ce_noinfo_count,S_IRUGO,mci_ce_noinfo_show,NULL);
1050 MCIDEV_ATTR(ue_count,S_IRUGO,mci_ue_count_show,NULL);
1051 MCIDEV_ATTR(ce_count,S_IRUGO,mci_ce_count_show,NULL);
1052 MCIDEV_ATTR(edac_current_capability,S_IRUGO,
1053 mci_edac_current_capability_show,NULL);
1054 MCIDEV_ATTR(supported_mem_type,S_IRUGO,
1055 mci_supported_mem_type_show,NULL);
1056
1057 static struct mcidev_attribute *mci_attr[] = {
1058 &mci_attr_reset_counters,
1059 &mci_attr_module_name,
1060 &mci_attr_mc_name,
1061 &mci_attr_edac_capability,
1062 &mci_attr_edac_current_capability,
1063 &mci_attr_supported_mem_type,
1064 &mci_attr_size_mb,
1065 &mci_attr_seconds_since_reset,
1066 &mci_attr_ue_noinfo_count,
1067 &mci_attr_ce_noinfo_count,
1068 &mci_attr_ue_count,
1069 &mci_attr_ce_count,
1070 NULL
1071 };
1072
1073 /*
1074 * Release of a MC controlling instance
1075 */
1076 static void edac_mci_instance_release(struct kobject *kobj)
1077 {
1078 struct mem_ctl_info *mci;
1079
1080 mci = to_mci(kobj);
1081 debugf0("%s() idx=%d\n", __func__, mci->mc_idx);
1082 complete(&mci->kobj_complete);
1083 }
1084
1085 static struct kobj_type ktype_mci = {
1086 .release = edac_mci_instance_release,
1087 .sysfs_ops = &mci_ops,
1088 .default_attrs = (struct attribute **) mci_attr,
1089 };
1090
1091 #endif /* DISABLE_EDAC_SYSFS */
1092
1093 #define EDAC_DEVICE_SYMLINK "device"
1094
1095 /*
1096 * Create a new Memory Controller kobject instance,
1097 * mc<id> under the 'mc' directory
1098 *
1099 * Return:
1100 * 0 Success
1101 * !0 Failure
1102 */
1103 static int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
1104 #ifdef DISABLE_EDAC_SYSFS
1105 {
1106 return 0;
1107 }
1108 #else
1109 {
1110 int i;
1111 int err;
1112 struct csrow_info *csrow;
1113 struct kobject *edac_mci_kobj=&mci->edac_mci_kobj;
1114
1115 debugf0("%s() idx=%d\n", __func__, mci->mc_idx);
1116 memset(edac_mci_kobj, 0, sizeof(*edac_mci_kobj));
1117
1118 /* set the name of the mc<id> object */
1119 err = kobject_set_name(edac_mci_kobj,"mc%d",mci->mc_idx);
1120
1121 if (err)
1122 return err;
1123
1124 /* link to our parent the '..../edac/mc' object */
1125 edac_mci_kobj->parent = &edac_memctrl_kobj;
1126 edac_mci_kobj->ktype = &ktype_mci;
1127
1128 /* register the mc<id> kobject */
1129 err = kobject_register(edac_mci_kobj);
1130
1131 if (err)
1132 return err;
1133
1134 /* create a symlink for the device */
1135 err = sysfs_create_link(edac_mci_kobj, &mci->pdev->dev.kobj,
1136 EDAC_DEVICE_SYMLINK);
1137
1138 if (err)
1139 goto fail0;
1140
1141 /* Make directories for each CSROW object
1142 * under the mc<id> kobject
1143 */
1144 for (i = 0; i < mci->nr_csrows; i++) {
1145 csrow = &mci->csrows[i];
1146
1147 /* Only expose populated CSROWs */
1148 if (csrow->nr_pages > 0) {
1149 err = edac_create_csrow_object(edac_mci_kobj,csrow,i);
1150
1151 if (err)
1152 goto fail1;
1153 }
1154 }
1155
1156 return 0;
1157
1158 /* CSROW error: back out what has already been registered */
1159 fail1:
1160 for (i--; i >= 0; i--) {
1161 if (mci->csrows[i].nr_pages > 0) {
1162 init_completion(&mci->csrows[i].kobj_complete);
1163 kobject_unregister(&mci->csrows[i].kobj);
1164 wait_for_completion(&mci->csrows[i].kobj_complete);
1165 }
1166 }
1167
1168 fail0:
1169 init_completion(&mci->kobj_complete);
1170 kobject_unregister(edac_mci_kobj);
1171 wait_for_completion(&mci->kobj_complete);
1172 return err;
1173 }
1174 #endif /* DISABLE_EDAC_SYSFS */
1175
1176 /*
1177 * remove a Memory Controller instance
1178 */
1179 static void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
1180 {
1181 #ifndef DISABLE_EDAC_SYSFS
1182 int i;
1183
1184 debugf0("%s()\n", __func__);
1185
1186 /* remove all csrow kobjects */
1187 for (i = 0; i < mci->nr_csrows; i++) {
1188 if (mci->csrows[i].nr_pages > 0) {
1189 init_completion(&mci->csrows[i].kobj_complete);
1190 kobject_unregister(&mci->csrows[i].kobj);
1191 wait_for_completion(&mci->csrows[i].kobj_complete);
1192 }
1193 }
1194
1195 sysfs_remove_link(&mci->edac_mci_kobj, EDAC_DEVICE_SYMLINK);
1196 init_completion(&mci->kobj_complete);
1197 kobject_unregister(&mci->edac_mci_kobj);
1198 wait_for_completion(&mci->kobj_complete);
1199 #endif /* DISABLE_EDAC_SYSFS */
1200 }
1201
1202 /* END OF sysfs data and methods */
1203
1204 #ifdef CONFIG_EDAC_DEBUG
1205
1206 void edac_mc_dump_channel(struct channel_info *chan)
1207 {
1208 debugf4("\tchannel = %p\n", chan);
1209 debugf4("\tchannel->chan_idx = %d\n", chan->chan_idx);
1210 debugf4("\tchannel->ce_count = %d\n", chan->ce_count);
1211 debugf4("\tchannel->label = '%s'\n", chan->label);
1212 debugf4("\tchannel->csrow = %p\n\n", chan->csrow);
1213 }
1214 EXPORT_SYMBOL_GPL(edac_mc_dump_channel);
1215
1216 void edac_mc_dump_csrow(struct csrow_info *csrow)
1217 {
1218 debugf4("\tcsrow = %p\n", csrow);
1219 debugf4("\tcsrow->csrow_idx = %d\n", csrow->csrow_idx);
1220 debugf4("\tcsrow->first_page = 0x%lx\n",
1221 csrow->first_page);
1222 debugf4("\tcsrow->last_page = 0x%lx\n", csrow->last_page);
1223 debugf4("\tcsrow->page_mask = 0x%lx\n", csrow->page_mask);
1224 debugf4("\tcsrow->nr_pages = 0x%x\n", csrow->nr_pages);
1225 debugf4("\tcsrow->nr_channels = %d\n",
1226 csrow->nr_channels);
1227 debugf4("\tcsrow->channels = %p\n", csrow->channels);
1228 debugf4("\tcsrow->mci = %p\n\n", csrow->mci);
1229 }
1230 EXPORT_SYMBOL_GPL(edac_mc_dump_csrow);
1231
1232 void edac_mc_dump_mci(struct mem_ctl_info *mci)
1233 {
1234 debugf3("\tmci = %p\n", mci);
1235 debugf3("\tmci->mtype_cap = %lx\n", mci->mtype_cap);
1236 debugf3("\tmci->edac_ctl_cap = %lx\n", mci->edac_ctl_cap);
1237 debugf3("\tmci->edac_cap = %lx\n", mci->edac_cap);
1238 debugf4("\tmci->edac_check = %p\n", mci->edac_check);
1239 debugf3("\tmci->nr_csrows = %d, csrows = %p\n",
1240 mci->nr_csrows, mci->csrows);
1241 debugf3("\tpdev = %p\n", mci->pdev);
1242 debugf3("\tmod_name:ctl_name = %s:%s\n",
1243 mci->mod_name, mci->ctl_name);
1244 debugf3("\tpvt_info = %p\n\n", mci->pvt_info);
1245 }
1246 EXPORT_SYMBOL_GPL(edac_mc_dump_mci);
1247
1248 #endif /* CONFIG_EDAC_DEBUG */
1249
1250 /* 'ptr' points to a possibly unaligned item X such that sizeof(X) is 'size'.
1251 * Adjust 'ptr' so that its alignment is at least as stringent as what the
1252 * compiler would provide for X and return the aligned result.
1253 *
1254 * If 'size' is a constant, the compiler can fold the alignment selection
1255 * below down to a small constant-time adjustment of 'ptr'.
1256 */
1257 static inline char * align_ptr(void *ptr, unsigned size)
1258 {
1259 unsigned align, r;
1260
1261 /* Here we assume that the alignment of a "long long" is the most
1262 * stringent alignment that the compiler will ever provide by default.
1263 * As far as I know, this is a reasonable assumption.
1264 */
1265 if (size > sizeof(long))
1266 align = sizeof(long long);
1267 else if (size > sizeof(int))
1268 align = sizeof(long);
1269 else if (size > sizeof(short))
1270 align = sizeof(int);
1271 else if (size > sizeof(char))
1272 align = sizeof(short);
1273 else
1274 return (char *) ptr;
1275
1276 r = (unsigned long) ptr % align;
1277
1278 if (r == 0)
1279 return (char *) ptr;
1280
1281 return (char *) (((unsigned long) ptr) + align - r);
1282 }
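/* Illustrative example: on a 64-bit build where 'align' works out to
 * sizeof(long long) == 8, an offset of 52 bytes is rounded up to 56, while
 * an already-aligned offset of 56 comes back unchanged.  edac_mc_alloc()
 * below relies on this to lay out the csrow, channel and private-data
 * regions within a single kmalloc'ed block.
 */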
1283
1284 /**
1285 * edac_mc_alloc: Allocate a struct mem_ctl_info structure
1286 * @sz_pvt: size of private storage needed
1287 * @nr_csrows: Number of csrows needed for this MC
1288 * @nr_chans: Number of channels for the MC
1289 *
1290 * Everything is kmalloc'ed as one big chunk - more efficient.
1291 * Only can be used if all structures have the same lifetime - otherwise
1292 * you have to allocate and initialize your own structures.
1293 *
1294 * Use edac_mc_free() to free mc structures allocated by this function.
1295 *
1296 * Returns:
1297 * NULL allocation failed
1298 * struct mem_ctl_info pointer
1299 */
1300 struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
1301 unsigned nr_chans)
1302 {
1303 struct mem_ctl_info *mci;
1304 struct csrow_info *csi, *csrow;
1305 struct channel_info *chi, *chp, *chan;
1306 void *pvt;
1307 unsigned size;
1308 int row, chn;
1309
1310 /* Figure out the offsets of the various items from the start of an mc
1311 * structure. We want the alignment of each item to be at least as
1312 * stringent as what the compiler would provide if we could simply
1313 * hardcode everything into a single struct.
1314 */
1315 mci = (struct mem_ctl_info *) 0;
1316 csi = (struct csrow_info *)align_ptr(&mci[1], sizeof(*csi));
1317 chi = (struct channel_info *)
1318 align_ptr(&csi[nr_csrows], sizeof(*chi));
1319 pvt = align_ptr(&chi[nr_chans * nr_csrows], sz_pvt);
1320 size = ((unsigned long) pvt) + sz_pvt;
1321
1322 if ((mci = kmalloc(size, GFP_KERNEL)) == NULL)
1323 return NULL;
1324
1325 /* Adjust pointers so they point within the memory we just allocated
1326 * rather than an imaginary chunk of memory located at address 0.
1327 */
1328 csi = (struct csrow_info *) (((char *) mci) + ((unsigned long) csi));
1329 chi = (struct channel_info *) (((char *) mci) + ((unsigned long) chi));
1330 pvt = sz_pvt ? (((char *) mci) + ((unsigned long) pvt)) : NULL;
1331
1332 memset(mci, 0, size); /* clear all fields */
1333 mci->csrows = csi;
1334 mci->pvt_info = pvt;
1335 mci->nr_csrows = nr_csrows;
1336
1337 for (row = 0; row < nr_csrows; row++) {
1338 csrow = &csi[row];
1339 csrow->csrow_idx = row;
1340 csrow->mci = mci;
1341 csrow->nr_channels = nr_chans;
1342 chp = &chi[row * nr_chans];
1343 csrow->channels = chp;
1344
1345 for (chn = 0; chn < nr_chans; chn++) {
1346 chan = &chp[chn];
1347 chan->chan_idx = chn;
1348 chan->csrow = csrow;
1349 }
1350 }
1351
1352 return mci;
1353 }
1354 EXPORT_SYMBOL_GPL(edac_mc_alloc);
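/* Illustrative usage sketch (hypothetical driver code, not part of this
 * file): a chipset driver would typically allocate the control structure,
 * fill in the csrow geometry it discovers from its registers, and then hand
 * the result to edac_mc_add_mc(), e.g.:
 *
 *	mci = edac_mc_alloc(sizeof(struct my_priv), nr_csrows, nr_chans);
 *	if (mci == NULL)
 *		return -ENOMEM;
 *	mci->pdev = pdev;		// PCI device being driven
 *	mci->edac_check = my_check;	// optional polling callback
 *	// ... fill in mci->csrows[] from chipset registers ...
 *	if (edac_mc_add_mc(mci)) {
 *		edac_mc_free(mci);
 *		return -ENODEV;
 *	}
 *
 * 'struct my_priv' and 'my_check' are placeholder names.
 */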
1355
1356 /**
1357 * edac_mc_free: Free a previously allocated 'mci' structure
1358 * @mci: pointer to a struct mem_ctl_info structure
1359 */
1360 void edac_mc_free(struct mem_ctl_info *mci)
1361 {
1362 kfree(mci);
1363 }
1364 EXPORT_SYMBOL_GPL(edac_mc_free);
1365
1366 static struct mem_ctl_info *find_mci_by_pdev(struct pci_dev *pdev)
1367 {
1368 struct mem_ctl_info *mci;
1369 struct list_head *item;
1370
1371 debugf3("%s()\n", __func__);
1372
1373 list_for_each(item, &mc_devices) {
1374 mci = list_entry(item, struct mem_ctl_info, link);
1375
1376 if (mci->pdev == pdev)
1377 return mci;
1378 }
1379
1380 return NULL;
1381 }
1382
1383 static int add_mc_to_global_list(struct mem_ctl_info *mci)
1384 {
1385 struct list_head *item, *insert_before;
1386 struct mem_ctl_info *p;
1387 int i;
1388
1389 if (list_empty(&mc_devices)) {
1390 mci->mc_idx = 0;
1391 insert_before = &mc_devices;
1392 } else {
1393 if (find_mci_by_pdev(mci->pdev)) {
1394 edac_printk(KERN_WARNING, EDAC_MC,
1395 "%s (%s) %s %s already assigned %d\n",
1396 mci->pdev->dev.bus_id,
1397 pci_name(mci->pdev), mci->mod_name,
1398 mci->ctl_name, mci->mc_idx);
1399 return 1;
1400 }
1401
1402 insert_before = NULL;
1403 i = 0;
1404
1405 list_for_each(item, &mc_devices) {
1406 p = list_entry(item, struct mem_ctl_info, link);
1407
1408 if (p->mc_idx != i) {
1409 insert_before = item;
1410 break;
1411 }
1412
1413 i++;
1414 }
1415
1416 mci->mc_idx = i;
1417
1418 if (insert_before == NULL)
1419 insert_before = &mc_devices;
1420 }
1421
1422 list_add_tail_rcu(&mci->link, insert_before);
1423 return 0;
1424 }
1425
1426 static void complete_mc_list_del(struct rcu_head *head)
1427 {
1428 struct mem_ctl_info *mci;
1429
1430 mci = container_of(head, struct mem_ctl_info, rcu);
1431 INIT_LIST_HEAD(&mci->link);
1432 complete(&mci->complete);
1433 }
1434
1435 static void del_mc_from_global_list(struct mem_ctl_info *mci)
1436 {
1437 list_del_rcu(&mci->link);
1438 init_completion(&mci->complete);
1439 call_rcu(&mci->rcu, complete_mc_list_del);
1440 wait_for_completion(&mci->complete);
1441 }
1442
1443 /**
1444 * edac_mc_add_mc: Insert the 'mci' structure into the mci global list and
1445 * create sysfs entries associated with mci structure
1446 * @mci: pointer to the mci structure to be added to the list
1447 *
1448 * Return:
1449 * 0 Success
1450 * !0 Failure
1451 */
1452
1453 /* FIXME - should a warning be printed if no error detection? correction? */
1454 int edac_mc_add_mc(struct mem_ctl_info *mci)
1455 {
1456 debugf0("%s()\n", __func__);
1457 #ifdef CONFIG_EDAC_DEBUG
1458 if (edac_debug_level >= 3)
1459 edac_mc_dump_mci(mci);
1460
1461 if (edac_debug_level >= 4) {
1462 int i;
1463
1464 for (i = 0; i < mci->nr_csrows; i++) {
1465 int j;
1466
1467 edac_mc_dump_csrow(&mci->csrows[i]);
1468 for (j = 0; j < mci->csrows[i].nr_channels; j++)
1469 edac_mc_dump_channel(
1470 &mci->csrows[i].channels[j]);
1471 }
1472 }
1473 #endif
1474 down(&mem_ctls_mutex);
1475
1476 if (add_mc_to_global_list(mci))
1477 goto fail0;
1478
1479 /* set load time so that error rate can be tracked */
1480 mci->start_time = jiffies;
1481
1482 if (edac_create_sysfs_mci_device(mci)) {
1483 edac_mc_printk(mci, KERN_WARNING,
1484 "failed to create sysfs device\n");
1485 goto fail1;
1486 }
1487
1488 /* Report action taken */
1489 edac_mc_printk(mci, KERN_INFO, "Giving out device to %s %s: PCI %s\n",
1490 mci->mod_name, mci->ctl_name, pci_name(mci->pdev));
1491
1492 up(&mem_ctls_mutex);
1493 return 0;
1494
1495 fail1:
1496 del_mc_from_global_list(mci);
1497
1498 fail0:
1499 up(&mem_ctls_mutex);
1500 return 1;
1501 }
1502 EXPORT_SYMBOL_GPL(edac_mc_add_mc);
1503
1504 /**
1505 * edac_mc_del_mc: Remove sysfs entries for specified mci structure and
1506 * remove mci structure from global list
1507 * @pdev: Pointer to 'struct pci_dev' representing mci structure to remove.
1508 *
1509 * Return pointer to removed mci structure, or NULL if device not found.
1510 */
1511 struct mem_ctl_info * edac_mc_del_mc(struct pci_dev *pdev)
1512 {
1513 struct mem_ctl_info *mci;
1514
1515 debugf0("MC: %s()\n", __func__);
1516 down(&mem_ctls_mutex);
1517
1518 if ((mci = find_mci_by_pdev(pdev)) == NULL) {
1519 up(&mem_ctls_mutex);
1520 return NULL;
1521 }
1522
1523 edac_remove_sysfs_mci_device(mci);
1524 del_mc_from_global_list(mci);
1525 up(&mem_ctls_mutex);
1526 edac_printk(KERN_INFO, EDAC_MC,
1527 "Removed device %d for %s %s: PCI %s\n", mci->mc_idx,
1528 mci->mod_name, mci->ctl_name, pci_name(mci->pdev));
1529 return mci;
1530 }
1531 EXPORT_SYMBOL_GPL(edac_mc_del_mc);
1532
1533 void edac_mc_scrub_block(unsigned long page, unsigned long offset, u32 size)
1534 {
1535 struct page *pg;
1536 void *virt_addr;
1537 unsigned long flags = 0;
1538
1539 debugf3("%s()\n", __func__);
1540
1541 /* ECC error page was not in our memory. Ignore it. */
1542 if(!pfn_valid(page))
1543 return;
1544
1545 /* Find the actual page structure then map it and fix */
1546 pg = pfn_to_page(page);
1547
1548 if (PageHighMem(pg))
1549 local_irq_save(flags);
1550
1551 virt_addr = kmap_atomic(pg, KM_BOUNCE_READ);
1552
1553 /* Perform architecture specific atomic scrub operation */
1554 atomic_scrub(virt_addr + offset, size);
1555
1556 /* Unmap and complete */
1557 kunmap_atomic(virt_addr, KM_BOUNCE_READ);
1558
1559 if (PageHighMem(pg))
1560 local_irq_restore(flags);
1561 }
1562 EXPORT_SYMBOL_GPL(edac_mc_scrub_block);
1563
1564 /* FIXME - should return -1 */
1565 int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page)
1566 {
1567 struct csrow_info *csrows = mci->csrows;
1568 int row, i;
1569
1570 debugf1("MC%d: %s(): 0x%lx\n", mci->mc_idx, __func__, page);
1571 row = -1;
1572
1573 for (i = 0; i < mci->nr_csrows; i++) {
1574 struct csrow_info *csrow = &csrows[i];
1575
1576 if (csrow->nr_pages == 0)
1577 continue;
1578
1579 debugf3("MC%d: %s(): first(0x%lx) page(0x%lx) last(0x%lx) "
1580 "mask(0x%lx)\n", mci->mc_idx, __func__,
1581 csrow->first_page, page, csrow->last_page,
1582 csrow->page_mask);
1583
1584 if ((page >= csrow->first_page) &&
1585 (page <= csrow->last_page) &&
1586 ((page & csrow->page_mask) ==
1587 (csrow->first_page & csrow->page_mask))) {
1588 row = i;
1589 break;
1590 }
1591 }
1592
1593 if (row == -1)
1594 edac_mc_printk(mci, KERN_ERR,
1595 "could not look up page error address %lx\n",
1596 (unsigned long) page);
1597
1598 return row;
1599 }
1600 EXPORT_SYMBOL_GPL(edac_mc_find_csrow_by_page);
1601
1602 /* FIXME - settable log (warning/emerg) levels */
1603 /* FIXME - integrate with evlog: http://evlog.sourceforge.net/ */
1604 void edac_mc_handle_ce(struct mem_ctl_info *mci,
1605 unsigned long page_frame_number, unsigned long offset_in_page,
1606 unsigned long syndrome, int row, int channel, const char *msg)
1607 {
1608 unsigned long remapped_page;
1609
1610 debugf3("MC%d: %s()\n", mci->mc_idx, __func__);
1611
1612 /* FIXME - maybe make panic on INTERNAL ERROR an option */
1613 if (row >= mci->nr_csrows || row < 0) {
1614 /* something is wrong */
1615 edac_mc_printk(mci, KERN_ERR,
1616 "INTERNAL ERROR: row out of range "
1617 "(%d >= %d)\n", row, mci->nr_csrows);
1618 edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
1619 return;
1620 }
1621
1622 if (channel >= mci->csrows[row].nr_channels || channel < 0) {
1623 /* something is wrong */
1624 edac_mc_printk(mci, KERN_ERR,
1625 "INTERNAL ERROR: channel out of range "
1626 "(%d >= %d)\n", channel,
1627 mci->csrows[row].nr_channels);
1628 edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
1629 return;
1630 }
1631
1632 if (log_ce)
1633 /* FIXME - put in DIMM location */
1634 edac_mc_printk(mci, KERN_WARNING,
1635 "CE page 0x%lx, offset 0x%lx, grain %d, syndrome "
1636 "0x%lx, row %d, channel %d, label \"%s\": %s\n",
1637 page_frame_number, offset_in_page,
1638 mci->csrows[row].grain, syndrome, row, channel,
1639 mci->csrows[row].channels[channel].label, msg);
1640
1641 mci->ce_count++;
1642 mci->csrows[row].ce_count++;
1643 mci->csrows[row].channels[channel].ce_count++;
1644
1645 if (mci->scrub_mode & SCRUB_SW_SRC) {
1646 /*
1647 * Some MC's can remap memory so that it is still available
1648 * at a different address when PCI devices map into memory.
1649 * MC's that can't do this lose the memory where PCI devices
1650 * are mapped. This mapping is MC dependent and so we call
1651 * back into the MC driver for it to map the MC page to
1652 * a physical (CPU) page which can then be mapped to a virtual
1653 * page - which can then be scrubbed.
1654 */
1655 remapped_page = mci->ctl_page_to_phys ?
1656 mci->ctl_page_to_phys(mci, page_frame_number) :
1657 page_frame_number;
1658
1659 edac_mc_scrub_block(remapped_page, offset_in_page,
1660 mci->csrows[row].grain);
1661 }
1662 }
1663 EXPORT_SYMBOL_GPL(edac_mc_handle_ce);
1664
1665 void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci, const char *msg)
1666 {
1667 if (log_ce)
1668 edac_mc_printk(mci, KERN_WARNING,
1669 "CE - no information available: %s\n", msg);
1670
1671 mci->ce_noinfo_count++;
1672 mci->ce_count++;
1673 }
1674 EXPORT_SYMBOL_GPL(edac_mc_handle_ce_no_info);
1675
1676 void edac_mc_handle_ue(struct mem_ctl_info *mci,
1677 unsigned long page_frame_number, unsigned long offset_in_page,
1678 int row, const char *msg)
1679 {
1680 int len = EDAC_MC_LABEL_LEN * 4;
1681 char labels[len + 1];
1682 char *pos = labels;
1683 int chan;
1684 int chars;
1685
1686 debugf3("MC%d: %s()\n", mci->mc_idx, __func__);
1687
1688 /* FIXME - maybe make panic on INTERNAL ERROR an option */
1689 if (row >= mci->nr_csrows || row < 0) {
1690 /* something is wrong */
1691 edac_mc_printk(mci, KERN_ERR,
1692 "INTERNAL ERROR: row out of range "
1693 "(%d >= %d)\n", row, mci->nr_csrows);
1694 edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
1695 return;
1696 }
1697
1698 chars = snprintf(pos, len + 1, "%s",
1699 mci->csrows[row].channels[0].label);
1700 len -= chars;
1701 pos += chars;
1702
1703 for (chan = 1; (chan < mci->csrows[row].nr_channels) && (len > 0);
1704 chan++) {
1705 chars = snprintf(pos, len + 1, ":%s",
1706 mci->csrows[row].channels[chan].label);
1707 len -= chars;
1708 pos += chars;
1709 }
1710
1711 if (log_ue)
1712 edac_mc_printk(mci, KERN_EMERG,
1713 "UE page 0x%lx, offset 0x%lx, grain %d, row %d, "
1714 "labels \"%s\": %s\n", page_frame_number,
1715 offset_in_page, mci->csrows[row].grain, row, labels,
1716 msg);
1717
1718 if (panic_on_ue)
1719 panic("EDAC MC%d: UE page 0x%lx, offset 0x%lx, grain %d, "
1720 "row %d, labels \"%s\": %s\n", mci->mc_idx,
1721 page_frame_number, offset_in_page,
1722 mci->csrows[row].grain, row, labels, msg);
1723
1724 mci->ue_count++;
1725 mci->csrows[row].ue_count++;
1726 }
1727 EXPORT_SYMBOL_GPL(edac_mc_handle_ue);
1728
1729 void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci, const char *msg)
1730 {
1731 if (panic_on_ue)
1732 panic("EDAC MC%d: Uncorrected Error", mci->mc_idx);
1733
1734 if (log_ue)
1735 edac_mc_printk(mci, KERN_WARNING,
1736 "UE - no information available: %s\n", msg);
1737 mci->ue_noinfo_count++;
1738 mci->ue_count++;
1739 }
1740 EXPORT_SYMBOL_GPL(edac_mc_handle_ue_no_info);
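/* Illustrative sketch (hypothetical driver code, not part of this file):
 * a driver's edac_check callback, run from the polling thread below, would
 * read its chipset error registers and report through the handlers above,
 * e.g.:
 *
 *	static void my_check(struct mem_ctl_info *mci)
 *	{
 *		// ... read page, offset, syndrome, channel from hardware ...
 *		int row = edac_mc_find_csrow_by_page(mci, page);
 *
 *		if (row < 0)
 *			edac_mc_handle_ce_no_info(mci, "my_check");
 *		else
 *			edac_mc_handle_ce(mci, page, offset, syndrome,
 *					row, channel, "my_check");
 *	}
 *
 * 'my_check' and the register reads are placeholders.
 */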
1741
1742 #ifdef CONFIG_PCI
1743
1744 static u16 get_pci_parity_status(struct pci_dev *dev, int secondary)
1745 {
1746 int where;
1747 u16 status;
1748
1749 where = secondary ? PCI_SEC_STATUS : PCI_STATUS;
1750 pci_read_config_word(dev, where, &status);
1751
1752 /* If we get back 0xFFFF then we must suspect that the card has been
1753 * pulled but the Linux PCI layer has not yet finished cleaning up.
1754 * We don't want to report on such devices
1755 */
1756
1757 if (status == 0xFFFF) {
1758 u32 sanity;
1759
1760 pci_read_config_dword(dev, 0, &sanity);
1761
1762 if (sanity == 0xFFFFFFFF)
1763 return 0;
1764 }
1765
1766 status &= PCI_STATUS_DETECTED_PARITY | PCI_STATUS_SIG_SYSTEM_ERROR |
1767 PCI_STATUS_PARITY;
1768
1769 if (status)
1770 /* reset only the bits we are interested in */
1771 pci_write_config_word(dev, where, status);
1772
1773 return status;
1774 }
1775
1776 typedef void (*pci_parity_check_fn_t) (struct pci_dev *dev);
1777
1778 /* Clear any PCI parity errors logged by this device. */
1779 static void edac_pci_dev_parity_clear(struct pci_dev *dev)
1780 {
1781 u8 header_type;
1782
1783 get_pci_parity_status(dev, 0);
1784
1785 /* read the device TYPE, looking for bridges */
1786 pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
1787
1788 if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE)
1789 get_pci_parity_status(dev, 1);
1790 }
1791
1792 /*
1793 * PCI Parity polling
1794 *
1795 */
1796 static void edac_pci_dev_parity_test(struct pci_dev *dev)
1797 {
1798 u16 status;
1799 u8 header_type;
1800
1801 /* read the STATUS register on this device
1802 */
1803 status = get_pci_parity_status(dev, 0);
1804
1805 debugf2("PCI STATUS= 0x%04x %s\n", status, dev->dev.bus_id );
1806
1807 /* check the status reg for errors */
1808 if (status) {
1809 if (status & (PCI_STATUS_SIG_SYSTEM_ERROR))
1810 edac_printk(KERN_CRIT, EDAC_PCI,
1811 "Signaled System Error on %s\n",
1812 pci_name(dev));
1813
1814 if (status & (PCI_STATUS_PARITY)) {
1815 edac_printk(KERN_CRIT, EDAC_PCI,
1816 "Master Data Parity Error on %s\n",
1817 pci_name(dev));
1818
1819 atomic_inc(&pci_parity_count);
1820 }
1821
1822 if (status & (PCI_STATUS_DETECTED_PARITY)) {
1823 edac_printk(KERN_CRIT, EDAC_PCI,
1824 "Detected Parity Error on %s\n",
1825 pci_name(dev));
1826
1827 atomic_inc(&pci_parity_count);
1828 }
1829 }
1830
1831 /* read the device TYPE, looking for bridges */
1832 pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
1833
1834 debugf2("PCI HEADER TYPE= 0x%02x %s\n", header_type, dev->dev.bus_id );
1835
1836 if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
1837 /* On bridges, need to examine secondary status register */
1838 status = get_pci_parity_status(dev, 1);
1839
1840 debugf2("PCI SEC_STATUS= 0x%04x %s\n",
1841 status, dev->dev.bus_id );
1842
1843 /* check the secondary status reg for errors */
1844 if (status) {
1845 if (status & (PCI_STATUS_SIG_SYSTEM_ERROR))
1846 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
1847 "Signaled System Error on %s\n",
1848 pci_name(dev));
1849
1850 if (status & (PCI_STATUS_PARITY)) {
1851 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
1852 "Master Data Parity Error on "
1853 "%s\n", pci_name(dev));
1854
1855 atomic_inc(&pci_parity_count);
1856 }
1857
1858 if (status & (PCI_STATUS_DETECTED_PARITY)) {
1859 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
1860 "Detected Parity Error on %s\n",
1861 pci_name(dev));
1862
1863 atomic_inc(&pci_parity_count);
1864 }
1865 }
1866 }
1867 }
1868
1869 /*
1870 * check_dev_on_list: Scan for a PCI device on a white/black list
1871 * @list: an EDAC &edac_pci_device_list white/black list pointer
1872 * @free_index: index of next free entry on the list
1873 * @pci_dev: PCI Device pointer
1874 *
1875 * see if list contains the device.
1876 *
1877 * Returns: 0 not found
1878 * 1 found on list
1879 */
1880 static int check_dev_on_list(struct edac_pci_device_list *list,
1881 int free_index, struct pci_dev *dev)
1882 {
1883 int i;
1884 int rc = 0; /* Assume not found */
1885 unsigned short vendor=dev->vendor;
1886 unsigned short device=dev->device;
1887
1888 /* Scan the list, looking for a vendor/device match */
1889 for (i = 0; i < free_index; i++, list++ ) {
1890 if ((list->vendor == vendor ) && (list->device == device )) {
1891 rc = 1;
1892 break;
1893 }
1894 }
1895
1896 return rc;
1897 }
1898
1899 /*
1900 * pci_dev parity list iterator
1901 * Scan the PCI device list for one iteration, looking for SERRORs
1902 * Master Parity ERRORS or Parity ERRORs on primary or secondary devices
1903 */
1904 static inline void edac_pci_dev_parity_iterator(pci_parity_check_fn_t fn)
1905 {
1906 struct pci_dev *dev = NULL;
1907
1908 /* request for kernel access to the next PCI device, if any,
1909 * and while we are looking at it have its reference count
1910 * bumped until we are done with it
1911 */
1912 while((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
1913 /* if whitelist exists then it has priority, so only scan
1914 * those devices on the whitelist
1915 */
1916 if (pci_whitelist_count > 0 ) {
1917 if (check_dev_on_list(pci_whitelist,
1918 pci_whitelist_count, dev))
1919 fn(dev);
1920 } else {
1921 /*
1922 * if no whitelist, then check if this device is
1923 * blacklisted
1924 */
1925 if (!check_dev_on_list(pci_blacklist,
1926 pci_blacklist_count, dev))
1927 fn(dev);
1928 }
1929 }
1930 }
1931
1932 static void do_pci_parity_check(void)
1933 {
1934 unsigned long flags;
1935 int before_count;
1936
1937 debugf3("%s()\n", __func__);
1938
1939 if (!check_pci_parity)
1940 return;
1941
1942 before_count = atomic_read(&pci_parity_count);
1943
1944 /* scan all PCI devices looking for a Parity Error on devices and
1945 * bridges
1946 */
1947 local_irq_save(flags);
1948 edac_pci_dev_parity_iterator(edac_pci_dev_parity_test);
1949 local_irq_restore(flags);
1950
1951 /* Only if operator has selected panic on PCI Error */
1952 if (panic_on_pci_parity) {
1953 /* If the count is different 'after' from 'before' */
1954 if (before_count != atomic_read(&pci_parity_count))
1955 panic("EDAC: PCI Parity Error");
1956 }
1957 }
1958
1959 static inline void clear_pci_parity_errors(void)
1960 {
1961 /* Clear any PCI bus parity errors that devices initially have logged
1962 * in their registers.
1963 */
1964 edac_pci_dev_parity_iterator(edac_pci_dev_parity_clear);
1965 }
1966
1967 #else /* CONFIG_PCI */
1968
1969 static inline void do_pci_parity_check(void)
1970 {
1971 /* no-op */
1972 }
1973
1974 static inline void clear_pci_parity_errors(void)
1975 {
1976 /* no-op */
1977 }
1978
1979 #endif /* CONFIG_PCI */
1980
1981 /*
1982 * Iterate over all MC instances and check for ECC, et al, errors
1983 */
1984 static inline void check_mc_devices(void)
1985 {
1986 struct list_head *item;
1987 struct mem_ctl_info *mci;
1988
1989 debugf3("%s()\n", __func__);
1990 down(&mem_ctls_mutex);
1991
1992 list_for_each(item, &mc_devices) {
1993 mci = list_entry(item, struct mem_ctl_info, link);
1994
1995 if (mci->edac_check != NULL)
1996 mci->edac_check(mci);
1997 }
1998
1999 up(&mem_ctls_mutex);
2000 }
2001
2002 /*
2003 * Check MC status every poll_msec.
2004 * Check PCI status every poll_msec as well.
2005 *
2006 * This is where the work gets done for EDAC.
2007 *
2008 * SMP safe, doesn't use NMI, and auto-rate-limits.
2009 */
2010 static void do_edac_check(void)
2011 {
2012 debugf3("%s()\n", __func__);
2013 check_mc_devices();
2014 do_pci_parity_check();
2015 }
2016
2017 static int edac_kernel_thread(void *arg)
2018 {
2019 while (!kthread_should_stop()) {
2020 do_edac_check();
2021
2022 /* go to sleep for the polling interval */
2023 schedule_timeout_interruptible((HZ * poll_msec) / 1000);
2024 try_to_freeze();
2025 }
2026
2027 return 0;
2028 }
2029
2030 /*
2031 * edac_mc_init
2032 * module initialization entry point
2033 */
2034 static int __init edac_mc_init(void)
2035 {
2036 edac_printk(KERN_INFO, EDAC_MC, EDAC_MC_VERSION "\n");
2037
2038 /*
2039 * Harvest and clear any boot/initialization PCI parity errors
2040 *
2041 * FIXME: This only clears errors logged by devices present at time of
2042 * module initialization. We should also do an initial clear
2043 * of each newly hotplugged device.
2044 */
2045 clear_pci_parity_errors();
2046
2047 /* Create the MC sysfs entries */
2048 if (edac_sysfs_memctrl_setup()) {
2049 edac_printk(KERN_ERR, EDAC_MC,
2050 "Error initializing sysfs code\n");
2051 return -ENODEV;
2052 }
2053
2054 /* Create the PCI parity sysfs entries */
2055 if (edac_sysfs_pci_setup()) {
2056 edac_sysfs_memctrl_teardown();
2057 edac_printk(KERN_ERR, EDAC_MC,
2058 "EDAC PCI: Error initializing sysfs code\n");
2059 return -ENODEV;
2060 }
2061
2062 /* create our kernel thread */
2063 edac_thread = kthread_run(edac_kernel_thread, NULL, "kedac");
2064
2065 if (IS_ERR(edac_thread)) {
2066 /* remove the sysfs entries */
2067 edac_sysfs_memctrl_teardown();
2068 edac_sysfs_pci_teardown();
2069 return PTR_ERR(edac_thread);
2070 }
2071
2072 return 0;
2073 }
2074
2075 /*
2076 * edac_mc_exit()
2077 * module exit/termination function
2078 */
2079 static void __exit edac_mc_exit(void)
2080 {
2081 debugf0("%s()\n", __func__);
2082 kthread_stop(edac_thread);
2083
2084 /* tear down the sysfs device */
2085 edac_sysfs_memctrl_teardown();
2086 edac_sysfs_pci_teardown();
2087 }
2088
2089 module_init(edac_mc_init);
2090 module_exit(edac_mc_exit);
2091
2092 MODULE_LICENSE("GPL");
2093 MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh et al\n"
2094 "Based on work by Dan Hollis et al");
2095 MODULE_DESCRIPTION("Core library routines for MC reporting");
2096
2097 module_param(panic_on_ue, int, 0644);
2098 MODULE_PARM_DESC(panic_on_ue, "Panic on uncorrected error: 0=off 1=on");
2099 module_param(check_pci_parity, int, 0644);
2100 MODULE_PARM_DESC(check_pci_parity, "Check for PCI bus parity errors: 0=off 1=on");
2101 module_param(panic_on_pci_parity, int, 0644);
2102 MODULE_PARM_DESC(panic_on_pci_parity, "Panic on PCI Bus Parity error: 0=off 1=on");
2103 module_param(log_ue, int, 0644);
2104 MODULE_PARM_DESC(log_ue, "Log uncorrectable error to console: 0=off 1=on");
2105 module_param(log_ce, int, 0644);
2106 MODULE_PARM_DESC(log_ce, "Log correctable error to console: 0=off 1=on");
2107 module_param(poll_msec, int, 0644);
2108 MODULE_PARM_DESC(poll_msec, "Polling period in milliseconds");
2109 #ifdef CONFIG_EDAC_DEBUG
2110 module_param(edac_debug_level, int, 0644);
2111 MODULE_PARM_DESC(edac_debug_level, "Debug level");
2112 #endif
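/* Example (illustrative): the parameters above can be given at load time,
 * e.g. "modprobe edac_mc poll_msec=500 panic_on_ue=1", and, since they are
 * registered with mode 0644, adjusted afterwards through
 * /sys/module/edac_mc/parameters/.
 */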