drivers/edac/edac_mc_sysfs.c
1 /*
2 * edac_mc kernel module
3 * (C) 2005-2007 Linux Networx (http://lnxi.com)
4 *
5 * This file may be distributed under the terms of the
6 * GNU General Public License.
7 *
8 * Written by Doug Thompson <norsk5@xmission.com> www.softwarebitmaker.com
9 *
10 * (c) 2012-2013 - Mauro Carvalho Chehab <mchehab@redhat.com>
11 * The entire API was rewritten and ported to use struct device
12 *
13 */
14
15 #include <linux/ctype.h>
16 #include <linux/slab.h>
17 #include <linux/edac.h>
18 #include <linux/bug.h>
19 #include <linux/pm_runtime.h>
20 #include <linux/uaccess.h>
21
22 #include "edac_core.h"
23 #include "edac_module.h"
24
25 /* MC EDAC Controls, settable by module parameter, and sysfs */
26 static int edac_mc_log_ue = 1;
27 static int edac_mc_log_ce = 1;
28 static int edac_mc_panic_on_ue;
29 static int edac_mc_poll_msec = 1000;
30
31 /* Getter functions for above */
32 int edac_mc_get_log_ue(void)
33 {
34 return edac_mc_log_ue;
35 }
36
37 int edac_mc_get_log_ce(void)
38 {
39 return edac_mc_log_ce;
40 }
41
42 int edac_mc_get_panic_on_ue(void)
43 {
44 return edac_mc_panic_on_ue;
45 }
46
47 /* this is temporary */
48 int edac_mc_get_poll_msec(void)
49 {
50 return edac_mc_poll_msec;
51 }
52
53 static int edac_set_poll_msec(const char *val, struct kernel_param *kp)
54 {
55 long l;
56 int ret;
57
58 if (!val)
59 return -EINVAL;
60
61 ret = strict_strtol(val, 0, &l);
62 if (ret == -EINVAL || ((int)l != l))
63 return -EINVAL;
64 *((int *)kp->arg) = l;
65
66 /* notify edac_mc engine to reset the poll period */
67 edac_mc_reset_delay_period(l);
68
69 return 0;
70 }
71
72 /* Parameter declarations for above */
73 module_param(edac_mc_panic_on_ue, int, 0644);
74 MODULE_PARM_DESC(edac_mc_panic_on_ue, "Panic on uncorrected error: 0=off 1=on");
75 module_param(edac_mc_log_ue, int, 0644);
76 MODULE_PARM_DESC(edac_mc_log_ue,
77 "Log uncorrectable error to console: 0=off 1=on");
78 module_param(edac_mc_log_ce, int, 0644);
79 MODULE_PARM_DESC(edac_mc_log_ce,
80 "Log correctable error to console: 0=off 1=on");
81 module_param_call(edac_mc_poll_msec, edac_set_poll_msec, param_get_int,
82 &edac_mc_poll_msec, 0644);
83 MODULE_PARM_DESC(edac_mc_poll_msec, "Polling period in milliseconds");
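/*
 * Illustrative note (not part of the original file): since these are
 * declared with module_param()/module_param_call() and mode 0644, they can
 * normally be changed at runtime through sysfs as well as at load time.
 * Assuming this file is built into the edac_core module, the knobs would
 * typically appear as:
 *
 *   /sys/module/edac_core/parameters/edac_mc_panic_on_ue
 *   /sys/module/edac_core/parameters/edac_mc_log_ue
 *   /sys/module/edac_core/parameters/edac_mc_log_ce
 *   /sys/module/edac_core/parameters/edac_mc_poll_msec
 *
 * so e.g. "echo 5000 > .../edac_mc_poll_msec" would be routed through
 * edac_set_poll_msec() and reset the polling period.
 */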
84
85 static struct device *mci_pdev;
86
87 /*
88 * various constants for Memory Controllers
89 */
90 static const char *mem_types[] = {
91 [MEM_EMPTY] = "Empty",
92 [MEM_RESERVED] = "Reserved",
93 [MEM_UNKNOWN] = "Unknown",
94 [MEM_FPM] = "FPM",
95 [MEM_EDO] = "EDO",
96 [MEM_BEDO] = "BEDO",
97 [MEM_SDR] = "Unbuffered-SDR",
98 [MEM_RDR] = "Registered-SDR",
99 [MEM_DDR] = "Unbuffered-DDR",
100 [MEM_RDDR] = "Registered-DDR",
101 [MEM_RMBS] = "RMBS",
102 [MEM_DDR2] = "Unbuffered-DDR2",
103 [MEM_FB_DDR2] = "FullyBuffered-DDR2",
104 [MEM_RDDR2] = "Registered-DDR2",
105 [MEM_XDR] = "XDR",
106 [MEM_DDR3] = "Unbuffered-DDR3",
107 [MEM_RDDR3] = "Registered-DDR3"
108 };
109
110 static const char *dev_types[] = {
111 [DEV_UNKNOWN] = "Unknown",
112 [DEV_X1] = "x1",
113 [DEV_X2] = "x2",
114 [DEV_X4] = "x4",
115 [DEV_X8] = "x8",
116 [DEV_X16] = "x16",
117 [DEV_X32] = "x32",
118 [DEV_X64] = "x64"
119 };
120
121 static const char *edac_caps[] = {
122 [EDAC_UNKNOWN] = "Unknown",
123 [EDAC_NONE] = "None",
124 [EDAC_RESERVED] = "Reserved",
125 [EDAC_PARITY] = "PARITY",
126 [EDAC_EC] = "EC",
127 [EDAC_SECDED] = "SECDED",
128 [EDAC_S2ECD2ED] = "S2ECD2ED",
129 [EDAC_S4ECD4ED] = "S4ECD4ED",
130 [EDAC_S8ECD8ED] = "S8ECD8ED",
131 [EDAC_S16ECD16ED] = "S16ECD16ED"
132 };
133
134 #ifdef CONFIG_EDAC_LEGACY_SYSFS
135 /*
136 * EDAC sysfs CSROW data structures and methods
137 */
138
139 #define to_csrow(k) container_of(k, struct csrow_info, dev)
140
141 /*
142 * We need it to avoid namespace conflicts between the legacy API
143 * and the per-dimm/per-rank one
144 */
145 #define DEVICE_ATTR_LEGACY(_name, _mode, _show, _store) \
146 struct device_attribute dev_attr_legacy_##_name = __ATTR(_name, _mode, _show, _store)
147
148 struct dev_ch_attribute {
149 struct device_attribute attr;
150 int channel;
151 };
152
153 #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
154 struct dev_ch_attribute dev_attr_legacy_##_name = \
155 { __ATTR(_name, _mode, _show, _store), (_var) }
156
157 #define to_channel(k) (container_of(k, struct dev_ch_attribute, attr)->channel)
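/*
 * Illustrative expansion (not part of the original file), to show how
 * to_channel() gets at the channel number. A declaration such as
 *
 *   DEVICE_CHANNEL(ch0_dimm_label, S_IRUGO | S_IWUSR,
 *                  channel_dimm_label_show, channel_dimm_label_store, 0);
 *
 * expands roughly to
 *
 *   struct dev_ch_attribute dev_attr_legacy_ch0_dimm_label = {
 *           __ATTR(ch0_dimm_label, S_IRUGO | S_IWUSR,
 *                  channel_dimm_label_show, channel_dimm_label_store),
 *           0
 *   };
 *
 * i.e. a device_attribute with the channel number stored next to it, and a
 * "legacy_" prefixed symbol name that cannot clash with the non-legacy
 * attributes of the same name defined later in this file.
 */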
158
159 /* Set of default csrow<id> attribute show/store functions */
160 static ssize_t csrow_ue_count_show(struct device *dev,
161 struct device_attribute *mattr, char *data)
162 {
163 struct csrow_info *csrow = to_csrow(dev);
164
165 return sprintf(data, "%u\n", csrow->ue_count);
166 }
167
168 static ssize_t csrow_ce_count_show(struct device *dev,
169 struct device_attribute *mattr, char *data)
170 {
171 struct csrow_info *csrow = to_csrow(dev);
172
173 return sprintf(data, "%u\n", csrow->ce_count);
174 }
175
176 static ssize_t csrow_size_show(struct device *dev,
177 struct device_attribute *mattr, char *data)
178 {
179 struct csrow_info *csrow = to_csrow(dev);
180 int i;
181 u32 nr_pages = 0;
182
183 if (csrow->mci->csbased)
184 return sprintf(data, "%u\n", PAGES_TO_MiB(csrow->nr_pages));
185
186 for (i = 0; i < csrow->nr_channels; i++)
187 nr_pages += csrow->channels[i]->dimm->nr_pages;
188 return sprintf(data, "%u\n", PAGES_TO_MiB(nr_pages));
189 }
190
191 static ssize_t csrow_mem_type_show(struct device *dev,
192 struct device_attribute *mattr, char *data)
193 {
194 struct csrow_info *csrow = to_csrow(dev);
195
196 return sprintf(data, "%s\n", mem_types[csrow->channels[0]->dimm->mtype]);
197 }
198
199 static ssize_t csrow_dev_type_show(struct device *dev,
200 struct device_attribute *mattr, char *data)
201 {
202 struct csrow_info *csrow = to_csrow(dev);
203
204 return sprintf(data, "%s\n", dev_types[csrow->channels[0]->dimm->dtype]);
205 }
206
207 static ssize_t csrow_edac_mode_show(struct device *dev,
208 struct device_attribute *mattr,
209 char *data)
210 {
211 struct csrow_info *csrow = to_csrow(dev);
212
213 return sprintf(data, "%s\n", edac_caps[csrow->channels[0]->dimm->edac_mode]);
214 }
215
216 /* show/store functions for DIMM Label attributes */
217 static ssize_t channel_dimm_label_show(struct device *dev,
218 struct device_attribute *mattr,
219 char *data)
220 {
221 struct csrow_info *csrow = to_csrow(dev);
222 unsigned chan = to_channel(mattr);
223 struct rank_info *rank = csrow->channels[chan];
224
225 /* if the field has not been initialized, there is nothing to send */
226 if (!rank->dimm->label[0])
227 return 0;
228
229 return snprintf(data, EDAC_MC_LABEL_LEN, "%s\n",
230 rank->dimm->label);
231 }
232
233 static ssize_t channel_dimm_label_store(struct device *dev,
234 struct device_attribute *mattr,
235 const char *data, size_t count)
236 {
237 struct csrow_info *csrow = to_csrow(dev);
238 unsigned chan = to_channel(mattr);
239 struct rank_info *rank = csrow->channels[chan];
240
241 ssize_t max_size = 0;
242
243 max_size = min((ssize_t) count, (ssize_t) EDAC_MC_LABEL_LEN - 1);
244 strncpy(rank->dimm->label, data, max_size);
245 rank->dimm->label[max_size] = '\0';
246
247 return max_size;
248 }
249
250 /* show function for dynamic chX_ce_count attribute */
251 static ssize_t channel_ce_count_show(struct device *dev,
252 struct device_attribute *mattr, char *data)
253 {
254 struct csrow_info *csrow = to_csrow(dev);
255 unsigned chan = to_channel(mattr);
256 struct rank_info *rank = csrow->channels[chan];
257
258 return sprintf(data, "%u\n", rank->ce_count);
259 }
260
261 /* csrow<id> attribute files */
262 DEVICE_ATTR_LEGACY(size_mb, S_IRUGO, csrow_size_show, NULL);
263 DEVICE_ATTR_LEGACY(dev_type, S_IRUGO, csrow_dev_type_show, NULL);
264 DEVICE_ATTR_LEGACY(mem_type, S_IRUGO, csrow_mem_type_show, NULL);
265 DEVICE_ATTR_LEGACY(edac_mode, S_IRUGO, csrow_edac_mode_show, NULL);
266 DEVICE_ATTR_LEGACY(ue_count, S_IRUGO, csrow_ue_count_show, NULL);
267 DEVICE_ATTR_LEGACY(ce_count, S_IRUGO, csrow_ce_count_show, NULL);
268
269 /* default attributes of the CSROW<id> object */
270 static struct attribute *csrow_attrs[] = {
271 &dev_attr_legacy_dev_type.attr,
272 &dev_attr_legacy_mem_type.attr,
273 &dev_attr_legacy_edac_mode.attr,
274 &dev_attr_legacy_size_mb.attr,
275 &dev_attr_legacy_ue_count.attr,
276 &dev_attr_legacy_ce_count.attr,
277 NULL,
278 };
279
280 static struct attribute_group csrow_attr_grp = {
281 .attrs = csrow_attrs,
282 };
283
284 static const struct attribute_group *csrow_attr_groups[] = {
285 &csrow_attr_grp,
286 NULL
287 };
288
289 static void csrow_attr_release(struct device *dev)
290 {
291 struct csrow_info *csrow = container_of(dev, struct csrow_info, dev);
292
293 edac_dbg(1, "Releasing csrow device %s\n", dev_name(dev));
294 kfree(csrow);
295 }
296
297 static struct device_type csrow_attr_type = {
298 .groups = csrow_attr_groups,
299 .release = csrow_attr_release,
300 };
301
302 /*
303 * possible dynamic channel DIMM Label attribute files
304 *
305 */
306
307 #define EDAC_NR_CHANNELS 6
308
309 DEVICE_CHANNEL(ch0_dimm_label, S_IRUGO | S_IWUSR,
310 channel_dimm_label_show, channel_dimm_label_store, 0);
311 DEVICE_CHANNEL(ch1_dimm_label, S_IRUGO | S_IWUSR,
312 channel_dimm_label_show, channel_dimm_label_store, 1);
313 DEVICE_CHANNEL(ch2_dimm_label, S_IRUGO | S_IWUSR,
314 channel_dimm_label_show, channel_dimm_label_store, 2);
315 DEVICE_CHANNEL(ch3_dimm_label, S_IRUGO | S_IWUSR,
316 channel_dimm_label_show, channel_dimm_label_store, 3);
317 DEVICE_CHANNEL(ch4_dimm_label, S_IRUGO | S_IWUSR,
318 channel_dimm_label_show, channel_dimm_label_store, 4);
319 DEVICE_CHANNEL(ch5_dimm_label, S_IRUGO | S_IWUSR,
320 channel_dimm_label_show, channel_dimm_label_store, 5);
321
322 /* Total possible dynamic DIMM Label attribute file table */
323 static struct device_attribute *dynamic_csrow_dimm_attr[] = {
324 &dev_attr_legacy_ch0_dimm_label.attr,
325 &dev_attr_legacy_ch1_dimm_label.attr,
326 &dev_attr_legacy_ch2_dimm_label.attr,
327 &dev_attr_legacy_ch3_dimm_label.attr,
328 &dev_attr_legacy_ch4_dimm_label.attr,
329 &dev_attr_legacy_ch5_dimm_label.attr
330 };
331
332 /* possible dynamic channel ce_count attribute files */
333 DEVICE_CHANNEL(ch0_ce_count, S_IRUGO,
334 channel_ce_count_show, NULL, 0);
335 DEVICE_CHANNEL(ch1_ce_count, S_IRUGO,
336 channel_ce_count_show, NULL, 1);
337 DEVICE_CHANNEL(ch2_ce_count, S_IRUGO,
338 channel_ce_count_show, NULL, 2);
339 DEVICE_CHANNEL(ch3_ce_count, S_IRUGO,
340 channel_ce_count_show, NULL, 3);
341 DEVICE_CHANNEL(ch4_ce_count, S_IRUGO,
342 channel_ce_count_show, NULL, 4);
343 DEVICE_CHANNEL(ch5_ce_count, S_IRUGO,
344 channel_ce_count_show, NULL, 5);
345
346 /* Total possible dynamic ce_count attribute file table */
347 static struct device_attribute *dynamic_csrow_ce_count_attr[] = {
348 &dev_attr_legacy_ch0_ce_count.attr,
349 &dev_attr_legacy_ch1_ce_count.attr,
350 &dev_attr_legacy_ch2_ce_count.attr,
351 &dev_attr_legacy_ch3_ce_count.attr,
352 &dev_attr_legacy_ch4_ce_count.attr,
353 &dev_attr_legacy_ch5_ce_count.attr
354 };
355
356 static inline int nr_pages_per_csrow(struct csrow_info *csrow)
357 {
358 int chan, nr_pages = 0;
359
360 for (chan = 0; chan < csrow->nr_channels; chan++)
361 nr_pages += csrow->channels[chan]->dimm->nr_pages;
362
363 return nr_pages;
364 }
365
366 /* Create a CSROW object under the specified edac_mc device */
367 static int edac_create_csrow_object(struct mem_ctl_info *mci,
368 struct csrow_info *csrow, int index)
369 {
370 int err, chan;
371
372 if (csrow->nr_channels >= EDAC_NR_CHANNELS)
373 return -ENODEV;
374
375 csrow->dev.type = &csrow_attr_type;
376 csrow->dev.bus = &mci->bus;
377 device_initialize(&csrow->dev);
378 csrow->dev.parent = &mci->dev;
379 csrow->mci = mci;
380 dev_set_name(&csrow->dev, "csrow%d", index);
381 dev_set_drvdata(&csrow->dev, csrow);
382
383 edac_dbg(0, "creating (virtual) csrow node %s\n",
384 dev_name(&csrow->dev));
385
386 err = device_add(&csrow->dev);
387 if (err < 0)
388 return err;
389
390 for (chan = 0; chan < csrow->nr_channels; chan++) {
391 /* Only expose populated DIMMs */
392 if (!csrow->channels[chan]->dimm->nr_pages)
393 continue;
394 err = device_create_file(&csrow->dev,
395 dynamic_csrow_dimm_attr[chan]);
396 if (err < 0)
397 goto error;
398 err = device_create_file(&csrow->dev,
399 dynamic_csrow_ce_count_attr[chan]);
400 if (err < 0) {
401 device_remove_file(&csrow->dev,
402 dynamic_csrow_dimm_attr[chan]);
403 goto error;
404 }
405 }
406
407 return 0;
408
409 error:
410 for (--chan; chan >= 0; chan--) {
411 device_remove_file(&csrow->dev,
412 dynamic_csrow_dimm_attr[chan]);
413 device_remove_file(&csrow->dev,
414 dynamic_csrow_ce_count_attr[chan]);
415 }
416 put_device(&csrow->dev);
417
418 return err;
419 }
420
421 /* Create all CSROW objects under the specified edac_mc device */
422 static int edac_create_csrow_objects(struct mem_ctl_info *mci)
423 {
424 int err, i, chan;
425 struct csrow_info *csrow;
426
427 for (i = 0; i < mci->nr_csrows; i++) {
428 csrow = mci->csrows[i];
429 if (!nr_pages_per_csrow(csrow))
430 continue;
431 err = edac_create_csrow_object(mci, mci->csrows[i], i);
432 if (err < 0) {
433 edac_dbg(1,
434 "failure: create csrow objects for csrow %d\n",
435 i);
436 goto error;
437 }
438 }
439 return 0;
440
441 error:
442 for (--i; i >= 0; i--) {
443 csrow = mci->csrows[i];
444 if (!nr_pages_per_csrow(csrow))
445 continue;
446 for (chan = csrow->nr_channels - 1; chan >= 0; chan--) {
447 if (!csrow->channels[chan]->dimm->nr_pages)
448 continue;
449 device_remove_file(&csrow->dev,
450 dynamic_csrow_dimm_attr[chan]);
451 device_remove_file(&csrow->dev,
452 dynamic_csrow_ce_count_attr[chan]);
453 }
454 put_device(&mci->csrows[i]->dev);
455 }
456
457 return err;
458 }
459
460 static void edac_delete_csrow_objects(struct mem_ctl_info *mci)
461 {
462 int i, chan;
463 struct csrow_info *csrow;
464
465 for (i = mci->nr_csrows - 1; i >= 0; i--) {
466 csrow = mci->csrows[i];
467 if (!nr_pages_per_csrow(csrow))
468 continue;
469 for (chan = csrow->nr_channels - 1; chan >= 0; chan--) {
470 if (!csrow->channels[chan]->dimm->nr_pages)
471 continue;
472 edac_dbg(1, "Removing csrow %d channel %d sysfs nodes\n",
473 i, chan);
474 device_remove_file(&csrow->dev,
475 dynamic_csrow_dimm_attr[chan]);
476 device_remove_file(&csrow->dev,
477 dynamic_csrow_ce_count_attr[chan]);
478 }
479 device_unregister(&mci->csrows[i]->dev);
480 }
481 }
482 #endif
483
484 /*
485 * Per-dimm (or per-rank) devices
486 */
487
488 #define to_dimm(k) container_of(k, struct dimm_info, dev)
489
490 /* show/store functions for DIMM Label attributes */
491 static ssize_t dimmdev_location_show(struct device *dev,
492 struct device_attribute *mattr, char *data)
493 {
494 struct dimm_info *dimm = to_dimm(dev);
495
496 return edac_dimm_info_location(dimm, data, PAGE_SIZE);
497 }
498
499 static ssize_t dimmdev_label_show(struct device *dev,
500 struct device_attribute *mattr, char *data)
501 {
502 struct dimm_info *dimm = to_dimm(dev);
503
504 /* if the field has not been initialized, there is nothing to send */
505 if (!dimm->label[0])
506 return 0;
507
508 return snprintf(data, EDAC_MC_LABEL_LEN, "%s\n", dimm->label);
509 }
510
511 static ssize_t dimmdev_label_store(struct device *dev,
512 struct device_attribute *mattr,
513 const char *data,
514 size_t count)
515 {
516 struct dimm_info *dimm = to_dimm(dev);
517
518 ssize_t max_size = 0;
519
520 max_size = min((ssize_t) count, (ssize_t) EDAC_MC_LABEL_LEN - 1);
521 strncpy(dimm->label, data, max_size);
522 dimm->label[max_size] = '\0';
523
524 return max_size;
525 }
526
527 static ssize_t dimmdev_size_show(struct device *dev,
528 struct device_attribute *mattr, char *data)
529 {
530 struct dimm_info *dimm = to_dimm(dev);
531
532 return sprintf(data, "%u\n", PAGES_TO_MiB(dimm->nr_pages));
533 }
534
535 static ssize_t dimmdev_mem_type_show(struct device *dev,
536 struct device_attribute *mattr, char *data)
537 {
538 struct dimm_info *dimm = to_dimm(dev);
539
540 return sprintf(data, "%s\n", mem_types[dimm->mtype]);
541 }
542
543 static ssize_t dimmdev_dev_type_show(struct device *dev,
544 struct device_attribute *mattr, char *data)
545 {
546 struct dimm_info *dimm = to_dimm(dev);
547
548 return sprintf(data, "%s\n", dev_types[dimm->dtype]);
549 }
550
551 static ssize_t dimmdev_edac_mode_show(struct device *dev,
552 struct device_attribute *mattr,
553 char *data)
554 {
555 struct dimm_info *dimm = to_dimm(dev);
556
557 return sprintf(data, "%s\n", edac_caps[dimm->edac_mode]);
558 }
559
560 /* dimm/rank attribute files */
561 static DEVICE_ATTR(dimm_label, S_IRUGO | S_IWUSR,
562 dimmdev_label_show, dimmdev_label_store);
563 static DEVICE_ATTR(dimm_location, S_IRUGO, dimmdev_location_show, NULL);
564 static DEVICE_ATTR(size, S_IRUGO, dimmdev_size_show, NULL);
565 static DEVICE_ATTR(dimm_mem_type, S_IRUGO, dimmdev_mem_type_show, NULL);
566 static DEVICE_ATTR(dimm_dev_type, S_IRUGO, dimmdev_dev_type_show, NULL);
567 static DEVICE_ATTR(dimm_edac_mode, S_IRUGO, dimmdev_edac_mode_show, NULL);
568
569 /* attributes of the dimm<id>/rank<id> object */
570 static struct attribute *dimm_attrs[] = {
571 &dev_attr_dimm_label.attr,
572 &dev_attr_dimm_location.attr,
573 &dev_attr_size.attr,
574 &dev_attr_dimm_mem_type.attr,
575 &dev_attr_dimm_dev_type.attr,
576 &dev_attr_dimm_edac_mode.attr,
577 NULL,
578 };
579
580 static struct attribute_group dimm_attr_grp = {
581 .attrs = dimm_attrs,
582 };
583
584 static const struct attribute_group *dimm_attr_groups[] = {
585 &dimm_attr_grp,
586 NULL
587 };
588
589 static void dimm_attr_release(struct device *dev)
590 {
591 struct dimm_info *dimm = container_of(dev, struct dimm_info, dev);
592
593 edac_dbg(1, "Releasing dimm device %s\n", dev_name(dev));
594 kfree(dimm);
595 }
596
597 static struct device_type dimm_attr_type = {
598 .groups = dimm_attr_groups,
599 .release = dimm_attr_release,
600 };
601
602 /* Create a DIMM object under the specified memory controller device */
603 static int edac_create_dimm_object(struct mem_ctl_info *mci,
604 struct dimm_info *dimm,
605 int index)
606 {
607 int err;
608 dimm->mci = mci;
609
610 dimm->dev.type = &dimm_attr_type;
611 dimm->dev.bus = &mci->bus;
612 device_initialize(&dimm->dev);
613
614 dimm->dev.parent = &mci->dev;
615 if (mci->mem_is_per_rank)
616 dev_set_name(&dimm->dev, "rank%d", index);
617 else
618 dev_set_name(&dimm->dev, "dimm%d", index);
619 dev_set_drvdata(&dimm->dev, dimm);
620 pm_runtime_forbid(&mci->dev);
621
622 err = device_add(&dimm->dev);
623
624 edac_dbg(0, "creating rank/dimm device %s\n", dev_name(&dimm->dev));
625
626 return err;
627 }
628
629 /*
630 * Memory controller device
631 */
632
633 #define to_mci(k) container_of(k, struct mem_ctl_info, dev)
634
635 static ssize_t mci_reset_counters_store(struct device *dev,
636 struct device_attribute *mattr,
637 const char *data, size_t count)
638 {
639 struct mem_ctl_info *mci = to_mci(dev);
640 int cnt, row, chan, i;
641 mci->ue_mc = 0;
642 mci->ce_mc = 0;
643 mci->ue_noinfo_count = 0;
644 mci->ce_noinfo_count = 0;
645
646 for (row = 0; row < mci->nr_csrows; row++) {
647 struct csrow_info *ri = mci->csrows[row];
648
649 ri->ue_count = 0;
650 ri->ce_count = 0;
651
652 for (chan = 0; chan < ri->nr_channels; chan++)
653 ri->channels[chan]->ce_count = 0;
654 }
655
656 cnt = 1;
657 for (i = 0; i < mci->n_layers; i++) {
658 cnt *= mci->layers[i].size;
659 memset(mci->ce_per_layer[i], 0, cnt * sizeof(u32));
660 memset(mci->ue_per_layer[i], 0, cnt * sizeof(u32));
661 }
662
663 mci->start_time = jiffies;
664 return count;
665 }
666
667 /* Memory scrubbing interface:
668 *
669 * An MC driver can limit the scrubbing bandwidth based on the CPU type.
670 * Therefore, ->set_sdram_scrub_rate should be made to return the actual
671 * bandwidth that is accepted or 0 when scrubbing is to be disabled.
672 *
673 * A negative return value means that an error occurred while setting
674 * the scrub rate.
675 */
676 static ssize_t mci_sdram_scrub_rate_store(struct device *dev,
677 struct device_attribute *mattr,
678 const char *data, size_t count)
679 {
680 struct mem_ctl_info *mci = to_mci(dev);
681 unsigned long bandwidth = 0;
682 int new_bw = 0;
683
684 if (strict_strtoul(data, 10, &bandwidth) < 0)
685 return -EINVAL;
686
687 new_bw = mci->set_sdram_scrub_rate(mci, bandwidth);
688 if (new_bw < 0) {
689 edac_printk(KERN_WARNING, EDAC_MC,
690 "Error setting scrub rate to: %lu\n", bandwidth);
691 return -EINVAL;
692 }
693
694 return count;
695 }
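/*
 * Illustrative sketch only (not part of the original file): a driver
 * honoring the ->set_sdram_scrub_rate() contract described above might
 * look roughly like this. The "foo_" helpers are hypothetical.
 */
#if 0	/* example, not compiled */
static int foo_set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 new_bw)
{
	u32 actual_bw;

	if (new_bw == 0) {
		foo_hw_disable_scrub(mci);	/* hypothetical helper */
		return 0;			/* 0: scrubbing disabled */
	}

	/* Program the hardware; it may clamp to what it really supports */
	actual_bw = foo_hw_program_scrub(mci, new_bw);
	if (!actual_bw)
		return -EINVAL;			/* negative value: error */

	return actual_bw;			/* bandwidth actually set */
}
#endif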
696
697 /*
698 * ->get_sdram_scrub_rate() return value semantics same as above.
699 */
700 static ssize_t mci_sdram_scrub_rate_show(struct device *dev,
701 struct device_attribute *mattr,
702 char *data)
703 {
704 struct mem_ctl_info *mci = to_mci(dev);
705 int bandwidth = 0;
706
707 bandwidth = mci->get_sdram_scrub_rate(mci);
708 if (bandwidth < 0) {
709 edac_printk(KERN_DEBUG, EDAC_MC, "Error reading scrub rate\n");
710 return bandwidth;
711 }
712
713 return sprintf(data, "%d\n", bandwidth);
714 }
715
716 /* default attribute files for the MCI object */
717 static ssize_t mci_ue_count_show(struct device *dev,
718 struct device_attribute *mattr,
719 char *data)
720 {
721 struct mem_ctl_info *mci = to_mci(dev);
722
723 return sprintf(data, "%d\n", mci->ue_mc);
724 }
725
726 static ssize_t mci_ce_count_show(struct device *dev,
727 struct device_attribute *mattr,
728 char *data)
729 {
730 struct mem_ctl_info *mci = to_mci(dev);
731
732 return sprintf(data, "%d\n", mci->ce_mc);
733 }
734
735 static ssize_t mci_ce_noinfo_show(struct device *dev,
736 struct device_attribute *mattr,
737 char *data)
738 {
739 struct mem_ctl_info *mci = to_mci(dev);
740
741 return sprintf(data, "%d\n", mci->ce_noinfo_count);
742 }
743
744 static ssize_t mci_ue_noinfo_show(struct device *dev,
745 struct device_attribute *mattr,
746 char *data)
747 {
748 struct mem_ctl_info *mci = to_mci(dev);
749
750 return sprintf(data, "%d\n", mci->ue_noinfo_count);
751 }
752
753 static ssize_t mci_seconds_show(struct device *dev,
754 struct device_attribute *mattr,
755 char *data)
756 {
757 struct mem_ctl_info *mci = to_mci(dev);
758
759 return sprintf(data, "%ld\n", (jiffies - mci->start_time) / HZ);
760 }
761
762 static ssize_t mci_ctl_name_show(struct device *dev,
763 struct device_attribute *mattr,
764 char *data)
765 {
766 struct mem_ctl_info *mci = to_mci(dev);
767
768 return sprintf(data, "%s\n", mci->ctl_name);
769 }
770
771 static ssize_t mci_size_mb_show(struct device *dev,
772 struct device_attribute *mattr,
773 char *data)
774 {
775 struct mem_ctl_info *mci = to_mci(dev);
776 int total_pages = 0, csrow_idx, j;
777
778 for (csrow_idx = 0; csrow_idx < mci->nr_csrows; csrow_idx++) {
779 struct csrow_info *csrow = mci->csrows[csrow_idx];
780
781 if (csrow->mci->csbased) {
782 total_pages += csrow->nr_pages;
783 } else {
784 for (j = 0; j < csrow->nr_channels; j++) {
785 struct dimm_info *dimm = csrow->channels[j]->dimm;
786
787 total_pages += dimm->nr_pages;
788 }
789 }
790 }
791
792 return sprintf(data, "%u\n", PAGES_TO_MiB(total_pages));
793 }
794
795 static ssize_t mci_max_location_show(struct device *dev,
796 struct device_attribute *mattr,
797 char *data)
798 {
799 struct mem_ctl_info *mci = to_mci(dev);
800 int i;
801 char *p = data;
802
803 for (i = 0; i < mci->n_layers; i++) {
804 p += sprintf(p, "%s %d ",
805 edac_layer_name[mci->layers[i].type],
806 mci->layers[i].size - 1);
807 }
808
809 return p - data;
810 }
811
812 #ifdef CONFIG_EDAC_DEBUG
813 static ssize_t edac_fake_inject_write(struct file *file,
814 const char __user *data,
815 size_t count, loff_t *ppos)
816 {
817 struct device *dev = file->private_data;
818 struct mem_ctl_info *mci = to_mci(dev);
819 static enum hw_event_mc_err_type type;
820 u16 errcount = mci->fake_inject_count;
821
822 if (!errcount)
823 errcount = 1;
824
825 type = mci->fake_inject_ue ? HW_EVENT_ERR_UNCORRECTED
826 : HW_EVENT_ERR_CORRECTED;
827
828 printk(KERN_DEBUG
829 "Generating %d %s fake error%s to %d.%d.%d to test core handling. NOTE: this won't test the driver-specific decoding logic.\n",
830 errcount,
831 (type == HW_EVENT_ERR_UNCORRECTED) ? "UE" : "CE",
832 errcount > 1 ? "s" : "",
833 mci->fake_inject_layer[0],
834 mci->fake_inject_layer[1],
835 mci->fake_inject_layer[2]
836 );
837 edac_mc_handle_error(type, mci, errcount, 0, 0, 0,
838 mci->fake_inject_layer[0],
839 mci->fake_inject_layer[1],
840 mci->fake_inject_layer[2],
841 "FAKE ERROR", "for EDAC testing only");
842
843 return count;
844 }
845
846 static const struct file_operations debug_fake_inject_fops = {
847 .open = simple_open,
848 .write = edac_fake_inject_write,
849 .llseek = generic_file_llseek,
850 };
851 #endif
852
853 /* default Control file */
854 DEVICE_ATTR(reset_counters, S_IWUSR, NULL, mci_reset_counters_store);
855
856 /* default Attribute files */
857 DEVICE_ATTR(mc_name, S_IRUGO, mci_ctl_name_show, NULL);
858 DEVICE_ATTR(size_mb, S_IRUGO, mci_size_mb_show, NULL);
859 DEVICE_ATTR(seconds_since_reset, S_IRUGO, mci_seconds_show, NULL);
860 DEVICE_ATTR(ue_noinfo_count, S_IRUGO, mci_ue_noinfo_show, NULL);
861 DEVICE_ATTR(ce_noinfo_count, S_IRUGO, mci_ce_noinfo_show, NULL);
862 DEVICE_ATTR(ue_count, S_IRUGO, mci_ue_count_show, NULL);
863 DEVICE_ATTR(ce_count, S_IRUGO, mci_ce_count_show, NULL);
864 DEVICE_ATTR(max_location, S_IRUGO, mci_max_location_show, NULL);
865
866 /* memory scrubber attribute file */
867 DEVICE_ATTR(sdram_scrub_rate, 0, NULL, NULL);
868
869 static struct attribute *mci_attrs[] = {
870 &dev_attr_reset_counters.attr,
871 &dev_attr_mc_name.attr,
872 &dev_attr_size_mb.attr,
873 &dev_attr_seconds_since_reset.attr,
874 &dev_attr_ue_noinfo_count.attr,
875 &dev_attr_ce_noinfo_count.attr,
876 &dev_attr_ue_count.attr,
877 &dev_attr_ce_count.attr,
878 &dev_attr_max_location.attr,
879 NULL
880 };
881
882 static struct attribute_group mci_attr_grp = {
883 .attrs = mci_attrs,
884 };
885
886 static const struct attribute_group *mci_attr_groups[] = {
887 &mci_attr_grp,
888 NULL
889 };
890
891 static void mci_attr_release(struct device *dev)
892 {
893 struct mem_ctl_info *mci = container_of(dev, struct mem_ctl_info, dev);
894
895 edac_dbg(1, "Releasing csrow device %s\n", dev_name(dev));
896 kfree(mci);
897 }
898
899 static struct device_type mci_attr_type = {
900 .groups = mci_attr_groups,
901 .release = mci_attr_release,
902 };
903
904 #ifdef CONFIG_EDAC_DEBUG
905 static struct dentry *edac_debugfs;
906
907 int __init edac_debugfs_init(void)
908 {
909 edac_debugfs = debugfs_create_dir("edac", NULL);
910 if (IS_ERR(edac_debugfs)) {
911 edac_debugfs = NULL;
912 return -ENOMEM;
913 }
914 return 0;
915 }
916
917 void __exit edac_debugfs_exit(void)
918 {
919 debugfs_remove(edac_debugfs);
920 }
921
922 int edac_create_debug_nodes(struct mem_ctl_info *mci)
923 {
924 struct dentry *d, *parent;
925 char name[80];
926 int i;
927
928 if (!edac_debugfs)
929 return -ENODEV;
930
931 d = debugfs_create_dir(mci->dev.kobj.name, edac_debugfs);
932 if (!d)
933 return -ENOMEM;
934 parent = d;
935
936 for (i = 0; i < mci->n_layers; i++) {
937 sprintf(name, "fake_inject_%s",
938 edac_layer_name[mci->layers[i].type]);
939 d = debugfs_create_u8(name, S_IRUGO | S_IWUSR, parent,
940 &mci->fake_inject_layer[i]);
941 if (!d)
942 goto nomem;
943 }
944
945 d = debugfs_create_bool("fake_inject_ue", S_IRUGO | S_IWUSR, parent,
946 &mci->fake_inject_ue);
947 if (!d)
948 goto nomem;
949
950 d = debugfs_create_u16("fake_inject_count", S_IRUGO | S_IWUSR, parent,
951 &mci->fake_inject_count);
952 if (!d)
953 goto nomem;
954
955 d = debugfs_create_file("fake_inject", S_IWUSR, parent,
956 &mci->dev,
957 &debug_fake_inject_fops);
958 if (!d)
959 goto nomem;
960
961 mci->debugfs = parent;
962 return 0;
963 nomem:
964 debugfs_remove(mci->debugfs);
965 return -ENOMEM;
966 }
967 #endif
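/*
 * Illustrative usage note (not part of the original file): with
 * CONFIG_EDAC_DEBUG enabled, the nodes created above would typically show
 * up as (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *   /sys/kernel/debug/edac/mc0/fake_inject_<layer>	(one file per layer)
 *   /sys/kernel/debug/edac/mc0/fake_inject_ue
 *   /sys/kernel/debug/edac/mc0/fake_inject_count
 *   /sys/kernel/debug/edac/mc0/fake_inject
 *
 * Selecting a location via the fake_inject_* controls and then writing
 * anything to "fake_inject" lands in edac_fake_inject_write(), which feeds
 * a synthetic CE or UE into edac_mc_handle_error() to exercise the core
 * handling path only.
 */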
968
969 /*
970 * Create a new Memory Controller kobject instance,
971 * mc<id> under the 'mc' directory
972 *
973 * Return:
974 * 0 Success
975 * !0 Failure
976 */
977 int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
978 {
979 int i, err;
980
981 /*
982 * The memory controller needs its own bus, in order to avoid
983 * namespace conflicts at /sys/bus/edac.
984 */
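	/*
	 * Illustrative note (not part of the original file): the per-MC bus
	 * registered below shows up as e.g. /sys/bus/mc0, while the mc%d
	 * device itself is expected to land under the edac subsystem, e.g.
	 * /sys/devices/system/edac/mc/mc0.
	 */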
985 mci->bus.name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx);
986 if (!mci->bus.name)
987 return -ENOMEM;
988 edac_dbg(0, "creating bus %s\n", mci->bus.name);
989 err = bus_register(&mci->bus);
990 if (err < 0)
991 return err;
992
993 /* initialize the mci device */
994 mci->dev.type = &mci_attr_type;
995 device_initialize(&mci->dev);
996
997 mci->dev.parent = mci_pdev;
998 mci->dev.bus = &mci->bus;
999 dev_set_name(&mci->dev, "mc%d", mci->mc_idx);
1000 dev_set_drvdata(&mci->dev, mci);
1001 pm_runtime_forbid(&mci->dev);
1002
1003 edac_dbg(0, "creating device %s\n", dev_name(&mci->dev));
1004 err = device_add(&mci->dev);
1005 if (err < 0) {
1006 edac_dbg(1, "failure: create device %s\n", dev_name(&mci->dev));
1007 bus_unregister(&mci->bus);
1008 kfree(mci->bus.name);
1009 return err;
1010 }
1011
1012 if (mci->set_sdram_scrub_rate || mci->get_sdram_scrub_rate) {
1013 if (mci->get_sdram_scrub_rate) {
1014 dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
1015 dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
1016 }
1017 if (mci->set_sdram_scrub_rate) {
1018 dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
1019 dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
1020 }
1021 err = device_create_file(&mci->dev,
1022 &dev_attr_sdram_scrub_rate);
1023 if (err) {
1024 edac_dbg(1, "failure: create sdram_scrub_rate\n");
1025 goto fail2;
1026 }
1027 }
1028 /*
1029 * Create the dimm/rank devices
1030 */
1031 for (i = 0; i < mci->tot_dimms; i++) {
1032 struct dimm_info *dimm = mci->dimms[i];
1033 /* Only expose populated DIMMs */
1034 if (dimm->nr_pages == 0)
1035 continue;
1036 #ifdef CONFIG_EDAC_DEBUG
1037 edac_dbg(1, "creating dimm%d, located at ", i);
1038 if (edac_debug_level >= 1) {
1039 int lay;
1040 for (lay = 0; lay < mci->n_layers; lay++)
1041 printk(KERN_CONT "%s %d ",
1042 edac_layer_name[mci->layers[lay].type],
1043 dimm->location[lay]);
1044 printk(KERN_CONT "\n");
1045 }
1046 #endif
1047 err = edac_create_dimm_object(mci, dimm, i);
1048 if (err) {
1049 edac_dbg(1, "failure: create dimm %d obj\n", i);
1050 goto fail;
1051 }
1052 }
1053
1054 #ifdef CONFIG_EDAC_LEGACY_SYSFS
1055 err = edac_create_csrow_objects(mci);
1056 if (err < 0)
1057 goto fail;
1058 #endif
1059
1060 #ifdef CONFIG_EDAC_DEBUG
1061 edac_create_debug_nodes(mci);
1062 #endif
1063 return 0;
1064
1065 fail:
1066 for (i--; i >= 0; i--) {
1067 struct dimm_info *dimm = mci->dimms[i];
1068 if (dimm->nr_pages == 0)
1069 continue;
1070 device_unregister(&dimm->dev);
1071 }
1072 fail2:
1073 device_unregister(&mci->dev);
1074 bus_unregister(&mci->bus);
1075 kfree(mci->bus.name);
1076 return err;
1077 }
1078
1079 /*
1080 * remove a Memory Controller instance
1081 */
1082 void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
1083 {
1084 int i;
1085
1086 edac_dbg(0, "\n");
1087
1088 #ifdef CONFIG_EDAC_DEBUG
1089 debugfs_remove(mci->debugfs);
1090 #endif
1091 #ifdef CONFIG_EDAC_LEGACY_SYSFS
1092 edac_delete_csrow_objects(mci);
1093 #endif
1094
1095 for (i = 0; i < mci->tot_dimms; i++) {
1096 struct dimm_info *dimm = mci->dimms[i];
1097 if (dimm->nr_pages == 0)
1098 continue;
1099 edac_dbg(0, "removing device %s\n", dev_name(&dimm->dev));
1100 device_unregister(&dimm->dev);
1101 }
1102 }
1103
1104 void edac_unregister_sysfs(struct mem_ctl_info *mci)
1105 {
1106 edac_dbg(1, "Unregistering device %s\n", dev_name(&mci->dev));
1107 device_unregister(&mci->dev);
1108 bus_unregister(&mci->bus);
1109 kfree(mci->bus.name);
1110 }
1111
1112 static void mc_attr_release(struct device *dev)
1113 {
1114 /*
1115 * There's no container structure here, as this is just the mci
1116 * parent device, used to create the /sys/devices/system/edac/mc sysfs node.
1117 * So, there are no attributes on it.
1118 */
1119 edac_dbg(1, "Releasing device %s\n", dev_name(dev));
1120 kfree(dev);
1121 }
1122
1123 static struct device_type mc_attr_type = {
1124 .release = mc_attr_release,
1125 };
1126 /*
1127 * Init/exit code for the module. Basically, creates/removes the 'mc' parent device under /sys/devices/system/edac
1128 */
1129 int __init edac_mc_sysfs_init(void)
1130 {
1131 struct bus_type *edac_subsys;
1132 int err;
1133
1134 /* get the /sys/devices/system/edac subsys reference */
1135 edac_subsys = edac_get_sysfs_subsys();
1136 if (edac_subsys == NULL) {
1137 edac_dbg(1, "no edac_subsys\n");
1138 err = -EINVAL;
1139 goto out;
1140 }
1141
1142 mci_pdev = kzalloc(sizeof(*mci_pdev), GFP_KERNEL);
1143 if (!mci_pdev) {
1144 err = -ENOMEM;
1145 goto out_put_sysfs;
1146 }
1147
1148 mci_pdev->bus = edac_subsys;
1149 mci_pdev->type = &mc_attr_type;
1150 device_initialize(mci_pdev);
1151 dev_set_name(mci_pdev, "mc");
1152
1153 err = device_add(mci_pdev);
1154 if (err < 0)
1155 goto out_dev_free;
1156
1157 edac_dbg(0, "device %s created\n", dev_name(mci_pdev));
1158
1159 return 0;
1160
1161 out_dev_free:
1162 kfree(mci_pdev);
1163 out_put_sysfs:
1164 edac_put_sysfs_subsys();
1165 out:
1166 return err;
1167 }
1168
1169 void __exit edac_mc_sysfs_exit(void)
1170 {
1171 device_unregister(mci_pdev);
1172 edac_put_sysfs_subsys();
1173 }
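/*
 * Illustrative sketch only (not part of the original file): a typical MC
 * driver does not call edac_create_sysfs_mci_device() directly; it
 * allocates and fills a mem_ctl_info and registers it with
 * edac_mc_add_mc(), which creates the sysfs hierarchy above on its behalf.
 * The "foo_" names are hypothetical.
 */
#if 0	/* example, not compiled */
static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct edac_mc_layer layers[2];
	struct mem_ctl_info *mci;

	layers[0].type = EDAC_MC_LAYER_CHANNEL;
	layers[0].size = 2;
	layers[0].is_virt_csrow = false;
	layers[1].type = EDAC_MC_LAYER_SLOT;
	layers[1].size = 4;
	layers[1].is_virt_csrow = true;

	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
	if (!mci)
		return -ENOMEM;

	mci->ctl_name = "foo_mc";
	mci->set_sdram_scrub_rate = foo_set_sdram_scrub_rate;

	/* ... discover DIMMs and fill mci->dimms[]->nr_pages etc. ... */

	if (edac_mc_add_mc(mci)) {	/* creates the mc0, dimm and csrow nodes */
		edac_mc_free(mci);
		return -ENODEV;
	}

	return 0;
}
#endif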