/*
 * edac_mc kernel module
 * (C) 2005, 2006 Linux Networx (http://lnxi.com)
 * This file may be distributed under the terms of the
 * GNU General Public License.
 *
 * Written by Thayne Harbaugh
 * Based on work by Dan Hollis <goemon at anime dot net> and others.
 *	http://www.anime.net/~goemon/linux-ecc/
 *
 * Modified by Dave Peterson and Doug Thompson
 */
15 #include <linux/module.h>
16 #include <linux/proc_fs.h>
17 #include <linux/kernel.h>
18 #include <linux/types.h>
19 #include <linux/smp.h>
20 #include <linux/init.h>
21 #include <linux/sysctl.h>
22 #include <linux/highmem.h>
23 #include <linux/timer.h>
24 #include <linux/slab.h>
25 #include <linux/jiffies.h>
26 #include <linux/spinlock.h>
27 #include <linux/list.h>
28 #include <linux/sysdev.h>
29 #include <linux/ctype.h>
30 #include <linux/edac.h>
31 #include <asm/uaccess.h>
34 #include "edac_core.h"
35 #include "edac_module.h"
/* lock to memory controller's control array (mc_devices list below) */
static DEFINE_MUTEX(mem_ctls_mutex);
/* global list of all registered memory controllers, sorted by mc_idx */
static LIST_HEAD(mc_devices);
41 #ifdef CONFIG_EDAC_DEBUG
43 static void edac_mc_dump_channel(struct channel_info
*chan
)
45 debugf4("\tchannel = %p\n", chan
);
46 debugf4("\tchannel->chan_idx = %d\n", chan
->chan_idx
);
47 debugf4("\tchannel->ce_count = %d\n", chan
->ce_count
);
48 debugf4("\tchannel->label = '%s'\n", chan
->label
);
49 debugf4("\tchannel->csrow = %p\n\n", chan
->csrow
);
52 static void edac_mc_dump_csrow(struct csrow_info
*csrow
)
54 debugf4("\tcsrow = %p\n", csrow
);
55 debugf4("\tcsrow->csrow_idx = %d\n", csrow
->csrow_idx
);
56 debugf4("\tcsrow->first_page = 0x%lx\n", csrow
->first_page
);
57 debugf4("\tcsrow->last_page = 0x%lx\n", csrow
->last_page
);
58 debugf4("\tcsrow->page_mask = 0x%lx\n", csrow
->page_mask
);
59 debugf4("\tcsrow->nr_pages = 0x%x\n", csrow
->nr_pages
);
60 debugf4("\tcsrow->nr_channels = %d\n", csrow
->nr_channels
);
61 debugf4("\tcsrow->channels = %p\n", csrow
->channels
);
62 debugf4("\tcsrow->mci = %p\n\n", csrow
->mci
);
65 static void edac_mc_dump_mci(struct mem_ctl_info
*mci
)
67 debugf3("\tmci = %p\n", mci
);
68 debugf3("\tmci->mtype_cap = %lx\n", mci
->mtype_cap
);
69 debugf3("\tmci->edac_ctl_cap = %lx\n", mci
->edac_ctl_cap
);
70 debugf3("\tmci->edac_cap = %lx\n", mci
->edac_cap
);
71 debugf4("\tmci->edac_check = %p\n", mci
->edac_check
);
72 debugf3("\tmci->nr_csrows = %d, csrows = %p\n",
73 mci
->nr_csrows
, mci
->csrows
);
74 debugf3("\tdev = %p\n", mci
->dev
);
75 debugf3("\tmod_name:ctl_name = %s:%s\n", mci
->mod_name
, mci
->ctl_name
);
76 debugf3("\tpvt_info = %p\n\n", mci
->pvt_info
);
/*
 * Human-readable names for memory types.
 * NOTE: keep those in sync with the enum mem_type — index i names type i.
 * NOTE(review): entries not visible in the garbled source ("Empty csrow",
 * "Unknown csrow type", "Fast page mode RAM", "Rambus DRAM", "Rambus XDR")
 * were reconstructed from the enum ordering — confirm against mem_type.
 */
const char *edac_mem_types[] = {
	"Empty csrow",
	"Reserved csrow type",
	"Unknown csrow type",
	"Fast page mode RAM",
	"Extended data out RAM",
	"Burst Extended data out RAM",
	"Single data rate SDRAM",
	"Registered single data rate SDRAM",
	"Double data rate SDRAM",
	"Registered Double data rate SDRAM",
	"Rambus DRAM",
	"Unbuffered DDR2 RAM",
	"Fully buffered DDR2",
	"Registered DDR2 RAM",
	"Rambus XDR",
	"Unbuffered DDR3 RAM",
	"Registered DDR3 RAM",
};
EXPORT_SYMBOL_GPL(edac_mem_types);
103 #endif /* CONFIG_EDAC_DEBUG */
/* 'ptr' points to a possibly unaligned item X such that sizeof(X) is 'size'.
 * Adjust 'ptr' so that its alignment is at least as stringent as what the
 * compiler would provide for X and return the aligned result.
 *
 * If 'size' is a constant, the compiler will optimize this whole function
 * down to either a no-op or the addition of a constant to the value of 'ptr'.
 */
void *edac_align_ptr(void *ptr, unsigned size)
{
	unsigned align, r;

	/* Here we assume that the alignment of a "long long" is the most
	 * stringent alignment that the compiler will ever provide by default.
	 * As far as I know, this is a reasonable assumption.
	 */
	if (size > sizeof(long))
		align = sizeof(long long);
	else if (size > sizeof(int))
		align = sizeof(long);
	else if (size > sizeof(short))
		align = sizeof(int);
	else if (size > sizeof(char))
		align = sizeof(short);
	else
		return ptr;	/* byte-sized items need no alignment */

	/* distance of 'ptr' past the previous 'align' boundary */
	r = (unsigned long)ptr % align;

	if (r == 0)
		return ptr;	/* already aligned */

	return (void *)(((unsigned long)ptr) + align - r);
}
140 * edac_mc_alloc: Allocate a struct mem_ctl_info structure
141 * @size_pvt: size of private storage needed
142 * @nr_csrows: Number of CWROWS needed for this MC
143 * @nr_chans: Number of channels for the MC
145 * Everything is kmalloc'ed as one big chunk - more efficient.
146 * Only can be used if all structures have the same lifetime - otherwise
147 * you have to allocate and initialize your own structures.
149 * Use edac_mc_free() to free mc structures allocated by this function.
152 * NULL allocation failed
153 * struct mem_ctl_info pointer
155 struct mem_ctl_info
*edac_mc_alloc(unsigned sz_pvt
, unsigned nr_csrows
,
156 unsigned nr_chans
, int edac_index
)
158 struct mem_ctl_info
*mci
;
159 struct csrow_info
*csi
, *csrow
;
160 struct channel_info
*chi
, *chp
, *chan
;
166 /* Figure out the offsets of the various items from the start of an mc
167 * structure. We want the alignment of each item to be at least as
168 * stringent as what the compiler would provide if we could simply
169 * hardcode everything into a single struct.
171 mci
= (struct mem_ctl_info
*)0;
172 csi
= edac_align_ptr(&mci
[1], sizeof(*csi
));
173 chi
= edac_align_ptr(&csi
[nr_csrows
], sizeof(*chi
));
174 pvt
= edac_align_ptr(&chi
[nr_chans
* nr_csrows
], sz_pvt
);
175 size
= ((unsigned long)pvt
) + sz_pvt
;
177 mci
= kzalloc(size
, GFP_KERNEL
);
181 /* Adjust pointers so they point within the memory we just allocated
182 * rather than an imaginary chunk of memory located at address 0.
184 csi
= (struct csrow_info
*)(((char *)mci
) + ((unsigned long)csi
));
185 chi
= (struct channel_info
*)(((char *)mci
) + ((unsigned long)chi
));
186 pvt
= sz_pvt
? (((char *)mci
) + ((unsigned long)pvt
)) : NULL
;
188 /* setup index and various internal pointers */
189 mci
->mc_idx
= edac_index
;
192 mci
->nr_csrows
= nr_csrows
;
194 for (row
= 0; row
< nr_csrows
; row
++) {
196 csrow
->csrow_idx
= row
;
198 csrow
->nr_channels
= nr_chans
;
199 chp
= &chi
[row
* nr_chans
];
200 csrow
->channels
= chp
;
202 for (chn
= 0; chn
< nr_chans
; chn
++) {
204 chan
->chan_idx
= chn
;
209 mci
->op_state
= OP_ALLOC
;
210 INIT_LIST_HEAD(&mci
->grp_kobj_list
);
213 * Initialize the 'root' kobj for the edac_mc controller
215 err
= edac_mc_register_sysfs_main_kobj(mci
);
221 /* at this point, the root kobj is valid, and in order to
222 * 'free' the object, then the function:
223 * edac_mc_unregister_sysfs_main_kobj() must be called
224 * which will perform kobj unregistration and the actual free
225 * will occur during the kobject callback operation
229 EXPORT_SYMBOL_GPL(edac_mc_alloc
);
/**
 * edac_mc_free:  'Free' a previously allocated 'mci' structure
 * @mci: pointer to a struct mem_ctl_info structure
 *
 * The actual kfree() happens in the kobject release callback once the
 * main sysfs kobject is unregistered (see edac_mc_alloc()).
 */
void edac_mc_free(struct mem_ctl_info *mci)
{
	edac_mc_unregister_sysfs_main_kobj(mci);
}
EXPORT_SYMBOL_GPL(edac_mc_free);
246 * scan list of controllers looking for the one that manages
248 * @dev: pointer to a struct device related with the MCI
250 struct mem_ctl_info
*find_mci_by_dev(struct device
*dev
)
252 struct mem_ctl_info
*mci
;
253 struct list_head
*item
;
255 debugf3("%s()\n", __func__
);
257 list_for_each(item
, &mc_devices
) {
258 mci
= list_entry(item
, struct mem_ctl_info
, link
);
266 EXPORT_SYMBOL_GPL(find_mci_by_dev
);
269 * handler for EDAC to check if NMI type handler has asserted interrupt
271 static int edac_mc_assert_error_check_and_clear(void)
275 if (edac_op_state
== EDAC_OPSTATE_POLL
)
278 old_state
= edac_err_assert
;
285 * edac_mc_workq_function
286 * performs the operation scheduled by a workq request
288 static void edac_mc_workq_function(struct work_struct
*work_req
)
290 struct delayed_work
*d_work
= to_delayed_work(work_req
);
291 struct mem_ctl_info
*mci
= to_edac_mem_ctl_work(d_work
);
293 mutex_lock(&mem_ctls_mutex
);
295 /* if this control struct has movd to offline state, we are done */
296 if (mci
->op_state
== OP_OFFLINE
) {
297 mutex_unlock(&mem_ctls_mutex
);
301 /* Only poll controllers that are running polled and have a check */
302 if (edac_mc_assert_error_check_and_clear() && (mci
->edac_check
!= NULL
))
303 mci
->edac_check(mci
);
305 mutex_unlock(&mem_ctls_mutex
);
308 queue_delayed_work(edac_workqueue
, &mci
->work
,
309 msecs_to_jiffies(edac_mc_get_poll_msec()));
313 * edac_mc_workq_setup
314 * initialize a workq item for this mci
315 * passing in the new delay period in msec
319 * called with the mem_ctls_mutex held
321 static void edac_mc_workq_setup(struct mem_ctl_info
*mci
, unsigned msec
)
323 debugf0("%s()\n", __func__
);
325 /* if this instance is not in the POLL state, then simply return */
326 if (mci
->op_state
!= OP_RUNNING_POLL
)
329 INIT_DELAYED_WORK(&mci
->work
, edac_mc_workq_function
);
330 queue_delayed_work(edac_workqueue
, &mci
->work
, msecs_to_jiffies(msec
));
334 * edac_mc_workq_teardown
335 * stop the workq processing on this mci
339 * called WITHOUT lock held
341 static void edac_mc_workq_teardown(struct mem_ctl_info
*mci
)
345 if (mci
->op_state
!= OP_RUNNING_POLL
)
348 status
= cancel_delayed_work(&mci
->work
);
350 debugf0("%s() not canceled, flush the queue\n",
353 /* workq instance might be running, wait for it */
354 flush_workqueue(edac_workqueue
);
359 * edac_mc_reset_delay_period(unsigned long value)
361 * user space has updated our poll period value, need to
362 * reset our workq delays
364 void edac_mc_reset_delay_period(int value
)
366 struct mem_ctl_info
*mci
;
367 struct list_head
*item
;
369 mutex_lock(&mem_ctls_mutex
);
371 /* scan the list and turn off all workq timers, doing so under lock
373 list_for_each(item
, &mc_devices
) {
374 mci
= list_entry(item
, struct mem_ctl_info
, link
);
376 if (mci
->op_state
== OP_RUNNING_POLL
)
377 cancel_delayed_work(&mci
->work
);
380 mutex_unlock(&mem_ctls_mutex
);
383 /* re-walk the list, and reset the poll delay */
384 mutex_lock(&mem_ctls_mutex
);
386 list_for_each(item
, &mc_devices
) {
387 mci
= list_entry(item
, struct mem_ctl_info
, link
);
389 edac_mc_workq_setup(mci
, (unsigned long) value
);
392 mutex_unlock(&mem_ctls_mutex
);
397 /* Return 0 on success, 1 on failure.
398 * Before calling this function, caller must
399 * assign a unique value to mci->mc_idx.
403 * called with the mem_ctls_mutex lock held
405 static int add_mc_to_global_list(struct mem_ctl_info
*mci
)
407 struct list_head
*item
, *insert_before
;
408 struct mem_ctl_info
*p
;
410 insert_before
= &mc_devices
;
412 p
= find_mci_by_dev(mci
->dev
);
413 if (unlikely(p
!= NULL
))
416 list_for_each(item
, &mc_devices
) {
417 p
= list_entry(item
, struct mem_ctl_info
, link
);
419 if (p
->mc_idx
>= mci
->mc_idx
) {
420 if (unlikely(p
->mc_idx
== mci
->mc_idx
))
423 insert_before
= item
;
428 list_add_tail_rcu(&mci
->link
, insert_before
);
429 atomic_inc(&edac_handlers
);
433 edac_printk(KERN_WARNING
, EDAC_MC
,
434 "%s (%s) %s %s already assigned %d\n", dev_name(p
->dev
),
435 edac_dev_name(mci
), p
->mod_name
, p
->ctl_name
, p
->mc_idx
);
439 edac_printk(KERN_WARNING
, EDAC_MC
,
440 "bug in low-level driver: attempt to assign\n"
441 " duplicate mc_idx %d in %s()\n", p
->mc_idx
, __func__
);
445 static void complete_mc_list_del(struct rcu_head
*head
)
447 struct mem_ctl_info
*mci
;
449 mci
= container_of(head
, struct mem_ctl_info
, rcu
);
450 INIT_LIST_HEAD(&mci
->link
);
453 static void del_mc_from_global_list(struct mem_ctl_info
*mci
)
455 atomic_dec(&edac_handlers
);
456 list_del_rcu(&mci
->link
);
457 call_rcu(&mci
->rcu
, complete_mc_list_del
);
462 * edac_mc_find: Search for a mem_ctl_info structure whose index is 'idx'.
464 * If found, return a pointer to the structure.
467 * Caller must hold mem_ctls_mutex.
469 struct mem_ctl_info
*edac_mc_find(int idx
)
471 struct list_head
*item
;
472 struct mem_ctl_info
*mci
;
474 list_for_each(item
, &mc_devices
) {
475 mci
= list_entry(item
, struct mem_ctl_info
, link
);
477 if (mci
->mc_idx
>= idx
) {
478 if (mci
->mc_idx
== idx
)
487 EXPORT_SYMBOL(edac_mc_find
);
490 * edac_mc_add_mc: Insert the 'mci' structure into the mci global list and
491 * create sysfs entries associated with mci structure
492 * @mci: pointer to the mci structure to be added to the list
493 * @mc_idx: A unique numeric identifier to be assigned to the 'mci' structure.
500 /* FIXME - should a warning be printed if no error detection? correction? */
501 int edac_mc_add_mc(struct mem_ctl_info
*mci
)
503 debugf0("%s()\n", __func__
);
505 #ifdef CONFIG_EDAC_DEBUG
506 if (edac_debug_level
>= 3)
507 edac_mc_dump_mci(mci
);
509 if (edac_debug_level
>= 4) {
512 for (i
= 0; i
< mci
->nr_csrows
; i
++) {
515 edac_mc_dump_csrow(&mci
->csrows
[i
]);
516 for (j
= 0; j
< mci
->csrows
[i
].nr_channels
; j
++)
517 edac_mc_dump_channel(&mci
->csrows
[i
].
522 mutex_lock(&mem_ctls_mutex
);
524 if (add_mc_to_global_list(mci
))
527 /* set load time so that error rate can be tracked */
528 mci
->start_time
= jiffies
;
530 if (edac_create_sysfs_mci_device(mci
)) {
531 edac_mc_printk(mci
, KERN_WARNING
,
532 "failed to create sysfs device\n");
536 /* If there IS a check routine, then we are running POLLED */
537 if (mci
->edac_check
!= NULL
) {
538 /* This instance is NOW RUNNING */
539 mci
->op_state
= OP_RUNNING_POLL
;
541 edac_mc_workq_setup(mci
, edac_mc_get_poll_msec());
543 mci
->op_state
= OP_RUNNING_INTERRUPT
;
546 /* Report action taken */
547 edac_mc_printk(mci
, KERN_INFO
, "Giving out device to '%s' '%s':"
548 " DEV %s\n", mci
->mod_name
, mci
->ctl_name
, edac_dev_name(mci
));
550 mutex_unlock(&mem_ctls_mutex
);
554 del_mc_from_global_list(mci
);
557 mutex_unlock(&mem_ctls_mutex
);
560 EXPORT_SYMBOL_GPL(edac_mc_add_mc
);
563 * edac_mc_del_mc: Remove sysfs entries for specified mci structure and
564 * remove mci structure from global list
565 * @pdev: Pointer to 'struct device' representing mci structure to remove.
567 * Return pointer to removed mci structure, or NULL if device not found.
569 struct mem_ctl_info
*edac_mc_del_mc(struct device
*dev
)
571 struct mem_ctl_info
*mci
;
573 debugf0("%s()\n", __func__
);
575 mutex_lock(&mem_ctls_mutex
);
577 /* find the requested mci struct in the global list */
578 mci
= find_mci_by_dev(dev
);
580 mutex_unlock(&mem_ctls_mutex
);
584 /* marking MCI offline */
585 mci
->op_state
= OP_OFFLINE
;
587 del_mc_from_global_list(mci
);
588 mutex_unlock(&mem_ctls_mutex
);
590 /* flush workq processes and remove sysfs */
591 edac_mc_workq_teardown(mci
);
592 edac_remove_sysfs_mci_device(mci
);
594 edac_printk(KERN_INFO
, EDAC_MC
,
595 "Removed device %d for %s %s: DEV %s\n", mci
->mc_idx
,
596 mci
->mod_name
, mci
->ctl_name
, edac_dev_name(mci
));
600 EXPORT_SYMBOL_GPL(edac_mc_del_mc
);
602 static void edac_mc_scrub_block(unsigned long page
, unsigned long offset
,
607 unsigned long flags
= 0;
609 debugf3("%s()\n", __func__
);
611 /* ECC error page was not in our memory. Ignore it. */
612 if (!pfn_valid(page
))
615 /* Find the actual page structure then map it and fix */
616 pg
= pfn_to_page(page
);
619 local_irq_save(flags
);
621 virt_addr
= kmap_atomic(pg
, KM_BOUNCE_READ
);
623 /* Perform architecture specific atomic scrub operation */
624 atomic_scrub(virt_addr
+ offset
, size
);
626 /* Unmap and complete */
627 kunmap_atomic(virt_addr
, KM_BOUNCE_READ
);
630 local_irq_restore(flags
);
633 /* FIXME - should return -1 */
634 int edac_mc_find_csrow_by_page(struct mem_ctl_info
*mci
, unsigned long page
)
636 struct csrow_info
*csrows
= mci
->csrows
;
639 debugf1("MC%d: %s(): 0x%lx\n", mci
->mc_idx
, __func__
, page
);
642 for (i
= 0; i
< mci
->nr_csrows
; i
++) {
643 struct csrow_info
*csrow
= &csrows
[i
];
645 if (csrow
->nr_pages
== 0)
648 debugf3("MC%d: %s(): first(0x%lx) page(0x%lx) last(0x%lx) "
649 "mask(0x%lx)\n", mci
->mc_idx
, __func__
,
650 csrow
->first_page
, page
, csrow
->last_page
,
653 if ((page
>= csrow
->first_page
) &&
654 (page
<= csrow
->last_page
) &&
655 ((page
& csrow
->page_mask
) ==
656 (csrow
->first_page
& csrow
->page_mask
))) {
663 edac_mc_printk(mci
, KERN_ERR
,
664 "could not look up page error address %lx\n",
665 (unsigned long)page
);
669 EXPORT_SYMBOL_GPL(edac_mc_find_csrow_by_page
);
671 /* FIXME - setable log (warning/emerg) levels */
672 /* FIXME - integrate with evlog: http://evlog.sourceforge.net/ */
673 void edac_mc_handle_ce(struct mem_ctl_info
*mci
,
674 unsigned long page_frame_number
,
675 unsigned long offset_in_page
, unsigned long syndrome
,
676 int row
, int channel
, const char *msg
)
678 unsigned long remapped_page
;
680 debugf3("MC%d: %s()\n", mci
->mc_idx
, __func__
);
682 /* FIXME - maybe make panic on INTERNAL ERROR an option */
683 if (row
>= mci
->nr_csrows
|| row
< 0) {
684 /* something is wrong */
685 edac_mc_printk(mci
, KERN_ERR
,
686 "INTERNAL ERROR: row out of range "
687 "(%d >= %d)\n", row
, mci
->nr_csrows
);
688 edac_mc_handle_ce_no_info(mci
, "INTERNAL ERROR");
692 if (channel
>= mci
->csrows
[row
].nr_channels
|| channel
< 0) {
693 /* something is wrong */
694 edac_mc_printk(mci
, KERN_ERR
,
695 "INTERNAL ERROR: channel out of range "
696 "(%d >= %d)\n", channel
,
697 mci
->csrows
[row
].nr_channels
);
698 edac_mc_handle_ce_no_info(mci
, "INTERNAL ERROR");
702 if (edac_mc_get_log_ce())
703 /* FIXME - put in DIMM location */
704 edac_mc_printk(mci
, KERN_WARNING
,
705 "CE page 0x%lx, offset 0x%lx, grain %d, syndrome "
706 "0x%lx, row %d, channel %d, label \"%s\": %s\n",
707 page_frame_number
, offset_in_page
,
708 mci
->csrows
[row
].grain
, syndrome
, row
, channel
,
709 mci
->csrows
[row
].channels
[channel
].label
, msg
);
712 mci
->csrows
[row
].ce_count
++;
713 mci
->csrows
[row
].channels
[channel
].ce_count
++;
715 if (mci
->scrub_mode
& SCRUB_SW_SRC
) {
717 * Some MC's can remap memory so that it is still available
718 * at a different address when PCI devices map into memory.
719 * MC's that can't do this lose the memory where PCI devices
720 * are mapped. This mapping is MC dependant and so we call
721 * back into the MC driver for it to map the MC page to
722 * a physical (CPU) page which can then be mapped to a virtual
723 * page - which can then be scrubbed.
725 remapped_page
= mci
->ctl_page_to_phys
?
726 mci
->ctl_page_to_phys(mci
, page_frame_number
) :
729 edac_mc_scrub_block(remapped_page
, offset_in_page
,
730 mci
->csrows
[row
].grain
);
733 EXPORT_SYMBOL_GPL(edac_mc_handle_ce
);
735 void edac_mc_handle_ce_no_info(struct mem_ctl_info
*mci
, const char *msg
)
737 if (edac_mc_get_log_ce())
738 edac_mc_printk(mci
, KERN_WARNING
,
739 "CE - no information available: %s\n", msg
);
741 mci
->ce_noinfo_count
++;
744 EXPORT_SYMBOL_GPL(edac_mc_handle_ce_no_info
);
746 void edac_mc_handle_ue(struct mem_ctl_info
*mci
,
747 unsigned long page_frame_number
,
748 unsigned long offset_in_page
, int row
, const char *msg
)
750 int len
= EDAC_MC_LABEL_LEN
* 4;
751 char labels
[len
+ 1];
756 debugf3("MC%d: %s()\n", mci
->mc_idx
, __func__
);
758 /* FIXME - maybe make panic on INTERNAL ERROR an option */
759 if (row
>= mci
->nr_csrows
|| row
< 0) {
760 /* something is wrong */
761 edac_mc_printk(mci
, KERN_ERR
,
762 "INTERNAL ERROR: row out of range "
763 "(%d >= %d)\n", row
, mci
->nr_csrows
);
764 edac_mc_handle_ue_no_info(mci
, "INTERNAL ERROR");
768 chars
= snprintf(pos
, len
+ 1, "%s",
769 mci
->csrows
[row
].channels
[0].label
);
773 for (chan
= 1; (chan
< mci
->csrows
[row
].nr_channels
) && (len
> 0);
775 chars
= snprintf(pos
, len
+ 1, ":%s",
776 mci
->csrows
[row
].channels
[chan
].label
);
781 if (edac_mc_get_log_ue())
782 edac_mc_printk(mci
, KERN_EMERG
,
783 "UE page 0x%lx, offset 0x%lx, grain %d, row %d, "
784 "labels \"%s\": %s\n", page_frame_number
,
785 offset_in_page
, mci
->csrows
[row
].grain
, row
,
788 if (edac_mc_get_panic_on_ue())
789 panic("EDAC MC%d: UE page 0x%lx, offset 0x%lx, grain %d, "
790 "row %d, labels \"%s\": %s\n", mci
->mc_idx
,
791 page_frame_number
, offset_in_page
,
792 mci
->csrows
[row
].grain
, row
, labels
, msg
);
795 mci
->csrows
[row
].ue_count
++;
797 EXPORT_SYMBOL_GPL(edac_mc_handle_ue
);
799 void edac_mc_handle_ue_no_info(struct mem_ctl_info
*mci
, const char *msg
)
801 if (edac_mc_get_panic_on_ue())
802 panic("EDAC MC%d: Uncorrected Error", mci
->mc_idx
);
804 if (edac_mc_get_log_ue())
805 edac_mc_printk(mci
, KERN_WARNING
,
806 "UE - no information available: %s\n", msg
);
807 mci
->ue_noinfo_count
++;
810 EXPORT_SYMBOL_GPL(edac_mc_handle_ue_no_info
);
812 /*************************************************************
813 * On Fully Buffered DIMM modules, this help function is
814 * called to process UE events
816 void edac_mc_handle_fbd_ue(struct mem_ctl_info
*mci
,
818 unsigned int channela
,
819 unsigned int channelb
, char *msg
)
821 int len
= EDAC_MC_LABEL_LEN
* 4;
822 char labels
[len
+ 1];
826 if (csrow
>= mci
->nr_csrows
) {
827 /* something is wrong */
828 edac_mc_printk(mci
, KERN_ERR
,
829 "INTERNAL ERROR: row out of range (%d >= %d)\n",
830 csrow
, mci
->nr_csrows
);
831 edac_mc_handle_ue_no_info(mci
, "INTERNAL ERROR");
835 if (channela
>= mci
->csrows
[csrow
].nr_channels
) {
836 /* something is wrong */
837 edac_mc_printk(mci
, KERN_ERR
,
838 "INTERNAL ERROR: channel-a out of range "
840 channela
, mci
->csrows
[csrow
].nr_channels
);
841 edac_mc_handle_ue_no_info(mci
, "INTERNAL ERROR");
845 if (channelb
>= mci
->csrows
[csrow
].nr_channels
) {
846 /* something is wrong */
847 edac_mc_printk(mci
, KERN_ERR
,
848 "INTERNAL ERROR: channel-b out of range "
850 channelb
, mci
->csrows
[csrow
].nr_channels
);
851 edac_mc_handle_ue_no_info(mci
, "INTERNAL ERROR");
856 mci
->csrows
[csrow
].ue_count
++;
858 /* Generate the DIMM labels from the specified channels */
859 chars
= snprintf(pos
, len
+ 1, "%s",
860 mci
->csrows
[csrow
].channels
[channela
].label
);
863 chars
= snprintf(pos
, len
+ 1, "-%s",
864 mci
->csrows
[csrow
].channels
[channelb
].label
);
866 if (edac_mc_get_log_ue())
867 edac_mc_printk(mci
, KERN_EMERG
,
868 "UE row %d, channel-a= %d channel-b= %d "
869 "labels \"%s\": %s\n", csrow
, channela
, channelb
,
872 if (edac_mc_get_panic_on_ue())
873 panic("UE row %d, channel-a= %d channel-b= %d "
874 "labels \"%s\": %s\n", csrow
, channela
,
875 channelb
, labels
, msg
);
877 EXPORT_SYMBOL(edac_mc_handle_fbd_ue
);
879 /*************************************************************
880 * On Fully Buffered DIMM modules, this help function is
881 * called to process CE events
883 void edac_mc_handle_fbd_ce(struct mem_ctl_info
*mci
,
884 unsigned int csrow
, unsigned int channel
, char *msg
)
887 /* Ensure boundary values */
888 if (csrow
>= mci
->nr_csrows
) {
889 /* something is wrong */
890 edac_mc_printk(mci
, KERN_ERR
,
891 "INTERNAL ERROR: row out of range (%d >= %d)\n",
892 csrow
, mci
->nr_csrows
);
893 edac_mc_handle_ce_no_info(mci
, "INTERNAL ERROR");
896 if (channel
>= mci
->csrows
[csrow
].nr_channels
) {
897 /* something is wrong */
898 edac_mc_printk(mci
, KERN_ERR
,
899 "INTERNAL ERROR: channel out of range (%d >= %d)\n",
900 channel
, mci
->csrows
[csrow
].nr_channels
);
901 edac_mc_handle_ce_no_info(mci
, "INTERNAL ERROR");
905 if (edac_mc_get_log_ce())
906 /* FIXME - put in DIMM location */
907 edac_mc_printk(mci
, KERN_WARNING
,
908 "CE row %d, channel %d, label \"%s\": %s\n",
910 mci
->csrows
[csrow
].channels
[channel
].label
, msg
);
913 mci
->csrows
[csrow
].ce_count
++;
914 mci
->csrows
[csrow
].channels
[channel
].ce_count
++;
916 EXPORT_SYMBOL(edac_mc_handle_fbd_ce
);