i5000_edac: Fix the logic that retrieves memory information
[deliverable/linux.git] / drivers / edac / e752x_edac.c
1 /*
2 * Intel e752x Memory Controller kernel module
3 * (C) 2004 Linux Networx (http://lnxi.com)
4 * This file may be distributed under the terms of the
5 * GNU General Public License.
6 *
7 * See "enum e752x_chips" below for supported chipsets
8 *
9 * Datasheet:
10 * http://www.intel.in/content/www/in/en/chipsets/e7525-memory-controller-hub-datasheet.html
11 *
12 * Written by Tom Zimmerman
13 *
14 * Contributors:
15 * Thayne Harbaugh at realmsys.com (?)
16 * Wang Zhenyu at intel.com
17 * Dave Jiang at mvista.com
18 *
19 * $Id: edac_e752x.c,v 1.5.2.11 2005/10/05 00:43:44 dsp_llnl Exp $
20 *
21 */
22
23 #include <linux/module.h>
24 #include <linux/init.h>
25 #include <linux/pci.h>
26 #include <linux/pci_ids.h>
27 #include <linux/edac.h>
28 #include "edac_core.h"
29
30 #define E752X_REVISION " Ver: 2.0.2"
31 #define EDAC_MOD_STR "e752x_edac"
32
33 static int report_non_memory_errors;
34 static int force_function_unhide;
35 static int sysbus_parity = -1;
36
37 static struct edac_pci_ctl_info *e752x_pci;
38
39 #define e752x_printk(level, fmt, arg...) \
40 edac_printk(level, "e752x", fmt, ##arg)
41
42 #define e752x_mc_printk(mci, level, fmt, arg...) \
43 edac_mc_chipset_printk(mci, level, "e752x", fmt, ##arg)
44
45 #ifndef PCI_DEVICE_ID_INTEL_7520_0
46 #define PCI_DEVICE_ID_INTEL_7520_0 0x3590
47 #endif /* PCI_DEVICE_ID_INTEL_7520_0 */
48
49 #ifndef PCI_DEVICE_ID_INTEL_7520_1_ERR
50 #define PCI_DEVICE_ID_INTEL_7520_1_ERR 0x3591
51 #endif /* PCI_DEVICE_ID_INTEL_7520_1_ERR */
52
53 #ifndef PCI_DEVICE_ID_INTEL_7525_0
54 #define PCI_DEVICE_ID_INTEL_7525_0 0x359E
55 #endif /* PCI_DEVICE_ID_INTEL_7525_0 */
56
57 #ifndef PCI_DEVICE_ID_INTEL_7525_1_ERR
58 #define PCI_DEVICE_ID_INTEL_7525_1_ERR 0x3593
59 #endif /* PCI_DEVICE_ID_INTEL_7525_1_ERR */
60
61 #ifndef PCI_DEVICE_ID_INTEL_7320_0
62 #define PCI_DEVICE_ID_INTEL_7320_0 0x3592
63 #endif /* PCI_DEVICE_ID_INTEL_7320_0 */
64
65 #ifndef PCI_DEVICE_ID_INTEL_7320_1_ERR
66 #define PCI_DEVICE_ID_INTEL_7320_1_ERR 0x3593
67 #endif /* PCI_DEVICE_ID_INTEL_7320_1_ERR */
68
69 #ifndef PCI_DEVICE_ID_INTEL_3100_0
70 #define PCI_DEVICE_ID_INTEL_3100_0 0x35B0
71 #endif /* PCI_DEVICE_ID_INTEL_3100_0 */
72
73 #ifndef PCI_DEVICE_ID_INTEL_3100_1_ERR
74 #define PCI_DEVICE_ID_INTEL_3100_1_ERR 0x35B1
75 #endif /* PCI_DEVICE_ID_INTEL_3100_1_ERR */
76
77 #define E752X_NR_CSROWS 8 /* number of csrows */
78
79 /* E752X register addresses - device 0 function 0 */
80 #define E752X_MCHSCRB 0x52 /* Memory Scrub register (16b) */
81 /*
82 * 6:5 Scrub Completion Count
83 * 3:2 Scrub Rate (i3100 only)
84 * 01=fast 10=normal
85 * 1:0 Scrub Mode enable
86 * 00=off 10=on
87 */
88 #define E752X_DRB 0x60 /* DRAM row boundary register (8b) */
89 #define E752X_DRA 0x70 /* DRAM row attribute register (8b) */
90 /*
91 * 31:30 Device width row 7
92 * 01=x8 10=x4 11=x8 DDR2
93 * 27:26 Device width row 6
94 * 23:22 Device width row 5
95 * 19:20 Device width row 4
96 * 15:14 Device width row 3
97 * 11:10 Device width row 2
98 * 7:6 Device width row 1
99 * 3:2 Device width row 0
100 */
101 #define E752X_DRC 0x7C /* DRAM controller mode reg (32b) */
102 /* FIXME:IS THIS RIGHT? */
103 /*
104 * 22 Number channels 0=1,1=2
105 * 19:18 DRB Granularity 32/64MB
106 */
107 #define E752X_DRM 0x80 /* Dimm mapping register */
108 #define E752X_DDRCSR 0x9A /* DDR control and status reg (16b) */
109 /*
110 * 14:12 1 single A, 2 single B, 3 dual
111 */
112 #define E752X_TOLM 0xC4 /* DRAM top of low memory reg (16b) */
113 #define E752X_REMAPBASE 0xC6 /* DRAM remap base address reg (16b) */
114 #define E752X_REMAPLIMIT 0xC8 /* DRAM remap limit address reg (16b) */
115 #define E752X_REMAPOFFSET 0xCA /* DRAM remap limit offset reg (16b) */
116
117 /* E752X register addresses - device 0 function 1 */
118 #define E752X_FERR_GLOBAL 0x40 /* Global first error register (32b) */
119 #define E752X_NERR_GLOBAL 0x44 /* Global next error register (32b) */
120 #define E752X_HI_FERR 0x50 /* Hub interface first error reg (8b) */
121 #define E752X_HI_NERR 0x52 /* Hub interface next error reg (8b) */
122 #define E752X_HI_ERRMASK 0x54 /* Hub interface error mask reg (8b) */
123 #define E752X_HI_SMICMD 0x5A /* Hub interface SMI command reg (8b) */
124 #define E752X_SYSBUS_FERR 0x60 /* System buss first error reg (16b) */
125 #define E752X_SYSBUS_NERR 0x62 /* System buss next error reg (16b) */
126 #define E752X_SYSBUS_ERRMASK 0x64 /* System buss error mask reg (16b) */
127 #define E752X_SYSBUS_SMICMD 0x6A /* System buss SMI command reg (16b) */
128 #define E752X_BUF_FERR 0x70 /* Memory buffer first error reg (8b) */
129 #define E752X_BUF_NERR 0x72 /* Memory buffer next error reg (8b) */
130 #define E752X_BUF_ERRMASK 0x74 /* Memory buffer error mask reg (8b) */
131 #define E752X_BUF_SMICMD 0x7A /* Memory buffer SMI cmd reg (8b) */
132 #define E752X_DRAM_FERR 0x80 /* DRAM first error register (16b) */
133 #define E752X_DRAM_NERR 0x82 /* DRAM next error register (16b) */
134 #define E752X_DRAM_ERRMASK 0x84 /* DRAM error mask register (8b) */
135 #define E752X_DRAM_SMICMD 0x8A /* DRAM SMI command register (8b) */
136 #define E752X_DRAM_RETR_ADD 0xAC /* DRAM Retry address register (32b) */
137 #define E752X_DRAM_SEC1_ADD 0xA0 /* DRAM first correctable memory */
138 /* error address register (32b) */
139 /*
140 * 31 Reserved
141 * 30:2 CE address (64 byte block 34:6
142 * 1 Reserved
143 * 0 HiLoCS
144 */
145 #define E752X_DRAM_SEC2_ADD 0xC8 /* DRAM first correctable memory */
146 /* error address register (32b) */
147 /*
148 * 31 Reserved
149 * 30:2 CE address (64 byte block 34:6)
150 * 1 Reserved
151 * 0 HiLoCS
152 */
153 #define E752X_DRAM_DED_ADD 0xA4 /* DRAM first uncorrectable memory */
154 /* error address register (32b) */
155 /*
156 * 31 Reserved
157 * 30:2 CE address (64 byte block 34:6)
158 * 1 Reserved
159 * 0 HiLoCS
160 */
161 #define E752X_DRAM_SCRB_ADD 0xA8 /* DRAM 1st uncorrectable scrub mem */
162 /* error address register (32b) */
163 /*
164 * 31 Reserved
165 * 30:2 CE address (64 byte block 34:6
166 * 1 Reserved
167 * 0 HiLoCS
168 */
169 #define E752X_DRAM_SEC1_SYNDROME 0xC4 /* DRAM first correctable memory */
170 /* error syndrome register (16b) */
171 #define E752X_DRAM_SEC2_SYNDROME 0xC6 /* DRAM second correctable memory */
172 /* error syndrome register (16b) */
173 #define E752X_DEVPRES1 0xF4 /* Device Present 1 register (8b) */
174
175 /* 3100 IMCH specific register addresses - device 0 function 1 */
176 #define I3100_NSI_FERR 0x48 /* NSI first error reg (32b) */
177 #define I3100_NSI_NERR 0x4C /* NSI next error reg (32b) */
178 #define I3100_NSI_SMICMD 0x54 /* NSI SMI command register (32b) */
179 #define I3100_NSI_EMASK 0x90 /* NSI error mask register (32b) */
180
181 /* ICH5R register addresses - device 30 function 0 */
182 #define ICH5R_PCI_STAT 0x06 /* PCI status register (16b) */
183 #define ICH5R_PCI_2ND_STAT 0x1E /* PCI status secondary reg (16b) */
184 #define ICH5R_PCI_BRIDGE_CTL 0x3E /* PCI bridge control register (16b) */
185
186 enum e752x_chips {
187 E7520 = 0,
188 E7525 = 1,
189 E7320 = 2,
190 I3100 = 3
191 };
192
193 struct e752x_pvt {
194 struct pci_dev *bridge_ck;
195 struct pci_dev *dev_d0f0;
196 struct pci_dev *dev_d0f1;
197 u32 tolm;
198 u32 remapbase;
199 u32 remaplimit;
200 int mc_symmetric;
201 u8 map[8];
202 int map_type;
203 const struct e752x_dev_info *dev_info;
204 };
205
206 struct e752x_dev_info {
207 u16 err_dev;
208 u16 ctl_dev;
209 const char *ctl_name;
210 };
211
212 struct e752x_error_info {
213 u32 ferr_global;
214 u32 nerr_global;
215 u32 nsi_ferr; /* 3100 only */
216 u32 nsi_nerr; /* 3100 only */
217 u8 hi_ferr; /* all but 3100 */
218 u8 hi_nerr; /* all but 3100 */
219 u16 sysbus_ferr;
220 u16 sysbus_nerr;
221 u8 buf_ferr;
222 u8 buf_nerr;
223 u16 dram_ferr;
224 u16 dram_nerr;
225 u32 dram_sec1_add;
226 u32 dram_sec2_add;
227 u16 dram_sec1_syndrome;
228 u16 dram_sec2_syndrome;
229 u32 dram_ded_add;
230 u32 dram_scrb_add;
231 u32 dram_retr_add;
232 };
233
234 static const struct e752x_dev_info e752x_devs[] = {
235 [E7520] = {
236 .err_dev = PCI_DEVICE_ID_INTEL_7520_1_ERR,
237 .ctl_dev = PCI_DEVICE_ID_INTEL_7520_0,
238 .ctl_name = "E7520"},
239 [E7525] = {
240 .err_dev = PCI_DEVICE_ID_INTEL_7525_1_ERR,
241 .ctl_dev = PCI_DEVICE_ID_INTEL_7525_0,
242 .ctl_name = "E7525"},
243 [E7320] = {
244 .err_dev = PCI_DEVICE_ID_INTEL_7320_1_ERR,
245 .ctl_dev = PCI_DEVICE_ID_INTEL_7320_0,
246 .ctl_name = "E7320"},
247 [I3100] = {
248 .err_dev = PCI_DEVICE_ID_INTEL_3100_1_ERR,
249 .ctl_dev = PCI_DEVICE_ID_INTEL_3100_0,
250 .ctl_name = "3100"},
251 };
252
253 /* Valid scrub rates for the e752x/3100 hardware memory scrubber. We
254 * map the scrubbing bandwidth to a hardware register value. The 'set'
255 * operation finds the 'matching or higher value'. Note that scrubbing
256 * on the e752x can only be enabled/disabled. The 3100 supports
257 * a normal and fast mode.
258 */
259
260 #define SDRATE_EOT 0xFFFFFFFF
261
262 struct scrubrate {
263 u32 bandwidth; /* bandwidth consumed by scrubbing in bytes/sec */
264 u16 scrubval; /* register value for scrub rate */
265 };
266
267 /* Rate below assumes same performance as i3100 using PC3200 DDR2 in
268 * normal mode. e752x bridges don't support choosing normal or fast mode,
269 * so the scrubbing bandwidth value isn't all that important - scrubbing is
270 * either on or off.
271 */
272 static const struct scrubrate scrubrates_e752x[] = {
273 {0, 0x00}, /* Scrubbing Off */
274 {500000, 0x02}, /* Scrubbing On */
275 {SDRATE_EOT, 0x00} /* End of Table */
276 };
277
278 /* Fast mode: 2 GByte PC3200 DDR2 scrubbed in 33s = 63161283 bytes/s
279 * Normal mode: 125 (32000 / 256) times slower than fast mode.
280 */
281 static const struct scrubrate scrubrates_i3100[] = {
282 {0, 0x00}, /* Scrubbing Off */
283 {500000, 0x0a}, /* Normal mode - 32k clocks */
284 {62500000, 0x06}, /* Fast mode - 256 clocks */
285 {SDRATE_EOT, 0x00} /* End of Table */
286 };
287
288 static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci,
289 unsigned long page)
290 {
291 u32 remap;
292 struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
293
294 debugf3("%s()\n", __func__);
295
296 if (page < pvt->tolm)
297 return page;
298
299 if ((page >= 0x100000) && (page < pvt->remapbase))
300 return page;
301
302 remap = (page - pvt->tolm) + pvt->remapbase;
303
304 if (remap < pvt->remaplimit)
305 return remap;
306
307 e752x_printk(KERN_ERR, "Invalid page %lx - out of range\n", page);
308 return pvt->tolm - 1;
309 }
310
/* Decode and report one correctable (single-bit) DRAM error.
 *
 * @error_one: DRAM first-error status word; bit 0 selects the channel.
 * @sec1_add: CE address register value (64-byte block address).
 * @sec1_syndrome: ECC syndrome captured for the failing read.
 *
 * In symmetric (interleaved) mode the row is derived from address bits
 * and then pushed through pvt->map[]; otherwise the EDAC core resolves
 * the row from the page number.
 */
static void do_process_ce(struct mem_ctl_info *mci, u16 error_one,
			u32 sec1_add, u16 sec1_syndrome)
{
	u32 page;
	int row;
	int channel;
	int i;
	struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;

	debugf3("%s()\n", __func__);

	/* convert the addr to 4k page */
	page = sec1_add >> (PAGE_SHIFT - 4);

	/* FIXME - check for -1 */
	if (pvt->mc_symmetric) {
		/* chip select are bits 14 & 13 */
		row = ((page >> 1) & 3);
		e752x_printk(KERN_WARNING,
			"Test row %d Table %d %d %d %d %d %d %d %d\n", row,
			pvt->map[0], pvt->map[1], pvt->map[2], pvt->map[3],
			pvt->map[4], pvt->map[5], pvt->map[6],
			pvt->map[7]);

		/* test for channel remapping */
		for (i = 0; i < 8; i++) {
			if (pvt->map[i] == row)
				break;
		}

		e752x_printk(KERN_WARNING, "Test computed row %d\n", i);

		/* use the remapped row if one was found, else warn and
		 * fall back to the raw row computed above */
		if (i < 8)
			row = i;
		else
			e752x_mc_printk(mci, KERN_WARNING,
				"row %d not found in remap table\n",
				row);
	} else
		row = edac_mc_find_csrow_by_page(mci, page);

	/* 0 = channel A, 1 = channel B */
	channel = !(error_one & 1);

	/* e752x mc reads 34:6 of the DRAM linear address */
	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
			page, offset_in_page(sec1_add << 4), sec1_syndrome,
			row, channel, -1,
			"e752x CE", "", NULL);
}
361
362 static inline void process_ce(struct mem_ctl_info *mci, u16 error_one,
363 u32 sec1_add, u16 sec1_syndrome, int *error_found,
364 int handle_error)
365 {
366 *error_found = 1;
367
368 if (handle_error)
369 do_process_ce(mci, error_one, sec1_add, sec1_syndrome);
370 }
371
372 static void do_process_ue(struct mem_ctl_info *mci, u16 error_one,
373 u32 ded_add, u32 scrb_add)
374 {
375 u32 error_2b, block_page;
376 int row;
377 struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
378
379 debugf3("%s()\n", __func__);
380
381 if (error_one & 0x0202) {
382 error_2b = ded_add;
383
384 /* convert to 4k address */
385 block_page = error_2b >> (PAGE_SHIFT - 4);
386
387 row = pvt->mc_symmetric ?
388 /* chip select are bits 14 & 13 */
389 ((block_page >> 1) & 3) :
390 edac_mc_find_csrow_by_page(mci, block_page);
391
392 /* e752x mc reads 34:6 of the DRAM linear address */
393 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
394 block_page,
395 offset_in_page(error_2b << 4), 0,
396 row, -1, -1,
397 "e752x UE from Read", "", NULL);
398
399 }
400 if (error_one & 0x0404) {
401 error_2b = scrb_add;
402
403 /* convert to 4k address */
404 block_page = error_2b >> (PAGE_SHIFT - 4);
405
406 row = pvt->mc_symmetric ?
407 /* chip select are bits 14 & 13 */
408 ((block_page >> 1) & 3) :
409 edac_mc_find_csrow_by_page(mci, block_page);
410
411 /* e752x mc reads 34:6 of the DRAM linear address */
412 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
413 block_page,
414 offset_in_page(error_2b << 4), 0,
415 row, -1, -1,
416 "e752x UE from Scruber", "", NULL);
417 }
418 }
419
420 static inline void process_ue(struct mem_ctl_info *mci, u16 error_one,
421 u32 ded_add, u32 scrb_add, int *error_found,
422 int handle_error)
423 {
424 *error_found = 1;
425
426 if (handle_error)
427 do_process_ue(mci, error_one, ded_add, scrb_add);
428 }
429
430 static inline void process_ue_no_info_wr(struct mem_ctl_info *mci,
431 int *error_found, int handle_error)
432 {
433 *error_found = 1;
434
435 if (!handle_error)
436 return;
437
438 debugf3("%s()\n", __func__);
439 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
440 -1, -1, -1,
441 "e752x UE log memory write", "", NULL);
442 }
443
444 static void do_process_ded_retry(struct mem_ctl_info *mci, u16 error,
445 u32 retry_add)
446 {
447 u32 error_1b, page;
448 int row;
449 struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
450
451 error_1b = retry_add;
452 page = error_1b >> (PAGE_SHIFT - 4); /* convert the addr to 4k page */
453
454 /* chip select are bits 14 & 13 */
455 row = pvt->mc_symmetric ? ((page >> 1) & 3) :
456 edac_mc_find_csrow_by_page(mci, page);
457
458 e752x_mc_printk(mci, KERN_WARNING,
459 "CE page 0x%lx, row %d : Memory read retry\n",
460 (long unsigned int)page, row);
461 }
462
463 static inline void process_ded_retry(struct mem_ctl_info *mci, u16 error,
464 u32 retry_add, int *error_found,
465 int handle_error)
466 {
467 *error_found = 1;
468
469 if (handle_error)
470 do_process_ded_retry(mci, error, retry_add);
471 }
472
473 static inline void process_threshold_ce(struct mem_ctl_info *mci, u16 error,
474 int *error_found, int handle_error)
475 {
476 *error_found = 1;
477
478 if (handle_error)
479 e752x_mc_printk(mci, KERN_WARNING, "Memory threshold CE\n");
480 }
481
482 static char *global_message[11] = {
483 "PCI Express C1",
484 "PCI Express C",
485 "PCI Express B1",
486 "PCI Express B",
487 "PCI Express A1",
488 "PCI Express A",
489 "DMA Controller",
490 "HUB or NS Interface",
491 "System Bus",
492 "DRAM Controller", /* 9th entry */
493 "Internal Buffer"
494 };
495
496 #define DRAM_ENTRY 9
497
498 static char *fatal_message[2] = { "Non-Fatal ", "Fatal " };
499
500 static void do_global_error(int fatal, u32 errors)
501 {
502 int i;
503
504 for (i = 0; i < 11; i++) {
505 if (errors & (1 << i)) {
506 /* If the error is from DRAM Controller OR
507 * we are to report ALL errors, then
508 * report the error
509 */
510 if ((i == DRAM_ENTRY) || report_non_memory_errors)
511 e752x_printk(KERN_WARNING, "%sError %s\n",
512 fatal_message[fatal],
513 global_message[i]);
514 }
515 }
516 }
517
518 static inline void global_error(int fatal, u32 errors, int *error_found,
519 int handle_error)
520 {
521 *error_found = 1;
522
523 if (handle_error)
524 do_global_error(fatal, errors);
525 }
526
527 static char *hub_message[7] = {
528 "HI Address or Command Parity", "HI Illegal Access",
529 "HI Internal Parity", "Out of Range Access",
530 "HI Data Parity", "Enhanced Config Access",
531 "Hub Interface Target Abort"
532 };
533
534 static void do_hub_error(int fatal, u8 errors)
535 {
536 int i;
537
538 for (i = 0; i < 7; i++) {
539 if (errors & (1 << i))
540 e752x_printk(KERN_WARNING, "%sError %s\n",
541 fatal_message[fatal], hub_message[i]);
542 }
543 }
544
545 static inline void hub_error(int fatal, u8 errors, int *error_found,
546 int handle_error)
547 {
548 *error_found = 1;
549
550 if (handle_error)
551 do_hub_error(fatal, errors);
552 }
553
554 #define NSI_FATAL_MASK 0x0c080081
555 #define NSI_NON_FATAL_MASK 0x23a0ba64
556 #define NSI_ERR_MASK (NSI_FATAL_MASK | NSI_NON_FATAL_MASK)
557
558 static char *nsi_message[30] = {
559 "NSI Link Down", /* NSI_FERR/NSI_NERR bit 0, fatal error */
560 "", /* reserved */
561 "NSI Parity Error", /* bit 2, non-fatal */
562 "", /* reserved */
563 "", /* reserved */
564 "Correctable Error Message", /* bit 5, non-fatal */
565 "Non-Fatal Error Message", /* bit 6, non-fatal */
566 "Fatal Error Message", /* bit 7, fatal */
567 "", /* reserved */
568 "Receiver Error", /* bit 9, non-fatal */
569 "", /* reserved */
570 "Bad TLP", /* bit 11, non-fatal */
571 "Bad DLLP", /* bit 12, non-fatal */
572 "REPLAY_NUM Rollover", /* bit 13, non-fatal */
573 "", /* reserved */
574 "Replay Timer Timeout", /* bit 15, non-fatal */
575 "", /* reserved */
576 "", /* reserved */
577 "", /* reserved */
578 "Data Link Protocol Error", /* bit 19, fatal */
579 "", /* reserved */
580 "Poisoned TLP", /* bit 21, non-fatal */
581 "", /* reserved */
582 "Completion Timeout", /* bit 23, non-fatal */
583 "Completer Abort", /* bit 24, non-fatal */
584 "Unexpected Completion", /* bit 25, non-fatal */
585 "Receiver Overflow", /* bit 26, fatal */
586 "Malformed TLP", /* bit 27, fatal */
587 "", /* reserved */
588 "Unsupported Request" /* bit 29, non-fatal */
589 };
590
591 static void do_nsi_error(int fatal, u32 errors)
592 {
593 int i;
594
595 for (i = 0; i < 30; i++) {
596 if (errors & (1 << i))
597 printk(KERN_WARNING "%sError %s\n",
598 fatal_message[fatal], nsi_message[i]);
599 }
600 }
601
602 static inline void nsi_error(int fatal, u32 errors, int *error_found,
603 int handle_error)
604 {
605 *error_found = 1;
606
607 if (handle_error)
608 do_nsi_error(fatal, errors);
609 }
610
611 static char *membuf_message[4] = {
612 "Internal PMWB to DRAM parity",
613 "Internal PMWB to System Bus Parity",
614 "Internal System Bus or IO to PMWB Parity",
615 "Internal DRAM to PMWB Parity"
616 };
617
618 static void do_membuf_error(u8 errors)
619 {
620 int i;
621
622 for (i = 0; i < 4; i++) {
623 if (errors & (1 << i))
624 e752x_printk(KERN_WARNING, "Non-Fatal Error %s\n",
625 membuf_message[i]);
626 }
627 }
628
629 static inline void membuf_error(u8 errors, int *error_found, int handle_error)
630 {
631 *error_found = 1;
632
633 if (handle_error)
634 do_membuf_error(errors);
635 }
636
637 static char *sysbus_message[10] = {
638 "Addr or Request Parity",
639 "Data Strobe Glitch",
640 "Addr Strobe Glitch",
641 "Data Parity",
642 "Addr Above TOM",
643 "Non DRAM Lock Error",
644 "MCERR", "BINIT",
645 "Memory Parity",
646 "IO Subsystem Parity"
647 };
648
649 static void do_sysbus_error(int fatal, u32 errors)
650 {
651 int i;
652
653 for (i = 0; i < 10; i++) {
654 if (errors & (1 << i))
655 e752x_printk(KERN_WARNING, "%sError System Bus %s\n",
656 fatal_message[fatal], sysbus_message[i]);
657 }
658 }
659
660 static inline void sysbus_error(int fatal, u32 errors, int *error_found,
661 int handle_error)
662 {
663 *error_found = 1;
664
665 if (handle_error)
666 do_sysbus_error(fatal, errors);
667 }
668
669 static void e752x_check_hub_interface(struct e752x_error_info *info,
670 int *error_found, int handle_error)
671 {
672 u8 stat8;
673
674 //pci_read_config_byte(dev,E752X_HI_FERR,&stat8);
675
676 stat8 = info->hi_ferr;
677
678 if (stat8 & 0x7f) { /* Error, so process */
679 stat8 &= 0x7f;
680
681 if (stat8 & 0x2b)
682 hub_error(1, stat8 & 0x2b, error_found, handle_error);
683
684 if (stat8 & 0x54)
685 hub_error(0, stat8 & 0x54, error_found, handle_error);
686 }
687 //pci_read_config_byte(dev,E752X_HI_NERR,&stat8);
688
689 stat8 = info->hi_nerr;
690
691 if (stat8 & 0x7f) { /* Error, so process */
692 stat8 &= 0x7f;
693
694 if (stat8 & 0x2b)
695 hub_error(1, stat8 & 0x2b, error_found, handle_error);
696
697 if (stat8 & 0x54)
698 hub_error(0, stat8 & 0x54, error_found, handle_error);
699 }
700 }
701
702 static void e752x_check_ns_interface(struct e752x_error_info *info,
703 int *error_found, int handle_error)
704 {
705 u32 stat32;
706
707 stat32 = info->nsi_ferr;
708 if (stat32 & NSI_ERR_MASK) { /* Error, so process */
709 if (stat32 & NSI_FATAL_MASK) /* check for fatal errors */
710 nsi_error(1, stat32 & NSI_FATAL_MASK, error_found,
711 handle_error);
712 if (stat32 & NSI_NON_FATAL_MASK) /* check for non-fatal ones */
713 nsi_error(0, stat32 & NSI_NON_FATAL_MASK, error_found,
714 handle_error);
715 }
716 stat32 = info->nsi_nerr;
717 if (stat32 & NSI_ERR_MASK) {
718 if (stat32 & NSI_FATAL_MASK)
719 nsi_error(1, stat32 & NSI_FATAL_MASK, error_found,
720 handle_error);
721 if (stat32 & NSI_NON_FATAL_MASK)
722 nsi_error(0, stat32 & NSI_NON_FATAL_MASK, error_found,
723 handle_error);
724 }
725 }
726
727 static void e752x_check_sysbus(struct e752x_error_info *info,
728 int *error_found, int handle_error)
729 {
730 u32 stat32, error32;
731
732 //pci_read_config_dword(dev,E752X_SYSBUS_FERR,&stat32);
733 stat32 = info->sysbus_ferr + (info->sysbus_nerr << 16);
734
735 if (stat32 == 0)
736 return; /* no errors */
737
738 error32 = (stat32 >> 16) & 0x3ff;
739 stat32 = stat32 & 0x3ff;
740
741 if (stat32 & 0x087)
742 sysbus_error(1, stat32 & 0x087, error_found, handle_error);
743
744 if (stat32 & 0x378)
745 sysbus_error(0, stat32 & 0x378, error_found, handle_error);
746
747 if (error32 & 0x087)
748 sysbus_error(1, error32 & 0x087, error_found, handle_error);
749
750 if (error32 & 0x378)
751 sysbus_error(0, error32 & 0x378, error_found, handle_error);
752 }
753
754 static void e752x_check_membuf(struct e752x_error_info *info,
755 int *error_found, int handle_error)
756 {
757 u8 stat8;
758
759 stat8 = info->buf_ferr;
760
761 if (stat8 & 0x0f) { /* Error, so process */
762 stat8 &= 0x0f;
763 membuf_error(stat8, error_found, handle_error);
764 }
765
766 stat8 = info->buf_nerr;
767
768 if (stat8 & 0x0f) { /* Error, so process */
769 stat8 &= 0x0f;
770 membuf_error(stat8, error_found, handle_error);
771 }
772 }
773
/* Decode the latched DRAM first/next error status words and dispatch
 * each error class to its handler.  Each mask below tests the same bit
 * in both bytes of the 16-bit status word.  The handlers set
 * *error_found; @handle_error gates the actual reporting.
 */
static void e752x_check_dram(struct mem_ctl_info *mci,
			struct e752x_error_info *info, int *error_found,
			int handle_error)
{
	u16 error_one, error_next;

	error_one = info->dram_ferr;
	error_next = info->dram_nerr;

	/* decode and report errors */
	if (error_one & 0x0101)	/* check first error correctable */
		process_ce(mci, error_one, info->dram_sec1_add,
			info->dram_sec1_syndrome, error_found, handle_error);

	if (error_next & 0x0101)	/* check next error correctable */
		process_ce(mci, error_next, info->dram_sec2_add,
			info->dram_sec2_syndrome, error_found, handle_error);

	/* UE during a log memory write - no address is latched */
	if (error_one & 0x4040)
		process_ue_no_info_wr(mci, error_found, handle_error);

	if (error_next & 0x4040)
		process_ue_no_info_wr(mci, error_found, handle_error);

	/* memory read retry, logged as a CE with the retry address */
	if (error_one & 0x2020)
		process_ded_retry(mci, error_one, info->dram_retr_add,
				error_found, handle_error);

	if (error_next & 0x2020)
		process_ded_retry(mci, error_next, info->dram_retr_add,
				error_found, handle_error);

	/* correctable-error threshold exceeded */
	if (error_one & 0x0808)
		process_threshold_ce(mci, error_one, error_found, handle_error);

	if (error_next & 0x0808)
		process_threshold_ce(mci, error_next, error_found,
				handle_error);

	/* UE from a read (0x0202) or from the scrubber (0x0404) */
	if (error_one & 0x0606)
		process_ue(mci, error_one, info->dram_ded_add,
			info->dram_scrb_add, error_found, handle_error);

	if (error_next & 0x0606)
		process_ue(mci, error_next, info->dram_ded_add,
			info->dram_scrb_add, error_found, handle_error);
}
821
/* Snapshot all error-status registers on D0:F1 into @info, then write
 * the observed bits back to each register that showed an error
 * (presumably write-one-to-clear, which acknowledges the latched
 * status - confirm against the chipset datasheet).
 *
 * Per-source detail registers are read only when the corresponding
 * global register (FERR_GLOBAL / NERR_GLOBAL) reports something.
 * On the 3100 the NSI register replaces the hub-interface register;
 * the unused field is zeroed so later decoding can test either.
 */
static void e752x_get_error_info(struct mem_ctl_info *mci,
				struct e752x_error_info *info)
{
	struct pci_dev *dev;
	struct e752x_pvt *pvt;

	memset(info, 0, sizeof(*info));
	pvt = (struct e752x_pvt *)mci->pvt_info;
	dev = pvt->dev_d0f1;
	pci_read_config_dword(dev, E752X_FERR_GLOBAL, &info->ferr_global);

	if (info->ferr_global) {
		/* 3100: read NSI status instead of the hub interface */
		if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) {
			pci_read_config_dword(dev, I3100_NSI_FERR,
					&info->nsi_ferr);
			info->hi_ferr = 0;
		} else {
			pci_read_config_byte(dev, E752X_HI_FERR,
					&info->hi_ferr);
			info->nsi_ferr = 0;
		}
		pci_read_config_word(dev, E752X_SYSBUS_FERR,
				&info->sysbus_ferr);
		pci_read_config_byte(dev, E752X_BUF_FERR, &info->buf_ferr);
		pci_read_config_word(dev, E752X_DRAM_FERR, &info->dram_ferr);
		pci_read_config_dword(dev, E752X_DRAM_SEC1_ADD,
				&info->dram_sec1_add);
		pci_read_config_word(dev, E752X_DRAM_SEC1_SYNDROME,
				&info->dram_sec1_syndrome);
		pci_read_config_dword(dev, E752X_DRAM_DED_ADD,
				&info->dram_ded_add);
		pci_read_config_dword(dev, E752X_DRAM_SCRB_ADD,
				&info->dram_scrb_add);
		pci_read_config_dword(dev, E752X_DRAM_RETR_ADD,
				&info->dram_retr_add);

		/* ignore the reserved bits just in case */
		if (info->hi_ferr & 0x7f)
			pci_write_config_byte(dev, E752X_HI_FERR,
					info->hi_ferr);

		if (info->nsi_ferr & NSI_ERR_MASK)
			pci_write_config_dword(dev, I3100_NSI_FERR,
					info->nsi_ferr);

		if (info->sysbus_ferr)
			pci_write_config_word(dev, E752X_SYSBUS_FERR,
					info->sysbus_ferr);

		if (info->buf_ferr & 0x0f)
			pci_write_config_byte(dev, E752X_BUF_FERR,
					info->buf_ferr);

		/* DRAM status is cleared through the bridge device */
		if (info->dram_ferr)
			pci_write_bits16(pvt->bridge_ck, E752X_DRAM_FERR,
					info->dram_ferr, info->dram_ferr);

		pci_write_config_dword(dev, E752X_FERR_GLOBAL,
				info->ferr_global);
	}

	pci_read_config_dword(dev, E752X_NERR_GLOBAL, &info->nerr_global);

	if (info->nerr_global) {
		/* 3100: read NSI status instead of the hub interface */
		if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) {
			pci_read_config_dword(dev, I3100_NSI_NERR,
					&info->nsi_nerr);
			info->hi_nerr = 0;
		} else {
			pci_read_config_byte(dev, E752X_HI_NERR,
					&info->hi_nerr);
			info->nsi_nerr = 0;
		}
		pci_read_config_word(dev, E752X_SYSBUS_NERR,
				&info->sysbus_nerr);
		pci_read_config_byte(dev, E752X_BUF_NERR, &info->buf_nerr);
		pci_read_config_word(dev, E752X_DRAM_NERR, &info->dram_nerr);
		pci_read_config_dword(dev, E752X_DRAM_SEC2_ADD,
				&info->dram_sec2_add);
		pci_read_config_word(dev, E752X_DRAM_SEC2_SYNDROME,
				&info->dram_sec2_syndrome);

		if (info->hi_nerr & 0x7f)
			pci_write_config_byte(dev, E752X_HI_NERR,
					info->hi_nerr);

		if (info->nsi_nerr & NSI_ERR_MASK)
			pci_write_config_dword(dev, I3100_NSI_NERR,
					info->nsi_nerr);

		if (info->sysbus_nerr)
			pci_write_config_word(dev, E752X_SYSBUS_NERR,
					info->sysbus_nerr);

		if (info->buf_nerr & 0x0f)
			pci_write_config_byte(dev, E752X_BUF_NERR,
					info->buf_nerr);

		if (info->dram_nerr)
			pci_write_bits16(pvt->bridge_ck, E752X_DRAM_NERR,
					info->dram_nerr, info->dram_nerr);

		pci_write_config_dword(dev, E752X_NERR_GLOBAL,
				info->nerr_global);
	}
}
928
929 static int e752x_process_error_info(struct mem_ctl_info *mci,
930 struct e752x_error_info *info,
931 int handle_errors)
932 {
933 u32 error32, stat32;
934 int error_found;
935
936 error_found = 0;
937 error32 = (info->ferr_global >> 18) & 0x3ff;
938 stat32 = (info->ferr_global >> 4) & 0x7ff;
939
940 if (error32)
941 global_error(1, error32, &error_found, handle_errors);
942
943 if (stat32)
944 global_error(0, stat32, &error_found, handle_errors);
945
946 error32 = (info->nerr_global >> 18) & 0x3ff;
947 stat32 = (info->nerr_global >> 4) & 0x7ff;
948
949 if (error32)
950 global_error(1, error32, &error_found, handle_errors);
951
952 if (stat32)
953 global_error(0, stat32, &error_found, handle_errors);
954
955 e752x_check_hub_interface(info, &error_found, handle_errors);
956 e752x_check_ns_interface(info, &error_found, handle_errors);
957 e752x_check_sysbus(info, &error_found, handle_errors);
958 e752x_check_membuf(info, &error_found, handle_errors);
959 e752x_check_dram(mci, info, &error_found, handle_errors);
960 return error_found;
961 }
962
/* Polling entry point: snapshot (and clear) the hardware error
 * registers, then decode and report whatever was latched.
 */
static void e752x_check(struct mem_ctl_info *mci)
{
	struct e752x_error_info info;

	debugf3("%s()\n", __func__);
	e752x_get_error_info(mci, &info);
	e752x_process_error_info(mci, &info, 1);
}
971
972 /* Program byte/sec bandwidth scrub rate to hardware */
973 static int set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 new_bw)
974 {
975 const struct scrubrate *scrubrates;
976 struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
977 struct pci_dev *pdev = pvt->dev_d0f0;
978 int i;
979
980 if (pvt->dev_info->ctl_dev == PCI_DEVICE_ID_INTEL_3100_0)
981 scrubrates = scrubrates_i3100;
982 else
983 scrubrates = scrubrates_e752x;
984
985 /* Translate the desired scrub rate to a e752x/3100 register value.
986 * Search for the bandwidth that is equal or greater than the
987 * desired rate and program the cooresponding register value.
988 */
989 for (i = 0; scrubrates[i].bandwidth != SDRATE_EOT; i++)
990 if (scrubrates[i].bandwidth >= new_bw)
991 break;
992
993 if (scrubrates[i].bandwidth == SDRATE_EOT)
994 return -1;
995
996 pci_write_config_word(pdev, E752X_MCHSCRB, scrubrates[i].scrubval);
997
998 return scrubrates[i].bandwidth;
999 }
1000
1001 /* Convert current scrub rate value into byte/sec bandwidth */
1002 static int get_sdram_scrub_rate(struct mem_ctl_info *mci)
1003 {
1004 const struct scrubrate *scrubrates;
1005 struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
1006 struct pci_dev *pdev = pvt->dev_d0f0;
1007 u16 scrubval;
1008 int i;
1009
1010 if (pvt->dev_info->ctl_dev == PCI_DEVICE_ID_INTEL_3100_0)
1011 scrubrates = scrubrates_i3100;
1012 else
1013 scrubrates = scrubrates_e752x;
1014
1015 /* Find the bandwidth matching the memory scrubber configuration */
1016 pci_read_config_word(pdev, E752X_MCHSCRB, &scrubval);
1017 scrubval = scrubval & 0x0f;
1018
1019 for (i = 0; scrubrates[i].bandwidth != SDRATE_EOT; i++)
1020 if (scrubrates[i].scrubval == scrubval)
1021 break;
1022
1023 if (scrubrates[i].bandwidth == SDRATE_EOT) {
1024 e752x_printk(KERN_WARNING,
1025 "Invalid sdram scrub control value: 0x%x\n", scrubval);
1026 return -1;
1027 }
1028 return scrubrates[i].bandwidth;
1029
1030 }
1031
/* Return 1 if dual channel mode is active. Else return 0.
 * DDRCSR bits 14:12 encode 1=single A, 2=single B, 3=dual (per the
 * register comment at its definition); only the low two of those bits
 * are tested here, exactly as the original did.
 */
static inline int dual_channel_active(u16 ddrcsr)
{
	return ((ddrcsr >> 12) & 3) == 3;
}
1037
1038 /* Remap csrow index numbers if map_type is "reverse"
1039 */
1040 static inline int remap_csrow_index(struct mem_ctl_info *mci, int index)
1041 {
1042 struct e752x_pvt *pvt = mci->pvt_info;
1043
1044 if (!pvt->map_type)
1045 return (7 - index);
1046
1047 return (index);
1048 }
1049
/* Populate the csrow/dimm tables from the DRAM row-boundary (DRB),
 * row-attribute (DRA) and controller-mode (DRC) registers.
 * @ddrcsr: DDR control/status value, used to detect dual-channel mode.
 */
static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
			u16 ddrcsr)
{
	struct csrow_info *csrow;
	unsigned long last_cumul_size;
	int index, mem_dev, drc_chan;
	int drc_drbg;	/* DRB granularity 0=64mb, 1=128mb */
	int drc_ddim;	/* DRAM Data Integrity Mode 0=none, 2=edac */
	u8 value;
	u32 dra, drc, cumul_size, i, nr_pages;

	/* assemble the 32-bit DRA value from four byte-wide reads */
	dra = 0;
	for (index = 0; index < 4; index++) {
		u8 dra_reg;
		pci_read_config_byte(pdev, E752X_DRA + index, &dra_reg);
		dra |= dra_reg << (index * 8);
	}
	pci_read_config_dword(pdev, E752X_DRC, &drc);
	drc_chan = dual_channel_active(ddrcsr) ? 1 : 0;
	drc_drbg = drc_chan + 1;	/* 128 in dual mode, 64 in single */
	drc_ddim = (drc >> 20) & 0x3;

	/* The dram row boundary (DRB) reg values are boundary address for
	 * each DRAM row with a granularity of 64 or 128MB (single/dual
	 * channel operation). DRB regs are cumulative; therefore DRB7 will
	 * contain the total memory contained in all eight rows.
	 */
	for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) {
		/* mem_dev 0=x8, 1=x4 */
		mem_dev = (dra >> (index * 4 + 2)) & 0x3;
		csrow = &mci->csrows[remap_csrow_index(mci, index)];

		mem_dev = (mem_dev == 2);
		pci_read_config_byte(pdev, E752X_DRB + index, &value);
		/* convert a 128 or 64 MiB DRB to a page size. */
		cumul_size = value << (25 + drc_drbg - PAGE_SHIFT);
		debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
			cumul_size);
		if (cumul_size == last_cumul_size)
			continue;	/* not populated */

		csrow->first_page = last_cumul_size;
		csrow->last_page = cumul_size - 1;
		nr_pages = cumul_size - last_cumul_size;
		last_cumul_size = cumul_size;

		/* split the row's pages evenly across its channels */
		for (i = 0; i < csrow->nr_channels; i++) {
			struct dimm_info *dimm = csrow->channels[i].dimm;

			debugf3("Initializing rank at (%i,%i)\n", index, i);
			dimm->nr_pages = nr_pages / csrow->nr_channels;
			dimm->grain = 1 << 12;	/* 4KiB - resolution of CELOG */
			dimm->mtype = MEM_RDDR;	/* only one type supported */
			dimm->dtype = mem_dev ? DEV_X4 : DEV_X8;

			/*
			 * if single channel or x8 devices then SECDED
			 * if dual channel and x4 then S4ECD4ED
			 */
			if (drc_ddim) {
				if (drc_chan && mem_dev) {
					dimm->edac_mode = EDAC_S4ECD4ED;
					mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
				} else {
					dimm->edac_mode = EDAC_SECDED;
					mci->edac_cap |= EDAC_FLAG_SECDED;
				}
			} else
				dimm->edac_mode = EDAC_NONE;
		}
	}
}
1122
1123 static void e752x_init_mem_map_table(struct pci_dev *pdev,
1124 struct e752x_pvt *pvt)
1125 {
1126 int index;
1127 u8 value, last, row;
1128
1129 last = 0;
1130 row = 0;
1131
1132 for (index = 0; index < 8; index += 2) {
1133 pci_read_config_byte(pdev, E752X_DRB + index, &value);
1134 /* test if there is a dimm in this slot */
1135 if (value == last) {
1136 /* no dimm in the slot, so flag it as empty */
1137 pvt->map[index] = 0xff;
1138 pvt->map[index + 1] = 0xff;
1139 } else { /* there is a dimm in the slot */
1140 pvt->map[index] = row;
1141 row++;
1142 last = value;
1143 /* test the next value to see if the dimm is double
1144 * sided
1145 */
1146 pci_read_config_byte(pdev, E752X_DRB + index + 1,
1147 &value);
1148
1149 /* the dimm is single sided, so flag as empty */
1150 /* this is a double sided dimm to save the next row #*/
1151 pvt->map[index + 1] = (value == last) ? 0xff : row;
1152 row++;
1153 last = value;
1154 }
1155 }
1156 }
1157
1158 /* Return 0 on success or 1 on failure. */
1159 static int e752x_get_devs(struct pci_dev *pdev, int dev_idx,
1160 struct e752x_pvt *pvt)
1161 {
1162 struct pci_dev *dev;
1163
1164 pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
1165 pvt->dev_info->err_dev, pvt->bridge_ck);
1166
1167 if (pvt->bridge_ck == NULL)
1168 pvt->bridge_ck = pci_scan_single_device(pdev->bus,
1169 PCI_DEVFN(0, 1));
1170
1171 if (pvt->bridge_ck == NULL) {
1172 e752x_printk(KERN_ERR, "error reporting device not found:"
1173 "vendor %x device 0x%x (broken BIOS?)\n",
1174 PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].err_dev);
1175 return 1;
1176 }
1177
1178 dev = pci_get_device(PCI_VENDOR_ID_INTEL,
1179 e752x_devs[dev_idx].ctl_dev,
1180 NULL);
1181
1182 if (dev == NULL)
1183 goto fail;
1184
1185 pvt->dev_d0f0 = dev;
1186 pvt->dev_d0f1 = pci_dev_get(pvt->bridge_ck);
1187
1188 return 0;
1189
1190 fail:
1191 pci_dev_put(pvt->bridge_ck);
1192 return 1;
1193 }
1194
1195 /* Setup system bus parity mask register.
1196 * Sysbus parity supported on:
1197 * e7320/e7520/e7525 + Xeon
1198 */
1199 static void e752x_init_sysbus_parity_mask(struct e752x_pvt *pvt)
1200 {
1201 char *cpu_id = cpu_data(0).x86_model_id;
1202 struct pci_dev *dev = pvt->dev_d0f1;
1203 int enable = 1;
1204
1205 /* Allow module parameter override, else see if CPU supports parity */
1206 if (sysbus_parity != -1) {
1207 enable = sysbus_parity;
1208 } else if (cpu_id[0] && !strstr(cpu_id, "Xeon")) {
1209 e752x_printk(KERN_INFO, "System Bus Parity not "
1210 "supported by CPU, disabling\n");
1211 enable = 0;
1212 }
1213
1214 if (enable)
1215 pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x0000);
1216 else
1217 pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x0309);
1218 }
1219
/*
 * Clear the error-mask and SMI-command registers on dev0:fun1 so that
 * errors are neither masked nor routed to SMI (the BIOS may have left
 * either enabled).  The 3100 IMCH uses different (NSI) registers for
 * the hub-interface masks; the remaining masks are common to all chips.
 */
static void e752x_init_error_reporting_regs(struct e752x_pvt *pvt)
{
	struct pci_dev *dev;

	dev = pvt->dev_d0f1;
	/* Turn off error disable & SMI in case the BIOS turned it on */
	if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) {
		pci_write_config_dword(dev, I3100_NSI_EMASK, 0);
		pci_write_config_dword(dev, I3100_NSI_SMICMD, 0);
	} else {
		pci_write_config_byte(dev, E752X_HI_ERRMASK, 0x00);
		pci_write_config_byte(dev, E752X_HI_SMICMD, 0x00);
	}

	/* Unmask (or mask) system bus parity per CPU / module parameter */
	e752x_init_sysbus_parity_mask(pvt);

	pci_write_config_word(dev, E752X_SYSBUS_SMICMD, 0x00);
	pci_write_config_byte(dev, E752X_BUF_ERRMASK, 0x00);
	pci_write_config_byte(dev, E752X_BUF_SMICMD, 0x00);
	pci_write_config_byte(dev, E752X_DRAM_ERRMASK, 0x00);
	pci_write_config_byte(dev, E752X_DRAM_SMICMD, 0x00);
}
1242
/*
 * Probe and initialize one e752x/3100 memory controller.
 *
 * Verifies that the error-reporting function (dev0:fun1) may be used,
 * allocates the mem_ctl_info with csrow/channel layers, collects the
 * companion pci_devs, fills in the csrow and row-map tables, registers
 * the controller with the EDAC core and arms error reporting.
 *
 * Returns 0 on success, -ENODEV or -ENOMEM on failure.
 */
static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
{
	u16 pci_data;
	u8 stat8;
	struct mem_ctl_info *mci;
	struct edac_mc_layer layers[2];
	struct e752x_pvt *pvt;
	u16 ddrcsr;
	int drc_chan;	/* Number of channels 0=1chan,1=2chan */
	struct e752x_error_info discard;

	debugf0("%s(): mci\n", __func__);
	debugf0("Starting Probe1\n");

	/* check to see if device 0 function 1 is enabled; if it isn't, we
	 * assume the BIOS has reserved it for a reason and is expecting
	 * exclusive access, we take care not to violate that assumption and
	 * fail the probe. */
	pci_read_config_byte(pdev, E752X_DEVPRES1, &stat8);
	if (!force_function_unhide && !(stat8 & (1 << 5))) {
		printk(KERN_INFO "Contact your BIOS vendor to see if the "
			"E752x error registers can be safely un-hidden\n");
		return -ENODEV;
	}
	/* un-hide dev0:fun1 so its error registers become visible */
	stat8 |= (1 << 5);
	pci_write_config_byte(pdev, E752X_DEVPRES1, stat8);

	pci_read_config_word(pdev, E752X_DDRCSR, &ddrcsr);
	/* FIXME: should check >>12 or 0xf, true for all? */
	/* Dual channel = 1, Single channel = 0 */
	drc_chan = dual_channel_active(ddrcsr);

	/* layer 0: chip-select rows; layer 1: one or two channels */
	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
	layers[0].size = E752X_NR_CSROWS;
	layers[0].is_virt_csrow = true;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;
	layers[1].size = drc_chan + 1;
	layers[1].is_virt_csrow = false;
	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
	if (mci == NULL)
		return -ENOMEM;

	debugf3("%s(): init mci\n", __func__);
	mci->mtype_cap = MEM_FLAG_RDDR;
	/* 3100 IMCH supports SECDEC only */
	mci->edac_ctl_cap = (dev_idx == I3100) ? EDAC_FLAG_SECDED :
		(EDAC_FLAG_NONE | EDAC_FLAG_SECDED | EDAC_FLAG_S4ECD4ED);
	/* FIXME - what if different memory types are in different csrows? */
	mci->mod_name = EDAC_MOD_STR;
	mci->mod_ver = E752X_REVISION;
	mci->dev = &pdev->dev;

	debugf3("%s(): init pvt\n", __func__);
	pvt = (struct e752x_pvt *)mci->pvt_info;
	pvt->dev_info = &e752x_devs[dev_idx];
	pvt->mc_symmetric = ((ddrcsr & 0x10) != 0);

	if (e752x_get_devs(pdev, dev_idx, pvt)) {
		edac_mc_free(mci);
		return -ENODEV;
	}

	debugf3("%s(): more mci init\n", __func__);
	mci->ctl_name = pvt->dev_info->ctl_name;
	mci->dev_name = pci_name(pdev);
	mci->edac_check = e752x_check;
	mci->ctl_page_to_phys = ctl_page_to_phys;
	mci->set_sdram_scrub_rate = set_sdram_scrub_rate;
	mci->get_sdram_scrub_rate = get_sdram_scrub_rate;

	/* set the map type. 1 = normal, 0 = reversed
	 * Must be set before e752x_init_csrows in case csrow mapping
	 * is reversed.
	 */
	pci_read_config_byte(pdev, E752X_DRM, &stat8);
	pvt->map_type = ((stat8 & 0x0f) > ((stat8 >> 4) & 0x0f));

	e752x_init_csrows(mci, pdev, ddrcsr);
	e752x_init_mem_map_table(pdev, pvt);

	if (dev_idx == I3100)
		mci->edac_cap = EDAC_FLAG_SECDED; /* the only mode supported */
	else
		mci->edac_cap |= EDAC_FLAG_NONE;
	debugf3("%s(): tolm, remapbase, remaplimit\n", __func__);

	/* load the top of low memory, remap base, and remap limit vars */
	pci_read_config_word(pdev, E752X_TOLM, &pci_data);
	pvt->tolm = ((u32) pci_data) << 4;
	pci_read_config_word(pdev, E752X_REMAPBASE, &pci_data);
	pvt->remapbase = ((u32) pci_data) << 14;
	pci_read_config_word(pdev, E752X_REMAPLIMIT, &pci_data);
	pvt->remaplimit = ((u32) pci_data) << 14;
	e752x_printk(KERN_INFO,
		"tolm = %x, remapbase = %x, remaplimit = %x\n",
		pvt->tolm, pvt->remapbase, pvt->remaplimit);

	/* Here we assume that we will never see multiple instances of this
	 * type of memory controller. The ID is therefore hardcoded to 0.
	 */
	if (edac_mc_add_mc(mci)) {
		debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
		goto fail;
	}

	e752x_init_error_reporting_regs(pvt);
	e752x_get_error_info(mci, &discard);	/* clear other MCH errors */

	/* allocating generic PCI control info */
	e752x_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
	if (!e752x_pci) {
		/* not fatal: MC reporting still works without the PCI ctl */
		printk(KERN_WARNING
			"%s(): Unable to create PCI control\n", __func__);
		printk(KERN_WARNING
			"%s(): PCI error report via EDAC not setup\n",
			__func__);
	}

	/* get this far and it's successful */
	debugf3("%s(): success\n", __func__);
	return 0;

fail:
	/* drop the references taken in e752x_get_devs() */
	pci_dev_put(pvt->dev_d0f0);
	pci_dev_put(pvt->dev_d0f1);
	pci_dev_put(pvt->bridge_ck);
	edac_mc_free(mci);

	return -ENODEV;
}
1373
/* returns count (>= 0), or negative on error */
static int __devinit e752x_init_one(struct pci_dev *pdev,
				const struct pci_device_id *ent)
{
	debugf0("%s()\n", __func__);

	/* wake up and enable device */
	if (pci_enable_device(pdev) < 0)
		return -EIO;

	/* ent->driver_data carries the chip index from e752x_pci_tbl */
	return e752x_probe1(pdev, ent->driver_data);
}
1386
1387 static void __devexit e752x_remove_one(struct pci_dev *pdev)
1388 {
1389 struct mem_ctl_info *mci;
1390 struct e752x_pvt *pvt;
1391
1392 debugf0("%s()\n", __func__);
1393
1394 if (e752x_pci)
1395 edac_pci_release_generic_ctl(e752x_pci);
1396
1397 if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
1398 return;
1399
1400 pvt = (struct e752x_pvt *)mci->pvt_info;
1401 pci_dev_put(pvt->dev_d0f0);
1402 pci_dev_put(pvt->dev_d0f1);
1403 pci_dev_put(pvt->bridge_ck);
1404 edac_mc_free(mci);
1405 }
1406
1407 static DEFINE_PCI_DEVICE_TABLE(e752x_pci_tbl) = {
1408 {
1409 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1410 E7520},
1411 {
1412 PCI_VEND_DEV(INTEL, 7525_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1413 E7525},
1414 {
1415 PCI_VEND_DEV(INTEL, 7320_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1416 E7320},
1417 {
1418 PCI_VEND_DEV(INTEL, 3100_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1419 I3100},
1420 {
1421 0,
1422 } /* 0 terminated list. */
1423 };
1424
1425 MODULE_DEVICE_TABLE(pci, e752x_pci_tbl);
1426
/* PCI driver glue: probe/remove hooks plus the supported-device table */
static struct pci_driver e752x_driver = {
	.name = EDAC_MOD_STR,
	.probe = e752x_init_one,
	.remove = __devexit_p(e752x_remove_one),
	.id_table = e752x_pci_tbl,
};
1433
1434 static int __init e752x_init(void)
1435 {
1436 int pci_rc;
1437
1438 debugf3("%s()\n", __func__);
1439
1440 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
1441 opstate_init();
1442
1443 pci_rc = pci_register_driver(&e752x_driver);
1444 return (pci_rc < 0) ? pci_rc : 0;
1445 }
1446
/* Module unload: unhook from the PCI core */
static void __exit e752x_exit(void)
{
	debugf3("%s()\n", __func__);
	pci_unregister_driver(&e752x_driver);
}
1452
module_init(e752x_init);
module_exit(e752x_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Linux Networx (http://lnxi.com) Tom Zimmerman\n");
MODULE_DESCRIPTION("MC support for Intel e752x/3100 memory controllers");

/* Allow access to dev0:fun1 even when the BIOS has hidden it */
module_param(force_function_unhide, int, 0444);
MODULE_PARM_DESC(force_function_unhide, "if BIOS sets Dev0:Fun1 up as hidden:"
		" 1=force unhide and hope BIOS doesn't fight driver for "
		"Dev0:Fun1 access");

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");

/* -1 (the default) auto-detects based on the CPU model string */
module_param(sysbus_parity, int, 0444);
MODULE_PARM_DESC(sysbus_parity, "0=disable system bus parity checking,"
		" 1=enable system bus parity checking, default=auto-detect");
module_param(report_non_memory_errors, int, 0644);
MODULE_PARM_DESC(report_non_memory_errors, "0=disable non-memory error "
		"reporting, 1=enable non-memory error reporting");