1 /* Intel i7 core/Nehalem Memory Controller kernel module
2 *
3 * This driver supports the memory controllers found on the Intel
4 * processor families i7core, i7core 7xx/8xx, i5core, Xeon 35xx,
5 * Xeon 55xx and Xeon 56xx also known as Nehalem, Nehalem-EP, Lynnfield
6 * and Westmere-EP.
7 *
8 * This file may be distributed under the terms of the
9 * GNU General Public License version 2 only.
10 *
11 * Copyright (c) 2009-2010 by:
12 * Mauro Carvalho Chehab <mchehab@redhat.com>
13 *
14 * Red Hat Inc. http://www.redhat.com
15 *
16 * Forked and adapted from the i5400_edac driver
17 *
18 * Based on the following public Intel datasheets:
19 * Intel Core i7 Processor Extreme Edition and Intel Core i7 Processor
20 * Datasheet, Volume 2:
21 * http://download.intel.com/design/processor/datashts/320835.pdf
22 * Intel Xeon Processor 5500 Series Datasheet Volume 2
23 * http://www.intel.com/Assets/PDF/datasheet/321322.pdf
24 * also available at:
25 * http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
26 */
27
28 #include <linux/module.h>
29 #include <linux/init.h>
30 #include <linux/pci.h>
31 #include <linux/pci_ids.h>
32 #include <linux/slab.h>
33 #include <linux/delay.h>
34 #include <linux/edac.h>
35 #include <linux/mmzone.h>
36 #include <linux/edac_mce.h>
37 #include <linux/smp.h>
38 #include <asm/processor.h>
39
40 #include "edac_core.h"
41
42 /* Static vars */
43 static LIST_HEAD(i7core_edac_list);
44 static DEFINE_MUTEX(i7core_edac_lock);
45 static int probed;
46
47 static int use_pci_fixup;
48 module_param(use_pci_fixup, int, 0444);
49 MODULE_PARM_DESC(use_pci_fixup, "Enable PCI fixup to seek for hidden devices");
50 /*
51 * This is used for Nehalem-EP and Nehalem-EX devices, where the non-core
52 * registers start at bus 255 and are not reported by the BIOS.
53 * We currently support machines with up to 2 sockets. In order to support
54 * more QPI (Quick Path Interconnect) sockets, just increase this number.
55 */
56 #define MAX_SOCKET_BUSES 2
57
58
59 /*
60 * Alter this version for the module when modifications are made
61 */
62 #define I7CORE_REVISION " Ver: 1.0.0 " __DATE__
63 #define EDAC_MOD_STR "i7core_edac"
64
65 /*
66 * Debug macros
67 */
68 #define i7core_printk(level, fmt, arg...) \
69 edac_printk(level, "i7core", fmt, ##arg)
70
71 #define i7core_mc_printk(mci, level, fmt, arg...) \
72 edac_mc_chipset_printk(mci, level, "i7core", fmt, ##arg)
73
74 /*
75 * i7core Memory Controller Registers
76 */
77
78 /* OFFSETS for Device 0 Function 0 */
79
80 #define MC_CFG_CONTROL 0x90
81
82 /* OFFSETS for Device 3 Function 0 */
83
84 #define MC_CONTROL 0x48
85 #define MC_STATUS 0x4c
86 #define MC_MAX_DOD 0x64
87
88 /*
89 * OFFSETS for Device 3 Function 4, as indicated on Xeon 5500 datasheet:
90 * http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
91 */
92
93 #define MC_TEST_ERR_RCV1 0x60
94 #define DIMM2_COR_ERR(r) ((r) & 0x7fff)
95
96 #define MC_TEST_ERR_RCV0 0x64
97 #define DIMM1_COR_ERR(r) (((r) >> 16) & 0x7fff)
98 #define DIMM0_COR_ERR(r) ((r) & 0x7fff)
99
100 /* OFFSETS for Device 3 Function 2, as indicated on Xeon 5500 datasheet */
101 #define MC_COR_ECC_CNT_0 0x80
102 #define MC_COR_ECC_CNT_1 0x84
103 #define MC_COR_ECC_CNT_2 0x88
104 #define MC_COR_ECC_CNT_3 0x8c
105 #define MC_COR_ECC_CNT_4 0x90
106 #define MC_COR_ECC_CNT_5 0x94
107
108 #define DIMM_TOP_COR_ERR(r) (((r) >> 16) & 0x7fff)
109 #define DIMM_BOT_COR_ERR(r) ((r) & 0x7fff)
110
111
112 /* OFFSETS for Devices 4,5 and 6 Function 0 */
113
114 #define MC_CHANNEL_DIMM_INIT_PARAMS 0x58
115 #define THREE_DIMMS_PRESENT (1 << 24)
116 #define SINGLE_QUAD_RANK_PRESENT (1 << 23)
117 #define QUAD_RANK_PRESENT (1 << 22)
118 #define REGISTERED_DIMM (1 << 15)
119
120 #define MC_CHANNEL_MAPPER 0x60
121 #define RDLCH(r, ch) ((((r) >> (3 + (ch * 6))) & 0x07) - 1)
122 #define WRLCH(r, ch) ((((r) >> (ch * 6)) & 0x07) - 1)
123
124 #define MC_CHANNEL_RANK_PRESENT 0x7c
125 #define RANK_PRESENT_MASK 0xffff
126
127 #define MC_CHANNEL_ADDR_MATCH 0xf0
128 #define MC_CHANNEL_ERROR_MASK 0xf8
129 #define MC_CHANNEL_ERROR_INJECT 0xfc
130 #define INJECT_ADDR_PARITY 0x10
131 #define INJECT_ECC 0x08
132 #define MASK_CACHELINE 0x06
133 #define MASK_FULL_CACHELINE 0x06
134 #define MASK_MSB32_CACHELINE 0x04
135 #define MASK_LSB32_CACHELINE 0x02
136 #define NO_MASK_CACHELINE 0x00
137 #define REPEAT_EN 0x01
138
139 /* OFFSETS for Devices 4,5 and 6 Function 1 */
140
141 #define MC_DOD_CH_DIMM0 0x48
142 #define MC_DOD_CH_DIMM1 0x4c
143 #define MC_DOD_CH_DIMM2 0x50
144 #define RANKOFFSET_MASK ((1 << 12) | (1 << 11) | (1 << 10))
145 #define RANKOFFSET(x) ((x & RANKOFFSET_MASK) >> 10)
146 #define DIMM_PRESENT_MASK (1 << 9)
147 #define DIMM_PRESENT(x) (((x) & DIMM_PRESENT_MASK) >> 9)
148 #define MC_DOD_NUMBANK_MASK ((1 << 8) | (1 << 7))
149 #define MC_DOD_NUMBANK(x) (((x) & MC_DOD_NUMBANK_MASK) >> 7)
150 #define MC_DOD_NUMRANK_MASK ((1 << 6) | (1 << 5))
151 #define MC_DOD_NUMRANK(x) (((x) & MC_DOD_NUMRANK_MASK) >> 5)
152 #define MC_DOD_NUMROW_MASK ((1 << 4) | (1 << 3) | (1 << 2))
153 #define MC_DOD_NUMROW(x) (((x) & MC_DOD_NUMROW_MASK) >> 2)
154 #define MC_DOD_NUMCOL_MASK 3
155 #define MC_DOD_NUMCOL(x) ((x) & MC_DOD_NUMCOL_MASK)
156
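/*
 * Example decode, using a hypothetical raw value: dimm_dod = 0x2a5 gives
 * DIMM_PRESENT = 1, MC_DOD_NUMBANK = 1 (8 banks), MC_DOD_NUMRANK = 1
 * (2 ranks), MC_DOD_NUMROW = 1 (1 << 13 rows) and MC_DOD_NUMCOL = 1
 * (1 << 11 columns), as interpreted by the num*() helpers further down.
 */
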
157 #define MC_RANK_PRESENT 0x7c
158
159 #define MC_SAG_CH_0 0x80
160 #define MC_SAG_CH_1 0x84
161 #define MC_SAG_CH_2 0x88
162 #define MC_SAG_CH_3 0x8c
163 #define MC_SAG_CH_4 0x90
164 #define MC_SAG_CH_5 0x94
165 #define MC_SAG_CH_6 0x98
166 #define MC_SAG_CH_7 0x9c
167
168 #define MC_RIR_LIMIT_CH_0 0x40
169 #define MC_RIR_LIMIT_CH_1 0x44
170 #define MC_RIR_LIMIT_CH_2 0x48
171 #define MC_RIR_LIMIT_CH_3 0x4C
172 #define MC_RIR_LIMIT_CH_4 0x50
173 #define MC_RIR_LIMIT_CH_5 0x54
174 #define MC_RIR_LIMIT_CH_6 0x58
175 #define MC_RIR_LIMIT_CH_7 0x5C
176 #define MC_RIR_LIMIT_MASK ((1 << 10) - 1)
177
178 #define MC_RIR_WAY_CH 0x80
179 #define MC_RIR_WAY_OFFSET_MASK (((1 << 14) - 1) & ~0x7)
180 #define MC_RIR_WAY_RANK_MASK 0x7
181
182 /*
183 * i7core structs
184 */
185
186 #define NUM_CHANS 3
187 #define MAX_DIMMS 3 /* Max DIMMS per channel */
188 #define MAX_MCR_FUNC 4
189 #define MAX_CHAN_FUNC 3
190
191 struct i7core_info {
192 u32 mc_control;
193 u32 mc_status;
194 u32 max_dod;
195 u32 ch_map;
196 };
197
198
199 struct i7core_inject {
200 int enable;
201
202 u32 section;
203 u32 type;
204 u32 eccmask;
205
206 /* Error address mask */
207 int channel, dimm, rank, bank, page, col;
208 };
209
210 struct i7core_channel {
211 u32 ranks;
212 u32 dimms;
213 };
214
215 struct pci_id_descr {
216 int dev;
217 int func;
218 int dev_id;
219 int optional;
220 };
221
222 struct pci_id_table {
223 const struct pci_id_descr *descr;
224 int n_devs;
225 };
226
227 struct i7core_dev {
228 struct list_head list;
229 u8 socket;
230 struct pci_dev **pdev;
231 int n_devs;
232 struct mem_ctl_info *mci;
233 };
234
235 struct i7core_pvt {
236 struct pci_dev *pci_noncore;
237 struct pci_dev *pci_mcr[MAX_MCR_FUNC + 1];
238 struct pci_dev *pci_ch[NUM_CHANS][MAX_CHAN_FUNC + 1];
239
240 struct i7core_dev *i7core_dev;
241
242 struct i7core_info info;
243 struct i7core_inject inject;
244 struct i7core_channel channel[NUM_CHANS];
245
246 int ce_count_available;
247 int csrow_map[NUM_CHANS][MAX_DIMMS];
248
249 /* ECC corrected error counts per udimm */
250 unsigned long udimm_ce_count[MAX_DIMMS];
251 int udimm_last_ce_count[MAX_DIMMS];
252 /* ECC corrected error counts per rdimm */
253 unsigned long rdimm_ce_count[NUM_CHANS][MAX_DIMMS];
254 int rdimm_last_ce_count[NUM_CHANS][MAX_DIMMS];
255
256 unsigned int is_registered;
257
258 /* mcelog glue */
259 struct edac_mce edac_mce;
260
261 /* Fifo double buffers */
262 struct mce mce_entry[MCE_LOG_LEN];
263 struct mce mce_outentry[MCE_LOG_LEN];
264
265 /* Fifo in/out counters */
266 unsigned mce_in, mce_out;
267
268 /* Count of errors that were lost due to FIFO overrun */
269 unsigned mce_overrun;
270
271 /* Struct to control EDAC polling */
272 struct edac_pci_ctl_info *i7core_pci;
273 };
274
275 #define PCI_DESCR(device, function, device_id) \
276 .dev = (device), \
277 .func = (function), \
278 .dev_id = (device_id)
279
280 static const struct pci_id_descr pci_dev_descr_i7core_nehalem[] = {
281 /* Memory controller */
282 { PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_I7_MCR) },
283 { PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_I7_MC_TAD) },
284 /* Exists only for RDIMM */
285 { PCI_DESCR(3, 2, PCI_DEVICE_ID_INTEL_I7_MC_RAS), .optional = 1 },
286 { PCI_DESCR(3, 4, PCI_DEVICE_ID_INTEL_I7_MC_TEST) },
287
288 /* Channel 0 */
289 { PCI_DESCR(4, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH0_CTRL) },
290 { PCI_DESCR(4, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH0_ADDR) },
291 { PCI_DESCR(4, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH0_RANK) },
292 { PCI_DESCR(4, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH0_TC) },
293
294 /* Channel 1 */
295 { PCI_DESCR(5, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH1_CTRL) },
296 { PCI_DESCR(5, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH1_ADDR) },
297 { PCI_DESCR(5, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH1_RANK) },
298 { PCI_DESCR(5, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH1_TC) },
299
300 /* Channel 2 */
301 { PCI_DESCR(6, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH2_CTRL) },
302 { PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH2_ADDR) },
303 { PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH2_RANK) },
304 { PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH2_TC) },
305
306 /* Generic Non-core registers */
307 /*
308 * This is the PCI device on i7core and on Xeon 35xx (8086:2c41).
309 * On Xeon 55xx, however, it has a different id (8086:2c40), so
310 * the probing code needs to test for the other address in case
311 * this one fails.
312 */
313 { PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_I7_NONCORE) },
314
315 };
316
317 static const struct pci_id_descr pci_dev_descr_lynnfield[] = {
318 { PCI_DESCR( 3, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR) },
319 { PCI_DESCR( 3, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD) },
320 { PCI_DESCR( 3, 4, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST) },
321
322 { PCI_DESCR( 4, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL) },
323 { PCI_DESCR( 4, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR) },
324 { PCI_DESCR( 4, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK) },
325 { PCI_DESCR( 4, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC) },
326
327 { PCI_DESCR( 5, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL) },
328 { PCI_DESCR( 5, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR) },
329 { PCI_DESCR( 5, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK) },
330 { PCI_DESCR( 5, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC) },
331
332 /*
333 * This PCI device has an alternate address on some
334 * processors, such as the Core i7 860
335 */
336 { PCI_DESCR( 0, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE) },
337 };
338
339 static const struct pci_id_descr pci_dev_descr_i7core_westmere[] = {
340 /* Memory controller */
341 { PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR_REV2) },
342 { PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD_REV2) },
343 /* Exists only for RDIMM */
344 { PCI_DESCR(3, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_RAS_REV2), .optional = 1 },
345 { PCI_DESCR(3, 4, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST_REV2) },
346
347 /* Channel 0 */
348 { PCI_DESCR(4, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL_REV2) },
349 { PCI_DESCR(4, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR_REV2) },
350 { PCI_DESCR(4, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK_REV2) },
351 { PCI_DESCR(4, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_TC_REV2) },
352
353 /* Channel 1 */
354 { PCI_DESCR(5, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_CTRL_REV2) },
355 { PCI_DESCR(5, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_ADDR_REV2) },
356 { PCI_DESCR(5, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_RANK_REV2) },
357 { PCI_DESCR(5, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH1_TC_REV2) },
358
359 /* Channel 2 */
360 { PCI_DESCR(6, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_CTRL_REV2) },
361 { PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_ADDR_REV2) },
362 { PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_RANK_REV2) },
363 { PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_TC_REV2) },
364
365 /* Generic Non-core registers */
366 { PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_REV2) },
367
368 };
369
370 #define PCI_ID_TABLE_ENTRY(A) { .descr=A, .n_devs = ARRAY_SIZE(A) }
371 static const struct pci_id_table pci_dev_table[] = {
372 PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_nehalem),
373 PCI_ID_TABLE_ENTRY(pci_dev_descr_lynnfield),
374 PCI_ID_TABLE_ENTRY(pci_dev_descr_i7core_westmere),
375 };
376
377 /*
378 * pci_device_id table for which devices we are looking for
379 */
380 static const struct pci_device_id i7core_pci_tbl[] __devinitdata = {
381 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
382 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
383 {0,} /* 0 terminated list. */
384 };
385
386 /****************************************************************************
387 Ancillary status routines
388 ****************************************************************************/
389
390 /* MC_CONTROL bits */
391 #define CH_ACTIVE(pvt, ch) ((pvt)->info.mc_control & (1 << (8 + ch)))
392 #define ECCx8(pvt) ((pvt)->info.mc_control & (1 << 1))
393
394 /* MC_STATUS bits */
395 #define ECC_ENABLED(pvt) ((pvt)->info.mc_status & (1 << 4))
396 #define CH_DISABLED(pvt, ch) ((pvt)->info.mc_status & (1 << ch))
397
398 /* MC_MAX_DOD read functions */
399 static inline int numdimms(u32 dimms)
400 {
401 return (dimms & 0x3) + 1;
402 }
403
404 static inline int numrank(u32 rank)
405 {
406 static int ranks[4] = { 1, 2, 4, -EINVAL };
407
408 return ranks[rank & 0x3];
409 }
410
411 static inline int numbank(u32 bank)
412 {
413 static int banks[4] = { 4, 8, 16, -EINVAL };
414
415 return banks[bank & 0x3];
416 }
417
418 static inline int numrow(u32 row)
419 {
420 static int rows[8] = {
421 1 << 12, 1 << 13, 1 << 14, 1 << 15,
422 1 << 16, -EINVAL, -EINVAL, -EINVAL,
423 };
424
425 return rows[row & 0x7];
426 }
427
428 static inline int numcol(u32 col)
429 {
430 static int cols[4] = {
431 1 << 10, 1 << 11, 1 << 12, -EINVAL,
432 };
433 return cols[col & 0x3];
434 }
435
436 static struct i7core_dev *get_i7core_dev(u8 socket)
437 {
438 struct i7core_dev *i7core_dev;
439
440 list_for_each_entry(i7core_dev, &i7core_edac_list, list) {
441 if (i7core_dev->socket == socket)
442 return i7core_dev;
443 }
444
445 return NULL;
446 }
447
448 static struct i7core_dev *alloc_i7core_dev(u8 socket,
449 const struct pci_id_table *table)
450 {
451 struct i7core_dev *i7core_dev;
452
453 i7core_dev = kzalloc(sizeof(*i7core_dev), GFP_KERNEL);
454 if (!i7core_dev)
455 return NULL;
456
457 i7core_dev->pdev = kzalloc(sizeof(*i7core_dev->pdev) * table->n_devs,
458 GFP_KERNEL);
459 if (!i7core_dev->pdev) {
460 kfree(i7core_dev);
461 return NULL;
462 }
463
464 i7core_dev->socket = socket;
465 i7core_dev->n_devs = table->n_devs;
466 list_add_tail(&i7core_dev->list, &i7core_edac_list);
467
468 return i7core_dev;
469 }
470
471 static void free_i7core_dev(struct i7core_dev *i7core_dev)
472 {
473 list_del(&i7core_dev->list);
474 kfree(i7core_dev->pdev);
475 kfree(i7core_dev);
476 }
477
478 /****************************************************************************
479 Memory check routines
480 ****************************************************************************/
481 static struct pci_dev *get_pdev_slot_func(u8 socket, unsigned slot,
482 unsigned func)
483 {
484 struct i7core_dev *i7core_dev = get_i7core_dev(socket);
485 int i;
486
487 if (!i7core_dev)
488 return NULL;
489
490 for (i = 0; i < i7core_dev->n_devs; i++) {
491 if (!i7core_dev->pdev[i])
492 continue;
493
494 if (PCI_SLOT(i7core_dev->pdev[i]->devfn) == slot &&
495 PCI_FUNC(i7core_dev->pdev[i]->devfn) == func) {
496 return i7core_dev->pdev[i];
497 }
498 }
499
500 return NULL;
501 }
502
503 /**
504 * i7core_get_active_channels() - gets the number of channels and csrows
505 * @socket: Quick Path Interconnect socket
506 * @channels: Number of channels that will be returned
507 * @csrows: Number of csrows found
508 *
509 * Since the EDAC core needs to know in advance the number of available
510 * channels and csrows in order to allocate memory for them, probing has
511 * to run in two similar steps. The first step, implemented by this
512 * function, counts the number of csrows/channels present on one socket;
513 * this is used to properly size the mci components.
514 *
515 * Note that none of the currently available datasheets explain, or even
516 * mention, how csrows are seen by the memory controller, so we need to
517 * fake a csrow description for them. This driver maps one DIMM to one
518 * csrow.
519 */
520 static int i7core_get_active_channels(const u8 socket, unsigned *channels,
521 unsigned *csrows)
522 {
523 struct pci_dev *pdev = NULL;
524 int i, j;
525 u32 status, control;
526
527 *channels = 0;
528 *csrows = 0;
529
530 pdev = get_pdev_slot_func(socket, 3, 0);
531 if (!pdev) {
532 i7core_printk(KERN_ERR, "Couldn't find socket %d fn 3.0!!!\n",
533 socket);
534 return -ENODEV;
535 }
536
537 /* Device 3 function 0 reads */
538 pci_read_config_dword(pdev, MC_STATUS, &status);
539 pci_read_config_dword(pdev, MC_CONTROL, &control);
540
541 for (i = 0; i < NUM_CHANS; i++) {
542 u32 dimm_dod[3];
543 /* Check if the channel is active */
544 if (!(control & (1 << (8 + i))))
545 continue;
546
547 /* Check if the channel is disabled */
548 if (status & (1 << i))
549 continue;
550
551 pdev = get_pdev_slot_func(socket, i + 4, 1);
552 if (!pdev) {
553 i7core_printk(KERN_ERR, "Couldn't find socket %d "
554 "fn %d.%d!!!\n",
555 socket, i + 4, 1);
556 return -ENODEV;
557 }
558 /* Devices 4-6 function 1 */
559 pci_read_config_dword(pdev,
560 MC_DOD_CH_DIMM0, &dimm_dod[0]);
561 pci_read_config_dword(pdev,
562 MC_DOD_CH_DIMM1, &dimm_dod[1]);
563 pci_read_config_dword(pdev,
564 MC_DOD_CH_DIMM2, &dimm_dod[2]);
565
566 (*channels)++;
567
568 for (j = 0; j < 3; j++) {
569 if (!DIMM_PRESENT(dimm_dod[j]))
570 continue;
571 (*csrows)++;
572 }
573 }
574
575 debugf0("Number of active channels on socket %d: %d\n",
576 socket, *channels);
577
578 return 0;
579 }
580
581 static int get_dimm_config(const struct mem_ctl_info *mci)
582 {
583 struct i7core_pvt *pvt = mci->pvt_info;
584 struct csrow_info *csr;
585 struct pci_dev *pdev;
586 int i, j;
587 int csrow = 0;
588 unsigned long last_page = 0;
589 enum edac_type mode;
590 enum mem_type mtype;
591
592 /* Get data from the MC register, function 0 */
593 pdev = pvt->pci_mcr[0];
594 if (!pdev)
595 return -ENODEV;
596
597 /* Device 3 function 0 reads */
598 pci_read_config_dword(pdev, MC_CONTROL, &pvt->info.mc_control);
599 pci_read_config_dword(pdev, MC_STATUS, &pvt->info.mc_status);
600 pci_read_config_dword(pdev, MC_MAX_DOD, &pvt->info.max_dod);
601 pci_read_config_dword(pdev, MC_CHANNEL_MAPPER, &pvt->info.ch_map);
602
603 debugf0("QPI %d control=0x%08x status=0x%08x dod=0x%08x map=0x%08x\n",
604 pvt->i7core_dev->socket, pvt->info.mc_control, pvt->info.mc_status,
605 pvt->info.max_dod, pvt->info.ch_map);
606
607 if (ECC_ENABLED(pvt)) {
608 debugf0("ECC enabled with x%d SDCC\n", ECCx8(pvt) ? 8 : 4);
609 if (ECCx8(pvt))
610 mode = EDAC_S8ECD8ED;
611 else
612 mode = EDAC_S4ECD4ED;
613 } else {
614 debugf0("ECC disabled\n");
615 mode = EDAC_NONE;
616 }
617
618 /* FIXME: need to handle the error codes */
619 debugf0("DOD Max limits: DIMMS: %d, %d-ranked, %d-banked "
620 "x%x x 0x%x\n",
621 numdimms(pvt->info.max_dod),
622 numrank(pvt->info.max_dod >> 2),
623 numbank(pvt->info.max_dod >> 4),
624 numrow(pvt->info.max_dod >> 6),
625 numcol(pvt->info.max_dod >> 9));
626
627 for (i = 0; i < NUM_CHANS; i++) {
628 u32 data, dimm_dod[3], value[8];
629
630 if (!pvt->pci_ch[i][0])
631 continue;
632
633 if (!CH_ACTIVE(pvt, i)) {
634 debugf0("Channel %i is not active\n", i);
635 continue;
636 }
637 if (CH_DISABLED(pvt, i)) {
638 debugf0("Channel %i is disabled\n", i);
639 continue;
640 }
641
642 /* Devices 4-6 function 0 */
643 pci_read_config_dword(pvt->pci_ch[i][0],
644 MC_CHANNEL_DIMM_INIT_PARAMS, &data);
645
646 pvt->channel[i].ranks = (data & QUAD_RANK_PRESENT) ?
647 4 : 2;
648
649 if (data & REGISTERED_DIMM)
650 mtype = MEM_RDDR3;
651 else
652 mtype = MEM_DDR3;
653 #if 0
654 if (data & THREE_DIMMS_PRESENT)
655 pvt->channel[i].dimms = 3;
656 else if (data & SINGLE_QUAD_RANK_PRESENT)
657 pvt->channel[i].dimms = 1;
658 else
659 pvt->channel[i].dimms = 2;
660 #endif
661
662 /* Devices 4-6 function 1 */
663 pci_read_config_dword(pvt->pci_ch[i][1],
664 MC_DOD_CH_DIMM0, &dimm_dod[0]);
665 pci_read_config_dword(pvt->pci_ch[i][1],
666 MC_DOD_CH_DIMM1, &dimm_dod[1]);
667 pci_read_config_dword(pvt->pci_ch[i][1],
668 MC_DOD_CH_DIMM2, &dimm_dod[2]);
669
670 debugf0("Ch%d phy rd%d, wr%d (0x%08x): "
671 "%d ranks, %cDIMMs\n",
672 i,
673 RDLCH(pvt->info.ch_map, i), WRLCH(pvt->info.ch_map, i),
674 data,
675 pvt->channel[i].ranks,
676 (data & REGISTERED_DIMM) ? 'R' : 'U');
677
678 for (j = 0; j < 3; j++) {
679 u32 banks, ranks, rows, cols;
680 u32 size, npages;
681
682 if (!DIMM_PRESENT(dimm_dod[j]))
683 continue;
684
685 banks = numbank(MC_DOD_NUMBANK(dimm_dod[j]));
686 ranks = numrank(MC_DOD_NUMRANK(dimm_dod[j]));
687 rows = numrow(MC_DOD_NUMROW(dimm_dod[j]));
688 cols = numcol(MC_DOD_NUMCOL(dimm_dod[j]));
689
690 /* DDR3 has 8 I/O banks */
691 size = (rows * cols * banks * ranks) >> (20 - 3);
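/*
 * Worked example with hypothetical geometry: rows = 1 << 14,
 * cols = 1 << 11, banks = 8, ranks = 2. Each address stores
 * 8 bytes, so size = (2^14 * 2^11 * 8 * 2 * 8) >> 20 = 4096 MB.
 */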
692
693 pvt->channel[i].dimms++;
694
695 debugf0("\tdimm %d %d MB offset: %x, "
696 "bank: %d, rank: %d, row: %#x, col: %#x\n",
697 j, size,
698 RANKOFFSET(dimm_dod[j]),
699 banks, ranks, rows, cols);
700
701 npages = MiB_TO_PAGES(size);
702
703 csr = &mci->csrows[csrow];
704 csr->first_page = last_page + 1;
705 last_page += npages;
706 csr->last_page = last_page;
707 csr->nr_pages = npages;
708
709 csr->page_mask = 0;
710 csr->grain = 8;
711 csr->csrow_idx = csrow;
712 csr->nr_channels = 1;
713
714 csr->channels[0].chan_idx = i;
715 csr->channels[0].ce_count = 0;
716
717 pvt->csrow_map[i][j] = csrow;
718
719 switch (banks) {
720 case 4:
721 csr->dtype = DEV_X4;
722 break;
723 case 8:
724 csr->dtype = DEV_X8;
725 break;
726 case 16:
727 csr->dtype = DEV_X16;
728 break;
729 default:
730 csr->dtype = DEV_UNKNOWN;
731 }
732
733 csr->edac_mode = mode;
734 csr->mtype = mtype;
735
736 csrow++;
737 }
738
739 pci_read_config_dword(pdev, MC_SAG_CH_0, &value[0]);
740 pci_read_config_dword(pdev, MC_SAG_CH_1, &value[1]);
741 pci_read_config_dword(pdev, MC_SAG_CH_2, &value[2]);
742 pci_read_config_dword(pdev, MC_SAG_CH_3, &value[3]);
743 pci_read_config_dword(pdev, MC_SAG_CH_4, &value[4]);
744 pci_read_config_dword(pdev, MC_SAG_CH_5, &value[5]);
745 pci_read_config_dword(pdev, MC_SAG_CH_6, &value[6]);
746 pci_read_config_dword(pdev, MC_SAG_CH_7, &value[7]);
747 debugf1("\t[%i] DIVBY3\tREMOVED\tOFFSET\n", i);
748 for (j = 0; j < 8; j++)
749 debugf1("\t\t%#x\t%#x\t%#x\n",
750 (value[j] >> 27) & 0x1,
751 (value[j] >> 24) & 0x7,
752 (value[j] & ((1 << 24) - 1)));
753 }
754
755 return 0;
756 }
757
758 /****************************************************************************
759 Error insertion routines
760 ****************************************************************************/
761
762 /* The i7core has independent error injection features per channel.
763 However, to keep the code simpler, we don't allow enabling error injection
764 on more than one channel at a time.
765 Also, since a change to an inject parameter is only applied at enable time,
766 we disable error injection on every write to the sysfs nodes that
767 control the error code injection.
768 */
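/*
 * Illustrative sysfs usage, as a sketch: the "mc0" instance name below is
 * an assumption, since the actual path depends on the socket/instance:
 *
 * echo 2 > /sys/devices/system/edac/mc/mc0/inject_type
 * echo 3 > /sys/devices/system/edac/mc/mc0/inject_section
 * echo 1 > /sys/devices/system/edac/mc/mc0/inject_eccmask
 * echo any > /sys/devices/system/edac/mc/mc0/inject_addrmatch/dimm
 * echo 1 > /sys/devices/system/edac/mc/mc0/inject_enable
 *
 * This requests a single (non-repeated) ECC error, on both 32-byte
 * cacheline halves, flipping ECC bit 0, on any DIMM of the selected
 * channel.
 */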
769 static int disable_inject(const struct mem_ctl_info *mci)
770 {
771 struct i7core_pvt *pvt = mci->pvt_info;
772
773 pvt->inject.enable = 0;
774
775 if (!pvt->pci_ch[pvt->inject.channel][0])
776 return -ENODEV;
777
778 pci_write_config_dword(pvt->pci_ch[pvt->inject.channel][0],
779 MC_CHANNEL_ERROR_INJECT, 0);
780
781 return 0;
782 }
783
784 /*
785 * i7core inject.section
786 *
787 * accept and store error injection inject.section value
788 * bit 0 - refers to the lower 32-byte half cacheline
789 * bit 1 - refers to the upper 32-byte half cacheline
790 */
791 static ssize_t i7core_inject_section_store(struct mem_ctl_info *mci,
792 const char *data, size_t count)
793 {
794 struct i7core_pvt *pvt = mci->pvt_info;
795 unsigned long value;
796 int rc;
797
798 if (pvt->inject.enable)
799 disable_inject(mci);
800
801 rc = strict_strtoul(data, 10, &value);
802 if ((rc < 0) || (value > 3))
803 return -EIO;
804
805 pvt->inject.section = (u32) value;
806 return count;
807 }
808
809 static ssize_t i7core_inject_section_show(struct mem_ctl_info *mci,
810 char *data)
811 {
812 struct i7core_pvt *pvt = mci->pvt_info;
813 return sprintf(data, "0x%08x\n", pvt->inject.section);
814 }
815
816 /*
817 * i7core inject.type
818 *
819 * accept and store error injection inject.type value
820 * bit 0 - repeat enable - Enable error repetition
821 * bit 1 - inject ECC error
822 * bit 2 - inject parity error
823 */
824 static ssize_t i7core_inject_type_store(struct mem_ctl_info *mci,
825 const char *data, size_t count)
826 {
827 struct i7core_pvt *pvt = mci->pvt_info;
828 unsigned long value;
829 int rc;
830
831 if (pvt->inject.enable)
832 disable_inject(mci);
833
834 rc = strict_strtoul(data, 10, &value);
835 if ((rc < 0) || (value > 7))
836 return -EIO;
837
838 pvt->inject.type = (u32) value;
839 return count;
840 }
841
842 static ssize_t i7core_inject_type_show(struct mem_ctl_info *mci,
843 char *data)
844 {
845 struct i7core_pvt *pvt = mci->pvt_info;
846 return sprintf(data, "0x%08x\n", pvt->inject.type);
847 }
848
849 /*
850 * i7core_inject_eccmask_store
851 *
852 * The type of error (UE/CE) will depend on the inject.eccmask value:
853 * Any bits set to a 1 will flip the corresponding ECC bit
854 * Correctable errors can be injected by flipping 1 bit or the bits within
855 * a symbol pair (2 consecutive aligned 8-bit pairs - i.e. 7:0 and 15:8 or
856 * 23:16 and 31:24). Flipping bits in two symbol pairs will cause an
857 * uncorrectable error to be injected.
858 */
859 static ssize_t i7core_inject_eccmask_store(struct mem_ctl_info *mci,
860 const char *data, size_t count)
861 {
862 struct i7core_pvt *pvt = mci->pvt_info;
863 unsigned long value;
864 int rc;
865
866 if (pvt->inject.enable)
867 disable_inject(mci);
868
869 rc = strict_strtoul(data, 10, &value);
870 if (rc < 0)
871 return -EIO;
872
873 pvt->inject.eccmask = (u32) value;
874 return count;
875 }
876
877 static ssize_t i7core_inject_eccmask_show(struct mem_ctl_info *mci,
878 char *data)
879 {
880 struct i7core_pvt *pvt = mci->pvt_info;
881 return sprintf(data, "0x%08x\n", pvt->inject.eccmask);
882 }
883
884 /*
885 * i7core_addrmatch
886 *
887 * Sysfs nodes used to set the address-match criteria for error injection:
888 * channel, dimm, rank, bank, page and col. Writing "any" (stored as -1)
889 * to a node makes the MCU ignore that field when matching addresses.
890 */
894
895 #define DECLARE_ADDR_MATCH(param, limit) \
896 static ssize_t i7core_inject_store_##param( \
897 struct mem_ctl_info *mci, \
898 const char *data, size_t count) \
899 { \
900 struct i7core_pvt *pvt; \
901 long value; \
902 int rc; \
903 \
904 debugf1("%s()\n", __func__); \
905 pvt = mci->pvt_info; \
906 \
907 if (pvt->inject.enable) \
908 disable_inject(mci); \
909 \
910 if (!strcasecmp(data, "any") || !strcasecmp(data, "any\n"))\
911 value = -1; \
912 else { \
913 rc = strict_strtoul(data, 10, &value); \
914 if ((rc < 0) || (value >= limit)) \
915 return -EIO; \
916 } \
917 \
918 pvt->inject.param = value; \
919 \
920 return count; \
921 } \
922 \
923 static ssize_t i7core_inject_show_##param( \
924 struct mem_ctl_info *mci, \
925 char *data) \
926 { \
927 struct i7core_pvt *pvt; \
928 \
929 pvt = mci->pvt_info; \
930 debugf1("%s() pvt=%p\n", __func__, pvt); \
931 if (pvt->inject.param < 0) \
932 return sprintf(data, "any\n"); \
933 else \
934 return sprintf(data, "%d\n", pvt->inject.param);\
935 }
936
937 #define ATTR_ADDR_MATCH(param) \
938 { \
939 .attr = { \
940 .name = #param, \
941 .mode = (S_IRUGO | S_IWUSR) \
942 }, \
943 .show = i7core_inject_show_##param, \
944 .store = i7core_inject_store_##param, \
945 }
946
947 DECLARE_ADDR_MATCH(channel, 3);
948 DECLARE_ADDR_MATCH(dimm, 3);
949 DECLARE_ADDR_MATCH(rank, 4);
950 DECLARE_ADDR_MATCH(bank, 32);
951 DECLARE_ADDR_MATCH(page, 0x10000);
952 DECLARE_ADDR_MATCH(col, 0x4000);
953
954 static int write_and_test(struct pci_dev *dev, const int where, const u32 val)
955 {
956 u32 read;
957 int count;
958
959 debugf0("setting pci %02x:%02x.%x reg=%02x value=%08x\n",
960 dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
961 where, val);
962
963 for (count = 0; count < 10; count++) {
964 if (count)
965 msleep(100);
966 pci_write_config_dword(dev, where, val);
967 pci_read_config_dword(dev, where, &read);
968
969 if (read == val)
970 return 0;
971 }
972
973 i7core_printk(KERN_ERR, "Error during set pci %02x:%02x.%x reg=%02x "
974 "write=%08x. Read=%08x\n",
975 dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
976 where, val, read);
977
978 return -EINVAL;
979 }
980
981 /*
982 * This routine prepares the Memory Controller for error injection.
983 * The error will be injected when some process tries to write to the
984 * memory that matches the given criteria.
985 * The criteria can be set in terms of a mask where dimm, rank, bank, page
986 * and col can be specified.
987 * A -1 value for any of the mask items will make the MCU ignore
988 * that matching criterion for error injection.
989 *
990 * Note that the error will only happen after a write operation to
991 * memory that matches the condition. If REPEAT_EN is not enabled in the
992 * inject mask, just one error is produced. Otherwise, errors keep being
993 * generated until the inject mask is cleared.
994 *
995 * FIXME: This routine assumes that the MAXNUMDIMMS value of MC_MAX_DOD
996 * is reliable enough to check if the MC is using the
997 * three channels. However, this is not clear in the datasheet.
998 */
999 static ssize_t i7core_inject_enable_store(struct mem_ctl_info *mci,
1000 const char *data, size_t count)
1001 {
1002 struct i7core_pvt *pvt = mci->pvt_info;
1003 u32 injectmask;
1004 u64 mask = 0;
1005 int rc;
1006 long enable;
1007
1008 if (!pvt->pci_ch[pvt->inject.channel][0])
1009 return 0;
1010
1011 rc = strict_strtoul(data, 10, &enable);
1012 if (rc < 0)
1013 return 0;
1014
1015 if (enable) {
1016 pvt->inject.enable = 1;
1017 } else {
1018 disable_inject(mci);
1019 return count;
1020 }
1021
1022 /* Sets pvt->inject.dimm mask */
1023 if (pvt->inject.dimm < 0)
1024 mask |= 1LL << 41;
1025 else {
1026 if (pvt->channel[pvt->inject.channel].dimms > 2)
1027 mask |= (pvt->inject.dimm & 0x3LL) << 35;
1028 else
1029 mask |= (pvt->inject.dimm & 0x1LL) << 36;
1030 }
1031
1032 /* Sets pvt->inject.rank mask */
1033 if (pvt->inject.rank < 0)
1034 mask |= 1LL << 40;
1035 else {
1036 if (pvt->channel[pvt->inject.channel].dimms > 2)
1037 mask |= (pvt->inject.rank & 0x1LL) << 34;
1038 else
1039 mask |= (pvt->inject.rank & 0x3LL) << 34;
1040 }
1041
1042 /* Sets pvt->inject.bank mask */
1043 if (pvt->inject.bank < 0)
1044 mask |= 1LL << 39;
1045 else
1046 mask |= (pvt->inject.bank & 0x1fLL) << 30;
1047
1048 /* Sets pvt->inject.page mask */
1049 if (pvt->inject.page < 0)
1050 mask |= 1LL << 38;
1051 else
1052 mask |= (pvt->inject.page & 0xffff) << 14;
1053
1054 /* Sets pvt->inject.column mask */
1055 if (pvt->inject.col < 0)
1056 mask |= 1LL << 37;
1057 else
1058 mask |= (pvt->inject.col & 0x3fff);
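/*
 * Worked example: with inject.dimm = 1 and every other criterion left as
 * "any" (-1), on a channel with at most 2 DIMMs, the code above builds
 * mask = (1LL << 36) | (1LL << 40) | (1LL << 39) | (1LL << 38) |
 * (1LL << 37): match DIMM 1 and ignore rank, bank, page and column.
 */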
1059
1060 /*
1061 * bit 0: REPEAT_EN
1062 * bits 1-2: MASK_HALF_CACHELINE
1063 * bit 3: INJECT_ECC
1064 * bit 4: INJECT_ADDR_PARITY
1065 */
1066
1067 injectmask = (pvt->inject.type & 1) |
1068 (pvt->inject.section & 0x3) << 1 |
1069 (pvt->inject.type & 0x6) << (3 - 1);
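/*
 * Worked example: inject.type = 3 (repeat + ECC) and inject.section = 1
 * (lower 32-byte half) yield injectmask = 1 | (1 << 1) | (2 << 2) = 0x0b,
 * i.e. REPEAT_EN, MASK_LSB32_CACHELINE and INJECT_ECC.
 */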
1070
1071 /* Unlock writes to registers - this register is write only */
1072 pci_write_config_dword(pvt->pci_noncore,
1073 MC_CFG_CONTROL, 0x2);
1074
1075 write_and_test(pvt->pci_ch[pvt->inject.channel][0],
1076 MC_CHANNEL_ADDR_MATCH, mask);
1077 write_and_test(pvt->pci_ch[pvt->inject.channel][0],
1078 MC_CHANNEL_ADDR_MATCH + 4, mask >> 32L);
1079
1080 write_and_test(pvt->pci_ch[pvt->inject.channel][0],
1081 MC_CHANNEL_ERROR_MASK, pvt->inject.eccmask);
1082
1083 write_and_test(pvt->pci_ch[pvt->inject.channel][0],
1084 MC_CHANNEL_ERROR_INJECT, injectmask);
1085
1086 /*
1087 * This is something undocumented, based on my tests
1088 * Without writing 8 to this register, errors aren't injected. Not sure
1089 * why.
1090 */
1091 pci_write_config_dword(pvt->pci_noncore,
1092 MC_CFG_CONTROL, 8);
1093
1094 debugf0("Error inject addr match 0x%016llx, ecc 0x%08x,"
1095 " inject 0x%08x\n",
1096 mask, pvt->inject.eccmask, injectmask);
1097
1098
1099 return count;
1100 }
1101
1102 static ssize_t i7core_inject_enable_show(struct mem_ctl_info *mci,
1103 char *data)
1104 {
1105 struct i7core_pvt *pvt = mci->pvt_info;
1106 u32 injectmask;
1107
1108 if (!pvt->pci_ch[pvt->inject.channel][0])
1109 return 0;
1110
1111 pci_read_config_dword(pvt->pci_ch[pvt->inject.channel][0],
1112 MC_CHANNEL_ERROR_INJECT, &injectmask);
1113
1114 debugf0("Inject error read: 0x%08x\n", injectmask);
1115
1116 if (injectmask & 0x0c)
1117 pvt->inject.enable = 1;
1118
1119 return sprintf(data, "%d\n", pvt->inject.enable);
1120 }
1121
1122 #define DECLARE_COUNTER(param) \
1123 static ssize_t i7core_show_counter_##param( \
1124 struct mem_ctl_info *mci, \
1125 char *data) \
1126 { \
1127 struct i7core_pvt *pvt = mci->pvt_info; \
1128 \
1129 debugf1("%s() \n", __func__); \
1130 if (!pvt->ce_count_available || (pvt->is_registered)) \
1131 return sprintf(data, "data unavailable\n"); \
1132 return sprintf(data, "%lu\n", \
1133 pvt->udimm_ce_count[param]); \
1134 }
1135
1136 #define ATTR_COUNTER(param) \
1137 { \
1138 .attr = { \
1139 .name = __stringify(udimm##param), \
1140 .mode = (S_IRUGO | S_IWUSR) \
1141 }, \
1142 .show = i7core_show_counter_##param \
1143 }
1144
1145 DECLARE_COUNTER(0);
1146 DECLARE_COUNTER(1);
1147 DECLARE_COUNTER(2);
1148
1149 /*
1150 * Sysfs struct
1151 */
1152
1153 static const struct mcidev_sysfs_attribute i7core_addrmatch_attrs[] = {
1154 ATTR_ADDR_MATCH(channel),
1155 ATTR_ADDR_MATCH(dimm),
1156 ATTR_ADDR_MATCH(rank),
1157 ATTR_ADDR_MATCH(bank),
1158 ATTR_ADDR_MATCH(page),
1159 ATTR_ADDR_MATCH(col),
1160 { } /* End of list */
1161 };
1162
1163 static const struct mcidev_sysfs_group i7core_inject_addrmatch = {
1164 .name = "inject_addrmatch",
1165 .mcidev_attr = i7core_addrmatch_attrs,
1166 };
1167
1168 static const struct mcidev_sysfs_attribute i7core_udimm_counters_attrs[] = {
1169 ATTR_COUNTER(0),
1170 ATTR_COUNTER(1),
1171 ATTR_COUNTER(2),
1172 { .attr = { .name = NULL } }
1173 };
1174
1175 static const struct mcidev_sysfs_group i7core_udimm_counters = {
1176 .name = "all_channel_counts",
1177 .mcidev_attr = i7core_udimm_counters_attrs,
1178 };
1179
1180 static const struct mcidev_sysfs_attribute i7core_sysfs_rdimm_attrs[] = {
1181 {
1182 .attr = {
1183 .name = "inject_section",
1184 .mode = (S_IRUGO | S_IWUSR)
1185 },
1186 .show = i7core_inject_section_show,
1187 .store = i7core_inject_section_store,
1188 }, {
1189 .attr = {
1190 .name = "inject_type",
1191 .mode = (S_IRUGO | S_IWUSR)
1192 },
1193 .show = i7core_inject_type_show,
1194 .store = i7core_inject_type_store,
1195 }, {
1196 .attr = {
1197 .name = "inject_eccmask",
1198 .mode = (S_IRUGO | S_IWUSR)
1199 },
1200 .show = i7core_inject_eccmask_show,
1201 .store = i7core_inject_eccmask_store,
1202 }, {
1203 .grp = &i7core_inject_addrmatch,
1204 }, {
1205 .attr = {
1206 .name = "inject_enable",
1207 .mode = (S_IRUGO | S_IWUSR)
1208 },
1209 .show = i7core_inject_enable_show,
1210 .store = i7core_inject_enable_store,
1211 },
1212 { } /* End of list */
1213 };
1214
1215 static const struct mcidev_sysfs_attribute i7core_sysfs_udimm_attrs[] = {
1216 {
1217 .attr = {
1218 .name = "inject_section",
1219 .mode = (S_IRUGO | S_IWUSR)
1220 },
1221 .show = i7core_inject_section_show,
1222 .store = i7core_inject_section_store,
1223 }, {
1224 .attr = {
1225 .name = "inject_type",
1226 .mode = (S_IRUGO | S_IWUSR)
1227 },
1228 .show = i7core_inject_type_show,
1229 .store = i7core_inject_type_store,
1230 }, {
1231 .attr = {
1232 .name = "inject_eccmask",
1233 .mode = (S_IRUGO | S_IWUSR)
1234 },
1235 .show = i7core_inject_eccmask_show,
1236 .store = i7core_inject_eccmask_store,
1237 }, {
1238 .grp = &i7core_inject_addrmatch,
1239 }, {
1240 .attr = {
1241 .name = "inject_enable",
1242 .mode = (S_IRUGO | S_IWUSR)
1243 },
1244 .show = i7core_inject_enable_show,
1245 .store = i7core_inject_enable_store,
1246 }, {
1247 .grp = &i7core_udimm_counters,
1248 },
1249 { } /* End of list */
1250 };
1251
1252 /****************************************************************************
1253 Device initialization routines: put/get, init/exit
1254 ****************************************************************************/
1255
1256 /*
1257 * i7core_put_devices 'put' all the devices that we have
1258 * reserved via 'get'
1259 */
1260 static void i7core_put_devices(struct i7core_dev *i7core_dev)
1261 {
1262 int i;
1263
1264 debugf0(__FILE__ ": %s()\n", __func__);
1265 for (i = 0; i < i7core_dev->n_devs; i++) {
1266 struct pci_dev *pdev = i7core_dev->pdev[i];
1267 if (!pdev)
1268 continue;
1269 debugf0("Removing dev %02x:%02x.%d\n",
1270 pdev->bus->number,
1271 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
1272 pci_dev_put(pdev);
1273 }
1274 }
1275
1276 static void i7core_put_all_devices(void)
1277 {
1278 struct i7core_dev *i7core_dev, *tmp;
1279
1280 list_for_each_entry_safe(i7core_dev, tmp, &i7core_edac_list, list) {
1281 i7core_put_devices(i7core_dev);
1282 free_i7core_dev(i7core_dev);
1283 }
1284 }
1285
1286 static void __init i7core_xeon_pci_fixup(const struct pci_id_table *table)
1287 {
1288 struct pci_dev *pdev = NULL;
1289 int i;
1290
1291 /*
1292 * On Xeon 55xx, the Intel Quickpath Arch Generic Non-core PCI buses
1293 * aren't announced by ACPI, so we need to use a legacy bus scan
1294 * to detect them.
1295 */
1296 while (table && table->descr) {
1297 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, table->descr[0].dev_id, NULL);
1298 if (unlikely(!pdev)) {
1299 for (i = 0; i < MAX_SOCKET_BUSES; i++)
1300 pcibios_scan_specific_bus(255-i);
1301 }
1302 pci_dev_put(pdev);
1303 table++;
1304 }
1305 }
1306
1307 static unsigned i7core_pci_lastbus(void)
1308 {
1309 int last_bus = 0, bus;
1310 struct pci_bus *b = NULL;
1311
1312 while ((b = pci_find_next_bus(b)) != NULL) {
1313 bus = b->number;
1314 debugf0("Found bus %d\n", bus);
1315 if (bus > last_bus)
1316 last_bus = bus;
1317 }
1318
1319 debugf0("Last bus %d\n", last_bus);
1320
1321 return last_bus;
1322 }
1323
1324 /*
1325 * i7core_get_onedevice Find and perform 'get' operation on one
1326 * device/function that we want to reference for this driver,
1327 * storing it in the per-socket i7core_dev list
1328 */
1330 static int i7core_get_onedevice(struct pci_dev **prev,
1331 const struct pci_id_table *table,
1332 const unsigned devno,
1333 const unsigned last_bus)
1334 {
1335 struct i7core_dev *i7core_dev;
1336 const struct pci_id_descr *dev_descr = &table->descr[devno];
1337
1338 struct pci_dev *pdev = NULL;
1339 u8 bus = 0;
1340 u8 socket = 0;
1341
1342 pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
1343 dev_descr->dev_id, *prev);
1344
1345 /*
1346 * On Xeon 55xx, the Intel Quckpath Arch Generic Non-core regs
1347 * is at addr 8086:2c40, instead of 8086:2c41. So, we need
1348 * to probe for the alternate address in case of failure
1349 */
1350 if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_I7_NONCORE && !pdev)
1351 pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
1352 PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT, *prev);
1353
1354 if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE && !pdev)
1355 pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
1356 PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT,
1357 *prev);
1358
1359 if (!pdev) {
1360 if (*prev) {
1361 *prev = pdev;
1362 return 0;
1363 }
1364
1365 if (dev_descr->optional)
1366 return 0;
1367
1368 if (devno == 0)
1369 return -ENODEV;
1370
1371 i7core_printk(KERN_INFO,
1372 "Device not found: dev %02x.%d PCI ID %04x:%04x\n",
1373 dev_descr->dev, dev_descr->func,
1374 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
1375
1376 /* End of list, leave */
1377 return -ENODEV;
1378 }
1379 bus = pdev->bus->number;
1380
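/*
 * The socket is inferred from the bus topology: non-core buses are
 * numbered downwards from the last PCI bus, so with last_bus = 255,
 * socket 0 devices sit on bus 255 and socket 1 devices on bus 254.
 */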
1381 socket = last_bus - bus;
1382
1383 i7core_dev = get_i7core_dev(socket);
1384 if (!i7core_dev) {
1385 i7core_dev = alloc_i7core_dev(socket, table);
1386 if (!i7core_dev) {
1387 pci_dev_put(pdev);
1388 return -ENOMEM;
1389 }
1390 }
1391
1392 if (i7core_dev->pdev[devno]) {
1393 i7core_printk(KERN_ERR,
1394 "Duplicated device for "
1395 "dev %02x:%02x.%d PCI ID %04x:%04x\n",
1396 bus, dev_descr->dev, dev_descr->func,
1397 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
1398 pci_dev_put(pdev);
1399 return -ENODEV;
1400 }
1401
1402 i7core_dev->pdev[devno] = pdev;
1403
1404 /* Sanity check */
1405 if (unlikely(PCI_SLOT(pdev->devfn) != dev_descr->dev ||
1406 PCI_FUNC(pdev->devfn) != dev_descr->func)) {
1407 i7core_printk(KERN_ERR,
1408 "Device PCI ID %04x:%04x "
1409 "has dev %02x:%02x.%d instead of dev %02x:%02x.%d\n",
1410 PCI_VENDOR_ID_INTEL, dev_descr->dev_id,
1411 bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
1412 bus, dev_descr->dev, dev_descr->func);
1413 return -ENODEV;
1414 }
1415
1416 /* Be sure that the device is enabled */
1417 if (unlikely(pci_enable_device(pdev) < 0)) {
1418 i7core_printk(KERN_ERR,
1419 "Couldn't enable "
1420 "dev %02x:%02x.%d PCI ID %04x:%04x\n",
1421 bus, dev_descr->dev, dev_descr->func,
1422 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
1423 return -ENODEV;
1424 }
1425
1426 debugf0("Detected socket %d dev %02x:%02x.%d PCI ID %04x:%04x\n",
1427 socket, bus, dev_descr->dev,
1428 dev_descr->func,
1429 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
1430
1431 *prev = pdev;
1432
1433 return 0;
1434 }
1435
1436 static int i7core_get_all_devices(void)
1437 {
1438 int i, j, rc, last_bus;
1439 struct pci_dev *pdev = NULL;
1440 const struct pci_id_table *table;
1441
1442 last_bus = i7core_pci_lastbus();
1443
1444 for (j = 0; j < ARRAY_SIZE(pci_dev_table); j++) {
1445 table = &pci_dev_table[j];
1446 for (i = 0; i < table->n_devs; i++) {
1447 pdev = NULL;
1448 do {
1449 rc = i7core_get_onedevice(&pdev, table, i,
1450 last_bus);
1451 if (rc < 0) {
1452 if (i == 0) {
1453 i = table->n_devs;
1454 break;
1455 }
1456 i7core_put_all_devices();
1457 return -ENODEV;
1458 }
1459 } while (pdev);
1460 }
1461 }
1462
1463 return 0;
1464 }
1465
1466 static int mci_bind_devs(struct mem_ctl_info *mci,
1467 struct i7core_dev *i7core_dev)
1468 {
1469 struct i7core_pvt *pvt = mci->pvt_info;
1470 struct pci_dev *pdev;
1471 int i, func, slot;
1472
1473 pvt->is_registered = 0;
1474 for (i = 0; i < i7core_dev->n_devs; i++) {
1475 pdev = i7core_dev->pdev[i];
1476 if (!pdev)
1477 continue;
1478
1479 func = PCI_FUNC(pdev->devfn);
1480 slot = PCI_SLOT(pdev->devfn);
1481 if (slot == 3) {
1482 if (unlikely(func > MAX_MCR_FUNC))
1483 goto error;
1484 pvt->pci_mcr[func] = pdev;
1485 } else if (likely(slot >= 4 && slot < 4 + NUM_CHANS)) {
1486 if (unlikely(func > MAX_CHAN_FUNC))
1487 goto error;
1488 pvt->pci_ch[slot - 4][func] = pdev;
1489 } else if (!slot && !func)
1490 pvt->pci_noncore = pdev;
1491 else
1492 goto error;
1493
1494 debugf0("Associated fn %d.%d, dev = %p, socket %d\n",
1495 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
1496 pdev, i7core_dev->socket);
1497
1498 if (PCI_SLOT(pdev->devfn) == 3 &&
1499 PCI_FUNC(pdev->devfn) == 2)
1500 pvt->is_registered = 1;
1501 }
1502
1503 return 0;
1504
1505 error:
1506 i7core_printk(KERN_ERR, "Device %d, function %d "
1507 "is out of the expected range\n",
1508 slot, func);
1509 return -EINVAL;
1510 }
1511
1512 /****************************************************************************
1513 Error check routines
1514 ****************************************************************************/
1515 static void i7core_rdimm_update_csrow(struct mem_ctl_info *mci,
1516 const int chan,
1517 const int dimm,
1518 const int add)
1519 {
1520 char *msg;
1521 struct i7core_pvt *pvt = mci->pvt_info;
1522 int row = pvt->csrow_map[chan][dimm], i;
1523
1524 for (i = 0; i < add; i++) {
1525 msg = kasprintf(GFP_KERNEL, "Corrected error "
1526 "(Socket=%d channel=%d dimm=%d)",
1527 pvt->i7core_dev->socket, chan, dimm);
1528
1529 edac_mc_handle_fbd_ce(mci, row, 0, msg);
1530 kfree (msg);
1531 }
1532 }
1533
1534 static void i7core_rdimm_update_ce_count(struct mem_ctl_info *mci,
1535 const int chan,
1536 const int new0,
1537 const int new1,
1538 const int new2)
1539 {
1540 struct i7core_pvt *pvt = mci->pvt_info;
1541 int add0 = 0, add1 = 0, add2 = 0;
1542 /* Updates CE counters if it is not the first time here */
1543 if (pvt->ce_count_available) {
1544 /* Updates CE counters */
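/*
 * The hardware CE counters are 15 bits wide (see the DIMM*_COR_ERR
 * masks), so a negative delta below means the counter wrapped and is
 * corrected modulo 0x7fff: e.g. old = 0x7ffe, new = 3 gives
 * add = -32763, corrected to +4 new errors.
 */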
1545
1546 add2 = new2 - pvt->rdimm_last_ce_count[chan][2];
1547 add1 = new1 - pvt->rdimm_last_ce_count[chan][1];
1548 add0 = new0 - pvt->rdimm_last_ce_count[chan][0];
1549
1550 if (add2 < 0)
1551 add2 += 0x7fff;
1552 pvt->rdimm_ce_count[chan][2] += add2;
1553
1554 if (add1 < 0)
1555 add1 += 0x7fff;
1556 pvt->rdimm_ce_count[chan][1] += add1;
1557
1558 if (add0 < 0)
1559 add0 += 0x7fff;
1560 pvt->rdimm_ce_count[chan][0] += add0;
1561 } else
1562 pvt->ce_count_available = 1;
1563
1564 /* Store the new values */
1565 pvt->rdimm_last_ce_count[chan][2] = new2;
1566 pvt->rdimm_last_ce_count[chan][1] = new1;
1567 pvt->rdimm_last_ce_count[chan][0] = new0;
1568
1569 /* Update the EDAC core */
1570 if (add0 != 0)
1571 i7core_rdimm_update_csrow(mci, chan, 0, add0);
1572 if (add1 != 0)
1573 i7core_rdimm_update_csrow(mci, chan, 1, add1);
1574 if (add2 != 0)
1575 i7core_rdimm_update_csrow(mci, chan, 2, add2);
1576
1577 }
1578
1579 static void i7core_rdimm_check_mc_ecc_err(struct mem_ctl_info *mci)
1580 {
1581 struct i7core_pvt *pvt = mci->pvt_info;
1582 u32 rcv[3][2];
1583 int i, new0, new1, new2;
1584
1585 /* Read DEV 3: FUN 2: MC_COR_ECC_CNT regs directly */
1586 pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_0,
1587 &rcv[0][0]);
1588 pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_1,
1589 &rcv[0][1]);
1590 pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_2,
1591 &rcv[1][0]);
1592 pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_3,
1593 &rcv[1][1]);
1594 pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_4,
1595 &rcv[2][0]);
1596 pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_5,
1597 &rcv[2][1]);
1598 for (i = 0 ; i < 3; i++) {
1599 debugf3("MC_COR_ECC_CNT%d = 0x%x; MC_COR_ECC_CNT%d = 0x%x\n",
1600 (i * 2), rcv[i][0], (i * 2) + 1, rcv[i][1]);
1601 /* If the channel has 3 dimms */
1602 if (pvt->channel[i].dimms > 2) {
1603 new0 = DIMM_BOT_COR_ERR(rcv[i][0]);
1604 new1 = DIMM_TOP_COR_ERR(rcv[i][0]);
1605 new2 = DIMM_BOT_COR_ERR(rcv[i][1]);
1606 } else {
1607 new0 = DIMM_TOP_COR_ERR(rcv[i][0]) +
1608 DIMM_BOT_COR_ERR(rcv[i][0]);
1609 new1 = DIMM_TOP_COR_ERR(rcv[i][1]) +
1610 DIMM_BOT_COR_ERR(rcv[i][1]);
1611 new2 = 0;
1612 }
1613
1614 i7core_rdimm_update_ce_count(mci, i, new0, new1, new2);
1615 }
1616 }
1617
1618 /* This function is based on the device 3 function 4 registers as described on:
1619 * Intel Xeon Processor 5500 Series Datasheet Volume 2
1620 * http://www.intel.com/Assets/PDF/datasheet/321322.pdf
1621 * also available at:
1622 * http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
1623 */
1624 static void i7core_udimm_check_mc_ecc_err(struct mem_ctl_info *mci)
1625 {
1626 struct i7core_pvt *pvt = mci->pvt_info;
1627 u32 rcv1, rcv0;
1628 int new0, new1, new2;
1629
1630 if (!pvt->pci_mcr[4]) {
1631 debugf0("%s MCR registers not found\n", __func__);
1632 return;
1633 }
1634
1635 /* Corrected test errors */
1636 pci_read_config_dword(pvt->pci_mcr[4], MC_TEST_ERR_RCV1, &rcv1);
1637 pci_read_config_dword(pvt->pci_mcr[4], MC_TEST_ERR_RCV0, &rcv0);
1638
1639 /* Store the new values */
1640 new2 = DIMM2_COR_ERR(rcv1);
1641 new1 = DIMM1_COR_ERR(rcv0);
1642 new0 = DIMM0_COR_ERR(rcv0);
1643
1644 /* Updates CE counters if it is not the first time here */
1645 if (pvt->ce_count_available) {
1646 /* Updates CE counters */
1647 int add0, add1, add2;
1648
1649 add2 = new2 - pvt->udimm_last_ce_count[2];
1650 add1 = new1 - pvt->udimm_last_ce_count[1];
1651 add0 = new0 - pvt->udimm_last_ce_count[0];
1652
1653 if (add2 < 0)
1654 add2 += 0x7fff;
1655 pvt->udimm_ce_count[2] += add2;
1656
1657 if (add1 < 0)
1658 add1 += 0x7fff;
1659 pvt->udimm_ce_count[1] += add1;
1660
1661 if (add0 < 0)
1662 add0 += 0x7fff;
1663 pvt->udimm_ce_count[0] += add0;
1664
1665 if (add0 | add1 | add2)
1666 i7core_printk(KERN_ERR, "New Corrected error(s): "
1667 "dimm0: +%d, dimm1: +%d, dimm2 +%d\n",
1668 add0, add1, add2);
1669 } else
1670 pvt->ce_count_available = 1;
1671
1672 /* Store the new values */
1673 pvt->udimm_last_ce_count[2] = new2;
1674 pvt->udimm_last_ce_count[1] = new1;
1675 pvt->udimm_last_ce_count[0] = new0;
1676 }
1677
1678 /*
1679 * According to tables E-11 and E-12 of chapter E.3.3 of the Intel 64 and
1680 * IA-32 Architectures Software Developer's Manual Volume 3B,
1681 * Nehalem is defined as family 0x06, model 0x1a.
1682 *
1683 * The MCA registers used here are the following ones:
1684 * struct mce field MCA Register
1685 * m->status MSR_IA32_MC8_STATUS
1686 * m->addr MSR_IA32_MC8_ADDR
1687 * m->misc MSR_IA32_MC8_MISC
1688 * In the case of Nehalem, the error information is encoded in the .status
1689 * and .misc fields.
1690 */
1691 static void i7core_mce_output_error(struct mem_ctl_info *mci,
1692 const struct mce *m)
1693 {
1694 struct i7core_pvt *pvt = mci->pvt_info;
1695 char *type, *optype, *err, *msg;
1696 unsigned long error = m->status & 0x1ff0000l;
1697 u32 optypenum = (m->status >> 4) & 0x07;
1698 u32 core_err_cnt = (m->status >> 38) & 0x7fff;
1699 u32 dimm = (m->misc >> 16) & 0x3;
1700 u32 channel = (m->misc >> 18) & 0x3;
1701 u32 syndrome = m->misc >> 32;
1702 u32 errnum = find_first_bit(&error, 32);
1703 int csrow;
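/*
 * Field layout assumed by the extraction above: MC8_MISC carries the
 * DIMM in bits 17:16, the channel in bits 19:18 and the ECC syndrome in
 * bits 63:32, while MC8_STATUS bits 24:16 hold the memory error mask.
 */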
1704
1705 if (m->mcgstatus & 1)
1706 type = "FATAL";
1707 else
1708 type = "NON_FATAL";
1709
1710 switch (optypenum) {
1711 case 0:
1712 optype = "generic undef request";
1713 break;
1714 case 1:
1715 optype = "read error";
1716 break;
1717 case 2:
1718 optype = "write error";
1719 break;
1720 case 3:
1721 optype = "addr/cmd error";
1722 break;
1723 case 4:
1724 optype = "scrubbing error";
1725 break;
1726 default:
1727 optype = "reserved";
1728 break;
1729 }
1730
1731 switch (errnum) {
1732 case 16:
1733 err = "read ECC error";
1734 break;
1735 case 17:
1736 err = "RAS ECC error";
1737 break;
1738 case 18:
1739 err = "write parity error";
1740 break;
1741 case 19:
1742 err = "redundancy loss";
1743 break;
1744 case 20:
1745 err = "reserved";
1746 break;
1747 case 21:
1748 err = "memory range error";
1749 break;
1750 case 22:
1751 err = "RTID out of range";
1752 break;
1753 case 23:
1754 err = "address parity error";
1755 break;
1756 case 24:
1757 err = "byte enable parity error";
1758 break;
1759 default:
1760 err = "unknown";
1761 }
1762
1763 /* FIXME: should convert addr into bank and rank information */
1764 msg = kasprintf(GFP_ATOMIC,
1765 "%s (addr = 0x%08llx, cpu=%d, Dimm=%d, Channel=%d, "
1766 "syndrome=0x%08x, count=%d, Err=%08llx:%08llx (%s: %s))\n",
1767 type, (long long) m->addr, m->cpu, dimm, channel,
1768 syndrome, core_err_cnt, (long long)m->status,
1769 (long long)m->misc, optype, err);
1770
1771 debugf0("%s", msg);
1772
1773 csrow = pvt->csrow_map[channel][dimm];
1774
1775 /* Call the helper to output message */
1776 if (m->mcgstatus & 1)
1777 edac_mc_handle_fbd_ue(mci, csrow, 0,
1778 0 /* FIXME: should be channel here */, msg);
1779 else if (!pvt->is_registered)
1780 edac_mc_handle_fbd_ce(mci, csrow,
1781 0 /* FIXME: should be channel here */, msg);
1782
1783 kfree(msg);
1784 }
1785
1786 /*
1787 * i7core_check_error Retrieve and process errors reported by the
1788 * hardware. Called by the Core module.
1789 */
1790 static void i7core_check_error(struct mem_ctl_info *mci)
1791 {
1792 struct i7core_pvt *pvt = mci->pvt_info;
1793 int i;
1794 unsigned count = 0;
1795 struct mce *m;
1796
1797 /*
1798 * MCE first step: Copy all mce errors into a temporary buffer
1799 * We use double buffering here, to reduce the risk of
1800 * losing an error.
1801 */
1802 smp_rmb();
1803 count = (pvt->mce_out + MCE_LOG_LEN - pvt->mce_in)
1804 % MCE_LOG_LEN;
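/*
 * Example: with MCE_LOG_LEN = 32, mce_out = 2 and mce_in = 30,
 * count = (2 + 32 - 30) % 32 = 4 entries are pending in the ring.
 */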
1805 if (!count)
1806 goto check_ce_error;
1807
1808 m = pvt->mce_outentry;
1809 if (pvt->mce_in + count > MCE_LOG_LEN) {
1810 unsigned l = MCE_LOG_LEN - pvt->mce_in;
1811
1812 memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * l);
1813 smp_wmb();
1814 pvt->mce_in = 0;
1815 count -= l;
1816 m += l;
1817 }
1818 memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * count);
1819 smp_wmb();
1820 pvt->mce_in += count;
1821
1822 smp_rmb();
1823 if (pvt->mce_overrun) {
1824 i7core_printk(KERN_ERR, "Lost %d memory errors\n",
1825 pvt->mce_overrun);
1826 smp_wmb();
1827 pvt->mce_overrun = 0;
1828 }
1829
1830 /*
1831 * MCE second step: parse errors and display
1832 */
1833 for (i = 0; i < count; i++)
1834 i7core_mce_output_error(mci, &pvt->mce_outentry[i]);
1835
1836 /*
1837 * Now, let's increment CE error counts
1838 */
1839 check_ce_error:
1840 if (!pvt->is_registered)
1841 i7core_udimm_check_mc_ecc_err(mci);
1842 else
1843 i7core_rdimm_check_mc_ecc_err(mci);
1844 }
1845
1846 /*
1847 * i7core_mce_check_error Replicates mcelog routine to get errors
1848 * This routine simply queues mcelog errors, and
1849 * returns. The error itself should be handled later
1850 * by i7core_check_error.
1851 * WARNING: As this routine should be called at NMI time, extra care should
1852 * be taken to avoid deadlocks, and to be as fast as possible.
1853 */
1854 static int i7core_mce_check_error(void *priv, struct mce *mce)
1855 {
1856 struct mem_ctl_info *mci = priv;
1857 struct i7core_pvt *pvt = mci->pvt_info;
1858
1859 /*
1860 * Just let mcelog handle it if the error is
1861 * outside the memory controller
1862 */
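/*
 * Memory controller errors have a compound MCA error code of the form
 * 0000 0000 1MMM CCCC (Intel SDM vol. 3B), so bits 15:7 of the status
 * must read 000000001.
 */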
1863 if (((mce->status & 0xffff) >> 7) != 1)
1864 return 0;
1865
1866 /* Bank 8 registers are the only ones that we know how to handle */
1867 if (mce->bank != 8)
1868 return 0;
1869
1870 #ifdef CONFIG_SMP
1871 /* Only handle if it is the right mc controller */
1872 if (cpu_data(mce->cpu).phys_proc_id != pvt->i7core_dev->socket)
1873 return 0;
1874 #endif
1875
1876 smp_rmb();
1877 if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) {
1878 smp_wmb();
1879 pvt->mce_overrun++;
1880 return 0;
1881 }
1882
1883 /* Copy the memory error into the ring buffer */
1884 memcpy(&pvt->mce_entry[pvt->mce_out], mce, sizeof(*mce));
1885 smp_wmb();
1886 pvt->mce_out = (pvt->mce_out + 1) % MCE_LOG_LEN;
1887
1888 /* Handle fatal errors immediately */
1889 if (mce->mcgstatus & 1)
1890 i7core_check_error(mci);
1891
1892 /* Advise mcelog that the errors were handled */
1893 return 1;
1894 }
1895
1896 static void i7core_pci_ctl_create(struct i7core_pvt *pvt)
1897 {
1898 pvt->i7core_pci = edac_pci_create_generic_ctl(
1899 &pvt->i7core_dev->pdev[0]->dev,
1900 EDAC_MOD_STR);
1901 if (unlikely(!pvt->i7core_pci))
1902 pr_warn("Unable to setup PCI error report via EDAC\n");
1903 }
1904
1905 static void i7core_pci_ctl_release(struct i7core_pvt *pvt)
1906 {
1907 if (likely(pvt->i7core_pci))
1908 edac_pci_release_generic_ctl(pvt->i7core_pci);
1909 else
1910 i7core_printk(KERN_ERR,
1911 "Couldn't find mem_ctl_info for socket %d\n",
1912 pvt->i7core_dev->socket);
1913 pvt->i7core_pci = NULL;
1914 }
1915
1916 static void i7core_unregister_mci(struct i7core_dev *i7core_dev)
1917 {
1918 struct mem_ctl_info *mci = i7core_dev->mci;
1919 struct i7core_pvt *pvt;
1920
1921 if (unlikely(!mci || !mci->pvt_info)) {
1922 debugf0("MC: " __FILE__ ": %s(): dev = %p\n",
1923 __func__, &i7core_dev->pdev[0]->dev);
1924
1925 i7core_printk(KERN_ERR, "Couldn't find mci handler\n");
1926 return;
1927 }
1928
1929 pvt = mci->pvt_info;
1930
1931 debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n",
1932 __func__, mci, &i7core_dev->pdev[0]->dev);
1933
1934 /* Disable MCE NMI handler */
1935 edac_mce_unregister(&pvt->edac_mce);
1936
1937 /* Disable EDAC polling */
1938 i7core_pci_ctl_release(pvt);
1939
1940 /* Remove MC sysfs nodes */
1941 edac_mc_del_mc(mci->dev);
1942
1943 debugf1("%s: free mci struct\n", mci->ctl_name);
1944 kfree(mci->ctl_name);
1945 edac_mc_free(mci);
1946 i7core_dev->mci = NULL;
1947 }
1948
1949 static int i7core_register_mci(struct i7core_dev *i7core_dev)
1950 {
1951 struct mem_ctl_info *mci;
1952 struct i7core_pvt *pvt;
1953 int rc; unsigned channels, csrows;
1954
1955 /* Check the number of active and not disabled channels */
1956 rc = i7core_get_active_channels(i7core_dev->socket, &channels, &csrows);
1957 if (unlikely(rc < 0))
1958 return rc;
1959
1960 /* allocate a new MC control structure */
1961 mci = edac_mc_alloc(sizeof(*pvt), csrows, channels, i7core_dev->socket);
1962 if (unlikely(!mci))
1963 return -ENOMEM;
1964
1965 debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n",
1966 __func__, mci, &i7core_dev->pdev[0]->dev);
1967
1968 pvt = mci->pvt_info;
1969 memset(pvt, 0, sizeof(*pvt));
1970
1971 /*
1972 * FIXME: how to handle RDDR3 at MCI level? It is possible to have
1973 * Mixed RDDR3/UDDR3 with Nehalem, provided that they are on different
1974 * memory channels
1975 */
1976 mci->mtype_cap = MEM_FLAG_DDR3;
1977 mci->edac_ctl_cap = EDAC_FLAG_NONE;
1978 mci->edac_cap = EDAC_FLAG_NONE;
1979 mci->mod_name = "i7core_edac.c";
1980 mci->mod_ver = I7CORE_REVISION;
1981 mci->ctl_name = kasprintf(GFP_KERNEL, "i7 core #%d",
1982 i7core_dev->socket);
1983 mci->dev_name = pci_name(i7core_dev->pdev[0]);
1984 mci->ctl_page_to_phys = NULL;
1985
1986 /* Store pci devices at mci for faster access */
1987 rc = mci_bind_devs(mci, i7core_dev);
1988 if (unlikely(rc < 0))
1989 goto fail0;
1990
1991 if (pvt->is_registered)
1992 mci->mc_driver_sysfs_attributes = i7core_sysfs_rdimm_attrs;
1993 else
1994 mci->mc_driver_sysfs_attributes = i7core_sysfs_udimm_attrs;
1995
1996 /* Get dimm basic config */
1997 get_dimm_config(mci);
1998 /* record ptr to the generic device */
1999 mci->dev = &i7core_dev->pdev[0]->dev;
2000 /* Set the function pointer to an actual operation function */
2001 mci->edac_check = i7core_check_error;
2002
2003 /* add this new MC control structure to EDAC's list of MCs */
2004 if (unlikely(edac_mc_add_mc(mci))) {
2005 debugf0("MC: " __FILE__
2006 ": %s(): failed edac_mc_add_mc()\n", __func__);
2007 /* FIXME: perhaps some code should go here that disables error
2008 * reporting if we just enabled it
2009 */
2010
2011 rc = -EINVAL;
2012 goto fail0;
2013 }
2014
2015 /* Default error mask is any memory */
2016 pvt->inject.channel = 0;
2017 pvt->inject.dimm = -1;
2018 pvt->inject.rank = -1;
2019 pvt->inject.bank = -1;
2020 pvt->inject.page = -1;
2021 pvt->inject.col = -1;
2022
2023 /* allocating generic PCI control info */
2024 i7core_pci_ctl_create(pvt);
2025
2026 /* Register with edac_mce in order to receive memory errors */
2027 pvt->edac_mce.priv = mci;
2028 pvt->edac_mce.check_error = i7core_mce_check_error;
2029 rc = edac_mce_register(&pvt->edac_mce);
2030 if (unlikely(rc < 0)) {
2031 debugf0("MC: " __FILE__
2032 ": %s(): failed edac_mce_register()\n", __func__);
2033 goto fail1;
2034 }
2035
2036 /* Associates i7core_dev and mci for future usage */
2037 pvt->i7core_dev = i7core_dev;
2038 i7core_dev->mci = mci;
2039
2040 return 0;
2041
2042 fail1:
2043 i7core_pci_ctl_release(pvt);
2044 edac_mc_del_mc(mci->dev);
2045 fail0:
2046 kfree(mci->ctl_name);
2047 edac_mc_free(mci);
2048 i7core_dev->mci = NULL;
2049 return rc;
2050 }
2051
2052 /*
2053 * i7core_probe Probe for ONE instance of device to see if it is
2054 * present.
2055 * return:
2056 * 0 for FOUND a device
2057 * < 0 for error code
2058 */
2059
2060 static int __devinit i7core_probe(struct pci_dev *pdev,
2061 const struct pci_device_id *id)
2062 {
2063 int rc;
2064 struct i7core_dev *i7core_dev;
2065
2066 /* get the pci devices we want to reserve for our use */
2067 mutex_lock(&i7core_edac_lock);
2068
2069 /*
2070 * All memory controllers are allocated at the first pass.
2071 */
2072 if (unlikely(probed >= 1)) {
2073 mutex_unlock(&i7core_edac_lock);
2074 return -EINVAL;
2075 }
2076 probed++;
2077
2078 rc = i7core_get_all_devices();
2079 if (unlikely(rc < 0))
2080 goto fail0;
2081
2082 list_for_each_entry(i7core_dev, &i7core_edac_list, list) {
2083 rc = i7core_register_mci(i7core_dev);
2084 if (unlikely(rc < 0))
2085 goto fail1;
2086 }
2087
2088 i7core_printk(KERN_INFO, "Driver loaded.\n");
2089
2090 mutex_unlock(&i7core_edac_lock);
2091 return 0;
2092
2093 fail1:
2094 list_for_each_entry(i7core_dev, &i7core_edac_list, list) {
2095 if (i7core_dev->mci)
2096 i7core_unregister_mci(i7core_dev);
2097 }
2098 i7core_put_all_devices();
2099 fail0:
2100 mutex_unlock(&i7core_edac_lock);
2101 return rc;
2102 }
2103
2104 /*
2105 * i7core_remove destructor for one instance of device
2106 *
2107 */
2108 static void __devexit i7core_remove(struct pci_dev *pdev)
2109 {
2110 struct i7core_dev *i7core_dev;
2111
2112 debugf0(__FILE__ ": %s()\n", __func__);
2113
2114 /*
2115 * we have a problem here: the pdev value for removal will be wrong,
2116 * since it will point to the X58 register used to detect that the
2117 * machine is a Nehalem or newer design. However, due to the way several
2118 * PCI devices are grouped to provide MC functionality, we need to use
2119 * a different method for releasing the devices
2120 */
2121
2122 mutex_lock(&i7core_edac_lock);
2123
2124 if (unlikely(!probed)) {
2125 mutex_unlock(&i7core_edac_lock);
2126 return;
2127 }
2128
2129 list_for_each_entry(i7core_dev, &i7core_edac_list, list) {
2130 if (i7core_dev->mci)
2131 i7core_unregister_mci(i7core_dev);
2132 }
2133
2134 /* Release PCI resources */
2135 i7core_put_all_devices();
2136
2137 probed--;
2138
2139 mutex_unlock(&i7core_edac_lock);
2140 }
2141
2142 MODULE_DEVICE_TABLE(pci, i7core_pci_tbl);
2143
2144 /*
2145 * i7core_driver pci_driver structure for this module
2146 *
2147 */
2148 static struct pci_driver i7core_driver = {
2149 .name = "i7core_edac",
2150 .probe = i7core_probe,
2151 .remove = __devexit_p(i7core_remove),
2152 .id_table = i7core_pci_tbl,
2153 };
2154
2155 /*
2156 * i7core_init Module entry function
2157 * Try to initialize this module for its devices
2158 */
2159 static int __init i7core_init(void)
2160 {
2161 int pci_rc;
2162
2163 debugf2("MC: " __FILE__ ": %s()\n", __func__);
2164
2165 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
2166 opstate_init();
2167
2168 if (use_pci_fixup)
2169 i7core_xeon_pci_fixup(pci_dev_table);
2170
2171 pci_rc = pci_register_driver(&i7core_driver);
2172
2173 if (pci_rc >= 0)
2174 return 0;
2175
2176 i7core_printk(KERN_ERR, "Failed to register device with error %d.\n",
2177 pci_rc);
2178
2179 return pci_rc;
2180 }
2181
2182 /*
2183 * i7core_exit() Module exit function
2184 * Unregister the driver
2185 */
2186 static void __exit i7core_exit(void)
2187 {
2188 debugf2("MC: " __FILE__ ": %s()\n", __func__);
2189 pci_unregister_driver(&i7core_driver);
2190 }
2191
2192 module_init(i7core_init);
2193 module_exit(i7core_exit);
2194
2195 MODULE_LICENSE("GPL");
2196 MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
2197 MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
2198 MODULE_DESCRIPTION("MC Driver for Intel i7 Core memory controllers - "
2199 I7CORE_REVISION);
2200
2201 module_param(edac_op_state, int, 0444);
2202 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");