1 /* Intel Sandy Bridge -EN/-EP/-EX Memory Controller kernel module
2 *
3 * This driver supports the memory controllers found on the Intel
4 * Sandy Bridge processor family.
5 *
6 * This file may be distributed under the terms of the
7 * GNU General Public License version 2 only.
8 *
9 * Copyright (c) 2011 by:
10 * Mauro Carvalho Chehab
11 */
12
13 #include <linux/module.h>
14 #include <linux/init.h>
15 #include <linux/pci.h>
16 #include <linux/pci_ids.h>
17 #include <linux/slab.h>
18 #include <linux/delay.h>
19 #include <linux/edac.h>
20 #include <linux/mmzone.h>
21 #include <linux/smp.h>
22 #include <linux/bitmap.h>
23 #include <linux/math64.h>
24 #include <asm/processor.h>
25 #include <asm/mce.h>
26
27 #include "edac_core.h"
28
29 /* Static vars */
30 static LIST_HEAD(sbridge_edac_list);
31 static DEFINE_MUTEX(sbridge_edac_lock);
32 static int probed;
33
34 /*
35 * Update this module version when modifications are made
36 */
37 #define SBRIDGE_REVISION " Ver: 1.1.0 "
38 #define EDAC_MOD_STR "sbridge_edac"
39
40 /*
41 * Debug macros
42 */
43 #define sbridge_printk(level, fmt, arg...) \
44 edac_printk(level, "sbridge", fmt, ##arg)
45
46 #define sbridge_mc_printk(mci, level, fmt, arg...) \
47 edac_mc_chipset_printk(mci, level, "sbridge", fmt, ##arg)
48
49 /*
50 * Get a bit field at register value <v>, from bit <lo> to bit <hi>
51 */
52 #define GET_BITFIELD(v, lo, hi) \
53 (((v) & GENMASK_ULL(hi, lo)) >> (lo))
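/*
 * For example, GET_BITFIELD(0x12345678, 8, 15) masks bits 15:8 with
 * GENMASK_ULL(15, 8) and shifts right by 8, evaluating to 0x56.
 */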
54
55 /*
56 * sbridge Memory Controller Registers
57 */
58
59 /*
60 * FIXME: For now, let's order by device function, as it makes
61 * the driver's development process easier. This table should be
62 * moved to pci_ids.h when submitted upstream
63 */
64 #define PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0 0x3cf4 /* 12.6 */
65 #define PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1 0x3cf6 /* 12.7 */
66 #define PCI_DEVICE_ID_INTEL_SBRIDGE_BR 0x3cf5 /* 13.6 */
67 #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0 0x3ca0 /* 14.0 */
68 #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA 0x3ca8 /* 15.0 */
69 #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS 0x3c71 /* 15.1 */
70 #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0 0x3caa /* 15.2 */
71 #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1 0x3cab /* 15.3 */
72 #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2 0x3cac /* 15.4 */
73 #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3 0x3cad /* 15.5 */
74 #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO 0x3cb8 /* 17.0 */
75
76 /*
77 * Currently unused, but will be needed by future
78 * implementations, as they hold the error counters
79 */
80 #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR0 0x3c72 /* 16.2 */
81 #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR1 0x3c73 /* 16.3 */
82 #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR2 0x3c76 /* 16.6 */
83 #define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR3 0x3c77 /* 16.7 */
84
85 /* Devices 12 Function 6, Offsets 0x80 to 0xcc */
86 static const u32 sbridge_dram_rule[] = {
87 0x80, 0x88, 0x90, 0x98, 0xa0,
88 0xa8, 0xb0, 0xb8, 0xc0, 0xc8,
89 };
90
91 static const u32 ibridge_dram_rule[] = {
92 0x60, 0x68, 0x70, 0x78, 0x80,
93 0x88, 0x90, 0x98, 0xa0, 0xa8,
94 0xb0, 0xb8, 0xc0, 0xc8, 0xd0,
95 0xd8, 0xe0, 0xe8, 0xf0, 0xf8,
96 };
97
98 #define SAD_LIMIT(reg) ((GET_BITFIELD(reg, 6, 25) << 26) | 0x3ffffff)
99 #define DRAM_ATTR(reg) GET_BITFIELD(reg, 2, 3)
100 #define INTERLEAVE_MODE(reg) GET_BITFIELD(reg, 1, 1)
101 #define DRAM_RULE_ENABLE(reg) GET_BITFIELD(reg, 0, 0)
102 #define A7MODE(reg) GET_BITFIELD(reg, 26, 26)
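/*
 * SAD_LIMIT() places bits 25:6 of the dram rule register at address bits
 * 45:26 and fills the low 26 bits with ones, so the result is the highest
 * (inclusive) system address covered by that rule. The other macros above
 * decode the attribute, interleave mode, A7 mode and enable fields of the
 * same register.
 */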
103
104 static char *get_dram_attr(u32 reg)
105 {
106 switch(DRAM_ATTR(reg)) {
107 case 0:
108 return "DRAM";
109 case 1:
110 return "MMCFG";
111 case 2:
112 return "NXM";
113 default:
114 return "unknown";
115 }
116 }
117
118 static const u32 sbridge_interleave_list[] = {
119 0x84, 0x8c, 0x94, 0x9c, 0xa4,
120 0xac, 0xb4, 0xbc, 0xc4, 0xcc,
121 };
122
123 static const u32 ibridge_interleave_list[] = {
124 0x64, 0x6c, 0x74, 0x7c, 0x84,
125 0x8c, 0x94, 0x9c, 0xa4, 0xac,
126 0xb4, 0xbc, 0xc4, 0xcc, 0xd4,
127 0xdc, 0xe4, 0xec, 0xf4, 0xfc,
128 };
129
130 struct interleave_pkg {
131 unsigned char start;
132 unsigned char end;
133 };
134
135 static const struct interleave_pkg sbridge_interleave_pkg[] = {
136 { 0, 2 },
137 { 3, 5 },
138 { 8, 10 },
139 { 11, 13 },
140 { 16, 18 },
141 { 19, 21 },
142 { 24, 26 },
143 { 27, 29 },
144 };
145
146 static const struct interleave_pkg ibridge_interleave_pkg[] = {
147 { 0, 3 },
148 { 4, 7 },
149 { 8, 11 },
150 { 12, 15 },
151 { 16, 19 },
152 { 20, 23 },
153 { 24, 27 },
154 { 28, 31 },
155 };
156
157 static inline int sad_pkg(const struct interleave_pkg *table, u32 reg,
158 int interleave)
159 {
160 return GET_BITFIELD(reg, table[interleave].start,
161 table[interleave].end);
162 }
163
164 /* Devices 12 Function 7 */
165
166 #define TOLM 0x80
167 #define TOHM 0x84
168 #define HASWELL_TOHM_0 0xd4
169 #define HASWELL_TOHM_1 0xd8
170
171 #define GET_TOLM(reg) ((GET_BITFIELD(reg, 0, 3) << 28) | 0x3ffffff)
172 #define GET_TOHM(reg) ((GET_BITFIELD(reg, 0, 20) << 25) | 0x3ffffff)
173
174 /* Device 13 Function 6 */
175
176 #define SAD_TARGET 0xf0
177
178 #define SOURCE_ID(reg) GET_BITFIELD(reg, 9, 11)
179
180 #define SAD_CONTROL 0xf4
181
182 /* Device 14 function 0 */
183
184 static const u32 tad_dram_rule[] = {
185 0x40, 0x44, 0x48, 0x4c,
186 0x50, 0x54, 0x58, 0x5c,
187 0x60, 0x64, 0x68, 0x6c,
188 };
189 #define MAX_TAD ARRAY_SIZE(tad_dram_rule)
190
191 #define TAD_LIMIT(reg) ((GET_BITFIELD(reg, 12, 31) << 26) | 0x3ffffff)
192 #define TAD_SOCK(reg) GET_BITFIELD(reg, 10, 11)
193 #define TAD_CH(reg) GET_BITFIELD(reg, 8, 9)
194 #define TAD_TGT3(reg) GET_BITFIELD(reg, 6, 7)
195 #define TAD_TGT2(reg) GET_BITFIELD(reg, 4, 5)
196 #define TAD_TGT1(reg) GET_BITFIELD(reg, 2, 3)
197 #define TAD_TGT0(reg) GET_BITFIELD(reg, 0, 1)
198
199 /* Device 15, function 0 */
200
201 #define MCMTR 0x7c
202
203 #define IS_ECC_ENABLED(mcmtr) GET_BITFIELD(mcmtr, 2, 2)
204 #define IS_LOCKSTEP_ENABLED(mcmtr) GET_BITFIELD(mcmtr, 1, 1)
205 #define IS_CLOSE_PG(mcmtr) GET_BITFIELD(mcmtr, 0, 0)
206
207 /* Device 15, function 1 */
208
209 #define RASENABLES 0xac
210 #define IS_MIRROR_ENABLED(reg) GET_BITFIELD(reg, 0, 0)
211
212 /* Device 15, functions 2-5 */
213
214 static const int mtr_regs[] = {
215 0x80, 0x84, 0x88,
216 };
217
218 #define RANK_DISABLE(mtr) GET_BITFIELD(mtr, 16, 19)
219 #define IS_DIMM_PRESENT(mtr) GET_BITFIELD(mtr, 14, 14)
220 #define RANK_CNT_BITS(mtr) GET_BITFIELD(mtr, 12, 13)
221 #define RANK_WIDTH_BITS(mtr) GET_BITFIELD(mtr, 2, 4)
222 #define COL_WIDTH_BITS(mtr) GET_BITFIELD(mtr, 0, 1)
223
224 static const u32 tad_ch_nilv_offset[] = {
225 0x90, 0x94, 0x98, 0x9c,
226 0xa0, 0xa4, 0xa8, 0xac,
227 0xb0, 0xb4, 0xb8, 0xbc,
228 };
229 #define CHN_IDX_OFFSET(reg) GET_BITFIELD(reg, 28, 29)
230 #define TAD_OFFSET(reg) (GET_BITFIELD(reg, 6, 25) << 26)
231
232 static const u32 rir_way_limit[] = {
233 0x108, 0x10c, 0x110, 0x114, 0x118,
234 };
235 #define MAX_RIR_RANGES ARRAY_SIZE(rir_way_limit)
236
237 #define IS_RIR_VALID(reg) GET_BITFIELD(reg, 31, 31)
238 #define RIR_WAY(reg) GET_BITFIELD(reg, 28, 29)
239
240 #define MAX_RIR_WAY 8
241
242 static const u32 rir_offset[MAX_RIR_RANGES][MAX_RIR_WAY] = {
243 { 0x120, 0x124, 0x128, 0x12c, 0x130, 0x134, 0x138, 0x13c },
244 { 0x140, 0x144, 0x148, 0x14c, 0x150, 0x154, 0x158, 0x15c },
245 { 0x160, 0x164, 0x168, 0x16c, 0x170, 0x174, 0x178, 0x17c },
246 { 0x180, 0x184, 0x188, 0x18c, 0x190, 0x194, 0x198, 0x19c },
247 { 0x1a0, 0x1a4, 0x1a8, 0x1ac, 0x1b0, 0x1b4, 0x1b8, 0x1bc },
248 };
249
250 #define RIR_RNK_TGT(reg) GET_BITFIELD(reg, 16, 19)
251 #define RIR_OFFSET(reg) GET_BITFIELD(reg, 2, 14)
252
253 /* Device 16, functions 2-7 */
254
255 /*
256 * FIXME: Implement the error count reads directly
257 */
258
259 static const u32 correrrcnt[] = {
260 0x104, 0x108, 0x10c, 0x110,
261 };
262
263 #define RANK_ODD_OV(reg) GET_BITFIELD(reg, 31, 31)
264 #define RANK_ODD_ERR_CNT(reg) GET_BITFIELD(reg, 16, 30)
265 #define RANK_EVEN_OV(reg) GET_BITFIELD(reg, 15, 15)
266 #define RANK_EVEN_ERR_CNT(reg) GET_BITFIELD(reg, 0, 14)
267
268 static const u32 correrrthrsld[] = {
269 0x11c, 0x120, 0x124, 0x128,
270 };
271
272 #define RANK_ODD_ERR_THRSLD(reg) GET_BITFIELD(reg, 16, 30)
273 #define RANK_EVEN_ERR_THRSLD(reg) GET_BITFIELD(reg, 0, 14)
274
275
276 /* Device 17, function 0 */
277
278 #define SB_RANK_CFG_A 0x0328
279
280 #define IB_RANK_CFG_A 0x0320
281
282 /*
283 * sbridge structs
284 */
285
286 #define NUM_CHANNELS 4
287 #define MAX_DIMMS 3 /* Max DIMMS per channel */
288
289 enum type {
290 SANDY_BRIDGE,
291 IVY_BRIDGE,
292 HASWELL,
293 };
294
295 struct sbridge_pvt;
296 struct sbridge_info {
297 enum type type;
298 u32 mcmtr;
299 u32 rankcfgr;
300 u64 (*get_tolm)(struct sbridge_pvt *pvt);
301 u64 (*get_tohm)(struct sbridge_pvt *pvt);
302 u64 (*rir_limit)(u32 reg);
303 const u32 *dram_rule;
304 const u32 *interleave_list;
305 const struct interleave_pkg *interleave_pkg;
306 u8 max_sad;
307 u8 max_interleave;
308 u8 (*get_node_id)(struct sbridge_pvt *pvt);
309 enum mem_type (*get_memory_type)(struct sbridge_pvt *pvt);
310 struct pci_dev *pci_vtd;
311 };
312
313 struct sbridge_channel {
314 u32 ranks;
315 u32 dimms;
316 };
317
318 struct pci_id_descr {
319 int dev_id;
320 int optional;
321 };
322
323 struct pci_id_table {
324 const struct pci_id_descr *descr;
325 int n_devs;
326 };
327
328 struct sbridge_dev {
329 struct list_head list;
330 u8 bus, mc;
331 u8 node_id, source_id;
332 struct pci_dev **pdev;
333 int n_devs;
334 struct mem_ctl_info *mci;
335 };
336
337 struct sbridge_pvt {
338 struct pci_dev *pci_ta, *pci_ddrio, *pci_ras;
339 struct pci_dev *pci_sad0, *pci_sad1;
340 struct pci_dev *pci_ha0, *pci_ha1;
341 struct pci_dev *pci_br0, *pci_br1;
342 struct pci_dev *pci_ha1_ta;
343 struct pci_dev *pci_tad[NUM_CHANNELS];
344
345 struct sbridge_dev *sbridge_dev;
346
347 struct sbridge_info info;
348 struct sbridge_channel channel[NUM_CHANNELS];
349
350 /* Memory type detection */
351 bool is_mirrored, is_lockstep, is_close_pg;
352
353	/* FIFO double buffers */
354 struct mce mce_entry[MCE_LOG_LEN];
355 struct mce mce_outentry[MCE_LOG_LEN];
356
357	/* FIFO in/out counters */
358	unsigned		mce_in, mce_out;
359
360	/* Count of errors that could not be queued (FIFO overrun) */
361 unsigned mce_overrun;
362
363 /* Memory description */
364 u64 tolm, tohm;
365 };
366
367 #define PCI_DESCR(device_id, opt) \
368 .dev_id = (device_id), \
369 .optional = opt
370
371 static const struct pci_id_descr pci_dev_descr_sbridge[] = {
372 /* Processor Home Agent */
373 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0, 0) },
374
375 /* Memory controller */
376 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA, 0) },
377 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS, 0) },
378 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0, 0) },
379 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1, 0) },
380 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2, 0) },
381 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3, 0) },
382 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO, 1) },
383
384 /* System Address Decoder */
385 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0, 0) },
386 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1, 0) },
387
388 /* Broadcast Registers */
389 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_BR, 0) },
390 };
391
392 #define PCI_ID_TABLE_ENTRY(A) { .descr=A, .n_devs = ARRAY_SIZE(A) }
393 static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
394 PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge),
395 {0,} /* 0 terminated list. */
396 };
397
398 /* This changes depending on whether there are 1 or 2 HAs:
399 * 1HA:
400 * 0x0eb8 (17.0) is DDRIO0
401 * 2HA:
402 * 0x0ebc (17.4) is DDRIO0
403 */
404 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0 0x0eb8
405 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0 0x0ebc
406
407 /* pci ids */
408 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0 0x0ea0
409 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA 0x0ea8
410 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS 0x0e71
411 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0 0x0eaa
412 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1 0x0eab
413 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2 0x0eac
414 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3 0x0ead
415 #define PCI_DEVICE_ID_INTEL_IBRIDGE_SAD 0x0ec8
416 #define PCI_DEVICE_ID_INTEL_IBRIDGE_BR0 0x0ec9
417 #define PCI_DEVICE_ID_INTEL_IBRIDGE_BR1 0x0eca
418 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1 0x0e60
419 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA 0x0e68
420 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS 0x0e79
421 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0 0x0e6a
422 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1 0x0e6b
423
424 static const struct pci_id_descr pci_dev_descr_ibridge[] = {
425 /* Processor Home Agent */
426 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0, 0) },
427
428 /* Memory controller */
429 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA, 0) },
430 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS, 0) },
431 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0, 0) },
432 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1, 0) },
433 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2, 0) },
434 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3, 0) },
435
436 /* System Address Decoder */
437 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_SAD, 0) },
438
439 /* Broadcast Registers */
440 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_BR0, 1) },
441 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_BR1, 0) },
442
443 /* Optional, mode 2HA */
444 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1, 1) },
445 #if 0
446 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA, 1) },
447 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS, 1) },
448 #endif
449 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0, 1) },
450 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1, 1) },
451
452 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0, 1) },
453 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0, 1) },
454 };
455
456 static const struct pci_id_table pci_dev_descr_ibridge_table[] = {
457 PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge),
458 {0,} /* 0 terminated list. */
459 };
460
461 /* Haswell support */
462 /* EN processor:
463 * - 1 IMC
464 * - 3 DDR3 channels, 2 DPC per channel
465 * EP processor:
466 * - 1 or 2 IMC
467 * - 4 DDR4 channels, 3 DPC per channel
468 * EP 4S processor:
469 * - 2 IMC
470 * - 4 DDR4 channels, 3 DPC per channel
471 * EX processor:
472 * - 2 IMC
473 * - each IMC interfaces with a SMI 2 channel
474 * - each SMI channel interfaces with a scalable memory buffer
475 * - each scalable memory buffer supports 4 DDR3/DDR4 channels, 3 DPC
476 */
477 #define HASWELL_DDRCRCLKCONTROLS 0xa10
478 #define HASWELL_HASYSDEFEATURE2 0x84
479 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_VTD_MISC 0x2f28
480 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0 0x2fa0
481 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1 0x2f60
482 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA 0x2fa8
483 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_THERMAL 0x2f71
484 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA 0x2f68
485 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_THERMAL 0x2f79
486 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0 0x2ffc
487 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1 0x2ffd
488 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0 0x2faa
489 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1 0x2fab
490 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2 0x2fac
491 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3 0x2fad
492 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0 0x2f6a
493 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1 0x2f6b
494 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2 0x2f6c
495 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3 0x2f6d
496 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0 0x2fbd
497 static const struct pci_id_descr pci_dev_descr_haswell[] = {
498 /* first item must be the HA */
499 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0, 0) },
500
501 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0, 0) },
502 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1, 0) },
503
504 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1, 1) },
505
506 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA, 0) },
507 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_THERMAL, 0) },
508 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0, 0) },
509 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1, 0) },
510 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2, 1) },
511 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3, 1) },
512
513 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0, 1) },
514
515 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA, 1) },
516 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_THERMAL, 1) },
517 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0, 1) },
518 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1, 1) },
519 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2, 1) },
520 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3, 1) },
521 };
522
523 static const struct pci_id_table pci_dev_descr_haswell_table[] = {
524 PCI_ID_TABLE_ENTRY(pci_dev_descr_haswell),
525 {0,} /* 0 terminated list. */
526 };
527
528 /*
529 * pci_device_id table for which devices we are looking for
530 */
531 static const struct pci_device_id sbridge_pci_tbl[] = {
532 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA)},
533 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA)},
534 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0)},
535 {0,} /* 0 terminated list. */
536 };
537
538
539 /****************************************************************************
540 Ancillary status routines
541 ****************************************************************************/
542
543 static inline int numrank(enum type type, u32 mtr)
544 {
545 int ranks = (1 << RANK_CNT_BITS(mtr));
546 int max = 4;
547
548 if (type == HASWELL)
549 max = 8;
550
551 if (ranks > max) {
552 edac_dbg(0, "Invalid number of ranks: %d (max = %i) raw value = %x (%04x)\n",
553 ranks, max, (unsigned int)RANK_CNT_BITS(mtr), mtr);
554 return -EINVAL;
555 }
556
557 return ranks;
558 }
559
560 static inline int numrow(u32 mtr)
561 {
562 int rows = (RANK_WIDTH_BITS(mtr) + 12);
563
564 if (rows < 13 || rows > 18) {
565 edac_dbg(0, "Invalid number of rows: %d (should be between 14 and 17) raw value = %x (%04x)\n",
566 rows, (unsigned int)RANK_WIDTH_BITS(mtr), mtr);
567 return -EINVAL;
568 }
569
570 return 1 << rows;
571 }
572
573 static inline int numcol(u32 mtr)
574 {
575 int cols = (COL_WIDTH_BITS(mtr) + 10);
576
577 if (cols > 12) {
578 edac_dbg(0, "Invalid number of cols: %d (max = 4) raw value = %x (%04x)\n",
579 cols, (unsigned int)COL_WIDTH_BITS(mtr), mtr);
580 return -EINVAL;
581 }
582
583 return 1 << cols;
584 }
585
586 static struct sbridge_dev *get_sbridge_dev(u8 bus)
587 {
588 struct sbridge_dev *sbridge_dev;
589
590 list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
591 if (sbridge_dev->bus == bus)
592 return sbridge_dev;
593 }
594
595 return NULL;
596 }
597
598 static struct sbridge_dev *alloc_sbridge_dev(u8 bus,
599 const struct pci_id_table *table)
600 {
601 struct sbridge_dev *sbridge_dev;
602
603 sbridge_dev = kzalloc(sizeof(*sbridge_dev), GFP_KERNEL);
604 if (!sbridge_dev)
605 return NULL;
606
607 sbridge_dev->pdev = kzalloc(sizeof(*sbridge_dev->pdev) * table->n_devs,
608 GFP_KERNEL);
609 if (!sbridge_dev->pdev) {
610 kfree(sbridge_dev);
611 return NULL;
612 }
613
614 sbridge_dev->bus = bus;
615 sbridge_dev->n_devs = table->n_devs;
616 list_add_tail(&sbridge_dev->list, &sbridge_edac_list);
617
618 return sbridge_dev;
619 }
620
621 static void free_sbridge_dev(struct sbridge_dev *sbridge_dev)
622 {
623 list_del(&sbridge_dev->list);
624 kfree(sbridge_dev->pdev);
625 kfree(sbridge_dev);
626 }
627
628 static u64 sbridge_get_tolm(struct sbridge_pvt *pvt)
629 {
630 u32 reg;
631
632 /* Address range is 32:28 */
633 pci_read_config_dword(pvt->pci_sad1, TOLM, &reg);
634 return GET_TOLM(reg);
635 }
636
637 static u64 sbridge_get_tohm(struct sbridge_pvt *pvt)
638 {
639 u32 reg;
640
641 pci_read_config_dword(pvt->pci_sad1, TOHM, &reg);
642 return GET_TOHM(reg);
643 }
644
645 static u64 ibridge_get_tolm(struct sbridge_pvt *pvt)
646 {
647 u32 reg;
648
649 pci_read_config_dword(pvt->pci_br1, TOLM, &reg);
650
651 return GET_TOLM(reg);
652 }
653
654 static u64 ibridge_get_tohm(struct sbridge_pvt *pvt)
655 {
656 u32 reg;
657
658 pci_read_config_dword(pvt->pci_br1, TOHM, &reg);
659
660 return GET_TOHM(reg);
661 }
662
663 static u64 rir_limit(u32 reg)
664 {
665 return ((u64)GET_BITFIELD(reg, 1, 10) << 29) | 0x1fffffff;
666 }
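/*
 * Bits 10:1 of the RIR_WAY_LIMIT register hold bits 38:29 of the rank
 * interleave range limit; the low 29 bits are filled with ones, so the
 * returned value is the highest (inclusive) channel address of the range.
 */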
667
668 static enum mem_type get_memory_type(struct sbridge_pvt *pvt)
669 {
670 u32 reg;
671 enum mem_type mtype;
672
673 if (pvt->pci_ddrio) {
674 pci_read_config_dword(pvt->pci_ddrio, pvt->info.rankcfgr,
675 &reg);
676 if (GET_BITFIELD(reg, 11, 11))
677 /* FIXME: Can also be LRDIMM */
678 mtype = MEM_RDDR3;
679 else
680 mtype = MEM_DDR3;
681 } else
682 mtype = MEM_UNKNOWN;
683
684 return mtype;
685 }
686
687 static enum mem_type haswell_get_memory_type(struct sbridge_pvt *pvt)
688 {
689 u32 reg;
690 bool registered = false;
691 enum mem_type mtype = MEM_UNKNOWN;
692
693 if (!pvt->pci_ddrio)
694 goto out;
695
696 pci_read_config_dword(pvt->pci_ddrio,
697 HASWELL_DDRCRCLKCONTROLS, &reg);
698 /* Is_Rdimm */
699 if (GET_BITFIELD(reg, 16, 16))
700 registered = true;
701
702 pci_read_config_dword(pvt->pci_ta, MCMTR, &reg);
703 if (GET_BITFIELD(reg, 14, 14)) {
704 if (registered)
705 mtype = MEM_RDDR4;
706 else
707 mtype = MEM_DDR4;
708 } else {
709 if (registered)
710 mtype = MEM_RDDR3;
711 else
712 mtype = MEM_DDR3;
713 }
714
715 out:
716 return mtype;
717 }
718
719 static u8 get_node_id(struct sbridge_pvt *pvt)
720 {
721 u32 reg;
722 pci_read_config_dword(pvt->pci_br0, SAD_CONTROL, &reg);
723 return GET_BITFIELD(reg, 0, 2);
724 }
725
726 static u8 haswell_get_node_id(struct sbridge_pvt *pvt)
727 {
728 u32 reg;
729
730 pci_read_config_dword(pvt->pci_sad1, SAD_CONTROL, &reg);
731 return GET_BITFIELD(reg, 0, 3);
732 }
733
734 static u64 haswell_get_tolm(struct sbridge_pvt *pvt)
735 {
736 u32 reg;
737
738 pci_read_config_dword(pvt->info.pci_vtd, TOLM, &reg);
739 return (GET_BITFIELD(reg, 26, 31) << 26) | 0x1ffffff;
740 }
741
742 static u64 haswell_get_tohm(struct sbridge_pvt *pvt)
743 {
744 u64 rc;
745 u32 reg;
746
747 pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOHM_0, &reg);
748 rc = GET_BITFIELD(reg, 26, 31);
749 pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOHM_1, &reg);
750 rc = ((reg << 6) | rc) << 26;
751
752 return rc | 0x1ffffff;
753 }
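/*
 * On Haswell, TOHM is split across two registers: HASWELL_TOHM_0 bits 31:26
 * hold the low 6 bits of the limit and HASWELL_TOHM_1 holds the upper bits.
 * The combined value is shifted up to address bit 26 and the low bits are
 * filled with ones so that the returned limit is inclusive.
 */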
754
755 static u64 haswell_rir_limit(u32 reg)
756 {
757 return (((u64)GET_BITFIELD(reg, 1, 11) + 1) << 29) - 1;
758 }
759
760 static inline u8 sad_pkg_socket(u8 pkg)
761 {
762 /* on Ivy Bridge, nodeID is SASS, where A is HA and S is node id */
763 return ((pkg >> 3) << 2) | (pkg & 0x3);
764 }
765
766 static inline u8 sad_pkg_ha(u8 pkg)
767 {
768 return (pkg >> 2) & 0x1;
769 }
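/*
 * Example: a package value of 0b0110 decodes to HA 1 and socket 2, while
 * 0b1001 decodes to HA 0 and socket 5 (see the SASS layout noted above).
 */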
770
771 /****************************************************************************
772 Memory check routines
773 ****************************************************************************/
774 static struct pci_dev *get_pdev_same_bus(u8 bus, u32 id)
775 {
776 struct pci_dev *pdev = NULL;
777
778 do {
779 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, id, pdev);
780 if (pdev && pdev->bus->number == bus)
781 break;
782 } while (pdev);
783
784 return pdev;
785 }
786
787 /**
788 * check_if_ecc_is_active() - Checks if ECC is active
789 * @bus: Device bus
790 * @type: Memory controller type
791 * returns: 0 in case ECC is active, -ENODEV if it can't be determined
792 *	    or is disabled
793 */
794 static int check_if_ecc_is_active(const u8 bus, enum type type)
795 {
796 struct pci_dev *pdev = NULL;
797 u32 mcmtr, id;
798
799 if (type == IVY_BRIDGE)
800 id = PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA;
801 else if (type == HASWELL)
802 id = PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA;
803 else
804 id = PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA;
805
806 pdev = get_pdev_same_bus(bus, id);
807 if (!pdev) {
808 sbridge_printk(KERN_ERR, "Couldn't find PCI device "
809 "%04x:%04x! on bus %02d\n",
810 PCI_VENDOR_ID_INTEL, id, bus);
811 return -ENODEV;
812 }
813
814 pci_read_config_dword(pdev, MCMTR, &mcmtr);
815 if (!IS_ECC_ENABLED(mcmtr)) {
816 sbridge_printk(KERN_ERR, "ECC is disabled. Aborting\n");
817 return -ENODEV;
818 }
819 return 0;
820 }
821
822 static int get_dimm_config(struct mem_ctl_info *mci)
823 {
824 struct sbridge_pvt *pvt = mci->pvt_info;
825 struct dimm_info *dimm;
826 unsigned i, j, banks, ranks, rows, cols, npages;
827 u64 size;
828 u32 reg;
829 enum edac_type mode;
830 enum mem_type mtype;
831
832 if (pvt->info.type == HASWELL)
833 pci_read_config_dword(pvt->pci_sad1, SAD_TARGET, &reg);
834 else
835 pci_read_config_dword(pvt->pci_br0, SAD_TARGET, &reg);
836
837 pvt->sbridge_dev->source_id = SOURCE_ID(reg);
838
839 pvt->sbridge_dev->node_id = pvt->info.get_node_id(pvt);
840 edac_dbg(0, "mc#%d: Node ID: %d, source ID: %d\n",
841 pvt->sbridge_dev->mc,
842 pvt->sbridge_dev->node_id,
843 pvt->sbridge_dev->source_id);
844
845 pci_read_config_dword(pvt->pci_ras, RASENABLES, &reg);
846 if (IS_MIRROR_ENABLED(reg)) {
847 edac_dbg(0, "Memory mirror is enabled\n");
848 pvt->is_mirrored = true;
849 } else {
850 edac_dbg(0, "Memory mirror is disabled\n");
851 pvt->is_mirrored = false;
852 }
853
854 pci_read_config_dword(pvt->pci_ta, MCMTR, &pvt->info.mcmtr);
855 if (IS_LOCKSTEP_ENABLED(pvt->info.mcmtr)) {
856 edac_dbg(0, "Lockstep is enabled\n");
857 mode = EDAC_S8ECD8ED;
858 pvt->is_lockstep = true;
859 } else {
860 edac_dbg(0, "Lockstep is disabled\n");
861 mode = EDAC_S4ECD4ED;
862 pvt->is_lockstep = false;
863 }
864 if (IS_CLOSE_PG(pvt->info.mcmtr)) {
865 edac_dbg(0, "address map is on closed page mode\n");
866 pvt->is_close_pg = true;
867 } else {
868 edac_dbg(0, "address map is on open page mode\n");
869 pvt->is_close_pg = false;
870 }
871
872 mtype = pvt->info.get_memory_type(pvt);
873 if (mtype == MEM_RDDR3 || mtype == MEM_RDDR4)
874 edac_dbg(0, "Memory is registered\n");
875 else if (mtype == MEM_UNKNOWN)
876 edac_dbg(0, "Cannot determine memory type\n");
877 else
878 edac_dbg(0, "Memory is unregistered\n");
879
880	if (mtype == MEM_DDR4 || mtype == MEM_RDDR4)
881 banks = 16;
882 else
883 banks = 8;
884
885 for (i = 0; i < NUM_CHANNELS; i++) {
886 u32 mtr;
887
888 for (j = 0; j < ARRAY_SIZE(mtr_regs); j++) {
889 dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
890 i, j, 0);
891 pci_read_config_dword(pvt->pci_tad[i],
892 mtr_regs[j], &mtr);
893 edac_dbg(4, "Channel #%d MTR%d = %x\n", i, j, mtr);
894 if (IS_DIMM_PRESENT(mtr)) {
895 pvt->channel[i].dimms++;
896
897 ranks = numrank(pvt->info.type, mtr);
898 rows = numrow(mtr);
899 cols = numcol(mtr);
900
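				/*
				 * The shift below converts the cell count to
				 * MiB assuming 8 bytes per row/col/bank/rank
				 * cell (64-bit data path):
				 * MiB = (rows * cols * banks * ranks * 8) >> 20
				 */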
901 size = ((u64)rows * cols * banks * ranks) >> (20 - 3);
902 npages = MiB_TO_PAGES(size);
903
904 edac_dbg(0, "mc#%d: channel %d, dimm %d, %Ld Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
905 pvt->sbridge_dev->mc, i, j,
906 size, npages,
907 banks, ranks, rows, cols);
908
909 dimm->nr_pages = npages;
910 dimm->grain = 32;
911 switch (banks) {
912 case 16:
913 dimm->dtype = DEV_X16;
914 break;
915 case 8:
916 dimm->dtype = DEV_X8;
917 break;
918 case 4:
919 dimm->dtype = DEV_X4;
920 break;
921 }
922 dimm->mtype = mtype;
923 dimm->edac_mode = mode;
924 snprintf(dimm->label, sizeof(dimm->label),
925 "CPU_SrcID#%u_Channel#%u_DIMM#%u",
926 pvt->sbridge_dev->source_id, i, j);
927 }
928 }
929 }
930
931 return 0;
932 }
933
934 static void get_memory_layout(const struct mem_ctl_info *mci)
935 {
936 struct sbridge_pvt *pvt = mci->pvt_info;
937 int i, j, k, n_sads, n_tads, sad_interl;
938 u32 reg;
939 u64 limit, prv = 0;
940 u64 tmp_mb;
941 u32 mb, kb;
942 u32 rir_way;
943
944 /*
945 * Step 1) Get TOLM/TOHM ranges
946 */
947
948 pvt->tolm = pvt->info.get_tolm(pvt);
949 tmp_mb = (1 + pvt->tolm) >> 20;
950
951 mb = div_u64_rem(tmp_mb, 1000, &kb);
952 edac_dbg(0, "TOLM: %u.%03u GB (0x%016Lx)\n", mb, kb, (u64)pvt->tolm);
953
954 /* Address range is already 45:25 */
955 pvt->tohm = pvt->info.get_tohm(pvt);
956 tmp_mb = (1 + pvt->tohm) >> 20;
957
958 mb = div_u64_rem(tmp_mb, 1000, &kb);
959 edac_dbg(0, "TOHM: %u.%03u GB (0x%016Lx)\n", mb, kb, (u64)pvt->tohm);
960
961 /*
962 * Step 2) Get SAD range and SAD Interleave list
963 * TAD registers contain the interleave wayness. However, it
964 * seems simpler to just discover it indirectly, with the
965	 * algorithm below.
966 */
967 prv = 0;
968 for (n_sads = 0; n_sads < pvt->info.max_sad; n_sads++) {
969 /* SAD_LIMIT Address range is 45:26 */
970 pci_read_config_dword(pvt->pci_sad0, pvt->info.dram_rule[n_sads],
971 &reg);
972 limit = SAD_LIMIT(reg);
973
974 if (!DRAM_RULE_ENABLE(reg))
975 continue;
976
977 if (limit <= prv)
978 break;
979
980 tmp_mb = (limit + 1) >> 20;
981 mb = div_u64_rem(tmp_mb, 1000, &kb);
982 edac_dbg(0, "SAD#%d %s up to %u.%03u GB (0x%016Lx) Interleave: %s reg=0x%08x\n",
983 n_sads,
984 get_dram_attr(reg),
985 mb, kb,
986 ((u64)tmp_mb) << 20L,
987 INTERLEAVE_MODE(reg) ? "8:6" : "[8:6]XOR[18:16]",
988 reg);
989 prv = limit;
990
991 pci_read_config_dword(pvt->pci_sad0, pvt->info.interleave_list[n_sads],
992 &reg);
993 sad_interl = sad_pkg(pvt->info.interleave_pkg, reg, 0);
994 for (j = 0; j < 8; j++) {
995 u32 pkg = sad_pkg(pvt->info.interleave_pkg, reg, j);
996 if (j > 0 && sad_interl == pkg)
997 break;
998
999 edac_dbg(0, "SAD#%d, interleave #%d: %d\n",
1000 n_sads, j, pkg);
1001 }
1002 }
1003
1004 /*
1005 * Step 3) Get TAD range
1006 */
1007 prv = 0;
1008 for (n_tads = 0; n_tads < MAX_TAD; n_tads++) {
1009 pci_read_config_dword(pvt->pci_ha0, tad_dram_rule[n_tads],
1010 &reg);
1011 limit = TAD_LIMIT(reg);
1012 if (limit <= prv)
1013 break;
1014 tmp_mb = (limit + 1) >> 20;
1015
1016 mb = div_u64_rem(tmp_mb, 1000, &kb);
1017 edac_dbg(0, "TAD#%d: up to %u.%03u GB (0x%016Lx), socket interleave %d, memory interleave %d, TGT: %d, %d, %d, %d, reg=0x%08x\n",
1018 n_tads, mb, kb,
1019 ((u64)tmp_mb) << 20L,
1020 (u32)TAD_SOCK(reg),
1021 (u32)TAD_CH(reg),
1022 (u32)TAD_TGT0(reg),
1023 (u32)TAD_TGT1(reg),
1024 (u32)TAD_TGT2(reg),
1025 (u32)TAD_TGT3(reg),
1026 reg);
1027 prv = limit;
1028 }
1029
1030 /*
1031 * Step 4) Get TAD offsets, per each channel
1032 */
1033 for (i = 0; i < NUM_CHANNELS; i++) {
1034 if (!pvt->channel[i].dimms)
1035 continue;
1036 for (j = 0; j < n_tads; j++) {
1037 pci_read_config_dword(pvt->pci_tad[i],
1038 tad_ch_nilv_offset[j],
1039 &reg);
1040 tmp_mb = TAD_OFFSET(reg) >> 20;
1041 mb = div_u64_rem(tmp_mb, 1000, &kb);
1042 edac_dbg(0, "TAD CH#%d, offset #%d: %u.%03u GB (0x%016Lx), reg=0x%08x\n",
1043 i, j,
1044 mb, kb,
1045 ((u64)tmp_mb) << 20L,
1046 reg);
1047 }
1048 }
1049
1050 /*
1051 * Step 6) Get RIR Wayness/Limit, per each channel
1052 */
1053 for (i = 0; i < NUM_CHANNELS; i++) {
1054 if (!pvt->channel[i].dimms)
1055 continue;
1056 for (j = 0; j < MAX_RIR_RANGES; j++) {
1057 pci_read_config_dword(pvt->pci_tad[i],
1058 rir_way_limit[j],
1059 &reg);
1060
1061 if (!IS_RIR_VALID(reg))
1062 continue;
1063
1064 tmp_mb = pvt->info.rir_limit(reg) >> 20;
1065 rir_way = 1 << RIR_WAY(reg);
1066 mb = div_u64_rem(tmp_mb, 1000, &kb);
1067 edac_dbg(0, "CH#%d RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d, reg=0x%08x\n",
1068 i, j,
1069 mb, kb,
1070 ((u64)tmp_mb) << 20L,
1071 rir_way,
1072 reg);
1073
1074 for (k = 0; k < rir_way; k++) {
1075 pci_read_config_dword(pvt->pci_tad[i],
1076 rir_offset[j][k],
1077 &reg);
1078 tmp_mb = RIR_OFFSET(reg) << 6;
1079
1080 mb = div_u64_rem(tmp_mb, 1000, &kb);
1081 edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n",
1082 i, j, k,
1083 mb, kb,
1084 ((u64)tmp_mb) << 20L,
1085 (u32)RIR_RNK_TGT(reg),
1086 reg);
1087 }
1088 }
1089 }
1090 }
1091
1092 static struct mem_ctl_info *get_mci_for_node_id(u8 node_id)
1093 {
1094 struct sbridge_dev *sbridge_dev;
1095
1096 list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
1097 if (sbridge_dev->node_id == node_id)
1098 return sbridge_dev->mci;
1099 }
1100 return NULL;
1101 }
1102
1103 static int get_memory_error_data(struct mem_ctl_info *mci,
1104 u64 addr,
1105 u8 *socket,
1106 long *channel_mask,
1107 u8 *rank,
1108 char **area_type, char *msg)
1109 {
1110 struct mem_ctl_info *new_mci;
1111 struct sbridge_pvt *pvt = mci->pvt_info;
1112 struct pci_dev *pci_ha;
1113 int n_rir, n_sads, n_tads, sad_way, sck_xch;
1114 int sad_interl, idx, base_ch;
1115 int interleave_mode, shiftup = 0;
1116 unsigned sad_interleave[pvt->info.max_interleave];
1117 u32 reg, dram_rule;
1118 u8 ch_way, sck_way, pkg, sad_ha = 0;
1119 u32 tad_offset;
1120 u32 rir_way;
1121 u32 mb, kb;
1122 u64 ch_addr, offset, limit = 0, prv = 0;
1123
1124
1125 /*
1126	 * Step 0) Check if the address is in a special memory range.
1127	 * The check below is probably enough to cover all cases where
1128	 * the error is not inside memory, except for the legacy
1129	 * range (e.g. VGA addresses). It is unlikely, however, that the
1130	 * memory controller would generate an error on that range.
1131 */
1132 if ((addr > (u64) pvt->tolm) && (addr < (1LL << 32))) {
1133 sprintf(msg, "Error at TOLM area, on addr 0x%08Lx", addr);
1134 return -EINVAL;
1135 }
1136 if (addr >= (u64)pvt->tohm) {
1137 sprintf(msg, "Error at MMIOH area, on addr 0x%016Lx", addr);
1138 return -EINVAL;
1139 }
1140
1141 /*
1142 * Step 1) Get socket
1143 */
1144 for (n_sads = 0; n_sads < pvt->info.max_sad; n_sads++) {
1145 pci_read_config_dword(pvt->pci_sad0, pvt->info.dram_rule[n_sads],
1146 &reg);
1147
1148 if (!DRAM_RULE_ENABLE(reg))
1149 continue;
1150
1151 limit = SAD_LIMIT(reg);
1152 if (limit <= prv) {
1153 sprintf(msg, "Can't discover the memory socket");
1154 return -EINVAL;
1155 }
1156 if (addr <= limit)
1157 break;
1158 prv = limit;
1159 }
1160 if (n_sads == pvt->info.max_sad) {
1161 sprintf(msg, "Can't discover the memory socket");
1162 return -EINVAL;
1163 }
1164 dram_rule = reg;
1165 *area_type = get_dram_attr(dram_rule);
1166 interleave_mode = INTERLEAVE_MODE(dram_rule);
1167
1168 pci_read_config_dword(pvt->pci_sad0, pvt->info.interleave_list[n_sads],
1169 &reg);
1170
1171 if (pvt->info.type == SANDY_BRIDGE) {
1172 sad_interl = sad_pkg(pvt->info.interleave_pkg, reg, 0);
1173 for (sad_way = 0; sad_way < 8; sad_way++) {
1174 u32 pkg = sad_pkg(pvt->info.interleave_pkg, reg, sad_way);
1175 if (sad_way > 0 && sad_interl == pkg)
1176 break;
1177 sad_interleave[sad_way] = pkg;
1178 edac_dbg(0, "SAD interleave #%d: %d\n",
1179 sad_way, sad_interleave[sad_way]);
1180 }
1181 edac_dbg(0, "mc#%d: Error detected on SAD#%d: address 0x%016Lx < 0x%016Lx, Interleave [%d:6]%s\n",
1182 pvt->sbridge_dev->mc,
1183 n_sads,
1184 addr,
1185 limit,
1186 sad_way + 7,
1187 !interleave_mode ? "" : "XOR[18:16]");
1188 if (interleave_mode)
1189 idx = ((addr >> 6) ^ (addr >> 16)) & 7;
1190 else
1191 idx = (addr >> 6) & 7;
1192 switch (sad_way) {
1193 case 1:
1194 idx = 0;
1195 break;
1196 case 2:
1197 idx = idx & 1;
1198 break;
1199 case 4:
1200 idx = idx & 3;
1201 break;
1202 case 8:
1203 break;
1204 default:
1205 sprintf(msg, "Can't discover socket interleave");
1206 return -EINVAL;
1207 }
1208 *socket = sad_interleave[idx];
1209 edac_dbg(0, "SAD interleave index: %d (wayness %d) = CPU socket %d\n",
1210 idx, sad_way, *socket);
1211 } else if (pvt->info.type == HASWELL) {
1212 int bits, a7mode = A7MODE(dram_rule);
1213
1214 if (a7mode) {
1215 /* A7 mode swaps P9 with P6 */
1216 bits = GET_BITFIELD(addr, 7, 8) << 1;
1217 bits |= GET_BITFIELD(addr, 9, 9);
1218 } else
1219 bits = GET_BITFIELD(addr, 7, 9);
1220
1221 if (interleave_mode) {
1222 /* interleave mode will XOR {8,7,6} with {18,17,16} */
1223 idx = GET_BITFIELD(addr, 16, 18);
1224 idx ^= bits;
1225 } else
1226 idx = bits;
1227
1228 pkg = sad_pkg(pvt->info.interleave_pkg, reg, idx);
1229 *socket = sad_pkg_socket(pkg);
1230 sad_ha = sad_pkg_ha(pkg);
1231
1232 if (a7mode) {
1233 /* MCChanShiftUpEnable */
1234 pci_read_config_dword(pvt->pci_ha0,
1235 HASWELL_HASYSDEFEATURE2, &reg);
1236 shiftup = GET_BITFIELD(reg, 22, 22);
1237 }
1238
1239 edac_dbg(0, "SAD interleave package: %d = CPU socket %d, HA %i, shiftup: %i\n",
1240 idx, *socket, sad_ha, shiftup);
1241 } else {
1242 /* Ivy Bridge's SAD mode doesn't support XOR interleave mode */
1243 idx = (addr >> 6) & 7;
1244 pkg = sad_pkg(pvt->info.interleave_pkg, reg, idx);
1245 *socket = sad_pkg_socket(pkg);
1246 sad_ha = sad_pkg_ha(pkg);
1247 edac_dbg(0, "SAD interleave package: %d = CPU socket %d, HA %d\n",
1248 idx, *socket, sad_ha);
1249 }
1250
1251 /*
1252 * Move to the proper node structure, in order to access the
1253 * right PCI registers
1254 */
1255 new_mci = get_mci_for_node_id(*socket);
1256 if (!new_mci) {
1257 sprintf(msg, "Struct for socket #%u wasn't initialized",
1258 *socket);
1259 return -EINVAL;
1260 }
1261 mci = new_mci;
1262 pvt = mci->pvt_info;
1263
1264 /*
1265 * Step 2) Get memory channel
1266 */
1267 prv = 0;
1268 if (pvt->info.type == SANDY_BRIDGE)
1269 pci_ha = pvt->pci_ha0;
1270 else {
1271 if (sad_ha)
1272 pci_ha = pvt->pci_ha1;
1273 else
1274 pci_ha = pvt->pci_ha0;
1275 }
1276 for (n_tads = 0; n_tads < MAX_TAD; n_tads++) {
1277 pci_read_config_dword(pci_ha, tad_dram_rule[n_tads], &reg);
1278 limit = TAD_LIMIT(reg);
1279 if (limit <= prv) {
1280 sprintf(msg, "Can't discover the memory channel");
1281 return -EINVAL;
1282 }
1283 if (addr <= limit)
1284 break;
1285 prv = limit;
1286 }
1287 if (n_tads == MAX_TAD) {
1288 sprintf(msg, "Can't discover the memory channel");
1289 return -EINVAL;
1290 }
1291
1292 ch_way = TAD_CH(reg) + 1;
1293 sck_way = TAD_SOCK(reg) + 1;
1294
1295 if (ch_way == 3)
1296 idx = addr >> 6;
1297 else
1298 idx = (addr >> (6 + sck_way + shiftup)) & 0x3;
1299 idx = idx % ch_way;
1300
1301 /*
1302 * FIXME: Shouldn't we use CHN_IDX_OFFSET() here, when ch_way == 3 ???
1303 */
1304 switch (idx) {
1305 case 0:
1306 base_ch = TAD_TGT0(reg);
1307 break;
1308 case 1:
1309 base_ch = TAD_TGT1(reg);
1310 break;
1311 case 2:
1312 base_ch = TAD_TGT2(reg);
1313 break;
1314 case 3:
1315 base_ch = TAD_TGT3(reg);
1316 break;
1317 default:
1318 sprintf(msg, "Can't discover the TAD target");
1319 return -EINVAL;
1320 }
1321 *channel_mask = 1 << base_ch;
1322
1323 pci_read_config_dword(pvt->pci_tad[base_ch],
1324 tad_ch_nilv_offset[n_tads],
1325 &tad_offset);
1326
1327 if (pvt->is_mirrored) {
1328 *channel_mask |= 1 << ((base_ch + 2) % 4);
1329 switch(ch_way) {
1330 case 2:
1331 case 4:
1332 sck_xch = 1 << sck_way * (ch_way >> 1);
1333 break;
1334 default:
1335 sprintf(msg, "Invalid mirror set. Can't decode addr");
1336 return -EINVAL;
1337 }
1338 } else
1339 sck_xch = (1 << sck_way) * ch_way;
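	/*
	 * sck_xch is the combined socket x channel interleave factor; it is
	 * used below to strip the interleaving out of the system address
	 * when computing the channel address.
	 */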
1340
1341 if (pvt->is_lockstep)
1342 *channel_mask |= 1 << ((base_ch + 1) % 4);
1343
1344 offset = TAD_OFFSET(tad_offset);
1345
1346 edac_dbg(0, "TAD#%d: address 0x%016Lx < 0x%016Lx, socket interleave %d, channel interleave %d (offset 0x%08Lx), index %d, base ch: %d, ch mask: 0x%02lx\n",
1347 n_tads,
1348 addr,
1349 limit,
1350 (u32)TAD_SOCK(reg),
1351 ch_way,
1352 offset,
1353 idx,
1354 base_ch,
1355 *channel_mask);
1356
1357 /* Calculate channel address */
1358 /* Remove the TAD offset */
1359
1360 if (offset > addr) {
1361 sprintf(msg, "Can't calculate ch addr: TAD offset 0x%08Lx is too high for addr 0x%08Lx!",
1362 offset, addr);
1363 return -EINVAL;
1364 }
1365 addr -= offset;
1366 /* Store the low bits [0:6] of the addr */
1367 ch_addr = addr & 0x7f;
1368 /* Remove socket wayness and remove 6 bits */
1369 addr >>= 6;
1370 addr = div_u64(addr, sck_xch);
1371 #if 0
1372 /* Divide by channel way */
1373 addr = addr / ch_way;
1374 #endif
1375 /* Recover the last 6 bits */
1376 ch_addr |= addr << 6;
1377
1378 /*
1379 * Step 3) Decode rank
1380 */
1381 for (n_rir = 0; n_rir < MAX_RIR_RANGES; n_rir++) {
1382 pci_read_config_dword(pvt->pci_tad[base_ch],
1383 rir_way_limit[n_rir],
1384 &reg);
1385
1386 if (!IS_RIR_VALID(reg))
1387 continue;
1388
1389 limit = pvt->info.rir_limit(reg);
1390 mb = div_u64_rem(limit >> 20, 1000, &kb);
1391 edac_dbg(0, "RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d\n",
1392 n_rir,
1393 mb, kb,
1394 limit,
1395 1 << RIR_WAY(reg));
1396 if (ch_addr <= limit)
1397 break;
1398 }
1399 if (n_rir == MAX_RIR_RANGES) {
1400 sprintf(msg, "Can't discover the memory rank for ch addr 0x%08Lx",
1401 ch_addr);
1402 return -EINVAL;
1403 }
1404 rir_way = RIR_WAY(reg);
1405
1406 if (pvt->is_close_pg)
1407 idx = (ch_addr >> 6);
1408 else
1409 idx = (ch_addr >> 13); /* FIXME: Datasheet says to shift by 15 */
1410 idx %= 1 << rir_way;
1411
1412 pci_read_config_dword(pvt->pci_tad[base_ch],
1413 rir_offset[n_rir][idx],
1414 &reg);
1415 *rank = RIR_RNK_TGT(reg);
1416
1417 edac_dbg(0, "RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n",
1418 n_rir,
1419 ch_addr,
1420 limit,
1421 rir_way,
1422 idx);
1423
1424 return 0;
1425 }
1426
1427 /****************************************************************************
1428 Device initialization routines: put/get, init/exit
1429 ****************************************************************************/
1430
1431 /*
1432 * sbridge_put_all_devices 'put' all the devices that we have
1433 * reserved via 'get'
1434 */
1435 static void sbridge_put_devices(struct sbridge_dev *sbridge_dev)
1436 {
1437 int i;
1438
1439 edac_dbg(0, "\n");
1440 for (i = 0; i < sbridge_dev->n_devs; i++) {
1441 struct pci_dev *pdev = sbridge_dev->pdev[i];
1442 if (!pdev)
1443 continue;
1444 edac_dbg(0, "Removing dev %02x:%02x.%d\n",
1445 pdev->bus->number,
1446 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
1447 pci_dev_put(pdev);
1448 }
1449 }
1450
1451 static void sbridge_put_all_devices(void)
1452 {
1453 struct sbridge_dev *sbridge_dev, *tmp;
1454
1455 list_for_each_entry_safe(sbridge_dev, tmp, &sbridge_edac_list, list) {
1456 sbridge_put_devices(sbridge_dev);
1457 free_sbridge_dev(sbridge_dev);
1458 }
1459 }
1460
1461 static int sbridge_get_onedevice(struct pci_dev **prev,
1462 u8 *num_mc,
1463 const struct pci_id_table *table,
1464 const unsigned devno)
1465 {
1466 struct sbridge_dev *sbridge_dev;
1467 const struct pci_id_descr *dev_descr = &table->descr[devno];
1468 struct pci_dev *pdev = NULL;
1469 u8 bus = 0;
1470
1471 sbridge_printk(KERN_DEBUG,
1472 "Seeking for: PCI ID %04x:%04x\n",
1473 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
1474
1475 pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
1476 dev_descr->dev_id, *prev);
1477
1478 if (!pdev) {
1479 if (*prev) {
1480 *prev = pdev;
1481 return 0;
1482 }
1483
1484 if (dev_descr->optional)
1485 return 0;
1486
1487 /* if the HA wasn't found */
1488 if (devno == 0)
1489 return -ENODEV;
1490
1491 sbridge_printk(KERN_INFO,
1492 "Device not found: %04x:%04x\n",
1493 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
1494
1495 /* End of list, leave */
1496 return -ENODEV;
1497 }
1498 bus = pdev->bus->number;
1499
1500 sbridge_dev = get_sbridge_dev(bus);
1501 if (!sbridge_dev) {
1502 sbridge_dev = alloc_sbridge_dev(bus, table);
1503 if (!sbridge_dev) {
1504 pci_dev_put(pdev);
1505 return -ENOMEM;
1506 }
1507 (*num_mc)++;
1508 }
1509
1510 if (sbridge_dev->pdev[devno]) {
1511 sbridge_printk(KERN_ERR,
1512 "Duplicated device for %04x:%04x\n",
1513 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
1514 pci_dev_put(pdev);
1515 return -ENODEV;
1516 }
1517
1518 sbridge_dev->pdev[devno] = pdev;
1519
1520 /* Be sure that the device is enabled */
1521 if (unlikely(pci_enable_device(pdev) < 0)) {
1522 sbridge_printk(KERN_ERR,
1523 "Couldn't enable %04x:%04x\n",
1524 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
1525 return -ENODEV;
1526 }
1527
1528 edac_dbg(0, "Detected %04x:%04x\n",
1529 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
1530
1531 /*
1532	 * As stated in drivers/pci/search.c, the reference count for
1533	 * @from is always decremented if it is not %NULL. So, as we need
1534	 * to walk through all devices up to NULL, we need to do a get on this device
1535 */
1536 pci_dev_get(pdev);
1537
1538 *prev = pdev;
1539
1540 return 0;
1541 }
1542
1543 /*
1544 * sbridge_get_all_devices - Find and perform 'get' operation on the MCH's
1545 * devices we want to reference for this driver.
1546 * @num_mc: pointer to the memory controllers count, to be incremented in case
1547 * of success.
1548 * @table: model specific table
1549 *
1550 * returns 0 in case of success or error code
1551 */
1552 static int sbridge_get_all_devices(u8 *num_mc,
1553 const struct pci_id_table *table)
1554 {
1555 int i, rc;
1556 struct pci_dev *pdev = NULL;
1557
1558 while (table && table->descr) {
1559 for (i = 0; i < table->n_devs; i++) {
1560 pdev = NULL;
1561 do {
1562 rc = sbridge_get_onedevice(&pdev, num_mc,
1563 table, i);
1564 if (rc < 0) {
1565 if (i == 0) {
1566 i = table->n_devs;
1567 break;
1568 }
1569 sbridge_put_all_devices();
1570 return -ENODEV;
1571 }
1572 } while (pdev);
1573 }
1574 table++;
1575 }
1576
1577 return 0;
1578 }
1579
1580 static int sbridge_mci_bind_devs(struct mem_ctl_info *mci,
1581 struct sbridge_dev *sbridge_dev)
1582 {
1583 struct sbridge_pvt *pvt = mci->pvt_info;
1584 struct pci_dev *pdev;
1585 int i;
1586
1587 for (i = 0; i < sbridge_dev->n_devs; i++) {
1588 pdev = sbridge_dev->pdev[i];
1589 if (!pdev)
1590 continue;
1591
1592 switch (pdev->device) {
1593 case PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0:
1594 pvt->pci_sad0 = pdev;
1595 break;
1596 case PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1:
1597 pvt->pci_sad1 = pdev;
1598 break;
1599 case PCI_DEVICE_ID_INTEL_SBRIDGE_BR:
1600 pvt->pci_br0 = pdev;
1601 break;
1602 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0:
1603 pvt->pci_ha0 = pdev;
1604 break;
1605 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA:
1606 pvt->pci_ta = pdev;
1607 break;
1608 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS:
1609 pvt->pci_ras = pdev;
1610 break;
1611 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0:
1612 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1:
1613 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2:
1614 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3:
1615 {
1616 int id = pdev->device - PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0;
1617 pvt->pci_tad[id] = pdev;
1618 }
1619 break;
1620 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO:
1621 pvt->pci_ddrio = pdev;
1622 break;
1623 default:
1624 goto error;
1625 }
1626
1627 edac_dbg(0, "Associated PCI %02x:%02x, bus %d with dev = %p\n",
1628 pdev->vendor, pdev->device,
1629 sbridge_dev->bus,
1630 pdev);
1631 }
1632
1633	/* Check if everything was registered */
1634	if (!pvt->pci_sad0 || !pvt->pci_sad1 || !pvt->pci_ha0 ||
1635	    !pvt->pci_tad || !pvt->pci_ras || !pvt->pci_ta)
1636 goto enodev;
1637
1638 for (i = 0; i < NUM_CHANNELS; i++) {
1639 if (!pvt->pci_tad[i])
1640 goto enodev;
1641 }
1642 return 0;
1643
1644 enodev:
1645 sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
1646 return -ENODEV;
1647
1648 error:
1649 sbridge_printk(KERN_ERR, "Unexpected device %02x:%02x\n",
1650 PCI_VENDOR_ID_INTEL, pdev->device);
1651 return -EINVAL;
1652 }
1653
1654 static int ibridge_mci_bind_devs(struct mem_ctl_info *mci,
1655 struct sbridge_dev *sbridge_dev)
1656 {
1657 struct sbridge_pvt *pvt = mci->pvt_info;
1658 struct pci_dev *pdev, *tmp;
1659 int i;
1660 bool mode_2ha = false;
1661
1662 tmp = pci_get_device(PCI_VENDOR_ID_INTEL,
1663 PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1, NULL);
1664 if (tmp) {
1665 mode_2ha = true;
1666 pci_dev_put(tmp);
1667 }
1668
1669 for (i = 0; i < sbridge_dev->n_devs; i++) {
1670 pdev = sbridge_dev->pdev[i];
1671 if (!pdev)
1672 continue;
1673
1674 switch (pdev->device) {
1675 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0:
1676 pvt->pci_ha0 = pdev;
1677 break;
1678 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA:
1679			pvt->pci_ta = pdev;
			break;
1680 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS:
1681 pvt->pci_ras = pdev;
1682 break;
1683 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2:
1684 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3:
1685			/* if we have 2 HAs active, channels 2 and 3
1686			 * are on the other device */
1687 if (mode_2ha)
1688 break;
1689 /* fall through */
1690 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0:
1691 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1:
1692 {
1693 int id = pdev->device - PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0;
1694 pvt->pci_tad[id] = pdev;
1695 }
1696 break;
1697 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0:
1698 pvt->pci_ddrio = pdev;
1699 break;
1700 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0:
1701 if (!mode_2ha)
1702 pvt->pci_ddrio = pdev;
1703 break;
1704 case PCI_DEVICE_ID_INTEL_IBRIDGE_SAD:
1705 pvt->pci_sad0 = pdev;
1706 break;
1707 case PCI_DEVICE_ID_INTEL_IBRIDGE_BR0:
1708 pvt->pci_br0 = pdev;
1709 break;
1710 case PCI_DEVICE_ID_INTEL_IBRIDGE_BR1:
1711 pvt->pci_br1 = pdev;
1712 break;
1713 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1:
1714 pvt->pci_ha1 = pdev;
1715 break;
1716 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0:
1717 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1:
1718 {
1719 int id = pdev->device - PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0 + 2;
1720
1721 /* we shouldn't have this device if we have just one
1722 * HA present */
1723 WARN_ON(!mode_2ha);
1724 pvt->pci_tad[id] = pdev;
1725 }
1726 break;
1727 default:
1728 goto error;
1729 }
1730
1731 edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n",
1732 sbridge_dev->bus,
1733 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
1734 pdev);
1735 }
1736
1737	/* Check if everything was registered */
1738 if (!pvt->pci_sad0 || !pvt->pci_ha0 || !pvt->pci_br0 ||
1739 !pvt->pci_br1 || !pvt->pci_tad || !pvt->pci_ras ||
1740 !pvt->pci_ta)
1741 goto enodev;
1742
1743 for (i = 0; i < NUM_CHANNELS; i++) {
1744 if (!pvt->pci_tad[i])
1745 goto enodev;
1746 }
1747 return 0;
1748
1749 enodev:
1750 sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
1751 return -ENODEV;
1752
1753 error:
1754 sbridge_printk(KERN_ERR,
1755 "Unexpected device %02x:%02x\n", PCI_VENDOR_ID_INTEL,
1756 pdev->device);
1757 return -EINVAL;
1758 }
1759
1760 static int haswell_mci_bind_devs(struct mem_ctl_info *mci,
1761 struct sbridge_dev *sbridge_dev)
1762 {
1763 struct sbridge_pvt *pvt = mci->pvt_info;
1764 struct pci_dev *pdev, *tmp;
1765 int i;
1766 bool mode_2ha = false;
1767
1768 tmp = pci_get_device(PCI_VENDOR_ID_INTEL,
1769 PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1, NULL);
1770 if (tmp) {
1771 mode_2ha = true;
1772 pci_dev_put(tmp);
1773 }
1774
1775 /* there's only one device per system; not tied to any bus */
1776 if (pvt->info.pci_vtd == NULL)
1777 /* result will be checked later */
1778 pvt->info.pci_vtd = pci_get_device(PCI_VENDOR_ID_INTEL,
1779 PCI_DEVICE_ID_INTEL_HASWELL_IMC_VTD_MISC,
1780 NULL);
1781
1782 for (i = 0; i < sbridge_dev->n_devs; i++) {
1783 pdev = sbridge_dev->pdev[i];
1784 if (!pdev)
1785 continue;
1786
1787 switch (pdev->device) {
1788 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0:
1789 pvt->pci_sad0 = pdev;
1790 break;
1791 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1:
1792 pvt->pci_sad1 = pdev;
1793 break;
1794 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0:
1795 pvt->pci_ha0 = pdev;
1796 break;
1797 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA:
1798 pvt->pci_ta = pdev;
1799 break;
1800 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_THERMAL:
1801 pvt->pci_ras = pdev;
1802 break;
1803 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0:
1804 pvt->pci_tad[0] = pdev;
1805 break;
1806 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1:
1807 pvt->pci_tad[1] = pdev;
1808 break;
1809 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2:
1810 if (!mode_2ha)
1811 pvt->pci_tad[2] = pdev;
1812 break;
1813 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3:
1814 if (!mode_2ha)
1815 pvt->pci_tad[3] = pdev;
1816 break;
1817 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0:
1818 pvt->pci_ddrio = pdev;
1819 break;
1820 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1:
1821 pvt->pci_ha1 = pdev;
1822 break;
1823 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA:
1824 pvt->pci_ha1_ta = pdev;
1825 break;
1826 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0:
1827 if (mode_2ha)
1828 pvt->pci_tad[2] = pdev;
1829 break;
1830 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1:
1831 if (mode_2ha)
1832 pvt->pci_tad[3] = pdev;
1833 break;
1834 default:
1835 break;
1836 }
1837
1838 edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n",
1839 sbridge_dev->bus,
1840 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
1841 pdev);
1842 }
1843
1844	/* Check if everything was registered */
1845 if (!pvt->pci_sad0 || !pvt->pci_ha0 || !pvt->pci_sad1 ||
1846 !pvt->pci_ras || !pvt->pci_ta || !pvt->info.pci_vtd)
1847 goto enodev;
1848
1849 for (i = 0; i < NUM_CHANNELS; i++) {
1850 if (!pvt->pci_tad[i])
1851 goto enodev;
1852 }
1853 return 0;
1854
1855 enodev:
1856 sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
1857 return -ENODEV;
1858 }
1859
1860 /****************************************************************************
1861 Error check routines
1862 ****************************************************************************/
1863
1864 /*
1865 * While Sandy Bridge has error count registers, the SMI BIOS reads and
1866 * resets those counters, so they are not reliable for the OS to read.
1867 * So, we have no option but to just trust whatever MCE tells us
1868 * about the errors.
1869 */
1870 static void sbridge_mce_output_error(struct mem_ctl_info *mci,
1871 const struct mce *m)
1872 {
1873 struct mem_ctl_info *new_mci;
1874 struct sbridge_pvt *pvt = mci->pvt_info;
1875 enum hw_event_mc_err_type tp_event;
1876 char *type, *optype, msg[256];
1877 bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0);
1878 bool overflow = GET_BITFIELD(m->status, 62, 62);
1879 bool uncorrected_error = GET_BITFIELD(m->status, 61, 61);
1880 bool recoverable;
1881 u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
1882 u32 mscod = GET_BITFIELD(m->status, 16, 31);
1883 u32 errcode = GET_BITFIELD(m->status, 0, 15);
1884 u32 channel = GET_BITFIELD(m->status, 0, 3);
1885 u32 optypenum = GET_BITFIELD(m->status, 4, 6);
1886 long channel_mask, first_channel;
1887 u8 rank, socket;
1888 int rc, dimm;
1889 char *area_type = NULL;
1890
1891 if (pvt->info.type == IVY_BRIDGE)
1892 recoverable = true;
1893 else
1894 recoverable = GET_BITFIELD(m->status, 56, 56);
1895
1896 if (uncorrected_error) {
1897 if (ripv) {
1898 type = "FATAL";
1899 tp_event = HW_EVENT_ERR_FATAL;
1900 } else {
1901 type = "NON_FATAL";
1902 tp_event = HW_EVENT_ERR_UNCORRECTED;
1903 }
1904 } else {
1905 type = "CORRECTED";
1906 tp_event = HW_EVENT_ERR_CORRECTED;
1907 }
1908
1909 /*
1910	 * According to Table 15-9 of the Intel Architecture spec vol 3A,
1911 * memory errors should fit in this mask:
1912 * 000f 0000 1mmm cccc (binary)
1913 * where:
1914 * f = Correction Report Filtering Bit. If 1, subsequent errors
1915 * won't be shown
1916 * mmm = error type
1917 * cccc = channel
1918 * If the mask doesn't match, report an error to the parsing logic
1919 */
1920 if (! ((errcode & 0xef80) == 0x80)) {
1921 optype = "Can't parse: it is not a mem";
1922 } else {
1923 switch (optypenum) {
1924 case 0:
1925 optype = "generic undef request error";
1926 break;
1927 case 1:
1928 optype = "memory read error";
1929 break;
1930 case 2:
1931 optype = "memory write error";
1932 break;
1933 case 3:
1934 optype = "addr/cmd error";
1935 break;
1936 case 4:
1937 optype = "memory scrubbing error";
1938 break;
1939 default:
1940 optype = "reserved";
1941 break;
1942 }
1943 }
1944
1945	/* Only decode errors with a valid address (ADDRV) */
1946 if (!GET_BITFIELD(m->status, 58, 58))
1947 return;
1948
1949 rc = get_memory_error_data(mci, m->addr, &socket,
1950 &channel_mask, &rank, &area_type, msg);
1951 if (rc < 0)
1952 goto err_parsing;
1953 new_mci = get_mci_for_node_id(socket);
1954 if (!new_mci) {
1955 strcpy(msg, "Error: socket got corrupted!");
1956 goto err_parsing;
1957 }
1958 mci = new_mci;
1959 pvt = mci->pvt_info;
1960
1961 first_channel = find_first_bit(&channel_mask, NUM_CHANNELS);
1962
1963 if (rank < 4)
1964 dimm = 0;
1965 else if (rank < 8)
1966 dimm = 1;
1967 else
1968 dimm = 2;
1969
1970
1971 /*
1972 * FIXME: On some memory configurations (mirror, lockstep), the
1973 * Memory Controller can't point the error to a single DIMM. The
1974 * EDAC core should be handling the channel mask, in order to point
1975	 * to the group of DIMMs where the error may be happening.
1976 */
1977 if (!pvt->is_lockstep && !pvt->is_mirrored && !pvt->is_close_pg)
1978 channel = first_channel;
1979
1980 snprintf(msg, sizeof(msg),
1981 "%s%s area:%s err_code:%04x:%04x socket:%d channel_mask:%ld rank:%d",
1982 overflow ? " OVERFLOW" : "",
1983 (uncorrected_error && recoverable) ? " recoverable" : "",
1984 area_type,
1985 mscod, errcode,
1986 socket,
1987 channel_mask,
1988 rank);
1989
1990 edac_dbg(0, "%s\n", msg);
1991
1992 /* FIXME: need support for channel mask */
1993
1994 /* Call the helper to output message */
1995 edac_mc_handle_error(tp_event, mci, core_err_cnt,
1996 m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
1997 channel, dimm, -1,
1998 optype, msg);
1999 return;
2000 err_parsing:
2001 edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0,
2002 -1, -1, -1,
2003 msg, "");
2004
2005 }
2006
2007 /*
2008 * sbridge_check_error Retrieve and process errors reported by the
2009 * hardware. Called by the Core module.
2010 */
2011 static void sbridge_check_error(struct mem_ctl_info *mci)
2012 {
2013 struct sbridge_pvt *pvt = mci->pvt_info;
2014 int i;
2015 unsigned count = 0;
2016 struct mce *m;
2017
2018 /*
2019	 * MCE first step: Copy all MCE errors into a temporary buffer.
2020	 * We use double buffering here to reduce the risk of
2021	 * losing an error.
2022 */
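	/*
	 * Example of the index arithmetic below (hypothetical values):
	 * with mce_in = MCE_LOG_LEN - 2 and mce_out = 1, count becomes
	 * (1 + MCE_LOG_LEN - (MCE_LOG_LEN - 2)) % MCE_LOG_LEN = 3, and the
	 * copy below wraps around the end of the ring buffer.
	 */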
2023 smp_rmb();
2024 count = (pvt->mce_out + MCE_LOG_LEN - pvt->mce_in)
2025 % MCE_LOG_LEN;
2026 if (!count)
2027 return;
2028
2029 m = pvt->mce_outentry;
2030 if (pvt->mce_in + count > MCE_LOG_LEN) {
2031 unsigned l = MCE_LOG_LEN - pvt->mce_in;
2032
2033 memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * l);
2034 smp_wmb();
2035 pvt->mce_in = 0;
2036 count -= l;
2037 m += l;
2038 }
2039 memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * count);
2040 smp_wmb();
2041 pvt->mce_in += count;
2042
2043 smp_rmb();
2044 if (pvt->mce_overrun) {
2045 sbridge_printk(KERN_ERR, "Lost %d memory errors\n",
2046 pvt->mce_overrun);
2047 smp_wmb();
2048 pvt->mce_overrun = 0;
2049 }
2050
2051 /*
2052 * MCE second step: parse errors and display
2053 */
2054 for (i = 0; i < count; i++)
2055 sbridge_mce_output_error(mci, &pvt->mce_outentry[i]);
2056 }
2057
2058 /*
2059	 * sbridge_mce_check_error	Replicates the mcelog routine to get errors.
2060	 *				This routine simply queues mcelog errors and
2061	 *				returns. The error itself should be handled later
2062	 *				by sbridge_check_error.
2063 * WARNING: As this routine should be called at NMI time, extra care should
2064 * be taken to avoid deadlocks, and to be as fast as possible.
2065 */
2066 static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
2067 void *data)
2068 {
2069 struct mce *mce = (struct mce *)data;
2070 struct mem_ctl_info *mci;
2071 struct sbridge_pvt *pvt;
2072 char *type;
2073
2074 if (get_edac_report_status() == EDAC_REPORTING_DISABLED)
2075 return NOTIFY_DONE;
2076
2077 mci = get_mci_for_node_id(mce->socketid);
2078 if (!mci)
2079 return NOTIFY_BAD;
2080 pvt = mci->pvt_info;
2081
2082 /*
2083	 * Just let mcelog handle it if the error is
2084	 * outside the memory controller. A memory error
2085	 * is indicated by bit 7 = 1 and bits 8-11,13-15 = 0.
2086	 * Bit 12 has a special meaning.
2087 */
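	/*
	 * For instance (hypothetical example values): an MCACOD of 0x0091
	 * gives (0x0091 & 0xefff) >> 7 == 1 and is handled here, while a
	 * non-memory MCACOD such as 0x0135 gives 2 and is left to mcelog.
	 */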
2088 if ((mce->status & 0xefff) >> 7 != 1)
2089 return NOTIFY_DONE;
2090
2091 if (mce->mcgstatus & MCG_STATUS_MCIP)
2092 type = "Exception";
2093 else
2094 type = "Event";
2095
2096 sbridge_mc_printk(mci, KERN_DEBUG, "HANDLING MCE MEMORY ERROR\n");
2097
2098 sbridge_mc_printk(mci, KERN_DEBUG, "CPU %d: Machine Check %s: %Lx "
2099 "Bank %d: %016Lx\n", mce->extcpu, type,
2100 mce->mcgstatus, mce->bank, mce->status);
2101 sbridge_mc_printk(mci, KERN_DEBUG, "TSC %llx ", mce->tsc);
2102 sbridge_mc_printk(mci, KERN_DEBUG, "ADDR %llx ", mce->addr);
2103 sbridge_mc_printk(mci, KERN_DEBUG, "MISC %llx ", mce->misc);
2104
2105 sbridge_mc_printk(mci, KERN_DEBUG, "PROCESSOR %u:%x TIME %llu SOCKET "
2106 "%u APIC %x\n", mce->cpuvendor, mce->cpuid,
2107 mce->time, mce->socketid, mce->apicid);
2108
2109 smp_rmb();
2110 if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) {
2111 smp_wmb();
2112 pvt->mce_overrun++;
2113 return NOTIFY_DONE;
2114 }
2115
2116	/* Copy the memory error into the ring buffer */
2117 memcpy(&pvt->mce_entry[pvt->mce_out], mce, sizeof(*mce));
2118 smp_wmb();
2119 pvt->mce_out = (pvt->mce_out + 1) % MCE_LOG_LEN;
2120
2121 /* Handle fatal errors immediately */
2122 if (mce->mcgstatus & 1)
2123 sbridge_check_error(mci);
2124
2125	/* Advise mcelog that the error was handled */
2126 return NOTIFY_STOP;
2127 }
2128
2129 static struct notifier_block sbridge_mce_dec = {
2130 .notifier_call = sbridge_mce_check_error,
2131 };
2132
2133 /****************************************************************************
2134 EDAC register/unregister logic
2135 ****************************************************************************/
2136
2137 static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev)
2138 {
2139 struct mem_ctl_info *mci = sbridge_dev->mci;
2140 struct sbridge_pvt *pvt;
2141
2142 if (unlikely(!mci || !mci->pvt_info)) {
2143 edac_dbg(0, "MC: dev = %p\n", &sbridge_dev->pdev[0]->dev);
2144
2145 sbridge_printk(KERN_ERR, "Couldn't find mci handler\n");
2146 return;
2147 }
2148
2149 pvt = mci->pvt_info;
2150
2151 edac_dbg(0, "MC: mci = %p, dev = %p\n",
2152 mci, &sbridge_dev->pdev[0]->dev);
2153
2154 /* Remove MC sysfs nodes */
2155 edac_mc_del_mc(mci->pdev);
2156
2157 edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
2158 kfree(mci->ctl_name);
2159 edac_mc_free(mci);
2160 sbridge_dev->mci = NULL;
2161 }
2162
2163 static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
2164 {
2165 struct mem_ctl_info *mci;
2166 struct edac_mc_layer layers[2];
2167 struct sbridge_pvt *pvt;
2168 struct pci_dev *pdev = sbridge_dev->pdev[0];
2169 int rc;
2170
2171 /* Check the number of active and not disabled channels */
2172 rc = check_if_ecc_is_active(sbridge_dev->bus, type);
2173 if (unlikely(rc < 0))
2174 return rc;
2175
2176 /* allocate a new MC control structure */
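	/*
	 * Two-level layout reported to the EDAC core: channels first, then
	 * DIMM slots within a channel; the slot layer stands in for the
	 * legacy csrow view.
	 */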
2177 layers[0].type = EDAC_MC_LAYER_CHANNEL;
2178 layers[0].size = NUM_CHANNELS;
2179 layers[0].is_virt_csrow = false;
2180 layers[1].type = EDAC_MC_LAYER_SLOT;
2181 layers[1].size = MAX_DIMMS;
2182 layers[1].is_virt_csrow = true;
2183 mci = edac_mc_alloc(sbridge_dev->mc, ARRAY_SIZE(layers), layers,
2184 sizeof(*pvt));
2185
2186 if (unlikely(!mci))
2187 return -ENOMEM;
2188
2189 edac_dbg(0, "MC: mci = %p, dev = %p\n",
2190 mci, &pdev->dev);
2191
2192 pvt = mci->pvt_info;
2193 memset(pvt, 0, sizeof(*pvt));
2194
2195 /* Associate sbridge_dev and mci for future usage */
2196 pvt->sbridge_dev = sbridge_dev;
2197 sbridge_dev->mci = mci;
2198
2199 mci->mtype_cap = MEM_FLAG_DDR3;
2200 mci->edac_ctl_cap = EDAC_FLAG_NONE;
2201 mci->edac_cap = EDAC_FLAG_NONE;
2202 mci->mod_name = "sbridge_edac.c";
2203 mci->mod_ver = SBRIDGE_REVISION;
2204 mci->dev_name = pci_name(pdev);
2205 mci->ctl_page_to_phys = NULL;
2206
2207 /* Set the function pointer to an actual operation function */
2208 mci->edac_check = sbridge_check_error;
2209
2210 pvt->info.type = type;
2211 switch (type) {
2212 case IVY_BRIDGE:
2213 pvt->info.rankcfgr = IB_RANK_CFG_A;
2214 pvt->info.get_tolm = ibridge_get_tolm;
2215 pvt->info.get_tohm = ibridge_get_tohm;
2216 pvt->info.dram_rule = ibridge_dram_rule;
2217 pvt->info.get_memory_type = get_memory_type;
2218 pvt->info.get_node_id = get_node_id;
2219 pvt->info.rir_limit = rir_limit;
2220 pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule);
2221 pvt->info.interleave_list = ibridge_interleave_list;
2222 pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list);
2223 pvt->info.interleave_pkg = ibridge_interleave_pkg;
2224 mci->ctl_name = kasprintf(GFP_KERNEL, "Ivy Bridge Socket#%d", mci->mc_idx);
2225
2226 /* Store pci devices at mci for faster access */
2227 rc = ibridge_mci_bind_devs(mci, sbridge_dev);
2228 if (unlikely(rc < 0))
2229 goto fail0;
2230 break;
2231 case SANDY_BRIDGE:
2232 pvt->info.rankcfgr = SB_RANK_CFG_A;
2233 pvt->info.get_tolm = sbridge_get_tolm;
2234 pvt->info.get_tohm = sbridge_get_tohm;
2235 pvt->info.dram_rule = sbridge_dram_rule;
2236 pvt->info.get_memory_type = get_memory_type;
2237 pvt->info.get_node_id = get_node_id;
2238 pvt->info.rir_limit = rir_limit;
2239 pvt->info.max_sad = ARRAY_SIZE(sbridge_dram_rule);
2240 pvt->info.interleave_list = sbridge_interleave_list;
2241 pvt->info.max_interleave = ARRAY_SIZE(sbridge_interleave_list);
2242 pvt->info.interleave_pkg = sbridge_interleave_pkg;
2243 mci->ctl_name = kasprintf(GFP_KERNEL, "Sandy Bridge Socket#%d", mci->mc_idx);
2244
2245 /* Store pci devices at mci for faster access */
2246 rc = sbridge_mci_bind_devs(mci, sbridge_dev);
2247 if (unlikely(rc < 0))
2248 goto fail0;
2249 break;
2250 case HASWELL:
2251 /* rankcfgr isn't used */
2252 pvt->info.get_tolm = haswell_get_tolm;
2253 pvt->info.get_tohm = haswell_get_tohm;
2254 pvt->info.dram_rule = ibridge_dram_rule;
2255 pvt->info.get_memory_type = haswell_get_memory_type;
2256 pvt->info.get_node_id = haswell_get_node_id;
2257 pvt->info.rir_limit = haswell_rir_limit;
2258 pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule);
2259 pvt->info.interleave_list = ibridge_interleave_list;
2260 pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list);
2261 pvt->info.interleave_pkg = ibridge_interleave_pkg;
2262 mci->ctl_name = kasprintf(GFP_KERNEL, "Haswell Socket#%d", mci->mc_idx);
2263
2264 /* Store pci devices at mci for faster access */
2265 rc = haswell_mci_bind_devs(mci, sbridge_dev);
2266 if (unlikely(rc < 0))
2267 goto fail0;
2268 break;
2269 }
2270
2271 /* Get dimm basic config and the memory layout */
2272 get_dimm_config(mci);
2273 get_memory_layout(mci);
2274
2275 /* record ptr to the generic device */
2276 mci->pdev = &pdev->dev;
2277
2278 /* add this new MC control structure to EDAC's list of MCs */
2279 if (unlikely(edac_mc_add_mc(mci))) {
2280 edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
2281 rc = -EINVAL;
2282 goto fail0;
2283 }
2284
2285 return 0;
2286
2287 fail0:
2288 kfree(mci->ctl_name);
2289 edac_mc_free(mci);
2290 sbridge_dev->mci = NULL;
2291 return rc;
2292 }
2293
2294 /*
2295	 *	sbridge_probe	Probe for ONE instance of the device to see if it is
2296	 *			present.
2297	 *	return:
2298	 *		0 if a device was found
2299	 *		< 0 for an error code
2300 */
2301
2302 static int sbridge_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2303 {
2304 int rc = -ENODEV;
2305 u8 mc, num_mc = 0;
2306 struct sbridge_dev *sbridge_dev;
2307 enum type type = SANDY_BRIDGE;
2308
2309 /* get the pci devices we want to reserve for our use */
2310 mutex_lock(&sbridge_edac_lock);
2311
2312 /*
2313	 * All memory controllers are allocated on the first pass.
2314 */
2315 if (unlikely(probed >= 1)) {
2316 mutex_unlock(&sbridge_edac_lock);
2317 return -ENODEV;
2318 }
2319 probed++;
2320
2321 switch (pdev->device) {
2322 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA:
2323 rc = sbridge_get_all_devices(&num_mc, pci_dev_descr_ibridge_table);
2324 type = IVY_BRIDGE;
2325 break;
2326 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA:
2327 rc = sbridge_get_all_devices(&num_mc, pci_dev_descr_sbridge_table);
2328 type = SANDY_BRIDGE;
2329 break;
2330 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0:
2331 rc = sbridge_get_all_devices(&num_mc, pci_dev_descr_haswell_table);
2332 type = HASWELL;
2333 break;
2334 }
2335 if (unlikely(rc < 0))
2336 goto fail0;
2337 mc = 0;
2338
2339 list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
2340 edac_dbg(0, "Registering MC#%d (%d of %d)\n",
2341 mc, mc + 1, num_mc);
2342
2343 sbridge_dev->mc = mc++;
2344 rc = sbridge_register_mci(sbridge_dev, type);
2345 if (unlikely(rc < 0))
2346 goto fail1;
2347 }
2348
2349 sbridge_printk(KERN_INFO, "Driver loaded.\n");
2350
2351 mutex_unlock(&sbridge_edac_lock);
2352 return 0;
2353
2354 fail1:
2355 list_for_each_entry(sbridge_dev, &sbridge_edac_list, list)
2356 sbridge_unregister_mci(sbridge_dev);
2357
2358 sbridge_put_all_devices();
2359 fail0:
2360 mutex_unlock(&sbridge_edac_lock);
2361 return rc;
2362 }
2363
2364 /*
2365 * sbridge_remove destructor for one instance of device
2366 *
2367 */
2368 static void sbridge_remove(struct pci_dev *pdev)
2369 {
2370 struct sbridge_dev *sbridge_dev;
2371
2372 edac_dbg(0, "\n");
2373
2374 /*
2375	 * There is a problem here: the pdev value for removal will be wrong, since
2376	 * it will point to the X58 register used to detect that the machine
2377	 * is a Nehalem or newer design. However, since several PCI
2378	 * devices are grouped together to provide MC functionality, we need
2379	 * to use a different method for releasing the devices.
2380 */
2381
2382 mutex_lock(&sbridge_edac_lock);
2383
2384 if (unlikely(!probed)) {
2385 mutex_unlock(&sbridge_edac_lock);
2386 return;
2387 }
2388
2389 list_for_each_entry(sbridge_dev, &sbridge_edac_list, list)
2390 sbridge_unregister_mci(sbridge_dev);
2391
2392 /* Release PCI resources */
2393 sbridge_put_all_devices();
2394
2395 probed--;
2396
2397 mutex_unlock(&sbridge_edac_lock);
2398 }
2399
2400 MODULE_DEVICE_TABLE(pci, sbridge_pci_tbl);
2401
2402 /*
2403 * sbridge_driver pci_driver structure for this module
2404 *
2405 */
2406 static struct pci_driver sbridge_driver = {
2407 .name = "sbridge_edac",
2408 .probe = sbridge_probe,
2409 .remove = sbridge_remove,
2410 .id_table = sbridge_pci_tbl,
2411 };
2412
2413 /*
2414 * sbridge_init Module entry function
2415 * Try to initialize this module for its devices
2416 */
2417 static int __init sbridge_init(void)
2418 {
2419 int pci_rc;
2420
2421 edac_dbg(2, "\n");
2422
2423 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
2424 opstate_init();
2425
2426 pci_rc = pci_register_driver(&sbridge_driver);
2427 if (pci_rc >= 0) {
2428 mce_register_decode_chain(&sbridge_mce_dec);
2429 if (get_edac_report_status() == EDAC_REPORTING_DISABLED)
2430 sbridge_printk(KERN_WARNING, "Loading driver, error reporting disabled.\n");
2431 return 0;
2432 }
2433
2434 sbridge_printk(KERN_ERR, "Failed to register device with error %d.\n",
2435 pci_rc);
2436
2437 return pci_rc;
2438 }
2439
2440 /*
2441 * sbridge_exit() Module exit function
2442 * Unregister the driver
2443 */
2444 static void __exit sbridge_exit(void)
2445 {
2446 edac_dbg(2, "\n");
2447 pci_unregister_driver(&sbridge_driver);
2448 mce_unregister_decode_chain(&sbridge_mce_dec);
2449 }
2450
2451 module_init(sbridge_init);
2452 module_exit(sbridge_exit);
2453
2454 module_param(edac_op_state, int, 0444);
2455 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
2456
2457 MODULE_LICENSE("GPL");
2458 MODULE_AUTHOR("Mauro Carvalho Chehab");
2459 MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
2460 MODULE_DESCRIPTION("MC Driver for Intel Sandy Bridge and Ivy Bridge memory controllers - "
2461 SBRIDGE_REVISION);