Commit | Line | Data |
---|---|---|
2bc65418 | 1 | #include "amd64_edac.h" |
23ac4ae8 | 2 | #include <asm/amd_nb.h> |
2bc65418 DT |
3 | |
4 | static struct edac_pci_ctl_info *amd64_ctl_pci; | |
5 | ||
6 | static int report_gart_errors; | |
7 | module_param(report_gart_errors, int, 0644); | |
8 | ||
9 | /* | |
10 | * Set by command line parameter. If BIOS has enabled the ECC, this override is | |
11 | * cleared to prevent re-enabling the hardware by this driver. | |
12 | */ | |
13 | static int ecc_enable_override; | |
14 | module_param(ecc_enable_override, int, 0644); | |
15 | ||
a29d8b8e | 16 | static struct msr __percpu *msrs; |
50542251 | 17 | |
2bc65418 DT |
18 | /* Lookup table for all possible MC control instances */ |
19 | struct amd64_pvt; | |
3011b20d BP |
20 | static struct mem_ctl_info *mci_lookup[EDAC_MAX_NUMNODES]; |
21 | static struct amd64_pvt *pvt_lookup[EDAC_MAX_NUMNODES]; | |
2bc65418 | 22 | |
b70ef010 | 23 | /* |
1433eb99 BP |
24 | * Address to DRAM bank mapping: see F2x80 for K8 and F2x[1,0]80 for Fam10 and |
25 | * later. | |
b70ef010 | 26 | */ |
1433eb99 BP |
27 | static int ddr2_dbam_revCG[] = { |
28 | [0] = 32, | |
29 | [1] = 64, | |
30 | [2] = 128, | |
31 | [3] = 256, | |
32 | [4] = 512, | |
33 | [5] = 1024, | |
34 | [6] = 2048, | |
35 | }; | |
36 | ||
37 | static int ddr2_dbam_revD[] = { | |
38 | [0] = 32, | |
39 | [1] = 64, | |
40 | [2 ... 3] = 128, | |
41 | [4] = 256, | |
42 | [5] = 512, | |
43 | [6] = 256, | |
44 | [7] = 512, | |
45 | [8 ... 9] = 1024, | |
46 | [10] = 2048, | |
47 | }; | |
48 | ||
49 | static int ddr2_dbam[] = { [0] = 128, | |
50 | [1] = 256, | |
51 | [2 ... 4] = 512, | |
52 | [5 ... 6] = 1024, | |
53 | [7 ... 8] = 2048, | |
54 | [9 ... 10] = 4096, | |
55 | [11] = 8192, | |
56 | }; | |
57 | ||
58 | static int ddr3_dbam[] = { [0] = -1, | |
59 | [1] = 256, | |
60 | [2] = 512, | |
61 | [3 ... 4] = -1, | |
62 | [5 ... 6] = 1024, | |
63 | [7 ... 8] = 2048, | |
64 | [9 ... 10] = 4096, | |
24f9a7fe | 65 | [11] = 8192, |
b70ef010 BP |
66 | }; |
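
These tables are indexed by the 4-bit cs_mode value pulled out of the DBAM register, one nibble per chip-select pair (cf. the `DBAM_DIMM()` usage in `f10_early_channel_count()` further down). A minimal standalone sketch of that lookup; the macro body and the MB interpretation are illustrative, not copied from amd64_edac.h:

```c
#include <stdio.h>

/* One 4-bit cs_mode field per chip-select pair (illustrative macro,
 * mirroring the DBAM_DIMM() usage in f10_early_channel_count()). */
#define DBAM_DIMM(i, dbam)	(((dbam) >> ((i) * 4)) & 0xF)

/* Same values as ddr2_dbam[] above, written out flat. */
static const int ddr2_dbam_ex[] = { 128, 256, 512, 512, 512, 1024, 1024,
				    2048, 2048, 4096, 4096, 8192 };

int main(void)
{
	unsigned int dbam = 0x21;	/* DIMM0 -> cs_mode 1, DIMM1 -> cs_mode 2 */
	int i;

	for (i = 0; i < 2; i++)
		printf("DIMM%d: cs_mode %d -> %d MB\n", i,
		       DBAM_DIMM(i, dbam), ddr2_dbam_ex[DBAM_DIMM(i, dbam)]);
	return 0;
}
```
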
67 | ||
68 | /* | |
69 | * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing | |
70 | * bandwidth to a valid bit pattern. The 'set' operation finds the matching | |
71 | * or next-higher value. | |
72 | * | |
73 | * FIXME: Produce a better mapping/linearisation. | |
74 | */ | |
75 | ||
76 | struct scrubrate scrubrates[] = { | |
77 | { 0x01, 1600000000UL}, | |
78 | { 0x02, 800000000UL}, | |
79 | { 0x03, 400000000UL}, | |
80 | { 0x04, 200000000UL}, | |
81 | { 0x05, 100000000UL}, | |
82 | { 0x06, 50000000UL}, | |
83 | { 0x07, 25000000UL}, | |
84 | { 0x08, 12284069UL}, | |
85 | { 0x09, 6274509UL}, | |
86 | { 0x0A, 3121951UL}, | |
87 | { 0x0B, 1560975UL}, | |
88 | { 0x0C, 781440UL}, | |
89 | { 0x0D, 390720UL}, | |
90 | { 0x0E, 195300UL}, | |
91 | { 0x0F, 97650UL}, | |
92 | { 0x10, 48854UL}, | |
93 | { 0x11, 24427UL}, | |
94 | { 0x12, 12213UL}, | |
95 | { 0x13, 6101UL}, | |
96 | { 0x14, 3051UL}, | |
97 | { 0x15, 1523UL}, | |
98 | { 0x16, 761UL}, | |
99 | { 0x00, 0UL}, /* scrubbing off */ | |
100 | }; | |
101 | ||
2bc65418 DT |
102 | /* |
103 | * Memory scrubber control interface. For K8, memory scrubbing is handled by | |
104 | * hardware and can involve L2 cache, dcache as well as the main memory. With | |
105 | * F10, this is extended to L3 cache scrubbing on CPU models sporting that | |
106 | * functionality. | |
107 | * | |
108 | * This causes the "units" for the scrubbing speed to vary from 64-byte blocks | |
109 | * (DRAM) to cache lines. This is nasty, so we will use bandwidth in | |
110 | * bytes/sec for the setting. | |
111 | * | |
112 | * Currently, we only do dram scrubbing. If the scrubbing is done in software on | |
113 | * other archs, we might not have access to the caches directly. | |
114 | */ | |
115 | ||
116 | /* | |
117 | * Scan the scrub rate mapping table for a close or matching bandwidth value. | |
118 | * If the requested rate is too big, use the last (maximum) value found. | |
119 | */ | |
395ae783 | 120 | static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate) |
2bc65418 DT |
121 | { |
122 | u32 scrubval; | |
123 | int i; | |
124 | ||
125 | /* | |
126 | * map the configured rate (new_bw) to a value specific to the AMD64 | |
127 | * memory controller and apply to register. Search for the first | |
128 | * bandwidth entry that is greater than or equal to the requested setting | |
129 | * and program that. If at last entry, turn off DRAM scrubbing. | |
130 | */ | |
131 | for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) { | |
132 | /* | |
133 | * skip scrub rates which aren't recommended | |
134 | * (see F10 BKDG, F3x58) | |
135 | */ | |
395ae783 | 136 | if (scrubrates[i].scrubval < min_rate) |
2bc65418 DT |
137 | continue; |
138 | ||
139 | if (scrubrates[i].bandwidth <= new_bw) | |
140 | break; | |
141 | ||
142 | /* | |
143 | * if no suitable bandwidth found, turn off DRAM scrubbing | |
144 | * entirely by falling back to the last element in the | |
145 | * scrubrates array. | |
146 | */ | |
147 | } | |
148 | ||
149 | scrubval = scrubrates[i].scrubval; | |
150 | if (scrubval) | |
24f9a7fe BP |
151 | amd64_info("Setting scrub rate bandwidth: %u\n", |
152 | scrubrates[i].bandwidth); | |
2bc65418 | 153 | else |
24f9a7fe | 154 | amd64_info("Turning scrubbing off.\n"); |
2bc65418 DT |
155 | |
156 | pci_write_bits32(ctl, K8_SCRCTRL, scrubval, 0x001F); | |
157 | ||
158 | return 0; | |
159 | } | |
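
To restate the selection rule outside the driver: the table is sorted by descending bandwidth, codes below min_rate are skipped, and the first bandwidth not above the request wins, with the terminating {0x00, 0} entry meaning "off". A sketch, with the struct and function names invented for illustration:

```c
/* Illustrative restatement of the table walk in __amd64_set_scrub_rate(). */
struct scrubrate_ex { unsigned char scrubval; unsigned long bandwidth; };

static unsigned char pick_scrubval(const struct scrubrate_ex *tbl, int n,
				   unsigned long new_bw, unsigned int min_rate)
{
	int i;

	/* Stop before the terminating {0x00, 0} entry so it is always
	 * available as the fall-through "scrubbing off" choice. */
	for (i = 0; i < n - 1; i++) {
		if (tbl[i].scrubval < min_rate)
			continue;		/* not recommended on this CPU */
		if (tbl[i].bandwidth <= new_bw)
			break;			/* first rate not above request */
	}
	return tbl[i].scrubval;
}
```
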
160 | ||
395ae783 | 161 | static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bw) |
2bc65418 DT |
162 | { |
163 | struct amd64_pvt *pvt = mci->pvt_info; | |
2bc65418 | 164 | |
8d5b5d9c | 165 | return __amd64_set_scrub_rate(pvt->F3, bw, pvt->min_scrubrate); |
2bc65418 DT |
166 | } |
167 | ||
168 | static int amd64_get_scrub_rate(struct mem_ctl_info *mci, u32 *bw) | |
169 | { | |
170 | struct amd64_pvt *pvt = mci->pvt_info; | |
171 | u32 scrubval = 0; | |
6ba5dcdc | 172 | int status = -1, i; |
2bc65418 | 173 | |
8d5b5d9c | 174 | amd64_read_pci_cfg(pvt->F3, K8_SCRCTRL, &scrubval); |
2bc65418 DT |
175 | |
176 | scrubval = scrubval & 0x001F; | |
177 | ||
24f9a7fe | 178 | amd64_debug("pci-read, sdram scrub control value: %d\n", scrubval); |
2bc65418 | 179 | |
926311fd | 180 | for (i = 0; i < ARRAY_SIZE(scrubrates); i++) { |
2bc65418 DT |
181 | if (scrubrates[i].scrubval == scrubval) { |
182 | *bw = scrubrates[i].bandwidth; | |
183 | status = 0; | |
184 | break; | |
185 | } | |
186 | } | |
187 | ||
188 | return status; | |
189 | } | |
190 | ||
6775763a DT |
191 | /* Map from a CSROW entry to the mask entry that operates on it */ |
192 | static inline u32 amd64_map_to_dcs_mask(struct amd64_pvt *pvt, int csrow) | |
193 | { | |
1433eb99 | 194 | if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) |
9d858bb1 BP |
195 | return csrow; |
196 | else | |
197 | return csrow >> 1; | |
6775763a DT |
198 | } |
199 | ||
200 | /* return the 'base' address for the i'th CS entry of the 'dct' DRAM controller */ | |
201 | static u32 amd64_get_dct_base(struct amd64_pvt *pvt, int dct, int csrow) | |
202 | { | |
203 | if (dct == 0) | |
204 | return pvt->dcsb0[csrow]; | |
205 | else | |
206 | return pvt->dcsb1[csrow]; | |
207 | } | |
208 | ||
209 | /* | |
210 | * Return the 'mask' address for the i'th CS entry. This function is needed | |
211 | * because the number of DCSM registers on Rev E and prior vs Rev F and later | |
212 | * is different. | |
213 | */ | |
214 | static u32 amd64_get_dct_mask(struct amd64_pvt *pvt, int dct, int csrow) | |
215 | { | |
216 | if (dct == 0) | |
217 | return pvt->dcsm0[amd64_map_to_dcs_mask(pvt, csrow)]; | |
218 | else | |
219 | return pvt->dcsm1[amd64_map_to_dcs_mask(pvt, csrow)]; | |
220 | } | |
221 | ||
222 | ||
223 | /* | |
224 | * In *base and *limit, pass back the full 40-bit base and limit physical | |
225 | * addresses for the node given by node_id. This information is obtained from | |
226 | * DRAM Base (section 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers. The | |
227 | * base and limit addresses are of type SysAddr, as defined at the start of | |
228 | * section 3.4.4 (p. 70). They are the lowest and highest physical addresses | |
229 | * in the address range they represent. | |
230 | */ | |
231 | static void amd64_get_base_and_limit(struct amd64_pvt *pvt, int node_id, | |
232 | u64 *base, u64 *limit) | |
233 | { | |
234 | *base = pvt->dram_base[node_id]; | |
235 | *limit = pvt->dram_limit[node_id]; | |
236 | } | |
237 | ||
238 | /* | |
239 | * Return 1 if the SysAddr given by sys_addr matches the base/limit associated | |
240 | * with node_id | |
241 | */ | |
242 | static int amd64_base_limit_match(struct amd64_pvt *pvt, | |
243 | u64 sys_addr, int node_id) | |
244 | { | |
245 | u64 base, limit, addr; | |
246 | ||
247 | amd64_get_base_and_limit(pvt, node_id, &base, &limit); | |
248 | ||
249 | /* The K8 treats this as a 40-bit value. However, bits 63-40 will be | |
250 | * all ones if the most significant implemented address bit is 1. | |
251 | * Here we discard bits 63-40. See section 3.4.2 of AMD publication | |
252 | * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1 | |
253 | * Application Programming. | |
254 | */ | |
255 | addr = sys_addr & 0x000000ffffffffffull; | |
256 | ||
257 | return (addr >= base) && (addr <= limit); | |
258 | } | |
259 | ||
260 | /* | |
261 | * Attempt to map a SysAddr to a node. On success, return a pointer to the | |
262 | * mem_ctl_info structure for the node that the SysAddr maps to. | |
263 | * | |
264 | * On failure, return NULL. | |
265 | */ | |
266 | static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci, | |
267 | u64 sys_addr) | |
268 | { | |
269 | struct amd64_pvt *pvt; | |
270 | int node_id; | |
271 | u32 intlv_en, bits; | |
272 | ||
273 | /* | |
274 | * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section | |
275 | * 3.4.4.2) registers to map the SysAddr to a node ID. | |
276 | */ | |
277 | pvt = mci->pvt_info; | |
278 | ||
279 | /* | |
280 | * The value of this field should be the same for all DRAM Base | |
281 | * registers. Therefore we arbitrarily choose to read it from the | |
282 | * register for node 0. | |
283 | */ | |
284 | intlv_en = pvt->dram_IntlvEn[0]; | |
285 | ||
286 | if (intlv_en == 0) { | |
8edc5445 | 287 | for (node_id = 0; node_id < DRAM_REG_COUNT; node_id++) { |
6775763a | 288 | if (amd64_base_limit_match(pvt, sys_addr, node_id)) |
8edc5445 | 289 | goto found; |
6775763a | 290 | } |
8edc5445 | 291 | goto err_no_match; |
6775763a DT |
292 | } |
293 | ||
72f158fe BP |
294 | if (unlikely((intlv_en != 0x01) && |
295 | (intlv_en != 0x03) && | |
296 | (intlv_en != 0x07))) { | |
24f9a7fe | 297 | amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en); |
6775763a DT |
298 | return NULL; |
299 | } | |
300 | ||
301 | bits = (((u32) sys_addr) >> 12) & intlv_en; | |
302 | ||
303 | for (node_id = 0; ; ) { | |
8edc5445 | 304 | if ((pvt->dram_IntlvSel[node_id] & intlv_en) == bits) |
6775763a DT |
305 | break; /* intlv_sel field matches */ |
306 | ||
307 | if (++node_id >= DRAM_REG_COUNT) | |
308 | goto err_no_match; | |
309 | } | |
310 | ||
311 | /* sanity test for sys_addr */ | |
312 | if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) { | |
24f9a7fe BP |
313 | amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address" |
314 | "range for node %d with node interleaving enabled.\n", | |
315 | __func__, sys_addr, node_id); | |
6775763a DT |
316 | return NULL; |
317 | } | |
318 | ||
319 | found: | |
320 | return edac_mc_find(node_id); | |
321 | ||
322 | err_no_match: | |
323 | debugf2("sys_addr 0x%lx doesn't match any node\n", | |
324 | (unsigned long)sys_addr); | |
325 | ||
326 | return NULL; | |
327 | } | |
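
A worked example of the interleave match above, with made-up register values: intlv_en = 0x03 enables a four-node interleave, so SysAddr bits 13:12 select the node and must equal that node's dram_IntlvSel field:

```c
#include <stdio.h>

int main(void)
{
	unsigned int intlv_en = 0x03;		/* hypothetical: 4-node interleave */
	unsigned long long sys_addr = 0x12345000ULL;

	/* SysAddr[13:12] pick the node when two interleave bits are enabled */
	unsigned int bits = ((unsigned int)sys_addr >> 12) & intlv_en;

	/* the node whose dram_IntlvSel == 0x1 claims this SysAddr */
	printf("node selector bits = 0x%x\n", bits);	/* 0x1 */
	return 0;
}
```
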
e2ce7255 DT |
328 | |
329 | /* | |
330 | * Extract the DRAM CS base address from selected csrow register. | |
331 | */ | |
332 | static u64 base_from_dct_base(struct amd64_pvt *pvt, int csrow) | |
333 | { | |
334 | return ((u64) (amd64_get_dct_base(pvt, 0, csrow) & pvt->dcsb_base)) << | |
335 | pvt->dcs_shift; | |
336 | } | |
337 | ||
338 | /* | |
339 | * Extract the mask from the dcsb0[csrow] entry in a CPU revision-specific way. | |
340 | */ | |
341 | static u64 mask_from_dct_mask(struct amd64_pvt *pvt, int csrow) | |
342 | { | |
343 | u64 dcsm_bits, other_bits; | |
344 | u64 mask; | |
345 | ||
346 | /* Extract bits from DRAM CS Mask. */ | |
347 | dcsm_bits = amd64_get_dct_mask(pvt, 0, csrow) & pvt->dcsm_mask; | |
348 | ||
349 | other_bits = pvt->dcsm_mask; | |
350 | other_bits = ~(other_bits << pvt->dcs_shift); | |
351 | ||
352 | /* | |
353 | * The extracted bits from DCSM belong in the spaces represented by | |
354 | * the cleared bits in other_bits. | |
355 | */ | |
356 | mask = (dcsm_bits << pvt->dcs_shift) | other_bits; | |
357 | ||
358 | return mask; | |
359 | } | |
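
To make the bit-space argument concrete, here is the same merge as a standalone helper; dcsm_mask and dcs_shift are parameters standing in for the driver's Rev E/Rev F constants (illustrative only):

```c
/* Illustrative only: rebuild a full mask from the DCSM field bits. */
static unsigned long long make_full_mask(unsigned long long raw_dcsm,
					 unsigned long long dcsm_mask,
					 unsigned int dcs_shift)
{
	unsigned long long dcsm_bits = raw_dcsm & dcsm_mask;
	/* every bit position outside the shifted DCSM field is "don't care" */
	unsigned long long other_bits = ~(dcsm_mask << dcs_shift);

	return (dcsm_bits << dcs_shift) | other_bits;
}
```
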
360 | ||
361 | /* | |
362 | * @input_addr is an InputAddr associated with the node given by mci. Return the | |
363 | * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr). | |
364 | */ | |
365 | static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr) | |
366 | { | |
367 | struct amd64_pvt *pvt; | |
368 | int csrow; | |
369 | u64 base, mask; | |
370 | ||
371 | pvt = mci->pvt_info; | |
372 | ||
373 | /* | |
374 | * Here we use the DRAM CS Base and DRAM CS Mask registers. For each CS | |
375 | * base/mask register pair, test the condition shown near the start of | |
376 | * section 3.5.4 (p. 84, BKDG #26094, K8, revA-E). | |
377 | */ | |
9d858bb1 | 378 | for (csrow = 0; csrow < pvt->cs_count; csrow++) { |
e2ce7255 DT |
379 | |
380 | /* This DRAM chip select is disabled on this node */ | |
381 | if ((pvt->dcsb0[csrow] & K8_DCSB_CS_ENABLE) == 0) | |
382 | continue; | |
383 | ||
384 | base = base_from_dct_base(pvt, csrow); | |
385 | mask = ~mask_from_dct_mask(pvt, csrow); | |
386 | ||
387 | if ((input_addr & mask) == (base & mask)) { | |
388 | debugf2("InputAddr 0x%lx matches csrow %d (node %d)\n", | |
389 | (unsigned long)input_addr, csrow, | |
390 | pvt->mc_node_id); | |
391 | ||
392 | return csrow; | |
393 | } | |
394 | } | |
395 | ||
396 | debugf2("no matching csrow for InputAddr 0x%lx (MC node %d)\n", | |
397 | (unsigned long)input_addr, pvt->mc_node_id); | |
398 | ||
399 | return -1; | |
400 | } | |
401 | ||
402 | /* | |
403 | * Return the base value defined by the DRAM Base register for the node | |
404 | * represented by mci. This function returns the full 40-bit value despite the | |
405 | * fact that the register only stores bits 39-24 of the value. See section | |
406 | * 3.4.4.1 (BKDG #26094, K8, revA-E) | |
407 | */ | |
408 | static inline u64 get_dram_base(struct mem_ctl_info *mci) | |
409 | { | |
410 | struct amd64_pvt *pvt = mci->pvt_info; | |
411 | ||
412 | return pvt->dram_base[pvt->mc_node_id]; | |
413 | } | |
414 | ||
415 | /* | |
416 | * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094) | |
417 | * for the node represented by mci. Info is passed back in *hole_base, | |
418 | * *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if | |
419 | * info is invalid. Info may be invalid for either of the following reasons: | |
420 | * | |
421 | * - The revision of the node is not E or greater. In this case, the DRAM Hole | |
422 | * Address Register does not exist. | |
423 | * | |
424 | * - The DramHoleValid bit is cleared in the DRAM Hole Address Register, | |
425 | * indicating that its contents are not valid. | |
426 | * | |
427 | * The values passed back in *hole_base, *hole_offset, and *hole_size are | |
428 | * complete 32-bit values despite the fact that the bitfields in the DHAR | |
429 | * only represent bits 31-24 of the base and offset values. | |
430 | */ | |
431 | int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base, | |
432 | u64 *hole_offset, u64 *hole_size) | |
433 | { | |
434 | struct amd64_pvt *pvt = mci->pvt_info; | |
435 | u64 base; | |
436 | ||
437 | /* only revE and later have the DRAM Hole Address Register */ | |
1433eb99 | 438 | if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_E) { |
e2ce7255 DT |
439 | debugf1(" revision %d for node %d does not support DHAR\n", |
440 | pvt->ext_model, pvt->mc_node_id); | |
441 | return 1; | |
442 | } | |
443 | ||
444 | /* only valid for Fam10h */ | |
445 | if (boot_cpu_data.x86 == 0x10 && | |
446 | (pvt->dhar & F10_DRAM_MEM_HOIST_VALID) == 0) { | |
447 | debugf1(" Dram Memory Hoisting is DISABLED on this system\n"); | |
448 | return 1; | |
449 | } | |
450 | ||
451 | if ((pvt->dhar & DHAR_VALID) == 0) { | |
452 | debugf1(" Dram Memory Hoisting is DISABLED on this node %d\n", | |
453 | pvt->mc_node_id); | |
454 | return 1; | |
455 | } | |
456 | ||
457 | /* This node has Memory Hoisting */ | |
458 | ||
459 | /* +------------------+--------------------+--------------------+----- | |
460 | * | memory | DRAM hole | relocated | | |
461 | * | [0, (x - 1)] | [x, 0xffffffff] | addresses from | | |
462 | * | | | DRAM hole | | |
463 | * | | | [0x100000000, | | |
464 | * | | | (0x100000000+ | | |
465 | * | | | (0xffffffff-x))] | | |
466 | * +------------------+--------------------+--------------------+----- | |
467 | * | |
468 | * Above is a diagram of physical memory showing the DRAM hole and the | |
469 | * relocated addresses from the DRAM hole. As shown, the DRAM hole | |
470 | * starts at address x (the base address) and extends through address | |
471 | * 0xffffffff. The DRAM Hole Address Register (DHAR) relocates the | |
472 | * addresses in the hole so that they start at 0x100000000. | |
473 | */ | |
474 | ||
475 | base = dhar_base(pvt->dhar); | |
476 | ||
477 | *hole_base = base; | |
478 | *hole_size = (0x1ull << 32) - base; | |
479 | ||
480 | if (boot_cpu_data.x86 > 0xf) | |
481 | *hole_offset = f10_dhar_offset(pvt->dhar); | |
482 | else | |
483 | *hole_offset = k8_dhar_offset(pvt->dhar); | |
484 | ||
485 | debugf1(" DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n", | |
486 | pvt->mc_node_id, (unsigned long)*hole_base, | |
487 | (unsigned long)*hole_offset, (unsigned long)*hole_size); | |
488 | ||
489 | return 0; | |
490 | } | |
491 | EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info); | |
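
A quick numeric check of the size computation above, with a hypothetical DHAR base:

```c
#include <stdio.h>

int main(void)
{
	unsigned long long base = 0xc0000000ULL;	/* hypothetical DHAR base */
	unsigned long long hole_size = (1ULL << 32) - base;

	/* hole spans [0xc0000000, 0xffffffff] => size 0x40000000 (1 GiB) */
	printf("hole_base=0x%llx hole_size=0x%llx\n", base, hole_size);
	return 0;
}
```
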
492 | ||
93c2df58 DT |
493 | /* |
494 | * Return the DramAddr that the SysAddr given by @sys_addr maps to. It is | |
495 | * assumed that sys_addr maps to the node given by mci. | |
496 | * | |
497 | * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section | |
498 | * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a | |
499 | * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled, | |
500 | * then it is also involved in translating a SysAddr to a DramAddr. Sections | |
501 | * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting. | |
502 | * These parts of the documentation are unclear. I interpret them as follows: | |
503 | * | |
504 | * When node n receives a SysAddr, it processes the SysAddr as follows: | |
505 | * | |
506 | * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM | |
507 | * Limit registers for node n. If the SysAddr is not within the range | |
508 | * specified by the base and limit values, then node n ignores the Sysaddr | |
509 | * (since it does not map to node n). Otherwise continue to step 2 below. | |
510 | * | |
511 | * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is | |
512 | * disabled so skip to step 3 below. Otherwise see if the SysAddr is within | |
513 | * the range of relocated addresses (starting at 0x100000000) from the DRAM | |
514 | * hole. If not, skip to step 3 below. Else get the value of the | |
515 | * DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the | |
516 | * offset defined by this value from the SysAddr. | |
517 | * | |
518 | * 3. Obtain the base address for node n from the DRAMBase field of the DRAM | |
519 | * Base register for node n. To obtain the DramAddr, subtract the base | |
520 | * address from the SysAddr, as shown near the start of section 3.4.4 (p.70). | |
521 | */ | |
522 | static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr) | |
523 | { | |
524 | u64 dram_base, hole_base, hole_offset, hole_size, dram_addr; | |
525 | int ret = 0; | |
526 | ||
527 | dram_base = get_dram_base(mci); | |
528 | ||
529 | ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset, | |
530 | &hole_size); | |
531 | if (!ret) { | |
532 | if ((sys_addr >= (1ull << 32)) && | |
533 | (sys_addr < ((1ull << 32) + hole_size))) { | |
534 | /* use DHAR to translate SysAddr to DramAddr */ | |
535 | dram_addr = sys_addr - hole_offset; | |
536 | ||
537 | debugf2("using DHAR to translate SysAddr 0x%lx to " | |
538 | "DramAddr 0x%lx\n", | |
539 | (unsigned long)sys_addr, | |
540 | (unsigned long)dram_addr); | |
541 | ||
542 | return dram_addr; | |
543 | } | |
544 | } | |
545 | ||
546 | /* | |
547 | * Translate the SysAddr to a DramAddr as shown near the start of | |
548 | * section 3.4.4 (p. 70). Although sys_addr is a 64-bit value, the k8 | |
549 | * only deals with 40-bit values. Therefore we discard bits 63-40 of | |
550 | * sys_addr below. If bit 39 of sys_addr is 1 then the bits we | |
551 | * discard are all 1s. Otherwise the bits we discard are all 0s. See | |
552 | * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture | |
553 | * Programmer's Manual Volume 1 Application Programming. | |
554 | */ | |
555 | dram_addr = (sys_addr & 0xffffffffffull) - dram_base; | |
556 | ||
557 | debugf2("using DRAM Base register to translate SysAddr 0x%lx to " | |
558 | "DramAddr 0x%lx\n", (unsigned long)sys_addr, | |
559 | (unsigned long)dram_addr); | |
560 | return dram_addr; | |
561 | } | |
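
Both translation paths with hypothetical numbers (hole_offset and dram_base are invented for illustration):

```c
#include <stdio.h>

int main(void)
{
	unsigned long long hole_offset = 0xc0000000ULL;	/* hypothetical DHAR offset */
	unsigned long long dram_base = 0x100000000ULL;	/* hypothetical node base */

	/* DHAR path: sys_addr inside the relocated range above 4 GB */
	unsigned long long a1 = 0x100000000ULL - hole_offset;

	/* DRAM Base path: subtract the node base from the 40-bit SysAddr */
	unsigned long long a2 = (0x180000000ULL & 0xffffffffffULL) - dram_base;

	printf("dhar path: 0x%llx, base path: 0x%llx\n", a1, a2);
	/* prints 0x40000000 and 0x80000000 */
	return 0;
}
```
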
562 | ||
563 | /* | |
564 | * @intlv_en is the value of the IntlvEn field from a DRAM Base register | |
565 | * (section 3.4.4.1). Return the number of bits from a SysAddr that are used | |
566 | * for node interleaving. | |
567 | */ | |
568 | static int num_node_interleave_bits(unsigned intlv_en) | |
569 | { | |
570 | static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 }; | |
571 | int n; | |
572 | ||
573 | BUG_ON(intlv_en > 7); | |
574 | n = intlv_shift_table[intlv_en]; | |
575 | return n; | |
576 | } | |
577 | ||
578 | /* Translate the DramAddr given by @dram_addr to an InputAddr. */ | |
579 | static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr) | |
580 | { | |
581 | struct amd64_pvt *pvt; | |
582 | int intlv_shift; | |
583 | u64 input_addr; | |
584 | ||
585 | pvt = mci->pvt_info; | |
586 | ||
587 | /* | |
588 | * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E) | |
589 | * concerning translating a DramAddr to an InputAddr. | |
590 | */ | |
591 | intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]); | |
592 | input_addr = ((dram_addr >> intlv_shift) & 0xffffff000ull) + | |
593 | (dram_addr & 0xfff); | |
594 | ||
595 | debugf2(" Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n", | |
596 | intlv_shift, (unsigned long)dram_addr, | |
597 | (unsigned long)input_addr); | |
598 | ||
599 | return input_addr; | |
600 | } | |
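
A numeric example of the interleave-bit removal: with intlv_shift = 1, bit 12 is squeezed out while the page offset in bits 11:0 survives intact:

```c
#include <stdio.h>

int main(void)
{
	unsigned long long dram_addr = 0x3456ULL;
	int intlv_shift = 1;	/* one node-interleave bit */

	unsigned long long input_addr =
		((dram_addr >> intlv_shift) & 0xffffff000ULL) +
		(dram_addr & 0xfff);

	/* ((0x3456 >> 1) & ~0xfff) + 0x456 = 0x1000 + 0x456 */
	printf("input_addr=0x%llx\n", input_addr);	/* 0x1456 */
	return 0;
}
```
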
601 | ||
602 | /* | |
603 | * Translate the SysAddr represented by @sys_addr to an InputAddr. It is | |
604 | * assumed that @sys_addr maps to the node given by mci. | |
605 | */ | |
606 | static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr) | |
607 | { | |
608 | u64 input_addr; | |
609 | ||
610 | input_addr = | |
611 | dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr)); | |
612 | ||
613 | debugf2("SysAdddr 0x%lx translates to InputAddr 0x%lx\n", | |
614 | (unsigned long)sys_addr, (unsigned long)input_addr); | |
615 | ||
616 | return input_addr; | |
617 | } | |
618 | ||
619 | ||
620 | /* | |
621 | * @input_addr is an InputAddr associated with the node represented by mci. | |
622 | * Translate @input_addr to a DramAddr and return the result. | |
623 | */ | |
624 | static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr) | |
625 | { | |
626 | struct amd64_pvt *pvt; | |
627 | int node_id, intlv_shift; | |
628 | u64 bits, dram_addr; | |
629 | u32 intlv_sel; | |
630 | ||
631 | /* | |
632 | * Near the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E) | |
633 | * shows how to translate a DramAddr to an InputAddr. Here we reverse | |
634 | * this procedure. When translating from a DramAddr to an InputAddr, the | |
635 | * bits used for node interleaving are discarded. Here we recover these | |
636 | * bits from the IntlvSel field of the DRAM Limit register (section | |
637 | * 3.4.4.2) for the node that input_addr is associated with. | |
638 | */ | |
639 | pvt = mci->pvt_info; | |
640 | node_id = pvt->mc_node_id; | |
641 | BUG_ON((node_id < 0) || (node_id > 7)); | |
642 | ||
643 | intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]); | |
644 | ||
645 | if (intlv_shift == 0) { | |
646 | debugf1(" InputAddr 0x%lx translates to DramAddr of " | |
647 | "same value\n", (unsigned long)input_addr); | |
648 | ||
649 | return input_addr; | |
650 | } | |
651 | ||
652 | bits = ((input_addr & 0xffffff000ull) << intlv_shift) + | |
653 | (input_addr & 0xfff); | |
654 | ||
655 | intlv_sel = pvt->dram_IntlvSel[node_id] & ((1 << intlv_shift) - 1); | |
656 | dram_addr = bits + (intlv_sel << 12); | |
657 | ||
658 | debugf1("InputAddr 0x%lx translates to DramAddr 0x%lx " | |
659 | "(%d node interleave bits)\n", (unsigned long)input_addr, | |
660 | (unsigned long)dram_addr, intlv_shift); | |
661 | ||
662 | return dram_addr; | |
663 | } | |
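
Running the previous example backwards: reinserting the interleave bit recovered from IntlvSel reproduces the original DramAddr:

```c
#include <stdio.h>

int main(void)
{
	unsigned long long input_addr = 0x1456ULL;	/* from the forward example */
	int intlv_shift = 1;
	unsigned long long intlv_sel = 1;	/* recovered from DRAM Limit[IntlvSel] */

	unsigned long long bits =
		((input_addr & 0xffffff000ULL) << intlv_shift) +
		(input_addr & 0xfff);
	unsigned long long dram_addr = bits + (intlv_sel << 12);

	/* (0x1000 << 1) + 0x456 + (1 << 12) = 0x3456: the original DramAddr */
	printf("dram_addr=0x%llx\n", dram_addr);
	return 0;
}
```
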
664 | ||
665 | /* | |
666 | * @dram_addr is a DramAddr that maps to the node represented by mci. Convert | |
667 | * @dram_addr to a SysAddr. | |
668 | */ | |
669 | static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr) | |
670 | { | |
671 | struct amd64_pvt *pvt = mci->pvt_info; | |
672 | u64 hole_base, hole_offset, hole_size, base, limit, sys_addr; | |
673 | int ret = 0; | |
674 | ||
675 | ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset, | |
676 | &hole_size); | |
677 | if (!ret) { | |
678 | if ((dram_addr >= hole_base) && | |
679 | (dram_addr < (hole_base + hole_size))) { | |
680 | sys_addr = dram_addr + hole_offset; | |
681 | ||
682 | debugf1("using DHAR to translate DramAddr 0x%lx to " | |
683 | "SysAddr 0x%lx\n", (unsigned long)dram_addr, | |
684 | (unsigned long)sys_addr); | |
685 | ||
686 | return sys_addr; | |
687 | } | |
688 | } | |
689 | ||
690 | amd64_get_base_and_limit(pvt, pvt->mc_node_id, &base, &limit); | |
691 | sys_addr = dram_addr + base; | |
692 | ||
693 | /* | |
694 | * The sys_addr we have computed up to this point is a 40-bit value | |
695 | * because the k8 deals with 40-bit values. However, the value we are | |
696 | * supposed to return is a full 64-bit physical address. The AMD | |
697 | * x86-64 architecture specifies that the most significant implemented | |
698 | * address bit through bit 63 of a physical address must be either all | |
699 | * 0s or all 1s. Therefore we sign-extend the 40-bit sys_addr to a | |
700 | * 64-bit value below. See section 3.4.2 of AMD publication 24592: | |
701 | * AMD x86-64 Architecture Programmer's Manual Volume 1 Application | |
702 | * Programming. | |
703 | */ | |
704 | sys_addr |= ~((sys_addr & (1ull << 39)) - 1); | |
705 | ||
706 | debugf1(" Node %d, DramAddr 0x%lx to SysAddr 0x%lx\n", | |
707 | pvt->mc_node_id, (unsigned long)dram_addr, | |
708 | (unsigned long)sys_addr); | |
709 | ||
710 | return sys_addr; | |
711 | } | |
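
The sign-extension one-liner above is worth unpacking: when bit 39 is set, (sys_addr & (1ull << 39)) - 1 equals 0x7fffffffff and its complement turns on bits 63:39; when bit 39 is clear, the subtraction wraps to all ones and the complement is zero, so nothing changes. A minimal demonstration:

```c
#include <stdio.h>

static unsigned long long sign_extend_bit39(unsigned long long a)
{
	/* sets bits 63:39 when bit 39 is 1, no-op when bit 39 is 0 */
	return a | ~((a & (1ULL << 39)) - 1);
}

int main(void)
{
	printf("0x%llx\n", sign_extend_bit39(0x8000000000ULL)); /* 0xffffff8000000000 */
	printf("0x%llx\n", sign_extend_bit39(0x12345678ULL));   /* 0x12345678 */
	return 0;
}
```
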
712 | ||
713 | /* | |
714 | * @input_addr is an InputAddr associated with the node given by mci. Translate | |
715 | * @input_addr to a SysAddr. | |
716 | */ | |
717 | static inline u64 input_addr_to_sys_addr(struct mem_ctl_info *mci, | |
718 | u64 input_addr) | |
719 | { | |
720 | return dram_addr_to_sys_addr(mci, | |
721 | input_addr_to_dram_addr(mci, input_addr)); | |
722 | } | |
723 | ||
724 | /* | |
725 | * Find the minimum and maximum InputAddr values that map to the given @csrow. | |
726 | * Pass back these values in *input_addr_min and *input_addr_max. | |
727 | */ | |
728 | static void find_csrow_limits(struct mem_ctl_info *mci, int csrow, | |
729 | u64 *input_addr_min, u64 *input_addr_max) | |
730 | { | |
731 | struct amd64_pvt *pvt; | |
732 | u64 base, mask; | |
733 | ||
734 | pvt = mci->pvt_info; | |
9d858bb1 | 735 | BUG_ON((csrow < 0) || (csrow >= pvt->cs_count)); |
93c2df58 DT |
736 | |
737 | base = base_from_dct_base(pvt, csrow); | |
738 | mask = mask_from_dct_mask(pvt, csrow); | |
739 | ||
740 | *input_addr_min = base & ~mask; | |
741 | *input_addr_max = base | mask | pvt->dcs_mask_notused; | |
742 | } | |
743 | ||
93c2df58 DT |
744 | /* Map the Error address to a PAGE and PAGE OFFSET. */ |
745 | static inline void error_address_to_page_and_offset(u64 error_address, | |
746 | u32 *page, u32 *offset) | |
747 | { | |
748 | *page = (u32) (error_address >> PAGE_SHIFT); | |
749 | *offset = ((u32) error_address) & ~PAGE_MASK; | |
750 | } | |
751 | ||
752 | /* | |
753 | * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address | |
754 | * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers | |
755 | * of a node that detected an ECC memory error. mci represents the node that | |
756 | * the error address maps to (possibly different from the node that detected | |
757 | * the error). Return the number of the csrow that sys_addr maps to, or -1 on | |
758 | * error. | |
759 | */ | |
760 | static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr) | |
761 | { | |
762 | int csrow; | |
763 | ||
764 | csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr)); | |
765 | ||
766 | if (csrow == -1) | |
24f9a7fe BP |
767 | amd64_mc_err(mci, "Failed to translate InputAddr to csrow for " |
768 | "address 0x%lx\n", (unsigned long)sys_addr); | |
93c2df58 DT |
769 | return csrow; |
770 | } | |
e2ce7255 | 771 | |
bfc04aec | 772 | static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16); |
2da11654 | 773 | |
ad6a32e9 BP |
774 | static u16 extract_syndrome(struct err_regs *err) |
775 | { | |
776 | return ((err->nbsh >> 15) & 0xff) | ((err->nbsl >> 16) & 0xff00); | |
777 | } | |
778 | ||
2da11654 DT |
779 | /* |
780 | * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs | |
781 | * are ECC capable. | |
782 | */ | |
783 | static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt) | |
784 | { | |
785 | int bit; | |
584fcff4 | 786 | enum dev_type edac_cap = EDAC_FLAG_NONE; |
2da11654 | 787 | |
1433eb99 | 788 | bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= K8_REV_F) |
2da11654 DT |
789 | ? 19 |
790 | : 17; | |
791 | ||
584fcff4 | 792 | if (pvt->dclr0 & BIT(bit)) |
2da11654 DT |
793 | edac_cap = EDAC_FLAG_SECDED; |
794 | ||
795 | return edac_cap; | |
796 | } | |
797 | ||
798 | ||
8566c4df | 799 | static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt); |
2da11654 | 800 | |
68798e17 BP |
801 | static void amd64_dump_dramcfg_low(u32 dclr, int chan) |
802 | { | |
803 | debugf1("F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr); | |
804 | ||
805 | debugf1(" DIMM type: %sbuffered; all DIMMs support ECC: %s\n", | |
806 | (dclr & BIT(16)) ? "un" : "", | |
807 | (dclr & BIT(19)) ? "yes" : "no"); | |
808 | ||
809 | debugf1(" PAR/ERR parity: %s\n", | |
810 | (dclr & BIT(8)) ? "enabled" : "disabled"); | |
811 | ||
812 | debugf1(" DCT 128bit mode width: %s\n", | |
813 | (dclr & BIT(11)) ? "128b" : "64b"); | |
814 | ||
815 | debugf1(" x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n", | |
816 | (dclr & BIT(12)) ? "yes" : "no", | |
817 | (dclr & BIT(13)) ? "yes" : "no", | |
818 | (dclr & BIT(14)) ? "yes" : "no", | |
819 | (dclr & BIT(15)) ? "yes" : "no"); | |
820 | } | |
821 | ||
2da11654 DT |
822 | /* Display and decode various NB registers for debug purposes. */ |
823 | static void amd64_dump_misc_regs(struct amd64_pvt *pvt) | |
824 | { | |
825 | int ganged; | |
826 | ||
68798e17 BP |
827 | debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap); |
828 | ||
829 | debugf1(" NB two channel DRAM capable: %s\n", | |
830 | (pvt->nbcap & K8_NBCAP_DCT_DUAL) ? "yes" : "no"); | |
2da11654 | 831 | |
68798e17 BP |
832 | debugf1(" ECC capable: %s, ChipKill ECC capable: %s\n", |
833 | (pvt->nbcap & K8_NBCAP_SECDED) ? "yes" : "no", | |
834 | (pvt->nbcap & K8_NBCAP_CHIPKILL) ? "yes" : "no"); | |
835 | ||
836 | amd64_dump_dramcfg_low(pvt->dclr0, 0); | |
2da11654 | 837 | |
8de1d91e | 838 | debugf1("F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare); |
2da11654 | 839 | |
8de1d91e BP |
840 | debugf1("F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, " |
841 | "offset: 0x%08x\n", | |
842 | pvt->dhar, | |
843 | dhar_base(pvt->dhar), | |
844 | (boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt->dhar) | |
845 | : f10_dhar_offset(pvt->dhar)); | |
2da11654 | 846 | |
8de1d91e BP |
847 | debugf1(" DramHoleValid: %s\n", |
848 | (pvt->dhar & DHAR_VALID) ? "yes" : "no"); | |
2da11654 | 849 | |
8de1d91e | 850 | /* everything below this point is Fam10h and above */ |
8566c4df BP |
851 | if (boot_cpu_data.x86 == 0xf) { |
852 | amd64_debug_display_dimm_sizes(0, pvt); | |
2da11654 | 853 | return; |
8566c4df | 854 | } |
2da11654 | 855 | |
24f9a7fe | 856 | amd64_info("using %s syndromes.\n", ((pvt->syn_type == 8) ? "x8" : "x4")); |
ad6a32e9 | 857 | |
8de1d91e | 858 | /* Only if NOT ganged does dclr1 have valid info */ |
68798e17 BP |
859 | if (!dct_ganging_enabled(pvt)) |
860 | amd64_dump_dramcfg_low(pvt->dclr1, 1); | |
2da11654 DT |
861 | |
862 | /* | |
863 | * Determine if ganged and then dump memory sizes for first controller, | |
864 | * and if NOT ganged dump info for 2nd controller. | |
865 | */ | |
866 | ganged = dct_ganging_enabled(pvt); | |
867 | ||
8566c4df | 868 | amd64_debug_display_dimm_sizes(0, pvt); |
2da11654 DT |
869 | |
870 | if (!ganged) | |
8566c4df | 871 | amd64_debug_display_dimm_sizes(1, pvt); |
2da11654 DT |
872 | } |
873 | ||
874 | /* Read in both of DBAM registers */ | |
875 | static void amd64_read_dbam_reg(struct amd64_pvt *pvt) | |
876 | { | |
8d5b5d9c | 877 | amd64_read_pci_cfg(pvt->F2, DBAM0, &pvt->dbam0); |
2da11654 | 878 | |
6ba5dcdc | 879 | if (boot_cpu_data.x86 >= 0x10) |
8d5b5d9c | 880 | amd64_read_pci_cfg(pvt->F2, DBAM1, &pvt->dbam1); |
2da11654 DT |
881 | } |
882 | ||
94be4bff DT |
883 | /* |
884 | * NOTE: CPU Revision Dependent code: Rev E and Rev F | |
885 | * | |
886 | * Set the DCSB and DCSM mask values depending on the CPU revision value. Also | |
887 | * set the shift factor for the DCSB and DCSM values. | |
888 | * | |
889 | * ->dcs_mask_notused, RevE: | |
890 | * | |
891 | * To find the max InputAddr for the csrow, start with the base address and set | |
892 | * all bits that are "don't care" bits in the test at the start of section | |
893 | * 3.5.4 (p. 84). | |
894 | * | |
895 | * The "don't care" bits are all set bits in the mask and all bits in the gaps | |
896 | * between bit ranges [35:25] and [19:13]. The value REV_E_DCS_NOTUSED_BITS | |
897 | * represents bits [24:20] and [12:0], which are all bits in the above-mentioned | |
898 | * gaps. | |
899 | * | |
900 | * ->dcs_mask_notused, RevF and later: | |
901 | * | |
902 | * To find the max InputAddr for the csrow, start with the base address and set | |
903 | * all bits that are "don't care" bits in the test at the start of NPT section | |
904 | * 4.5.4 (p. 87). | |
905 | * | |
906 | * The "don't care" bits are all set bits in the mask and all bits in the gaps | |
907 | * between bit ranges [36:27] and [21:13]. | |
908 | * | |
909 | * The value REV_F_F1Xh_DCS_NOTUSED_BITS represents bits [26:22] and [12:0], | |
910 | * which are all bits in the above-mentioned gaps. | |
911 | */ | |
912 | static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt) | |
913 | { | |
9d858bb1 | 914 | |
1433eb99 | 915 | if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) { |
9d858bb1 BP |
916 | pvt->dcsb_base = REV_E_DCSB_BASE_BITS; |
917 | pvt->dcsm_mask = REV_E_DCSM_MASK_BITS; | |
918 | pvt->dcs_mask_notused = REV_E_DCS_NOTUSED_BITS; | |
919 | pvt->dcs_shift = REV_E_DCS_SHIFT; | |
920 | pvt->cs_count = 8; | |
921 | pvt->num_dcsm = 8; | |
922 | } else { | |
94be4bff DT |
923 | pvt->dcsb_base = REV_F_F1Xh_DCSB_BASE_BITS; |
924 | pvt->dcsm_mask = REV_F_F1Xh_DCSM_MASK_BITS; | |
925 | pvt->dcs_mask_notused = REV_F_F1Xh_DCS_NOTUSED_BITS; | |
926 | pvt->dcs_shift = REV_F_F1Xh_DCS_SHIFT; | |
3ab0e7dc BP |
927 | pvt->cs_count = 8; |
928 | pvt->num_dcsm = 4; | |
94be4bff DT |
929 | } |
930 | } | |
931 | ||
932 | /* | |
933 | * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask hw registers | |
934 | */ | |
935 | static void amd64_read_dct_base_mask(struct amd64_pvt *pvt) | |
936 | { | |
6ba5dcdc | 937 | int cs, reg; |
94be4bff DT |
938 | |
939 | amd64_set_dct_base_and_mask(pvt); | |
940 | ||
9d858bb1 | 941 | for (cs = 0; cs < pvt->cs_count; cs++) { |
94be4bff | 942 | reg = K8_DCSB0 + (cs * 4); |
8d5b5d9c | 943 | if (!amd64_read_pci_cfg(pvt->F2, reg, &pvt->dcsb0[cs])) |
94be4bff DT |
944 | debugf0(" DCSB0[%d]=0x%08x reg: F2x%x\n", |
945 | cs, pvt->dcsb0[cs], reg); | |
946 | ||
947 | /* If DCT are NOT ganged, then read in DCT1's base */ | |
948 | if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) { | |
949 | reg = F10_DCSB1 + (cs * 4); | |
8d5b5d9c | 950 | if (!amd64_read_pci_cfg(pvt->F2, reg, |
6ba5dcdc | 951 | &pvt->dcsb1[cs])) |
94be4bff DT |
952 | debugf0(" DCSB1[%d]=0x%08x reg: F2x%x\n", |
953 | cs, pvt->dcsb1[cs], reg); | |
954 | } else { | |
955 | pvt->dcsb1[cs] = 0; | |
956 | } | |
957 | } | |
958 | ||
959 | for (cs = 0; cs < pvt->num_dcsm; cs++) { | |
4afcd2dc | 960 | reg = K8_DCSM0 + (cs * 4); |
8d5b5d9c | 961 | if (!amd64_read_pci_cfg(pvt->F2, reg, &pvt->dcsm0[cs])) |
94be4bff DT |
962 | debugf0(" DCSM0[%d]=0x%08x reg: F2x%x\n", |
963 | cs, pvt->dcsm0[cs], reg); | |
964 | ||
965 | /* If DCT are NOT ganged, then read in DCT1's mask */ | |
966 | if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) { | |
967 | reg = F10_DCSM1 + (cs * 4); | |
8d5b5d9c | 968 | if (!amd64_read_pci_cfg(pvt->F2, reg, |
6ba5dcdc | 969 | &pvt->dcsm1[cs])) |
94be4bff DT |
970 | debugf0(" DCSM1[%d]=0x%08x reg: F2x%x\n", |
971 | cs, pvt->dcsm1[cs], reg); | |
6ba5dcdc | 972 | } else { |
94be4bff | 973 | pvt->dcsm1[cs] = 0; |
6ba5dcdc | 974 | } |
94be4bff DT |
975 | } |
976 | } | |
977 | ||
24f9a7fe | 978 | static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt, int cs) |
94be4bff DT |
979 | { |
980 | enum mem_type type; | |
981 | ||
1433eb99 | 982 | if (boot_cpu_data.x86 >= 0x10 || pvt->ext_model >= K8_REV_F) { |
6b4c0bde BP |
983 | if (pvt->dchr0 & DDR3_MODE) |
984 | type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3; | |
985 | else | |
986 | type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2; | |
94be4bff | 987 | } else { |
94be4bff DT |
988 | type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR; |
989 | } | |
990 | ||
24f9a7fe | 991 | amd64_info("CS%d: %s\n", cs, edac_mem_types[type]); |
94be4bff DT |
992 | |
993 | return type; | |
994 | } | |
995 | ||
ddff876d DT |
996 | /* |
997 | * Read the DRAM Configuration Low register. It differs between CG, D & E revs | |
998 | * and the later RevF memory controllers (DDR vs DDR2) | |
999 | * | |
1000 | * Return: | |
1001 | * number of memory channels in operation | |
1002 | * Pass back: | |
1003 | * contents of the DCL0_LOW register | |
1004 | */ | |
1005 | static int k8_early_channel_count(struct amd64_pvt *pvt) | |
1006 | { | |
1007 | int flag, err = 0; | |
1008 | ||
8d5b5d9c | 1009 | err = amd64_read_pci_cfg(pvt->F2, F10_DCLR_0, &pvt->dclr0); |
ddff876d DT |
1010 | if (err) |
1011 | return err; | |
1012 | ||
9f56da0e | 1013 | if (pvt->ext_model >= K8_REV_F) |
ddff876d DT |
1014 | /* RevF (NPT) and later */ |
1015 | flag = pvt->dclr0 & F10_WIDTH_128; | |
9f56da0e | 1016 | else |
ddff876d DT |
1017 | /* RevE and earlier */ |
1018 | flag = pvt->dclr0 & REVE_WIDTH_128; | |
ddff876d DT |
1019 | |
1020 | /* not used */ | |
1021 | pvt->dclr1 = 0; | |
1022 | ||
1023 | return (flag) ? 2 : 1; | |
1024 | } | |
1025 | ||
1026 | /* extract the ERROR ADDRESS for the K8 CPUs */ | |
1027 | static u64 k8_get_error_address(struct mem_ctl_info *mci, | |
ef44cc4c | 1028 | struct err_regs *info) |
ddff876d DT |
1029 | { |
1030 | return (((u64) (info->nbeah & 0xff)) << 32) + | |
1031 | (info->nbeal & ~0x03); | |
1032 | } | |
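
A sanity check of the address assembly with hypothetical register contents: the low byte of NB Address High supplies bits 39:32, and the bottom two bits of NB Address Low are masked off:

```c
#include <stdio.h>

int main(void)
{
	unsigned int nbeah = 0x12;		/* hypothetical NB Address High */
	unsigned int nbeal = 0x3456789bU;	/* hypothetical NB Address Low */

	/* 40-bit error address; the low two bits are not part of it */
	unsigned long long err_addr =
		(((unsigned long long)(nbeah & 0xff)) << 32) + (nbeal & ~0x03U);

	printf("err_addr=0x%llx\n", err_addr);	/* 0x1234567898 */
	return 0;
}
```
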
1033 | ||
1034 | /* | |
1035 | * Read the Base and Limit registers for K8 based Memory controllers; extract | |
1036 | * fields from the 'raw' reg into separate data fields | |
1037 | * | |
1038 | * Isolates: BASE, LIMIT, IntlvEn, IntlvSel, RW_EN | |
1039 | */ | |
1040 | static void k8_read_dram_base_limit(struct amd64_pvt *pvt, int dram) | |
1041 | { | |
1042 | u32 low; | |
1043 | u32 off = dram << 3; /* 8 bytes between DRAM entries */ | |
ddff876d | 1044 | |
8d5b5d9c | 1045 | amd64_read_pci_cfg(pvt->F1, K8_DRAM_BASE_LOW + off, &low); |
ddff876d DT |
1046 | |
1047 | /* Extract parts into separate data entries */ | |
4997811e | 1048 | pvt->dram_base[dram] = ((u64) low & 0xFFFF0000) << 8; |
ddff876d DT |
1049 | pvt->dram_IntlvEn[dram] = (low >> 8) & 0x7; |
1050 | pvt->dram_rw_en[dram] = (low & 0x3); | |
1051 | ||
8d5b5d9c | 1052 | amd64_read_pci_cfg(pvt->F1, K8_DRAM_LIMIT_LOW + off, &low); |
ddff876d DT |
1053 | |
1054 | /* | |
1055 | * Extract parts into separate data entries. Limit is the HIGHEST memory | |
1056 | * location of the region, so lower 24 bits need to be all ones | |
1057 | */ | |
4997811e | 1058 | pvt->dram_limit[dram] = (((u64) low & 0xFFFF0000) << 8) | 0x00FFFFFF; |
ddff876d DT |
1059 | pvt->dram_IntlvSel[dram] = (low >> 8) & 0x7; |
1060 | pvt->dram_DstNode[dram] = (low & 0x7); | |
1061 | } | |
1062 | ||
1063 | static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, | |
ad6a32e9 | 1064 | struct err_regs *err_info, u64 sys_addr) |
ddff876d DT |
1065 | { |
1066 | struct mem_ctl_info *src_mci; | |
ddff876d DT |
1067 | int channel, csrow; |
1068 | u32 page, offset; | |
ad6a32e9 | 1069 | u16 syndrome; |
ddff876d | 1070 | |
ad6a32e9 | 1071 | syndrome = extract_syndrome(err_info); |
ddff876d DT |
1072 | |
1073 | /* CHIPKILL enabled */ | |
ad6a32e9 | 1074 | if (err_info->nbcfg & K8_NBCFG_CHIPKILL) { |
bfc04aec | 1075 | channel = get_channel_from_ecc_syndrome(mci, syndrome); |
ddff876d DT |
1076 | if (channel < 0) { |
1077 | /* | |
1078 | * Syndrome didn't map, so we don't know which of the | |
1079 | * 2 DIMMs is in error. So we need to ID 'both' of them | |
1080 | * as suspect. | |
1081 | */ | |
24f9a7fe BP |
1082 | amd64_mc_warn(mci, "unknown syndrome 0x%04x - possible " |
1083 | "error reporting race\n", syndrome); | |
ddff876d DT |
1084 | edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR); |
1085 | return; | |
1086 | } | |
1087 | } else { | |
1088 | /* | |
1089 | * non-chipkill ecc mode | |
1090 | * | |
1091 | * The k8 documentation is unclear about how to determine the | |
1092 | * channel number when using non-chipkill memory. This method | |
1093 | * was obtained from email communication with someone at AMD. | |
1094 | * (Wish the email was placed in this comment - norsk) | |
1095 | */ | |
44e9e2ee | 1096 | channel = ((sys_addr & BIT(3)) != 0); |
ddff876d DT |
1097 | } |
1098 | ||
1099 | /* | |
1100 | * Find out which node the error address belongs to. This may be | |
1101 | * different from the node that detected the error. | |
1102 | */ | |
44e9e2ee | 1103 | src_mci = find_mc_by_sys_addr(mci, sys_addr); |
2cff18c2 | 1104 | if (!src_mci) { |
24f9a7fe | 1105 | amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n", |
44e9e2ee | 1106 | (unsigned long)sys_addr); |
ddff876d DT |
1107 | edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR); |
1108 | return; | |
1109 | } | |
1110 | ||
44e9e2ee BP |
1111 | /* Now map the sys_addr to a CSROW */ |
1112 | csrow = sys_addr_to_csrow(src_mci, sys_addr); | |
ddff876d DT |
1113 | if (csrow < 0) { |
1114 | edac_mc_handle_ce_no_info(src_mci, EDAC_MOD_STR); | |
1115 | } else { | |
44e9e2ee | 1116 | error_address_to_page_and_offset(sys_addr, &page, &offset); |
ddff876d DT |
1117 | |
1118 | edac_mc_handle_ce(src_mci, page, offset, syndrome, csrow, | |
1119 | channel, EDAC_MOD_STR); | |
1120 | } | |
1121 | } | |
1122 | ||
1433eb99 | 1123 | static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode) |
ddff876d | 1124 | { |
1433eb99 | 1125 | int *dbam_map; |
ddff876d | 1126 | |
1433eb99 BP |
1127 | if (pvt->ext_model >= K8_REV_F) |
1128 | dbam_map = ddr2_dbam; | |
1129 | else if (pvt->ext_model >= K8_REV_D) | |
1130 | dbam_map = ddr2_dbam_revD; | |
1131 | else | |
1132 | dbam_map = ddr2_dbam_revCG; | |
ddff876d | 1133 | |
1433eb99 | 1134 | return dbam_map[cs_mode]; |
ddff876d DT |
1135 | } |
1136 | ||
1afd3c98 DT |
1137 | /* |
1138 | * Get the number of DCT channels in use. | |
1139 | * | |
1140 | * Return: | |
1141 | * number of Memory Channels in operation | |
1142 | * Pass back: | |
1143 | * contents of the DCL0_LOW register | |
1144 | */ | |
1145 | static int f10_early_channel_count(struct amd64_pvt *pvt) | |
1146 | { | |
57a30854 | 1147 | int dbams[] = { DBAM0, DBAM1 }; |
6ba5dcdc | 1148 | int i, j, channels = 0; |
1afd3c98 DT |
1149 | u32 dbam; |
1150 | ||
1afd3c98 DT |
1151 | /* If we are in 128 bit mode, then we are using 2 channels */ |
1152 | if (pvt->dclr0 & F10_WIDTH_128) { | |
1afd3c98 DT |
1153 | channels = 2; |
1154 | return channels; | |
1155 | } | |
1156 | ||
1157 | /* | |
d16149e8 BP |
1158 | * Need to check if in unganged mode: in that case, there are 2 channels,
1159 | * but they are not in 128 bit mode and thus the above 'dclr0' status | |
1160 | * bit will be OFF. | |
1afd3c98 DT |
1161 | * |
1162 | * Need to check DCT0[0] and DCT1[0] to see if only one of them has | |
1163 | * their CSEnable bit on. If so, then SINGLE DIMM case. | |
1164 | */ | |
d16149e8 | 1165 | debugf0("Data width is not 128 bits - need more decoding\n"); |
ddff876d | 1166 | |
1afd3c98 DT |
1167 | /* |
1168 | * Check DRAM Bank Address Mapping values for each DIMM to see if there | |
1169 | * is more than just one DIMM present in unganged mode. Need to check | |
1170 | * both controllers since DIMMs can be placed in either one. | |
1171 | */ | |
57a30854 | 1172 | for (i = 0; i < ARRAY_SIZE(dbams); i++) { |
8d5b5d9c | 1173 | if (amd64_read_pci_cfg(pvt->F2, dbams[i], &dbam)) |
1afd3c98 DT |
1174 | goto err_reg; |
1175 | ||
57a30854 WW |
1176 | for (j = 0; j < 4; j++) { |
1177 | if (DBAM_DIMM(j, dbam) > 0) { | |
1178 | channels++; | |
1179 | break; | |
1180 | } | |
1181 | } | |
1afd3c98 DT |
1182 | } |
1183 | ||
d16149e8 BP |
1184 | if (channels > 2) |
1185 | channels = 2; | |
1186 | ||
24f9a7fe | 1187 | amd64_info("MCT channel count: %d\n", channels); |
1afd3c98 DT |
1188 | |
1189 | return channels; | |
1190 | ||
1191 | err_reg: | |
1192 | return -1; | |
1193 | ||
1194 | } | |
1195 | ||
1433eb99 | 1196 | static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode) |
1afd3c98 | 1197 | { |
1433eb99 BP |
1198 | int *dbam_map; |
1199 | ||
1200 | if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE) | |
1201 | dbam_map = ddr3_dbam; | |
1202 | else | |
1203 | dbam_map = ddr2_dbam; | |
1204 | ||
1205 | return dbam_map[cs_mode]; | |
1afd3c98 DT |
1206 | } |
1207 | ||
1208 | /* Enable extended configuration access via 0xCF8 feature */ | |
1209 | static void amd64_setup(struct amd64_pvt *pvt) | |
1210 | { | |
1211 | u32 reg; | |
1212 | ||
8d5b5d9c | 1213 | amd64_read_pci_cfg(pvt->F3, F10_NB_CFG_HIGH, ®); |
1afd3c98 DT |
1214 | |
1215 | pvt->flags.cf8_extcfg = !!(reg & F10_NB_CFG_LOW_ENABLE_EXT_CFG); | |
1216 | reg |= F10_NB_CFG_LOW_ENABLE_EXT_CFG; | |
8d5b5d9c | 1217 | pci_write_config_dword(pvt->F3, F10_NB_CFG_HIGH, reg); |
1afd3c98 DT |
1218 | } |
1219 | ||
1220 | /* Restore the extended configuration access via 0xCF8 feature */ | |
1221 | static void amd64_teardown(struct amd64_pvt *pvt) | |
1222 | { | |
1223 | u32 reg; | |
1224 | ||
8d5b5d9c | 1225 | amd64_read_pci_cfg(pvt->F3, F10_NB_CFG_HIGH, ®); |
1afd3c98 DT |
1226 | |
1227 | reg &= ~F10_NB_CFG_LOW_ENABLE_EXT_CFG; | |
1228 | if (pvt->flags.cf8_extcfg) | |
1229 | reg |= F10_NB_CFG_LOW_ENABLE_EXT_CFG; | |
8d5b5d9c | 1230 | pci_write_config_dword(pvt->F3, F10_NB_CFG_HIGH, reg); |
1afd3c98 DT |
1231 | } |
1232 | ||
1233 | static u64 f10_get_error_address(struct mem_ctl_info *mci, | |
ef44cc4c | 1234 | struct err_regs *info) |
1afd3c98 DT |
1235 | { |
1236 | return (((u64) (info->nbeah & 0xffff)) << 32) + | |
1237 | (info->nbeal & ~0x01); | |
1238 | } | |
1239 | ||
1240 | /* | |
1241 | * Read the Base and Limit registers for F10 based Memory controllers. Extract | |
1242 | * fields from the 'raw' reg into separate data fields. | |
1243 | * | |
1244 | * Isolates: BASE, LIMIT, IntlvEn, IntlvSel, RW_EN. | |
1245 | */ | |
1246 | static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram) | |
1247 | { | |
1248 | u32 high_offset, low_offset, high_base, low_base, high_limit, low_limit; | |
1249 | ||
1250 | low_offset = K8_DRAM_BASE_LOW + (dram << 3); | |
1251 | high_offset = F10_DRAM_BASE_HIGH + (dram << 3); | |
1252 | ||
1253 | /* read the 'raw' DRAM BASE Address register */ | |
8d5b5d9c | 1254 | amd64_read_pci_cfg(pvt->F1, low_offset, &low_base); |
1afd3c98 DT |
1255 | |
1256 | /* Read from the ECS data register */ | |
8d5b5d9c | 1257 | amd64_read_pci_cfg(pvt->F1, high_offset, &high_base); |
1afd3c98 DT |
1258 | |
1259 | /* Extract parts into separate data entries */ | |
1260 | pvt->dram_rw_en[dram] = (low_base & 0x3); | |
1261 | ||
1262 | if (pvt->dram_rw_en[dram] == 0) | |
1263 | return; | |
1264 | ||
1265 | pvt->dram_IntlvEn[dram] = (low_base >> 8) & 0x7; | |
1266 | ||
66216a7a | 1267 | pvt->dram_base[dram] = (((u64)high_base & 0x000000FF) << 40) | |
4997811e | 1268 | (((u64)low_base & 0xFFFF0000) << 8); |
1afd3c98 DT |
1269 | |
1270 | low_offset = K8_DRAM_LIMIT_LOW + (dram << 3); | |
1271 | high_offset = F10_DRAM_LIMIT_HIGH + (dram << 3); | |
1272 | ||
1273 | /* read the 'raw' LIMIT registers */ | |
8d5b5d9c | 1274 | amd64_read_pci_cfg(pvt->F1, low_offset, &low_limit); |
1afd3c98 DT |
1275 | |
1276 | /* Read from the ECS data register for the HIGH portion */ | |
8d5b5d9c | 1277 | amd64_read_pci_cfg(pvt->F1, high_offset, &high_limit); |
1afd3c98 | 1278 | |
1afd3c98 DT |
1279 | pvt->dram_DstNode[dram] = (low_limit & 0x7); |
1280 | pvt->dram_IntlvSel[dram] = (low_limit >> 8) & 0x7; | |
1281 | ||
1282 | /* | |
1283 | * Extract address values and form a LIMIT address. Limit is the HIGHEST | |
1284 | * memory location of the region, so low 24 bits need to be all ones. | |
1285 | */ | |
66216a7a | 1286 | pvt->dram_limit[dram] = (((u64)high_limit & 0x000000FF) << 40) | |
4997811e | 1287 | (((u64) low_limit & 0xFFFF0000) << 8) | |
66216a7a | 1288 | 0x00FFFFFF; |
1afd3c98 | 1289 | } |
6163b5d4 DT |
1290 | |
1291 | static void f10_read_dram_ctl_register(struct amd64_pvt *pvt) | |
1292 | { | |
6163b5d4 | 1293 | |
8d5b5d9c | 1294 | if (!amd64_read_pci_cfg(pvt->F2, F10_DCTL_SEL_LOW, |
6ba5dcdc | 1295 | &pvt->dram_ctl_select_low)) { |
72381bd5 BP |
1296 | debugf0("F2x110 (DCTL Sel. Low): 0x%08x, " |
1297 | "High range addresses at: 0x%x\n", | |
1298 | pvt->dram_ctl_select_low, | |
1299 | dct_sel_baseaddr(pvt)); | |
1300 | ||
1301 | debugf0(" DCT mode: %s, All DCTs on: %s\n", | |
1302 | (dct_ganging_enabled(pvt) ? "ganged" : "unganged"), | |
1303 | (dct_dram_enabled(pvt) ? "yes" : "no")); | |
1304 | ||
1305 | if (!dct_ganging_enabled(pvt)) | |
1306 | debugf0(" Address range split per DCT: %s\n", | |
1307 | (dct_high_range_enabled(pvt) ? "yes" : "no")); | |
1308 | ||
1309 | debugf0(" DCT data interleave for ECC: %s, " | |
1310 | "DRAM cleared since last warm reset: %s\n", | |
1311 | (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"), | |
1312 | (dct_memory_cleared(pvt) ? "yes" : "no")); | |
1313 | ||
1314 | debugf0(" DCT channel interleave: %s, " | |
1315 | "DCT interleave bits selector: 0x%x\n", | |
1316 | (dct_interleave_enabled(pvt) ? "enabled" : "disabled"), | |
6163b5d4 DT |
1317 | dct_sel_interleave_addr(pvt)); |
1318 | } | |
1319 | ||
8d5b5d9c | 1320 | amd64_read_pci_cfg(pvt->F2, F10_DCTL_SEL_HIGH, |
6ba5dcdc | 1321 | &pvt->dram_ctl_select_high); |
6163b5d4 DT |
1322 | } |
1323 | ||
f71d0a05 DT |
1324 | /* |
1325 | * determine channel based on the interleaving mode: F10h BKDG, 2.8.9 Memory | |
1326 | * Interleaving Modes. | |
1327 | */ | |
6163b5d4 DT |
1328 | static u32 f10_determine_channel(struct amd64_pvt *pvt, u64 sys_addr, |
1329 | int hi_range_sel, u32 intlv_en) | |
1330 | { | |
1331 | u32 cs, temp, dct_sel_high = (pvt->dram_ctl_select_low >> 1) & 1; | |
1332 | ||
1333 | if (dct_ganging_enabled(pvt)) | |
1334 | cs = 0; | |
1335 | else if (hi_range_sel) | |
1336 | cs = dct_sel_high; | |
1337 | else if (dct_interleave_enabled(pvt)) { | |
f71d0a05 DT |
1338 | /* |
1339 | * see F2x110[DctSelIntLvAddr] - channel interleave mode | |
1340 | */ | |
6163b5d4 DT |
1341 | if (dct_sel_interleave_addr(pvt) == 0) |
1342 | cs = sys_addr >> 6 & 1; | |
1343 | else if ((dct_sel_interleave_addr(pvt) >> 1) & 1) { | |
1344 | temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2; | |
1345 | ||
1346 | if (dct_sel_interleave_addr(pvt) & 1) | |
1347 | cs = (sys_addr >> 9 & 1) ^ temp; | |
1348 | else | |
1349 | cs = (sys_addr >> 6 & 1) ^ temp; | |
1350 | } else if (intlv_en & 4) | |
1351 | cs = sys_addr >> 15 & 1; | |
1352 | else if (intlv_en & 2) | |
1353 | cs = sys_addr >> 14 & 1; | |
1354 | else if (intlv_en & 1) | |
1355 | cs = sys_addr >> 13 & 1; | |
1356 | else | |
1357 | cs = sys_addr >> 12 & 1; | |
1358 | } else if (dct_high_range_enabled(pvt) && !dct_ganging_enabled(pvt)) | |
1359 | cs = ~dct_sel_high & 1; | |
1360 | else | |
1361 | cs = 0; | |
1362 | ||
1363 | return cs; | |
1364 | } | |
1365 | ||
1366 | static inline u32 f10_map_intlv_en_to_shift(u32 intlv_en) | |
1367 | { | |
1368 | if (intlv_en == 1) | |
1369 | return 1; | |
1370 | else if (intlv_en == 3) | |
1371 | return 2; | |
1372 | else if (intlv_en == 7) | |
1373 | return 3; | |
1374 | ||
1375 | return 0; | |
1376 | } | |
1377 | ||
f71d0a05 DT |
1378 | /* See F10h BKDG, 2.8.10.2 DctSelBaseOffset Programming */ |
1379 | static inline u64 f10_get_base_addr_offset(u64 sys_addr, int hi_range_sel, | |
6163b5d4 DT |
1380 | u32 dct_sel_base_addr, |
1381 | u64 dct_sel_base_off, | |
f71d0a05 | 1382 | u32 hole_valid, u32 hole_off, |
6163b5d4 DT |
1383 | u64 dram_base) |
1384 | { | |
1385 | u64 chan_off; | |
1386 | ||
1387 | if (hi_range_sel) { | |
9975a5f2 | 1388 | if (!(dct_sel_base_addr & 0xFFFF0000) && |
f71d0a05 | 1389 | hole_valid && (sys_addr >= 0x100000000ULL)) |
6163b5d4 DT |
1390 | chan_off = hole_off << 16; |
1391 | else | |
1392 | chan_off = dct_sel_base_off; | |
1393 | } else { | |
f71d0a05 | 1394 | if (hole_valid && (sys_addr >= 0x100000000ULL)) |
6163b5d4 DT |
1395 | chan_off = hole_off << 16; |
1396 | else | |
1397 | chan_off = dram_base & 0xFFFFF8000000ULL; | |
1398 | } | |
1399 | ||
1400 | return (sys_addr & 0x0000FFFFFFFFFFC0ULL) - | |
1401 | (chan_off & 0x0000FFFFFF800000ULL); | |
1402 | } | |
1403 | ||
1404 | /* Hack for the time being - Can we get this from BIOS?? */ | |
1405 | #define CH0SPARE_RANK 0 | |
1406 | #define CH1SPARE_RANK 1 | |
1407 | ||
1408 | /* | |
1409 | * checks if the csrow passed in is marked as SPARED; if so, returns the new | |
1410 | * spare row | |
1411 | */ | |
1412 | static inline int f10_process_possible_spare(int csrow, | |
1413 | u32 cs, struct amd64_pvt *pvt) | |
1414 | { | |
1415 | u32 swap_done; | |
1416 | u32 bad_dram_cs; | |
1417 | ||
1418 | /* Depending on channel, isolate respective SPARING info */ | |
1419 | if (cs) { | |
1420 | swap_done = F10_ONLINE_SPARE_SWAPDONE1(pvt->online_spare); | |
1421 | bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS1(pvt->online_spare); | |
1422 | if (swap_done && (csrow == bad_dram_cs)) | |
1423 | csrow = CH1SPARE_RANK; | |
1424 | } else { | |
1425 | swap_done = F10_ONLINE_SPARE_SWAPDONE0(pvt->online_spare); | |
1426 | bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS0(pvt->online_spare); | |
1427 | if (swap_done && (csrow == bad_dram_cs)) | |
1428 | csrow = CH0SPARE_RANK; | |
1429 | } | |
1430 | return csrow; | |
1431 | } | |
1432 | ||
1433 | /* | |
1434 | * Iterate over the DRAM DCT "base" and "mask" registers looking for a | |
1435 | * SystemAddr match on the specified 'ChannelSelect' and 'NodeID' | |
1436 | * | |
1437 | * Return: | |
1438 | * -EINVAL: NOT FOUND | |
1439 | * 0..csrow = Chip-Select Row | |
1440 | */ | |
1441 | static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs) | |
1442 | { | |
1443 | struct mem_ctl_info *mci; | |
1444 | struct amd64_pvt *pvt; | |
1445 | u32 cs_base, cs_mask; | |
1446 | int cs_found = -EINVAL; | |
1447 | int csrow; | |
1448 | ||
1449 | mci = mci_lookup[nid]; | |
1450 | if (!mci) | |
1451 | return cs_found; | |
1452 | ||
1453 | pvt = mci->pvt_info; | |
1454 | ||
1455 | debugf1("InputAddr=0x%x channelselect=%d\n", in_addr, cs); | |
1456 | ||
9d858bb1 | 1457 | for (csrow = 0; csrow < pvt->cs_count; csrow++) { |
6163b5d4 DT |
1458 | |
1459 | cs_base = amd64_get_dct_base(pvt, cs, csrow); | |
1460 | if (!(cs_base & K8_DCSB_CS_ENABLE)) | |
1461 | continue; | |
1462 | ||
1463 | /* | |
1464 |  * We have an ENABLED CSROW; isolate just the MASK bits of the | |
1465 | * target: [28:19] and [13:5], which map to [36:27] and [21:13] | |
1466 | * of the actual address. | |
1467 | */ | |
1468 | cs_base &= REV_F_F1Xh_DCSB_BASE_BITS; | |
1469 | ||
1470 | /* | |
1471 |  * Get the DCT Mask and force the reserved bits [18:16] and | |
1472 |  * [4:0] to ON. Then keep only bits [28:0] ([36:8]). | |
1473 | */ | |
1474 | cs_mask = amd64_get_dct_mask(pvt, cs, csrow); | |
1475 | ||
1476 | debugf1(" CSROW=%d CSBase=0x%x RAW CSMask=0x%x\n", | |
1477 | csrow, cs_base, cs_mask); | |
1478 | ||
1479 | cs_mask = (cs_mask | 0x0007C01F) & 0x1FFFFFFF; | |
1480 | ||
1481 | debugf1(" Final CSMask=0x%x\n", cs_mask); | |
1482 | debugf1(" (InputAddr & ~CSMask)=0x%x " | |
1483 | "(CSBase & ~CSMask)=0x%x\n", | |
1484 | (in_addr & ~cs_mask), (cs_base & ~cs_mask)); | |
1485 | ||
1486 | if ((in_addr & ~cs_mask) == (cs_base & ~cs_mask)) { | |
1487 | cs_found = f10_process_possible_spare(csrow, cs, pvt); | |
1488 | ||
1489 | debugf1(" MATCH csrow=%d\n", cs_found); | |
1490 | break; | |
1491 | } | |
1492 | } | |
1493 | return cs_found; | |
1494 | } | |
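/*
 * Illustrative note, not part of the driver: the compare above keeps only
 * the address bits that are NOT covered by the chip-select mask, i.e. an
 * InputAddr belongs to a csrow when it agrees with that csrow's base in
 * every unmasked bit; the masked bits are "don't care" row/column/bank bits
 * inside the chip select.
 */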
1495 | ||
f71d0a05 DT |
1496 | /* For a given @dram_range, check if @sys_addr falls within it. */ |
1497 | static int f10_match_to_this_node(struct amd64_pvt *pvt, int dram_range, | |
1498 | u64 sys_addr, int *nid, int *chan_sel) | |
1499 | { | |
1500 | int node_id, cs_found = -EINVAL, high_range = 0; | |
1501 | u32 intlv_en, intlv_sel, intlv_shift, hole_off; | |
1502 | u32 hole_valid, tmp, dct_sel_base, channel; | |
1503 | u64 dram_base, chan_addr, dct_sel_base_off; | |
1504 | ||
1505 | dram_base = pvt->dram_base[dram_range]; | |
1506 | intlv_en = pvt->dram_IntlvEn[dram_range]; | |
1507 | ||
1508 | node_id = pvt->dram_DstNode[dram_range]; | |
1509 | intlv_sel = pvt->dram_IntlvSel[dram_range]; | |
1510 | ||
1511 | debugf1("(dram=%d) Base=0x%llx SystemAddr= 0x%llx Limit=0x%llx\n", | |
1512 | dram_range, dram_base, sys_addr, pvt->dram_limit[dram_range]); | |
1513 | ||
1514 | /* | |
1515 | * This assumes that one node's DHAR is the same as all the other | |
1516 | * nodes' DHAR. | |
1517 | */ | |
1518 | hole_off = (pvt->dhar & 0x0000FF80); | |
1519 | hole_valid = (pvt->dhar & 0x1); | |
1520 | dct_sel_base_off = (pvt->dram_ctl_select_high & 0xFFFFFC00) << 16; | |
1521 | ||
1522 | debugf1(" HoleOffset=0x%x HoleValid=0x%x IntlvSel=0x%x\n", | |
1523 | hole_off, hole_valid, intlv_sel); | |
1524 | ||
e726f3c3 | 1525 | if (intlv_en && |
f71d0a05 DT |
1526 | (intlv_sel != ((sys_addr >> 12) & intlv_en))) |
1527 | return -EINVAL; | |
1528 | ||
1529 | dct_sel_base = dct_sel_baseaddr(pvt); | |
1530 | ||
1531 | /* | |
1532 | * check whether addresses >= DctSelBaseAddr[47:27] are to be used to | |
1533 | * select between DCT0 and DCT1. | |
1534 | */ | |
1535 | if (dct_high_range_enabled(pvt) && | |
1536 | !dct_ganging_enabled(pvt) && | |
1537 | ((sys_addr >> 27) >= (dct_sel_base >> 11))) | |
1538 | high_range = 1; | |
1539 | ||
1540 | channel = f10_determine_channel(pvt, sys_addr, high_range, intlv_en); | |
1541 | ||
1542 | chan_addr = f10_get_base_addr_offset(sys_addr, high_range, dct_sel_base, | |
1543 | dct_sel_base_off, hole_valid, | |
1544 | hole_off, dram_base); | |
1545 | ||
1546 | intlv_shift = f10_map_intlv_en_to_shift(intlv_en); | |
1547 | ||
1548 | /* remove Node ID (in case of memory interleaving) */ | |
1549 | tmp = chan_addr & 0xFC0; | |
1550 | ||
1551 | chan_addr = ((chan_addr >> intlv_shift) & 0xFFFFFFFFF000ULL) | tmp; | |
1552 | ||
1553 | /* remove channel interleave and hash */ | |
1554 | if (dct_interleave_enabled(pvt) && | |
1555 | !dct_high_range_enabled(pvt) && | |
1556 | !dct_ganging_enabled(pvt)) { | |
1557 | if (dct_sel_interleave_addr(pvt) != 1) | |
1558 | chan_addr = (chan_addr >> 1) & 0xFFFFFFFFFFFFFFC0ULL; | |
1559 | else { | |
1560 | tmp = chan_addr & 0xFC0; | |
1561 | chan_addr = ((chan_addr & 0xFFFFFFFFFFFFC000ULL) >> 1) | |
1562 | | tmp; | |
1563 | } | |
1564 | } | |
1565 | ||
1566 | debugf1(" (ChannelAddrLong=0x%llx) >> 8 becomes InputAddr=0x%x\n", | |
1567 | chan_addr, (u32)(chan_addr >> 8)); | |
1568 | ||
1569 | cs_found = f10_lookup_addr_in_dct(chan_addr >> 8, node_id, channel); | |
1570 | ||
1571 | if (cs_found >= 0) { | |
1572 | *nid = node_id; | |
1573 | *chan_sel = channel; | |
1574 | } | |
1575 | return cs_found; | |
1576 | } | |
1577 | ||
1578 | static int f10_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr, | |
1579 | int *node, int *chan_sel) | |
1580 | { | |
1581 | int dram_range, cs_found = -EINVAL; | |
1582 | u64 dram_base, dram_limit; | |
1583 | ||
1584 | for (dram_range = 0; dram_range < DRAM_REG_COUNT; dram_range++) { | |
1585 | ||
1586 | if (!pvt->dram_rw_en[dram_range]) | |
1587 | continue; | |
1588 | ||
1589 | dram_base = pvt->dram_base[dram_range]; | |
1590 | dram_limit = pvt->dram_limit[dram_range]; | |
1591 | ||
1592 | if ((dram_base <= sys_addr) && (sys_addr <= dram_limit)) { | |
1593 | ||
1594 | cs_found = f10_match_to_this_node(pvt, dram_range, | |
1595 | sys_addr, node, | |
1596 | chan_sel); | |
1597 | if (cs_found >= 0) | |
1598 | break; | |
1599 | } | |
1600 | } | |
1601 | return cs_found; | |
1602 | } | |
1603 | ||
1604 | /* | |
bdc30a0c BP |
1605 | * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps |
1606 | * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW). | |
f71d0a05 | 1607 | * |
bdc30a0c BP |
1608 | * The @sys_addr is usually an error address received from the hardware |
1609 | * (MCX_ADDR). | |
f71d0a05 DT |
1610 | */ |
1611 | static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci, | |
ad6a32e9 | 1612 | struct err_regs *err_info, |
f71d0a05 DT |
1613 | u64 sys_addr) |
1614 | { | |
1615 | struct amd64_pvt *pvt = mci->pvt_info; | |
1616 | u32 page, offset; | |
f71d0a05 | 1617 | int nid, csrow, chan = 0; |
ad6a32e9 | 1618 | u16 syndrome; |
f71d0a05 DT |
1619 | |
1620 | csrow = f10_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan); | |
1621 | ||
bdc30a0c BP |
1622 | if (csrow < 0) { |
1623 | edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR); | |
1624 | return; | |
1625 | } | |
1626 | ||
1627 | error_address_to_page_and_offset(sys_addr, &page, &offset); | |
f71d0a05 | 1628 | |
ad6a32e9 | 1629 | syndrome = extract_syndrome(err_info); |
bdc30a0c BP |
1630 | |
1631 | /* | |
1632 | * We need the syndromes for channel detection only when we're | |
1633 | * ganged. Otherwise @chan should already contain the channel at | |
1634 | * this point. | |
1635 | */ | |
962b70a1 | 1636 | if (dct_ganging_enabled(pvt) && (pvt->nbcfg & K8_NBCFG_CHIPKILL)) |
bdc30a0c | 1637 | chan = get_channel_from_ecc_syndrome(mci, syndrome); |
f71d0a05 | 1638 | |
bdc30a0c BP |
1639 | if (chan >= 0) |
1640 | edac_mc_handle_ce(mci, page, offset, syndrome, csrow, chan, | |
1641 | EDAC_MOD_STR); | |
1642 | else | |
f71d0a05 | 1643 | /* |
bdc30a0c | 1644 | * Channel unknown, report all channels on this CSROW as failed. |
f71d0a05 | 1645 | */ |
bdc30a0c | 1646 | for (chan = 0; chan < mci->csrows[csrow].nr_channels; chan++) |
f71d0a05 | 1647 | edac_mc_handle_ce(mci, page, offset, syndrome, |
bdc30a0c | 1648 | csrow, chan, EDAC_MOD_STR); |
f71d0a05 DT |
1649 | } |
1650 | ||
f71d0a05 | 1651 | /* |
8566c4df | 1652 | * debug routine to display the memory sizes of all logical DIMMs and its |
f71d0a05 DT |
1653 |  * CSROWs. | |
1654 | */ | |
8566c4df | 1655 | static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt) |
f71d0a05 | 1656 | { |
603adaf6 | 1657 | int dimm, size0, size1, factor = 0; |
f71d0a05 DT |
1658 | u32 dbam; |
1659 | u32 *dcsb; | |
1660 | ||
8566c4df | 1661 | if (boot_cpu_data.x86 == 0xf) { |
603adaf6 BP |
1662 | if (pvt->dclr0 & F10_WIDTH_128) |
1663 | factor = 1; | |
1664 | ||
8566c4df | 1665 |  /* K8 revisions < revF not supported yet */ | |
1433eb99 | 1666 | if (pvt->ext_model < K8_REV_F) |
8566c4df BP |
1667 | return; |
1668 | else | |
1669 | WARN_ON(ctrl != 0); | |
1670 | } | |
1671 | ||
1672 | debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n", | |
1673 | ctrl, ctrl ? pvt->dbam1 : pvt->dbam0); | |
f71d0a05 DT |
1674 | |
1675 | dbam = ctrl ? pvt->dbam1 : pvt->dbam0; | |
1676 | dcsb = ctrl ? pvt->dcsb1 : pvt->dcsb0; | |
1677 | ||
8566c4df BP |
1678 | edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl); |
1679 | ||
f71d0a05 DT |
1680 | /* Dump memory sizes for DIMM and its CSROWs */ |
1681 | for (dimm = 0; dimm < 4; dimm++) { | |
1682 | ||
1683 | size0 = 0; | |
1684 | if (dcsb[dimm*2] & K8_DCSB_CS_ENABLE) | |
1433eb99 | 1685 | size0 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam)); |
f71d0a05 DT |
1686 | |
1687 | size1 = 0; | |
1688 | if (dcsb[dimm*2 + 1] & K8_DCSB_CS_ENABLE) | |
1433eb99 | 1689 | size1 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam)); |
f71d0a05 | 1690 | |
24f9a7fe BP |
1691 | amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n", |
1692 | dimm * 2, size0 << factor, | |
1693 | dimm * 2 + 1, size1 << factor); | |
f71d0a05 DT |
1694 | } |
1695 | } | |
1696 | ||
4d37607a DT |
1697 | static struct amd64_family_type amd64_family_types[] = { |
1698 | [K8_CPUS] = { | |
0092b20d | 1699 | .ctl_name = "K8", |
8d5b5d9c BP |
1700 | .f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP, |
1701 | .f3_id = PCI_DEVICE_ID_AMD_K8_NB_MISC, | |
4d37607a | 1702 | .ops = { |
1433eb99 BP |
1703 | .early_channel_count = k8_early_channel_count, |
1704 | .get_error_address = k8_get_error_address, | |
1705 | .read_dram_base_limit = k8_read_dram_base_limit, | |
1706 | .map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow, | |
1707 | .dbam_to_cs = k8_dbam_to_chip_select, | |
4d37607a DT |
1708 | } |
1709 | }, | |
1710 | [F10_CPUS] = { | |
0092b20d | 1711 | .ctl_name = "F10h", |
8d5b5d9c BP |
1712 | .f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP, |
1713 | .f3_id = PCI_DEVICE_ID_AMD_10H_NB_MISC, | |
4d37607a | 1714 | .ops = { |
1433eb99 BP |
1715 | .early_channel_count = f10_early_channel_count, |
1716 | .get_error_address = f10_get_error_address, | |
1717 | .read_dram_base_limit = f10_read_dram_base_limit, | |
1718 | .read_dram_ctl_register = f10_read_dram_ctl_register, | |
1719 | .map_sysaddr_to_csrow = f10_map_sysaddr_to_csrow, | |
1720 | .dbam_to_cs = f10_dbam_to_chip_select, | |
4d37607a DT |
1721 | } |
1722 | }, | |
4d37607a DT |
1723 | }; |
1724 | ||
1725 | static struct pci_dev *pci_get_related_function(unsigned int vendor, | |
1726 | unsigned int device, | |
1727 | struct pci_dev *related) | |
1728 | { | |
1729 | struct pci_dev *dev = NULL; | |
1730 | ||
1731 | dev = pci_get_device(vendor, device, dev); | |
1732 | while (dev) { | |
1733 | if ((dev->bus->number == related->bus->number) && | |
1734 | (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn))) | |
1735 | break; | |
1736 | dev = pci_get_device(vendor, device, dev); | |
1737 | } | |
1738 | ||
1739 | return dev; | |
1740 | } | |
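/*
 * Usage sketch, for illustration only (amd64_reserve_mc_sibling_devices()
 * below is the real user): given the node's DRAM controller device pvt->F2,
 * the sibling function 1 device on the same bus/slot can be looked up as
 *
 *	struct pci_dev *f1 = pci_get_related_function(pvt->F2->vendor,
 *						      f1_id, pvt->F2);
 *
 * and must be released with pci_dev_put() when no longer needed.
 */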
1741 | ||
b1289d6f | 1742 | /* |
bfc04aec BP |
1743 | * These are tables of eigenvectors (one per line) which can be used for the |
1744 | * construction of the syndrome tables. The modified syndrome search algorithm | |
1745 | * uses those to find the symbol in error and thus the DIMM. | |
b1289d6f | 1746 | * |
bfc04aec | 1747 | * Algorithm courtesy of Ross LaFetra from AMD. |
b1289d6f | 1748 | */ |
bfc04aec BP |
1749 | static u16 x4_vectors[] = { |
1750 | 0x2f57, 0x1afe, 0x66cc, 0xdd88, | |
1751 | 0x11eb, 0x3396, 0x7f4c, 0xeac8, | |
1752 | 0x0001, 0x0002, 0x0004, 0x0008, | |
1753 | 0x1013, 0x3032, 0x4044, 0x8088, | |
1754 | 0x106b, 0x30d6, 0x70fc, 0xe0a8, | |
1755 | 0x4857, 0xc4fe, 0x13cc, 0x3288, | |
1756 | 0x1ac5, 0x2f4a, 0x5394, 0xa1e8, | |
1757 | 0x1f39, 0x251e, 0xbd6c, 0x6bd8, | |
1758 | 0x15c1, 0x2a42, 0x89ac, 0x4758, | |
1759 | 0x2b03, 0x1602, 0x4f0c, 0xca08, | |
1760 | 0x1f07, 0x3a0e, 0x6b04, 0xbd08, | |
1761 | 0x8ba7, 0x465e, 0x244c, 0x1cc8, | |
1762 | 0x2b87, 0x164e, 0x642c, 0xdc18, | |
1763 | 0x40b9, 0x80de, 0x1094, 0x20e8, | |
1764 | 0x27db, 0x1eb6, 0x9dac, 0x7b58, | |
1765 | 0x11c1, 0x2242, 0x84ac, 0x4c58, | |
1766 | 0x1be5, 0x2d7a, 0x5e34, 0xa718, | |
1767 | 0x4b39, 0x8d1e, 0x14b4, 0x28d8, | |
1768 | 0x4c97, 0xc87e, 0x11fc, 0x33a8, | |
1769 | 0x8e97, 0x497e, 0x2ffc, 0x1aa8, | |
1770 | 0x16b3, 0x3d62, 0x4f34, 0x8518, | |
1771 | 0x1e2f, 0x391a, 0x5cac, 0xf858, | |
1772 | 0x1d9f, 0x3b7a, 0x572c, 0xfe18, | |
1773 | 0x15f5, 0x2a5a, 0x5264, 0xa3b8, | |
1774 | 0x1dbb, 0x3b66, 0x715c, 0xe3f8, | |
1775 | 0x4397, 0xc27e, 0x17fc, 0x3ea8, | |
1776 | 0x1617, 0x3d3e, 0x6464, 0xb8b8, | |
1777 | 0x23ff, 0x12aa, 0xab6c, 0x56d8, | |
1778 | 0x2dfb, 0x1ba6, 0x913c, 0x7328, | |
1779 | 0x185d, 0x2ca6, 0x7914, 0x9e28, | |
1780 | 0x171b, 0x3e36, 0x7d7c, 0xebe8, | |
1781 | 0x4199, 0x82ee, 0x19f4, 0x2e58, | |
1782 | 0x4807, 0xc40e, 0x130c, 0x3208, | |
1783 | 0x1905, 0x2e0a, 0x5804, 0xac08, | |
1784 | 0x213f, 0x132a, 0xadfc, 0x5ba8, | |
1785 | 0x19a9, 0x2efe, 0xb5cc, 0x6f88, | |
b1289d6f DT |
1786 | }; |
1787 | ||
bfc04aec BP |
1788 | static u16 x8_vectors[] = { |
1789 | 0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480, | |
1790 | 0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80, | |
1791 | 0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80, | |
1792 | 0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80, | |
1793 | 0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780, | |
1794 | 0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080, | |
1795 | 0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080, | |
1796 | 0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080, | |
1797 | 0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80, | |
1798 | 0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580, | |
1799 | 0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880, | |
1800 | 0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280, | |
1801 | 0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180, | |
1802 | 0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580, | |
1803 | 0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280, | |
1804 | 0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180, | |
1805 | 0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080, | |
1806 | 0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080, | |
1807 | 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000, | |
1808 | }; | |
1809 | ||
1810 | static int decode_syndrome(u16 syndrome, u16 *vectors, int num_vecs, | |
ad6a32e9 | 1811 | int v_dim) |
b1289d6f | 1812 | { |
bfc04aec BP |
1813 | unsigned int i, err_sym; |
1814 | ||
1815 | for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) { | |
1816 | u16 s = syndrome; | |
1817 | int v_idx = err_sym * v_dim; | |
1818 | int v_end = (err_sym + 1) * v_dim; | |
1819 | ||
1820 | /* walk over all 16 bits of the syndrome */ | |
1821 | for (i = 1; i < (1U << 16); i <<= 1) { | |
1822 | ||
1823 | /* if bit is set in that eigenvector... */ | |
1824 | if (v_idx < v_end && vectors[v_idx] & i) { | |
1825 | u16 ev_comp = vectors[v_idx++]; | |
1826 | ||
1827 | /* ... and bit set in the modified syndrome, */ | |
1828 | if (s & i) { | |
1829 | /* remove it. */ | |
1830 | s ^= ev_comp; | |
4d37607a | 1831 | |
bfc04aec BP |
1832 | if (!s) |
1833 | return err_sym; | |
1834 | } | |
b1289d6f | 1835 | |
bfc04aec BP |
1836 | } else if (s & i) |
1837 | /* can't get to zero, move to next symbol */ | |
1838 | break; | |
1839 | } | |
b1289d6f DT |
1840 | } |
1841 | ||
1842 | debugf0("syndrome(%x) not found\n", syndrome); | |
1843 | return -1; | |
1844 | } | |
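/*
 * Worked example, for illustration only: with the x4 table above, error
 * symbol 2 is spanned by the identity vectors 0x0001, 0x0002, 0x0004 and
 * 0x0008, so a syndrome of 0x0005 cancels out while walking that symbol
 * (0x0005 ^ 0x0001 ^ 0x0004 == 0).  decode_syndrome(0x0005, x4_vectors,
 * ARRAY_SIZE(x4_vectors), 4) should therefore return 2, which
 * map_err_sym_to_channel() below maps to channel 0 (2 >> 4 == 0).
 */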
d27bf6fa | 1845 | |
bfc04aec BP |
1846 | static int map_err_sym_to_channel(int err_sym, int sym_size) |
1847 | { | |
1848 | if (sym_size == 4) | |
1849 | switch (err_sym) { | |
1850 | case 0x20: | |
1851 | case 0x21: | |
1852 | return 0; | |
1853 | break; | |
1854 | case 0x22: | |
1855 | case 0x23: | |
1856 | return 1; | |
1857 | break; | |
1858 | default: | |
1859 | return err_sym >> 4; | |
1860 | break; | |
1861 | } | |
1862 | /* x8 symbols */ | |
1863 | else | |
1864 | switch (err_sym) { | |
1865 | /* imaginary bits not in a DIMM */ | |
1866 | case 0x10: | |
1867 | WARN(1, KERN_ERR "Invalid error symbol: 0x%x\n", | |
1868 | err_sym); | |
1869 | return -1; | |
1870 | break; | |
1871 | ||
1872 | case 0x11: | |
1873 | return 0; | |
1874 | break; | |
1875 | case 0x12: | |
1876 | return 1; | |
1877 | break; | |
1878 | default: | |
1879 | return err_sym >> 3; | |
1880 | break; | |
1881 | } | |
1882 | return -1; | |
1883 | } | |
1884 | ||
1885 | static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome) | |
1886 | { | |
1887 | struct amd64_pvt *pvt = mci->pvt_info; | |
ad6a32e9 BP |
1888 | int err_sym = -1; |
1889 | ||
1890 | if (pvt->syn_type == 8) | |
1891 | err_sym = decode_syndrome(syndrome, x8_vectors, | |
1892 | ARRAY_SIZE(x8_vectors), | |
1893 | pvt->syn_type); | |
1894 | else if (pvt->syn_type == 4) | |
1895 | err_sym = decode_syndrome(syndrome, x4_vectors, | |
1896 | ARRAY_SIZE(x4_vectors), | |
1897 | pvt->syn_type); | |
1898 | else { | |
24f9a7fe | 1899 | amd64_warn("Illegal syndrome type: %u\n", pvt->syn_type); |
ad6a32e9 | 1900 | return err_sym; |
bfc04aec | 1901 | } |
ad6a32e9 BP |
1902 | |
1903 | return map_err_sym_to_channel(err_sym, pvt->syn_type); | |
bfc04aec BP |
1904 | } |
1905 | ||
d27bf6fa DT |
1906 | /* |
1907 | * Handle any Correctable Errors (CEs) that have occurred. Check for valid ERROR | |
1908 | * ADDRESS and process. | |
1909 | */ | |
1910 | static void amd64_handle_ce(struct mem_ctl_info *mci, | |
ef44cc4c | 1911 | struct err_regs *info) |
d27bf6fa DT |
1912 | { |
1913 | struct amd64_pvt *pvt = mci->pvt_info; | |
44e9e2ee | 1914 | u64 sys_addr; |
d27bf6fa DT |
1915 | |
1916 | /* Ensure that the Error Address is VALID */ | |
24f9a7fe BP |
1917 | if (!(info->nbsh & K8_NBSH_VALID_ERROR_ADDR)) { |
1918 | amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n"); | |
d27bf6fa DT |
1919 | edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR); |
1920 | return; | |
1921 | } | |
1922 | ||
1f6bcee7 | 1923 | sys_addr = pvt->ops->get_error_address(mci, info); |
d27bf6fa | 1924 | |
24f9a7fe | 1925 | amd64_mc_err(mci, "CE ERROR_ADDRESS= 0x%llx\n", sys_addr); |
d27bf6fa | 1926 | |
44e9e2ee | 1927 | pvt->ops->map_sysaddr_to_csrow(mci, info, sys_addr); |
d27bf6fa DT |
1928 | } |
1929 | ||
1930 | /* Handle any Un-correctable Errors (UEs) */ | |
1931 | static void amd64_handle_ue(struct mem_ctl_info *mci, | |
ef44cc4c | 1932 | struct err_regs *info) |
d27bf6fa | 1933 | { |
1f6bcee7 BP |
1934 | struct amd64_pvt *pvt = mci->pvt_info; |
1935 | struct mem_ctl_info *log_mci, *src_mci = NULL; | |
d27bf6fa | 1936 | int csrow; |
44e9e2ee | 1937 | u64 sys_addr; |
d27bf6fa | 1938 | u32 page, offset; |
d27bf6fa DT |
1939 | |
1940 | log_mci = mci; | |
1941 | ||
24f9a7fe BP |
1942 | if (!(info->nbsh & K8_NBSH_VALID_ERROR_ADDR)) { |
1943 | amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n"); | |
d27bf6fa DT |
1944 | edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR); |
1945 | return; | |
1946 | } | |
1947 | ||
1f6bcee7 | 1948 | sys_addr = pvt->ops->get_error_address(mci, info); |
d27bf6fa DT |
1949 | |
1950 | /* | |
1951 | * Find out which node the error address belongs to. This may be | |
1952 | * different from the node that detected the error. | |
1953 | */ | |
44e9e2ee | 1954 | src_mci = find_mc_by_sys_addr(mci, sys_addr); |
d27bf6fa | 1955 | if (!src_mci) { |
24f9a7fe BP |
1956 | amd64_mc_err(mci, "ERROR ADDRESS (0x%lx) NOT mapped to a MC\n", |
1957 | (unsigned long)sys_addr); | |
d27bf6fa DT |
1958 | edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR); |
1959 | return; | |
1960 | } | |
1961 | ||
1962 | log_mci = src_mci; | |
1963 | ||
44e9e2ee | 1964 | csrow = sys_addr_to_csrow(log_mci, sys_addr); |
d27bf6fa | 1965 | if (csrow < 0) { |
24f9a7fe BP |
1966 | amd64_mc_err(mci, "ERROR_ADDRESS (0x%lx) NOT mapped to CS\n", |
1967 | (unsigned long)sys_addr); | |
d27bf6fa DT |
1968 | edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR); |
1969 | } else { | |
44e9e2ee | 1970 | error_address_to_page_and_offset(sys_addr, &page, &offset); |
d27bf6fa DT |
1971 | edac_mc_handle_ue(log_mci, page, offset, csrow, EDAC_MOD_STR); |
1972 | } | |
1973 | } | |
1974 | ||
549d042d | 1975 | static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci, |
b69b29de | 1976 | struct err_regs *info) |
d27bf6fa | 1977 | { |
b70ef010 BP |
1978 | u32 ec = ERROR_CODE(info->nbsl); |
1979 | u32 xec = EXT_ERROR_CODE(info->nbsl); | |
17adea01 | 1980 | int ecc_type = (info->nbsh >> 13) & 0x3; |
d27bf6fa | 1981 | |
b70ef010 BP |
1982 | /* Bail out early if this was an 'observed' error */ | |
1983 | if (PP(ec) == K8_NBSL_PP_OBS) | |
1984 | return; | |
d27bf6fa | 1985 | |
ecaf5606 BP |
1986 | /* Do only ECC errors */ |
1987 | if (xec && xec != F10_NBSL_EXT_ERR_ECC) | |
d27bf6fa | 1988 | return; |
d27bf6fa | 1989 | |
ecaf5606 | 1990 | if (ecc_type == 2) |
d27bf6fa | 1991 | amd64_handle_ce(mci, info); |
ecaf5606 | 1992 | else if (ecc_type == 1) |
d27bf6fa | 1993 | amd64_handle_ue(mci, info); |
d27bf6fa DT |
1994 | } |
1995 | ||
7cfd4a87 | 1996 | void amd64_decode_bus_error(int node_id, struct mce *m, u32 nbcfg) |
d27bf6fa | 1997 | { |
549d042d | 1998 | struct mem_ctl_info *mci = mci_lookup[node_id]; |
7cfd4a87 | 1999 | struct err_regs regs; |
d27bf6fa | 2000 | |
7cfd4a87 BP |
2001 | regs.nbsl = (u32) m->status; |
2002 | regs.nbsh = (u32)(m->status >> 32); | |
2003 | regs.nbeal = (u32) m->addr; | |
2004 | regs.nbeah = (u32)(m->addr >> 32); | |
2005 | regs.nbcfg = nbcfg; | |
2006 | ||
2007 | __amd64_decode_bus_error(mci, ®s); | |
d27bf6fa | 2008 | |
d27bf6fa DT |
2009 | /* |
2010 |  * Check the UE bit of the NB status high register; if set, generate some | |
2011 |  * logs. If it is NOT a GART error, process the event as a NO-INFO event. | |
2012 |  * If it was a GART error, skip that processing. | |
549d042d BP |
2013 | * |
2014 | * FIXME: this should go somewhere else, if at all. | |
d27bf6fa | 2015 | */ |
7cfd4a87 | 2016 | if (regs.nbsh & K8_NBSH_UC_ERR && !report_gart_errors) |
5110dbde | 2017 | edac_mc_handle_ue_no_info(mci, "UE bit is set"); |
549d042d | 2018 | |
d27bf6fa | 2019 | } |
d27bf6fa | 2020 | |
0ec449ee | 2021 | /* |
8d5b5d9c | 2022 | * Use pvt->F2 which contains the F2 CPU PCI device to get the related |
bbd0c1f6 | 2023 | * F1 (AddrMap) and F3 (Misc) devices. Return negative value on error. |
0ec449ee | 2024 | */ |
bbd0c1f6 BP |
2025 | static int amd64_reserve_mc_sibling_devices(struct amd64_pvt *pvt, u16 f1_id, |
2026 | u16 f3_id) | |
0ec449ee | 2027 | { |
0ec449ee | 2028 | /* Reserve the ADDRESS MAP Device */ |
8d5b5d9c BP |
2029 | pvt->F1 = pci_get_related_function(pvt->F2->vendor, f1_id, pvt->F2); |
2030 | if (!pvt->F1) { | |
24f9a7fe BP |
2031 | amd64_err("error address map device not found: " |
2032 | "vendor %x device 0x%x (broken BIOS?)\n", | |
2033 | PCI_VENDOR_ID_AMD, f1_id); | |
bbd0c1f6 | 2034 | return -ENODEV; |
0ec449ee DT |
2035 | } |
2036 | ||
2037 | /* Reserve the MISC Device */ | |
8d5b5d9c BP |
2038 | pvt->F3 = pci_get_related_function(pvt->F2->vendor, f3_id, pvt->F2); |
2039 | if (!pvt->F3) { | |
2040 | pci_dev_put(pvt->F1); | |
2041 | pvt->F1 = NULL; | |
0ec449ee | 2042 | |
24f9a7fe BP |
2043 | amd64_err("error F3 device not found: " |
2044 | "vendor %x device 0x%x (broken BIOS?)\n", | |
2045 | PCI_VENDOR_ID_AMD, f3_id); | |
0ec449ee | 2046 | |
bbd0c1f6 | 2047 | return -ENODEV; |
0ec449ee | 2048 | } |
8d5b5d9c BP |
2049 | debugf1("F1: %s\n", pci_name(pvt->F1)); |
2050 | debugf1("F2: %s\n", pci_name(pvt->F2)); | |
2051 | debugf1("F3: %s\n", pci_name(pvt->F3)); | |
0ec449ee DT |
2052 | |
2053 | return 0; | |
2054 | } | |
2055 | ||
2056 | static void amd64_free_mc_sibling_devices(struct amd64_pvt *pvt) | |
2057 | { | |
8d5b5d9c BP |
2058 | pci_dev_put(pvt->F1); |
2059 | pci_dev_put(pvt->F3); | |
0ec449ee DT |
2060 | } |
2061 | ||
2062 | /* | |
2063 | * Retrieve the hardware registers of the memory controller (this includes the | |
2064 | * 'Address Map' and 'Misc' device regs) | |
2065 | */ | |
2066 | static void amd64_read_mc_registers(struct amd64_pvt *pvt) | |
2067 | { | |
2068 | u64 msr_val; | |
ad6a32e9 | 2069 | u32 tmp; |
6ba5dcdc | 2070 | int dram; |
0ec449ee DT |
2071 | |
2072 | /* | |
2073 | * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since | |
2074 | * those are Read-As-Zero | |
2075 | */ | |
e97f8bb8 BP |
2076 | rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem); |
2077 | debugf0(" TOP_MEM: 0x%016llx\n", pvt->top_mem); | |
0ec449ee DT |
2078 | |
2079 | /* check first whether TOP_MEM2 is enabled */ | |
2080 | rdmsrl(MSR_K8_SYSCFG, msr_val); | |
2081 | if (msr_val & (1U << 21)) { | |
e97f8bb8 BP |
2082 | rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2); |
2083 | debugf0(" TOP_MEM2: 0x%016llx\n", pvt->top_mem2); | |
0ec449ee DT |
2084 | } else |
2085 | debugf0(" TOP_MEM2 disabled.\n"); | |
2086 | ||
8d5b5d9c | 2087 | amd64_read_pci_cfg(pvt->F3, K8_NBCAP, &pvt->nbcap); |
0ec449ee DT |
2088 | |
2089 | if (pvt->ops->read_dram_ctl_register) | |
2090 | pvt->ops->read_dram_ctl_register(pvt); | |
2091 | ||
2092 | for (dram = 0; dram < DRAM_REG_COUNT; dram++) { | |
2093 | /* | |
2094 | * Call CPU specific READ function to get the DRAM Base and | |
2095 | * Limit values from the DCT. | |
2096 | */ | |
2097 | pvt->ops->read_dram_base_limit(pvt, dram); | |
2098 | ||
2099 | /* | |
2100 | * Only print out debug info on rows with both R and W Enabled. | |
2101 |  * In normal processing, the compiler should optimize this whole 'if' | |
2102 | * debug output block away. | |
2103 | */ | |
2104 | if (pvt->dram_rw_en[dram] != 0) { | |
e97f8bb8 BP |
2105 | debugf1(" DRAM-BASE[%d]: 0x%016llx " |
2106 | "DRAM-LIMIT: 0x%016llx\n", | |
0ec449ee | 2107 | dram, |
e97f8bb8 BP |
2108 | pvt->dram_base[dram], |
2109 | pvt->dram_limit[dram]); | |
2110 | ||
0ec449ee DT |
2111 | debugf1(" IntlvEn=%s %s %s " |
2112 | "IntlvSel=%d DstNode=%d\n", | |
2113 | pvt->dram_IntlvEn[dram] ? | |
2114 | "Enabled" : "Disabled", | |
2115 | (pvt->dram_rw_en[dram] & 0x2) ? "W" : "!W", | |
2116 | (pvt->dram_rw_en[dram] & 0x1) ? "R" : "!R", | |
2117 | pvt->dram_IntlvSel[dram], | |
2118 | pvt->dram_DstNode[dram]); | |
2119 | } | |
2120 | } | |
2121 | ||
2122 | amd64_read_dct_base_mask(pvt); | |
2123 | ||
8d5b5d9c | 2124 | amd64_read_pci_cfg(pvt->F1, K8_DHAR, &pvt->dhar); |
0ec449ee DT |
2125 | amd64_read_dbam_reg(pvt); |
2126 | ||
8d5b5d9c | 2127 | amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare); |
0ec449ee | 2128 | |
8d5b5d9c BP |
2129 | amd64_read_pci_cfg(pvt->F2, F10_DCLR_0, &pvt->dclr0); |
2130 | amd64_read_pci_cfg(pvt->F2, F10_DCHR_0, &pvt->dchr0); | |
0ec449ee | 2131 | |
ad6a32e9 BP |
2132 | if (boot_cpu_data.x86 >= 0x10) { |
2133 | if (!dct_ganging_enabled(pvt)) { | |
8d5b5d9c BP |
2134 | amd64_read_pci_cfg(pvt->F2, F10_DCLR_1, &pvt->dclr1); |
2135 | amd64_read_pci_cfg(pvt->F2, F10_DCHR_1, &pvt->dchr1); | |
ad6a32e9 | 2136 | } |
8d5b5d9c | 2137 | amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp); |
0ec449ee | 2138 | } |
ad6a32e9 BP |
2139 | |
2140 | if (boot_cpu_data.x86 == 0x10 && | |
2141 | boot_cpu_data.x86_model > 7 && | |
2142 | /* F3x180[EccSymbolSize]=1 => x8 symbols */ | |
2143 | tmp & BIT(25)) | |
2144 | pvt->syn_type = 8; | |
2145 | else | |
2146 | pvt->syn_type = 4; | |
2147 | ||
0ec449ee | 2148 | amd64_dump_misc_regs(pvt); |
0ec449ee DT |
2149 | } |
2150 | ||
2151 | /* | |
2152 | * NOTE: CPU Revision Dependent code | |
2153 | * | |
2154 | * Input: | |
9d858bb1 | 2155 | * @csrow_nr ChipSelect Row Number (0..pvt->cs_count-1) |
0ec449ee DT |
2156 | * k8 private pointer to --> |
2157 | * DRAM Bank Address mapping register | |
2158 | * node_id | |
2159 | * DCL register where dual_channel_active is | |
2160 | * | |
2161 |  * The DBAM register consists of four 4-bit fields, defined as follows: | |
2162 | * | |
2163 | * Bits: CSROWs | |
2164 | * 0-3 CSROWs 0 and 1 | |
2165 | * 4-7 CSROWs 2 and 3 | |
2166 | * 8-11 CSROWs 4 and 5 | |
2167 | * 12-15 CSROWs 6 and 7 | |
2168 | * | |
2169 |  * Values range from 0 to 15. | |
2170 |  * The meaning of the values depends on CPU revision and dual-channel state; | |
2171 |  * see the relevant BKDG for more info. | |
2172 | * | |
2173 |  * The memory controller provides for a total of only 8 CSROWs in its current | |
2174 | * architecture. Each "pair" of CSROWs normally represents just one DIMM in | |
2175 | * single channel or two (2) DIMMs in dual channel mode. | |
2176 | * | |
2177 | * The following code logic collapses the various tables for CSROW based on CPU | |
2178 | * revision. | |
2179 | * | |
2180 | * Returns: | |
2181 | * The number of PAGE_SIZE pages on the specified CSROW number it | |
2182 | * encompasses | |
2183 | * | |
2184 | */ | |
2185 | static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt) | |
2186 | { | |
1433eb99 | 2187 | u32 cs_mode, nr_pages; |
0ec449ee DT |
2188 | |
2189 | /* | |
2190 | * The math on this doesn't look right on the surface because x/2*4 can | |
2191 | * be simplified to x*2 but this expression makes use of the fact that | |
2192 | * it is integral math where 1/2=0. This intermediate value becomes the | |
2193 | * number of bits to shift the DBAM register to extract the proper CSROW | |
2194 | * field. | |
2195 | */ | |
1433eb99 | 2196 | cs_mode = (pvt->dbam0 >> ((csrow_nr / 2) * 4)) & 0xF; |
0ec449ee | 2197 | |
1433eb99 | 2198 | nr_pages = pvt->ops->dbam_to_cs(pvt, cs_mode) << (20 - PAGE_SHIFT); |
0ec449ee DT |
2199 | |
2200 | /* | |
2201 | * If dual channel then double the memory size of single channel. | |
2202 | * Channel count is 1 or 2 | |
2203 | */ | |
2204 | nr_pages <<= (pvt->channel_count - 1); | |
2205 | ||
1433eb99 | 2206 | debugf0(" (csrow=%d) DBAM map index= %d\n", csrow_nr, cs_mode); |
0ec449ee DT |
2207 | debugf0(" nr_pages= %u channel-count = %d\n", |
2208 | nr_pages, pvt->channel_count); | |
2209 | ||
2210 | return nr_pages; | |
2211 | } | |
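/*
 * Worked example, for illustration only: for csrow_nr == 5 the shift above
 * is (5/2) * 4 == 8, so cs_mode comes from DBAM bits [11:8], the field
 * covering CSROWs 4 and 5.  If dbam_to_cs() reports, say, 512 (MB) for that
 * cs_mode, nr_pages is 512 << (20 - PAGE_SHIFT) pages, doubled once more
 * when both channels are active.
 */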
2212 | ||
2213 | /* | |
2214 | * Initialize the array of csrow attribute instances, based on the values | |
2215 | * from pci config hardware registers. | |
2216 | */ | |
2217 | static int amd64_init_csrows(struct mem_ctl_info *mci) | |
2218 | { | |
2219 | struct csrow_info *csrow; | |
2220 | struct amd64_pvt *pvt; | |
2221 | u64 input_addr_min, input_addr_max, sys_addr; | |
6ba5dcdc | 2222 | int i, empty = 1; |
0ec449ee DT |
2223 | |
2224 | pvt = mci->pvt_info; | |
2225 | ||
8d5b5d9c | 2226 | amd64_read_pci_cfg(pvt->F3, K8_NBCFG, &pvt->nbcfg); |
0ec449ee DT |
2227 | |
2228 | debugf0("NBCFG= 0x%x CHIPKILL= %s DRAM ECC= %s\n", pvt->nbcfg, | |
2229 | (pvt->nbcfg & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled", | |
2230 | (pvt->nbcfg & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled" | |
2231 | ); | |
2232 | ||
9d858bb1 | 2233 | for (i = 0; i < pvt->cs_count; i++) { |
0ec449ee DT |
2234 | csrow = &mci->csrows[i]; |
2235 | ||
2236 | if ((pvt->dcsb0[i] & K8_DCSB_CS_ENABLE) == 0) { | |
2237 | debugf1("----CSROW %d EMPTY for node %d\n", i, | |
2238 | pvt->mc_node_id); | |
2239 | continue; | |
2240 | } | |
2241 | ||
2242 | debugf1("----CSROW %d VALID for MC node %d\n", | |
2243 | i, pvt->mc_node_id); | |
2244 | ||
2245 | empty = 0; | |
2246 | csrow->nr_pages = amd64_csrow_nr_pages(i, pvt); | |
2247 | find_csrow_limits(mci, i, &input_addr_min, &input_addr_max); | |
2248 | sys_addr = input_addr_to_sys_addr(mci, input_addr_min); | |
2249 | csrow->first_page = (u32) (sys_addr >> PAGE_SHIFT); | |
2250 | sys_addr = input_addr_to_sys_addr(mci, input_addr_max); | |
2251 | csrow->last_page = (u32) (sys_addr >> PAGE_SHIFT); | |
2252 | csrow->page_mask = ~mask_from_dct_mask(pvt, i); | |
2253 | /* 8 bytes of resolution */ | |
2254 | ||
24f9a7fe | 2255 | csrow->mtype = amd64_determine_memory_type(pvt, i); |
0ec449ee DT |
2256 | |
2257 | debugf1(" for MC node %d csrow %d:\n", pvt->mc_node_id, i); | |
2258 | debugf1(" input_addr_min: 0x%lx input_addr_max: 0x%lx\n", | |
2259 | (unsigned long)input_addr_min, | |
2260 | (unsigned long)input_addr_max); | |
2261 | debugf1(" sys_addr: 0x%lx page_mask: 0x%lx\n", | |
2262 | (unsigned long)sys_addr, csrow->page_mask); | |
2263 | debugf1(" nr_pages: %u first_page: 0x%lx " | |
2264 | "last_page: 0x%lx\n", | |
2265 | (unsigned)csrow->nr_pages, | |
2266 | csrow->first_page, csrow->last_page); | |
2267 | ||
2268 | /* | |
2269 | * determine whether CHIPKILL or JUST ECC or NO ECC is operating | |
2270 | */ | |
2271 | if (pvt->nbcfg & K8_NBCFG_ECC_ENABLE) | |
2272 | csrow->edac_mode = | |
2273 | (pvt->nbcfg & K8_NBCFG_CHIPKILL) ? | |
2274 | EDAC_S4ECD4ED : EDAC_SECDED; | |
2275 | else | |
2276 | csrow->edac_mode = EDAC_NONE; | |
2277 | } | |
2278 | ||
2279 | return empty; | |
2280 | } | |
d27bf6fa | 2281 | |
f6d6ae96 BP |
2282 | /* get all cores on this DCT */ |
2283 | static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid) | |
2284 | { | |
2285 | int cpu; | |
2286 | ||
2287 | for_each_online_cpu(cpu) | |
2288 | if (amd_get_nb_id(cpu) == nid) | |
2289 | cpumask_set_cpu(cpu, mask); | |
2290 | } | |
2291 | ||
2292 | /* check MCG_CTL on all the cpus on this node */ | |
2293 | static bool amd64_nb_mce_bank_enabled_on_node(int nid) | |
2294 | { | |
2295 | cpumask_var_t mask; | |
50542251 | 2296 | int cpu, nbe; |
f6d6ae96 BP |
2297 | bool ret = false; |
2298 | ||
2299 | if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) { | |
24f9a7fe | 2300 | amd64_warn("%s: Error allocating mask\n", __func__); |
f6d6ae96 BP |
2301 | return false; |
2302 | } | |
2303 | ||
2304 | get_cpus_on_this_dct_cpumask(mask, nid); | |
2305 | ||
f6d6ae96 BP |
2306 | rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs); |
2307 | ||
2308 | for_each_cpu(cpu, mask) { | |
50542251 BP |
2309 | struct msr *reg = per_cpu_ptr(msrs, cpu); |
2310 | nbe = reg->l & K8_MSR_MCGCTL_NBE; | |
f6d6ae96 BP |
2311 | |
2312 | debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n", | |
50542251 | 2313 | cpu, reg->q, |
f6d6ae96 BP |
2314 | (nbe ? "enabled" : "disabled")); |
2315 | ||
2316 | if (!nbe) | |
2317 | goto out; | |
f6d6ae96 BP |
2318 | } |
2319 | ret = true; | |
2320 | ||
2321 | out: | |
f6d6ae96 BP |
2322 | free_cpumask_var(mask); |
2323 | return ret; | |
2324 | } | |
2325 | ||
2326 | static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on) | |
2327 | { | |
2328 | cpumask_var_t cmask; | |
50542251 | 2329 | int cpu; |
f6d6ae96 BP |
2330 | |
2331 | if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) { | |
24f9a7fe | 2332 | amd64_warn("%s: error allocating mask\n", __func__); |
f6d6ae96 BP |
2333 | return -ENOMEM; | |
2334 | } | |
2335 | ||
2336 | get_cpus_on_this_dct_cpumask(cmask, pvt->mc_node_id); | |
2337 | ||
f6d6ae96 BP |
2338 | rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs); |
2339 | ||
2340 | for_each_cpu(cpu, cmask) { | |
2341 | ||
50542251 BP |
2342 | struct msr *reg = per_cpu_ptr(msrs, cpu); |
2343 | ||
f6d6ae96 | 2344 | if (on) { |
50542251 | 2345 | if (reg->l & K8_MSR_MCGCTL_NBE) |
d95cf4de | 2346 | pvt->flags.nb_mce_enable = 1; |
f6d6ae96 | 2347 | |
50542251 | 2348 | reg->l |= K8_MSR_MCGCTL_NBE; |
f6d6ae96 BP |
2349 | } else { |
2350 | /* | |
d95cf4de | 2351 | * Turn off NB MCE reporting only when it was off before |
f6d6ae96 | 2352 | */ |
d95cf4de | 2353 | if (!pvt->flags.nb_mce_enable) |
50542251 | 2354 | reg->l &= ~K8_MSR_MCGCTL_NBE; |
f6d6ae96 | 2355 | } |
f6d6ae96 BP |
2356 | } |
2357 | wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs); | |
2358 | ||
f6d6ae96 BP |
2359 | free_cpumask_var(cmask); |
2360 | ||
2361 | return 0; | |
2362 | } | |
2363 | ||
f9431992 DT |
2364 | static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci) |
2365 | { | |
2366 | struct amd64_pvt *pvt = mci->pvt_info; | |
f6d6ae96 | 2367 | u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn; |
f9431992 | 2368 | |
8d5b5d9c | 2369 | amd64_read_pci_cfg(pvt->F3, K8_NBCTL, &value); |
f9431992 DT |
2370 | |
2371 | /* turn on UECCn and CECCEn bits */ | |
2372 | pvt->old_nbctl = value & mask; | |
2373 | pvt->nbctl_mcgctl_saved = 1; | |
2374 | ||
2375 | value |= mask; | |
8d5b5d9c | 2376 | pci_write_config_dword(pvt->F3, K8_NBCTL, value); |
f9431992 | 2377 | |
f6d6ae96 | 2378 | if (amd64_toggle_ecc_err_reporting(pvt, ON)) |
24f9a7fe | 2379 | amd64_warn("Error enabling ECC reporting over MCGCTL!\n"); |
f9431992 | 2380 | |
8d5b5d9c | 2381 | amd64_read_pci_cfg(pvt->F3, K8_NBCFG, &value); |
f9431992 DT |
2382 | |
2383 | debugf0("NBCFG(1)= 0x%x CHIPKILL= %s ECC_ENABLE= %s\n", value, | |
2384 | (value & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled", | |
2385 | (value & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled"); | |
2386 | ||
2387 | if (!(value & K8_NBCFG_ECC_ENABLE)) { | |
24f9a7fe | 2388 | amd64_warn("DRAM ECC disabled on this node, enabling...\n"); |
f9431992 | 2389 | |
d95cf4de BP |
2390 | pvt->flags.nb_ecc_prev = 0; |
2391 | ||
f9431992 DT |
2392 | /* Attempt to turn on DRAM ECC Enable */ |
2393 | value |= K8_NBCFG_ECC_ENABLE; | |
8d5b5d9c | 2394 | pci_write_config_dword(pvt->F3, K8_NBCFG, value); |
f9431992 | 2395 | |
8d5b5d9c | 2396 | amd64_read_pci_cfg(pvt->F3, K8_NBCFG, &value); |
f9431992 DT |
2397 | |
2398 | if (!(value & K8_NBCFG_ECC_ENABLE)) { | |
24f9a7fe BP |
2399 | amd64_warn("Hardware rejected DRAM ECC enable, " | |
2400 | "check memory DIMM configuration.\n"); | |
f9431992 | 2401 | } else { |
24f9a7fe | 2402 | amd64_info("Hardware accepted DRAM ECC Enable\n"); |
f9431992 | 2403 | } |
d95cf4de BP |
2404 | } else { |
2405 | pvt->flags.nb_ecc_prev = 1; | |
f9431992 | 2406 | } |
d95cf4de | 2407 | |
f9431992 DT |
2408 | debugf0("NBCFG(2)= 0x%x CHIPKILL= %s ECC_ENABLE= %s\n", value, |
2409 | (value & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled", | |
2410 | (value & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled"); | |
2411 | ||
2412 | pvt->ctl_error_info.nbcfg = value; | |
2413 | } | |
2414 | ||
2415 | static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt) | |
2416 | { | |
f6d6ae96 | 2417 | u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn; |
f9431992 DT |
2418 | |
2419 | if (!pvt->nbctl_mcgctl_saved) | |
2420 | return; | |
2421 | ||
8d5b5d9c | 2422 | amd64_read_pci_cfg(pvt->F3, K8_NBCTL, &value); |
f9431992 DT |
2423 | value &= ~mask; |
2424 | value |= pvt->old_nbctl; | |
2425 | ||
8d5b5d9c | 2426 | pci_write_config_dword(pvt->F3, K8_NBCTL, value); |
f9431992 | 2427 | |
d95cf4de BP |
2428 | /* restore previous BIOS DRAM ECC "off" setting which we force-enabled */ |
2429 | if (!pvt->flags.nb_ecc_prev) { | |
8d5b5d9c | 2430 | amd64_read_pci_cfg(pvt->F3, K8_NBCFG, &value); |
d95cf4de | 2431 | value &= ~K8_NBCFG_ECC_ENABLE; |
8d5b5d9c | 2432 | pci_write_config_dword(pvt->F3, K8_NBCFG, value); |
d95cf4de BP |
2433 | } |
2434 | ||
2435 | /* restore the NB Enable MCGCTL bit */ | |
f6d6ae96 | 2436 | if (amd64_toggle_ecc_err_reporting(pvt, OFF)) |
24f9a7fe | 2437 | amd64_warn("Error restoring NB MCGCTL settings!\n"); |
f9431992 DT |
2438 | } |
2439 | ||
2440 | /* | |
2441 | * EDAC requires that the BIOS have ECC enabled before taking over the | |
2442 | * processing of ECC errors. This is because the BIOS can properly initialize | |
2443 |  * the memory system completely. A command line option allows the user to | |
2444 |  * force-enable hardware ECC later in amd64_enable_ecc_error_reporting(). | |
2445 | */ | |
cab4d277 BP |
2446 | static const char *ecc_msg = |
2447 | "ECC disabled in the BIOS or no ECC capability, module will not load.\n" | |
2448 | " Either enable ECC checking or force module loading by setting " | |
2449 | "'ecc_enable_override'.\n" | |
2450 | " (Note that use of the override may cause unknown side effects.)\n"; | |
be3468e8 | 2451 | |
f9431992 DT |
2452 | static int amd64_check_ecc_enabled(struct amd64_pvt *pvt) |
2453 | { | |
2454 | u32 value; | |
06724535 BP |
2455 | u8 ecc_enabled = 0; |
2456 | bool nb_mce_en = false; | |
f9431992 | 2457 | |
8d5b5d9c | 2458 | amd64_read_pci_cfg(pvt->F3, K8_NBCFG, &value); |
f9431992 DT |
2459 | |
2460 | ecc_enabled = !!(value & K8_NBCFG_ECC_ENABLE); | |
24f9a7fe | 2461 | amd64_info("DRAM ECC %s.\n", (ecc_enabled ? "enabled" : "disabled")); |
f9431992 | 2462 | |
06724535 BP |
2463 | nb_mce_en = amd64_nb_mce_bank_enabled_on_node(pvt->mc_node_id); |
2464 | if (!nb_mce_en) | |
24f9a7fe BP |
2465 | amd64_notice("NB MCE bank disabled, " |
2466 | "set MSR 0x%08x[4] on node %d to enable.\n", | |
be3468e8 | 2467 | MSR_IA32_MCG_CTL, pvt->mc_node_id); |
f9431992 | 2468 | |
06724535 | 2469 | if (!ecc_enabled || !nb_mce_en) { |
f9431992 | 2470 | if (!ecc_enable_override) { |
24f9a7fe | 2471 | amd64_notice("%s", ecc_msg); |
be3468e8 | 2472 | return -ENODEV; |
d95cf4de | 2473 | } else { |
24f9a7fe | 2474 | amd64_warn("Forcing ECC on!\n"); |
be3468e8 | 2475 | } |
43f5e687 | 2476 | } |
f9431992 | 2477 | |
be3468e8 | 2478 | return 0; |
f9431992 DT |
2479 | } |
2480 | ||
7d6034d3 DT |
2481 | struct mcidev_sysfs_attribute sysfs_attrs[ARRAY_SIZE(amd64_dbg_attrs) + |
2482 | ARRAY_SIZE(amd64_inj_attrs) + | |
2483 | 1]; | |
2484 | ||
2485 | struct mcidev_sysfs_attribute terminator = { .attr = { .name = NULL } }; | |
2486 | ||
2487 | static void amd64_set_mc_sysfs_attributes(struct mem_ctl_info *mci) | |
2488 | { | |
2489 | unsigned int i = 0, j = 0; | |
2490 | ||
2491 | for (; i < ARRAY_SIZE(amd64_dbg_attrs); i++) | |
2492 | sysfs_attrs[i] = amd64_dbg_attrs[i]; | |
2493 | ||
2494 | for (j = 0; j < ARRAY_SIZE(amd64_inj_attrs); j++, i++) | |
2495 | sysfs_attrs[i] = amd64_inj_attrs[j]; | |
2496 | ||
2497 | sysfs_attrs[i] = terminator; | |
2498 | ||
2499 | mci->mc_driver_sysfs_attributes = sysfs_attrs; | |
2500 | } | |
2501 | ||
2502 | static void amd64_setup_mci_misc_attributes(struct mem_ctl_info *mci) | |
2503 | { | |
2504 | struct amd64_pvt *pvt = mci->pvt_info; | |
2505 | ||
2506 | mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2; | |
2507 | mci->edac_ctl_cap = EDAC_FLAG_NONE; | |
7d6034d3 DT |
2508 | |
2509 | if (pvt->nbcap & K8_NBCAP_SECDED) | |
2510 | mci->edac_ctl_cap |= EDAC_FLAG_SECDED; | |
2511 | ||
2512 | if (pvt->nbcap & K8_NBCAP_CHIPKILL) | |
2513 | mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED; | |
2514 | ||
2515 | mci->edac_cap = amd64_determine_edac_cap(pvt); | |
2516 | mci->mod_name = EDAC_MOD_STR; | |
2517 | mci->mod_ver = EDAC_AMD64_VERSION; | |
0092b20d | 2518 | mci->ctl_name = pvt->ctl_name; |
8d5b5d9c | 2519 | mci->dev_name = pci_name(pvt->F2); |
7d6034d3 DT |
2520 | mci->ctl_page_to_phys = NULL; |
2521 | ||
7d6034d3 DT |
2522 | /* memory scrubber interface */ |
2523 | mci->set_sdram_scrub_rate = amd64_set_scrub_rate; | |
2524 | mci->get_sdram_scrub_rate = amd64_get_scrub_rate; | |
2525 | } | |
2526 | ||
0092b20d BP |
2527 | /* |
2528 | * returns a pointer to the family descriptor on success, NULL otherwise. | |
2529 | */ | |
2530 | static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt) | |
395ae783 | 2531 | { |
0092b20d BP |
2532 | u8 fam = boot_cpu_data.x86; |
2533 | struct amd64_family_type *fam_type = NULL; | |
2534 | ||
2535 | switch (fam) { | |
395ae783 | 2536 | case 0xf: |
0092b20d | 2537 | fam_type = &amd64_family_types[K8_CPUS]; |
b8cfa02f | 2538 | pvt->ops = &amd64_family_types[K8_CPUS].ops; |
0092b20d BP |
2539 | pvt->ctl_name = fam_type->ctl_name; |
2540 | pvt->min_scrubrate = K8_MIN_SCRUB_RATE_BITS; | |
395ae783 BP |
2541 | break; |
2542 | case 0x10: | |
0092b20d | 2543 | fam_type = &amd64_family_types[F10_CPUS]; |
b8cfa02f | 2544 | pvt->ops = &amd64_family_types[F10_CPUS].ops; |
0092b20d BP |
2545 | pvt->ctl_name = fam_type->ctl_name; |
2546 | pvt->min_scrubrate = F10_MIN_SCRUB_RATE_BITS; | |
395ae783 BP |
2547 | break; |
2548 | ||
2549 | default: | |
24f9a7fe | 2550 | amd64_err("Unsupported family!\n"); |
0092b20d | 2551 | return NULL; |
395ae783 | 2552 | } |
0092b20d | 2553 | |
b8cfa02f BP |
2554 | pvt->ext_model = boot_cpu_data.x86_model >> 4; |
2555 | ||
24f9a7fe | 2556 | amd64_info("%s %sdetected (node %d).\n", pvt->ctl_name, |
0092b20d | 2557 | (fam == 0xf ? |
24f9a7fe BP |
2558 | (pvt->ext_model >= K8_REV_F ? "revF or later " |
2559 | : "revE or earlier ") | |
2560 | : ""), pvt->mc_node_id); | |
0092b20d | 2561 | return fam_type; |
395ae783 BP |
2562 | } |
2563 | ||
7d6034d3 DT |
2564 | /* |
2565 | * Init stuff for this DRAM Controller device. | |
2566 | * | |
2567 | * Due to a hardware feature on Fam10h CPUs, the Enable Extended Configuration | |
2568 | * Space feature MUST be enabled on ALL Processors prior to actually reading | |
2569 |  * from the ECS registers, since the module can be loaded on any 'core' and | |
2570 |  * cores don't 'see' the other processors' ECS data when ECS is NOT enabled | |
2571 |  * on them. Our solution is to first enable ECS access in this routine on | |
2572 |  * all processors, gather some data in an amd64_pvt structure and | |
2573 | * later come back in a finish-setup function to perform that final | |
2574 | * initialization. See also amd64_init_2nd_stage() for that. | |
2575 | */ | |
8d5b5d9c | 2576 | static int amd64_probe_one_instance(struct pci_dev *F2) |
7d6034d3 DT |
2577 | { |
2578 | struct amd64_pvt *pvt = NULL; | |
0092b20d | 2579 | struct amd64_family_type *fam_type = NULL; |
7d6034d3 DT |
2580 | int err = 0, ret; |
2581 | ||
2582 | ret = -ENOMEM; | |
2583 | pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL); | |
2584 | if (!pvt) | |
2585 | goto err_exit; | |
2586 | ||
8d5b5d9c BP |
2587 | pvt->mc_node_id = get_node_id(F2); |
2588 | pvt->F2 = F2; | |
7d6034d3 | 2589 | |
395ae783 | 2590 | ret = -EINVAL; |
0092b20d BP |
2591 | fam_type = amd64_per_family_init(pvt); |
2592 | if (!fam_type) | |
395ae783 BP |
2593 | goto err_free; |
2594 | ||
7d6034d3 | 2595 | ret = -ENODEV; |
8d5b5d9c BP |
2596 | err = amd64_reserve_mc_sibling_devices(pvt, fam_type->f1_id, |
2597 | fam_type->f3_id); | |
7d6034d3 DT |
2598 | if (err) |
2599 | goto err_free; | |
2600 | ||
2601 | ret = -EINVAL; | |
2602 | err = amd64_check_ecc_enabled(pvt); | |
2603 | if (err) | |
2604 | goto err_put; | |
2605 | ||
2606 | /* | |
2607 | * Key operation here: setup of HW prior to performing ops on it. Some | |
2608 | * setup is required to access ECS data. After this is performed, the | |
2609 | * 'teardown' function must be called upon error and normal exit paths. | |
2610 | */ | |
2611 | if (boot_cpu_data.x86 >= 0x10) | |
2612 | amd64_setup(pvt); | |
2613 | ||
2614 | /* | |
2615 | * Save the pointer to the private data for use in 2nd initialization | |
2616 | * stage | |
2617 | */ | |
2618 | pvt_lookup[pvt->mc_node_id] = pvt; | |
2619 | ||
2620 | return 0; | |
2621 | ||
2622 | err_put: | |
2623 | amd64_free_mc_sibling_devices(pvt); | |
2624 | ||
2625 | err_free: | |
2626 | kfree(pvt); | |
2627 | ||
2628 | err_exit: | |
2629 | return ret; | |
2630 | } | |
2631 | ||
2632 | /* | |
2633 |  * This is the finishing stage of the init code. It needs to be performed after | |
2634 |  * all MCs' hardware has been prepped for accessing extended config space. | |
2635 | */ | |
2636 | static int amd64_init_2nd_stage(struct amd64_pvt *pvt) | |
2637 | { | |
2638 | int node_id = pvt->mc_node_id; | |
2639 | struct mem_ctl_info *mci; | |
18ba54ac | 2640 | int ret = -ENODEV; |
7d6034d3 DT |
2641 | |
2642 | amd64_read_mc_registers(pvt); | |
2643 | ||
7d6034d3 DT |
2644 | /* |
2645 | * We need to determine how many memory channels there are. Then use | |
2646 | * that information for calculating the size of the dynamic instance | |
2647 | * tables in the 'mci' structure | |
2648 | */ | |
2649 | pvt->channel_count = pvt->ops->early_channel_count(pvt); | |
2650 | if (pvt->channel_count < 0) | |
2651 | goto err_exit; | |
2652 | ||
2653 | ret = -ENOMEM; | |
9d858bb1 | 2654 | mci = edac_mc_alloc(0, pvt->cs_count, pvt->channel_count, node_id); |
7d6034d3 DT |
2655 | if (!mci) |
2656 | goto err_exit; | |
2657 | ||
2658 | mci->pvt_info = pvt; | |
2659 | ||
8d5b5d9c | 2660 | mci->dev = &pvt->F2->dev; |
7d6034d3 DT |
2661 | amd64_setup_mci_misc_attributes(mci); |
2662 | ||
2663 | if (amd64_init_csrows(mci)) | |
2664 | mci->edac_cap = EDAC_FLAG_NONE; | |
2665 | ||
2666 | amd64_enable_ecc_error_reporting(mci); | |
2667 | amd64_set_mc_sysfs_attributes(mci); | |
2668 | ||
2669 | ret = -ENODEV; | |
2670 | if (edac_mc_add_mc(mci)) { | |
2671 | debugf1("failed edac_mc_add_mc()\n"); | |
2672 | goto err_add_mc; | |
2673 | } | |
2674 | ||
2675 | mci_lookup[node_id] = mci; | |
2676 | pvt_lookup[node_id] = NULL; | |
549d042d BP |
2677 | |
2678 | /* register stuff with EDAC MCE */ | |
2679 | if (report_gart_errors) | |
2680 | amd_report_gart_errors(true); | |
2681 | ||
2682 | amd_register_ecc_decoder(amd64_decode_bus_error); | |
2683 | ||
7d6034d3 DT |
2684 | return 0; |
2685 | ||
2686 | err_add_mc: | |
2687 | edac_mc_free(mci); | |
2688 | ||
2689 | err_exit: | |
2690 | debugf0("failure to init 2nd stage: ret=%d\n", ret); | |
2691 | ||
2692 | amd64_restore_ecc_error_reporting(pvt); | |
2693 | ||
2694 | if (boot_cpu_data.x86 > 0xf) | |
2695 | amd64_teardown(pvt); | |
2696 | ||
2697 | amd64_free_mc_sibling_devices(pvt); | |
2698 | ||
2699 | kfree(pvt_lookup[pvt->mc_node_id]); | |
2700 | pvt_lookup[node_id] = NULL; | |
2701 | ||
2702 | return ret; | |
2703 | } | |
2704 | ||
2705 | ||
2706 | static int __devinit amd64_init_one_instance(struct pci_dev *pdev, | |
b8cfa02f | 2707 | const struct pci_device_id *mc_type) |
7d6034d3 DT |
2708 | { |
2709 | int ret = 0; | |
2710 | ||
7d6034d3 | 2711 | ret = pci_enable_device(pdev); |
b8cfa02f BP |
2712 | if (ret < 0) { |
2713 | debugf0("ret=%d\n", ret); | |
2714 | return -EIO; | |
2715 | } | |
7d6034d3 | 2716 | |
b8cfa02f | 2717 | ret = amd64_probe_one_instance(pdev); |
7d6034d3 | 2718 | if (ret < 0) |
24f9a7fe | 2719 | amd64_err("Error probing instance: %d\n", get_node_id(pdev)); |
7d6034d3 DT |
2720 | |
2721 | return ret; | |
2722 | } | |
2723 | ||
2724 | static void __devexit amd64_remove_one_instance(struct pci_dev *pdev) | |
2725 | { | |
2726 | struct mem_ctl_info *mci; | |
2727 | struct amd64_pvt *pvt; | |
2728 | ||
2729 | /* Remove from EDAC CORE tracking list */ | |
2730 | mci = edac_mc_del_mc(&pdev->dev); | |
2731 | if (!mci) | |
2732 | return; | |
2733 | ||
2734 | pvt = mci->pvt_info; | |
2735 | ||
2736 | amd64_restore_ecc_error_reporting(pvt); | |
2737 | ||
2738 | if (boot_cpu_data.x86 > 0xf) | |
2739 | amd64_teardown(pvt); | |
2740 | ||
2741 | amd64_free_mc_sibling_devices(pvt); | |
2742 | ||
549d042d BP |
2743 | /* unregister from EDAC MCE */ |
2744 | amd_report_gart_errors(false); | |
2745 | amd_unregister_ecc_decoder(amd64_decode_bus_error); | |
2746 | ||
7d6034d3 | 2747 | /* Free the EDAC CORE resources */ |
8f68ed97 BP |
2748 | mci->pvt_info = NULL; |
2749 | mci_lookup[pvt->mc_node_id] = NULL; | |
2750 | ||
2751 | kfree(pvt); | |
7d6034d3 DT |
2752 | edac_mc_free(mci); |
2753 | } | |
2754 | ||
2755 | /* | |
2756 | * This table is part of the interface for loading drivers for PCI devices. The | |
2757 | * PCI core identifies what devices are on a system during boot, and then | |
2758 |  * consults this table to see if this driver handles a given device it found. | |
2759 | */ | |
2760 | static const struct pci_device_id amd64_pci_table[] __devinitdata = { | |
2761 | { | |
2762 | .vendor = PCI_VENDOR_ID_AMD, | |
2763 | .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL, | |
2764 | .subvendor = PCI_ANY_ID, | |
2765 | .subdevice = PCI_ANY_ID, | |
2766 | .class = 0, | |
2767 | .class_mask = 0, | |
7d6034d3 DT |
2768 | }, |
2769 | { | |
2770 | .vendor = PCI_VENDOR_ID_AMD, | |
2771 | .device = PCI_DEVICE_ID_AMD_10H_NB_DRAM, | |
2772 | .subvendor = PCI_ANY_ID, | |
2773 | .subdevice = PCI_ANY_ID, | |
2774 | .class = 0, | |
2775 | .class_mask = 0, | |
7d6034d3 | 2776 | }, |
7d6034d3 DT |
2777 | {0, } |
2778 | }; | |
2779 | MODULE_DEVICE_TABLE(pci, amd64_pci_table); | |
2780 | ||
2781 | static struct pci_driver amd64_pci_driver = { | |
2782 | .name = EDAC_MOD_STR, | |
2783 | .probe = amd64_init_one_instance, | |
2784 | .remove = __devexit_p(amd64_remove_one_instance), | |
2785 | .id_table = amd64_pci_table, | |
2786 | }; | |
2787 | ||
2788 | static void amd64_setup_pci_device(void) | |
2789 | { | |
2790 | struct mem_ctl_info *mci; | |
2791 | struct amd64_pvt *pvt; | |
2792 | ||
2793 | if (amd64_ctl_pci) | |
2794 | return; | |
2795 | ||
2796 | mci = mci_lookup[0]; | |
2797 | if (mci) { | |
2798 | ||
2799 | pvt = mci->pvt_info; | |
2800 | amd64_ctl_pci = | |
8d5b5d9c | 2801 | edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR); |
7d6034d3 DT |
2802 | |
2803 | if (!amd64_ctl_pci) { | |
2804 | pr_warning("%s(): Unable to create PCI control\n", | |
2805 | __func__); | |
2806 | ||
2807 | pr_warning("%s(): PCI error report via EDAC not set\n", | |
2808 | __func__); | |
2809 | } | |
2810 | } | |
2811 | } | |
2812 | ||
2813 | static int __init amd64_edac_init(void) | |
2814 | { | |
2815 | int nb, err = -ENODEV; | |
56b34b91 | 2816 | bool load_ok = false; |
7d6034d3 DT |
2817 | |
2818 | edac_printk(KERN_INFO, EDAC_MOD_STR, EDAC_AMD64_VERSION "\n"); | |
2819 | ||
2820 | opstate_init(); | |
2821 | ||
9653a5c7 | 2822 | if (amd_cache_northbridges() < 0) |
56b34b91 | 2823 | goto err_ret; |
7d6034d3 | 2824 | |
50542251 | 2825 | msrs = msrs_alloc(); |
56b34b91 BP |
2826 | if (!msrs) |
2827 | goto err_ret; | |
50542251 | 2828 | |
7d6034d3 DT |
2829 | err = pci_register_driver(&amd64_pci_driver); |
2830 | if (err) | |
56b34b91 | 2831 | goto err_pci; |
7d6034d3 DT |
2832 | |
2833 | /* | |
2834 | * At this point, the array 'pvt_lookup[]' contains pointers to alloc'd | |
2835 | * amd64_pvt structs. These will be used in the 2nd stage init function | |
2836 | * to finish initialization of the MC instances. | |
2837 | */ | |
56b34b91 | 2838 | err = -ENODEV; |
9653a5c7 | 2839 | for (nb = 0; nb < amd_nb_num(); nb++) { |
7d6034d3 DT |
2840 | if (!pvt_lookup[nb]) |
2841 | continue; | |
2842 | ||
2843 | err = amd64_init_2nd_stage(pvt_lookup[nb]); | |
2844 | if (err) | |
37da0450 | 2845 | goto err_2nd_stage; |
7d6034d3 | 2846 | |
56b34b91 BP |
2847 | load_ok = true; |
2848 | } | |
7d6034d3 | 2849 | |
56b34b91 BP |
2850 | if (load_ok) { |
2851 | amd64_setup_pci_device(); | |
2852 | return 0; | |
2853 | } | |
7d6034d3 | 2854 | |
37da0450 | 2855 | err_2nd_stage: |
7d6034d3 | 2856 | pci_unregister_driver(&amd64_pci_driver); |
56b34b91 BP |
2857 | err_pci: |
2858 | msrs_free(msrs); | |
2859 | msrs = NULL; | |
2860 | err_ret: | |
7d6034d3 DT |
2861 | return err; |
2862 | } | |
2863 | ||
2864 | static void __exit amd64_edac_exit(void) | |
2865 | { | |
2866 | if (amd64_ctl_pci) | |
2867 | edac_pci_release_generic_ctl(amd64_ctl_pci); | |
2868 | ||
2869 | pci_unregister_driver(&amd64_pci_driver); | |
50542251 BP |
2870 | |
2871 | msrs_free(msrs); | |
2872 | msrs = NULL; | |
7d6034d3 DT |
2873 | } |
2874 | ||
2875 | module_init(amd64_edac_init); | |
2876 | module_exit(amd64_edac_exit); | |
2877 | ||
2878 | MODULE_LICENSE("GPL"); | |
2879 | MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, " | |
2880 | "Dave Peterson, Thayne Harbaugh"); | |
2881 | MODULE_DESCRIPTION("MC support for AMD64 memory controllers - " | |
2882 | EDAC_AMD64_VERSION); | |
2883 | ||
2884 | module_param(edac_op_state, int, 0444); | |
2885 | MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); |