/*
 * arch/x86/kernel/cpu/intel_cacheinfo.c
 *
 * Routines to identify caches on Intel CPUs.
 *
 * Changes:
 * Venkatesh Pallipadi : Adding cache identification through cpuid(4)
 * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
 * Andi Kleen / Andreas Herrmann : CPUID4 emulation on AMD.
 */

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/pci.h>

#include <asm/processor.h>
#include <linux/smp.h>
#include <asm/amd_nb.h>
#include <asm/smp.h>

#define LVL_1_INST      1
#define LVL_1_DATA      2
#define LVL_2           3
#define LVL_3           4
#define LVL_TRACE       5

struct _cache_table {
        unsigned char descriptor;
        char cache_type;
        short size;
};

#define MB(x)   ((x) * 1024)

/* All the cache descriptor types we care about (no TLB or
   trace cache entries) */

static const struct _cache_table __cpuinitconst cache_table[] =
{
        { 0x06, LVL_1_INST, 8 },  /* 4-way set assoc, 32 byte line size */
        { 0x08, LVL_1_INST, 16 },  /* 4-way set assoc, 32 byte line size */
        { 0x09, LVL_1_INST, 32 },  /* 4-way set assoc, 64 byte line size */
        { 0x0a, LVL_1_DATA, 8 },  /* 2 way set assoc, 32 byte line size */
        { 0x0c, LVL_1_DATA, 16 },  /* 4-way set assoc, 32 byte line size */
        { 0x0d, LVL_1_DATA, 16 },  /* 4-way set assoc, 64 byte line size */
        { 0x0e, LVL_1_DATA, 24 },  /* 6-way set assoc, 64 byte line size */
        { 0x21, LVL_2, 256 },  /* 8-way set assoc, 64 byte line size */
        { 0x22, LVL_3, 512 },  /* 4-way set assoc, sectored cache, 64 byte line size */
        { 0x23, LVL_3, MB(1) },  /* 8-way set assoc, sectored cache, 64 byte line size */
        { 0x25, LVL_3, MB(2) },  /* 8-way set assoc, sectored cache, 64 byte line size */
        { 0x29, LVL_3, MB(4) },  /* 8-way set assoc, sectored cache, 64 byte line size */
        { 0x2c, LVL_1_DATA, 32 },  /* 8-way set assoc, 64 byte line size */
        { 0x30, LVL_1_INST, 32 },  /* 8-way set assoc, 64 byte line size */
        { 0x39, LVL_2, 128 },  /* 4-way set assoc, sectored cache, 64 byte line size */
        { 0x3a, LVL_2, 192 },  /* 6-way set assoc, sectored cache, 64 byte line size */
        { 0x3b, LVL_2, 128 },  /* 2-way set assoc, sectored cache, 64 byte line size */
        { 0x3c, LVL_2, 256 },  /* 4-way set assoc, sectored cache, 64 byte line size */
        { 0x3d, LVL_2, 384 },  /* 6-way set assoc, sectored cache, 64 byte line size */
        { 0x3e, LVL_2, 512 },  /* 4-way set assoc, sectored cache, 64 byte line size */
        { 0x3f, LVL_2, 256 },  /* 2-way set assoc, 64 byte line size */
        { 0x41, LVL_2, 128 },  /* 4-way set assoc, 32 byte line size */
        { 0x42, LVL_2, 256 },  /* 4-way set assoc, 32 byte line size */
        { 0x43, LVL_2, 512 },  /* 4-way set assoc, 32 byte line size */
        { 0x44, LVL_2, MB(1) },  /* 4-way set assoc, 32 byte line size */
        { 0x45, LVL_2, MB(2) },  /* 4-way set assoc, 32 byte line size */
        { 0x46, LVL_3, MB(4) },  /* 4-way set assoc, 64 byte line size */
        { 0x47, LVL_3, MB(8) },  /* 8-way set assoc, 64 byte line size */
        { 0x48, LVL_2, MB(3) },  /* 12-way set assoc, 64 byte line size */
        { 0x49, LVL_3, MB(4) },  /* 16-way set assoc, 64 byte line size */
        { 0x4a, LVL_3, MB(6) },  /* 12-way set assoc, 64 byte line size */
        { 0x4b, LVL_3, MB(8) },  /* 16-way set assoc, 64 byte line size */
        { 0x4c, LVL_3, MB(12) },  /* 12-way set assoc, 64 byte line size */
        { 0x4d, LVL_3, MB(16) },  /* 16-way set assoc, 64 byte line size */
        { 0x4e, LVL_2, MB(6) },  /* 24-way set assoc, 64 byte line size */
        { 0x60, LVL_1_DATA, 16 },  /* 8-way set assoc, sectored cache, 64 byte line size */
        { 0x66, LVL_1_DATA, 8 },  /* 4-way set assoc, sectored cache, 64 byte line size */
        { 0x67, LVL_1_DATA, 16 },  /* 4-way set assoc, sectored cache, 64 byte line size */
        { 0x68, LVL_1_DATA, 32 },  /* 4-way set assoc, sectored cache, 64 byte line size */
        { 0x70, LVL_TRACE, 12 },  /* 8-way set assoc */
        { 0x71, LVL_TRACE, 16 },  /* 8-way set assoc */
        { 0x72, LVL_TRACE, 32 },  /* 8-way set assoc */
        { 0x73, LVL_TRACE, 64 },  /* 8-way set assoc */
        { 0x78, LVL_2, MB(1) },  /* 4-way set assoc, 64 byte line size */
        { 0x79, LVL_2, 128 },  /* 8-way set assoc, sectored cache, 64 byte line size */
        { 0x7a, LVL_2, 256 },  /* 8-way set assoc, sectored cache, 64 byte line size */
        { 0x7b, LVL_2, 512 },  /* 8-way set assoc, sectored cache, 64 byte line size */
        { 0x7c, LVL_2, MB(1) },  /* 8-way set assoc, sectored cache, 64 byte line size */
        { 0x7d, LVL_2, MB(2) },  /* 8-way set assoc, 64 byte line size */
        { 0x7f, LVL_2, 512 },  /* 2-way set assoc, 64 byte line size */
        { 0x80, LVL_2, 512 },  /* 8-way set assoc, 64 byte line size */
        { 0x82, LVL_2, 256 },  /* 8-way set assoc, 32 byte line size */
        { 0x83, LVL_2, 512 },  /* 8-way set assoc, 32 byte line size */
        { 0x84, LVL_2, MB(1) },  /* 8-way set assoc, 32 byte line size */
        { 0x85, LVL_2, MB(2) },  /* 8-way set assoc, 32 byte line size */
        { 0x86, LVL_2, 512 },  /* 4-way set assoc, 64 byte line size */
        { 0x87, LVL_2, MB(1) },  /* 8-way set assoc, 64 byte line size */
        { 0xd0, LVL_3, 512 },  /* 4-way set assoc, 64 byte line size */
        { 0xd1, LVL_3, MB(1) },  /* 4-way set assoc, 64 byte line size */
        { 0xd2, LVL_3, MB(2) },  /* 4-way set assoc, 64 byte line size */
        { 0xd6, LVL_3, MB(1) },  /* 8-way set assoc, 64 byte line size */
        { 0xd7, LVL_3, MB(2) },  /* 8-way set assoc, 64 byte line size */
        { 0xd8, LVL_3, MB(4) },  /* 12-way set assoc, 64 byte line size */
        { 0xdc, LVL_3, MB(2) },  /* 12-way set assoc, 64 byte line size */
        { 0xdd, LVL_3, MB(4) },  /* 12-way set assoc, 64 byte line size */
        { 0xde, LVL_3, MB(8) },  /* 12-way set assoc, 64 byte line size */
        { 0xe2, LVL_3, MB(2) },  /* 16-way set assoc, 64 byte line size */
        { 0xe3, LVL_3, MB(4) },  /* 16-way set assoc, 64 byte line size */
        { 0xe4, LVL_3, MB(8) },  /* 16-way set assoc, 64 byte line size */
        { 0xea, LVL_3, MB(12) },  /* 24-way set assoc, 64 byte line size */
        { 0xeb, LVL_3, MB(18) },  /* 24-way set assoc, 64 byte line size */
        { 0xec, LVL_3, MB(24) },  /* 24-way set assoc, 64 byte line size */
        { 0x00, 0, 0}
};


enum _cache_type {
        CACHE_TYPE_NULL = 0,
        CACHE_TYPE_DATA = 1,
        CACHE_TYPE_INST = 2,
        CACHE_TYPE_UNIFIED = 3
};

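/*
 * The unions below mirror the register layout of CPUID leaf 4
 * (deterministic cache parameters): EAX carries the cache type, level
 * and sharing information, EBX the geometry (line size, partitions,
 * ways) and ECX the number of sets.  Most count fields are encoded by
 * the hardware as (value - 1), hence the "+ 1" wherever they are used.
 */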
union _cpuid4_leaf_eax {
        struct {
                enum _cache_type        type:5;
                unsigned int            level:3;
                unsigned int            is_self_initializing:1;
                unsigned int            is_fully_associative:1;
                unsigned int            reserved:4;
                unsigned int            num_threads_sharing:12;
                unsigned int            num_cores_on_die:6;
        } split;
        u32 full;
};

union _cpuid4_leaf_ebx {
        struct {
                unsigned int            coherency_line_size:12;
                unsigned int            physical_line_partition:10;
                unsigned int            ways_of_associativity:10;
        } split;
        u32 full;
};

union _cpuid4_leaf_ecx {
        struct {
                unsigned int            number_of_sets:32;
        } split;
        u32 full;
};

struct _cpuid4_info_regs {
        union _cpuid4_leaf_eax eax;
        union _cpuid4_leaf_ebx ebx;
        union _cpuid4_leaf_ecx ecx;
        unsigned long size;
        struct amd_northbridge *nb;
};

struct _cpuid4_info {
        struct _cpuid4_info_regs base;
        DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
};

unsigned short num_cache_leaves;

/* AMD doesn't have CPUID4. Emulate it here to report the same
   information to the user. This makes some assumptions about the machine:
   L2 not shared, no SMT etc. that is currently true on AMD CPUs.

   In theory the TLBs could be reported as fake type (they are in "dummy").
   Maybe later */
union l1_cache {
        struct {
                unsigned line_size:8;
                unsigned lines_per_tag:8;
                unsigned assoc:8;
                unsigned size_in_kb:8;
        };
        unsigned val;
};

union l2_cache {
        struct {
                unsigned line_size:8;
                unsigned lines_per_tag:4;
                unsigned assoc:4;
                unsigned size_in_kb:16;
        };
        unsigned val;
};

union l3_cache {
        struct {
                unsigned line_size:8;
                unsigned lines_per_tag:4;
                unsigned assoc:4;
                unsigned res:2;
                unsigned size_encoded:14;
        };
        unsigned val;
};

static const unsigned short __cpuinitconst assocs[] = {
        [1] = 1,
        [2] = 2,
        [4] = 4,
        [6] = 8,
        [8] = 16,
        [0xa] = 32,
        [0xb] = 48,
        [0xc] = 64,
        [0xd] = 96,
        [0xe] = 128,
        [0xf] = 0xffff /* fully associative - no way to show this currently */
};

static const unsigned char __cpuinitconst levels[] = { 1, 1, 2, 3 };
static const unsigned char __cpuinitconst types[] = { 1, 2, 3, 3 };

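/*
 * Build CPUID4-style output for one cache level from the legacy AMD
 * leaves 0x80000005 (L1D/L1I) and 0x80000006 (L2/L3).  @leaf selects
 * the emulated index: 0 = L1 data, 1 = L1 instruction, 2 = L2, 3 = L3.
 */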
static void __cpuinit
amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
                     union _cpuid4_leaf_ebx *ebx,
                     union _cpuid4_leaf_ecx *ecx)
{
        unsigned dummy;
        unsigned line_size, lines_per_tag, assoc, size_in_kb;
        union l1_cache l1i, l1d;
        union l2_cache l2;
        union l3_cache l3;
        union l1_cache *l1 = &l1d;

        eax->full = 0;
        ebx->full = 0;
        ecx->full = 0;

        cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
        cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);

        switch (leaf) {
        case 1:
                l1 = &l1i;
                /* fall through */
        case 0:
                if (!l1->val)
                        return;
                assoc = assocs[l1->assoc];
                line_size = l1->line_size;
                lines_per_tag = l1->lines_per_tag;
                size_in_kb = l1->size_in_kb;
                break;
        case 2:
                if (!l2.val)
                        return;
                assoc = assocs[l2.assoc];
                line_size = l2.line_size;
                lines_per_tag = l2.lines_per_tag;
                /* cpu_data has errata corrections for K7 applied */
                size_in_kb = __this_cpu_read(cpu_info.x86_cache_size);
                break;
        case 3:
                if (!l3.val)
                        return;
                assoc = assocs[l3.assoc];
                line_size = l3.line_size;
                lines_per_tag = l3.lines_per_tag;
                size_in_kb = l3.size_encoded * 512;
                if (boot_cpu_has(X86_FEATURE_AMD_DCM)) {
                        size_in_kb = size_in_kb >> 1;
                        assoc = assoc >> 1;
                }
                break;
        default:
                return;
        }

        eax->split.is_self_initializing = 1;
        eax->split.type = types[leaf];
        eax->split.level = levels[leaf];
        eax->split.num_threads_sharing = 0;
        eax->split.num_cores_on_die = __this_cpu_read(cpu_info.x86_max_cores) - 1;

        if (assoc == 0xffff)
                eax->split.is_fully_associative = 1;
        ebx->split.coherency_line_size = line_size - 1;
        ebx->split.ways_of_associativity = assoc - 1;
        ebx->split.physical_line_partition = lines_per_tag - 1;
        ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
                (ebx->split.ways_of_associativity + 1) - 1;
}

struct _cache_attr {
        struct attribute attr;
        ssize_t (*show)(struct _cpuid4_info *, char *, unsigned int);
        ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count,
                         unsigned int);
};

#if defined(CONFIG_AMD_NB) && defined(CONFIG_SYSFS)
/*
 * L3 cache descriptors
 */
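/*
 * amd_calc_l3_indices() derives the highest usable L3 index from the
 * per-subcache disable bits at offset 0x1C4 of the northbridge misc
 * device: every cleared bit contributes one enabled subcache portion
 * (family 0x15 exposes two such bits for subcaches 0 and 1), and the
 * largest per-subcache count times 1024, minus one, is the index limit.
 */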
static void __cpuinit amd_calc_l3_indices(struct amd_northbridge *nb)
{
        struct amd_l3_cache *l3 = &nb->l3_cache;
        unsigned int sc0, sc1, sc2, sc3;
        u32 val = 0;

        pci_read_config_dword(nb->misc, 0x1C4, &val);

        /* calculate subcache sizes */
        l3->subcaches[0] = sc0 = !(val & BIT(0));
        l3->subcaches[1] = sc1 = !(val & BIT(4));

        if (boot_cpu_data.x86 == 0x15) {
                l3->subcaches[0] = sc0 += !(val & BIT(1));
                l3->subcaches[1] = sc1 += !(val & BIT(5));
        }

        l3->subcaches[2] = sc2 = !(val & BIT(8)) + !(val & BIT(9));
        l3->subcaches[3] = sc3 = !(val & BIT(12)) + !(val & BIT(13));

        l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1;
}

static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index)
{
        int node;

        /* only for L3, and not in virtualized environments */
        if (index < 3)
                return;

        node = amd_get_nb_id(smp_processor_id());
        this_leaf->nb = node_to_amd_nb(node);
        if (this_leaf->nb && !this_leaf->nb->l3_cache.indices)
                amd_calc_l3_indices(this_leaf->nb);
}

/*
 * check whether a slot used for disabling an L3 index is occupied.
 * @nb: AMD northbridge containing the L3 cache
 * @slot: slot number (0..1)
 *
 * @returns: the disabled index if used or negative value if slot free.
 */
int amd_get_l3_disable_slot(struct amd_northbridge *nb, unsigned slot)
{
        unsigned int reg = 0;

        pci_read_config_dword(nb->misc, 0x1BC + slot * 4, &reg);

        /* check whether this slot is activated already */
        if (reg & (3UL << 30))
                return reg & 0xfff;

        return -1;
}

static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
                                  unsigned int slot)
{
        int index;

        if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
                return -EINVAL;

        index = amd_get_l3_disable_slot(this_leaf->base.nb, slot);
        if (index >= 0)
                return sprintf(buf, "%d\n", index);

        return sprintf(buf, "FREE\n");
}

#define SHOW_CACHE_DISABLE(slot)                                        \
static ssize_t                                                          \
show_cache_disable_##slot(struct _cpuid4_info *this_leaf, char *buf,    \
                          unsigned int cpu)                             \
{                                                                       \
        return show_cache_disable(this_leaf, buf, slot);                \
}
SHOW_CACHE_DISABLE(0)
SHOW_CACHE_DISABLE(1)

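/*
 * amd_l3_disable_index() programs disable-slot register 0x1BC (slot 0)
 * or 0x1C0 (slot 1) once per populated subcache: it writes the index
 * with BIT(30) set, flushes the caches on @cpu via WBINVD and then
 * commits the disable by setting BIT(31).
 */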
static void amd_l3_disable_index(struct amd_northbridge *nb, int cpu,
                                 unsigned slot, unsigned long idx)
{
        int i;

        idx |= BIT(30);

        /*
         * disable index in all 4 subcaches
         */
        for (i = 0; i < 4; i++) {
                u32 reg = idx | (i << 20);

                if (!nb->l3_cache.subcaches[i])
                        continue;

                pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);

                /*
                 * We need to WBINVD on a core on the node that contains
                 * the L3 cache whose indices we are disabling, so a
                 * simple wbinvd() is not sufficient.
                 */
                wbinvd_on_cpu(cpu);

                reg |= BIT(31);
                pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);
        }
}

/*
 * disable an L3 cache index by using a disable-slot
 *
 * @nb: AMD northbridge containing the L3 cache
 * @cpu: A CPU on the node containing the L3 cache
 * @slot: slot number (0..1)
 * @index: index to disable
 *
 * @return: 0 on success, error status on failure
 */
int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu, unsigned slot,
                            unsigned long index)
{
        int ret = 0;

        /* check if @slot is already used or the index is already disabled */
        ret = amd_get_l3_disable_slot(nb, slot);
        if (ret >= 0)
                return -EEXIST;

        if (index > nb->l3_cache.indices)
                return -EINVAL;

        /* check whether the other slot has disabled the same index already */
        if (index == amd_get_l3_disable_slot(nb, !slot))
                return -EEXIST;

        amd_l3_disable_index(nb, cpu, slot, index);

        return 0;
}

static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
                                   const char *buf, size_t count,
                                   unsigned int slot)
{
        unsigned long val = 0;
        int cpu, err = 0;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
                return -EINVAL;

        cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));

        if (strict_strtoul(buf, 10, &val) < 0)
                return -EINVAL;

        err = amd_set_l3_disable_slot(this_leaf->base.nb, cpu, slot, val);
        if (err) {
                if (err == -EEXIST)
                        pr_warning("L3 slot %d in use/index already disabled!\n",
                                   slot);
                return err;
        }
        return count;
}

#define STORE_CACHE_DISABLE(slot)                                       \
static ssize_t                                                          \
store_cache_disable_##slot(struct _cpuid4_info *this_leaf,              \
                           const char *buf, size_t count,               \
                           unsigned int cpu)                            \
{                                                                       \
        return store_cache_disable(this_leaf, buf, count, slot);        \
}
STORE_CACHE_DISABLE(0)
STORE_CACHE_DISABLE(1)

static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
                show_cache_disable_0, store_cache_disable_0);
static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
                show_cache_disable_1, store_cache_disable_1);

static ssize_t
show_subcaches(struct _cpuid4_info *this_leaf, char *buf, unsigned int cpu)
{
        if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
                return -EINVAL;

        return sprintf(buf, "%x\n", amd_get_subcaches(cpu));
}

static ssize_t
store_subcaches(struct _cpuid4_info *this_leaf, const char *buf, size_t count,
                unsigned int cpu)
{
        unsigned long val;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
                return -EINVAL;

        if (strict_strtoul(buf, 16, &val) < 0)
                return -EINVAL;

        if (amd_set_subcaches(cpu, val))
                return -EINVAL;

        return count;
}

static struct _cache_attr subcaches =
        __ATTR(subcaches, 0644, show_subcaches, store_subcaches);

#else
#define amd_init_l3_cache(x, y)
#endif  /* CONFIG_AMD_NB && CONFIG_SYSFS */

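/*
 * cpuid4_cache_lookup_regs() fills @this_leaf for cache leaf @index,
 * using CPUID leaf 4 on Intel, leaf 0x8000001d on AMD CPUs with
 * topology extensions, or the amd_cpuid4() emulation otherwise.  The
 * cache size follows from the (value - 1)-encoded geometry fields:
 *
 *      size = sets * line_size * physical_line_partitions * ways
 */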
static int
__cpuinit cpuid4_cache_lookup_regs(int index,
                                   struct _cpuid4_info_regs *this_leaf)
{
        union _cpuid4_leaf_eax eax;
        union _cpuid4_leaf_ebx ebx;
        union _cpuid4_leaf_ecx ecx;
        unsigned edx;

        if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
                if (cpu_has_topoext)
                        cpuid_count(0x8000001d, index, &eax.full,
                                    &ebx.full, &ecx.full, &edx);
                else
                        amd_cpuid4(index, &eax, &ebx, &ecx);
                amd_init_l3_cache(this_leaf, index);
        } else {
                cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
        }

        if (eax.split.type == CACHE_TYPE_NULL)
                return -EIO; /* better error ? */

        this_leaf->eax = eax;
        this_leaf->ebx = ebx;
        this_leaf->ecx = ecx;
        this_leaf->size = (ecx.split.number_of_sets + 1) *
                          (ebx.split.coherency_line_size + 1) *
                          (ebx.split.physical_line_partition + 1) *
                          (ebx.split.ways_of_associativity + 1);
        return 0;
}

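/*
 * Walk CPUID leaf 4 (or 0x8000001d on AMD) with increasing sub-leaf
 * index until the reported cache type is NULL and return the number of
 * valid leaves found before that point.
 */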
static int __cpuinit find_num_cache_leaves(struct cpuinfo_x86 *c)
{
        unsigned int eax, ebx, ecx, edx, op;
        union _cpuid4_leaf_eax cache_eax;
        int i = -1;

        if (c->x86_vendor == X86_VENDOR_AMD)
                op = 0x8000001d;
        else
                op = 4;

        do {
                ++i;
                /* Do cpuid(op) loop to find out num_cache_leaves */
                cpuid_count(op, i, &eax, &ebx, &ecx, &edx);
                cache_eax.full = eax;
        } while (cache_eax.split.type != CACHE_TYPE_NULL);
        return i;
}

void __cpuinit init_amd_cacheinfo(struct cpuinfo_x86 *c)
{

        if (cpu_has_topoext) {
                num_cache_leaves = find_num_cache_leaves(c);
        } else if (c->extended_cpuid_level >= 0x80000006) {
                if (cpuid_edx(0x80000006) & 0xf000)
                        num_cache_leaves = 4;
                else
                        num_cache_leaves = 3;
        }
}

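/*
 * init_intel_cacheinfo() sizes the L1/L2/L3 caches for @c.  It prefers
 * the deterministic cache parameters (CPUID leaf 4) and falls back to
 * the legacy descriptor bytes of CPUID leaf 2; on P4 (family 15) leaf 2
 * is still consulted for the trace cache.  The L2/L3 IDs derived from
 * the sharing mask feed cpu_llc_id, and the L2 size in KB is returned.
 */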
unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
{
        /* Cache sizes */
        unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0;
        unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
        unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
        unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
#ifdef CONFIG_X86_HT
        unsigned int cpu = c->cpu_index;
#endif

        if (c->cpuid_level > 3) {
                static int is_initialized;

                if (is_initialized == 0) {
                        /* Init num_cache_leaves from boot CPU */
                        num_cache_leaves = find_num_cache_leaves(c);
                        is_initialized++;
                }

                /*
                 * Whenever possible use cpuid(4), the deterministic cache
                 * parameters leaf, to find the cache details.
                 */
                for (i = 0; i < num_cache_leaves; i++) {
                        struct _cpuid4_info_regs this_leaf = {};
                        int retval;

                        retval = cpuid4_cache_lookup_regs(i, &this_leaf);
                        if (retval < 0)
                                continue;

                        switch (this_leaf.eax.split.level) {
                        case 1:
                                if (this_leaf.eax.split.type == CACHE_TYPE_DATA)
                                        new_l1d = this_leaf.size/1024;
                                else if (this_leaf.eax.split.type == CACHE_TYPE_INST)
                                        new_l1i = this_leaf.size/1024;
                                break;
                        case 2:
                                new_l2 = this_leaf.size/1024;
                                num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
                                index_msb = get_count_order(num_threads_sharing);
                                l2_id = c->apicid & ~((1 << index_msb) - 1);
                                break;
                        case 3:
                                new_l3 = this_leaf.size/1024;
                                num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
                                index_msb = get_count_order(num_threads_sharing);
                                l3_id = c->apicid & ~((1 << index_msb) - 1);
                                break;
                        default:
                                break;
                        }
                }
        }
        /*
         * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
         * trace cache
         */
        if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
                /* supports eax=2 call */
                int j, n;
                unsigned int regs[4];
                unsigned char *dp = (unsigned char *)regs;
                int only_trace = 0;

                if (num_cache_leaves != 0 && c->x86 == 15)
                        only_trace = 1;

                /* Number of times to iterate */
                n = cpuid_eax(2) & 0xFF;

                for (i = 0 ; i < n ; i++) {
                        cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

                        /* If bit 31 is set, this is an unknown format */
                        for (j = 0 ; j < 3 ; j++)
                                if (regs[j] & (1 << 31))
                                        regs[j] = 0;

                        /* Byte 0 is level count, not a descriptor */
                        for (j = 1 ; j < 16 ; j++) {
                                unsigned char des = dp[j];
                                unsigned char k = 0;

                                /* look up this descriptor in the table */
                                while (cache_table[k].descriptor != 0) {
                                        if (cache_table[k].descriptor == des) {
                                                if (only_trace && cache_table[k].cache_type != LVL_TRACE)
                                                        break;
                                                switch (cache_table[k].cache_type) {
                                                case LVL_1_INST:
                                                        l1i += cache_table[k].size;
                                                        break;
                                                case LVL_1_DATA:
                                                        l1d += cache_table[k].size;
                                                        break;
                                                case LVL_2:
                                                        l2 += cache_table[k].size;
                                                        break;
                                                case LVL_3:
                                                        l3 += cache_table[k].size;
                                                        break;
                                                case LVL_TRACE:
                                                        trace += cache_table[k].size;
                                                        break;
                                                }

                                                break;
                                        }

                                        k++;
                                }
                        }
                }
        }

        if (new_l1d)
                l1d = new_l1d;

        if (new_l1i)
                l1i = new_l1i;

        if (new_l2) {
                l2 = new_l2;
#ifdef CONFIG_X86_HT
                per_cpu(cpu_llc_id, cpu) = l2_id;
#endif
        }

        if (new_l3) {
                l3 = new_l3;
#ifdef CONFIG_X86_HT
                per_cpu(cpu_llc_id, cpu) = l3_id;
#endif
        }

        c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));

        return l2;
}

#ifdef CONFIG_SYSFS

/* pointer to _cpuid4_info array (for each cache leaf) */
static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
#define CPUID4_INFO_IDX(x, y)   (&((per_cpu(ici_cpuid4_info, x))[y]))

#ifdef CONFIG_SMP

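/*
 * On AMD CPUs with topology extensions the sharing map is built from
 * num_threads_sharing and the APIC IDs; otherwise only the L3 map
 * (index 3) is derived from cpu_llc_shared_mask().  Returns 1 when the
 * map was set up here, 0 to fall back to the generic code below.
 */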
static int __cpuinit cache_shared_amd_cpu_map_setup(unsigned int cpu, int index)
{
        struct _cpuid4_info *this_leaf;
        int i, sibling;

        if (cpu_has_topoext) {
                unsigned int apicid, nshared, first, last;

                if (!per_cpu(ici_cpuid4_info, cpu))
                        return 0;

                this_leaf = CPUID4_INFO_IDX(cpu, index);
                nshared = this_leaf->base.eax.split.num_threads_sharing + 1;
                apicid = cpu_data(cpu).apicid;
                first = apicid - (apicid % nshared);
                last = first + nshared - 1;

                for_each_online_cpu(i) {
                        apicid = cpu_data(i).apicid;
                        if ((apicid < first) || (apicid > last))
                                continue;
                        if (!per_cpu(ici_cpuid4_info, i))
                                continue;
                        this_leaf = CPUID4_INFO_IDX(i, index);

                        for_each_online_cpu(sibling) {
                                apicid = cpu_data(sibling).apicid;
                                if ((apicid < first) || (apicid > last))
                                        continue;
                                set_bit(sibling, this_leaf->shared_cpu_map);
                        }
                }
        } else if (index == 3) {
                for_each_cpu(i, cpu_llc_shared_mask(cpu)) {
                        if (!per_cpu(ici_cpuid4_info, i))
                                continue;
                        this_leaf = CPUID4_INFO_IDX(i, index);
                        for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) {
                                if (!cpu_online(sibling))
                                        continue;
                                set_bit(sibling, this_leaf->shared_cpu_map);
                        }
                }
        } else
                return 0;

        return 1;
}

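/*
 * Generic sharing-map setup: every online CPU whose APIC ID matches
 * @cpu's in the upper bits (above the num_threads_sharing order) shares
 * leaf @index, and the sibling's map is updated symmetrically when its
 * leaf data has already been allocated.
 */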
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
        struct _cpuid4_info *this_leaf, *sibling_leaf;
        unsigned long num_threads_sharing;
        int index_msb, i;
        struct cpuinfo_x86 *c = &cpu_data(cpu);

        if (c->x86_vendor == X86_VENDOR_AMD) {
                if (cache_shared_amd_cpu_map_setup(cpu, index))
                        return;
        }

        this_leaf = CPUID4_INFO_IDX(cpu, index);
        num_threads_sharing = 1 + this_leaf->base.eax.split.num_threads_sharing;

        if (num_threads_sharing == 1)
                cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));
        else {
                index_msb = get_count_order(num_threads_sharing);

                for_each_online_cpu(i) {
                        if (cpu_data(i).apicid >> index_msb ==
                            c->apicid >> index_msb) {
                                cpumask_set_cpu(i,
                                        to_cpumask(this_leaf->shared_cpu_map));
                                if (i != cpu && per_cpu(ici_cpuid4_info, i)) {
                                        sibling_leaf =
                                                CPUID4_INFO_IDX(i, index);
                                        cpumask_set_cpu(cpu, to_cpumask(
                                                sibling_leaf->shared_cpu_map));
                                }
                        }
                }
        }
}

static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
        struct _cpuid4_info *this_leaf, *sibling_leaf;
        int sibling;

        this_leaf = CPUID4_INFO_IDX(cpu, index);
        for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) {
                sibling_leaf = CPUID4_INFO_IDX(sibling, index);
                cpumask_clear_cpu(cpu,
                                  to_cpumask(sibling_leaf->shared_cpu_map));
        }
}
#else
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
}

static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
}
#endif

static void __cpuinit free_cache_attributes(unsigned int cpu)
{
        int i;

        for (i = 0; i < num_cache_leaves; i++)
                cache_remove_shared_cpu_map(cpu, i);

        kfree(per_cpu(ici_cpuid4_info, cpu));
        per_cpu(ici_cpuid4_info, cpu) = NULL;
}

static void __cpuinit get_cpu_leaves(void *_retval)
{
        int j, *retval = _retval, cpu = smp_processor_id();

        /* Do cpuid and store the results */
        for (j = 0; j < num_cache_leaves; j++) {
                struct _cpuid4_info *this_leaf = CPUID4_INFO_IDX(cpu, j);

                *retval = cpuid4_cache_lookup_regs(j, &this_leaf->base);
                if (unlikely(*retval < 0)) {
                        int i;

                        for (i = 0; i < j; i++)
                                cache_remove_shared_cpu_map(cpu, i);
                        break;
                }
                cache_shared_cpu_map_setup(cpu, j);
        }
}

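/*
 * detect_cache_attributes() allocates the per-CPU _cpuid4_info array
 * and fills it on @cpu itself via smp_call_function_single(), since the
 * CPUID cache leaves must be read on the CPU they describe.
 */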
static int __cpuinit detect_cache_attributes(unsigned int cpu)
{
        int retval;

        if (num_cache_leaves == 0)
                return -ENOENT;

        per_cpu(ici_cpuid4_info, cpu) = kzalloc(
            sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
        if (per_cpu(ici_cpuid4_info, cpu) == NULL)
                return -ENOMEM;

        smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
        if (retval) {
                kfree(per_cpu(ici_cpuid4_info, cpu));
                per_cpu(ici_cpuid4_info, cpu) = NULL;
        }

        return retval;
}

#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/cpu.h>

/* pointer to kobject for cpuX/cache */
static DEFINE_PER_CPU(struct kobject *, ici_cache_kobject);

struct _index_kobject {
        struct kobject kobj;
        unsigned int cpu;
        unsigned short index;
};

/* pointer to array of kobjects for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct _index_kobject *, ici_index_kobject);
#define INDEX_KOBJECT_PTR(x, y)         (&((per_cpu(ici_index_kobject, x))[y]))

#define show_one_plus(file_name, object, val)                           \
static ssize_t show_##file_name(struct _cpuid4_info *this_leaf, char *buf, \
                                unsigned int cpu)                       \
{                                                                       \
        return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
}

show_one_plus(level, base.eax.split.level, 0);
show_one_plus(coherency_line_size, base.ebx.split.coherency_line_size, 1);
show_one_plus(physical_line_partition, base.ebx.split.physical_line_partition, 1);
show_one_plus(ways_of_associativity, base.ebx.split.ways_of_associativity, 1);
show_one_plus(number_of_sets, base.ecx.split.number_of_sets, 1);

static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf,
                         unsigned int cpu)
{
        return sprintf(buf, "%luK\n", this_leaf->base.size / 1024);
}

static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
                                        int type, char *buf)
{
        ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
        int n = 0;

        if (len > 1) {
                const struct cpumask *mask;

                mask = to_cpumask(this_leaf->shared_cpu_map);
                n = type ?
                        cpulist_scnprintf(buf, len-2, mask) :
                        cpumask_scnprintf(buf, len-2, mask);
                buf[n++] = '\n';
                buf[n] = '\0';
        }
        return n;
}

static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf,
                                          unsigned int cpu)
{
        return show_shared_cpu_map_func(leaf, 0, buf);
}

static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf,
                                           unsigned int cpu)
{
        return show_shared_cpu_map_func(leaf, 1, buf);
}

static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf,
                         unsigned int cpu)
{
        switch (this_leaf->base.eax.split.type) {
        case CACHE_TYPE_DATA:
                return sprintf(buf, "Data\n");
        case CACHE_TYPE_INST:
                return sprintf(buf, "Instruction\n");
        case CACHE_TYPE_UNIFIED:
                return sprintf(buf, "Unified\n");
        default:
                return sprintf(buf, "Unknown\n");
        }
}

#define to_object(k)    container_of(k, struct _index_kobject, kobj)
#define to_attr(a)      container_of(a, struct _cache_attr, attr)

#define define_one_ro(_name) \
static struct _cache_attr _name = \
        __ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(physical_line_partition);
define_one_ro(ways_of_associativity);
define_one_ro(number_of_sets);
define_one_ro(size);
define_one_ro(shared_cpu_map);
define_one_ro(shared_cpu_list);

static struct attribute *default_attrs[] = {
        &type.attr,
        &level.attr,
        &coherency_line_size.attr,
        &physical_line_partition.attr,
        &ways_of_associativity.attr,
        &number_of_sets.attr,
        &size.attr,
        &shared_cpu_map.attr,
        &shared_cpu_list.attr,
        NULL
};

#ifdef CONFIG_AMD_NB
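/*
 * amd_l3_attrs() builds the sysfs attribute list for L3 leaves once,
 * appending cache_disable_0/1 and/or subcaches to the default
 * attributes when the northbridge supports index disable or
 * partitioning.
 */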
static struct attribute ** __cpuinit amd_l3_attrs(void)
{
        static struct attribute **attrs;
        int n;

        if (attrs)
                return attrs;

        n = ARRAY_SIZE(default_attrs);

        if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
                n += 2;

        if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
                n += 1;

        attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
        if (attrs == NULL)
                return attrs = default_attrs;

        for (n = 0; default_attrs[n]; n++)
                attrs[n] = default_attrs[n];

        if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
                attrs[n++] = &cache_disable_0.attr;
                attrs[n++] = &cache_disable_1.attr;
        }

        if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
                attrs[n++] = &subcaches.attr;

        return attrs;
}
#endif

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
        struct _cache_attr *fattr = to_attr(attr);
        struct _index_kobject *this_leaf = to_object(kobj);
        ssize_t ret;

        ret = fattr->show ?
                fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
                            buf, this_leaf->cpu) :
                0;
        return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
                     const char *buf, size_t count)
{
        struct _cache_attr *fattr = to_attr(attr);
        struct _index_kobject *this_leaf = to_object(kobj);
        ssize_t ret;

        ret = fattr->store ?
                fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
                             buf, count, this_leaf->cpu) :
                0;
        return ret;
}

static const struct sysfs_ops sysfs_ops = {
        .show   = show,
        .store  = store,
};

static struct kobj_type ktype_cache = {
        .sysfs_ops      = &sysfs_ops,
        .default_attrs  = default_attrs,
};

static struct kobj_type ktype_percpu_entry = {
        .sysfs_ops      = &sysfs_ops,
};

static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
{
        kfree(per_cpu(ici_cache_kobject, cpu));
        kfree(per_cpu(ici_index_kobject, cpu));
        per_cpu(ici_cache_kobject, cpu) = NULL;
        per_cpu(ici_index_kobject, cpu) = NULL;
        free_cache_attributes(cpu);
}

static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
{
        int err;

        if (num_cache_leaves == 0)
                return -ENOENT;

        err = detect_cache_attributes(cpu);
        if (err)
                return err;

        /* Allocate all required memory */
        per_cpu(ici_cache_kobject, cpu) =
                kzalloc(sizeof(struct kobject), GFP_KERNEL);
        if (unlikely(per_cpu(ici_cache_kobject, cpu) == NULL))
                goto err_out;

        per_cpu(ici_index_kobject, cpu) = kzalloc(
            sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
        if (unlikely(per_cpu(ici_index_kobject, cpu) == NULL))
                goto err_out;

        return 0;

err_out:
        cpuid4_cache_sysfs_exit(cpu);
        return -ENOMEM;
}

static DECLARE_BITMAP(cache_dev_map, NR_CPUS);

/* Add/Remove cache interface for CPU device */
static int __cpuinit cache_add_dev(struct device *dev)
{
        unsigned int cpu = dev->id;
        unsigned long i, j;
        struct _index_kobject *this_object;
        struct _cpuid4_info *this_leaf;
        int retval;

        retval = cpuid4_cache_sysfs_init(cpu);
        if (unlikely(retval < 0))
                return retval;

        retval = kobject_init_and_add(per_cpu(ici_cache_kobject, cpu),
                                      &ktype_percpu_entry,
                                      &dev->kobj, "%s", "cache");
        if (retval < 0) {
                cpuid4_cache_sysfs_exit(cpu);
                return retval;
        }

        for (i = 0; i < num_cache_leaves; i++) {
                this_object = INDEX_KOBJECT_PTR(cpu, i);
                this_object->cpu = cpu;
                this_object->index = i;

                this_leaf = CPUID4_INFO_IDX(cpu, i);

                ktype_cache.default_attrs = default_attrs;
#ifdef CONFIG_AMD_NB
                if (this_leaf->base.nb)
                        ktype_cache.default_attrs = amd_l3_attrs();
#endif
                retval = kobject_init_and_add(&(this_object->kobj),
                                              &ktype_cache,
                                              per_cpu(ici_cache_kobject, cpu),
                                              "index%1lu", i);
                if (unlikely(retval)) {
                        for (j = 0; j < i; j++)
                                kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
                        kobject_put(per_cpu(ici_cache_kobject, cpu));
                        cpuid4_cache_sysfs_exit(cpu);
                        return retval;
                }
                kobject_uevent(&(this_object->kobj), KOBJ_ADD);
        }
        cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));

        kobject_uevent(per_cpu(ici_cache_kobject, cpu), KOBJ_ADD);
        return 0;
}

static void __cpuinit cache_remove_dev(struct device *dev)
{
        unsigned int cpu = dev->id;
        unsigned long i;

        if (per_cpu(ici_cpuid4_info, cpu) == NULL)
                return;
        if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
                return;
        cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));

        for (i = 0; i < num_cache_leaves; i++)
                kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
        kobject_put(per_cpu(ici_cache_kobject, cpu));
        cpuid4_cache_sysfs_exit(cpu);
}

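/*
 * CPU hotplug callback: create the cpuX/cache sysfs hierarchy when a
 * CPU comes online and tear it down again when it goes dead.
 */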
static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
                                            unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;
        struct device *dev;

        dev = get_cpu_device(cpu);
        switch (action) {
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                cache_add_dev(dev);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                cache_remove_dev(dev);
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
        .notifier_call = cacheinfo_cpu_callback,
};

static int __init cache_sysfs_init(void)
{
        int i;

        if (num_cache_leaves == 0)
                return 0;

        for_each_online_cpu(i) {
                int err;
                struct device *dev = get_cpu_device(i);

                err = cache_add_dev(dev);
                if (err)
                        return err;
        }
        register_hotcpu_notifier(&cacheinfo_cpu_notifier);
        return 0;
}

device_initcall(cache_sysfs_init);

#endif