/*
 * arch/x86/kernel/cpu/intel_cacheinfo.c
 *
 * Routines to identify caches on Intel CPUs.
 *
 * Changes:
 * Venkatesh Pallipadi : Adding cache identification through cpuid(4)
 * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
 * Andi Kleen / Andreas Herrmann : CPUID4 emulation on AMD.
 */

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/pci.h>

#include <asm/processor.h>
#include <asm/smp.h>

#define LVL_1_INST	1
#define LVL_1_DATA	2
#define LVL_2		3
#define LVL_3		4
#define LVL_TRACE	5

struct _cache_table
{
        unsigned char descriptor;
        char cache_type;
        short size;
};

/* All the cache descriptor types we care about (no TLB entries) */
static struct _cache_table cache_table[] __cpuinitdata =
{
        { 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
        { 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
        { 0x0a, LVL_1_DATA, 8 },	/* 2-way set assoc, 32 byte line size */
        { 0x0c, LVL_1_DATA, 16 },	/* 4-way set assoc, 32 byte line size */
        { 0x22, LVL_3, 512 },		/* 4-way set assoc, sectored cache, 64 byte line size */
        { 0x23, LVL_3, 1024 },		/* 8-way set assoc, sectored cache, 64 byte line size */
        { 0x25, LVL_3, 2048 },		/* 8-way set assoc, sectored cache, 64 byte line size */
        { 0x29, LVL_3, 4096 },		/* 8-way set assoc, sectored cache, 64 byte line size */
        { 0x2c, LVL_1_DATA, 32 },	/* 8-way set assoc, 64 byte line size */
        { 0x30, LVL_1_INST, 32 },	/* 8-way set assoc, 64 byte line size */
        { 0x39, LVL_2, 128 },		/* 4-way set assoc, sectored cache, 64 byte line size */
        { 0x3a, LVL_2, 192 },		/* 6-way set assoc, sectored cache, 64 byte line size */
        { 0x3b, LVL_2, 128 },		/* 2-way set assoc, sectored cache, 64 byte line size */
        { 0x3c, LVL_2, 256 },		/* 4-way set assoc, sectored cache, 64 byte line size */
        { 0x3d, LVL_2, 384 },		/* 6-way set assoc, sectored cache, 64 byte line size */
        { 0x3e, LVL_2, 512 },		/* 4-way set assoc, sectored cache, 64 byte line size */
        { 0x3f, LVL_2, 256 },		/* 2-way set assoc, 64 byte line size */
        { 0x41, LVL_2, 128 },		/* 4-way set assoc, 32 byte line size */
        { 0x42, LVL_2, 256 },		/* 4-way set assoc, 32 byte line size */
        { 0x43, LVL_2, 512 },		/* 4-way set assoc, 32 byte line size */
        { 0x44, LVL_2, 1024 },		/* 4-way set assoc, 32 byte line size */
        { 0x45, LVL_2, 2048 },		/* 4-way set assoc, 32 byte line size */
        { 0x46, LVL_3, 4096 },		/* 4-way set assoc, 64 byte line size */
        { 0x47, LVL_3, 8192 },		/* 8-way set assoc, 64 byte line size */
        { 0x49, LVL_3, 4096 },		/* 16-way set assoc, 64 byte line size */
        { 0x4a, LVL_3, 6144 },		/* 12-way set assoc, 64 byte line size */
        { 0x4b, LVL_3, 8192 },		/* 16-way set assoc, 64 byte line size */
        { 0x4c, LVL_3, 12288 },		/* 12-way set assoc, 64 byte line size */
        { 0x4d, LVL_3, 16384 },		/* 16-way set assoc, 64 byte line size */
        { 0x4e, LVL_2, 6144 },		/* 24-way set assoc, 64 byte line size */
        { 0x60, LVL_1_DATA, 16 },	/* 8-way set assoc, sectored cache, 64 byte line size */
        { 0x66, LVL_1_DATA, 8 },	/* 4-way set assoc, sectored cache, 64 byte line size */
        { 0x67, LVL_1_DATA, 16 },	/* 4-way set assoc, sectored cache, 64 byte line size */
        { 0x68, LVL_1_DATA, 32 },	/* 4-way set assoc, sectored cache, 64 byte line size */
        { 0x70, LVL_TRACE, 12 },	/* 8-way set assoc */
        { 0x71, LVL_TRACE, 16 },	/* 8-way set assoc */
        { 0x72, LVL_TRACE, 32 },	/* 8-way set assoc */
        { 0x73, LVL_TRACE, 64 },	/* 8-way set assoc */
        { 0x78, LVL_2, 1024 },		/* 4-way set assoc, 64 byte line size */
        { 0x79, LVL_2, 128 },		/* 8-way set assoc, sectored cache, 64 byte line size */
        { 0x7a, LVL_2, 256 },		/* 8-way set assoc, sectored cache, 64 byte line size */
        { 0x7b, LVL_2, 512 },		/* 8-way set assoc, sectored cache, 64 byte line size */
        { 0x7c, LVL_2, 1024 },		/* 8-way set assoc, sectored cache, 64 byte line size */
        { 0x7d, LVL_2, 2048 },		/* 8-way set assoc, 64 byte line size */
        { 0x7f, LVL_2, 512 },		/* 2-way set assoc, 64 byte line size */
        { 0x82, LVL_2, 256 },		/* 8-way set assoc, 32 byte line size */
        { 0x83, LVL_2, 512 },		/* 8-way set assoc, 32 byte line size */
        { 0x84, LVL_2, 1024 },		/* 8-way set assoc, 32 byte line size */
        { 0x85, LVL_2, 2048 },		/* 8-way set assoc, 32 byte line size */
        { 0x86, LVL_2, 512 },		/* 4-way set assoc, 64 byte line size */
        { 0x87, LVL_2, 1024 },		/* 8-way set assoc, 64 byte line size */
        { 0x00, 0, 0}
};


enum _cache_type
{
        CACHE_TYPE_NULL = 0,
        CACHE_TYPE_DATA = 1,
        CACHE_TYPE_INST = 2,
        CACHE_TYPE_UNIFIED = 3
};

union _cpuid4_leaf_eax {
        struct {
                enum _cache_type type:5;
                unsigned int level:3;
                unsigned int is_self_initializing:1;
                unsigned int is_fully_associative:1;
                unsigned int reserved:4;
                unsigned int num_threads_sharing:12;
                unsigned int num_cores_on_die:6;
        } split;
        u32 full;
};

union _cpuid4_leaf_ebx {
        struct {
                unsigned int coherency_line_size:12;
                unsigned int physical_line_partition:10;
                unsigned int ways_of_associativity:10;
        } split;
        u32 full;
};

union _cpuid4_leaf_ecx {
        struct {
                unsigned int number_of_sets:32;
        } split;
        u32 full;
};

struct _cpuid4_info {
        union _cpuid4_leaf_eax eax;
        union _cpuid4_leaf_ebx ebx;
        union _cpuid4_leaf_ecx ecx;
        unsigned long size;
        unsigned long can_disable;
        cpumask_t shared_cpu_map;	/* future?: only cpus/node is needed */
};

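/*
 * The raw CPUID(4) fields above are all "minus one" encoded.  The total
 * cache size follows from:
 *
 *   size = (ways_of_associativity + 1) * (physical_line_partition + 1) *
 *          (coherency_line_size + 1) * (number_of_sets + 1)
 *
 * For example (illustrative numbers only): a leaf reporting 7, 0, 63 and
 * 4095 for those fields describes an 8-way cache with 64-byte lines and
 * 4096 sets, i.e. 8 * 1 * 64 * 4096 = 2 MB.
 */
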
#ifdef CONFIG_PCI
static struct pci_device_id k8_nb_id[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1103) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1203) },
        {}
};
#endif

unsigned short num_cache_leaves;

/*
 * AMD doesn't have CPUID leaf 4.  Emulate it here to report the same
 * information to the user.  This makes some assumptions about the machine:
 * L2 not shared, no SMT etc., which is currently true on AMD CPUs.
 *
 * In theory the TLBs could be reported as a fake type (they are in "dummy").
 * Maybe later.
 */
union l1_cache {
        struct {
                unsigned line_size : 8;
                unsigned lines_per_tag : 8;
                unsigned assoc : 8;
                unsigned size_in_kb : 8;
        };
        unsigned val;
};

union l2_cache {
        struct {
                unsigned line_size : 8;
                unsigned lines_per_tag : 4;
                unsigned assoc : 4;
                unsigned size_in_kb : 16;
        };
        unsigned val;
};

union l3_cache {
        struct {
                unsigned line_size : 8;
                unsigned lines_per_tag : 4;
                unsigned assoc : 4;
                unsigned res : 2;
                unsigned size_encoded : 14;
        };
        unsigned val;
};

static unsigned short assocs[] __cpuinitdata = {
        [1] = 1, [2] = 2, [4] = 4, [6] = 8,
        [8] = 16, [0xa] = 32, [0xb] = 48,
        [0xc] = 64,
        [0xf] = 0xffff /* fully associative - no exact way count */
};

static unsigned char levels[] __cpuinitdata = { 1, 1, 2, 3 };
static unsigned char types[] __cpuinitdata = { 1, 2, 3, 3 };

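/*
 * The three unions above mirror the register layout consumed by amd_cpuid4()
 * below: CPUID 0x80000005 returns the L1D (ECX) and L1I (EDX) descriptors,
 * and CPUID 0x80000006 returns L2 (ECX) and L3 (EDX).  The assoc field is an
 * encoded value that assocs[] maps to a way count, with 0xf treated as fully
 * associative (see the assoc == 0xf check below).
 */
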
static void __cpuinit
amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
           union _cpuid4_leaf_ebx *ebx,
           union _cpuid4_leaf_ecx *ecx)
{
        unsigned dummy;
        unsigned line_size, lines_per_tag, assoc, size_in_kb;
        union l1_cache l1i, l1d;
        union l2_cache l2;
        union l3_cache l3;
        union l1_cache *l1 = &l1d;

        eax->full = 0;
        ebx->full = 0;
        ecx->full = 0;

        cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
        cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);

        switch (leaf) {
        case 1:
                l1 = &l1i;
                /* fall through */
        case 0:
                if (!l1->val)
                        return;
                assoc = l1->assoc;
                line_size = l1->line_size;
                lines_per_tag = l1->lines_per_tag;
                size_in_kb = l1->size_in_kb;
                break;
        case 2:
                if (!l2.val)
                        return;
                assoc = l2.assoc;
                line_size = l2.line_size;
                lines_per_tag = l2.lines_per_tag;
                /* cpu_data has errata corrections for K7 applied */
                size_in_kb = current_cpu_data.x86_cache_size;
                break;
        case 3:
                if (!l3.val)
                        return;
                assoc = l3.assoc;
                line_size = l3.line_size;
                lines_per_tag = l3.lines_per_tag;
                size_in_kb = l3.size_encoded * 512;
                break;
        default:
                return;
        }

        eax->split.is_self_initializing = 1;
        eax->split.type = types[leaf];
        eax->split.level = levels[leaf];
        if (leaf == 3)
                eax->split.num_threads_sharing = current_cpu_data.x86_max_cores - 1;
        else
                eax->split.num_threads_sharing = 0;
        eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;

        if (assoc == 0xf)
                eax->split.is_fully_associative = 1;
        ebx->split.coherency_line_size = line_size - 1;
        ebx->split.ways_of_associativity = assocs[assoc] - 1;
        ebx->split.physical_line_partition = lines_per_tag - 1;
        ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
                (ebx->split.ways_of_associativity + 1) - 1;
}

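/*
 * Illustrative example (hypothetical values): for a 512 KB, 16-way L2 with
 * 64-byte lines, CPUID 0x80000006 reports assoc code 8 (assocs[8] == 16), so
 * amd_cpuid4() emits ways_of_associativity = 15, coherency_line_size = 63 and
 * number_of_sets = 512 * 1024 / 64 / 16 - 1 = 511, matching the CPUID(4)
 * encoding decoded by cpuid4_cache_lookup() below.
 */
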
static void __cpuinit
amd_check_l3_disable(int index, struct _cpuid4_info *this_leaf)
{
        if (index < 3)
                return;
        this_leaf->can_disable = 1;
}

static int
__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
{
        union _cpuid4_leaf_eax eax;
        union _cpuid4_leaf_ebx ebx;
        union _cpuid4_leaf_ecx ecx;
        unsigned edx;

        if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
                amd_cpuid4(index, &eax, &ebx, &ecx);
                if (boot_cpu_data.x86 >= 0x10)
                        amd_check_l3_disable(index, this_leaf);
        } else {
                cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
        }

        if (eax.split.type == CACHE_TYPE_NULL)
                return -EIO; /* better error ? */

        this_leaf->eax = eax;
        this_leaf->ebx = ebx;
        this_leaf->ecx = ecx;
        this_leaf->size = (ecx.split.number_of_sets + 1) *
                (ebx.split.coherency_line_size + 1) *
                (ebx.split.physical_line_partition + 1) *
                (ebx.split.ways_of_associativity + 1);
        return 0;
}

static int __cpuinit find_num_cache_leaves(void)
{
        unsigned int eax, ebx, ecx, edx;
        union _cpuid4_leaf_eax cache_eax;
        int i = -1;

        do {
                ++i;
                /* Do cpuid(4) loop to find out num_cache_leaves */
                cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
                cache_eax.full = eax;
        } while (cache_eax.split.type != CACHE_TYPE_NULL);
        return i;
}

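/*
 * num_cache_leaves is discovered once, on the boot CPU, and then reused for
 * every other CPU (see the is_initialized logic in init_intel_cacheinfo()
 * and the per-leaf loop in detect_cache_attributes() below).
 */
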
unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
{
        unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
        unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
        unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
        unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
#ifdef CONFIG_X86_HT
        unsigned int cpu = c->cpu_index;
#endif

        if (c->cpuid_level > 3) {
                static int is_initialized;

                if (is_initialized == 0) {
                        /* Init num_cache_leaves from boot CPU */
                        num_cache_leaves = find_num_cache_leaves();
                        is_initialized++;
                }

                /*
                 * Whenever possible use cpuid(4), deterministic cache
                 * parameters cpuid leaf to find the cache details
                 */
                for (i = 0; i < num_cache_leaves; i++) {
                        struct _cpuid4_info this_leaf;
                        int retval;

                        retval = cpuid4_cache_lookup(i, &this_leaf);
                        if (retval >= 0) {
                                switch (this_leaf.eax.split.level) {
                                case 1:
                                        if (this_leaf.eax.split.type ==
                                                        CACHE_TYPE_DATA)
                                                new_l1d = this_leaf.size/1024;
                                        else if (this_leaf.eax.split.type ==
                                                        CACHE_TYPE_INST)
                                                new_l1i = this_leaf.size/1024;
                                        break;
                                case 2:
                                        new_l2 = this_leaf.size/1024;
                                        num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
                                        index_msb = get_count_order(num_threads_sharing);
                                        l2_id = c->apicid >> index_msb;
                                        break;
                                case 3:
                                        new_l3 = this_leaf.size/1024;
                                        num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
                                        index_msb = get_count_order(num_threads_sharing);
                                        l3_id = c->apicid >> index_msb;
                                        break;
                                default:
                                        break;
                                }
                        }
                }
        }
        /*
         * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
         * trace cache
         */
        if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
                /* supports eax=2 call */
                int j, n;
                unsigned int regs[4];
                unsigned char *dp = (unsigned char *)regs;
                int only_trace = 0;

                if (num_cache_leaves != 0 && c->x86 == 15)
                        only_trace = 1;

                /* Number of times to iterate */
                n = cpuid_eax(2) & 0xFF;

                for (i = 0; i < n; i++) {
                        cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

                        /* If bit 31 is set, this is an unknown format */
                        for (j = 0; j < 3; j++) {
                                if (regs[j] & (1 << 31))
                                        regs[j] = 0;
                        }

                        /* Byte 0 is level count, not a descriptor */
                        for (j = 1; j < 16; j++) {
                                unsigned char des = dp[j];
                                unsigned char k = 0;

                                /* look up this descriptor in the table */
                                while (cache_table[k].descriptor != 0) {
                                        if (cache_table[k].descriptor == des) {
                                                if (only_trace && cache_table[k].cache_type != LVL_TRACE)
                                                        break;
                                                switch (cache_table[k].cache_type) {
                                                case LVL_1_INST:
                                                        l1i += cache_table[k].size;
                                                        break;
                                                case LVL_1_DATA:
                                                        l1d += cache_table[k].size;
                                                        break;
                                                case LVL_2:
                                                        l2 += cache_table[k].size;
                                                        break;
                                                case LVL_3:
                                                        l3 += cache_table[k].size;
                                                        break;
                                                case LVL_TRACE:
                                                        trace += cache_table[k].size;
                                                        break;
                                                }

                                                break;
                                        }

                                        k++;
                                }
                        }
                }
        }

        if (new_l1d)
                l1d = new_l1d;

        if (new_l1i)
                l1i = new_l1i;

        if (new_l2) {
                l2 = new_l2;
#ifdef CONFIG_X86_HT
                per_cpu(cpu_llc_id, cpu) = l2_id;
#endif
        }

        if (new_l3) {
                l3 = new_l3;
#ifdef CONFIG_X86_HT
                per_cpu(cpu_llc_id, cpu) = l3_id;
#endif
        }

        if (trace)
                printk(KERN_INFO "CPU: Trace cache: %dK uops", trace);
        else if (l1i)
                printk(KERN_INFO "CPU: L1 I cache: %dK", l1i);

        if (l1d)
                printk(", L1 D cache: %dK\n", l1d);
        else
                printk("\n");

        if (l2)
                printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);

        if (l3)
                printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);

        c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));

        return l2;
}

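/*
 * Example of the boot messages printed by init_intel_cacheinfo() above
 * (sizes are illustrative only):
 *
 *   CPU: L1 I cache: 32K, L1 D cache: 32K
 *   CPU: L2 cache: 4096K
 */
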
/* pointer to _cpuid4_info array (for each cache leaf) */
static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info);
#define CPUID4_INFO_IDX(x, y)	(&((per_cpu(cpuid4_info, x))[y]))

#ifdef CONFIG_SMP
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
        struct _cpuid4_info *this_leaf, *sibling_leaf;
        unsigned long num_threads_sharing;
        int index_msb, i;
        struct cpuinfo_x86 *c = &cpu_data(cpu);

        this_leaf = CPUID4_INFO_IDX(cpu, index);
        num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;

        if (num_threads_sharing == 1)
                cpu_set(cpu, this_leaf->shared_cpu_map);
        else {
                index_msb = get_count_order(num_threads_sharing);

                for_each_online_cpu(i) {
                        if (cpu_data(i).apicid >> index_msb ==
                            c->apicid >> index_msb) {
                                cpu_set(i, this_leaf->shared_cpu_map);
                                if (i != cpu && per_cpu(cpuid4_info, i)) {
                                        sibling_leaf = CPUID4_INFO_IDX(i, index);
                                        cpu_set(cpu, sibling_leaf->shared_cpu_map);
                                }
                        }
                }
        }
}

static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
        struct _cpuid4_info *this_leaf, *sibling_leaf;
        int sibling;

        this_leaf = CPUID4_INFO_IDX(cpu, index);
        for_each_cpu_mask_nr(sibling, this_leaf->shared_cpu_map) {
                sibling_leaf = CPUID4_INFO_IDX(sibling, index);
                cpu_clear(cpu, sibling_leaf->shared_cpu_map);
        }
}
#else
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) {}
static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) {}
#endif
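
/*
 * CPUs share a given cache level when the upper bits of their APIC IDs
 * (above the bits covering num_threads_sharing) match; that is what the
 * apicid >> index_msb comparison above implements, and the result is what
 * ends up exported as shared_cpu_map / shared_cpu_list in sysfs.
 */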

static void __cpuinit free_cache_attributes(unsigned int cpu)
{
        int i;

        for (i = 0; i < num_cache_leaves; i++)
                cache_remove_shared_cpu_map(cpu, i);

        kfree(per_cpu(cpuid4_info, cpu));
        per_cpu(cpuid4_info, cpu) = NULL;
}

static int __cpuinit detect_cache_attributes(unsigned int cpu)
{
        struct _cpuid4_info *this_leaf;
        unsigned long j;
        int retval;
        cpumask_t oldmask;

        if (num_cache_leaves == 0)
                return -ENOENT;

        per_cpu(cpuid4_info, cpu) = kzalloc(
            sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
        if (per_cpu(cpuid4_info, cpu) == NULL)
                return -ENOMEM;

        /*
         * cpuid4_cache_lookup() must run on the CPU being probed, so
         * temporarily bind the current task to it.
         */
        oldmask = current->cpus_allowed;
        retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
        if (retval)
                goto out;

        /* Do cpuid and store the results */
        for (j = 0; j < num_cache_leaves; j++) {
                this_leaf = CPUID4_INFO_IDX(cpu, j);
                retval = cpuid4_cache_lookup(j, this_leaf);
                if (unlikely(retval < 0)) {
                        int i;

                        for (i = 0; i < j; i++)
                                cache_remove_shared_cpu_map(cpu, i);
                        break;
                }
                cache_shared_cpu_map_setup(cpu, j);
        }
        set_cpus_allowed_ptr(current, &oldmask);

out:
        if (retval) {
                kfree(per_cpu(cpuid4_info, cpu));
                per_cpu(cpuid4_info, cpu) = NULL;
        }

        return retval;
}

#ifdef CONFIG_SYSFS

#include <linux/kobject.h>
#include <linux/sysfs.h>

extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */

/* pointer to kobject for cpuX/cache */
static DEFINE_PER_CPU(struct kobject *, cache_kobject);

struct _index_kobject {
        struct kobject kobj;
        unsigned int cpu;
        unsigned short index;
};

/* pointer to array of kobjects for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct _index_kobject *, index_kobject);
#define INDEX_KOBJECT_PTR(x, y)	(&((per_cpu(index_kobject, x))[y]))
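
/*
 * The code below populates, for each online CPU, a sysfs tree of the form
 * (paths are illustrative):
 *
 *   /sys/devices/system/cpu/cpuX/cache/indexY/
 *       type  level  size  coherency_line_size  ways_of_associativity
 *       number_of_sets  physical_line_partition  shared_cpu_map
 *       shared_cpu_list  cache_disable
 */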

#define show_one_plus(file_name, object, val)				\
static ssize_t show_##file_name						\
			(struct _cpuid4_info *this_leaf, char *buf)	\
{									\
	return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
}

show_one_plus(level, eax.split.level, 0);
show_one_plus(coherency_line_size, ebx.split.coherency_line_size, 1);
show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1);
show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1);
show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);

static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf)
{
        return sprintf(buf, "%luK\n", this_leaf->size / 1024);
}

static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
                                        int type, char *buf)
{
        ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
        int n = 0;

        if (len > 1) {
                cpumask_t *mask = &this_leaf->shared_cpu_map;

                n = type ?
                        cpulist_scnprintf(buf, len-2, *mask) :
                        cpumask_scnprintf(buf, len-2, *mask);
                buf[n++] = '\n';
                buf[n] = '\0';
        }
        return n;
}

static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf)
{
        return show_shared_cpu_map_func(leaf, 0, buf);
}

static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf)
{
        return show_shared_cpu_map_func(leaf, 1, buf);
}

static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf)
{
        switch (this_leaf->eax.split.type) {
        case CACHE_TYPE_DATA:
                return sprintf(buf, "Data\n");
        case CACHE_TYPE_INST:
                return sprintf(buf, "Instruction\n");
        case CACHE_TYPE_UNIFIED:
                return sprintf(buf, "Unified\n");
        default:
                return sprintf(buf, "Unknown\n");
        }
}

#define to_object(k)	container_of(k, struct _index_kobject, kobj)
#define to_attr(a)	container_of(a, struct _cache_attr, attr)

#ifdef CONFIG_PCI
static struct pci_dev *get_k8_northbridge(int node)
{
        struct pci_dev *dev = NULL;
        int i;

        for (i = 0; i <= node; i++) {
                do {
                        dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
                        if (!dev)
                                break;
                } while (!pci_match_id(&k8_nb_id[0], dev));
                if (!dev)
                        break;
        }
        return dev;
}
#else
static struct pci_dev *get_k8_northbridge(int node)
{
        return NULL;
}
#endif

static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf)
{
        int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map));
        struct pci_dev *dev = NULL;
        ssize_t ret = 0;
        int i;

        if (!this_leaf->can_disable)
                return sprintf(buf, "Feature not enabled\n");

        dev = get_k8_northbridge(node);
        if (!dev) {
                printk(KERN_ERR "Attempting AMD northbridge operation on a system with no northbridge\n");
                return -EINVAL;
        }

        for (i = 0; i < 2; i++) {
                unsigned int reg;

                pci_read_config_dword(dev, 0x1BC + i * 4, &reg);

                ret += sprintf(buf, "%sEntry: %d\n", buf, i);
                ret += sprintf(buf, "%sReads: %s\tNew Entries: %s\n",
                        buf,
                        reg & 0x80000000 ? "Disabled" : "Allowed",
                        reg & 0x40000000 ? "Disabled" : "Allowed");
                ret += sprintf(buf, "%sSubCache: %x\tIndex: %x\n",
                        buf, (reg & 0x30000) >> 16, reg & 0xfff);
        }
        return ret;
}

static ssize_t
store_cache_disable(struct _cpuid4_info *this_leaf, const char *buf,
                    size_t count)
{
        int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map));
        struct pci_dev *dev = NULL;
        unsigned int ret, index, val;

        if (!this_leaf->can_disable)
                return 0;

        if (strlen(buf) > 15)
                return -EINVAL;

        ret = sscanf(buf, "%x %x", &index, &val);
        if (ret != 2)
                return -EINVAL;
        if (index > 1)
                return -EINVAL;

        val |= 0xc0000000;
        dev = get_k8_northbridge(node);
        if (!dev) {
                printk(KERN_ERR "Attempting AMD northbridge operation on a system with no northbridge\n");
                return -EINVAL;
        }

        pci_write_config_dword(dev, 0x1BC + index * 4, val & ~0x40000000);
        wbinvd();
        pci_write_config_dword(dev, 0x1BC + index * 4, val);

        return 1;
}

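/*
 * Usage sketch for the cache_disable attribute (hypothetical paths; assumes
 * a family 0x10 CPU whose L3 leaf has can_disable set):
 *
 *   cat /sys/devices/system/cpu/cpu0/cache/index3/cache_disable
 *   echo "1 200" > /sys/devices/system/cpu/cpu0/cache/index3/cache_disable
 *
 * The write expects two hex numbers, "<entry> <value>" with entry 0 or 1;
 * store_cache_disable() ORs 0xc0000000 into the value and writes it to the
 * northbridge config register at 0x1BC or 0x1C0.
 */
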
struct _cache_attr {
        struct attribute attr;
        ssize_t (*show)(struct _cpuid4_info *, char *);
        ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
};

#define define_one_ro(_name) \
static struct _cache_attr _name = \
	__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(physical_line_partition);
define_one_ro(ways_of_associativity);
define_one_ro(number_of_sets);
define_one_ro(size);
define_one_ro(shared_cpu_map);
define_one_ro(shared_cpu_list);

static struct _cache_attr cache_disable = __ATTR(cache_disable, 0644,
                                                 show_cache_disable,
                                                 store_cache_disable);

static struct attribute *default_attrs[] = {
        &type.attr,
        &level.attr,
        &coherency_line_size.attr,
        &physical_line_partition.attr,
        &ways_of_associativity.attr,
        &number_of_sets.attr,
        &size.attr,
        &shared_cpu_map.attr,
        &shared_cpu_list.attr,
        &cache_disable.attr,
        NULL
};

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
        struct _cache_attr *fattr = to_attr(attr);
        struct _index_kobject *this_leaf = to_object(kobj);
        ssize_t ret;

        ret = fattr->show ?
                fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
                        buf) :
                0;
        return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
                     const char *buf, size_t count)
{
        struct _cache_attr *fattr = to_attr(attr);
        struct _index_kobject *this_leaf = to_object(kobj);
        ssize_t ret;

        ret = fattr->store ?
                fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
                        buf, count) :
                0;
        return ret;
}

static struct sysfs_ops sysfs_ops = {
        .show = show,
        .store = store,
};

static struct kobj_type ktype_cache = {
        .sysfs_ops = &sysfs_ops,
        .default_attrs = default_attrs,
};

static struct kobj_type ktype_percpu_entry = {
        .sysfs_ops = &sysfs_ops,
};

static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
{
        kfree(per_cpu(cache_kobject, cpu));
        kfree(per_cpu(index_kobject, cpu));
        per_cpu(cache_kobject, cpu) = NULL;
        per_cpu(index_kobject, cpu) = NULL;
        free_cache_attributes(cpu);
}

static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
{
        int err;

        if (num_cache_leaves == 0)
                return -ENOENT;

        err = detect_cache_attributes(cpu);
        if (err)
                return err;

        /* Allocate all required memory */
        per_cpu(cache_kobject, cpu) =
                kzalloc(sizeof(struct kobject), GFP_KERNEL);
        if (unlikely(per_cpu(cache_kobject, cpu) == NULL))
                goto err_out;

        per_cpu(index_kobject, cpu) = kzalloc(
            sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
        if (unlikely(per_cpu(index_kobject, cpu) == NULL))
                goto err_out;

        return 0;

err_out:
        cpuid4_cache_sysfs_exit(cpu);
        return -ENOMEM;
}

static cpumask_t cache_dev_map = CPU_MASK_NONE;

/* Add/Remove cache interface for CPU device */
static int __cpuinit cache_add_dev(struct sys_device *sys_dev)
{
        unsigned int cpu = sys_dev->id;
        unsigned long i, j;
        struct _index_kobject *this_object;
        int retval;

        retval = cpuid4_cache_sysfs_init(cpu);
        if (unlikely(retval < 0))
                return retval;

        retval = kobject_init_and_add(per_cpu(cache_kobject, cpu),
                                      &ktype_percpu_entry,
                                      &sys_dev->kobj, "%s", "cache");
        if (retval < 0) {
                cpuid4_cache_sysfs_exit(cpu);
                return retval;
        }

        for (i = 0; i < num_cache_leaves; i++) {
                this_object = INDEX_KOBJECT_PTR(cpu, i);
                this_object->cpu = cpu;
                this_object->index = i;
                retval = kobject_init_and_add(&(this_object->kobj),
                                              &ktype_cache,
                                              per_cpu(cache_kobject, cpu),
                                              "index%1lu", i);
                if (unlikely(retval)) {
                        for (j = 0; j < i; j++) {
                                kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
                        }
                        kobject_put(per_cpu(cache_kobject, cpu));
                        cpuid4_cache_sysfs_exit(cpu);
                        return retval;
                }
                kobject_uevent(&(this_object->kobj), KOBJ_ADD);
        }
        cpu_set(cpu, cache_dev_map);

        kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD);
        return 0;
}

static void __cpuinit cache_remove_dev(struct sys_device *sys_dev)
{
        unsigned int cpu = sys_dev->id;
        unsigned long i;

        if (per_cpu(cpuid4_info, cpu) == NULL)
                return;
        if (!cpu_isset(cpu, cache_dev_map))
                return;
        cpu_clear(cpu, cache_dev_map);

        for (i = 0; i < num_cache_leaves; i++)
                kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
        kobject_put(per_cpu(cache_kobject, cpu));
        cpuid4_cache_sysfs_exit(cpu);
}

static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
                                            unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;
        struct sys_device *sys_dev;

        sys_dev = get_cpu_sysdev(cpu);
        switch (action) {
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                cache_add_dev(sys_dev);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                cache_remove_dev(sys_dev);
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
        .notifier_call = cacheinfo_cpu_callback,
};

static int __cpuinit cache_sysfs_init(void)
{
        int i;

        if (num_cache_leaves == 0)
                return 0;

        for_each_online_cpu(i) {
                int err;
                struct sys_device *sys_dev = get_cpu_sysdev(i);

                err = cache_add_dev(sys_dev);
                if (err)
                        return err;
        }
        register_hotcpu_notifier(&cacheinfo_cpu_notifier);
        return 0;
}

device_initcall(cache_sysfs_init);

#endif