632fa06ea162c567923044ddf4e3ced0339d24bd
[deliverable/linux.git] / arch / s390 / kernel / cache.c
1 /*
2 * Extract CPU cache information and expose them via sysfs.
3 *
4 * Copyright IBM Corp. 2012
5 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
6 */
7
8 #include <linux/seq_file.h>
9 #include <linux/cpu.h>
10 #include <linux/cacheinfo.h>
11 #include <asm/facility.h>
12
/*
 * Cache scope values as reported by the ECAG topology word,
 * one 2-bit field per cache level (see struct cache_info).
 */
enum {
	CACHE_SCOPE_NOTEXISTS,	/* no cache at this level */
	CACHE_SCOPE_PRIVATE,	/* cache is private to this CPU */
	CACHE_SCOPE_SHARED,	/* cache is shared between CPUs */
	CACHE_SCOPE_RESERVED,	/* reserved encoding; treated as non-existent */
};
19
/*
 * Cache type values as reported by ECAG (2-bit "type" field).
 * These are the machine encodings; cache_type_map translates them
 * to the generic enum cache_type.
 */
enum {
	CTYPE_SEPARATE,		/* separate data and instruction caches */
	CTYPE_DATA,		/* data cache only */
	CTYPE_INSTRUCTION,	/* instruction cache only */
	CTYPE_UNIFIED,		/* unified data+instruction cache */
};
26
/*
 * Attribute-indication codes for the ECAG instruction ("ai" operand):
 * which attribute to extract.
 */
enum {
	EXTRACT_TOPOLOGY,	/* summary of the whole cache topology */
	EXTRACT_LINE_SIZE,	/* cache line size in bytes */
	EXTRACT_SIZE,		/* total cache size in bytes */
	EXTRACT_ASSOCIATIVITY,	/* set associativity */
};
33
/*
 * Type-indication codes for the ECAG instruction ("ti" operand).
 * Unified and data caches deliberately share encoding 0: for a level
 * with a unified cache the data query addresses the whole cache.
 */
enum {
	CACHE_TI_UNIFIED = 0,
	CACHE_TI_DATA = 0,
	CACHE_TI_INSTRUCTION,
};
39
/*
 * Per-level descriptor within the ECAG topology summary: one byte per
 * cache level, with scope and type packed into the low nibble.
 * Layout must match the hardware format exactly - do not reorder.
 */
struct cache_info {
	unsigned char	    : 4;	/* reserved bits */
	unsigned char scope : 2;	/* CACHE_SCOPE_* */
	unsigned char type  : 2;	/* CTYPE_* */
};
45
/* The topology summary describes at most eight cache levels. */
#define CACHE_MAX_LEVEL 8
/*
 * The ECAG topology summary is returned as one 64-bit value; overlay
 * it with an array of per-level descriptors for convenient access.
 */
union cache_topology {
	struct cache_info ci[CACHE_MAX_LEVEL];
	unsigned long long raw;
};
51
/*
 * Human readable names indexed by the generic enum cache_type value
 * (CACHE_TYPE_INST == 1, CACHE_TYPE_DATA == 2, CACHE_TYPE_UNIFIED == 4);
 * unused index values map to empty strings.
 */
static const char * const cache_type_string[] = {
	"",
	"Instruction",
	"Data",
	"",
	"Unified",
};
59
/* Translate the machine's CTYPE_* encoding to the generic enum cache_type. */
static const enum cache_type cache_type_map[] = {
	[CTYPE_SEPARATE] = CACHE_TYPE_SEPARATE,
	[CTYPE_DATA] = CACHE_TYPE_DATA,
	[CTYPE_INSTRUCTION] = CACHE_TYPE_INST,
	[CTYPE_UNIFIED] = CACHE_TYPE_UNIFIED,
};
66
67 void show_cacheinfo(struct seq_file *m)
68 {
69 struct cpu_cacheinfo *this_cpu_ci;
70 struct cacheinfo *cache;
71 int idx;
72
73 get_online_cpus();
74 this_cpu_ci = get_cpu_cacheinfo(cpumask_any(cpu_online_mask));
75 for (idx = 0; idx < this_cpu_ci->num_leaves; idx++) {
76 cache = this_cpu_ci->info_list + idx;
77 seq_printf(m, "cache%-11d: ", idx);
78 seq_printf(m, "level=%d ", cache->level);
79 seq_printf(m, "type=%s ", cache_type_string[cache->type]);
80 seq_printf(m, "scope=%s ",
81 cache->disable_sysfs ? "Shared" : "Private");
82 seq_printf(m, "size=%dK ", cache->size >> 10);
83 seq_printf(m, "line_size=%u ", cache->coherency_line_size);
84 seq_printf(m, "associativity=%d", cache->ways_of_associativity);
85 seq_puts(m, "\n");
86 }
87 put_online_cpus();
88 }
89
90 static inline enum cache_type get_cache_type(struct cache_info *ci, int level)
91 {
92 if (level >= CACHE_MAX_LEVEL)
93 return CACHE_TYPE_NOCACHE;
94
95 ci += level;
96
97 if (ci->scope != CACHE_SCOPE_SHARED && ci->scope != CACHE_SCOPE_PRIVATE)
98 return CACHE_TYPE_NOCACHE;
99
100 return cache_type_map[ci->type];
101 }
102
/*
 * Execute the EXTRACT CACHE ATTRIBUTE (ecag) instruction.
 * @ai: attribute indication (EXTRACT_*)
 * @li: cache level (0-based)
 * @ti: type indication (CACHE_TI_*)
 * Returns the extracted attribute value.
 */
static inline unsigned long ecag(int ai, int li, int ti)
{
	unsigned long cmd, val;

	/* Pack the operands into the command word expected by ecag. */
	cmd = ai << 4 | li << 1 | ti;
	asm volatile(".insn rsy,0xeb000000004c,%0,0,0(%1)" /* ecag */
		     : "=d" (val) : "a" (cmd));
	return val;
}
112
113 static void ci_leaf_init(struct cacheinfo *this_leaf, int private,
114 enum cache_type type, unsigned int level)
115 {
116 int ti, num_sets;
117 int cpu = smp_processor_id();
118
119 if (type == CACHE_TYPE_INST)
120 ti = CACHE_TI_INSTRUCTION;
121 else
122 ti = CACHE_TI_UNIFIED;
123
124 this_leaf->level = level + 1;
125 this_leaf->type = type;
126 this_leaf->coherency_line_size = ecag(EXTRACT_LINE_SIZE, level, ti);
127 this_leaf->ways_of_associativity = ecag(EXTRACT_ASSOCIATIVITY,
128 level, ti);
129 this_leaf->size = ecag(EXTRACT_SIZE, level, ti);
130
131 num_sets = this_leaf->size / this_leaf->coherency_line_size;
132 num_sets /= this_leaf->ways_of_associativity;
133 this_leaf->number_of_sets = num_sets;
134 cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
135 if (!private)
136 this_leaf->disable_sysfs = true;
137 }
138
139 int init_cache_level(unsigned int cpu)
140 {
141 struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
142 unsigned int level = 0, leaves = 0;
143 union cache_topology ct;
144 enum cache_type ctype;
145
146 if (!this_cpu_ci)
147 return -EINVAL;
148
149 ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
150 do {
151 ctype = get_cache_type(&ct.ci[0], level);
152 if (ctype == CACHE_TYPE_NOCACHE)
153 break;
154 /* Separate instruction and data caches */
155 leaves += (ctype == CACHE_TYPE_SEPARATE) ? 2 : 1;
156 } while (++level < CACHE_MAX_LEVEL);
157
158 this_cpu_ci->num_levels = level;
159 this_cpu_ci->num_leaves = leaves;
160
161 return 0;
162 }
163
164 int populate_cache_leaves(unsigned int cpu)
165 {
166 unsigned int level, idx, pvt;
167 union cache_topology ct;
168 enum cache_type ctype;
169 struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
170 struct cacheinfo *this_leaf = this_cpu_ci->info_list;
171
172 ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
173 for (idx = 0, level = 0; level < this_cpu_ci->num_levels &&
174 idx < this_cpu_ci->num_leaves; idx++, level++) {
175 if (!this_leaf)
176 return -EINVAL;
177
178 pvt = (ct.ci[level].scope == CACHE_SCOPE_PRIVATE) ? 1 : 0;
179 ctype = get_cache_type(&ct.ci[0], level);
180 if (ctype == CACHE_TYPE_SEPARATE) {
181 ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_DATA, level);
182 ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_INST, level);
183 } else {
184 ci_leaf_init(this_leaf++, pvt, ctype, level);
185 }
186 }
187 return 0;
188 }
This page took 0.041975 seconds and 4 git commands to generate.