Commit | Line | Data |
---|---|---|
43eab878 WD |
1 | /* |
2 | * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code. | |
3 | * | |
4 | * ARMv7 support: Jean Pihet <jpihet@mvista.com> | |
5 | * 2010 (c) MontaVista Software, LLC. | |
6 | * | |
7 | * Copied from ARMv6 code, with the low level code inspired | |
8 | * by the ARMv7 Oprofile code. | |
9 | * | |
10 | * Cortex-A8 has up to 4 configurable performance counters and | |
11 | * a single cycle counter. | |
12 | * Cortex-A9 has up to 31 configurable performance counters and | |
13 | * a single cycle counter. | |
14 | * | |
15 | * All counters can be enabled/disabled and IRQ masked separately. The cycle | |
16 | * counter and all 4 performance counters together can be reset separately. | |
17 | */ | |
18 | ||
19 | #ifdef CONFIG_CPU_V7 | |
a505addc | 20 | |
6d4eaf99 WD |
21 | /* |
22 | * Common ARMv7 event types | |
23 | * | |
24 | * Note: An implementation may not be able to count all of these events | |
25 | * but the encodings are considered to be `reserved' in the case that | |
26 | * they are not available. | |
27 | */ | |
43eab878 | 28 | enum armv7_perf_types { |
4d301512 WD |
29 | ARMV7_PERFCTR_PMNC_SW_INCR = 0x00, |
30 | ARMV7_PERFCTR_L1_ICACHE_REFILL = 0x01, | |
31 | ARMV7_PERFCTR_ITLB_REFILL = 0x02, | |
32 | ARMV7_PERFCTR_L1_DCACHE_REFILL = 0x03, | |
33 | ARMV7_PERFCTR_L1_DCACHE_ACCESS = 0x04, | |
34 | ARMV7_PERFCTR_DTLB_REFILL = 0x05, | |
35 | ARMV7_PERFCTR_MEM_READ = 0x06, | |
36 | ARMV7_PERFCTR_MEM_WRITE = 0x07, | |
37 | ARMV7_PERFCTR_INSTR_EXECUTED = 0x08, | |
38 | ARMV7_PERFCTR_EXC_TAKEN = 0x09, | |
39 | ARMV7_PERFCTR_EXC_EXECUTED = 0x0A, | |
40 | ARMV7_PERFCTR_CID_WRITE = 0x0B, | |
41 | ||
42 | /* | |
43 | * ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS. | |
43eab878 | 44 | * It counts: |
4d301512 | 45 | * - all (taken) branch instructions, |
43eab878 WD |
46 | * - instructions that explicitly write the PC, |
47 | * - exception generating instructions. | |
48 | */ | |
4d301512 WD |
49 | ARMV7_PERFCTR_PC_WRITE = 0x0C, |
50 | ARMV7_PERFCTR_PC_IMM_BRANCH = 0x0D, | |
51 | ARMV7_PERFCTR_PC_PROC_RETURN = 0x0E, | |
52 | ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS = 0x0F, | |
53 | ARMV7_PERFCTR_PC_BRANCH_MIS_PRED = 0x10, | |
54 | ARMV7_PERFCTR_CLOCK_CYCLES = 0x11, | |
55 | ARMV7_PERFCTR_PC_BRANCH_PRED = 0x12, | |
6d4eaf99 WD |
56 | |
57 | /* These events are defined by the PMUv2 supplement (ARM DDI 0457A). */ | |
4d301512 WD |
58 | ARMV7_PERFCTR_MEM_ACCESS = 0x13, |
59 | ARMV7_PERFCTR_L1_ICACHE_ACCESS = 0x14, | |
60 | ARMV7_PERFCTR_L1_DCACHE_WB = 0x15, | |
61 | ARMV7_PERFCTR_L2_CACHE_ACCESS = 0x16, | |
62 | ARMV7_PERFCTR_L2_CACHE_REFILL = 0x17, | |
63 | ARMV7_PERFCTR_L2_CACHE_WB = 0x18, | |
64 | ARMV7_PERFCTR_BUS_ACCESS = 0x19, | |
65 | ARMV7_PERFCTR_MEM_ERROR = 0x1A, | |
66 | ARMV7_PERFCTR_INSTR_SPEC = 0x1B, | |
67 | ARMV7_PERFCTR_TTBR_WRITE = 0x1C, | |
68 | ARMV7_PERFCTR_BUS_CYCLES = 0x1D, | |
69 | ||
70 | ARMV7_PERFCTR_CPU_CYCLES = 0xFF | |
43eab878 WD |
71 | }; |
72 | ||
73 | /* ARMv7 Cortex-A8 specific event types */ | |
74 | enum armv7_a8_perf_types { | |
4d301512 WD |
75 | ARMV7_A8_PERFCTR_L2_CACHE_ACCESS = 0x43, |
76 | ARMV7_A8_PERFCTR_L2_CACHE_REFILL = 0x44, | |
77 | ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS = 0x50, | |
0445e7a5 | 78 | ARMV7_A8_PERFCTR_STALL_ISIDE = 0x56, |
43eab878 WD |
79 | }; |
80 | ||
81 | /* ARMv7 Cortex-A9 specific event types */ | |
82 | enum armv7_a9_perf_types { | |
4d301512 | 83 | ARMV7_A9_PERFCTR_INSTR_CORE_RENAME = 0x68, |
0445e7a5 WD |
84 | ARMV7_A9_PERFCTR_STALL_ICACHE = 0x60, |
85 | ARMV7_A9_PERFCTR_STALL_DISPATCH = 0x66, | |
43eab878 WD |
86 | }; |
87 | ||
0c205cbe WD |
88 | /* ARMv7 Cortex-A5 specific event types */ |
89 | enum armv7_a5_perf_types { | |
4d301512 WD |
90 | ARMV7_A5_PERFCTR_PREFETCH_LINEFILL = 0xc2, |
91 | ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP = 0xc3, | |
0c205cbe WD |
92 | }; |
93 | ||
14abd038 WD |
94 | /* ARMv7 Cortex-A15 specific event types */ |
95 | enum armv7_a15_perf_types { | |
4d301512 WD |
96 | ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ = 0x40, |
97 | ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE = 0x41, | |
98 | ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ = 0x42, | |
99 | ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE = 0x43, | |
14abd038 | 100 | |
4d301512 WD |
101 | ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ = 0x4C, |
102 | ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE = 0x4D, | |
14abd038 | 103 | |
4d301512 WD |
104 | ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ = 0x50, |
105 | ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE = 0x51, | |
106 | ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ = 0x52, | |
107 | ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE = 0x53, | |
14abd038 | 108 | |
4d301512 | 109 | ARMV7_A15_PERFCTR_PC_WRITE_SPEC = 0x76, |
14abd038 WD |
110 | }; |
111 | ||
43eab878 WD |
112 | /* |
113 | * Cortex-A8 HW events mapping | |
114 | * | |
115 | * The hardware events that we support. We do support cache operations but | |
116 | * we have harvard caches and no way to combine instruction and data | |
117 | * accesses/misses in hardware. | |
118 | */ | |
119 | static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = { | |
0445e7a5 WD |
120 | [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, |
121 | [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED, | |
122 | [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, | |
123 | [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL, | |
124 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE, | |
125 | [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | |
126 | [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED, | |
127 | [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV7_A8_PERFCTR_STALL_ISIDE, | |
128 | [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED, | |
43eab878 WD |
129 | }; |
130 | ||
131 | static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] | |
132 | [PERF_COUNT_HW_CACHE_OP_MAX] | |
133 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = { | |
134 | [C(L1D)] = { | |
135 | /* | |
136 | * The performance counters don't differentiate between read | |
137 | * and write accesses/misses so this isn't strictly correct, | |
138 | * but it's the best we can do. Writes and reads get | |
139 | * combined. | |
140 | */ | |
141 | [C(OP_READ)] = { | |
4d301512 WD |
142 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, |
143 | [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, | |
43eab878 WD |
144 | }, |
145 | [C(OP_WRITE)] = { | |
4d301512 WD |
146 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, |
147 | [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, | |
43eab878 WD |
148 | }, |
149 | [C(OP_PREFETCH)] = { | |
150 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
151 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
152 | }, | |
153 | }, | |
154 | [C(L1I)] = { | |
155 | [C(OP_READ)] = { | |
4d301512 WD |
156 | [C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS, |
157 | [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, | |
43eab878 WD |
158 | }, |
159 | [C(OP_WRITE)] = { | |
40c390c7 WD |
160 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, |
161 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
43eab878 WD |
162 | }, |
163 | [C(OP_PREFETCH)] = { | |
164 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
165 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
166 | }, | |
167 | }, | |
168 | [C(LL)] = { | |
169 | [C(OP_READ)] = { | |
4d301512 WD |
170 | [C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L2_CACHE_ACCESS, |
171 | [C(RESULT_MISS)] = ARMV7_A8_PERFCTR_L2_CACHE_REFILL, | |
43eab878 WD |
172 | }, |
173 | [C(OP_WRITE)] = { | |
4d301512 WD |
174 | [C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L2_CACHE_ACCESS, |
175 | [C(RESULT_MISS)] = ARMV7_A8_PERFCTR_L2_CACHE_REFILL, | |
43eab878 WD |
176 | }, |
177 | [C(OP_PREFETCH)] = { | |
178 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
179 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
180 | }, | |
181 | }, | |
182 | [C(DTLB)] = { | |
43eab878 WD |
183 | [C(OP_READ)] = { |
184 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
185 | [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, | |
186 | }, | |
187 | [C(OP_WRITE)] = { | |
188 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
189 | [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, | |
190 | }, | |
191 | [C(OP_PREFETCH)] = { | |
192 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
193 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
194 | }, | |
195 | }, | |
196 | [C(ITLB)] = { | |
197 | [C(OP_READ)] = { | |
198 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
4d301512 | 199 | [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL, |
43eab878 WD |
200 | }, |
201 | [C(OP_WRITE)] = { | |
202 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
4d301512 | 203 | [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL, |
43eab878 WD |
204 | }, |
205 | [C(OP_PREFETCH)] = { | |
206 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
207 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
208 | }, | |
209 | }, | |
210 | [C(BPU)] = { | |
211 | [C(OP_READ)] = { | |
4d301512 WD |
212 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, |
213 | [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | |
43eab878 WD |
214 | }, |
215 | [C(OP_WRITE)] = { | |
4d301512 WD |
216 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, |
217 | [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | |
43eab878 WD |
218 | }, |
219 | [C(OP_PREFETCH)] = { | |
220 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
221 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
222 | }, | |
223 | }, | |
89d6c0b5 PZ |
224 | [C(NODE)] = { |
225 | [C(OP_READ)] = { | |
226 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
227 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
228 | }, | |
229 | [C(OP_WRITE)] = { | |
230 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
231 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
232 | }, | |
233 | [C(OP_PREFETCH)] = { | |
234 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
235 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
236 | }, | |
237 | }, | |
43eab878 WD |
238 | }; |
239 | ||
240 | /* | |
241 | * Cortex-A9 HW events mapping | |
242 | */ | |
243 | static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = { | |
0445e7a5 WD |
244 | [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, |
245 | [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_A9_PERFCTR_INSTR_CORE_RENAME, | |
246 | [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, | |
247 | [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL, | |
248 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE, | |
249 | [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | |
250 | [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED, | |
251 | [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV7_A9_PERFCTR_STALL_ICACHE, | |
252 | [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV7_A9_PERFCTR_STALL_DISPATCH, | |
43eab878 WD |
253 | }; |
254 | ||
255 | static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] | |
256 | [PERF_COUNT_HW_CACHE_OP_MAX] | |
257 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = { | |
258 | [C(L1D)] = { | |
259 | /* | |
260 | * The performance counters don't differentiate between read | |
261 | * and write accesses/misses so this isn't strictly correct, | |
262 | * but it's the best we can do. Writes and reads get | |
263 | * combined. | |
264 | */ | |
265 | [C(OP_READ)] = { | |
4d301512 WD |
266 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, |
267 | [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, | |
43eab878 WD |
268 | }, |
269 | [C(OP_WRITE)] = { | |
4d301512 WD |
270 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, |
271 | [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, | |
43eab878 WD |
272 | }, |
273 | [C(OP_PREFETCH)] = { | |
274 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
275 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
276 | }, | |
277 | }, | |
278 | [C(L1I)] = { | |
279 | [C(OP_READ)] = { | |
280 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
4d301512 | 281 | [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, |
43eab878 WD |
282 | }, |
283 | [C(OP_WRITE)] = { | |
284 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
40c390c7 | 285 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, |
43eab878 WD |
286 | }, |
287 | [C(OP_PREFETCH)] = { | |
288 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
289 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
290 | }, | |
291 | }, | |
292 | [C(LL)] = { | |
293 | [C(OP_READ)] = { | |
294 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
295 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
296 | }, | |
297 | [C(OP_WRITE)] = { | |
298 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
299 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
300 | }, | |
301 | [C(OP_PREFETCH)] = { | |
302 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
303 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
304 | }, | |
305 | }, | |
306 | [C(DTLB)] = { | |
43eab878 WD |
307 | [C(OP_READ)] = { |
308 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
309 | [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, | |
310 | }, | |
311 | [C(OP_WRITE)] = { | |
312 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
313 | [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, | |
314 | }, | |
315 | [C(OP_PREFETCH)] = { | |
316 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
317 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
318 | }, | |
319 | }, | |
320 | [C(ITLB)] = { | |
321 | [C(OP_READ)] = { | |
322 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
4d301512 | 323 | [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL, |
43eab878 WD |
324 | }, |
325 | [C(OP_WRITE)] = { | |
326 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
4d301512 | 327 | [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL, |
43eab878 WD |
328 | }, |
329 | [C(OP_PREFETCH)] = { | |
330 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
331 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
332 | }, | |
333 | }, | |
334 | [C(BPU)] = { | |
335 | [C(OP_READ)] = { | |
4d301512 WD |
336 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, |
337 | [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | |
43eab878 WD |
338 | }, |
339 | [C(OP_WRITE)] = { | |
4d301512 WD |
340 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, |
341 | [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | |
43eab878 WD |
342 | }, |
343 | [C(OP_PREFETCH)] = { | |
344 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
345 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
346 | }, | |
347 | }, | |
89d6c0b5 PZ |
348 | [C(NODE)] = { |
349 | [C(OP_READ)] = { | |
350 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
351 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
352 | }, | |
353 | [C(OP_WRITE)] = { | |
354 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
355 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
356 | }, | |
357 | [C(OP_PREFETCH)] = { | |
358 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
359 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
360 | }, | |
361 | }, | |
43eab878 WD |
362 | }; |
363 | ||
0c205cbe WD |
364 | /* |
365 | * Cortex-A5 HW events mapping | |
366 | */ | |
367 | static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = { | |
0445e7a5 WD |
368 | [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, |
369 | [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED, | |
370 | [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, | |
371 | [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL, | |
372 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE, | |
373 | [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | |
374 | [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED, | |
375 | [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED, | |
376 | [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED, | |
0c205cbe WD |
377 | }; |
378 | ||
379 | static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] | |
380 | [PERF_COUNT_HW_CACHE_OP_MAX] | |
381 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = { | |
382 | [C(L1D)] = { | |
383 | [C(OP_READ)] = { | |
4d301512 WD |
384 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, |
385 | [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, | |
0c205cbe WD |
386 | }, |
387 | [C(OP_WRITE)] = { | |
4d301512 WD |
388 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, |
389 | [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, | |
0c205cbe WD |
390 | }, |
391 | [C(OP_PREFETCH)] = { | |
4d301512 WD |
392 | [C(RESULT_ACCESS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL, |
393 | [C(RESULT_MISS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP, | |
0c205cbe WD |
394 | }, |
395 | }, | |
396 | [C(L1I)] = { | |
397 | [C(OP_READ)] = { | |
398 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, | |
4d301512 | 399 | [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, |
0c205cbe WD |
400 | }, |
401 | [C(OP_WRITE)] = { | |
40c390c7 WD |
402 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, |
403 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
0c205cbe WD |
404 | }, |
405 | /* | |
406 | * The prefetch counters don't differentiate between the I | |
407 | * side and the D side. | |
408 | */ | |
409 | [C(OP_PREFETCH)] = { | |
4d301512 WD |
410 | [C(RESULT_ACCESS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL, |
411 | [C(RESULT_MISS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP, | |
0c205cbe WD |
412 | }, |
413 | }, | |
414 | [C(LL)] = { | |
415 | [C(OP_READ)] = { | |
416 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
417 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
418 | }, | |
419 | [C(OP_WRITE)] = { | |
420 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
421 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
422 | }, | |
423 | [C(OP_PREFETCH)] = { | |
424 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
425 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
426 | }, | |
427 | }, | |
428 | [C(DTLB)] = { | |
429 | [C(OP_READ)] = { | |
430 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
431 | [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, | |
432 | }, | |
433 | [C(OP_WRITE)] = { | |
434 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
435 | [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, | |
436 | }, | |
437 | [C(OP_PREFETCH)] = { | |
438 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
439 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
440 | }, | |
441 | }, | |
442 | [C(ITLB)] = { | |
443 | [C(OP_READ)] = { | |
444 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
4d301512 | 445 | [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL, |
0c205cbe WD |
446 | }, |
447 | [C(OP_WRITE)] = { | |
448 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
4d301512 | 449 | [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL, |
0c205cbe WD |
450 | }, |
451 | [C(OP_PREFETCH)] = { | |
452 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
453 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
454 | }, | |
455 | }, | |
456 | [C(BPU)] = { | |
457 | [C(OP_READ)] = { | |
458 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, | |
4d301512 | 459 | [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, |
0c205cbe WD |
460 | }, |
461 | [C(OP_WRITE)] = { | |
462 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, | |
4d301512 | 463 | [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, |
0c205cbe WD |
464 | }, |
465 | [C(OP_PREFETCH)] = { | |
466 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
467 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
468 | }, | |
469 | }, | |
91756acb WD |
470 | [C(NODE)] = { |
471 | [C(OP_READ)] = { | |
472 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
473 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
474 | }, | |
475 | [C(OP_WRITE)] = { | |
476 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
477 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
478 | }, | |
479 | [C(OP_PREFETCH)] = { | |
480 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
481 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
482 | }, | |
483 | }, | |
0c205cbe WD |
484 | }; |
485 | ||
14abd038 WD |
486 | /* |
487 | * Cortex-A15 HW events mapping | |
488 | */ | |
489 | static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = { | |
0445e7a5 WD |
490 | [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, |
491 | [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED, | |
492 | [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, | |
493 | [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL, | |
494 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_A15_PERFCTR_PC_WRITE_SPEC, | |
495 | [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | |
496 | [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES, | |
497 | [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED, | |
498 | [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED, | |
14abd038 WD |
499 | }; |
500 | ||
501 | static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] | |
502 | [PERF_COUNT_HW_CACHE_OP_MAX] | |
503 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = { | |
504 | [C(L1D)] = { | |
505 | [C(OP_READ)] = { | |
4d301512 WD |
506 | [C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ, |
507 | [C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ, | |
14abd038 WD |
508 | }, |
509 | [C(OP_WRITE)] = { | |
4d301512 WD |
510 | [C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE, |
511 | [C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE, | |
14abd038 WD |
512 | }, |
513 | [C(OP_PREFETCH)] = { | |
514 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
515 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
516 | }, | |
517 | }, | |
518 | [C(L1I)] = { | |
519 | /* | |
520 | * Not all performance counters differentiate between read | |
521 | * and write accesses/misses so we're not always strictly | |
522 | * correct, but it's the best we can do. Writes and reads get | |
523 | * combined in these cases. | |
524 | */ | |
525 | [C(OP_READ)] = { | |
526 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, | |
4d301512 | 527 | [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, |
14abd038 WD |
528 | }, |
529 | [C(OP_WRITE)] = { | |
40c390c7 WD |
530 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, |
531 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
14abd038 WD |
532 | }, |
533 | [C(OP_PREFETCH)] = { | |
534 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
535 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
536 | }, | |
537 | }, | |
538 | [C(LL)] = { | |
539 | [C(OP_READ)] = { | |
4d301512 WD |
540 | [C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ, |
541 | [C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ, | |
14abd038 WD |
542 | }, |
543 | [C(OP_WRITE)] = { | |
4d301512 WD |
544 | [C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE, |
545 | [C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE, | |
14abd038 WD |
546 | }, |
547 | [C(OP_PREFETCH)] = { | |
548 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
549 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
550 | }, | |
551 | }, | |
552 | [C(DTLB)] = { | |
553 | [C(OP_READ)] = { | |
554 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
4d301512 | 555 | [C(RESULT_MISS)] = ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ, |
14abd038 WD |
556 | }, |
557 | [C(OP_WRITE)] = { | |
558 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
4d301512 | 559 | [C(RESULT_MISS)] = ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE, |
14abd038 WD |
560 | }, |
561 | [C(OP_PREFETCH)] = { | |
562 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
563 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
564 | }, | |
565 | }, | |
566 | [C(ITLB)] = { | |
567 | [C(OP_READ)] = { | |
568 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
4d301512 | 569 | [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL, |
14abd038 WD |
570 | }, |
571 | [C(OP_WRITE)] = { | |
572 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
4d301512 | 573 | [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL, |
14abd038 WD |
574 | }, |
575 | [C(OP_PREFETCH)] = { | |
576 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
577 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
578 | }, | |
579 | }, | |
580 | [C(BPU)] = { | |
581 | [C(OP_READ)] = { | |
582 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, | |
4d301512 | 583 | [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, |
14abd038 WD |
584 | }, |
585 | [C(OP_WRITE)] = { | |
586 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, | |
4d301512 | 587 | [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, |
14abd038 WD |
588 | }, |
589 | [C(OP_PREFETCH)] = { | |
590 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
591 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
592 | }, | |
593 | }, | |
91756acb WD |
594 | [C(NODE)] = { |
595 | [C(OP_READ)] = { | |
596 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
597 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
598 | }, | |
599 | [C(OP_WRITE)] = { | |
600 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
601 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
602 | }, | |
603 | [C(OP_PREFETCH)] = { | |
604 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
605 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
606 | }, | |
607 | }, | |
14abd038 WD |
608 | }; |
609 | ||
d33c88c6 WD |
610 | /* |
611 | * Cortex-A7 HW events mapping | |
612 | */ | |
613 | static const unsigned armv7_a7_perf_map[PERF_COUNT_HW_MAX] = { | |
614 | [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, | |
615 | [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED, | |
616 | [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, | |
617 | [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL, | |
618 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE, | |
619 | [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | |
620 | [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES, | |
621 | [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED, | |
622 | [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED, | |
623 | }; | |
624 | ||
625 | static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] | |
626 | [PERF_COUNT_HW_CACHE_OP_MAX] | |
627 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = { | |
628 | [C(L1D)] = { | |
629 | /* | |
630 | * The performance counters don't differentiate between read | |
631 | * and write accesses/misses so this isn't strictly correct, | |
632 | * but it's the best we can do. Writes and reads get | |
633 | * combined. | |
634 | */ | |
635 | [C(OP_READ)] = { | |
636 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, | |
637 | [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, | |
638 | }, | |
639 | [C(OP_WRITE)] = { | |
640 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, | |
641 | [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, | |
642 | }, | |
643 | [C(OP_PREFETCH)] = { | |
644 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
645 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
646 | }, | |
647 | }, | |
648 | [C(L1I)] = { | |
649 | [C(OP_READ)] = { | |
650 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, | |
651 | [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, | |
652 | }, | |
653 | [C(OP_WRITE)] = { | |
40c390c7 WD |
654 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, |
655 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
d33c88c6 WD |
656 | }, |
657 | [C(OP_PREFETCH)] = { | |
658 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
659 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
660 | }, | |
661 | }, | |
662 | [C(LL)] = { | |
663 | [C(OP_READ)] = { | |
664 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_CACHE_ACCESS, | |
665 | [C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL, | |
666 | }, | |
667 | [C(OP_WRITE)] = { | |
668 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_CACHE_ACCESS, | |
669 | [C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL, | |
670 | }, | |
671 | [C(OP_PREFETCH)] = { | |
672 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
673 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
674 | }, | |
675 | }, | |
676 | [C(DTLB)] = { | |
677 | [C(OP_READ)] = { | |
678 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
679 | [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, | |
680 | }, | |
681 | [C(OP_WRITE)] = { | |
682 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
683 | [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, | |
684 | }, | |
685 | [C(OP_PREFETCH)] = { | |
686 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
687 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
688 | }, | |
689 | }, | |
690 | [C(ITLB)] = { | |
691 | [C(OP_READ)] = { | |
692 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
693 | [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL, | |
694 | }, | |
695 | [C(OP_WRITE)] = { | |
696 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
697 | [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL, | |
698 | }, | |
699 | [C(OP_PREFETCH)] = { | |
700 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
701 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
702 | }, | |
703 | }, | |
704 | [C(BPU)] = { | |
705 | [C(OP_READ)] = { | |
706 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, | |
707 | [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | |
708 | }, | |
709 | [C(OP_WRITE)] = { | |
710 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, | |
711 | [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | |
712 | }, | |
713 | [C(OP_PREFETCH)] = { | |
714 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
715 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
716 | }, | |
717 | }, | |
718 | [C(NODE)] = { | |
719 | [C(OP_READ)] = { | |
720 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
721 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
722 | }, | |
723 | [C(OP_WRITE)] = { | |
724 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
725 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
726 | }, | |
727 | [C(OP_PREFETCH)] = { | |
728 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | |
729 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | |
730 | }, | |
731 | }, | |
732 | }; | |
733 | ||
43eab878 | 734 | /* |
c691bb62 | 735 | * Perf Events' indices |
43eab878 | 736 | */ |
c691bb62 WD |
737 | #define ARMV7_IDX_CYCLE_COUNTER 0 |
738 | #define ARMV7_IDX_COUNTER0 1 | |
7279adbd SK |
739 | #define ARMV7_IDX_COUNTER_LAST(cpu_pmu) \ |
740 | (ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1) | |
c691bb62 WD |
741 | |
742 | #define ARMV7_MAX_COUNTERS 32 | |
743 | #define ARMV7_COUNTER_MASK (ARMV7_MAX_COUNTERS - 1) | |
43eab878 WD |
744 | |
745 | /* | |
c691bb62 | 746 | * ARMv7 low level PMNC access |
43eab878 | 747 | */ |
43eab878 WD |
748 | |
749 | /* | |
c691bb62 | 750 | * Perf Event to low level counters mapping |
43eab878 | 751 | */ |
c691bb62 WD |
752 | #define ARMV7_IDX_TO_COUNTER(x) \ |
753 | (((x) - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK) | |
43eab878 WD |
754 | |
755 | /* | |
756 | * Per-CPU PMNC: config reg | |
757 | */ | |
758 | #define ARMV7_PMNC_E (1 << 0) /* Enable all counters */ | |
759 | #define ARMV7_PMNC_P (1 << 1) /* Reset all counters */ | |
760 | #define ARMV7_PMNC_C (1 << 2) /* Cycle counter reset */ | |
761 | #define ARMV7_PMNC_D (1 << 3) /* CCNT counts every 64th cpu cycle */ | |
762 | #define ARMV7_PMNC_X (1 << 4) /* Export to ETM */ | |
763 | #define ARMV7_PMNC_DP (1 << 5) /* Disable CCNT if non-invasive debug*/ | |
764 | #define ARMV7_PMNC_N_SHIFT 11 /* Number of counters supported */ | |
765 | #define ARMV7_PMNC_N_MASK 0x1f | |
766 | #define ARMV7_PMNC_MASK 0x3f /* Mask for writable bits */ | |
767 | ||
768 | /* | |
43eab878 | 769 | * FLAG: counters overflow flag status reg |
43eab878 | 770 | */ |
43eab878 WD |
771 | #define ARMV7_FLAG_MASK 0xffffffff /* Mask for writable bits */ |
772 | #define ARMV7_OVERFLOWED_MASK ARMV7_FLAG_MASK | |
43eab878 WD |
773 | |
774 | /* | |
a505addc | 775 | * PMXEVTYPER: Event selection reg |
43eab878 | 776 | */ |
f2fe09b0 | 777 | #define ARMV7_EVTYPE_MASK 0xc80000ff /* Mask for writable bits */ |
a505addc | 778 | #define ARMV7_EVTYPE_EVENT 0xff /* Mask for EVENT bits */ |
43eab878 WD |
779 | |
780 | /* | |
a505addc | 781 | * Event filters for PMUv2 |
43eab878 | 782 | */ |
a505addc WD |
783 | #define ARMV7_EXCLUDE_PL1 (1 << 31) |
784 | #define ARMV7_EXCLUDE_USER (1 << 30) | |
785 | #define ARMV7_INCLUDE_HYP (1 << 27) | |
43eab878 | 786 | |
6330aae7 | 787 | static inline u32 armv7_pmnc_read(void) |
43eab878 WD |
788 | { |
789 | u32 val; | |
790 | asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val)); | |
791 | return val; | |
792 | } | |
793 | ||
/*
 * Write @val to the PMNC control register, keeping only the architected
 * writable bits. The isb() synchronises the instruction stream before the
 * new control value takes effect.
 */
static inline void armv7_pmnc_write(u32 val)
{
	val &= ARMV7_PMNC_MASK;
	isb();
	asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
}
800 | ||
/*
 * Non-zero (the set of pending overflow flag bits) if any counter has
 * overflowed according to the overflow-status value in @pmnc.
 */
static inline int armv7_pmnc_has_overflowed(u32 pmnc)
{
	return pmnc & ARMV7_OVERFLOWED_MASK;
}
805 | ||
7279adbd | 806 | static inline int armv7_pmnc_counter_valid(struct arm_pmu *cpu_pmu, int idx) |
c691bb62 | 807 | { |
7279adbd SK |
808 | return idx >= ARMV7_IDX_CYCLE_COUNTER && |
809 | idx <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); | |
c691bb62 WD |
810 | } |
811 | ||
/*
 * Non-zero if the overflow flag for counter @idx is set in the
 * overflow-status value @pmnc (the counter's bit position is given by
 * ARMV7_IDX_TO_COUNTER()).
 */
static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx)
{
	return pmnc & BIT(ARMV7_IDX_TO_COUNTER(idx));
}
816 | ||
/*
 * Select counter @idx (via the PMU counter-select register) so that
 * subsequent event-type/counter accesses target it. The isb() ensures the
 * selection is in effect before any following counter access. Returns @idx.
 */
static inline int armv7_pmnc_select_counter(int idx)
{
	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter));
	isb();

	return idx;
}
825 | ||
/*
 * Read the current hardware value of the counter backing @event.
 * The cycle counter is read directly; any other counter must first be
 * selected. Logs an error and returns 0 for an invalid counter index.
 */
static inline u32 armv7pmu_read_counter(struct perf_event *event)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	u32 value = 0;

	if (!armv7_pmnc_counter_valid(cpu_pmu, idx))
		pr_err("CPU%u reading wrong counter %d\n",
			smp_processor_id(), idx);
	else if (idx == ARMV7_IDX_CYCLE_COUNTER)
		/* Cycle counter has its own register. */
		asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
	else if (armv7_pmnc_select_counter(idx) == idx)
		/* Read the currently-selected event counter. */
		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (value));

	return value;
}
843 | ||
/*
 * Write @value into the hardware counter backing @event.
 * The cycle counter is written directly; any other counter must first be
 * selected. Logs an error and does nothing for an invalid counter index.
 */
static inline void armv7pmu_write_counter(struct perf_event *event, u32 value)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (!armv7_pmnc_counter_valid(cpu_pmu, idx))
		pr_err("CPU%u writing wrong counter %d\n",
			smp_processor_id(), idx);
	else if (idx == ARMV7_IDX_CYCLE_COUNTER)
		/* Cycle counter has its own register. */
		asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
	else if (armv7_pmnc_select_counter(idx) == idx)
		/* Write the currently-selected event counter. */
		asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (value));
}
858 | ||
/*
 * Program the event type for counter @idx: select the counter, then write
 * the event/filter value (masked to the writable bits) into the
 * event-type register.
 */
static inline void armv7_pmnc_write_evtsel(int idx, u32 val)
{
	if (armv7_pmnc_select_counter(idx) == idx) {
		val &= ARMV7_EVTYPE_MASK;
		asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
	}
}
866 | ||
25e29c7c | 867 | static inline int armv7_pmnc_enable_counter(int idx) |
43eab878 | 868 | { |
7279adbd | 869 | u32 counter = ARMV7_IDX_TO_COUNTER(idx); |
c691bb62 | 870 | asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter))); |
43eab878 WD |
871 | return idx; |
872 | } | |
873 | ||
25e29c7c | 874 | static inline int armv7_pmnc_disable_counter(int idx) |
43eab878 | 875 | { |
7279adbd | 876 | u32 counter = ARMV7_IDX_TO_COUNTER(idx); |
c691bb62 | 877 | asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter))); |
43eab878 WD |
878 | return idx; |
879 | } | |
880 | ||
25e29c7c | 881 | static inline int armv7_pmnc_enable_intens(int idx) |
43eab878 | 882 | { |
7279adbd | 883 | u32 counter = ARMV7_IDX_TO_COUNTER(idx); |
c691bb62 | 884 | asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter))); |
43eab878 WD |
885 | return idx; |
886 | } | |
887 | ||
/*
 * Disable the overflow interrupt for counter @idx, then clear any overflow
 * flag already latched for it so that a stale pending interrupt cannot
 * fire after the counter is released. The isb()s keep the two register
 * writes ordered. Returns @idx.
 */
static inline int armv7_pmnc_disable_intens(int idx)
{
	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
	isb();
	/* Clear the overflow flag in case an interrupt is pending. */
	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(counter)));
	isb();

	return idx;
}
899 | ||
/*
 * Read the counter overflow status register and write the value back to
 * clear every flag that was set. Returns the flags that were pending.
 */
static inline u32 armv7_pmnc_getreset_flags(void)
{
	u32 val;

	/* Read */
	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));

	/* Write to clear flags */
	val &= ARMV7_FLAG_MASK;
	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));

	return val;
}
913 | ||
#ifdef DEBUG
/*
 * Debug helper: dump the PMU control/status registers and, for each event
 * counter, its current count and event-type selection.
 */
static void armv7_pmnc_dump_regs(struct arm_pmu *cpu_pmu)
{
	u32 val;
	unsigned int cnt;

	printk(KERN_INFO "PMNC registers dump:\n");

	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
	printk(KERN_INFO "PMNC  =0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
	printk(KERN_INFO "CNTENS=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
	printk(KERN_INFO "INTENS=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
	printk(KERN_INFO "FLAGS =0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
	printk(KERN_INFO "SELECT=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
	printk(KERN_INFO "CCNT  =0x%08x\n", val);

	for (cnt = ARMV7_IDX_COUNTER0;
			cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) {
		armv7_pmnc_select_counter(cnt);
		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
		printk(KERN_INFO "CNT[%d] count =0x%08x\n",
			ARMV7_IDX_TO_COUNTER(cnt), val);
		asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
		printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n",
			ARMV7_IDX_TO_COUNTER(cnt), val);
	}
}
#endif
952 | ||
/*
 * Start counting @event on its allocated counter. With the PMU lock held:
 * stop the counter, program its event type (always for the PMNx counters;
 * for the cycle counter only when event filtering is supported), enable
 * its overflow interrupt and finally enable the counter itself.
 */
static void armv7pmu_enable_event(struct perf_event *event)
{
	unsigned long flags;
	struct hw_perf_event *hwc = &event->hw;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
	int idx = hwc->idx;

	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
		pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
			smp_processor_id(), idx);
		return;
	}

	/*
	 * Enable counter and interrupt, and set the counter to count
	 * the event that we're interested in.
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Set event (if destined for PMNx counters)
	 * We only need to set the event for the cycle counter if we
	 * have the ability to perform event filtering.
	 */
	if (cpu_pmu->set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER)
		armv7_pmnc_write_evtsel(idx, hwc->config_base);

	/*
	 * Enable interrupt for this counter
	 */
	armv7_pmnc_enable_intens(idx);

	/*
	 * Enable counter
	 */
	armv7_pmnc_enable_counter(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
998 | ||
/*
 * Stop counting @event: with the PMU lock held, disable its counter and
 * its overflow interrupt. Logs an error and returns early for an invalid
 * counter index.
 */
static void armv7pmu_disable_event(struct perf_event *event)
{
	unsigned long flags;
	struct hw_perf_event *hwc = &event->hw;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
	int idx = hwc->idx;

	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
		pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
			smp_processor_id(), idx);
		return;
	}

	/*
	 * Disable counter and interrupt
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Disable interrupt for this counter
	 */
	armv7_pmnc_disable_intens(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
1030 | ||
/*
 * PMU overflow interrupt handler. Reads and clears the overflow flags,
 * then for each active counter that overflowed: updates the event count,
 * restarts the sampling period and pushes a sample to perf (disabling the
 * event if perf asks for a stop). Returns IRQ_NONE when no counter had
 * overflowed, so a shared interrupt line is handled correctly.
 */
static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
{
	u32 pmnc;
	struct perf_sample_data data;
	struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
	struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events();
	struct pt_regs *regs;
	int idx;

	/*
	 * Get and reset the IRQ flags
	 */
	pmnc = armv7_pmnc_getreset_flags();

	/*
	 * Did an overflow occur?
	 */
	if (!armv7_pmnc_has_overflowed(pmnc))
		return IRQ_NONE;

	/*
	 * Handle the counter(s) overflow(s)
	 */
	regs = get_irq_regs();

	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		/* Ignore if we don't have an event. */
		if (!event)
			continue;

		/*
		 * We have a single interrupt for all counters. Check that
		 * each counter has overflowed before we process it.
		 */
		if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event);
		perf_sample_data_init(&data, 0, hwc->last_period);
		if (!armpmu_event_set_period(event))
			continue;

		if (perf_event_overflow(event, &data, regs))
			cpu_pmu->disable(event);
	}

	/*
	 * Handle the pending perf events.
	 *
	 * Note: this call *must* be run with interrupts disabled. For
	 * platforms that can have the PMU interrupts raised as an NMI, this
	 * will not work.
	 */
	irq_work_run();

	return IRQ_HANDLED;
}
1092 | ||
ed6f2a52 | 1093 | static void armv7pmu_start(struct arm_pmu *cpu_pmu) |
43eab878 WD |
1094 | { |
1095 | unsigned long flags; | |
8be3f9a2 | 1096 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); |
43eab878 | 1097 | |
0f78d2d5 | 1098 | raw_spin_lock_irqsave(&events->pmu_lock, flags); |
43eab878 WD |
1099 | /* Enable all counters */ |
1100 | armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E); | |
0f78d2d5 | 1101 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); |
43eab878 WD |
1102 | } |
1103 | ||
ed6f2a52 | 1104 | static void armv7pmu_stop(struct arm_pmu *cpu_pmu) |
43eab878 WD |
1105 | { |
1106 | unsigned long flags; | |
8be3f9a2 | 1107 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); |
43eab878 | 1108 | |
0f78d2d5 | 1109 | raw_spin_lock_irqsave(&events->pmu_lock, flags); |
43eab878 WD |
1110 | /* Disable all counters */ |
1111 | armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E); | |
0f78d2d5 | 1112 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); |
43eab878 WD |
1113 | } |
1114 | ||
8be3f9a2 | 1115 | static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc, |
ed6f2a52 | 1116 | struct perf_event *event) |
43eab878 WD |
1117 | { |
1118 | int idx; | |
ed6f2a52 SK |
1119 | struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu); |
1120 | struct hw_perf_event *hwc = &event->hw; | |
1121 | unsigned long evtype = hwc->config_base & ARMV7_EVTYPE_EVENT; | |
43eab878 WD |
1122 | |
1123 | /* Always place a cycle counter into the cycle counter. */ | |
a505addc | 1124 | if (evtype == ARMV7_PERFCTR_CPU_CYCLES) { |
c691bb62 | 1125 | if (test_and_set_bit(ARMV7_IDX_CYCLE_COUNTER, cpuc->used_mask)) |
43eab878 WD |
1126 | return -EAGAIN; |
1127 | ||
c691bb62 WD |
1128 | return ARMV7_IDX_CYCLE_COUNTER; |
1129 | } | |
43eab878 | 1130 | |
c691bb62 WD |
1131 | /* |
1132 | * For anything other than a cycle counter, try and use | |
1133 | * the events counters | |
1134 | */ | |
8be3f9a2 | 1135 | for (idx = ARMV7_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) { |
c691bb62 WD |
1136 | if (!test_and_set_bit(idx, cpuc->used_mask)) |
1137 | return idx; | |
43eab878 | 1138 | } |
c691bb62 WD |
1139 | |
1140 | /* The counters are all in use. */ | |
1141 | return -EAGAIN; | |
43eab878 WD |
1142 | } |
1143 | ||
a505addc WD |
1144 | /* |
1145 | * Add an event filter to a given event. This will only work for PMUv2 PMUs. | |
1146 | */ | |
1147 | static int armv7pmu_set_event_filter(struct hw_perf_event *event, | |
1148 | struct perf_event_attr *attr) | |
1149 | { | |
1150 | unsigned long config_base = 0; | |
1151 | ||
1152 | if (attr->exclude_idle) | |
1153 | return -EPERM; | |
1154 | if (attr->exclude_user) | |
1155 | config_base |= ARMV7_EXCLUDE_USER; | |
1156 | if (attr->exclude_kernel) | |
1157 | config_base |= ARMV7_EXCLUDE_PL1; | |
1158 | if (!attr->exclude_hv) | |
1159 | config_base |= ARMV7_INCLUDE_HYP; | |
1160 | ||
1161 | /* | |
1162 | * Install the filter into config_base as this is used to | |
1163 | * construct the event type. | |
1164 | */ | |
1165 | event->config_base = config_base; | |
1166 | ||
1167 | return 0; | |
43eab878 WD |
1168 | } |
1169 | ||
/*
 * Bring the PMU into a known state: disable every counter and its
 * overflow interrupt, then reset the cycle counter and event counters
 * via the PMNC C and P bits. Called with @info pointing at the arm_pmu.
 */
static void armv7pmu_reset(void *info)
{
	struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
	u32 idx, nb_cnt = cpu_pmu->num_events;

	/* The counter and interrupt enable registers are unknown at reset. */
	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
		armv7_pmnc_disable_counter(idx);
		armv7_pmnc_disable_intens(idx);
	}

	/* Initialize & Reset PMNC: C and P bits */
	armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
}
1184 | ||
/* Map generic perf event/cache codes to Cortex-A8 PMU encodings. */
static int armv7_a8_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a8_perf_map,
				&armv7_a8_perf_cache_map, 0xFF);
}

/* Map generic perf event/cache codes to Cortex-A9 PMU encodings. */
static int armv7_a9_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a9_perf_map,
				&armv7_a9_perf_cache_map, 0xFF);
}

/* Map generic perf event/cache codes to Cortex-A5 PMU encodings. */
static int armv7_a5_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a5_perf_map,
				&armv7_a5_perf_cache_map, 0xFF);
}

/* Map generic perf event/cache codes to Cortex-A15 PMU encodings. */
static int armv7_a15_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a15_perf_map,
				&armv7_a15_perf_cache_map, 0xFF);
}

/* Map generic perf event/cache codes to Cortex-A7 PMU encodings. */
static int armv7_a7_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a7_perf_map,
				&armv7_a7_perf_cache_map, 0xFF);
}
1214 | ||
513c99ce SK |
1215 | static void armv7pmu_init(struct arm_pmu *cpu_pmu) |
1216 | { | |
1217 | cpu_pmu->handle_irq = armv7pmu_handle_irq; | |
1218 | cpu_pmu->enable = armv7pmu_enable_event; | |
1219 | cpu_pmu->disable = armv7pmu_disable_event; | |
1220 | cpu_pmu->read_counter = armv7pmu_read_counter; | |
1221 | cpu_pmu->write_counter = armv7pmu_write_counter; | |
1222 | cpu_pmu->get_event_idx = armv7pmu_get_event_idx; | |
1223 | cpu_pmu->start = armv7pmu_start; | |
1224 | cpu_pmu->stop = armv7pmu_stop; | |
1225 | cpu_pmu->reset = armv7pmu_reset; | |
1226 | cpu_pmu->max_period = (1LLU << 32) - 1; | |
43eab878 WD |
1227 | }; |
1228 | ||
/*
 * Number of usable counters: the count of CNTx event counters reported in
 * the PMNC N field, plus one for the dedicated cycle counter.
 */
static u32 armv7_read_num_pmnc_events(void)
{
	u32 nb_cnt;

	/* Read the nb of CNTx counters supported from PMNC */
	nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;

	/* Add the CPU cycles counter and return */
	return nb_cnt + 1;
}
1239 | ||
/* Cortex-A8: common armv7 callbacks plus A8 event map. */
static int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name		= "ARMv7 Cortex-A8";
	cpu_pmu->map_event	= armv7_a8_map_event;
	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
	return 0;
}

/* Cortex-A9: common armv7 callbacks plus A9 event map. */
static int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name		= "ARMv7 Cortex-A9";
	cpu_pmu->map_event	= armv7_a9_map_event;
	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
	return 0;
}

/* Cortex-A5: common armv7 callbacks plus A5 event map. */
static int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name		= "ARMv7 Cortex-A5";
	cpu_pmu->map_event	= armv7_a5_map_event;
	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
	return 0;
}

/* Cortex-A15: PMUv2, so event filtering is also available. */
static int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name		= "ARMv7 Cortex-A15";
	cpu_pmu->map_event	= armv7_a15_map_event;
	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
	return 0;
}

/* Cortex-A7: PMUv2, so event filtering is also available. */
static int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name		= "ARMv7 Cortex-A7";
	cpu_pmu->map_event	= armv7_a7_map_event;
	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
	return 0;
}
43eab878 | 1286 | #else |
/*
 * CONFIG_CPU_V7 is not set: every ARMv7 PMU variant is unavailable, so
 * each init stub reports -ENODEV.
 */
static inline int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
{
	return -ENODEV;
}

static inline int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
{
	return -ENODEV;
}

static inline int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
{
	return -ENODEV;
}

static inline int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
{
	return -ENODEV;
}

static inline int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
{
	return -ENODEV;
}
43eab878 | 1311 | #endif /* CONFIG_CPU_V7 */ |