/*
 * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
 *
 * ARMv7 support: Jean Pihet <jpihet@mvista.com>
 * 2010 (c) MontaVista Software, LLC.
 *
 * Copied from ARMv6 code, with the low level code inspired
 * by the ARMv7 Oprofile code.
 *
 * Cortex-A8 has up to 4 configurable performance counters and
 * a single cycle counter.
 * Cortex-A9 has up to 31 configurable performance counters and
 * a single cycle counter.
 *
 * All counters can be enabled/disabled and IRQ masked separately. The cycle
 * counter and all 4 performance counters together can be reset separately.
 */
#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/irq_regs.h>
#include <asm/vfp.h>
#include "../vfp/vfpinstr.h"

#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
/*
 * Common ARMv7 event types
 *
 * Note: An implementation may not be able to count all of these events
 * but the encodings are considered to be `reserved' in the case that
 * they are not available.
 */
#define ARMV7_PERFCTR_PMNC_SW_INCR			0x00
#define ARMV7_PERFCTR_L1_ICACHE_REFILL			0x01
#define ARMV7_PERFCTR_ITLB_REFILL			0x02
#define ARMV7_PERFCTR_L1_DCACHE_REFILL			0x03
#define ARMV7_PERFCTR_L1_DCACHE_ACCESS			0x04
#define ARMV7_PERFCTR_DTLB_REFILL			0x05
#define ARMV7_PERFCTR_MEM_READ				0x06
#define ARMV7_PERFCTR_MEM_WRITE				0x07
#define ARMV7_PERFCTR_INSTR_EXECUTED			0x08
#define ARMV7_PERFCTR_EXC_TAKEN				0x09
#define ARMV7_PERFCTR_EXC_EXECUTED			0x0A
#define ARMV7_PERFCTR_CID_WRITE				0x0B
/*
 * ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
 * It counts:
 *  - all (taken) branch instructions,
 *  - instructions that explicitly write the PC,
 *  - exception generating instructions.
 */
#define ARMV7_PERFCTR_PC_WRITE				0x0C
#define ARMV7_PERFCTR_PC_IMM_BRANCH			0x0D
#define ARMV7_PERFCTR_PC_PROC_RETURN			0x0E
#define ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS		0x0F
#define ARMV7_PERFCTR_PC_BRANCH_MIS_PRED		0x10
#define ARMV7_PERFCTR_CLOCK_CYCLES			0x11
#define ARMV7_PERFCTR_PC_BRANCH_PRED			0x12

/* These events are defined by the PMUv2 supplement (ARM DDI 0457A). */
#define ARMV7_PERFCTR_MEM_ACCESS			0x13
#define ARMV7_PERFCTR_L1_ICACHE_ACCESS			0x14
#define ARMV7_PERFCTR_L1_DCACHE_WB			0x15
#define ARMV7_PERFCTR_L2_CACHE_ACCESS			0x16
#define ARMV7_PERFCTR_L2_CACHE_REFILL			0x17
#define ARMV7_PERFCTR_L2_CACHE_WB			0x18
#define ARMV7_PERFCTR_BUS_ACCESS			0x19
#define ARMV7_PERFCTR_MEM_ERROR				0x1A
#define ARMV7_PERFCTR_INSTR_SPEC			0x1B
#define ARMV7_PERFCTR_TTBR_WRITE			0x1C
#define ARMV7_PERFCTR_BUS_CYCLES			0x1D

#define ARMV7_PERFCTR_CPU_CYCLES			0xFF
/* ARMv7 Cortex-A8 specific event types */
#define ARMV7_A8_PERFCTR_L2_CACHE_ACCESS		0x43
#define ARMV7_A8_PERFCTR_L2_CACHE_REFILL		0x44
#define ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS		0x50
#define ARMV7_A8_PERFCTR_STALL_ISIDE			0x56

/* ARMv7 Cortex-A9 specific event types */
#define ARMV7_A9_PERFCTR_INSTR_CORE_RENAME		0x68
#define ARMV7_A9_PERFCTR_STALL_ICACHE			0x60
#define ARMV7_A9_PERFCTR_STALL_DISPATCH			0x66

/* ARMv7 Cortex-A5 specific event types */
#define ARMV7_A5_PERFCTR_PREFETCH_LINEFILL		0xc2
#define ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP		0xc3

/* ARMv7 Cortex-A15 specific event types */
#define ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ		0x40
#define ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE	0x41
#define ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ		0x42
#define ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE	0x43

#define ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ		0x4C
#define ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE		0x4D

#define ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ		0x50
#define ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE		0x51
#define ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ		0x52
#define ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE		0x53

#define ARMV7_A15_PERFCTR_PC_WRITE_SPEC			0x76

/* ARMv7 Cortex-A12 specific event types */
#define ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_READ		0x40
#define ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_WRITE	0x41

#define ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_READ		0x50
#define ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_WRITE		0x51

#define ARMV7_A12_PERFCTR_PC_WRITE_SPEC			0x76

#define ARMV7_A12_PERFCTR_PF_TLB_REFILL			0xe7

/* ARMv7 Krait specific event types */
#define KRAIT_PMRESR0_GROUP0				0xcc
#define KRAIT_PMRESR1_GROUP0				0xd0
#define KRAIT_PMRESR2_GROUP0				0xd4
#define KRAIT_VPMRESR0_GROUP0				0xd8

#define KRAIT_PERFCTR_L1_ICACHE_ACCESS			0x10011
#define KRAIT_PERFCTR_L1_ICACHE_MISS			0x10010

#define KRAIT_PERFCTR_L1_ITLB_ACCESS			0x12222
#define KRAIT_PERFCTR_L1_DTLB_ACCESS			0x12210

/* ARMv7 Scorpion specific event types */
#define SCORPION_LPM0_GROUP0				0x4c
#define SCORPION_LPM1_GROUP0				0x50
#define SCORPION_LPM2_GROUP0				0x54
#define SCORPION_L2LPM_GROUP0				0x58
#define SCORPION_VLPM_GROUP0				0x5c

#define SCORPION_ICACHE_ACCESS				0x10053
#define SCORPION_ICACHE_MISS				0x10052

#define SCORPION_DTLB_ACCESS				0x12013
#define SCORPION_DTLB_MISS				0x12012

#define SCORPION_ITLB_MISS				0x12021
/*
 * Cortex-A8 HW events mapping
 *
 * The hardware events that we support. We do support cache operations but
 * we have Harvard caches and no way to combine instruction and data
 * accesses/misses in hardware.
 */
static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV7_A8_PERFCTR_STALL_ISIDE,
};
static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					  [PERF_COUNT_HW_CACHE_OP_MAX]
					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	/*
	 * The performance counters don't differentiate between read and write
	 * accesses/misses so this isn't strictly correct, but it's the best we
	 * can do. Writes and reads get combined.
	 */
	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,

	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,

	[C(LL)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
	[C(LL)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
	[C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
	[C(LL)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_A8_PERFCTR_L2_CACHE_REFILL,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};
/*
 * Cortex-A9 HW events mapping
 */
static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_A9_PERFCTR_INSTR_CORE_RENAME,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV7_A9_PERFCTR_STALL_ICACHE,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= ARMV7_A9_PERFCTR_STALL_DISPATCH,
};
static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					  [PERF_COUNT_HW_CACHE_OP_MAX]
					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	/*
	 * The performance counters don't differentiate between read and write
	 * accesses/misses so this isn't strictly correct, but it's the best we
	 * can do. Writes and reads get combined.
	 */
	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,

	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};
/*
 * Cortex-A5 HW events mapping
 */
static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};
static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					  [PERF_COUNT_HW_CACHE_OP_MAX]
					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
	[C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,

	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
	/*
	 * The prefetch counters don't differentiate between the I side and the
	 * D side.
	 */
	[C(L1I)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
	[C(L1I)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};
/*
 * Cortex-A15 HW events mapping
 */
static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_A15_PERFCTR_PC_WRITE_SPEC,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_BUS_CYCLES,
};
static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					  [PERF_COUNT_HW_CACHE_OP_MAX]
					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE,

	/*
	 * Not all performance counters differentiate between read and write
	 * accesses/misses so we're not always strictly correct, but it's the
	 * best we can do. Writes and reads get combined in these cases.
	 */
	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,

	[C(LL)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ,
	[C(LL)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ,
	[C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE,
	[C(LL)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};
/*
 * Cortex-A7 HW events mapping
 */
static const unsigned armv7_a7_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_BUS_CYCLES,
};
static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					  [PERF_COUNT_HW_CACHE_OP_MAX]
					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	/*
	 * The performance counters don't differentiate between read and write
	 * accesses/misses so this isn't strictly correct, but it's the best we
	 * can do. Writes and reads get combined.
	 */
	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,

	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,

	[C(LL)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_CACHE_ACCESS,
	[C(LL)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
	[C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_CACHE_ACCESS,
	[C(LL)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};
/*
 * Cortex-A12 HW events mapping
 */
static const unsigned armv7_a12_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_A12_PERFCTR_PC_WRITE_SPEC,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_BUS_CYCLES,
};
static const unsigned armv7_a12_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					  [PERF_COUNT_HW_CACHE_OP_MAX]
					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_READ,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_WRITE,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,

	/*
	 * Not all performance counters differentiate between read and write
	 * accesses/misses so we're not always strictly correct, but it's the
	 * best we can do. Writes and reads get combined in these cases.
	 */
	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,

	[C(LL)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_READ,
	[C(LL)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,
	[C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_WRITE,
	[C(LL)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACHE_REFILL,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
	[C(DTLB)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV7_A12_PERFCTR_PF_TLB_REFILL,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_REFILL,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};
/*
 * Krait HW events mapping
 */
static const unsigned krait_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_CLOCK_CYCLES,
};

static const unsigned krait_perf_map_no_branch[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_CLOCK_CYCLES,
};
static const unsigned krait_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					  [PERF_COUNT_HW_CACHE_OP_MAX]
					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	/*
	 * The performance counters don't differentiate between read and write
	 * accesses/misses so this isn't strictly correct, but it's the best we
	 * can do. Writes and reads get combined.
	 */
	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,

	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_ICACHE_ACCESS,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= KRAIT_PERFCTR_L1_ICACHE_MISS,

	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_DTLB_ACCESS,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_DTLB_ACCESS,

	[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)]	= KRAIT_PERFCTR_L1_ITLB_ACCESS,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_ITLB_ACCESS,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};
/*
 * Scorpion HW events mapping
 */
static const unsigned scorpion_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_CLOCK_CYCLES,
};
static const unsigned scorpion_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					  [PERF_COUNT_HW_CACHE_OP_MAX]
					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	/*
	 * The performance counters don't differentiate between read and write
	 * accesses/misses so this isn't strictly correct, but it's the best we
	 * can do. Writes and reads get combined.
	 */
	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,

	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= SCORPION_ICACHE_ACCESS,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= SCORPION_ICACHE_MISS,

	/*
	 * Only ITLB misses and DTLB refills are supported. If users want the
	 * DTLB refill misses, a raw counter must be used.
	 */
	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)]	= SCORPION_DTLB_ACCESS,
	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= SCORPION_DTLB_MISS,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = SCORPION_DTLB_ACCESS,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= SCORPION_DTLB_MISS,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= SCORPION_ITLB_MISS,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= SCORPION_ITLB_MISS,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
};
#define ARMV7_EVENT_ATTR_RESOLVE(m) #m
#define ARMV7_EVENT_ATTR(name, config) \
	PMU_EVENT_ATTR_STRING(name, armv7_event_attr_##name, \
			      "event=" ARMV7_EVENT_ATTR_RESOLVE(config))
ARMV7_EVENT_ATTR(sw_incr, ARMV7_PERFCTR_PMNC_SW_INCR);
ARMV7_EVENT_ATTR(l1i_cache_refill, ARMV7_PERFCTR_L1_ICACHE_REFILL);
ARMV7_EVENT_ATTR(l1i_tlb_refill, ARMV7_PERFCTR_ITLB_REFILL);
ARMV7_EVENT_ATTR(l1d_cache_refill, ARMV7_PERFCTR_L1_DCACHE_REFILL);
ARMV7_EVENT_ATTR(l1d_cache, ARMV7_PERFCTR_L1_DCACHE_ACCESS);
ARMV7_EVENT_ATTR(l1d_tlb_refill, ARMV7_PERFCTR_DTLB_REFILL);
ARMV7_EVENT_ATTR(ld_retired, ARMV7_PERFCTR_MEM_READ);
ARMV7_EVENT_ATTR(st_retired, ARMV7_PERFCTR_MEM_WRITE);
ARMV7_EVENT_ATTR(inst_retired, ARMV7_PERFCTR_INSTR_EXECUTED);
ARMV7_EVENT_ATTR(exc_taken, ARMV7_PERFCTR_EXC_TAKEN);
ARMV7_EVENT_ATTR(exc_return, ARMV7_PERFCTR_EXC_EXECUTED);
ARMV7_EVENT_ATTR(cid_write_retired, ARMV7_PERFCTR_CID_WRITE);
ARMV7_EVENT_ATTR(pc_write_retired, ARMV7_PERFCTR_PC_WRITE);
ARMV7_EVENT_ATTR(br_immed_retired, ARMV7_PERFCTR_PC_IMM_BRANCH);
ARMV7_EVENT_ATTR(br_return_retired, ARMV7_PERFCTR_PC_PROC_RETURN);
ARMV7_EVENT_ATTR(unaligned_ldst_retired, ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS);
ARMV7_EVENT_ATTR(br_mis_pred, ARMV7_PERFCTR_PC_BRANCH_MIS_PRED);
ARMV7_EVENT_ATTR(cpu_cycles, ARMV7_PERFCTR_CLOCK_CYCLES);
ARMV7_EVENT_ATTR(br_pred, ARMV7_PERFCTR_PC_BRANCH_PRED);
static struct attribute *armv7_pmuv1_event_attrs[] = {
	&armv7_event_attr_sw_incr.attr.attr,
	&armv7_event_attr_l1i_cache_refill.attr.attr,
	&armv7_event_attr_l1i_tlb_refill.attr.attr,
	&armv7_event_attr_l1d_cache_refill.attr.attr,
	&armv7_event_attr_l1d_cache.attr.attr,
	&armv7_event_attr_l1d_tlb_refill.attr.attr,
	&armv7_event_attr_ld_retired.attr.attr,
	&armv7_event_attr_st_retired.attr.attr,
	&armv7_event_attr_inst_retired.attr.attr,
	&armv7_event_attr_exc_taken.attr.attr,
	&armv7_event_attr_exc_return.attr.attr,
	&armv7_event_attr_cid_write_retired.attr.attr,
	&armv7_event_attr_pc_write_retired.attr.attr,
	&armv7_event_attr_br_immed_retired.attr.attr,
	&armv7_event_attr_br_return_retired.attr.attr,
	&armv7_event_attr_unaligned_ldst_retired.attr.attr,
	&armv7_event_attr_br_mis_pred.attr.attr,
	&armv7_event_attr_cpu_cycles.attr.attr,
	&armv7_event_attr_br_pred.attr.attr,
	NULL,
};

static struct attribute_group armv7_pmuv1_events_attr_group = {
	.name = "events",
	.attrs = armv7_pmuv1_event_attrs,
};

static const struct attribute_group *armv7_pmuv1_attr_groups[] = {
	&armv7_pmuv1_events_attr_group,
	NULL,
};
ARMV7_EVENT_ATTR(mem_access, ARMV7_PERFCTR_MEM_ACCESS);
ARMV7_EVENT_ATTR(l1i_cache, ARMV7_PERFCTR_L1_ICACHE_ACCESS);
ARMV7_EVENT_ATTR(l1d_cache_wb, ARMV7_PERFCTR_L1_DCACHE_WB);
ARMV7_EVENT_ATTR(l2d_cache, ARMV7_PERFCTR_L2_CACHE_ACCESS);
ARMV7_EVENT_ATTR(l2d_cache_refill, ARMV7_PERFCTR_L2_CACHE_REFILL);
ARMV7_EVENT_ATTR(l2d_cache_wb, ARMV7_PERFCTR_L2_CACHE_WB);
ARMV7_EVENT_ATTR(bus_access, ARMV7_PERFCTR_BUS_ACCESS);
ARMV7_EVENT_ATTR(memory_error, ARMV7_PERFCTR_MEM_ERROR);
ARMV7_EVENT_ATTR(inst_spec, ARMV7_PERFCTR_INSTR_SPEC);
ARMV7_EVENT_ATTR(ttbr_write_retired, ARMV7_PERFCTR_TTBR_WRITE);
ARMV7_EVENT_ATTR(bus_cycles, ARMV7_PERFCTR_BUS_CYCLES);
static struct attribute *armv7_pmuv2_event_attrs[] = {
	&armv7_event_attr_sw_incr.attr.attr,
	&armv7_event_attr_l1i_cache_refill.attr.attr,
	&armv7_event_attr_l1i_tlb_refill.attr.attr,
	&armv7_event_attr_l1d_cache_refill.attr.attr,
	&armv7_event_attr_l1d_cache.attr.attr,
	&armv7_event_attr_l1d_tlb_refill.attr.attr,
	&armv7_event_attr_ld_retired.attr.attr,
	&armv7_event_attr_st_retired.attr.attr,
	&armv7_event_attr_inst_retired.attr.attr,
	&armv7_event_attr_exc_taken.attr.attr,
	&armv7_event_attr_exc_return.attr.attr,
	&armv7_event_attr_cid_write_retired.attr.attr,
	&armv7_event_attr_pc_write_retired.attr.attr,
	&armv7_event_attr_br_immed_retired.attr.attr,
	&armv7_event_attr_br_return_retired.attr.attr,
	&armv7_event_attr_unaligned_ldst_retired.attr.attr,
	&armv7_event_attr_br_mis_pred.attr.attr,
	&armv7_event_attr_cpu_cycles.attr.attr,
	&armv7_event_attr_br_pred.attr.attr,
	&armv7_event_attr_mem_access.attr.attr,
	&armv7_event_attr_l1i_cache.attr.attr,
	&armv7_event_attr_l1d_cache_wb.attr.attr,
	&armv7_event_attr_l2d_cache.attr.attr,
	&armv7_event_attr_l2d_cache_refill.attr.attr,
	&armv7_event_attr_l2d_cache_wb.attr.attr,
	&armv7_event_attr_bus_access.attr.attr,
	&armv7_event_attr_memory_error.attr.attr,
	&armv7_event_attr_inst_spec.attr.attr,
	&armv7_event_attr_ttbr_write_retired.attr.attr,
	&armv7_event_attr_bus_cycles.attr.attr,
	NULL,
};

static struct attribute_group armv7_pmuv2_events_attr_group = {
	.name = "events",
	.attrs = armv7_pmuv2_event_attrs,
};

static const struct attribute_group *armv7_pmuv2_attr_groups[] = {
	&armv7_pmuv2_events_attr_group,
	NULL,
};
/*
 * Perf Events' indices
 */
#define	ARMV7_IDX_CYCLE_COUNTER	0
#define	ARMV7_IDX_COUNTER0	1
#define	ARMV7_IDX_COUNTER_LAST(cpu_pmu) \
	(ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)

#define	ARMV7_MAX_COUNTERS	32
#define	ARMV7_COUNTER_MASK	(ARMV7_MAX_COUNTERS - 1)

/*
 * ARMv7 low level PMNC access
 */

/*
 * Perf Event to low level counters mapping
 */
#define	ARMV7_IDX_TO_COUNTER(x)	\
	(((x) - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK)
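/*
 * Worked example (illustrative only): perf index 1 (ARMV7_IDX_COUNTER0)
 * maps to hardware event counter 0, index 2 to counter 1, and so on. The
 * cycle counter at index 0 is never passed through this macro; it is
 * special-cased by the accessors below.
 */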
/*
 * Per-CPU PMNC: config reg
 */
#define ARMV7_PMNC_E		(1 << 0) /* Enable all counters */
#define ARMV7_PMNC_P		(1 << 1) /* Reset all counters */
#define ARMV7_PMNC_C		(1 << 2) /* Cycle counter reset */
#define ARMV7_PMNC_D		(1 << 3) /* CCNT counts every 64th cpu cycle */
#define ARMV7_PMNC_X		(1 << 4) /* Export to ETM */
#define ARMV7_PMNC_DP		(1 << 5) /* Disable CCNT if non-invasive debug*/
#define	ARMV7_PMNC_N_SHIFT	11	 /* Number of counters supported */
#define	ARMV7_PMNC_N_MASK	0x1f
#define	ARMV7_PMNC_MASK		0x3f	 /* Mask for writable bits */
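/*
 * Illustration (not used by the driver): a PMNC value of 0x2801 would
 * decode as E set (all counters enabled) and
 * N = (0x2801 >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK = 5 event
 * counters. Only the low six bits are writable, hence ARMV7_PMNC_MASK.
 */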
/*
 * FLAG: counters overflow flag status reg
 */
#define	ARMV7_FLAG_MASK		0xffffffff	/* Mask for writable bits */
#define	ARMV7_OVERFLOWED_MASK	ARMV7_FLAG_MASK

/*
 * PMXEVTYPER: Event selection reg
 */
#define	ARMV7_EVTYPE_MASK	0xc80000ff	/* Mask for writable bits */
#define	ARMV7_EVTYPE_EVENT	0xff		/* Mask for EVENT bits */

/*
 * Event filters for PMUv2
 */
#define	ARMV7_EXCLUDE_PL1	(1 << 31)
#define	ARMV7_EXCLUDE_USER	(1 << 30)
#define	ARMV7_INCLUDE_HYP	(1 << 27)
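/*
 * Illustration (not used by the driver): a user-space-only count of event
 * 0x08 (instructions executed) would be programmed as
 *
 *	config_base = 0x08 | ARMV7_EXCLUDE_PL1;
 *
 * The filter bits share a word with the event number, which is why
 * ARMV7_EVTYPE_MASK covers both the top filter bits and the low event byte.
 */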
static inline u32 armv7_pmnc_read(void)
{
	u32 val;
	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
	return val;
}

static inline void armv7_pmnc_write(u32 val)
{
	val &= ARMV7_PMNC_MASK;
	isb();
	asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
}

static inline int armv7_pmnc_has_overflowed(u32 pmnc)
{
	return pmnc & ARMV7_OVERFLOWED_MASK;
}

static inline int armv7_pmnc_counter_valid(struct arm_pmu *cpu_pmu, int idx)
{
	return idx >= ARMV7_IDX_CYCLE_COUNTER &&
		idx <= ARMV7_IDX_COUNTER_LAST(cpu_pmu);
}

static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx)
{
	return pmnc & BIT(ARMV7_IDX_TO_COUNTER(idx));
}

static inline void armv7_pmnc_select_counter(int idx)
{
	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter));
	isb();
}
static inline u32 armv7pmu_read_counter(struct perf_event *event)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	u32 value = 0;

	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
		pr_err("CPU%u reading wrong counter %d\n",
			smp_processor_id(), idx);
	} else if (idx == ARMV7_IDX_CYCLE_COUNTER) {
		asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
	} else {
		armv7_pmnc_select_counter(idx);
		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (value));
	}

	return value;
}

static inline void armv7pmu_write_counter(struct perf_event *event, u32 value)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
		pr_err("CPU%u writing wrong counter %d\n",
			smp_processor_id(), idx);
	} else if (idx == ARMV7_IDX_CYCLE_COUNTER) {
		asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
	} else {
		armv7_pmnc_select_counter(idx);
		asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (value));
	}
}
static inline void armv7_pmnc_write_evtsel(int idx, u32 val)
{
	armv7_pmnc_select_counter(idx);
	val &= ARMV7_EVTYPE_MASK;
	asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
}

static inline void armv7_pmnc_enable_counter(int idx)
{
	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter)));
}

static inline void armv7_pmnc_disable_counter(int idx)
{
	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter)));
}

static inline void armv7_pmnc_enable_intens(int idx)
{
	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter)));
}

static inline void armv7_pmnc_disable_intens(int idx)
{
	u32 counter = ARMV7_IDX_TO_COUNTER(idx);
	asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
	isb();
	/* Clear the overflow flag in case an interrupt is pending. */
	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(counter)));
	isb();
}

static inline u32 armv7_pmnc_getreset_flags(void)
{
	u32 val;

	/* Read */
	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));

	/* Write to clear flags */
	val &= ARMV7_FLAG_MASK;
	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));

	return val;
}
#ifdef DEBUG
static void armv7_pmnc_dump_regs(struct arm_pmu *cpu_pmu)
{
	u32 val;
	unsigned int cnt;

	pr_info("PMNC registers dump:\n");

	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
	pr_info("PMNC  =0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
	pr_info("CNTENS=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
	pr_info("INTENS=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
	pr_info("FLAGS =0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
	pr_info("SELECT=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
	pr_info("CCNT  =0x%08x\n", val);

	for (cnt = ARMV7_IDX_COUNTER0;
			cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) {
		armv7_pmnc_select_counter(cnt);
		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
		pr_info("CNT[%d] count =0x%08x\n",
			ARMV7_IDX_TO_COUNTER(cnt), val);
		asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
		pr_info("CNT[%d] evtsel=0x%08x\n",
			ARMV7_IDX_TO_COUNTER(cnt), val);
	}
}
#endif
static void armv7pmu_enable_event(struct perf_event *event)
{
	unsigned long flags;
	struct hw_perf_event *hwc = &event->hw;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
	int idx = hwc->idx;

	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
		pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
			smp_processor_id(), idx);
		return;
	}

	/*
	 * Enable counter and interrupt, and set the counter to count
	 * the event that we're interested in.
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Set event (if destined for PMNx counters)
	 * We only need to set the event for the cycle counter if we
	 * have the ability to perform event filtering.
	 */
	if (cpu_pmu->set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER)
		armv7_pmnc_write_evtsel(idx, hwc->config_base);

	/*
	 * Enable interrupt for this counter
	 */
	armv7_pmnc_enable_intens(idx);

	/*
	 * Enable counter
	 */
	armv7_pmnc_enable_counter(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv7pmu_disable_event(struct perf_event *event)
{
	unsigned long flags;
	struct hw_perf_event *hwc = &event->hw;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
	int idx = hwc->idx;

	if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
		pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
			smp_processor_id(), idx);
		return;
	}

	/*
	 * Disable counter and interrupt
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Disable interrupt for this counter
	 */
	armv7_pmnc_disable_intens(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
{
	u32 pmnc;
	struct perf_sample_data data;
	struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
	struct pt_regs *regs;
	int idx;

	/*
	 * Get and reset the IRQ flags
	 */
	pmnc = armv7_pmnc_getreset_flags();

	/*
	 * Did an overflow occur?
	 */
	if (!armv7_pmnc_has_overflowed(pmnc))
		return IRQ_NONE;

	/*
	 * Handle the counter(s) overflow(s)
	 */
	regs = get_irq_regs();

	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		/* Ignore if we don't have an event. */
		if (!event)
			continue;

		/*
		 * We have a single interrupt for all counters. Check that
		 * each counter has overflowed before we process it.
		 */
		if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event);
		perf_sample_data_init(&data, 0, hwc->last_period);
		if (!armpmu_event_set_period(event))
			continue;

		if (perf_event_overflow(event, &data, regs))
			cpu_pmu->disable(event);
	}

	/*
	 * Handle the pending perf events.
	 *
	 * Note: this call *must* be run with interrupts disabled. For
	 * platforms that can have the PMU interrupts raised as an NMI, this
	 * will not work.
	 */
	irq_work_run();

	return IRQ_HANDLED;
}
static void armv7pmu_start(struct arm_pmu *cpu_pmu)
{
	unsigned long flags;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	/* Enable all counters */
	armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv7pmu_stop(struct arm_pmu *cpu_pmu)
{
	unsigned long flags;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	/* Disable all counters */
	armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
				  struct perf_event *event)
{
	int idx;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	unsigned long evtype = hwc->config_base & ARMV7_EVTYPE_EVENT;

	/* Always place a cycle counter into the cycle counter. */
	if (evtype == ARMV7_PERFCTR_CPU_CYCLES) {
		if (test_and_set_bit(ARMV7_IDX_CYCLE_COUNTER, cpuc->used_mask))
			return -EAGAIN;

		return ARMV7_IDX_CYCLE_COUNTER;
	}

	/*
	 * For anything other than a cycle counter, try and use
	 * the events counters
	 */
	for (idx = ARMV7_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
		if (!test_and_set_bit(idx, cpuc->used_mask))
			return idx;
	}

	/* The counters are all in use. */
	return -EAGAIN;
}
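/*
 * Example of the allocation policy above (illustrative): on a PMU with
 * num_events == 5 (cycle counter plus four event counters), a 0xFF cycle
 * event always claims index 0, any other event takes the first free index
 * in 1..4, and a sixth concurrently scheduled event gets -EAGAIN.
 */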
/*
 * Add an event filter to a given event. This will only work for PMUv2 PMUs.
 */
static int armv7pmu_set_event_filter(struct hw_perf_event *event,
				     struct perf_event_attr *attr)
{
	unsigned long config_base = 0;

	if (attr->exclude_idle)
		return -EPERM;
	if (attr->exclude_user)
		config_base |= ARMV7_EXCLUDE_USER;
	if (attr->exclude_kernel)
		config_base |= ARMV7_EXCLUDE_PL1;
	if (!attr->exclude_hv)
		config_base |= ARMV7_INCLUDE_HYP;

	/*
	 * Install the filter into config_base as this is used to
	 * construct the event type.
	 */
	event->config_base = config_base;

	return 0;
}
static void armv7pmu_reset(void *info)
{
	struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
	u32 idx, nb_cnt = cpu_pmu->num_events;

	/* The counter and interrupt enable registers are unknown at reset. */
	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
		armv7_pmnc_disable_counter(idx);
		armv7_pmnc_disable_intens(idx);
	}

	/* Initialize & Reset PMNC: C and P bits */
	armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
}
static int armv7_a8_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a8_perf_map,
				&armv7_a8_perf_cache_map, 0xFF);
}

static int armv7_a9_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a9_perf_map,
				&armv7_a9_perf_cache_map, 0xFF);
}

static int armv7_a5_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a5_perf_map,
				&armv7_a5_perf_cache_map, 0xFF);
}

static int armv7_a15_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a15_perf_map,
				&armv7_a15_perf_cache_map, 0xFF);
}

static int armv7_a7_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a7_perf_map,
				&armv7_a7_perf_cache_map, 0xFF);
}

static int armv7_a12_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv7_a12_perf_map,
				&armv7_a12_perf_cache_map, 0xFF);
}

static int krait_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &krait_perf_map,
				&krait_perf_cache_map, 0xFFFFF);
}

static int krait_map_event_no_branch(struct perf_event *event)
{
	return armpmu_map_event(event, &krait_perf_map_no_branch,
				&krait_perf_cache_map, 0xFFFFF);
}

static int scorpion_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &scorpion_perf_map,
				&scorpion_perf_cache_map, 0xFFFFF);
}
static void armv7pmu_init(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->handle_irq	= armv7pmu_handle_irq;
	cpu_pmu->enable		= armv7pmu_enable_event;
	cpu_pmu->disable	= armv7pmu_disable_event;
	cpu_pmu->read_counter	= armv7pmu_read_counter;
	cpu_pmu->write_counter	= armv7pmu_write_counter;
	cpu_pmu->get_event_idx	= armv7pmu_get_event_idx;
	cpu_pmu->start		= armv7pmu_start;
	cpu_pmu->stop		= armv7pmu_stop;
	cpu_pmu->reset		= armv7pmu_reset;
	cpu_pmu->max_period	= (1LLU << 32) - 1;
}

static void armv7_read_num_pmnc_events(void *info)
{
	int *nb_cnt = info;

	/* Read the nb of CNTx counters supported from PMNC */
	*nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;

	/* Add the CPU cycles counter */
	*nb_cnt += 1;
}

static int armv7_probe_num_events(struct arm_pmu *arm_pmu)
{
	return smp_call_function_any(&arm_pmu->supported_cpus,
				     armv7_read_num_pmnc_events,
				     &arm_pmu->num_events, 1);
}
static int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name		= "armv7_cortex_a8";
	cpu_pmu->map_event	= armv7_a8_map_event;
	cpu_pmu->pmu.attr_groups = armv7_pmuv1_attr_groups;
	return armv7_probe_num_events(cpu_pmu);
}

static int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name		= "armv7_cortex_a9";
	cpu_pmu->map_event	= armv7_a9_map_event;
	cpu_pmu->pmu.attr_groups = armv7_pmuv1_attr_groups;
	return armv7_probe_num_events(cpu_pmu);
}

static int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name		= "armv7_cortex_a5";
	cpu_pmu->map_event	= armv7_a5_map_event;
	cpu_pmu->pmu.attr_groups = armv7_pmuv1_attr_groups;
	return armv7_probe_num_events(cpu_pmu);
}

static int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name		= "armv7_cortex_a15";
	cpu_pmu->map_event	= armv7_a15_map_event;
	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
	cpu_pmu->pmu.attr_groups = armv7_pmuv2_attr_groups;
	return armv7_probe_num_events(cpu_pmu);
}

static int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name		= "armv7_cortex_a7";
	cpu_pmu->map_event	= armv7_a7_map_event;
	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
	cpu_pmu->pmu.attr_groups = armv7_pmuv2_attr_groups;
	return armv7_probe_num_events(cpu_pmu);
}

static int armv7_a12_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name		= "armv7_cortex_a12";
	cpu_pmu->map_event	= armv7_a12_map_event;
	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
	cpu_pmu->pmu.attr_groups = armv7_pmuv2_attr_groups;
	return armv7_probe_num_events(cpu_pmu);
}

static int armv7_a17_pmu_init(struct arm_pmu *cpu_pmu)
{
	int ret = armv7_a12_pmu_init(cpu_pmu);
	cpu_pmu->name = "armv7_cortex_a17";
	cpu_pmu->pmu.attr_groups = armv7_pmuv2_attr_groups;
	return ret;
}
/*
 * Krait Performance Monitor Region Event Selection Register (PMRESRn)
 *
 *            31   30     24     16     8      0
 *            +--------------------------------+
 *  PMRESR0   | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 0
 *            +--------------------------------+
 *  PMRESR1   | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 1
 *            +--------------------------------+
 *  PMRESR2   | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 2
 *            +--------------------------------+
 *  VPMRESR0  | EN |  CC  |  CC  |  CC  |  CC  |   N = 2, R = ?
 *            +--------------------------------+
 *              EN | G=3  | G=2  | G=1  | G=0
 *
 *  Event Encoding:
 *
 *      hwc->config_base = 0xNRCCG
 *
 *      N  = prefix, 1 for Krait CPU (PMRESRn), 2 for Venum VFP (VPMRESR)
 *      R  = region register
 *      CC = class of events the group G is choosing from
 *      G  = group or particular event
 *
 *  Example: 0x12021 is a Krait CPU event in PMRESR2's group 1 with code 2
 *
 *  A region (R) corresponds to a piece of the CPU (execution unit, instruction
 *  unit, etc.) while the event code (CC) corresponds to a particular class of
 *  events (interrupts for example). An event code is broken down into
 *  groups (G) that can be mapped into the PMU (irq, fiqs, and irq+fiqs for
 *  example).
 */
#define KRAIT_EVENT		(1 << 16)
#define VENUM_EVENT		(2 << 16)
#define KRAIT_EVENT_MASK	(KRAIT_EVENT | VENUM_EVENT)
#define PMRESRn_EN		BIT(31)

#define EVENT_REGION(event)	(((event) >> 12) & 0xf)		/* R */
#define EVENT_GROUP(event)	((event) & 0xf)			/* G */
#define EVENT_CODE(event)	(((event) >> 4) & 0xff)		/* CC */
#define EVENT_VENUM(event)	(!!(event & VENUM_EVENT))	/* N=2 */
#define EVENT_CPU(event)	(!!(event & KRAIT_EVENT))	/* N=1 */
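/*
 * Worked decode of the 0x12021 example above (illustrative only):
 *
 *	EVENT_CPU(0x12021)    -> true  (N = 1, Krait CPU event)
 *	EVENT_REGION(0x12021) -> 2     (R  = PMRESR2)
 *	EVENT_CODE(0x12021)   -> 0x02  (CC = event code 2)
 *	EVENT_GROUP(0x12021)  -> 1     (G  = group 1)
 */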
static u32 krait_read_pmresrn(int n)
{
	u32 val;

	switch (n) {
	case 0:
		asm volatile("mrc p15, 1, %0, c9, c15, 0" : "=r" (val));
		break;
	case 1:
		asm volatile("mrc p15, 1, %0, c9, c15, 1" : "=r" (val));
		break;
	case 2:
		asm volatile("mrc p15, 1, %0, c9, c15, 2" : "=r" (val));
		break;
	default:
		BUG(); /* Should be validated in krait_pmu_get_event_idx() */
	}

	return val;
}

static void krait_write_pmresrn(int n, u32 val)
{
	switch (n) {
	case 0:
		asm volatile("mcr p15, 1, %0, c9, c15, 0" : : "r" (val));
		break;
	case 1:
		asm volatile("mcr p15, 1, %0, c9, c15, 1" : : "r" (val));
		break;
	case 2:
		asm volatile("mcr p15, 1, %0, c9, c15, 2" : : "r" (val));
		break;
	default:
		BUG(); /* Should be validated in krait_pmu_get_event_idx() */
	}
}
static u32 venum_read_pmresr(void)
{
	u32 val;
	asm volatile("mrc p10, 7, %0, c11, c0, 0" : "=r" (val));
	return val;
}

static void venum_write_pmresr(u32 val)
{
	asm volatile("mcr p10, 7, %0, c11, c0, 0" : : "r" (val));
}

static void venum_pre_pmresr(u32 *venum_orig_val, u32 *fp_orig_val)
{
	u32 venum_new_val;
	u32 fp_new_val;

	BUG_ON(preemptible());
	/* CPACR Enable CP10 and CP11 access */
	*venum_orig_val = get_copro_access();
	venum_new_val = *venum_orig_val | CPACC_SVC(10) | CPACC_SVC(11);
	set_copro_access(venum_new_val);

	/* Enable FPEXC */
	*fp_orig_val = fmrx(FPEXC);
	fp_new_val = *fp_orig_val | FPEXC_EN;
	fmxr(FPEXC, fp_new_val);
}

static void venum_post_pmresr(u32 venum_orig_val, u32 fp_orig_val)
{
	BUG_ON(preemptible());
	/* Restore FPEXC */
	fmxr(FPEXC, fp_orig_val);
	isb();
	/* Restore CPACR */
	set_copro_access(venum_orig_val);
}
static u32 krait_get_pmresrn_event(unsigned int region)
{
	static const u32 pmresrn_table[] = { KRAIT_PMRESR0_GROUP0,
					     KRAIT_PMRESR1_GROUP0,
					     KRAIT_PMRESR2_GROUP0 };
	return pmresrn_table[region];
}

static void krait_evt_setup(int idx, u32 config_base)
{
	u32 val;
	u32 mask;
	u32 vval, fval;
	unsigned int region = EVENT_REGION(config_base);
	unsigned int group = EVENT_GROUP(config_base);
	unsigned int code = EVENT_CODE(config_base);
	unsigned int group_shift;
	bool venum_event = EVENT_VENUM(config_base);

	group_shift = group * 8;
	mask = 0xff << group_shift;

	/* Configure evtsel for the region and group */
	if (venum_event)
		val = KRAIT_VPMRESR0_GROUP0;
	else
		val = krait_get_pmresrn_event(region);
	val += group;
	/* Mix in mode-exclusion bits */
	val |= config_base & (ARMV7_EXCLUDE_USER | ARMV7_EXCLUDE_PL1);
	armv7_pmnc_write_evtsel(idx, val);

	if (venum_event) {
		venum_pre_pmresr(&vval, &fval);
		val = venum_read_pmresr();
		val &= ~mask;
		val |= code << group_shift;
		val |= PMRESRn_EN;
		venum_write_pmresr(val);
		venum_post_pmresr(vval, fval);
	} else {
		val = krait_read_pmresrn(region);
		val &= ~mask;
		val |= code << group_shift;
		val |= PMRESRn_EN;
		krait_write_pmresrn(region, val);
	}
}
static u32 clear_pmresrn_group(u32 val, int group)
{
	u32 mask;
	int group_shift;

	group_shift = group * 8;
	mask = 0xff << group_shift;
	val &= ~mask;

	/* Don't clear enable bit if entire region isn't disabled */
	if (val & ~PMRESRn_EN)
		return val |= PMRESRn_EN;

	return 0;
}

static void krait_clearpmu(u32 config_base)
{
	u32 val;
	u32 vval, fval;
	unsigned int region = EVENT_REGION(config_base);
	unsigned int group = EVENT_GROUP(config_base);
	bool venum_event = EVENT_VENUM(config_base);

	if (venum_event) {
		venum_pre_pmresr(&vval, &fval);
		val = venum_read_pmresr();
		val = clear_pmresrn_group(val, group);
		venum_write_pmresr(val);
		venum_post_pmresr(vval, fval);
	} else {
		val = krait_read_pmresrn(region);
		val = clear_pmresrn_group(val, group);
		krait_write_pmresrn(region, val);
	}
}
static void krait_pmu_disable_event(struct perf_event *event)
{
	unsigned long flags;
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	/* Disable counter and interrupt */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/* Disable counter */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Clear pmresr code (if destined for PMNx counters)
	 */
	if (hwc->config_base & KRAIT_EVENT_MASK)
		krait_clearpmu(hwc->config_base);

	/* Disable interrupt for this counter */
	armv7_pmnc_disable_intens(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void krait_pmu_enable_event(struct perf_event *event)
{
	unsigned long flags;
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	/*
	 * Enable counter and interrupt, and set the counter to count
	 * the event that we're interested in.
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/* Disable counter */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Set event (if destined for PMNx counters)
	 * We set the event for the cycle counter because we
	 * have the ability to perform event filtering.
	 */
	if (hwc->config_base & KRAIT_EVENT_MASK)
		krait_evt_setup(idx, hwc->config_base);
	else
		armv7_pmnc_write_evtsel(idx, hwc->config_base);

	/* Enable interrupt for this counter */
	armv7_pmnc_enable_intens(idx);

	/* Enable counter */
	armv7_pmnc_enable_counter(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
static void krait_pmu_reset(void *info)
{
	u32 vval, fval;
	struct arm_pmu *cpu_pmu = info;
	u32 idx, nb_cnt = cpu_pmu->num_events;

	armv7pmu_reset(info);

	/* Clear all pmresrs */
	krait_write_pmresrn(0, 0);
	krait_write_pmresrn(1, 0);
	krait_write_pmresrn(2, 0);

	venum_pre_pmresr(&vval, &fval);
	venum_write_pmresr(0);
	venum_post_pmresr(vval, fval);

	/* Reset PMxEVNCTCR to sane default */
	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
		armv7_pmnc_select_counter(idx);
		asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
	}
}
static int krait_event_to_bit(struct perf_event *event, unsigned int region,
			      unsigned int group)
{
	int bit;
	struct hw_perf_event *hwc = &event->hw;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);

	if (hwc->config_base & VENUM_EVENT)
		bit = KRAIT_VPMRESR0_GROUP0;
	else
		bit = krait_get_pmresrn_event(region);
	bit -= krait_get_pmresrn_event(0);
	bit += group;
	/*
	 * Lower bits are reserved for use by the counters (see
	 * armv7pmu_get_event_idx() for more info)
	 */
	bit += ARMV7_IDX_COUNTER_LAST(cpu_pmu) + 1;

	return bit;
}
/*
 * We check for column exclusion constraints here.
 * Two events can't use the same group within a pmresr register.
 */
static int krait_pmu_get_event_idx(struct pmu_hw_events *cpuc,
				   struct perf_event *event)
{
	int idx;
	int bit = -1;
	struct hw_perf_event *hwc = &event->hw;
	unsigned int region = EVENT_REGION(hwc->config_base);
	unsigned int code = EVENT_CODE(hwc->config_base);
	unsigned int group = EVENT_GROUP(hwc->config_base);
	bool venum_event = EVENT_VENUM(hwc->config_base);
	bool krait_event = EVENT_CPU(hwc->config_base);

	if (venum_event || krait_event) {
		/* Ignore invalid events */
		if (group > 3 || region > 2)
			return -EINVAL;
		if (venum_event && (code & 0xe0))
			return -EINVAL;

		bit = krait_event_to_bit(event, region, group);
		if (test_and_set_bit(bit, cpuc->used_mask))
			return -EAGAIN;
	}

	idx = armv7pmu_get_event_idx(cpuc, event);
	if (idx < 0 && bit >= 0)
		clear_bit(bit, cpuc->used_mask);

	return idx;
}
static void krait_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
				      struct perf_event *event)
{
	int bit;
	struct hw_perf_event *hwc = &event->hw;
	unsigned int region = EVENT_REGION(hwc->config_base);
	unsigned int group = EVENT_GROUP(hwc->config_base);
	bool venum_event = EVENT_VENUM(hwc->config_base);
	bool krait_event = EVENT_CPU(hwc->config_base);

	if (venum_event || krait_event) {
		bit = krait_event_to_bit(event, region, group);
		clear_bit(bit, cpuc->used_mask);
	}
}
static int krait_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name		= "armv7_krait";
	/* Some early versions of Krait don't support PC write events */
	if (of_property_read_bool(cpu_pmu->plat_device->dev.of_node,
				  "qcom,no-pc-write"))
		cpu_pmu->map_event = krait_map_event_no_branch;
	else
		cpu_pmu->map_event = krait_map_event;
	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
	cpu_pmu->reset		= krait_pmu_reset;
	cpu_pmu->enable		= krait_pmu_enable_event;
	cpu_pmu->disable	= krait_pmu_disable_event;
	cpu_pmu->get_event_idx	= krait_pmu_get_event_idx;
	cpu_pmu->clear_event_idx = krait_pmu_clear_event_idx;
	return armv7_probe_num_events(cpu_pmu);
}
/*
 * Scorpion Local Performance Monitor Register (LPMn)
 *
 *            31   30     24     16     8      0
 *            +--------------------------------+
 *  LPM0      | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 0
 *            +--------------------------------+
 *  LPM1      | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 1
 *            +--------------------------------+
 *  LPM2      | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 2
 *            +--------------------------------+
 *  L2LPM     | EN |  CC  |  CC  |  CC  |  CC  |   N = 1, R = 3
 *            +--------------------------------+
 *  VLPM      | EN |  CC  |  CC  |  CC  |  CC  |   N = 2, R = ?
 *            +--------------------------------+
 *              EN | G=3  | G=2  | G=1  | G=0
 *
 *  Event Encoding:
 *
 *      hwc->config_base = 0xNRCCG
 *
 *      N  = prefix, 1 for Scorpion CPU (LPMn/L2LPM), 2 for Venum VFP (VLPM)
 *      R  = region register
 *      CC = class of events the group G is choosing from
 *      G  = group or particular event
 *
 *  Example: 0x12021 is a Scorpion CPU event in LPM2's group 1 with code 2
 *
 *  A region (R) corresponds to a piece of the CPU (execution unit, instruction
 *  unit, etc.) while the event code (CC) corresponds to a particular class of
 *  events (interrupts for example). An event code is broken down into
 *  groups (G) that can be mapped into the PMU (irq, fiqs, and irq+fiqs for
 *  example).
 */
1668 static u32
scorpion_read_pmresrn(int n
)
1674 asm volatile("mrc p15, 0, %0, c15, c0, 0" : "=r" (val
));
1677 asm volatile("mrc p15, 1, %0, c15, c0, 0" : "=r" (val
));
1680 asm volatile("mrc p15, 2, %0, c15, c0, 0" : "=r" (val
));
1683 asm volatile("mrc p15, 3, %0, c15, c2, 0" : "=r" (val
));
1686 BUG(); /* Should be validated in scorpion_pmu_get_event_idx() */

static void scorpion_write_pmresrn(int n, u32 val)
{
	switch (n) {
	case 0:
		asm volatile("mcr p15, 0, %0, c15, c0, 0" : : "r" (val));
		break;
	case 1:
		asm volatile("mcr p15, 1, %0, c15, c0, 0" : : "r" (val));
		break;
	case 2:
		asm volatile("mcr p15, 2, %0, c15, c0, 0" : : "r" (val));
		break;
	case 3:
		asm volatile("mcr p15, 3, %0, c15, c2, 0" : : "r" (val));
		break;
	default:
		BUG(); /* Should be validated in scorpion_pmu_get_event_idx() */
	}
}

static u32 scorpion_get_pmresrn_event(unsigned int region)
{
	static const u32 pmresrn_table[] = { SCORPION_LPM0_GROUP0,
					     SCORPION_LPM1_GROUP0,
					     SCORPION_LPM2_GROUP0,
					     SCORPION_L2LPM_GROUP0 };
	return pmresrn_table[region];
}

static void scorpion_evt_setup(int idx, u32 config_base)
{
	u32 val;
	u32 mask;
	u32 vval, fval;
	unsigned int region = EVENT_REGION(config_base);
	unsigned int group = EVENT_GROUP(config_base);
	unsigned int code = EVENT_CODE(config_base);
	unsigned int group_shift;
	bool venum_event = EVENT_VENUM(config_base);

	group_shift = group * 8;
	mask = 0xff << group_shift;

	/* Configure evtsel for the region and group */
	if (venum_event)
		val = SCORPION_VLPM_GROUP0;
	else
		val = scorpion_get_pmresrn_event(region);
	val += group;
	/* Mix in mode-exclusion bits */
	val |= config_base & (ARMV7_EXCLUDE_USER | ARMV7_EXCLUDE_PL1);
	armv7_pmnc_write_evtsel(idx, val);

	/* Reset PMxEVNCTCR to sane default */
	asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));

	if (venum_event) {
		venum_pre_pmresr(&vval, &fval);
		val = venum_read_pmresr();
		val &= ~mask;
		val |= code << group_shift;
		val |= PMRESRn_EN;
		venum_write_pmresr(val);
		venum_post_pmresr(vval, fval);
	} else {
		val = scorpion_read_pmresrn(region);
		val &= ~mask;
		val |= code << group_shift;
		val |= PMRESRn_EN;
		scorpion_write_pmresrn(region, val);
	}
}
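
/*
 * Worked illustration: for config_base 0x12021, group = 1 and code = 0x02,
 * so group_shift = 8 and mask = 0xff00. The byte at bits [15:8] of LPM2 is
 * replaced with 0x02 and the EN bit (PMRESRn_EN) is set, leaving the other
 * three groups in that register untouched.
 */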

static void scorpion_clearpmu(u32 config_base)
{
	u32 val;
	u32 vval, fval;
	unsigned int region = EVENT_REGION(config_base);
	unsigned int group = EVENT_GROUP(config_base);
	bool venum_event = EVENT_VENUM(config_base);

	if (venum_event) {
		venum_pre_pmresr(&vval, &fval);
		val = venum_read_pmresr();
		val = clear_pmresrn_group(val, group);
		venum_write_pmresr(val);
		venum_post_pmresr(vval, fval);
	} else {
		val = scorpion_read_pmresrn(region);
		val = clear_pmresrn_group(val, group);
		scorpion_write_pmresrn(region, val);
	}
}

static void scorpion_pmu_disable_event(struct perf_event *event)
{
	unsigned long flags;
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	/* Disable counter and interrupt */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/* Disable counter */
	armv7_pmnc_disable_counter(idx);

	/* Clear pmresr code (if destined for PMNx counters) */
	if (hwc->config_base & KRAIT_EVENT_MASK)
		scorpion_clearpmu(hwc->config_base);

	/* Disable interrupt for this counter */
	armv7_pmnc_disable_intens(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void scorpion_pmu_enable_event(struct perf_event *event)
{
	unsigned long flags;
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	/*
	 * Enable counter and interrupt, and set the counter to count
	 * the event that we're interested in.
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/* Disable counter */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Set event (if destined for PMNx counters)
	 * We don't set the event for the cycle counter because we
	 * don't have the ability to perform event filtering.
	 */
	if (hwc->config_base & KRAIT_EVENT_MASK)
		scorpion_evt_setup(idx, hwc->config_base);
	else if (idx != ARMV7_IDX_CYCLE_COUNTER)
		armv7_pmnc_write_evtsel(idx, hwc->config_base);

	/* Enable interrupt for this counter */
	armv7_pmnc_enable_intens(idx);

	/* Enable counter */
	armv7_pmnc_enable_counter(idx);

	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void scorpion_pmu_reset(void *info)
{
	u32 vval, fval;
	struct arm_pmu *cpu_pmu = info;
	u32 idx, nb_cnt = cpu_pmu->num_events;

	armv7pmu_reset(info);

	/* Clear all pmresrs */
	scorpion_write_pmresrn(0, 0);
	scorpion_write_pmresrn(1, 0);
	scorpion_write_pmresrn(2, 0);
	scorpion_write_pmresrn(3, 0);

	venum_pre_pmresr(&vval, &fval);
	venum_write_pmresr(0);
	venum_post_pmresr(vval, fval);

	/* Reset PMxEVNCTCR to sane default */
	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
		armv7_pmnc_select_counter(idx);
		asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
	}
}

static int scorpion_event_to_bit(struct perf_event *event, unsigned int region,
				 unsigned int group)
{
	int bit;
	struct hw_perf_event *hwc = &event->hw;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);

	if (hwc->config_base & VENUM_EVENT)
		bit = SCORPION_VLPM_GROUP0;
	else
		bit = scorpion_get_pmresrn_event(region);
	bit -= scorpion_get_pmresrn_event(0);
	bit += group;
	/*
	 * Lower bits are reserved for use by the counters (see
	 * armv7pmu_get_event_idx() for more info)
	 */
	bit += ARMV7_IDX_COUNTER_LAST(cpu_pmu) + 1;

	return bit;
}
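
/*
 * Illustration only, assuming five hardware counter indices (the cycle
 * counter plus four event counters, bits 0-4 of used_mask) and the GROUP0
 * constants spaced four apart as defined earlier: the pmresr bookkeeping
 * bits then start at bit 5, so LPM0 group 0 maps to bit 5, LPM0 group 3 to
 * bit 8, LPM1 group 0 to bit 9, and so on up through the VLPM groups.
 */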

/*
 * We check for column exclusion constraints here.
 * Two events can't use the same group within a pmresr register.
 */
static int scorpion_pmu_get_event_idx(struct pmu_hw_events *cpuc,
				      struct perf_event *event)
{
	int idx;
	int bit = -1;
	struct hw_perf_event *hwc = &event->hw;
	unsigned int region = EVENT_REGION(hwc->config_base);
	unsigned int group = EVENT_GROUP(hwc->config_base);
	bool venum_event = EVENT_VENUM(hwc->config_base);
	bool scorpion_event = EVENT_CPU(hwc->config_base);

	if (venum_event || scorpion_event) {
		/* Ignore invalid events */
		if (group > 3 || region > 3)
			return -EINVAL;

		bit = scorpion_event_to_bit(event, region, group);
		if (test_and_set_bit(bit, cpuc->used_mask))
			return -EAGAIN;
	}

	idx = armv7pmu_get_event_idx(cpuc, event);
	if (idx < 0 && bit >= 0)
		clear_bit(bit, cpuc->used_mask);

	return idx;
}
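
/*
 * Sketch of the constraint above (not driver code): two events that both
 * decode to region 2, group 1 - say config_base 0x12021 and 0x12031 -
 * claim the same byte of LPM2 and hence the same used_mask bit, so the
 * second scorpion_pmu_get_event_idx() call returns -EAGAIN even while
 * hardware counters remain free.
 */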

static void scorpion_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
					 struct perf_event *event)
{
	int bit;
	struct hw_perf_event *hwc = &event->hw;
	unsigned int region = EVENT_REGION(hwc->config_base);
	unsigned int group = EVENT_GROUP(hwc->config_base);
	bool venum_event = EVENT_VENUM(hwc->config_base);
	bool scorpion_event = EVENT_CPU(hwc->config_base);

	if (venum_event || scorpion_event) {
		bit = scorpion_event_to_bit(event, region, group);
		clear_bit(bit, cpuc->used_mask);
	}
}

static int scorpion_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name		= "armv7_scorpion";
	cpu_pmu->map_event	= scorpion_map_event;
	cpu_pmu->reset		= scorpion_pmu_reset;
	cpu_pmu->enable		= scorpion_pmu_enable_event;
	cpu_pmu->disable	= scorpion_pmu_disable_event;
	cpu_pmu->get_event_idx	= scorpion_pmu_get_event_idx;
	cpu_pmu->clear_event_idx = scorpion_pmu_clear_event_idx;
	return armv7_probe_num_events(cpu_pmu);
}

static int scorpion_mp_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv7pmu_init(cpu_pmu);
	cpu_pmu->name		= "armv7_scorpion_mp";
	cpu_pmu->map_event	= scorpion_map_event;
	cpu_pmu->reset		= scorpion_pmu_reset;
	cpu_pmu->enable		= scorpion_pmu_enable_event;
	cpu_pmu->disable	= scorpion_pmu_disable_event;
	cpu_pmu->get_event_idx	= scorpion_pmu_get_event_idx;
	cpu_pmu->clear_event_idx = scorpion_pmu_clear_event_idx;
	return armv7_probe_num_events(cpu_pmu);
}

static const struct of_device_id armv7_pmu_of_device_ids[] = {
	{.compatible	= "arm,cortex-a17-pmu",	.data = armv7_a17_pmu_init},
	{.compatible	= "arm,cortex-a15-pmu",	.data = armv7_a15_pmu_init},
	{.compatible	= "arm,cortex-a12-pmu",	.data = armv7_a12_pmu_init},
	{.compatible	= "arm,cortex-a9-pmu",	.data = armv7_a9_pmu_init},
	{.compatible	= "arm,cortex-a8-pmu",	.data = armv7_a8_pmu_init},
	{.compatible	= "arm,cortex-a7-pmu",	.data = armv7_a7_pmu_init},
	{.compatible	= "arm,cortex-a5-pmu",	.data = armv7_a5_pmu_init},
	{.compatible	= "qcom,krait-pmu",	.data = krait_pmu_init},
	{.compatible	= "qcom,scorpion-pmu",	.data = scorpion_pmu_init},
	{.compatible	= "qcom,scorpion-mp-pmu", .data = scorpion_mp_pmu_init},
	{},
};
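
/*
 * How this table is consumed (sketch, based on the shared arm_pmu core):
 * arm_pmu_device_probe() matches the platform device's DT node against
 * these compatibles and calls the matching .data init function - e.g. a
 * node with compatible = "arm,cortex-a9-pmu" ends up in
 * armv7_a9_pmu_init(). Systems without a matching DT entry fall back to
 * armv7_pmu_probe_table below, which keys off the CPU part number instead.
 */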

static const struct pmu_probe_info armv7_pmu_probe_table[] = {
	ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A8, armv7_a8_pmu_init),
	ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A9, armv7_a9_pmu_init),
	{ /* sentinel value */ }
};

static int armv7_pmu_device_probe(struct platform_device *pdev)
{
	return arm_pmu_device_probe(pdev, armv7_pmu_of_device_ids,
				    armv7_pmu_probe_table);
}

static struct platform_driver armv7_pmu_driver = {
	.driver		= {
		.name	= "armv7-pmu",
		.of_match_table = armv7_pmu_of_device_ids,
	},
	.probe		= armv7_pmu_device_probe,
};

static int __init register_armv7_pmu_driver(void)
{
	return platform_driver_register(&armv7_pmu_driver);
}
device_initcall(register_armv7_pmu_driver);
#endif	/* CONFIG_CPU_V7 */