/*
 * ARMv6 Performance counter handling code.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 *
 * ARMv6 has 2 configurable performance counters and a single cycle counter.
 * They all share a single reset bit but can be written to zero so we can use
 * that for a reset.
 *
 * The counters can't be individually enabled or disabled, so when we remove
 * one event and replace it with another we could get spurious counts from the
 * wrong event. However, we can take advantage of the fact that the
 * performance counters can export events to the event bus, and the event bus
 * itself can be monitored. This requires that we *don't* export the events to
 * the event bus. The procedure for disabling a configurable counter is:
 *	- change the counter to count the ETMEXTOUT[0] signal (0x20). This
 *	  effectively stops the counter from counting.
 *	- disable the counter's interrupt generation (each counter has its
 *	  own interrupt enable bit).
 * Once stopped, the counter value can be written as 0 to reset.
 *
 * To enable a counter:
 *	- enable the counter's interrupt generation.
 *	- set the new event type.
 *
 * Note: the dedicated cycle counter only counts cycles and can't be
 * enabled/disabled independently of the others. When we want to disable the
 * cycle counter, we have to just disable the interrupt reporting and start
 * ignoring that counter. When re-enabling, we have to reset the value and
 * enable the interrupt.
 */
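
/*
 * Worked example of the disable sequence above (illustrative only, derived
 * from the bit definitions later in this file): to stop counter 0, set
 * PMCR[27:20] (the counter 0 event field) to 0x20 so that the counter
 * counts the unexported ETMEXTOUT[0] signal, and clear PMCR[4] (the
 * counter 0 interrupt enable). Writing 0 to the counter register then
 * resets it.
 */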

#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
enum armv6_perf_types {
	ARMV6_PERFCTR_ICACHE_MISS = 0x0,
	ARMV6_PERFCTR_IBUF_STALL = 0x1,
	ARMV6_PERFCTR_DDEP_STALL = 0x2,
	ARMV6_PERFCTR_ITLB_MISS = 0x3,
	ARMV6_PERFCTR_DTLB_MISS = 0x4,
	ARMV6_PERFCTR_BR_EXEC = 0x5,
	ARMV6_PERFCTR_BR_MISPREDICT = 0x6,
	ARMV6_PERFCTR_INSTR_EXEC = 0x7,
	ARMV6_PERFCTR_DCACHE_HIT = 0x9,
	ARMV6_PERFCTR_DCACHE_ACCESS = 0xA,
	ARMV6_PERFCTR_DCACHE_MISS = 0xB,
	ARMV6_PERFCTR_DCACHE_WBACK = 0xC,
	ARMV6_PERFCTR_SW_PC_CHANGE = 0xD,
	ARMV6_PERFCTR_MAIN_TLB_MISS = 0xF,
	ARMV6_PERFCTR_EXPL_D_ACCESS = 0x10,
	ARMV6_PERFCTR_LSU_FULL_STALL = 0x11,
	ARMV6_PERFCTR_WBUF_DRAINED = 0x12,
	ARMV6_PERFCTR_CPU_CYCLES = 0xFF,
	ARMV6_PERFCTR_NOP = 0x20,
};

enum armv6_counters {
	ARMV6_CYCLE_COUNTER = 0,
	ARMV6_COUNTER0,
	ARMV6_COUNTER1,
};

/*
 * The hardware events that we support. We do support cache operations but
 * we have Harvard caches and no way to combine instruction and data
 * accesses/misses in hardware.
 */
static const unsigned armv6_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES] = ARMV6_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS] = ARMV6_PERFCTR_INSTR_EXEC,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6_PERFCTR_BR_EXEC,
	[PERF_COUNT_HW_BRANCH_MISSES] = ARMV6_PERFCTR_BR_MISPREDICT,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV6_PERFCTR_IBUF_STALL,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV6_PERFCTR_LSU_FULL_STALL,
};

static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					  [PERF_COUNT_HW_CACHE_OP_MAX]
					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	/*
	 * The performance counters don't differentiate between read and write
	 * accesses/misses so this isn't strictly correct, but it's the best we
	 * can do. Writes and reads get combined.
	 */
	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV6_PERFCTR_DCACHE_ACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV6_PERFCTR_DCACHE_MISS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV6_PERFCTR_DCACHE_ACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV6_PERFCTR_DCACHE_MISS,

	[C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV6_PERFCTR_ICACHE_MISS,

	/*
	 * The ARM performance counters can count micro DTLB misses, micro ITLB
	 * misses and main TLB misses. There isn't an event for TLB misses, so
	 * use the micro misses here and if users want the main TLB misses they
	 * can use a raw counter.
	 */
	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV6_PERFCTR_DTLB_MISS,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV6_PERFCTR_DTLB_MISS,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV6_PERFCTR_ITLB_MISS,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV6_PERFCTR_ITLB_MISS,
};
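
/*
 * Events without a generic mapping, such as main TLB misses
 * (ARMV6_PERFCTR_MAIN_TLB_MISS = 0xF), can still be counted as raw
 * hardware events. A minimal usage sketch, assuming the standard perf
 * rNN raw event syntax:
 *
 *	perf stat -e rf -- <workload>
 */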

enum armv6mpcore_perf_types {
	ARMV6MPCORE_PERFCTR_ICACHE_MISS = 0x0,
	ARMV6MPCORE_PERFCTR_IBUF_STALL = 0x1,
	ARMV6MPCORE_PERFCTR_DDEP_STALL = 0x2,
	ARMV6MPCORE_PERFCTR_ITLB_MISS = 0x3,
	ARMV6MPCORE_PERFCTR_DTLB_MISS = 0x4,
	ARMV6MPCORE_PERFCTR_BR_EXEC = 0x5,
	ARMV6MPCORE_PERFCTR_BR_NOTPREDICT = 0x6,
	ARMV6MPCORE_PERFCTR_BR_MISPREDICT = 0x7,
	ARMV6MPCORE_PERFCTR_INSTR_EXEC = 0x8,
	ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS = 0xA,
	ARMV6MPCORE_PERFCTR_DCACHE_RDMISS = 0xB,
	ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS = 0xC,
	ARMV6MPCORE_PERFCTR_DCACHE_WRMISS = 0xD,
	ARMV6MPCORE_PERFCTR_DCACHE_EVICTION = 0xE,
	ARMV6MPCORE_PERFCTR_SW_PC_CHANGE = 0xF,
	ARMV6MPCORE_PERFCTR_MAIN_TLB_MISS = 0x10,
	ARMV6MPCORE_PERFCTR_EXPL_MEM_ACCESS = 0x11,
	ARMV6MPCORE_PERFCTR_LSU_FULL_STALL = 0x12,
	ARMV6MPCORE_PERFCTR_WBUF_DRAINED = 0x13,
	ARMV6MPCORE_PERFCTR_CPU_CYCLES = 0xFF,
};

/*
 * The hardware events that we support. We do support cache operations but
 * we have Harvard caches and no way to combine instruction and data
 * accesses/misses in hardware.
 */
static const unsigned armv6mpcore_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES] = ARMV6MPCORE_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_INSTR_EXEC,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_BR_EXEC,
	[PERF_COUNT_HW_BRANCH_MISSES] = ARMV6MPCORE_PERFCTR_BR_MISPREDICT,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV6MPCORE_PERFCTR_IBUF_STALL,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV6MPCORE_PERFCTR_LSU_FULL_STALL,
};

static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
						[PERF_COUNT_HW_CACHE_OP_MAX]
						[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DCACHE_RDMISS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DCACHE_WRMISS,

	[C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ICACHE_MISS,

	/*
	 * The ARM performance counters can count micro DTLB misses, micro ITLB
	 * misses and main TLB misses. There isn't an event for TLB misses, so
	 * use the micro misses here and if users want the main TLB misses they
	 * can use a raw counter.
	 */
	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DTLB_MISS,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DTLB_MISS,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ITLB_MISS,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ITLB_MISS,
};

static inline unsigned long
armv6_pmcr_read(void)
{
	u32 val;
	asm volatile("mrc p15, 0, %0, c15, c12, 0" : "=r"(val));
	return val;
}

static inline void
armv6_pmcr_write(unsigned long val)
{
	asm volatile("mcr p15, 0, %0, c15, c12, 0" : : "r"(val));
}

#define ARMV6_PMCR_ENABLE		(1 << 0)
#define ARMV6_PMCR_CTR01_RESET		(1 << 1)
#define ARMV6_PMCR_CCOUNT_RESET		(1 << 2)
#define ARMV6_PMCR_CCOUNT_DIV		(1 << 3)
#define ARMV6_PMCR_COUNT0_IEN		(1 << 4)
#define ARMV6_PMCR_COUNT1_IEN		(1 << 5)
#define ARMV6_PMCR_CCOUNT_IEN		(1 << 6)
#define ARMV6_PMCR_COUNT0_OVERFLOW	(1 << 8)
#define ARMV6_PMCR_COUNT1_OVERFLOW	(1 << 9)
#define ARMV6_PMCR_CCOUNT_OVERFLOW	(1 << 10)
#define ARMV6_PMCR_EVT_COUNT0_SHIFT	20
#define ARMV6_PMCR_EVT_COUNT0_MASK	(0xFF << ARMV6_PMCR_EVT_COUNT0_SHIFT)
#define ARMV6_PMCR_EVT_COUNT1_SHIFT	12
#define ARMV6_PMCR_EVT_COUNT1_MASK	(0xFF << ARMV6_PMCR_EVT_COUNT1_SHIFT)

#define ARMV6_PMCR_OVERFLOWED_MASK \
	(ARMV6_PMCR_COUNT0_OVERFLOW | ARMV6_PMCR_COUNT1_OVERFLOW | \
	 ARMV6_PMCR_CCOUNT_OVERFLOW)
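
/*
 * A minimal illustrative helper (an assumption for exposition, not part of
 * the original driver) capturing the read-modify-write pattern applied to
 * the PMCR throughout this file: clear the bits in @mask, then set the
 * bits in @bits. As in the functions below, callers would be expected to
 * hold the PMU lock with interrupts disabled around the sequence.
 */
static inline void armv6_pmcr_modify(unsigned long mask, unsigned long bits)
{
	unsigned long val = armv6_pmcr_read();

	val &= ~mask;		/* mask out the field being reprogrammed */
	val |= bits;		/* install the new event/enable bits */
	armv6_pmcr_write(val);
}

/*
 * For example, programming counter 0 to count executed instructions with
 * its interrupt enabled would look like:
 *
 *	armv6_pmcr_modify(ARMV6_PMCR_EVT_COUNT0_MASK,
 *			  (ARMV6_PERFCTR_INSTR_EXEC <<
 *			   ARMV6_PMCR_EVT_COUNT0_SHIFT) |
 *			  ARMV6_PMCR_COUNT0_IEN);
 */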

static inline int
armv6_pmcr_has_overflowed(unsigned long pmcr)
{
	return pmcr & ARMV6_PMCR_OVERFLOWED_MASK;
}

static inline int
armv6_pmcr_counter_has_overflowed(unsigned long pmcr,
				  enum armv6_counters counter)
{
	int ret = 0;

	if (ARMV6_CYCLE_COUNTER == counter)
		ret = pmcr & ARMV6_PMCR_CCOUNT_OVERFLOW;
	else if (ARMV6_COUNTER0 == counter)
		ret = pmcr & ARMV6_PMCR_COUNT0_OVERFLOW;
	else if (ARMV6_COUNTER1 == counter)
		ret = pmcr & ARMV6_PMCR_COUNT1_OVERFLOW;
	else
		WARN_ONCE(1, "invalid counter number (%d)\n", counter);

	return ret;
}

static inline u32 armv6pmu_read_counter(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;
	unsigned long value = 0;

	if (ARMV6_CYCLE_COUNTER == counter)
		asm volatile("mrc p15, 0, %0, c15, c12, 1" : "=r"(value));
	else if (ARMV6_COUNTER0 == counter)
		asm volatile("mrc p15, 0, %0, c15, c12, 2" : "=r"(value));
	else if (ARMV6_COUNTER1 == counter)
		asm volatile("mrc p15, 0, %0, c15, c12, 3" : "=r"(value));
	else
		WARN_ONCE(1, "invalid counter number (%d)\n", counter);

	return value;
}

static inline void armv6pmu_write_counter(struct perf_event *event, u32 value)
{
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;

	if (ARMV6_CYCLE_COUNTER == counter)
		asm volatile("mcr p15, 0, %0, c15, c12, 1" : : "r"(value));
	else if (ARMV6_COUNTER0 == counter)
		asm volatile("mcr p15, 0, %0, c15, c12, 2" : : "r"(value));
	else if (ARMV6_COUNTER1 == counter)
		asm volatile("mcr p15, 0, %0, c15, c12, 3" : : "r"(value));
	else
		WARN_ONCE(1, "invalid counter number (%d)\n", counter);
}

static void armv6pmu_enable_event(struct perf_event *event)
{
	unsigned long val, mask, evt, flags;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
	int idx = hwc->idx;

	if (ARMV6_CYCLE_COUNTER == idx) {
		mask = 0;
		evt = ARMV6_PMCR_CCOUNT_IEN;
	} else if (ARMV6_COUNTER0 == idx) {
		mask = ARMV6_PMCR_EVT_COUNT0_MASK;
		evt = (hwc->config_base << ARMV6_PMCR_EVT_COUNT0_SHIFT) |
		      ARMV6_PMCR_COUNT0_IEN;
	} else if (ARMV6_COUNTER1 == idx) {
		mask = ARMV6_PMCR_EVT_COUNT1_MASK;
		evt = (hwc->config_base << ARMV6_PMCR_EVT_COUNT1_SHIFT) |
		      ARMV6_PMCR_COUNT1_IEN;
	} else {
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	/*
	 * Mask out the current event and set the counter to count the event
	 * that we're interested in.
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = armv6_pmcr_read();
	val &= ~mask;
	val |= evt;
	armv6_pmcr_write(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static irqreturn_t
armv6pmu_handle_irq(int irq_num,
		    void *dev)
{
	unsigned long pmcr = armv6_pmcr_read();
	struct perf_sample_data data;
	struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
	struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events();
	struct pt_regs *regs;
	int idx;

	if (!armv6_pmcr_has_overflowed(pmcr))
		return IRQ_NONE;

	regs = get_irq_regs();

	/*
	 * The interrupts are cleared by writing the overflow flags back to
	 * the control register. Rewriting any of the other bits with their
	 * current values has no effect, so write the whole value back.
	 */
	armv6_pmcr_write(pmcr);

	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		/* Ignore if we don't have an event. */
		if (!event)
			continue;

		/*
		 * We have a single interrupt for all counters. Check that
		 * each counter has overflowed before we process it.
		 */
		if (!armv6_pmcr_counter_has_overflowed(pmcr, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event);
		perf_sample_data_init(&data, 0, hwc->last_period);
		if (!armpmu_event_set_period(event))
			continue;

		if (perf_event_overflow(event, &data, regs))
			cpu_pmu->disable(event);
	}

	/*
	 * Handle the pending perf events.
	 *
	 * Note: this call *must* be run with interrupts disabled. For
	 * platforms that can have the PMU interrupts raised as an NMI, this
	 * will not work.
	 */
	irq_work_run();

	return IRQ_HANDLED;
}

static void armv6pmu_start(struct arm_pmu *cpu_pmu)
{
	unsigned long flags, val;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = armv6_pmcr_read();
	val |= ARMV6_PMCR_ENABLE;
	armv6_pmcr_write(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv6pmu_stop(struct arm_pmu *cpu_pmu)
{
	unsigned long flags, val;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = armv6_pmcr_read();
	val &= ~ARMV6_PMCR_ENABLE;
	armv6_pmcr_write(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static int
armv6pmu_get_event_idx(struct pmu_hw_events *cpuc,
		       struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	/* Always place a cycle counter into the cycle counter. */
	if (ARMV6_PERFCTR_CPU_CYCLES == hwc->config_base) {
		if (test_and_set_bit(ARMV6_CYCLE_COUNTER, cpuc->used_mask))
			return -EAGAIN;

		return ARMV6_CYCLE_COUNTER;
	} else {
		/*
		 * For anything other than a cycle counter, try to use the
		 * programmable counters, preferring counter 1 over counter 0.
		 */
		if (!test_and_set_bit(ARMV6_COUNTER1, cpuc->used_mask))
			return ARMV6_COUNTER1;

		if (!test_and_set_bit(ARMV6_COUNTER0, cpuc->used_mask))
			return ARMV6_COUNTER0;

		/* The counters are all in use. */
		return -EAGAIN;
	}
}
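
/*
 * Illustrative allocation outcome (a summary, not from the original
 * source): a CPU-cycles event always lands on the dedicated cycle counter;
 * the first two other events take counter 1 and then counter 0; any
 * further event gets -EAGAIN until a counter is freed.
 */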

static void armv6pmu_disable_event(struct perf_event *event)
{
	unsigned long val, mask, evt, flags;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
	int idx = hwc->idx;

	if (ARMV6_CYCLE_COUNTER == idx) {
		mask = ARMV6_PMCR_CCOUNT_IEN;
		evt = 0;
	} else if (ARMV6_COUNTER0 == idx) {
		mask = ARMV6_PMCR_COUNT0_IEN | ARMV6_PMCR_EVT_COUNT0_MASK;
		evt = ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT0_SHIFT;
	} else if (ARMV6_COUNTER1 == idx) {
		mask = ARMV6_PMCR_COUNT1_IEN | ARMV6_PMCR_EVT_COUNT1_MASK;
		evt = ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT1_SHIFT;
	} else {
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	/*
	 * Mask out the current event and set the counter to count the number
	 * of ETM bus signal assertion cycles. The external reporting should
	 * be disabled and so this should never increment.
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = armv6_pmcr_read();
	val &= ~mask;
	val |= evt;
	armv6_pmcr_write(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void armv6mpcore_pmu_disable_event(struct perf_event *event)
{
	unsigned long val, mask, flags, evt = 0;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
	int idx = hwc->idx;

	if (ARMV6_CYCLE_COUNTER == idx) {
		mask = ARMV6_PMCR_CCOUNT_IEN;
	} else if (ARMV6_COUNTER0 == idx) {
		mask = ARMV6_PMCR_COUNT0_IEN;
	} else if (ARMV6_COUNTER1 == idx) {
		mask = ARMV6_PMCR_COUNT1_IEN;
	} else {
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	/*
	 * Unlike UP ARMv6, we don't have a way of stopping the counters. We
	 * simply disable the interrupt reporting.
	 */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = armv6_pmcr_read();
	val &= ~mask;
	val |= evt;
	armv6_pmcr_write(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static int armv6_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv6_perf_map,
				&armv6_perf_cache_map, 0xFF);
}

static void armv6pmu_init(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->handle_irq = armv6pmu_handle_irq;
	cpu_pmu->enable = armv6pmu_enable_event;
	cpu_pmu->disable = armv6pmu_disable_event;
	cpu_pmu->read_counter = armv6pmu_read_counter;
	cpu_pmu->write_counter = armv6pmu_write_counter;
	cpu_pmu->get_event_idx = armv6pmu_get_event_idx;
	cpu_pmu->start = armv6pmu_start;
	cpu_pmu->stop = armv6pmu_stop;
	cpu_pmu->map_event = armv6_map_event;
	cpu_pmu->num_events = 3;
	cpu_pmu->max_period = (1LLU << 32) - 1;
}

static int armv6_1136_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv6pmu_init(cpu_pmu);
	cpu_pmu->name = "armv6_1136";
	return 0;
}

static int armv6_1156_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv6pmu_init(cpu_pmu);
	cpu_pmu->name = "armv6_1156";
	return 0;
}

static int armv6_1176_pmu_init(struct arm_pmu *cpu_pmu)
{
	armv6pmu_init(cpu_pmu);
	cpu_pmu->name = "armv6_1176";
	return 0;
}

/*
 * ARMv6mpcore is almost identical to single core ARMv6 with the exception
 * that some of the events have different enumerations and that there is no
 * *hack* to stop the programmable counters. To stop the counters we simply
 * disable the interrupt reporting and update the event. When unthrottling we
 * reset the period and enable the interrupt reporting.
 */

static int armv6mpcore_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &armv6mpcore_perf_map,
				&armv6mpcore_perf_cache_map, 0xFF);
}

static int armv6mpcore_pmu_init(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->name = "armv6_11mpcore";
	cpu_pmu->handle_irq = armv6pmu_handle_irq;
	cpu_pmu->enable = armv6pmu_enable_event;
	cpu_pmu->disable = armv6mpcore_pmu_disable_event;
	cpu_pmu->read_counter = armv6pmu_read_counter;
	cpu_pmu->write_counter = armv6pmu_write_counter;
	cpu_pmu->get_event_idx = armv6pmu_get_event_idx;
	cpu_pmu->start = armv6pmu_start;
	cpu_pmu->stop = armv6pmu_stop;
	cpu_pmu->map_event = armv6mpcore_map_event;
	cpu_pmu->num_events = 3;
	cpu_pmu->max_period = (1LLU << 32) - 1;

	return 0;
}
#else
static int armv6_1136_pmu_init(struct arm_pmu *cpu_pmu)
{
	return -ENODEV;
}

static int armv6_1156_pmu_init(struct arm_pmu *cpu_pmu)
{
	return -ENODEV;
}

static int armv6_1176_pmu_init(struct arm_pmu *cpu_pmu)
{
	return -ENODEV;
}

static int armv6mpcore_pmu_init(struct arm_pmu *cpu_pmu)
{
	return -ENODEV;
}
#endif /* CONFIG_CPU_V6 || CONFIG_CPU_V6K */