/*
 * ARMv5 [xscale] Performance counter handling code.
 *
 * Copyright (C) 2010, ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * Based on the previous xscale OProfile code.
 *
 * There are two variants of the xscale PMU that we support:
 *	- xscale1pmu: 2 event counters and a cycle counter
 *	- xscale2pmu: 4 event counters and a cycle counter
 * The two variants share event definitions, but have different
 * PMU structures.
 */

#ifdef CONFIG_CPU_XSCALE
enum xscale_perf_types {
	XSCALE_PERFCTR_ICACHE_MISS = 0x00,
	XSCALE_PERFCTR_ICACHE_NO_DELIVER = 0x01,
	XSCALE_PERFCTR_DATA_STALL = 0x02,
	XSCALE_PERFCTR_ITLB_MISS = 0x03,
	XSCALE_PERFCTR_DTLB_MISS = 0x04,
	XSCALE_PERFCTR_BRANCH = 0x05,
	XSCALE_PERFCTR_BRANCH_MISS = 0x06,
	XSCALE_PERFCTR_INSTRUCTION = 0x07,
	XSCALE_PERFCTR_DCACHE_FULL_STALL = 0x08,
	XSCALE_PERFCTR_DCACHE_FULL_STALL_CONTIG = 0x09,
	XSCALE_PERFCTR_DCACHE_ACCESS = 0x0A,
	XSCALE_PERFCTR_DCACHE_MISS = 0x0B,
	XSCALE_PERFCTR_DCACHE_WRITE_BACK = 0x0C,
	XSCALE_PERFCTR_PC_CHANGED = 0x0D,
	XSCALE_PERFCTR_BCU_REQUEST = 0x10,
	XSCALE_PERFCTR_BCU_FULL = 0x11,
	XSCALE_PERFCTR_BCU_DRAIN = 0x12,
	XSCALE_PERFCTR_BCU_ECC_NO_ELOG = 0x14,
	XSCALE_PERFCTR_BCU_1_BIT_ERR = 0x15,
	XSCALE_PERFCTR_RMW = 0x16,
	/* XSCALE_PERFCTR_CCNT is not hardware defined */
	XSCALE_PERFCTR_CCNT = 0xFE,
	XSCALE_PERFCTR_UNUSED = 0xFF,
};

enum xscale_counters {
	XSCALE_CYCLE_COUNTER = 0,
	XSCALE_COUNTER0,
	XSCALE_COUNTER1,
	XSCALE_COUNTER2,
	XSCALE_COUNTER3,
};
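
/*
 * Indexing note (descriptive, inferred from this file): index 0 is
 * always the dedicated cycle counter and the event counters follow.
 * xscale1 uses only XSCALE_COUNTER0/1 (num_events = 3), while
 * xscale2 uses all four (num_events = 5).
 */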

static const unsigned xscale_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES] = XSCALE_PERFCTR_CCNT,
	[PERF_COUNT_HW_INSTRUCTIONS] = XSCALE_PERFCTR_INSTRUCTION,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = XSCALE_PERFCTR_BRANCH,
	[PERF_COUNT_HW_BRANCH_MISSES] = XSCALE_PERFCTR_BRANCH_MISS,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = XSCALE_PERFCTR_ICACHE_NO_DELIVER,
};

static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					   [PERF_COUNT_HW_CACHE_OP_MAX]
					   [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = XSCALE_PERFCTR_DCACHE_ACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)] = XSCALE_PERFCTR_DCACHE_MISS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = XSCALE_PERFCTR_DCACHE_ACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = XSCALE_PERFCTR_DCACHE_MISS,

	[C(L1I)][C(OP_READ)][C(RESULT_MISS)] = XSCALE_PERFCTR_ICACHE_MISS,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = XSCALE_PERFCTR_DTLB_MISS,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = XSCALE_PERFCTR_DTLB_MISS,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = XSCALE_PERFCTR_ITLB_MISS,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = XSCALE_PERFCTR_ITLB_MISS,
};

#define XSCALE_PMU_ENABLE	0x001
#define XSCALE_PMN_RESET	0x002
#define XSCALE_CCNT_RESET	0x004
#define XSCALE_PMU_RESET	(XSCALE_CCNT_RESET | XSCALE_PMN_RESET)
#define XSCALE_PMU_CNT64	0x008

#define XSCALE1_OVERFLOWED_MASK	0x700
#define XSCALE1_CCOUNT_OVERFLOW	0x400
#define XSCALE1_COUNT0_OVERFLOW	0x100
#define XSCALE1_COUNT1_OVERFLOW	0x200
#define XSCALE1_CCOUNT_INT_EN	0x040
#define XSCALE1_COUNT0_INT_EN	0x010
#define XSCALE1_COUNT1_INT_EN	0x020
#define XSCALE1_COUNT0_EVT_SHFT	12
#define XSCALE1_COUNT0_EVT_MASK	(0xff << XSCALE1_COUNT0_EVT_SHFT)
#define XSCALE1_COUNT1_EVT_SHFT	20
#define XSCALE1_COUNT1_EVT_MASK	(0xff << XSCALE1_COUNT1_EVT_SHFT)
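
/*
 * xscale1 PMNC layout, as implied by the masks above (a descriptive
 * sketch inferred from this file, not quoted from the TRM): bits 0-3
 * hold the control bits (enable, resets, CNT64), bits 4-6 the
 * per-counter interrupt enables, bits 8-10 the overflow flags, and
 * bits 12-19 / 20-27 the event numbers for counters 0 and 1. Bits 7,
 * 11 and 28-31 are unused, matching the write mask in
 * xscale1pmu_write_pmnc() below.
 */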

static inline u32
xscale1pmu_read_pmnc(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c0, c0, 0" : "=r" (val));
	return val;
}

static inline void
xscale1pmu_write_pmnc(u32 val)
{
	/* the upper four bits (28-31) and bits 7 and 11 are write-as-0 */
	val &= 0xffff77f;
	asm volatile("mcr p14, 0, %0, c0, c0, 0" : : "r" (val));
}

static inline int
xscale1_pmnc_counter_has_overflowed(unsigned long pmnc,
				    enum xscale_counters counter)
{
	int ret = 0;

	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		ret = pmnc & XSCALE1_CCOUNT_OVERFLOW;
		break;
	case XSCALE_COUNTER0:
		ret = pmnc & XSCALE1_COUNT0_OVERFLOW;
		break;
	case XSCALE_COUNTER1:
		ret = pmnc & XSCALE1_COUNT1_OVERFLOW;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", counter);
	}

	return ret;
}

static irqreturn_t
xscale1pmu_handle_irq(int irq_num, void *dev)
{
	unsigned long pmnc;
	struct perf_sample_data data;
	struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
	struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events();
	struct pt_regs *regs;
	int idx;

	/*
	 * NOTE: A-stepping parts have an erratum whereby, if one
	 * overflow bit is already set when another overflow occurs,
	 * the earlier bit is cleared and that event is lost. There is
	 * no workaround; this is fixed in the B stepping and later.
	 */
	pmnc = xscale1pmu_read_pmnc();

	/*
	 * Write the value back to clear the overflow flags. Overflow
	 * flags remain in pmnc for use below. We also disable the PMU
	 * while we process the interrupt.
	 */
	xscale1pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);

	if (!(pmnc & XSCALE1_OVERFLOWED_MASK))
		return IRQ_NONE;

	regs = get_irq_regs();

	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		if (!event)
			continue;

		if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event);
		perf_sample_data_init(&data, 0, hwc->last_period);
		if (!armpmu_event_set_period(event))
			continue;

		if (perf_event_overflow(event, &data, regs))
			cpu_pmu->disable(event);
	}

	irq_work_run();

	/*
	 * Re-enable the PMU.
	 */
	pmnc = xscale1pmu_read_pmnc() | XSCALE_PMU_ENABLE;
	xscale1pmu_write_pmnc(pmnc);

	return IRQ_HANDLED;
}

static void xscale1pmu_enable_event(struct perf_event *event)
{
	unsigned long val, mask, evt, flags;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
	int idx = hwc->idx;

	switch (idx) {
	case XSCALE_CYCLE_COUNTER:
		mask = 0;
		evt = XSCALE1_CCOUNT_INT_EN;
		break;
	case XSCALE_COUNTER0:
		mask = XSCALE1_COUNT0_EVT_MASK;
		evt = (hwc->config_base << XSCALE1_COUNT0_EVT_SHFT) |
			XSCALE1_COUNT0_INT_EN;
		break;
	case XSCALE_COUNTER1:
		mask = XSCALE1_COUNT1_EVT_MASK;
		evt = (hwc->config_base << XSCALE1_COUNT1_EVT_SHFT) |
			XSCALE1_COUNT1_INT_EN;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = xscale1pmu_read_pmnc();
	val &= ~mask;
	val |= evt;
	xscale1pmu_write_pmnc(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
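
/*
 * Worked example (illustrative only): counting
 * XSCALE_PERFCTR_DCACHE_MISS (0x0B) on XSCALE_COUNTER0 yields
 * evt = (0x0B << 12) | XSCALE1_COUNT0_INT_EN = 0xb010, so the locked
 * read-modify-write above programs the event into bits 12-19 of PMNC
 * and enables the counter 0 overflow interrupt in a single update.
 */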

static void xscale1pmu_disable_event(struct perf_event *event)
{
	unsigned long val, mask, evt, flags;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
	int idx = hwc->idx;

	switch (idx) {
	case XSCALE_CYCLE_COUNTER:
		mask = XSCALE1_CCOUNT_INT_EN;
		evt = 0;
		break;
	case XSCALE_COUNTER0:
		mask = XSCALE1_COUNT0_INT_EN | XSCALE1_COUNT0_EVT_MASK;
		evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT0_EVT_SHFT;
		break;
	case XSCALE_COUNTER1:
		mask = XSCALE1_COUNT1_INT_EN | XSCALE1_COUNT1_EVT_MASK;
		evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT1_EVT_SHFT;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = xscale1pmu_read_pmnc();
	val &= ~mask;
	val |= evt;
	xscale1pmu_write_pmnc(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

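/*
 * Counter allocation (descriptive note): an event counting
 * XSCALE_PERFCTR_CCNT can only use the dedicated cycle counter;
 * everything else takes the first free event counter, trying
 * XSCALE_COUNTER1 before XSCALE_COUNTER0.
 */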
static int
xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc,
				struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	if (XSCALE_PERFCTR_CCNT == hwc->config_base) {
		if (test_and_set_bit(XSCALE_CYCLE_COUNTER, cpuc->used_mask))
			return -EAGAIN;

		return XSCALE_CYCLE_COUNTER;
	} else {
		if (!test_and_set_bit(XSCALE_COUNTER1, cpuc->used_mask))
			return XSCALE_COUNTER1;

		if (!test_and_set_bit(XSCALE_COUNTER0, cpuc->used_mask))
			return XSCALE_COUNTER0;

		return -EAGAIN;
	}
}

static void xscale1pmu_start(struct arm_pmu *cpu_pmu)
{
	unsigned long flags, val;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = xscale1pmu_read_pmnc();
	val |= XSCALE_PMU_ENABLE;
	xscale1pmu_write_pmnc(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void xscale1pmu_stop(struct arm_pmu *cpu_pmu)
{
	unsigned long flags, val;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = xscale1pmu_read_pmnc();
	val &= ~XSCALE_PMU_ENABLE;
	xscale1pmu_write_pmnc(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static inline u32 xscale1pmu_read_counter(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;
	u32 val = 0;

	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		asm volatile("mrc p14, 0, %0, c1, c0, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER0:
		asm volatile("mrc p14, 0, %0, c2, c0, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER1:
		asm volatile("mrc p14, 0, %0, c3, c0, 0" : "=r" (val));
		break;
	}

	return val;
}

static inline void xscale1pmu_write_counter(struct perf_event *event, u32 val)
{
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;

	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		asm volatile("mcr p14, 0, %0, c1, c0, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER0:
		asm volatile("mcr p14, 0, %0, c2, c0, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER1:
		asm volatile("mcr p14, 0, %0, c3, c0, 0" : : "r" (val));
		break;
	}
}

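/*
 * Shared by both PMU variants. The trailing 0xFF appears to be the
 * raw event mask, matching the 8-bit event fields that the enable
 * paths above and below program into the hardware.
 */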
static int xscale_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &xscale_perf_map,
				&xscale_perf_cache_map, 0xFF);
}

static int xscale1pmu_init(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->name		= "armv5_xscale1";
	cpu_pmu->handle_irq	= xscale1pmu_handle_irq;
	cpu_pmu->enable		= xscale1pmu_enable_event;
	cpu_pmu->disable	= xscale1pmu_disable_event;
	cpu_pmu->read_counter	= xscale1pmu_read_counter;
	cpu_pmu->write_counter	= xscale1pmu_write_counter;
	cpu_pmu->get_event_idx	= xscale1pmu_get_event_idx;
	cpu_pmu->start		= xscale1pmu_start;
	cpu_pmu->stop		= xscale1pmu_stop;
	cpu_pmu->map_event	= xscale_map_event;
	cpu_pmu->num_events	= 3;
	cpu_pmu->max_period	= (1LLU << 32) - 1;

	return 0;
}

#define XSCALE2_OVERFLOWED_MASK	0x01f
#define XSCALE2_CCOUNT_OVERFLOW	0x001
#define XSCALE2_COUNT0_OVERFLOW	0x002
#define XSCALE2_COUNT1_OVERFLOW	0x004
#define XSCALE2_COUNT2_OVERFLOW	0x008
#define XSCALE2_COUNT3_OVERFLOW	0x010
#define XSCALE2_CCOUNT_INT_EN	0x001
#define XSCALE2_COUNT0_INT_EN	0x002
#define XSCALE2_COUNT1_INT_EN	0x004
#define XSCALE2_COUNT2_INT_EN	0x008
#define XSCALE2_COUNT3_INT_EN	0x010
#define XSCALE2_COUNT0_EVT_SHFT	0
#define XSCALE2_COUNT0_EVT_MASK	(0xff << XSCALE2_COUNT0_EVT_SHFT)
#define XSCALE2_COUNT1_EVT_SHFT	8
#define XSCALE2_COUNT1_EVT_MASK	(0xff << XSCALE2_COUNT1_EVT_SHFT)
#define XSCALE2_COUNT2_EVT_SHFT	16
#define XSCALE2_COUNT2_EVT_MASK	(0xff << XSCALE2_COUNT2_EVT_SHFT)
#define XSCALE2_COUNT3_EVT_SHFT	24
#define XSCALE2_COUNT3_EVT_MASK	(0xff << XSCALE2_COUNT3_EVT_SHFT)
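
/*
 * On xscale2 the PMU state is split across several CP14 registers
 * rather than packed into PMNC (a descriptive note inferred from the
 * accessors below): PMNC keeps only the control bits in 3:0, while
 * the interrupt enables, the overflow flags and the four 8-bit
 * event-select fields each live in their own register, one bit (or
 * byte) per counter as encoded by the masks above.
 */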

static inline u32
xscale2pmu_read_pmnc(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c0, c1, 0" : "=r" (val));
	/* bits 1-2 and 4-23 are read-unpredictable */
	return val & 0xff000009;
}

static inline void
xscale2pmu_write_pmnc(u32 val)
{
	/* bits 4-23 are write-as-0, 24-31 are write ignored */
	val &= 0xf;
	asm volatile("mcr p14, 0, %0, c0, c1, 0" : : "r" (val));
}

static inline u32
xscale2pmu_read_overflow_flags(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c5, c1, 0" : "=r" (val));
	return val;
}

static inline void
xscale2pmu_write_overflow_flags(u32 val)
{
	asm volatile("mcr p14, 0, %0, c5, c1, 0" : : "r" (val));
}

static inline u32
xscale2pmu_read_event_select(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c8, c1, 0" : "=r" (val));
	return val;
}

static inline void
xscale2pmu_write_event_select(u32 val)
{
	asm volatile("mcr p14, 0, %0, c8, c1, 0" : : "r"(val));
}

static inline u32
xscale2pmu_read_int_enable(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c4, c1, 0" : "=r" (val));
	return val;
}

static void
xscale2pmu_write_int_enable(u32 val)
{
	asm volatile("mcr p14, 0, %0, c4, c1, 0" : : "r" (val));
}
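
/*
 * CP14 encodings used by the accessors above, taken directly from
 * the mrc/mcr operands: PMNC is c0,c1; the interrupt-enable register
 * is c4,c1; the overflow-flag register is c5,c1; the event-select
 * register is c8,c1. The counters themselves are accessed in
 * xscale2pmu_{read,write}_counter() below.
 */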

static inline int
xscale2_pmnc_counter_has_overflowed(unsigned long of_flags,
				    enum xscale_counters counter)
{
	int ret = 0;

	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		ret = of_flags & XSCALE2_CCOUNT_OVERFLOW;
		break;
	case XSCALE_COUNTER0:
		ret = of_flags & XSCALE2_COUNT0_OVERFLOW;
		break;
	case XSCALE_COUNTER1:
		ret = of_flags & XSCALE2_COUNT1_OVERFLOW;
		break;
	case XSCALE_COUNTER2:
		ret = of_flags & XSCALE2_COUNT2_OVERFLOW;
		break;
	case XSCALE_COUNTER3:
		ret = of_flags & XSCALE2_COUNT3_OVERFLOW;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", counter);
	}

	return ret;
}

static irqreturn_t
xscale2pmu_handle_irq(int irq_num, void *dev)
{
	unsigned long pmnc, of_flags;
	struct perf_sample_data data;
	struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
	struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events();
	struct pt_regs *regs;
	int idx;

	/* Disable the PMU. */
	pmnc = xscale2pmu_read_pmnc();
	xscale2pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);

	/* Check the overflow flag register. */
	of_flags = xscale2pmu_read_overflow_flags();
	if (!(of_flags & XSCALE2_OVERFLOWED_MASK))
		return IRQ_NONE;

	/* Clear the overflow bits. */
	xscale2pmu_write_overflow_flags(of_flags);

	regs = get_irq_regs();

	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		if (!event)
			continue;

		if (!xscale2_pmnc_counter_has_overflowed(of_flags, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event);
		perf_sample_data_init(&data, 0, hwc->last_period);
		if (!armpmu_event_set_period(event))
			continue;

		if (perf_event_overflow(event, &data, regs))
			cpu_pmu->disable(event);
	}

	irq_work_run();

	/*
	 * Re-enable the PMU.
	 */
	pmnc = xscale2pmu_read_pmnc() | XSCALE_PMU_ENABLE;
	xscale2pmu_write_pmnc(pmnc);

	return IRQ_HANDLED;
}

static void xscale2pmu_enable_event(struct perf_event *event)
{
	unsigned long flags, ien, evtsel;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
	int idx = hwc->idx;

	ien = xscale2pmu_read_int_enable();
	evtsel = xscale2pmu_read_event_select();

	switch (idx) {
	case XSCALE_CYCLE_COUNTER:
		ien |= XSCALE2_CCOUNT_INT_EN;
		break;
	case XSCALE_COUNTER0:
		ien |= XSCALE2_COUNT0_INT_EN;
		evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
		evtsel |= hwc->config_base << XSCALE2_COUNT0_EVT_SHFT;
		break;
	case XSCALE_COUNTER1:
		ien |= XSCALE2_COUNT1_INT_EN;
		evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
		evtsel |= hwc->config_base << XSCALE2_COUNT1_EVT_SHFT;
		break;
	case XSCALE_COUNTER2:
		ien |= XSCALE2_COUNT2_INT_EN;
		evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
		evtsel |= hwc->config_base << XSCALE2_COUNT2_EVT_SHFT;
		break;
	case XSCALE_COUNTER3:
		ien |= XSCALE2_COUNT3_INT_EN;
		evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
		evtsel |= hwc->config_base << XSCALE2_COUNT3_EVT_SHFT;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	xscale2pmu_write_event_select(evtsel);
	xscale2pmu_write_int_enable(ien);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
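
/*
 * Illustrative example: programming XSCALE_PERFCTR_DCACHE_MISS (0x0B)
 * on XSCALE_COUNTER2 clears bits 16-23 of the event-select register
 * and writes 0x0B there (0x0B << XSCALE2_COUNT2_EVT_SHFT ==
 * 0x000b0000), leaving the other three 8-bit event fields untouched.
 */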

static void xscale2pmu_disable_event(struct perf_event *event)
{
	unsigned long flags, ien, evtsel, of_flags;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
	int idx = hwc->idx;

	ien = xscale2pmu_read_int_enable();
	evtsel = xscale2pmu_read_event_select();

	switch (idx) {
	case XSCALE_CYCLE_COUNTER:
		ien &= ~XSCALE2_CCOUNT_INT_EN;
		of_flags = XSCALE2_CCOUNT_OVERFLOW;
		break;
	case XSCALE_COUNTER0:
		ien &= ~XSCALE2_COUNT0_INT_EN;
		evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT0_EVT_SHFT;
		of_flags = XSCALE2_COUNT0_OVERFLOW;
		break;
	case XSCALE_COUNTER1:
		ien &= ~XSCALE2_COUNT1_INT_EN;
		evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT1_EVT_SHFT;
		of_flags = XSCALE2_COUNT1_OVERFLOW;
		break;
	case XSCALE_COUNTER2:
		ien &= ~XSCALE2_COUNT2_INT_EN;
		evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT2_EVT_SHFT;
		of_flags = XSCALE2_COUNT2_OVERFLOW;
		break;
	case XSCALE_COUNTER3:
		ien &= ~XSCALE2_COUNT3_INT_EN;
		evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT3_EVT_SHFT;
		of_flags = XSCALE2_COUNT3_OVERFLOW;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	xscale2pmu_write_event_select(evtsel);
	xscale2pmu_write_int_enable(ien);
	xscale2pmu_write_overflow_flags(of_flags);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static int
xscale2pmu_get_event_idx(struct pmu_hw_events *cpuc,
				struct perf_event *event)
{
	int idx = xscale1pmu_get_event_idx(cpuc, event);
	if (idx >= 0)
		goto out;

	if (!test_and_set_bit(XSCALE_COUNTER3, cpuc->used_mask))
		idx = XSCALE_COUNTER3;
	else if (!test_and_set_bit(XSCALE_COUNTER2, cpuc->used_mask))
		idx = XSCALE_COUNTER2;
out:
	return idx;
}
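
/*
 * Allocation note: the xscale1 allocator above handles the cycle
 * counter and counters 0-1; only when those are taken does the
 * xscale2 variant fall back to XSCALE_COUNTER3, then XSCALE_COUNTER2.
 */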

static void xscale2pmu_start(struct arm_pmu *cpu_pmu)
{
	unsigned long flags, val;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64;
	val |= XSCALE_PMU_ENABLE;
	xscale2pmu_write_pmnc(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void xscale2pmu_stop(struct arm_pmu *cpu_pmu)
{
	unsigned long flags, val;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = xscale2pmu_read_pmnc();
	val &= ~XSCALE_PMU_ENABLE;
	xscale2pmu_write_pmnc(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static inline u32 xscale2pmu_read_counter(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;
	u32 val = 0;

	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		asm volatile("mrc p14, 0, %0, c1, c1, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER0:
		asm volatile("mrc p14, 0, %0, c0, c2, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER1:
		asm volatile("mrc p14, 0, %0, c1, c2, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER2:
		asm volatile("mrc p14, 0, %0, c2, c2, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER3:
		asm volatile("mrc p14, 0, %0, c3, c2, 0" : "=r" (val));
		break;
	}

	return val;
}

static inline void xscale2pmu_write_counter(struct perf_event *event, u32 val)
{
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;

	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		asm volatile("mcr p14, 0, %0, c1, c1, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER0:
		asm volatile("mcr p14, 0, %0, c0, c2, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER1:
		asm volatile("mcr p14, 0, %0, c1, c2, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER2:
		asm volatile("mcr p14, 0, %0, c2, c2, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER3:
		asm volatile("mcr p14, 0, %0, c3, c2, 0" : : "r" (val));
		break;
	}
}

static int xscale2pmu_init(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->name		= "armv5_xscale2";
	cpu_pmu->handle_irq	= xscale2pmu_handle_irq;
	cpu_pmu->enable		= xscale2pmu_enable_event;
	cpu_pmu->disable	= xscale2pmu_disable_event;
	cpu_pmu->read_counter	= xscale2pmu_read_counter;
	cpu_pmu->write_counter	= xscale2pmu_write_counter;
	cpu_pmu->get_event_idx	= xscale2pmu_get_event_idx;
	cpu_pmu->start		= xscale2pmu_start;
	cpu_pmu->stop		= xscale2pmu_stop;
	cpu_pmu->map_event	= xscale_map_event;
	cpu_pmu->num_events	= 5;
	cpu_pmu->max_period	= (1LLU << 32) - 1;

	return 0;
}
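
/*
 * Neither init function is called from this file; the shared ARM
 * perf code is expected to select xscale1pmu_init() or
 * xscale2pmu_init() for the probed CPU, and the stubs below make
 * that selection fail cleanly with -ENODEV when CONFIG_CPU_XSCALE
 * is not enabled.
 */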
#else
static inline int xscale1pmu_init(struct arm_pmu *cpu_pmu)
{
	return -ENODEV;
}

static inline int xscale2pmu_init(struct arm_pmu *cpu_pmu)
{
	return -ENODEV;
}
#endif	/* CONFIG_CPU_XSCALE */