/*
 * Hardware performance events for the Alpha.
 *
 * We implement HW counts on the EV67 and subsequent CPUs only.
 *
 * (C) 2010 Michael J. Cree
 *
 * Somewhat based on the Sparc code, and to a lesser extent the PowerPC and
 * ARM code, which are copyright by their respective authors.
 */

#include <linux/perf_event.h>
#include <linux/kprobes.h>
#include <linux/kernel.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>
#include <linux/init.h>

#include <asm/hwrpb.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/pal.h>
#include <asm/wrperfmon.h>
#include <asm/hw_irq.h>


/* The maximum number of PMCs on any Alpha CPU whatsoever. */
#define MAX_HWEVENTS 3
#define PMC_NO_INDEX -1

/* For tracking PMCs and the hw events they monitor on each CPU. */
struct cpu_hw_events {
	int			enabled;
	/* Number of events scheduled; also number of entries valid in arrays below. */
	int			n_events;
	/* Number of events added since last hw_perf_disable(). */
	int			n_added;
	/* Events currently scheduled. */
	struct perf_event	*event[MAX_HWEVENTS];
	/* Event type of each scheduled event. */
	unsigned long		evtype[MAX_HWEVENTS];
	/* Current index of each scheduled event; if not yet determined
	 * contains PMC_NO_INDEX.
	 */
	int			current_idx[MAX_HWEVENTS];
	/* The active PMCs' config for easy use with wrperfmon(). */
	unsigned long		config;
	/* The active counters' indices for easy use with wrperfmon(). */
	unsigned long		idx_mask;
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);



/*
 * A structure to hold the description of the PMCs available on a particular
 * type of Alpha CPU.
 */
struct alpha_pmu_t {
	/* Mapping of the perf system hw event types to indigenous event types */
	const int *event_map;
	/* The number of entries in the event_map */
	int max_events;
	/* The number of PMCs on this Alpha */
	int num_pmcs;
	/*
	 * All PMC counters reside in the IBOX register PCTR. This is the
	 * LSB of the counter.
	 */
	int pmc_count_shift[MAX_HWEVENTS];
	/*
	 * The mask that isolates the PMC bits when the LSB of the counter
	 * is shifted to bit 0.
	 */
	unsigned long pmc_count_mask[MAX_HWEVENTS];
	/* The maximum period the PMC can count. */
	unsigned long pmc_max_period[MAX_HWEVENTS];
	/*
	 * The maximum value that may be written to the counter due to
	 * hardware restrictions is pmc_max_period - pmc_left.
	 */
	long pmc_left[3];
	/* Subroutine for allocation of PMCs. Enforces constraints. */
	int (*check_constraints)(struct perf_event **, unsigned long *, int);
	/* Subroutine for checking validity of a raw event for this PMU. */
	int (*raw_event_valid)(u64 config);
};

/*
 * The Alpha CPU PMU description currently in operation. This is set during
 * the boot process to the specific CPU of the machine.
 */
static const struct alpha_pmu_t *alpha_pmu;


#define HW_OP_UNSUPPORTED -1

/*
 * The hardware descriptions of the EV67, EV68, EV69, EV7 and EV79 PMUs
 * follow. Since they are identical we refer to them collectively as the
 * EV67 henceforth.
 */

/*
 * EV67 PMC event types
 *
 * There is no one-to-one mapping of the possible hw event types to the
 * actual codes that are used to program the PMCs, hence we introduce our
 * own hw event type identifiers.
 */
enum ev67_pmc_event_type {
	EV67_CYCLES = 1,
	EV67_INSTRUCTIONS,
	EV67_BCACHEMISS,
	EV67_MBOXREPLAY,
	EV67_LAST_ET
};
#define EV67_NUM_EVENT_TYPES (EV67_LAST_ET-EV67_CYCLES)


/* Mapping of the hw event types to the perf tool interface */
static const int ev67_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES]	 = EV67_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]	 = EV67_INSTRUCTIONS,
	[PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_CACHE_MISSES]	 = EV67_BCACHEMISS,
};

struct ev67_mapping_t {
	int config;
	int idx;
};

/*
 * The mapping used for one event only - these must be in the same order as
 * the enum ev67_pmc_event_type definition.
 */
static const struct ev67_mapping_t ev67_mapping[] = {
	{EV67_PCTR_INSTR_CYCLES, 1},	 /* EV67_CYCLES */
	{EV67_PCTR_INSTR_CYCLES, 0},	 /* EV67_INSTRUCTIONS */
	{EV67_PCTR_INSTR_BCACHEMISS, 1}, /* EV67_BCACHEMISS */
	{EV67_PCTR_CYCLES_MBOX, 1}	 /* EV67_MBOXREPLAY */
};


/*
 * Check that a group of events can be simultaneously scheduled on to the
 * EV67 PMU. Also allocate counter indices and config.
 */
static int ev67_check_constraints(struct perf_event **event,
				  unsigned long *evtype, int n_ev)
{
	int idx0;
	unsigned long config;

	idx0 = ev67_mapping[evtype[0]-1].idx;
	config = ev67_mapping[evtype[0]-1].config;
	if (n_ev == 1)
		goto success;

	BUG_ON(n_ev != 2);

	if (evtype[0] == EV67_MBOXREPLAY || evtype[1] == EV67_MBOXREPLAY) {
		/* MBOX replay traps must be on PMC 1 */
		idx0 = (evtype[0] == EV67_MBOXREPLAY) ? 1 : 0;
		/* Only cycles can accompany MBOX replay traps */
		if (evtype[idx0] == EV67_CYCLES) {
			config = EV67_PCTR_CYCLES_MBOX;
			goto success;
		}
	}

	if (evtype[0] == EV67_BCACHEMISS || evtype[1] == EV67_BCACHEMISS) {
		/* Bcache misses must be on PMC 1 */
		idx0 = (evtype[0] == EV67_BCACHEMISS) ? 1 : 0;
		/* Only instructions can accompany Bcache misses */
		if (evtype[idx0] == EV67_INSTRUCTIONS) {
			config = EV67_PCTR_INSTR_BCACHEMISS;
			goto success;
		}
	}

	if (evtype[0] == EV67_INSTRUCTIONS || evtype[1] == EV67_INSTRUCTIONS) {
		/* Instructions must be on PMC 0 */
		idx0 = (evtype[0] == EV67_INSTRUCTIONS) ? 0 : 1;
		/* By this point only cycles can accompany instructions */
		if (evtype[idx0^1] == EV67_CYCLES) {
			config = EV67_PCTR_INSTR_CYCLES;
			goto success;
		}
	}

	/* Otherwise, darn it, there is a conflict. */
	return -1;

success:
	event[0]->hw.idx = idx0;
	event[0]->hw.config_base = config;
	if (n_ev == 2) {
		event[1]->hw.idx = idx0 ^ 1;
		event[1]->hw.config_base = config;
	}
	return 0;
}
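
/*
 * Summary of the above (derived from ev67_mapping[] and the checks in
 * ev67_check_constraints(); not part of the original source): the only
 * two-event combinations that can be scheduled together on the EV67 are
 *   - instructions (PMC0) + cycles (PMC1)        -> EV67_PCTR_INSTR_CYCLES
 *   - instructions (PMC0) + Bcache misses (PMC1) -> EV67_PCTR_INSTR_BCACHEMISS
 *   - cycles (PMC0) + MBOX replay traps (PMC1)   -> EV67_PCTR_CYCLES_MBOX
 * Any other pairing falls through to the conflict return above.
 */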


static int ev67_raw_event_valid(u64 config)
{
	return config >= EV67_CYCLES && config < EV67_LAST_ET;
}


static const struct alpha_pmu_t ev67_pmu = {
	.event_map = ev67_perfmon_event_map,
	.max_events = ARRAY_SIZE(ev67_perfmon_event_map),
	.num_pmcs = 2,
	.pmc_count_shift = {EV67_PCTR_0_COUNT_SHIFT, EV67_PCTR_1_COUNT_SHIFT, 0},
	.pmc_count_mask = {EV67_PCTR_0_COUNT_MASK, EV67_PCTR_1_COUNT_MASK, 0},
	.pmc_max_period = {(1UL<<20) - 1, (1UL<<20) - 1, 0},
	.pmc_left = {16, 4, 0},
	.check_constraints = ev67_check_constraints,
	.raw_event_valid = ev67_raw_event_valid,
};
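
/*
 * Note (added for clarity, not in the original source): combining
 * pmc_max_period and pmc_left above with the pmc_left description in
 * struct alpha_pmu_t, the largest value that may safely be written to an
 * EV67 counter is (2^20 - 1) - 16 for PCTR0 and (2^20 - 1) - 4 for PCTR1.
 */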


/*
 * Helper routines to ensure that we read/write only the correct PMC bits
 * when calling the wrperfmon PALcall.
 */
static inline void alpha_write_pmc(int idx, unsigned long val)
{
	val &= alpha_pmu->pmc_count_mask[idx];
	val <<= alpha_pmu->pmc_count_shift[idx];
	val |= (1<<idx);
	wrperfmon(PERFMON_CMD_WRITE, val);
}

static inline unsigned long alpha_read_pmc(int idx)
{
	unsigned long val;

	val = wrperfmon(PERFMON_CMD_READ, 0);
	val >>= alpha_pmu->pmc_count_shift[idx];
	val &= alpha_pmu->pmc_count_mask[idx];
	return val;
}
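
/*
 * Added note (an assumption, not from the original source): the low bit
 * set by "val |= (1<<idx)" in alpha_write_pmc() presumably selects which
 * PMC the PERFMON_CMD_WRITE PALcall updates, so that writing one counter
 * leaves the other counter's field in PCTR untouched.
 */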

/* Set a new period to sample over */
static int alpha_perf_event_set_period(struct perf_event *event,
				       struct hw_perf_event *hwc, int idx)
{
	long left = local64_read(&hwc->period_left);
	long period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	/*
	 * Hardware restrictions require that the counters must not be
	 * written with values that are too close to the maximum period.
	 */
	if (unlikely(left < alpha_pmu->pmc_left[idx]))
		left = alpha_pmu->pmc_left[idx];

	if (left > (long)alpha_pmu->pmc_max_period[idx])
		left = alpha_pmu->pmc_max_period[idx];

	local64_set(&hwc->prev_count, (unsigned long)(-left));

	alpha_write_pmc(idx, (unsigned long)(-left));

	perf_event_update_userpage(event);

	return ret;
}
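
/*
 * Illustrative example (not from the original source): the PMC counts up
 * and raises a PMI when it overflows, so the counter is seeded with -left,
 * masked to the PMC width by alpha_write_pmc(). With a 20-bit EV67 counter
 * and left = 100, the value written is 2^20 - 100 = 0xfff9c, so the next
 * overflow interrupt arrives after roughly 100 more events.
 */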


/*
 * Calculates the count (the 'delta') since the last time the PMC was read.
 *
 * As the PMCs' full period can easily be exceeded within the perf system
 * sampling period we cannot use any high order bits as a guard bit in the
 * PMCs to detect overflow as is done by other architectures. The code here
 * calculates the delta on the basis that there is no overflow when ovf is
 * zero. The value passed via ovf by the interrupt handler corrects for
 * overflow.
 *
 * This can be racy on rare occasions -- a call to this routine can occur
 * with an overflowed counter just before the PMI service routine is called.
 * The check for delta negative hopefully always rectifies this situation.
 */
static unsigned long alpha_perf_event_update(struct perf_event *event,
					     struct hw_perf_event *hwc, int idx, long ovf)
{
	long prev_raw_count, new_raw_count;
	long delta;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = alpha_read_pmc(idx);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count - (prev_raw_count & alpha_pmu->pmc_count_mask[idx])) + ovf;

	/* It is possible on very rare occasions that the PMC has overflowed
	 * but the interrupt is yet to come. Detect and fix this situation.
	 */
	if (unlikely(delta < 0)) {
		delta += alpha_pmu->pmc_max_period[idx] + 1;
	}

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}
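
/*
 * Worked example of the wraparound fix above (illustrative only): suppose
 * prev_count & mask == 0xfff00 and the 20-bit PMC has wrapped to
 * new_raw_count == 0x00040 before the PMI is serviced (ovf == 0). Then
 * delta = 0x40 - 0xfff00 is negative, and adding pmc_max_period + 1 = 2^20
 * yields delta = 0x140, i.e. the 320 events that were actually counted.
 */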


/*
 * Collect all HW events into the array event[].
 */
static int collect_events(struct perf_event *group, int max_count,
			  struct perf_event *event[], unsigned long *evtype,
			  int *current_idx)
{
	struct perf_event *pe;
	int n = 0;

	if (!is_software_event(group)) {
		if (n >= max_count)
			return -1;
		event[n] = group;
		evtype[n] = group->hw.event_base;
		current_idx[n++] = PMC_NO_INDEX;
	}
	list_for_each_entry(pe, &group->sibling_list, group_entry) {
		if (!is_software_event(pe) && pe->state != PERF_EVENT_STATE_OFF) {
			if (n >= max_count)
				return -1;
			event[n] = pe;
			evtype[n] = pe->hw.event_base;
			current_idx[n++] = PMC_NO_INDEX;
		}
	}
	return n;
}



/*
 * Check that a group of events can be simultaneously scheduled on to the PMU.
 */
static int alpha_check_constraints(struct perf_event **events,
				   unsigned long *evtypes, int n_ev)
{

	/* Having no HW events is possible from hw_perf_group_sched_in(). */
	if (n_ev == 0)
		return 0;

	if (n_ev > alpha_pmu->num_pmcs)
		return -1;

	return alpha_pmu->check_constraints(events, evtypes, n_ev);
}


/*
 * If new events have been scheduled then update cpuc with the new
 * configuration. This may involve shifting cycle counts from one PMC to
 * another.
 */
static void maybe_change_configuration(struct cpu_hw_events *cpuc)
{
	int j;

	if (cpuc->n_added == 0)
		return;

	/* Find counters that are moving to another PMC and update */
	for (j = 0; j < cpuc->n_events; j++) {
		struct perf_event *pe = cpuc->event[j];

		if (cpuc->current_idx[j] != PMC_NO_INDEX &&
			cpuc->current_idx[j] != pe->hw.idx) {
			alpha_perf_event_update(pe, &pe->hw, cpuc->current_idx[j], 0);
			cpuc->current_idx[j] = PMC_NO_INDEX;
		}
	}

	/* Assign to counters all unassigned events. */
	cpuc->idx_mask = 0;
	for (j = 0; j < cpuc->n_events; j++) {
		struct perf_event *pe = cpuc->event[j];
		struct hw_perf_event *hwc = &pe->hw;
		int idx = hwc->idx;

		if (cpuc->current_idx[j] == PMC_NO_INDEX) {
			alpha_perf_event_set_period(pe, hwc, idx);
			cpuc->current_idx[j] = idx;
		}

		if (!(hwc->state & PERF_HES_STOPPED))
			cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
	}
	cpuc->config = cpuc->event[0]->hw.config_base;
}



/* Schedule perf HW event on to PMU.
 *  - this function is called from outside this module via the pmu struct
 *    returned from perf event initialisation.
 */
static int alpha_pmu_add(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int n0;
	int ret;
	unsigned long irq_flags;

	/*
	 * The Sparc code has the IRQ disable first followed by the perf
	 * disable, however this can lead to an overflowed counter with the
	 * PMI disabled on rare occasions. The alpha_perf_event_update()
	 * routine should detect this situation by noting a negative delta,
	 * nevertheless we disable the PMCs first to enable a potential
	 * final PMI to occur before we disable interrupts.
	 */
	perf_pmu_disable(event->pmu);
	local_irq_save(irq_flags);

	/* Default to error to be returned */
	ret = -EAGAIN;

	/* Insert event on to PMU and if successful modify ret to valid return */
	n0 = cpuc->n_events;
	if (n0 < alpha_pmu->num_pmcs) {
		cpuc->event[n0] = event;
		cpuc->evtype[n0] = event->hw.event_base;
		cpuc->current_idx[n0] = PMC_NO_INDEX;

		if (!alpha_check_constraints(cpuc->event, cpuc->evtype, n0+1)) {
			cpuc->n_events++;
			cpuc->n_added++;
			ret = 0;
		}
	}

	hwc->state = PERF_HES_UPTODATE;
	if (!(flags & PERF_EF_START))
		hwc->state |= PERF_HES_STOPPED;

	local_irq_restore(irq_flags);
	perf_pmu_enable(event->pmu);

	return ret;
}



/* Remove a perf HW event from the PMU.
 *  - this function is called from outside this module via the pmu struct
 *    returned from perf event initialisation.
 */
static void alpha_pmu_del(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	unsigned long irq_flags;
	int j;

	perf_pmu_disable(event->pmu);
	local_irq_save(irq_flags);

	for (j = 0; j < cpuc->n_events; j++) {
		if (event == cpuc->event[j]) {
			int idx = cpuc->current_idx[j];

			/* Shift remaining entries down into the existing
			 * slot.
			 */
			while (++j < cpuc->n_events) {
				cpuc->event[j - 1] = cpuc->event[j];
				cpuc->evtype[j - 1] = cpuc->evtype[j];
				cpuc->current_idx[j - 1] =
					cpuc->current_idx[j];
			}

			/* Absorb the final count and turn off the event. */
			alpha_perf_event_update(event, hwc, idx, 0);
			perf_event_update_userpage(event);

			cpuc->idx_mask &= ~(1UL<<idx);
			cpuc->n_events--;
			break;
		}
	}

	local_irq_restore(irq_flags);
	perf_pmu_enable(event->pmu);
}


static void alpha_pmu_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	alpha_perf_event_update(event, hwc, hwc->idx, 0);
}


static void alpha_pmu_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (!(hwc->state & PERF_HES_STOPPED)) {
		cpuc->idx_mask &= ~(1UL<<hwc->idx);
		hwc->state |= PERF_HES_STOPPED;
	}

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		alpha_perf_event_update(event, hwc, hwc->idx, 0);
		hwc->state |= PERF_HES_UPTODATE;
	}

	if (cpuc->enabled)
		wrperfmon(PERFMON_CMD_DISABLE, (1UL<<hwc->idx));
}


static void alpha_pmu_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
		return;

	if (flags & PERF_EF_RELOAD) {
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
		alpha_perf_event_set_period(event, hwc, hwc->idx);
	}

	hwc->state = 0;

	cpuc->idx_mask |= 1UL<<hwc->idx;
	if (cpuc->enabled)
		wrperfmon(PERFMON_CMD_ENABLE, (1UL<<hwc->idx));
}


/*
 * Check that CPU performance counters are supported.
 * - currently support EV67 and later CPUs.
 * - actually some later revisions of the EV6 have the same PMC model as the
 *   EV67 but we don't do sufficiently deep CPU detection to detect them.
 *   Bad luck to the very few people who might have one, I guess.
 */
static int supported_cpu(void)
{
	struct percpu_struct *cpu;
	unsigned long cputype;

	/* Get cpu type from HW */
	cpu = (struct percpu_struct *)((char *)hwrpb + hwrpb->processor_offset);
	cputype = cpu->type & 0xffffffff;
	/* Include all of EV67, EV68, EV7, EV79 and EV69 as supported. */
	return (cputype >= EV67_CPU) && (cputype <= EV69_CPU);
}



static void hw_perf_event_destroy(struct perf_event *event)
{
	/* Nothing to be done! */
	return;
}



static int __hw_perf_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	struct perf_event *evts[MAX_HWEVENTS];
	unsigned long evtypes[MAX_HWEVENTS];
	int idx_rubbish_bin[MAX_HWEVENTS];
	int ev;
	int n;

	/* We only support a limited range of HARDWARE event types, with one
	 * only programmable via a RAW event type.
	 */
	if (attr->type == PERF_TYPE_HARDWARE) {
		if (attr->config >= alpha_pmu->max_events)
			return -EINVAL;
		ev = alpha_pmu->event_map[attr->config];
	} else if (attr->type == PERF_TYPE_HW_CACHE) {
		return -EOPNOTSUPP;
	} else if (attr->type == PERF_TYPE_RAW) {
		if (!alpha_pmu->raw_event_valid(attr->config))
			return -EINVAL;
		ev = attr->config;
	} else {
		return -EOPNOTSUPP;
	}

	if (ev < 0) {
		return ev;
	}

	/* The EV67 does not support mode exclusion */
	if (attr->exclude_kernel || attr->exclude_user
			|| attr->exclude_hv || attr->exclude_idle) {
		return -EPERM;
	}

	/*
	 * We place the event type in event_base here and leave calculation
	 * of the codes to programme the PMU for alpha_pmu_enable() because
	 * it is only then we will know what HW events are actually
	 * scheduled on to the PMU. At that point the code to programme the
	 * PMU is put into config_base and the PMC to use is placed into
	 * idx. We initialise idx (below) to PMC_NO_INDEX to indicate that
	 * it is yet to be determined.
	 */
	hwc->event_base = ev;

	/* Collect events in a group together suitable for calling
	 * alpha_check_constraints() to verify that the group as a whole can
	 * be scheduled on to the PMU.
	 */
	n = 0;
	if (event->group_leader != event) {
		n = collect_events(event->group_leader,
				   alpha_pmu->num_pmcs - 1,
				   evts, evtypes, idx_rubbish_bin);
		if (n < 0)
			return -EINVAL;
	}
	evtypes[n] = hwc->event_base;
	evts[n] = event;

	if (alpha_check_constraints(evts, evtypes, n + 1))
		return -EINVAL;

	/* Indicate that PMU config and idx are yet to be determined. */
	hwc->config_base = 0;
	hwc->idx = PMC_NO_INDEX;

	event->destroy = hw_perf_event_destroy;

	/*
	 * Most architectures reserve the PMU for their use at this point.
	 * As there is no existing mechanism to arbitrate usage and there
	 * appears to be no other user of the Alpha PMU we just assume
	 * that we can just use it, hence a NO-OP here.
	 *
	 * Maybe an alpha_reserve_pmu() routine should be implemented but is
	 * anything else ever going to use it?
	 */

	if (!hwc->sample_period) {
		hwc->sample_period = alpha_pmu->pmc_max_period[0];
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	return 0;
}

/*
 * Main entry point to initialise a HW performance event.
 */
static int alpha_pmu_event_init(struct perf_event *event)
{
	int err;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	switch (event->attr.type) {
	case PERF_TYPE_RAW:
	case PERF_TYPE_HARDWARE:
	case PERF_TYPE_HW_CACHE:
		break;

	default:
		return -ENOENT;
	}

	if (!alpha_pmu)
		return -ENODEV;

	/* Do the real initialisation work. */
	err = __hw_perf_event_init(event);

	return err;
}

/*
 * Main entry point - enable HW performance counters.
 */
static void alpha_pmu_enable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (cpuc->enabled)
		return;

	cpuc->enabled = 1;
	barrier();

	if (cpuc->n_events > 0) {
		/* Update cpuc with information from any new scheduled events. */
		maybe_change_configuration(cpuc);

		/* Start counting the desired events. */
		wrperfmon(PERFMON_CMD_LOGGING_OPTIONS, EV67_PCTR_MODE_AGGREGATE);
		wrperfmon(PERFMON_CMD_DESIRED_EVENTS, cpuc->config);
		wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
	}
}


/*
 * Main entry point - disable HW performance counters.
 */

static void alpha_pmu_disable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (!cpuc->enabled)
		return;

	cpuc->enabled = 0;
	cpuc->n_added = 0;

	wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);
}

static struct pmu pmu = {
	.pmu_enable	= alpha_pmu_enable,
	.pmu_disable	= alpha_pmu_disable,
	.event_init	= alpha_pmu_event_init,
	.add		= alpha_pmu_add,
	.del		= alpha_pmu_del,
	.start		= alpha_pmu_start,
	.stop		= alpha_pmu_stop,
	.read		= alpha_pmu_read,
};


/*
 * Main entry point - don't know when this is called but it
 * obviously dumps debug info.
 */
void perf_event_print_debug(void)
{
	unsigned long flags;
	unsigned long pcr;
	int pcr0, pcr1;
	int cpu;

	if (!supported_cpu())
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();

	pcr = wrperfmon(PERFMON_CMD_READ, 0);
	pcr0 = (pcr >> alpha_pmu->pmc_count_shift[0]) & alpha_pmu->pmc_count_mask[0];
	pcr1 = (pcr >> alpha_pmu->pmc_count_shift[1]) & alpha_pmu->pmc_count_mask[1];

	pr_info("CPU#%d: PCTR0[%06x] PCTR1[%06x]\n", cpu, pcr0, pcr1);

	local_irq_restore(flags);
}


/*
 * Performance Monitoring Interrupt Service Routine called when a PMC
 * overflows. The PMC that overflowed is passed in la_ptr.
 */
static void alpha_perf_event_irq_handler(unsigned long la_ptr,
					 struct pt_regs *regs)
{
	struct cpu_hw_events *cpuc;
	struct perf_sample_data data;
	struct perf_event *event;
	struct hw_perf_event *hwc;
	int idx, j;

	__this_cpu_inc(irq_pmi_count);
	cpuc = this_cpu_ptr(&cpu_hw_events);

	/* Completely counting through the PMC's period to trigger a new PMC
	 * overflow interrupt while in this interrupt routine is utterly
	 * disastrous! The EV6 and EV67 counters are sufficiently large to
	 * prevent this but to be really sure disable the PMCs.
	 */
	wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);

	/* la_ptr is the counter that overflowed. */
	if (unlikely(la_ptr >= alpha_pmu->num_pmcs)) {
		/* This should never occur! */
		irq_err_count++;
		pr_warning("PMI: silly index %ld\n", la_ptr);
		wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
		return;
	}

	idx = la_ptr;

	for (j = 0; j < cpuc->n_events; j++) {
		if (cpuc->current_idx[j] == idx)
			break;
	}

	if (unlikely(j == cpuc->n_events)) {
		/* This can occur if the event is disabled right on a PMC overflow. */
		wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
		return;
	}

	event = cpuc->event[j];

	if (unlikely(!event)) {
		/* This should never occur! */
		irq_err_count++;
		pr_warning("PMI: No event at index %d!\n", idx);
		wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
		return;
	}

	hwc = &event->hw;
	alpha_perf_event_update(event, hwc, idx, alpha_pmu->pmc_max_period[idx]+1);
	perf_sample_data_init(&data, 0, hwc->last_period);

	if (alpha_perf_event_set_period(event, hwc, idx)) {
		if (perf_event_overflow(event, &data, regs)) {
			/* Interrupts coming too quickly; "throttle" the
			 * counter, i.e., disable it for a little while.
			 */
			alpha_pmu_stop(event, 0);
		}
	}
	wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);

	return;
}



/*
 * Init call to initialise performance events at kernel startup.
 */
int __init init_hw_perf_events(void)
{
	pr_info("Performance events: ");

	if (!supported_cpu()) {
		pr_cont("No support for your CPU.\n");
		return 0;
	}

	pr_cont("Supported CPU type!\n");

	/* Override performance counter IRQ vector */

	perf_irq = alpha_perf_event_irq_handler;

	/* And set up PMU specification */
	alpha_pmu = &ev67_pmu;

	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);

	return 0;
}
early_initcall(init_hw_perf_events);