/*
 * Performance counter support - powerpc architecture code
 *
 * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_counter.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/reg.h>
#include <asm/pmc.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>

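/*
 * Per-cpu state for the counters currently scheduled onto this cpu's
 * PMCs: the counter pointers, their raw event codes and flags, and
 * the computed MMCR0/MMCR1/MMCRA images in mmcr[0..2].
 */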
struct cpu_hw_counters {
	int n_counters;
	int n_percpu;
	int disabled;
	int n_added;
	int n_limited;
	u8  pmcs_enabled;
	struct perf_counter *counter[MAX_HWCOUNTERS];
	u64 events[MAX_HWCOUNTERS];
	unsigned int flags[MAX_HWCOUNTERS];
	unsigned long mmcr[3];
	struct perf_counter *limited_counter[MAX_LIMITED_HWCOUNTERS];
	u8  limited_hwidx[MAX_LIMITED_HWCOUNTERS];
};
DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters);

struct power_pmu *ppmu;

/*
 * Normally, to ignore kernel events we set the FCS (freeze counters
 * in supervisor mode) bit in MMCR0, but if the kernel runs with the
 * hypervisor bit set in the MSR, or if we are running on a processor
 * where the hypervisor bit is forced to 1 (as on Apple G5 processors),
 * then we need to use the FCHV bit to ignore kernel events.
 */
static unsigned int freeze_counters_kernel = MMCR0_FCS;

static void perf_counter_interrupt(struct pt_regs *regs);

void perf_counter_print_debug(void)
{
}

/*
 * Read one performance monitor counter (PMC).
 */
static unsigned long read_pmc(int idx)
{
	unsigned long val;

	switch (idx) {
	case 1:
		val = mfspr(SPRN_PMC1);
		break;
	case 2:
		val = mfspr(SPRN_PMC2);
		break;
	case 3:
		val = mfspr(SPRN_PMC3);
		break;
	case 4:
		val = mfspr(SPRN_PMC4);
		break;
	case 5:
		val = mfspr(SPRN_PMC5);
		break;
	case 6:
		val = mfspr(SPRN_PMC6);
		break;
	case 7:
		val = mfspr(SPRN_PMC7);
		break;
	case 8:
		val = mfspr(SPRN_PMC8);
		break;
	default:
		printk(KERN_ERR "oops trying to read PMC%d\n", idx);
		val = 0;
	}
	return val;
}

/*
 * Write one PMC.
 */
static void write_pmc(int idx, unsigned long val)
{
	switch (idx) {
	case 1:
		mtspr(SPRN_PMC1, val);
		break;
	case 2:
		mtspr(SPRN_PMC2, val);
		break;
	case 3:
		mtspr(SPRN_PMC3, val);
		break;
	case 4:
		mtspr(SPRN_PMC4, val);
		break;
	case 5:
		mtspr(SPRN_PMC5, val);
		break;
	case 6:
		mtspr(SPRN_PMC6, val);
		break;
	case 7:
		mtspr(SPRN_PMC7, val);
		break;
	case 8:
		mtspr(SPRN_PMC8, val);
		break;
	default:
		printk(KERN_ERR "oops trying to write PMC%d\n", idx);
	}
}

/*
 * Check if a set of events can all go on the PMU at once.
 * If they can't, this will look at alternative codes for the events
 * and see if any combination of alternative codes is feasible.
 * The feasible set is returned in event[].
 */
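/*
 * A rough note on the constraint arithmetic used here: the
 * ppmu->get_constraint() hook encodes each event's PMU resource usage
 * as a (mask, value) pair of bit-fields.  Fields for exclusive
 * resources must match exactly across events, which is what the
 * ((nv + tadd) ^ value) & mask tests check.  Shared resources use
 * small counting fields instead: add_fields marks their low-order
 * bits so that combining two events sums the usage counts, and
 * test_adder is chosen so that a count exceeding a field's capacity
 * carries into a bit covered by the mask, making the check fail.
 */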
static int power_check_constraints(u64 event[], unsigned int cflags[],
				   int n_ev)
{
	unsigned long mask, value, nv;
	u64 alternatives[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
	unsigned long amasks[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
	unsigned long avalues[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
	unsigned long smasks[MAX_HWCOUNTERS], svalues[MAX_HWCOUNTERS];
	int n_alt[MAX_HWCOUNTERS], choice[MAX_HWCOUNTERS];
	int i, j;
	unsigned long addf = ppmu->add_fields;
	unsigned long tadd = ppmu->test_adder;

	if (n_ev > ppmu->n_counter)
		return -1;

	/* First see if the events will go on as-is */
	for (i = 0; i < n_ev; ++i) {
		if ((cflags[i] & PPMU_LIMITED_PMC_REQD)
		    && !ppmu->limited_pmc_event(event[i])) {
			ppmu->get_alternatives(event[i], cflags[i],
					       alternatives[i]);
			event[i] = alternatives[i][0];
		}
		if (ppmu->get_constraint(event[i], &amasks[i][0],
					 &avalues[i][0]))
			return -1;
	}
	value = mask = 0;
	for (i = 0; i < n_ev; ++i) {
		nv = (value | avalues[i][0]) + (value & avalues[i][0] & addf);
		if ((((nv + tadd) ^ value) & mask) != 0 ||
		    (((nv + tadd) ^ avalues[i][0]) & amasks[i][0]) != 0)
			break;
		value = nv;
		mask |= amasks[i][0];
	}
	if (i == n_ev)
		return 0;	/* all OK */

	/* doesn't work, gather alternatives... */
	if (!ppmu->get_alternatives)
		return -1;
	for (i = 0; i < n_ev; ++i) {
		choice[i] = 0;
		n_alt[i] = ppmu->get_alternatives(event[i], cflags[i],
						  alternatives[i]);
		for (j = 1; j < n_alt[i]; ++j)
			ppmu->get_constraint(alternatives[i][j],
					     &amasks[i][j], &avalues[i][j]);
	}

	/* enumerate all possibilities and see if any will work */
	i = 0;
	j = -1;
	value = mask = nv = 0;
	while (i < n_ev) {
		if (j >= 0) {
			/* we're backtracking, restore context */
			value = svalues[i];
			mask = smasks[i];
			j = choice[i];
		}
		/*
		 * See if any alternative k for event i,
		 * where k > j, will satisfy the constraints.
		 */
		while (++j < n_alt[i]) {
			nv = (value | avalues[i][j]) +
				(value & avalues[i][j] & addf);
			if ((((nv + tadd) ^ value) & mask) == 0 &&
			    (((nv + tadd) ^ avalues[i][j])
			     & amasks[i][j]) == 0)
				break;
		}
		if (j >= n_alt[i]) {
			/*
			 * No feasible alternative, backtrack
			 * to event i-1 and continue enumerating its
			 * alternatives from where we got up to.
			 */
			if (--i < 0)
				return -1;
		} else {
			/*
			 * Found a feasible alternative for event i,
			 * remember where we got up to with this event,
			 * go on to the next event, and start with
			 * the first alternative for it.
			 */
			choice[i] = j;
			svalues[i] = value;
			smasks[i] = mask;
			value = nv;
			mask |= amasks[i][j];
			++i;
			j = -1;
		}
	}

	/* OK, we have a feasible combination, tell the caller the solution */
	for (i = 0; i < n_ev; ++i)
		event[i] = alternatives[i][choice[i]];
	return 0;
}

/*
 * Check if newly-added counters have consistent settings for
 * exclude_{user,kernel,hv} with each other and any previously
 * added counters.
 */
static int check_excludes(struct perf_counter **ctrs, unsigned int cflags[],
			  int n_prev, int n_new)
{
	int eu = 0, ek = 0, eh = 0;
	int i, n, first;
	struct perf_counter *counter;

	n = n_prev + n_new;
	if (n <= 1)
		return 0;

	first = 1;
	for (i = 0; i < n; ++i) {
		if (cflags[i] & PPMU_LIMITED_PMC_OK) {
			cflags[i] &= ~PPMU_LIMITED_PMC_REQD;
			continue;
		}
		counter = ctrs[i];
		if (first) {
			eu = counter->attr.exclude_user;
			ek = counter->attr.exclude_kernel;
			eh = counter->attr.exclude_hv;
			first = 0;
		} else if (counter->attr.exclude_user != eu ||
			   counter->attr.exclude_kernel != ek ||
			   counter->attr.exclude_hv != eh) {
			return -EAGAIN;
		}
	}

	if (eu || ek || eh)
		for (i = 0; i < n; ++i)
			if (cflags[i] & PPMU_LIMITED_PMC_OK)
				cflags[i] |= PPMU_LIMITED_PMC_REQD;

	return 0;
}

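/*
 * Fold the current hardware count into the 64-bit counter value.
 * prev_count holds the PMC value as of the last update; the cmpxchg
 * loop makes the read-and-update atomic against a PMU interrupt doing
 * the same thing, and masking the delta to 32 bits copes with PMC
 * wraparound, since the hardware counters are only 32 bits wide.
 */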
static void power_pmu_read(struct perf_counter *counter)
{
	long val, delta, prev;

	if (!counter->hw.idx)
		return;
	/*
	 * Performance monitor interrupts come even when interrupts
	 * are soft-disabled, as long as interrupts are hard-enabled.
	 * Therefore we treat them like NMIs.
	 */
	do {
		prev = atomic64_read(&counter->hw.prev_count);
		barrier();
		val = read_pmc(counter->hw.idx);
	} while (atomic64_cmpxchg(&counter->hw.prev_count, prev, val) != prev);

	/* The counters are only 32 bits wide */
	delta = (val - prev) & 0xfffffffful;
	atomic64_add(delta, &counter->count);
	atomic64_sub(delta, &counter->hw.period_left);
}

/*
 * On some machines, PMC5 and PMC6 can't be written, don't respect
 * the freeze conditions, and don't generate interrupts.  This tells
 * us if `counter' is using such a PMC.
 */
static int is_limited_pmc(int pmcnum)
{
	return (ppmu->flags & PPMU_LIMITED_PMC5_6)
		&& (pmcnum == 5 || pmcnum == 6);
}

static void freeze_limited_counters(struct cpu_hw_counters *cpuhw,
				    unsigned long pmc5, unsigned long pmc6)
{
	struct perf_counter *counter;
	u64 val, prev, delta;
	int i;

	for (i = 0; i < cpuhw->n_limited; ++i) {
		counter = cpuhw->limited_counter[i];
		if (!counter->hw.idx)
			continue;
		val = (counter->hw.idx == 5) ? pmc5 : pmc6;
		prev = atomic64_read(&counter->hw.prev_count);
		counter->hw.idx = 0;
		delta = (val - prev) & 0xfffffffful;
		atomic64_add(delta, &counter->count);
	}
}

static void thaw_limited_counters(struct cpu_hw_counters *cpuhw,
				  unsigned long pmc5, unsigned long pmc6)
{
	struct perf_counter *counter;
	u64 val;
	int i;

	for (i = 0; i < cpuhw->n_limited; ++i) {
		counter = cpuhw->limited_counter[i];
		counter->hw.idx = cpuhw->limited_hwidx[i];
		val = (counter->hw.idx == 5) ? pmc5 : pmc6;
		atomic64_set(&counter->hw.prev_count, val);
		perf_counter_update_userpage(counter);
	}
}

/*
 * Since limited counters don't respect the freeze conditions, we
 * have to read them immediately after freezing or unfreezing the
 * other counters.  We try to keep the values from the limited
 * counters as consistent as possible by keeping the delay (in
 * cycles and instructions) between freezing/unfreezing and reading
 * the limited counters as small and consistent as possible.
 * Therefore, if any limited counters are in use, we read them
 * both, and always in the same order, to minimize variability,
 * and do it inside the same asm that writes MMCR0.
 */
static void write_mmcr0(struct cpu_hw_counters *cpuhw, unsigned long mmcr0)
{
	unsigned long pmc5, pmc6;

	if (!cpuhw->n_limited) {
		mtspr(SPRN_MMCR0, mmcr0);
		return;
	}

	/*
	 * Write MMCR0, then read PMC5 and PMC6 immediately.
	 * To ensure we don't get a performance monitor interrupt
	 * between writing MMCR0 and freezing/thawing the limited
	 * counters, we first write MMCR0 with the counter overflow
	 * interrupt enable bits turned off.
	 */
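	/*
	 * Operands of the asm below: %2 is the MMCR0 image with the
	 * counter overflow interrupt enables masked off, %3 is
	 * SPRN_MMCR0, and %4/%5 are SPRN_PMC5/6.  The "=&r"
	 * earlyclobbers stop the compiler allocating pmc5/pmc6 to the
	 * same registers as the inputs, since the outputs are written
	 * while the inputs are still live.
	 */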
	asm volatile("mtspr %3,%2; mfspr %0,%4; mfspr %1,%5"
		     : "=&r" (pmc5), "=&r" (pmc6)
		     : "r" (mmcr0 & ~(MMCR0_PMC1CE | MMCR0_PMCjCE)),
		       "i" (SPRN_MMCR0),
		       "i" (SPRN_PMC5), "i" (SPRN_PMC6));

	if (mmcr0 & MMCR0_FC)
		freeze_limited_counters(cpuhw, pmc5, pmc6);
	else
		thaw_limited_counters(cpuhw, pmc5, pmc6);

	/*
	 * Write the full MMCR0 including the counter overflow interrupt
	 * enable bits, if necessary.
	 */
	if (mmcr0 & (MMCR0_PMC1CE | MMCR0_PMCjCE))
		mtspr(SPRN_MMCR0, mmcr0);
}

/*
 * Disable all counters to prevent PMU interrupts and to allow
 * counters to be added or removed.
 */
void hw_perf_disable(void)
{
	struct cpu_hw_counters *cpuhw;
	unsigned long flags;

	local_irq_save(flags);
	cpuhw = &__get_cpu_var(cpu_hw_counters);

	if (!cpuhw->disabled) {
		cpuhw->disabled = 1;
		cpuhw->n_added = 0;

		/*
		 * Check if we ever enabled the PMU on this cpu.
		 */
		if (!cpuhw->pmcs_enabled) {
			if (ppc_md.enable_pmcs)
				ppc_md.enable_pmcs();
			cpuhw->pmcs_enabled = 1;
		}

		/*
		 * Disable instruction sampling if it was enabled
		 */
		if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
			mtspr(SPRN_MMCRA,
			      cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
			mb();
		}

		/*
		 * Set the 'freeze counters' bit.
		 * The barrier is to make sure the mtspr has been
		 * executed and the PMU has frozen the counters
		 * before we return.
		 */
		write_mmcr0(cpuhw, mfspr(SPRN_MMCR0) | MMCR0_FC);
		mb();
	}
	local_irq_restore(flags);
}

/*
 * Re-enable all counters.
 * If we were previously disabled and counters were added, then
 * put the new config on the PMU.
 */
void hw_perf_enable(void)
{
	struct perf_counter *counter;
	struct cpu_hw_counters *cpuhw;
	unsigned long flags;
	long i;
	unsigned long val;
	s64 left;
	unsigned int hwc_index[MAX_HWCOUNTERS];
	int n_lim;
	int idx;

	local_irq_save(flags);
	cpuhw = &__get_cpu_var(cpu_hw_counters);
	if (!cpuhw->disabled) {
		local_irq_restore(flags);
		return;
	}
	cpuhw->disabled = 0;

	/*
	 * If we didn't change anything, or only removed counters,
	 * no need to recalculate MMCR* settings and reset the PMCs.
	 * Just reenable the PMU with the current MMCR* settings
	 * (possibly updated for removal of counters).
	 */
	if (!cpuhw->n_added) {
		mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
		mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
		if (cpuhw->n_counters == 0)
			get_lppaca()->pmcregs_in_use = 0;
		goto out_enable;
	}

	/*
	 * Compute MMCR* values for the new set of counters
	 */
	if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_counters, hwc_index,
			       cpuhw->mmcr)) {
		/* shouldn't ever get here */
		printk(KERN_ERR "oops compute_mmcr failed\n");
		goto out;
	}

	/*
	 * Add in MMCR0 freeze bits corresponding to the
	 * attr.exclude_* bits for the first counter.
	 * We have already checked that all counters have the
	 * same values for these bits as the first counter.
	 */
	counter = cpuhw->counter[0];
	if (counter->attr.exclude_user)
		cpuhw->mmcr[0] |= MMCR0_FCP;
	if (counter->attr.exclude_kernel)
		cpuhw->mmcr[0] |= freeze_counters_kernel;
	if (counter->attr.exclude_hv)
		cpuhw->mmcr[0] |= MMCR0_FCHV;

	/*
	 * Write the new configuration to MMCR* with the freeze
	 * bit set and set the hardware counters to their initial values.
	 * Then unfreeze the counters.
	 */
	get_lppaca()->pmcregs_in_use = 1;
	mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
	mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
	mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE))
	      | MMCR0_FC);

	/*
	 * Read off any pre-existing counters that need to move
	 * to another PMC.
	 */
	for (i = 0; i < cpuhw->n_counters; ++i) {
		counter = cpuhw->counter[i];
		if (counter->hw.idx && counter->hw.idx != hwc_index[i] + 1) {
			power_pmu_read(counter);
			write_pmc(counter->hw.idx, 0);
			counter->hw.idx = 0;
		}
	}

	/*
	 * Initialize the PMCs for all the new and moved counters.
	 */
	cpuhw->n_limited = n_lim = 0;
	for (i = 0; i < cpuhw->n_counters; ++i) {
		counter = cpuhw->counter[i];
		if (counter->hw.idx)
			continue;
		idx = hwc_index[i] + 1;
		if (is_limited_pmc(idx)) {
			cpuhw->limited_counter[n_lim] = counter;
			cpuhw->limited_hwidx[n_lim] = idx;
			++n_lim;
			continue;
		}
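		/*
		 * A PMC raises its overflow condition when bit 31
		 * becomes set, i.e. when its 32-bit value goes
		 * negative, so seeding it with 0x80000000 - left makes
		 * the next overflow occur after "left" more events.
		 */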
		val = 0;
		if (counter->hw.sample_period) {
			left = atomic64_read(&counter->hw.period_left);
			if (left < 0x80000000L)
				val = 0x80000000L - left;
		}
		atomic64_set(&counter->hw.prev_count, val);
		counter->hw.idx = idx;
		write_pmc(idx, val);
		perf_counter_update_userpage(counter);
	}
	cpuhw->n_limited = n_lim;
	cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE;

 out_enable:
	mb();
	write_mmcr0(cpuhw, cpuhw->mmcr[0]);

	/*
	 * Enable instruction sampling if necessary
	 */
	if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
		mb();
		mtspr(SPRN_MMCRA, cpuhw->mmcr[2]);
	}

 out:
	local_irq_restore(flags);
}

static int collect_events(struct perf_counter *group, int max_count,
			  struct perf_counter *ctrs[], u64 *events,
			  unsigned int *flags)
{
	int n = 0;
	struct perf_counter *counter;

	if (!is_software_counter(group)) {
		if (n >= max_count)
			return -1;
		ctrs[n] = group;
		flags[n] = group->hw.counter_base;
		events[n++] = group->hw.config;
	}
	list_for_each_entry(counter, &group->sibling_list, list_entry) {
		if (!is_software_counter(counter) &&
		    counter->state != PERF_COUNTER_STATE_OFF) {
			if (n >= max_count)
				return -1;
			ctrs[n] = counter;
			flags[n] = counter->hw.counter_base;
			events[n++] = counter->hw.config;
		}
	}
	return n;
}

static void counter_sched_in(struct perf_counter *counter, int cpu)
{
	counter->state = PERF_COUNTER_STATE_ACTIVE;
	counter->oncpu = cpu;
	counter->tstamp_running += counter->ctx->time - counter->tstamp_stopped;
	if (is_software_counter(counter))
		counter->pmu->enable(counter);
}

/*
 * Called to enable a whole group of counters.
 * Returns 1 if the group was enabled, or -EAGAIN if it could not be.
 * Assumes the caller has disabled interrupts and has
 * frozen the PMU with hw_perf_disable.
 */
int hw_perf_group_sched_in(struct perf_counter *group_leader,
			   struct perf_cpu_context *cpuctx,
			   struct perf_counter_context *ctx, int cpu)
{
	struct cpu_hw_counters *cpuhw;
	long i, n, n0;
	struct perf_counter *sub;

	cpuhw = &__get_cpu_var(cpu_hw_counters);
	n0 = cpuhw->n_counters;
	n = collect_events(group_leader, ppmu->n_counter - n0,
			   &cpuhw->counter[n0], &cpuhw->events[n0],
			   &cpuhw->flags[n0]);
	if (n < 0)
		return -EAGAIN;
	if (check_excludes(cpuhw->counter, cpuhw->flags, n0, n))
		return -EAGAIN;
	i = power_check_constraints(cpuhw->events, cpuhw->flags, n + n0);
	if (i < 0)
		return -EAGAIN;
	cpuhw->n_counters = n0 + n;
	cpuhw->n_added += n;

	/*
	 * OK, this group can go on; update counter states etc.,
	 * and enable any software counters
	 */
	for (i = n0; i < n0 + n; ++i)
		cpuhw->counter[i]->hw.config = cpuhw->events[i];
	cpuctx->active_oncpu += n;
	n = 1;
	counter_sched_in(group_leader, cpu);
	list_for_each_entry(sub, &group_leader->sibling_list, list_entry) {
		if (sub->state != PERF_COUNTER_STATE_OFF) {
			counter_sched_in(sub, cpu);
			++n;
		}
	}
	ctx->nr_active += n;

	return 1;
}

/*
 * Add a counter to the PMU.
 * If all counters are not already frozen, then we disable and
 * re-enable the PMU in order to get hw_perf_enable to do the
 * actual work of reconfiguring the PMU.
 */
static int power_pmu_enable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuhw;
	unsigned long flags;
	int n0;
	int ret = -EAGAIN;

	local_irq_save(flags);
	perf_disable();

	/*
	 * Add the counter to the list (if there is room)
	 * and check whether the total set is still feasible.
	 */
	cpuhw = &__get_cpu_var(cpu_hw_counters);
	n0 = cpuhw->n_counters;
	if (n0 >= ppmu->n_counter)
		goto out;
	cpuhw->counter[n0] = counter;
	cpuhw->events[n0] = counter->hw.config;
	cpuhw->flags[n0] = counter->hw.counter_base;
	if (check_excludes(cpuhw->counter, cpuhw->flags, n0, 1))
		goto out;
	if (power_check_constraints(cpuhw->events, cpuhw->flags, n0 + 1))
		goto out;

	counter->hw.config = cpuhw->events[n0];
	++cpuhw->n_counters;
	++cpuhw->n_added;

	ret = 0;
 out:
	perf_enable();
	local_irq_restore(flags);
	return ret;
}

/*
 * Remove a counter from the PMU.
 */
static void power_pmu_disable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuhw;
	long i;
	unsigned long flags;

	local_irq_save(flags);
	perf_disable();

	power_pmu_read(counter);

	cpuhw = &__get_cpu_var(cpu_hw_counters);
	for (i = 0; i < cpuhw->n_counters; ++i) {
		if (counter == cpuhw->counter[i]) {
			while (++i < cpuhw->n_counters)
				cpuhw->counter[i-1] = cpuhw->counter[i];
			--cpuhw->n_counters;
			ppmu->disable_pmc(counter->hw.idx - 1, cpuhw->mmcr);
			if (counter->hw.idx) {
				write_pmc(counter->hw.idx, 0);
				counter->hw.idx = 0;
			}
			perf_counter_update_userpage(counter);
			break;
		}
	}
	for (i = 0; i < cpuhw->n_limited; ++i)
		if (counter == cpuhw->limited_counter[i])
			break;
	if (i < cpuhw->n_limited) {
		while (++i < cpuhw->n_limited) {
			cpuhw->limited_counter[i-1] = cpuhw->limited_counter[i];
			cpuhw->limited_hwidx[i-1] = cpuhw->limited_hwidx[i];
		}
		--cpuhw->n_limited;
	}
	if (cpuhw->n_counters == 0) {
		/* disable exceptions if no counters are running */
		cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE);
	}

	perf_enable();
	local_irq_restore(flags);
}

/*
 * Re-enable interrupts on a counter after they were throttled
 * because they were coming too fast.
 */
static void power_pmu_unthrottle(struct perf_counter *counter)
{
	s64 val, left;
	unsigned long flags;

	if (!counter->hw.idx || !counter->hw.sample_period)
		return;
	local_irq_save(flags);
	perf_disable();
	power_pmu_read(counter);
	left = counter->hw.sample_period;
	counter->hw.last_period = left;
	val = 0;
	if (left < 0x80000000L)
		val = 0x80000000L - left;
	write_pmc(counter->hw.idx, val);
	atomic64_set(&counter->hw.prev_count, val);
	atomic64_set(&counter->hw.period_left, left);
	perf_counter_update_userpage(counter);
	perf_enable();
	local_irq_restore(flags);
}

struct pmu power_pmu = {
	.enable		= power_pmu_enable,
	.disable	= power_pmu_disable,
	.read		= power_pmu_read,
	.unthrottle	= power_pmu_unthrottle,
};

/*
 * Return 1 if we might be able to put counter on a limited PMC,
 * or 0 if not.
 * A counter can only go on a limited PMC if it counts something
 * that a limited PMC can count, doesn't require interrupts, and
 * doesn't exclude any processor mode.
 */
static int can_go_on_limited_pmc(struct perf_counter *counter, u64 ev,
				 unsigned int flags)
{
	int n;
	u64 alt[MAX_EVENT_ALTERNATIVES];

	if (counter->attr.exclude_user
	    || counter->attr.exclude_kernel
	    || counter->attr.exclude_hv
	    || counter->attr.sample_period)
		return 0;

	if (ppmu->limited_pmc_event(ev))
		return 1;

	/*
	 * The requested event isn't on a limited PMC already;
	 * see if any alternative code goes on a limited PMC.
	 */
	if (!ppmu->get_alternatives)
		return 0;

	flags |= PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD;
	n = ppmu->get_alternatives(ev, flags, alt);

	return n > 0;
}

/*
 * Find an alternative event that goes on a normal PMC, if possible,
 * and return the event code, or 0 if there is no such alternative.
 * (Note: event code 0 is "don't count" on all machines.)
 */
static u64 normal_pmc_alternative(u64 ev, unsigned long flags)
{
	u64 alt[MAX_EVENT_ALTERNATIVES];
	int n;

	flags &= ~(PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD);
	n = ppmu->get_alternatives(ev, flags, alt);
	if (!n)
		return 0;
	return alt[0];
}

/* Number of perf_counters counting hardware events */
static atomic_t num_counters;
/* Used to avoid races in calling reserve/release_pmc_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);

/*
 * Release the PMU if this is the last perf_counter.
 */
static void hw_perf_counter_destroy(struct perf_counter *counter)
{
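	/*
	 * The fast path decrements num_counters only while it is >= 2;
	 * the final 1 -> 0 transition takes pmc_reserve_mutex, so the
	 * release can't race with a concurrent reserve_pmc_hardware()
	 * from hw_perf_counter_init().
	 */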
	if (!atomic_add_unless(&num_counters, -1, 1)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_dec_return(&num_counters) == 0)
			release_pmc_hardware();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

/*
 * Translate a generic cache event config to a raw event code.
 */
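/*
 * The config word packs three fields, following the generic
 * PERF_COUNT_HW_CACHE_* encoding: bits 0-7 give the cache type,
 * bits 8-15 the operation, and bits 16-23 the operation result.
 */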
static int hw_perf_cache_event(u64 config, u64 *eventp)
{
	unsigned long type, op, result;
	int ev;

	if (!ppmu->cache_events)
		return -EINVAL;

	/* unpack config */
	type = config & 0xff;
	op = (config >> 8) & 0xff;
	result = (config >> 16) & 0xff;

	if (type >= PERF_COUNT_HW_CACHE_MAX ||
	    op >= PERF_COUNT_HW_CACHE_OP_MAX ||
	    result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ev = (*ppmu->cache_events)[type][op][result];
	if (ev == 0)
		return -EOPNOTSUPP;
	if (ev == -1)
		return -EINVAL;
	*eventp = ev;
	return 0;
}

const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
{
	u64 ev;
	unsigned long flags;
	struct perf_counter *ctrs[MAX_HWCOUNTERS];
	u64 events[MAX_HWCOUNTERS];
	unsigned int cflags[MAX_HWCOUNTERS];
	int n;
	int err;

	if (!ppmu)
		return ERR_PTR(-ENXIO);
	switch (counter->attr.type) {
	case PERF_TYPE_HARDWARE:
		ev = counter->attr.config;
		if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
			return ERR_PTR(-EOPNOTSUPP);
		ev = ppmu->generic_events[ev];
		break;
	case PERF_TYPE_HW_CACHE:
		err = hw_perf_cache_event(counter->attr.config, &ev);
		if (err)
			return ERR_PTR(err);
		break;
	case PERF_TYPE_RAW:
		ev = counter->attr.config;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}
	counter->hw.config_base = ev;
	counter->hw.idx = 0;

	/*
	 * If we are not running on a hypervisor, force the
	 * exclude_hv bit to 0 so that we don't care what
	 * the user set it to.
	 */
	if (!firmware_has_feature(FW_FEATURE_LPAR))
		counter->attr.exclude_hv = 0;

	/*
	 * If this is a per-task counter, then we can use
	 * PM_RUN_* events interchangeably with their non RUN_*
	 * equivalents, e.g. PM_RUN_CYC instead of PM_CYC.
	 * XXX we should check if the task is an idle task.
	 */
	flags = 0;
	if (counter->ctx->task)
		flags |= PPMU_ONLY_COUNT_RUN;

	/*
	 * If this machine has limited counters, check whether this
	 * event could go on a limited counter.
	 */
	if (ppmu->flags & PPMU_LIMITED_PMC5_6) {
		if (can_go_on_limited_pmc(counter, ev, flags)) {
			flags |= PPMU_LIMITED_PMC_OK;
		} else if (ppmu->limited_pmc_event(ev)) {
			/*
			 * The requested event is on a limited PMC,
			 * but we can't use a limited PMC; see if any
			 * alternative goes on a normal PMC.
			 */
			ev = normal_pmc_alternative(ev, flags);
			if (!ev)
				return ERR_PTR(-EINVAL);
		}
	}

	/*
	 * If this is in a group, check if it can go on with all the
	 * other hardware counters in the group.  We assume the counter
	 * hasn't been linked into its leader's sibling list at this point.
	 */
	n = 0;
	if (counter->group_leader != counter) {
		n = collect_events(counter->group_leader, ppmu->n_counter - 1,
				   ctrs, events, cflags);
		if (n < 0)
			return ERR_PTR(-EINVAL);
	}
	events[n] = ev;
	ctrs[n] = counter;
	cflags[n] = flags;
	if (check_excludes(ctrs, cflags, n, 1))
		return ERR_PTR(-EINVAL);
	if (power_check_constraints(events, cflags, n + 1))
		return ERR_PTR(-EINVAL);

	counter->hw.config = events[n];
	counter->hw.counter_base = cflags[n];
	counter->hw.last_period = counter->hw.sample_period;
	atomic64_set(&counter->hw.period_left, counter->hw.last_period);

	/*
	 * See if we need to reserve the PMU.
	 * If no counters are currently in use, then we have to take a
	 * mutex to ensure that we don't race with another task doing
	 * reserve_pmc_hardware or release_pmc_hardware.
	 */
	err = 0;
	if (!atomic_inc_not_zero(&num_counters)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&num_counters) == 0 &&
		    reserve_pmc_hardware(perf_counter_interrupt))
			err = -EBUSY;
		else
			atomic_inc(&num_counters);
		mutex_unlock(&pmc_reserve_mutex);
	}
	counter->destroy = hw_perf_counter_destroy;

	if (err)
		return ERR_PTR(err);
	return &power_pmu;
}

/*
 * A counter has overflowed; update its count and record
 * things if requested.  Note that interrupts are hard-disabled
 * here so there is no possibility of being interrupted.
 */
static void record_and_restart(struct perf_counter *counter, long val,
			       struct pt_regs *regs, int nmi)
{
	u64 period = counter->hw.sample_period;
	unsigned long mmcra, sdsync;
	s64 prev, delta, left;
	int record = 0;

	/* we don't have to worry about interrupts here */
	prev = atomic64_read(&counter->hw.prev_count);
	delta = (val - prev) & 0xfffffffful;
	atomic64_add(delta, &counter->count);

	/*
	 * See if the total period for this counter has expired,
	 * and update for the next period.
	 */
	val = 0;
	left = atomic64_read(&counter->hw.period_left) - delta;
	if (period) {
		if (left <= 0) {
			left += period;
			if (left <= 0)
				left = period;
			record = 1;
		}
		if (left < 0x80000000L)
			val = 0x80000000L - left;
	}

	/*
	 * Finally record data if requested.
	 */
	if (record) {
		struct perf_sample_data data = {
			.regs	= regs,
			.addr	= 0,
			.period	= counter->hw.last_period,
		};

		if (counter->attr.sample_type & PERF_SAMPLE_ADDR) {
			/*
			 * The user wants a data address recorded.
			 * If we're not doing instruction sampling,
			 * give them the SDAR (sampled data address).
			 * If we are doing instruction sampling, then only
			 * give them the SDAR if it corresponds to the
			 * instruction pointed to by SIAR; this is indicated
			 * by the [POWER6_]MMCRA_SDSYNC bit in MMCRA.
			 */
			mmcra = regs->dsisr;
			sdsync = (ppmu->flags & PPMU_ALT_SIPR) ?
				POWER6_MMCRA_SDSYNC : MMCRA_SDSYNC;
			if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync))
				data.addr = mfspr(SPRN_SDAR);
		}
		if (perf_counter_overflow(counter, nmi, &data)) {
			/*
			 * Interrupts are coming too fast - throttle them
			 * by setting the counter to 0, so it will be
			 * at least 2^30 cycles until the next interrupt
			 * (assuming each counter counts at most 2 counts
			 * per cycle).
			 */
			val = 0;
			left = ~0ULL >> 1;
		}
	}

	write_pmc(counter->hw.idx, val);
	atomic64_set(&counter->hw.prev_count, val);
	atomic64_set(&counter->hw.period_left, left);
	perf_counter_update_userpage(counter);
}

/*
 * Called from generic code to get the misc flags (i.e. processor mode)
 * for an event.
 */
unsigned long perf_misc_flags(struct pt_regs *regs)
{
	unsigned long mmcra;

	if (TRAP(regs) != 0xf00) {
		/* not a PMU interrupt */
		return user_mode(regs) ? PERF_EVENT_MISC_USER :
			PERF_EVENT_MISC_KERNEL;
	}
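
	/* regs->dsisr was overloaded with MMCRA by perf_counter_interrupt() */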
	mmcra = regs->dsisr;
	if (ppmu->flags & PPMU_ALT_SIPR) {
		if (mmcra & POWER6_MMCRA_SIHV)
			return PERF_EVENT_MISC_HYPERVISOR;
		return (mmcra & POWER6_MMCRA_SIPR) ? PERF_EVENT_MISC_USER :
			PERF_EVENT_MISC_KERNEL;
	}
	if (mmcra & MMCRA_SIHV)
		return PERF_EVENT_MISC_HYPERVISOR;
	return (mmcra & MMCRA_SIPR) ? PERF_EVENT_MISC_USER :
		PERF_EVENT_MISC_KERNEL;
}

/*
 * Called from generic code to get the instruction pointer
 * for an event.
 */
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
	unsigned long mmcra;
	unsigned long ip;
	unsigned long slot;

	if (TRAP(regs) != 0xf00)
		return regs->nip;	/* not a PMU interrupt */

	ip = mfspr(SPRN_SIAR);
	mmcra = regs->dsisr;
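	/*
	 * With instruction sampling enabled, SIAR points at the start
	 * of the sampled instruction group; the MMCRA slot field says
	 * which 4-byte instruction within that group was sampled, so
	 * adjust the IP accordingly.
	 */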
	if ((mmcra & MMCRA_SAMPLE_ENABLE) && !(ppmu->flags & PPMU_ALT_SIPR)) {
		slot = (mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT;
		if (slot > 1)
			ip += 4 * (slot - 1);
	}
	return ip;
}

/*
 * Performance monitor interrupt stuff
 */
static void perf_counter_interrupt(struct pt_regs *regs)
{
	int i;
	struct cpu_hw_counters *cpuhw = &__get_cpu_var(cpu_hw_counters);
	struct perf_counter *counter;
	long val;
	int found = 0;
	int nmi;

	if (cpuhw->n_limited)
		freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5),
					mfspr(SPRN_PMC6));

	/*
	 * Overload regs->dsisr to store MMCRA so we only need to read it once.
	 */
	regs->dsisr = mfspr(SPRN_MMCRA);

	/*
	 * If interrupts were soft-disabled when this PMU interrupt
	 * occurred, treat it as an NMI.
	 */
	nmi = !regs->softe;
	if (nmi)
		nmi_enter();
	else
		irq_enter();

	for (i = 0; i < cpuhw->n_counters; ++i) {
		counter = cpuhw->counter[i];
		if (!counter->hw.idx || is_limited_pmc(counter->hw.idx))
			continue;
		val = read_pmc(counter->hw.idx);
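		/*
		 * A negative 32-bit value means bit 31 is set, which is
		 * the PMC overflow condition that raised this interrupt.
		 */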
		if ((int)val < 0) {
			/* counter has overflowed */
			found = 1;
			record_and_restart(counter, val, regs, nmi);
		}
	}

	/*
	 * In case we didn't find and reset the counter that caused
	 * the interrupt, scan all counters and reset any that are
	 * negative, to avoid getting continual interrupts.
	 * Any that we processed in the previous loop will not be negative.
	 */
	if (!found) {
		for (i = 0; i < ppmu->n_counter; ++i) {
			if (is_limited_pmc(i + 1))
				continue;
			val = read_pmc(i + 1);
			if ((int)val < 0)
				write_pmc(i + 1, 0);
		}
	}

	/*
	 * Reset MMCR0 to its normal value.  This will set PMXE and
	 * clear FC (freeze counters) and PMAO (perf mon alert occurred)
	 * and thus allow interrupts to occur again.
	 * XXX might want to use MSR.PM to keep the counters frozen until
	 * we get back out of this interrupt.
	 */
	write_mmcr0(cpuhw, cpuhw->mmcr[0]);

	if (nmi)
		nmi_exit();
	else
		irq_exit();
}

void hw_perf_counter_setup(int cpu)
{
	struct cpu_hw_counters *cpuhw = &per_cpu(cpu_hw_counters, cpu);

	memset(cpuhw, 0, sizeof(*cpuhw));
	cpuhw->mmcr[0] = MMCR0_FC;
}

int register_power_pmu(struct power_pmu *pmu)
{
	if (ppmu)
		return -EBUSY;		/* something's already registered */

	ppmu = pmu;
	pr_info("%s performance monitor hardware support registered\n",
		pmu->name);

	/*
	 * Use FCHV to ignore kernel events if MSR.HV is set.
	 */
	if (mfmsr() & MSR_HV)
		freeze_counters_kernel = MMCR0_FCHV;

	return 0;
}