/*
 * Performance counter support - powerpc architecture code
 *
 * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_counter.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/reg.h>
#include <asm/pmc.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>

struct cpu_hw_counters {
	int n_counters;
	int n_percpu;
	int disabled;
	int n_added;
	int n_limited;
	u8 pmcs_enabled;
	struct perf_counter *counter[MAX_HWCOUNTERS];
	u64 events[MAX_HWCOUNTERS];
	unsigned int flags[MAX_HWCOUNTERS];
	unsigned long mmcr[3];
	struct perf_counter *limited_counter[MAX_LIMITED_HWCOUNTERS];
	u8 limited_hwidx[MAX_LIMITED_HWCOUNTERS];
};
DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters);

struct power_pmu *ppmu;

/*
 * Normally, to ignore kernel events we set the FCS (freeze counters
 * in supervisor mode) bit in MMCR0, but if the kernel runs with the
 * hypervisor bit set in the MSR, or if we are running on a processor
 * where the hypervisor bit is forced to 1 (as on Apple G5 processors),
 * then we need to use the FCHV bit to ignore kernel events.
 */
static unsigned int freeze_counters_kernel = MMCR0_FCS;

/*
 * 32-bit doesn't have MMCRA but does have an MMCR2,
 * and a few other names are different.
 */
#ifdef CONFIG_PPC32

#define MMCR0_FCHV		0
#define MMCR0_PMCjCE		MMCR0_PMCnCE

#define SPRN_MMCRA		SPRN_MMCR2
#define MMCRA_SAMPLE_ENABLE	0

static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
{
	return 0;
}
static inline void perf_set_pmu_inuse(int inuse) { }
static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp) { }
static inline u32 perf_get_misc_flags(struct pt_regs *regs)
{
	return 0;
}
static inline void perf_read_regs(struct pt_regs *regs) { }
static inline int perf_intr_is_nmi(struct pt_regs *regs)
{
	return 0;
}

#endif /* CONFIG_PPC32 */

/*
 * Things that are specific to 64-bit implementations.
 */
#ifdef CONFIG_PPC64

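/*
 * With instruction sampling enabled, SIAR points at the start of the
 * sampled instruction group; the MMCRA slot field records which
 * instruction in the group was sampled, at 4 bytes per instruction.
 * perf_ip_adjust() below converts that slot number into the byte
 * offset to add to SIAR.
 */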
static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
{
	unsigned long mmcra = regs->dsisr;

	if ((mmcra & MMCRA_SAMPLE_ENABLE) && !(ppmu->flags & PPMU_ALT_SIPR)) {
		unsigned long slot = (mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT;
		if (slot > 1)
			return 4 * (slot - 1);
	}
	return 0;
}

static inline void perf_set_pmu_inuse(int inuse)
{
	get_lppaca()->pmcregs_in_use = inuse;
}

/*
 * The user wants a data address recorded.
 * If we're not doing instruction sampling, give them the SDAR
 * (sampled data address).  If we are doing instruction sampling, then
 * only give them the SDAR if it corresponds to the instruction
 * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC
 * bit in MMCRA.
 */
static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
{
	unsigned long mmcra = regs->dsisr;
	unsigned long sdsync = (ppmu->flags & PPMU_ALT_SIPR) ?
		POWER6_MMCRA_SDSYNC : MMCRA_SDSYNC;

	if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync))
		*addrp = mfspr(SPRN_SDAR);
}

static inline u32 perf_get_misc_flags(struct pt_regs *regs)
{
	unsigned long mmcra = regs->dsisr;

	if (TRAP(regs) != 0xf00)
		return 0;	/* not a PMU interrupt */

	if (ppmu->flags & PPMU_ALT_SIPR) {
		if (mmcra & POWER6_MMCRA_SIHV)
			return PERF_EVENT_MISC_HYPERVISOR;
		return (mmcra & POWER6_MMCRA_SIPR) ?
			PERF_EVENT_MISC_USER : PERF_EVENT_MISC_KERNEL;
	}
	if (mmcra & MMCRA_SIHV)
		return PERF_EVENT_MISC_HYPERVISOR;
	return (mmcra & MMCRA_SIPR) ? PERF_EVENT_MISC_USER :
		PERF_EVENT_MISC_KERNEL;
}

/*
 * Overload regs->dsisr to store MMCRA so we only need to read it once
 * on each interrupt.
 */
static inline void perf_read_regs(struct pt_regs *regs)
{
	regs->dsisr = mfspr(SPRN_MMCRA);
}

/*
 * If interrupts were soft-disabled when a PMU interrupt occurs, treat
 * it as an NMI.
 */
static inline int perf_intr_is_nmi(struct pt_regs *regs)
{
	return !regs->softe;
}

#endif /* CONFIG_PPC64 */

static void perf_counter_interrupt(struct pt_regs *regs);

void perf_counter_print_debug(void)
{
}

/*
 * Read one performance monitor counter (PMC).
 */
static unsigned long read_pmc(int idx)
{
	unsigned long val;

	switch (idx) {
	case 1:
		val = mfspr(SPRN_PMC1);
		break;
	case 2:
		val = mfspr(SPRN_PMC2);
		break;
	case 3:
		val = mfspr(SPRN_PMC3);
		break;
	case 4:
		val = mfspr(SPRN_PMC4);
		break;
	case 5:
		val = mfspr(SPRN_PMC5);
		break;
	case 6:
		val = mfspr(SPRN_PMC6);
		break;
#ifdef CONFIG_PPC64
	case 7:
		val = mfspr(SPRN_PMC7);
		break;
	case 8:
		val = mfspr(SPRN_PMC8);
		break;
#endif /* CONFIG_PPC64 */
	default:
		printk(KERN_ERR "oops trying to read PMC%d\n", idx);
		val = 0;
	}
	return val;
}

/*
 * Write one PMC.
 */
static void write_pmc(int idx, unsigned long val)
{
	switch (idx) {
	case 1:
		mtspr(SPRN_PMC1, val);
		break;
	case 2:
		mtspr(SPRN_PMC2, val);
		break;
	case 3:
		mtspr(SPRN_PMC3, val);
		break;
	case 4:
		mtspr(SPRN_PMC4, val);
		break;
	case 5:
		mtspr(SPRN_PMC5, val);
		break;
	case 6:
		mtspr(SPRN_PMC6, val);
		break;
#ifdef CONFIG_PPC64
	case 7:
		mtspr(SPRN_PMC7, val);
		break;
	case 8:
		mtspr(SPRN_PMC8, val);
		break;
#endif /* CONFIG_PPC64 */
	default:
		printk(KERN_ERR "oops trying to write PMC%d\n", idx);
	}
}

/*
 * Check if a set of events can all go on the PMU at once.
 * If they can't, this will look at alternative codes for the events
 * and see if any combination of alternative codes is feasible.
 * The feasible set is returned in event[].
 */
static int power_check_constraints(u64 event[], unsigned int cflags[],
				   int n_ev)
{
	unsigned long mask, value, nv;
	u64 alternatives[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
	unsigned long amasks[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
	unsigned long avalues[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
	unsigned long smasks[MAX_HWCOUNTERS], svalues[MAX_HWCOUNTERS];
	int n_alt[MAX_HWCOUNTERS], choice[MAX_HWCOUNTERS];
	int i, j;
	unsigned long addf = ppmu->add_fields;
	unsigned long tadd = ppmu->test_adder;

	if (n_ev > ppmu->n_counter)
		return -1;

	/* First see if the events will go on as-is */
	for (i = 0; i < n_ev; ++i) {
		if ((cflags[i] & PPMU_LIMITED_PMC_REQD)
		    && !ppmu->limited_pmc_event(event[i])) {
			ppmu->get_alternatives(event[i], cflags[i],
					       alternatives[i]);
			event[i] = alternatives[i][0];
		}
		if (ppmu->get_constraint(event[i], &amasks[i][0],
					 &avalues[i][0]))
			return -1;
	}
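	/*
	 * Each constraint value packs several resource-usage fields,
	 * one per PMU resource class.  Roughly: add_fields holds the
	 * low bit of each field, so adding constraint values sums the
	 * fields in parallel, and test_adder is chosen so that an
	 * over-committed field carries into a bit checked by the
	 * event's mask.  A single add-and-compare thus detects
	 * conflicts across all fields at once.
	 */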
	value = mask = 0;
	for (i = 0; i < n_ev; ++i) {
		nv = (value | avalues[i][0]) + (value & avalues[i][0] & addf);
		if ((((nv + tadd) ^ value) & mask) != 0 ||
		    (((nv + tadd) ^ avalues[i][0]) & amasks[i][0]) != 0)
			break;
		value = nv;
		mask |= amasks[i][0];
	}
	if (i == n_ev)
		return 0;	/* all OK */

	/* doesn't work, gather alternatives... */
	if (!ppmu->get_alternatives)
		return -1;
	for (i = 0; i < n_ev; ++i) {
		choice[i] = 0;
		n_alt[i] = ppmu->get_alternatives(event[i], cflags[i],
						  alternatives[i]);
		for (j = 1; j < n_alt[i]; ++j)
			ppmu->get_constraint(alternatives[i][j],
					     &amasks[i][j], &avalues[i][j]);
	}

	/* enumerate all possibilities and see if any will work */
	i = 0;
	j = -1;
	value = mask = nv = 0;
	while (i < n_ev) {
		if (j >= 0) {
			/* we're backtracking, restore context */
			value = svalues[i];
			mask = smasks[i];
			j = choice[i];
		}
		/*
		 * See if any alternative k for event i,
		 * where k > j, will satisfy the constraints.
		 */
		while (++j < n_alt[i]) {
			nv = (value | avalues[i][j]) +
				(value & avalues[i][j] & addf);
			if ((((nv + tadd) ^ value) & mask) == 0 &&
			    (((nv + tadd) ^ avalues[i][j])
			     & amasks[i][j]) == 0)
				break;
		}
		if (j >= n_alt[i]) {
			/*
			 * No feasible alternative, backtrack
			 * to event i-1 and continue enumerating its
			 * alternatives from where we got up to.
			 */
			if (--i < 0)
				return -1;
		} else {
			/*
			 * Found a feasible alternative for event i,
			 * remember where we got up to with this event,
			 * go on to the next event, and start with
			 * the first alternative for it.
			 */
			choice[i] = j;
			svalues[i] = value;
			smasks[i] = mask;
			value = nv;
			mask |= amasks[i][j];
			++i;
			j = -1;
		}
	}

	/* OK, we have a feasible combination, tell the caller the solution */
	for (i = 0; i < n_ev; ++i)
		event[i] = alternatives[i][choice[i]];
	return 0;
}

/*
 * Check if newly-added counters have consistent settings for
 * exclude_{user,kernel,hv} with each other and any previously
 * added counters.
 */
static int check_excludes(struct perf_counter **ctrs, unsigned int cflags[],
			  int n_prev, int n_new)
{
	int eu = 0, ek = 0, eh = 0;
	int i, n, first;
	struct perf_counter *counter;

	n = n_prev + n_new;
	if (n <= 1)
		return 0;

	first = 1;
	for (i = 0; i < n; ++i) {
		if (cflags[i] & PPMU_LIMITED_PMC_OK) {
			cflags[i] &= ~PPMU_LIMITED_PMC_REQD;
			continue;
		}
		counter = ctrs[i];
		if (first) {
			eu = counter->attr.exclude_user;
			ek = counter->attr.exclude_kernel;
			eh = counter->attr.exclude_hv;
			first = 0;
		} else if (counter->attr.exclude_user != eu ||
			   counter->attr.exclude_kernel != ek ||
			   counter->attr.exclude_hv != eh) {
			return -EAGAIN;
		}
	}

	if (eu || ek || eh)
		for (i = 0; i < n; ++i)
			if (cflags[i] & PPMU_LIMITED_PMC_OK)
				cflags[i] |= PPMU_LIMITED_PMC_REQD;

	return 0;
}

static void power_pmu_read(struct perf_counter *counter)
{
	s64 val, delta, prev;

	if (!counter->hw.idx)
		return;
	/*
	 * Performance monitor interrupts come even when interrupts
	 * are soft-disabled, as long as interrupts are hard-enabled.
	 * Therefore we treat them like NMIs.
	 */
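	/*
	 * The cmpxchg loop guards against racing with an interrupt
	 * that updates prev_count: if prev_count changed between our
	 * read of it and our read of the PMC, the cmpxchg fails and
	 * we retry with fresh values.
	 */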
	do {
		prev = atomic64_read(&counter->hw.prev_count);
		barrier();
		val = read_pmc(counter->hw.idx);
	} while (atomic64_cmpxchg(&counter->hw.prev_count, prev, val) != prev);

	/* The counters are only 32 bits wide */
	delta = (val - prev) & 0xfffffffful;
	atomic64_add(delta, &counter->count);
	atomic64_sub(delta, &counter->hw.period_left);
}

/*
 * On some machines, PMC5 and PMC6 can't be written, don't respect
 * the freeze conditions, and don't generate interrupts.  This tells
 * us if `counter' is using such a PMC.
 */
static int is_limited_pmc(int pmcnum)
{
	return (ppmu->flags & PPMU_LIMITED_PMC5_6)
		&& (pmcnum == 5 || pmcnum == 6);
}

static void freeze_limited_counters(struct cpu_hw_counters *cpuhw,
				    unsigned long pmc5, unsigned long pmc6)
{
	struct perf_counter *counter;
	u64 val, prev, delta;
	int i;

	for (i = 0; i < cpuhw->n_limited; ++i) {
		counter = cpuhw->limited_counter[i];
		if (!counter->hw.idx)
			continue;
		val = (counter->hw.idx == 5) ? pmc5 : pmc6;
		prev = atomic64_read(&counter->hw.prev_count);
		counter->hw.idx = 0;
		delta = (val - prev) & 0xfffffffful;
		atomic64_add(delta, &counter->count);
	}
}

static void thaw_limited_counters(struct cpu_hw_counters *cpuhw,
				  unsigned long pmc5, unsigned long pmc6)
{
	struct perf_counter *counter;
	u64 val;
	int i;

	for (i = 0; i < cpuhw->n_limited; ++i) {
		counter = cpuhw->limited_counter[i];
		counter->hw.idx = cpuhw->limited_hwidx[i];
		val = (counter->hw.idx == 5) ? pmc5 : pmc6;
		atomic64_set(&counter->hw.prev_count, val);
		perf_counter_update_userpage(counter);
	}
}

/*
 * Since limited counters don't respect the freeze conditions, we
 * have to read them immediately after freezing or unfreezing the
 * other counters.  We try to keep the values from the limited
 * counters as consistent as possible by keeping the delay (in
 * cycles and instructions) between freezing/unfreezing and reading
 * the limited counters as small and consistent as possible.
 * Therefore, if any limited counters are in use, we read them
 * both, and always in the same order, to minimize variability,
 * and do it inside the same asm that writes MMCR0.
 */
static void write_mmcr0(struct cpu_hw_counters *cpuhw, unsigned long mmcr0)
{
	unsigned long pmc5, pmc6;

	if (!cpuhw->n_limited) {
		mtspr(SPRN_MMCR0, mmcr0);
		return;
	}

	/*
	 * Write MMCR0, then read PMC5 and PMC6 immediately.
	 * To ensure we don't get a performance monitor interrupt
	 * between writing MMCR0 and freezing/thawing the limited
	 * counters, we first write MMCR0 with the counter overflow
	 * interrupt enable bits turned off.
	 */
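	/*
	 * The "=&r" earlyclobbers are required because the outputs are
	 * written before the asm has consumed all of its inputs, so
	 * the compiler must not assign pmc5/pmc6 to an input register.
	 */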
	asm volatile("mtspr %3,%2; mfspr %0,%4; mfspr %1,%5"
		     : "=&r" (pmc5), "=&r" (pmc6)
		     : "r" (mmcr0 & ~(MMCR0_PMC1CE | MMCR0_PMCjCE)),
		       "i" (SPRN_MMCR0),
		       "i" (SPRN_PMC5), "i" (SPRN_PMC6));

	if (mmcr0 & MMCR0_FC)
		freeze_limited_counters(cpuhw, pmc5, pmc6);
	else
		thaw_limited_counters(cpuhw, pmc5, pmc6);

	/*
	 * Write the full MMCR0 including the counter overflow interrupt
	 * enable bits, if necessary.
	 */
	if (mmcr0 & (MMCR0_PMC1CE | MMCR0_PMCjCE))
		mtspr(SPRN_MMCR0, mmcr0);
}

/*
 * Disable all counters to prevent PMU interrupts and to allow
 * counters to be added or removed.
 */
void hw_perf_disable(void)
{
	struct cpu_hw_counters *cpuhw;
	unsigned long flags;

	if (!ppmu)
		return;
	local_irq_save(flags);
	cpuhw = &__get_cpu_var(cpu_hw_counters);

	if (!cpuhw->disabled) {
		cpuhw->disabled = 1;
		cpuhw->n_added = 0;

		/*
		 * Check if we ever enabled the PMU on this cpu.
		 */
		if (!cpuhw->pmcs_enabled) {
			if (ppc_md.enable_pmcs)
				ppc_md.enable_pmcs();
			cpuhw->pmcs_enabled = 1;
		}

		/*
		 * Disable instruction sampling if it was enabled
		 */
		if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
			mtspr(SPRN_MMCRA,
			      cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
			mb();
		}

		/*
		 * Set the 'freeze counters' bit.
		 * The barrier is to make sure the mtspr has been
		 * executed and the PMU has frozen the counters
		 * before we return.
		 */
		write_mmcr0(cpuhw, mfspr(SPRN_MMCR0) | MMCR0_FC);
		mb();
	}
	local_irq_restore(flags);
}

/*
 * Re-enable all counters.
 * If we were previously disabled and counters were added, then
 * put the new config on the PMU.
 */
void hw_perf_enable(void)
{
	struct perf_counter *counter;
	struct cpu_hw_counters *cpuhw;
	unsigned long flags;
	long i;
	unsigned long val;
	s64 left;
	unsigned int hwc_index[MAX_HWCOUNTERS];
	int n_lim;
	int idx;

	if (!ppmu)
		return;
	local_irq_save(flags);
	cpuhw = &__get_cpu_var(cpu_hw_counters);
	if (!cpuhw->disabled) {
		local_irq_restore(flags);
		return;
	}
	cpuhw->disabled = 0;

	/*
	 * If we didn't change anything, or only removed counters,
	 * no need to recalculate MMCR* settings and reset the PMCs.
	 * Just reenable the PMU with the current MMCR* settings
	 * (possibly updated for removal of counters).
	 */
	if (!cpuhw->n_added) {
		mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
		mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
		if (cpuhw->n_counters == 0)
			perf_set_pmu_inuse(0);
		goto out_enable;
	}

	/*
	 * Compute MMCR* values for the new set of counters
	 */
	if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_counters, hwc_index,
			       cpuhw->mmcr)) {
		/* shouldn't ever get here */
		printk(KERN_ERR "oops compute_mmcr failed\n");
		goto out;
	}

	/*
	 * Add in MMCR0 freeze bits corresponding to the
	 * attr.exclude_* bits for the first counter.
	 * We have already checked that all counters have the
	 * same values for these bits as the first counter.
	 */
	counter = cpuhw->counter[0];
	if (counter->attr.exclude_user)
		cpuhw->mmcr[0] |= MMCR0_FCP;
	if (counter->attr.exclude_kernel)
		cpuhw->mmcr[0] |= freeze_counters_kernel;
	if (counter->attr.exclude_hv)
		cpuhw->mmcr[0] |= MMCR0_FCHV;

	/*
	 * Write the new configuration to MMCR* with the freeze
	 * bit set and set the hardware counters to their initial values.
	 * Then unfreeze the counters.
	 */
	perf_set_pmu_inuse(1);
	mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
	mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
	mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE))
		| MMCR0_FC);

	/*
	 * Read off any pre-existing counters that need to move
	 * to another PMC.
	 */
	for (i = 0; i < cpuhw->n_counters; ++i) {
		counter = cpuhw->counter[i];
		if (counter->hw.idx && counter->hw.idx != hwc_index[i] + 1) {
			power_pmu_read(counter);
			write_pmc(counter->hw.idx, 0);
			counter->hw.idx = 0;
		}
	}

	/*
	 * Initialize the PMCs for all the new and moved counters.
	 */
	cpuhw->n_limited = n_lim = 0;
	for (i = 0; i < cpuhw->n_counters; ++i) {
		counter = cpuhw->counter[i];
		if (counter->hw.idx)
			continue;
		idx = hwc_index[i] + 1;
		if (is_limited_pmc(idx)) {
			cpuhw->limited_counter[n_lim] = counter;
			cpuhw->limited_hwidx[n_lim] = idx;
			++n_lim;
			continue;
		}
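		/*
		 * A PMC raises its overflow interrupt when it counts
		 * past 2^31, so preloading it with 2^31 - left makes
		 * it overflow after "left" more events.
		 */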
		val = 0;
		if (counter->hw.sample_period) {
			left = atomic64_read(&counter->hw.period_left);
			if (left < 0x80000000L)
				val = 0x80000000L - left;
		}
		atomic64_set(&counter->hw.prev_count, val);
		counter->hw.idx = idx;
		write_pmc(idx, val);
		perf_counter_update_userpage(counter);
	}
	cpuhw->n_limited = n_lim;
	cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE;

 out_enable:
	mb();
	write_mmcr0(cpuhw, cpuhw->mmcr[0]);

	/*
	 * Enable instruction sampling if necessary
	 */
	if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
		mb();
		mtspr(SPRN_MMCRA, cpuhw->mmcr[2]);
	}

 out:
	local_irq_restore(flags);
}

static int collect_events(struct perf_counter *group, int max_count,
			  struct perf_counter *ctrs[], u64 *events,
			  unsigned int *flags)
{
	int n = 0;
	struct perf_counter *counter;

	if (!is_software_counter(group)) {
		if (n >= max_count)
			return -1;
		ctrs[n] = group;
		flags[n] = group->hw.counter_base;
		events[n++] = group->hw.config;
	}
	list_for_each_entry(counter, &group->sibling_list, list_entry) {
		if (!is_software_counter(counter) &&
		    counter->state != PERF_COUNTER_STATE_OFF) {
			if (n >= max_count)
				return -1;
			ctrs[n] = counter;
			flags[n] = counter->hw.counter_base;
			events[n++] = counter->hw.config;
		}
	}
	return n;
}

static void counter_sched_in(struct perf_counter *counter, int cpu)
{
	counter->state = PERF_COUNTER_STATE_ACTIVE;
	counter->oncpu = cpu;
	counter->tstamp_running += counter->ctx->time - counter->tstamp_stopped;
	if (is_software_counter(counter))
		counter->pmu->enable(counter);
}

/*
 * Called to enable a whole group of counters.
 * Returns 1 if the group was enabled, or -EAGAIN if it could not be.
 * Assumes the caller has disabled interrupts and has
 * frozen the PMU with hw_perf_disable.
 */
int hw_perf_group_sched_in(struct perf_counter *group_leader,
			   struct perf_cpu_context *cpuctx,
			   struct perf_counter_context *ctx, int cpu)
{
	struct cpu_hw_counters *cpuhw;
	long i, n, n0;
	struct perf_counter *sub;

	if (!ppmu)
		return 0;
	cpuhw = &__get_cpu_var(cpu_hw_counters);
	n0 = cpuhw->n_counters;
	n = collect_events(group_leader, ppmu->n_counter - n0,
			   &cpuhw->counter[n0], &cpuhw->events[n0],
			   &cpuhw->flags[n0]);
	if (n < 0)
		return -EAGAIN;
	if (check_excludes(cpuhw->counter, cpuhw->flags, n0, n))
		return -EAGAIN;
	i = power_check_constraints(cpuhw->events, cpuhw->flags, n + n0);
	if (i < 0)
		return -EAGAIN;
	cpuhw->n_counters = n0 + n;
	cpuhw->n_added += n;

	/*
	 * OK, this group can go on; update counter states etc.,
	 * and enable any software counters
	 */
	for (i = n0; i < n0 + n; ++i)
		cpuhw->counter[i]->hw.config = cpuhw->events[i];
	cpuctx->active_oncpu += n;
	n = 1;
	counter_sched_in(group_leader, cpu);
	list_for_each_entry(sub, &group_leader->sibling_list, list_entry) {
		if (sub->state != PERF_COUNTER_STATE_OFF) {
			counter_sched_in(sub, cpu);
			++n;
		}
	}
	ctx->nr_active += n;

	return 1;
}

/*
 * Add a counter to the PMU.
 * If all counters are not already frozen, then we disable and
 * re-enable the PMU in order to get hw_perf_enable to do the
 * actual work of reconfiguring the PMU.
 */
static int power_pmu_enable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuhw;
	unsigned long flags;
	int n0;
	int ret = -EAGAIN;

	local_irq_save(flags);
	perf_disable();

	/*
	 * Add the counter to the list (if there is room)
	 * and check whether the total set is still feasible.
	 */
	cpuhw = &__get_cpu_var(cpu_hw_counters);
	n0 = cpuhw->n_counters;
	if (n0 >= ppmu->n_counter)
		goto out;
	cpuhw->counter[n0] = counter;
	cpuhw->events[n0] = counter->hw.config;
	cpuhw->flags[n0] = counter->hw.counter_base;
	if (check_excludes(cpuhw->counter, cpuhw->flags, n0, 1))
		goto out;
	if (power_check_constraints(cpuhw->events, cpuhw->flags, n0 + 1))
		goto out;

	counter->hw.config = cpuhw->events[n0];
	++cpuhw->n_counters;
	++cpuhw->n_added;

	ret = 0;
 out:
	perf_enable();
	local_irq_restore(flags);
	return ret;
}

/*
 * Remove a counter from the PMU.
 */
static void power_pmu_disable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuhw;
	long i;
	unsigned long flags;

	local_irq_save(flags);
	perf_disable();

	power_pmu_read(counter);

	cpuhw = &__get_cpu_var(cpu_hw_counters);
	for (i = 0; i < cpuhw->n_counters; ++i) {
		if (counter == cpuhw->counter[i]) {
			while (++i < cpuhw->n_counters)
				cpuhw->counter[i-1] = cpuhw->counter[i];
			--cpuhw->n_counters;
			ppmu->disable_pmc(counter->hw.idx - 1, cpuhw->mmcr);
			if (counter->hw.idx) {
				write_pmc(counter->hw.idx, 0);
				counter->hw.idx = 0;
			}
			perf_counter_update_userpage(counter);
			break;
		}
	}
	for (i = 0; i < cpuhw->n_limited; ++i)
		if (counter == cpuhw->limited_counter[i])
			break;
	if (i < cpuhw->n_limited) {
		while (++i < cpuhw->n_limited) {
			cpuhw->limited_counter[i-1] = cpuhw->limited_counter[i];
			cpuhw->limited_hwidx[i-1] = cpuhw->limited_hwidx[i];
		}
		--cpuhw->n_limited;
	}
	if (cpuhw->n_counters == 0) {
		/* disable exceptions if no counters are running */
		cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE);
	}

	perf_enable();
	local_irq_restore(flags);
}

/*
 * Re-enable interrupts on a counter after they were throttled
 * because they were coming too fast.
 */
static void power_pmu_unthrottle(struct perf_counter *counter)
{
	s64 val, left;
	unsigned long flags;

	if (!counter->hw.idx || !counter->hw.sample_period)
		return;
	local_irq_save(flags);
	perf_disable();
	power_pmu_read(counter);
	left = counter->hw.sample_period;
	counter->hw.last_period = left;
	val = 0;
	if (left < 0x80000000L)
		val = 0x80000000L - left;
	write_pmc(counter->hw.idx, val);
	atomic64_set(&counter->hw.prev_count, val);
	atomic64_set(&counter->hw.period_left, left);
	perf_counter_update_userpage(counter);
	perf_enable();
	local_irq_restore(flags);
}

struct pmu power_pmu = {
	.enable		= power_pmu_enable,
	.disable	= power_pmu_disable,
	.read		= power_pmu_read,
	.unthrottle	= power_pmu_unthrottle,
};

/*
 * Return 1 if we might be able to put counter on a limited PMC,
 * or 0 if not.
 * A counter can only go on a limited PMC if it counts something
 * that a limited PMC can count, doesn't require interrupts, and
 * doesn't exclude any processor mode.
 */
static int can_go_on_limited_pmc(struct perf_counter *counter, u64 ev,
				 unsigned int flags)
{
	int n;
	u64 alt[MAX_EVENT_ALTERNATIVES];

	if (counter->attr.exclude_user
	    || counter->attr.exclude_kernel
	    || counter->attr.exclude_hv
	    || counter->attr.sample_period)
		return 0;

	if (ppmu->limited_pmc_event(ev))
		return 1;

	/*
	 * The requested event isn't on a limited PMC already;
	 * see if any alternative code goes on a limited PMC.
	 */
	if (!ppmu->get_alternatives)
		return 0;

	flags |= PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD;
	n = ppmu->get_alternatives(ev, flags, alt);

	return n > 0;
}

/*
 * Find an alternative event that goes on a normal PMC, if possible,
 * and return the event code, or 0 if there is no such alternative.
 * (Note: event code 0 is "don't count" on all machines.)
 */
static u64 normal_pmc_alternative(u64 ev, unsigned long flags)
{
	u64 alt[MAX_EVENT_ALTERNATIVES];
	int n;

	flags &= ~(PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD);
	n = ppmu->get_alternatives(ev, flags, alt);
	if (!n)
		return 0;
	return alt[0];
}

/* Number of perf_counters counting hardware events */
static atomic_t num_counters;
/* Used to avoid races in calling reserve/release_pmc_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);

/*
 * Release the PMU if this is the last perf_counter.
 */
static void hw_perf_counter_destroy(struct perf_counter *counter)
{
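	/*
	 * atomic_add_unless(&num_counters, -1, 1) refuses to decrement
	 * only when the count is exactly 1, i.e. when we may be the
	 * last user; in that case take the mutex and re-check under it
	 * before releasing the hardware.
	 */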
	if (!atomic_add_unless(&num_counters, -1, 1)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_dec_return(&num_counters) == 0)
			release_pmc_hardware();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

/*
 * Translate a generic cache event config to a raw event code.
 */
static int hw_perf_cache_event(u64 config, u64 *eventp)
{
	unsigned long type, op, result;
	int ev;

	if (!ppmu->cache_events)
		return -EINVAL;

	/* unpack config */
	type = config & 0xff;
	op = (config >> 8) & 0xff;
	result = (config >> 16) & 0xff;
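	/*
	 * For example, config 0x10000 unpacks to type 0, op 0,
	 * result 1, i.e. L1 data cache read misses.
	 */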

	if (type >= PERF_COUNT_HW_CACHE_MAX ||
	    op >= PERF_COUNT_HW_CACHE_OP_MAX ||
	    result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ev = (*ppmu->cache_events)[type][op][result];
	if (ev == 0)
		return -EOPNOTSUPP;
	if (ev == -1)
		return -EINVAL;
	*eventp = ev;
	return 0;
}

const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
{
	u64 ev;
	unsigned long flags;
	struct perf_counter *ctrs[MAX_HWCOUNTERS];
	u64 events[MAX_HWCOUNTERS];
	unsigned int cflags[MAX_HWCOUNTERS];
	int n;
	int err;

	if (!ppmu)
		return ERR_PTR(-ENXIO);
	switch (counter->attr.type) {
	case PERF_TYPE_HARDWARE:
		ev = counter->attr.config;
		if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
			return ERR_PTR(-EOPNOTSUPP);
		ev = ppmu->generic_events[ev];
		break;
	case PERF_TYPE_HW_CACHE:
		err = hw_perf_cache_event(counter->attr.config, &ev);
		if (err)
			return ERR_PTR(err);
		break;
	case PERF_TYPE_RAW:
		ev = counter->attr.config;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}
	counter->hw.config_base = ev;
	counter->hw.idx = 0;

	/*
	 * If we are not running on a hypervisor, force the
	 * exclude_hv bit to 0 so that we don't care what
	 * the user set it to.
	 */
	if (!firmware_has_feature(FW_FEATURE_LPAR))
		counter->attr.exclude_hv = 0;

	/*
	 * If this is a per-task counter, then we can use
	 * PM_RUN_* events interchangeably with their non RUN_*
	 * equivalents, e.g. PM_RUN_CYC instead of PM_CYC.
	 * XXX we should check if the task is an idle task.
	 */
	flags = 0;
	if (counter->ctx->task)
		flags |= PPMU_ONLY_COUNT_RUN;

	/*
	 * If this machine has limited counters, check whether this
	 * event could go on a limited counter.
	 */
	if (ppmu->flags & PPMU_LIMITED_PMC5_6) {
		if (can_go_on_limited_pmc(counter, ev, flags)) {
			flags |= PPMU_LIMITED_PMC_OK;
		} else if (ppmu->limited_pmc_event(ev)) {
			/*
			 * The requested event is on a limited PMC,
			 * but we can't use a limited PMC; see if any
			 * alternative goes on a normal PMC.
			 */
			ev = normal_pmc_alternative(ev, flags);
			if (!ev)
				return ERR_PTR(-EINVAL);
		}
	}

	/*
	 * If this is in a group, check if it can go on with all the
	 * other hardware counters in the group.  We assume the counter
	 * hasn't been linked into its leader's sibling list at this point.
	 */
	n = 0;
	if (counter->group_leader != counter) {
		n = collect_events(counter->group_leader, ppmu->n_counter - 1,
				   ctrs, events, cflags);
		if (n < 0)
			return ERR_PTR(-EINVAL);
	}
	events[n] = ev;
	ctrs[n] = counter;
	cflags[n] = flags;
	if (check_excludes(ctrs, cflags, n, 1))
		return ERR_PTR(-EINVAL);
	if (power_check_constraints(events, cflags, n + 1))
		return ERR_PTR(-EINVAL);

	counter->hw.config = events[n];
	counter->hw.counter_base = cflags[n];
	counter->hw.last_period = counter->hw.sample_period;
	atomic64_set(&counter->hw.period_left, counter->hw.last_period);

	/*
	 * See if we need to reserve the PMU.
	 * If no counters are currently in use, then we have to take a
	 * mutex to ensure that we don't race with another task doing
	 * reserve_pmc_hardware or release_pmc_hardware.
	 */
	err = 0;
	if (!atomic_inc_not_zero(&num_counters)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&num_counters) == 0 &&
		    reserve_pmc_hardware(perf_counter_interrupt))
			err = -EBUSY;
		else
			atomic_inc(&num_counters);
		mutex_unlock(&pmc_reserve_mutex);
	}
	counter->destroy = hw_perf_counter_destroy;

	if (err)
		return ERR_PTR(err);
	return &power_pmu;
}

/*
 * A counter has overflowed; update its count and record
 * things if requested.  Note that interrupts are hard-disabled
 * here so there is no possibility of being interrupted.
 */
static void record_and_restart(struct perf_counter *counter, unsigned long val,
			       struct pt_regs *regs, int nmi)
{
	u64 period = counter->hw.sample_period;
	s64 prev, delta, left;
	int record = 0;

	/* we don't have to worry about interrupts here */
	prev = atomic64_read(&counter->hw.prev_count);
	delta = (val - prev) & 0xfffffffful;
	atomic64_add(delta, &counter->count);

	/*
	 * See if the total period for this counter has expired,
	 * and update for the next period.
	 */
	val = 0;
	left = atomic64_read(&counter->hw.period_left) - delta;
	if (period) {
		if (left <= 0) {
			left += period;
			if (left <= 0)
				left = period;
			record = 1;
		}
		if (left < 0x80000000LL)
			val = 0x80000000LL - left;
	}

	/*
	 * Finally record data if requested.
	 */
	if (record) {
		struct perf_sample_data data = {
			.regs	= regs,
			.addr	= 0,
			.period	= counter->hw.last_period,
		};

		if (counter->attr.sample_type & PERF_SAMPLE_ADDR)
			perf_get_data_addr(regs, &data.addr);

		if (perf_counter_overflow(counter, nmi, &data)) {
			/*
			 * Interrupts are coming too fast - throttle them
			 * by setting the counter to 0, so it will be
			 * at least 2^30 cycles until the next interrupt
			 * (assuming each counter counts at most 2 counts
			 * per cycle).
			 */
			val = 0;
			left = ~0ULL >> 1;
		}
	}

	write_pmc(counter->hw.idx, val);
	atomic64_set(&counter->hw.prev_count, val);
	atomic64_set(&counter->hw.period_left, left);
	perf_counter_update_userpage(counter);
}

/*
 * Called from generic code to get the misc flags (i.e. processor mode)
 * for an event.
 */
unsigned long perf_misc_flags(struct pt_regs *regs)
{
	u32 flags = perf_get_misc_flags(regs);

	if (flags)
		return flags;
	return user_mode(regs) ? PERF_EVENT_MISC_USER :
		PERF_EVENT_MISC_KERNEL;
}

/*
 * Called from generic code to get the instruction pointer
 * for an event.
 */
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
	unsigned long ip;

	if (TRAP(regs) != 0xf00)
		return regs->nip;	/* not a PMU interrupt */

	ip = mfspr(SPRN_SIAR) + perf_ip_adjust(regs);
	return ip;
}

/*
 * Performance monitor interrupt stuff
 */
static void perf_counter_interrupt(struct pt_regs *regs)
{
	int i;
	struct cpu_hw_counters *cpuhw = &__get_cpu_var(cpu_hw_counters);
	struct perf_counter *counter;
	unsigned long val;
	int found = 0;
	int nmi;

	if (cpuhw->n_limited)
		freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5),
					mfspr(SPRN_PMC6));

	perf_read_regs(regs);

	nmi = perf_intr_is_nmi(regs);
	if (nmi)
		nmi_enter();
	else
		irq_enter();

	for (i = 0; i < cpuhw->n_counters; ++i) {
		counter = cpuhw->counter[i];
		if (!counter->hw.idx || is_limited_pmc(counter->hw.idx))
			continue;
		val = read_pmc(counter->hw.idx);
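		/*
		 * The PMCs are 32 bits wide and interrupt when bit 31
		 * becomes set, so a value that reads back negative as
		 * a 32-bit integer indicates an overflow.
		 */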
		if ((int)val < 0) {
			/* counter has overflowed */
			found = 1;
			record_and_restart(counter, val, regs, nmi);
		}
	}

	/*
	 * In case we didn't find and reset the counter that caused
	 * the interrupt, scan all counters and reset any that are
	 * negative, to avoid getting continual interrupts.
	 * Any that we processed in the previous loop will not be negative.
	 */
	if (!found) {
		for (i = 0; i < ppmu->n_counter; ++i) {
			if (is_limited_pmc(i + 1))
				continue;
			val = read_pmc(i + 1);
			if ((int)val < 0)
				write_pmc(i + 1, 0);
		}
	}

	/*
	 * Reset MMCR0 to its normal value.  This will set PMXE and
	 * clear FC (freeze counters) and PMAO (perf mon alert occurred)
	 * and thus allow interrupts to occur again.
	 * XXX might want to use MSR.PM to keep the counters frozen until
	 * we get back out of this interrupt.
	 */
	write_mmcr0(cpuhw, cpuhw->mmcr[0]);

	if (nmi)
		nmi_exit();
	else
		irq_exit();
}

void hw_perf_counter_setup(int cpu)
{
	struct cpu_hw_counters *cpuhw = &per_cpu(cpu_hw_counters, cpu);

	if (!ppmu)
		return;
	memset(cpuhw, 0, sizeof(*cpuhw));
	cpuhw->mmcr[0] = MMCR0_FC;
}

int register_power_pmu(struct power_pmu *pmu)
{
	if (ppmu)
		return -EBUSY;		/* something's already registered */

	ppmu = pmu;
	pr_info("%s performance monitor hardware support registered\n",
		pmu->name);

#ifdef MSR_HV
	/*
	 * Use FCHV to ignore kernel events if MSR.HV is set.
	 */
	if (mfmsr() & MSR_HV)
		freeze_counters_kernel = MMCR0_FCHV;
#endif /* MSR_HV */

	return 0;
}