/*
 * Performance counter support - powerpc architecture code
 *
 * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_counter.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/reg.h>
#include <asm/pmc.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>
struct cpu_hw_counters {
	int n_counters;
	int disabled;
	int n_added;
	int n_limited;
	u8  pmcs_enabled;
	struct perf_counter *counter[MAX_HWCOUNTERS];
	unsigned int events[MAX_HWCOUNTERS];
	unsigned int flags[MAX_HWCOUNTERS];
	u64 mmcr[3];
	struct perf_counter *limited_counter[MAX_LIMITED_HWCOUNTERS];
	u8  limited_hwidx[MAX_LIMITED_HWCOUNTERS];
};
DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters);

struct power_pmu *ppmu;
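
/*
 * Note on the mmcr[] array above: throughout this file mmcr[0] holds the
 * value destined for MMCR0, mmcr[1] the value for MMCR1, and mmcr[2] the
 * value for MMCRA (see e.g. hw_perf_enable() below, which writes mmcr[2]
 * to SPRN_MMCRA).
 */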
/*
 * Normally, to ignore kernel events we set the FCS (freeze counters
 * in supervisor mode) bit in MMCR0, but if the kernel runs with the
 * hypervisor bit set in the MSR, or if we are running on a processor
 * where the hypervisor bit is forced to 1 (as on Apple G5 processors),
 * then we need to use the FCHV bit to ignore kernel events.
 */
static unsigned int freeze_counters_kernel = MMCR0_FCS;
static void perf_counter_interrupt(struct pt_regs *regs);

void perf_counter_print_debug(void)
{
}
/*
 * Read one performance monitor counter (PMC).
 */
static unsigned long read_pmc(int idx)
{
	unsigned long val;

	switch (idx) {
	case 1:
		val = mfspr(SPRN_PMC1);
		break;
	case 2:
		val = mfspr(SPRN_PMC2);
		break;
	case 3:
		val = mfspr(SPRN_PMC3);
		break;
	case 4:
		val = mfspr(SPRN_PMC4);
		break;
	case 5:
		val = mfspr(SPRN_PMC5);
		break;
	case 6:
		val = mfspr(SPRN_PMC6);
		break;
	case 7:
		val = mfspr(SPRN_PMC7);
		break;
	case 8:
		val = mfspr(SPRN_PMC8);
		break;
	default:
		printk(KERN_ERR "oops trying to read PMC%d\n", idx);
		val = 0;
	}
	return val;
}
/*
 * Write one PMC.
 */
static void write_pmc(int idx, unsigned long val)
{
	switch (idx) {
	case 1:
		mtspr(SPRN_PMC1, val);
		break;
	case 2:
		mtspr(SPRN_PMC2, val);
		break;
	case 3:
		mtspr(SPRN_PMC3, val);
		break;
	case 4:
		mtspr(SPRN_PMC4, val);
		break;
	case 5:
		mtspr(SPRN_PMC5, val);
		break;
	case 6:
		mtspr(SPRN_PMC6, val);
		break;
	case 7:
		mtspr(SPRN_PMC7, val);
		break;
	case 8:
		mtspr(SPRN_PMC8, val);
		break;
	default:
		printk(KERN_ERR "oops trying to write PMC%d\n", idx);
	}
}
/*
 * Check if a set of events can all go on the PMU at once.
 * If they can't, this will look at alternative codes for the events
 * and see if any combination of alternative codes is feasible.
 * The feasible set is returned in event[].
 */
static int power_check_constraints(unsigned int event[], unsigned int cflags[],
				   int n_ev)
{
	u64 mask, value, nv;
	unsigned int alternatives[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
	u64 amasks[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
	u64 avalues[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
	u64 smasks[MAX_HWCOUNTERS], svalues[MAX_HWCOUNTERS];
	int n_alt[MAX_HWCOUNTERS], choice[MAX_HWCOUNTERS];
	int i, j;
	u64 addf = ppmu->add_fields;
	u64 tadd = ppmu->test_adder;

	if (n_ev > ppmu->n_counter)
		return -1;

	/* First see if the events will go on as-is */
	for (i = 0; i < n_ev; ++i) {
		if ((cflags[i] & PPMU_LIMITED_PMC_REQD)
		    && !ppmu->limited_pmc_event(event[i])) {
			ppmu->get_alternatives(event[i], cflags[i],
					       alternatives[i]);
			event[i] = alternatives[i][0];
		}
		if (ppmu->get_constraint(event[i], &amasks[i][0],
					 &avalues[i][0]))
			return -1;
	}
	value = mask = 0;
	for (i = 0; i < n_ev; ++i) {
		nv = (value | avalues[i][0]) + (value & avalues[i][0] & addf);
		if ((((nv + tadd) ^ value) & mask) != 0 ||
		    (((nv + tadd) ^ avalues[i][0]) & amasks[i][0]) != 0)
			break;
		value = nv;
		mask |= amasks[i][0];
	}
	if (i == n_ev)
		return 0;	/* all OK */

	/* doesn't work, gather alternatives... */
	if (!ppmu->get_alternatives)
		return -1;
	for (i = 0; i < n_ev; ++i) {
		choice[i] = 0;
		n_alt[i] = ppmu->get_alternatives(event[i], cflags[i],
						  alternatives[i]);
		for (j = 1; j < n_alt[i]; ++j)
			ppmu->get_constraint(alternatives[i][j],
					     &amasks[i][j], &avalues[i][j]);
	}

	/* enumerate all possibilities and see if any will work */
	i = 0;
	j = -1;
	value = mask = nv = 0;
	while (i < n_ev) {
		if (j >= 0) {
			/* we're backtracking, restore context */
			value = svalues[i];
			mask = smasks[i];
			j = choice[i];
		}

		/*
		 * See if any alternative k for event i,
		 * where k > j, will satisfy the constraints.
		 */
		while (++j < n_alt[i]) {
			nv = (value | avalues[i][j]) +
				(value & avalues[i][j] & addf);
			if ((((nv + tadd) ^ value) & mask) == 0 &&
			    (((nv + tadd) ^ avalues[i][j])
			     & amasks[i][j]) == 0)
				break;
		}
		if (j >= n_alt[i]) {
			/*
			 * No feasible alternative, backtrack
			 * to event i-1 and continue enumerating its
			 * alternatives from where we got up to.
			 */
			if (--i < 0)
				return -1;
		} else {
			/*
			 * Found a feasible alternative for event i,
			 * remember where we got up to with this event,
			 * go on to the next event, and start with
			 * the first alternative for it.
			 */
			choice[i] = j;
			svalues[i] = value;
			smasks[i] = mask;
			value = nv;
			mask |= amasks[i][j];
			++i;
			j = -1;
		}
	}

	/* OK, we have a feasible combination, tell the caller the solution */
	for (i = 0; i < n_ev; ++i)
		event[i] = alternatives[i][choice[i]];
	return 0;
}
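
/*
 * Illustrative note (not from the original source): each event's
 * constraint is a (mask, value) pair of packed bit fields, and the
 * PMU-specific add_fields/test_adder values are chosen so that the
 * "value" fields of several events can be summed with one 64-bit add.
 * Roughly, if a field may hold at most N events, test_adder contains a
 * bias for that field such that exceeding N makes the addition carry
 * into bits covered by "mask"; the two XOR tests above then reject the
 * over-subscribed combination without decoding any individual field.
 */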
/*
 * Check if newly-added counters have consistent settings for
 * exclude_{user,kernel,hv} with each other and any previously
 * added counters.
 */
static int check_excludes(struct perf_counter **ctrs, unsigned int cflags[],
			  int n_prev, int n_new)
{
	int eu = 0, ek = 0, eh = 0;
	int i, n, first;
	struct perf_counter *counter;

	n = n_prev + n_new;
	if (n <= 1)
		return 0;

	first = 1;
	for (i = 0; i < n; ++i) {
		if (cflags[i] & PPMU_LIMITED_PMC_OK) {
			cflags[i] &= ~PPMU_LIMITED_PMC_REQD;
			continue;
		}
		counter = ctrs[i];
		if (first) {
			eu = counter->hw_event.exclude_user;
			ek = counter->hw_event.exclude_kernel;
			eh = counter->hw_event.exclude_hv;
			first = 0;
		} else if (counter->hw_event.exclude_user != eu ||
			   counter->hw_event.exclude_kernel != ek ||
			   counter->hw_event.exclude_hv != eh) {
			return -EAGAIN;
		}
	}

	if (eu || ek || eh)
		for (i = 0; i < n; ++i)
			if (cflags[i] & PPMU_LIMITED_PMC_OK)
				cflags[i] |= PPMU_LIMITED_PMC_REQD;

	return 0;
}
static void power_pmu_read(struct perf_counter *counter)
{
	long val, delta, prev;

	if (!counter->hw.idx)
		return;
	/*
	 * Performance monitor interrupts come even when interrupts
	 * are soft-disabled, as long as interrupts are hard-enabled.
	 * Therefore we treat them like NMIs.
	 */
	do {
		prev = atomic64_read(&counter->hw.prev_count);
		barrier();
		val = read_pmc(counter->hw.idx);
	} while (atomic64_cmpxchg(&counter->hw.prev_count, prev, val) != prev);

	/* The counters are only 32 bits wide */
	delta = (val - prev) & 0xfffffffful;
	atomic64_add(delta, &counter->count);
	atomic64_sub(delta, &counter->hw.period_left);
}
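
/*
 * Illustrative note (not from the original source): masking the delta
 * with 0xffffffff handles wraparound of the 32-bit PMCs.  For example,
 * if prev = 0xfffffff0 and the PMC has wrapped around to val = 0x10,
 * then (val - prev) & 0xfffffffful = 0x20, i.e. 32 events elapsed,
 * which is what gets added to the 64-bit software count.
 */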
/*
 * On some machines, PMC5 and PMC6 can't be written, don't respect
 * the freeze conditions, and don't generate interrupts.  This tells
 * us if `counter' is using such a PMC.
 */
static int is_limited_pmc(int pmcnum)
{
	return ppmu->limited_pmc5_6 && (pmcnum == 5 || pmcnum == 6);
}
static void freeze_limited_counters(struct cpu_hw_counters *cpuhw,
				    unsigned long pmc5, unsigned long pmc6)
{
	struct perf_counter *counter;
	u64 val, prev, delta;
	int i;

	for (i = 0; i < cpuhw->n_limited; ++i) {
		counter = cpuhw->limited_counter[i];
		if (!counter->hw.idx)
			continue;
		val = (counter->hw.idx == 5) ? pmc5 : pmc6;
		prev = atomic64_read(&counter->hw.prev_count);
		counter->hw.idx = 0;
		delta = (val - prev) & 0xfffffffful;
		atomic64_add(delta, &counter->count);
	}
}
static void thaw_limited_counters(struct cpu_hw_counters *cpuhw,
				  unsigned long pmc5, unsigned long pmc6)
{
	struct perf_counter *counter;
	u64 val;
	int i;

	for (i = 0; i < cpuhw->n_limited; ++i) {
		counter = cpuhw->limited_counter[i];
		counter->hw.idx = cpuhw->limited_hwidx[i];
		val = (counter->hw.idx == 5) ? pmc5 : pmc6;
		atomic64_set(&counter->hw.prev_count, val);
		perf_counter_update_userpage(counter);
	}
}
/*
 * Since limited counters don't respect the freeze conditions, we
 * have to read them immediately after freezing or unfreezing the
 * other counters.  We try to keep the values from the limited
 * counters as consistent as possible by keeping the delay (in
 * cycles and instructions) between freezing/unfreezing and reading
 * the limited counters as small and consistent as possible.
 * Therefore, if any limited counters are in use, we read them
 * both, and always in the same order, to minimize variability,
 * and do it inside the same asm that writes MMCR0.
 */
static void write_mmcr0(struct cpu_hw_counters *cpuhw, unsigned long mmcr0)
{
	unsigned long pmc5, pmc6;

	if (!cpuhw->n_limited) {
		mtspr(SPRN_MMCR0, mmcr0);
		return;
	}

	/*
	 * Write MMCR0, then read PMC5 and PMC6 immediately.
	 */
	asm volatile("mtspr %3,%2; mfspr %0,%4; mfspr %1,%5"
		     : "=&r" (pmc5), "=&r" (pmc6)
		     : "r" (mmcr0), "i" (SPRN_MMCR0),
		       "i" (SPRN_PMC5), "i" (SPRN_PMC6));

	if (mmcr0 & MMCR0_FC)
		freeze_limited_counters(cpuhw, pmc5, pmc6);
	else
		thaw_limited_counters(cpuhw, pmc5, pmc6);
}
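
/*
 * Illustrative note (not from the original source): callers use
 * write_mmcr0() for both directions of the freeze.  Freezing looks like
 *
 *	write_mmcr0(cpuhw, mfspr(SPRN_MMCR0) | MMCR0_FC);
 *
 * while re-enabling writes back the saved cpuhw->mmcr[0] (with MMCR0_FC
 * clear), as hw_perf_disable() and hw_perf_enable() below do; either
 * way, PMC5/PMC6 are sampled in the same instant the freeze state
 * changes.
 */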
/*
 * Disable all counters to prevent PMU interrupts and to allow
 * counters to be added or removed.
 */
void hw_perf_disable(void)
{
	struct cpu_hw_counters *cpuhw;
	unsigned long ret;
	unsigned long flags;

	local_irq_save(flags);
	cpuhw = &__get_cpu_var(cpu_hw_counters);

	ret = cpuhw->disabled;
	if (!ret) {
		cpuhw->disabled = 1;
		cpuhw->n_added = 0;

		/*
		 * Check if we ever enabled the PMU on this cpu.
		 */
		if (!cpuhw->pmcs_enabled) {
			if (ppc_md.enable_pmcs)
				ppc_md.enable_pmcs();
			cpuhw->pmcs_enabled = 1;
		}

		/*
		 * Disable instruction sampling if it was enabled
		 */
		if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
			mtspr(SPRN_MMCRA,
			      cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
			mb();
		}

		/*
		 * Set the 'freeze counters' bit.
		 * The barrier is to make sure the mtspr has been
		 * executed and the PMU has frozen the counters
		 * before we return.
		 */
		write_mmcr0(cpuhw, mfspr(SPRN_MMCR0) | MMCR0_FC);
		mb();
	}
	local_irq_restore(flags);
}
/*
 * Re-enable all counters.
 * If we were previously disabled and counters were added, then
 * put the new config on the PMU.
 */
void hw_perf_enable(void)
{
	struct perf_counter *counter;
	struct cpu_hw_counters *cpuhw;
	unsigned long flags;
	long i;
	unsigned long val;
	s64 left;
	unsigned int hwc_index[MAX_HWCOUNTERS];
	int n_lim;
	int idx;

	local_irq_save(flags);
	cpuhw = &__get_cpu_var(cpu_hw_counters);
	if (!cpuhw->disabled) {
		local_irq_restore(flags);
		return;
	}
	cpuhw->disabled = 0;

	/*
	 * If we didn't change anything, or only removed counters,
	 * no need to recalculate MMCR* settings and reset the PMCs.
	 * Just reenable the PMU with the current MMCR* settings
	 * (possibly updated for removal of counters).
	 */
	if (!cpuhw->n_added) {
		mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
		mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
		if (cpuhw->n_counters == 0)
			get_lppaca()->pmcregs_in_use = 0;
		goto out_enable;
	}

	/*
	 * Compute MMCR* values for the new set of counters
	 */
	if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_counters, hwc_index,
			       cpuhw->mmcr)) {
		/* shouldn't ever get here */
		printk(KERN_ERR "oops compute_mmcr failed\n");
		goto out;
	}

	/*
	 * Add in MMCR0 freeze bits corresponding to the
	 * hw_event.exclude_* bits for the first counter.
	 * We have already checked that all counters have the
	 * same values for these bits as the first counter.
	 */
	counter = cpuhw->counter[0];
	if (counter->hw_event.exclude_user)
		cpuhw->mmcr[0] |= MMCR0_FCP;
	if (counter->hw_event.exclude_kernel)
		cpuhw->mmcr[0] |= freeze_counters_kernel;
	if (counter->hw_event.exclude_hv)
		cpuhw->mmcr[0] |= MMCR0_FCHV;

	/*
	 * Write the new configuration to MMCR* with the freeze
	 * bit set and set the hardware counters to their initial values.
	 * Then unfreeze the counters.
	 */
	get_lppaca()->pmcregs_in_use = 1;
	mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
	mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
	mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE))
				| MMCR0_FC);

	/*
	 * Read off any pre-existing counters that need to move
	 * to another PMC.
	 */
	for (i = 0; i < cpuhw->n_counters; ++i) {
		counter = cpuhw->counter[i];
		if (counter->hw.idx && counter->hw.idx != hwc_index[i] + 1) {
			power_pmu_read(counter);
			write_pmc(counter->hw.idx, 0);
			counter->hw.idx = 0;
		}
	}

	/*
	 * Initialize the PMCs for all the new and moved counters.
	 */
	cpuhw->n_limited = n_lim = 0;
	for (i = 0; i < cpuhw->n_counters; ++i) {
		counter = cpuhw->counter[i];
		if (counter->hw.idx)
			continue;
		idx = hwc_index[i] + 1;
		if (is_limited_pmc(idx)) {
			cpuhw->limited_counter[n_lim] = counter;
			cpuhw->limited_hwidx[n_lim] = idx;
			++n_lim;
			continue;
		}
		val = 0;
		if (counter->hw.irq_period) {
			left = atomic64_read(&counter->hw.period_left);
			if (left < 0x80000000L)
				val = 0x80000000L - left;
		}
		atomic64_set(&counter->hw.prev_count, val);
		counter->hw.idx = idx;
		write_pmc(idx, val);
		perf_counter_update_userpage(counter);
	}
	cpuhw->n_limited = n_lim;
	cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE;

 out_enable:
	mb();
	write_mmcr0(cpuhw, cpuhw->mmcr[0]);

	/*
	 * Enable instruction sampling if necessary
	 */
	if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
		mb();
		mtspr(SPRN_MMCRA, cpuhw->mmcr[2]);
	}

 out:
	local_irq_restore(flags);
}
static int collect_events(struct perf_counter *group, int max_count,
			  struct perf_counter *ctrs[], unsigned int *events,
			  unsigned int *flags)
{
	int n = 0;
	struct perf_counter *counter;

	if (!is_software_counter(group)) {
		if (n >= max_count)
			return -1;
		ctrs[n] = group;
		flags[n] = group->hw.counter_base;
		events[n++] = group->hw.config;
	}
	list_for_each_entry(counter, &group->sibling_list, list_entry) {
		if (!is_software_counter(counter) &&
		    counter->state != PERF_COUNTER_STATE_OFF) {
			if (n >= max_count)
				return -1;
			ctrs[n] = counter;
			flags[n] = counter->hw.counter_base;
			events[n++] = counter->hw.config;
		}
	}
	return n;
}
static void counter_sched_in(struct perf_counter *counter, int cpu)
{
	counter->state = PERF_COUNTER_STATE_ACTIVE;
	counter->oncpu = cpu;
	counter->tstamp_running += counter->ctx->time - counter->tstamp_stopped;
	if (is_software_counter(counter))
		counter->pmu->enable(counter);
}
/*
 * Called to enable a whole group of counters.
 * Returns 1 if the group was enabled, or -EAGAIN if it could not be.
 * Assumes the caller has disabled interrupts and has
 * frozen the PMU with hw_perf_disable.
 */
int hw_perf_group_sched_in(struct perf_counter *group_leader,
	       struct perf_cpu_context *cpuctx,
	       struct perf_counter_context *ctx, int cpu)
{
	struct cpu_hw_counters *cpuhw;
	long i, n, n0;
	struct perf_counter *sub;

	cpuhw = &__get_cpu_var(cpu_hw_counters);
	n0 = cpuhw->n_counters;
	n = collect_events(group_leader, ppmu->n_counter - n0,
			   &cpuhw->counter[n0], &cpuhw->events[n0],
			   &cpuhw->flags[n0]);
	if (n < 0)
		return -EAGAIN;
	if (check_excludes(cpuhw->counter, cpuhw->flags, n0, n))
		return -EAGAIN;
	i = power_check_constraints(cpuhw->events, cpuhw->flags, n + n0);
	if (i < 0)
		return -EAGAIN;
	cpuhw->n_counters = n0 + n;
	cpuhw->n_added += n;

	/*
	 * OK, this group can go on; update counter states etc.,
	 * and enable any software counters
	 */
	for (i = n0; i < n0 + n; ++i)
		cpuhw->counter[i]->hw.config = cpuhw->events[i];
	cpuctx->active_oncpu += n;
	n = 1;
	counter_sched_in(group_leader, cpu);
	list_for_each_entry(sub, &group_leader->sibling_list, list_entry) {
		if (sub->state != PERF_COUNTER_STATE_OFF) {
			counter_sched_in(sub, cpu);
			++n;
		}
	}
	ctx->nr_active += n;

	return 1;
}
/*
 * Add a counter to the PMU.
 * If all counters are not already frozen, then we disable and
 * re-enable the PMU in order to get hw_perf_enable to do the
 * actual work of reconfiguring the PMU.
 */
static int power_pmu_enable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuhw;
	unsigned long flags;
	int n0;
	int ret = -EAGAIN;

	local_irq_save(flags);
	perf_disable();

	/*
	 * Add the counter to the list (if there is room)
	 * and check whether the total set is still feasible.
	 */
	cpuhw = &__get_cpu_var(cpu_hw_counters);
	n0 = cpuhw->n_counters;
	if (n0 >= ppmu->n_counter)
		goto out;
	cpuhw->counter[n0] = counter;
	cpuhw->events[n0] = counter->hw.config;
	cpuhw->flags[n0] = counter->hw.counter_base;
	if (check_excludes(cpuhw->counter, cpuhw->flags, n0, 1))
		goto out;
	if (power_check_constraints(cpuhw->events, cpuhw->flags, n0 + 1))
		goto out;

	counter->hw.config = cpuhw->events[n0];
	cpuhw->n_counters = n0 + 1;
	++cpuhw->n_added;

	ret = 0;
 out:
	perf_enable();
	local_irq_restore(flags);
	return ret;
}
/*
 * Remove a counter from the PMU.
 */
static void power_pmu_disable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuhw;
	long i;
	unsigned long flags;

	local_irq_save(flags);
	perf_disable();

	power_pmu_read(counter);

	cpuhw = &__get_cpu_var(cpu_hw_counters);
	for (i = 0; i < cpuhw->n_counters; ++i) {
		if (counter == cpuhw->counter[i]) {
			while (++i < cpuhw->n_counters)
				cpuhw->counter[i-1] = cpuhw->counter[i];
			--cpuhw->n_counters;
			ppmu->disable_pmc(counter->hw.idx - 1, cpuhw->mmcr);
			if (counter->hw.idx) {
				write_pmc(counter->hw.idx, 0);
				counter->hw.idx = 0;
			}
			perf_counter_update_userpage(counter);
			break;
		}
	}
	for (i = 0; i < cpuhw->n_limited; ++i)
		if (counter == cpuhw->limited_counter[i])
			break;
	if (i < cpuhw->n_limited) {
		while (++i < cpuhw->n_limited) {
			cpuhw->limited_counter[i-1] = cpuhw->limited_counter[i];
			cpuhw->limited_hwidx[i-1] = cpuhw->limited_hwidx[i];
		}
		--cpuhw->n_limited;
	}
	if (cpuhw->n_counters == 0) {
		/* disable exceptions if no counters are running */
		cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE);
	}

	perf_enable();
	local_irq_restore(flags);
}
struct pmu power_pmu = {
	.enable		= power_pmu_enable,
	.disable	= power_pmu_disable,
	.read		= power_pmu_read,
};
/*
 * Return 1 if we might be able to put counter on a limited PMC,
 * or 0 if not.
 * A counter can only go on a limited PMC if it counts something
 * that a limited PMC can count, doesn't require interrupts, and
 * doesn't exclude any processor mode.
 */
static int can_go_on_limited_pmc(struct perf_counter *counter, unsigned int ev,
				 unsigned int flags)
{
	int n;
	unsigned int alt[MAX_EVENT_ALTERNATIVES];

	if (counter->hw_event.exclude_user
	    || counter->hw_event.exclude_kernel
	    || counter->hw_event.exclude_hv
	    || counter->hw_event.irq_period)
		return 0;

	if (ppmu->limited_pmc_event(ev))
		return 1;

	/*
	 * The requested event isn't on a limited PMC already;
	 * see if any alternative code goes on a limited PMC.
	 */
	if (!ppmu->get_alternatives)
		return 0;

	flags |= PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD;
	n = ppmu->get_alternatives(ev, flags, alt);

	return n > 0;
}
/*
 * Find an alternative event that goes on a normal PMC, if possible,
 * and return the event code, or 0 if there is no such alternative.
 * (Note: event code 0 is "don't count" on all machines.)
 */
static unsigned long normal_pmc_alternative(unsigned long ev,
					    unsigned long flags)
{
	unsigned int alt[MAX_EVENT_ALTERNATIVES];
	int n;

	flags &= ~(PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD);
	n = ppmu->get_alternatives(ev, flags, alt);
	if (!n)
		return 0;
	return alt[0];
}
/* Number of perf_counters counting hardware events */
static atomic_t num_counters;
/* Used to avoid races in calling reserve/release_pmc_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);
/*
 * Release the PMU if this is the last perf_counter.
 */
static void hw_perf_counter_destroy(struct perf_counter *counter)
{
	if (!atomic_add_unless(&num_counters, -1, 1)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_dec_return(&num_counters) == 0)
			release_pmc_hardware();
		mutex_unlock(&pmc_reserve_mutex);
	}
}
const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
{
	unsigned long ev, flags;
	struct perf_counter *ctrs[MAX_HWCOUNTERS];
	unsigned int events[MAX_HWCOUNTERS];
	unsigned int cflags[MAX_HWCOUNTERS];
	int n;
	int err;

	if (!ppmu)
		return ERR_PTR(-ENXIO);
	if (!perf_event_raw(&counter->hw_event)) {
		ev = perf_event_id(&counter->hw_event);
		if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
			return ERR_PTR(-EOPNOTSUPP);
		ev = ppmu->generic_events[ev];
	} else {
		ev = perf_event_config(&counter->hw_event);
	}
	counter->hw.config_base = ev;
	counter->hw.idx = 0;

	/*
	 * If we are not running on a hypervisor, force the
	 * exclude_hv bit to 0 so that we don't care what
	 * the user set it to.
	 */
	if (!firmware_has_feature(FW_FEATURE_LPAR))
		counter->hw_event.exclude_hv = 0;

	/*
	 * If this is a per-task counter, then we can use
	 * PM_RUN_* events interchangeably with their non RUN_*
	 * equivalents, e.g. PM_RUN_CYC instead of PM_CYC.
	 * XXX we should check if the task is an idle task.
	 */
	flags = 0;
	if (counter->ctx->task)
		flags |= PPMU_ONLY_COUNT_RUN;

	/*
	 * If this machine has limited counters, check whether this
	 * event could go on a limited counter.
	 */
	if (ppmu->limited_pmc5_6) {
		if (can_go_on_limited_pmc(counter, ev, flags)) {
			flags |= PPMU_LIMITED_PMC_OK;
		} else if (ppmu->limited_pmc_event(ev)) {
			/*
			 * The requested event is on a limited PMC,
			 * but we can't use a limited PMC; see if any
			 * alternative goes on a normal PMC.
			 */
			ev = normal_pmc_alternative(ev, flags);
			if (!ev)
				return ERR_PTR(-EINVAL);
		}
	}

	/*
	 * If this is in a group, check if it can go on with all the
	 * other hardware counters in the group.  We assume the counter
	 * hasn't been linked into its leader's sibling list at this point.
	 */
	n = 0;
	if (counter->group_leader != counter) {
		n = collect_events(counter->group_leader, ppmu->n_counter - 1,
				   ctrs, events, cflags);
		if (n < 0)
			return ERR_PTR(-EINVAL);
	}
	events[n] = ev;
	ctrs[n] = counter;
	cflags[n] = flags;
	if (check_excludes(ctrs, cflags, n, 1))
		return ERR_PTR(-EINVAL);
	if (power_check_constraints(events, cflags, n + 1))
		return ERR_PTR(-EINVAL);

	counter->hw.config = events[n];
	counter->hw.counter_base = cflags[n];
	atomic64_set(&counter->hw.period_left, counter->hw.irq_period);

	/*
	 * See if we need to reserve the PMU.
	 * If no counters are currently in use, then we have to take a
	 * mutex to ensure that we don't race with another task doing
	 * reserve_pmc_hardware or release_pmc_hardware.
	 */
	err = 0;
	if (!atomic_inc_not_zero(&num_counters)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&num_counters) == 0 &&
		    reserve_pmc_hardware(perf_counter_interrupt))
			err = -EBUSY;
		else
			atomic_inc(&num_counters);
		mutex_unlock(&pmc_reserve_mutex);
	}
	counter->destroy = hw_perf_counter_destroy;

	if (err)
		return ERR_PTR(err);
	return &power_pmu;
}
/*
 * A counter has overflowed; update its count and record
 * things if requested.  Note that interrupts are hard-disabled
 * here so there is no possibility of being interrupted.
 */
static void record_and_restart(struct perf_counter *counter, long val,
			       struct pt_regs *regs, int nmi)
{
	u64 period = counter->hw.irq_period;
	s64 prev, delta, left;
	int record = 0;

	/* we don't have to worry about interrupts here */
	prev = atomic64_read(&counter->hw.prev_count);
	delta = (val - prev) & 0xfffffffful;
	atomic64_add(delta, &counter->count);

	/*
	 * See if the total period for this counter has expired,
	 * and update for the next period.
	 */
	val = 0;
	left = atomic64_read(&counter->hw.period_left) - delta;
	if (period) {
		if (left <= 0) {
			left += period;
			if (left <= 0)
				left = period;
			record = 1;
		}
		if (left < 0x80000000L)
			val = 0x80000000L - left;
	}
	write_pmc(counter->hw.idx, val);
	atomic64_set(&counter->hw.prev_count, val);
	atomic64_set(&counter->hw.period_left, left);
	perf_counter_update_userpage(counter);

	/*
	 * Finally record data if requested.
	 */
	if (record)
		perf_counter_overflow(counter, nmi, regs, 0);
}
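
/*
 * Illustrative note (not from the original source): the PMCs can raise a
 * performance monitor exception when bit 31 (the sign bit) becomes set,
 * so writing 0x80000000 - left starts a counter "left" events short of
 * the interrupt point.  For example, with left = 100000 the PMC is
 * loaded with 0x7FFE7960 and overflows into bit 31 after exactly 100000
 * more events, giving one PMU interrupt per sample period.
 */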
/*
 * Performance monitor interrupt stuff
 */
static void perf_counter_interrupt(struct pt_regs *regs)
{
	int i;
	struct cpu_hw_counters *cpuhw = &__get_cpu_var(cpu_hw_counters);
	struct perf_counter *counter;
	long val;
	int found = 0;
	int nmi;

	if (cpuhw->n_limited)
		freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5),
					mfspr(SPRN_PMC6));

	/*
	 * If interrupts were soft-disabled when this PMU interrupt
	 * occurred, treat it as an NMI.
	 */
	nmi = !regs->softe;
	if (nmi)
		nmi_enter();
	else
		irq_enter();

	for (i = 0; i < cpuhw->n_counters; ++i) {
		counter = cpuhw->counter[i];
		if (is_limited_pmc(counter->hw.idx))
			continue;
		val = read_pmc(counter->hw.idx);
		if ((int)val < 0) {
			/* counter has overflowed */
			found = 1;
			record_and_restart(counter, val, regs, nmi);
		}
	}

	/*
	 * In case we didn't find and reset the counter that caused
	 * the interrupt, scan all counters and reset any that are
	 * negative, to avoid getting continual interrupts.
	 * Any that we processed in the previous loop will not be negative.
	 */
	if (!found) {
		for (i = 0; i < ppmu->n_counter; ++i) {
			if (is_limited_pmc(i + 1))
				continue;
			val = read_pmc(i + 1);
			if ((int)val < 0)
				write_pmc(i + 1, 0);
		}
	}

	/*
	 * Reset MMCR0 to its normal value.  This will set PMXE and
	 * clear FC (freeze counters) and PMAO (perf mon alert occurred)
	 * and thus allow interrupts to occur again.
	 * XXX might want to use MSR.PM to keep the counters frozen until
	 * we get back out of this interrupt.
	 */
	write_mmcr0(cpuhw, cpuhw->mmcr[0]);

	if (nmi)
		nmi_exit();
	else
		irq_exit();
}
void hw_perf_counter_setup(int cpu)
{
	struct cpu_hw_counters *cpuhw = &per_cpu(cpu_hw_counters, cpu);

	memset(cpuhw, 0, sizeof(*cpuhw));
	cpuhw->mmcr[0] = MMCR0_FC;
}
extern struct power_pmu power4_pmu;
extern struct power_pmu ppc970_pmu;
extern struct power_pmu power5_pmu;
extern struct power_pmu power5p_pmu;
extern struct power_pmu power6_pmu;

static int init_perf_counters(void)
{
	unsigned long pvr;

	/* XXX should get this from cputable */
	pvr = mfspr(SPRN_PVR);
	switch (PVR_VER(pvr)) {
	case PV_POWER4:
	case PV_POWER4p:
		ppmu = &power4_pmu;
		break;
	case PV_970:
	case PV_970FX:
	case PV_970MP:
		ppmu = &ppc970_pmu;
		break;
	case PV_POWER5:
		ppmu = &power5_pmu;
		break;
	case PV_POWER5p:
		ppmu = &power5p_pmu;
		break;
	case 0x3e:			/* POWER6 */
		ppmu = &power6_pmu;
		break;
	}

	/*
	 * Use FCHV to ignore kernel events if MSR.HV is set.
	 */
	if (mfmsr() & MSR_HV)
		freeze_counters_kernel = MMCR0_FCHV;

	return 0;
}

arch_initcall(init_perf_counters);