/*
 * Performance event support - powerpc architecture code
 *
 * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/reg.h>
#include <asm/pmc.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>
struct cpu_hw_events {
	int n_events;
	int n_percpu;
	int disabled;
	int n_added;
	int n_limited;
	u8  pmcs_enabled;
	struct perf_event *event[MAX_HWEVENTS];
	u64 events[MAX_HWEVENTS];
	unsigned int flags[MAX_HWEVENTS];
	unsigned long mmcr[3];
	struct perf_event *limited_counter[MAX_LIMITED_HWCOUNTERS];
	u8  limited_hwidx[MAX_LIMITED_HWCOUNTERS];
	u64 alternatives[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
	unsigned long amasks[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
	unsigned long avalues[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];

	unsigned int group_flag;
	int n_txn_start;
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

struct power_pmu *ppmu;
/*
 * Normally, to ignore kernel events we set the FCS (freeze counters
 * in supervisor mode) bit in MMCR0, but if the kernel runs with the
 * hypervisor bit set in the MSR, or if we are running on a processor
 * where the hypervisor bit is forced to 1 (as on Apple G5 processors),
 * then we need to use the FCHV bit to ignore kernel events.
 */
static unsigned int freeze_events_kernel = MMCR0_FCS;
/*
 * 32-bit doesn't have MMCRA but does have an MMCR2,
 * and a few other names are different.
 */
#ifdef CONFIG_PPC32

#define MMCR0_FCHV		0
#define MMCR0_PMCjCE		MMCR0_PMCnCE

#define SPRN_MMCRA		SPRN_MMCR2
#define MMCRA_SAMPLE_ENABLE	0

static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
{
	return 0;
}
static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp) { }
static inline u32 perf_get_misc_flags(struct pt_regs *regs)
{
	return 0;
}
static inline void perf_read_regs(struct pt_regs *regs)
{
	regs->result = 0;
}
static inline int perf_intr_is_nmi(struct pt_regs *regs)
{
	return 0;
}

static inline int siar_valid(struct pt_regs *regs)
{
	return 1;
}

#endif /* CONFIG_PPC32 */
static bool regs_use_siar(struct pt_regs *regs)
{
	return !!(regs->result & 1);
}
/*
 * Things that are specific to 64-bit implementations.
 */
#ifdef CONFIG_PPC64

static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
{
	unsigned long mmcra = regs->dsisr;

	if ((ppmu->flags & PPMU_HAS_SSLOT) && (mmcra & MMCRA_SAMPLE_ENABLE)) {
		unsigned long slot = (mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT;
		if (slot > 1)
			return 4 * (slot - 1);
	}
	return 0;
}
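/*
 * Editor's illustration (not part of the original source): the MMCRA
 * slot field says which instruction in the sampled fetch group
 * completed, with slot 1 being the instruction SIAR points at.
 * E.g. slot = 3 gives an adjustment of 4 * (3 - 1) = 8 bytes, so
 * perf_instruction_pointer() below reports SIAR + 8.
 */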
/*
 * The user wants a data address recorded.
 * If we're not doing instruction sampling, give them the SDAR
 * (sampled data address). If we are doing instruction sampling, then
 * only give them the SDAR if it corresponds to the instruction
 * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC or
 * the [POWER7P_]MMCRA_SDAR_VALID bit in MMCRA.
 */
static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
{
	unsigned long mmcra = regs->dsisr;
	unsigned long sdsync;

	if (ppmu->flags & PPMU_SIAR_VALID)
		sdsync = POWER7P_MMCRA_SDAR_VALID;
	else if (ppmu->flags & PPMU_ALT_SIPR)
		sdsync = POWER6_MMCRA_SDSYNC;
	else
		sdsync = MMCRA_SDSYNC;

	if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync))
		*addrp = mfspr(SPRN_SDAR);
}
static bool regs_sihv(struct pt_regs *regs)
{
	unsigned long sihv = MMCRA_SIHV;

	if (ppmu->flags & PPMU_ALT_SIPR)
		sihv = POWER6_MMCRA_SIHV;

	return !!(regs->dsisr & sihv);
}
static bool regs_sipr(struct pt_regs *regs)
{
	unsigned long sipr = MMCRA_SIPR;

	if (ppmu->flags & PPMU_ALT_SIPR)
		sipr = POWER6_MMCRA_SIPR;

	return !!(regs->dsisr & sipr);
}
static bool regs_no_sipr(struct pt_regs *regs)
{
	return !!(regs->result & 2);
}
static inline u32 perf_flags_from_msr(struct pt_regs *regs)
{
	if (regs->msr & MSR_PR)
		return PERF_RECORD_MISC_USER;
	if ((regs->msr & MSR_HV) && freeze_events_kernel != MMCR0_FCHV)
		return PERF_RECORD_MISC_HYPERVISOR;
	return PERF_RECORD_MISC_KERNEL;
}
static inline u32 perf_get_misc_flags(struct pt_regs *regs)
{
	bool use_siar = regs_use_siar(regs);

	if (!use_siar)
		return perf_flags_from_msr(regs);

	/*
	 * If we don't have flags in MMCRA, rather than using
	 * the MSR, we intuit the flags from the address in
	 * SIAR which should give slightly more reliable
	 * results.
	 */
	if (regs_no_sipr(regs)) {
		unsigned long siar = mfspr(SPRN_SIAR);
		if (siar >= PAGE_OFFSET)
			return PERF_RECORD_MISC_KERNEL;
		return PERF_RECORD_MISC_USER;
	}

	/* PR has priority over HV, so order below is important */
	if (regs_sipr(regs))
		return PERF_RECORD_MISC_USER;

	if (regs_sihv(regs) && (freeze_events_kernel != MMCR0_FCHV))
		return PERF_RECORD_MISC_HYPERVISOR;

	return PERF_RECORD_MISC_KERNEL;
}
/*
 * Overload regs->dsisr to store MMCRA so we only need to read it once
 * on each interrupt.
 * Overload regs->result to specify whether we should use the MSR (result
 * is zero) or the SIAR (result is non zero).
 */
static inline void perf_read_regs(struct pt_regs *regs)
{
	unsigned long mmcra = mfspr(SPRN_MMCRA);
	int marked = mmcra & MMCRA_SAMPLE_ENABLE;
	int use_siar;

	regs->dsisr = mmcra;
	regs->result = 0;

	if (ppmu->flags & PPMU_NO_SIPR)
		regs->result |= 2;

	/*
	 * If this isn't a PMU exception (eg a software event) the SIAR is
	 * not valid. Use pt_regs.
	 *
	 * If it is a marked event use the SIAR.
	 *
	 * If the PMU doesn't update the SIAR for non marked events use
	 * pt_regs.
	 *
	 * If the PMU has HV/PR flags then check to see if they
	 * place the exception in userspace. If so, use pt_regs. In
	 * continuous sampling mode the SIAR and the PMU exception are
	 * not synchronised, so they may be many instructions apart.
	 * This can result in confusing backtraces. We still want
	 * hypervisor samples as well as samples in the kernel with
	 * interrupts off hence the userspace check.
	 */
	if (TRAP(regs) != 0xf00)
		use_siar = 0;
	else if (marked)
		use_siar = 1;
	else if ((ppmu->flags & PPMU_NO_CONT_SAMPLING))
		use_siar = 0;
	else if (!regs_no_sipr(regs) && regs_sipr(regs))
		use_siar = 0;
	else
		use_siar = 1;

	regs->result |= use_siar;
}
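/*
 * Editor's summary of the overloading scheme above (not from the
 * original source): after perf_read_regs(),
 *   regs->dsisr      holds the MMCRA value read at interrupt time,
 *   regs->result & 1 means "use SIAR"     (tested by regs_use_siar()),
 *   regs->result & 2 means "no SIPR/SIHV" (tested by regs_no_sipr()).
 * E.g. a marked-event PMU exception on a PPMU_NO_SIPR machine leaves
 * regs->result == 3.
 */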
/*
 * If interrupts were soft-disabled when a PMU interrupt occurs, treat
 * it as an NMI.
 */
static inline int perf_intr_is_nmi(struct pt_regs *regs)
{
	return !regs->softe;
}
/*
 * On processors like P7+ that have the SIAR-Valid bit, marked instructions
 * must be sampled only if the SIAR-valid bit is set.
 *
 * For unmarked instructions and for processors that don't have the SIAR-Valid
 * bit, assume that SIAR is valid.
 */
static inline int siar_valid(struct pt_regs *regs)
{
	unsigned long mmcra = regs->dsisr;
	int marked = mmcra & MMCRA_SAMPLE_ENABLE;

	if ((ppmu->flags & PPMU_SIAR_VALID) && marked)
		return mmcra & POWER7P_MMCRA_SIAR_VALID;

	return 1;
}
#endif /* CONFIG_PPC64 */

static void perf_event_interrupt(struct pt_regs *regs);

void perf_event_print_debug(void)
{
}
/*
 * Read one performance monitor counter (PMC).
 */
static unsigned long read_pmc(int idx)
{
	unsigned long val;

	switch (idx) {
	case 1:
		val = mfspr(SPRN_PMC1);
		break;
	case 2:
		val = mfspr(SPRN_PMC2);
		break;
	case 3:
		val = mfspr(SPRN_PMC3);
		break;
	case 4:
		val = mfspr(SPRN_PMC4);
		break;
	case 5:
		val = mfspr(SPRN_PMC5);
		break;
	case 6:
		val = mfspr(SPRN_PMC6);
		break;
#ifdef CONFIG_PPC64
	case 7:
		val = mfspr(SPRN_PMC7);
		break;
	case 8:
		val = mfspr(SPRN_PMC8);
		break;
#endif /* CONFIG_PPC64 */
	default:
		printk(KERN_ERR "oops trying to read PMC%d\n", idx);
		val = 0;
	}
	return val;
}
/*
 * Write one PMC.
 */
static void write_pmc(int idx, unsigned long val)
{
	switch (idx) {
	case 1:
		mtspr(SPRN_PMC1, val);
		break;
	case 2:
		mtspr(SPRN_PMC2, val);
		break;
	case 3:
		mtspr(SPRN_PMC3, val);
		break;
	case 4:
		mtspr(SPRN_PMC4, val);
		break;
	case 5:
		mtspr(SPRN_PMC5, val);
		break;
	case 6:
		mtspr(SPRN_PMC6, val);
		break;
#ifdef CONFIG_PPC64
	case 7:
		mtspr(SPRN_PMC7, val);
		break;
	case 8:
		mtspr(SPRN_PMC8, val);
		break;
#endif /* CONFIG_PPC64 */
	default:
		printk(KERN_ERR "oops trying to write PMC%d\n", idx);
	}
}
/*
 * Check if a set of events can all go on the PMU at once.
 * If they can't, this will look at alternative codes for the events
 * and see if any combination of alternative codes is feasible.
 * The feasible set is returned in event_id[].
 */
static int power_check_constraints(struct cpu_hw_events *cpuhw,
				   u64 event_id[], unsigned int cflags[],
				   int n_ev)
{
	unsigned long mask, value, nv;
	unsigned long smasks[MAX_HWEVENTS], svalues[MAX_HWEVENTS];
	int n_alt[MAX_HWEVENTS], choice[MAX_HWEVENTS];
	int i, j;
	unsigned long addf = ppmu->add_fields;
	unsigned long tadd = ppmu->test_adder;

	if (n_ev > ppmu->n_counter)
		return -1;

	/* First see if the events will go on as-is */
	for (i = 0; i < n_ev; ++i) {
		if ((cflags[i] & PPMU_LIMITED_PMC_REQD)
		    && !ppmu->limited_pmc_event(event_id[i])) {
			ppmu->get_alternatives(event_id[i], cflags[i],
					       cpuhw->alternatives[i]);
			event_id[i] = cpuhw->alternatives[i][0];
		}
		if (ppmu->get_constraint(event_id[i], &cpuhw->amasks[i][0],
					 &cpuhw->avalues[i][0]))
			return -1;
	}
	value = mask = 0;
	for (i = 0; i < n_ev; ++i) {
		nv = (value | cpuhw->avalues[i][0]) +
			(value & cpuhw->avalues[i][0] & addf);
		if ((((nv + tadd) ^ value) & mask) != 0 ||
		    (((nv + tadd) ^ cpuhw->avalues[i][0]) &
		     cpuhw->amasks[i][0]) != 0)
			break;
		value = nv;
		mask |= cpuhw->amasks[i][0];
	}
	if (i == n_ev)
		return 0;	/* all OK */

	/* doesn't work, gather alternatives... */
	if (!ppmu->get_alternatives)
		return -1;
	for (i = 0; i < n_ev; ++i) {
		choice[i] = 0;
		n_alt[i] = ppmu->get_alternatives(event_id[i], cflags[i],
						  cpuhw->alternatives[i]);
		for (j = 1; j < n_alt[i]; ++j)
			ppmu->get_constraint(cpuhw->alternatives[i][j],
					     &cpuhw->amasks[i][j],
					     &cpuhw->avalues[i][j]);
	}

	/* enumerate all possibilities and see if any will work */
	i = 0;
	j = -1;
	value = mask = nv = 0;
	while (1) {
		if (j >= 0) {
			/* we're backtracking, restore context */
			value = svalues[i];
			mask = smasks[i];
			j = choice[i];
		}
		/*
		 * See if any alternative k for event_id i,
		 * where k > j, will satisfy the constraints.
		 */
		while (++j < n_alt[i]) {
			nv = (value | cpuhw->avalues[i][j]) +
				(value & cpuhw->avalues[i][j] & addf);
			if ((((nv + tadd) ^ value) & mask) == 0 &&
			    (((nv + tadd) ^ cpuhw->avalues[i][j])
			     & cpuhw->amasks[i][j]) == 0)
				break;
		}
		if (j >= n_alt[i]) {
			/*
			 * No feasible alternative, backtrack
			 * to event_id i-1 and continue enumerating its
			 * alternatives from where we got up to.
			 */
			if (--i < 0)
				return -1;
		} else {
			/*
			 * Found a feasible alternative for event_id i,
			 * remember where we got up to with this event_id,
			 * go on to the next event_id, and start with
			 * the first alternative for it.
			 */
			choice[i] = j;
			svalues[i] = value;
			smasks[i] = mask;
			value = nv;
			mask |= cpuhw->amasks[i][j];
			if (++i >= n_ev)
				break;
			j = -1;
		}
	}

	/* OK, we have a feasible combination, tell the caller the solution */
	for (i = 0; i < n_ev; ++i)
		event_id[i] = cpuhw->alternatives[i][choice[i]];
	return 0;
}
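/*
 * Editor's worked example (not from the original source), assuming a
 * pure "select" field, i.e. one whose bits contribute nothing via
 * addf or tadd: suppose two events share a 2-bit selector at bits
 * 1:0, so amask = 0x3 for both.
 *   Both request code 0x1:    nv = (0x1 | 0x1) + 0 = 0x1, and both
 *     xor-and-mask checks come out zero        -> compatible.
 *   Codes 0x1 and 0x2 clash:  nv = (0x1 | 0x2) + 0 = 0x3, and
 *     (0x3 ^ 0x1) & 0x3 = 0x2 != 0             -> the feasibility
 *     test fails, so the backtracking search above tries the events'
 *     alternative codes instead.
 */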
/*
 * Check if newly-added events have consistent settings for
 * exclude_{user,kernel,hv} with each other and any previously
 * added events.
 */
static int check_excludes(struct perf_event **ctrs, unsigned int cflags[],
			  int n_prev, int n_new)
{
	int eu = 0, ek = 0, eh = 0;
	int i, n, first;
	struct perf_event *event;

	n = n_prev + n_new;
	if (n <= 1)
		return 0;

	first = 1;
	for (i = 0; i < n; ++i) {
		if (cflags[i] & PPMU_LIMITED_PMC_OK) {
			cflags[i] &= ~PPMU_LIMITED_PMC_REQD;
			continue;
		}
		event = ctrs[i];
		if (first) {
			eu = event->attr.exclude_user;
			ek = event->attr.exclude_kernel;
			eh = event->attr.exclude_hv;
			first = 0;
		} else if (event->attr.exclude_user != eu ||
			   event->attr.exclude_kernel != ek ||
			   event->attr.exclude_hv != eh) {
			return -EAGAIN;
		}
	}

	if (eu || ek || eh)
		for (i = 0; i < n; ++i)
			if (cflags[i] & PPMU_LIMITED_PMC_OK)
				cflags[i] |= PPMU_LIMITED_PMC_REQD;

	return 0;
}
static u64 check_and_compute_delta(u64 prev, u64 val)
{
	u64 delta = (val - prev) & 0xfffffffful;

	/*
	 * POWER7 can roll back counter values, if the new value is smaller
	 * than the previous value it will cause the delta and the counter to
	 * have bogus values unless we rolled a counter over.  If a counter is
	 * rolled back, it will be smaller, but within 256, which is the maximum
	 * number of events to rollback at once.  If we detect a rollback
	 * return 0.  This can lead to a small lack of precision in the
	 * counters.
	 */
	if (prev > val && (prev - val) < 256)
		delta = 0;

	return delta;
}
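/*
 * Editor's illustration (not from the original source), with 32-bit
 * counter arithmetic:
 *   prev = 0xfffffff0, val = 0x00000010
 *     -> delta = (0x10 - 0xfffffff0) & 0xffffffff = 0x20 (normal wrap)
 *   prev = 0x00000100, val = 0x000000c0 (rolled back by 64 < 256)
 *     -> delta forced to 0, so the rollback is not counted.
 */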
static void power_pmu_read(struct perf_event *event)
{
	s64 val, delta, prev;

	if (event->hw.state & PERF_HES_STOPPED)
		return;

	if (!event->hw.idx)
		return;
	/*
	 * Performance monitor interrupts come even when interrupts
	 * are soft-disabled, as long as interrupts are hard-enabled.
	 * Therefore we treat them like NMIs.
	 */
	do {
		prev = local64_read(&event->hw.prev_count);
		barrier();
		val = read_pmc(event->hw.idx);
		delta = check_and_compute_delta(prev, val);
		if (!delta)
			return;
	} while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);

	local64_add(delta, &event->count);
	local64_sub(delta, &event->hw.period_left);
}
/*
 * On some machines, PMC5 and PMC6 can't be written, don't respect
 * the freeze conditions, and don't generate interrupts.  This tells
 * us if `event' is using such a PMC.
 */
static int is_limited_pmc(int pmcnum)
{
	return (ppmu->flags & PPMU_LIMITED_PMC5_6)
		&& (pmcnum == 5 || pmcnum == 6);
}
static void freeze_limited_counters(struct cpu_hw_events *cpuhw,
				    unsigned long pmc5, unsigned long pmc6)
{
	struct perf_event *event;
	u64 val, prev, delta;
	int i;

	for (i = 0; i < cpuhw->n_limited; ++i) {
		event = cpuhw->limited_counter[i];
		if (!event->hw.idx)
			continue;
		val = (event->hw.idx == 5) ? pmc5 : pmc6;
		prev = local64_read(&event->hw.prev_count);
		event->hw.idx = 0;
		delta = check_and_compute_delta(prev, val);
		if (delta)
			local64_add(delta, &event->count);
	}
}
static void thaw_limited_counters(struct cpu_hw_events *cpuhw,
				  unsigned long pmc5, unsigned long pmc6)
{
	struct perf_event *event;
	u64 val, prev;
	int i;

	for (i = 0; i < cpuhw->n_limited; ++i) {
		event = cpuhw->limited_counter[i];
		event->hw.idx = cpuhw->limited_hwidx[i];
		val = (event->hw.idx == 5) ? pmc5 : pmc6;
		prev = local64_read(&event->hw.prev_count);
		if (check_and_compute_delta(prev, val))
			local64_set(&event->hw.prev_count, val);
		perf_event_update_userpage(event);
	}
}
/*
 * Since limited events don't respect the freeze conditions, we
 * have to read them immediately after freezing or unfreezing the
 * other events.  We try to keep the values from the limited
 * events as consistent as possible by keeping the delay (in
 * cycles and instructions) between freezing/unfreezing and reading
 * the limited events as small and consistent as possible.
 * Therefore, if any limited events are in use, we read them
 * both, and always in the same order, to minimize variability,
 * and do it inside the same asm that writes MMCR0.
 */
static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0)
{
	unsigned long pmc5, pmc6;

	if (!cpuhw->n_limited) {
		mtspr(SPRN_MMCR0, mmcr0);
		return;
	}

	/*
	 * Write MMCR0, then read PMC5 and PMC6 immediately.
	 * To ensure we don't get a performance monitor interrupt
	 * between writing MMCR0 and freezing/thawing the limited
	 * events, we first write MMCR0 with the event overflow
	 * interrupt enable bits turned off.
	 */
	asm volatile("mtspr %3,%2; mfspr %0,%4; mfspr %1,%5"
		     : "=&r" (pmc5), "=&r" (pmc6)
		     : "r" (mmcr0 & ~(MMCR0_PMC1CE | MMCR0_PMCjCE)),
		       "i" (SPRN_MMCR0),
		       "i" (SPRN_PMC5), "i" (SPRN_PMC6));

	if (mmcr0 & MMCR0_FC)
		freeze_limited_counters(cpuhw, pmc5, pmc6);
	else
		thaw_limited_counters(cpuhw, pmc5, pmc6);

	/*
	 * Write the full MMCR0 including the event overflow interrupt
	 * enable bits, if necessary.
	 */
	if (mmcr0 & (MMCR0_PMC1CE | MMCR0_PMCjCE))
		mtspr(SPRN_MMCR0, mmcr0);
}
/*
 * Disable all events to prevent PMU interrupts and to allow
 * events to be added or removed.
 */
static void power_pmu_disable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw;
	unsigned long flags;

	if (!ppmu)
		return;
	local_irq_save(flags);
	cpuhw = &__get_cpu_var(cpu_hw_events);

	if (!cpuhw->disabled) {
		cpuhw->disabled = 1;
		cpuhw->n_added = 0;

		/*
		 * Check if we ever enabled the PMU on this cpu.
		 */
		if (!cpuhw->pmcs_enabled) {
			ppc_enable_pmcs();
			cpuhw->pmcs_enabled = 1;
		}

		/*
		 * Disable instruction sampling if it was enabled
		 */
		if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
			mtspr(SPRN_MMCRA,
			      cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
			mb();
		}

		/*
		 * Set the 'freeze counters' bit.
		 * The barrier is to make sure the mtspr has been
		 * executed and the PMU has frozen the events
		 * before we return.
		 */
		write_mmcr0(cpuhw, mfspr(SPRN_MMCR0) | MMCR0_FC);
		mb();
	}
	local_irq_restore(flags);
}
/*
 * Re-enable all events if disable == 0.
 * If we were previously disabled and events were added, then
 * put the new config on the PMU.
 */
static void power_pmu_enable(struct pmu *pmu)
{
	struct perf_event *event;
	struct cpu_hw_events *cpuhw;
	unsigned long flags;
	long i;
	unsigned long val;
	s64 left;
	unsigned int hwc_index[MAX_HWEVENTS];
	int n_lim;
	int idx;

	if (!ppmu)
		return;
	local_irq_save(flags);
	cpuhw = &__get_cpu_var(cpu_hw_events);
	if (!cpuhw->disabled) {
		local_irq_restore(flags);
		return;
	}
	cpuhw->disabled = 0;

	/*
	 * If we didn't change anything, or only removed events,
	 * no need to recalculate MMCR* settings and reset the PMCs.
	 * Just reenable the PMU with the current MMCR* settings
	 * (possibly updated for removal of events).
	 */
	if (!cpuhw->n_added) {
		mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
		mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
		if (cpuhw->n_events == 0)
			ppc_set_pmu_inuse(0);
		goto out_enable;
	}

	/*
	 * Compute MMCR* values for the new set of events
	 */
	if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_events, hwc_index,
			       cpuhw->mmcr)) {
		/* shouldn't ever get here */
		printk(KERN_ERR "oops compute_mmcr failed\n");
		goto out;
	}

	/*
	 * Add in MMCR0 freeze bits corresponding to the
	 * attr.exclude_* bits for the first event.
	 * We have already checked that all events have the
	 * same values for these bits as the first event.
	 */
	event = cpuhw->event[0];
	if (event->attr.exclude_user)
		cpuhw->mmcr[0] |= MMCR0_FCP;
	if (event->attr.exclude_kernel)
		cpuhw->mmcr[0] |= freeze_events_kernel;
	if (event->attr.exclude_hv)
		cpuhw->mmcr[0] |= MMCR0_FCHV;

	/*
	 * Write the new configuration to MMCR* with the freeze
	 * bit set and set the hardware events to their initial values.
	 * Then unfreeze the events.
	 */
	ppc_set_pmu_inuse(1);
	mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
	mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
	mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE))
				| MMCR0_FC);

	/*
	 * Read off any pre-existing events that need to move
	 * to another PMC.
	 */
	for (i = 0; i < cpuhw->n_events; ++i) {
		event = cpuhw->event[i];
		if (event->hw.idx && event->hw.idx != hwc_index[i] + 1) {
			power_pmu_read(event);
			write_pmc(event->hw.idx, 0);
			event->hw.idx = 0;
		}
	}

	/*
	 * Initialize the PMCs for all the new and moved events.
	 */
	cpuhw->n_limited = n_lim = 0;
	for (i = 0; i < cpuhw->n_events; ++i) {
		event = cpuhw->event[i];
		if (event->hw.idx)
			continue;
		idx = hwc_index[i] + 1;
		if (is_limited_pmc(idx)) {
			cpuhw->limited_counter[n_lim] = event;
			cpuhw->limited_hwidx[n_lim] = idx;
			++n_lim;
			continue;
		}
		val = 0;
		if (event->hw.sample_period) {
			left = local64_read(&event->hw.period_left);
			if (left < 0x80000000L)
				val = 0x80000000L - left;
		}
		local64_set(&event->hw.prev_count, val);
		event->hw.idx = idx;
		if (event->hw.state & PERF_HES_STOPPED)
			val = 0;
		write_pmc(idx, val);
		perf_event_update_userpage(event);
	}
	cpuhw->n_limited = n_lim;
	cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE;

 out_enable:
	mb();
	write_mmcr0(cpuhw, cpuhw->mmcr[0]);

	/*
	 * Enable instruction sampling if necessary
	 */
	if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
		mb();
		mtspr(SPRN_MMCRA, cpuhw->mmcr[2]);
	}

 out:
	local_irq_restore(flags);
}
static int collect_events(struct perf_event *group, int max_count,
			  struct perf_event *ctrs[], u64 *events,
			  unsigned int *flags)
{
	int n = 0;
	struct perf_event *event;

	if (!is_software_event(group)) {
		if (n >= max_count)
			return -1;
		ctrs[n] = group;
		flags[n] = group->hw.event_base;
		events[n++] = group->hw.config;
	}
	list_for_each_entry(event, &group->sibling_list, group_entry) {
		if (!is_software_event(event) &&
		    event->state != PERF_EVENT_STATE_OFF) {
			if (n >= max_count)
				return -1;
			ctrs[n] = event;
			flags[n] = event->hw.event_base;
			events[n++] = event->hw.config;
		}
	}
	return n;
}
/*
 * Add an event to the PMU.
 * If all events are not already frozen, then we disable and
 * re-enable the PMU in order to get hw_perf_enable to do the
 * actual work of reconfiguring the PMU.
 */
static int power_pmu_add(struct perf_event *event, int ef_flags)
{
	struct cpu_hw_events *cpuhw;
	unsigned long flags;
	int n0;
	int ret = -EAGAIN;

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	/*
	 * Add the event to the list (if there is room)
	 * and check whether the total set is still feasible.
	 */
	cpuhw = &__get_cpu_var(cpu_hw_events);
	n0 = cpuhw->n_events;
	if (n0 >= ppmu->n_counter)
		goto out;
	cpuhw->event[n0] = event;
	cpuhw->events[n0] = event->hw.config;
	cpuhw->flags[n0] = event->hw.event_base;

	/*
	 * This event may have been disabled/stopped in record_and_restart()
	 * because we exceeded the ->event_limit. If re-starting the event,
	 * clear the ->hw.state (STOPPED and UPTODATE flags), so the user
	 * notification is re-enabled.
	 */
	if (!(ef_flags & PERF_EF_START))
		event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	else
		event->hw.state = 0;

	/*
	 * If group events scheduling transaction was started,
	 * skip the schedulability test here, it will be performed
	 * at commit time(->commit_txn) as a whole
	 */
	if (cpuhw->group_flag & PERF_EVENT_TXN)
		goto nocheck;

	if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1))
		goto out;
	if (power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n0 + 1))
		goto out;
	event->hw.config = cpuhw->events[n0];

nocheck:
	++cpuhw->n_events;
	++cpuhw->n_added;

	ret = 0;
 out:
	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
	return ret;
}
/*
 * Remove an event from the PMU.
 */
static void power_pmu_del(struct perf_event *event, int ef_flags)
{
	struct cpu_hw_events *cpuhw;
	long i;
	unsigned long flags;

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	power_pmu_read(event);

	cpuhw = &__get_cpu_var(cpu_hw_events);
	for (i = 0; i < cpuhw->n_events; ++i) {
		if (event == cpuhw->event[i]) {
			while (++i < cpuhw->n_events) {
				cpuhw->event[i-1] = cpuhw->event[i];
				cpuhw->events[i-1] = cpuhw->events[i];
				cpuhw->flags[i-1] = cpuhw->flags[i];
			}
			--cpuhw->n_events;
			ppmu->disable_pmc(event->hw.idx - 1, cpuhw->mmcr);
			if (event->hw.idx) {
				write_pmc(event->hw.idx, 0);
				event->hw.idx = 0;
			}
			perf_event_update_userpage(event);
			break;
		}
	}
	for (i = 0; i < cpuhw->n_limited; ++i)
		if (event == cpuhw->limited_counter[i])
			break;
	if (i < cpuhw->n_limited) {
		while (++i < cpuhw->n_limited) {
			cpuhw->limited_counter[i-1] = cpuhw->limited_counter[i];
			cpuhw->limited_hwidx[i-1] = cpuhw->limited_hwidx[i];
		}
		--cpuhw->n_limited;
	}
	if (cpuhw->n_events == 0) {
		/* disable exceptions if no events are running */
		cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE);
	}

	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
}
/*
 * POWER-PMU does not support disabling individual counters, hence
 * program their cycle counter to their max value and ignore the interrupts.
 */
static void power_pmu_start(struct perf_event *event, int ef_flags)
{
	unsigned long flags;
	s64 left;
	unsigned long val;

	if (!event->hw.idx || !event->hw.sample_period)
		return;

	if (!(event->hw.state & PERF_HES_STOPPED))
		return;

	if (ef_flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	event->hw.state = 0;
	left = local64_read(&event->hw.period_left);

	val = 0;
	if (left < 0x80000000L)
		val = 0x80000000L - left;

	write_pmc(event->hw.idx, val);

	perf_event_update_userpage(event);
	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
}
static void power_pmu_stop(struct perf_event *event, int ef_flags)
{
	unsigned long flags;

	if (!event->hw.idx || !event->hw.sample_period)
		return;

	if (event->hw.state & PERF_HES_STOPPED)
		return;

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	power_pmu_read(event);
	event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	write_pmc(event->hw.idx, 0);

	perf_event_update_userpage(event);
	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
}
/*
 * Start group events scheduling transaction
 * Set the flag to make pmu::enable() not perform the
 * schedulability test, it will be performed at commit time
 */
void power_pmu_start_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

	perf_pmu_disable(pmu);
	cpuhw->group_flag |= PERF_EVENT_TXN;
	cpuhw->n_txn_start = cpuhw->n_events;
}
/*
 * Stop group events scheduling transaction
 * Clear the flag and pmu::enable() will perform the
 * schedulability test.
 */
void power_pmu_cancel_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

	cpuhw->group_flag &= ~PERF_EVENT_TXN;
	perf_pmu_enable(pmu);
}
/*
 * Commit group events scheduling transaction
 * Perform the group schedulability test as a whole
 * Return 0 if success
 */
int power_pmu_commit_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw;
	long i, n;

	if (!ppmu)
		return -EAGAIN;
	cpuhw = &__get_cpu_var(cpu_hw_events);
	n = cpuhw->n_events;
	if (check_excludes(cpuhw->event, cpuhw->flags, 0, n))
		return -EAGAIN;
	i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n);
	if (i < 0)
		return -EAGAIN;

	for (i = cpuhw->n_txn_start; i < n; ++i)
		cpuhw->event[i]->hw.config = cpuhw->events[i];

	cpuhw->group_flag &= ~PERF_EVENT_TXN;
	perf_pmu_enable(pmu);
	return 0;
}
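/*
 * Editor's sketch of how the generic perf core drives the transaction
 * API above when scheduling an event group (hypothetical call order,
 * not code from this file):
 *
 *	power_pmu_start_txn(pmu);
 *	power_pmu_add(leader, 0);	// per-event constraint check is
 *	power_pmu_add(sibling, 0);	// skipped while PERF_EVENT_TXN set
 *	if (power_pmu_commit_txn(pmu))	// one whole-group feasibility test
 *		power_pmu_cancel_txn(pmu);
 */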
/*
 * Return 1 if we might be able to put event on a limited PMC,
 * or 0 if not.
 * An event can only go on a limited PMC if it counts something
 * that a limited PMC can count, doesn't require interrupts, and
 * doesn't exclude any processor mode.
 */
static int can_go_on_limited_pmc(struct perf_event *event, u64 ev,
				 unsigned int flags)
{
	int n;
	u64 alt[MAX_EVENT_ALTERNATIVES];

	if (event->attr.exclude_user
	    || event->attr.exclude_kernel
	    || event->attr.exclude_hv
	    || event->attr.sample_period)
		return 0;

	if (ppmu->limited_pmc_event(ev))
		return 1;

	/*
	 * The requested event_id isn't on a limited PMC already;
	 * see if any alternative code goes on a limited PMC.
	 */
	if (!ppmu->get_alternatives)
		return 0;

	flags |= PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD;
	n = ppmu->get_alternatives(ev, flags, alt);

	return n > 0;
}
/*
 * Find an alternative event_id that goes on a normal PMC, if possible,
 * and return the event_id code, or 0 if there is no such alternative.
 * (Note: event_id code 0 is "don't count" on all machines.)
 */
static u64 normal_pmc_alternative(u64 ev, unsigned long flags)
{
	u64 alt[MAX_EVENT_ALTERNATIVES];
	int n;

	flags &= ~(PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD);
	n = ppmu->get_alternatives(ev, flags, alt);
	if (!n)
		return 0;
	return alt[0];
}
/* Number of perf_events counting hardware events */
static atomic_t num_events;
/* Used to avoid races in calling reserve/release_pmc_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);
/*
 * Release the PMU if this is the last perf_event.
 */
static void hw_perf_event_destroy(struct perf_event *event)
{
	if (!atomic_add_unless(&num_events, -1, 1)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_dec_return(&num_events) == 0)
			release_pmc_hardware();
		mutex_unlock(&pmc_reserve_mutex);
	}
}
/*
 * Translate a generic cache event_id config to a raw event_id code.
 */
static int hw_perf_cache_event(u64 config, u64 *eventp)
{
	unsigned long type, op, result;
	int ev;

	if (!ppmu->cache_events)
		return -EINVAL;

	/* unpack config */
	type = config & 0xff;
	op = (config >> 8) & 0xff;
	result = (config >> 16) & 0xff;

	if (type >= PERF_COUNT_HW_CACHE_MAX ||
	    op >= PERF_COUNT_HW_CACHE_OP_MAX ||
	    result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ev = (*ppmu->cache_events)[type][op][result];
	if (ev == 0)
		return -EOPNOTSUPP;
	if (ev == -1)
		return -EINVAL;
	*eventp = ev;
	return 0;
}
static int power_pmu_event_init(struct perf_event *event)
{
	u64 ev;
	unsigned long flags;
	struct perf_event *ctrs[MAX_HWEVENTS];
	u64 events[MAX_HWEVENTS];
	unsigned int cflags[MAX_HWEVENTS];
	int n;
	int err;
	struct cpu_hw_events *cpuhw;

	if (!ppmu)
		return -ENOENT;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
		ev = event->attr.config;
		if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
			return -EOPNOTSUPP;
		ev = ppmu->generic_events[ev];
		break;
	case PERF_TYPE_HW_CACHE:
		err = hw_perf_cache_event(event->attr.config, &ev);
		if (err)
			return err;
		break;
	case PERF_TYPE_RAW:
		ev = event->attr.config;
		break;
	default:
		return -ENOENT;
	}

	event->hw.config_base = ev;
	event->hw.idx = 0;

	/*
	 * If we are not running on a hypervisor, force the
	 * exclude_hv bit to 0 so that we don't care what
	 * the user set it to.
	 */
	if (!firmware_has_feature(FW_FEATURE_LPAR))
		event->attr.exclude_hv = 0;

	/*
	 * If this is a per-task event, then we can use
	 * PM_RUN_* events interchangeably with their non RUN_*
	 * equivalents, e.g. PM_RUN_CYC instead of PM_CYC.
	 * XXX we should check if the task is an idle task.
	 */
	flags = 0;
	if (event->attach_state & PERF_ATTACH_TASK)
		flags |= PPMU_ONLY_COUNT_RUN;

	/*
	 * If this machine has limited events, check whether this
	 * event_id could go on a limited event.
	 */
	if (ppmu->flags & PPMU_LIMITED_PMC5_6) {
		if (can_go_on_limited_pmc(event, ev, flags)) {
			flags |= PPMU_LIMITED_PMC_OK;
		} else if (ppmu->limited_pmc_event(ev)) {
			/*
			 * The requested event_id is on a limited PMC,
			 * but we can't use a limited PMC; see if any
			 * alternative goes on a normal PMC.
			 */
			ev = normal_pmc_alternative(ev, flags);
			if (!ev)
				return -EINVAL;
		}
	}

	/*
	 * If this is in a group, check if it can go on with all the
	 * other hardware events in the group.  We assume the event
	 * hasn't been linked into its leader's sibling list at this point.
	 */
	n = 0;
	if (event->group_leader != event) {
		n = collect_events(event->group_leader, ppmu->n_counter - 1,
				   ctrs, events, cflags);
		if (n < 0)
			return -EINVAL;
	}
	events[n] = ev;
	ctrs[n] = event;
	cflags[n] = flags;
	if (check_excludes(ctrs, cflags, n, 1))
		return -EINVAL;

	cpuhw = &get_cpu_var(cpu_hw_events);
	err = power_check_constraints(cpuhw, events, cflags, n + 1);
	put_cpu_var(cpu_hw_events);
	if (err)
		return -EINVAL;

	event->hw.config = events[n];
	event->hw.event_base = cflags[n];
	event->hw.last_period = event->hw.sample_period;
	local64_set(&event->hw.period_left, event->hw.last_period);

	/*
	 * See if we need to reserve the PMU.
	 * If no events are currently in use, then we have to take a
	 * mutex to ensure that we don't race with another task doing
	 * reserve_pmc_hardware or release_pmc_hardware.
	 */
	err = 0;
	if (!atomic_inc_not_zero(&num_events)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&num_events) == 0 &&
		    reserve_pmc_hardware(perf_event_interrupt))
			err = -EBUSY;
		else
			atomic_inc(&num_events);
		mutex_unlock(&pmc_reserve_mutex);
	}
	event->destroy = hw_perf_event_destroy;

	return err;
}
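/*
 * Editor's illustration of what reaches this init path from userspace
 * (hypothetical example, not code from this file): a raw hardware
 * event is requested roughly as
 *
 *	struct perf_event_attr attr = {
 *		.type   = PERF_TYPE_RAW,
 *		.size   = sizeof(attr),
 *		.config = 0x1001a,	// some PMU-specific event code
 *	};
 *	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *
 * and arrives here with event->attr.type == PERF_TYPE_RAW, taking the
 * "ev = event->attr.config" branch above.
 */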
static int power_pmu_event_idx(struct perf_event *event)
{
	return event->hw.idx;
}
ssize_t power_events_sysfs_show(struct device *dev,
				struct device_attribute *attr, char *page)
{
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);

	return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
}
struct pmu power_pmu = {
	.pmu_enable	= power_pmu_enable,
	.pmu_disable	= power_pmu_disable,
	.event_init	= power_pmu_event_init,
	.add		= power_pmu_add,
	.del		= power_pmu_del,
	.start		= power_pmu_start,
	.stop		= power_pmu_stop,
	.read		= power_pmu_read,
	.start_txn	= power_pmu_start_txn,
	.cancel_txn	= power_pmu_cancel_txn,
	.commit_txn	= power_pmu_commit_txn,
	.event_idx	= power_pmu_event_idx,
};
/*
 * A counter has overflowed; update its count and record
 * things if requested.  Note that interrupts are hard-disabled
 * here so there is no possibility of being interrupted.
 */
static void record_and_restart(struct perf_event *event, unsigned long val,
			       struct pt_regs *regs)
{
	u64 period = event->hw.sample_period;
	s64 prev, delta, left;
	int record = 0;

	if (event->hw.state & PERF_HES_STOPPED) {
		write_pmc(event->hw.idx, 0);
		return;
	}

	/* we don't have to worry about interrupts here */
	prev = local64_read(&event->hw.prev_count);
	delta = check_and_compute_delta(prev, val);
	local64_add(delta, &event->count);

	/*
	 * See if the total period for this event has expired,
	 * and update for the next period.
	 */
	val = 0;
	left = local64_read(&event->hw.period_left) - delta;
	if (delta == 0)
		left++;
	if (period) {
		if (left <= 0) {
			left += period;
			if (left <= 0)
				left = period;
			record = siar_valid(regs);
			event->hw.last_period = event->hw.sample_period;
		}
		if (left < 0x80000000LL)
			val = 0x80000000LL - left;
	}

	write_pmc(event->hw.idx, val);
	local64_set(&event->hw.prev_count, val);
	local64_set(&event->hw.period_left, left);
	perf_event_update_userpage(event);

	/*
	 * Finally record data if requested.
	 */
	if (record) {
		struct perf_sample_data data;

		perf_sample_data_init(&data, ~0ULL, event->hw.last_period);

		if (event->attr.sample_type & PERF_SAMPLE_ADDR)
			perf_get_data_addr(regs, &data.addr);

		if (perf_event_overflow(event, &data, regs))
			power_pmu_stop(event, 0);
	}
}
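/*
 * Editor's worked example of the reload arithmetic above (not from
 * the original source): with sample_period = 100000 and an overshoot
 * of 123 events at overflow time, left = -123 + 100000 = 99877, and
 * the PMC is reprogrammed to 0x80000000 - 99877 so that it next sets
 * bit 31 (overflows) after exactly 99877 more events, keeping the
 * long-run rate at one sample per 100000 events.
 */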
/*
 * Called from generic code to get the misc flags (i.e. processor mode)
 * for an event_id.
 */
unsigned long perf_misc_flags(struct pt_regs *regs)
{
	u32 flags = perf_get_misc_flags(regs);

	if (flags)
		return flags;
	return user_mode(regs) ? PERF_RECORD_MISC_USER :
		PERF_RECORD_MISC_KERNEL;
}
/*
 * Called from generic code to get the instruction pointer
 * for an event_id.
 */
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
	bool use_siar = regs_use_siar(regs);

	if (use_siar && siar_valid(regs))
		return mfspr(SPRN_SIAR) + perf_ip_adjust(regs);
	else if (use_siar)
		return 0;		/* no valid instruction pointer */
	else
		return regs->nip;
}
static bool pmc_overflow_power7(unsigned long val)
{
	/*
	 * Events on POWER7 can roll back if a speculative event doesn't
	 * eventually complete. Unfortunately in some rare cases they will
	 * raise a performance monitor exception. We need to catch this to
	 * ensure we reset the PMC. In all cases the PMC will be 256 or less
	 * cycles from overflow.
	 *
	 * We only do this if the first pass fails to find any overflowing
	 * PMCs because a user might set a period of less than 256 and we
	 * don't want to mistakenly reset them.
	 */
	if ((0x80000000 - val) <= 256)
		return true;

	return false;
}

static bool pmc_overflow(unsigned long val)
{
	if ((int)val < 0)
		return true;

	return false;
}
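/*
 * Editor's illustration (not from the original source):
 *   val = 0x80000123: (int)val < 0, so pmc_overflow() reports a
 *     normal overflow (bit 31 set).
 *   val = 0x7fffff80: bit 31 clear, but 0x80000000 - val = 128 <= 256,
 *     so pmc_overflow_power7() flags it as a POWER7 counter that took
 *     its exception just short of overflow.
 */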
/*
 * Performance monitor interrupt stuff
 */
static void perf_event_interrupt(struct pt_regs *regs)
{
	int i, j;
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
	struct perf_event *event;
	unsigned long val[8];
	int found, active;
	int nmi;

	if (cpuhw->n_limited)
		freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5),
					mfspr(SPRN_PMC6));

	perf_read_regs(regs);

	nmi = perf_intr_is_nmi(regs);
	if (nmi)
		nmi_enter();
	else
		irq_enter();

	/* Read all the PMCs since we'll need them a bunch of times */
	for (i = 0; i < ppmu->n_counter; ++i)
		val[i] = read_pmc(i + 1);

	/* Try to find what caused the IRQ */
	found = 0;
	for (i = 0; i < ppmu->n_counter; ++i) {
		if (!pmc_overflow(val[i]))
			continue;
		if (is_limited_pmc(i + 1))
			continue; /* these won't generate IRQs */
		/*
		 * We've found one that's overflowed.  For active
		 * counters we need to log this.  For inactive
		 * counters, we need to reset it anyway
		 */
		found = 1;
		active = 0;
		for (j = 0; j < cpuhw->n_events; ++j) {
			event = cpuhw->event[j];
			if (event->hw.idx == (i + 1)) {
				active = 1;
				record_and_restart(event, val[i], regs);
				break;
			}
		}
		if (!active)
			/* reset non active counters that have overflowed */
			write_pmc(i + 1, 0);
	}
	if (!found && pvr_version_is(PVR_POWER7)) {
		/* check active counters for special buggy p7 overflow */
		for (i = 0; i < cpuhw->n_events; ++i) {
			event = cpuhw->event[i];
			if (!event->hw.idx || is_limited_pmc(event->hw.idx))
				continue;
			if (pmc_overflow_power7(val[event->hw.idx - 1])) {
				/* event has overflowed in a buggy way */
				found = 1;
				record_and_restart(event,
						   val[event->hw.idx - 1],
						   regs);
			}
		}
	}
	if ((!found) && printk_ratelimit())
		printk(KERN_WARNING "Can't find PMC that caused IRQ\n");

	/*
	 * Reset MMCR0 to its normal value.  This will set PMXE and
	 * clear FC (freeze counters) and PMAO (perf mon alert occurred)
	 * and thus allow interrupts to occur again.
	 * XXX might want to use MSR.PM to keep the events frozen until
	 * we get back out of this interrupt.
	 */
	write_mmcr0(cpuhw, cpuhw->mmcr[0]);

	if (nmi)
		nmi_exit();
	else
		irq_exit();
}
)
1568 struct cpu_hw_events
*cpuhw
= &per_cpu(cpu_hw_events
, cpu
);
1572 memset(cpuhw
, 0, sizeof(*cpuhw
));
1573 cpuhw
->mmcr
[0] = MMCR0_FC
;
static int __cpuinit
power_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		power_pmu_setup(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}
int __cpuinit register_power_pmu(struct power_pmu *pmu)
{
	if (ppmu)
		return -EBUSY;		/* something's already registered */

	ppmu = pmu;
	pr_info("%s performance monitor hardware support registered\n",
		pmu->name);

	power_pmu.attr_groups = ppmu->attr_groups;

#ifdef MSR_HV
	/*
	 * Use FCHV to ignore kernel events if MSR.HV is set.
	 */
	if (mfmsr() & MSR_HV)
		freeze_events_kernel = MMCR0_FCHV;
#endif /* MSR_HV */

	perf_pmu_register(&power_pmu, "cpu", PERF_TYPE_RAW);
	perf_cpu_notifier(power_pmu_notifier);

	return 0;
}