/*
 * Performance event support - powerpc architecture code
 *
 * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/reg.h>
#include <asm/pmc.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>
struct cpu_hw_events {
	int n_events;
	int n_percpu;
	int disabled;
	int n_added;
	int n_limited;
	u8  pmcs_enabled;
	struct perf_event *event[MAX_HWEVENTS];
	u64 events[MAX_HWEVENTS];
	unsigned int flags[MAX_HWEVENTS];
	unsigned long mmcr[3];
	struct perf_event *limited_counter[MAX_LIMITED_HWCOUNTERS];
	u8  limited_hwidx[MAX_LIMITED_HWCOUNTERS];
	u64 alternatives[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
	unsigned long amasks[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
	unsigned long avalues[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];

	unsigned int group_flag;
	int n_txn_start;
};

DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

struct power_pmu *ppmu;
/*
 * Normally, to ignore kernel events we set the FCS (freeze counters
 * in supervisor mode) bit in MMCR0, but if the kernel runs with the
 * hypervisor bit set in the MSR, or if we are running on a processor
 * where the hypervisor bit is forced to 1 (as on Apple G5 processors),
 * then we need to use the FCHV bit to ignore kernel events.
 */
static unsigned int freeze_events_kernel = MMCR0_FCS;

/*
 * 32-bit doesn't have MMCRA but does have an MMCR2,
 * and a few other names are different.
 */
#ifdef CONFIG_PPC32

#define MMCR0_FCHV		0
#define MMCR0_PMCjCE		MMCR0_PMCnCE

#define SPRN_MMCRA		SPRN_MMCR2
#define MMCRA_SAMPLE_ENABLE	0
static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
{
	return 0;
}
static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp) { }
static inline u32 perf_get_misc_flags(struct pt_regs *regs)
{
	return 0;
}
static inline void perf_read_regs(struct pt_regs *regs)
{
	regs->result = 0;
}
static inline int perf_intr_is_nmi(struct pt_regs *regs)
{
	return 0;
}

static inline int siar_valid(struct pt_regs *regs)
{
	return 1;
}

#endif /* CONFIG_PPC32 */
#ifdef CONFIG_PPC64

/*
 * Things that are specific to 64-bit implementations.
 */

static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
{
	unsigned long mmcra = regs->dsisr;

	if ((ppmu->flags & PPMU_HAS_SSLOT) && (mmcra & MMCRA_SAMPLE_ENABLE)) {
		unsigned long slot = (mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT;

		if (slot > 1)
			return 4 * (slot - 1);
	}

	return 0;
}
/*
 * The user wants a data address recorded.
 * If we're not doing instruction sampling, give them the SDAR
 * (sampled data address). If we are doing instruction sampling, then
 * only give them the SDAR if it corresponds to the instruction
 * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC or
 * the [POWER7P_]MMCRA_SDAR_VALID bit in MMCRA.
 */
static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
{
	unsigned long mmcra = regs->dsisr;
	unsigned long sdsync;

	if (ppmu->flags & PPMU_SIAR_VALID)
		sdsync = POWER7P_MMCRA_SDAR_VALID;
	else if (ppmu->flags & PPMU_ALT_SIPR)
		sdsync = POWER6_MMCRA_SDSYNC;
	else
		sdsync = MMCRA_SDSYNC;

	if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync))
		*addrp = mfspr(SPRN_SDAR);
}
static bool mmcra_sihv(unsigned long mmcra)
{
	unsigned long sihv = MMCRA_SIHV;

	if (ppmu->flags & PPMU_ALT_SIPR)
		sihv = POWER6_MMCRA_SIHV;

	return !!(mmcra & sihv);
}

static bool mmcra_sipr(unsigned long mmcra)
{
	unsigned long sipr = MMCRA_SIPR;

	if (ppmu->flags & PPMU_ALT_SIPR)
		sipr = POWER6_MMCRA_SIPR;

	return !!(mmcra & sipr);
}
static inline u32 perf_flags_from_msr(struct pt_regs *regs)
{
	if (regs->msr & MSR_PR)
		return PERF_RECORD_MISC_USER;
	if ((regs->msr & MSR_HV) && freeze_events_kernel != MMCR0_FCHV)
		return PERF_RECORD_MISC_HYPERVISOR;
	return PERF_RECORD_MISC_KERNEL;
}
static inline u32 perf_get_misc_flags(struct pt_regs *regs)
{
	unsigned long mmcra = regs->dsisr;
	unsigned long use_siar = regs->result;

	if (!use_siar)
		return perf_flags_from_msr(regs);

	/*
	 * If we don't have flags in MMCRA, rather than using
	 * the MSR, we intuit the flags from the address in
	 * SIAR which should give slightly more reliable
	 * results.
	 */
	if (ppmu->flags & PPMU_NO_SIPR) {
		unsigned long siar = mfspr(SPRN_SIAR);
		if (siar >= PAGE_OFFSET)
			return PERF_RECORD_MISC_KERNEL;
		return PERF_RECORD_MISC_USER;
	}

	/* PR has priority over HV, so order below is important */
	if (mmcra_sipr(mmcra))
		return PERF_RECORD_MISC_USER;
	if (mmcra_sihv(mmcra) && (freeze_events_kernel != MMCR0_FCHV))
		return PERF_RECORD_MISC_HYPERVISOR;
	return PERF_RECORD_MISC_KERNEL;
}
/*
 * Overload regs->dsisr to store MMCRA so we only need to read it once
 * on each interrupt.
 * Overload regs->result to specify whether we should use the MSR (result
 * is zero) or the SIAR (result is non zero).
 */
static inline void perf_read_regs(struct pt_regs *regs)
{
	unsigned long mmcra = mfspr(SPRN_MMCRA);
	int marked = mmcra & MMCRA_SAMPLE_ENABLE;
	int use_siar;

	/*
	 * If this isn't a PMU exception (eg a software event) the SIAR is
	 * not valid. Use pt_regs.
	 *
	 * If it is a marked event use the SIAR.
	 *
	 * If the PMU doesn't update the SIAR for non marked events use
	 * pt_regs.
	 *
	 * If the PMU has HV/PR flags then check to see if they
	 * place the exception in userspace. If so, use pt_regs. In
	 * continuous sampling mode the SIAR and the PMU exception are
	 * not synchronised, so they may be many instructions apart.
	 * This can result in confusing backtraces. We still want
	 * hypervisor samples as well as samples in the kernel with
	 * interrupts off hence the userspace check.
	 */
	if (TRAP(regs) != 0xf00)
		use_siar = 0;
	else if (marked)
		use_siar = 1;
	else if ((ppmu->flags & PPMU_NO_CONT_SAMPLING))
		use_siar = 0;
	else if (!(ppmu->flags & PPMU_NO_SIPR) && mmcra_sipr(mmcra))
		use_siar = 0;
	else
		use_siar = 1;

	regs->dsisr = mmcra;
	regs->result = use_siar;
}
/*
 * If interrupts were soft-disabled when a PMU interrupt occurs, treat
 * it as an NMI.
 */
static inline int perf_intr_is_nmi(struct pt_regs *regs)
{
	return !regs->softe;
}
/*
 * On processors like P7+ that have the SIAR-Valid bit, marked instructions
 * must be sampled only if the SIAR-valid bit is set.
 *
 * For unmarked instructions and for processors that don't have the SIAR-Valid
 * bit, assume that SIAR is valid.
 */
static inline int siar_valid(struct pt_regs *regs)
{
	unsigned long mmcra = regs->dsisr;
	int marked = mmcra & MMCRA_SAMPLE_ENABLE;

	if ((ppmu->flags & PPMU_SIAR_VALID) && marked)
		return mmcra & POWER7P_MMCRA_SIAR_VALID;

	return 1;
}

#endif /* CONFIG_PPC64 */
static void perf_event_interrupt(struct pt_regs *regs);

void perf_event_print_debug(void)
{
}
/*
 * Read one performance monitor counter (PMC).
 */
static unsigned long read_pmc(int idx)
{
	unsigned long val;

	switch (idx) {
	case 1:
		val = mfspr(SPRN_PMC1);
		break;
	case 2:
		val = mfspr(SPRN_PMC2);
		break;
	case 3:
		val = mfspr(SPRN_PMC3);
		break;
	case 4:
		val = mfspr(SPRN_PMC4);
		break;
	case 5:
		val = mfspr(SPRN_PMC5);
		break;
	case 6:
		val = mfspr(SPRN_PMC6);
		break;
#ifdef CONFIG_PPC64
	case 7:
		val = mfspr(SPRN_PMC7);
		break;
	case 8:
		val = mfspr(SPRN_PMC8);
		break;
#endif /* CONFIG_PPC64 */
	default:
		printk(KERN_ERR "oops trying to read PMC%d\n", idx);
		val = 0;
	}
	return val;
}
/*
 * Write one PMC.
 */
static void write_pmc(int idx, unsigned long val)
{
	switch (idx) {
	case 1:
		mtspr(SPRN_PMC1, val);
		break;
	case 2:
		mtspr(SPRN_PMC2, val);
		break;
	case 3:
		mtspr(SPRN_PMC3, val);
		break;
	case 4:
		mtspr(SPRN_PMC4, val);
		break;
	case 5:
		mtspr(SPRN_PMC5, val);
		break;
	case 6:
		mtspr(SPRN_PMC6, val);
		break;
#ifdef CONFIG_PPC64
	case 7:
		mtspr(SPRN_PMC7, val);
		break;
	case 8:
		mtspr(SPRN_PMC8, val);
		break;
#endif /* CONFIG_PPC64 */
	default:
		printk(KERN_ERR "oops trying to write PMC%d\n", idx);
	}
}
/*
 * Check if a set of events can all go on the PMU at once.
 * If they can't, this will look at alternative codes for the events
 * and see if any combination of alternative codes is feasible.
 * The feasible set is returned in event_id[].
 */
static int power_check_constraints(struct cpu_hw_events *cpuhw,
				   u64 event_id[], unsigned int cflags[],
				   int n_ev)
{
	unsigned long mask, value, nv;
	unsigned long smasks[MAX_HWEVENTS], svalues[MAX_HWEVENTS];
	int n_alt[MAX_HWEVENTS], choice[MAX_HWEVENTS];
	int i, j;
	unsigned long addf = ppmu->add_fields;
	unsigned long tadd = ppmu->test_adder;

	if (n_ev > ppmu->n_counter)
		return -1;

	/* First see if the events will go on as-is */
	for (i = 0; i < n_ev; ++i) {
		if ((cflags[i] & PPMU_LIMITED_PMC_REQD)
		    && !ppmu->limited_pmc_event(event_id[i])) {
			ppmu->get_alternatives(event_id[i], cflags[i],
					       cpuhw->alternatives[i]);
			event_id[i] = cpuhw->alternatives[i][0];
		}
		if (ppmu->get_constraint(event_id[i], &cpuhw->amasks[i][0],
					 &cpuhw->avalues[i][0]))
			return -1;
	}
	value = mask = 0;
	for (i = 0; i < n_ev; ++i) {
		nv = (value | cpuhw->avalues[i][0]) +
			(value & cpuhw->avalues[i][0] & addf);
		if ((((nv + tadd) ^ value) & mask) != 0 ||
		    (((nv + tadd) ^ cpuhw->avalues[i][0]) &
		     cpuhw->amasks[i][0]) != 0)
			break;
		value = nv;
		mask |= cpuhw->amasks[i][0];
	}
	if (i == n_ev)
		return 0;	/* all OK */

	/* doesn't work, gather alternatives... */
	if (!ppmu->get_alternatives)
		return -1;
	for (i = 0; i < n_ev; ++i) {
		choice[i] = 0;
		n_alt[i] = ppmu->get_alternatives(event_id[i], cflags[i],
						  cpuhw->alternatives[i]);
		for (j = 1; j < n_alt[i]; ++j)
			ppmu->get_constraint(cpuhw->alternatives[i][j],
					     &cpuhw->amasks[i][j],
					     &cpuhw->avalues[i][j]);
	}

	/* enumerate all possibilities and see if any will work */
	i = 0;
	j = -1;
	value = mask = nv = 0;
	while (1) {
		if (j >= 0) {
			/* we're backtracking, restore context */
			value = svalues[i];
			mask = smasks[i];
			j = choice[i];
		}
		/*
		 * See if any alternative k for event_id i,
		 * where k > j, will satisfy the constraints.
		 */
		while (++j < n_alt[i]) {
			nv = (value | cpuhw->avalues[i][j]) +
				(value & cpuhw->avalues[i][j] & addf);
			if ((((nv + tadd) ^ value) & mask) == 0 &&
			    (((nv + tadd) ^ cpuhw->avalues[i][j])
			     & cpuhw->amasks[i][j]) == 0)
				break;
		}
		if (j >= n_alt[i]) {
			/*
			 * No feasible alternative, backtrack
			 * to event_id i-1 and continue enumerating its
			 * alternatives from where we got up to.
			 */
			if (--i < 0)
				return -1;
		} else {
			/*
			 * Found a feasible alternative for event_id i,
			 * remember where we got up to with this event_id,
			 * go on to the next event_id, and start with
			 * the first alternative for it.
			 */
			choice[i] = j;
			svalues[i] = value;
			smasks[i] = mask;
			value = nv;
			mask |= cpuhw->amasks[i][j];
			if (++i >= n_ev)
				break;
			j = -1;
		}
	}

	/* OK, we have a feasible combination, tell the caller the solution */
	for (i = 0; i < n_ev; ++i)
		event_id[i] = cpuhw->alternatives[i][choice[i]];
	return 0;
}
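/*
 * Note on the constraint encoding used above (illustrative summary, not
 * from the original source): each event's (amask, avalue) pair packs the
 * PMU resources it needs into small bit-fields of one 64-bit word.  Fields
 * that count "how many events need resource X" are accumulated with
 * (value | avalue) + (value & avalue & add_fields), where ppmu->add_fields
 * has a 1 in the lowest bit of every such field so that the single unit
 * each event contributes carries correctly within the field.
 * ppmu->test_adder is chosen so that adding it makes an over-committed
 * field carry into an "error" bit covered by the accumulated mask, which
 * is what the (((nv + tadd) ^ value) & mask) test detects.  For example,
 * if a 3-bit field counts events that need PMC1-PMC4 (at most 4 allowed),
 * each such event adds 1 to the field and test_adder adds 3 there, so a
 * fifth event carries into the masked error bit and the combination is
 * rejected.
 */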
/*
 * Check if newly-added events have consistent settings for
 * exclude_{user,kernel,hv} with each other and any previously
 * added events.
 */
static int check_excludes(struct perf_event **ctrs, unsigned int cflags[],
			  int n_prev, int n_new)
{
	int eu = 0, ek = 0, eh = 0;
	int i, n, first;
	struct perf_event *event;

	n = n_prev + n_new;
	if (n <= 1)
		return 0;

	first = 1;
	for (i = 0; i < n; ++i) {
		if (cflags[i] & PPMU_LIMITED_PMC_OK) {
			cflags[i] &= ~PPMU_LIMITED_PMC_REQD;
			continue;
		}
		event = ctrs[i];
		if (first) {
			eu = event->attr.exclude_user;
			ek = event->attr.exclude_kernel;
			eh = event->attr.exclude_hv;
			first = 0;
		} else if (event->attr.exclude_user != eu ||
			   event->attr.exclude_kernel != ek ||
			   event->attr.exclude_hv != eh) {
			return -EAGAIN;
		}
	}

	if (eu || ek || eh)
		for (i = 0; i < n; ++i)
			if (cflags[i] & PPMU_LIMITED_PMC_OK)
				cflags[i] |= PPMU_LIMITED_PMC_REQD;

	return 0;
}
static u64 check_and_compute_delta(u64 prev, u64 val)
{
	u64 delta = (val - prev) & 0xfffffffful;

	/*
	 * POWER7 can roll back counter values, if the new value is smaller
	 * than the previous value it will cause the delta and the counter to
	 * have bogus values unless we rolled a counter over. If a counter is
	 * rolled back, it will be smaller, but within 256, which is the maximum
	 * number of events to rollback at once. If we detect a rollback
	 * return 0. This can lead to a small lack of precision in the
	 * counters.
	 */
	if (prev > val && (prev - val) < 256)
		delta = 0;

	return delta;
}
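/*
 * Illustrative examples of the rollback check above (not from the original
 * source): with prev = 5000 and val = 4900 the counter moved backwards by
 * 100 (< 256), so this is treated as a POWER7 rollback and the delta is
 * reported as 0.  With prev = 0xfffffff0 and val = 0x10 the counter
 * wrapped normally and the masked subtraction yields a delta of 0x20.
 */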
static void power_pmu_read(struct perf_event *event)
{
	s64 val, delta, prev;

	if (event->hw.state & PERF_HES_STOPPED)
		return;

	if (!event->hw.idx)
		return;
	/*
	 * Performance monitor interrupts come even when interrupts
	 * are soft-disabled, as long as interrupts are hard-enabled.
	 * Therefore we treat them like NMIs.
	 */
	do {
		prev = local64_read(&event->hw.prev_count);
		barrier();
		val = read_pmc(event->hw.idx);
		delta = check_and_compute_delta(prev, val);
		if (!delta)
			return;
	} while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);

	local64_add(delta, &event->count);
	local64_sub(delta, &event->hw.period_left);
}
/*
 * On some machines, PMC5 and PMC6 can't be written, don't respect
 * the freeze conditions, and don't generate interrupts. This tells
 * us if `event' is using such a PMC.
 */
static int is_limited_pmc(int pmcnum)
{
	return (ppmu->flags & PPMU_LIMITED_PMC5_6)
		&& (pmcnum == 5 || pmcnum == 6);
}

static void freeze_limited_counters(struct cpu_hw_events *cpuhw,
				    unsigned long pmc5, unsigned long pmc6)
{
	struct perf_event *event;
	u64 val, prev, delta;
	int i;

	for (i = 0; i < cpuhw->n_limited; ++i) {
		event = cpuhw->limited_counter[i];
		if (!event->hw.idx)
			continue;
		val = (event->hw.idx == 5) ? pmc5 : pmc6;
		prev = local64_read(&event->hw.prev_count);
		event->hw.idx = 0;
		delta = check_and_compute_delta(prev, val);
		if (delta)
			local64_add(delta, &event->count);
	}
}
static void thaw_limited_counters(struct cpu_hw_events *cpuhw,
				  unsigned long pmc5, unsigned long pmc6)
{
	struct perf_event *event;
	u64 val, prev;
	int i;

	for (i = 0; i < cpuhw->n_limited; ++i) {
		event = cpuhw->limited_counter[i];
		event->hw.idx = cpuhw->limited_hwidx[i];
		val = (event->hw.idx == 5) ? pmc5 : pmc6;
		prev = local64_read(&event->hw.prev_count);
		if (check_and_compute_delta(prev, val))
			local64_set(&event->hw.prev_count, val);
		perf_event_update_userpage(event);
	}
}
/*
 * Since limited events don't respect the freeze conditions, we
 * have to read them immediately after freezing or unfreezing the
 * other events. We try to keep the values from the limited
 * events as consistent as possible by keeping the delay (in
 * cycles and instructions) between freezing/unfreezing and reading
 * the limited events as small and consistent as possible.
 * Therefore, if any limited events are in use, we read them
 * both, and always in the same order, to minimize variability,
 * and do it inside the same asm that writes MMCR0.
 */
static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0)
{
	unsigned long pmc5, pmc6;

	if (!cpuhw->n_limited) {
		mtspr(SPRN_MMCR0, mmcr0);
		return;
	}

	/*
	 * Write MMCR0, then read PMC5 and PMC6 immediately.
	 * To ensure we don't get a performance monitor interrupt
	 * between writing MMCR0 and freezing/thawing the limited
	 * events, we first write MMCR0 with the event overflow
	 * interrupt enable bits turned off.
	 */
	asm volatile("mtspr %3,%2; mfspr %0,%4; mfspr %1,%5"
		     : "=&r" (pmc5), "=&r" (pmc6)
		     : "r" (mmcr0 & ~(MMCR0_PMC1CE | MMCR0_PMCjCE)),
		       "i" (SPRN_MMCR0),
		       "i" (SPRN_PMC5), "i" (SPRN_PMC6));

	if (mmcr0 & MMCR0_FC)
		freeze_limited_counters(cpuhw, pmc5, pmc6);
	else
		thaw_limited_counters(cpuhw, pmc5, pmc6);

	/*
	 * Write the full MMCR0 including the event overflow interrupt
	 * enable bits, if necessary.
	 */
	if (mmcr0 & (MMCR0_PMC1CE | MMCR0_PMCjCE))
		mtspr(SPRN_MMCR0, mmcr0);
}
/*
 * Disable all events to prevent PMU interrupts and to allow
 * events to be added or removed.
 */
static void power_pmu_disable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw;
	unsigned long flags;

	if (!ppmu)
		return;
	local_irq_save(flags);
	cpuhw = &__get_cpu_var(cpu_hw_events);

	if (!cpuhw->disabled) {
		cpuhw->disabled = 1;
		cpuhw->n_added = 0;

		/*
		 * Check if we ever enabled the PMU on this cpu.
		 */
		if (!cpuhw->pmcs_enabled) {
			ppc_enable_pmcs();
			cpuhw->pmcs_enabled = 1;
		}

		/*
		 * Disable instruction sampling if it was enabled
		 */
		if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
			mtspr(SPRN_MMCRA,
			      cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
			mb();
		}

		/*
		 * Set the 'freeze counters' bit.
		 * The barrier is to make sure the mtspr has been
		 * executed and the PMU has frozen the events
		 * before we return.
		 */
		write_mmcr0(cpuhw, mfspr(SPRN_MMCR0) | MMCR0_FC);
		mb();
	}
	local_irq_restore(flags);
}
/*
 * Re-enable all events if disable == 0.
 * If we were previously disabled and events were added, then
 * put the new config on the PMU.
 */
static void power_pmu_enable(struct pmu *pmu)
{
	struct perf_event *event;
	struct cpu_hw_events *cpuhw;
	unsigned long flags;
	long i;
	unsigned long val;
	s64 left;
	unsigned int hwc_index[MAX_HWEVENTS];
	int n_lim;
	int idx;

	if (!ppmu)
		return;
	local_irq_save(flags);
	cpuhw = &__get_cpu_var(cpu_hw_events);
	if (!cpuhw->disabled) {
		local_irq_restore(flags);
		return;
	}
	cpuhw->disabled = 0;

	/*
	 * If we didn't change anything, or only removed events,
	 * no need to recalculate MMCR* settings and reset the PMCs.
	 * Just reenable the PMU with the current MMCR* settings
	 * (possibly updated for removal of events).
	 */
	if (!cpuhw->n_added) {
		mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
		mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
		if (cpuhw->n_events == 0)
			ppc_set_pmu_inuse(0);
		goto out_enable;
	}

	/*
	 * Compute MMCR* values for the new set of events
	 */
	if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_events, hwc_index,
			       cpuhw->mmcr)) {
		/* shouldn't ever get here */
		printk(KERN_ERR "oops compute_mmcr failed\n");
		goto out;
	}

	/*
	 * Add in MMCR0 freeze bits corresponding to the
	 * attr.exclude_* bits for the first event.
	 * We have already checked that all events have the
	 * same values for these bits as the first event.
	 */
	event = cpuhw->event[0];
	if (event->attr.exclude_user)
		cpuhw->mmcr[0] |= MMCR0_FCP;
	if (event->attr.exclude_kernel)
		cpuhw->mmcr[0] |= freeze_events_kernel;
	if (event->attr.exclude_hv)
		cpuhw->mmcr[0] |= MMCR0_FCHV;

	/*
	 * Write the new configuration to MMCR* with the freeze
	 * bit set and set the hardware events to their initial values.
	 * Then unfreeze the events.
	 */
	ppc_set_pmu_inuse(1);
	mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
	mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
	mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE))
				| MMCR0_FC);

	/*
	 * Read off any pre-existing events that need to move
	 * to another PMC.
	 */
	for (i = 0; i < cpuhw->n_events; ++i) {
		event = cpuhw->event[i];
		if (event->hw.idx && event->hw.idx != hwc_index[i] + 1) {
			power_pmu_read(event);
			write_pmc(event->hw.idx, 0);
			event->hw.idx = 0;
		}
	}

	/*
	 * Initialize the PMCs for all the new and moved events.
	 */
	cpuhw->n_limited = n_lim = 0;
	for (i = 0; i < cpuhw->n_events; ++i) {
		event = cpuhw->event[i];
		if (event->hw.idx)
			continue;
		idx = hwc_index[i] + 1;
		if (is_limited_pmc(idx)) {
			cpuhw->limited_counter[n_lim] = event;
			cpuhw->limited_hwidx[n_lim] = idx;
			++n_lim;
			continue;
		}
		val = 0;
		if (event->hw.sample_period) {
			left = local64_read(&event->hw.period_left);
			if (left < 0x80000000L)
				val = 0x80000000L - left;
		}
		local64_set(&event->hw.prev_count, val);
		event->hw.idx = idx;
		if (event->hw.state & PERF_HES_STOPPED)
			val = 0;
		write_pmc(idx, val);
		perf_event_update_userpage(event);
	}
	cpuhw->n_limited = n_lim;
	cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE;

 out_enable:
	mb();
	write_mmcr0(cpuhw, cpuhw->mmcr[0]);

	/*
	 * Enable instruction sampling if necessary
	 */
	if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
		mb();
		mtspr(SPRN_MMCRA, cpuhw->mmcr[2]);
	}

 out:
	local_irq_restore(flags);
}
static int collect_events(struct perf_event *group, int max_count,
			  struct perf_event *ctrs[], u64 *events,
			  unsigned int *flags)
{
	int n = 0;
	struct perf_event *event;

	if (!is_software_event(group)) {
		if (n >= max_count)
			return -1;
		ctrs[n] = group;
		flags[n] = group->hw.event_base;
		events[n++] = group->hw.config;
	}
	list_for_each_entry(event, &group->sibling_list, group_entry) {
		if (!is_software_event(event) &&
		    event->state != PERF_EVENT_STATE_OFF) {
			if (n >= max_count)
				return -1;
			ctrs[n] = event;
			flags[n] = event->hw.event_base;
			events[n++] = event->hw.config;
		}
	}
	return n;
}
/*
 * Add an event to the PMU.
 * If all events are not already frozen, then we disable and
 * re-enable the PMU in order to get hw_perf_enable to do the
 * actual work of reconfiguring the PMU.
 */
static int power_pmu_add(struct perf_event *event, int ef_flags)
{
	struct cpu_hw_events *cpuhw;
	unsigned long flags;
	int n0;
	int ret = -EAGAIN;

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	/*
	 * Add the event to the list (if there is room)
	 * and check whether the total set is still feasible.
	 */
	cpuhw = &__get_cpu_var(cpu_hw_events);
	n0 = cpuhw->n_events;
	if (n0 >= ppmu->n_counter)
		goto out;
	cpuhw->event[n0] = event;
	cpuhw->events[n0] = event->hw.config;
	cpuhw->flags[n0] = event->hw.event_base;

	/*
	 * This event may have been disabled/stopped in record_and_restart()
	 * because we exceeded the ->event_limit. If re-starting the event,
	 * clear the ->hw.state (STOPPED and UPTODATE flags), so the user
	 * notification is re-enabled.
	 */
	if (!(ef_flags & PERF_EF_START))
		event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	else
		event->hw.state = 0;

	/*
	 * If group events scheduling transaction was started,
	 * skip the schedulability test here, it will be performed
	 * at commit time(->commit_txn) as a whole
	 */
	if (cpuhw->group_flag & PERF_EVENT_TXN)
		goto nocheck;

	if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1))
		goto out;
	if (power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n0 + 1))
		goto out;
	event->hw.config = cpuhw->events[n0];

nocheck:
	++cpuhw->n_events;
	++cpuhw->n_added;

	ret = 0;
 out:
	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
	return ret;
}
/*
 * Remove an event from the PMU.
 */
static void power_pmu_del(struct perf_event *event, int ef_flags)
{
	struct cpu_hw_events *cpuhw;
	long i;
	unsigned long flags;

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	power_pmu_read(event);

	cpuhw = &__get_cpu_var(cpu_hw_events);
	for (i = 0; i < cpuhw->n_events; ++i) {
		if (event == cpuhw->event[i]) {
			while (++i < cpuhw->n_events) {
				cpuhw->event[i-1] = cpuhw->event[i];
				cpuhw->events[i-1] = cpuhw->events[i];
				cpuhw->flags[i-1] = cpuhw->flags[i];
			}
			--cpuhw->n_events;
			ppmu->disable_pmc(event->hw.idx - 1, cpuhw->mmcr);
			if (event->hw.idx) {
				write_pmc(event->hw.idx, 0);
				event->hw.idx = 0;
			}
			perf_event_update_userpage(event);
			break;
		}
	}
	for (i = 0; i < cpuhw->n_limited; ++i)
		if (event == cpuhw->limited_counter[i])
			break;
	if (i < cpuhw->n_limited) {
		while (++i < cpuhw->n_limited) {
			cpuhw->limited_counter[i-1] = cpuhw->limited_counter[i];
			cpuhw->limited_hwidx[i-1] = cpuhw->limited_hwidx[i];
		}
		--cpuhw->n_limited;
	}
	if (cpuhw->n_events == 0) {
		/* disable exceptions if no events are running */
		cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE);
	}

	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
}
/*
 * POWER-PMU does not support disabling individual counters, hence
 * program their cycle counter to their max value and ignore the interrupts.
 */

static void power_pmu_start(struct perf_event *event, int ef_flags)
{
	unsigned long flags;
	s64 left;
	unsigned long val;

	if (!event->hw.idx || !event->hw.sample_period)
		return;

	if (!(event->hw.state & PERF_HES_STOPPED))
		return;

	if (ef_flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	event->hw.state = 0;
	left = local64_read(&event->hw.period_left);

	val = 0;
	if (left < 0x80000000L)
		val = 0x80000000L - left;

	write_pmc(event->hw.idx, val);

	perf_event_update_userpage(event);
	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
}
static void power_pmu_stop(struct perf_event *event, int ef_flags)
{
	unsigned long flags;

	if (!event->hw.idx || !event->hw.sample_period)
		return;

	if (event->hw.state & PERF_HES_STOPPED)
		return;

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	power_pmu_read(event);
	event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	write_pmc(event->hw.idx, 0);

	perf_event_update_userpage(event);
	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
}
/*
 * Start group events scheduling transaction
 * Set the flag to make pmu::enable() not perform the
 * schedulability test, it will be performed at commit time
 */
void power_pmu_start_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

	perf_pmu_disable(pmu);
	cpuhw->group_flag |= PERF_EVENT_TXN;
	cpuhw->n_txn_start = cpuhw->n_events;
}
/*
 * Stop group events scheduling transaction
 * Clear the flag and pmu::enable() will perform the
 * schedulability test.
 */
void power_pmu_cancel_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

	cpuhw->group_flag &= ~PERF_EVENT_TXN;
	perf_pmu_enable(pmu);
}
/*
 * Commit group events scheduling transaction
 * Perform the group schedulability test as a whole
 * Return 0 if success
 */
int power_pmu_commit_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw;
	long i, n;

	if (!ppmu)
		return -EAGAIN;
	cpuhw = &__get_cpu_var(cpu_hw_events);
	n = cpuhw->n_events;
	if (check_excludes(cpuhw->event, cpuhw->flags, 0, n))
		return -EAGAIN;
	i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n);
	if (i < 0)
		return -EAGAIN;

	for (i = cpuhw->n_txn_start; i < n; ++i)
		cpuhw->event[i]->hw.config = cpuhw->events[i];

	cpuhw->group_flag &= ~PERF_EVENT_TXN;
	perf_pmu_enable(pmu);
	return 0;
}
/*
 * Return 1 if we might be able to put event on a limited PMC,
 * or 0 if not.
 * An event can only go on a limited PMC if it counts something
 * that a limited PMC can count, doesn't require interrupts, and
 * doesn't exclude any processor mode.
 */
static int can_go_on_limited_pmc(struct perf_event *event, u64 ev,
				 unsigned int flags)
{
	int n;
	u64 alt[MAX_EVENT_ALTERNATIVES];

	if (event->attr.exclude_user
	    || event->attr.exclude_kernel
	    || event->attr.exclude_hv
	    || event->attr.sample_period)
		return 0;

	if (ppmu->limited_pmc_event(ev))
		return 1;

	/*
	 * The requested event_id isn't on a limited PMC already;
	 * see if any alternative code goes on a limited PMC.
	 */
	if (!ppmu->get_alternatives)
		return 0;

	flags |= PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD;
	n = ppmu->get_alternatives(ev, flags, alt);

	return n > 0;
}
/*
 * Find an alternative event_id that goes on a normal PMC, if possible,
 * and return the event_id code, or 0 if there is no such alternative.
 * (Note: event_id code 0 is "don't count" on all machines.)
 */
static u64 normal_pmc_alternative(u64 ev, unsigned long flags)
{
	u64 alt[MAX_EVENT_ALTERNATIVES];
	int n;

	flags &= ~(PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD);
	n = ppmu->get_alternatives(ev, flags, alt);
	if (!n)
		return 0;
	return alt[0];
}
/* Number of perf_events counting hardware events */
static atomic_t num_events;
/* Used to avoid races in calling reserve/release_pmc_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);

/*
 * Release the PMU if this is the last perf_event.
 */
static void hw_perf_event_destroy(struct perf_event *event)
{
	if (!atomic_add_unless(&num_events, -1, 1)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_dec_return(&num_events) == 0)
			release_pmc_hardware();
		mutex_unlock(&pmc_reserve_mutex);
	}
}
/*
 * Translate a generic cache event_id config to a raw event_id code.
 */
static int hw_perf_cache_event(u64 config, u64 *eventp)
{
	unsigned long type, op, result;
	int ev;

	if (!ppmu->cache_events)
		return -EINVAL;

	/* unpack config */
	type = config & 0xff;
	op = (config >> 8) & 0xff;
	result = (config >> 16) & 0xff;

	if (type >= PERF_COUNT_HW_CACHE_MAX ||
	    op >= PERF_COUNT_HW_CACHE_OP_MAX ||
	    result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ev = (*ppmu->cache_events)[type][op][result];
	if (ev == 0)
		return -EOPNOTSUPP;
	if (ev == -1)
		return -EINVAL;
	*eventp = ev;
	return 0;
}
static int power_pmu_event_init(struct perf_event *event)
{
	u64 ev;
	unsigned long flags;
	struct perf_event *ctrs[MAX_HWEVENTS];
	u64 events[MAX_HWEVENTS];
	unsigned int cflags[MAX_HWEVENTS];
	int n;
	int err;
	struct cpu_hw_events *cpuhw;

	if (!ppmu)
		return -ENOENT;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
		ev = event->attr.config;
		if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
			return -EOPNOTSUPP;
		ev = ppmu->generic_events[ev];
		break;
	case PERF_TYPE_HW_CACHE:
		err = hw_perf_cache_event(event->attr.config, &ev);
		if (err)
			return err;
		break;
	case PERF_TYPE_RAW:
		ev = event->attr.config;
		break;
	default:
		return -ENOENT;
	}

	event->hw.config_base = ev;
	event->hw.idx = 0;

	/*
	 * If we are not running on a hypervisor, force the
	 * exclude_hv bit to 0 so that we don't care what
	 * the user set it to.
	 */
	if (!firmware_has_feature(FW_FEATURE_LPAR))
		event->attr.exclude_hv = 0;

	/*
	 * If this is a per-task event, then we can use
	 * PM_RUN_* events interchangeably with their non RUN_*
	 * equivalents, e.g. PM_RUN_CYC instead of PM_CYC.
	 * XXX we should check if the task is an idle task.
	 */
	flags = 0;
	if (event->attach_state & PERF_ATTACH_TASK)
		flags |= PPMU_ONLY_COUNT_RUN;

	/*
	 * If this machine has limited events, check whether this
	 * event_id could go on a limited event.
	 */
	if (ppmu->flags & PPMU_LIMITED_PMC5_6) {
		if (can_go_on_limited_pmc(event, ev, flags)) {
			flags |= PPMU_LIMITED_PMC_OK;
		} else if (ppmu->limited_pmc_event(ev)) {
			/*
			 * The requested event_id is on a limited PMC,
			 * but we can't use a limited PMC; see if any
			 * alternative goes on a normal PMC.
			 */
			ev = normal_pmc_alternative(ev, flags);
			if (!ev)
				return -EINVAL;
		}
	}

	/*
	 * If this is in a group, check if it can go on with all the
	 * other hardware events in the group. We assume the event
	 * hasn't been linked into its leader's sibling list at this point.
	 */
	n = 0;
	if (event->group_leader != event) {
		n = collect_events(event->group_leader, ppmu->n_counter - 1,
				   ctrs, events, cflags);
		if (n < 0)
			return -EINVAL;
	}
	events[n] = ev;
	ctrs[n] = event;
	cflags[n] = flags;
	if (check_excludes(ctrs, cflags, n, 1))
		return -EINVAL;

	cpuhw = &get_cpu_var(cpu_hw_events);
	err = power_check_constraints(cpuhw, events, cflags, n + 1);
	put_cpu_var(cpu_hw_events);
	if (err)
		return -EINVAL;

	event->hw.config = events[n];
	event->hw.event_base = cflags[n];
	event->hw.last_period = event->hw.sample_period;
	local64_set(&event->hw.period_left, event->hw.last_period);

	/*
	 * See if we need to reserve the PMU.
	 * If no events are currently in use, then we have to take a
	 * mutex to ensure that we don't race with another task doing
	 * reserve_pmc_hardware or release_pmc_hardware.
	 */
	err = 0;
	if (!atomic_inc_not_zero(&num_events)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&num_events) == 0 &&
		    reserve_pmc_hardware(perf_event_interrupt))
			err = -EBUSY;
		else
			atomic_inc(&num_events);
		mutex_unlock(&pmc_reserve_mutex);
	}
	event->destroy = hw_perf_event_destroy;

	return err;
}
static int power_pmu_event_idx(struct perf_event *event)
{
	return event->hw.idx;
}
ssize_t power_events_sysfs_show(struct device *dev,
				struct device_attribute *attr, char *page)
{
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);

	return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
}
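/*
 * Illustrative usage (not part of this file): per-CPU PMU drivers are
 * expected to build their "events" sysfs attributes with something like
 *
 *	PMU_EVENT_ATTR(PM_CYC, event_attr_PM_CYC, 0x1e,
 *		       power_events_sysfs_show);
 *
 * so that reading .../cpu/events/PM_CYC prints "event=0x1e", which the
 * perf tool can use as a raw event code.  The event names and codes shown
 * here are assumptions modelled on the POWER7 driver; each processor's
 * PMU driver defines its own.
 */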
struct pmu power_pmu = {
	.pmu_enable	= power_pmu_enable,
	.pmu_disable	= power_pmu_disable,
	.event_init	= power_pmu_event_init,
	.add		= power_pmu_add,
	.del		= power_pmu_del,
	.start		= power_pmu_start,
	.stop		= power_pmu_stop,
	.read		= power_pmu_read,
	.start_txn	= power_pmu_start_txn,
	.cancel_txn	= power_pmu_cancel_txn,
	.commit_txn	= power_pmu_commit_txn,
	.event_idx	= power_pmu_event_idx,
};
/*
 * A counter has overflowed; update its count and record
 * things if requested. Note that interrupts are hard-disabled
 * here so there is no possibility of being interrupted.
 */
static void record_and_restart(struct perf_event *event, unsigned long val,
			       struct pt_regs *regs)
{
	u64 period = event->hw.sample_period;
	s64 prev, delta, left;
	int record = 0;

	if (event->hw.state & PERF_HES_STOPPED) {
		write_pmc(event->hw.idx, 0);
		return;
	}

	/* we don't have to worry about interrupts here */
	prev = local64_read(&event->hw.prev_count);
	delta = check_and_compute_delta(prev, val);
	local64_add(delta, &event->count);

	/*
	 * See if the total period for this event has expired,
	 * and update for the next period.
	 */
	val = 0;
	left = local64_read(&event->hw.period_left) - delta;
	if (delta == 0)
		left++;
	if (period) {
		if (left <= 0) {
			left += period;
			if (left <= 0)
				left = period;
			record = siar_valid(regs);
			event->hw.last_period = event->hw.sample_period;
		}
		if (left < 0x80000000LL)
			val = 0x80000000LL - left;
	}

	write_pmc(event->hw.idx, val);
	local64_set(&event->hw.prev_count, val);
	local64_set(&event->hw.period_left, left);
	perf_event_update_userpage(event);

	/*
	 * Finally record data if requested.
	 */
	if (record) {
		struct perf_sample_data data;

		perf_sample_data_init(&data, ~0ULL, event->hw.last_period);

		if (event->attr.sample_type & PERF_SAMPLE_ADDR)
			perf_get_data_addr(regs, &data.addr);

		if (perf_event_overflow(event, &data, regs))
			power_pmu_stop(event, 0);
	}
}
/*
 * Called from generic code to get the misc flags (i.e. processor mode)
 * for an event_id.
 */
unsigned long perf_misc_flags(struct pt_regs *regs)
{
	u32 flags = perf_get_misc_flags(regs);

	if (flags)
		return flags;
	return user_mode(regs) ? PERF_RECORD_MISC_USER :
		PERF_RECORD_MISC_KERNEL;
}
/*
 * Called from generic code to get the instruction pointer
 * for an event_id.
 */
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
	unsigned long use_siar = regs->result;

	if (use_siar && siar_valid(regs))
		return mfspr(SPRN_SIAR) + perf_ip_adjust(regs);
	else if (use_siar)
		return 0;		// no valid instruction pointer
	else
		return regs->nip;
}
static bool pmc_overflow_power7(unsigned long val)
{
	/*
	 * Events on POWER7 can roll back if a speculative event doesn't
	 * eventually complete. Unfortunately in some rare cases they will
	 * raise a performance monitor exception. We need to catch this to
	 * ensure we reset the PMC. In all cases the PMC will be 256 or less
	 * cycles from overflow.
	 *
	 * We only do this if the first pass fails to find any overflowing
	 * PMCs because a user might set a period of less than 256 and we
	 * don't want to mistakenly reset them.
	 */
	if ((0x80000000 - val) <= 256)
		return true;

	return false;
}

static bool pmc_overflow(unsigned long val)
{
	if ((int)val < 0)
		return true;

	return false;
}
/*
 * Performance monitor interrupt stuff
 */
static void perf_event_interrupt(struct pt_regs *regs)
{
	int i, j;
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
	struct perf_event *event;
	unsigned long val[8];
	int found, active;
	int nmi;

	if (cpuhw->n_limited)
		freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5),
					mfspr(SPRN_PMC6));

	perf_read_regs(regs);

	nmi = perf_intr_is_nmi(regs);
	if (nmi)
		nmi_enter();
	else
		irq_enter();

	/* Read all the PMCs since we'll need them a bunch of times */
	for (i = 0; i < ppmu->n_counter; ++i)
		val[i] = read_pmc(i + 1);

	/* Try to find what caused the IRQ */
	found = 0;
	for (i = 0; i < ppmu->n_counter; ++i) {
		if (!pmc_overflow(val[i]))
			continue;
		if (is_limited_pmc(i + 1))
			continue; /* these won't generate IRQs */
		/*
		 * We've found one that's overflowed. For active
		 * counters we need to log this. For inactive
		 * counters, we need to reset it anyway
		 */
		found = 1;
		active = 0;
		for (j = 0; j < cpuhw->n_events; ++j) {
			event = cpuhw->event[j];
			if (event->hw.idx == (i + 1)) {
				active = 1;
				record_and_restart(event, val[i], regs);
				break;
			}
		}
		if (!active)
			/* reset non active counters that have overflowed */
			write_pmc(i + 1, 0);
	}
	if (!found && pvr_version_is(PVR_POWER7)) {
		/* check active counters for special buggy p7 overflow */
		for (i = 0; i < cpuhw->n_events; ++i) {
			event = cpuhw->event[i];
			if (!event->hw.idx || is_limited_pmc(event->hw.idx))
				continue;
			if (pmc_overflow_power7(val[event->hw.idx - 1])) {
				/* event has overflowed in a buggy way */
				found = 1;
				record_and_restart(event,
						   val[event->hw.idx - 1],
						   regs);
			}
		}
	}
	if ((!found) && printk_ratelimit())
		printk(KERN_WARNING "Can't find PMC that caused IRQ\n");

	/*
	 * Reset MMCR0 to its normal value. This will set PMXE and
	 * clear FC (freeze counters) and PMAO (perf mon alert occurred)
	 * and thus allow interrupts to occur again.
	 * XXX might want to use MSR.PM to keep the events frozen until
	 * we get back out of this interrupt.
	 */
	write_mmcr0(cpuhw, cpuhw->mmcr[0]);

	if (nmi)
		nmi_exit();
	else
		irq_exit();
}
static void power_pmu_setup(int cpu)
{
	struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);

	if (!ppmu)
		return;
	memset(cpuhw, 0, sizeof(*cpuhw));
	cpuhw->mmcr[0] = MMCR0_FC;
}

static int __cpuinit
power_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		power_pmu_setup(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}
int __cpuinit register_power_pmu(struct power_pmu *pmu)
{
	if (ppmu)
		return -EBUSY;		/* something's already registered */

	ppmu = pmu;
	pr_info("%s performance monitor hardware support registered\n",
		pmu->name);

	power_pmu.attr_groups = ppmu->attr_groups;

#ifdef MSR_HV
	/*
	 * Use FCHV to ignore kernel events if MSR.HV is set.
	 */
	if (mfmsr() & MSR_HV)
		freeze_events_kernel = MMCR0_FCHV;
#endif /* CONFIG_PPC64 */

	perf_pmu_register(&power_pmu, "cpu", PERF_TYPE_RAW);
	perf_cpu_notifier(power_pmu_notifier);

	return 0;
}
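/*
 * Illustrative example (not part of this file): a processor-specific PMU
 * driver registers itself from an early initcall, roughly as follows:
 *
 *	static int __init init_power7_pmu(void)
 *	{
 *		if (!cur_cpu_spec->oprofile_cpu_type ||
 *		    strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power7"))
 *			return -ENODEV;
 *
 *		return register_power_pmu(&power7_pmu);
 *	}
 *	early_initcall(init_power7_pmu);
 *
 * The names above follow the POWER7 driver; other drivers differ only in
 * the cpu-type string and the struct power_pmu they pass in.
 */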