#include <asm/pmc.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
+#include <asm/ptrace.h>
struct cpu_hw_counters {
int n_counters;
int n_limited;
u8 pmcs_enabled;
struct perf_counter *counter[MAX_HWCOUNTERS];
- unsigned int events[MAX_HWCOUNTERS];
+ u64 events[MAX_HWCOUNTERS];
unsigned int flags[MAX_HWCOUNTERS];
u64 mmcr[3];
struct perf_counter *limited_counter[MAX_LIMITED_HWCOUNTERS];
* and see if any combination of alternative codes is feasible.
* The feasible set is returned in event[].
*/
-static int power_check_constraints(unsigned int event[], unsigned int cflags[],
+static int power_check_constraints(u64 event[], unsigned int cflags[],
int n_ev)
{
u64 mask, value, nv;
- unsigned int alternatives[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
+ u64 alternatives[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
u64 amasks[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
u64 avalues[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
u64 smasks[MAX_HWCOUNTERS], svalues[MAX_HWCOUNTERS];
}
counter = ctrs[i];
if (first) {
- eu = counter->hw_event.exclude_user;
- ek = counter->hw_event.exclude_kernel;
- eh = counter->hw_event.exclude_hv;
+ eu = counter->attr.exclude_user;
+ ek = counter->attr.exclude_kernel;
+ eh = counter->attr.exclude_hv;
first = 0;
- } else if (counter->hw_event.exclude_user != eu ||
- counter->hw_event.exclude_kernel != ek ||
- counter->hw_event.exclude_hv != eh) {
+ } else if (counter->attr.exclude_user != eu ||
+ counter->attr.exclude_kernel != ek ||
+ counter->attr.exclude_hv != eh) {
return -EAGAIN;
}
}
*/
static int is_limited_pmc(int pmcnum)
{
- return ppmu->limited_pmc5_6 && (pmcnum == 5 || pmcnum == 6);
+ return (ppmu->flags & PPMU_LIMITED_PMC5_6)
+ && (pmcnum == 5 || pmcnum == 6);
}
static void freeze_limited_counters(struct cpu_hw_counters *cpuhw,
int idx;
local_irq_save(flags);
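+	/* grab the per-cpu state before testing cpuhw->disabled below */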
+ cpuhw = &__get_cpu_var(cpu_hw_counters);
if (!cpuhw->disabled) {
local_irq_restore(flags);
return;
}
-
- cpuhw = &__get_cpu_var(cpu_hw_counters);
cpuhw->disabled = 0;
/*
/*
* Add in MMCR0 freeze bits corresponding to the
- * hw_event.exclude_* bits for the first counter.
+ * attr.exclude_* bits for the first counter.
* We have already checked that all counters have the
* same values for these bits as the first counter.
*/
counter = cpuhw->counter[0];
- if (counter->hw_event.exclude_user)
+ if (counter->attr.exclude_user)
cpuhw->mmcr[0] |= MMCR0_FCP;
- if (counter->hw_event.exclude_kernel)
+ if (counter->attr.exclude_kernel)
cpuhw->mmcr[0] |= freeze_counters_kernel;
- if (counter->hw_event.exclude_hv)
+ if (counter->attr.exclude_hv)
cpuhw->mmcr[0] |= MMCR0_FCHV;
/*
continue;
}
val = 0;
- if (counter->hw_event.irq_period) {
+ if (counter->hw.sample_period) {
left = atomic64_read(&counter->hw.period_left);
if (left < 0x80000000L)
val = 0x80000000L - left;
}
static int collect_events(struct perf_counter *group, int max_count,
- struct perf_counter *ctrs[], unsigned int *events,
+ struct perf_counter *ctrs[], u64 *events,
unsigned int *flags)
{
int n = 0;
local_irq_restore(flags);
}
+/*
+ * Re-enable interrupts on a counter after they were throttled
+ * because they were coming too fast.
+ */
+static void power_pmu_unthrottle(struct perf_counter *counter)
+{
+ s64 val, left;
+ unsigned long flags;
+
+ if (!counter->hw.idx || !counter->hw.sample_period)
+ return;
+ local_irq_save(flags);
+ perf_disable();
+ power_pmu_read(counter);
+ left = counter->hw.sample_period;
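+	/*
+	 * The PMC interrupts when bit 31 goes from 0 to 1, so set it
+	 * to 0x80000000 - left to get another interrupt after "left"
+	 * more counts.
+	 */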
+ val = 0;
+ if (left < 0x80000000L)
+ val = 0x80000000L - left;
+ write_pmc(counter->hw.idx, val);
+ atomic64_set(&counter->hw.prev_count, val);
+ atomic64_set(&counter->hw.period_left, left);
+ perf_counter_update_userpage(counter);
+ perf_enable();
+ local_irq_restore(flags);
+}
+
struct pmu power_pmu = {
.enable = power_pmu_enable,
.disable = power_pmu_disable,
.read = power_pmu_read,
+ .unthrottle = power_pmu_unthrottle,
};
/*
* that a limited PMC can count, doesn't require interrupts, and
* doesn't exclude any processor mode.
*/
-static int can_go_on_limited_pmc(struct perf_counter *counter, unsigned int ev,
+static int can_go_on_limited_pmc(struct perf_counter *counter, u64 ev,
unsigned int flags)
{
int n;
- unsigned int alt[MAX_EVENT_ALTERNATIVES];
+ u64 alt[MAX_EVENT_ALTERNATIVES];
- if (counter->hw_event.exclude_user
- || counter->hw_event.exclude_kernel
- || counter->hw_event.exclude_hv
- || counter->hw_event.irq_period)
+ if (counter->attr.exclude_user
+ || counter->attr.exclude_kernel
+ || counter->attr.exclude_hv
+ || counter->attr.sample_period)
return 0;
if (ppmu->limited_pmc_event(ev))
flags |= PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD;
n = ppmu->get_alternatives(ev, flags, alt);
- if (n)
- return alt[0];
- return 0;
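+	/* any feasible alternative means the event can go on a limited PMC */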
+ return n > 0;
}
/*
* and return the event code, or 0 if there is no such alternative.
* (Note: event code 0 is "don't count" on all machines.)
*/
-static unsigned long normal_pmc_alternative(unsigned long ev,
- unsigned long flags)
+static u64 normal_pmc_alternative(u64 ev, unsigned long flags)
{
- unsigned int alt[MAX_EVENT_ALTERNATIVES];
+ u64 alt[MAX_EVENT_ALTERNATIVES];
int n;
flags &= ~(PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD);
const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
{
- unsigned long ev, flags;
+ u64 ev;
+ unsigned long flags;
struct perf_counter *ctrs[MAX_HWCOUNTERS];
- unsigned int events[MAX_HWCOUNTERS];
+ u64 events[MAX_HWCOUNTERS];
unsigned int cflags[MAX_HWCOUNTERS];
int n;
int err;
if (!ppmu)
return ERR_PTR(-ENXIO);
- if ((s64)counter->hw_event.irq_period < 0)
- return ERR_PTR(-EINVAL);
- if (!perf_event_raw(&counter->hw_event)) {
- ev = perf_event_id(&counter->hw_event);
+ if (!perf_event_raw(&counter->attr)) {
+ ev = perf_event_id(&counter->attr);
if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
return ERR_PTR(-EOPNOTSUPP);
ev = ppmu->generic_events[ev];
} else {
- ev = perf_event_config(&counter->hw_event);
+ ev = perf_event_config(&counter->attr);
}
counter->hw.config_base = ev;
counter->hw.idx = 0;
* the user set it to.
*/
if (!firmware_has_feature(FW_FEATURE_LPAR))
- counter->hw_event.exclude_hv = 0;
+ counter->attr.exclude_hv = 0;
/*
* If this is a per-task counter, then we can use
* If this machine has limited counters, check whether this
* event could go on a limited counter.
*/
- if (ppmu->limited_pmc5_6) {
+ if (ppmu->flags & PPMU_LIMITED_PMC5_6) {
if (can_go_on_limited_pmc(counter, ev, flags)) {
flags |= PPMU_LIMITED_PMC_OK;
} else if (ppmu->limited_pmc_event(ev)) {
counter->hw.config = events[n];
counter->hw.counter_base = cflags[n];
- atomic64_set(&counter->hw.period_left, counter->hw_event.irq_period);
+ atomic64_set(&counter->hw.period_left, counter->hw.sample_period);
/*
* See if we need to reserve the PMU.
static void record_and_restart(struct perf_counter *counter, long val,
struct pt_regs *regs, int nmi)
{
+ u64 period = counter->hw.sample_period;
s64 prev, delta, left;
int record = 0;
+ u64 addr, mmcra, sdsync;
/* we don't have to worry about interrupts here */
prev = atomic64_read(&counter->hw.prev_count);
*/
val = 0;
left = atomic64_read(&counter->hw.period_left) - delta;
- if (counter->hw_event.irq_period) {
+ if (period) {
if (left <= 0) {
- left += counter->hw_event.irq_period;
+ left += period;
if (left <= 0)
- left = counter->hw_event.irq_period;
+ left = period;
record = 1;
}
if (left < 0x80000000L)
val = 0x80000000L - left;
}
+
+ /*
+ * Finally record data if requested.
+ */
+ if (record) {
+ addr = 0;
+		if (counter->attr.sample_type & PERF_SAMPLE_ADDR) {
+ /*
+ * The user wants a data address recorded.
+ * If we're not doing instruction sampling,
+ * give them the SDAR (sampled data address).
+ * If we are doing instruction sampling, then only
+ * give them the SDAR if it corresponds to the
+ * instruction pointed to by SIAR; this is indicated
+ * by the [POWER6_]MMCRA_SDSYNC bit in MMCRA.
+ */
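+			/* regs->dsisr holds MMCRA, saved in perf_counter_interrupt() */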
+ mmcra = regs->dsisr;
+ sdsync = (ppmu->flags & PPMU_ALT_SIPR) ?
+ POWER6_MMCRA_SDSYNC : MMCRA_SDSYNC;
+ if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync))
+ addr = mfspr(SPRN_SDAR);
+ }
+ if (perf_counter_overflow(counter, nmi, regs, addr)) {
+ /*
+ * Interrupts are coming too fast - throttle them
+ * by setting the counter to 0, so it will be
+ * at least 2^30 cycles until the next interrupt
+ * (assuming each counter counts at most 2 counts
+ * per cycle).
+ */
+ val = 0;
+ left = ~0ULL >> 1;
+ }
+ }
+
write_pmc(counter->hw.idx, val);
atomic64_set(&counter->hw.prev_count, val);
atomic64_set(&counter->hw.period_left, left);
perf_counter_update_userpage(counter);
+}
- /*
- * Finally record data if requested.
- */
- if (record)
- perf_counter_overflow(counter, nmi, regs, 0);
+/*
+ * Called from generic code to get the misc flags (i.e. processor mode)
+ * for an event.
+ */
+unsigned long perf_misc_flags(struct pt_regs *regs)
+{
+ unsigned long mmcra;
+
+ if (TRAP(regs) != 0xf00) {
+ /* not a PMU interrupt */
+ return user_mode(regs) ? PERF_EVENT_MISC_USER :
+ PERF_EVENT_MISC_KERNEL;
+ }
+
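+	/* regs->dsisr holds MMCRA; see perf_counter_interrupt() */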
+ mmcra = regs->dsisr;
+ if (ppmu->flags & PPMU_ALT_SIPR) {
+ if (mmcra & POWER6_MMCRA_SIHV)
+ return PERF_EVENT_MISC_HYPERVISOR;
+ return (mmcra & POWER6_MMCRA_SIPR) ? PERF_EVENT_MISC_USER :
+ PERF_EVENT_MISC_KERNEL;
+ }
+ if (mmcra & MMCRA_SIHV)
+ return PERF_EVENT_MISC_HYPERVISOR;
+ return (mmcra & MMCRA_SIPR) ? PERF_EVENT_MISC_USER :
+ PERF_EVENT_MISC_KERNEL;
+}
+
+/*
+ * Called from generic code to get the instruction pointer
+ * for an event.
+ */
+unsigned long perf_instruction_pointer(struct pt_regs *regs)
+{
+ unsigned long mmcra;
+ unsigned long ip;
+ unsigned long slot;
+
+ if (TRAP(regs) != 0xf00)
+ return regs->nip; /* not a PMU interrupt */
+
+ ip = mfspr(SPRN_SIAR);
+ mmcra = regs->dsisr;
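+	/*
+	 * With instruction sampling, SIAR points at the first instruction
+	 * of the sampled dispatch group and the MMCRA slot field gives the
+	 * position of the sampled instruction within the group, so adjust
+	 * the IP by 4 bytes per slot.
+	 */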
+ if ((mmcra & MMCRA_SAMPLE_ENABLE) && !(ppmu->flags & PPMU_ALT_SIPR)) {
+ slot = (mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT;
+ if (slot > 1)
+ ip += 4 * (slot - 1);
+ }
+ return ip;
}
/*
freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5),
mfspr(SPRN_PMC6));
+ /*
+ * Overload regs->dsisr to store MMCRA so we only need to read it once.
+ */
+ regs->dsisr = mfspr(SPRN_MMCRA);
+
/*
* If interrupts were soft-disabled when this PMU interrupt
* occurred, treat it as an NMI.