perf_counter: Rename perf_counter_hw_event => perf_counter_attr
diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c
index fe21b2440f28296bef2f0e13f4160e2e195e73a0..ea54686cb7878dae52a72496d6eff89fdfe639cb 100644
--- a/arch/powerpc/kernel/perf_counter.c
+++ b/arch/powerpc/kernel/perf_counter.c
@@ -262,13 +262,13 @@ static int check_excludes(struct perf_counter **ctrs, unsigned int cflags[],
                }
                counter = ctrs[i];
                if (first) {
-                       eu = counter->hw_event.exclude_user;
-                       ek = counter->hw_event.exclude_kernel;
-                       eh = counter->hw_event.exclude_hv;
+                       eu = counter->attr.exclude_user;
+                       ek = counter->attr.exclude_kernel;
+                       eh = counter->attr.exclude_hv;
                        first = 0;
-               } else if (counter->hw_event.exclude_user != eu ||
-                          counter->hw_event.exclude_kernel != ek ||
-                          counter->hw_event.exclude_hv != eh) {
+               } else if (counter->attr.exclude_user != eu ||
+                          counter->attr.exclude_kernel != ek ||
+                          counter->attr.exclude_hv != eh) {
                        return -EAGAIN;
                }
        }
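
Note on the check above: the exclude_user/exclude_kernel/exclude_hv selections are not applied per counter on this hardware; they are implemented with global MMCR0 freeze bits (set in the hw_perf_enable() hunk below), so every counter scheduled onto the PMU at the same time must request identical exclusions, and a mismatch makes the group unschedulable (-EAGAIN). A standalone sketch of the same consistency check, using a hypothetical attr_flags type rather than the kernel's structures:

#include <stdbool.h>

/* Hypothetical stand-in for the exclude bits of perf_counter_attr. */
struct attr_flags {
        unsigned int exclude_user:1;
        unsigned int exclude_kernel:1;
        unsigned int exclude_hv:1;
};

/* All counters grouped on the PMU must agree on their exclude bits,
 * because the exclusions are enforced by shared MMCR0 freeze bits. */
static bool excludes_consistent(const struct attr_flags *attrs, int n)
{
        int i;

        for (i = 1; i < n; i++) {
                if (attrs[i].exclude_user   != attrs[0].exclude_user ||
                    attrs[i].exclude_kernel != attrs[0].exclude_kernel ||
                    attrs[i].exclude_hv     != attrs[0].exclude_hv)
                        return false;   /* the kernel code returns -EAGAIN here */
        }
        return true;
}
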
@@ -483,16 +483,16 @@ void hw_perf_enable(void)
 
        /*
         * Add in MMCR0 freeze bits corresponding to the
-        * hw_event.exclude_* bits for the first counter.
+        * attr.exclude_* bits for the first counter.
         * We have already checked that all counters have the
         * same values for these bits as the first counter.
         */
        counter = cpuhw->counter[0];
-       if (counter->hw_event.exclude_user)
+       if (counter->attr.exclude_user)
                cpuhw->mmcr[0] |= MMCR0_FCP;
-       if (counter->hw_event.exclude_kernel)
+       if (counter->attr.exclude_kernel)
                cpuhw->mmcr[0] |= freeze_counters_kernel;
-       if (counter->hw_event.exclude_hv)
+       if (counter->attr.exclude_hv)
                cpuhw->mmcr[0] |= MMCR0_FCHV;
 
        /*
@@ -535,7 +535,7 @@ void hw_perf_enable(void)
                        continue;
                }
                val = 0;
-               if (counter->hw.irq_period) {
+               if (counter->hw.sample_period) {
                        left = atomic64_read(&counter->hw.period_left);
                        if (left < 0x80000000L)
                                val = 0x80000000L - left;
@@ -740,10 +740,37 @@ static void power_pmu_disable(struct perf_counter *counter)
        local_irq_restore(flags);
 }
 
+/*
+ * Re-enable interrupts on a counter after they were throttled
+ * because they were coming too fast.
+ */
+static void power_pmu_unthrottle(struct perf_counter *counter)
+{
+       s64 val, left;
+       unsigned long flags;
+
+       if (!counter->hw.idx || !counter->hw.sample_period)
+               return;
+       local_irq_save(flags);
+       perf_disable();
+       power_pmu_read(counter);
+       left = counter->hw.sample_period;
+       val = 0;
+       if (left < 0x80000000L)
+               val = 0x80000000L - left;
+       write_pmc(counter->hw.idx, val);
+       atomic64_set(&counter->hw.prev_count, val);
+       atomic64_set(&counter->hw.period_left, left);
+       perf_counter_update_userpage(counter);
+       perf_enable();
+       local_irq_restore(flags);
+}
+
 struct pmu power_pmu = {
        .enable         = power_pmu_enable,
        .disable        = power_pmu_disable,
        .read           = power_pmu_read,
+       .unthrottle     = power_pmu_unthrottle,
 };
 
 /*
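
The reload arithmetic in power_pmu_unthrottle() (and repeated in record_and_restart() further down) relies on the PMC interrupting when the 32-bit count reaches 0x80000000, i.e. when its most-significant bit becomes set. Writing 0x80000000 - left into the PMC therefore produces the next interrupt after `left` more events; if 2^31 or more events remain, the counter is simply started from 0 and the residue handled over several interrupts. A simplified standalone sketch of that calculation (hypothetical helper name, not kernel code):

#include <stdint.h>

/* Sketch: PMC reload value for a desired number of remaining events.
 * Starting the counter at 0x80000000 - left makes it hit the interrupt
 * threshold (0x80000000) after exactly `left` events; for periods of
 * 2^31 or more, start from 0 and take multiple interrupts. */
static uint32_t pmc_reload_value(int64_t left)
{
        if (left <= 0 || left >= 0x80000000LL)
                return 0;
        return (uint32_t)(0x80000000LL - left);
}
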
@@ -759,10 +786,10 @@ static int can_go_on_limited_pmc(struct perf_counter *counter, u64 ev,
        int n;
        u64 alt[MAX_EVENT_ALTERNATIVES];
 
-       if (counter->hw_event.exclude_user
-           || counter->hw_event.exclude_kernel
-           || counter->hw_event.exclude_hv
-           || counter->hw_event.irq_period)
+       if (counter->attr.exclude_user
+           || counter->attr.exclude_kernel
+           || counter->attr.exclude_hv
+           || counter->attr.sample_period)
                return 0;
 
        if (ppmu->limited_pmc_event(ev))
@@ -828,13 +855,13 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
 
        if (!ppmu)
                return ERR_PTR(-ENXIO);
-       if (!perf_event_raw(&counter->hw_event)) {
-               ev = perf_event_id(&counter->hw_event);
+       if (!perf_event_raw(&counter->attr)) {
+               ev = perf_event_id(&counter->attr);
                if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
                        return ERR_PTR(-EOPNOTSUPP);
                ev = ppmu->generic_events[ev];
        } else {
-               ev = perf_event_config(&counter->hw_event);
+               ev = perf_event_config(&counter->attr);
        }
        counter->hw.config_base = ev;
        counter->hw.idx = 0;
@@ -845,7 +872,7 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
         * the user set it to.
         */
        if (!firmware_has_feature(FW_FEATURE_LPAR))
-               counter->hw_event.exclude_hv = 0;
+               counter->attr.exclude_hv = 0;
 
        /*
         * If this is a per-task counter, then we can use
@@ -898,7 +925,7 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
 
        counter->hw.config = events[n];
        counter->hw.counter_base = cflags[n];
-       atomic64_set(&counter->hw.period_left, counter->hw.irq_period);
+       atomic64_set(&counter->hw.period_left, counter->hw.sample_period);
 
        /*
         * See if we need to reserve the PMU.
@@ -931,7 +958,7 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
 static void record_and_restart(struct perf_counter *counter, long val,
                               struct pt_regs *regs, int nmi)
 {
-       u64 period = counter->hw.irq_period;
+       u64 period = counter->hw.sample_period;
        s64 prev, delta, left;
        int record = 0;
        u64 addr, mmcra, sdsync;
@@ -957,17 +984,13 @@ static void record_and_restart(struct perf_counter *counter, long val,
                if (left < 0x80000000L)
                        val = 0x80000000L - left;
        }
-       write_pmc(counter->hw.idx, val);
-       atomic64_set(&counter->hw.prev_count, val);
-       atomic64_set(&counter->hw.period_left, left);
-       perf_counter_update_userpage(counter);
 
        /*
         * Finally record data if requested.
         */
        if (record) {
                addr = 0;
-               if (counter->hw_event.record_type & PERF_RECORD_ADDR) {
+               if (counter->attr.record_type & PERF_RECORD_ADDR) {
                        /*
                         * The user wants a data address recorded.
                         * If we're not doing instruction sampling,
@@ -983,8 +1006,23 @@ static void record_and_restart(struct perf_counter *counter, long val,
                        if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync))
                                addr = mfspr(SPRN_SDAR);
                }
-               perf_counter_overflow(counter, nmi, regs, addr);
+               if (perf_counter_overflow(counter, nmi, regs, addr)) {
+                       /*
+                        * Interrupts are coming too fast - throttle them
+                        * by setting the counter to 0, so it will be
+                        * at least 2^30 cycles until the next interrupt
+                        * (assuming each counter counts at most 2 counts
+                        * per cycle).
+                        */
+                       val = 0;
+                       left = ~0ULL >> 1;
+               }
        }
+
+       write_pmc(counter->hw.idx, val);
+       atomic64_set(&counter->hw.prev_count, val);
+       atomic64_set(&counter->hw.period_left, left);
+       perf_counter_update_userpage(counter);
 }
 
 /*
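
The throttling path added above restarts the PMC at 0 and parks period_left at ~0ULL >> 1 (2^63 - 1), so the counter must accumulate a full 2^31 events before it crosses the 0x80000000 interrupt threshold again. At no more than roughly 2 events per cycle, that is at least 2^30 cycles of silence, which is the guarantee stated in the comment. A standalone back-of-the-envelope sketch of that arithmetic:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t left = ~0ULL >> 1;                     /* 0x7fffffffffffffff = 2^63 - 1 */
        uint32_t val  = 0;                              /* PMC restarted at zero */
        uint64_t events_to_irq = 0x80000000ULL - val;   /* 2^31 events to the threshold */
        uint64_t min_cycles    = events_to_irq / 2;     /* at most ~2 events per cycle */

        printf("period_left = %llu\n", (unsigned long long)left);
        printf("events until next interrupt >= %llu (2^31)\n",
               (unsigned long long)events_to_irq);
        printf("cycles until next interrupt >= %llu (2^30)\n",
               (unsigned long long)min_cycles);
        return 0;
}
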