perf_counter: powerpc: only reserve PMU hardware when we need it
[deliverable/linux.git] / arch / powerpc / kernel / perf_counter.c
index df007fe0cc0bb6e6c989dc273a82349df8e5af99..560dd1e7b524472c35309c54cfcac5d57ccaf2bd 100644 (file)
@@ -41,6 +41,8 @@ struct power_pmu *ppmu;
  */
 static unsigned int freeze_counters_kernel = MMCR0_FCS;
 
+static void perf_counter_interrupt(struct pt_regs *regs);
+
 void perf_counter_print_debug(void)
 {
 }
@@ -594,6 +596,24 @@ struct hw_perf_counter_ops power_perf_ops = {
        .read = power_perf_read
 };
 
+/* Number of perf_counters counting hardware events */
+static atomic_t num_counters;
+/* Used to avoid races in calling reserve/release_pmc_hardware */
+static DEFINE_MUTEX(pmc_reserve_mutex);
+
+/*
+ * Release the PMU if this is the last perf_counter.
+ */
+static void hw_perf_counter_destroy(struct perf_counter *counter)
+{
+       if (!atomic_add_unless(&num_counters, -1, 1)) {
+               mutex_lock(&pmc_reserve_mutex);
+               if (atomic_dec_return(&num_counters) == 0)
+                       release_pmc_hardware();
+               mutex_unlock(&pmc_reserve_mutex);
+       }
+}
+
 const struct hw_perf_counter_ops *
 hw_perf_counter_init(struct perf_counter *counter)
 {
@@ -601,6 +621,7 @@ hw_perf_counter_init(struct perf_counter *counter)
        struct perf_counter *ctrs[MAX_HWCOUNTERS];
        unsigned int events[MAX_HWCOUNTERS];
        int n;
+       int err;
 
        if (!ppmu)
                return NULL;
@@ -646,25 +667,28 @@ hw_perf_counter_init(struct perf_counter *counter)
 
        counter->hw.config = events[n];
        atomic64_set(&counter->hw.period_left, counter->hw_event.irq_period);
-       return &power_perf_ops;
-}
 
-/*
- * Handle wakeups.
- */
-void perf_counter_do_pending(void)
-{
-       int i;
-       struct cpu_hw_counters *cpuhw = &__get_cpu_var(cpu_hw_counters);
-       struct perf_counter *counter;
-
-       for (i = 0; i < cpuhw->n_counters; ++i) {
-               counter = cpuhw->counter[i];
-               if (counter && counter->wakeup_pending) {
-                       counter->wakeup_pending = 0;
-                       wake_up(&counter->waitq);
-               }
+       /*
+        * See if we need to reserve the PMU.
+        * If no counters are currently in use, then we have to take a
+        * mutex to ensure that we don't race with another task doing
+        * reserve_pmc_hardware or release_pmc_hardware.
+        */
+       err = 0;
+       if (!atomic_inc_not_zero(&num_counters)) {
+               mutex_lock(&pmc_reserve_mutex);
+               if (atomic_read(&num_counters) == 0 &&
+                   reserve_pmc_hardware(perf_counter_interrupt))
+                       err = -EBUSY;
+               else
+                       atomic_inc(&num_counters);
+               mutex_unlock(&pmc_reserve_mutex);
        }
+       counter->destroy = hw_perf_counter_destroy;
+
+       if (err)
+               return NULL;
+       return &power_perf_ops;
 }
 
 /*
@@ -720,7 +744,7 @@ static void perf_counter_interrupt(struct pt_regs *regs)
        struct cpu_hw_counters *cpuhw = &__get_cpu_var(cpu_hw_counters);
        struct perf_counter *counter;
        long val;
-       int need_wakeup = 0, found = 0;
+       int found = 0;
 
        for (i = 0; i < cpuhw->n_counters; ++i) {
                counter = cpuhw->counter[i];
@@ -761,7 +785,7 @@ static void perf_counter_interrupt(struct pt_regs *regs)
         * immediately; otherwise we'll have do the wakeup when interrupts
         * get soft-enabled.
         */
-       if (get_perf_counter_pending() && regs->softe) {
+       if (test_perf_counter_pending() && regs->softe) {
                irq_enter();
                clear_perf_counter_pending();
                perf_counter_do_pending();
@@ -787,11 +811,6 @@ static int init_perf_counters(void)
 {
        unsigned long pvr;
 
-       if (reserve_pmc_hardware(perf_counter_interrupt)) {
-               printk(KERN_ERR "Couldn't init performance monitor subsystem\n");
-               return -EBUSY;
-       }
-
        /* XXX should get this from cputable */
        pvr = mfspr(SPRN_PVR);
        switch (PVR_VER(pvr)) {
This page took 0.025802 seconds and 5 git commands to generate.