perf_counter: call atomic64_set for counter->count
[deliverable/linux.git] kernel/perf_counter.c
index d850a1fb8d4cf0b5787758e2c4c40d408484ff52..5ea0240adab230a65216ba0b5c137c7c74ed131b 100644
@@ -419,6 +419,54 @@ counter_sched_in(struct perf_counter *counter,
        return 0;
 }
 
+static int
+group_sched_in(struct perf_counter *group_counter,
+              struct perf_cpu_context *cpuctx,
+              struct perf_counter_context *ctx,
+              int cpu)
+{
+       struct perf_counter *counter, *partial_group;
+       int ret;
+
+       if (group_counter->state == PERF_COUNTER_STATE_OFF)
+               return 0;
+
+       ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
+       if (ret)
+               return ret < 0 ? ret : 0;
+
+       group_counter->prev_state = group_counter->state;
+       if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
+               return -EAGAIN;
+
+       /*
+        * Schedule in siblings as one group (if any):
+        */
+       list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
+               counter->prev_state = counter->state;
+               if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
+                       partial_group = counter;
+                       goto group_error;
+               }
+       }
+
+       return 0;
+
+group_error:
+       /*
+        * Groups can be scheduled in as one unit only, so undo any
+        * partial group before returning:
+        */
+       list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
+               if (counter == partial_group)
+                       break;
+               counter_sched_out(counter, cpuctx, ctx);
+       }
+       counter_sched_out(group_counter, cpuctx, ctx);
+
+       return -EAGAIN;
+}
+
 /*
  * Return 1 for a group consisting entirely of software counters,
  * 0 if the group contains any hardware counters.
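
For reference, a simplified userspace sketch of the all-or-nothing pattern the new group_sched_in() implements (hypothetical names, not kernel code): enable each group member in turn and, if any member fails, undo the members that were already enabled before returning -EAGAIN.

/*
 * Simplified illustration of "schedule in the whole group or nothing":
 * on a partial failure, roll back every member enabled so far.
 */
#include <errno.h>
#include <stdio.h>

#define NMEMBERS 4

static int enable_member(int i)
{
	/* Pretend member 2 cannot be enabled right now. */
	return i == 2 ? -EAGAIN : 0;
}

static void disable_member(int i)
{
	printf("rolled back member %d\n", i);
}

static int group_enable(void)
{
	int i, j;

	for (i = 0; i < NMEMBERS; i++) {
		if (enable_member(i)) {
			/* Partial group: undo everything enabled so far. */
			for (j = 0; j < i; j++)
				disable_member(j);
			return -EAGAIN;
		}
	}
	return 0;
}

int main(void)
{
	printf("group_enable() = %d\n", group_enable());
	return 0;
}
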
@@ -643,6 +691,9 @@ static void __perf_counter_enable(void *info)
 
        if (!group_can_go_on(counter, cpuctx, 1))
                err = -EEXIST;
+       else if (counter == leader)
+               err = group_sched_in(counter, cpuctx, ctx,
+                                    smp_processor_id());
        else
                err = counter_sched_in(counter, cpuctx, ctx,
                                       smp_processor_id());
@@ -786,57 +837,17 @@ void perf_counter_task_sched_out(struct task_struct *task, int cpu)
        cpuctx->task_ctx = NULL;
 }
 
-static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx)
+static void __perf_counter_task_sched_out(struct perf_counter_context *ctx)
 {
-       __perf_counter_sched_out(&cpuctx->ctx, cpuctx);
+       struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
+
+       __perf_counter_sched_out(ctx, cpuctx);
+       cpuctx->task_ctx = NULL;
 }
 
-static int
-group_sched_in(struct perf_counter *group_counter,
-              struct perf_cpu_context *cpuctx,
-              struct perf_counter_context *ctx,
-              int cpu)
+static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx)
 {
-       struct perf_counter *counter, *partial_group;
-       int ret;
-
-       if (group_counter->state == PERF_COUNTER_STATE_OFF)
-               return 0;
-
-       ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
-       if (ret)
-               return ret < 0 ? ret : 0;
-
-       group_counter->prev_state = group_counter->state;
-       if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
-               return -EAGAIN;
-
-       /*
-        * Schedule in siblings as one group (if any):
-        */
-       list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
-               counter->prev_state = counter->state;
-               if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
-                       partial_group = counter;
-                       goto group_error;
-               }
-       }
-
-       return 0;
-
-group_error:
-       /*
-        * Groups can be scheduled in as one unit only, so undo any
-        * partial group before returning:
-        */
-       list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
-               if (counter == partial_group)
-                       break;
-               counter_sched_out(counter, cpuctx, ctx);
-       }
-       counter_sched_out(group_counter, cpuctx, ctx);
-
-       return -EAGAIN;
+       __perf_counter_sched_out(&cpuctx->ctx, cpuctx);
 }
 
 static void
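
The replacement helper __perf_counter_task_sched_out() takes only the counter context and looks up the current CPU's perf_cpu_context itself via __get_cpu_var(), so its callers (see the hunks below) no longer pass the task or call smp_processor_id(). A rough userspace analogue of that call-site simplification, with hypothetical names (illustration only):

/*
 * Userspace analogue (hypothetical names): the helper resolves the
 * "current CPU" state internally instead of taking it from the caller.
 */
#include <stdio.h>

struct cpu_state { int task_ctx_active; };
struct task_ctx  { int nr_active; };

/* Stand-in for the per-CPU perf_cpu_context instance. */
static struct cpu_state this_cpu_state;

static struct cpu_state *get_this_cpu_state(void)
{
	return &this_cpu_state;		/* plays the role of __get_cpu_var() */
}

static void task_sched_out(struct task_ctx *ctx)
{
	struct cpu_state *cpuctx = get_this_cpu_state();

	ctx->nr_active = 0;		/* schedule the context's counters out */
	cpuctx->task_ctx_active = 0;	/* detach it from this CPU */
}

int main(void)
{
	struct task_ctx ctx = { .nr_active = 3 };

	this_cpu_state.task_ctx_active = 1;
	task_sched_out(&ctx);
	printf("nr_active=%d task_ctx_active=%d\n",
	       ctx.nr_active, this_cpu_state.task_ctx_active);
	return 0;
}
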
@@ -940,15 +951,13 @@ int perf_counter_task_disable(void)
        struct perf_counter *counter;
        unsigned long flags;
        u64 perf_flags;
-       int cpu;
 
        if (likely(!ctx->nr_counters))
                return 0;
 
        local_irq_save(flags);
-       cpu = smp_processor_id();
 
-       perf_counter_task_sched_out(curr, cpu);
+       __perf_counter_task_sched_out(ctx);
 
        spin_lock(&ctx->lock);
 
@@ -986,7 +995,7 @@ int perf_counter_task_enable(void)
        local_irq_save(flags);
        cpu = smp_processor_id();
 
-       perf_counter_task_sched_out(curr, cpu);
+       __perf_counter_task_sched_out(ctx);
 
        spin_lock(&ctx->lock);
 
@@ -1051,7 +1060,7 @@ void perf_counter_task_tick(struct task_struct *curr, int cpu)
        ctx = &curr->perf_counter_ctx;
 
        perf_counter_cpu_sched_out(cpuctx);
-       perf_counter_task_sched_out(curr, cpu);
+       __perf_counter_task_sched_out(ctx);
 
        rotate_ctx(&cpuctx->ctx);
        rotate_ctx(ctx);
@@ -1272,7 +1281,7 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
 static void perf_counter_reset(struct perf_counter *counter)
 {
        (void)perf_counter_read(counter);
-       atomic_set(&counter->count, 0);
+       atomic64_set(&counter->count, 0);
        perf_counter_update_userpage(counter);
 }
 
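
The last hunk is the change the subject line refers to: counter->count is an atomic64_t, so resetting it has to go through the matching 64-bit accessor atomic64_set() rather than atomic_set(). A minimal userspace analogue using C11 atomics (illustration only; the kernel uses its own atomic64_t API):

/*
 * Clear a 64-bit counter through a store of matching width
 * (userspace stand-in for atomic64_set(&counter->count, 0)).
 */
#include <inttypes.h>
#include <stdatomic.h>
#include <stdio.h>

static _Atomic uint64_t count;

static void counter_reset(void)
{
	atomic_store(&count, 0);	/* 64-bit atomic store */
}

int main(void)
{
	atomic_store(&count, UINT64_C(0x1122334455667788));
	counter_reset();
	printf("count after reset = %" PRIu64 "\n",
	       (uint64_t)atomic_load(&count));
	return 0;
}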