 	return 0;
 }
+static int
+group_sched_in(struct perf_counter *group_counter,
+	       struct perf_cpu_context *cpuctx,
+	       struct perf_counter_context *ctx,
+	       int cpu)
+{
+	struct perf_counter *counter, *partial_group;
+	int ret;
+
+	if (group_counter->state == PERF_COUNTER_STATE_OFF)
+		return 0;
+
+	ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
+	if (ret)
+		return ret < 0 ? ret : 0;
+
+	group_counter->prev_state = group_counter->state;
+	if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
+		return -EAGAIN;
+
+	/*
+	 * Schedule in siblings as one group (if any):
+	 */
+	list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
+		counter->prev_state = counter->state;
+		if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
+			partial_group = counter;
+			goto group_error;
+		}
+	}
+
+	return 0;
+
+group_error:
+	/*
+	 * Groups can be scheduled in as one unit only, so undo any
+	 * partial group before returning:
+	 */
+	list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
+		if (counter == partial_group)
+			break;
+		counter_sched_out(counter, cpuctx, ctx);
+	}
+	counter_sched_out(group_counter, cpuctx, ctx);
+
+	return -EAGAIN;
+}
+
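The function added above enforces all-or-nothing scheduling for a counter group: hw_perf_group_sched_in() may claim the whole group itself (nonzero return, negative meaning an error), and otherwise each member is scheduled individually, with any partially scheduled group backed out before -EAGAIN is returned. It is moved up here, apparently so it is defined ahead of the new call site further down. A minimal userspace sketch of the same rollback pattern, with hypothetical names and a fixed number of "slots" standing in for hardware counters:

#include <stdio.h>

#define NSLOTS 2

static int slots_used;

static int counter_sched_in(int id)
{
	if (slots_used >= NSLOTS)
		return -1;			/* no hardware slot free */
	slots_used++;
	printf("counter %d on\n", id);
	return 0;
}

static void counter_sched_out(int id)
{
	slots_used--;
	printf("counter %d off\n", id);
}

static int group_sched_in(const int *group, int n)
{
	int i;

	for (i = 0; i < n; i++)
		if (counter_sched_in(group[i]))
			goto group_error;
	return 0;

group_error:
	/* Undo the partial group: the unit goes on together or not at all. */
	while (--i >= 0)
		counter_sched_out(group[i]);
	return -1;				/* caller treats this as -EAGAIN */
}

int main(void)
{
	int group[] = { 1, 2, 3 };		/* three counters, two slots */

	if (group_sched_in(group, 3))
		printf("group rejected as a unit, slots_used = %d\n", slots_used);
	return 0;
}

Running it schedules counters 1 and 2, fails on 3, and unwinds both before reporting failure, mirroring the group_error path above.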
 /*
  * Return 1 for a group consisting entirely of software counters,
  * 0 if the group contains any hardware counters.
 	if (!group_can_go_on(counter, cpuctx, 1))
 		err = -EEXIST;
+	else if (counter == leader)
+		err = group_sched_in(counter, cpuctx, ctx,
+				     smp_processor_id());
 	else
 		err = counter_sched_in(counter, cpuctx, ctx,
 				       smp_processor_id());
 	cpuctx->task_ctx = NULL;
 }
-static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx)
+static void __perf_counter_task_sched_out(struct perf_counter_context *ctx)
 {
-	__perf_counter_sched_out(&cpuctx->ctx, cpuctx);
+	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
+
+	__perf_counter_sched_out(ctx, cpuctx);
+	cpuctx->task_ctx = NULL;
 }
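The renamed helper above fetches this CPU's perf_cpu_context with __get_cpu_var() (so it must run pinned to the CPU, with interrupts off), schedules the given task context out, and clears cpuctx->task_ctx so the CPU no longer claims an active task context. A sketch of that per-CPU bookkeeping, as a userspace analogue with hypothetical names:

#include <stdio.h>

#define NR_CPUS 4

struct task_ctx { int id; };

struct cpu_ctx {
	struct task_ctx *task_ctx;	/* task context active on this CPU */
};

static struct cpu_ctx cpu_ctx[NR_CPUS];

/* Stand-in for smp_processor_id(); we pretend to always run on CPU 0. */
static int this_cpu(void) { return 0; }

static void task_sched_in(struct task_ctx *ctx)
{
	struct cpu_ctx *cpuctx = &cpu_ctx[this_cpu()];

	/* ... activate ctx's counters here ... */
	cpuctx->task_ctx = ctx;
}

static void task_sched_out(struct task_ctx *ctx)
{
	struct cpu_ctx *cpuctx = &cpu_ctx[this_cpu()];

	if (cpuctx->task_ctx != ctx)	/* not ours: nothing to undo */
		return;
	/* ... deactivate ctx's counters here ... */
	cpuctx->task_ctx = NULL;	/* CPU no longer claims a task context */
}

int main(void)
{
	struct task_ctx t = { .id = 1 };

	task_sched_in(&t);
	printf("active ctx id: %d\n", cpu_ctx[0].task_ctx->id);
	task_sched_out(&t);
	printf("active ctx: %s\n", cpu_ctx[0].task_ctx ? "set" : "NULL");
	return 0;
}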
-static int
-group_sched_in(struct perf_counter *group_counter,
-	       struct perf_cpu_context *cpuctx,
-	       struct perf_counter_context *ctx,
-	       int cpu)
+static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx)
 {
-	struct perf_counter *counter, *partial_group;
-	int ret;
-
-	if (group_counter->state == PERF_COUNTER_STATE_OFF)
-		return 0;
-
-	ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
-	if (ret)
-		return ret < 0 ? ret : 0;
-
-	group_counter->prev_state = group_counter->state;
-	if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
-		return -EAGAIN;
-
-	/*
-	 * Schedule in siblings as one group (if any):
-	 */
-	list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
-		counter->prev_state = counter->state;
-		if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
-			partial_group = counter;
-			goto group_error;
-		}
-	}
-
-	return 0;
-
-group_error:
-	/*
-	 * Groups can be scheduled in as one unit only, so undo any
-	 * partial group before returning:
-	 */
-	list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
-		if (counter == partial_group)
-			break;
-		counter_sched_out(counter, cpuctx, ctx);
-	}
-	counter_sched_out(group_counter, cpuctx, ctx);
-
-	return -EAGAIN;
+	__perf_counter_sched_out(&cpuctx->ctx, cpuctx);
 }
 static void
 	struct perf_counter *counter;
 	unsigned long flags;
 	u64 perf_flags;
-	int cpu;
 	if (likely(!ctx->nr_counters))
 		return 0;
 	local_irq_save(flags);
-	cpu = smp_processor_id();
-	perf_counter_task_sched_out(curr, cpu);
+	__perf_counter_task_sched_out(ctx);
 	spin_lock(&ctx->lock);
 	local_irq_save(flags);
 	cpu = smp_processor_id();
-	perf_counter_task_sched_out(curr, cpu);
+	__perf_counter_task_sched_out(ctx);
 	spin_lock(&ctx->lock);
 	ctx = &curr->perf_counter_ctx;
 	perf_counter_cpu_sched_out(cpuctx);
-	perf_counter_task_sched_out(curr, cpu);
+	__perf_counter_task_sched_out(ctx);
 	rotate_ctx(&cpuctx->ctx);
 	rotate_ctx(ctx);
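After everything is scheduled out, the tick handler rotates both the per-CPU and the per-task context. rotate_ctx() is not shown in this excerpt, but it presumably moves the first counter on the context's list to the tail, so that when counters outnumber hardware slots a different subset gets to run each tick. A toy round-robin illustration (an array standing in for the counter list; names hypothetical):

#include <stdio.h>

#define NR_COUNTERS	4
#define NR_SLOTS	2

static int counters[NR_COUNTERS] = { 1, 2, 3, 4 };

/* Move the first counter to the tail, one rotation step per tick. */
static void rotate(void)
{
	int first = counters[0];
	int i;

	for (i = 0; i < NR_COUNTERS - 1; i++)
		counters[i] = counters[i + 1];
	counters[NR_COUNTERS - 1] = first;
}

int main(void)
{
	int tick, i;

	for (tick = 0; tick < 3; tick++) {
		rotate();
		printf("tick %d schedules:", tick);
		for (i = 0; i < NR_SLOTS; i++)	/* only NR_SLOTS fit */
			printf(" counter %d", counters[i]);
		printf("\n");
	}
	return 0;
}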
 static void perf_counter_reset(struct perf_counter *counter)
 {
 	(void)perf_counter_read(counter);
-	atomic_set(&counter->count, 0);
+	atomic64_set(&counter->count, 0);
 	perf_counter_update_userpage(counter);
 }
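The final hunk is a straight type fix: counter->count is an atomic64_t, and clearing it through atomic_set() goes through the 32-bit atomic_t API, which at best clears only 32 of the 64 bits. A userspace analogue of the corrected reset, using C11 atomics in place of the kernel's atomic64_t (names hypothetical):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* 64-bit counter value, analogous to the kernel's atomic64_t field. */
struct counter {
	_Atomic uint64_t count;
};

static void counter_reset(struct counter *c)
{
	/* Full 64-bit store; a 32-bit store would leave half the value behind. */
	atomic_store(&c->count, 0);
}

int main(void)
{
	struct counter c;

	atomic_init(&c.count, UINT64_MAX);
	counter_reset(&c);
	printf("count = %llu\n", (unsigned long long)atomic_load(&c.count));
	return 0;
}

Here atomic_store() issues a full 64-bit store, which is what atomic64_set() provides in the kernel.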