perf_counter: rework the task clock software counter
author Peter Zijlstra <a.p.zijlstra@chello.nl>
Mon, 6 Apr 2009 09:45:11 +0000 (11:45 +0200)
committer Ingo Molnar <mingo@elte.hu>
Tue, 7 Apr 2009 08:49:00 +0000 (10:49 +0200)
Rework the task clock software counter to use the context time instead
of the task runtime clock; this removes the last such user.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
LKML-Reference: <20090406094518.445450972@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
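
In essence, the reworked counter snapshots counter->ctx->time (advanced
on demand by update_context_time()) and atomically folds the delta since
the previous snapshot into the count, rather than deriving a value from
curr->se.sum_exec_runtime plus a forced __task_delta_exec() delta. Below
is a minimal user-space sketch of that snapshot-and-accumulate pattern,
using C11 atomics; the struct, function and variable names are
illustrative, not the kernel's:

	#include <stdatomic.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <time.h>

	/* Illustrative analogue of the prev_count/count pair in hw_perf_counter. */
	struct clock_counter {
		atomic_uint_least64_t prev_count;	/* last clock value folded in */
		atomic_uint_least64_t count;		/* accumulated time */
	};

	static uint64_t now_ns(void)
	{
		struct timespec ts;

		clock_gettime(CLOCK_MONOTONIC, &ts);
		return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
	}

	/*
	 * Mirrors the shape of the reworked task_clock_perf_counter_update():
	 * atomically swap the new time into prev_count, then add the elapsed
	 * delta to the running count.
	 */
	static void counter_update(struct clock_counter *c, uint64_t now)
	{
		uint64_t prev = atomic_exchange(&c->prev_count, now);

		atomic_fetch_add(&c->count, now - prev);
	}

	int main(void)
	{
		struct clock_counter c;

		/* "enable": snapshot the clock so later deltas start from here */
		atomic_init(&c.prev_count, now_ns());
		atomic_init(&c.count, 0);

		/* "read"/"disable": fold in the time elapsed since the snapshot */
		counter_update(&c, now_ns());
		printf("accumulated: %llu ns\n",
		       (unsigned long long)atomic_load(&c.count));
		return 0;
	}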
kernel/perf_counter.c

index 84d85ab4e1611b4cb4cb3b063a1ec170e14be2b5..56b7eb53d673ed28faf8b6e9dcb364023861dbda 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -974,9 +974,6 @@ int perf_counter_task_disable(void)
        curr_rq_lock_irq_save(&flags);
        cpu = smp_processor_id();
 
-       /* force the update of the task clock: */
-       __task_delta_exec(curr, 1);
-
        perf_counter_task_sched_out(curr, cpu);
 
        spin_lock(&ctx->lock);
@@ -1017,9 +1014,6 @@ int perf_counter_task_enable(void)
        curr_rq_lock_irq_save(&flags);
        cpu = smp_processor_id();
 
-       /* force the update of the task clock: */
-       __task_delta_exec(curr, 1);
-
        perf_counter_task_sched_out(curr, cpu);
 
        spin_lock(&ctx->lock);
@@ -2347,38 +2341,28 @@ static const struct hw_perf_counter_ops perf_ops_cpu_clock = {
  * Software counter: task time clock
  */
 
-/*
- * Called from within the scheduler:
- */
-static u64 task_clock_perf_counter_val(struct perf_counter *counter, int update)
-{
-       struct task_struct *curr = counter->task;
-       u64 delta;
-
-       delta = __task_delta_exec(curr, update);
-
-       return curr->se.sum_exec_runtime + delta;
-}
-
-static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now)
+static void task_clock_perf_counter_update(struct perf_counter *counter)
 {
-       u64 prev;
+       u64 prev, now;
        s64 delta;
 
-       prev = atomic64_read(&counter->hw.prev_count);
-
-       atomic64_set(&counter->hw.prev_count, now);
+       update_context_time(counter->ctx);
+       now = counter->ctx->time;
 
+       prev = atomic64_xchg(&counter->hw.prev_count, now);
        delta = now - prev;
-
        atomic64_add(delta, &counter->count);
 }
 
 static int task_clock_perf_counter_enable(struct perf_counter *counter)
 {
        struct hw_perf_counter *hwc = &counter->hw;
+       u64 now;
+
+       update_context_time(counter->ctx);
+       now = counter->ctx->time;
 
-       atomic64_set(&hwc->prev_count, task_clock_perf_counter_val(counter, 0));
+       atomic64_set(&hwc->prev_count, now);
        hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        hwc->hrtimer.function = perf_swcounter_hrtimer;
        if (hwc->irq_period) {
@@ -2393,14 +2377,12 @@ static int task_clock_perf_counter_enable(struct perf_counter *counter)
 static void task_clock_perf_counter_disable(struct perf_counter *counter)
 {
        hrtimer_cancel(&counter->hw.hrtimer);
-       task_clock_perf_counter_update(counter,
-                       task_clock_perf_counter_val(counter, 0));
+       task_clock_perf_counter_update(counter);
 }
 
 static void task_clock_perf_counter_read(struct perf_counter *counter)
 {
-       task_clock_perf_counter_update(counter,
-                       task_clock_perf_counter_val(counter, 1));
+       task_clock_perf_counter_update(counter);
 }
 
 static const struct hw_perf_counter_ops perf_ops_task_clock = {
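
Note the switch in task_clock_perf_counter_update() from a separate
atomic64_read()/atomic64_set() pair to a single atomic64_xchg(): swapping
the new time into prev_count atomically means no two concurrent updates
can observe the same prev_count value, so each slice of elapsed time is
accounted into the count exactly once.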