perf_counter: there's more to overflow than writing events
authorPeter Zijlstra <a.p.zijlstra@chello.nl>
Mon, 6 Apr 2009 09:45:04 +0000 (11:45 +0200)
committerIngo Molnar <mingo@elte.hu>
Tue, 7 Apr 2009 08:48:56 +0000 (10:48 +0200)
Prepare for more generic overflow handling. The new perf_counter_overflow()
method will handle the generic bits of the counter overflow, and can return
a non-zero (!0) value, in which case the counter should be (soft) disabled,
so that it won't count until it's properly re-enabled.

XXX: do powerpc and swcounter

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
LKML-Reference: <20090406094517.812109629@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
arch/powerpc/kernel/perf_counter.c
arch/x86/kernel/cpu/perf_counter.c
include/linux/perf_counter.h
kernel/perf_counter.c

index 0a4d14f279ae6354efc06a48fb82453311bbae72..f88c35d0710a506c763ef7ddc996ee63f0f4a061 100644 (file)
@@ -732,7 +732,7 @@ static void record_and_restart(struct perf_counter *counter, long val,
         * Finally record data if requested.
         */
        if (record)
-               perf_counter_output(counter, 1, regs);
+               perf_counter_overflow(counter, 1, regs);
 }
 
 /*
index 438415866fe47942589461e9958cf5e0bc46d4a5..1116a41bc7b5d351321e7b490c3ee9e7b01b49d2 100644 (file)
@@ -800,7 +800,8 @@ again:
                        continue;
 
                perf_save_and_restart(counter);
-               perf_counter_output(counter, nmi, regs);
+               if (perf_counter_overflow(counter, nmi, regs))
+                       __pmc_generic_disable(counter, &counter->hw, bit);
        }
 
        hw_perf_ack_status(ack);
index 977fb15a53f37486f6ea3880f12c4567a59f86a8..ca2d4df29e0c8bbf62179ba15ae70273788486f8 100644 (file)
@@ -491,8 +491,8 @@ extern int hw_perf_group_sched_in(struct perf_counter *group_leader,
               struct perf_counter_context *ctx, int cpu);
 extern void perf_counter_update_userpage(struct perf_counter *counter);
 
-extern void perf_counter_output(struct perf_counter *counter,
-                               int nmi, struct pt_regs *regs);
+extern int perf_counter_overflow(struct perf_counter *counter,
+                                int nmi, struct pt_regs *regs);
 /*
  * Return 1 for a software counter, 0 for a hardware counter
  */
index 0a2ade2e4f11f6754b90a406e527bd484cf5c669..195e976eb07ddf252b10f72a78b872551cab995a 100644 (file)
@@ -1800,8 +1800,8 @@ static void perf_output_end(struct perf_output_handle *handle)
        rcu_read_unlock();
 }
 
-void perf_counter_output(struct perf_counter *counter,
-                        int nmi, struct pt_regs *regs)
+static void perf_counter_output(struct perf_counter *counter,
+                               int nmi, struct pt_regs *regs)
 {
        int ret;
        u64 record_type = counter->hw_event.record_type;
@@ -2033,6 +2033,17 @@ void perf_counter_munmap(unsigned long addr, unsigned long len,
        perf_counter_mmap_event(&mmap_event);
 }
 
+/*
+ * Generic counter overflow handling.
+ */
+
+int perf_counter_overflow(struct perf_counter *counter,
+                         int nmi, struct pt_regs *regs)
+{
+       perf_counter_output(counter, nmi, regs);
+       return 0;
+}
+
 /*
  * Generic software counter infrastructure
  */
@@ -2077,6 +2088,7 @@ static void perf_swcounter_set_period(struct perf_counter *counter)
 
 static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
 {
+       enum hrtimer_restart ret = HRTIMER_RESTART;
        struct perf_counter *counter;
        struct pt_regs *regs;
 
@@ -2092,12 +2104,14 @@ static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
                        !counter->hw_event.exclude_user)
                regs = task_pt_regs(current);
 
-       if (regs)
-               perf_counter_output(counter, 0, regs);
+       if (regs) {
+               if (perf_counter_overflow(counter, 0, regs))
+                       ret = HRTIMER_NORESTART;
+       }
 
        hrtimer_forward_now(hrtimer, ns_to_ktime(counter->hw.irq_period));
 
-       return HRTIMER_RESTART;
+       return ret;
 }
 
 static void perf_swcounter_overflow(struct perf_counter *counter,
@@ -2105,7 +2119,10 @@ static void perf_swcounter_overflow(struct perf_counter *counter,
 {
        perf_swcounter_update(counter);
        perf_swcounter_set_period(counter);
-       perf_counter_output(counter, nmi, regs);
+       if (perf_counter_overflow(counter, nmi, regs))
+               /* soft-disable the counter */
+               ;
+
 }
 
 static int perf_swcounter_match(struct perf_counter *counter,
This page took 0.032438 seconds and 5 git commands to generate.