ARM: 6835/1: perf: ensure overflows aren't missed due to IRQ latency
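In short: armpmu_event_update() gains an explicit overflow argument. Both raw counter values are masked with armpmu->max_period, and when overflow is set the delta is computed as max_period - prev_raw_count + new_raw_count, so a counter that wrapped while its overflow interrupt was pending is still credited with the full period. Callers that sample a live counter outside the overflow path, armpmu_read() and armpmu_stop(), pass 0.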
[deliverable/linux.git] arch/arm/kernel/perf_event.c
index e422f4c269a02bcce8d38fd800e5a42e54f5583b..69cfee0fe00f1cbc38eb7f789ef23aae51688dd6 100644
@@ -205,11 +205,9 @@ armpmu_event_set_period(struct perf_event *event,
 static u64
 armpmu_event_update(struct perf_event *event,
                    struct hw_perf_event *hwc,
-                   int idx)
+                   int idx, int overflow)
 {
-       int shift = 64 - 32;
-       s64 prev_raw_count, new_raw_count;
-       u64 delta;
+       u64 delta, prev_raw_count, new_raw_count;
 
 again:
        prev_raw_count = local64_read(&hwc->prev_count);
@@ -219,8 +217,13 @@ again:
                             new_raw_count) != prev_raw_count)
                goto again;
 
-       delta = (new_raw_count << shift) - (prev_raw_count << shift);
-       delta >>= shift;
+       new_raw_count &= armpmu->max_period;
+       prev_raw_count &= armpmu->max_period;
+
+       if (overflow)
+               delta = armpmu->max_period - prev_raw_count + new_raw_count;
+       else
+               delta = new_raw_count - prev_raw_count;
 
        local64_add(delta, &event->count);
        local64_sub(delta, &hwc->period_left);
@@ -237,7 +240,7 @@ armpmu_read(struct perf_event *event)
        if (hwc->idx < 0)
                return;
 
-       armpmu_event_update(event, hwc, hwc->idx);
+       armpmu_event_update(event, hwc, hwc->idx, 0);
 }
 
 static void
@@ -255,7 +258,7 @@ armpmu_stop(struct perf_event *event, int flags)
        if (!(hwc->state & PERF_HES_STOPPED)) {
                armpmu->disable(hwc, hwc->idx);
                barrier(); /* why? */
-               armpmu_event_update(event, hwc, hwc->idx);
+               armpmu_event_update(event, hwc, hwc->idx, 0);
                hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
        }
 }
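
For illustration, here is a minimal user-space sketch of the delta calculation the patch introduces. It is not kernel code: the function event_delta(), the MAX_PERIOD macro, and the sample counter values are invented for this example, standing in for armpmu_event_update() and armpmu->max_period on a PMU with 32-bit counters.

/* overflow_delta.c - standalone sketch, not kernel code. */
#include <stdio.h>
#include <stdint.h>

/* Stand-in for armpmu->max_period with a 32-bit counter. */
#define MAX_PERIOD 0xffffffffULL

/*
 * Mirrors the patched delta logic: when the overflow flag is set,
 * the distance from prev_raw up to the counter's top is added in,
 * so a counter that has wrapped back past prev_raw still yields a
 * delta covering the full period.
 */
static uint64_t event_delta(uint64_t prev_raw, uint64_t new_raw,
                            int overflow)
{
        prev_raw &= MAX_PERIOD;
        new_raw  &= MAX_PERIOD;

        if (overflow)
                return MAX_PERIOD - prev_raw + new_raw;

        return new_raw - prev_raw;
}

int main(void)
{
        uint64_t prev_raw = 0xf0000000ULL;
        /* IRQ latency: the counter wrapped and counted past prev_raw. */
        uint64_t new_raw  = 0xf8000000ULL;

        /* Naive subtraction hides the wrap: only 0x08000000 events. */
        printf("naive delta:    0x%llx\n",
               (unsigned long long)(new_raw - prev_raw));

        /* Overflow-aware delta exceeds 2^32, counting the full wrap. */
        printf("overflow delta: 0x%llx\n",
               (unsigned long long)event_delta(prev_raw, new_raw, 1));

        return 0;
}

The design point the example shows: by the time a late overflow interrupt is serviced, the counter may already have counted past its previous value, so the wrap cannot be inferred from new_raw < prev_raw alone; the interrupt handler has to pass the overflow flag explicitly.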