perf_counter: More aggressive frequency adjustment
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 06ea3eae886e3b0c0343c3a3ab02795e39504cce..51c571ee4d0b1369e1030b0b1f5d91dca8140885 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -16,8 +16,9 @@
 #include <linux/file.h>
 #include <linux/poll.h>
 #include <linux/sysfs.h>
-#include <linux/ptrace.h>
+#include <linux/dcache.h>
 #include <linux/percpu.h>
+#include <linux/ptrace.h>
 #include <linux/vmstat.h>
 #include <linux/hardirq.h>
 #include <linux/rculist.h>
@@ -26,7 +27,6 @@
 #include <linux/anon_inodes.h>
 #include <linux/kernel_stat.h>
 #include <linux/perf_counter.h>
-#include <linux/dcache.h>
 
 #include <asm/irq_regs.h>
 
@@ -40,12 +40,14 @@ static int perf_reserved_percpu __read_mostly;
 static int perf_overcommit __read_mostly = 1;
 
 static atomic_t nr_counters __read_mostly;
-static atomic_t nr_mmap_tracking __read_mostly;
-static atomic_t nr_munmap_tracking __read_mostly;
-static atomic_t nr_comm_tracking __read_mostly;
+static atomic_t nr_mmap_counters __read_mostly;
+static atomic_t nr_comm_counters __read_mostly;
 
 int sysctl_perf_counter_priv __read_mostly; /* do we need to be privileged */
 int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */
+int sysctl_perf_counter_limit __read_mostly = 100000; /* max NMIs per second */
+
+static atomic64_t perf_counter_id;
 
 /*
  * Lock for (sysadmin-configurable) counter reservations:
@@ -64,7 +66,9 @@ void __weak hw_perf_disable(void)             { barrier(); }
 void __weak hw_perf_enable(void)               { barrier(); }
 
 void __weak hw_perf_counter_setup(int cpu)     { barrier(); }
-int __weak hw_perf_group_sched_in(struct perf_counter *group_leader,
+
+int __weak
+hw_perf_group_sched_in(struct perf_counter *group_leader,
               struct perf_cpu_context *cpuctx,
               struct perf_counter_context *ctx, int cpu)
 {
@@ -102,12 +106,92 @@ static void get_ctx(struct perf_counter_context *ctx)
        atomic_inc(&ctx->refcount);
 }
 
+static void free_ctx(struct rcu_head *head)
+{
+       struct perf_counter_context *ctx;
+
+       ctx = container_of(head, struct perf_counter_context, rcu_head);
+       kfree(ctx);
+}
+
 static void put_ctx(struct perf_counter_context *ctx)
 {
-       if (atomic_dec_and_test(&ctx->refcount))
-               kfree(ctx);
+       if (atomic_dec_and_test(&ctx->refcount)) {
+               if (ctx->parent_ctx)
+                       put_ctx(ctx->parent_ctx);
+               if (ctx->task)
+                       put_task_struct(ctx->task);
+               call_rcu(&ctx->rcu_head, free_ctx);
+       }
+}
+
+/*
+ * Get the perf_counter_context for a task and lock it.
+ * This has to cope with the fact that until it is locked,
+ * the context could get moved to another task.
+ */
+static struct perf_counter_context *
+perf_lock_task_context(struct task_struct *task, unsigned long *flags)
+{
+       struct perf_counter_context *ctx;
+
+       rcu_read_lock();
+ retry:
+       ctx = rcu_dereference(task->perf_counter_ctxp);
+       if (ctx) {
+               /*
+                * If this context is a clone of another, it might
+                * get swapped for another underneath us by
+                * perf_counter_task_sched_out, though the
+                * rcu_read_lock() protects us from any context
+                * getting freed.  Lock the context and check if it
+                * got swapped before we could get the lock, and retry
+                * if so.  If we locked the right context, then it
+                * can't get swapped on us any more.
+                */
+               spin_lock_irqsave(&ctx->lock, *flags);
+               if (ctx != rcu_dereference(task->perf_counter_ctxp)) {
+                       spin_unlock_irqrestore(&ctx->lock, *flags);
+                       goto retry;
+               }
+       }
+       rcu_read_unlock();
+       return ctx;
+}
+
+/*
+ * Get the context for a task and increment its pin_count so it
+ * can't get swapped to another task.  This also increments its
+ * reference count so that the context can't get freed.
+ */
+static struct perf_counter_context *perf_pin_task_context(struct task_struct *task)
+{
+       struct perf_counter_context *ctx;
+       unsigned long flags;
+
+       ctx = perf_lock_task_context(task, &flags);
+       if (ctx) {
+               ++ctx->pin_count;
+               get_ctx(ctx);
+               spin_unlock_irqrestore(&ctx->lock, flags);
+       }
+       return ctx;
+}
+
+static void perf_unpin_context(struct perf_counter_context *ctx)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&ctx->lock, flags);
+       --ctx->pin_count;
+       spin_unlock_irqrestore(&ctx->lock, flags);
+       put_ctx(ctx);
 }
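
The pin/unpin pair above is easiest to read with a caller in mind. A purely illustrative sketch, written as it would appear inside this file; example_walk_counters() is hypothetical and not part of the patch, and it only relies on the counter_list and mutex members that struct perf_counter_context already has:

    /*
     * Hypothetical caller: keep a task's context from being swapped to
     * another task or freed while its counters are being walked.
     */
    static void example_walk_counters(struct task_struct *task)
    {
            struct perf_counter_context *ctx;
            struct perf_counter *counter;

            ctx = perf_pin_task_context(task);  /* takes a ref, bumps pin_count */
            if (!ctx)
                    return;

            mutex_lock(&ctx->mutex);
            list_for_each_entry(counter, &ctx->counter_list, list_entry) {
                    /* ctx can be neither swapped nor freed here */
            }
            mutex_unlock(&ctx->mutex);

            perf_unpin_context(ctx);            /* drops pin_count and the ref */
    }

The pin_count keeps context_equiv() from treating a pinned context as swappable, while the extra reference keeps put_ctx() from freeing it even if the task exits.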
 
+/*
+ * Add a counter to the lists for its context.
+ * Must be called with ctx->mutex and ctx->lock held.
+ */
 static void
 list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
 {
@@ -131,7 +215,7 @@ list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
 
 /*
  * Remove a counter from the lists for its context.
- * Must be called with counter->mutex and ctx->mutex held.
+ * Must be called with ctx->mutex and ctx->lock held.
  */
 static void
 list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
@@ -177,7 +261,7 @@ counter_sched_out(struct perf_counter *counter,
        if (!is_software_counter(counter))
                cpuctx->active_oncpu--;
        ctx->nr_active--;
-       if (counter->hw_event.exclusive || !cpuctx->active_oncpu)
+       if (counter->attr.exclusive || !cpuctx->active_oncpu)
                cpuctx->exclusive = 0;
 }
 
@@ -199,7 +283,7 @@ group_sched_out(struct perf_counter *group_counter,
        list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
                counter_sched_out(counter, cpuctx, ctx);
 
-       if (group_counter->hw_event.exclusive)
+       if (group_counter->attr.exclusive)
                cpuctx->exclusive = 0;
 }
 
@@ -214,7 +298,6 @@ static void __perf_counter_remove_from_context(void *info)
        struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
        struct perf_counter *counter = info;
        struct perf_counter_context *ctx = counter->ctx;
-       unsigned long flags;
 
        /*
         * If this is a task context, we need to check whether it is
@@ -224,7 +307,7 @@ static void __perf_counter_remove_from_context(void *info)
        if (ctx->task && cpuctx->task_ctx != ctx)
                return;
 
-       spin_lock_irqsave(&ctx->lock, flags);
+       spin_lock(&ctx->lock);
        /*
         * Protect the list operation against NMI by disabling the
         * counters on a global level.
@@ -246,17 +329,24 @@ static void __perf_counter_remove_from_context(void *info)
        }
 
        perf_enable();
-       spin_unlock_irqrestore(&ctx->lock, flags);
+       spin_unlock(&ctx->lock);
 }
 
 
 /*
  * Remove the counter from a task's (or a CPU's) list of counters.
  *
- * Must be called with counter->mutex and ctx->mutex held.
+ * Must be called with ctx->mutex held.
  *
  * CPU counters are removed with a smp call. For task counters we only
  * call when the task is on a CPU.
+ *
+ * If counter->ctx is a cloned context, callers must make sure that
+ * every task struct that counter->ctx->task could possibly point to
+ * remains valid.  This is OK when called from perf_release since
+ * that only calls us on the top-level context, which can't be a clone.
+ * When called from perf_counter_exit_task, it's OK because the
+ * context has been detached from its task.
  */
 static void perf_counter_remove_from_context(struct perf_counter *counter)
 {
@@ -355,7 +445,6 @@ static void __perf_counter_disable(void *info)
        struct perf_counter *counter = info;
        struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
        struct perf_counter_context *ctx = counter->ctx;
-       unsigned long flags;
 
        /*
         * If this is a per-task counter, need to check whether this
@@ -364,7 +453,7 @@ static void __perf_counter_disable(void *info)
        if (ctx->task && cpuctx->task_ctx != ctx)
                return;
 
-       spin_lock_irqsave(&ctx->lock, flags);
+       spin_lock(&ctx->lock);
 
        /*
         * If the counter is on, turn it off.
@@ -380,11 +469,21 @@ static void __perf_counter_disable(void *info)
                counter->state = PERF_COUNTER_STATE_OFF;
        }
 
-       spin_unlock_irqrestore(&ctx->lock, flags);
+       spin_unlock(&ctx->lock);
 }
 
 /*
  * Disable a counter.
+ *
+ * If counter->ctx is a cloned context, callers must make sure that
+ * every task struct that counter->ctx->task could possibly point to
+ * remains valid.  This condition is satisfied when called through
+ * perf_counter_for_each_child or perf_counter_for_each because they
+ * hold the top-level counter's child_mutex, so any descendant that
+ * goes to exit will block in sync_child_counter.
+ * When called from perf_pending_counter it's OK because counter->ctx
+ * is the current context on this CPU and preemption is disabled,
+ * hence we can't get into perf_counter_task_sched_out for this context.
  */
 static void perf_counter_disable(struct perf_counter *counter)
 {
@@ -452,7 +551,7 @@ counter_sched_in(struct perf_counter *counter,
                cpuctx->active_oncpu++;
        ctx->nr_active++;
 
-       if (counter->hw_event.exclusive)
+       if (counter->attr.exclusive)
                cpuctx->exclusive = 1;
 
        return 0;
@@ -474,7 +573,6 @@ group_sched_in(struct perf_counter *group_counter,
        if (ret)
                return ret < 0 ? ret : 0;
 
-       group_counter->prev_state = group_counter->state;
        if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
                return -EAGAIN;
 
@@ -482,7 +580,6 @@ group_sched_in(struct perf_counter *group_counter,
         * Schedule in siblings as one group (if any):
         */
        list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
-               counter->prev_state = counter->state;
                if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
                        partial_group = counter;
                        goto group_error;
@@ -546,7 +643,7 @@ static int group_can_go_on(struct perf_counter *counter,
         * If this group is exclusive and there are already
         * counters on the CPU, it can't go on.
         */
-       if (counter->hw_event.exclusive && cpuctx->active_oncpu)
+       if (counter->attr.exclusive && cpuctx->active_oncpu)
                return 0;
        /*
         * Otherwise, try to add it if all previous groups were able
@@ -559,7 +656,6 @@ static void add_counter_to_ctx(struct perf_counter *counter,
                               struct perf_counter_context *ctx)
 {
        list_add_counter(counter, ctx);
-       counter->prev_state = PERF_COUNTER_STATE_OFF;
        counter->tstamp_enabled = ctx->time;
        counter->tstamp_running = ctx->time;
        counter->tstamp_stopped = ctx->time;
@@ -567,6 +663,8 @@ static void add_counter_to_ctx(struct perf_counter *counter,
 
 /*
  * Cross CPU call to install and enable a performance counter
+ *
+ * Must be called with ctx->mutex held
  */
 static void __perf_install_in_context(void *info)
 {
@@ -575,7 +673,6 @@ static void __perf_install_in_context(void *info)
        struct perf_counter_context *ctx = counter->ctx;
        struct perf_counter *leader = counter->group_leader;
        int cpu = smp_processor_id();
-       unsigned long flags;
        int err;
 
        /*
@@ -591,7 +688,7 @@ static void __perf_install_in_context(void *info)
                cpuctx->task_ctx = ctx;
        }
 
-       spin_lock_irqsave(&ctx->lock, flags);
+       spin_lock(&ctx->lock);
        ctx->is_active = 1;
        update_context_time(ctx);
 
@@ -629,7 +726,7 @@ static void __perf_install_in_context(void *info)
                 */
                if (leader != counter)
                        group_sched_out(leader, cpuctx, ctx);
-               if (leader->hw_event.pinned) {
+               if (leader->attr.pinned) {
                        update_group_times(leader);
                        leader->state = PERF_COUNTER_STATE_ERROR;
                }
@@ -641,7 +738,7 @@ static void __perf_install_in_context(void *info)
  unlock:
        perf_enable();
 
-       spin_unlock_irqrestore(&ctx->lock, flags);
+       spin_unlock(&ctx->lock);
 }
 
 /*
@@ -705,7 +802,6 @@ static void __perf_counter_enable(void *info)
        struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
        struct perf_counter_context *ctx = counter->ctx;
        struct perf_counter *leader = counter->group_leader;
-       unsigned long flags;
        int err;
 
        /*
@@ -718,11 +814,10 @@ static void __perf_counter_enable(void *info)
                cpuctx->task_ctx = ctx;
        }
 
-       spin_lock_irqsave(&ctx->lock, flags);
+       spin_lock(&ctx->lock);
        ctx->is_active = 1;
        update_context_time(ctx);
 
-       counter->prev_state = counter->state;
        if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
                goto unlock;
        counter->state = PERF_COUNTER_STATE_INACTIVE;
@@ -755,18 +850,24 @@ static void __perf_counter_enable(void *info)
                 */
                if (leader != counter)
                        group_sched_out(leader, cpuctx, ctx);
-               if (leader->hw_event.pinned) {
+               if (leader->attr.pinned) {
                        update_group_times(leader);
                        leader->state = PERF_COUNTER_STATE_ERROR;
                }
        }
 
  unlock:
-       spin_unlock_irqrestore(&ctx->lock, flags);
+       spin_unlock(&ctx->lock);
 }
 
 /*
  * Enable a counter.
+ *
+ * If counter->ctx is a cloned context, callers must make sure that
+ * every task struct that counter->ctx->task could possibly point to
+ * remains valid.  This condition is satisfied when called through
+ * perf_counter_for_each_child or perf_counter_for_each as described
+ * for perf_counter_disable.
  */
 static void perf_counter_enable(struct perf_counter *counter)
 {
@@ -827,7 +928,7 @@ static int perf_counter_refresh(struct perf_counter *counter, int refresh)
        /*
         * not supported on inherited counters
         */
-       if (counter->hw_event.inherit)
+       if (counter->attr.inherit)
                return -EINVAL;
 
        atomic_add(refresh, &counter->event_limit);
@@ -861,6 +962,25 @@ void __perf_counter_sched_out(struct perf_counter_context *ctx,
        spin_unlock(&ctx->lock);
 }
 
+/*
+ * Test whether two contexts are equivalent, i.e. whether they
+ * have both been cloned from the same version of the same context
+ * and they both have the same number of enabled counters.
+ * If the number of enabled counters is the same, then the set
+ * of enabled counters should be the same, because these are both
+ * inherited contexts, therefore we can't access individual counters
+ * in them directly with an fd; we can only enable/disable all
+ * counters via prctl, or enable/disable all counters in a family
+ * via ioctl, which will have the same effect on both contexts.
+ */
+static int context_equiv(struct perf_counter_context *ctx1,
+                        struct perf_counter_context *ctx2)
+{
+       return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
+               && ctx1->parent_gen == ctx2->parent_gen
+               && !ctx1->pin_count && !ctx2->pin_count;
+}
+
 /*
  * Called from scheduler to remove the counters of the current task,
  * with interrupts disabled.
@@ -872,34 +992,82 @@ void __perf_counter_sched_out(struct perf_counter_context *ctx,
  * accessing the counter control register. If a NMI hits, then it will
  * not restart the counter.
  */
-void perf_counter_task_sched_out(struct task_struct *task, int cpu)
+void perf_counter_task_sched_out(struct task_struct *task,
+                                struct task_struct *next, int cpu)
 {
        struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
        struct perf_counter_context *ctx = task->perf_counter_ctxp;
+       struct perf_counter_context *next_ctx;
+       struct perf_counter_context *parent;
        struct pt_regs *regs;
+       int do_switch = 1;
+
+       regs = task_pt_regs(task);
+       perf_swcounter_event(PERF_COUNT_CONTEXT_SWITCHES, 1, 1, regs, 0);
 
        if (likely(!ctx || !cpuctx->task_ctx))
                return;
 
        update_context_time(ctx);
 
-       regs = task_pt_regs(task);
-       perf_swcounter_event(PERF_COUNT_CONTEXT_SWITCHES, 1, 1, regs, 0);
-       __perf_counter_sched_out(ctx, cpuctx);
+       rcu_read_lock();
+       parent = rcu_dereference(ctx->parent_ctx);
+       next_ctx = next->perf_counter_ctxp;
+       if (parent && next_ctx &&
+           rcu_dereference(next_ctx->parent_ctx) == parent) {
+               /*
+                * Looks like the two contexts are clones, so we might be
+                * able to optimize the context switch.  We lock both
+                * contexts and check that they are clones under the
+                * lock (including re-checking that neither has been
+                * uncloned in the meantime).  It doesn't matter which
+                * order we take the locks because no other cpu could
+                * be trying to lock both of these tasks.
+                */
+               spin_lock(&ctx->lock);
+               spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
+               if (context_equiv(ctx, next_ctx)) {
+                       /*
+                        * XXX do we need a memory barrier of sorts
+                        * wrt to rcu_dereference() of perf_counter_ctxp
+                        */
+                       task->perf_counter_ctxp = next_ctx;
+                       next->perf_counter_ctxp = ctx;
+                       ctx->task = next;
+                       next_ctx->task = task;
+                       do_switch = 0;
+               }
+               spin_unlock(&next_ctx->lock);
+               spin_unlock(&ctx->lock);
+       }
+       rcu_read_unlock();
 
-       cpuctx->task_ctx = NULL;
+       if (do_switch) {
+               __perf_counter_sched_out(ctx, cpuctx);
+               cpuctx->task_ctx = NULL;
+       }
 }
 
+/*
+ * Called with IRQs disabled
+ */
 static void __perf_counter_task_sched_out(struct perf_counter_context *ctx)
 {
        struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
 
        if (!cpuctx->task_ctx)
                return;
+
+       if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
+               return;
+
        __perf_counter_sched_out(ctx, cpuctx);
        cpuctx->task_ctx = NULL;
 }
 
+/*
+ * Called with IRQs disabled
+ */
 static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx)
 {
        __perf_counter_sched_out(&cpuctx->ctx, cpuctx);
@@ -927,7 +1095,7 @@ __perf_counter_sched_in(struct perf_counter_context *ctx,
         */
        list_for_each_entry(counter, &ctx->counter_list, list_entry) {
                if (counter->state <= PERF_COUNTER_STATE_OFF ||
-                   !counter->hw_event.pinned)
+                   !counter->attr.pinned)
                        continue;
                if (counter->cpu != -1 && counter->cpu != cpu)
                        continue;
@@ -955,7 +1123,7 @@ __perf_counter_sched_in(struct perf_counter_context *ctx,
                 * ignore pinned counters since we did them already.
                 */
                if (counter->state <= PERF_COUNTER_STATE_OFF ||
-                   counter->hw_event.pinned)
+                   counter->attr.pinned)
                        continue;
 
                /*
@@ -998,6 +1166,8 @@ void perf_counter_task_sched_in(struct task_struct *task, int cpu)
 
        if (likely(!ctx))
                return;
+       if (cpuctx->task_ctx == ctx)
+               return;
        __perf_counter_sched_in(ctx, cpuctx, cpu);
        cpuctx->task_ctx = ctx;
 }
@@ -1009,115 +1179,93 @@ static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
        __perf_counter_sched_in(ctx, cpuctx, cpu);
 }
 
-int perf_counter_task_disable(void)
-{
-       struct task_struct *curr = current;
-       struct perf_counter_context *ctx = curr->perf_counter_ctxp;
-       struct perf_counter *counter;
-       unsigned long flags;
-
-       if (!ctx || !ctx->nr_counters)
-               return 0;
+#define MAX_INTERRUPTS (~0ULL)
 
-       local_irq_save(flags);
+static void perf_log_throttle(struct perf_counter *counter, int enable);
+static void perf_log_period(struct perf_counter *counter, u64 period);
 
-       __perf_counter_task_sched_out(ctx);
+static void perf_adjust_period(struct perf_counter *counter, u64 events)
+{
+       struct hw_perf_counter *hwc = &counter->hw;
+       u64 period, sample_period;
+       s64 delta;
 
-       spin_lock(&ctx->lock);
+       events *= hwc->sample_period;
+       period = div64_u64(events, counter->attr.sample_freq);
 
-       /*
-        * Disable all the counters:
-        */
-       perf_disable();
+       delta = (s64)(period - hwc->sample_period);
+       delta = (delta + 7) / 8; /* low pass filter */
 
-       list_for_each_entry(counter, &ctx->counter_list, list_entry) {
-               if (counter->state != PERF_COUNTER_STATE_ERROR) {
-                       update_group_times(counter);
-                       counter->state = PERF_COUNTER_STATE_OFF;
-               }
-       }
+       sample_period = hwc->sample_period + delta;
 
-       perf_enable();
+       if (!sample_period)
+               sample_period = 1;
 
-       spin_unlock_irqrestore(&ctx->lock, flags);
+       perf_log_period(counter, sample_period);
 
-       return 0;
+       hwc->sample_period = sample_period;
 }
 
-int perf_counter_task_enable(void)
+static void perf_ctx_adjust_freq(struct perf_counter_context *ctx)
 {
-       struct task_struct *curr = current;
-       struct perf_counter_context *ctx = curr->perf_counter_ctxp;
        struct perf_counter *counter;
-       unsigned long flags;
-       int cpu;
-
-       if (!ctx || !ctx->nr_counters)
-               return 0;
-
-       local_irq_save(flags);
-       cpu = smp_processor_id();
-
-       __perf_counter_task_sched_out(ctx);
+       struct hw_perf_counter *hwc;
+       u64 interrupts, freq;
 
        spin_lock(&ctx->lock);
-
-       /*
-        * Disable all the counters:
-        */
-       perf_disable();
-
        list_for_each_entry(counter, &ctx->counter_list, list_entry) {
-               if (counter->state > PERF_COUNTER_STATE_OFF)
+               if (counter->state != PERF_COUNTER_STATE_ACTIVE)
                        continue;
-               counter->state = PERF_COUNTER_STATE_INACTIVE;
-               counter->tstamp_enabled =
-                       ctx->time - counter->total_time_enabled;
-               counter->hw_event.disabled = 0;
-       }
-       perf_enable();
-
-       spin_unlock(&ctx->lock);
-
-       perf_counter_task_sched_in(curr, cpu);
-
-       local_irq_restore(flags);
 
-       return 0;
-}
-
-static void perf_log_period(struct perf_counter *counter, u64 period);
+               hwc = &counter->hw;
 
-static void perf_adjust_freq(struct perf_counter_context *ctx)
-{
-       struct perf_counter *counter;
-       u64 irq_period;
-       u64 events, period;
-       s64 delta;
+               interrupts = hwc->interrupts;
+               hwc->interrupts = 0;
 
-       spin_lock(&ctx->lock);
-       list_for_each_entry(counter, &ctx->counter_list, list_entry) {
-               if (counter->state != PERF_COUNTER_STATE_ACTIVE)
-                       continue;
+               /*
+                * unthrottle counters on the tick
+                */
+               if (interrupts == MAX_INTERRUPTS) {
+                       perf_log_throttle(counter, 1);
+                       counter->pmu->unthrottle(counter);
+                       interrupts = 2*sysctl_perf_counter_limit/HZ;
+               }
 
-               if (!counter->hw_event.freq || !counter->hw_event.irq_freq)
+               if (!counter->attr.freq || !counter->attr.sample_freq)
                        continue;
 
-               events = HZ * counter->hw.interrupts * counter->hw.irq_period;
-               period = div64_u64(events, counter->hw_event.irq_freq);
+               /*
+                * if the specified freq < HZ then we need to skip ticks
+                */
+               if (counter->attr.sample_freq < HZ) {
+                       freq = counter->attr.sample_freq;
 
-               delta = (s64)(1 + period - counter->hw.irq_period);
-               delta >>= 1;
+                       hwc->freq_count += freq;
+                       hwc->freq_interrupts += interrupts;
 
-               irq_period = counter->hw.irq_period + delta;
+                       if (hwc->freq_count < HZ)
+                               continue;
 
-               if (!irq_period)
-                       irq_period = 1;
+                       interrupts = hwc->freq_interrupts;
+                       hwc->freq_interrupts = 0;
+                       hwc->freq_count -= HZ;
+               } else
+                       freq = HZ;
 
-               perf_log_period(counter, irq_period);
+               perf_adjust_period(counter, freq * interrupts);
 
-               counter->hw.irq_period = irq_period;
-               counter->hw.interrupts = 0;
+               /*
+                * In order to avoid being stalled by an (accidental) huge
+                * sample period, force reset the sample period if we didn't
+                * get any events in this freq period.
+                */
+               if (!interrupts) {
+                       perf_disable();
+                       counter->pmu->disable(counter);
+                       atomic_set(&hwc->period_left, 0);
+                       counter->pmu->enable(counter);
+                       perf_enable();
+               }
        }
        spin_unlock(&ctx->lock);
 }
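
To make the new adjustment arithmetic concrete, here is a standalone sketch of the same computation with a worked example; the numbers (HZ of 1000, a 1000 Hz target, a starting period of 10000, two interrupts in the last tick) are assumptions chosen for illustration only:

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Mirror of perf_adjust_period(): given the events estimated for the
     * last second, move sample_period 1/8th of the way towards the period
     * that would hit attr.sample_freq exactly (the low pass filter).
     */
    static uint64_t adjust_period(uint64_t sample_period, uint64_t events,
                                  uint64_t sample_freq)
    {
            uint64_t period;
            int64_t delta;

            events *= sample_period;            /* events/sec at the current period */
            period = events / sample_freq;      /* period that hits the target freq */

            delta = (int64_t)(period - sample_period);
            delta = (delta + 7) / 8;            /* low pass filter */

            sample_period += delta;
            return sample_period ? sample_period : 1;
    }

    int main(void)
    {
            /*
             * Assumed example: HZ = 1000, attr.sample_freq = 1000, current
             * sample_period = 10000, and the counter fired twice during the
             * last tick, so perf_ctx_adjust_freq() passes HZ * interrupts.
             */
            uint64_t p = adjust_period(10000, 1000 * 2, 1000);

            /* 2000 * 10000 / 1000 = 20000; (20000 - 10000 + 7) / 8 = 1250 */
            printf("new sample_period = %llu\n", (unsigned long long)p); /* 11250 */
            return 0;
    }

The forced reset of period_left when a whole frequency period passes without any events (above) is what recovers a counter that ended up with an accidentally huge sample period.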
@@ -1157,9 +1305,9 @@ void perf_counter_task_tick(struct task_struct *curr, int cpu)
        cpuctx = &per_cpu(perf_cpu_context, cpu);
        ctx = curr->perf_counter_ctxp;
 
-       perf_adjust_freq(&cpuctx->ctx);
+       perf_ctx_adjust_freq(&cpuctx->ctx);
        if (ctx)
-               perf_adjust_freq(ctx);
+               perf_ctx_adjust_freq(ctx);
 
        perf_counter_cpu_sched_out(cpuctx);
        if (ctx)
@@ -1223,18 +1371,14 @@ __perf_counter_init_context(struct perf_counter_context *ctx,
        ctx->task = task;
 }
 
-static void put_context(struct perf_counter_context *ctx)
-{
-       if (ctx->task)
-               put_task_struct(ctx->task);
-}
-
 static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
 {
-       struct perf_cpu_context *cpuctx;
+       struct perf_counter_context *parent_ctx;
        struct perf_counter_context *ctx;
-       struct perf_counter_context *tctx;
+       struct perf_cpu_context *cpuctx;
        struct task_struct *task;
+       unsigned long flags;
+       int err;
 
        /*
         * If cpu is not a wildcard then this is a percpu counter:
@@ -1257,6 +1401,7 @@ static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
 
                cpuctx = &per_cpu(perf_cpu_context, cpu);
                ctx = &cpuctx->ctx;
+               get_ctx(ctx);
 
                return ctx;
        }
@@ -1273,37 +1418,58 @@ static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
        if (!task)
                return ERR_PTR(-ESRCH);
 
+       /*
+        * Can't attach counters to a dying task.
+        */
+       err = -ESRCH;
+       if (task->flags & PF_EXITING)
+               goto errout;
+
        /* Reuse ptrace permission checks for now. */
-       if (!ptrace_may_access(task, PTRACE_MODE_READ)) {
-               put_task_struct(task);
-               return ERR_PTR(-EACCES);
+       err = -EACCES;
+       if (!ptrace_may_access(task, PTRACE_MODE_READ))
+               goto errout;
+
+ retry:
+       ctx = perf_lock_task_context(task, &flags);
+       if (ctx) {
+               parent_ctx = ctx->parent_ctx;
+               if (parent_ctx) {
+                       put_ctx(parent_ctx);
+                       ctx->parent_ctx = NULL;         /* no longer a clone */
+               }
+               /*
+                * Get an extra reference before dropping the lock so that
+                * this context won't get freed if the task exits.
+                */
+               get_ctx(ctx);
+               spin_unlock_irqrestore(&ctx->lock, flags);
        }
 
-       ctx = task->perf_counter_ctxp;
        if (!ctx) {
                ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL);
-               if (!ctx) {
-                       put_task_struct(task);
-                       return ERR_PTR(-ENOMEM);
-               }
+               err = -ENOMEM;
+               if (!ctx)
+                       goto errout;
                __perf_counter_init_context(ctx, task);
-               /*
-                * Make sure other cpus see correct values for *ctx
-                * once task->perf_counter_ctxp is visible to them.
-                */
-               smp_wmb();
-               tctx = cmpxchg(&task->perf_counter_ctxp, NULL, ctx);
-               if (tctx) {
+               get_ctx(ctx);
+               if (cmpxchg(&task->perf_counter_ctxp, NULL, ctx)) {
                        /*
                         * We raced with some other task; use
                         * the context they set.
                         */
                        kfree(ctx);
-                       ctx = tctx;
+                       goto retry;
                }
+               get_task_struct(task);
        }
 
+       put_task_struct(task);
        return ctx;
+
+ errout:
+       put_task_struct(task);
+       return ERR_PTR(err);
 }
 
 static void free_counter_rcu(struct rcu_head *head)
@@ -1311,7 +1477,8 @@ static void free_counter_rcu(struct rcu_head *head)
        struct perf_counter *counter;
 
        counter = container_of(head, struct perf_counter, rcu_head);
-       put_ctx(counter->ctx);
+       if (counter->ns)
+               put_pid_ns(counter->ns);
        kfree(counter);
 }
 
@@ -1322,16 +1489,15 @@ static void free_counter(struct perf_counter *counter)
        perf_pending_sync(counter);
 
        atomic_dec(&nr_counters);
-       if (counter->hw_event.mmap)
-               atomic_dec(&nr_mmap_tracking);
-       if (counter->hw_event.munmap)
-               atomic_dec(&nr_munmap_tracking);
-       if (counter->hw_event.comm)
-               atomic_dec(&nr_comm_tracking);
+       if (counter->attr.mmap)
+               atomic_dec(&nr_mmap_counters);
+       if (counter->attr.comm)
+               atomic_dec(&nr_comm_counters);
 
        if (counter->destroy)
                counter->destroy(counter);
 
+       put_ctx(counter->ctx);
        call_rcu(&counter->rcu_head, free_counter_rcu);
 }
 
@@ -1345,16 +1511,17 @@ static int perf_release(struct inode *inode, struct file *file)
 
        file->private_data = NULL;
 
+       WARN_ON_ONCE(ctx->parent_ctx);
        mutex_lock(&ctx->mutex);
-       mutex_lock(&counter->mutex);
-
        perf_counter_remove_from_context(counter);
-
-       mutex_unlock(&counter->mutex);
        mutex_unlock(&ctx->mutex);
 
+       mutex_lock(&counter->owner->perf_counter_mutex);
+       list_del_init(&counter->owner_entry);
+       mutex_unlock(&counter->owner->perf_counter_mutex);
+       put_task_struct(counter->owner);
+
        free_counter(counter);
-       put_context(ctx);
 
        return 0;
 }
@@ -1376,16 +1543,19 @@ perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
        if (counter->state == PERF_COUNTER_STATE_ERROR)
                return 0;
 
-       mutex_lock(&counter->mutex);
+       WARN_ON_ONCE(counter->ctx->parent_ctx);
+       mutex_lock(&counter->child_mutex);
        values[0] = perf_counter_read(counter);
        n = 1;
-       if (counter->hw_event.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+       if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
                values[n++] = counter->total_time_enabled +
                        atomic64_read(&counter->child_total_time_enabled);
-       if (counter->hw_event.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+       if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
                values[n++] = counter->total_time_running +
                        atomic64_read(&counter->child_total_time_running);
-       mutex_unlock(&counter->mutex);
+       if (counter->attr.read_format & PERF_FORMAT_ID)
+               values[n++] = counter->id;
+       mutex_unlock(&counter->child_mutex);
 
        if (count < n * sizeof(u64))
                return -EINVAL;
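
With PERF_FORMAT_ID added above, what read() returns depends on attr.read_format. A minimal user-space decode sketch; it assumes the PERF_FORMAT_* bits from the linux/perf_counter.h that goes with this series, and counter_fd is a hypothetical, already-open counter file descriptor:

    #include <stdint.h>
    #include <unistd.h>
    #include <linux/perf_counter.h>     /* assumed: PERF_FORMAT_* bits */

    /*
     * Decode the u64 array written by perf_read_hw(), in the order the
     * kernel emits it: count, then the optional fields selected by
     * read_format (time enabled, time running, counter id).
     */
    static int read_counter(int counter_fd, uint64_t read_format,
                            uint64_t *count, uint64_t *enabled,
                            uint64_t *running, uint64_t *id)
    {
            uint64_t values[4];
            int n = 1;

            if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
                    n++;
            if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
                    n++;
            if (read_format & PERF_FORMAT_ID)
                    n++;

            if (read(counter_fd, values, n * sizeof(uint64_t)) !=
                (ssize_t)(n * sizeof(uint64_t)))
                    return -1;

            n = 0;
            *count = values[n++];
            if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
                    *enabled = values[n++];
            if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
                    *running = values[n++];
            if (read_format & PERF_FORMAT_ID)
                    *id = values[n++];
            return 0;
    }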
@@ -1435,25 +1605,33 @@ static void perf_counter_for_each_sibling(struct perf_counter *counter,
        struct perf_counter_context *ctx = counter->ctx;
        struct perf_counter *sibling;
 
-       spin_lock_irq(&ctx->lock);
+       WARN_ON_ONCE(ctx->parent_ctx);
+       mutex_lock(&ctx->mutex);
        counter = counter->group_leader;
 
        func(counter);
        list_for_each_entry(sibling, &counter->sibling_list, list_entry)
                func(sibling);
-       spin_unlock_irq(&ctx->lock);
+       mutex_unlock(&ctx->mutex);
 }
 
+/*
+ * Holding the top-level counter's child_mutex means that any
+ * descendant process that has inherited this counter will block
+ * in sync_child_counter if it goes to exit, thus satisfying the
+ * task existence requirements of perf_counter_enable/disable.
+ */
 static void perf_counter_for_each_child(struct perf_counter *counter,
                                        void (*func)(struct perf_counter *))
 {
        struct perf_counter *child;
 
-       mutex_lock(&counter->mutex);
+       WARN_ON_ONCE(counter->ctx->parent_ctx);
+       mutex_lock(&counter->child_mutex);
        func(counter);
        list_for_each_entry(child, &counter->child_list, child_list)
                func(child);
-       mutex_unlock(&counter->mutex);
+       mutex_unlock(&counter->child_mutex);
 }
 
 static void perf_counter_for_each(struct perf_counter *counter,
@@ -1461,11 +1639,49 @@ static void perf_counter_for_each(struct perf_counter *counter,
 {
        struct perf_counter *child;
 
-       mutex_lock(&counter->mutex);
+       WARN_ON_ONCE(counter->ctx->parent_ctx);
+       mutex_lock(&counter->child_mutex);
        perf_counter_for_each_sibling(counter, func);
        list_for_each_entry(child, &counter->child_list, child_list)
                perf_counter_for_each_sibling(child, func);
-       mutex_unlock(&counter->mutex);
+       mutex_unlock(&counter->child_mutex);
+}
+
+static int perf_counter_period(struct perf_counter *counter, u64 __user *arg)
+{
+       struct perf_counter_context *ctx = counter->ctx;
+       unsigned long size;
+       int ret = 0;
+       u64 value;
+
+       if (!counter->attr.sample_period)
+               return -EINVAL;
+
+       size = copy_from_user(&value, arg, sizeof(value));
+       if (size != sizeof(value))
+               return -EFAULT;
+
+       if (!value)
+               return -EINVAL;
+
+       spin_lock_irq(&ctx->lock);
+       if (counter->attr.freq) {
+               if (value > sysctl_perf_counter_limit) {
+                       ret = -EINVAL;
+                       goto unlock;
+               }
+
+               counter->attr.sample_freq = value;
+       } else {
+               perf_log_period(counter, value);
+
+               counter->attr.sample_period = value;
+               counter->hw.sample_period = value;
+       }
+unlock:
+       spin_unlock_irq(&ctx->lock);
+
+       return ret;
 }
 
 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
@@ -1487,6 +1703,10 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 
        case PERF_COUNTER_IOC_REFRESH:
                return perf_counter_refresh(counter, arg);
+
+       case PERF_COUNTER_IOC_PERIOD:
+               return perf_counter_period(counter, (u64 __user *)arg);
+
        default:
                return -ENOTTY;
        }
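
The new PERF_COUNTER_IOC_PERIOD command lets user space retune a counter without closing and re-opening it. A hedged usage sketch; PERF_COUNTER_IOC_PERIOD is assumed to come from the linux/perf_counter.h side of this series, and counter_fd is hypothetical. For a counter created with attr.freq the value is taken as a new sample_freq (rejected above sysctl_perf_counter_limit), otherwise it becomes the new sample_period:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/perf_counter.h>     /* assumed: PERF_COUNTER_IOC_PERIOD */

    /* Change the sample period (or sample frequency, if attr.freq was set). */
    static int set_sample_period(int counter_fd, uint64_t value)
    {
            /* the kernel copy_from_user()s a u64, so pass a pointer */
            return ioctl(counter_fd, PERF_COUNTER_IOC_PERIOD, &value);
    }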
@@ -1499,6 +1719,30 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        return 0;
 }
 
+int perf_counter_task_enable(void)
+{
+       struct perf_counter *counter;
+
+       mutex_lock(&current->perf_counter_mutex);
+       list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
+               perf_counter_for_each_child(counter, perf_counter_enable);
+       mutex_unlock(&current->perf_counter_mutex);
+
+       return 0;
+}
+
+int perf_counter_task_disable(void)
+{
+       struct perf_counter *counter;
+
+       mutex_lock(&current->perf_counter_mutex);
+       list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
+               perf_counter_for_each_child(counter, perf_counter_disable);
+       mutex_unlock(&current->perf_counter_mutex);
+
+       return 0;
+}
+
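
These two functions are the kernel side of the prctl interface referred to in the context_equiv() comment: they walk every counter the calling task owns and enable or disable the whole family. A user-space sketch, assuming the PR_TASK_PERF_COUNTERS_{DISABLE,ENABLE} numbers from the linux/prctl.h of this era (treat the fallback values as an assumption if your header differs):

    #include <sys/prctl.h>

    #ifndef PR_TASK_PERF_COUNTERS_DISABLE       /* assumed values */
    #define PR_TASK_PERF_COUNTERS_DISABLE       31
    #define PR_TASK_PERF_COUNTERS_ENABLE        32
    #endif

    /*
     * Toggle every counter the calling task owns (and, via
     * perf_counter_for_each_child(), anything inherited from them).
     */
    static int perf_counters_set_enabled(int enable)
    {
            return prctl(enable ? PR_TASK_PERF_COUNTERS_ENABLE
                                : PR_TASK_PERF_COUNTERS_DISABLE, 0, 0, 0, 0);
    }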
 /*
  * Callers need to ensure there can be no nesting of this function, otherwise
  * the seqlock logic goes bad. We can not serialize this because the arch
@@ -1506,8 +1750,8 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
  */
 void perf_counter_update_userpage(struct perf_counter *counter)
 {
-       struct perf_mmap_data *data;
        struct perf_counter_mmap_page *userpg;
+       struct perf_mmap_data *data;
 
        rcu_read_lock();
        data = rcu_dereference(counter->data);
@@ -1611,10 +1855,11 @@ fail:
 
 static void __perf_mmap_data_free(struct rcu_head *rcu_head)
 {
-       struct perf_mmap_data *data = container_of(rcu_head,
-                       struct perf_mmap_data, rcu_head);
+       struct perf_mmap_data *data;
        int i;
 
+       data = container_of(rcu_head, struct perf_mmap_data, rcu_head);
+
        free_page((unsigned long)data->user_page);
        for (i = 0; i < data->nr_pages; i++)
                free_page((unsigned long)data->data_pages[i]);
@@ -1642,8 +1887,8 @@ static void perf_mmap_close(struct vm_area_struct *vma)
 {
        struct perf_counter *counter = vma->vm_file->private_data;
 
-       if (atomic_dec_and_mutex_lock(&counter->mmap_count,
-                                     &counter->mmap_mutex)) {
+       WARN_ON_ONCE(counter->ctx->parent_ctx);
+       if (atomic_dec_and_mutex_lock(&counter->mmap_count, &counter->mmap_mutex)) {
                struct user_struct *user = current_user();
 
                atomic_long_sub(counter->data->nr_pages + 1, &user->locked_vm);
@@ -1662,11 +1907,11 @@ static struct vm_operations_struct perf_mmap_vmops = {
 static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 {
        struct perf_counter *counter = file->private_data;
+       unsigned long user_locked, user_lock_limit;
        struct user_struct *user = current_user();
+       unsigned long locked, lock_limit;
        unsigned long vma_size;
        unsigned long nr_pages;
-       unsigned long user_locked, user_lock_limit;
-       unsigned long locked, lock_limit;
        long user_extra, extra;
        int ret = 0;
 
@@ -1689,6 +1934,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
        if (vma->vm_pgoff != 0)
                return -EINVAL;
 
+       WARN_ON_ONCE(counter->ctx->parent_ctx);
        mutex_lock(&counter->mmap_mutex);
        if (atomic_inc_not_zero(&counter->mmap_count)) {
                if (nr_pages != counter->data->nr_pages)
@@ -1698,6 +1944,12 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 
        user_extra = nr_pages + 1;
        user_lock_limit = sysctl_perf_counter_mlock >> (PAGE_SHIFT - 10);
+
+       /*
+        * Increase the limit linearly with more CPUs:
+        */
+       user_lock_limit *= num_online_cpus();
+
        user_locked = atomic_long_read(&user->locked_vm) + user_extra;
 
        extra = 0;
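
The effect of the per-CPU scaling is easiest to see with numbers. A standalone sketch of the limit calculation, assuming 4 KB pages (so PAGE_SHIFT - 10 == 2), the default sysctl_perf_counter_mlock of 512 kB and a 4-CPU machine; pages requested beyond this limit are charged against the mlock rlimit by the code that follows this hunk:

    #include <stdio.h>

    int main(void)
    {
            unsigned long perf_counter_mlock_kb = 512;  /* sysctl default */
            unsigned long page_shift = 12;              /* assumed 4 KB pages */
            unsigned long online_cpus = 4;              /* assumed machine */
            unsigned long nr_pages = 128;               /* requested data pages */

            unsigned long user_extra = nr_pages + 1;    /* +1 control page */
            unsigned long user_lock_limit =
                    (perf_counter_mlock_kb >> (page_shift - 10)) * online_cpus;

            /* 512 >> 2 = 128 pages per CPU, * 4 CPUs = 512 pages (2 MB) */
            printf("limit %lu pages, request %lu pages\n",
                   user_lock_limit, user_extra);
            return 0;
    }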
@@ -1734,8 +1986,8 @@ unlock:
 
 static int perf_fasync(int fd, struct file *filp, int on)
 {
-       struct perf_counter *counter = filp->private_data;
        struct inode *inode = filp->f_path.dentry->d_inode;
+       struct perf_counter *counter = filp->private_data;
        int retval;
 
        mutex_lock(&inode->i_mutex);
@@ -1899,8 +2151,8 @@ __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
 struct perf_output_handle {
        struct perf_counter     *counter;
        struct perf_mmap_data   *data;
-       unsigned int            offset;
-       unsigned int            head;
+       unsigned long           head;
+       unsigned long           offset;
        int                     nmi;
        int                     overflow;
        int                     locked;
@@ -1954,7 +2206,8 @@ static void perf_output_lock(struct perf_output_handle *handle)
 static void perf_output_unlock(struct perf_output_handle *handle)
 {
        struct perf_mmap_data *data = handle->data;
-       int head, cpu;
+       unsigned long head;
+       int cpu;
 
        data->done_head = data->head;
 
@@ -1967,7 +2220,7 @@ again:
         * before we publish the new head, matched by a rmb() in userspace when
         * reading this position.
         */
-       while ((head = atomic_xchg(&data->done_head, 0)))
+       while ((head = atomic_long_xchg(&data->done_head, 0)))
                data->user_page->data_head = head;
 
        /*
@@ -1980,7 +2233,7 @@ again:
        /*
         * Therefore we have to validate we did not indeed do so.
         */
-       if (unlikely(atomic_read(&data->done_head))) {
+       if (unlikely(atomic_long_read(&data->done_head))) {
                /*
                 * Since we had it locked, we can lock it again.
                 */
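
On the user-space side, the comment above ("matched by a rmb() in userspace") corresponds to reading data_head with a barrier before touching the newly published bytes. A minimal reader sketch; it assumes struct perf_counter_mmap_page from linux/perf_counter.h, uses __sync_synchronize() as a stand-in for rmb(), and the consume() callback is hypothetical:

    #include <stdint.h>
    #include <linux/perf_counter.h>     /* assumed: struct perf_counter_mmap_page */

    /*
     * Drain newly published bytes from the mmap'ed ring buffer.  'base'
     * points at the data pages (one page past the control page), 'size'
     * is their total length and '*tail' is the reader's own progress.
     */
    static void drain(volatile struct perf_counter_mmap_page *pg,
                      const char *base, unsigned long size, unsigned long *tail,
                      void (*consume)(const char *buf, unsigned long len))
    {
            unsigned long head = pg->data_head;

            __sync_synchronize();       /* the rmb() the comment refers to */

            while (*tail != head) {
                    unsigned long off = *tail % size;
                    unsigned long len = head - *tail;

                    if (len > size - off)       /* wrapped at the buffer end */
                            len = size - off;
                    consume(base + off, len);
                    *tail += len;
            }
    }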
@@ -2025,9 +2278,9 @@ static int perf_output_begin(struct perf_output_handle *handle,
        perf_output_lock(handle);
 
        do {
-               offset = head = atomic_read(&data->head);
+               offset = head = atomic_long_read(&data->head);
                head += size;
-       } while (atomic_cmpxchg(&data->head, offset, head) != offset);
+       } while (atomic_long_cmpxchg(&data->head, offset, head) != offset);
 
        handle->offset  = offset;
        handle->head    = head;
@@ -2046,7 +2299,7 @@ out:
 }
 
 static void perf_output_copy(struct perf_output_handle *handle,
-                            void *buf, unsigned int len)
+                            const void *buf, unsigned int len)
 {
        unsigned int pages_mask;
        unsigned int offset;
@@ -2078,7 +2331,7 @@ static void perf_output_copy(struct perf_output_handle *handle,
         * Check we didn't copy past our reservation window, taking the
         * possible unsigned int wrap into account.
         */
-       WARN_ON_ONCE(((int)(handle->head - handle->offset)) < 0);
+       WARN_ON_ONCE(((long)(handle->head - handle->offset)) < 0);
 }
 
 #define perf_output_put(handle, x) \
@@ -2089,7 +2342,7 @@ static void perf_output_end(struct perf_output_handle *handle)
        struct perf_counter *counter = handle->counter;
        struct perf_mmap_data *data = handle->data;
 
-       int wakeup_events = counter->hw_event.wakeup_events;
+       int wakeup_events = counter->attr.wakeup_events;
 
        if (handle->overflow && wakeup_events) {
                int events = atomic_inc_return(&data->events);
@@ -2103,11 +2356,33 @@ static void perf_output_end(struct perf_output_handle *handle)
        rcu_read_unlock();
 }
 
+static u32 perf_counter_pid(struct perf_counter *counter, struct task_struct *p)
+{
+       /*
+        * only top level counters have the pid namespace they were created in
+        */
+       if (counter->parent)
+               counter = counter->parent;
+
+       return task_tgid_nr_ns(p, counter->ns);
+}
+
+static u32 perf_counter_tid(struct perf_counter *counter, struct task_struct *p)
+{
+       /*
+        * only top level counters have the pid namespace they were created in
+        */
+       if (counter->parent)
+               counter = counter->parent;
+
+       return task_pid_nr_ns(p, counter->ns);
+}
+
 static void perf_counter_output(struct perf_counter *counter,
                                int nmi, struct pt_regs *regs, u64 addr)
 {
        int ret;
-       u64 record_type = counter->hw_event.record_type;
+       u64 sample_type = counter->attr.sample_type;
        struct perf_output_handle handle;
        struct perf_event_header header;
        u64 ip;
@@ -2115,7 +2390,7 @@ static void perf_counter_output(struct perf_counter *counter,
                u32 pid, tid;
        } tid_entry;
        struct {
-               u64 event;
+               u64 id;
                u64 counter;
        } group_entry;
        struct perf_callchain_entry *callchain = NULL;
@@ -2131,61 +2406,66 @@ static void perf_counter_output(struct perf_counter *counter,
        header.misc = PERF_EVENT_MISC_OVERFLOW;
        header.misc |= perf_misc_flags(regs);
 
-       if (record_type & PERF_RECORD_IP) {
+       if (sample_type & PERF_SAMPLE_IP) {
                ip = perf_instruction_pointer(regs);
-               header.type |= PERF_RECORD_IP;
+               header.type |= PERF_SAMPLE_IP;
                header.size += sizeof(ip);
        }
 
-       if (record_type & PERF_RECORD_TID) {
+       if (sample_type & PERF_SAMPLE_TID) {
                /* namespace issues */
-               tid_entry.pid = current->group_leader->pid;
-               tid_entry.tid = current->pid;
+               tid_entry.pid = perf_counter_pid(counter, current);
+               tid_entry.tid = perf_counter_tid(counter, current);
 
-               header.type |= PERF_RECORD_TID;
+               header.type |= PERF_SAMPLE_TID;
                header.size += sizeof(tid_entry);
        }
 
-       if (record_type & PERF_RECORD_TIME) {
+       if (sample_type & PERF_SAMPLE_TIME) {
                /*
                 * Maybe do better on x86 and provide cpu_clock_nmi()
                 */
                time = sched_clock();
 
-               header.type |= PERF_RECORD_TIME;
+               header.type |= PERF_SAMPLE_TIME;
                header.size += sizeof(u64);
        }
 
-       if (record_type & PERF_RECORD_ADDR) {
-               header.type |= PERF_RECORD_ADDR;
+       if (sample_type & PERF_SAMPLE_ADDR) {
+               header.type |= PERF_SAMPLE_ADDR;
                header.size += sizeof(u64);
        }
 
-       if (record_type & PERF_RECORD_CONFIG) {
-               header.type |= PERF_RECORD_CONFIG;
+       if (sample_type & PERF_SAMPLE_ID) {
+               header.type |= PERF_SAMPLE_ID;
                header.size += sizeof(u64);
        }
 
-       if (record_type & PERF_RECORD_CPU) {
-               header.type |= PERF_RECORD_CPU;
+       if (sample_type & PERF_SAMPLE_CPU) {
+               header.type |= PERF_SAMPLE_CPU;
                header.size += sizeof(cpu_entry);
 
                cpu_entry.cpu = raw_smp_processor_id();
        }
 
-       if (record_type & PERF_RECORD_GROUP) {
-               header.type |= PERF_RECORD_GROUP;
+       if (sample_type & PERF_SAMPLE_PERIOD) {
+               header.type |= PERF_SAMPLE_PERIOD;
+               header.size += sizeof(u64);
+       }
+
+       if (sample_type & PERF_SAMPLE_GROUP) {
+               header.type |= PERF_SAMPLE_GROUP;
                header.size += sizeof(u64) +
                        counter->nr_siblings * sizeof(group_entry);
        }
 
-       if (record_type & PERF_RECORD_CALLCHAIN) {
+       if (sample_type & PERF_SAMPLE_CALLCHAIN) {
                callchain = perf_callchain(regs);
 
                if (callchain) {
                        callchain_size = (1 + callchain->nr) * sizeof(u64);
 
-                       header.type |= PERF_RECORD_CALLCHAIN;
+                       header.type |= PERF_SAMPLE_CALLCHAIN;
                        header.size += callchain_size;
                }
        }
@@ -2196,28 +2476,31 @@ static void perf_counter_output(struct perf_counter *counter,
 
        perf_output_put(&handle, header);
 
-       if (record_type & PERF_RECORD_IP)
+       if (sample_type & PERF_SAMPLE_IP)
                perf_output_put(&handle, ip);
 
-       if (record_type & PERF_RECORD_TID)
+       if (sample_type & PERF_SAMPLE_TID)
                perf_output_put(&handle, tid_entry);
 
-       if (record_type & PERF_RECORD_TIME)
+       if (sample_type & PERF_SAMPLE_TIME)
                perf_output_put(&handle, time);
 
-       if (record_type & PERF_RECORD_ADDR)
+       if (sample_type & PERF_SAMPLE_ADDR)
                perf_output_put(&handle, addr);
 
-       if (record_type & PERF_RECORD_CONFIG)
-               perf_output_put(&handle, counter->hw_event.config);
+       if (sample_type & PERF_SAMPLE_ID)
+               perf_output_put(&handle, counter->id);
 
-       if (record_type & PERF_RECORD_CPU)
+       if (sample_type & PERF_SAMPLE_CPU)
                perf_output_put(&handle, cpu_entry);
 
+       if (sample_type & PERF_SAMPLE_PERIOD)
+               perf_output_put(&handle, counter->hw.sample_period);
+
        /*
-        * XXX PERF_RECORD_GROUP vs inherited counters seems difficult.
+        * XXX PERF_SAMPLE_GROUP vs inherited counters seems difficult.
         */
-       if (record_type & PERF_RECORD_GROUP) {
+       if (sample_type & PERF_SAMPLE_GROUP) {
                struct perf_counter *leader, *sub;
                u64 nr = counter->nr_siblings;
 
@@ -2228,7 +2511,7 @@ static void perf_counter_output(struct perf_counter *counter,
                        if (sub != counter)
                                sub->pmu->read(sub);
 
-                       group_entry.event = sub->hw_event.config;
+                       group_entry.id = sub->id;
                        group_entry.counter = atomic64_read(&sub->count);
 
                        perf_output_put(&handle, group_entry);
@@ -2241,13 +2524,111 @@ static void perf_counter_output(struct perf_counter *counter,
        perf_output_end(&handle);
 }
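
Since the record layout now keys off attr.sample_type, a consumer can compute a sample's size from the same bits the code above uses to build header.size. A standalone sketch mirroring that accumulation; the PERF_SAMPLE_* bits and struct perf_event_header are assumed to come from the linux/perf_counter.h of this series, and the two-u32 layout of the CPU entry is an assumption (only its size is visible in this hunk):

    #include <stdint.h>
    #include <linux/perf_counter.h>     /* assumed: PERF_SAMPLE_*, perf_event_header */

    /*
     * Fixed fields follow the header in the order the kernel emits them:
     * IP, TID, TIME, ADDR, ID, CPU, PERIOD, then GROUP and CALLCHAIN.
     */
    static uint64_t sample_size(uint64_t sample_type, uint64_t nr_siblings,
                                uint64_t callchain_nr)
    {
            uint64_t size = sizeof(struct perf_event_header);

            if (sample_type & PERF_SAMPLE_IP)
                    size += sizeof(uint64_t);
            if (sample_type & PERF_SAMPLE_TID)
                    size += 2 * sizeof(uint32_t);       /* pid, tid */
            if (sample_type & PERF_SAMPLE_TIME)
                    size += sizeof(uint64_t);
            if (sample_type & PERF_SAMPLE_ADDR)
                    size += sizeof(uint64_t);
            if (sample_type & PERF_SAMPLE_ID)
                    size += sizeof(uint64_t);
            if (sample_type & PERF_SAMPLE_CPU)
                    size += 2 * sizeof(uint32_t);       /* cpu, reserved (assumed) */
            if (sample_type & PERF_SAMPLE_PERIOD)
                    size += sizeof(uint64_t);
            if (sample_type & PERF_SAMPLE_GROUP)        /* u64 nr + {id, count} pairs */
                    size += sizeof(uint64_t) + nr_siblings * 2 * sizeof(uint64_t);
            if (sample_type & PERF_SAMPLE_CALLCHAIN)    /* (1 + nr) u64 entries */
                    size += (1 + callchain_nr) * sizeof(uint64_t);

            return size;
    }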
 
+/*
+ * fork tracking
+ */
+
+struct perf_fork_event {
+       struct task_struct      *task;
+
+       struct {
+               struct perf_event_header        header;
+
+               u32                             pid;
+               u32                             ppid;
+       } event;
+};
+
+static void perf_counter_fork_output(struct perf_counter *counter,
+                                    struct perf_fork_event *fork_event)
+{
+       struct perf_output_handle handle;
+       int size = fork_event->event.header.size;
+       struct task_struct *task = fork_event->task;
+       int ret = perf_output_begin(&handle, counter, size, 0, 0);
+
+       if (ret)
+               return;
+
+       fork_event->event.pid = perf_counter_pid(counter, task);
+       fork_event->event.ppid = perf_counter_pid(counter, task->real_parent);
+
+       perf_output_put(&handle, fork_event->event);
+       perf_output_end(&handle);
+}
+
+static int perf_counter_fork_match(struct perf_counter *counter)
+{
+       if (counter->attr.comm || counter->attr.mmap)
+               return 1;
+
+       return 0;
+}
+
+static void perf_counter_fork_ctx(struct perf_counter_context *ctx,
+                                 struct perf_fork_event *fork_event)
+{
+       struct perf_counter *counter;
+
+       if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
+               return;
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
+               if (perf_counter_fork_match(counter))
+                       perf_counter_fork_output(counter, fork_event);
+       }
+       rcu_read_unlock();
+}
+
+static void perf_counter_fork_event(struct perf_fork_event *fork_event)
+{
+       struct perf_cpu_context *cpuctx;
+       struct perf_counter_context *ctx;
+
+       cpuctx = &get_cpu_var(perf_cpu_context);
+       perf_counter_fork_ctx(&cpuctx->ctx, fork_event);
+       put_cpu_var(perf_cpu_context);
+
+       rcu_read_lock();
+       /*
+        * doesn't really matter which of the child contexts the
+        * event ends up in.
+        */
+       ctx = rcu_dereference(current->perf_counter_ctxp);
+       if (ctx)
+               perf_counter_fork_ctx(ctx, fork_event);
+       rcu_read_unlock();
+}
+
+void perf_counter_fork(struct task_struct *task)
+{
+       struct perf_fork_event fork_event;
+
+       if (!atomic_read(&nr_comm_counters) &&
+           !atomic_read(&nr_mmap_counters))
+               return;
+
+       fork_event = (struct perf_fork_event){
+               .task   = task,
+               .event  = {
+                       .header = {
+                               .type = PERF_EVENT_FORK,
+                               .size = sizeof(fork_event.event),
+                       },
+               },
+       };
+
+       perf_counter_fork_event(&fork_event);
+}
+
 /*
  * comm tracking
  */
 
 struct perf_comm_event {
-       struct task_struct      *task;
-       char                    *comm;
+       struct task_struct      *task;
+       char                    *comm;
        int                     comm_size;
 
        struct {
@@ -2268,17 +2649,18 @@ static void perf_counter_comm_output(struct perf_counter *counter,
        if (ret)
                return;
 
+       comm_event->event.pid = perf_counter_pid(counter, comm_event->task);
+       comm_event->event.tid = perf_counter_tid(counter, comm_event->task);
+
        perf_output_put(&handle, comm_event->event);
        perf_output_copy(&handle, comm_event->comm,
                                   comm_event->comm_size);
        perf_output_end(&handle);
 }
 
-static int perf_counter_comm_match(struct perf_counter *counter,
-                                  struct perf_comm_event *comm_event)
+static int perf_counter_comm_match(struct perf_counter *counter)
 {
-       if (counter->hw_event.comm &&
-           comm_event->event.header.type == PERF_EVENT_COMM)
+       if (counter->attr.comm)
                return 1;
 
        return 0;
@@ -2294,7 +2676,7 @@ static void perf_counter_comm_ctx(struct perf_counter_context *ctx,
 
        rcu_read_lock();
        list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
-               if (perf_counter_comm_match(counter, comm_event))
+               if (perf_counter_comm_match(counter))
                        perf_counter_comm_output(counter, comm_event);
        }
        rcu_read_unlock();
@@ -2303,6 +2685,7 @@ static void perf_counter_comm_ctx(struct perf_counter_context *ctx,
 static void perf_counter_comm_event(struct perf_comm_event *comm_event)
 {
        struct perf_cpu_context *cpuctx;
+       struct perf_counter_context *ctx;
        unsigned int size;
        char *comm = comm_event->task->comm;
 
@@ -2317,24 +2700,28 @@ static void perf_counter_comm_event(struct perf_comm_event *comm_event)
        perf_counter_comm_ctx(&cpuctx->ctx, comm_event);
        put_cpu_var(perf_cpu_context);
 
-       perf_counter_comm_ctx(current->perf_counter_ctxp, comm_event);
+       rcu_read_lock();
+       /*
+        * doesn't really matter which of the child contexts the
+        * event ends up in.
+        */
+       ctx = rcu_dereference(current->perf_counter_ctxp);
+       if (ctx)
+               perf_counter_comm_ctx(ctx, comm_event);
+       rcu_read_unlock();
 }
 
 void perf_counter_comm(struct task_struct *task)
 {
        struct perf_comm_event comm_event;
 
-       if (!atomic_read(&nr_comm_tracking))
-               return;
-       if (!current->perf_counter_ctxp)
+       if (!atomic_read(&nr_comm_counters))
                return;
 
        comm_event = (struct perf_comm_event){
                .task   = task,
                .event  = {
                        .header = { .type = PERF_EVENT_COMM, },
-                       .pid    = task->group_leader->pid,
-                       .tid    = task->pid,
                },
        };
 
@@ -2346,9 +2733,10 @@ void perf_counter_comm(struct task_struct *task)
  */
 
 struct perf_mmap_event {
-       struct file     *file;
-       char            *file_name;
-       int             file_size;
+       struct vm_area_struct   *vma;
+
+       const char              *file_name;
+       int                     file_size;
 
        struct {
                struct perf_event_header        header;
@@ -2371,6 +2759,9 @@ static void perf_counter_mmap_output(struct perf_counter *counter,
        if (ret)
                return;
 
+       mmap_event->event.pid = perf_counter_pid(counter, current);
+       mmap_event->event.tid = perf_counter_tid(counter, current);
+
        perf_output_put(&handle, mmap_event->event);
        perf_output_copy(&handle, mmap_event->file_name,
                                   mmap_event->file_size);
@@ -2380,12 +2771,7 @@ static void perf_counter_mmap_output(struct perf_counter *counter,
 static int perf_counter_mmap_match(struct perf_counter *counter,
                                   struct perf_mmap_event *mmap_event)
 {
-       if (counter->hw_event.mmap &&
-           mmap_event->event.header.type == PERF_EVENT_MMAP)
-               return 1;
-
-       if (counter->hw_event.munmap &&
-           mmap_event->event.header.type == PERF_EVENT_MUNMAP)
+       if (counter->attr.mmap)
                return 1;
 
        return 0;
@@ -2410,11 +2796,13 @@ static void perf_counter_mmap_ctx(struct perf_counter_context *ctx,
 static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event)
 {
        struct perf_cpu_context *cpuctx;
-       struct file *file = mmap_event->file;
+       struct perf_counter_context *ctx;
+       struct vm_area_struct *vma = mmap_event->vma;
+       struct file *file = vma->vm_file;
        unsigned int size;
        char tmp[16];
        char *buf = NULL;
-       char *name;
+       const char *name;
 
        if (file) {
                buf = kzalloc(PATH_MAX, GFP_KERNEL);
@@ -2428,6 +2816,15 @@ static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event)
                        goto got_name;
                }
        } else {
+               name = arch_vma_name(mmap_event->vma);
+               if (name)
+                       goto got_name;
+
+               if (!vma->vm_mm) {
+                       name = strncpy(tmp, "[vdso]", sizeof(tmp));
+                       goto got_name;
+               }
+
                name = strncpy(tmp, "//anon", sizeof(tmp));
                goto got_name;
        }
@@ -2444,64 +2841,87 @@ got_name:
        perf_counter_mmap_ctx(&cpuctx->ctx, mmap_event);
        put_cpu_var(perf_cpu_context);
 
-       perf_counter_mmap_ctx(current->perf_counter_ctxp, mmap_event);
+       rcu_read_lock();
+       /*
+        * It doesn't really matter which of the child contexts the
+        * event ends up in.
+        */
+       ctx = rcu_dereference(current->perf_counter_ctxp);
+       if (ctx)
+               perf_counter_mmap_ctx(ctx, mmap_event);
+       rcu_read_unlock();
 
        kfree(buf);
 }
 
-void perf_counter_mmap(unsigned long addr, unsigned long len,
-                      unsigned long pgoff, struct file *file)
+void __perf_counter_mmap(struct vm_area_struct *vma)
 {
        struct perf_mmap_event mmap_event;
 
-       if (!atomic_read(&nr_mmap_tracking))
-               return;
-       if (!current->perf_counter_ctxp)
+       if (!atomic_read(&nr_mmap_counters))
                return;
 
        mmap_event = (struct perf_mmap_event){
-               .file   = file,
+               .vma    = vma,
                .event  = {
                        .header = { .type = PERF_EVENT_MMAP, },
-                       .pid    = current->group_leader->pid,
-                       .tid    = current->pid,
-                       .start  = addr,
-                       .len    = len,
-                       .pgoff  = pgoff,
+                       .start  = vma->vm_start,
+                       .len    = vma->vm_end - vma->vm_start,
+                       .pgoff  = vma->vm_pgoff,
                },
        };
 
        perf_counter_mmap_event(&mmap_event);
 }
 
-void perf_counter_munmap(unsigned long addr, unsigned long len,
-                        unsigned long pgoff, struct file *file)
+/*
+ * Log sample_period changes so that analyzing tools can re-normalize the
+ * event flow.
+ */
+
+struct freq_event {
+       struct perf_event_header        header;
+       u64                             time;
+       u64                             id;
+       u64                             period;
+};
+
+static void perf_log_period(struct perf_counter *counter, u64 period)
 {
-       struct perf_mmap_event mmap_event;
+       struct perf_output_handle handle;
+       struct freq_event event;
+       int ret;
 
-       if (!atomic_read(&nr_munmap_tracking))
+       if (counter->hw.sample_period == period)
                return;
 
-       mmap_event = (struct perf_mmap_event){
-               .file   = file,
-               .event  = {
-                       .header = { .type = PERF_EVENT_MUNMAP, },
-                       .pid    = current->group_leader->pid,
-                       .tid    = current->pid,
-                       .start  = addr,
-                       .len    = len,
-                       .pgoff  = pgoff,
+       if (counter->attr.sample_type & PERF_SAMPLE_PERIOD)
+               return;
+
+       event = (struct freq_event) {
+               .header = {
+                       .type = PERF_EVENT_PERIOD,
+                       .misc = 0,
+                       .size = sizeof(event),
                },
+               .time = sched_clock(),
+               .id = counter->id,
+               .period = period,
        };
 
-       perf_counter_mmap_event(&mmap_event);
+       ret = perf_output_begin(&handle, counter, sizeof(event), 1, 0);
+       if (ret)
+               return;
+
+       perf_output_put(&handle, event);
+       perf_output_end(&handle);
 }
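
The PERF_EVENT_PERIOD records emitted above let analysis tools follow sample_period changes of freq-driven counters. A hypothetical user-space sketch (invented names, not part of the kernel) of how a consumer might use them to re-normalize sample counts:

#include <stdint.h>

struct tool_counter_state {
        uint64_t id;                    /* counter id from the event stream */
        uint64_t period;                /* last PERF_EVENT_PERIOD value seen */
        uint64_t estimated_events;      /* re-normalized event estimate */
};

/* On a PERF_EVENT_PERIOD record for this counter, remember the new period. */
static void tool_handle_period(struct tool_counter_state *st, uint64_t period)
{
        st->period = period;
}

/* Each sample taken at the current period stands for ~period raw events. */
static void tool_handle_sample(struct tool_counter_state *st)
{
        st->estimated_events += st->period;
}
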
 
 /*
- *
+ * IRQ throttle logging
  */
 
-static void perf_log_period(struct perf_counter *counter, u64 period)
+static void perf_log_throttle(struct perf_counter *counter, int enable)
 {
        struct perf_output_handle handle;
        int ret;
@@ -2509,25 +2929,20 @@ static void perf_log_period(struct perf_counter *counter, u64 period)
        struct {
                struct perf_event_header        header;
                u64                             time;
-               u64                             period;
-       } freq_event = {
+       } throttle_event = {
                .header = {
-                       .type = PERF_EVENT_PERIOD,
+                       .type = PERF_EVENT_THROTTLE + 1,
                        .misc = 0,
-                       .size = sizeof(freq_event),
+                       .size = sizeof(throttle_event),
                },
                .time = sched_clock(),
-               .period = period,
        };
 
-       if (counter->hw.irq_period == period)
-               return;
-
-       ret = perf_output_begin(&handle, counter, sizeof(freq_event), 0, 0);
+       ret = perf_output_begin(&handle, counter, sizeof(throttle_event), 1, 0);
        if (ret)
                return;
 
-       perf_output_put(&handle, freq_event);
+       perf_output_put(&handle, throttle_event);
        perf_output_end(&handle);
 }
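
Throttle records pair with the check added to perf_counter_overflow() in the next hunk: interrupts are counted per tick and compared against sysctl_perf_counter_limit, which is expressed in NMIs per second. An illustrative helper mirroring that arithmetic (example only, not in the tree; assumes the per-tick interrupt count is passed in):

/*
 * With HZ == 1000 and the default sysctl_perf_counter_limit of 100000,
 * the 101st interrupt inside a single tick trips the throttle
 * (1000 * 101 > 100000).
 */
static int example_should_throttle(u64 interrupts_this_tick)
{
        return HZ * interrupts_this_tick > (u64)sysctl_perf_counter_limit;
}
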
 
@@ -2539,9 +2954,39 @@ int perf_counter_overflow(struct perf_counter *counter,
                          int nmi, struct pt_regs *regs, u64 addr)
 {
        int events = atomic_read(&counter->event_limit);
+       int throttle = counter->pmu->unthrottle != NULL;
+       struct hw_perf_counter *hwc = &counter->hw;
        int ret = 0;
 
-       counter->hw.interrupts++;
+       if (!throttle) {
+               hwc->interrupts++;
+       } else {
+               if (hwc->interrupts != MAX_INTERRUPTS) {
+                       hwc->interrupts++;
+                       if (HZ * hwc->interrupts > (u64)sysctl_perf_counter_limit) {
+                               hwc->interrupts = MAX_INTERRUPTS;
+                               perf_log_throttle(counter, 0);
+                               ret = 1;
+                       }
+               } else {
+                       /*
+                        * Keep re-disabling the counter even though we
+                        * disabled it on the previous pass - just in case we
+                        * raced with a sched-in and it got enabled again:
+                        */
+                       ret = 1;
+               }
+       }
+
+       if (counter->attr.freq) {
+               u64 now = sched_clock();
+               s64 delta = now - hwc->freq_stamp;
+
+               hwc->freq_stamp = now;
+
+               if (delta > 0 && delta < TICK_NSEC)
+                       perf_adjust_period(counter, NSEC_PER_SEC / (int)delta);
+       }
 
        /*
         * XXX event_limit might not quite work as expected on inherited
@@ -2590,7 +3035,7 @@ static void perf_swcounter_set_period(struct perf_counter *counter)
 {
        struct hw_perf_counter *hwc = &counter->hw;
        s64 left = atomic64_read(&hwc->period_left);
-       s64 period = hwc->irq_period;
+       s64 period = hwc->sample_period;
 
        if (unlikely(left <= -period)) {
                left = period;
@@ -2621,8 +3066,8 @@ static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
         * In case we exclude kernel IPs or are somehow not in interrupt
         * context, provide the next best thing, the user IP.
         */
-       if ((counter->hw_event.exclude_kernel || !regs) &&
-                       !counter->hw_event.exclude_user)
+       if ((counter->attr.exclude_kernel || !regs) &&
+                       !counter->attr.exclude_user)
                regs = task_pt_regs(current);
 
        if (regs) {
@@ -2630,7 +3075,7 @@ static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
                        ret = HRTIMER_NORESTART;
        }
 
-       period = max_t(u64, 10000, counter->hw.irq_period);
+       period = max_t(u64, 10000, counter->hw.sample_period);
        hrtimer_forward_now(hrtimer, ns_to_ktime(period));
 
        return ret;
@@ -2647,27 +3092,63 @@ static void perf_swcounter_overflow(struct perf_counter *counter,
 
 }
 
+static int perf_swcounter_is_counting(struct perf_counter *counter)
+{
+       struct perf_counter_context *ctx;
+       unsigned long flags;
+       int count;
+
+       if (counter->state == PERF_COUNTER_STATE_ACTIVE)
+               return 1;
+
+       if (counter->state != PERF_COUNTER_STATE_INACTIVE)
+               return 0;
+
+       /*
+        * If the counter is inactive, it could be just because
+        * its task is scheduled out, or because it's in a group
+        * which could not go on the PMU.  We want to count in
+        * the first case but not the second.  If the context is
+        * currently active then an inactive software counter must
+        * be the second case.  If it's not currently active then
+        * we need to know whether the counter was active when the
+        * context was last active, which we can determine by
+        * comparing counter->tstamp_stopped with ctx->time.
+        *
+        * We are within an RCU read-side critical section,
+        * which protects the existence of *ctx.
+        */
+       ctx = counter->ctx;
+       spin_lock_irqsave(&ctx->lock, flags);
+       count = 1;
+       /* Re-check state now we have the lock */
+       if (counter->state < PERF_COUNTER_STATE_INACTIVE ||
+           counter->ctx->is_active ||
+           counter->tstamp_stopped < ctx->time)
+               count = 0;
+       spin_unlock_irqrestore(&ctx->lock, flags);
+       return count;
+}
+
 static int perf_swcounter_match(struct perf_counter *counter,
                                enum perf_event_types type,
                                u32 event, struct pt_regs *regs)
 {
-       if (counter->state != PERF_COUNTER_STATE_ACTIVE)
-               return 0;
-
-       if (perf_event_raw(&counter->hw_event))
+       if (!perf_swcounter_is_counting(counter))
                return 0;
 
-       if (perf_event_type(&counter->hw_event) != type)
+       if (counter->attr.type != type)
                return 0;
-
-       if (perf_event_id(&counter->hw_event) != event)
+       if (counter->attr.config != event)
                return 0;
 
-       if (counter->hw_event.exclude_user && user_mode(regs))
-               return 0;
+       if (regs) {
+               if (counter->attr.exclude_user && user_mode(regs))
+                       return 0;
 
-       if (counter->hw_event.exclude_kernel && !user_mode(regs))
-               return 0;
+               if (counter->attr.exclude_kernel && !user_mode(regs))
+                       return 0;
+       }
 
        return 1;
 }
@@ -2676,7 +3157,8 @@ static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
                               int nmi, struct pt_regs *regs, u64 addr)
 {
        int neg = atomic64_add_negative(nr, &counter->hw.count);
-       if (counter->hw.irq_period && !neg)
+
+       if (counter->hw.sample_period && !neg && regs)
                perf_swcounter_overflow(counter, nmi, regs, addr);
 }
 
@@ -2718,6 +3200,7 @@ static void __perf_swcounter_event(enum perf_event_types type, u32 event,
 {
        struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
        int *recursion = perf_swcounter_recursion_context(cpuctx);
+       struct perf_counter_context *ctx;
 
        if (*recursion)
                goto out;
@@ -2727,10 +3210,15 @@ static void __perf_swcounter_event(enum perf_event_types type, u32 event,
 
        perf_swcounter_ctx_event(&cpuctx->ctx, type, event,
                                 nr, nmi, regs, addr);
-       if (cpuctx->task_ctx) {
-               perf_swcounter_ctx_event(cpuctx->task_ctx, type, event,
-                                        nr, nmi, regs, addr);
-       }
+       rcu_read_lock();
+       /*
+        * It doesn't really matter which of the child contexts the
+        * event ends up in.
+        */
+       ctx = rcu_dereference(current->perf_counter_ctxp);
+       if (ctx)
+               perf_swcounter_ctx_event(ctx, type, event, nr, nmi, regs, addr);
+       rcu_read_unlock();
 
        barrier();
        (*recursion)--;
@@ -2791,8 +3279,8 @@ static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
        atomic64_set(&hwc->prev_count, cpu_clock(cpu));
        hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        hwc->hrtimer.function = perf_swcounter_hrtimer;
-       if (hwc->irq_period) {
-               u64 period = max_t(u64, 10000, hwc->irq_period);
+       if (hwc->sample_period) {
+               u64 period = max_t(u64, 10000, hwc->sample_period);
                __hrtimer_start_range_ns(&hwc->hrtimer,
                                ns_to_ktime(period), 0,
                                HRTIMER_MODE_REL, 0);
@@ -2803,7 +3291,7 @@ static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
 
 static void cpu_clock_perf_counter_disable(struct perf_counter *counter)
 {
-       if (counter->hw.irq_period)
+       if (counter->hw.sample_period)
                hrtimer_cancel(&counter->hw.hrtimer);
        cpu_clock_perf_counter_update(counter);
 }
@@ -2843,8 +3331,8 @@ static int task_clock_perf_counter_enable(struct perf_counter *counter)
        atomic64_set(&hwc->prev_count, now);
        hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        hwc->hrtimer.function = perf_swcounter_hrtimer;
-       if (hwc->irq_period) {
-               u64 period = max_t(u64, 10000, hwc->irq_period);
+       if (hwc->sample_period) {
+               u64 period = max_t(u64, 10000, hwc->sample_period);
                __hrtimer_start_range_ns(&hwc->hrtimer,
                                ns_to_ktime(period), 0,
                                HRTIMER_MODE_REL, 0);
@@ -2855,7 +3343,7 @@ static int task_clock_perf_counter_enable(struct perf_counter *counter)
 
 static void task_clock_perf_counter_disable(struct perf_counter *counter)
 {
-       if (counter->hw.irq_period)
+       if (counter->hw.sample_period)
                hrtimer_cancel(&counter->hw.hrtimer);
        task_clock_perf_counter_update(counter, counter->ctx->time);
 
@@ -2886,55 +3374,24 @@ static const struct pmu perf_ops_task_clock = {
 /*
  * Software counter: cpu migrations
  */
-
-static inline u64 get_cpu_migrations(struct perf_counter *counter)
-{
-       struct task_struct *curr = counter->ctx->task;
-
-       if (curr)
-               return curr->se.nr_migrations;
-       return cpu_nr_migrations(smp_processor_id());
-}
-
-static void cpu_migrations_perf_counter_update(struct perf_counter *counter)
-{
-       u64 prev, now;
-       s64 delta;
-
-       prev = atomic64_read(&counter->hw.prev_count);
-       now = get_cpu_migrations(counter);
-
-       atomic64_set(&counter->hw.prev_count, now);
-
-       delta = now - prev;
-
-       atomic64_add(delta, &counter->count);
-}
-
-static void cpu_migrations_perf_counter_read(struct perf_counter *counter)
+void perf_counter_task_migration(struct task_struct *task, int cpu)
 {
-       cpu_migrations_perf_counter_update(counter);
-}
+       struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
+       struct perf_counter_context *ctx;
 
-static int cpu_migrations_perf_counter_enable(struct perf_counter *counter)
-{
-       if (counter->prev_state <= PERF_COUNTER_STATE_OFF)
-               atomic64_set(&counter->hw.prev_count,
-                            get_cpu_migrations(counter));
-       return 0;
-}
+       perf_swcounter_ctx_event(&cpuctx->ctx, PERF_TYPE_SOFTWARE,
+                                PERF_COUNT_CPU_MIGRATIONS,
+                                1, 1, NULL, 0);
 
-static void cpu_migrations_perf_counter_disable(struct perf_counter *counter)
-{
-       cpu_migrations_perf_counter_update(counter);
+       ctx = perf_pin_task_context(task);
+       if (ctx) {
+               perf_swcounter_ctx_event(ctx, PERF_TYPE_SOFTWARE,
+                                        PERF_COUNT_CPU_MIGRATIONS,
+                                        1, 1, NULL, 0);
+               perf_unpin_context(ctx);
+       }
 }
 
-static const struct pmu perf_ops_cpu_migrations = {
-       .enable         = cpu_migrations_perf_counter_enable,
-       .disable        = cpu_migrations_perf_counter_disable,
-       .read           = cpu_migrations_perf_counter_read,
-};
-
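
With the dedicated migrations pmu gone, CPU migrations are now fed in as ordinary software events through perf_counter_task_migration() above. A hypothetical sketch of the expected caller (the real hook sits in the scheduler, e.g. on the set_task_cpu() path in kernel/sched.c, which is outside this file):

static void example_report_migration(struct task_struct *p, int new_cpu)
{
        /* one software event per migration, counted on the target CPU */
        perf_counter_task_migration(p, new_cpu);
}
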
 #ifdef CONFIG_EVENT_PROFILE
 void perf_tpcounter_event(int event_id)
 {
@@ -2952,12 +3409,12 @@ extern void ftrace_profile_disable(int);
 
 static void tp_perf_counter_destroy(struct perf_counter *counter)
 {
-       ftrace_profile_disable(perf_event_id(&counter->hw_event));
+       ftrace_profile_disable(perf_event_id(&counter->attr));
 }
 
 static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
 {
-       int event_id = perf_event_id(&counter->hw_event);
+       int event_id = perf_event_id(&counter->attr);
        int ret;
 
        ret = ftrace_profile_enable(event_id);
@@ -2965,7 +3422,6 @@ static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
                return NULL;
 
        counter->destroy = tp_perf_counter_destroy;
-       counter->hw.irq_period = counter->hw_event.irq_period;
 
        return &perf_ops_generic;
 }
@@ -2987,7 +3443,7 @@ static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
         * to be kernel events, and page faults are never hypervisor
         * events.
         */
-       switch (perf_event_id(&counter->hw_event)) {
+       switch (counter->attr.config) {
        case PERF_COUNT_CPU_CLOCK:
                pmu = &perf_ops_cpu_clock;
 
@@ -3007,11 +3463,8 @@ static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
        case PERF_COUNT_PAGE_FAULTS_MIN:
        case PERF_COUNT_PAGE_FAULTS_MAJ:
        case PERF_COUNT_CONTEXT_SWITCHES:
-               pmu = &perf_ops_generic;
-               break;
        case PERF_COUNT_CPU_MIGRATIONS:
-               if (!counter->hw_event.exclude_kernel)
-                       pmu = &perf_ops_cpu_migrations;
+               pmu = &perf_ops_generic;
                break;
        }
 
@@ -3022,7 +3475,7 @@ static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
  * Allocate and initialize a counter structure
  */
 static struct perf_counter *
-perf_counter_alloc(struct perf_counter_hw_event *hw_event,
+perf_counter_alloc(struct perf_counter_attr *attr,
                   int cpu,
                   struct perf_counter_context *ctx,
                   struct perf_counter *group_leader,
@@ -3044,7 +3497,9 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event,
        if (!group_leader)
                group_leader = counter;
 
-       mutex_init(&counter->mutex);
+       mutex_init(&counter->child_mutex);
+       INIT_LIST_HEAD(&counter->child_list);
+
        INIT_LIST_HEAD(&counter->list_entry);
        INIT_LIST_HEAD(&counter->event_entry);
        INIT_LIST_HEAD(&counter->sibling_list);
@@ -3052,40 +3507,44 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event,
 
        mutex_init(&counter->mmap_mutex);
 
-       INIT_LIST_HEAD(&counter->child_list);
+       counter->cpu            = cpu;
+       counter->attr           = *attr;
+       counter->group_leader   = group_leader;
+       counter->pmu            = NULL;
+       counter->ctx            = ctx;
+       counter->oncpu          = -1;
 
-       counter->cpu                    = cpu;
-       counter->hw_event               = *hw_event;
-       counter->group_leader           = group_leader;
-       counter->pmu                    = NULL;
-       counter->ctx                    = ctx;
-       get_ctx(ctx);
+       counter->ns             = get_pid_ns(current->nsproxy->pid_ns);
+       counter->id             = atomic64_inc_return(&perf_counter_id);
 
-       counter->state = PERF_COUNTER_STATE_INACTIVE;
-       if (hw_event->disabled)
+       counter->state          = PERF_COUNTER_STATE_INACTIVE;
+
+       if (attr->disabled)
                counter->state = PERF_COUNTER_STATE_OFF;
 
        pmu = NULL;
 
        hwc = &counter->hw;
-       if (hw_event->freq && hw_event->irq_freq)
-               hwc->irq_period = div64_u64(TICK_NSEC, hw_event->irq_freq);
-       else
-               hwc->irq_period = hw_event->irq_period;
+       hwc->sample_period = attr->sample_period;
+       if (attr->freq && attr->sample_freq)
+               hwc->sample_period = 1;
+
+       atomic64_set(&hwc->period_left, hwc->sample_period);
 
        /*
-        * we currently do not support PERF_RECORD_GROUP on inherited counters
+        * we currently do not support PERF_SAMPLE_GROUP on inherited counters
         */
-       if (hw_event->inherit && (hw_event->record_type & PERF_RECORD_GROUP))
+       if (attr->inherit && (attr->sample_type & PERF_SAMPLE_GROUP))
                goto done;
 
-       if (perf_event_raw(hw_event)) {
+       if (attr->type == PERF_TYPE_RAW) {
                pmu = hw_perf_counter_init(counter);
                goto done;
        }
 
-       switch (perf_event_type(hw_event)) {
+       switch (attr->type) {
        case PERF_TYPE_HARDWARE:
+       case PERF_TYPE_HW_CACHE:
                pmu = hw_perf_counter_init(counter);
                break;
 
@@ -3105,6 +3564,8 @@ done:
                err = PTR_ERR(pmu);
 
        if (err) {
+               if (counter->ns)
+                       put_pid_ns(counter->ns);
                kfree(counter);
                return ERR_PTR(err);
        }
@@ -3112,12 +3573,10 @@ done:
        counter->pmu = pmu;
 
        atomic_inc(&nr_counters);
-       if (counter->hw_event.mmap)
-               atomic_inc(&nr_mmap_tracking);
-       if (counter->hw_event.munmap)
-               atomic_inc(&nr_munmap_tracking);
-       if (counter->hw_event.comm)
-               atomic_inc(&nr_comm_tracking);
+       if (counter->attr.mmap)
+               atomic_inc(&nr_mmap_counters);
+       if (counter->attr.comm)
+               atomic_inc(&nr_comm_counters);
 
        return counter;
 }
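
The frequency handling is the heart of this patch: a counter opened with attr->freq set now starts with a sample_period of 1 and relies on perf_adjust_period(), called from perf_counter_overflow() above, to grow the period until the interrupt rate settles around attr->sample_freq. A rough sketch of one such rescaling step, assuming the caller passes the interrupt rate observed over the last interval (illustrative only, not the in-tree perf_adjust_period()):

static u64 example_rescale_period(u64 cur_period, u64 irqs_per_sec,
                                  u64 sample_freq)
{
        /* raw events per second generated at the current period */
        u64 events_per_sec = irqs_per_sec * cur_period;
        /* period needed to bring the interrupt rate down to sample_freq */
        u64 new_period = div64_u64(events_per_sec, sample_freq);

        return new_period ? new_period : 1;
}
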
@@ -3125,17 +3584,17 @@ done:
 /**
  * sys_perf_counter_open - open a performance counter, associate it to a task/cpu
  *
- * @hw_event_uptr:     event type attributes for monitoring/sampling
+ * @attr_uptr: event type attributes for monitoring/sampling
  * @pid:               target pid
  * @cpu:               target cpu
  * @group_fd:          group leader counter fd
  */
 SYSCALL_DEFINE5(perf_counter_open,
-               const struct perf_counter_hw_event __user *, hw_event_uptr,
+               const struct perf_counter_attr __user *, attr_uptr,
                pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
 {
        struct perf_counter *counter, *group_leader;
-       struct perf_counter_hw_event hw_event;
+       struct perf_counter_attr attr;
        struct perf_counter_context *ctx;
        struct file *counter_file = NULL;
        struct file *group_file = NULL;
@@ -3147,7 +3606,7 @@ SYSCALL_DEFINE5(perf_counter_open,
        if (flags)
                return -EINVAL;
 
-       if (copy_from_user(&hw_event, hw_event_uptr, sizeof(hw_event)) != 0)
+       if (copy_from_user(&attr, attr_uptr, sizeof(attr)) != 0)
                return -EFAULT;
 
        /*
@@ -3185,11 +3644,11 @@ SYSCALL_DEFINE5(perf_counter_open,
                /*
                 * Only a group leader can be exclusive or pinned
                 */
-               if (hw_event.exclusive || hw_event.pinned)
+               if (attr.exclusive || attr.pinned)
                        goto err_put_context;
        }
 
-       counter = perf_counter_alloc(&hw_event, cpu, ctx, group_leader,
+       counter = perf_counter_alloc(&attr, cpu, ctx, group_leader,
                                     GFP_KERNEL);
        ret = PTR_ERR(counter);
        if (IS_ERR(counter))
@@ -3204,10 +3663,18 @@ SYSCALL_DEFINE5(perf_counter_open,
                goto err_free_put_context;
 
        counter->filp = counter_file;
+       WARN_ON_ONCE(ctx->parent_ctx);
        mutex_lock(&ctx->mutex);
        perf_install_in_context(ctx, counter, cpu);
+       ++ctx->generation;
        mutex_unlock(&ctx->mutex);
 
+       counter->owner = current;
+       get_task_struct(current);
+       mutex_lock(&current->perf_counter_mutex);
+       list_add_tail(&counter->owner_entry, &current->perf_counter_list);
+       mutex_unlock(&current->perf_counter_mutex);
+
        fput_light(counter_file, fput_needed2);
 
 out_fput:
@@ -3219,7 +3686,7 @@ err_free_put_context:
        kfree(counter);
 
 err_put_context:
-       put_context(ctx);
+       put_ctx(ctx);
 
        goto out_fput;
 }
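
For context, a minimal hypothetical user-space use of the renamed attribute structure; the wrapper below and the include path are assumptions, while the field and constant names follow the kernel side of this patch:

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_counter.h>

static int example_open_counter(pid_t pid, int cpu)
{
        struct perf_counter_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.type        = PERF_TYPE_SOFTWARE;
        attr.config      = PERF_COUNT_CPU_CLOCK;
        attr.freq        = 1;
        attr.sample_freq = 1000;        /* aim for ~1000 samples/sec */

        /* group_fd == -1 creates a new group; flags must be 0 */
        return syscall(__NR_perf_counter_open, &attr, pid, cpu, -1, 0);
}
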
@@ -3246,11 +3713,25 @@ inherit_counter(struct perf_counter *parent_counter,
        if (parent_counter->parent)
                parent_counter = parent_counter->parent;
 
-       child_counter = perf_counter_alloc(&parent_counter->hw_event,
+       child_counter = perf_counter_alloc(&parent_counter->attr,
                                           parent_counter->cpu, child_ctx,
                                           group_leader, GFP_KERNEL);
        if (IS_ERR(child_counter))
                return child_counter;
+       get_ctx(child_ctx);
+
+       /*
+        * Make the child state follow the state of the parent counter,
+        * not its attr.disabled bit.  We hold the parent's mutex,
+        * so we won't race with perf_counter_{en, dis}able_family.
+        */
+       if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE)
+               child_counter->state = PERF_COUNTER_STATE_INACTIVE;
+       else
+               child_counter->state = PERF_COUNTER_STATE_OFF;
+
+       if (parent_counter->attr.freq)
+               child_counter->hw.sample_period = parent_counter->hw.sample_period;
 
        /*
         * Link it up in the child's context:
@@ -3261,7 +3742,7 @@ inherit_counter(struct perf_counter *parent_counter,
        /*
         * inherit into child's child as well:
         */
-       child_counter->hw_event.inherit = 1;
+       child_counter->attr.inherit = 1;
 
        /*
         * Get a reference to the parent filp - we will fput it
@@ -3274,20 +3755,10 @@ inherit_counter(struct perf_counter *parent_counter,
        /*
         * Link this into the parent counter's child list
         */
-       mutex_lock(&parent_counter->mutex);
+       WARN_ON_ONCE(parent_counter->ctx->parent_ctx);
+       mutex_lock(&parent_counter->child_mutex);
        list_add_tail(&child_counter->child_list, &parent_counter->child_list);
-
-       /*
-        * Make the child state follow the state of the parent counter,
-        * not its hw_event.disabled bit.  We hold the parent's mutex,
-        * so we won't race with perf_counter_{en,dis}able_family.
-        */
-       if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE)
-               child_counter->state = PERF_COUNTER_STATE_INACTIVE;
-       else
-               child_counter->state = PERF_COUNTER_STATE_OFF;
-
-       mutex_unlock(&parent_counter->mutex);
+       mutex_unlock(&parent_counter->child_mutex);
 
        return child_counter;
 }
@@ -3334,9 +3805,10 @@ static void sync_child_counter(struct perf_counter *child_counter,
        /*
         * Remove this counter from the parent's list
         */
-       mutex_lock(&parent_counter->mutex);
+       WARN_ON_ONCE(parent_counter->ctx->parent_ctx);
+       mutex_lock(&parent_counter->child_mutex);
        list_del_init(&child_counter->child_list);
-       mutex_unlock(&parent_counter->mutex);
+       mutex_unlock(&parent_counter->child_mutex);
 
        /*
         * Release the parent counter, if this was the last
@@ -3346,22 +3818,13 @@ static void sync_child_counter(struct perf_counter *child_counter,
 }
 
 static void
-__perf_counter_exit_task(struct task_struct *child,
-                        struct perf_counter *child_counter,
+__perf_counter_exit_task(struct perf_counter *child_counter,
                         struct perf_counter_context *child_ctx)
 {
        struct perf_counter *parent_counter;
 
-       /*
-        * Protect against concurrent operations on child_counter
-        * due its fd getting closed, etc.
-        */
-       mutex_lock(&child_counter->mutex);
-
        update_counter_times(child_counter);
-       list_del_counter(child_counter, child_ctx);
-
-       mutex_unlock(&child_counter->mutex);
+       perf_counter_remove_from_context(child_counter);
 
        parent_counter = child_counter->parent;
        /*
@@ -3377,11 +3840,6 @@ __perf_counter_exit_task(struct task_struct *child,
 
 /*
  * When a child task exits, feed back counter values to parent counters.
- *
- * Note: we may be running in child context, but the PID is not hashed
- * anymore so new counters will not be added.
- * (XXX not sure that is true when we get called from flush_old_exec.
- *  -- paulus)
  */
 void perf_counter_exit_task(struct task_struct *child)
 {
@@ -3389,16 +3847,36 @@ void perf_counter_exit_task(struct task_struct *child)
        struct perf_counter_context *child_ctx;
        unsigned long flags;
 
-       WARN_ON_ONCE(child != current);
-
-       child_ctx = child->perf_counter_ctxp;
-
-       if (likely(!child_ctx))
+       if (likely(!child->perf_counter_ctxp))
                return;
 
        local_irq_save(flags);
+       /*
+        * We can't reschedule here because interrupts are disabled,
+        * and either child is current or it is a task that can't be
+        * scheduled, so we are safe from a reschedule swapping our
+        * context out from under us.
+        */
+       child_ctx = child->perf_counter_ctxp;
        __perf_counter_task_sched_out(child_ctx);
+
+       /*
+        * Take the context lock here so that if find_get_context is
+        * reading child->perf_counter_ctxp, we wait until it has
+        * incremented the context's refcount before we do put_ctx below.
+        */
+       spin_lock(&child_ctx->lock);
        child->perf_counter_ctxp = NULL;
+       if (child_ctx->parent_ctx) {
+               /*
+                * This context is a clone; unclone it so it can't get
+                * swapped to another process while we're removing all
+                * the counters from it.
+                */
+               put_ctx(child_ctx->parent_ctx);
+               child_ctx->parent_ctx = NULL;
+       }
+       spin_unlock(&child_ctx->lock);
        local_irq_restore(flags);
 
        mutex_lock(&child_ctx->mutex);
@@ -3406,7 +3884,7 @@ void perf_counter_exit_task(struct task_struct *child)
 again:
        list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list,
                                 list_entry)
-               __perf_counter_exit_task(child, child_counter, child_ctx);
+               __perf_counter_exit_task(child_counter, child_ctx);
 
        /*
         * If the last counter was a group counter, it will have appended all
@@ -3421,17 +3899,64 @@ again:
        put_ctx(child_ctx);
 }
 
+/*
+ * Free an unexposed, unused context, as created by inheritance in
+ * perf_counter_init_task() below; used by fork() in case of failure.
+ */
+void perf_counter_free_task(struct task_struct *task)
+{
+       struct perf_counter_context *ctx = task->perf_counter_ctxp;
+       struct perf_counter *counter, *tmp;
+
+       if (!ctx)
+               return;
+
+       mutex_lock(&ctx->mutex);
+again:
+       list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry) {
+               struct perf_counter *parent = counter->parent;
+
+               if (WARN_ON_ONCE(!parent))
+                       continue;
+
+               mutex_lock(&parent->child_mutex);
+               list_del_init(&counter->child_list);
+               mutex_unlock(&parent->child_mutex);
+
+               fput(parent->filp);
+
+               list_del_counter(counter, ctx);
+               free_counter(counter);
+       }
+
+       if (!list_empty(&ctx->counter_list))
+               goto again;
+
+       mutex_unlock(&ctx->mutex);
+
+       put_ctx(ctx);
+}
+
 /*
  * Initialize the perf_counter context in task_struct
  */
-void perf_counter_init_task(struct task_struct *child)
+int perf_counter_init_task(struct task_struct *child)
 {
        struct perf_counter_context *child_ctx, *parent_ctx;
+       struct perf_counter_context *cloned_ctx;
        struct perf_counter *counter;
        struct task_struct *parent = current;
+       int inherited_all = 1;
+       int ret = 0;
 
        child->perf_counter_ctxp = NULL;
 
+       mutex_init(&child->perf_counter_mutex);
+       INIT_LIST_HEAD(&child->perf_counter_list);
+
+       if (likely(!parent->perf_counter_ctxp))
+               return 0;
+
        /*
         * This is executed from the parent task context, so inherit
         * counters that have been marked for cloning.
@@ -3440,14 +3965,24 @@ void perf_counter_init_task(struct task_struct *child)
 
        child_ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL);
        if (!child_ctx)
-               return;
-
-       parent_ctx = parent->perf_counter_ctxp;
-       if (likely(!parent_ctx || !parent_ctx->nr_counters))
-               return;
+               return -ENOMEM;
 
        __perf_counter_init_context(child_ctx, child);
        child->perf_counter_ctxp = child_ctx;
+       get_task_struct(child);
+
+       /*
+        * If the parent's context is a clone, pin it so it won't get
+        * swapped under us.
+        */
+       parent_ctx = perf_pin_task_context(parent);
+
+       /*
+        * No need to check if parent_ctx != NULL here; since we saw
+        * it non-NULL earlier, the only reason for it to become NULL
+        * is if we exit, and since we're currently in the middle of
+        * a fork we can't be exiting at the same time.
+        */
 
        /*
         * Lock the parent list. No need to lock the child - not PID
@@ -3463,15 +3998,44 @@ void perf_counter_init_task(struct task_struct *child)
                if (counter != counter->group_leader)
                        continue;
 
-               if (!counter->hw_event.inherit)
+               if (!counter->attr.inherit) {
+                       inherited_all = 0;
                        continue;
+               }
 
-               if (inherit_group(counter, parent,
-                                 parent_ctx, child, child_ctx))
+               ret = inherit_group(counter, parent, parent_ctx,
+                                            child, child_ctx);
+               if (ret) {
+                       inherited_all = 0;
                        break;
+               }
+       }
+
+       if (inherited_all) {
+               /*
+                * Mark the child context as a clone of the parent
+                * context, or of whatever the parent is a clone of.
+                * Note that if the parent is a clone, it could get
+                * uncloned at any point, but that doesn't matter
+                * because the list of counters and the generation
+                * count can't have changed since we took the mutex.
+                */
+               cloned_ctx = rcu_dereference(parent_ctx->parent_ctx);
+               if (cloned_ctx) {
+                       child_ctx->parent_ctx = cloned_ctx;
+                       child_ctx->parent_gen = parent_ctx->parent_gen;
+               } else {
+                       child_ctx->parent_ctx = parent_ctx;
+                       child_ctx->parent_gen = parent_ctx->generation;
+               }
+               get_ctx(child_ctx->parent_ctx);
        }
 
        mutex_unlock(&parent_ctx->mutex);
+
+       perf_unpin_context(parent_ctx);
+
+       return ret;
 }
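
perf_counter_init_task() now returns an error code and pairs with perf_counter_free_task() above, which tears down a context that was built during fork() but never exposed. A hypothetical sketch of how a fork path is expected to use the pair (the real call sites live in kernel/fork.c and are not part of this file):

static int example_copy_process_counters(struct task_struct *child)
{
        int retval = perf_counter_init_task(child);

        if (retval)
                return retval;  /* nothing was attached, fork just fails */

        /*
         * If a later step of the fork fails, the never-exposed context
         * is dropped with perf_counter_free_task(child) before the
         * child task itself is freed.
         */
        return 0;
}
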
 
 static void __cpuinit perf_counter_init_cpu(int cpu)
@@ -3535,8 +4099,12 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
        return NOTIFY_OK;
 }
 
+/*
+ * This has to have a higher priority than migration_notifier in sched.c.
+ */
 static struct notifier_block __cpuinitdata perf_cpu_nb = {
        .notifier_call          = perf_cpu_notify,
+       .priority               = 20,
 };
 
 void __init perf_counter_init(void)