Merge branch 'perf/urgent' into perf/core, to resolve conflict
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 52bedc5a5aaa190be3545d7534ce065ce735e473..9de459a4dac73cf2c37dc5f8e19bb12be1a91a5e 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -412,7 +412,8 @@ int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
        if (ret || !write)
                return ret;
 
-       if (sysctl_perf_cpu_time_max_percent == 100) {
+       if (sysctl_perf_cpu_time_max_percent == 100 ||
+           sysctl_perf_cpu_time_max_percent == 0) {
                printk(KERN_WARNING
                       "perf: Dynamic interrupt throttling disabled, can hang your system!\n");
                WRITE_ONCE(perf_sample_allowed_ns, 0);
@@ -1105,6 +1106,7 @@ static void put_ctx(struct perf_event_context *ctx)
  * function.
  *
  * Lock order:
+ *    cred_guard_mutex
  *     task_struct::perf_event_mutex
  *       perf_event_context::mutex
  *         perf_event::child_mutex;
@@ -1925,8 +1927,13 @@ event_sched_in(struct perf_event *event,
        if (event->state <= PERF_EVENT_STATE_OFF)
                return 0;
 
-       event->state = PERF_EVENT_STATE_ACTIVE;
-       event->oncpu = smp_processor_id();
+       WRITE_ONCE(event->oncpu, smp_processor_id());
+       /*
+        * Order event::oncpu write to happen before the ACTIVE state
+        * is visible.
+        */
+       smp_wmb();
+       WRITE_ONCE(event->state, PERF_EVENT_STATE_ACTIVE);
 
        /*
         * Unthrottle events, since we scheduled we might have missed several
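The WRITE_ONCE()/smp_wmb() publication of ->oncpu before the ACTIVE state above pairs with the smp_rmb() in __perf_event_stop() added below. Roughly the same ordering, expressed as a self-contained userspace analogue using C11 atomics instead of the kernel barriers (illustrative only, not part of the patch; the names merely mirror the kernel fields):

/*
 * Userspace analogue of the oncpu/state publication in event_sched_in()
 * and the check in __perf_event_stop(); not kernel code.
 */
#include <stdatomic.h>
#include <stdio.h>

static _Atomic int state;	/* 0 = inactive, 1 = ACTIVE */
static _Atomic int oncpu = -1;

static void sched_in(int cpu)
{
	/* publish oncpu first ... */
	atomic_store_explicit(&oncpu, cpu, memory_order_relaxed);
	/* ... then make ACTIVE visible; the release store orders the
	 * oncpu store before it, like the smp_wmb() in the patch */
	atomic_store_explicit(&state, 1, memory_order_release);
}

static int try_stop(int this_cpu)
{
	/* acquire pairs with the release above (like the smp_rmb() in
	 * __perf_event_stop()), so oncpu is not observed stale */
	if (atomic_load_explicit(&state, memory_order_acquire) != 1)
		return 0;
	if (atomic_load_explicit(&oncpu, memory_order_relaxed) != this_cpu)
		return -1;	/* the kernel returns -EAGAIN here */
	return 0;
}

int main(void)
{
	sched_in(2);
	printf("stop on cpu 2 -> %d, on cpu 3 -> %d\n", try_stop(2), try_stop(3));
	return 0;
}

A reader that observes state == ACTIVE through the acquire load is guaranteed to see the oncpu value written before the release, which is what lets __perf_event_stop() trust its oncpu check.
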
@@ -2358,6 +2365,29 @@ void perf_event_enable(struct perf_event *event)
 }
 EXPORT_SYMBOL_GPL(perf_event_enable);
 
+static int __perf_event_stop(void *info)
+{
+       struct perf_event *event = info;
+
+       /* for AUX events, our job is done if the event is already inactive */
+       if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE)
+               return 0;
+
+       /* matches smp_wmb() in event_sched_in() */
+       smp_rmb();
+
+       /*
+        * There is a window with interrupts enabled before we get here,
+        * so we need to check again lest we try to stop another CPU's event.
+        */
+       if (READ_ONCE(event->oncpu) != smp_processor_id())
+               return -EAGAIN;
+
+       event->pmu->stop(event, PERF_EF_UPDATE);
+
+       return 0;
+}
+
 static int _perf_event_refresh(struct perf_event *event, int refresh)
 {
        /*
@@ -3420,7 +3450,6 @@ static struct task_struct *
 find_lively_task_by_vpid(pid_t vpid)
 {
        struct task_struct *task;
-       int err;
 
        rcu_read_lock();
        if (!vpid)
@@ -3434,16 +3463,7 @@ find_lively_task_by_vpid(pid_t vpid)
        if (!task)
                return ERR_PTR(-ESRCH);
 
-       /* Reuse ptrace permission checks for now. */
-       err = -EACCES;
-       if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
-               goto errout;
-
        return task;
-errout:
-       put_task_struct(task);
-       return ERR_PTR(err);
-
 }
 
 /*
@@ -4351,6 +4371,19 @@ static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned lon
        case PERF_EVENT_IOC_SET_BPF:
                return perf_event_set_bpf_prog(event, arg);
 
+       case PERF_EVENT_IOC_PAUSE_OUTPUT: {
+               struct ring_buffer *rb;
+
+               rcu_read_lock();
+               rb = rcu_dereference(event->rb);
+               if (!rb || !rb->nr_pages) {
+                       rcu_read_unlock();
+                       return -EINVAL;
+               }
+               rb_toggle_paused(rb, !!arg);
+               rcu_read_unlock();
+               return 0;
+       }
        default:
                return -ENOTTY;
        }
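The new PERF_EVENT_IOC_PAUSE_OUTPUT case toggles rb->paused for an event that already has a ring buffer mapped. A minimal userspace sketch of driving it, assuming a <linux/perf_event.h> new enough to define the ioctl and an event fd whose ring buffer has been mmap()ed (otherwise the rb->nr_pages check above yields -EINVAL):

#include <sys/ioctl.h>
#include <linux/perf_event.h>

static int pause_output(int fd, int pause)
{
	/* a non-zero argument pauses output into the ring buffer,
	 * zero resumes it */
	return ioctl(fd, PERF_EVENT_IOC_PAUSE_OUTPUT, pause ? 1 : 0);
}

A typical pattern for overwrite-mode buffers is pause_output(fd, 1) before walking the buffer and pause_output(fd, 0) once reading is done.
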
@@ -4667,6 +4700,8 @@ static void perf_mmap_open(struct vm_area_struct *vma)
                event->pmu->event_mapped(event);
 }
 
+static void perf_pmu_output_stop(struct perf_event *event);
+
 /*
  * A buffer can be mmap()ed multiple times; either directly through the same
  * event, or through other events by use of perf_event_set_output().
@@ -4694,10 +4729,22 @@ static void perf_mmap_close(struct vm_area_struct *vma)
         */
        if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff &&
            atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) {
+               /*
+                * Stop all AUX events that are writing to this buffer,
+                * so that we can free its AUX pages and corresponding PMU
+                * data. Note that after rb::aux_mmap_count dropped to zero,
+                * they won't start any more (see perf_aux_output_begin()).
+                */
+               perf_pmu_output_stop(event);
+
+               /* now it's safe to free the pages */
                atomic_long_sub(rb->aux_nr_pages, &mmap_user->locked_vm);
                vma->vm_mm->pinned_vm -= rb->aux_mmap_locked;
 
+               /* this has to be the last one */
                rb_free_aux(rb);
+               WARN_ON_ONCE(atomic_read(&rb->aux_refcount));
+
                mutex_unlock(&event->mmap_mutex);
        }
 
@@ -5638,9 +5685,13 @@ void perf_prepare_sample(struct perf_event_header *header,
        }
 }
 
-void perf_event_output(struct perf_event *event,
-                       struct perf_sample_data *data,
-                       struct pt_regs *regs)
+static void __always_inline
+__perf_event_output(struct perf_event *event,
+                   struct perf_sample_data *data,
+                   struct pt_regs *regs,
+                   int (*output_begin)(struct perf_output_handle *,
+                                       struct perf_event *,
+                                       unsigned int))
 {
        struct perf_output_handle handle;
        struct perf_event_header header;
@@ -5650,7 +5701,7 @@ void perf_event_output(struct perf_event *event,
 
        perf_prepare_sample(&header, data, event, regs);
 
-       if (perf_output_begin(&handle, event, header.size))
+       if (output_begin(&handle, event, header.size))
                goto exit;
 
        perf_output_sample(&handle, &header, data, event);
@@ -5661,6 +5712,30 @@ exit:
        rcu_read_unlock();
 }
 
+void
+perf_event_output_forward(struct perf_event *event,
+                        struct perf_sample_data *data,
+                        struct pt_regs *regs)
+{
+       __perf_event_output(event, data, regs, perf_output_begin_forward);
+}
+
+void
+perf_event_output_backward(struct perf_event *event,
+                          struct perf_sample_data *data,
+                          struct pt_regs *regs)
+{
+       __perf_event_output(event, data, regs, perf_output_begin_backward);
+}
+
+void
+perf_event_output(struct perf_event *event,
+                 struct perf_sample_data *data,
+                 struct pt_regs *regs)
+{
+       __perf_event_output(event, data, regs, perf_output_begin);
+}
+
 /*
  * read event_id
  */
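perf_event_output_backward() becomes the default overflow handler for events whose buffer is written from the end toward the beginning (see the overflow_handler hunk further down). A minimal sketch of opening such an event from userspace, assuming a uapi perf_event_attr that already carries the write_backward bit introduced elsewhere in this series; data_pages must be a power of two and error handling is omitted:

#include <linux/perf_event.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <string.h>
#include <unistd.h>

static void *open_backward_rb(int *fdp, size_t data_pages)
{
	size_t len = (data_pages + 1) * sysconf(_SC_PAGESIZE);
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_CPU_CLOCK;
	attr.sample_type = PERF_SAMPLE_TID | PERF_SAMPLE_TIME;
	attr.sample_period = 1000000;	/* cpu-clock period in ns */
	attr.write_backward = 1;	/* records go from the end toward the start */

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	*fdp = fd;

	/* an overwrite buffer is mapped read-only: data_tail is never
	 * advanced, the kernel simply keeps overwriting old records */
	return mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
}
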
@@ -5768,6 +5843,80 @@ next:
        rcu_read_unlock();
 }
 
+struct remote_output {
+       struct ring_buffer      *rb;
+       int                     err;
+};
+
+static void __perf_event_output_stop(struct perf_event *event, void *data)
+{
+       struct perf_event *parent = event->parent;
+       struct remote_output *ro = data;
+       struct ring_buffer *rb = ro->rb;
+
+       if (!has_aux(event))
+               return;
+
+       if (!parent)
+               parent = event;
+
+       /*
+        * In case of inheritance, it will be the parent that links to the
+        * ring-buffer, but it will be the child that's actually using it:
+        */
+       if (rcu_dereference(parent->rb) == rb)
+               ro->err = __perf_event_stop(event);
+}
+
+static int __perf_pmu_output_stop(void *info)
+{
+       struct perf_event *event = info;
+       struct pmu *pmu = event->pmu;
+       struct perf_cpu_context *cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
+       struct remote_output ro = {
+               .rb     = event->rb,
+       };
+
+       rcu_read_lock();
+       perf_event_aux_ctx(&cpuctx->ctx, __perf_event_output_stop, &ro);
+       if (cpuctx->task_ctx)
+               perf_event_aux_ctx(cpuctx->task_ctx, __perf_event_output_stop,
+                                  &ro);
+       rcu_read_unlock();
+
+       return ro.err;
+}
+
+static void perf_pmu_output_stop(struct perf_event *event)
+{
+       struct perf_event *iter;
+       int err, cpu;
+
+restart:
+       rcu_read_lock();
+       list_for_each_entry_rcu(iter, &event->rb->event_list, rb_entry) {
+               /*
+                * For per-CPU events, we need to make sure that neither they
+                * nor their children are running; for cpu==-1 events it's
+                * sufficient to stop the event itself if it's active, since
+                * it can't have children.
+                */
+               cpu = iter->cpu;
+               if (cpu == -1)
+                       cpu = READ_ONCE(iter->oncpu);
+
+               if (cpu == -1)
+                       continue;
+
+               err = cpu_function_call(cpu, __perf_pmu_output_stop, event);
+               if (err == -EAGAIN) {
+                       rcu_read_unlock();
+                       goto restart;
+               }
+       }
+       rcu_read_unlock();
+}
+
 /*
  * task tracking -- fork/exit
  *
@@ -6499,10 +6648,7 @@ static int __perf_event_overflow(struct perf_event *event,
                irq_work_queue(&event->pending);
        }
 
-       if (event->overflow_handler)
-               event->overflow_handler(event, data, regs);
-       else
-               perf_event_output(event, data, regs);
+       event->overflow_handler(event, data, regs);
 
        if (*perf_event_fasync(event) && event->pending_kill) {
                event->pending_wakeup = 1;
@@ -7693,6 +7839,15 @@ int perf_pmu_register(struct pmu *pmu, const char *name, int type)
        }
 
 skip_type:
+       if (pmu->task_ctx_nr == perf_hw_context) {
+               static int hw_context_taken = 0;
+
+               if (WARN_ON_ONCE(hw_context_taken))
+                       pmu->task_ctx_nr = perf_invalid_context;
+
+               hw_context_taken = 1;
+       }
+
        pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
        if (pmu->pmu_cpu_context)
                goto got_cpu_context;
@@ -8014,8 +8169,16 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
                context = parent_event->overflow_handler_context;
        }
 
-       event->overflow_handler = overflow_handler;
-       event->overflow_handler_context = context;
+       if (overflow_handler) {
+               event->overflow_handler = overflow_handler;
+               event->overflow_handler_context = context;
+       } else if (is_write_backward(event)) {
+               event->overflow_handler = perf_event_output_backward;
+               event->overflow_handler_context = NULL;
+       } else {
+               event->overflow_handler = perf_event_output_forward;
+               event->overflow_handler_context = NULL;
+       }
 
        perf_event__state_init(event);
 
@@ -8247,6 +8410,13 @@ perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
        if (output_event->clock != event->clock)
                goto out;
 
+       /*
+        * Either writing ring buffer from beginning or from end.
+        * Mixing is not allowed.
+        */
+       if (is_write_backward(output_event) != is_write_backward(event))
+               goto out;
+
        /*
         * If both events generate aux data, they must be on the same PMU
         */
@@ -8413,6 +8583,24 @@ SYSCALL_DEFINE5(perf_event_open,
 
        get_online_cpus();
 
+       if (task) {
+               err = mutex_lock_interruptible(&task->signal->cred_guard_mutex);
+               if (err)
+                       goto err_cpus;
+
+               /*
+                * Reuse ptrace permission checks for now.
+                *
+                * We must hold cred_guard_mutex across this and any potential
+                * perf_install_in_context() call for this new event to
+                * serialize against exec() altering our credentials (and the
+                * perf_event_exit_task() that could imply).
+                */
+               err = -EACCES;
+               if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
+                       goto err_cred;
+       }
+
        if (flags & PERF_FLAG_PID_CGROUP)
                cgroup_fd = pid;
 
@@ -8420,7 +8608,7 @@ SYSCALL_DEFINE5(perf_event_open,
                                 NULL, NULL, cgroup_fd);
        if (IS_ERR(event)) {
                err = PTR_ERR(event);
-               goto err_cpus;
+               goto err_cred;
        }
 
        if (is_sampling_event(event)) {
@@ -8479,11 +8667,6 @@ SYSCALL_DEFINE5(perf_event_open,
                goto err_context;
        }
 
-       if (task) {
-               put_task_struct(task);
-               task = NULL;
-       }
-
        /*
         * Look up the group leader (we will attach this event to it):
         */
@@ -8581,6 +8764,11 @@ SYSCALL_DEFINE5(perf_event_open,
 
        WARN_ON_ONCE(ctx->parent_ctx);
 
+       /*
+        * This is the point of no return; we cannot fail hereafter. This is
+        * where we start modifying current state.
+        */
+
        if (move_group) {
                /*
                 * See perf_event_ctx_lock() for comments on the details
@@ -8652,6 +8840,11 @@ SYSCALL_DEFINE5(perf_event_open,
                mutex_unlock(&gctx->mutex);
        mutex_unlock(&ctx->mutex);
 
+       if (task) {
+               mutex_unlock(&task->signal->cred_guard_mutex);
+               put_task_struct(task);
+       }
+
        put_online_cpus();
 
        mutex_lock(&current->perf_event_mutex);
@@ -8684,6 +8877,9 @@ err_alloc:
         */
        if (!event_file)
                free_event(event);
+err_cred:
+       if (task)
+               mutex_unlock(&task->signal->cred_guard_mutex);
 err_cpus:
        put_online_cpus();
 err_task:
@@ -8968,6 +9164,9 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
 
 /*
  * When a child task exits, feed back event values to parent events.
+ *
+ * Can be called with cred_guard_mutex held when called from
+ * install_exec_creds().
  */
 void perf_event_exit_task(struct task_struct *child)
 {