kernel/events/core.c
/*
 * Performance events core code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
 *  Copyright  ©  2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/tick.h>
#include <linux/sysfs.h>
#include <linux/dcache.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/reboot.h>
#include <linux/vmstat.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/cgroup.h>
#include <linux/perf_event.h>
#include <linux/trace_events.h>
#include <linux/hw_breakpoint.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/compat.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/namei.h>
#include <linux/parser.h>

#include "internal.h"

#include <asm/irq_regs.h>

typedef int (*remote_function_f)(void *);

struct remote_function_call {
	struct task_struct	*p;
	remote_function_f	func;
	void			*info;
	int			ret;
};

static void remote_function(void *data)
{
	struct remote_function_call *tfc = data;
	struct task_struct *p = tfc->p;

	if (p) {
		/* -EAGAIN */
		if (task_cpu(p) != smp_processor_id())
			return;

		/*
		 * Now that we're on right CPU with IRQs disabled, we can test
		 * if we hit the right task without races.
		 */

		tfc->ret = -ESRCH; /* No such (running) process */
		if (p != current)
			return;
	}

	tfc->ret = tfc->func(tfc->info);
}

/**
 * task_function_call - call a function on the cpu on which a task runs
 * @p:		the task to evaluate
 * @func:	the function to be called
 * @info:	the function call argument
 *
 * Calls the function @func when the task is currently running. This might
 * be on the current CPU, which just calls the function directly
 *
 * returns: @func return value, or
 *	    -ESRCH  - when the process isn't running
 *	    -EAGAIN - when the process moved away
 */
static int
task_function_call(struct task_struct *p, remote_function_f func, void *info)
{
	struct remote_function_call data = {
		.p	= p,
		.func	= func,
		.info	= info,
		.ret	= -EAGAIN,
	};
	int ret;

	do {
		ret = smp_call_function_single(task_cpu(p), remote_function, &data, 1);
		if (!ret)
			ret = data.ret;
	} while (ret == -EAGAIN);

	return ret;
}

/**
 * cpu_function_call - call a function on the cpu
 * @func:	the function to be called
 * @info:	the function call argument
 *
 * Calls the function @func on the remote cpu.
 *
 * returns: @func return value or -ENXIO when the cpu is offline
 */
static int cpu_function_call(int cpu, remote_function_f func, void *info)
{
	struct remote_function_call data = {
		.p	= NULL,
		.func	= func,
		.info	= info,
		.ret	= -ENXIO, /* No such CPU */
	};

	smp_call_function_single(cpu, remote_function, &data, 1);

	return data.ret;
}

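/*
 * Illustrative sketch (my_callback is hypothetical, not a real call site):
 * a callback with the remote_function_f signature runs with IRQs disabled
 * on the target CPU, and task_function_call() keeps retrying internally
 * while the task moves between CPUs (-EAGAIN):
 *
 *	static int my_callback(void *info)
 *	{
 *		struct perf_event *event = info;
 *		// runs on the CPU where the task is (or was) running
 *		return 0;
 *	}
 *
 *	err = task_function_call(task, my_callback, event);
 */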
static inline struct perf_cpu_context *
__get_cpu_context(struct perf_event_context *ctx)
{
	return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
}

static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
			  struct perf_event_context *ctx)
{
	raw_spin_lock(&cpuctx->ctx.lock);
	if (ctx)
		raw_spin_lock(&ctx->lock);
}

static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
			    struct perf_event_context *ctx)
{
	if (ctx)
		raw_spin_unlock(&ctx->lock);
	raw_spin_unlock(&cpuctx->ctx.lock);
}

#define TASK_TOMBSTONE ((void *)-1L)

static bool is_kernel_event(struct perf_event *event)
{
	return READ_ONCE(event->owner) == TASK_TOMBSTONE;
}

/*
 * On task ctx scheduling...
 *
 * When !ctx->nr_events a task context will not be scheduled. This means
 * we can disable the scheduler hooks (for performance) without leaving
 * pending task ctx state.
 *
 * This however results in two special cases:
 *
 *  - removing the last event from a task ctx; this is relatively straight
 *    forward and is done in __perf_remove_from_context.
 *
 *  - adding the first event to a task ctx; this is tricky because we cannot
 *    rely on ctx->is_active and therefore cannot use event_function_call().
 *    See perf_install_in_context().
 *
 * If ctx->nr_events, then ctx->is_active and cpuctx->task_ctx are set.
 */

typedef void (*event_f)(struct perf_event *, struct perf_cpu_context *,
			struct perf_event_context *, void *);

struct event_function_struct {
	struct perf_event *event;
	event_f func;
	void *data;
};

static int event_function(void *info)
{
	struct event_function_struct *efs = info;
	struct perf_event *event = efs->event;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
	struct perf_event_context *task_ctx = cpuctx->task_ctx;
	int ret = 0;

	WARN_ON_ONCE(!irqs_disabled());

	perf_ctx_lock(cpuctx, task_ctx);
	/*
	 * Since we do the IPI call without holding ctx->lock things can have
	 * changed, double check we hit the task we set out to hit.
	 */
	if (ctx->task) {
		if (ctx->task != current) {
			ret = -ESRCH;
			goto unlock;
		}

		/*
		 * We only use event_function_call() on established contexts,
		 * and event_function() is only ever called when active (or
		 * rather, we'll have bailed in task_function_call() or the
		 * above ctx->task != current test), therefore we must have
		 * ctx->is_active here.
		 */
		WARN_ON_ONCE(!ctx->is_active);
		/*
		 * And since we have ctx->is_active, cpuctx->task_ctx must
		 * match.
		 */
		WARN_ON_ONCE(task_ctx != ctx);
	} else {
		WARN_ON_ONCE(&cpuctx->ctx != ctx);
	}

	efs->func(event, cpuctx, ctx, efs->data);
unlock:
	perf_ctx_unlock(cpuctx, task_ctx);

	return ret;
}

static void event_function_local(struct perf_event *event, event_f func, void *data)
{
	struct event_function_struct efs = {
		.event = event,
		.func = func,
		.data = data,
	};

	int ret = event_function(&efs);
	WARN_ON_ONCE(ret);
}

static void event_function_call(struct perf_event *event, event_f func, void *data)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = READ_ONCE(ctx->task); /* verified in event_function */
	struct event_function_struct efs = {
		.event = event,
		.func = func,
		.data = data,
	};

	if (!event->parent) {
		/*
		 * If this is a !child event, we must hold ctx::mutex to
		 * stabilize the event->ctx relation. See
		 * perf_event_ctx_lock().
		 */
		lockdep_assert_held(&ctx->mutex);
	}

	if (!task) {
		cpu_function_call(event->cpu, event_function, &efs);
		return;
	}

	if (task == TASK_TOMBSTONE)
		return;

again:
	if (!task_function_call(task, event_function, &efs))
		return;

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * Reload the task pointer, it might have been changed by
	 * a concurrent perf_event_context_sched_out().
	 */
	task = ctx->task;
	if (task == TASK_TOMBSTONE) {
		raw_spin_unlock_irq(&ctx->lock);
		return;
	}
	if (ctx->is_active) {
		raw_spin_unlock_irq(&ctx->lock);
		goto again;
	}
	func(event, NULL, ctx, data);
	raw_spin_unlock_irq(&ctx->lock);
}

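/*
 * For a concrete user of event_function_call(), see
 * perf_remove_from_context() further down, which does:
 *
 *	event_function_call(event, __perf_remove_from_context, (void *)flags);
 *
 * so that __perf_remove_from_context() runs either via IPI on the CPU that
 * owns the context (under perf_ctx_lock()) or, for an inactive task
 * context, locally under ctx->lock.
 */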
#define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
		       PERF_FLAG_FD_OUTPUT  |\
		       PERF_FLAG_PID_CGROUP |\
		       PERF_FLAG_FD_CLOEXEC)

/*
 * branch priv levels that need permission checks
 */
#define PERF_SAMPLE_BRANCH_PERM_PLM \
	(PERF_SAMPLE_BRANCH_KERNEL |\
	 PERF_SAMPLE_BRANCH_HV)

enum event_type_t {
	EVENT_FLEXIBLE = 0x1,
	EVENT_PINNED = 0x2,
	EVENT_TIME = 0x4,
	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
};

/*
 * perf_sched_events : >0 events exist
 * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
 */

static void perf_sched_delayed(struct work_struct *work);
DEFINE_STATIC_KEY_FALSE(perf_sched_events);
static DECLARE_DELAYED_WORK(perf_sched_work, perf_sched_delayed);
static DEFINE_MUTEX(perf_sched_mutex);
static atomic_t perf_sched_count;

static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
static DEFINE_PER_CPU(int, perf_sched_cb_usages);
static DEFINE_PER_CPU(struct pmu_event_list, pmu_sb_events);

static atomic_t nr_mmap_events __read_mostly;
static atomic_t nr_comm_events __read_mostly;
static atomic_t nr_task_events __read_mostly;
static atomic_t nr_freq_events __read_mostly;
static atomic_t nr_switch_events __read_mostly;

static LIST_HEAD(pmus);
static DEFINE_MUTEX(pmus_lock);
static struct srcu_struct pmus_srcu;

/*
 * perf event paranoia level:
 *  -1 - not paranoid at all
 *   0 - disallow raw tracepoint access for unpriv
 *   1 - disallow cpu events for unpriv
 *   2 - disallow kernel profiling for unpriv
 */
int sysctl_perf_event_paranoid __read_mostly = 2;

/* Minimum for 512 kiB + 1 user control page */
int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */

/*
 * max perf event sample rate
 */
#define DEFAULT_MAX_SAMPLE_RATE		100000
#define DEFAULT_SAMPLE_PERIOD_NS	(NSEC_PER_SEC / DEFAULT_MAX_SAMPLE_RATE)
#define DEFAULT_CPU_TIME_MAX_PERCENT	25

int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE;

static int max_samples_per_tick __read_mostly = DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);
static int perf_sample_period_ns __read_mostly = DEFAULT_SAMPLE_PERIOD_NS;

static int perf_sample_allowed_ns __read_mostly =
	DEFAULT_SAMPLE_PERIOD_NS * DEFAULT_CPU_TIME_MAX_PERCENT / 100;

static void update_perf_cpu_limits(void)
{
	u64 tmp = perf_sample_period_ns;

	tmp *= sysctl_perf_cpu_time_max_percent;
	tmp = div_u64(tmp, 100);
	if (!tmp)
		tmp = 1;

	WRITE_ONCE(perf_sample_allowed_ns, tmp);
}

static int perf_rotate_context(struct perf_cpu_context *cpuctx);

int perf_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (ret || !write)
		return ret;

	/*
	 * If throttling is disabled don't allow the write:
	 */
	if (sysctl_perf_cpu_time_max_percent == 100 ||
	    sysctl_perf_cpu_time_max_percent == 0)
		return -EINVAL;

	max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
	perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
	update_perf_cpu_limits();

	return 0;
}

int sysctl_perf_cpu_time_max_percent __read_mostly = DEFAULT_CPU_TIME_MAX_PERCENT;

int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp,
				loff_t *ppos)
{
	int ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write)
		return ret;

	if (sysctl_perf_cpu_time_max_percent == 100 ||
	    sysctl_perf_cpu_time_max_percent == 0) {
		printk(KERN_WARNING
		       "perf: Dynamic interrupt throttling disabled, can hang your system!\n");
		WRITE_ONCE(perf_sample_allowed_ns, 0);
	} else {
		update_perf_cpu_limits();
	}

	return 0;
}

/*
 * perf samples are done in some very critical code paths (NMIs).
 * If they take too much CPU time, the system can lock up and not
 * get any real work done. This will drop the sample rate when
 * we detect that events are taking too long.
 */
#define NR_ACCUMULATED_SAMPLES 128
static DEFINE_PER_CPU(u64, running_sample_length);

static u64 __report_avg;
static u64 __report_allowed;

static void perf_duration_warn(struct irq_work *w)
{
	printk_ratelimited(KERN_WARNING
		"perf: interrupt took too long (%lld > %lld), lowering "
		"kernel.perf_event_max_sample_rate to %d\n",
		__report_avg, __report_allowed,
		sysctl_perf_event_sample_rate);
}

static DEFINE_IRQ_WORK(perf_duration_work, perf_duration_warn);

void perf_sample_event_took(u64 sample_len_ns)
{
	u64 max_len = READ_ONCE(perf_sample_allowed_ns);
	u64 running_len;
	u64 avg_len;
	u32 max;

	if (max_len == 0)
		return;

	/* Decay the counter by 1 average sample. */
	running_len = __this_cpu_read(running_sample_length);
	running_len -= running_len/NR_ACCUMULATED_SAMPLES;
	running_len += sample_len_ns;
	__this_cpu_write(running_sample_length, running_len);

	/*
	 * Note: this will be biased artificially low until we have
	 * seen NR_ACCUMULATED_SAMPLES. Doing it this way keeps us
	 * from having to maintain a count.
	 */
	avg_len = running_len/NR_ACCUMULATED_SAMPLES;
	if (avg_len <= max_len)
		return;

	__report_avg = avg_len;
	__report_allowed = max_len;

	/*
	 * Compute a throttle threshold 25% below the current duration.
	 */
	avg_len += avg_len / 4;
	max = (TICK_NSEC / 100) * sysctl_perf_cpu_time_max_percent;
	if (avg_len < max)
		max /= (u32)avg_len;
	else
		max = 1;

	WRITE_ONCE(perf_sample_allowed_ns, avg_len);
	WRITE_ONCE(max_samples_per_tick, max);

	sysctl_perf_event_sample_rate = max * HZ;
	perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;

	if (!irq_work_queue(&perf_duration_work)) {
		early_printk("perf: interrupt took too long (%lld > %lld), lowering "
			     "kernel.perf_event_max_sample_rate to %d\n",
			     __report_avg, __report_allowed,
			     sysctl_perf_event_sample_rate);
	}
}
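/*
 * Worked example of the throttling math above (illustrative numbers,
 * assuming HZ=1000 so TICK_NSEC is about 1,000,000 ns, and the default
 * sysctl_perf_cpu_time_max_percent of 25):
 *
 *	max = (1,000,000 / 100) * 25 = 250,000
 *
 * If the decayed average sample length is 10,000 ns, it becomes 12,500
 * after the +25% margin, so max = 250,000 / 12,500 = 20 samples per tick
 * and kernel.perf_event_max_sample_rate is lowered to 20 * HZ = 20,000.
 */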
511
cdd6c482 512static atomic64_t perf_event_id;
a96bbc16 513
0b3fcf17
SE
514static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
515 enum event_type_t event_type);
516
517static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
e5d1367f
SE
518 enum event_type_t event_type,
519 struct task_struct *task);
520
521static void update_context_time(struct perf_event_context *ctx);
522static u64 perf_event_time(struct perf_event *event);
0b3fcf17 523
cdd6c482 524void __weak perf_event_print_debug(void) { }
0793a61d 525
84c79910 526extern __weak const char *perf_pmu_name(void)
0793a61d 527{
84c79910 528 return "pmu";
0793a61d
TG
529}
530
0b3fcf17
SE
531static inline u64 perf_clock(void)
532{
533 return local_clock();
534}
535
34f43927
PZ
536static inline u64 perf_event_clock(struct perf_event *event)
537{
538 return event->clock();
539}
540
e5d1367f
SE
541#ifdef CONFIG_CGROUP_PERF
542
e5d1367f
SE
543static inline bool
544perf_cgroup_match(struct perf_event *event)
545{
546 struct perf_event_context *ctx = event->ctx;
547 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
548
ef824fa1
TH
549 /* @event doesn't care about cgroup */
550 if (!event->cgrp)
551 return true;
552
553 /* wants specific cgroup scope but @cpuctx isn't associated with any */
554 if (!cpuctx->cgrp)
555 return false;
556
557 /*
558 * Cgroup scoping is recursive. An event enabled for a cgroup is
559 * also enabled for all its descendant cgroups. If @cpuctx's
560 * cgroup is a descendant of @event's (the test covers identity
561 * case), it's a match.
562 */
563 return cgroup_is_descendant(cpuctx->cgrp->css.cgroup,
564 event->cgrp->css.cgroup);
e5d1367f
SE
565}
566
e5d1367f
SE
567static inline void perf_detach_cgroup(struct perf_event *event)
568{
4e2ba650 569 css_put(&event->cgrp->css);
e5d1367f
SE
570 event->cgrp = NULL;
571}
572
573static inline int is_cgroup_event(struct perf_event *event)
574{
575 return event->cgrp != NULL;
576}
577
578static inline u64 perf_cgroup_event_time(struct perf_event *event)
579{
580 struct perf_cgroup_info *t;
581
582 t = per_cpu_ptr(event->cgrp->info, event->cpu);
583 return t->time;
584}
585
586static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
587{
588 struct perf_cgroup_info *info;
589 u64 now;
590
591 now = perf_clock();
592
593 info = this_cpu_ptr(cgrp->info);
594
595 info->time += now - info->timestamp;
596 info->timestamp = now;
597}
598
599static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
600{
601 struct perf_cgroup *cgrp_out = cpuctx->cgrp;
602 if (cgrp_out)
603 __update_cgrp_time(cgrp_out);
604}
605
606static inline void update_cgrp_time_from_event(struct perf_event *event)
607{
3f7cce3c
SE
608 struct perf_cgroup *cgrp;
609
e5d1367f 610 /*
3f7cce3c
SE
611 * ensure we access cgroup data only when needed and
612 * when we know the cgroup is pinned (css_get)
e5d1367f 613 */
3f7cce3c 614 if (!is_cgroup_event(event))
e5d1367f
SE
615 return;
616
614e4c4e 617 cgrp = perf_cgroup_from_task(current, event->ctx);
3f7cce3c
SE
618 /*
619 * Do not update time when cgroup is not active
620 */
621 if (cgrp == event->cgrp)
622 __update_cgrp_time(event->cgrp);
e5d1367f
SE
623}
624
625static inline void
3f7cce3c
SE
626perf_cgroup_set_timestamp(struct task_struct *task,
627 struct perf_event_context *ctx)
e5d1367f
SE
628{
629 struct perf_cgroup *cgrp;
630 struct perf_cgroup_info *info;
631
3f7cce3c
SE
632 /*
633 * ctx->lock held by caller
634 * ensure we do not access cgroup data
635 * unless we have the cgroup pinned (css_get)
636 */
637 if (!task || !ctx->nr_cgroups)
e5d1367f
SE
638 return;
639
614e4c4e 640 cgrp = perf_cgroup_from_task(task, ctx);
e5d1367f 641 info = this_cpu_ptr(cgrp->info);
3f7cce3c 642 info->timestamp = ctx->timestamp;
e5d1367f
SE
643}
644
645#define PERF_CGROUP_SWOUT 0x1 /* cgroup switch out every event */
646#define PERF_CGROUP_SWIN 0x2 /* cgroup switch in events based on task */
647
648/*
649 * reschedule events based on the cgroup constraint of task.
650 *
651 * mode SWOUT : schedule out everything
652 * mode SWIN : schedule in based on cgroup for next
653 */
18ab2cd3 654static void perf_cgroup_switch(struct task_struct *task, int mode)
e5d1367f
SE
655{
656 struct perf_cpu_context *cpuctx;
657 struct pmu *pmu;
658 unsigned long flags;
659
660 /*
 661 * disable interrupts to avoid getting nr_cgroup
662 * changes via __perf_event_disable(). Also
663 * avoids preemption.
664 */
665 local_irq_save(flags);
666
667 /*
668 * we reschedule only in the presence of cgroup
669 * constrained events.
670 */
e5d1367f
SE
671
672 list_for_each_entry_rcu(pmu, &pmus, entry) {
e5d1367f 673 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
95cf59ea
PZ
674 if (cpuctx->unique_pmu != pmu)
675 continue; /* ensure we process each cpuctx once */
e5d1367f 676
e5d1367f
SE
677 /*
678 * perf_cgroup_events says at least one
679 * context on this CPU has cgroup events.
680 *
681 * ctx->nr_cgroups reports the number of cgroup
682 * events for a context.
683 */
684 if (cpuctx->ctx.nr_cgroups > 0) {
facc4307
PZ
685 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
686 perf_pmu_disable(cpuctx->ctx.pmu);
e5d1367f
SE
687
688 if (mode & PERF_CGROUP_SWOUT) {
689 cpu_ctx_sched_out(cpuctx, EVENT_ALL);
690 /*
691 * must not be done before ctxswout due
692 * to event_filter_match() in event_sched_out()
693 */
694 cpuctx->cgrp = NULL;
695 }
696
697 if (mode & PERF_CGROUP_SWIN) {
e566b76e 698 WARN_ON_ONCE(cpuctx->cgrp);
95cf59ea
PZ
699 /*
700 * set cgrp before ctxsw in to allow
701 * event_filter_match() to not have to pass
702 * task around
614e4c4e
SE
703 * we pass the cpuctx->ctx to perf_cgroup_from_task()
 704 * because cgroup events are only per-cpu
e5d1367f 705 */
614e4c4e 706 cpuctx->cgrp = perf_cgroup_from_task(task, &cpuctx->ctx);
e5d1367f
SE
707 cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
708 }
facc4307
PZ
709 perf_pmu_enable(cpuctx->ctx.pmu);
710 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
e5d1367f 711 }
e5d1367f
SE
712 }
713
e5d1367f
SE
714 local_irq_restore(flags);
715}
716
a8d757ef
SE
717static inline void perf_cgroup_sched_out(struct task_struct *task,
718 struct task_struct *next)
e5d1367f 719{
a8d757ef
SE
720 struct perf_cgroup *cgrp1;
721 struct perf_cgroup *cgrp2 = NULL;
722
ddaaf4e2 723 rcu_read_lock();
a8d757ef
SE
724 /*
725 * we come here when we know perf_cgroup_events > 0
614e4c4e
SE
726 * we do not need to pass the ctx here because we know
727 * we are holding the rcu lock
a8d757ef 728 */
614e4c4e 729 cgrp1 = perf_cgroup_from_task(task, NULL);
70a01657 730 cgrp2 = perf_cgroup_from_task(next, NULL);
a8d757ef
SE
731
732 /*
733 * only schedule out current cgroup events if we know
734 * that we are switching to a different cgroup. Otherwise,
735 * do no touch the cgroup events.
736 */
737 if (cgrp1 != cgrp2)
738 perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
ddaaf4e2
SE
739
740 rcu_read_unlock();
e5d1367f
SE
741}
742
a8d757ef
SE
743static inline void perf_cgroup_sched_in(struct task_struct *prev,
744 struct task_struct *task)
e5d1367f 745{
a8d757ef
SE
746 struct perf_cgroup *cgrp1;
747 struct perf_cgroup *cgrp2 = NULL;
748
ddaaf4e2 749 rcu_read_lock();
a8d757ef
SE
750 /*
751 * we come here when we know perf_cgroup_events > 0
614e4c4e
SE
752 * we do not need to pass the ctx here because we know
753 * we are holding the rcu lock
a8d757ef 754 */
614e4c4e 755 cgrp1 = perf_cgroup_from_task(task, NULL);
614e4c4e 756 cgrp2 = perf_cgroup_from_task(prev, NULL);
a8d757ef
SE
757
758 /*
759 * only need to schedule in cgroup events if we are changing
760 * cgroup during ctxsw. Cgroup events were not scheduled
761 * out of ctxsw out if that was not the case.
762 */
763 if (cgrp1 != cgrp2)
764 perf_cgroup_switch(task, PERF_CGROUP_SWIN);
ddaaf4e2
SE
765
766 rcu_read_unlock();
e5d1367f
SE
767}
768
769static inline int perf_cgroup_connect(int fd, struct perf_event *event,
770 struct perf_event_attr *attr,
771 struct perf_event *group_leader)
772{
773 struct perf_cgroup *cgrp;
774 struct cgroup_subsys_state *css;
2903ff01
AV
775 struct fd f = fdget(fd);
776 int ret = 0;
e5d1367f 777
2903ff01 778 if (!f.file)
e5d1367f
SE
779 return -EBADF;
780
b583043e 781 css = css_tryget_online_from_dir(f.file->f_path.dentry,
ec903c0c 782 &perf_event_cgrp_subsys);
3db272c0
LZ
783 if (IS_ERR(css)) {
784 ret = PTR_ERR(css);
785 goto out;
786 }
e5d1367f
SE
787
788 cgrp = container_of(css, struct perf_cgroup, css);
789 event->cgrp = cgrp;
790
791 /*
792 * all events in a group must monitor
793 * the same cgroup because a task belongs
794 * to only one perf cgroup at a time
795 */
796 if (group_leader && group_leader->cgrp != cgrp) {
797 perf_detach_cgroup(event);
798 ret = -EINVAL;
e5d1367f 799 }
3db272c0 800out:
2903ff01 801 fdput(f);
e5d1367f
SE
802 return ret;
803}
804
805static inline void
806perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
807{
808 struct perf_cgroup_info *t;
809 t = per_cpu_ptr(event->cgrp->info, event->cpu);
810 event->shadow_ctx_time = now - t->timestamp;
811}
812
813static inline void
814perf_cgroup_defer_enabled(struct perf_event *event)
815{
816 /*
817 * when the current task's perf cgroup does not match
818 * the event's, we need to remember to call the
819 * perf_mark_enable() function the first time a task with
820 * a matching perf cgroup is scheduled in.
821 */
822 if (is_cgroup_event(event) && !perf_cgroup_match(event))
823 event->cgrp_defer_enabled = 1;
824}
825
826static inline void
827perf_cgroup_mark_enabled(struct perf_event *event,
828 struct perf_event_context *ctx)
829{
830 struct perf_event *sub;
831 u64 tstamp = perf_event_time(event);
832
833 if (!event->cgrp_defer_enabled)
834 return;
835
836 event->cgrp_defer_enabled = 0;
837
838 event->tstamp_enabled = tstamp - event->total_time_enabled;
839 list_for_each_entry(sub, &event->sibling_list, group_entry) {
840 if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
841 sub->tstamp_enabled = tstamp - sub->total_time_enabled;
842 sub->cgrp_defer_enabled = 0;
843 }
844 }
845}
846#else /* !CONFIG_CGROUP_PERF */
847
848static inline bool
849perf_cgroup_match(struct perf_event *event)
850{
851 return true;
852}
853
854static inline void perf_detach_cgroup(struct perf_event *event)
855{}
856
857static inline int is_cgroup_event(struct perf_event *event)
858{
859 return 0;
860}
861
862static inline u64 perf_cgroup_event_cgrp_time(struct perf_event *event)
863{
864 return 0;
865}
866
867static inline void update_cgrp_time_from_event(struct perf_event *event)
868{
869}
870
871static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
872{
873}
874
a8d757ef
SE
875static inline void perf_cgroup_sched_out(struct task_struct *task,
876 struct task_struct *next)
e5d1367f
SE
877{
878}
879
a8d757ef
SE
880static inline void perf_cgroup_sched_in(struct task_struct *prev,
881 struct task_struct *task)
e5d1367f
SE
882{
883}
884
885static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
886 struct perf_event_attr *attr,
887 struct perf_event *group_leader)
888{
889 return -EINVAL;
890}
891
892static inline void
3f7cce3c
SE
893perf_cgroup_set_timestamp(struct task_struct *task,
894 struct perf_event_context *ctx)
e5d1367f
SE
895{
896}
897
898void
899perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
900{
901}
902
903static inline void
904perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
905{
906}
907
908static inline u64 perf_cgroup_event_time(struct perf_event *event)
909{
910 return 0;
911}
912
913static inline void
914perf_cgroup_defer_enabled(struct perf_event *event)
915{
916}
917
918static inline void
919perf_cgroup_mark_enabled(struct perf_event *event,
920 struct perf_event_context *ctx)
921{
922}
923#endif
924
9e630205
SE
925/*
926 * set default to be dependent on timer tick just
927 * like original code
928 */
929#define PERF_CPU_HRTIMER (1000 / HZ)
930/*
 931 * function must be called with interrupts disabled
932 */
272325c4 933static enum hrtimer_restart perf_mux_hrtimer_handler(struct hrtimer *hr)
9e630205
SE
934{
935 struct perf_cpu_context *cpuctx;
9e630205
SE
936 int rotations = 0;
937
938 WARN_ON(!irqs_disabled());
939
940 cpuctx = container_of(hr, struct perf_cpu_context, hrtimer);
9e630205
SE
941 rotations = perf_rotate_context(cpuctx);
942
4cfafd30
PZ
943 raw_spin_lock(&cpuctx->hrtimer_lock);
944 if (rotations)
9e630205 945 hrtimer_forward_now(hr, cpuctx->hrtimer_interval);
4cfafd30
PZ
946 else
947 cpuctx->hrtimer_active = 0;
948 raw_spin_unlock(&cpuctx->hrtimer_lock);
9e630205 949
4cfafd30 950 return rotations ? HRTIMER_RESTART : HRTIMER_NORESTART;
9e630205
SE
951}
952
272325c4 953static void __perf_mux_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu)
9e630205 954{
272325c4 955 struct hrtimer *timer = &cpuctx->hrtimer;
9e630205 956 struct pmu *pmu = cpuctx->ctx.pmu;
272325c4 957 u64 interval;
9e630205
SE
958
959 /* no multiplexing needed for SW PMU */
960 if (pmu->task_ctx_nr == perf_sw_context)
961 return;
962
62b85639
SE
963 /*
964 * check default is sane, if not set then force to
965 * default interval (1/tick)
966 */
272325c4
PZ
967 interval = pmu->hrtimer_interval_ms;
968 if (interval < 1)
969 interval = pmu->hrtimer_interval_ms = PERF_CPU_HRTIMER;
62b85639 970
272325c4 971 cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * interval);
9e630205 972
4cfafd30
PZ
973 raw_spin_lock_init(&cpuctx->hrtimer_lock);
974 hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
272325c4 975 timer->function = perf_mux_hrtimer_handler;
9e630205
SE
976}
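/*
 * Illustrative: with the default PERF_CPU_HRTIMER of (1000 / HZ) ms, a
 * HZ=250 kernel gets pmu->hrtimer_interval_ms = 4, i.e. a 4ms multiplexing
 * interval (roughly one tick), unless the PMU sets hrtimer_interval_ms
 * itself.
 */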
977
272325c4 978static int perf_mux_hrtimer_restart(struct perf_cpu_context *cpuctx)
9e630205 979{
272325c4 980 struct hrtimer *timer = &cpuctx->hrtimer;
9e630205 981 struct pmu *pmu = cpuctx->ctx.pmu;
4cfafd30 982 unsigned long flags;
9e630205
SE
983
984 /* not for SW PMU */
985 if (pmu->task_ctx_nr == perf_sw_context)
272325c4 986 return 0;
9e630205 987
4cfafd30
PZ
988 raw_spin_lock_irqsave(&cpuctx->hrtimer_lock, flags);
989 if (!cpuctx->hrtimer_active) {
990 cpuctx->hrtimer_active = 1;
991 hrtimer_forward_now(timer, cpuctx->hrtimer_interval);
992 hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
993 }
994 raw_spin_unlock_irqrestore(&cpuctx->hrtimer_lock, flags);
9e630205 995
272325c4 996 return 0;
9e630205
SE
997}
998
33696fc0 999void perf_pmu_disable(struct pmu *pmu)
9e35ad38 1000{
33696fc0
PZ
1001 int *count = this_cpu_ptr(pmu->pmu_disable_count);
1002 if (!(*count)++)
1003 pmu->pmu_disable(pmu);
9e35ad38 1004}
9e35ad38 1005
33696fc0 1006void perf_pmu_enable(struct pmu *pmu)
9e35ad38 1007{
33696fc0
PZ
1008 int *count = this_cpu_ptr(pmu->pmu_disable_count);
1009 if (!--(*count))
1010 pmu->pmu_enable(pmu);
9e35ad38 1011}
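/*
 * Illustrative nesting of the two helpers above: the per-cpu
 * pmu_disable_count makes the PMU callbacks fire only on the outermost pair.
 *
 *	perf_pmu_disable(pmu);	// 0 -> 1, calls pmu->pmu_disable()
 *	perf_pmu_disable(pmu);	// 1 -> 2, no callback
 *	perf_pmu_enable(pmu);	// 2 -> 1, no callback
 *	perf_pmu_enable(pmu);	// 1 -> 0, calls pmu->pmu_enable()
 */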
9e35ad38 1012
2fde4f94 1013static DEFINE_PER_CPU(struct list_head, active_ctx_list);
e9d2b064
PZ
1014
1015/*
2fde4f94
MR
1016 * perf_event_ctx_activate(), perf_event_ctx_deactivate(), and
1017 * perf_event_task_tick() are fully serialized because they're strictly cpu
1018 * affine and perf_event_ctx{activate,deactivate} are called with IRQs
1019 * disabled, while perf_event_task_tick is called from IRQ context.
e9d2b064 1020 */
2fde4f94 1021static void perf_event_ctx_activate(struct perf_event_context *ctx)
9e35ad38 1022{
2fde4f94 1023 struct list_head *head = this_cpu_ptr(&active_ctx_list);
b5ab4cd5 1024
e9d2b064 1025 WARN_ON(!irqs_disabled());
b5ab4cd5 1026
2fde4f94
MR
1027 WARN_ON(!list_empty(&ctx->active_ctx_list));
1028
1029 list_add(&ctx->active_ctx_list, head);
1030}
1031
1032static void perf_event_ctx_deactivate(struct perf_event_context *ctx)
1033{
1034 WARN_ON(!irqs_disabled());
1035
1036 WARN_ON(list_empty(&ctx->active_ctx_list));
1037
1038 list_del_init(&ctx->active_ctx_list);
9e35ad38 1039}
9e35ad38 1040
cdd6c482 1041static void get_ctx(struct perf_event_context *ctx)
a63eaf34 1042{
e5289d4a 1043 WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
a63eaf34
PM
1044}
1045
4af57ef2
YZ
1046static void free_ctx(struct rcu_head *head)
1047{
1048 struct perf_event_context *ctx;
1049
1050 ctx = container_of(head, struct perf_event_context, rcu_head);
1051 kfree(ctx->task_ctx_data);
1052 kfree(ctx);
1053}
1054
cdd6c482 1055static void put_ctx(struct perf_event_context *ctx)
a63eaf34 1056{
564c2b21
PM
1057 if (atomic_dec_and_test(&ctx->refcount)) {
1058 if (ctx->parent_ctx)
1059 put_ctx(ctx->parent_ctx);
63b6da39 1060 if (ctx->task && ctx->task != TASK_TOMBSTONE)
c93f7669 1061 put_task_struct(ctx->task);
4af57ef2 1062 call_rcu(&ctx->rcu_head, free_ctx);
564c2b21 1063 }
a63eaf34
PM
1064}
1065
f63a8daa
PZ
1066/*
1067 * Because of perf_event::ctx migration in sys_perf_event_open::move_group and
1068 * perf_pmu_migrate_context() we need some magic.
1069 *
1070 * Those places that change perf_event::ctx will hold both
1071 * perf_event_ctx::mutex of the 'old' and 'new' ctx value.
1072 *
8b10c5e2
PZ
1073 * Lock ordering is by mutex address. There are two other sites where
1074 * perf_event_context::mutex nests and those are:
1075 *
1076 * - perf_event_exit_task_context() [ child , 0 ]
8ba289b8
PZ
1077 * perf_event_exit_event()
1078 * put_event() [ parent, 1 ]
8b10c5e2
PZ
1079 *
1080 * - perf_event_init_context() [ parent, 0 ]
1081 * inherit_task_group()
1082 * inherit_group()
1083 * inherit_event()
1084 * perf_event_alloc()
1085 * perf_init_event()
1086 * perf_try_init_event() [ child , 1 ]
1087 *
1088 * While it appears there is an obvious deadlock here -- the parent and child
1089 * nesting levels are inverted between the two. This is in fact safe because
1090 * life-time rules separate them. That is an exiting task cannot fork, and a
1091 * spawning task cannot (yet) exit.
1092 *
 1093 * But remember that these are parent<->child context relations, and
1094 * migration does not affect children, therefore these two orderings should not
1095 * interact.
f63a8daa
PZ
1096 *
1097 * The change in perf_event::ctx does not affect children (as claimed above)
1098 * because the sys_perf_event_open() case will install a new event and break
1099 * the ctx parent<->child relation, and perf_pmu_migrate_context() is only
1100 * concerned with cpuctx and that doesn't have children.
1101 *
1102 * The places that change perf_event::ctx will issue:
1103 *
1104 * perf_remove_from_context();
1105 * synchronize_rcu();
1106 * perf_install_in_context();
1107 *
1108 * to affect the change. The remove_from_context() + synchronize_rcu() should
1109 * quiesce the event, after which we can install it in the new location. This
1110 * means that only external vectors (perf_fops, prctl) can perturb the event
1111 * while in transit. Therefore all such accessors should also acquire
1112 * perf_event_context::mutex to serialize against this.
1113 *
1114 * However; because event->ctx can change while we're waiting to acquire
1115 * ctx->mutex we must be careful and use the below perf_event_ctx_lock()
1116 * function.
1117 *
1118 * Lock order:
79c9ce57 1119 * cred_guard_mutex
f63a8daa
PZ
1120 * task_struct::perf_event_mutex
1121 * perf_event_context::mutex
f63a8daa 1122 * perf_event::child_mutex;
07c4a776 1123 * perf_event_context::lock
f63a8daa
PZ
1124 * perf_event::mmap_mutex
1125 * mmap_sem
1126 */
a83fe28e
PZ
1127static struct perf_event_context *
1128perf_event_ctx_lock_nested(struct perf_event *event, int nesting)
f63a8daa
PZ
1129{
1130 struct perf_event_context *ctx;
1131
1132again:
1133 rcu_read_lock();
1134 ctx = ACCESS_ONCE(event->ctx);
1135 if (!atomic_inc_not_zero(&ctx->refcount)) {
1136 rcu_read_unlock();
1137 goto again;
1138 }
1139 rcu_read_unlock();
1140
a83fe28e 1141 mutex_lock_nested(&ctx->mutex, nesting);
f63a8daa
PZ
1142 if (event->ctx != ctx) {
1143 mutex_unlock(&ctx->mutex);
1144 put_ctx(ctx);
1145 goto again;
1146 }
1147
1148 return ctx;
1149}
1150
a83fe28e
PZ
1151static inline struct perf_event_context *
1152perf_event_ctx_lock(struct perf_event *event)
1153{
1154 return perf_event_ctx_lock_nested(event, 0);
1155}
1156
f63a8daa
PZ
1157static void perf_event_ctx_unlock(struct perf_event *event,
1158 struct perf_event_context *ctx)
1159{
1160 mutex_unlock(&ctx->mutex);
1161 put_ctx(ctx);
1162}
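/*
 * Illustrative use of the pair above (the pattern the lock-order comment
 * describes): retry until event->ctx is stable under ctx->mutex.
 *
 *	ctx = perf_event_ctx_lock(event);
 *	... operate on the event with ctx->mutex held ...
 *	perf_event_ctx_unlock(event, ctx);
 */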
1163
211de6eb
PZ
1164/*
1165 * This must be done under the ctx->lock, such as to serialize against
1166 * context_equiv(), therefore we cannot call put_ctx() since that might end up
1167 * calling scheduler related locks and ctx->lock nests inside those.
1168 */
1169static __must_check struct perf_event_context *
1170unclone_ctx(struct perf_event_context *ctx)
71a851b4 1171{
211de6eb
PZ
1172 struct perf_event_context *parent_ctx = ctx->parent_ctx;
1173
1174 lockdep_assert_held(&ctx->lock);
1175
1176 if (parent_ctx)
71a851b4 1177 ctx->parent_ctx = NULL;
5a3126d4 1178 ctx->generation++;
211de6eb
PZ
1179
1180 return parent_ctx;
71a851b4
PZ
1181}
1182
6844c09d
ACM
1183static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
1184{
1185 /*
1186 * only top level events have the pid namespace they were created in
1187 */
1188 if (event->parent)
1189 event = event->parent;
1190
1191 return task_tgid_nr_ns(p, event->ns);
1192}
1193
1194static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
1195{
1196 /*
1197 * only top level events have the pid namespace they were created in
1198 */
1199 if (event->parent)
1200 event = event->parent;
1201
1202 return task_pid_nr_ns(p, event->ns);
1203}
1204
7f453c24 1205/*
cdd6c482 1206 * If we inherit events we want to return the parent event id
7f453c24
PZ
1207 * to userspace.
1208 */
cdd6c482 1209static u64 primary_event_id(struct perf_event *event)
7f453c24 1210{
cdd6c482 1211 u64 id = event->id;
7f453c24 1212
cdd6c482
IM
1213 if (event->parent)
1214 id = event->parent->id;
7f453c24
PZ
1215
1216 return id;
1217}
1218
25346b93 1219/*
cdd6c482 1220 * Get the perf_event_context for a task and lock it.
63b6da39 1221 *
25346b93
PM
 1222 * This has to cope with the fact that until it is locked,
1223 * the context could get moved to another task.
1224 */
cdd6c482 1225static struct perf_event_context *
8dc85d54 1226perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
25346b93 1227{
cdd6c482 1228 struct perf_event_context *ctx;
25346b93 1229
9ed6060d 1230retry:
058ebd0e
PZ
1231 /*
1232 * One of the few rules of preemptible RCU is that one cannot do
1233 * rcu_read_unlock() while holding a scheduler (or nested) lock when
2fd59077 1234 * part of the read side critical section was irqs-enabled -- see
058ebd0e
PZ
1235 * rcu_read_unlock_special().
1236 *
1237 * Since ctx->lock nests under rq->lock we must ensure the entire read
2fd59077 1238 * side critical section has interrupts disabled.
058ebd0e 1239 */
2fd59077 1240 local_irq_save(*flags);
058ebd0e 1241 rcu_read_lock();
8dc85d54 1242 ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
25346b93
PM
1243 if (ctx) {
1244 /*
1245 * If this context is a clone of another, it might
1246 * get swapped for another underneath us by
cdd6c482 1247 * perf_event_task_sched_out, though the
25346b93
PM
1248 * rcu_read_lock() protects us from any context
1249 * getting freed. Lock the context and check if it
1250 * got swapped before we could get the lock, and retry
1251 * if so. If we locked the right context, then it
1252 * can't get swapped on us any more.
1253 */
2fd59077 1254 raw_spin_lock(&ctx->lock);
8dc85d54 1255 if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
2fd59077 1256 raw_spin_unlock(&ctx->lock);
058ebd0e 1257 rcu_read_unlock();
2fd59077 1258 local_irq_restore(*flags);
25346b93
PM
1259 goto retry;
1260 }
b49a9e7e 1261
63b6da39
PZ
1262 if (ctx->task == TASK_TOMBSTONE ||
1263 !atomic_inc_not_zero(&ctx->refcount)) {
2fd59077 1264 raw_spin_unlock(&ctx->lock);
b49a9e7e 1265 ctx = NULL;
828b6f0e
PZ
1266 } else {
1267 WARN_ON_ONCE(ctx->task != task);
b49a9e7e 1268 }
25346b93
PM
1269 }
1270 rcu_read_unlock();
2fd59077
PM
1271 if (!ctx)
1272 local_irq_restore(*flags);
25346b93
PM
1273 return ctx;
1274}
1275
1276/*
1277 * Get the context for a task and increment its pin_count so it
1278 * can't get swapped to another task. This also increments its
1279 * reference count so that the context can't get freed.
1280 */
8dc85d54
PZ
1281static struct perf_event_context *
1282perf_pin_task_context(struct task_struct *task, int ctxn)
25346b93 1283{
cdd6c482 1284 struct perf_event_context *ctx;
25346b93
PM
1285 unsigned long flags;
1286
8dc85d54 1287 ctx = perf_lock_task_context(task, ctxn, &flags);
25346b93
PM
1288 if (ctx) {
1289 ++ctx->pin_count;
e625cce1 1290 raw_spin_unlock_irqrestore(&ctx->lock, flags);
25346b93
PM
1291 }
1292 return ctx;
1293}
1294
cdd6c482 1295static void perf_unpin_context(struct perf_event_context *ctx)
25346b93
PM
1296{
1297 unsigned long flags;
1298
e625cce1 1299 raw_spin_lock_irqsave(&ctx->lock, flags);
25346b93 1300 --ctx->pin_count;
e625cce1 1301 raw_spin_unlock_irqrestore(&ctx->lock, flags);
25346b93
PM
1302}
1303
f67218c3
PZ
1304/*
1305 * Update the record of the current time in a context.
1306 */
1307static void update_context_time(struct perf_event_context *ctx)
1308{
1309 u64 now = perf_clock();
1310
1311 ctx->time += now - ctx->timestamp;
1312 ctx->timestamp = now;
1313}
1314
4158755d
SE
1315static u64 perf_event_time(struct perf_event *event)
1316{
1317 struct perf_event_context *ctx = event->ctx;
e5d1367f
SE
1318
1319 if (is_cgroup_event(event))
1320 return perf_cgroup_event_time(event);
1321
4158755d
SE
1322 return ctx ? ctx->time : 0;
1323}
1324
f67218c3
PZ
1325/*
 1326 * Update the total_time_enabled and total_time_running fields for an event.
1327 */
1328static void update_event_times(struct perf_event *event)
1329{
1330 struct perf_event_context *ctx = event->ctx;
1331 u64 run_end;
1332
3cbaa590
PZ
1333 lockdep_assert_held(&ctx->lock);
1334
f67218c3
PZ
1335 if (event->state < PERF_EVENT_STATE_INACTIVE ||
1336 event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
1337 return;
3cbaa590 1338
e5d1367f
SE
1339 /*
1340 * in cgroup mode, time_enabled represents
1341 * the time the event was enabled AND active
1342 * tasks were in the monitored cgroup. This is
1343 * independent of the activity of the context as
1344 * there may be a mix of cgroup and non-cgroup events.
1345 *
1346 * That is why we treat cgroup events differently
1347 * here.
1348 */
1349 if (is_cgroup_event(event))
46cd6a7f 1350 run_end = perf_cgroup_event_time(event);
e5d1367f
SE
1351 else if (ctx->is_active)
1352 run_end = ctx->time;
acd1d7c1
PZ
1353 else
1354 run_end = event->tstamp_stopped;
1355
1356 event->total_time_enabled = run_end - event->tstamp_enabled;
f67218c3
PZ
1357
1358 if (event->state == PERF_EVENT_STATE_INACTIVE)
1359 run_end = event->tstamp_stopped;
1360 else
4158755d 1361 run_end = perf_event_time(event);
f67218c3
PZ
1362
1363 event->total_time_running = run_end - event->tstamp_running;
e5d1367f 1364
f67218c3
PZ
1365}
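/*
 * Illustrative numbers for the accounting above: an event enabled at
 * ctx->time 100us and first scheduled onto the PMU at 150us, read at
 * 200us while still active, reports total_time_enabled = 100us and
 * total_time_running = 50us (assuming it was never scheduled out in
 * between).
 */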
1366
96c21a46
PZ
1367/*
1368 * Update total_time_enabled and total_time_running for all events in a group.
1369 */
1370static void update_group_times(struct perf_event *leader)
1371{
1372 struct perf_event *event;
1373
1374 update_event_times(leader);
1375 list_for_each_entry(event, &leader->sibling_list, group_entry)
1376 update_event_times(event);
1377}
1378
889ff015
FW
1379static struct list_head *
1380ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
1381{
1382 if (event->attr.pinned)
1383 return &ctx->pinned_groups;
1384 else
1385 return &ctx->flexible_groups;
1386}
1387
fccc714b 1388/*
cdd6c482 1389 * Add an event from the lists for its context.
fccc714b
PZ
1390 * Must be called with ctx->mutex and ctx->lock held.
1391 */
04289bb9 1392static void
cdd6c482 1393list_add_event(struct perf_event *event, struct perf_event_context *ctx)
04289bb9 1394{
c994d613
PZ
1395 lockdep_assert_held(&ctx->lock);
1396
8a49542c
PZ
1397 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
1398 event->attach_state |= PERF_ATTACH_CONTEXT;
04289bb9
IM
1399
1400 /*
8a49542c
PZ
1401 * If we're a stand alone event or group leader, we go to the context
1402 * list, group events are kept attached to the group so that
1403 * perf_group_detach can, at all times, locate all siblings.
04289bb9 1404 */
8a49542c 1405 if (event->group_leader == event) {
889ff015
FW
1406 struct list_head *list;
1407
d6f962b5
FW
1408 if (is_software_event(event))
1409 event->group_flags |= PERF_GROUP_SOFTWARE;
1410
889ff015
FW
1411 list = ctx_group_list(event, ctx);
1412 list_add_tail(&event->group_entry, list);
5c148194 1413 }
592903cd 1414
08309379 1415 if (is_cgroup_event(event))
e5d1367f 1416 ctx->nr_cgroups++;
e5d1367f 1417
cdd6c482
IM
1418 list_add_rcu(&event->event_entry, &ctx->event_list);
1419 ctx->nr_events++;
1420 if (event->attr.inherit_stat)
bfbd3381 1421 ctx->nr_stat++;
5a3126d4
PZ
1422
1423 ctx->generation++;
04289bb9
IM
1424}
1425
0231bb53
JO
1426/*
1427 * Initialize event state based on the perf_event_attr::disabled.
1428 */
1429static inline void perf_event__state_init(struct perf_event *event)
1430{
1431 event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF :
1432 PERF_EVENT_STATE_INACTIVE;
1433}
1434
a723968c 1435static void __perf_event_read_size(struct perf_event *event, int nr_siblings)
c320c7b7
ACM
1436{
1437 int entry = sizeof(u64); /* value */
1438 int size = 0;
1439 int nr = 1;
1440
1441 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1442 size += sizeof(u64);
1443
1444 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1445 size += sizeof(u64);
1446
1447 if (event->attr.read_format & PERF_FORMAT_ID)
1448 entry += sizeof(u64);
1449
1450 if (event->attr.read_format & PERF_FORMAT_GROUP) {
a723968c 1451 nr += nr_siblings;
c320c7b7
ACM
1452 size += sizeof(u64);
1453 }
1454
1455 size += entry * nr;
1456 event->read_size = size;
1457}
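/*
 * Illustrative sizing: a non-group event with read_format =
 * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_ID gets
 * entry = 8 (value) + 8 (id) = 16, size = 8 (time_enabled), nr = 1,
 * so event->read_size = 8 + 16 * 1 = 24 bytes.
 */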
1458
a723968c 1459static void __perf_event_header_size(struct perf_event *event, u64 sample_type)
c320c7b7
ACM
1460{
1461 struct perf_sample_data *data;
c320c7b7
ACM
1462 u16 size = 0;
1463
c320c7b7
ACM
1464 if (sample_type & PERF_SAMPLE_IP)
1465 size += sizeof(data->ip);
1466
6844c09d
ACM
1467 if (sample_type & PERF_SAMPLE_ADDR)
1468 size += sizeof(data->addr);
1469
1470 if (sample_type & PERF_SAMPLE_PERIOD)
1471 size += sizeof(data->period);
1472
c3feedf2
AK
1473 if (sample_type & PERF_SAMPLE_WEIGHT)
1474 size += sizeof(data->weight);
1475
6844c09d
ACM
1476 if (sample_type & PERF_SAMPLE_READ)
1477 size += event->read_size;
1478
d6be9ad6
SE
1479 if (sample_type & PERF_SAMPLE_DATA_SRC)
1480 size += sizeof(data->data_src.val);
1481
fdfbbd07
AK
1482 if (sample_type & PERF_SAMPLE_TRANSACTION)
1483 size += sizeof(data->txn);
1484
6844c09d
ACM
1485 event->header_size = size;
1486}
1487
a723968c
PZ
1488/*
1489 * Called at perf_event creation and when events are attached/detached from a
1490 * group.
1491 */
1492static void perf_event__header_size(struct perf_event *event)
1493{
1494 __perf_event_read_size(event,
1495 event->group_leader->nr_siblings);
1496 __perf_event_header_size(event, event->attr.sample_type);
1497}
1498
6844c09d
ACM
1499static void perf_event__id_header_size(struct perf_event *event)
1500{
1501 struct perf_sample_data *data;
1502 u64 sample_type = event->attr.sample_type;
1503 u16 size = 0;
1504
c320c7b7
ACM
1505 if (sample_type & PERF_SAMPLE_TID)
1506 size += sizeof(data->tid_entry);
1507
1508 if (sample_type & PERF_SAMPLE_TIME)
1509 size += sizeof(data->time);
1510
ff3d527c
AH
1511 if (sample_type & PERF_SAMPLE_IDENTIFIER)
1512 size += sizeof(data->id);
1513
c320c7b7
ACM
1514 if (sample_type & PERF_SAMPLE_ID)
1515 size += sizeof(data->id);
1516
1517 if (sample_type & PERF_SAMPLE_STREAM_ID)
1518 size += sizeof(data->stream_id);
1519
1520 if (sample_type & PERF_SAMPLE_CPU)
1521 size += sizeof(data->cpu_entry);
1522
6844c09d 1523 event->id_header_size = size;
c320c7b7
ACM
1524}
1525
a723968c
PZ
1526static bool perf_event_validate_size(struct perf_event *event)
1527{
1528 /*
1529 * The values computed here will be over-written when we actually
1530 * attach the event.
1531 */
1532 __perf_event_read_size(event, event->group_leader->nr_siblings + 1);
1533 __perf_event_header_size(event, event->attr.sample_type & ~PERF_SAMPLE_READ);
1534 perf_event__id_header_size(event);
1535
1536 /*
1537 * Sum the lot; should not exceed the 64k limit we have on records.
1538 * Conservative limit to allow for callchains and other variable fields.
1539 */
1540 if (event->read_size + event->header_size +
1541 event->id_header_size + sizeof(struct perf_event_header) >= 16*1024)
1542 return false;
1543
1544 return true;
1545}
1546
8a49542c
PZ
1547static void perf_group_attach(struct perf_event *event)
1548{
c320c7b7 1549 struct perf_event *group_leader = event->group_leader, *pos;
8a49542c 1550
74c3337c
PZ
1551 /*
1552 * We can have double attach due to group movement in perf_event_open.
1553 */
1554 if (event->attach_state & PERF_ATTACH_GROUP)
1555 return;
1556
8a49542c
PZ
1557 event->attach_state |= PERF_ATTACH_GROUP;
1558
1559 if (group_leader == event)
1560 return;
1561
652884fe
PZ
1562 WARN_ON_ONCE(group_leader->ctx != event->ctx);
1563
8a49542c
PZ
1564 if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
1565 !is_software_event(event))
1566 group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;
1567
1568 list_add_tail(&event->group_entry, &group_leader->sibling_list);
1569 group_leader->nr_siblings++;
c320c7b7
ACM
1570
1571 perf_event__header_size(group_leader);
1572
1573 list_for_each_entry(pos, &group_leader->sibling_list, group_entry)
1574 perf_event__header_size(pos);
8a49542c
PZ
1575}
1576
a63eaf34 1577/*
cdd6c482 1578 * Remove an event from the lists for its context.
fccc714b 1579 * Must be called with ctx->mutex and ctx->lock held.
a63eaf34 1580 */
04289bb9 1581static void
cdd6c482 1582list_del_event(struct perf_event *event, struct perf_event_context *ctx)
04289bb9 1583{
68cacd29 1584 struct perf_cpu_context *cpuctx;
652884fe
PZ
1585
1586 WARN_ON_ONCE(event->ctx != ctx);
1587 lockdep_assert_held(&ctx->lock);
1588
8a49542c
PZ
1589 /*
1590 * We can have double detach due to exit/hot-unplug + close.
1591 */
1592 if (!(event->attach_state & PERF_ATTACH_CONTEXT))
a63eaf34 1593 return;
8a49542c
PZ
1594
1595 event->attach_state &= ~PERF_ATTACH_CONTEXT;
1596
68cacd29 1597 if (is_cgroup_event(event)) {
e5d1367f 1598 ctx->nr_cgroups--;
70a01657
PZ
1599 /*
1600 * Because cgroup events are always per-cpu events, this will
1601 * always be called from the right CPU.
1602 */
68cacd29
SE
1603 cpuctx = __get_cpu_context(ctx);
1604 /*
70a01657
PZ
1605 * If there are no more cgroup events then clear cgrp to avoid
1606 * stale pointer in update_cgrp_time_from_cpuctx().
68cacd29
SE
1607 */
1608 if (!ctx->nr_cgroups)
1609 cpuctx->cgrp = NULL;
1610 }
e5d1367f 1611
cdd6c482
IM
1612 ctx->nr_events--;
1613 if (event->attr.inherit_stat)
bfbd3381 1614 ctx->nr_stat--;
8bc20959 1615
cdd6c482 1616 list_del_rcu(&event->event_entry);
04289bb9 1617
8a49542c
PZ
1618 if (event->group_leader == event)
1619 list_del_init(&event->group_entry);
5c148194 1620
96c21a46 1621 update_group_times(event);
b2e74a26
SE
1622
1623 /*
1624 * If event was in error state, then keep it
1625 * that way, otherwise bogus counts will be
1626 * returned on read(). The only way to get out
1627 * of error state is by explicit re-enabling
1628 * of the event
1629 */
1630 if (event->state > PERF_EVENT_STATE_OFF)
1631 event->state = PERF_EVENT_STATE_OFF;
5a3126d4
PZ
1632
1633 ctx->generation++;
050735b0
PZ
1634}
1635
8a49542c 1636static void perf_group_detach(struct perf_event *event)
050735b0
PZ
1637{
1638 struct perf_event *sibling, *tmp;
8a49542c
PZ
1639 struct list_head *list = NULL;
1640
1641 /*
1642 * We can have double detach due to exit/hot-unplug + close.
1643 */
1644 if (!(event->attach_state & PERF_ATTACH_GROUP))
1645 return;
1646
1647 event->attach_state &= ~PERF_ATTACH_GROUP;
1648
1649 /*
1650 * If this is a sibling, remove it from its group.
1651 */
1652 if (event->group_leader != event) {
1653 list_del_init(&event->group_entry);
1654 event->group_leader->nr_siblings--;
c320c7b7 1655 goto out;
8a49542c
PZ
1656 }
1657
1658 if (!list_empty(&event->group_entry))
1659 list = &event->group_entry;
2e2af50b 1660
04289bb9 1661 /*
cdd6c482
IM
1662 * If this was a group event with sibling events then
1663 * upgrade the siblings to singleton events by adding them
8a49542c 1664 * to whatever list we are on.
04289bb9 1665 */
cdd6c482 1666 list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
8a49542c
PZ
1667 if (list)
1668 list_move_tail(&sibling->group_entry, list);
04289bb9 1669 sibling->group_leader = sibling;
d6f962b5
FW
1670
1671 /* Inherit group flags from the previous leader */
1672 sibling->group_flags = event->group_flags;
652884fe
PZ
1673
1674 WARN_ON_ONCE(sibling->ctx != event->ctx);
04289bb9 1675 }
c320c7b7
ACM
1676
1677out:
1678 perf_event__header_size(event->group_leader);
1679
1680 list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry)
1681 perf_event__header_size(tmp);
04289bb9
IM
1682}
1683
fadfe7be
JO
1684static bool is_orphaned_event(struct perf_event *event)
1685{
a69b0ca4 1686 return event->state == PERF_EVENT_STATE_DEAD;
fadfe7be
JO
1687}
1688
2c81a647 1689static inline int __pmu_filter_match(struct perf_event *event)
66eb579e
MR
1690{
1691 struct pmu *pmu = event->pmu;
1692 return pmu->filter_match ? pmu->filter_match(event) : 1;
1693}
1694
2c81a647
MR
1695/*
1696 * Check whether we should attempt to schedule an event group based on
1697 * PMU-specific filtering. An event group can consist of HW and SW events,
1698 * potentially with a SW leader, so we must check all the filters, to
1699 * determine whether a group is schedulable:
1700 */
1701static inline int pmu_filter_match(struct perf_event *event)
1702{
1703 struct perf_event *child;
1704
1705 if (!__pmu_filter_match(event))
1706 return 0;
1707
1708 list_for_each_entry(child, &event->sibling_list, group_entry) {
1709 if (!__pmu_filter_match(child))
1710 return 0;
1711 }
1712
1713 return 1;
1714}
1715
fa66f07a
SE
1716static inline int
1717event_filter_match(struct perf_event *event)
1718{
e5d1367f 1719 return (event->cpu == -1 || event->cpu == smp_processor_id())
66eb579e 1720 && perf_cgroup_match(event) && pmu_filter_match(event);
fa66f07a
SE
1721}
1722
9ffcfa6f
SE
1723static void
1724event_sched_out(struct perf_event *event,
3b6f9e5c 1725 struct perf_cpu_context *cpuctx,
cdd6c482 1726 struct perf_event_context *ctx)
3b6f9e5c 1727{
4158755d 1728 u64 tstamp = perf_event_time(event);
fa66f07a 1729 u64 delta;
652884fe
PZ
1730
1731 WARN_ON_ONCE(event->ctx != ctx);
1732 lockdep_assert_held(&ctx->lock);
1733
fa66f07a
SE
1734 /*
1735 * An event which could not be activated because of
1736 * filter mismatch still needs to have its timings
 1737 * maintained, otherwise bogus information is returned
1738 * via read() for time_enabled, time_running:
1739 */
1740 if (event->state == PERF_EVENT_STATE_INACTIVE
1741 && !event_filter_match(event)) {
e5d1367f 1742 delta = tstamp - event->tstamp_stopped;
fa66f07a 1743 event->tstamp_running += delta;
4158755d 1744 event->tstamp_stopped = tstamp;
fa66f07a
SE
1745 }
1746
cdd6c482 1747 if (event->state != PERF_EVENT_STATE_ACTIVE)
9ffcfa6f 1748 return;
3b6f9e5c 1749
44377277
AS
1750 perf_pmu_disable(event->pmu);
1751
28a967c3
PZ
1752 event->tstamp_stopped = tstamp;
1753 event->pmu->del(event, 0);
1754 event->oncpu = -1;
cdd6c482
IM
1755 event->state = PERF_EVENT_STATE_INACTIVE;
1756 if (event->pending_disable) {
1757 event->pending_disable = 0;
1758 event->state = PERF_EVENT_STATE_OFF;
970892a9 1759 }
3b6f9e5c 1760
cdd6c482 1761 if (!is_software_event(event))
3b6f9e5c 1762 cpuctx->active_oncpu--;
2fde4f94
MR
1763 if (!--ctx->nr_active)
1764 perf_event_ctx_deactivate(ctx);
0f5a2601
PZ
1765 if (event->attr.freq && event->attr.sample_freq)
1766 ctx->nr_freq--;
cdd6c482 1767 if (event->attr.exclusive || !cpuctx->active_oncpu)
3b6f9e5c 1768 cpuctx->exclusive = 0;
44377277
AS
1769
1770 perf_pmu_enable(event->pmu);
3b6f9e5c
PM
1771}
1772
d859e29f 1773static void
cdd6c482 1774group_sched_out(struct perf_event *group_event,
d859e29f 1775 struct perf_cpu_context *cpuctx,
cdd6c482 1776 struct perf_event_context *ctx)
d859e29f 1777{
cdd6c482 1778 struct perf_event *event;
fa66f07a 1779 int state = group_event->state;
d859e29f 1780
cdd6c482 1781 event_sched_out(group_event, cpuctx, ctx);
d859e29f
PM
1782
1783 /*
1784 * Schedule out siblings (if any):
1785 */
cdd6c482
IM
1786 list_for_each_entry(event, &group_event->sibling_list, group_entry)
1787 event_sched_out(event, cpuctx, ctx);
d859e29f 1788
fa66f07a 1789 if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
d859e29f
PM
1790 cpuctx->exclusive = 0;
1791}
1792
45a0e07a 1793#define DETACH_GROUP 0x01UL
0017960f 1794
0793a61d 1795/*
cdd6c482 1796 * Cross CPU call to remove a performance event
0793a61d 1797 *
cdd6c482 1798 * We disable the event on the hardware level first. After that we
0793a61d
TG
1799 * remove it from the context list.
1800 */
fae3fde6
PZ
1801static void
1802__perf_remove_from_context(struct perf_event *event,
1803 struct perf_cpu_context *cpuctx,
1804 struct perf_event_context *ctx,
1805 void *info)
0793a61d 1806{
45a0e07a 1807 unsigned long flags = (unsigned long)info;
0793a61d 1808
cdd6c482 1809 event_sched_out(event, cpuctx, ctx);
45a0e07a 1810 if (flags & DETACH_GROUP)
46ce0fe9 1811 perf_group_detach(event);
cdd6c482 1812 list_del_event(event, ctx);
39a43640
PZ
1813
1814 if (!ctx->nr_events && ctx->is_active) {
64ce3126 1815 ctx->is_active = 0;
39a43640
PZ
1816 if (ctx->task) {
1817 WARN_ON_ONCE(cpuctx->task_ctx != ctx);
1818 cpuctx->task_ctx = NULL;
1819 }
64ce3126 1820 }
0793a61d
TG
1821}
1822
0793a61d 1823/*
cdd6c482 1824 * Remove the event from a task's (or a CPU's) list of events.
0793a61d 1825 *
cdd6c482
IM
1826 * If event->ctx is a cloned context, callers must make sure that
1827 * every task struct that event->ctx->task could possibly point to
c93f7669
PM
1828 * remains valid. This is OK when called from perf_release since
1829 * that only calls us on the top-level context, which can't be a clone.
cdd6c482 1830 * When called from perf_event_exit_task, it's OK because the
c93f7669 1831 * context has been detached from its task.
0793a61d 1832 */
45a0e07a 1833static void perf_remove_from_context(struct perf_event *event, unsigned long flags)
0793a61d 1834{
fae3fde6 1835 lockdep_assert_held(&event->ctx->mutex);
0793a61d 1836
45a0e07a 1837 event_function_call(event, __perf_remove_from_context, (void *)flags);
0793a61d
TG
1838}
1839
d859e29f 1840/*
cdd6c482 1841 * Cross CPU call to disable a performance event
d859e29f 1842 */
fae3fde6
PZ
1843static void __perf_event_disable(struct perf_event *event,
1844 struct perf_cpu_context *cpuctx,
1845 struct perf_event_context *ctx,
1846 void *info)
7b648018 1847{
fae3fde6
PZ
1848 if (event->state < PERF_EVENT_STATE_INACTIVE)
1849 return;
7b648018 1850
fae3fde6
PZ
1851 update_context_time(ctx);
1852 update_cgrp_time_from_event(event);
1853 update_group_times(event);
1854 if (event == event->group_leader)
1855 group_sched_out(event, cpuctx, ctx);
1856 else
1857 event_sched_out(event, cpuctx, ctx);
1858 event->state = PERF_EVENT_STATE_OFF;
7b648018
PZ
1859}
1860
d859e29f 1861/*
cdd6c482 1862 * Disable an event.
c93f7669 1863 *
cdd6c482
IM
1864 * If event->ctx is a cloned context, callers must make sure that
1865 * every task struct that event->ctx->task could possibly point to
c93f7669 1866 * remains valid. This condition is satisfied when called through
cdd6c482
IM
1867 * perf_event_for_each_child or perf_event_for_each because they
1868 * hold the top-level event's child_mutex, so any descendant that
8ba289b8
PZ
1869 * goes to exit will block in perf_event_exit_event().
1870 *
cdd6c482 1871 * When called from perf_pending_event it's OK because event->ctx
c93f7669 1872 * is the current context on this CPU and preemption is disabled,
cdd6c482 1873 * hence we can't get into perf_event_task_sched_out for this context.
d859e29f 1874 */
f63a8daa 1875static void _perf_event_disable(struct perf_event *event)
d859e29f 1876{
cdd6c482 1877 struct perf_event_context *ctx = event->ctx;
d859e29f 1878
e625cce1 1879 raw_spin_lock_irq(&ctx->lock);
7b648018 1880 if (event->state <= PERF_EVENT_STATE_OFF) {
e625cce1 1881 raw_spin_unlock_irq(&ctx->lock);
7b648018 1882 return;
53cfbf59 1883 }
e625cce1 1884 raw_spin_unlock_irq(&ctx->lock);
7b648018 1885
fae3fde6
PZ
1886 event_function_call(event, __perf_event_disable, NULL);
1887}
1888
1889void perf_event_disable_local(struct perf_event *event)
1890{
1891 event_function_local(event, __perf_event_disable, NULL);
d859e29f 1892}
f63a8daa
PZ
1893
1894/*
1895 * Strictly speaking kernel users cannot create groups and therefore this
1896 * interface does not need the perf_event_ctx_lock() magic.
1897 */
1898void perf_event_disable(struct perf_event *event)
1899{
1900 struct perf_event_context *ctx;
1901
1902 ctx = perf_event_ctx_lock(event);
1903 _perf_event_disable(event);
1904 perf_event_ctx_unlock(event, ctx);
1905}
dcfce4a0 1906EXPORT_SYMBOL_GPL(perf_event_disable);
d859e29f 1907
e5d1367f
SE
1908static void perf_set_shadow_time(struct perf_event *event,
1909 struct perf_event_context *ctx,
1910 u64 tstamp)
1911{
1912 /*
1913 * use the correct time source for the time snapshot
1914 *
1915 * We could get by without this by leveraging the
1916 * fact that to get to this function, the caller
1917 * has most likely already called update_context_time()
 1918 * and update_cgrp_time_xx() and thus both timestamps
 1919 * are identical (or very close). Given that tstamp is
1920 * already adjusted for cgroup, we could say that:
1921 * tstamp - ctx->timestamp
1922 * is equivalent to
1923 * tstamp - cgrp->timestamp.
1924 *
1925 * Then, in perf_output_read(), the calculation would
1926 * work with no changes because:
1927 * - event is guaranteed scheduled in
1928 * - no scheduled out in between
1929 * - thus the timestamp would be the same
1930 *
1931 * But this is a bit hairy.
1932 *
1933 * So instead, we have an explicit cgroup call to remain
 1934 * within the time source all along. We believe it
1935 * is cleaner and simpler to understand.
1936 */
1937 if (is_cgroup_event(event))
1938 perf_cgroup_set_shadow_time(event, tstamp);
1939 else
1940 event->shadow_ctx_time = tstamp - ctx->timestamp;
1941}
1942
4fe757dd
PZ
1943#define MAX_INTERRUPTS (~0ULL)
1944
1945static void perf_log_throttle(struct perf_event *event, int enable);
ec0d7729 1946static void perf_log_itrace_start(struct perf_event *event);
4fe757dd 1947
235c7fc7 1948static int
9ffcfa6f 1949event_sched_in(struct perf_event *event,
235c7fc7 1950 struct perf_cpu_context *cpuctx,
6e37738a 1951 struct perf_event_context *ctx)
235c7fc7 1952{
4158755d 1953 u64 tstamp = perf_event_time(event);
44377277 1954 int ret = 0;
4158755d 1955
63342411
PZ
1956 lockdep_assert_held(&ctx->lock);
1957
cdd6c482 1958 if (event->state <= PERF_EVENT_STATE_OFF)
235c7fc7
IM
1959 return 0;
1960
95ff4ca2
AS
1961 WRITE_ONCE(event->oncpu, smp_processor_id());
1962 /*
1963 * Order event::oncpu write to happen before the ACTIVE state
1964 * is visible.
1965 */
1966 smp_wmb();
1967 WRITE_ONCE(event->state, PERF_EVENT_STATE_ACTIVE);
4fe757dd
PZ
1968
1969 /*
1970 * Unthrottle events, since we scheduled we might have missed several
1971 * ticks already, also for a heavily scheduling task there is little
1972 * guarantee it'll get a tick in a timely manner.
1973 */
1974 if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
1975 perf_log_throttle(event, 1);
1976 event->hw.interrupts = 0;
1977 }
1978
235c7fc7
IM
1979 /*
1980 * The new state must be visible before we turn it on in the hardware:
1981 */
1982 smp_wmb();
1983
44377277
AS
1984 perf_pmu_disable(event->pmu);
1985
72f669c0
SL
1986 perf_set_shadow_time(event, ctx, tstamp);
1987
ec0d7729
AS
1988 perf_log_itrace_start(event);
1989
a4eaf7f1 1990 if (event->pmu->add(event, PERF_EF_START)) {
cdd6c482
IM
1991 event->state = PERF_EVENT_STATE_INACTIVE;
1992 event->oncpu = -1;
44377277
AS
1993 ret = -EAGAIN;
1994 goto out;
235c7fc7
IM
1995 }
1996
00a2916f
PZ
1997 event->tstamp_running += tstamp - event->tstamp_stopped;
1998
cdd6c482 1999 if (!is_software_event(event))
3b6f9e5c 2000 cpuctx->active_oncpu++;
2fde4f94
MR
2001 if (!ctx->nr_active++)
2002 perf_event_ctx_activate(ctx);
0f5a2601
PZ
2003 if (event->attr.freq && event->attr.sample_freq)
2004 ctx->nr_freq++;
235c7fc7 2005
cdd6c482 2006 if (event->attr.exclusive)
3b6f9e5c
PM
2007 cpuctx->exclusive = 1;
2008
44377277
AS
2009out:
2010 perf_pmu_enable(event->pmu);
2011
2012 return ret;
235c7fc7
IM
2013}
2014
6751b71e 2015static int
cdd6c482 2016group_sched_in(struct perf_event *group_event,
6751b71e 2017 struct perf_cpu_context *cpuctx,
6e37738a 2018 struct perf_event_context *ctx)
6751b71e 2019{
6bde9b6c 2020 struct perf_event *event, *partial_group = NULL;
4a234593 2021 struct pmu *pmu = ctx->pmu;
d7842da4
SE
2022 u64 now = ctx->time;
2023 bool simulate = false;
6751b71e 2024
cdd6c482 2025 if (group_event->state == PERF_EVENT_STATE_OFF)
6751b71e
PM
2026 return 0;
2027
fbbe0701 2028 pmu->start_txn(pmu, PERF_PMU_TXN_ADD);
6bde9b6c 2029
9ffcfa6f 2030 if (event_sched_in(group_event, cpuctx, ctx)) {
ad5133b7 2031 pmu->cancel_txn(pmu);
272325c4 2032 perf_mux_hrtimer_restart(cpuctx);
6751b71e 2033 return -EAGAIN;
90151c35 2034 }
6751b71e
PM
2035
2036 /*
2037 * Schedule in siblings as one group (if any):
2038 */
cdd6c482 2039 list_for_each_entry(event, &group_event->sibling_list, group_entry) {
9ffcfa6f 2040 if (event_sched_in(event, cpuctx, ctx)) {
cdd6c482 2041 partial_group = event;
6751b71e
PM
2042 goto group_error;
2043 }
2044 }
2045
9ffcfa6f 2046 if (!pmu->commit_txn(pmu))
6e85158c 2047 return 0;
9ffcfa6f 2048
6751b71e
PM
2049group_error:
2050 /*
2051 * Groups can be scheduled in as one unit only, so undo any
2052 * partial group before returning:
d7842da4
SE
2053 * The events up to the failed event are scheduled out normally,
2054 * tstamp_stopped will be updated.
2055 *
2056 * The failed events and the remaining siblings need to have
2057 * their timings updated as if they had gone thru event_sched_in()
2058 * and event_sched_out(). This is required to get consistent timings
2059 * across the group. This also takes care of the case where the group
2060 * could never be scheduled by ensuring tstamp_stopped is set to mark
2061 * the time the event was actually stopped, such that time delta
2062 * calculation in update_event_times() is correct.
6751b71e 2063 */
cdd6c482
IM
2064 list_for_each_entry(event, &group_event->sibling_list, group_entry) {
2065 if (event == partial_group)
d7842da4
SE
2066 simulate = true;
2067
2068 if (simulate) {
2069 event->tstamp_running += now - event->tstamp_stopped;
2070 event->tstamp_stopped = now;
2071 } else {
2072 event_sched_out(event, cpuctx, ctx);
2073 }
6751b71e 2074 }
9ffcfa6f 2075 event_sched_out(group_event, cpuctx, ctx);
6751b71e 2076
ad5133b7 2077 pmu->cancel_txn(pmu);
90151c35 2078
272325c4 2079 perf_mux_hrtimer_restart(cpuctx);
9e630205 2080
6751b71e
PM
2081 return -EAGAIN;
2082}
2083
3b6f9e5c 2084/*
cdd6c482 2085 * Work out whether we can put this event group on the CPU now.
3b6f9e5c 2086 */
cdd6c482 2087static int group_can_go_on(struct perf_event *event,
3b6f9e5c
PM
2088 struct perf_cpu_context *cpuctx,
2089 int can_add_hw)
2090{
2091 /*
cdd6c482 2092 * Groups consisting entirely of software events can always go on.
3b6f9e5c 2093 */
d6f962b5 2094 if (event->group_flags & PERF_GROUP_SOFTWARE)
3b6f9e5c
PM
2095 return 1;
2096 /*
2097 * If an exclusive group is already on, no other hardware
cdd6c482 2098 * events can go on.
3b6f9e5c
PM
2099 */
2100 if (cpuctx->exclusive)
2101 return 0;
2102 /*
2103 * If this group is exclusive and there are already
cdd6c482 2104 * events on the CPU, it can't go on.
3b6f9e5c 2105 */
cdd6c482 2106 if (event->attr.exclusive && cpuctx->active_oncpu)
3b6f9e5c
PM
2107 return 0;
2108 /*
2109 * Otherwise, try to add it if all previous groups were able
2110 * to go on.
2111 */
2112 return can_add_hw;
2113}
2114
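/*
 * Illustrative example of the rules above (not part of the original code):
 * with an exclusive hardware group already ACTIVE on this CPU
 * (cpuctx->exclusive set), a second hardware group is refused, while a
 * group consisting entirely of software events (PERF_GROUP_SOFTWARE)
 * still goes on because it consumes no hardware counters.
 */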
cdd6c482
IM
2115static void add_event_to_ctx(struct perf_event *event,
2116 struct perf_event_context *ctx)
53cfbf59 2117{
4158755d
SE
2118 u64 tstamp = perf_event_time(event);
2119
cdd6c482 2120 list_add_event(event, ctx);
8a49542c 2121 perf_group_attach(event);
4158755d
SE
2122 event->tstamp_enabled = tstamp;
2123 event->tstamp_running = tstamp;
2124 event->tstamp_stopped = tstamp;
53cfbf59
PM
2125}
2126
bd2afa49
PZ
2127static void ctx_sched_out(struct perf_event_context *ctx,
2128 struct perf_cpu_context *cpuctx,
2129 enum event_type_t event_type);
2c29ef0f
PZ
2130static void
2131ctx_sched_in(struct perf_event_context *ctx,
2132 struct perf_cpu_context *cpuctx,
2133 enum event_type_t event_type,
2134 struct task_struct *task);
fe4b04fa 2135
bd2afa49
PZ
2136static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
2137 struct perf_event_context *ctx)
2138{
2139 if (!cpuctx->task_ctx)
2140 return;
2141
2142 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
2143 return;
2144
2145 ctx_sched_out(ctx, cpuctx, EVENT_ALL);
2146}
2147
dce5855b
PZ
2148static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
2149 struct perf_event_context *ctx,
2150 struct task_struct *task)
2151{
2152 cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
2153 if (ctx)
2154 ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
2155 cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
2156 if (ctx)
2157 ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
2158}
2159
3e349507
PZ
2160static void ctx_resched(struct perf_cpu_context *cpuctx,
2161 struct perf_event_context *task_ctx)
0017960f 2162{
3e349507
PZ
2163 perf_pmu_disable(cpuctx->ctx.pmu);
2164 if (task_ctx)
2165 task_ctx_sched_out(cpuctx, task_ctx);
2166 cpu_ctx_sched_out(cpuctx, EVENT_ALL);
2167 perf_event_sched_in(cpuctx, task_ctx, current);
2168 perf_pmu_enable(cpuctx->ctx.pmu);
0017960f
PZ
2169}
2170
0793a61d 2171/*
cdd6c482 2172 * Cross CPU call to install and enable a performance event
682076ae 2173 *
a096309b
PZ
2174 * Very similar to remote_function() + event_function() but cannot assume that
2175 * things like ctx->is_active and cpuctx->task_ctx are set.
0793a61d 2176 */
fe4b04fa 2177static int __perf_install_in_context(void *info)
0793a61d 2178{
a096309b
PZ
2179 struct perf_event *event = info;
2180 struct perf_event_context *ctx = event->ctx;
108b02cf 2181 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
2c29ef0f 2182 struct perf_event_context *task_ctx = cpuctx->task_ctx;
a096309b
PZ
2183 bool activate = true;
2184 int ret = 0;
0793a61d 2185
63b6da39 2186 raw_spin_lock(&cpuctx->ctx.lock);
39a43640 2187 if (ctx->task) {
b58f6b0d
PZ
2188 raw_spin_lock(&ctx->lock);
2189 task_ctx = ctx;
a096309b
PZ
2190
2191 /* If we're on the wrong CPU, try again */
2192 if (task_cpu(ctx->task) != smp_processor_id()) {
2193 ret = -ESRCH;
63b6da39 2194 goto unlock;
a096309b 2195 }
b58f6b0d 2196
39a43640 2197 /*
a096309b
PZ
2198 * If we're on the right CPU, see if the task we target is
 2199 * current; if not, we don't have to activate the ctx, a future
2200 * context switch will do that for us.
39a43640 2201 */
a096309b
PZ
2202 if (ctx->task != current)
2203 activate = false;
2204 else
2205 WARN_ON_ONCE(cpuctx->task_ctx && cpuctx->task_ctx != ctx);
2206
63b6da39
PZ
2207 } else if (task_ctx) {
2208 raw_spin_lock(&task_ctx->lock);
2c29ef0f 2209 }
b58f6b0d 2210
a096309b
PZ
2211 if (activate) {
2212 ctx_sched_out(ctx, cpuctx, EVENT_TIME);
2213 add_event_to_ctx(event, ctx);
2214 ctx_resched(cpuctx, task_ctx);
2215 } else {
2216 add_event_to_ctx(event, ctx);
2217 }
2218
63b6da39 2219unlock:
2c29ef0f 2220 perf_ctx_unlock(cpuctx, task_ctx);
fe4b04fa 2221
a096309b 2222 return ret;
0793a61d
TG
2223}
2224
2225/*
a096309b
PZ
2226 * Attach a performance event to a context.
2227 *
2228 * Very similar to event_function_call, see comment there.
0793a61d
TG
2229 */
2230static void
cdd6c482
IM
2231perf_install_in_context(struct perf_event_context *ctx,
2232 struct perf_event *event,
0793a61d
TG
2233 int cpu)
2234{
a096309b 2235 struct task_struct *task = READ_ONCE(ctx->task);
39a43640 2236
fe4b04fa
PZ
2237 lockdep_assert_held(&ctx->mutex);
2238
c3f00c70 2239 event->ctx = ctx;
0cda4c02
YZ
2240 if (event->cpu != -1)
2241 event->cpu = cpu;
c3f00c70 2242
a096309b
PZ
2243 if (!task) {
2244 cpu_function_call(cpu, __perf_install_in_context, event);
2245 return;
2246 }
2247
2248 /*
2249 * Should not happen, we validate the ctx is still alive before calling.
2250 */
2251 if (WARN_ON_ONCE(task == TASK_TOMBSTONE))
2252 return;
2253
39a43640
PZ
2254 /*
2255 * Installing events is tricky because we cannot rely on ctx->is_active
2256 * to be set in case this is the nr_events 0 -> 1 transition.
39a43640 2257 */
a096309b 2258again:
63b6da39 2259 /*
a096309b
PZ
2260 * Cannot use task_function_call() because we need to run on the task's
 2261 * CPU regardless of whether it's current or not.
63b6da39 2262 */
a096309b
PZ
2263 if (!cpu_function_call(task_cpu(task), __perf_install_in_context, event))
2264 return;
2265
2266 raw_spin_lock_irq(&ctx->lock);
2267 task = ctx->task;
84c4e620 2268 if (WARN_ON_ONCE(task == TASK_TOMBSTONE)) {
a096309b
PZ
2269 /*
2270 * Cannot happen because we already checked above (which also
2271 * cannot happen), and we hold ctx->mutex, which serializes us
2272 * against perf_event_exit_task_context().
2273 */
63b6da39
PZ
2274 raw_spin_unlock_irq(&ctx->lock);
2275 return;
2276 }
39a43640 2277 raw_spin_unlock_irq(&ctx->lock);
39a43640 2278 /*
a096309b
PZ
2279 * Since !ctx->is_active doesn't mean anything, we must IPI
2280 * unconditionally.
39a43640 2281 */
a096309b 2282 goto again;
0793a61d
TG
2283}
2284
fa289bec 2285/*
cdd6c482 2286 * Put an event into inactive state and update time fields.
fa289bec
PM
2287 * Enabling the leader of a group effectively enables all
2288 * the group members that aren't explicitly disabled, so we
2289 * have to update their ->tstamp_enabled also.
2290 * Note: this works for group members as well as group leaders
2291 * since the non-leader members' sibling_lists will be empty.
2292 */
1d9b482e 2293static void __perf_event_mark_enabled(struct perf_event *event)
fa289bec 2294{
cdd6c482 2295 struct perf_event *sub;
4158755d 2296 u64 tstamp = perf_event_time(event);
fa289bec 2297
cdd6c482 2298 event->state = PERF_EVENT_STATE_INACTIVE;
4158755d 2299 event->tstamp_enabled = tstamp - event->total_time_enabled;
9ed6060d 2300 list_for_each_entry(sub, &event->sibling_list, group_entry) {
4158755d
SE
2301 if (sub->state >= PERF_EVENT_STATE_INACTIVE)
2302 sub->tstamp_enabled = tstamp - sub->total_time_enabled;
9ed6060d 2303 }
fa289bec
PM
2304}
2305
d859e29f 2306/*
cdd6c482 2307 * Cross CPU call to enable a performance event
d859e29f 2308 */
fae3fde6
PZ
2309static void __perf_event_enable(struct perf_event *event,
2310 struct perf_cpu_context *cpuctx,
2311 struct perf_event_context *ctx,
2312 void *info)
04289bb9 2313{
cdd6c482 2314 struct perf_event *leader = event->group_leader;
fae3fde6 2315 struct perf_event_context *task_ctx;
04289bb9 2316
6e801e01
PZ
2317 if (event->state >= PERF_EVENT_STATE_INACTIVE ||
2318 event->state <= PERF_EVENT_STATE_ERROR)
fae3fde6 2319 return;
3cbed429 2320
bd2afa49
PZ
2321 if (ctx->is_active)
2322 ctx_sched_out(ctx, cpuctx, EVENT_TIME);
2323
1d9b482e 2324 __perf_event_mark_enabled(event);
04289bb9 2325
fae3fde6
PZ
2326 if (!ctx->is_active)
2327 return;
2328
e5d1367f 2329 if (!event_filter_match(event)) {
bd2afa49 2330 if (is_cgroup_event(event))
e5d1367f 2331 perf_cgroup_defer_enabled(event);
bd2afa49 2332 ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
fae3fde6 2333 return;
e5d1367f 2334 }
f4c4176f 2335
04289bb9 2336 /*
cdd6c482 2337 * If the event is in a group and isn't the group leader,
d859e29f 2338 * then don't put it on unless the group is on.
04289bb9 2339 */
bd2afa49
PZ
2340 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) {
2341 ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
fae3fde6 2342 return;
bd2afa49 2343 }
fe4b04fa 2344
fae3fde6
PZ
2345 task_ctx = cpuctx->task_ctx;
2346 if (ctx->task)
2347 WARN_ON_ONCE(task_ctx != ctx);
d859e29f 2348
fae3fde6 2349 ctx_resched(cpuctx, task_ctx);
7b648018
PZ
2350}
2351
d859e29f 2352/*
cdd6c482 2353 * Enable an event.
c93f7669 2354 *
cdd6c482
IM
2355 * If event->ctx is a cloned context, callers must make sure that
2356 * every task struct that event->ctx->task could possibly point to
c93f7669 2357 * remains valid. This condition is satisfied when called through
cdd6c482
IM
2358 * perf_event_for_each_child or perf_event_for_each as described
2359 * for perf_event_disable.
d859e29f 2360 */
f63a8daa 2361static void _perf_event_enable(struct perf_event *event)
d859e29f 2362{
cdd6c482 2363 struct perf_event_context *ctx = event->ctx;
d859e29f 2364
7b648018 2365 raw_spin_lock_irq(&ctx->lock);
6e801e01
PZ
2366 if (event->state >= PERF_EVENT_STATE_INACTIVE ||
2367 event->state < PERF_EVENT_STATE_ERROR) {
7b648018 2368 raw_spin_unlock_irq(&ctx->lock);
d859e29f
PM
2369 return;
2370 }
2371
d859e29f 2372 /*
cdd6c482 2373 * If the event is in error state, clear that first.
7b648018
PZ
2374 *
2375 * That way, if we see the event in error state below, we know that it
2376 * has gone back into error state, as distinct from the task having
2377 * been scheduled away before the cross-call arrived.
d859e29f 2378 */
cdd6c482
IM
2379 if (event->state == PERF_EVENT_STATE_ERROR)
2380 event->state = PERF_EVENT_STATE_OFF;
e625cce1 2381 raw_spin_unlock_irq(&ctx->lock);
fe4b04fa 2382
fae3fde6 2383 event_function_call(event, __perf_event_enable, NULL);
d859e29f 2384}
f63a8daa
PZ
2385
2386/*
2387 * See perf_event_disable();
2388 */
2389void perf_event_enable(struct perf_event *event)
2390{
2391 struct perf_event_context *ctx;
2392
2393 ctx = perf_event_ctx_lock(event);
2394 _perf_event_enable(event);
2395 perf_event_ctx_unlock(event, ctx);
2396}
dcfce4a0 2397EXPORT_SYMBOL_GPL(perf_event_enable);
d859e29f 2398
375637bc
AS
2399struct stop_event_data {
2400 struct perf_event *event;
2401 unsigned int restart;
2402};
2403
95ff4ca2
AS
2404static int __perf_event_stop(void *info)
2405{
375637bc
AS
2406 struct stop_event_data *sd = info;
2407 struct perf_event *event = sd->event;
95ff4ca2 2408
375637bc 2409 /* if it's already INACTIVE, do nothing */
95ff4ca2
AS
2410 if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE)
2411 return 0;
2412
2413 /* matches smp_wmb() in event_sched_in() */
2414 smp_rmb();
2415
2416 /*
2417 * There is a window with interrupts enabled before we get here,
2418 * so we need to check again lest we try to stop another CPU's event.
2419 */
2420 if (READ_ONCE(event->oncpu) != smp_processor_id())
2421 return -EAGAIN;
2422
2423 event->pmu->stop(event, PERF_EF_UPDATE);
2424
375637bc
AS
2425 /*
2426 * May race with the actual stop (through perf_pmu_output_stop()),
2427 * but it is only used for events with AUX ring buffer, and such
2428 * events will refuse to restart because of rb::aux_mmap_count==0,
2429 * see comments in perf_aux_output_begin().
2430 *
 2431 * Since this is happening on an event-local CPU, no trace is lost
2432 * while restarting.
2433 */
2434 if (sd->restart)
2435 event->pmu->start(event, PERF_EF_START);
2436
95ff4ca2
AS
2437 return 0;
2438}
2439
375637bc
AS
2440static int perf_event_restart(struct perf_event *event)
2441{
2442 struct stop_event_data sd = {
2443 .event = event,
2444 .restart = 1,
2445 };
2446 int ret = 0;
2447
2448 do {
2449 if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE)
2450 return 0;
2451
2452 /* matches smp_wmb() in event_sched_in() */
2453 smp_rmb();
2454
2455 /*
2456 * We only want to restart ACTIVE events, so if the event goes
2457 * inactive here (event->oncpu==-1), there's nothing more to do;
2458 * fall through with ret==-ENXIO.
2459 */
2460 ret = cpu_function_call(READ_ONCE(event->oncpu),
2461 __perf_event_stop, &sd);
2462 } while (ret == -EAGAIN);
2463
2464 return ret;
2465}
2466
2467/*
 2468 * In order to contain the amount of racy and tricky code in the address
 2469 * filter configuration management, it is a two-part process:
2470 *
2471 * (p1) when userspace mappings change as a result of (1) or (2) or (3) below,
2472 * we update the addresses of corresponding vmas in
2473 * event::addr_filters_offs array and bump the event::addr_filters_gen;
2474 * (p2) when an event is scheduled in (pmu::add), it calls
2475 * perf_event_addr_filters_sync() which calls pmu::addr_filters_sync()
2476 * if the generation has changed since the previous call.
2477 *
2478 * If (p1) happens while the event is active, we restart it to force (p2).
2479 *
2480 * (1) perf_addr_filters_apply(): adjusting filters' offsets based on
2481 * pre-existing mappings, called once when new filters arrive via SET_FILTER
2482 * ioctl;
2483 * (2) perf_addr_filters_adjust(): adjusting filters' offsets based on newly
2484 * registered mapping, called for every new mmap(), with mm::mmap_sem down
2485 * for reading;
2486 * (3) perf_event_addr_filters_exec(): clearing filters' offsets in the process
2487 * of exec.
2488 */
2489void perf_event_addr_filters_sync(struct perf_event *event)
2490{
2491 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
2492
2493 if (!has_addr_filter(event))
2494 return;
2495
2496 raw_spin_lock(&ifh->lock);
2497 if (event->addr_filters_gen != event->hw.addr_filters_gen) {
2498 event->pmu->addr_filters_sync(event);
2499 event->hw.addr_filters_gen = event->addr_filters_gen;
2500 }
2501 raw_spin_unlock(&ifh->lock);
2502}
2503EXPORT_SYMBOL_GPL(perf_event_addr_filters_sync);
2504
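/*
 * Illustrative sketch (not taken from this file; the "example_pmu" names
 * are hypothetical): a PMU driver that supports address filters would
 * typically call perf_event_addr_filters_sync() from its pmu::add hook so
 * that step (p2) above picks up any generation bump made in (p1).
 */
static int example_pmu_add(struct perf_event *event, int flags)
{
	/* re-program the hardware address range filters if they changed */
	perf_event_addr_filters_sync(event);

	/* driver-specific counter programming would follow here */
	return 0;
}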
f63a8daa 2505static int _perf_event_refresh(struct perf_event *event, int refresh)
79f14641 2506{
2023b359 2507 /*
cdd6c482 2508 * not supported on inherited events
2023b359 2509 */
2e939d1d 2510 if (event->attr.inherit || !is_sampling_event(event))
2023b359
PZ
2511 return -EINVAL;
2512
cdd6c482 2513 atomic_add(refresh, &event->event_limit);
f63a8daa 2514 _perf_event_enable(event);
2023b359
PZ
2515
2516 return 0;
79f14641 2517}
f63a8daa
PZ
2518
2519/*
2520 * See perf_event_disable()
2521 */
2522int perf_event_refresh(struct perf_event *event, int refresh)
2523{
2524 struct perf_event_context *ctx;
2525 int ret;
2526
2527 ctx = perf_event_ctx_lock(event);
2528 ret = _perf_event_refresh(event, refresh);
2529 perf_event_ctx_unlock(event, ctx);
2530
2531 return ret;
2532}
26ca5c11 2533EXPORT_SYMBOL_GPL(perf_event_refresh);
79f14641 2534
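/*
 * Illustrative usage sketch (an assumption, not part of this file): a
 * caller that wants an event to stay enabled for only the next N overflow
 * interrupts can bump the event limit and enable it in one go:
 *
 *	err = perf_event_refresh(event, N);
 *
 * Each overflow then decrements event->event_limit, and the event is
 * disabled again once the limit reaches zero.
 */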
5b0311e1
FW
2535static void ctx_sched_out(struct perf_event_context *ctx,
2536 struct perf_cpu_context *cpuctx,
2537 enum event_type_t event_type)
235c7fc7 2538{
db24d33e 2539 int is_active = ctx->is_active;
c994d613 2540 struct perf_event *event;
235c7fc7 2541
c994d613 2542 lockdep_assert_held(&ctx->lock);
235c7fc7 2543
39a43640
PZ
2544 if (likely(!ctx->nr_events)) {
2545 /*
2546 * See __perf_remove_from_context().
2547 */
2548 WARN_ON_ONCE(ctx->is_active);
2549 if (ctx->task)
2550 WARN_ON_ONCE(cpuctx->task_ctx);
facc4307 2551 return;
39a43640
PZ
2552 }
2553
db24d33e 2554 ctx->is_active &= ~event_type;
3cbaa590
PZ
2555 if (!(ctx->is_active & EVENT_ALL))
2556 ctx->is_active = 0;
2557
63e30d3e
PZ
2558 if (ctx->task) {
2559 WARN_ON_ONCE(cpuctx->task_ctx != ctx);
2560 if (!ctx->is_active)
2561 cpuctx->task_ctx = NULL;
2562 }
facc4307 2563
8fdc6539
PZ
2564 /*
2565 * Always update time if it was set; not only when it changes.
2566 * Otherwise we can 'forget' to update time for any but the last
2567 * context we sched out. For example:
2568 *
2569 * ctx_sched_out(.event_type = EVENT_FLEXIBLE)
2570 * ctx_sched_out(.event_type = EVENT_PINNED)
2571 *
2572 * would only update time for the pinned events.
2573 */
3cbaa590
PZ
2574 if (is_active & EVENT_TIME) {
2575 /* update (and stop) ctx time */
2576 update_context_time(ctx);
2577 update_cgrp_time_from_cpuctx(cpuctx);
2578 }
2579
8fdc6539
PZ
2580 is_active ^= ctx->is_active; /* changed bits */
2581
3cbaa590 2582 if (!ctx->nr_active || !(is_active & EVENT_ALL))
facc4307 2583 return;
5b0311e1 2584
075e0b00 2585 perf_pmu_disable(ctx->pmu);
3cbaa590 2586 if (is_active & EVENT_PINNED) {
889ff015
FW
2587 list_for_each_entry(event, &ctx->pinned_groups, group_entry)
2588 group_sched_out(event, cpuctx, ctx);
9ed6060d 2589 }
889ff015 2590
3cbaa590 2591 if (is_active & EVENT_FLEXIBLE) {
889ff015 2592 list_for_each_entry(event, &ctx->flexible_groups, group_entry)
8c9ed8e1 2593 group_sched_out(event, cpuctx, ctx);
9ed6060d 2594 }
1b9a644f 2595 perf_pmu_enable(ctx->pmu);
235c7fc7
IM
2596}
2597
564c2b21 2598/*
5a3126d4
PZ
2599 * Test whether two contexts are equivalent, i.e. whether they have both been
2600 * cloned from the same version of the same context.
2601 *
2602 * Equivalence is measured using a generation number in the context that is
2603 * incremented on each modification to it; see unclone_ctx(), list_add_event()
2604 * and list_del_event().
564c2b21 2605 */
cdd6c482
IM
2606static int context_equiv(struct perf_event_context *ctx1,
2607 struct perf_event_context *ctx2)
564c2b21 2608{
211de6eb
PZ
2609 lockdep_assert_held(&ctx1->lock);
2610 lockdep_assert_held(&ctx2->lock);
2611
5a3126d4
PZ
2612 /* Pinning disables the swap optimization */
2613 if (ctx1->pin_count || ctx2->pin_count)
2614 return 0;
2615
2616 /* If ctx1 is the parent of ctx2 */
2617 if (ctx1 == ctx2->parent_ctx && ctx1->generation == ctx2->parent_gen)
2618 return 1;
2619
2620 /* If ctx2 is the parent of ctx1 */
2621 if (ctx1->parent_ctx == ctx2 && ctx1->parent_gen == ctx2->generation)
2622 return 1;
2623
2624 /*
2625 * If ctx1 and ctx2 have the same parent; we flatten the parent
2626 * hierarchy, see perf_event_init_context().
2627 */
2628 if (ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx &&
2629 ctx1->parent_gen == ctx2->parent_gen)
2630 return 1;
2631
2632 /* Unmatched */
2633 return 0;
564c2b21
PM
2634}
2635
cdd6c482
IM
2636static void __perf_event_sync_stat(struct perf_event *event,
2637 struct perf_event *next_event)
bfbd3381
PZ
2638{
2639 u64 value;
2640
cdd6c482 2641 if (!event->attr.inherit_stat)
bfbd3381
PZ
2642 return;
2643
2644 /*
cdd6c482 2645 * Update the event value, we cannot use perf_event_read()
bfbd3381
PZ
2646 * because we're in the middle of a context switch and have IRQs
2647 * disabled, which upsets smp_call_function_single(), however
cdd6c482 2648 * we know the event must be on the current CPU, therefore we
bfbd3381
PZ
2649 * don't need to use it.
2650 */
cdd6c482
IM
2651 switch (event->state) {
2652 case PERF_EVENT_STATE_ACTIVE:
3dbebf15
PZ
2653 event->pmu->read(event);
2654 /* fall-through */
bfbd3381 2655
cdd6c482
IM
2656 case PERF_EVENT_STATE_INACTIVE:
2657 update_event_times(event);
bfbd3381
PZ
2658 break;
2659
2660 default:
2661 break;
2662 }
2663
2664 /*
cdd6c482 2665 * In order to keep per-task stats reliable we need to flip the event
bfbd3381
PZ
2666 * values when we flip the contexts.
2667 */
e7850595
PZ
2668 value = local64_read(&next_event->count);
2669 value = local64_xchg(&event->count, value);
2670 local64_set(&next_event->count, value);
bfbd3381 2671
cdd6c482
IM
2672 swap(event->total_time_enabled, next_event->total_time_enabled);
2673 swap(event->total_time_running, next_event->total_time_running);
19d2e755 2674
bfbd3381 2675 /*
19d2e755 2676 * Since we swizzled the values, update the user visible data too.
bfbd3381 2677 */
cdd6c482
IM
2678 perf_event_update_userpage(event);
2679 perf_event_update_userpage(next_event);
bfbd3381
PZ
2680}
2681
cdd6c482
IM
2682static void perf_event_sync_stat(struct perf_event_context *ctx,
2683 struct perf_event_context *next_ctx)
bfbd3381 2684{
cdd6c482 2685 struct perf_event *event, *next_event;
bfbd3381
PZ
2686
2687 if (!ctx->nr_stat)
2688 return;
2689
02ffdbc8
PZ
2690 update_context_time(ctx);
2691
cdd6c482
IM
2692 event = list_first_entry(&ctx->event_list,
2693 struct perf_event, event_entry);
bfbd3381 2694
cdd6c482
IM
2695 next_event = list_first_entry(&next_ctx->event_list,
2696 struct perf_event, event_entry);
bfbd3381 2697
cdd6c482
IM
2698 while (&event->event_entry != &ctx->event_list &&
2699 &next_event->event_entry != &next_ctx->event_list) {
bfbd3381 2700
cdd6c482 2701 __perf_event_sync_stat(event, next_event);
bfbd3381 2702
cdd6c482
IM
2703 event = list_next_entry(event, event_entry);
2704 next_event = list_next_entry(next_event, event_entry);
bfbd3381
PZ
2705 }
2706}
2707
fe4b04fa
PZ
2708static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
2709 struct task_struct *next)
0793a61d 2710{
8dc85d54 2711 struct perf_event_context *ctx = task->perf_event_ctxp[ctxn];
cdd6c482 2712 struct perf_event_context *next_ctx;
5a3126d4 2713 struct perf_event_context *parent, *next_parent;
108b02cf 2714 struct perf_cpu_context *cpuctx;
c93f7669 2715 int do_switch = 1;
0793a61d 2716
108b02cf
PZ
2717 if (likely(!ctx))
2718 return;
10989fb2 2719
108b02cf
PZ
2720 cpuctx = __get_cpu_context(ctx);
2721 if (!cpuctx->task_ctx)
0793a61d
TG
2722 return;
2723
c93f7669 2724 rcu_read_lock();
8dc85d54 2725 next_ctx = next->perf_event_ctxp[ctxn];
5a3126d4
PZ
2726 if (!next_ctx)
2727 goto unlock;
2728
2729 parent = rcu_dereference(ctx->parent_ctx);
2730 next_parent = rcu_dereference(next_ctx->parent_ctx);
2731
 2732 /* If neither context has a parent context, they cannot be clones. */
802c8a61 2733 if (!parent && !next_parent)
5a3126d4
PZ
2734 goto unlock;
2735
2736 if (next_parent == ctx || next_ctx == parent || next_parent == parent) {
c93f7669
PM
2737 /*
2738 * Looks like the two contexts are clones, so we might be
2739 * able to optimize the context switch. We lock both
2740 * contexts and check that they are clones under the
2741 * lock (including re-checking that neither has been
2742 * uncloned in the meantime). It doesn't matter which
2743 * order we take the locks because no other cpu could
2744 * be trying to lock both of these tasks.
2745 */
e625cce1
TG
2746 raw_spin_lock(&ctx->lock);
2747 raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
c93f7669 2748 if (context_equiv(ctx, next_ctx)) {
63b6da39
PZ
2749 WRITE_ONCE(ctx->task, next);
2750 WRITE_ONCE(next_ctx->task, task);
5a158c3c
YZ
2751
2752 swap(ctx->task_ctx_data, next_ctx->task_ctx_data);
2753
63b6da39
PZ
2754 /*
2755 * RCU_INIT_POINTER here is safe because we've not
 2756 * modified the ctx and the above modifications of
2757 * ctx->task and ctx->task_ctx_data are immaterial
2758 * since those values are always verified under
2759 * ctx->lock which we're now holding.
2760 */
2761 RCU_INIT_POINTER(task->perf_event_ctxp[ctxn], next_ctx);
2762 RCU_INIT_POINTER(next->perf_event_ctxp[ctxn], ctx);
2763
c93f7669 2764 do_switch = 0;
bfbd3381 2765
cdd6c482 2766 perf_event_sync_stat(ctx, next_ctx);
c93f7669 2767 }
e625cce1
TG
2768 raw_spin_unlock(&next_ctx->lock);
2769 raw_spin_unlock(&ctx->lock);
564c2b21 2770 }
5a3126d4 2771unlock:
c93f7669 2772 rcu_read_unlock();
564c2b21 2773
c93f7669 2774 if (do_switch) {
facc4307 2775 raw_spin_lock(&ctx->lock);
8833d0e2 2776 task_ctx_sched_out(cpuctx, ctx);
facc4307 2777 raw_spin_unlock(&ctx->lock);
c93f7669 2778 }
0793a61d
TG
2779}
2780
ba532500
YZ
2781void perf_sched_cb_dec(struct pmu *pmu)
2782{
2783 this_cpu_dec(perf_sched_cb_usages);
2784}
2785
2786void perf_sched_cb_inc(struct pmu *pmu)
2787{
2788 this_cpu_inc(perf_sched_cb_usages);
2789}
2790
2791/*
2792 * This function provides the context switch callback to the lower code
2793 * layer. It is invoked ONLY when the context switch callback is enabled.
2794 */
2795static void perf_pmu_sched_task(struct task_struct *prev,
2796 struct task_struct *next,
2797 bool sched_in)
2798{
2799 struct perf_cpu_context *cpuctx;
2800 struct pmu *pmu;
2801 unsigned long flags;
2802
2803 if (prev == next)
2804 return;
2805
2806 local_irq_save(flags);
2807
2808 rcu_read_lock();
2809
2810 list_for_each_entry_rcu(pmu, &pmus, entry) {
2811 if (pmu->sched_task) {
2812 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
2813
2814 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
2815
2816 perf_pmu_disable(pmu);
2817
2818 pmu->sched_task(cpuctx->task_ctx, sched_in);
2819
2820 perf_pmu_enable(pmu);
2821
2822 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
2823 }
2824 }
2825
2826 rcu_read_unlock();
2827
2828 local_irq_restore(flags);
2829}
2830
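/*
 * Illustrative sketch (hypothetical "example_pmu" names, not part of this
 * file): a PMU driver that keeps per-task hardware state enables the
 * callback with perf_sched_cb_inc() for as long as such events exist and
 * supplies a pmu::sched_task implementation matching the call above.
 */
static void example_pmu_sched_task(struct perf_event_context *ctx, bool sched_in)
{
	/* save or restore driver-private state across the context switch */
}

static void example_pmu_event_setup(struct perf_event *event)
{
	/* turn on the context switch callback while this event exists */
	perf_sched_cb_inc(event->pmu);
}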
45ac1403
AH
2831static void perf_event_switch(struct task_struct *task,
2832 struct task_struct *next_prev, bool sched_in);
2833
8dc85d54
PZ
2834#define for_each_task_context_nr(ctxn) \
2835 for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++)
2836
2837/*
2838 * Called from scheduler to remove the events of the current task,
2839 * with interrupts disabled.
2840 *
2841 * We stop each event and update the event value in event->count.
2842 *
2843 * This does not protect us against NMI, but disable()
2844 * sets the disabled bit in the control field of event _before_
 2845 * accessing the event control register. If an NMI hits, then it will
2846 * not restart the event.
2847 */
ab0cce56
JO
2848void __perf_event_task_sched_out(struct task_struct *task,
2849 struct task_struct *next)
8dc85d54
PZ
2850{
2851 int ctxn;
2852
ba532500
YZ
2853 if (__this_cpu_read(perf_sched_cb_usages))
2854 perf_pmu_sched_task(task, next, false);
2855
45ac1403
AH
2856 if (atomic_read(&nr_switch_events))
2857 perf_event_switch(task, next, false);
2858
8dc85d54
PZ
2859 for_each_task_context_nr(ctxn)
2860 perf_event_context_sched_out(task, ctxn, next);
e5d1367f
SE
2861
2862 /*
2863 * if cgroup events exist on this CPU, then we need
2864 * to check if we have to switch out PMU state.
 2865 * cgroup events are system-wide mode only
2866 */
4a32fea9 2867 if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
a8d757ef 2868 perf_cgroup_sched_out(task, next);
8dc85d54
PZ
2869}
2870
5b0311e1
FW
2871/*
2872 * Called with IRQs disabled
2873 */
2874static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
2875 enum event_type_t event_type)
2876{
2877 ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
04289bb9
IM
2878}
2879
235c7fc7 2880static void
5b0311e1 2881ctx_pinned_sched_in(struct perf_event_context *ctx,
6e37738a 2882 struct perf_cpu_context *cpuctx)
0793a61d 2883{
cdd6c482 2884 struct perf_event *event;
0793a61d 2885
889ff015
FW
2886 list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
2887 if (event->state <= PERF_EVENT_STATE_OFF)
3b6f9e5c 2888 continue;
5632ab12 2889 if (!event_filter_match(event))
3b6f9e5c
PM
2890 continue;
2891
e5d1367f
SE
2892 /* may need to reset tstamp_enabled */
2893 if (is_cgroup_event(event))
2894 perf_cgroup_mark_enabled(event, ctx);
2895
8c9ed8e1 2896 if (group_can_go_on(event, cpuctx, 1))
6e37738a 2897 group_sched_in(event, cpuctx, ctx);
3b6f9e5c
PM
2898
2899 /*
2900 * If this pinned group hasn't been scheduled,
2901 * put it in error state.
2902 */
cdd6c482
IM
2903 if (event->state == PERF_EVENT_STATE_INACTIVE) {
2904 update_group_times(event);
2905 event->state = PERF_EVENT_STATE_ERROR;
53cfbf59 2906 }
3b6f9e5c 2907 }
5b0311e1
FW
2908}
2909
2910static void
2911ctx_flexible_sched_in(struct perf_event_context *ctx,
6e37738a 2912 struct perf_cpu_context *cpuctx)
5b0311e1
FW
2913{
2914 struct perf_event *event;
2915 int can_add_hw = 1;
3b6f9e5c 2916
889ff015
FW
2917 list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
2918 /* Ignore events in OFF or ERROR state */
2919 if (event->state <= PERF_EVENT_STATE_OFF)
3b6f9e5c 2920 continue;
04289bb9
IM
2921 /*
2922 * Listen to the 'cpu' scheduling filter constraint
cdd6c482 2923 * of events:
04289bb9 2924 */
5632ab12 2925 if (!event_filter_match(event))
0793a61d
TG
2926 continue;
2927
e5d1367f
SE
2928 /* may need to reset tstamp_enabled */
2929 if (is_cgroup_event(event))
2930 perf_cgroup_mark_enabled(event, ctx);
2931
9ed6060d 2932 if (group_can_go_on(event, cpuctx, can_add_hw)) {
6e37738a 2933 if (group_sched_in(event, cpuctx, ctx))
dd0e6ba2 2934 can_add_hw = 0;
9ed6060d 2935 }
0793a61d 2936 }
5b0311e1
FW
2937}
2938
2939static void
2940ctx_sched_in(struct perf_event_context *ctx,
2941 struct perf_cpu_context *cpuctx,
e5d1367f
SE
2942 enum event_type_t event_type,
2943 struct task_struct *task)
5b0311e1 2944{
db24d33e 2945 int is_active = ctx->is_active;
c994d613
PZ
2946 u64 now;
2947
2948 lockdep_assert_held(&ctx->lock);
e5d1367f 2949
5b0311e1 2950 if (likely(!ctx->nr_events))
facc4307 2951 return;
5b0311e1 2952
3cbaa590 2953 ctx->is_active |= (event_type | EVENT_TIME);
63e30d3e
PZ
2954 if (ctx->task) {
2955 if (!is_active)
2956 cpuctx->task_ctx = ctx;
2957 else
2958 WARN_ON_ONCE(cpuctx->task_ctx != ctx);
2959 }
2960
3cbaa590
PZ
2961 is_active ^= ctx->is_active; /* changed bits */
2962
2963 if (is_active & EVENT_TIME) {
2964 /* start ctx time */
2965 now = perf_clock();
2966 ctx->timestamp = now;
2967 perf_cgroup_set_timestamp(task, ctx);
2968 }
2969
5b0311e1
FW
2970 /*
2971 * First go through the list and put on any pinned groups
2972 * in order to give them the best chance of going on.
2973 */
3cbaa590 2974 if (is_active & EVENT_PINNED)
6e37738a 2975 ctx_pinned_sched_in(ctx, cpuctx);
5b0311e1
FW
2976
2977 /* Then walk through the lower prio flexible groups */
3cbaa590 2978 if (is_active & EVENT_FLEXIBLE)
6e37738a 2979 ctx_flexible_sched_in(ctx, cpuctx);
235c7fc7
IM
2980}
2981
329c0e01 2982static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
e5d1367f
SE
2983 enum event_type_t event_type,
2984 struct task_struct *task)
329c0e01
FW
2985{
2986 struct perf_event_context *ctx = &cpuctx->ctx;
2987
e5d1367f 2988 ctx_sched_in(ctx, cpuctx, event_type, task);
329c0e01
FW
2989}
2990
e5d1367f
SE
2991static void perf_event_context_sched_in(struct perf_event_context *ctx,
2992 struct task_struct *task)
235c7fc7 2993{
108b02cf 2994 struct perf_cpu_context *cpuctx;
235c7fc7 2995
108b02cf 2996 cpuctx = __get_cpu_context(ctx);
329c0e01
FW
2997 if (cpuctx->task_ctx == ctx)
2998 return;
2999
facc4307 3000 perf_ctx_lock(cpuctx, ctx);
1b9a644f 3001 perf_pmu_disable(ctx->pmu);
329c0e01
FW
3002 /*
3003 * We want to keep the following priority order:
3004 * cpu pinned (that don't need to move), task pinned,
3005 * cpu flexible, task flexible.
3006 */
3007 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
63e30d3e 3008 perf_event_sched_in(cpuctx, ctx, task);
facc4307
PZ
3009 perf_pmu_enable(ctx->pmu);
3010 perf_ctx_unlock(cpuctx, ctx);
235c7fc7
IM
3011}
3012
8dc85d54
PZ
3013/*
3014 * Called from scheduler to add the events of the current task
3015 * with interrupts disabled.
3016 *
3017 * We restore the event value and then enable it.
3018 *
3019 * This does not protect us against NMI, but enable()
3020 * sets the enabled bit in the control field of event _before_
 3021 * accessing the event control register. If an NMI hits, then it will
3022 * keep the event running.
3023 */
ab0cce56
JO
3024void __perf_event_task_sched_in(struct task_struct *prev,
3025 struct task_struct *task)
8dc85d54
PZ
3026{
3027 struct perf_event_context *ctx;
3028 int ctxn;
3029
7e41d177
PZ
3030 /*
3031 * If cgroup events exist on this CPU, then we need to check if we have
 3032 * to switch in PMU state; cgroup events are system-wide mode only.
3033 *
3034 * Since cgroup events are CPU events, we must schedule these in before
3035 * we schedule in the task events.
3036 */
3037 if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
3038 perf_cgroup_sched_in(prev, task);
3039
8dc85d54
PZ
3040 for_each_task_context_nr(ctxn) {
3041 ctx = task->perf_event_ctxp[ctxn];
3042 if (likely(!ctx))
3043 continue;
3044
e5d1367f 3045 perf_event_context_sched_in(ctx, task);
8dc85d54 3046 }
d010b332 3047
45ac1403
AH
3048 if (atomic_read(&nr_switch_events))
3049 perf_event_switch(task, prev, true);
3050
ba532500
YZ
3051 if (__this_cpu_read(perf_sched_cb_usages))
3052 perf_pmu_sched_task(prev, task, true);
235c7fc7
IM
3053}
3054
abd50713
PZ
3055static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
3056{
3057 u64 frequency = event->attr.sample_freq;
3058 u64 sec = NSEC_PER_SEC;
3059 u64 divisor, dividend;
3060
3061 int count_fls, nsec_fls, frequency_fls, sec_fls;
3062
3063 count_fls = fls64(count);
3064 nsec_fls = fls64(nsec);
3065 frequency_fls = fls64(frequency);
3066 sec_fls = 30;
3067
3068 /*
3069 * We got @count in @nsec, with a target of sample_freq HZ
3070 * the target period becomes:
3071 *
3072 * @count * 10^9
3073 * period = -------------------
3074 * @nsec * sample_freq
3075 *
3076 */
3077
3078 /*
3079 * Reduce accuracy by one bit such that @a and @b converge
3080 * to a similar magnitude.
3081 */
fe4b04fa 3082#define REDUCE_FLS(a, b) \
abd50713
PZ
3083do { \
3084 if (a##_fls > b##_fls) { \
3085 a >>= 1; \
3086 a##_fls--; \
3087 } else { \
3088 b >>= 1; \
3089 b##_fls--; \
3090 } \
3091} while (0)
3092
3093 /*
3094 * Reduce accuracy until either term fits in a u64, then proceed with
3095 * the other, so that finally we can do a u64/u64 division.
3096 */
3097 while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
3098 REDUCE_FLS(nsec, frequency);
3099 REDUCE_FLS(sec, count);
3100 }
3101
3102 if (count_fls + sec_fls > 64) {
3103 divisor = nsec * frequency;
3104
3105 while (count_fls + sec_fls > 64) {
3106 REDUCE_FLS(count, sec);
3107 divisor >>= 1;
3108 }
3109
3110 dividend = count * sec;
3111 } else {
3112 dividend = count * sec;
3113
3114 while (nsec_fls + frequency_fls > 64) {
3115 REDUCE_FLS(nsec, frequency);
3116 dividend >>= 1;
3117 }
3118
3119 divisor = nsec * frequency;
3120 }
3121
f6ab91ad
PZ
3122 if (!divisor)
3123 return dividend;
3124
abd50713
PZ
3125 return div64_u64(dividend, divisor);
3126}
3127
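/*
 * Worked example for the formula above (illustrative numbers): with
 * @count = 1,000,000 events observed in @nsec = 10,000,000 ns (10 ms) and
 * sample_freq = 1000 Hz, the target period becomes
 *
 *	1,000,000 * 10^9 / (10,000,000 * 1000) = 100,000
 *
 * i.e. one sample every 100,000 events, which at the observed rate of
 * 100M events/sec works out to the requested ~1000 samples per second.
 */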
e050e3f0
SE
3128static DEFINE_PER_CPU(int, perf_throttled_count);
3129static DEFINE_PER_CPU(u64, perf_throttled_seq);
3130
f39d47ff 3131static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable)
bd2b5b12 3132{
cdd6c482 3133 struct hw_perf_event *hwc = &event->hw;
f6ab91ad 3134 s64 period, sample_period;
bd2b5b12
PZ
3135 s64 delta;
3136
abd50713 3137 period = perf_calculate_period(event, nsec, count);
bd2b5b12
PZ
3138
3139 delta = (s64)(period - hwc->sample_period);
3140 delta = (delta + 7) / 8; /* low pass filter */
3141
3142 sample_period = hwc->sample_period + delta;
3143
3144 if (!sample_period)
3145 sample_period = 1;
3146
bd2b5b12 3147 hwc->sample_period = sample_period;
abd50713 3148
e7850595 3149 if (local64_read(&hwc->period_left) > 8*sample_period) {
f39d47ff
SE
3150 if (disable)
3151 event->pmu->stop(event, PERF_EF_UPDATE);
3152
e7850595 3153 local64_set(&hwc->period_left, 0);
f39d47ff
SE
3154
3155 if (disable)
3156 event->pmu->start(event, PERF_EF_RELOAD);
abd50713 3157 }
bd2b5b12
PZ
3158}
3159
e050e3f0
SE
3160/*
3161 * combine freq adjustment with unthrottling to avoid two passes over the
3162 * events. At the same time, make sure, having freq events does not change
3163 * the rate of unthrottling as that would introduce bias.
3164 */
3165static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
3166 int needs_unthr)
60db5e09 3167{
cdd6c482
IM
3168 struct perf_event *event;
3169 struct hw_perf_event *hwc;
e050e3f0 3170 u64 now, period = TICK_NSEC;
abd50713 3171 s64 delta;
60db5e09 3172
e050e3f0
SE
3173 /*
3174 * only need to iterate over all events iff:
 3175 * - context has events in frequency mode (needs freq adjust)
3176 * - there are events to unthrottle on this cpu
3177 */
3178 if (!(ctx->nr_freq || needs_unthr))
0f5a2601
PZ
3179 return;
3180
e050e3f0 3181 raw_spin_lock(&ctx->lock);
f39d47ff 3182 perf_pmu_disable(ctx->pmu);
e050e3f0 3183
03541f8b 3184 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
cdd6c482 3185 if (event->state != PERF_EVENT_STATE_ACTIVE)
60db5e09
PZ
3186 continue;
3187
5632ab12 3188 if (!event_filter_match(event))
5d27c23d
PZ
3189 continue;
3190
44377277
AS
3191 perf_pmu_disable(event->pmu);
3192
cdd6c482 3193 hwc = &event->hw;
6a24ed6c 3194
ae23bff1 3195 if (hwc->interrupts == MAX_INTERRUPTS) {
e050e3f0 3196 hwc->interrupts = 0;
cdd6c482 3197 perf_log_throttle(event, 1);
a4eaf7f1 3198 event->pmu->start(event, 0);
a78ac325
PZ
3199 }
3200
cdd6c482 3201 if (!event->attr.freq || !event->attr.sample_freq)
44377277 3202 goto next;
60db5e09 3203
e050e3f0
SE
3204 /*
3205 * stop the event and update event->count
3206 */
3207 event->pmu->stop(event, PERF_EF_UPDATE);
3208
e7850595 3209 now = local64_read(&event->count);
abd50713
PZ
3210 delta = now - hwc->freq_count_stamp;
3211 hwc->freq_count_stamp = now;
60db5e09 3212
e050e3f0
SE
3213 /*
3214 * restart the event
3215 * reload only if value has changed
f39d47ff
SE
3216 * we have stopped the event so tell that
3217 * to perf_adjust_period() to avoid stopping it
3218 * twice.
e050e3f0 3219 */
abd50713 3220 if (delta > 0)
f39d47ff 3221 perf_adjust_period(event, period, delta, false);
e050e3f0
SE
3222
3223 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
44377277
AS
3224 next:
3225 perf_pmu_enable(event->pmu);
60db5e09 3226 }
e050e3f0 3227
f39d47ff 3228 perf_pmu_enable(ctx->pmu);
e050e3f0 3229 raw_spin_unlock(&ctx->lock);
60db5e09
PZ
3230}
3231
235c7fc7 3232/*
cdd6c482 3233 * Round-robin a context's events:
235c7fc7 3234 */
cdd6c482 3235static void rotate_ctx(struct perf_event_context *ctx)
0793a61d 3236{
dddd3379
TG
3237 /*
3238 * Rotate the first entry last of non-pinned groups. Rotation might be
3239 * disabled by the inheritance code.
3240 */
3241 if (!ctx->rotate_disable)
3242 list_rotate_left(&ctx->flexible_groups);
235c7fc7
IM
3243}
3244
9e630205 3245static int perf_rotate_context(struct perf_cpu_context *cpuctx)
235c7fc7 3246{
8dc85d54 3247 struct perf_event_context *ctx = NULL;
2fde4f94 3248 int rotate = 0;
7fc23a53 3249
b5ab4cd5 3250 if (cpuctx->ctx.nr_events) {
b5ab4cd5
PZ
3251 if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
3252 rotate = 1;
3253 }
235c7fc7 3254
8dc85d54 3255 ctx = cpuctx->task_ctx;
b5ab4cd5 3256 if (ctx && ctx->nr_events) {
b5ab4cd5
PZ
3257 if (ctx->nr_events != ctx->nr_active)
3258 rotate = 1;
3259 }
9717e6cd 3260
e050e3f0 3261 if (!rotate)
0f5a2601
PZ
3262 goto done;
3263
facc4307 3264 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
1b9a644f 3265 perf_pmu_disable(cpuctx->ctx.pmu);
60db5e09 3266
e050e3f0
SE
3267 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
3268 if (ctx)
3269 ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
0793a61d 3270
e050e3f0
SE
3271 rotate_ctx(&cpuctx->ctx);
3272 if (ctx)
3273 rotate_ctx(ctx);
235c7fc7 3274
e050e3f0 3275 perf_event_sched_in(cpuctx, ctx, current);
235c7fc7 3276
0f5a2601
PZ
3277 perf_pmu_enable(cpuctx->ctx.pmu);
3278 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
b5ab4cd5 3279done:
9e630205
SE
3280
3281 return rotate;
e9d2b064
PZ
3282}
3283
3284void perf_event_task_tick(void)
3285{
2fde4f94
MR
3286 struct list_head *head = this_cpu_ptr(&active_ctx_list);
3287 struct perf_event_context *ctx, *tmp;
e050e3f0 3288 int throttled;
b5ab4cd5 3289
e9d2b064
PZ
3290 WARN_ON(!irqs_disabled());
3291
e050e3f0
SE
3292 __this_cpu_inc(perf_throttled_seq);
3293 throttled = __this_cpu_xchg(perf_throttled_count, 0);
555e0c1e 3294 tick_dep_clear_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS);
e050e3f0 3295
2fde4f94 3296 list_for_each_entry_safe(ctx, tmp, head, active_ctx_list)
e050e3f0 3297 perf_adjust_freq_unthr_context(ctx, throttled);
0793a61d
TG
3298}
3299
889ff015
FW
3300static int event_enable_on_exec(struct perf_event *event,
3301 struct perf_event_context *ctx)
3302{
3303 if (!event->attr.enable_on_exec)
3304 return 0;
3305
3306 event->attr.enable_on_exec = 0;
3307 if (event->state >= PERF_EVENT_STATE_INACTIVE)
3308 return 0;
3309
1d9b482e 3310 __perf_event_mark_enabled(event);
889ff015
FW
3311
3312 return 1;
3313}
3314
57e7986e 3315/*
cdd6c482 3316 * Enable all of a task's events that have been marked enable-on-exec.
57e7986e
PM
3317 * This expects task == current.
3318 */
c1274499 3319static void perf_event_enable_on_exec(int ctxn)
57e7986e 3320{
c1274499 3321 struct perf_event_context *ctx, *clone_ctx = NULL;
3e349507 3322 struct perf_cpu_context *cpuctx;
cdd6c482 3323 struct perf_event *event;
57e7986e
PM
3324 unsigned long flags;
3325 int enabled = 0;
3326
3327 local_irq_save(flags);
c1274499 3328 ctx = current->perf_event_ctxp[ctxn];
cdd6c482 3329 if (!ctx || !ctx->nr_events)
57e7986e
PM
3330 goto out;
3331
3e349507
PZ
3332 cpuctx = __get_cpu_context(ctx);
3333 perf_ctx_lock(cpuctx, ctx);
7fce2509 3334 ctx_sched_out(ctx, cpuctx, EVENT_TIME);
3e349507
PZ
3335 list_for_each_entry(event, &ctx->event_list, event_entry)
3336 enabled |= event_enable_on_exec(event, ctx);
57e7986e
PM
3337
3338 /*
3e349507 3339 * Unclone and reschedule this context if we enabled any event.
57e7986e 3340 */
3e349507 3341 if (enabled) {
211de6eb 3342 clone_ctx = unclone_ctx(ctx);
3e349507
PZ
3343 ctx_resched(cpuctx, ctx);
3344 }
3345 perf_ctx_unlock(cpuctx, ctx);
57e7986e 3346
9ed6060d 3347out:
57e7986e 3348 local_irq_restore(flags);
211de6eb
PZ
3349
3350 if (clone_ctx)
3351 put_ctx(clone_ctx);
57e7986e
PM
3352}
3353
0492d4c5
PZ
3354struct perf_read_data {
3355 struct perf_event *event;
3356 bool group;
7d88962e 3357 int ret;
0492d4c5
PZ
3358};
3359
0793a61d 3360/*
cdd6c482 3361 * Cross CPU call to read the hardware event
0793a61d 3362 */
cdd6c482 3363static void __perf_event_read(void *info)
0793a61d 3364{
0492d4c5
PZ
3365 struct perf_read_data *data = info;
3366 struct perf_event *sub, *event = data->event;
cdd6c482 3367 struct perf_event_context *ctx = event->ctx;
108b02cf 3368 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
4a00c16e 3369 struct pmu *pmu = event->pmu;
621a01ea 3370
e1ac3614
PM
3371 /*
3372 * If this is a task context, we need to check whether it is
3373 * the current task context of this cpu. If not it has been
3374 * scheduled out before the smp call arrived. In that case
cdd6c482
IM
3375 * event->count would have been updated to a recent sample
3376 * when the event was scheduled out.
e1ac3614
PM
3377 */
3378 if (ctx->task && cpuctx->task_ctx != ctx)
3379 return;
3380
e625cce1 3381 raw_spin_lock(&ctx->lock);
e5d1367f 3382 if (ctx->is_active) {
542e72fc 3383 update_context_time(ctx);
e5d1367f
SE
3384 update_cgrp_time_from_event(event);
3385 }
0492d4c5 3386
cdd6c482 3387 update_event_times(event);
4a00c16e
SB
3388 if (event->state != PERF_EVENT_STATE_ACTIVE)
3389 goto unlock;
0492d4c5 3390
4a00c16e
SB
3391 if (!data->group) {
3392 pmu->read(event);
3393 data->ret = 0;
0492d4c5 3394 goto unlock;
4a00c16e
SB
3395 }
3396
3397 pmu->start_txn(pmu, PERF_PMU_TXN_READ);
3398
3399 pmu->read(event);
0492d4c5
PZ
3400
3401 list_for_each_entry(sub, &event->sibling_list, group_entry) {
3402 update_event_times(sub);
4a00c16e
SB
3403 if (sub->state == PERF_EVENT_STATE_ACTIVE) {
3404 /*
3405 * Use sibling's PMU rather than @event's since
3406 * sibling could be on different (eg: software) PMU.
3407 */
0492d4c5 3408 sub->pmu->read(sub);
4a00c16e 3409 }
0492d4c5 3410 }
4a00c16e
SB
3411
3412 data->ret = pmu->commit_txn(pmu);
0492d4c5
PZ
3413
3414unlock:
e625cce1 3415 raw_spin_unlock(&ctx->lock);
0793a61d
TG
3416}
3417
b5e58793
PZ
3418static inline u64 perf_event_count(struct perf_event *event)
3419{
eacd3ecc
MF
3420 if (event->pmu->count)
3421 return event->pmu->count(event);
3422
3423 return __perf_event_count(event);
b5e58793
PZ
3424}
3425
ffe8690c
KX
3426/*
3427 * NMI-safe method to read a local event, that is an event that
3428 * is:
3429 * - either for the current task, or for this CPU
 3430 * - does not have inherit set, as inherited task events
3431 * will not be local and we cannot read them atomically
3432 * - must not have a pmu::count method
3433 */
3434u64 perf_event_read_local(struct perf_event *event)
3435{
3436 unsigned long flags;
3437 u64 val;
3438
3439 /*
3440 * Disabling interrupts avoids all counter scheduling (context
3441 * switches, timer based rotation and IPIs).
3442 */
3443 local_irq_save(flags);
3444
3445 /* If this is a per-task event, it must be for current */
3446 WARN_ON_ONCE((event->attach_state & PERF_ATTACH_TASK) &&
3447 event->hw.target != current);
3448
3449 /* If this is a per-CPU event, it must be for this CPU */
3450 WARN_ON_ONCE(!(event->attach_state & PERF_ATTACH_TASK) &&
3451 event->cpu != smp_processor_id());
3452
3453 /*
 3454 * It must not be an event with inherit set; we cannot read
3455 * all child counters from atomic context.
3456 */
3457 WARN_ON_ONCE(event->attr.inherit);
3458
3459 /*
3460 * It must not have a pmu::count method, those are not
3461 * NMI safe.
3462 */
3463 WARN_ON_ONCE(event->pmu->count);
3464
3465 /*
3466 * If the event is currently on this CPU, its either a per-task event,
3467 * or local to this CPU. Furthermore it means its ACTIVE (otherwise
3468 * oncpu == -1).
3469 */
3470 if (event->oncpu == smp_processor_id())
3471 event->pmu->read(event);
3472
3473 val = local64_read(&event->count);
3474 local_irq_restore(flags);
3475
3476 return val;
3477}
3478
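/*
 * Illustrative sketch (hypothetical helper, not part of this file): a
 * reader that owns a per-task, non-inherited event without a pmu::count
 * method may read it lock-free, even from NMI context:
 */
static u64 example_read_self_counter(struct perf_event *event)
{
	/* valid per the constraints above: current task, !inherit, no pmu::count */
	return perf_event_read_local(event);
}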
7d88962e 3479static int perf_event_read(struct perf_event *event, bool group)
0793a61d 3480{
7d88962e
SB
3481 int ret = 0;
3482
0793a61d 3483 /*
cdd6c482
IM
3484 * If event is enabled and currently active on a CPU, update the
3485 * value in the event structure:
0793a61d 3486 */
cdd6c482 3487 if (event->state == PERF_EVENT_STATE_ACTIVE) {
0492d4c5
PZ
3488 struct perf_read_data data = {
3489 .event = event,
3490 .group = group,
7d88962e 3491 .ret = 0,
0492d4c5 3492 };
cdd6c482 3493 smp_call_function_single(event->oncpu,
0492d4c5 3494 __perf_event_read, &data, 1);
7d88962e 3495 ret = data.ret;
cdd6c482 3496 } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
2b8988c9
PZ
3497 struct perf_event_context *ctx = event->ctx;
3498 unsigned long flags;
3499
e625cce1 3500 raw_spin_lock_irqsave(&ctx->lock, flags);
c530ccd9
SE
3501 /*
3502 * may read while context is not active
3503 * (e.g., thread is blocked), in that case
3504 * we cannot update context time
3505 */
e5d1367f 3506 if (ctx->is_active) {
c530ccd9 3507 update_context_time(ctx);
e5d1367f
SE
3508 update_cgrp_time_from_event(event);
3509 }
0492d4c5
PZ
3510 if (group)
3511 update_group_times(event);
3512 else
3513 update_event_times(event);
e625cce1 3514 raw_spin_unlock_irqrestore(&ctx->lock, flags);
0793a61d 3515 }
7d88962e
SB
3516
3517 return ret;
0793a61d
TG
3518}
3519
a63eaf34 3520/*
cdd6c482 3521 * Initialize the perf_event context in a task_struct:
a63eaf34 3522 */
eb184479 3523static void __perf_event_init_context(struct perf_event_context *ctx)
a63eaf34 3524{
e625cce1 3525 raw_spin_lock_init(&ctx->lock);
a63eaf34 3526 mutex_init(&ctx->mutex);
2fde4f94 3527 INIT_LIST_HEAD(&ctx->active_ctx_list);
889ff015
FW
3528 INIT_LIST_HEAD(&ctx->pinned_groups);
3529 INIT_LIST_HEAD(&ctx->flexible_groups);
a63eaf34
PM
3530 INIT_LIST_HEAD(&ctx->event_list);
3531 atomic_set(&ctx->refcount, 1);
eb184479
PZ
3532}
3533
3534static struct perf_event_context *
3535alloc_perf_context(struct pmu *pmu, struct task_struct *task)
3536{
3537 struct perf_event_context *ctx;
3538
3539 ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
3540 if (!ctx)
3541 return NULL;
3542
3543 __perf_event_init_context(ctx);
3544 if (task) {
3545 ctx->task = task;
3546 get_task_struct(task);
0793a61d 3547 }
eb184479
PZ
3548 ctx->pmu = pmu;
3549
3550 return ctx;
a63eaf34
PM
3551}
3552
2ebd4ffb
MH
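/*
 * Resolve @vpid to a task (current when @vpid is 0) and take a reference
 * on it; returns ERR_PTR(-ESRCH) if no such task exists.
 */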
3553static struct task_struct *
3554find_lively_task_by_vpid(pid_t vpid)
3555{
3556 struct task_struct *task;
0793a61d
TG
3557
3558 rcu_read_lock();
2ebd4ffb 3559 if (!vpid)
0793a61d
TG
3560 task = current;
3561 else
2ebd4ffb 3562 task = find_task_by_vpid(vpid);
0793a61d
TG
3563 if (task)
3564 get_task_struct(task);
3565 rcu_read_unlock();
3566
3567 if (!task)
3568 return ERR_PTR(-ESRCH);
3569
2ebd4ffb 3570 return task;
2ebd4ffb
MH
3571}
3572
fe4b04fa
PZ
3573/*
3574 * Returns a matching context with refcount and pincount.
3575 */
108b02cf 3576static struct perf_event_context *
4af57ef2
YZ
3577find_get_context(struct pmu *pmu, struct task_struct *task,
3578 struct perf_event *event)
0793a61d 3579{
211de6eb 3580 struct perf_event_context *ctx, *clone_ctx = NULL;
22a4f650 3581 struct perf_cpu_context *cpuctx;
4af57ef2 3582 void *task_ctx_data = NULL;
25346b93 3583 unsigned long flags;
8dc85d54 3584 int ctxn, err;
4af57ef2 3585 int cpu = event->cpu;
0793a61d 3586
22a4ec72 3587 if (!task) {
cdd6c482 3588 /* Must be root to operate on a CPU event: */
0764771d 3589 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
0793a61d
TG
3590 return ERR_PTR(-EACCES);
3591
0793a61d 3592 /*
cdd6c482 3593 * We could be clever and allow attaching an event to an
0793a61d
TG
3594 * offline CPU and activate it when the CPU comes up, but
3595 * that's for later.
3596 */
f6325e30 3597 if (!cpu_online(cpu))
0793a61d
TG
3598 return ERR_PTR(-ENODEV);
3599
108b02cf 3600 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
0793a61d 3601 ctx = &cpuctx->ctx;
c93f7669 3602 get_ctx(ctx);
fe4b04fa 3603 ++ctx->pin_count;
0793a61d 3604
0793a61d
TG
3605 return ctx;
3606 }
3607
8dc85d54
PZ
3608 err = -EINVAL;
3609 ctxn = pmu->task_ctx_nr;
3610 if (ctxn < 0)
3611 goto errout;
3612
4af57ef2
YZ
3613 if (event->attach_state & PERF_ATTACH_TASK_DATA) {
3614 task_ctx_data = kzalloc(pmu->task_ctx_size, GFP_KERNEL);
3615 if (!task_ctx_data) {
3616 err = -ENOMEM;
3617 goto errout;
3618 }
3619 }
3620
9ed6060d 3621retry:
8dc85d54 3622 ctx = perf_lock_task_context(task, ctxn, &flags);
c93f7669 3623 if (ctx) {
211de6eb 3624 clone_ctx = unclone_ctx(ctx);
fe4b04fa 3625 ++ctx->pin_count;
4af57ef2
YZ
3626
3627 if (task_ctx_data && !ctx->task_ctx_data) {
3628 ctx->task_ctx_data = task_ctx_data;
3629 task_ctx_data = NULL;
3630 }
e625cce1 3631 raw_spin_unlock_irqrestore(&ctx->lock, flags);
211de6eb
PZ
3632
3633 if (clone_ctx)
3634 put_ctx(clone_ctx);
9137fb28 3635 } else {
eb184479 3636 ctx = alloc_perf_context(pmu, task);
c93f7669
PM
3637 err = -ENOMEM;
3638 if (!ctx)
3639 goto errout;
eb184479 3640
4af57ef2
YZ
3641 if (task_ctx_data) {
3642 ctx->task_ctx_data = task_ctx_data;
3643 task_ctx_data = NULL;
3644 }
3645
dbe08d82
ON
3646 err = 0;
3647 mutex_lock(&task->perf_event_mutex);
3648 /*
3649 * If it has already passed perf_event_exit_task(),
3650 * we must see PF_EXITING, since it takes this mutex too.
3651 */
3652 if (task->flags & PF_EXITING)
3653 err = -ESRCH;
3654 else if (task->perf_event_ctxp[ctxn])
3655 err = -EAGAIN;
fe4b04fa 3656 else {
9137fb28 3657 get_ctx(ctx);
fe4b04fa 3658 ++ctx->pin_count;
dbe08d82 3659 rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
fe4b04fa 3660 }
dbe08d82
ON
3661 mutex_unlock(&task->perf_event_mutex);
3662
3663 if (unlikely(err)) {
9137fb28 3664 put_ctx(ctx);
dbe08d82
ON
3665
3666 if (err == -EAGAIN)
3667 goto retry;
3668 goto errout;
a63eaf34
PM
3669 }
3670 }
3671
4af57ef2 3672 kfree(task_ctx_data);
0793a61d 3673 return ctx;
c93f7669 3674
9ed6060d 3675errout:
4af57ef2 3676 kfree(task_ctx_data);
c93f7669 3677 return ERR_PTR(err);
0793a61d
TG
3678}
3679
6fb2915d 3680static void perf_event_free_filter(struct perf_event *event);
2541517c 3681static void perf_event_free_bpf_prog(struct perf_event *event);
6fb2915d 3682
cdd6c482 3683static void free_event_rcu(struct rcu_head *head)
592903cd 3684{
cdd6c482 3685 struct perf_event *event;
592903cd 3686
cdd6c482
IM
3687 event = container_of(head, struct perf_event, rcu_head);
3688 if (event->ns)
3689 put_pid_ns(event->ns);
6fb2915d 3690 perf_event_free_filter(event);
cdd6c482 3691 kfree(event);
592903cd
PZ
3692}
3693
b69cf536
PZ
3694static void ring_buffer_attach(struct perf_event *event,
3695 struct ring_buffer *rb);
925d519a 3696
f2fb6bef
KL
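/*
 * Remove the event from the per-CPU side-band event list; only events
 * that generate side-band records (mmap/comm/task/context_switch) and
 * are not per-task or inherited children ever sit on it, see is_sb_event().
 */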
3697static void detach_sb_event(struct perf_event *event)
3698{
3699 struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu);
3700
3701 raw_spin_lock(&pel->lock);
3702 list_del_rcu(&event->sb_list);
3703 raw_spin_unlock(&pel->lock);
3704}
3705
a4f144eb 3706static bool is_sb_event(struct perf_event *event)
f2fb6bef 3707{
a4f144eb
DCC
3708 struct perf_event_attr *attr = &event->attr;
3709
f2fb6bef 3710 if (event->parent)
a4f144eb 3711 return false;
f2fb6bef
KL
3712
3713 if (event->attach_state & PERF_ATTACH_TASK)
a4f144eb 3714 return false;
f2fb6bef 3715
a4f144eb
DCC
3716 if (attr->mmap || attr->mmap_data || attr->mmap2 ||
3717 attr->comm || attr->comm_exec ||
3718 attr->task ||
3719 attr->context_switch)
3720 return true;
3721 return false;
3722}
3723
3724static void unaccount_pmu_sb_event(struct perf_event *event)
3725{
3726 if (is_sb_event(event))
3727 detach_sb_event(event);
f2fb6bef
KL
3728}
3729
4beb31f3 3730static void unaccount_event_cpu(struct perf_event *event, int cpu)
f1600952 3731{
4beb31f3
FW
3732 if (event->parent)
3733 return;
3734
4beb31f3
FW
3735 if (is_cgroup_event(event))
3736 atomic_dec(&per_cpu(perf_cgroup_events, cpu));
3737}
925d519a 3738
555e0c1e
FW
3739#ifdef CONFIG_NO_HZ_FULL
3740static DEFINE_SPINLOCK(nr_freq_lock);
3741#endif
3742
3743static void unaccount_freq_event_nohz(void)
3744{
3745#ifdef CONFIG_NO_HZ_FULL
3746 spin_lock(&nr_freq_lock);
3747 if (atomic_dec_and_test(&nr_freq_events))
3748 tick_nohz_dep_clear(TICK_DEP_BIT_PERF_EVENTS);
3749 spin_unlock(&nr_freq_lock);
3750#endif
3751}
3752
3753static void unaccount_freq_event(void)
3754{
3755 if (tick_nohz_full_enabled())
3756 unaccount_freq_event_nohz();
3757 else
3758 atomic_dec(&nr_freq_events);
3759}
3760
4beb31f3
FW
3761static void unaccount_event(struct perf_event *event)
3762{
25432ae9
PZ
3763 bool dec = false;
3764
4beb31f3
FW
3765 if (event->parent)
3766 return;
3767
3768 if (event->attach_state & PERF_ATTACH_TASK)
25432ae9 3769 dec = true;
4beb31f3
FW
3770 if (event->attr.mmap || event->attr.mmap_data)
3771 atomic_dec(&nr_mmap_events);
3772 if (event->attr.comm)
3773 atomic_dec(&nr_comm_events);
3774 if (event->attr.task)
3775 atomic_dec(&nr_task_events);
948b26b6 3776 if (event->attr.freq)
555e0c1e 3777 unaccount_freq_event();
45ac1403 3778 if (event->attr.context_switch) {
25432ae9 3779 dec = true;
45ac1403
AH
3780 atomic_dec(&nr_switch_events);
3781 }
4beb31f3 3782 if (is_cgroup_event(event))
25432ae9 3783 dec = true;
4beb31f3 3784 if (has_branch_stack(event))
25432ae9
PZ
3785 dec = true;
3786
9107c89e
PZ
3787 if (dec) {
3788 if (!atomic_add_unless(&perf_sched_count, -1, 1))
3789 schedule_delayed_work(&perf_sched_work, HZ);
3790 }
4beb31f3
FW
3791
3792 unaccount_event_cpu(event, event->cpu);
f2fb6bef
KL
3793
3794 unaccount_pmu_sb_event(event);
4beb31f3 3795}
925d519a 3796
9107c89e
PZ
3797static void perf_sched_delayed(struct work_struct *work)
3798{
3799 mutex_lock(&perf_sched_mutex);
3800 if (atomic_dec_and_test(&perf_sched_count))
3801 static_branch_disable(&perf_sched_events);
3802 mutex_unlock(&perf_sched_mutex);
3803}
3804
bed5b25a
AS
3805/*
3806 * The following implement mutual exclusion of events on "exclusive" pmus
3807 * (PERF_PMU_CAP_EXCLUSIVE). Such pmus can only have one event scheduled
3808 * at a time, so we disallow creating events that might conflict, namely:
3809 *
3810 * 1) cpu-wide events in the presence of per-task events,
3811 * 2) per-task events in the presence of cpu-wide events,
3812 * 3) two matching events on the same context.
3813 *
3814 * The former two cases are handled in the allocation path (perf_event_alloc(),
a0733e69 3815 * _free_event()), the latter -- before the first perf_install_in_context().
bed5b25a
AS
3816 */
3817static int exclusive_event_init(struct perf_event *event)
3818{
3819 struct pmu *pmu = event->pmu;
3820
3821 if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
3822 return 0;
3823
3824 /*
3825 * Prevent co-existence of per-task and cpu-wide events on the
3826 * same exclusive pmu.
3827 *
3828 * Negative pmu::exclusive_cnt means there are cpu-wide
3829 * events on this "exclusive" pmu, positive means there are
3830 * per-task events.
3831 *
3832 * Since this is called in perf_event_alloc() path, event::ctx
3833 * doesn't exist yet; it is, however, safe to use PERF_ATTACH_TASK
3834 * to mean "per-task event", because unlike other attach states it
3835 * never gets cleared.
3836 */
3837 if (event->attach_state & PERF_ATTACH_TASK) {
3838 if (!atomic_inc_unless_negative(&pmu->exclusive_cnt))
3839 return -EBUSY;
3840 } else {
3841 if (!atomic_dec_unless_positive(&pmu->exclusive_cnt))
3842 return -EBUSY;
3843 }
3844
3845 return 0;
3846}
3847
3848static void exclusive_event_destroy(struct perf_event *event)
3849{
3850 struct pmu *pmu = event->pmu;
3851
3852 if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
3853 return;
3854
3855 /* see comment in exclusive_event_init() */
3856 if (event->attach_state & PERF_ATTACH_TASK)
3857 atomic_dec(&pmu->exclusive_cnt);
3858 else
3859 atomic_inc(&pmu->exclusive_cnt);
3860}
3861
3862static bool exclusive_event_match(struct perf_event *e1, struct perf_event *e2)
3863{
3864 if ((e1->pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE) &&
3865 (e1->cpu == e2->cpu ||
3866 e1->cpu == -1 ||
3867 e2->cpu == -1))
3868 return true;
3869 return false;
3870}
3871
3872/* Called under the same ctx::mutex as perf_install_in_context() */
3873static bool exclusive_event_installable(struct perf_event *event,
3874 struct perf_event_context *ctx)
3875{
3876 struct perf_event *iter_event;
3877 struct pmu *pmu = event->pmu;
3878
3879 if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
3880 return true;
3881
3882 list_for_each_entry(iter_event, &ctx->event_list, event_entry) {
3883 if (exclusive_event_match(iter_event, event))
3884 return false;
3885 }
3886
3887 return true;
3888}
3889
375637bc
AS
3890static void perf_addr_filters_splice(struct perf_event *event,
3891 struct list_head *head);
3892
683ede43 3893static void _free_event(struct perf_event *event)
f1600952 3894{
e360adbe 3895 irq_work_sync(&event->pending);
925d519a 3896
4beb31f3 3897 unaccount_event(event);
9ee318a7 3898
76369139 3899 if (event->rb) {
9bb5d40c
PZ
3900 /*
3901 * Can happen when we close an event with re-directed output.
3902 *
3903 * Since we have a 0 refcount, perf_mmap_close() will skip
3904 * over us; possibly making our ring_buffer_put() the last.
3905 */
3906 mutex_lock(&event->mmap_mutex);
b69cf536 3907 ring_buffer_attach(event, NULL);
9bb5d40c 3908 mutex_unlock(&event->mmap_mutex);
a4be7c27
PZ
3909 }
3910
e5d1367f
SE
3911 if (is_cgroup_event(event))
3912 perf_detach_cgroup(event);
3913
a0733e69
PZ
3914 if (!event->parent) {
3915 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
3916 put_callchain_buffers();
3917 }
3918
3919 perf_event_free_bpf_prog(event);
375637bc
AS
3920 perf_addr_filters_splice(event, NULL);
3921 kfree(event->addr_filters_offs);
a0733e69
PZ
3922
3923 if (event->destroy)
3924 event->destroy(event);
3925
3926 if (event->ctx)
3927 put_ctx(event->ctx);
3928
62a92c8f
AS
3929 exclusive_event_destroy(event);
3930 module_put(event->pmu->module);
a0733e69
PZ
3931
3932 call_rcu(&event->rcu_head, free_event_rcu);
f1600952
PZ
3933}
3934
683ede43
PZ
3935/*
3936 * Used to free events which have a known refcount of 1, such as in error paths
3937 * where the event isn't exposed yet and inherited events.
3938 */
3939static void free_event(struct perf_event *event)
0793a61d 3940{
683ede43
PZ
3941 if (WARN(atomic_long_cmpxchg(&event->refcount, 1, 0) != 1,
3942 "unexpected event refcount: %ld; ptr=%p\n",
3943 atomic_long_read(&event->refcount), event)) {
3944 /* leak to avoid use-after-free */
3945 return;
3946 }
0793a61d 3947
683ede43 3948 _free_event(event);
0793a61d
TG
3949}
3950
a66a3052 3951/*
f8697762 3952 * Remove user event from the owner task.
a66a3052 3953 */
f8697762 3954static void perf_remove_from_owner(struct perf_event *event)
fb0459d7 3955{
8882135b 3956 struct task_struct *owner;
fb0459d7 3957
8882135b 3958 rcu_read_lock();
8882135b 3959 /*
f47c02c0
PZ
3960 * Matches the smp_store_release() in perf_event_exit_task(). If we
3961 * observe !owner it means the list deletion is complete and we can
3962 * indeed free this event, otherwise we need to serialize on
8882135b
PZ
3963 * owner->perf_event_mutex.
3964 */
f47c02c0 3965 owner = lockless_dereference(event->owner);
8882135b
PZ
3966 if (owner) {
3967 /*
3968 * Since delayed_put_task_struct() also drops the last
3969 * task reference we can safely take a new reference
3970 * while holding the rcu_read_lock().
3971 */
3972 get_task_struct(owner);
3973 }
3974 rcu_read_unlock();
3975
3976 if (owner) {
f63a8daa
PZ
3977 /*
3978 * If we're here through perf_event_exit_task() we're already
3979 * holding ctx->mutex which would be an inversion wrt. the
3980 * normal lock order.
3981 *
3982 * However we can safely take this lock because it's the child
3983 * ctx->mutex.
3984 */
3985 mutex_lock_nested(&owner->perf_event_mutex, SINGLE_DEPTH_NESTING);
3986
8882135b
PZ
3987 /*
3988 * We have to re-check the event->owner field; if it is cleared
3989 * we raced with perf_event_exit_task(), acquiring the mutex
3990 * ensured they're done, and we can proceed with freeing the
3991 * event.
3992 */
f47c02c0 3993 if (event->owner) {
8882135b 3994 list_del_init(&event->owner_entry);
f47c02c0
PZ
3995 smp_store_release(&event->owner, NULL);
3996 }
8882135b
PZ
3997 mutex_unlock(&owner->perf_event_mutex);
3998 put_task_struct(owner);
3999 }
f8697762
JO
4000}
4001
f8697762
JO
4002static void put_event(struct perf_event *event)
4003{
f8697762
JO
4004 if (!atomic_long_dec_and_test(&event->refcount))
4005 return;
4006
c6e5b732
PZ
4007 _free_event(event);
4008}
4009
4010/*
4011 * Kill an event dead; while event:refcount will preserve the event
4012 * object, it will not preserve its functionality. Once the last 'user'
4013 * gives up the object, we'll destroy the thing.
4014 */
4015int perf_event_release_kernel(struct perf_event *event)
4016{
a4f4bb6d 4017 struct perf_event_context *ctx = event->ctx;
c6e5b732
PZ
4018 struct perf_event *child, *tmp;
4019
a4f4bb6d
PZ
4020 /*
4021 * If we got here through err_file: fput(event_file); we will not have
4022 * attached to a context yet.
4023 */
4024 if (!ctx) {
4025 WARN_ON_ONCE(event->attach_state &
4026 (PERF_ATTACH_CONTEXT|PERF_ATTACH_GROUP));
4027 goto no_ctx;
4028 }
4029
f8697762
JO
4030 if (!is_kernel_event(event))
4031 perf_remove_from_owner(event);
8882135b 4032
5fa7c8ec 4033 ctx = perf_event_ctx_lock(event);
a83fe28e 4034 WARN_ON_ONCE(ctx->parent_ctx);
a69b0ca4 4035 perf_remove_from_context(event, DETACH_GROUP);
683ede43 4036
a69b0ca4 4037 raw_spin_lock_irq(&ctx->lock);
683ede43 4038 /*
a69b0ca4
PZ
4039 * Mark this event as STATE_DEAD; there is no external reference to it
4040 * anymore.
683ede43 4041 *
a69b0ca4
PZ
4042 * Anybody acquiring event->child_mutex after the below loop _must_
4043 * also see this, most importantly inherit_event() which will avoid
4044 * placing more children on the list.
683ede43 4045 *
c6e5b732
PZ
4046 * Thus this guarantees that we will in fact observe and kill _ALL_
4047 * child events.
683ede43 4048 */
a69b0ca4
PZ
4049 event->state = PERF_EVENT_STATE_DEAD;
4050 raw_spin_unlock_irq(&ctx->lock);
4051
4052 perf_event_ctx_unlock(event, ctx);
683ede43 4053
c6e5b732
PZ
4054again:
4055 mutex_lock(&event->child_mutex);
4056 list_for_each_entry(child, &event->child_list, child_list) {
a6fa941d 4057
c6e5b732
PZ
4058 /*
4059 * Cannot change, child events are not migrated, see the
4060 * comment with perf_event_ctx_lock_nested().
4061 */
4062 ctx = lockless_dereference(child->ctx);
4063 /*
4064 * Since child_mutex nests inside ctx::mutex, we must jump
4065 * through hoops. We start by grabbing a reference on the ctx.
4066 *
4067 * Since the event cannot get freed while we hold the
4068 * child_mutex, the context must also exist and have a !0
4069 * reference count.
4070 */
4071 get_ctx(ctx);
4072
4073 /*
4074 * Now that we have a ctx ref, we can drop child_mutex, and
4075 * acquire ctx::mutex without fear of it going away. Then we
4076 * can re-acquire child_mutex.
4077 */
4078 mutex_unlock(&event->child_mutex);
4079 mutex_lock(&ctx->mutex);
4080 mutex_lock(&event->child_mutex);
4081
4082 /*
4083 * Now that we hold ctx::mutex and child_mutex, revalidate our
4084 * state, if child is still the first entry, it didn't get freed
4085 * and we can continue doing so.
4086 */
4087 tmp = list_first_entry_or_null(&event->child_list,
4088 struct perf_event, child_list);
4089 if (tmp == child) {
4090 perf_remove_from_context(child, DETACH_GROUP);
4091 list_del(&child->child_list);
4092 free_event(child);
4093 /*
4094 * This matches the refcount bump in inherit_event();
4095 * this can't be the last reference.
4096 */
4097 put_event(event);
4098 }
4099
4100 mutex_unlock(&event->child_mutex);
4101 mutex_unlock(&ctx->mutex);
4102 put_ctx(ctx);
4103 goto again;
4104 }
4105 mutex_unlock(&event->child_mutex);
4106
a4f4bb6d
PZ
4107no_ctx:
4108 put_event(event); /* Must be the 'last' reference */
683ede43
PZ
4109 return 0;
4110}
4111EXPORT_SYMBOL_GPL(perf_event_release_kernel);
4112
8b10c5e2
PZ
4113/*
4114 * Called when the last reference to the file is gone.
4115 */
a6fa941d
AV
4116static int perf_release(struct inode *inode, struct file *file)
4117{
c6e5b732 4118 perf_event_release_kernel(file->private_data);
a6fa941d 4119 return 0;
fb0459d7 4120}
fb0459d7 4121
59ed446f 4122u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
e53c0994 4123{
cdd6c482 4124 struct perf_event *child;
e53c0994
PZ
4125 u64 total = 0;
4126
59ed446f
PZ
4127 *enabled = 0;
4128 *running = 0;
4129
6f10581a 4130 mutex_lock(&event->child_mutex);
01add3ea 4131
7d88962e 4132 (void)perf_event_read(event, false);
01add3ea
SB
4133 total += perf_event_count(event);
4134
59ed446f
PZ
4135 *enabled += event->total_time_enabled +
4136 atomic64_read(&event->child_total_time_enabled);
4137 *running += event->total_time_running +
4138 atomic64_read(&event->child_total_time_running);
4139
4140 list_for_each_entry(child, &event->child_list, child_list) {
7d88962e 4141 (void)perf_event_read(child, false);
01add3ea 4142 total += perf_event_count(child);
59ed446f
PZ
4143 *enabled += child->total_time_enabled;
4144 *running += child->total_time_running;
4145 }
6f10581a 4146 mutex_unlock(&event->child_mutex);
e53c0994
PZ
4147
4148 return total;
4149}
fb0459d7 4150EXPORT_SYMBOL_GPL(perf_event_read_value);
e53c0994 4151
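/*
 * Accumulate one group's counts into @values using the PERF_FORMAT_GROUP
 * read layout; perf_read_group() calls this for the leader and each
 * inherited child so the totals add up:
 *
 *	{ u64 nr;
 *	  { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64 value; { u64 id; } && PERF_FORMAT_ID } cntr[nr];
 *	}
 */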
7d88962e 4152static int __perf_read_group_add(struct perf_event *leader,
fa8c2693 4153 u64 read_format, u64 *values)
3dab77fb 4154{
fa8c2693
PZ
4155 struct perf_event *sub;
4156 int n = 1; /* skip @nr */
7d88962e 4157 int ret;
f63a8daa 4158
7d88962e
SB
4159 ret = perf_event_read(leader, true);
4160 if (ret)
4161 return ret;
abf4868b 4162
fa8c2693
PZ
4163 /*
4164 * Since we co-schedule groups, {enabled,running} times of siblings
4165 * will be identical to those of the leader, so we only publish one
4166 * set.
4167 */
4168 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
4169 values[n++] += leader->total_time_enabled +
4170 atomic64_read(&leader->child_total_time_enabled);
4171 }
3dab77fb 4172
fa8c2693
PZ
4173 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
4174 values[n++] += leader->total_time_running +
4175 atomic64_read(&leader->child_total_time_running);
4176 }
4177
4178 /*
4179 * Write {count,id} tuples for every sibling.
4180 */
4181 values[n++] += perf_event_count(leader);
abf4868b
PZ
4182 if (read_format & PERF_FORMAT_ID)
4183 values[n++] = primary_event_id(leader);
3dab77fb 4184
fa8c2693
PZ
4185 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
4186 values[n++] += perf_event_count(sub);
4187 if (read_format & PERF_FORMAT_ID)
4188 values[n++] = primary_event_id(sub);
4189 }
7d88962e
SB
4190
4191 return 0;
fa8c2693 4192}
3dab77fb 4193
fa8c2693
PZ
4194static int perf_read_group(struct perf_event *event,
4195 u64 read_format, char __user *buf)
4196{
4197 struct perf_event *leader = event->group_leader, *child;
4198 struct perf_event_context *ctx = leader->ctx;
7d88962e 4199 int ret;
fa8c2693 4200 u64 *values;
3dab77fb 4201
fa8c2693 4202 lockdep_assert_held(&ctx->mutex);
3dab77fb 4203
fa8c2693
PZ
4204 values = kzalloc(event->read_size, GFP_KERNEL);
4205 if (!values)
4206 return -ENOMEM;
3dab77fb 4207
fa8c2693
PZ
4208 values[0] = 1 + leader->nr_siblings;
4209
4210 /*
4211 * By locking the child_mutex of the leader we effectively
4212 * lock the child list of all siblings.. XXX explain how.
4213 */
4214 mutex_lock(&leader->child_mutex);
abf4868b 4215
7d88962e
SB
4216 ret = __perf_read_group_add(leader, read_format, values);
4217 if (ret)
4218 goto unlock;
4219
4220 list_for_each_entry(child, &leader->child_list, child_list) {
4221 ret = __perf_read_group_add(child, read_format, values);
4222 if (ret)
4223 goto unlock;
4224 }
abf4868b 4225
fa8c2693 4226 mutex_unlock(&leader->child_mutex);
abf4868b 4227
7d88962e 4228 ret = event->read_size;
fa8c2693
PZ
4229 if (copy_to_user(buf, values, event->read_size))
4230 ret = -EFAULT;
7d88962e 4231 goto out;
fa8c2693 4232
7d88962e
SB
4233unlock:
4234 mutex_unlock(&leader->child_mutex);
4235out:
fa8c2693 4236 kfree(values);
abf4868b 4237 return ret;
3dab77fb
PZ
4238}
4239
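/*
 * Read a single (non-group) event: the value, followed by the optional
 * time_enabled, time_running and id fields selected by @read_format.
 */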
b15f495b 4240static int perf_read_one(struct perf_event *event,
3dab77fb
PZ
4241 u64 read_format, char __user *buf)
4242{
59ed446f 4243 u64 enabled, running;
3dab77fb
PZ
4244 u64 values[4];
4245 int n = 0;
4246
59ed446f
PZ
4247 values[n++] = perf_event_read_value(event, &enabled, &running);
4248 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
4249 values[n++] = enabled;
4250 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
4251 values[n++] = running;
3dab77fb 4252 if (read_format & PERF_FORMAT_ID)
cdd6c482 4253 values[n++] = primary_event_id(event);
3dab77fb
PZ
4254
4255 if (copy_to_user(buf, values, n * sizeof(u64)))
4256 return -EFAULT;
4257
4258 return n * sizeof(u64);
4259}
4260
dc633982
JO
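/*
 * An event counts as hung up once it has exited (or been torn down) and
 * no child events remain to report on its behalf.
 */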
4261static bool is_event_hup(struct perf_event *event)
4262{
4263 bool no_children;
4264
a69b0ca4 4265 if (event->state > PERF_EVENT_STATE_EXIT)
dc633982
JO
4266 return false;
4267
4268 mutex_lock(&event->child_mutex);
4269 no_children = list_empty(&event->child_list);
4270 mutex_unlock(&event->child_mutex);
4271 return no_children;
4272}
4273
0793a61d 4274/*
cdd6c482 4275 * Read the performance event - simple non-blocking version for now
0793a61d
TG
4276 */
4277static ssize_t
b15f495b 4278__perf_read(struct perf_event *event, char __user *buf, size_t count)
0793a61d 4279{
cdd6c482 4280 u64 read_format = event->attr.read_format;
3dab77fb 4281 int ret;
0793a61d 4282
3b6f9e5c 4283 /*
cdd6c482 4284 * Return end-of-file for a read on an event that is in
3b6f9e5c
PM
4285 * error state (i.e. because it was pinned but it couldn't be
4286 * scheduled on to the CPU at some point).
4287 */
cdd6c482 4288 if (event->state == PERF_EVENT_STATE_ERROR)
3b6f9e5c
PM
4289 return 0;
4290
c320c7b7 4291 if (count < event->read_size)
3dab77fb
PZ
4292 return -ENOSPC;
4293
cdd6c482 4294 WARN_ON_ONCE(event->ctx->parent_ctx);
3dab77fb 4295 if (read_format & PERF_FORMAT_GROUP)
b15f495b 4296 ret = perf_read_group(event, read_format, buf);
3dab77fb 4297 else
b15f495b 4298 ret = perf_read_one(event, read_format, buf);
0793a61d 4299
3dab77fb 4300 return ret;
0793a61d
TG
4301}
4302
0793a61d
TG
4303static ssize_t
4304perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
4305{
cdd6c482 4306 struct perf_event *event = file->private_data;
f63a8daa
PZ
4307 struct perf_event_context *ctx;
4308 int ret;
0793a61d 4309
f63a8daa 4310 ctx = perf_event_ctx_lock(event);
b15f495b 4311 ret = __perf_read(event, buf, count);
f63a8daa
PZ
4312 perf_event_ctx_unlock(event, ctx);
4313
4314 return ret;
0793a61d
TG
4315}
4316
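/*
 * poll() support: report POLLHUP once the event is dead with no children
 * left, otherwise report whatever the ring buffer has flagged as pending.
 */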
4317static unsigned int perf_poll(struct file *file, poll_table *wait)
4318{
cdd6c482 4319 struct perf_event *event = file->private_data;
76369139 4320 struct ring_buffer *rb;
61b67684 4321 unsigned int events = POLLHUP;
c7138f37 4322
e708d7ad 4323 poll_wait(file, &event->waitq, wait);
179033b3 4324
dc633982 4325 if (is_event_hup(event))
179033b3 4326 return events;
c7138f37 4327
10c6db11 4328 /*
9bb5d40c
PZ
4329 * Pin the event->rb by taking event->mmap_mutex; otherwise
4330 * perf_event_set_output() can swizzle our rb and make us miss wakeups.
10c6db11
PZ
4331 */
4332 mutex_lock(&event->mmap_mutex);
9bb5d40c
PZ
4333 rb = event->rb;
4334 if (rb)
76369139 4335 events = atomic_xchg(&rb->poll, 0);
10c6db11 4336 mutex_unlock(&event->mmap_mutex);
0793a61d
TG
4337 return events;
4338}
4339
f63a8daa 4340static void _perf_event_reset(struct perf_event *event)
6de6a7b9 4341{
7d88962e 4342 (void)perf_event_read(event, false);
e7850595 4343 local64_set(&event->count, 0);
cdd6c482 4344 perf_event_update_userpage(event);
3df5edad
PZ
4345}
4346
c93f7669 4347/*
cdd6c482
IM
4348 * Holding the top-level event's child_mutex means that any
4349 * descendant process that has inherited this event will block
8ba289b8 4350 * in perf_event_exit_event() if it goes to exit, thus satisfying the
cdd6c482 4351 * task existence requirements of perf_event_enable/disable.
c93f7669 4352 */
cdd6c482
IM
4353static void perf_event_for_each_child(struct perf_event *event,
4354 void (*func)(struct perf_event *))
3df5edad 4355{
cdd6c482 4356 struct perf_event *child;
3df5edad 4357
cdd6c482 4358 WARN_ON_ONCE(event->ctx->parent_ctx);
f63a8daa 4359
cdd6c482
IM
4360 mutex_lock(&event->child_mutex);
4361 func(event);
4362 list_for_each_entry(child, &event->child_list, child_list)
3df5edad 4363 func(child);
cdd6c482 4364 mutex_unlock(&event->child_mutex);
3df5edad
PZ
4365}
4366
cdd6c482
IM
4367static void perf_event_for_each(struct perf_event *event,
4368 void (*func)(struct perf_event *))
3df5edad 4369{
cdd6c482
IM
4370 struct perf_event_context *ctx = event->ctx;
4371 struct perf_event *sibling;
3df5edad 4372
f63a8daa
PZ
4373 lockdep_assert_held(&ctx->mutex);
4374
cdd6c482 4375 event = event->group_leader;
75f937f2 4376
cdd6c482 4377 perf_event_for_each_child(event, func);
cdd6c482 4378 list_for_each_entry(sibling, &event->sibling_list, group_entry)
724b6daa 4379 perf_event_for_each_child(sibling, func);
6de6a7b9
PZ
4380}
4381
fae3fde6
PZ
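/*
 * Update the event's sample period or frequency. If the event is
 * currently active, stop it first (clearing a pending unthrottle),
 * reset period_left and restart it so the new period takes effect.
 *
 * Userspace side (sketch, not part of this file): the new period is
 * passed by address, e.g.
 *
 *	u64 period = 4000;
 *	ioctl(perf_fd, PERF_EVENT_IOC_PERIOD, &period);
 */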
4382static void __perf_event_period(struct perf_event *event,
4383 struct perf_cpu_context *cpuctx,
4384 struct perf_event_context *ctx,
4385 void *info)
c7999c6f 4386{
fae3fde6 4387 u64 value = *((u64 *)info);
c7999c6f 4388 bool active;
08247e31 4389
cdd6c482 4390 if (event->attr.freq) {
cdd6c482 4391 event->attr.sample_freq = value;
08247e31 4392 } else {
cdd6c482
IM
4393 event->attr.sample_period = value;
4394 event->hw.sample_period = value;
08247e31 4395 }
bad7192b
PZ
4396
4397 active = (event->state == PERF_EVENT_STATE_ACTIVE);
4398 if (active) {
4399 perf_pmu_disable(ctx->pmu);
1e02cd40
PZ
4400 /*
4401 * We could be throttled; unthrottle now to avoid the tick
4402 * trying to unthrottle while we already re-started the event.
4403 */
4404 if (event->hw.interrupts == MAX_INTERRUPTS) {
4405 event->hw.interrupts = 0;
4406 perf_log_throttle(event, 1);
4407 }
bad7192b
PZ
4408 event->pmu->stop(event, PERF_EF_UPDATE);
4409 }
4410
4411 local64_set(&event->hw.period_left, 0);
4412
4413 if (active) {
4414 event->pmu->start(event, PERF_EF_RELOAD);
4415 perf_pmu_enable(ctx->pmu);
4416 }
c7999c6f
PZ
4417}
4418
4419static int perf_event_period(struct perf_event *event, u64 __user *arg)
4420{
c7999c6f
PZ
4421 u64 value;
4422
4423 if (!is_sampling_event(event))
4424 return -EINVAL;
4425
4426 if (copy_from_user(&value, arg, sizeof(value)))
4427 return -EFAULT;
4428
4429 if (!value)
4430 return -EINVAL;
4431
4432 if (event->attr.freq && value > sysctl_perf_event_sample_rate)
4433 return -EINVAL;
4434
fae3fde6 4435 event_function_call(event, __perf_event_period, &value);
08247e31 4436
c7999c6f 4437 return 0;
08247e31
PZ
4438}
4439
ac9721f3
PZ
4440static const struct file_operations perf_fops;
4441
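/*
 * Resolve @fd and verify it refers to a perf event file; used to
 * validate the fd handed to PERF_EVENT_IOC_SET_OUTPUT, e.g. (sketch)
 * ioctl(fd_b, PERF_EVENT_IOC_SET_OUTPUT, fd_a) redirects fd_b's output
 * into fd_a's ring buffer.
 */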
2903ff01 4442static inline int perf_fget_light(int fd, struct fd *p)
ac9721f3 4443{
2903ff01
AV
4444 struct fd f = fdget(fd);
4445 if (!f.file)
4446 return -EBADF;
ac9721f3 4447
2903ff01
AV
4448 if (f.file->f_op != &perf_fops) {
4449 fdput(f);
4450 return -EBADF;
ac9721f3 4451 }
2903ff01
AV
4452 *p = f;
4453 return 0;
ac9721f3
PZ
4454}
4455
4456static int perf_event_set_output(struct perf_event *event,
4457 struct perf_event *output_event);
6fb2915d 4458static int perf_event_set_filter(struct perf_event *event, void __user *arg);
2541517c 4459static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd);
a4be7c27 4460
f63a8daa 4461static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg)
d859e29f 4462{
cdd6c482 4463 void (*func)(struct perf_event *);
3df5edad 4464 u32 flags = arg;
d859e29f
PM
4465
4466 switch (cmd) {
cdd6c482 4467 case PERF_EVENT_IOC_ENABLE:
f63a8daa 4468 func = _perf_event_enable;
d859e29f 4469 break;
cdd6c482 4470 case PERF_EVENT_IOC_DISABLE:
f63a8daa 4471 func = _perf_event_disable;
79f14641 4472 break;
cdd6c482 4473 case PERF_EVENT_IOC_RESET:
f63a8daa 4474 func = _perf_event_reset;
6de6a7b9 4475 break;
3df5edad 4476
cdd6c482 4477 case PERF_EVENT_IOC_REFRESH:
f63a8daa 4478 return _perf_event_refresh(event, arg);
08247e31 4479
cdd6c482
IM
4480 case PERF_EVENT_IOC_PERIOD:
4481 return perf_event_period(event, (u64 __user *)arg);
08247e31 4482
cf4957f1
JO
4483 case PERF_EVENT_IOC_ID:
4484 {
4485 u64 id = primary_event_id(event);
4486
4487 if (copy_to_user((void __user *)arg, &id, sizeof(id)))
4488 return -EFAULT;
4489 return 0;
4490 }
4491
cdd6c482 4492 case PERF_EVENT_IOC_SET_OUTPUT:
ac9721f3 4493 {
ac9721f3 4494 int ret;
ac9721f3 4495 if (arg != -1) {
2903ff01
AV
4496 struct perf_event *output_event;
4497 struct fd output;
4498 ret = perf_fget_light(arg, &output);
4499 if (ret)
4500 return ret;
4501 output_event = output.file->private_data;
4502 ret = perf_event_set_output(event, output_event);
4503 fdput(output);
4504 } else {
4505 ret = perf_event_set_output(event, NULL);
ac9721f3 4506 }
ac9721f3
PZ
4507 return ret;
4508 }
a4be7c27 4509
6fb2915d
LZ
4510 case PERF_EVENT_IOC_SET_FILTER:
4511 return perf_event_set_filter(event, (void __user *)arg);
4512
2541517c
AS
4513 case PERF_EVENT_IOC_SET_BPF:
4514 return perf_event_set_bpf_prog(event, arg);
4515
86e7972f
WN
4516 case PERF_EVENT_IOC_PAUSE_OUTPUT: {
4517 struct ring_buffer *rb;
4518
4519 rcu_read_lock();
4520 rb = rcu_dereference(event->rb);
4521 if (!rb || !rb->nr_pages) {
4522 rcu_read_unlock();
4523 return -EINVAL;
4524 }
4525 rb_toggle_paused(rb, !!arg);
4526 rcu_read_unlock();
4527 return 0;
4528 }
d859e29f 4529 default:
3df5edad 4530 return -ENOTTY;
d859e29f 4531 }
3df5edad
PZ
4532
4533 if (flags & PERF_IOC_FLAG_GROUP)
cdd6c482 4534 perf_event_for_each(event, func);
3df5edad 4535 else
cdd6c482 4536 perf_event_for_each_child(event, func);
3df5edad
PZ
4537
4538 return 0;
d859e29f
PM
4539}
4540
f63a8daa
PZ
4541static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
4542{
4543 struct perf_event *event = file->private_data;
4544 struct perf_event_context *ctx;
4545 long ret;
4546
4547 ctx = perf_event_ctx_lock(event);
4548 ret = _perf_ioctl(event, cmd, arg);
4549 perf_event_ctx_unlock(event, ctx);
4550
4551 return ret;
4552}
4553
b3f20785
PM
4554#ifdef CONFIG_COMPAT
4555static long perf_compat_ioctl(struct file *file, unsigned int cmd,
4556 unsigned long arg)
4557{
4558 switch (_IOC_NR(cmd)) {
4559 case _IOC_NR(PERF_EVENT_IOC_SET_FILTER):
4560 case _IOC_NR(PERF_EVENT_IOC_ID):
4561 /* Fix up pointer size (usually 4 -> 8 in the 32-on-64-bit case) */
4562 if (_IOC_SIZE(cmd) == sizeof(compat_uptr_t)) {
4563 cmd &= ~IOCSIZE_MASK;
4564 cmd |= sizeof(void *) << IOCSIZE_SHIFT;
4565 }
4566 break;
4567 }
4568 return perf_ioctl(file, cmd, arg);
4569}
4570#else
4571# define perf_compat_ioctl NULL
4572#endif
4573
cdd6c482 4574int perf_event_task_enable(void)
771d7cde 4575{
f63a8daa 4576 struct perf_event_context *ctx;
cdd6c482 4577 struct perf_event *event;
771d7cde 4578
cdd6c482 4579 mutex_lock(&current->perf_event_mutex);
f63a8daa
PZ
4580 list_for_each_entry(event, &current->perf_event_list, owner_entry) {
4581 ctx = perf_event_ctx_lock(event);
4582 perf_event_for_each_child(event, _perf_event_enable);
4583 perf_event_ctx_unlock(event, ctx);
4584 }
cdd6c482 4585 mutex_unlock(&current->perf_event_mutex);
771d7cde
PZ
4586
4587 return 0;
4588}
4589
cdd6c482 4590int perf_event_task_disable(void)
771d7cde 4591{
f63a8daa 4592 struct perf_event_context *ctx;
cdd6c482 4593 struct perf_event *event;
771d7cde 4594
cdd6c482 4595 mutex_lock(&current->perf_event_mutex);
f63a8daa
PZ
4596 list_for_each_entry(event, &current->perf_event_list, owner_entry) {
4597 ctx = perf_event_ctx_lock(event);
4598 perf_event_for_each_child(event, _perf_event_disable);
4599 perf_event_ctx_unlock(event, ctx);
4600 }
cdd6c482 4601 mutex_unlock(&current->perf_event_mutex);
771d7cde
PZ
4602
4603 return 0;
4604}
4605
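/*
 * Index published to userspace through the mmap control page
 * (userpg->index); 0 means the counter cannot currently be read
 * directly from userspace.
 */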
cdd6c482 4606static int perf_event_index(struct perf_event *event)
194002b2 4607{
a4eaf7f1
PZ
4608 if (event->hw.state & PERF_HES_STOPPED)
4609 return 0;
4610
cdd6c482 4611 if (event->state != PERF_EVENT_STATE_ACTIVE)
194002b2
PZ
4612 return 0;
4613
35edc2a5 4614 return event->pmu->event_idx(event);
194002b2
PZ
4615}
4616
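/*
 * Derive now/enabled/running from the event's shadow context time; safe
 * to use from NMI context where update_context_time() cannot be called.
 */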
c4794295 4617static void calc_timer_values(struct perf_event *event,
e3f3541c 4618 u64 *now,
7f310a5d
EM
4619 u64 *enabled,
4620 u64 *running)
c4794295 4621{
e3f3541c 4622 u64 ctx_time;
c4794295 4623
e3f3541c
PZ
4624 *now = perf_clock();
4625 ctx_time = event->shadow_ctx_time + *now;
c4794295
EM
4626 *enabled = ctx_time - event->tstamp_enabled;
4627 *running = ctx_time - event->tstamp_running;
4628}
4629
fa731587
PZ
4630static void perf_event_init_userpage(struct perf_event *event)
4631{
4632 struct perf_event_mmap_page *userpg;
4633 struct ring_buffer *rb;
4634
4635 rcu_read_lock();
4636 rb = rcu_dereference(event->rb);
4637 if (!rb)
4638 goto unlock;
4639
4640 userpg = rb->user_page;
4641
4642 /* Allow new userspace to detect that bit 0 is deprecated */
4643 userpg->cap_bit0_is_deprecated = 1;
4644 userpg->size = offsetof(struct perf_event_mmap_page, __reserved);
e8c6deac
AS
4645 userpg->data_offset = PAGE_SIZE;
4646 userpg->data_size = perf_data_size(rb);
fa731587
PZ
4647
4648unlock:
4649 rcu_read_unlock();
4650}
4651
c1317ec2
AL
4652void __weak arch_perf_update_userpage(
4653 struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now)
e3f3541c
PZ
4654{
4655}
4656
38ff667b
PZ
4657/*
4658 * Callers need to ensure there can be no nesting of this function, otherwise
4659 * the seqlock logic goes bad. We cannot serialize this because the arch
4660 * code calls this from NMI context.
4661 */
cdd6c482 4662void perf_event_update_userpage(struct perf_event *event)
37d81828 4663{
cdd6c482 4664 struct perf_event_mmap_page *userpg;
76369139 4665 struct ring_buffer *rb;
e3f3541c 4666 u64 enabled, running, now;
38ff667b
PZ
4667
4668 rcu_read_lock();
5ec4c599
PZ
4669 rb = rcu_dereference(event->rb);
4670 if (!rb)
4671 goto unlock;
4672
0d641208
EM
4673 /*
4674 * compute total_time_enabled, total_time_running
4675 * based on snapshot values taken when the event
4676 * was last scheduled in.
4677 *
4678 * We cannot simply call update_context_time()
4679 * because of locking issues, as we can be called in
4680 * NMI context.
4681 */
e3f3541c 4682 calc_timer_values(event, &now, &enabled, &running);
38ff667b 4683
76369139 4684 userpg = rb->user_page;
7b732a75
PZ
4685 /*
4686 * Disable preemption so as to not let the corresponding user-space
4687 * spin too long if we get preempted.
4688 */
4689 preempt_disable();
37d81828 4690 ++userpg->lock;
92f22a38 4691 barrier();
cdd6c482 4692 userpg->index = perf_event_index(event);
b5e58793 4693 userpg->offset = perf_event_count(event);
365a4038 4694 if (userpg->index)
e7850595 4695 userpg->offset -= local64_read(&event->hw.prev_count);
7b732a75 4696
0d641208 4697 userpg->time_enabled = enabled +
cdd6c482 4698 atomic64_read(&event->child_total_time_enabled);
7f8b4e4e 4699
0d641208 4700 userpg->time_running = running +
cdd6c482 4701 atomic64_read(&event->child_total_time_running);
7f8b4e4e 4702
c1317ec2 4703 arch_perf_update_userpage(event, userpg, now);
e3f3541c 4704
92f22a38 4705 barrier();
37d81828 4706 ++userpg->lock;
7b732a75 4707 preempt_enable();
38ff667b 4708unlock:
7b732a75 4709 rcu_read_unlock();
37d81828
PM
4710}
4711
906010b2
PZ
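/*
 * Fault in the page backing the given offset of the mapping: the user
 * control page at offset 0, buffer pages behind it. Only the control
 * page may be written to.
 */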
4712static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4713{
4714 struct perf_event *event = vma->vm_file->private_data;
76369139 4715 struct ring_buffer *rb;
906010b2
PZ
4716 int ret = VM_FAULT_SIGBUS;
4717
4718 if (vmf->flags & FAULT_FLAG_MKWRITE) {
4719 if (vmf->pgoff == 0)
4720 ret = 0;
4721 return ret;
4722 }
4723
4724 rcu_read_lock();
76369139
FW
4725 rb = rcu_dereference(event->rb);
4726 if (!rb)
906010b2
PZ
4727 goto unlock;
4728
4729 if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
4730 goto unlock;
4731
76369139 4732 vmf->page = perf_mmap_to_page(rb, vmf->pgoff);
906010b2
PZ
4733 if (!vmf->page)
4734 goto unlock;
4735
4736 get_page(vmf->page);
4737 vmf->page->mapping = vma->vm_file->f_mapping;
4738 vmf->page->index = vmf->pgoff;
4739
4740 ret = 0;
4741unlock:
4742 rcu_read_unlock();
4743
4744 return ret;
4745}
4746
10c6db11
PZ
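/*
 * Swap the event's ring buffer: unlink it from the old buffer's event
 * list, publish the new one via RCU, drop the old reference and issue
 * the wakeup that may have been missed while switching over.
 */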
4747static void ring_buffer_attach(struct perf_event *event,
4748 struct ring_buffer *rb)
4749{
b69cf536 4750 struct ring_buffer *old_rb = NULL;
10c6db11
PZ
4751 unsigned long flags;
4752
b69cf536
PZ
4753 if (event->rb) {
4754 /*
4755 * Should be impossible, we set this when removing
4756 * event->rb_entry and wait/clear when adding event->rb_entry.
4757 */
4758 WARN_ON_ONCE(event->rcu_pending);
10c6db11 4759
b69cf536 4760 old_rb = event->rb;
b69cf536
PZ
4761 spin_lock_irqsave(&old_rb->event_lock, flags);
4762 list_del_rcu(&event->rb_entry);
4763 spin_unlock_irqrestore(&old_rb->event_lock, flags);
10c6db11 4764
2f993cf0
ON
4765 event->rcu_batches = get_state_synchronize_rcu();
4766 event->rcu_pending = 1;
b69cf536 4767 }
10c6db11 4768
b69cf536 4769 if (rb) {
2f993cf0
ON
4770 if (event->rcu_pending) {
4771 cond_synchronize_rcu(event->rcu_batches);
4772 event->rcu_pending = 0;
4773 }
4774
b69cf536
PZ
4775 spin_lock_irqsave(&rb->event_lock, flags);
4776 list_add_rcu(&event->rb_entry, &rb->event_list);
4777 spin_unlock_irqrestore(&rb->event_lock, flags);
4778 }
4779
4780 rcu_assign_pointer(event->rb, rb);
4781
4782 if (old_rb) {
4783 ring_buffer_put(old_rb);
4784 /*
4785 * Since we detached the old rb before attaching the new one,
4786 * we could have missed a wakeup.
4787 * Provide it now.
4788 */
4789 wake_up_all(&event->waitq);
4790 }
10c6db11
PZ
4791}
4792
4793static void ring_buffer_wakeup(struct perf_event *event)
4794{
4795 struct ring_buffer *rb;
4796
4797 rcu_read_lock();
4798 rb = rcu_dereference(event->rb);
9bb5d40c
PZ
4799 if (rb) {
4800 list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
4801 wake_up_all(&event->waitq);
4802 }
10c6db11
PZ
4803 rcu_read_unlock();
4804}
4805
fdc26706 4806struct ring_buffer *ring_buffer_get(struct perf_event *event)
7b732a75 4807{
76369139 4808 struct ring_buffer *rb;
7b732a75 4809
ac9721f3 4810 rcu_read_lock();
76369139
FW
4811 rb = rcu_dereference(event->rb);
4812 if (rb) {
4813 if (!atomic_inc_not_zero(&rb->refcount))
4814 rb = NULL;
ac9721f3
PZ
4815 }
4816 rcu_read_unlock();
4817
76369139 4818 return rb;
ac9721f3
PZ
4819}
4820
fdc26706 4821void ring_buffer_put(struct ring_buffer *rb)
ac9721f3 4822{
76369139 4823 if (!atomic_dec_and_test(&rb->refcount))
ac9721f3 4824 return;
7b732a75 4825
9bb5d40c 4826 WARN_ON_ONCE(!list_empty(&rb->event_list));
10c6db11 4827
76369139 4828 call_rcu(&rb->rcu_head, rb_free_rcu);
7b732a75
PZ
4829}
4830
4831static void perf_mmap_open(struct vm_area_struct *vma)
4832{
cdd6c482 4833 struct perf_event *event = vma->vm_file->private_data;
7b732a75 4834
cdd6c482 4835 atomic_inc(&event->mmap_count);
9bb5d40c 4836 atomic_inc(&event->rb->mmap_count);
1e0fb9ec 4837
45bfb2e5
PZ
4838 if (vma->vm_pgoff)
4839 atomic_inc(&event->rb->aux_mmap_count);
4840
1e0fb9ec
AL
4841 if (event->pmu->event_mapped)
4842 event->pmu->event_mapped(event);
7b732a75
PZ
4843}
4844
95ff4ca2
AS
4845static void perf_pmu_output_stop(struct perf_event *event);
4846
9bb5d40c
PZ
4847/*
4848 * A buffer can be mmap()ed multiple times; either directly through the same
4849 * event, or through other events by use of perf_event_set_output().
4850 *
4851 * In order to undo the VM accounting done by perf_mmap() we need to destroy
4852 * the buffer here, where we still have a VM context. This means we need
4853 * to detach all events redirecting to us.
4854 */
7b732a75
PZ
4855static void perf_mmap_close(struct vm_area_struct *vma)
4856{
cdd6c482 4857 struct perf_event *event = vma->vm_file->private_data;
7b732a75 4858
b69cf536 4859 struct ring_buffer *rb = ring_buffer_get(event);
9bb5d40c
PZ
4860 struct user_struct *mmap_user = rb->mmap_user;
4861 int mmap_locked = rb->mmap_locked;
4862 unsigned long size = perf_data_size(rb);
789f90fc 4863
1e0fb9ec
AL
4864 if (event->pmu->event_unmapped)
4865 event->pmu->event_unmapped(event);
4866
45bfb2e5
PZ
4867 /*
4868 * rb->aux_mmap_count will always drop before rb->mmap_count and
4869 * event->mmap_count, so it is ok to use event->mmap_mutex to
4870 * serialize with perf_mmap here.
4871 */
4872 if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff &&
4873 atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) {
95ff4ca2
AS
4874 /*
4875 * Stop all AUX events that are writing to this buffer,
4876 * so that we can free its AUX pages and corresponding PMU
4877 * data. Note that after rb::aux_mmap_count dropped to zero,
4878 * they won't start any more (see perf_aux_output_begin()).
4879 */
4880 perf_pmu_output_stop(event);
4881
4882 /* now it's safe to free the pages */
45bfb2e5
PZ
4883 atomic_long_sub(rb->aux_nr_pages, &mmap_user->locked_vm);
4884 vma->vm_mm->pinned_vm -= rb->aux_mmap_locked;
4885
95ff4ca2 4886 /* this has to be the last one */
45bfb2e5 4887 rb_free_aux(rb);
95ff4ca2
AS
4888 WARN_ON_ONCE(atomic_read(&rb->aux_refcount));
4889
45bfb2e5
PZ
4890 mutex_unlock(&event->mmap_mutex);
4891 }
4892
9bb5d40c
PZ
4893 atomic_dec(&rb->mmap_count);
4894
4895 if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
b69cf536 4896 goto out_put;
9bb5d40c 4897
b69cf536 4898 ring_buffer_attach(event, NULL);
9bb5d40c
PZ
4899 mutex_unlock(&event->mmap_mutex);
4900
4901 /* If there's still other mmap()s of this buffer, we're done. */
b69cf536
PZ
4902 if (atomic_read(&rb->mmap_count))
4903 goto out_put;
ac9721f3 4904
9bb5d40c
PZ
4905 /*
4906 * No other mmap()s, detach from all other events that might redirect
4907 * into the now unreachable buffer. Somewhat complicated by the
4908 * fact that rb::event_lock otherwise nests inside mmap_mutex.
4909 */
4910again:
4911 rcu_read_lock();
4912 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
4913 if (!atomic_long_inc_not_zero(&event->refcount)) {
4914 /*
4915 * This event is en-route to free_event() which will
4916 * detach it and remove it from the list.
4917 */
4918 continue;
4919 }
4920 rcu_read_unlock();
789f90fc 4921
9bb5d40c
PZ
4922 mutex_lock(&event->mmap_mutex);
4923 /*
4924 * Check we didn't race with perf_event_set_output() which can
4925 * swizzle the rb from under us while we were waiting to
4926 * acquire mmap_mutex.
4927 *
4928 * If we find a different rb, ignore this event; the next
4929 * iteration will no longer find it on the list. We have to
4930 * still restart the iteration to make sure we're not now
4931 * iterating the wrong list.
4932 */
b69cf536
PZ
4933 if (event->rb == rb)
4934 ring_buffer_attach(event, NULL);
4935
cdd6c482 4936 mutex_unlock(&event->mmap_mutex);
9bb5d40c 4937 put_event(event);
ac9721f3 4938
9bb5d40c
PZ
4939 /*
4940 * Restart the iteration; either we're on the wrong list or
4941 * destroyed its integrity by doing a deletion.
4942 */
4943 goto again;
7b732a75 4944 }
9bb5d40c
PZ
4945 rcu_read_unlock();
4946
4947 /*
4948 * It could be there's still a few 0-ref events on the list; they'll
4949 * get cleaned up by free_event() -- they'll also still have their
4950 * ref on the rb and will free it whenever they are done with it.
4951 *
4952 * Aside from that, this buffer is 'fully' detached and unmapped,
4953 * undo the VM accounting.
4954 */
4955
4956 atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm);
4957 vma->vm_mm->pinned_vm -= mmap_locked;
4958 free_uid(mmap_user);
4959
b69cf536 4960out_put:
9bb5d40c 4961 ring_buffer_put(rb); /* could be last */
37d81828
PM
4962}
4963
f0f37e2f 4964static const struct vm_operations_struct perf_mmap_vmops = {
43a21ea8 4965 .open = perf_mmap_open,
45bfb2e5 4966 .close = perf_mmap_close, /* non-mergeable */
43a21ea8
PZ
4967 .fault = perf_mmap_fault,
4968 .page_mkwrite = perf_mmap_fault,
37d81828
PM
4969};
4970
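/*
 * mmap() of a perf event: offset 0 maps the user control page plus the
 * data buffer, a non-zero offset maps the AUX area. Pages are charged
 * against the per-user mlock allowance and RLIMIT_MEMLOCK.
 *
 * Userspace side (sketch, not part of this file): map 1 control page
 * plus a power-of-two number of data pages, shared and writable, e.g.
 *
 *	mmap(NULL, (1 + 8) * page_size, PROT_READ | PROT_WRITE,
 *	     MAP_SHARED, perf_fd, 0);
 */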
4971static int perf_mmap(struct file *file, struct vm_area_struct *vma)
4972{
cdd6c482 4973 struct perf_event *event = file->private_data;
22a4f650 4974 unsigned long user_locked, user_lock_limit;
789f90fc 4975 struct user_struct *user = current_user();
22a4f650 4976 unsigned long locked, lock_limit;
45bfb2e5 4977 struct ring_buffer *rb = NULL;
7b732a75
PZ
4978 unsigned long vma_size;
4979 unsigned long nr_pages;
45bfb2e5 4980 long user_extra = 0, extra = 0;
d57e34fd 4981 int ret = 0, flags = 0;
37d81828 4982
c7920614
PZ
4983 /*
4984 * Don't allow mmap() of inherited per-task counters. This would
4985 * create a performance issue due to all children writing to the
76369139 4986 * same rb.
c7920614
PZ
4987 */
4988 if (event->cpu == -1 && event->attr.inherit)
4989 return -EINVAL;
4990
43a21ea8 4991 if (!(vma->vm_flags & VM_SHARED))
37d81828 4992 return -EINVAL;
7b732a75
PZ
4993
4994 vma_size = vma->vm_end - vma->vm_start;
45bfb2e5
PZ
4995
4996 if (vma->vm_pgoff == 0) {
4997 nr_pages = (vma_size / PAGE_SIZE) - 1;
4998 } else {
4999 /*
5000 * AUX area mapping: if rb->aux_nr_pages != 0, it's already
5001 * mapped, all subsequent mappings should have the same size
5002 * and offset. Must be above the normal perf buffer.
5003 */
5004 u64 aux_offset, aux_size;
5005
5006 if (!event->rb)
5007 return -EINVAL;
5008
5009 nr_pages = vma_size / PAGE_SIZE;
5010
5011 mutex_lock(&event->mmap_mutex);
5012 ret = -EINVAL;
5013
5014 rb = event->rb;
5015 if (!rb)
5016 goto aux_unlock;
5017
5018 aux_offset = ACCESS_ONCE(rb->user_page->aux_offset);
5019 aux_size = ACCESS_ONCE(rb->user_page->aux_size);
5020
5021 if (aux_offset < perf_data_size(rb) + PAGE_SIZE)
5022 goto aux_unlock;
5023
5024 if (aux_offset != vma->vm_pgoff << PAGE_SHIFT)
5025 goto aux_unlock;
5026
5027 /* already mapped with a different offset */
5028 if (rb_has_aux(rb) && rb->aux_pgoff != vma->vm_pgoff)
5029 goto aux_unlock;
5030
5031 if (aux_size != vma_size || aux_size != nr_pages * PAGE_SIZE)
5032 goto aux_unlock;
5033
5034 /* already mapped with a different size */
5035 if (rb_has_aux(rb) && rb->aux_nr_pages != nr_pages)
5036 goto aux_unlock;
5037
5038 if (!is_power_of_2(nr_pages))
5039 goto aux_unlock;
5040
5041 if (!atomic_inc_not_zero(&rb->mmap_count))
5042 goto aux_unlock;
5043
5044 if (rb_has_aux(rb)) {
5045 atomic_inc(&rb->aux_mmap_count);
5046 ret = 0;
5047 goto unlock;
5048 }
5049
5050 atomic_set(&rb->aux_mmap_count, 1);
5051 user_extra = nr_pages;
5052
5053 goto accounting;
5054 }
7b732a75 5055
7730d865 5056 /*
76369139 5057 * If we have rb pages ensure they're a power-of-two number, so we
7730d865
PZ
5058 * can do bitmasks instead of modulo.
5059 */
2ed11312 5060 if (nr_pages != 0 && !is_power_of_2(nr_pages))
37d81828
PM
5061 return -EINVAL;
5062
7b732a75 5063 if (vma_size != PAGE_SIZE * (1 + nr_pages))
37d81828
PM
5064 return -EINVAL;
5065
cdd6c482 5066 WARN_ON_ONCE(event->ctx->parent_ctx);
9bb5d40c 5067again:
cdd6c482 5068 mutex_lock(&event->mmap_mutex);
76369139 5069 if (event->rb) {
9bb5d40c 5070 if (event->rb->nr_pages != nr_pages) {
ebb3c4c4 5071 ret = -EINVAL;
9bb5d40c
PZ
5072 goto unlock;
5073 }
5074
5075 if (!atomic_inc_not_zero(&event->rb->mmap_count)) {
5076 /*
5077 * Raced against perf_mmap_close() through
5078 * perf_event_set_output(). Try again, hope for better
5079 * luck.
5080 */
5081 mutex_unlock(&event->mmap_mutex);
5082 goto again;
5083 }
5084
ebb3c4c4
PZ
5085 goto unlock;
5086 }
5087
789f90fc 5088 user_extra = nr_pages + 1;
45bfb2e5
PZ
5089
5090accounting:
cdd6c482 5091 user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
a3862d3f
IM
5092
5093 /*
5094 * Increase the limit linearly with more CPUs:
5095 */
5096 user_lock_limit *= num_online_cpus();
5097
789f90fc 5098 user_locked = atomic_long_read(&user->locked_vm) + user_extra;
c5078f78 5099
789f90fc
PZ
5100 if (user_locked > user_lock_limit)
5101 extra = user_locked - user_lock_limit;
7b732a75 5102
78d7d407 5103 lock_limit = rlimit(RLIMIT_MEMLOCK);
7b732a75 5104 lock_limit >>= PAGE_SHIFT;
bc3e53f6 5105 locked = vma->vm_mm->pinned_vm + extra;
7b732a75 5106
459ec28a
IM
5107 if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
5108 !capable(CAP_IPC_LOCK)) {
ebb3c4c4
PZ
5109 ret = -EPERM;
5110 goto unlock;
5111 }
7b732a75 5112
45bfb2e5 5113 WARN_ON(!rb && event->rb);
906010b2 5114
d57e34fd 5115 if (vma->vm_flags & VM_WRITE)
76369139 5116 flags |= RING_BUFFER_WRITABLE;
d57e34fd 5117
76369139 5118 if (!rb) {
45bfb2e5
PZ
5119 rb = rb_alloc(nr_pages,
5120 event->attr.watermark ? event->attr.wakeup_watermark : 0,
5121 event->cpu, flags);
26cb63ad 5122
45bfb2e5
PZ
5123 if (!rb) {
5124 ret = -ENOMEM;
5125 goto unlock;
5126 }
43a21ea8 5127
45bfb2e5
PZ
5128 atomic_set(&rb->mmap_count, 1);
5129 rb->mmap_user = get_current_user();
5130 rb->mmap_locked = extra;
26cb63ad 5131
45bfb2e5 5132 ring_buffer_attach(event, rb);
ac9721f3 5133
45bfb2e5
PZ
5134 perf_event_init_userpage(event);
5135 perf_event_update_userpage(event);
5136 } else {
1a594131
AS
5137 ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages,
5138 event->attr.aux_watermark, flags);
45bfb2e5
PZ
5139 if (!ret)
5140 rb->aux_mmap_locked = extra;
5141 }
9a0f05cb 5142
ebb3c4c4 5143unlock:
45bfb2e5
PZ
5144 if (!ret) {
5145 atomic_long_add(user_extra, &user->locked_vm);
5146 vma->vm_mm->pinned_vm += extra;
5147
ac9721f3 5148 atomic_inc(&event->mmap_count);
45bfb2e5
PZ
5149 } else if (rb) {
5150 atomic_dec(&rb->mmap_count);
5151 }
5152aux_unlock:
cdd6c482 5153 mutex_unlock(&event->mmap_mutex);
37d81828 5154
9bb5d40c
PZ
5155 /*
5156 * Since pinned accounting is per vm we cannot allow fork() to copy our
5157 * vma.
5158 */
26cb63ad 5159 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
37d81828 5160 vma->vm_ops = &perf_mmap_vmops;
7b732a75 5161
1e0fb9ec
AL
5162 if (event->pmu->event_mapped)
5163 event->pmu->event_mapped(event);
5164
7b732a75 5165 return ret;
37d81828
PM
5166}
5167
3c446b3d
PZ
5168static int perf_fasync(int fd, struct file *filp, int on)
5169{
496ad9aa 5170 struct inode *inode = file_inode(filp);
cdd6c482 5171 struct perf_event *event = filp->private_data;
3c446b3d
PZ
5172 int retval;
5173
5955102c 5174 inode_lock(inode);
cdd6c482 5175 retval = fasync_helper(fd, filp, on, &event->fasync);
5955102c 5176 inode_unlock(inode);
3c446b3d
PZ
5177
5178 if (retval < 0)
5179 return retval;
5180
5181 return 0;
5182}
5183
0793a61d 5184static const struct file_operations perf_fops = {
3326c1ce 5185 .llseek = no_llseek,
0793a61d
TG
5186 .release = perf_release,
5187 .read = perf_read,
5188 .poll = perf_poll,
d859e29f 5189 .unlocked_ioctl = perf_ioctl,
b3f20785 5190 .compat_ioctl = perf_compat_ioctl,
37d81828 5191 .mmap = perf_mmap,
3c446b3d 5192 .fasync = perf_fasync,
0793a61d
TG
5193};
5194
925d519a 5195/*
cdd6c482 5196 * Perf event wakeup
925d519a
PZ
5197 *
5198 * If there's data, ensure we set the poll() state and publish everything
5199 * to user-space before waking everybody up.
5200 */
5201
fed66e2c
PZ
5202static inline struct fasync_struct **perf_event_fasync(struct perf_event *event)
5203{
5204 /* only the parent has fasync state */
5205 if (event->parent)
5206 event = event->parent;
5207 return &event->fasync;
5208}
5209
cdd6c482 5210void perf_event_wakeup(struct perf_event *event)
925d519a 5211{
10c6db11 5212 ring_buffer_wakeup(event);
4c9e2542 5213
cdd6c482 5214 if (event->pending_kill) {
fed66e2c 5215 kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill);
cdd6c482 5216 event->pending_kill = 0;
4c9e2542 5217 }
925d519a
PZ
5218}
5219
e360adbe 5220static void perf_pending_event(struct irq_work *entry)
79f14641 5221{
cdd6c482
IM
5222 struct perf_event *event = container_of(entry,
5223 struct perf_event, pending);
d525211f
PZ
5224 int rctx;
5225
5226 rctx = perf_swevent_get_recursion_context();
5227 /*
5228 * If we 'fail' here, that's OK, it means recursion is already disabled
5229 * and we won't recurse 'further'.
5230 */
79f14641 5231
cdd6c482
IM
5232 if (event->pending_disable) {
5233 event->pending_disable = 0;
fae3fde6 5234 perf_event_disable_local(event);
79f14641
PZ
5235 }
5236
cdd6c482
IM
5237 if (event->pending_wakeup) {
5238 event->pending_wakeup = 0;
5239 perf_event_wakeup(event);
79f14641 5240 }
d525211f
PZ
5241
5242 if (rctx >= 0)
5243 perf_swevent_put_recursion_context(rctx);
79f14641
PZ
5244}
5245
39447b38
ZY
5246/*
5247 * We assume there is only KVM supporting the callbacks.
5248 * Later on, we might change it to a list if there is
5249 * another virtualization implementation supporting the callbacks.
5250 */
5251struct perf_guest_info_callbacks *perf_guest_cbs;
5252
5253int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
5254{
5255 perf_guest_cbs = cbs;
5256 return 0;
5257}
5258EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
5259
5260int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
5261{
5262 perf_guest_cbs = NULL;
5263 return 0;
5264}
5265EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
5266
4018994f
JO
5267static void
5268perf_output_sample_regs(struct perf_output_handle *handle,
5269 struct pt_regs *regs, u64 mask)
5270{
5271 int bit;
5272
5273 for_each_set_bit(bit, (const unsigned long *) &mask,
5274 sizeof(mask) * BITS_PER_BYTE) {
5275 u64 val;
5276
5277 val = perf_reg_value(regs, bit);
5278 perf_output_put(handle, val);
5279 }
5280}
5281
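/*
 * Capture the user-level register state for a sample: use @regs directly
 * when we interrupted user mode, derive it from the task otherwise, or
 * mark the sample as having no user regs for kernel threads.
 */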
60e2364e 5282static void perf_sample_regs_user(struct perf_regs *regs_user,
88a7c26a
AL
5283 struct pt_regs *regs,
5284 struct pt_regs *regs_user_copy)
4018994f 5285{
88a7c26a
AL
5286 if (user_mode(regs)) {
5287 regs_user->abi = perf_reg_abi(current);
2565711f 5288 regs_user->regs = regs;
88a7c26a
AL
5289 } else if (current->mm) {
5290 perf_get_regs_user(regs_user, regs, regs_user_copy);
2565711f
PZ
5291 } else {
5292 regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE;
5293 regs_user->regs = NULL;
4018994f
JO
5294 }
5295}
5296
60e2364e
SE
5297static void perf_sample_regs_intr(struct perf_regs *regs_intr,
5298 struct pt_regs *regs)
5299{
5300 regs_intr->regs = regs;
5301 regs_intr->abi = perf_reg_abi(current);
5302}
5303
5304
c5ebcedb
JO
5305/*
5306 * Get remaining task size from user stack pointer.
5307 *
5308 * It'd be better to use the stack VMA to limit this more
5309 * precisely, but there's no way to get it safely under interrupt,
5310 * so we use TASK_SIZE as the limit.
5311 */
5312static u64 perf_ustack_task_size(struct pt_regs *regs)
5313{
5314 unsigned long addr = perf_user_stack_pointer(regs);
5315
5316 if (!addr || addr >= TASK_SIZE)
5317 return 0;
5318
5319 return TASK_SIZE - addr;
5320}
5321
5322static u16
5323perf_sample_ustack_size(u16 stack_size, u16 header_size,
5324 struct pt_regs *regs)
5325{
5326 u64 task_size;
5327
5328 /* No regs, no stack pointer, no dump. */
5329 if (!regs)
5330 return 0;
5331
5332 /*
5333 * Check that the requested stack size fits within:
5334 * - TASK_SIZE
5335 * If it doesn't, limit the size to TASK_SIZE.
5336 *
5337 * - the remaining sample size
5338 * If it doesn't, shrink the stack size so it fits
5339 * into the remaining sample size.
5340 */
5341
5342 task_size = min((u64) USHRT_MAX, perf_ustack_task_size(regs));
5343 stack_size = min(stack_size, (u16) task_size);
5344
5345 /* Current header size plus static size and dynamic size. */
5346 header_size += 2 * sizeof(u64);
5347
5348 /* Does the sample size still fit in a u16 with the current stack dump size? */
5349 if ((u16) (header_size + stack_size) < header_size) {
5350 /*
5351 * If we overflow the maximum size for the sample,
5352 * we customize the stack dump size to fit in.
5353 */
5354 stack_size = USHRT_MAX - header_size - sizeof(u64);
5355 stack_size = round_up(stack_size, sizeof(u64));
5356 }
5357
5358 return stack_size;
5359}
5360
5361static void
5362perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
5363 struct pt_regs *regs)
5364{
5365 /* Case of a kernel thread, nothing to dump */
5366 if (!regs) {
5367 u64 size = 0;
5368 perf_output_put(handle, size);
5369 } else {
5370 unsigned long sp;
5371 unsigned int rem;
5372 u64 dyn_size;
5373
5374 /*
5375 * We dump:
5376 * static size
5377 * - the size requested by the user, or the best one we can fit
5378 * into the sample max size
5379 * data
5380 * - user stack dump data
5381 * dynamic size
5382 * - the actual dumped size
5383 */
5384
5385 /* Static size. */
5386 perf_output_put(handle, dump_size);
5387
5388 /* Data. */
5389 sp = perf_user_stack_pointer(regs);
5390 rem = __output_copy_user(handle, (void *) sp, dump_size);
5391 dyn_size = dump_size - rem;
5392
5393 perf_output_skip(handle, rem);
5394
5395 /* Dynamic size. */
5396 perf_output_put(handle, dyn_size);
5397 }
5398}
5399
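/*
 * Illustrative user-space sketch (added; the field values are example
 * assumptions): the dump machinery above is driven entirely by
 * perf_event_attr.  A profiler requesting user-stack samples, e.g.
 * for DWARF-based unwinding, would configure something like:
 *
 *	struct perf_event_attr attr = {};
 *
 *	attr.type		= PERF_TYPE_HARDWARE;
 *	attr.config		= PERF_COUNT_HW_CPU_CYCLES;
 *	attr.sample_period	= 100000;
 *	attr.sample_type	= PERF_SAMPLE_IP | PERF_SAMPLE_REGS_USER |
 *				  PERF_SAMPLE_STACK_USER;
 *	attr.sample_regs_user	= 0xff;		 (arch-specific register mask)
 *	attr.sample_stack_user	= 8192;		 (bytes of user stack to dump)
 *
 * Each sample then carries the static size, the copied stack data and
 * the dynamic size written by perf_output_sample_ustack() above.
 */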
c980d109
ACM
5400static void __perf_event_header__init_id(struct perf_event_header *header,
5401 struct perf_sample_data *data,
5402 struct perf_event *event)
6844c09d
ACM
5403{
5404 u64 sample_type = event->attr.sample_type;
5405
5406 data->type = sample_type;
5407 header->size += event->id_header_size;
5408
5409 if (sample_type & PERF_SAMPLE_TID) {
5410 /* namespace issues */
5411 data->tid_entry.pid = perf_event_pid(event, current);
5412 data->tid_entry.tid = perf_event_tid(event, current);
5413 }
5414
5415 if (sample_type & PERF_SAMPLE_TIME)
34f43927 5416 data->time = perf_event_clock(event);
6844c09d 5417
ff3d527c 5418 if (sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER))
6844c09d
ACM
5419 data->id = primary_event_id(event);
5420
5421 if (sample_type & PERF_SAMPLE_STREAM_ID)
5422 data->stream_id = event->id;
5423
5424 if (sample_type & PERF_SAMPLE_CPU) {
5425 data->cpu_entry.cpu = raw_smp_processor_id();
5426 data->cpu_entry.reserved = 0;
5427 }
5428}
5429
76369139
FW
5430void perf_event_header__init_id(struct perf_event_header *header,
5431 struct perf_sample_data *data,
5432 struct perf_event *event)
c980d109
ACM
5433{
5434 if (event->attr.sample_id_all)
5435 __perf_event_header__init_id(header, data, event);
5436}
5437
5438static void __perf_event__output_id_sample(struct perf_output_handle *handle,
5439 struct perf_sample_data *data)
5440{
5441 u64 sample_type = data->type;
5442
5443 if (sample_type & PERF_SAMPLE_TID)
5444 perf_output_put(handle, data->tid_entry);
5445
5446 if (sample_type & PERF_SAMPLE_TIME)
5447 perf_output_put(handle, data->time);
5448
5449 if (sample_type & PERF_SAMPLE_ID)
5450 perf_output_put(handle, data->id);
5451
5452 if (sample_type & PERF_SAMPLE_STREAM_ID)
5453 perf_output_put(handle, data->stream_id);
5454
5455 if (sample_type & PERF_SAMPLE_CPU)
5456 perf_output_put(handle, data->cpu_entry);
ff3d527c
AH
5457
5458 if (sample_type & PERF_SAMPLE_IDENTIFIER)
5459 perf_output_put(handle, data->id);
c980d109
ACM
5460}
5461
76369139
FW
5462void perf_event__output_id_sample(struct perf_event *event,
5463 struct perf_output_handle *handle,
5464 struct perf_sample_data *sample)
c980d109
ACM
5465{
5466 if (event->attr.sample_id_all)
5467 __perf_event__output_id_sample(handle, sample);
5468}
5469
3dab77fb 5470static void perf_output_read_one(struct perf_output_handle *handle,
eed01528
SE
5471 struct perf_event *event,
5472 u64 enabled, u64 running)
3dab77fb 5473{
cdd6c482 5474 u64 read_format = event->attr.read_format;
3dab77fb
PZ
5475 u64 values[4];
5476 int n = 0;
5477
b5e58793 5478 values[n++] = perf_event_count(event);
3dab77fb 5479 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
eed01528 5480 values[n++] = enabled +
cdd6c482 5481 atomic64_read(&event->child_total_time_enabled);
3dab77fb
PZ
5482 }
5483 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
eed01528 5484 values[n++] = running +
cdd6c482 5485 atomic64_read(&event->child_total_time_running);
3dab77fb
PZ
5486 }
5487 if (read_format & PERF_FORMAT_ID)
cdd6c482 5488 values[n++] = primary_event_id(event);
3dab77fb 5489
76369139 5490 __output_copy(handle, values, n * sizeof(u64));
3dab77fb
PZ
5491}
5492
5493/*
cdd6c482 5494 * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
3dab77fb
PZ
5495 */
5496static void perf_output_read_group(struct perf_output_handle *handle,
eed01528
SE
5497 struct perf_event *event,
5498 u64 enabled, u64 running)
3dab77fb 5499{
cdd6c482
IM
5500 struct perf_event *leader = event->group_leader, *sub;
5501 u64 read_format = event->attr.read_format;
3dab77fb
PZ
5502 u64 values[5];
5503 int n = 0;
5504
5505 values[n++] = 1 + leader->nr_siblings;
5506
5507 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
eed01528 5508 values[n++] = enabled;
3dab77fb
PZ
5509
5510 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
eed01528 5511 values[n++] = running;
3dab77fb 5512
cdd6c482 5513 if (leader != event)
3dab77fb
PZ
5514 leader->pmu->read(leader);
5515
b5e58793 5516 values[n++] = perf_event_count(leader);
3dab77fb 5517 if (read_format & PERF_FORMAT_ID)
cdd6c482 5518 values[n++] = primary_event_id(leader);
3dab77fb 5519
76369139 5520 __output_copy(handle, values, n * sizeof(u64));
3dab77fb 5521
65abc865 5522 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
3dab77fb
PZ
5523 n = 0;
5524
6f5ab001
JO
5525 if ((sub != event) &&
5526 (sub->state == PERF_EVENT_STATE_ACTIVE))
3dab77fb
PZ
5527 sub->pmu->read(sub);
5528
b5e58793 5529 values[n++] = perf_event_count(sub);
3dab77fb 5530 if (read_format & PERF_FORMAT_ID)
cdd6c482 5531 values[n++] = primary_event_id(sub);
3dab77fb 5532
76369139 5533 __output_copy(handle, values, n * sizeof(u64));
3dab77fb
PZ
5534 }
5535}
5536
eed01528
SE
5537#define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
5538 PERF_FORMAT_TOTAL_TIME_RUNNING)
5539
3dab77fb 5540static void perf_output_read(struct perf_output_handle *handle,
cdd6c482 5541 struct perf_event *event)
3dab77fb 5542{
e3f3541c 5543 u64 enabled = 0, running = 0, now;
eed01528
SE
5544 u64 read_format = event->attr.read_format;
5545
5546 /*
5547 * compute total_time_enabled, total_time_running
5548 * based on snapshot values taken when the event
5549 * was last scheduled in.
5550 *
5551 * we cannot simply call update_context_time()
5552 * because of locking issues, as we may be called in
5553 * NMI context
5554 */
c4794295 5555 if (read_format & PERF_FORMAT_TOTAL_TIMES)
e3f3541c 5556 calc_timer_values(event, &now, &enabled, &running);
eed01528 5557
cdd6c482 5558 if (event->attr.read_format & PERF_FORMAT_GROUP)
eed01528 5559 perf_output_read_group(handle, event, enabled, running);
3dab77fb 5560 else
eed01528 5561 perf_output_read_one(handle, event, enabled, running);
3dab77fb
PZ
5562}
5563
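/*
 * Illustrative layout sketch (added): the group encoding emitted by
 * perf_output_read_group() matches the documented read_format layout
 * that user space also gets from read(2) on a group leader with
 *
 *	attr.read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID |
 *			   PERF_FORMAT_TOTAL_TIME_ENABLED |
 *			   PERF_FORMAT_TOTAL_TIME_RUNNING;
 *
 * i.e., in pseudo-C:
 *
 *	struct read_format {
 *		u64 nr;				 (1 + number of siblings)
 *		u64 time_enabled;
 *		u64 time_running;
 *		struct { u64 value; u64 id; } values[nr];
 *	};
 */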
5622f295
MM
5564void perf_output_sample(struct perf_output_handle *handle,
5565 struct perf_event_header *header,
5566 struct perf_sample_data *data,
cdd6c482 5567 struct perf_event *event)
5622f295
MM
5568{
5569 u64 sample_type = data->type;
5570
5571 perf_output_put(handle, *header);
5572
ff3d527c
AH
5573 if (sample_type & PERF_SAMPLE_IDENTIFIER)
5574 perf_output_put(handle, data->id);
5575
5622f295
MM
5576 if (sample_type & PERF_SAMPLE_IP)
5577 perf_output_put(handle, data->ip);
5578
5579 if (sample_type & PERF_SAMPLE_TID)
5580 perf_output_put(handle, data->tid_entry);
5581
5582 if (sample_type & PERF_SAMPLE_TIME)
5583 perf_output_put(handle, data->time);
5584
5585 if (sample_type & PERF_SAMPLE_ADDR)
5586 perf_output_put(handle, data->addr);
5587
5588 if (sample_type & PERF_SAMPLE_ID)
5589 perf_output_put(handle, data->id);
5590
5591 if (sample_type & PERF_SAMPLE_STREAM_ID)
5592 perf_output_put(handle, data->stream_id);
5593
5594 if (sample_type & PERF_SAMPLE_CPU)
5595 perf_output_put(handle, data->cpu_entry);
5596
5597 if (sample_type & PERF_SAMPLE_PERIOD)
5598 perf_output_put(handle, data->period);
5599
5600 if (sample_type & PERF_SAMPLE_READ)
cdd6c482 5601 perf_output_read(handle, event);
5622f295
MM
5602
5603 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
5604 if (data->callchain) {
5605 int size = 1;
5606
5607 if (data->callchain)
5608 size += data->callchain->nr;
5609
5610 size *= sizeof(u64);
5611
76369139 5612 __output_copy(handle, data->callchain, size);
5622f295
MM
5613 } else {
5614 u64 nr = 0;
5615 perf_output_put(handle, nr);
5616 }
5617 }
5618
5619 if (sample_type & PERF_SAMPLE_RAW) {
5620 if (data->raw) {
fa128e6a
AS
5621 u32 raw_size = data->raw->size;
5622 u32 real_size = round_up(raw_size + sizeof(u32),
5623 sizeof(u64)) - sizeof(u32);
5624 u64 zero = 0;
5625
5626 perf_output_put(handle, real_size);
5627 __output_copy(handle, data->raw->data, raw_size);
5628 if (real_size - raw_size)
5629 __output_copy(handle, &zero, real_size - raw_size);
5622f295
MM
5630 } else {
5631 struct {
5632 u32 size;
5633 u32 data;
5634 } raw = {
5635 .size = sizeof(u32),
5636 .data = 0,
5637 };
5638 perf_output_put(handle, raw);
5639 }
5640 }
a7ac67ea 5641
bce38cd5
SE
5642 if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
5643 if (data->br_stack) {
5644 size_t size;
5645
5646 size = data->br_stack->nr
5647 * sizeof(struct perf_branch_entry);
5648
5649 perf_output_put(handle, data->br_stack->nr);
5650 perf_output_copy(handle, data->br_stack->entries, size);
5651 } else {
5652 /*
5653 * we always store at least the value of nr
5654 */
5655 u64 nr = 0;
5656 perf_output_put(handle, nr);
5657 }
5658 }
4018994f
JO
5659
5660 if (sample_type & PERF_SAMPLE_REGS_USER) {
5661 u64 abi = data->regs_user.abi;
5662
5663 /*
5664 * If there are no regs to dump, notice it through
5665 * first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE).
5666 */
5667 perf_output_put(handle, abi);
5668
5669 if (abi) {
5670 u64 mask = event->attr.sample_regs_user;
5671 perf_output_sample_regs(handle,
5672 data->regs_user.regs,
5673 mask);
5674 }
5675 }
c5ebcedb 5676
a5cdd40c 5677 if (sample_type & PERF_SAMPLE_STACK_USER) {
c5ebcedb
JO
5678 perf_output_sample_ustack(handle,
5679 data->stack_user_size,
5680 data->regs_user.regs);
a5cdd40c 5681 }
c3feedf2
AK
5682
5683 if (sample_type & PERF_SAMPLE_WEIGHT)
5684 perf_output_put(handle, data->weight);
d6be9ad6
SE
5685
5686 if (sample_type & PERF_SAMPLE_DATA_SRC)
5687 perf_output_put(handle, data->data_src.val);
a5cdd40c 5688
fdfbbd07
AK
5689 if (sample_type & PERF_SAMPLE_TRANSACTION)
5690 perf_output_put(handle, data->txn);
5691
60e2364e
SE
5692 if (sample_type & PERF_SAMPLE_REGS_INTR) {
5693 u64 abi = data->regs_intr.abi;
5694 /*
5695 * If there are no regs to dump, notice it through
5696 * first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE).
5697 */
5698 perf_output_put(handle, abi);
5699
5700 if (abi) {
5701 u64 mask = event->attr.sample_regs_intr;
5702
5703 perf_output_sample_regs(handle,
5704 data->regs_intr.regs,
5705 mask);
5706 }
5707 }
5708
a5cdd40c
PZ
5709 if (!event->attr.watermark) {
5710 int wakeup_events = event->attr.wakeup_events;
5711
5712 if (wakeup_events) {
5713 struct ring_buffer *rb = handle->rb;
5714 int events = local_inc_return(&rb->events);
5715
5716 if (events >= wakeup_events) {
5717 local_sub(wakeup_events, &rb->events);
5718 local_inc(&rb->wakeup);
5719 }
5720 }
5721 }
5622f295
MM
5722}
5723
5724void perf_prepare_sample(struct perf_event_header *header,
5725 struct perf_sample_data *data,
cdd6c482 5726 struct perf_event *event,
5622f295 5727 struct pt_regs *regs)
7b732a75 5728{
cdd6c482 5729 u64 sample_type = event->attr.sample_type;
7b732a75 5730
cdd6c482 5731 header->type = PERF_RECORD_SAMPLE;
c320c7b7 5732 header->size = sizeof(*header) + event->header_size;
5622f295
MM
5733
5734 header->misc = 0;
5735 header->misc |= perf_misc_flags(regs);
6fab0192 5736
c980d109 5737 __perf_event_header__init_id(header, data, event);
6844c09d 5738
c320c7b7 5739 if (sample_type & PERF_SAMPLE_IP)
5622f295
MM
5740 data->ip = perf_instruction_pointer(regs);
5741
b23f3325 5742 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
5622f295 5743 int size = 1;
394ee076 5744
e6dab5ff 5745 data->callchain = perf_callchain(event, regs);
5622f295
MM
5746
5747 if (data->callchain)
5748 size += data->callchain->nr;
5749
5750 header->size += size * sizeof(u64);
394ee076
PZ
5751 }
5752
3a43ce68 5753 if (sample_type & PERF_SAMPLE_RAW) {
a044560c
PZ
5754 int size = sizeof(u32);
5755
5756 if (data->raw)
5757 size += data->raw->size;
5758 else
5759 size += sizeof(u32);
5760
fa128e6a 5761 header->size += round_up(size, sizeof(u64));
7f453c24 5762 }
bce38cd5
SE
5763
5764 if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
5765 int size = sizeof(u64); /* nr */
5766 if (data->br_stack) {
5767 size += data->br_stack->nr
5768 * sizeof(struct perf_branch_entry);
5769 }
5770 header->size += size;
5771 }
4018994f 5772
2565711f 5773 if (sample_type & (PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER))
88a7c26a
AL
5774 perf_sample_regs_user(&data->regs_user, regs,
5775 &data->regs_user_copy);
2565711f 5776
4018994f
JO
5777 if (sample_type & PERF_SAMPLE_REGS_USER) {
5778 /* regs dump ABI info */
5779 int size = sizeof(u64);
5780
4018994f
JO
5781 if (data->regs_user.regs) {
5782 u64 mask = event->attr.sample_regs_user;
5783 size += hweight64(mask) * sizeof(u64);
5784 }
5785
5786 header->size += size;
5787 }
c5ebcedb
JO
5788
5789 if (sample_type & PERF_SAMPLE_STACK_USER) {
5790 /*
5791 * Either the PERF_SAMPLE_STACK_USER bit needs to always be
5792 * processed as the last one, or an additional check must be added
5793 * whenever a new sample type is introduced, because we could eat
5794 * up the rest of the sample size.
5795 */
c5ebcedb
JO
5796 u16 stack_size = event->attr.sample_stack_user;
5797 u16 size = sizeof(u64);
5798
c5ebcedb 5799 stack_size = perf_sample_ustack_size(stack_size, header->size,
2565711f 5800 data->regs_user.regs);
c5ebcedb
JO
5801
5802 /*
5803 * If there is something to dump, add space for the dump
5804 * itself and for the field that tells the dynamic size,
5805 * which is how many have been actually dumped.
5806 */
5807 if (stack_size)
5808 size += sizeof(u64) + stack_size;
5809
5810 data->stack_user_size = stack_size;
5811 header->size += size;
5812 }
60e2364e
SE
5813
5814 if (sample_type & PERF_SAMPLE_REGS_INTR) {
5815 /* regs dump ABI info */
5816 int size = sizeof(u64);
5817
5818 perf_sample_regs_intr(&data->regs_intr, regs);
5819
5820 if (data->regs_intr.regs) {
5821 u64 mask = event->attr.sample_regs_intr;
5822
5823 size += hweight64(mask) * sizeof(u64);
5824 }
5825
5826 header->size += size;
5827 }
5622f295 5828}
7f453c24 5829
9ecda41a
WN
5830static void __always_inline
5831__perf_event_output(struct perf_event *event,
5832 struct perf_sample_data *data,
5833 struct pt_regs *regs,
5834 int (*output_begin)(struct perf_output_handle *,
5835 struct perf_event *,
5836 unsigned int))
5622f295
MM
5837{
5838 struct perf_output_handle handle;
5839 struct perf_event_header header;
689802b2 5840
927c7a9e
FW
5841 /* protect the callchain buffers */
5842 rcu_read_lock();
5843
cdd6c482 5844 perf_prepare_sample(&header, data, event, regs);
5c148194 5845
9ecda41a 5846 if (output_begin(&handle, event, header.size))
927c7a9e 5847 goto exit;
0322cd6e 5848
cdd6c482 5849 perf_output_sample(&handle, &header, data, event);
f413cdb8 5850
8a057d84 5851 perf_output_end(&handle);
927c7a9e
FW
5852
5853exit:
5854 rcu_read_unlock();
0322cd6e
PZ
5855}
5856
9ecda41a
WN
5857void
5858perf_event_output_forward(struct perf_event *event,
5859 struct perf_sample_data *data,
5860 struct pt_regs *regs)
5861{
5862 __perf_event_output(event, data, regs, perf_output_begin_forward);
5863}
5864
5865void
5866perf_event_output_backward(struct perf_event *event,
5867 struct perf_sample_data *data,
5868 struct pt_regs *regs)
5869{
5870 __perf_event_output(event, data, regs, perf_output_begin_backward);
5871}
5872
5873void
5874perf_event_output(struct perf_event *event,
5875 struct perf_sample_data *data,
5876 struct pt_regs *regs)
5877{
5878 __perf_event_output(event, data, regs, perf_output_begin);
5879}
5880
38b200d6 5881/*
cdd6c482 5882 * read event_id
38b200d6
PZ
5883 */
5884
5885struct perf_read_event {
5886 struct perf_event_header header;
5887
5888 u32 pid;
5889 u32 tid;
38b200d6
PZ
5890};
5891
5892static void
cdd6c482 5893perf_event_read_event(struct perf_event *event,
38b200d6
PZ
5894 struct task_struct *task)
5895{
5896 struct perf_output_handle handle;
c980d109 5897 struct perf_sample_data sample;
dfc65094 5898 struct perf_read_event read_event = {
38b200d6 5899 .header = {
cdd6c482 5900 .type = PERF_RECORD_READ,
38b200d6 5901 .misc = 0,
c320c7b7 5902 .size = sizeof(read_event) + event->read_size,
38b200d6 5903 },
cdd6c482
IM
5904 .pid = perf_event_pid(event, task),
5905 .tid = perf_event_tid(event, task),
38b200d6 5906 };
3dab77fb 5907 int ret;
38b200d6 5908
c980d109 5909 perf_event_header__init_id(&read_event.header, &sample, event);
a7ac67ea 5910 ret = perf_output_begin(&handle, event, read_event.header.size);
38b200d6
PZ
5911 if (ret)
5912 return;
5913
dfc65094 5914 perf_output_put(&handle, read_event);
cdd6c482 5915 perf_output_read(&handle, event);
c980d109 5916 perf_event__output_id_sample(event, &handle, &sample);
3dab77fb 5917
38b200d6
PZ
5918 perf_output_end(&handle);
5919}
5920
aab5b71e 5921typedef void (perf_iterate_f)(struct perf_event *event, void *data);
52d857a8
JO
5922
5923static void
aab5b71e
PZ
5924perf_iterate_ctx(struct perf_event_context *ctx,
5925 perf_iterate_f output,
b73e4fef 5926 void *data, bool all)
52d857a8
JO
5927{
5928 struct perf_event *event;
5929
5930 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
b73e4fef
AS
5931 if (!all) {
5932 if (event->state < PERF_EVENT_STATE_INACTIVE)
5933 continue;
5934 if (!event_filter_match(event))
5935 continue;
5936 }
5937
67516844 5938 output(event, data);
52d857a8
JO
5939 }
5940}
5941
aab5b71e 5942static void perf_iterate_sb_cpu(perf_iterate_f output, void *data)
f2fb6bef
KL
5943{
5944 struct pmu_event_list *pel = this_cpu_ptr(&pmu_sb_events);
5945 struct perf_event *event;
5946
5947 list_for_each_entry_rcu(event, &pel->list, sb_list) {
5948 if (event->state < PERF_EVENT_STATE_INACTIVE)
5949 continue;
5950 if (!event_filter_match(event))
5951 continue;
5952 output(event, data);
5953 }
5954}
5955
aab5b71e
PZ
5956/*
5957 * Iterate all events that need to receive side-band events.
5958 *
5959 * For new callers: ensure that account_pmu_sb_event() includes
5960 * your event, otherwise it might not get delivered.
5961 */
52d857a8 5962static void
aab5b71e 5963perf_iterate_sb(perf_iterate_f output, void *data,
52d857a8
JO
5964 struct perf_event_context *task_ctx)
5965{
52d857a8 5966 struct perf_event_context *ctx;
52d857a8
JO
5967 int ctxn;
5968
aab5b71e
PZ
5969 rcu_read_lock();
5970 preempt_disable();
5971
4e93ad60 5972 /*
aab5b71e
PZ
5973 * If we have task_ctx != NULL we only notify the task context itself.
5974 * The task_ctx is set only for EXIT events before releasing task
4e93ad60
JO
5975 * context.
5976 */
5977 if (task_ctx) {
aab5b71e
PZ
5978 perf_iterate_ctx(task_ctx, output, data, false);
5979 goto done;
4e93ad60
JO
5980 }
5981
aab5b71e 5982 perf_iterate_sb_cpu(output, data);
f2fb6bef
KL
5983
5984 for_each_task_context_nr(ctxn) {
52d857a8
JO
5985 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
5986 if (ctx)
aab5b71e 5987 perf_iterate_ctx(ctx, output, data, false);
52d857a8 5988 }
aab5b71e 5989done:
f2fb6bef 5990 preempt_enable();
52d857a8 5991 rcu_read_unlock();
95ff4ca2
AS
5992}
5993
375637bc
AS
5994/*
5995 * Clear all file-based filters at exec, they'll have to be
5996 * re-instated when/if these objects are mmapped again.
5997 */
5998static void perf_event_addr_filters_exec(struct perf_event *event, void *data)
5999{
6000 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
6001 struct perf_addr_filter *filter;
6002 unsigned int restart = 0, count = 0;
6003 unsigned long flags;
6004
6005 if (!has_addr_filter(event))
6006 return;
6007
6008 raw_spin_lock_irqsave(&ifh->lock, flags);
6009 list_for_each_entry(filter, &ifh->list, entry) {
6010 if (filter->inode) {
6011 event->addr_filters_offs[count] = 0;
6012 restart++;
6013 }
6014
6015 count++;
6016 }
6017
6018 if (restart)
6019 event->addr_filters_gen++;
6020 raw_spin_unlock_irqrestore(&ifh->lock, flags);
6021
6022 if (restart)
6023 perf_event_restart(event);
6024}
6025
6026void perf_event_exec(void)
6027{
6028 struct perf_event_context *ctx;
6029 int ctxn;
6030
6031 rcu_read_lock();
6032 for_each_task_context_nr(ctxn) {
6033 ctx = current->perf_event_ctxp[ctxn];
6034 if (!ctx)
6035 continue;
6036
6037 perf_event_enable_on_exec(ctxn);
6038
aab5b71e 6039 perf_iterate_ctx(ctx, perf_event_addr_filters_exec, NULL,
375637bc
AS
6040 true);
6041 }
6042 rcu_read_unlock();
6043}
6044
95ff4ca2
AS
6045struct remote_output {
6046 struct ring_buffer *rb;
6047 int err;
6048};
6049
6050static void __perf_event_output_stop(struct perf_event *event, void *data)
6051{
6052 struct perf_event *parent = event->parent;
6053 struct remote_output *ro = data;
6054 struct ring_buffer *rb = ro->rb;
375637bc
AS
6055 struct stop_event_data sd = {
6056 .event = event,
6057 };
95ff4ca2
AS
6058
6059 if (!has_aux(event))
6060 return;
6061
6062 if (!parent)
6063 parent = event;
6064
6065 /*
6066 * In case of inheritance, it will be the parent that links to the
6067 * ring-buffer, but it will be the child that's actually using it:
6068 */
6069 if (rcu_dereference(parent->rb) == rb)
375637bc 6070 ro->err = __perf_event_stop(&sd);
95ff4ca2
AS
6071}
6072
6073static int __perf_pmu_output_stop(void *info)
6074{
6075 struct perf_event *event = info;
6076 struct pmu *pmu = event->pmu;
6077 struct perf_cpu_context *cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
6078 struct remote_output ro = {
6079 .rb = event->rb,
6080 };
6081
6082 rcu_read_lock();
aab5b71e 6083 perf_iterate_ctx(&cpuctx->ctx, __perf_event_output_stop, &ro, false);
95ff4ca2 6084 if (cpuctx->task_ctx)
aab5b71e 6085 perf_iterate_ctx(cpuctx->task_ctx, __perf_event_output_stop,
b73e4fef 6086 &ro, false);
95ff4ca2
AS
6087 rcu_read_unlock();
6088
6089 return ro.err;
6090}
6091
6092static void perf_pmu_output_stop(struct perf_event *event)
6093{
6094 struct perf_event *iter;
6095 int err, cpu;
6096
6097restart:
6098 rcu_read_lock();
6099 list_for_each_entry_rcu(iter, &event->rb->event_list, rb_entry) {
6100 /*
6101 * For per-CPU events, we need to make sure that neither they
6102 * nor their children are running; for cpu==-1 events it's
6103 * sufficient to stop the event itself if it's active, since
6104 * it can't have children.
6105 */
6106 cpu = iter->cpu;
6107 if (cpu == -1)
6108 cpu = READ_ONCE(iter->oncpu);
6109
6110 if (cpu == -1)
6111 continue;
6112
6113 err = cpu_function_call(cpu, __perf_pmu_output_stop, event);
6114 if (err == -EAGAIN) {
6115 rcu_read_unlock();
6116 goto restart;
6117 }
6118 }
6119 rcu_read_unlock();
52d857a8
JO
6120}
6121
60313ebe 6122/*
9f498cc5
PZ
6123 * task tracking -- fork/exit
6124 *
13d7a241 6125 * enabled by: attr.comm | attr.mmap | attr.mmap2 | attr.mmap_data | attr.task
60313ebe
PZ
6126 */
6127
9f498cc5 6128struct perf_task_event {
3a80b4a3 6129 struct task_struct *task;
cdd6c482 6130 struct perf_event_context *task_ctx;
60313ebe
PZ
6131
6132 struct {
6133 struct perf_event_header header;
6134
6135 u32 pid;
6136 u32 ppid;
9f498cc5
PZ
6137 u32 tid;
6138 u32 ptid;
393b2ad8 6139 u64 time;
cdd6c482 6140 } event_id;
60313ebe
PZ
6141};
6142
67516844
JO
6143static int perf_event_task_match(struct perf_event *event)
6144{
13d7a241
SE
6145 return event->attr.comm || event->attr.mmap ||
6146 event->attr.mmap2 || event->attr.mmap_data ||
6147 event->attr.task;
67516844
JO
6148}
6149
cdd6c482 6150static void perf_event_task_output(struct perf_event *event,
52d857a8 6151 void *data)
60313ebe 6152{
52d857a8 6153 struct perf_task_event *task_event = data;
60313ebe 6154 struct perf_output_handle handle;
c980d109 6155 struct perf_sample_data sample;
9f498cc5 6156 struct task_struct *task = task_event->task;
c980d109 6157 int ret, size = task_event->event_id.header.size;
8bb39f9a 6158
67516844
JO
6159 if (!perf_event_task_match(event))
6160 return;
6161
c980d109 6162 perf_event_header__init_id(&task_event->event_id.header, &sample, event);
60313ebe 6163
c980d109 6164 ret = perf_output_begin(&handle, event,
a7ac67ea 6165 task_event->event_id.header.size);
ef60777c 6166 if (ret)
c980d109 6167 goto out;
60313ebe 6168
cdd6c482
IM
6169 task_event->event_id.pid = perf_event_pid(event, task);
6170 task_event->event_id.ppid = perf_event_pid(event, current);
60313ebe 6171
cdd6c482
IM
6172 task_event->event_id.tid = perf_event_tid(event, task);
6173 task_event->event_id.ptid = perf_event_tid(event, current);
9f498cc5 6174
34f43927
PZ
6175 task_event->event_id.time = perf_event_clock(event);
6176
cdd6c482 6177 perf_output_put(&handle, task_event->event_id);
393b2ad8 6178
c980d109
ACM
6179 perf_event__output_id_sample(event, &handle, &sample);
6180
60313ebe 6181 perf_output_end(&handle);
c980d109
ACM
6182out:
6183 task_event->event_id.header.size = size;
60313ebe
PZ
6184}
6185
cdd6c482
IM
6186static void perf_event_task(struct task_struct *task,
6187 struct perf_event_context *task_ctx,
3a80b4a3 6188 int new)
60313ebe 6189{
9f498cc5 6190 struct perf_task_event task_event;
60313ebe 6191
cdd6c482
IM
6192 if (!atomic_read(&nr_comm_events) &&
6193 !atomic_read(&nr_mmap_events) &&
6194 !atomic_read(&nr_task_events))
60313ebe
PZ
6195 return;
6196
9f498cc5 6197 task_event = (struct perf_task_event){
3a80b4a3
PZ
6198 .task = task,
6199 .task_ctx = task_ctx,
cdd6c482 6200 .event_id = {
60313ebe 6201 .header = {
cdd6c482 6202 .type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
573402db 6203 .misc = 0,
cdd6c482 6204 .size = sizeof(task_event.event_id),
60313ebe 6205 },
573402db
PZ
6206 /* .pid */
6207 /* .ppid */
9f498cc5
PZ
6208 /* .tid */
6209 /* .ptid */
34f43927 6210 /* .time */
60313ebe
PZ
6211 },
6212 };
6213
aab5b71e 6214 perf_iterate_sb(perf_event_task_output,
52d857a8
JO
6215 &task_event,
6216 task_ctx);
9f498cc5
PZ
6217}
6218
cdd6c482 6219void perf_event_fork(struct task_struct *task)
9f498cc5 6220{
cdd6c482 6221 perf_event_task(task, NULL, 1);
60313ebe
PZ
6222}
6223
8d1b2d93
PZ
6224/*
6225 * comm tracking
6226 */
6227
6228struct perf_comm_event {
22a4f650
IM
6229 struct task_struct *task;
6230 char *comm;
8d1b2d93
PZ
6231 int comm_size;
6232
6233 struct {
6234 struct perf_event_header header;
6235
6236 u32 pid;
6237 u32 tid;
cdd6c482 6238 } event_id;
8d1b2d93
PZ
6239};
6240
67516844
JO
6241static int perf_event_comm_match(struct perf_event *event)
6242{
6243 return event->attr.comm;
6244}
6245
cdd6c482 6246static void perf_event_comm_output(struct perf_event *event,
52d857a8 6247 void *data)
8d1b2d93 6248{
52d857a8 6249 struct perf_comm_event *comm_event = data;
8d1b2d93 6250 struct perf_output_handle handle;
c980d109 6251 struct perf_sample_data sample;
cdd6c482 6252 int size = comm_event->event_id.header.size;
c980d109
ACM
6253 int ret;
6254
67516844
JO
6255 if (!perf_event_comm_match(event))
6256 return;
6257
c980d109
ACM
6258 perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
6259 ret = perf_output_begin(&handle, event,
a7ac67ea 6260 comm_event->event_id.header.size);
8d1b2d93
PZ
6261
6262 if (ret)
c980d109 6263 goto out;
8d1b2d93 6264
cdd6c482
IM
6265 comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
6266 comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
709e50cf 6267
cdd6c482 6268 perf_output_put(&handle, comm_event->event_id);
76369139 6269 __output_copy(&handle, comm_event->comm,
8d1b2d93 6270 comm_event->comm_size);
c980d109
ACM
6271
6272 perf_event__output_id_sample(event, &handle, &sample);
6273
8d1b2d93 6274 perf_output_end(&handle);
c980d109
ACM
6275out:
6276 comm_event->event_id.header.size = size;
8d1b2d93
PZ
6277}
6278
cdd6c482 6279static void perf_event_comm_event(struct perf_comm_event *comm_event)
8d1b2d93 6280{
413ee3b4 6281 char comm[TASK_COMM_LEN];
8d1b2d93 6282 unsigned int size;
8d1b2d93 6283
413ee3b4 6284 memset(comm, 0, sizeof(comm));
96b02d78 6285 strlcpy(comm, comm_event->task->comm, sizeof(comm));
888fcee0 6286 size = ALIGN(strlen(comm)+1, sizeof(u64));
8d1b2d93
PZ
6287
6288 comm_event->comm = comm;
6289 comm_event->comm_size = size;
6290
cdd6c482 6291 comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
8dc85d54 6292
aab5b71e 6293 perf_iterate_sb(perf_event_comm_output,
52d857a8
JO
6294 comm_event,
6295 NULL);
8d1b2d93
PZ
6296}
6297
82b89778 6298void perf_event_comm(struct task_struct *task, bool exec)
8d1b2d93 6299{
9ee318a7
PZ
6300 struct perf_comm_event comm_event;
6301
cdd6c482 6302 if (!atomic_read(&nr_comm_events))
9ee318a7 6303 return;
a63eaf34 6304
9ee318a7 6305 comm_event = (struct perf_comm_event){
8d1b2d93 6306 .task = task,
573402db
PZ
6307 /* .comm */
6308 /* .comm_size */
cdd6c482 6309 .event_id = {
573402db 6310 .header = {
cdd6c482 6311 .type = PERF_RECORD_COMM,
82b89778 6312 .misc = exec ? PERF_RECORD_MISC_COMM_EXEC : 0,
573402db
PZ
6313 /* .size */
6314 },
6315 /* .pid */
6316 /* .tid */
8d1b2d93
PZ
6317 },
6318 };
6319
cdd6c482 6320 perf_event_comm_event(&comm_event);
8d1b2d93
PZ
6321}
6322
0a4a9391
PZ
6323/*
6324 * mmap tracking
6325 */
6326
6327struct perf_mmap_event {
089dd79d
PZ
6328 struct vm_area_struct *vma;
6329
6330 const char *file_name;
6331 int file_size;
13d7a241
SE
6332 int maj, min;
6333 u64 ino;
6334 u64 ino_generation;
f972eb63 6335 u32 prot, flags;
0a4a9391
PZ
6336
6337 struct {
6338 struct perf_event_header header;
6339
6340 u32 pid;
6341 u32 tid;
6342 u64 start;
6343 u64 len;
6344 u64 pgoff;
cdd6c482 6345 } event_id;
0a4a9391
PZ
6346};
6347
67516844
JO
6348static int perf_event_mmap_match(struct perf_event *event,
6349 void *data)
6350{
6351 struct perf_mmap_event *mmap_event = data;
6352 struct vm_area_struct *vma = mmap_event->vma;
6353 int executable = vma->vm_flags & VM_EXEC;
6354
6355 return (!executable && event->attr.mmap_data) ||
13d7a241 6356 (executable && (event->attr.mmap || event->attr.mmap2));
67516844
JO
6357}
6358
cdd6c482 6359static void perf_event_mmap_output(struct perf_event *event,
52d857a8 6360 void *data)
0a4a9391 6361{
52d857a8 6362 struct perf_mmap_event *mmap_event = data;
0a4a9391 6363 struct perf_output_handle handle;
c980d109 6364 struct perf_sample_data sample;
cdd6c482 6365 int size = mmap_event->event_id.header.size;
c980d109 6366 int ret;
0a4a9391 6367
67516844
JO
6368 if (!perf_event_mmap_match(event, data))
6369 return;
6370
13d7a241
SE
6371 if (event->attr.mmap2) {
6372 mmap_event->event_id.header.type = PERF_RECORD_MMAP2;
6373 mmap_event->event_id.header.size += sizeof(mmap_event->maj);
6374 mmap_event->event_id.header.size += sizeof(mmap_event->min);
6375 mmap_event->event_id.header.size += sizeof(mmap_event->ino);
d008d525 6376 mmap_event->event_id.header.size += sizeof(mmap_event->ino_generation);
f972eb63
PZ
6377 mmap_event->event_id.header.size += sizeof(mmap_event->prot);
6378 mmap_event->event_id.header.size += sizeof(mmap_event->flags);
13d7a241
SE
6379 }
6380
c980d109
ACM
6381 perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
6382 ret = perf_output_begin(&handle, event,
a7ac67ea 6383 mmap_event->event_id.header.size);
0a4a9391 6384 if (ret)
c980d109 6385 goto out;
0a4a9391 6386
cdd6c482
IM
6387 mmap_event->event_id.pid = perf_event_pid(event, current);
6388 mmap_event->event_id.tid = perf_event_tid(event, current);
709e50cf 6389
cdd6c482 6390 perf_output_put(&handle, mmap_event->event_id);
13d7a241
SE
6391
6392 if (event->attr.mmap2) {
6393 perf_output_put(&handle, mmap_event->maj);
6394 perf_output_put(&handle, mmap_event->min);
6395 perf_output_put(&handle, mmap_event->ino);
6396 perf_output_put(&handle, mmap_event->ino_generation);
f972eb63
PZ
6397 perf_output_put(&handle, mmap_event->prot);
6398 perf_output_put(&handle, mmap_event->flags);
13d7a241
SE
6399 }
6400
76369139 6401 __output_copy(&handle, mmap_event->file_name,
0a4a9391 6402 mmap_event->file_size);
c980d109
ACM
6403
6404 perf_event__output_id_sample(event, &handle, &sample);
6405
78d613eb 6406 perf_output_end(&handle);
c980d109
ACM
6407out:
6408 mmap_event->event_id.header.size = size;
0a4a9391
PZ
6409}
6410
cdd6c482 6411static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
0a4a9391 6412{
089dd79d
PZ
6413 struct vm_area_struct *vma = mmap_event->vma;
6414 struct file *file = vma->vm_file;
13d7a241
SE
6415 int maj = 0, min = 0;
6416 u64 ino = 0, gen = 0;
f972eb63 6417 u32 prot = 0, flags = 0;
0a4a9391
PZ
6418 unsigned int size;
6419 char tmp[16];
6420 char *buf = NULL;
2c42cfbf 6421 char *name;
413ee3b4 6422
0a4a9391 6423 if (file) {
13d7a241
SE
6424 struct inode *inode;
6425 dev_t dev;
3ea2f2b9 6426
2c42cfbf 6427 buf = kmalloc(PATH_MAX, GFP_KERNEL);
0a4a9391 6428 if (!buf) {
c7e548b4
ON
6429 name = "//enomem";
6430 goto cpy_name;
0a4a9391 6431 }
413ee3b4 6432 /*
3ea2f2b9 6433 * d_path() works from the end of the rb backwards, so we
413ee3b4
AB
6434 * need to add enough zero bytes after the string to handle
6435 * the 64bit alignment we do later.
6436 */
9bf39ab2 6437 name = file_path(file, buf, PATH_MAX - sizeof(u64));
0a4a9391 6438 if (IS_ERR(name)) {
c7e548b4
ON
6439 name = "//toolong";
6440 goto cpy_name;
0a4a9391 6441 }
13d7a241
SE
6442 inode = file_inode(vma->vm_file);
6443 dev = inode->i_sb->s_dev;
6444 ino = inode->i_ino;
6445 gen = inode->i_generation;
6446 maj = MAJOR(dev);
6447 min = MINOR(dev);
f972eb63
PZ
6448
6449 if (vma->vm_flags & VM_READ)
6450 prot |= PROT_READ;
6451 if (vma->vm_flags & VM_WRITE)
6452 prot |= PROT_WRITE;
6453 if (vma->vm_flags & VM_EXEC)
6454 prot |= PROT_EXEC;
6455
6456 if (vma->vm_flags & VM_MAYSHARE)
6457 flags = MAP_SHARED;
6458 else
6459 flags = MAP_PRIVATE;
6460
6461 if (vma->vm_flags & VM_DENYWRITE)
6462 flags |= MAP_DENYWRITE;
6463 if (vma->vm_flags & VM_MAYEXEC)
6464 flags |= MAP_EXECUTABLE;
6465 if (vma->vm_flags & VM_LOCKED)
6466 flags |= MAP_LOCKED;
6467 if (vma->vm_flags & VM_HUGETLB)
6468 flags |= MAP_HUGETLB;
6469
c7e548b4 6470 goto got_name;
0a4a9391 6471 } else {
fbe26abe
JO
6472 if (vma->vm_ops && vma->vm_ops->name) {
6473 name = (char *) vma->vm_ops->name(vma);
6474 if (name)
6475 goto cpy_name;
6476 }
6477
2c42cfbf 6478 name = (char *)arch_vma_name(vma);
c7e548b4
ON
6479 if (name)
6480 goto cpy_name;
089dd79d 6481
32c5fb7e 6482 if (vma->vm_start <= vma->vm_mm->start_brk &&
3af9e859 6483 vma->vm_end >= vma->vm_mm->brk) {
c7e548b4
ON
6484 name = "[heap]";
6485 goto cpy_name;
32c5fb7e
ON
6486 }
6487 if (vma->vm_start <= vma->vm_mm->start_stack &&
3af9e859 6488 vma->vm_end >= vma->vm_mm->start_stack) {
c7e548b4
ON
6489 name = "[stack]";
6490 goto cpy_name;
089dd79d
PZ
6491 }
6492
c7e548b4
ON
6493 name = "//anon";
6494 goto cpy_name;
0a4a9391
PZ
6495 }
6496
c7e548b4
ON
6497cpy_name:
6498 strlcpy(tmp, name, sizeof(tmp));
6499 name = tmp;
0a4a9391 6500got_name:
2c42cfbf
PZ
6501 /*
6502 * Since our buffer works in 8 byte units we need to align our string
6503 * size to a multiple of 8. However, we must guarantee the tail end is
6504 * zero'd out to avoid leaking random bits to userspace.
6505 */
6506 size = strlen(name)+1;
6507 while (!IS_ALIGNED(size, sizeof(u64)))
6508 name[size++] = '\0';
0a4a9391
PZ
6509
6510 mmap_event->file_name = name;
6511 mmap_event->file_size = size;
13d7a241
SE
6512 mmap_event->maj = maj;
6513 mmap_event->min = min;
6514 mmap_event->ino = ino;
6515 mmap_event->ino_generation = gen;
f972eb63
PZ
6516 mmap_event->prot = prot;
6517 mmap_event->flags = flags;
0a4a9391 6518
2fe85427
SE
6519 if (!(vma->vm_flags & VM_EXEC))
6520 mmap_event->event_id.header.misc |= PERF_RECORD_MISC_MMAP_DATA;
6521
cdd6c482 6522 mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
0a4a9391 6523
aab5b71e 6524 perf_iterate_sb(perf_event_mmap_output,
52d857a8
JO
6525 mmap_event,
6526 NULL);
665c2142 6527
0a4a9391
PZ
6528 kfree(buf);
6529}
6530
375637bc
AS
6531/*
6532 * Whether this @filter depends on a dynamic object which is not loaded
6533 * yet or its load addresses are not known.
6534 */
6535static bool perf_addr_filter_needs_mmap(struct perf_addr_filter *filter)
6536{
6537 return filter->filter && filter->inode;
6538}
6539
6540/*
6541 * Check whether inode and address range match filter criteria.
6542 */
6543static bool perf_addr_filter_match(struct perf_addr_filter *filter,
6544 struct file *file, unsigned long offset,
6545 unsigned long size)
6546{
6547 if (filter->inode != file->f_inode)
6548 return false;
6549
6550 if (filter->offset > offset + size)
6551 return false;
6552
6553 if (filter->offset + filter->size < offset)
6554 return false;
6555
6556 return true;
6557}
6558
6559static void __perf_addr_filters_adjust(struct perf_event *event, void *data)
6560{
6561 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
6562 struct vm_area_struct *vma = data;
6563 unsigned long off = vma->vm_pgoff << PAGE_SHIFT, flags;
6564 struct file *file = vma->vm_file;
6565 struct perf_addr_filter *filter;
6566 unsigned int restart = 0, count = 0;
6567
6568 if (!has_addr_filter(event))
6569 return;
6570
6571 if (!file)
6572 return;
6573
6574 raw_spin_lock_irqsave(&ifh->lock, flags);
6575 list_for_each_entry(filter, &ifh->list, entry) {
6576 if (perf_addr_filter_match(filter, file, off,
6577 vma->vm_end - vma->vm_start)) {
6578 event->addr_filters_offs[count] = vma->vm_start;
6579 restart++;
6580 }
6581
6582 count++;
6583 }
6584
6585 if (restart)
6586 event->addr_filters_gen++;
6587 raw_spin_unlock_irqrestore(&ifh->lock, flags);
6588
6589 if (restart)
6590 perf_event_restart(event);
6591}
6592
6593/*
6594 * Adjust all task's events' filters to the new vma
6595 */
6596static void perf_addr_filters_adjust(struct vm_area_struct *vma)
6597{
6598 struct perf_event_context *ctx;
6599 int ctxn;
6600
6601 rcu_read_lock();
6602 for_each_task_context_nr(ctxn) {
6603 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
6604 if (!ctx)
6605 continue;
6606
aab5b71e 6607 perf_iterate_ctx(ctx, __perf_addr_filters_adjust, vma, true);
375637bc
AS
6608 }
6609 rcu_read_unlock();
6610}
6611
3af9e859 6612void perf_event_mmap(struct vm_area_struct *vma)
0a4a9391 6613{
9ee318a7
PZ
6614 struct perf_mmap_event mmap_event;
6615
cdd6c482 6616 if (!atomic_read(&nr_mmap_events))
9ee318a7
PZ
6617 return;
6618
6619 mmap_event = (struct perf_mmap_event){
089dd79d 6620 .vma = vma,
573402db
PZ
6621 /* .file_name */
6622 /* .file_size */
cdd6c482 6623 .event_id = {
573402db 6624 .header = {
cdd6c482 6625 .type = PERF_RECORD_MMAP,
39447b38 6626 .misc = PERF_RECORD_MISC_USER,
573402db
PZ
6627 /* .size */
6628 },
6629 /* .pid */
6630 /* .tid */
089dd79d
PZ
6631 .start = vma->vm_start,
6632 .len = vma->vm_end - vma->vm_start,
3a0304e9 6633 .pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT,
0a4a9391 6634 },
13d7a241
SE
6635 /* .maj (attr_mmap2 only) */
6636 /* .min (attr_mmap2 only) */
6637 /* .ino (attr_mmap2 only) */
6638 /* .ino_generation (attr_mmap2 only) */
f972eb63
PZ
6639 /* .prot (attr_mmap2 only) */
6640 /* .flags (attr_mmap2 only) */
0a4a9391
PZ
6641 };
6642
375637bc 6643 perf_addr_filters_adjust(vma);
cdd6c482 6644 perf_event_mmap_event(&mmap_event);
0a4a9391
PZ
6645}
6646
68db7e98
AS
6647void perf_event_aux_event(struct perf_event *event, unsigned long head,
6648 unsigned long size, u64 flags)
6649{
6650 struct perf_output_handle handle;
6651 struct perf_sample_data sample;
6652 struct perf_aux_event {
6653 struct perf_event_header header;
6654 u64 offset;
6655 u64 size;
6656 u64 flags;
6657 } rec = {
6658 .header = {
6659 .type = PERF_RECORD_AUX,
6660 .misc = 0,
6661 .size = sizeof(rec),
6662 },
6663 .offset = head,
6664 .size = size,
6665 .flags = flags,
6666 };
6667 int ret;
6668
6669 perf_event_header__init_id(&rec.header, &sample, event);
6670 ret = perf_output_begin(&handle, event, rec.header.size);
6671
6672 if (ret)
6673 return;
6674
6675 perf_output_put(&handle, rec);
6676 perf_event__output_id_sample(event, &handle, &sample);
6677
6678 perf_output_end(&handle);
6679}
6680
f38b0dbb
KL
6681/*
6682 * Lost/dropped samples logging
6683 */
6684void perf_log_lost_samples(struct perf_event *event, u64 lost)
6685{
6686 struct perf_output_handle handle;
6687 struct perf_sample_data sample;
6688 int ret;
6689
6690 struct {
6691 struct perf_event_header header;
6692 u64 lost;
6693 } lost_samples_event = {
6694 .header = {
6695 .type = PERF_RECORD_LOST_SAMPLES,
6696 .misc = 0,
6697 .size = sizeof(lost_samples_event),
6698 },
6699 .lost = lost,
6700 };
6701
6702 perf_event_header__init_id(&lost_samples_event.header, &sample, event);
6703
6704 ret = perf_output_begin(&handle, event,
6705 lost_samples_event.header.size);
6706 if (ret)
6707 return;
6708
6709 perf_output_put(&handle, lost_samples_event);
6710 perf_event__output_id_sample(event, &handle, &sample);
6711 perf_output_end(&handle);
6712}
6713
45ac1403
AH
6714/*
6715 * context_switch tracking
6716 */
6717
6718struct perf_switch_event {
6719 struct task_struct *task;
6720 struct task_struct *next_prev;
6721
6722 struct {
6723 struct perf_event_header header;
6724 u32 next_prev_pid;
6725 u32 next_prev_tid;
6726 } event_id;
6727};
6728
6729static int perf_event_switch_match(struct perf_event *event)
6730{
6731 return event->attr.context_switch;
6732}
6733
6734static void perf_event_switch_output(struct perf_event *event, void *data)
6735{
6736 struct perf_switch_event *se = data;
6737 struct perf_output_handle handle;
6738 struct perf_sample_data sample;
6739 int ret;
6740
6741 if (!perf_event_switch_match(event))
6742 return;
6743
6744 /* Only CPU-wide events are allowed to see next/prev pid/tid */
6745 if (event->ctx->task) {
6746 se->event_id.header.type = PERF_RECORD_SWITCH;
6747 se->event_id.header.size = sizeof(se->event_id.header);
6748 } else {
6749 se->event_id.header.type = PERF_RECORD_SWITCH_CPU_WIDE;
6750 se->event_id.header.size = sizeof(se->event_id);
6751 se->event_id.next_prev_pid =
6752 perf_event_pid(event, se->next_prev);
6753 se->event_id.next_prev_tid =
6754 perf_event_tid(event, se->next_prev);
6755 }
6756
6757 perf_event_header__init_id(&se->event_id.header, &sample, event);
6758
6759 ret = perf_output_begin(&handle, event, se->event_id.header.size);
6760 if (ret)
6761 return;
6762
6763 if (event->ctx->task)
6764 perf_output_put(&handle, se->event_id.header);
6765 else
6766 perf_output_put(&handle, se->event_id);
6767
6768 perf_event__output_id_sample(event, &handle, &sample);
6769
6770 perf_output_end(&handle);
6771}
6772
6773static void perf_event_switch(struct task_struct *task,
6774 struct task_struct *next_prev, bool sched_in)
6775{
6776 struct perf_switch_event switch_event;
6777
6778 /* N.B. caller checks nr_switch_events != 0 */
6779
6780 switch_event = (struct perf_switch_event){
6781 .task = task,
6782 .next_prev = next_prev,
6783 .event_id = {
6784 .header = {
6785 /* .type */
6786 .misc = sched_in ? 0 : PERF_RECORD_MISC_SWITCH_OUT,
6787 /* .size */
6788 },
6789 /* .next_prev_pid */
6790 /* .next_prev_tid */
6791 },
6792 };
6793
aab5b71e 6794 perf_iterate_sb(perf_event_switch_output,
45ac1403
AH
6795 &switch_event,
6796 NULL);
6797}
6798
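/*
 * Illustrative user-space sketch (added): PERF_RECORD_SWITCH and
 * PERF_RECORD_SWITCH_CPU_WIDE are only generated for events that
 * opted in via
 *
 *	attr.context_switch = 1;
 *
 * A per-task event receives the slim PERF_RECORD_SWITCH header only,
 * while a CPU-wide event (pid == -1 to perf_event_open()) also gets
 * the next/prev pid and tid, exactly as implemented above.
 */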
a78ac325
PZ
6799/*
6800 * IRQ throttle logging
6801 */
6802
cdd6c482 6803static void perf_log_throttle(struct perf_event *event, int enable)
a78ac325
PZ
6804{
6805 struct perf_output_handle handle;
c980d109 6806 struct perf_sample_data sample;
a78ac325
PZ
6807 int ret;
6808
6809 struct {
6810 struct perf_event_header header;
6811 u64 time;
cca3f454 6812 u64 id;
7f453c24 6813 u64 stream_id;
a78ac325
PZ
6814 } throttle_event = {
6815 .header = {
cdd6c482 6816 .type = PERF_RECORD_THROTTLE,
a78ac325
PZ
6817 .misc = 0,
6818 .size = sizeof(throttle_event),
6819 },
34f43927 6820 .time = perf_event_clock(event),
cdd6c482
IM
6821 .id = primary_event_id(event),
6822 .stream_id = event->id,
a78ac325
PZ
6823 };
6824
966ee4d6 6825 if (enable)
cdd6c482 6826 throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
966ee4d6 6827
c980d109
ACM
6828 perf_event_header__init_id(&throttle_event.header, &sample, event);
6829
6830 ret = perf_output_begin(&handle, event,
a7ac67ea 6831 throttle_event.header.size);
a78ac325
PZ
6832 if (ret)
6833 return;
6834
6835 perf_output_put(&handle, throttle_event);
c980d109 6836 perf_event__output_id_sample(event, &handle, &sample);
a78ac325
PZ
6837 perf_output_end(&handle);
6838}
6839
ec0d7729
AS
6840static void perf_log_itrace_start(struct perf_event *event)
6841{
6842 struct perf_output_handle handle;
6843 struct perf_sample_data sample;
6844 struct perf_aux_event {
6845 struct perf_event_header header;
6846 u32 pid;
6847 u32 tid;
6848 } rec;
6849 int ret;
6850
6851 if (event->parent)
6852 event = event->parent;
6853
6854 if (!(event->pmu->capabilities & PERF_PMU_CAP_ITRACE) ||
6855 event->hw.itrace_started)
6856 return;
6857
ec0d7729
AS
6858 rec.header.type = PERF_RECORD_ITRACE_START;
6859 rec.header.misc = 0;
6860 rec.header.size = sizeof(rec);
6861 rec.pid = perf_event_pid(event, current);
6862 rec.tid = perf_event_tid(event, current);
6863
6864 perf_event_header__init_id(&rec.header, &sample, event);
6865 ret = perf_output_begin(&handle, event, rec.header.size);
6866
6867 if (ret)
6868 return;
6869
6870 perf_output_put(&handle, rec);
6871 perf_event__output_id_sample(event, &handle, &sample);
6872
6873 perf_output_end(&handle);
6874}
6875
f6c7d5fe 6876/*
cdd6c482 6877 * Generic event overflow handling, sampling.
f6c7d5fe
PZ
6878 */
6879
a8b0ca17 6880static int __perf_event_overflow(struct perf_event *event,
5622f295
MM
6881 int throttle, struct perf_sample_data *data,
6882 struct pt_regs *regs)
f6c7d5fe 6883{
cdd6c482
IM
6884 int events = atomic_read(&event->event_limit);
6885 struct hw_perf_event *hwc = &event->hw;
e050e3f0 6886 u64 seq;
79f14641
PZ
6887 int ret = 0;
6888
96398826
PZ
6889 /*
6890 * Non-sampling counters might still use the PMI to fold short
6891 * hardware counters; ignore those.
6892 */
6893 if (unlikely(!is_sampling_event(event)))
6894 return 0;
6895
e050e3f0
SE
6896 seq = __this_cpu_read(perf_throttled_seq);
6897 if (seq != hwc->interrupts_seq) {
6898 hwc->interrupts_seq = seq;
6899 hwc->interrupts = 1;
6900 } else {
6901 hwc->interrupts++;
6902 if (unlikely(throttle
6903 && hwc->interrupts >= max_samples_per_tick)) {
6904 __this_cpu_inc(perf_throttled_count);
555e0c1e 6905 tick_dep_set_cpu(smp_processor_id(), TICK_DEP_BIT_PERF_EVENTS);
163ec435
PZ
6906 hwc->interrupts = MAX_INTERRUPTS;
6907 perf_log_throttle(event, 0);
a78ac325
PZ
6908 ret = 1;
6909 }
e050e3f0 6910 }
60db5e09 6911
cdd6c482 6912 if (event->attr.freq) {
def0a9b2 6913 u64 now = perf_clock();
abd50713 6914 s64 delta = now - hwc->freq_time_stamp;
bd2b5b12 6915
abd50713 6916 hwc->freq_time_stamp = now;
bd2b5b12 6917
abd50713 6918 if (delta > 0 && delta < 2*TICK_NSEC)
f39d47ff 6919 perf_adjust_period(event, delta, hwc->last_period, true);
bd2b5b12
PZ
6920 }
6921
2023b359
PZ
6922 /*
6923 * XXX event_limit might not quite work as expected on inherited
cdd6c482 6924 * events
2023b359
PZ
6925 */
6926
cdd6c482
IM
6927 event->pending_kill = POLL_IN;
6928 if (events && atomic_dec_and_test(&event->event_limit)) {
79f14641 6929 ret = 1;
cdd6c482 6930 event->pending_kill = POLL_HUP;
a8b0ca17
PZ
6931 event->pending_disable = 1;
6932 irq_work_queue(&event->pending);
79f14641
PZ
6933 }
6934
1879445d 6935 event->overflow_handler(event, data, regs);
453f19ee 6936
fed66e2c 6937 if (*perf_event_fasync(event) && event->pending_kill) {
a8b0ca17
PZ
6938 event->pending_wakeup = 1;
6939 irq_work_queue(&event->pending);
f506b3dc
PZ
6940 }
6941
79f14641 6942 return ret;
f6c7d5fe
PZ
6943}
6944
a8b0ca17 6945int perf_event_overflow(struct perf_event *event,
5622f295
MM
6946 struct perf_sample_data *data,
6947 struct pt_regs *regs)
850bc73f 6948{
a8b0ca17 6949 return __perf_event_overflow(event, 1, data, regs);
850bc73f
PZ
6950}
6951
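/*
 * Illustrative user-space sketch (added; the handler name is
 * hypothetical): the event_limit counted down above is what
 * PERF_EVENT_IOC_REFRESH arms.  A self-profiling task can allow a
 * finite number of overflows and get a POLL_HUP-band SIGIO when they
 * are used up:
 *
 *	signal(SIGIO, my_overflow_handler);
 *	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);
 *	fcntl(fd, F_SETOWN, getpid());
 *	ioctl(fd, PERF_EVENT_IOC_REFRESH, 1);	 (allow one more overflow)
 *
 * Once the limit is exhausted the event disables itself through
 * pending_disable and the irq_work queued above.
 */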
15dbf27c 6952/*
cdd6c482 6953 * Generic software event infrastructure
15dbf27c
PZ
6954 */
6955
b28ab83c
PZ
6956struct swevent_htable {
6957 struct swevent_hlist *swevent_hlist;
6958 struct mutex hlist_mutex;
6959 int hlist_refcount;
6960
6961 /* Recursion avoidance in each contexts */
6962 int recursion[PERF_NR_CONTEXTS];
6963};
6964
6965static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
6966
7b4b6658 6967/*
cdd6c482
IM
6968 * We directly increment event->count and keep a second value in
6969 * event->hw.period_left to count intervals. This period value
7b4b6658
PZ
6970 * is kept in the range [-sample_period, 0] so that we can use the
6971 * sign as a trigger.
6972 */
6973
ab573844 6974u64 perf_swevent_set_period(struct perf_event *event)
15dbf27c 6975{
cdd6c482 6976 struct hw_perf_event *hwc = &event->hw;
7b4b6658
PZ
6977 u64 period = hwc->last_period;
6978 u64 nr, offset;
6979 s64 old, val;
6980
6981 hwc->last_period = hwc->sample_period;
15dbf27c
PZ
6982
6983again:
e7850595 6984 old = val = local64_read(&hwc->period_left);
7b4b6658
PZ
6985 if (val < 0)
6986 return 0;
15dbf27c 6987
7b4b6658
PZ
6988 nr = div64_u64(period + val, period);
6989 offset = nr * period;
6990 val -= offset;
e7850595 6991 if (local64_cmpxchg(&hwc->period_left, old, val) != old)
7b4b6658 6992 goto again;
15dbf27c 6993
7b4b6658 6994 return nr;
15dbf27c
PZ
6995}
6996
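/*
 * Worked example (numbers added for illustration): assume
 * hwc->sample_period == hwc->last_period == 100 and that enough
 * counts were added to push period_left from -70 up to +230.  Then:
 *
 *	nr     = (100 + 230) / 100 = 3
 *	offset = 3 * 100           = 300
 *	val    = 230 - 300         = -70
 *
 * so perf_swevent_set_period() reports 3 elapsed periods and leaves
 * period_left back in the [-sample_period, 0] range described above.
 */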
0cff784a 6997static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
a8b0ca17 6998 struct perf_sample_data *data,
5622f295 6999 struct pt_regs *regs)
15dbf27c 7000{
cdd6c482 7001 struct hw_perf_event *hwc = &event->hw;
850bc73f 7002 int throttle = 0;
15dbf27c 7003
0cff784a
PZ
7004 if (!overflow)
7005 overflow = perf_swevent_set_period(event);
15dbf27c 7006
7b4b6658
PZ
7007 if (hwc->interrupts == MAX_INTERRUPTS)
7008 return;
15dbf27c 7009
7b4b6658 7010 for (; overflow; overflow--) {
a8b0ca17 7011 if (__perf_event_overflow(event, throttle,
5622f295 7012 data, regs)) {
7b4b6658
PZ
7013 /*
7014 * We inhibit the overflow from happening when
7015 * hwc->interrupts == MAX_INTERRUPTS.
7016 */
7017 break;
7018 }
cf450a73 7019 throttle = 1;
7b4b6658 7020 }
15dbf27c
PZ
7021}
7022
a4eaf7f1 7023static void perf_swevent_event(struct perf_event *event, u64 nr,
a8b0ca17 7024 struct perf_sample_data *data,
5622f295 7025 struct pt_regs *regs)
7b4b6658 7026{
cdd6c482 7027 struct hw_perf_event *hwc = &event->hw;
d6d020e9 7028
e7850595 7029 local64_add(nr, &event->count);
d6d020e9 7030
0cff784a
PZ
7031 if (!regs)
7032 return;
7033
6c7e550f 7034 if (!is_sampling_event(event))
7b4b6658 7035 return;
d6d020e9 7036
5d81e5cf
AV
7037 if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) {
7038 data->period = nr;
7039 return perf_swevent_overflow(event, 1, data, regs);
7040 } else
7041 data->period = event->hw.last_period;
7042
0cff784a 7043 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
a8b0ca17 7044 return perf_swevent_overflow(event, 1, data, regs);
0cff784a 7045
e7850595 7046 if (local64_add_negative(nr, &hwc->period_left))
7b4b6658 7047 return;
df1a132b 7048
a8b0ca17 7049 perf_swevent_overflow(event, 0, data, regs);
d6d020e9
PZ
7050}
7051
f5ffe02e
FW
7052static int perf_exclude_event(struct perf_event *event,
7053 struct pt_regs *regs)
7054{
a4eaf7f1 7055 if (event->hw.state & PERF_HES_STOPPED)
91b2f482 7056 return 1;
a4eaf7f1 7057
f5ffe02e
FW
7058 if (regs) {
7059 if (event->attr.exclude_user && user_mode(regs))
7060 return 1;
7061
7062 if (event->attr.exclude_kernel && !user_mode(regs))
7063 return 1;
7064 }
7065
7066 return 0;
7067}
7068
cdd6c482 7069static int perf_swevent_match(struct perf_event *event,
1c432d89 7070 enum perf_type_id type,
6fb2915d
LZ
7071 u32 event_id,
7072 struct perf_sample_data *data,
7073 struct pt_regs *regs)
15dbf27c 7074{
cdd6c482 7075 if (event->attr.type != type)
a21ca2ca 7076 return 0;
f5ffe02e 7077
cdd6c482 7078 if (event->attr.config != event_id)
15dbf27c
PZ
7079 return 0;
7080
f5ffe02e
FW
7081 if (perf_exclude_event(event, regs))
7082 return 0;
15dbf27c
PZ
7083
7084 return 1;
7085}
7086
76e1d904
FW
7087static inline u64 swevent_hash(u64 type, u32 event_id)
7088{
7089 u64 val = event_id | (type << 32);
7090
7091 return hash_64(val, SWEVENT_HLIST_BITS);
7092}
7093
49f135ed
FW
7094static inline struct hlist_head *
7095__find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
76e1d904 7096{
49f135ed
FW
7097 u64 hash = swevent_hash(type, event_id);
7098
7099 return &hlist->heads[hash];
7100}
76e1d904 7101
49f135ed
FW
7102/* For the read side: events when they trigger */
7103static inline struct hlist_head *
b28ab83c 7104find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
49f135ed
FW
7105{
7106 struct swevent_hlist *hlist;
76e1d904 7107
b28ab83c 7108 hlist = rcu_dereference(swhash->swevent_hlist);
76e1d904
FW
7109 if (!hlist)
7110 return NULL;
7111
49f135ed
FW
7112 return __find_swevent_head(hlist, type, event_id);
7113}
7114
7115/* For the event head insertion and removal in the hlist */
7116static inline struct hlist_head *
b28ab83c 7117find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
49f135ed
FW
7118{
7119 struct swevent_hlist *hlist;
7120 u32 event_id = event->attr.config;
7121 u64 type = event->attr.type;
7122
7123 /*
7124 * Event scheduling is always serialized against hlist allocation
7125 * and release, which makes the protected version suitable here.
7126 * The context lock guarantees that.
7127 */
b28ab83c 7128 hlist = rcu_dereference_protected(swhash->swevent_hlist,
49f135ed
FW
7129 lockdep_is_held(&event->ctx->lock));
7130 if (!hlist)
7131 return NULL;
7132
7133 return __find_swevent_head(hlist, type, event_id);
76e1d904
FW
7134}
7135
7136static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
a8b0ca17 7137 u64 nr,
76e1d904
FW
7138 struct perf_sample_data *data,
7139 struct pt_regs *regs)
15dbf27c 7140{
4a32fea9 7141 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
cdd6c482 7142 struct perf_event *event;
76e1d904 7143 struct hlist_head *head;
15dbf27c 7144
76e1d904 7145 rcu_read_lock();
b28ab83c 7146 head = find_swevent_head_rcu(swhash, type, event_id);
76e1d904
FW
7147 if (!head)
7148 goto end;
7149
b67bfe0d 7150 hlist_for_each_entry_rcu(event, head, hlist_entry) {
6fb2915d 7151 if (perf_swevent_match(event, type, event_id, data, regs))
a8b0ca17 7152 perf_swevent_event(event, nr, data, regs);
15dbf27c 7153 }
76e1d904
FW
7154end:
7155 rcu_read_unlock();
15dbf27c
PZ
7156}
7157
86038c5e
PZI
7158DEFINE_PER_CPU(struct pt_regs, __perf_regs[4]);
7159
4ed7c92d 7160int perf_swevent_get_recursion_context(void)
96f6d444 7161{
4a32fea9 7162 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
96f6d444 7163
b28ab83c 7164 return get_recursion_context(swhash->recursion);
96f6d444 7165}
645e8cc0 7166EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
96f6d444 7167
98b5c2c6 7168void perf_swevent_put_recursion_context(int rctx)
15dbf27c 7169{
4a32fea9 7170 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
927c7a9e 7171
b28ab83c 7172 put_recursion_context(swhash->recursion, rctx);
ce71b9df 7173}
15dbf27c 7174
86038c5e 7175void ___perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
b8e83514 7176{
a4234bfc 7177 struct perf_sample_data data;
4ed7c92d 7178
86038c5e 7179 if (WARN_ON_ONCE(!regs))
4ed7c92d 7180 return;
a4234bfc 7181
fd0d000b 7182 perf_sample_data_init(&data, addr, 0);
a8b0ca17 7183 do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);
86038c5e
PZI
7184}
7185
7186void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
7187{
7188 int rctx;
7189
7190 preempt_disable_notrace();
7191 rctx = perf_swevent_get_recursion_context();
7192 if (unlikely(rctx < 0))
7193 goto fail;
7194
7195 ___perf_sw_event(event_id, nr, regs, addr);
4ed7c92d
PZ
7196
7197 perf_swevent_put_recursion_context(rctx);
86038c5e 7198fail:
1c024eca 7199 preempt_enable_notrace();
b8e83514
PZ
7200}
7201
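/*
 * Illustrative in-kernel sketch (added, not taken from this file):
 * architecture and core code feed software events through the
 * perf_sw_event() wrapper, which ends up in __perf_sw_event() above.
 * A page-fault handler typically does:
 *
 *	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 *
 * and, depending on the fault kind, the same call with
 * PERF_COUNT_SW_PAGE_FAULTS_MAJ or PERF_COUNT_SW_PAGE_FAULTS_MIN.
 */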
static void perf_swevent_read(struct perf_event *event)
{
}

static int perf_swevent_add(struct perf_event *event, int flags)
{
	struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
	struct hw_perf_event *hwc = &event->hw;
	struct hlist_head *head;

	if (is_sampling_event(event)) {
		hwc->last_period = hwc->sample_period;
		perf_swevent_set_period(event);
	}

	hwc->state = !(flags & PERF_EF_START);

	head = find_swevent_head(swhash, event);
	if (WARN_ON_ONCE(!head))
		return -EINVAL;

	hlist_add_head_rcu(&event->hlist_entry, head);
	perf_event_update_userpage(event);

	return 0;
}

static void perf_swevent_del(struct perf_event *event, int flags)
{
	hlist_del_rcu(&event->hlist_entry);
}

static void perf_swevent_start(struct perf_event *event, int flags)
{
	event->hw.state = 0;
}

static void perf_swevent_stop(struct perf_event *event, int flags)
{
	event->hw.state = PERF_HES_STOPPED;
}

/* Deref the hlist from the update side */
static inline struct swevent_hlist *
swevent_hlist_deref(struct swevent_htable *swhash)
{
	return rcu_dereference_protected(swhash->swevent_hlist,
					 lockdep_is_held(&swhash->hlist_mutex));
}

static void swevent_hlist_release(struct swevent_htable *swhash)
{
	struct swevent_hlist *hlist = swevent_hlist_deref(swhash);

	if (!hlist)
		return;

	RCU_INIT_POINTER(swhash->swevent_hlist, NULL);
	kfree_rcu(hlist, rcu_head);
}

static void swevent_hlist_put_cpu(int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

	mutex_lock(&swhash->hlist_mutex);

	if (!--swhash->hlist_refcount)
		swevent_hlist_release(swhash);

	mutex_unlock(&swhash->hlist_mutex);
}

static void swevent_hlist_put(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		swevent_hlist_put_cpu(cpu);
}

static int swevent_hlist_get_cpu(int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
	int err = 0;

	mutex_lock(&swhash->hlist_mutex);
	if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
		struct swevent_hlist *hlist;

		hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
		if (!hlist) {
			err = -ENOMEM;
			goto exit;
		}
		rcu_assign_pointer(swhash->swevent_hlist, hlist);
	}
	swhash->hlist_refcount++;
exit:
	mutex_unlock(&swhash->hlist_mutex);

	return err;
}

static int swevent_hlist_get(void)
{
	int err, cpu, failed_cpu;

	get_online_cpus();
	for_each_possible_cpu(cpu) {
		err = swevent_hlist_get_cpu(cpu);
		if (err) {
			failed_cpu = cpu;
			goto fail;
		}
	}
	put_online_cpus();

	return 0;
fail:
	for_each_possible_cpu(cpu) {
		if (cpu == failed_cpu)
			break;
		swevent_hlist_put_cpu(cpu);
	}

	put_online_cpus();
	return err;
}

struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];

static void sw_perf_event_destroy(struct perf_event *event)
{
	u64 event_id = event->attr.config;

	WARN_ON(event->parent);

	static_key_slow_dec(&perf_swevent_enabled[event_id]);
	swevent_hlist_put();
}

static int perf_swevent_init(struct perf_event *event)
{
	u64 event_id = event->attr.config;

	if (event->attr.type != PERF_TYPE_SOFTWARE)
		return -ENOENT;

	/*
	 * no branch sampling for software events
	 */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	switch (event_id) {
	case PERF_COUNT_SW_CPU_CLOCK:
	case PERF_COUNT_SW_TASK_CLOCK:
		return -ENOENT;

	default:
		break;
	}

	if (event_id >= PERF_COUNT_SW_MAX)
		return -ENOENT;

	if (!event->parent) {
		int err;

		err = swevent_hlist_get();
		if (err)
			return err;

		static_key_slow_inc(&perf_swevent_enabled[event_id]);
		event->destroy = sw_perf_event_destroy;
	}

	return 0;
}

static struct pmu perf_swevent = {
	.task_ctx_nr	= perf_sw_context,

	.capabilities	= PERF_PMU_CAP_NO_NMI,

	.event_init	= perf_swevent_init,
	.add		= perf_swevent_add,
	.del		= perf_swevent_del,
	.start		= perf_swevent_start,
	.stop		= perf_swevent_stop,
	.read		= perf_swevent_read,
};

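/*
 * Illustrative only: the software PMU above backs perf_event_open() calls
 * with attr.type == PERF_TYPE_SOFTWARE (except the cpu/task clock events,
 * which are rejected in perf_swevent_init() and handled by the hrtimer
 * based PMUs further down). A minimal user-space sketch, assuming the
 * usual UAPI definitions and a raw syscall(2) wrapper:
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_SOFTWARE,
 *		.size	= sizeof(attr),
 *		.config	= PERF_COUNT_SW_CONTEXT_SWITCHES,
 *	};
 *	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *
 * Reading a u64 from fd then returns the current count.
 */
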
b0a873eb
PZ
7396#ifdef CONFIG_EVENT_TRACING
7397
1c024eca
PZ
7398static int perf_tp_filter_match(struct perf_event *event,
7399 struct perf_sample_data *data)
7400{
7401 void *record = data->raw->data;
7402
b71b437e
PZ
7403 /* only top level events have filters set */
7404 if (event->parent)
7405 event = event->parent;
7406
1c024eca
PZ
7407 if (likely(!event->filter) || filter_match_preds(event->filter, record))
7408 return 1;
7409 return 0;
7410}
7411
7412static int perf_tp_event_match(struct perf_event *event,
7413 struct perf_sample_data *data,
7414 struct pt_regs *regs)
7415{
a0f7d0f7
FW
7416 if (event->hw.state & PERF_HES_STOPPED)
7417 return 0;
580d607c
PZ
7418 /*
7419 * All tracepoints are from kernel-space.
7420 */
7421 if (event->attr.exclude_kernel)
1c024eca
PZ
7422 return 0;
7423
7424 if (!perf_tp_filter_match(event, data))
7425 return 0;
7426
7427 return 1;
7428}
7429
85b67bcb
AS
7430void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
7431 struct trace_event_call *call, u64 count,
7432 struct pt_regs *regs, struct hlist_head *head,
7433 struct task_struct *task)
7434{
7435 struct bpf_prog *prog = call->prog;
7436
7437 if (prog) {
7438 *(struct pt_regs **)raw_data = regs;
7439 if (!trace_call_bpf(prog, raw_data) || hlist_empty(head)) {
7440 perf_swevent_put_recursion_context(rctx);
7441 return;
7442 }
7443 }
7444 perf_tp_event(call->event.type, count, raw_data, size, regs, head,
7445 rctx, task);
7446}
7447EXPORT_SYMBOL_GPL(perf_trace_run_bpf_submit);
7448
1e1dcd93 7449void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size,
e6dab5ff
AV
7450 struct pt_regs *regs, struct hlist_head *head, int rctx,
7451 struct task_struct *task)
95476b64
FW
7452{
7453 struct perf_sample_data data;
1c024eca 7454 struct perf_event *event;
1c024eca 7455
95476b64
FW
7456 struct perf_raw_record raw = {
7457 .size = entry_size,
7458 .data = record,
7459 };
7460
1e1dcd93 7461 perf_sample_data_init(&data, 0, 0);
95476b64
FW
7462 data.raw = &raw;
7463
1e1dcd93
AS
7464 perf_trace_buf_update(record, event_type);
7465
b67bfe0d 7466 hlist_for_each_entry_rcu(event, head, hlist_entry) {
1c024eca 7467 if (perf_tp_event_match(event, &data, regs))
a8b0ca17 7468 perf_swevent_event(event, count, &data, regs);
4f41c013 7469 }
ecc55f84 7470
e6dab5ff
AV
7471 /*
7472 * If we got specified a target task, also iterate its context and
7473 * deliver this event there too.
7474 */
7475 if (task && task != current) {
7476 struct perf_event_context *ctx;
7477 struct trace_entry *entry = record;
7478
7479 rcu_read_lock();
7480 ctx = rcu_dereference(task->perf_event_ctxp[perf_sw_context]);
7481 if (!ctx)
7482 goto unlock;
7483
7484 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
7485 if (event->attr.type != PERF_TYPE_TRACEPOINT)
7486 continue;
7487 if (event->attr.config != entry->type)
7488 continue;
7489 if (perf_tp_event_match(event, &data, regs))
7490 perf_swevent_event(event, count, &data, regs);
7491 }
7492unlock:
7493 rcu_read_unlock();
7494 }
7495
ecc55f84 7496 perf_swevent_put_recursion_context(rctx);
95476b64
FW
7497}
7498EXPORT_SYMBOL_GPL(perf_tp_event);
7499
cdd6c482 7500static void tp_perf_event_destroy(struct perf_event *event)
e077df4f 7501{
1c024eca 7502 perf_trace_destroy(event);
e077df4f
PZ
7503}
7504
b0a873eb 7505static int perf_tp_event_init(struct perf_event *event)
e077df4f 7506{
76e1d904
FW
7507 int err;
7508
b0a873eb
PZ
7509 if (event->attr.type != PERF_TYPE_TRACEPOINT)
7510 return -ENOENT;
7511
2481c5fa
SE
7512 /*
7513 * no branch sampling for tracepoint events
7514 */
7515 if (has_branch_stack(event))
7516 return -EOPNOTSUPP;
7517
1c024eca
PZ
7518 err = perf_trace_init(event);
7519 if (err)
b0a873eb 7520 return err;
e077df4f 7521
cdd6c482 7522 event->destroy = tp_perf_event_destroy;
e077df4f 7523
b0a873eb
PZ
7524 return 0;
7525}
7526
7527static struct pmu perf_tracepoint = {
89a1e187
PZ
7528 .task_ctx_nr = perf_sw_context,
7529
b0a873eb 7530 .event_init = perf_tp_event_init,
a4eaf7f1
PZ
7531 .add = perf_trace_add,
7532 .del = perf_trace_del,
7533 .start = perf_swevent_start,
7534 .stop = perf_swevent_stop,
b0a873eb 7535 .read = perf_swevent_read,
b0a873eb
PZ
7536};
7537
7538static inline void perf_tp_register(void)
7539{
2e80a82a 7540 perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT);
e077df4f 7541}
6fb2915d 7542
6fb2915d
LZ
7543static void perf_event_free_filter(struct perf_event *event)
7544{
7545 ftrace_profile_free_filter(event);
7546}
7547
2541517c
AS
7548static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
7549{
98b5c2c6 7550 bool is_kprobe, is_tracepoint;
2541517c
AS
7551 struct bpf_prog *prog;
7552
7553 if (event->attr.type != PERF_TYPE_TRACEPOINT)
7554 return -EINVAL;
7555
7556 if (event->tp_event->prog)
7557 return -EEXIST;
7558
98b5c2c6
AS
7559 is_kprobe = event->tp_event->flags & TRACE_EVENT_FL_UKPROBE;
7560 is_tracepoint = event->tp_event->flags & TRACE_EVENT_FL_TRACEPOINT;
7561 if (!is_kprobe && !is_tracepoint)
7562 /* bpf programs can only be attached to u/kprobe or tracepoint */
2541517c
AS
7563 return -EINVAL;
7564
7565 prog = bpf_prog_get(prog_fd);
7566 if (IS_ERR(prog))
7567 return PTR_ERR(prog);
7568
98b5c2c6
AS
7569 if ((is_kprobe && prog->type != BPF_PROG_TYPE_KPROBE) ||
7570 (is_tracepoint && prog->type != BPF_PROG_TYPE_TRACEPOINT)) {
2541517c
AS
7571 /* valid fd, but invalid bpf program type */
7572 bpf_prog_put(prog);
7573 return -EINVAL;
7574 }
7575
32bbe007
AS
7576 if (is_tracepoint) {
7577 int off = trace_event_get_offsets(event->tp_event);
7578
7579 if (prog->aux->max_ctx_offset > off) {
7580 bpf_prog_put(prog);
7581 return -EACCES;
7582 }
7583 }
2541517c
AS
7584 event->tp_event->prog = prog;
7585
7586 return 0;
7587}
7588
7589static void perf_event_free_bpf_prog(struct perf_event *event)
7590{
7591 struct bpf_prog *prog;
7592
7593 if (!event->tp_event)
7594 return;
7595
7596 prog = event->tp_event->prog;
7597 if (prog) {
7598 event->tp_event->prog = NULL;
ceb56070 7599 bpf_prog_put_rcu(prog);
2541517c
AS
7600 }
7601}
7602
e077df4f 7603#else
6fb2915d 7604
b0a873eb 7605static inline void perf_tp_register(void)
e077df4f 7606{
e077df4f 7607}
6fb2915d 7608
6fb2915d
LZ
7609static void perf_event_free_filter(struct perf_event *event)
7610{
7611}
7612
2541517c
AS
7613static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd)
7614{
7615 return -ENOENT;
7616}
7617
7618static void perf_event_free_bpf_prog(struct perf_event *event)
7619{
7620}
07b139c8 7621#endif /* CONFIG_EVENT_TRACING */
e077df4f 7622
24f1e32c 7623#ifdef CONFIG_HAVE_HW_BREAKPOINT
f5ffe02e 7624void perf_bp_event(struct perf_event *bp, void *data)
24f1e32c 7625{
f5ffe02e
FW
7626 struct perf_sample_data sample;
7627 struct pt_regs *regs = data;
7628
fd0d000b 7629 perf_sample_data_init(&sample, bp->attr.bp_addr, 0);
f5ffe02e 7630
a4eaf7f1 7631 if (!bp->hw.state && !perf_exclude_event(bp, regs))
a8b0ca17 7632 perf_swevent_event(bp, 1, &sample, regs);
24f1e32c
FW
7633}
7634#endif
7635
375637bc
AS
7636/*
7637 * Allocate a new address filter
7638 */
7639static struct perf_addr_filter *
7640perf_addr_filter_new(struct perf_event *event, struct list_head *filters)
7641{
7642 int node = cpu_to_node(event->cpu == -1 ? 0 : event->cpu);
7643 struct perf_addr_filter *filter;
7644
7645 filter = kzalloc_node(sizeof(*filter), GFP_KERNEL, node);
7646 if (!filter)
7647 return NULL;
7648
7649 INIT_LIST_HEAD(&filter->entry);
7650 list_add_tail(&filter->entry, filters);
7651
7652 return filter;
7653}
7654
7655static void free_filters_list(struct list_head *filters)
7656{
7657 struct perf_addr_filter *filter, *iter;
7658
7659 list_for_each_entry_safe(filter, iter, filters, entry) {
7660 if (filter->inode)
7661 iput(filter->inode);
7662 list_del(&filter->entry);
7663 kfree(filter);
7664 }
7665}
7666
7667/*
7668 * Free existing address filters and optionally install new ones
7669 */
7670static void perf_addr_filters_splice(struct perf_event *event,
7671 struct list_head *head)
7672{
7673 unsigned long flags;
7674 LIST_HEAD(list);
7675
7676 if (!has_addr_filter(event))
7677 return;
7678
7679 /* don't bother with children, they don't have their own filters */
7680 if (event->parent)
7681 return;
7682
7683 raw_spin_lock_irqsave(&event->addr_filters.lock, flags);
7684
7685 list_splice_init(&event->addr_filters.list, &list);
7686 if (head)
7687 list_splice(head, &event->addr_filters.list);
7688
7689 raw_spin_unlock_irqrestore(&event->addr_filters.lock, flags);
7690
7691 free_filters_list(&list);
7692}
7693
7694/*
7695 * Scan through mm's vmas and see if one of them matches the
7696 * @filter; if so, adjust filter's address range.
7697 * Called with mm::mmap_sem down for reading.
7698 */
7699static unsigned long perf_addr_filter_apply(struct perf_addr_filter *filter,
7700 struct mm_struct *mm)
7701{
7702 struct vm_area_struct *vma;
7703
7704 for (vma = mm->mmap; vma; vma = vma->vm_next) {
7705 struct file *file = vma->vm_file;
7706 unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
7707 unsigned long vma_size = vma->vm_end - vma->vm_start;
7708
7709 if (!file)
7710 continue;
7711
7712 if (!perf_addr_filter_match(filter, file, off, vma_size))
7713 continue;
7714
7715 return vma->vm_start;
7716 }
7717
7718 return 0;
7719}
7720
7721/*
7722 * Update event's address range filters based on the
7723 * task's existing mappings, if any.
7724 */
7725static void perf_event_addr_filters_apply(struct perf_event *event)
7726{
7727 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
7728 struct task_struct *task = READ_ONCE(event->ctx->task);
7729 struct perf_addr_filter *filter;
7730 struct mm_struct *mm = NULL;
7731 unsigned int count = 0;
7732 unsigned long flags;
7733
7734 /*
7735 * We may observe TASK_TOMBSTONE, which means that the event tear-down
7736	 * will stop on the parent's child_mutex that our caller is also holding.
7737 */
7738 if (task == TASK_TOMBSTONE)
7739 return;
7740
7741 mm = get_task_mm(event->ctx->task);
7742 if (!mm)
7743 goto restart;
7744
7745 down_read(&mm->mmap_sem);
7746
7747 raw_spin_lock_irqsave(&ifh->lock, flags);
7748 list_for_each_entry(filter, &ifh->list, entry) {
7749 event->addr_filters_offs[count] = 0;
7750
7751 if (perf_addr_filter_needs_mmap(filter))
7752 event->addr_filters_offs[count] =
7753 perf_addr_filter_apply(filter, mm);
7754
7755 count++;
7756 }
7757
7758 event->addr_filters_gen++;
7759 raw_spin_unlock_irqrestore(&ifh->lock, flags);
7760
7761 up_read(&mm->mmap_sem);
7762
7763 mmput(mm);
7764
7765restart:
7766 perf_event_restart(event);
7767}
7768
/*
 * Address range filtering: limiting the data to certain
 * instruction address ranges. Filters are ioctl()ed to us from
 * userspace as ascii strings.
 *
 * Filter string format:
 *
 * ACTION RANGE_SPEC
 * where ACTION is one of the
 *  * "filter": limit the trace to this region
 *  * "start": start tracing from this address
 *  * "stop": stop tracing at this address/region;
 * RANGE_SPEC is
 *  * for kernel addresses: <start address>[/<size>]
 *  * for object files: <start address>[/<size>]@</path/to/object/file>
 *
 * if <size> is not specified, the range is treated as a single address.
 */
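
/*
 * A few illustrative filter strings in the above format (addresses and
 * the object path are made up):
 *
 *	filter 0x1000/0x2000@/usr/lib/libfoo.so
 *	start 0xffffffff81000000/0x1000
 *	stop 0xffffffff81001000
 */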
7787enum {
7788 IF_ACT_FILTER,
7789 IF_ACT_START,
7790 IF_ACT_STOP,
7791 IF_SRC_FILE,
7792 IF_SRC_KERNEL,
7793 IF_SRC_FILEADDR,
7794 IF_SRC_KERNELADDR,
7795};
7796
7797enum {
7798 IF_STATE_ACTION = 0,
7799 IF_STATE_SOURCE,
7800 IF_STATE_END,
7801};
7802
7803static const match_table_t if_tokens = {
7804 { IF_ACT_FILTER, "filter" },
7805 { IF_ACT_START, "start" },
7806 { IF_ACT_STOP, "stop" },
7807 { IF_SRC_FILE, "%u/%u@%s" },
7808 { IF_SRC_KERNEL, "%u/%u" },
7809 { IF_SRC_FILEADDR, "%u@%s" },
7810 { IF_SRC_KERNELADDR, "%u" },
7811};
7812
7813/*
7814 * Address filter string parser
7815 */
7816static int
7817perf_event_parse_addr_filter(struct perf_event *event, char *fstr,
7818 struct list_head *filters)
7819{
7820 struct perf_addr_filter *filter = NULL;
7821 char *start, *orig, *filename = NULL;
7822 struct path path;
7823 substring_t args[MAX_OPT_ARGS];
7824 int state = IF_STATE_ACTION, token;
7825 unsigned int kernel = 0;
7826 int ret = -EINVAL;
7827
7828 orig = fstr = kstrdup(fstr, GFP_KERNEL);
7829 if (!fstr)
7830 return -ENOMEM;
7831
7832 while ((start = strsep(&fstr, " ,\n")) != NULL) {
7833 ret = -EINVAL;
7834
7835 if (!*start)
7836 continue;
7837
7838 /* filter definition begins */
7839 if (state == IF_STATE_ACTION) {
7840 filter = perf_addr_filter_new(event, filters);
7841 if (!filter)
7842 goto fail;
7843 }
7844
7845 token = match_token(start, if_tokens, args);
7846 switch (token) {
7847 case IF_ACT_FILTER:
7848 case IF_ACT_START:
7849 filter->filter = 1;
7850
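			/* fall through */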
7851 case IF_ACT_STOP:
7852 if (state != IF_STATE_ACTION)
7853 goto fail;
7854
7855 state = IF_STATE_SOURCE;
7856 break;
7857
7858 case IF_SRC_KERNELADDR:
7859 case IF_SRC_KERNEL:
7860 kernel = 1;
7861
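			/* fall through */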
7862 case IF_SRC_FILEADDR:
7863 case IF_SRC_FILE:
7864 if (state != IF_STATE_SOURCE)
7865 goto fail;
7866
7867 if (token == IF_SRC_FILE || token == IF_SRC_KERNEL)
7868 filter->range = 1;
7869
7870 *args[0].to = 0;
7871 ret = kstrtoul(args[0].from, 0, &filter->offset);
7872 if (ret)
7873 goto fail;
7874
7875 if (filter->range) {
7876 *args[1].to = 0;
7877 ret = kstrtoul(args[1].from, 0, &filter->size);
7878 if (ret)
7879 goto fail;
7880 }
7881
7882 if (token == IF_SRC_FILE) {
7883 filename = match_strdup(&args[2]);
7884 if (!filename) {
7885 ret = -ENOMEM;
7886 goto fail;
7887 }
7888 }
7889
7890 state = IF_STATE_END;
7891 break;
7892
7893 default:
7894 goto fail;
7895 }
7896
7897 /*
7898 * Filter definition is fully parsed, validate and install it.
7899 * Make sure that it doesn't contradict itself or the event's
7900 * attribute.
7901 */
7902 if (state == IF_STATE_END) {
7903 if (kernel && event->attr.exclude_kernel)
7904 goto fail;
7905
7906 if (!kernel) {
7907 if (!filename)
7908 goto fail;
7909
7910 /* look up the path and grab its inode */
7911 ret = kern_path(filename, LOOKUP_FOLLOW, &path);
7912 if (ret)
7913 goto fail_free_name;
7914
7915 filter->inode = igrab(d_inode(path.dentry));
7916 path_put(&path);
7917 kfree(filename);
7918 filename = NULL;
7919
7920 ret = -EINVAL;
7921 if (!filter->inode ||
7922 !S_ISREG(filter->inode->i_mode))
7923 /* free_filters_list() will iput() */
7924 goto fail;
7925 }
7926
7927 /* ready to consume more filters */
7928 state = IF_STATE_ACTION;
7929 filter = NULL;
7930 }
7931 }
7932
7933 if (state != IF_STATE_ACTION)
7934 goto fail;
7935
7936 kfree(orig);
7937
7938 return 0;
7939
7940fail_free_name:
7941 kfree(filename);
7942fail:
7943 free_filters_list(filters);
7944 kfree(orig);
7945
7946 return ret;
7947}
7948
7949static int
7950perf_event_set_addr_filter(struct perf_event *event, char *filter_str)
7951{
7952 LIST_HEAD(filters);
7953 int ret;
7954
7955 /*
7956	 * Since this is called in the perf_ioctl() path, we're already holding
7957 * ctx::mutex.
7958 */
7959 lockdep_assert_held(&event->ctx->mutex);
7960
7961 if (WARN_ON_ONCE(event->parent))
7962 return -EINVAL;
7963
7964 /*
7965 * For now, we only support filtering in per-task events; doing so
7966 * for CPU-wide events requires additional context switching trickery,
7967 * since same object code will be mapped at different virtual
7968 * addresses in different processes.
7969 */
7970 if (!event->ctx->task)
7971 return -EOPNOTSUPP;
7972
7973 ret = perf_event_parse_addr_filter(event, filter_str, &filters);
7974 if (ret)
7975 return ret;
7976
7977 ret = event->pmu->addr_filters_validate(&filters);
7978 if (ret) {
7979 free_filters_list(&filters);
7980 return ret;
7981 }
7982
7983 /* remove existing filters, if any */
7984 perf_addr_filters_splice(event, &filters);
7985
7986 /* install new filters */
7987 perf_event_for_each_child(event, perf_event_addr_filters_apply);
7988
7989 return ret;
7990}
7991
c796bbbe
AS
7992static int perf_event_set_filter(struct perf_event *event, void __user *arg)
7993{
7994 char *filter_str;
7995 int ret = -EINVAL;
7996
375637bc
AS
7997 if ((event->attr.type != PERF_TYPE_TRACEPOINT ||
7998 !IS_ENABLED(CONFIG_EVENT_TRACING)) &&
7999 !has_addr_filter(event))
c796bbbe
AS
8000 return -EINVAL;
8001
8002 filter_str = strndup_user(arg, PAGE_SIZE);
8003 if (IS_ERR(filter_str))
8004 return PTR_ERR(filter_str);
8005
8006 if (IS_ENABLED(CONFIG_EVENT_TRACING) &&
8007 event->attr.type == PERF_TYPE_TRACEPOINT)
8008 ret = ftrace_profile_set_filter(event, event->attr.config,
8009 filter_str);
375637bc
AS
8010 else if (has_addr_filter(event))
8011 ret = perf_event_set_addr_filter(event, filter_str);
c796bbbe
AS
8012
8013 kfree(filter_str);
8014 return ret;
8015}
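
/*
 * Illustrative only: user space reaches this path via the
 * PERF_EVENT_IOC_SET_FILTER ioctl on a perf event fd, roughly:
 *
 *	ioctl(fd, PERF_EVENT_IOC_SET_FILTER,
 *	      "filter 0x1000/0x2000@/usr/lib/libfoo.so");
 *
 * Tracepoint events hand the string to ftrace's filter parser; events on
 * PMUs with address filters go through perf_event_set_addr_filter() above.
 */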
8016
b0a873eb
PZ
8017/*
8018 * hrtimer based swevent callback
8019 */
f29ac756 8020
b0a873eb 8021static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
f29ac756 8022{
b0a873eb
PZ
8023 enum hrtimer_restart ret = HRTIMER_RESTART;
8024 struct perf_sample_data data;
8025 struct pt_regs *regs;
8026 struct perf_event *event;
8027 u64 period;
f29ac756 8028
b0a873eb 8029 event = container_of(hrtimer, struct perf_event, hw.hrtimer);
ba3dd36c
PZ
8030
8031 if (event->state != PERF_EVENT_STATE_ACTIVE)
8032 return HRTIMER_NORESTART;
8033
b0a873eb 8034 event->pmu->read(event);
f344011c 8035
fd0d000b 8036 perf_sample_data_init(&data, 0, event->hw.last_period);
b0a873eb
PZ
8037 regs = get_irq_regs();
8038
8039 if (regs && !perf_exclude_event(event, regs)) {
77aeeebd 8040 if (!(event->attr.exclude_idle && is_idle_task(current)))
33b07b8b 8041 if (__perf_event_overflow(event, 1, &data, regs))
b0a873eb
PZ
8042 ret = HRTIMER_NORESTART;
8043 }
24f1e32c 8044
b0a873eb
PZ
8045 period = max_t(u64, 10000, event->hw.sample_period);
8046 hrtimer_forward_now(hrtimer, ns_to_ktime(period));
24f1e32c 8047
b0a873eb 8048 return ret;
f29ac756
PZ
8049}
8050
b0a873eb 8051static void perf_swevent_start_hrtimer(struct perf_event *event)
5c92d124 8052{
b0a873eb 8053 struct hw_perf_event *hwc = &event->hw;
5d508e82
FBH
8054 s64 period;
8055
8056 if (!is_sampling_event(event))
8057 return;
f5ffe02e 8058
5d508e82
FBH
8059 period = local64_read(&hwc->period_left);
8060 if (period) {
8061 if (period < 0)
8062 period = 10000;
fa407f35 8063
5d508e82
FBH
8064 local64_set(&hwc->period_left, 0);
8065 } else {
8066 period = max_t(u64, 10000, hwc->sample_period);
8067 }
3497d206
TG
8068 hrtimer_start(&hwc->hrtimer, ns_to_ktime(period),
8069 HRTIMER_MODE_REL_PINNED);
24f1e32c 8070}
b0a873eb
PZ
8071
8072static void perf_swevent_cancel_hrtimer(struct perf_event *event)
24f1e32c 8073{
b0a873eb
PZ
8074 struct hw_perf_event *hwc = &event->hw;
8075
6c7e550f 8076 if (is_sampling_event(event)) {
b0a873eb 8077 ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
fa407f35 8078 local64_set(&hwc->period_left, ktime_to_ns(remaining));
b0a873eb
PZ
8079
8080 hrtimer_cancel(&hwc->hrtimer);
8081 }
24f1e32c
FW
8082}
8083
ba3dd36c
PZ
8084static void perf_swevent_init_hrtimer(struct perf_event *event)
8085{
8086 struct hw_perf_event *hwc = &event->hw;
8087
8088 if (!is_sampling_event(event))
8089 return;
8090
8091 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
8092 hwc->hrtimer.function = perf_swevent_hrtimer;
8093
8094 /*
8095 * Since hrtimers have a fixed rate, we can do a static freq->period
8096 * mapping and avoid the whole period adjust feedback stuff.
8097 */
8098 if (event->attr.freq) {
8099 long freq = event->attr.sample_freq;
8100
8101 event->attr.sample_period = NSEC_PER_SEC / freq;
8102 hwc->sample_period = event->attr.sample_period;
8103 local64_set(&hwc->period_left, hwc->sample_period);
778141e3 8104 hwc->last_period = hwc->sample_period;
ba3dd36c
PZ
8105 event->attr.freq = 0;
8106 }
8107}
8108
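/*
 * For example, an event created with attr.freq = 1 and
 * attr.sample_freq = 1000 (1000 Hz) gets a fixed sample_period of
 * NSEC_PER_SEC / 1000 = 1000000 ns per hrtimer expiry from the
 * static mapping above.
 */
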
b0a873eb
PZ
8109/*
8110 * Software event: cpu wall time clock
8111 */
8112
8113static void cpu_clock_event_update(struct perf_event *event)
24f1e32c 8114{
b0a873eb
PZ
8115 s64 prev;
8116 u64 now;
8117
a4eaf7f1 8118 now = local_clock();
b0a873eb
PZ
8119 prev = local64_xchg(&event->hw.prev_count, now);
8120 local64_add(now - prev, &event->count);
24f1e32c 8121}
24f1e32c 8122
a4eaf7f1 8123static void cpu_clock_event_start(struct perf_event *event, int flags)
b0a873eb 8124{
a4eaf7f1 8125 local64_set(&event->hw.prev_count, local_clock());
b0a873eb 8126 perf_swevent_start_hrtimer(event);
b0a873eb
PZ
8127}
8128
a4eaf7f1 8129static void cpu_clock_event_stop(struct perf_event *event, int flags)
f29ac756 8130{
b0a873eb
PZ
8131 perf_swevent_cancel_hrtimer(event);
8132 cpu_clock_event_update(event);
8133}
f29ac756 8134
a4eaf7f1
PZ
8135static int cpu_clock_event_add(struct perf_event *event, int flags)
8136{
8137 if (flags & PERF_EF_START)
8138 cpu_clock_event_start(event, flags);
6a694a60 8139 perf_event_update_userpage(event);
a4eaf7f1
PZ
8140
8141 return 0;
8142}
8143
8144static void cpu_clock_event_del(struct perf_event *event, int flags)
8145{
8146 cpu_clock_event_stop(event, flags);
8147}
8148
b0a873eb
PZ
8149static void cpu_clock_event_read(struct perf_event *event)
8150{
8151 cpu_clock_event_update(event);
8152}
f344011c 8153
b0a873eb
PZ
8154static int cpu_clock_event_init(struct perf_event *event)
8155{
8156 if (event->attr.type != PERF_TYPE_SOFTWARE)
8157 return -ENOENT;
8158
8159 if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
8160 return -ENOENT;
8161
2481c5fa
SE
8162 /*
8163 * no branch sampling for software events
8164 */
8165 if (has_branch_stack(event))
8166 return -EOPNOTSUPP;
8167
ba3dd36c
PZ
8168 perf_swevent_init_hrtimer(event);
8169
b0a873eb 8170 return 0;
f29ac756
PZ
8171}
8172
b0a873eb 8173static struct pmu perf_cpu_clock = {
89a1e187
PZ
8174 .task_ctx_nr = perf_sw_context,
8175
34f43927
PZ
8176 .capabilities = PERF_PMU_CAP_NO_NMI,
8177
b0a873eb 8178 .event_init = cpu_clock_event_init,
a4eaf7f1
PZ
8179 .add = cpu_clock_event_add,
8180 .del = cpu_clock_event_del,
8181 .start = cpu_clock_event_start,
8182 .stop = cpu_clock_event_stop,
b0a873eb
PZ
8183 .read = cpu_clock_event_read,
8184};
8185
8186/*
8187 * Software event: task time clock
8188 */
8189
8190static void task_clock_event_update(struct perf_event *event, u64 now)
5c92d124 8191{
b0a873eb
PZ
8192 u64 prev;
8193 s64 delta;
5c92d124 8194
b0a873eb
PZ
8195 prev = local64_xchg(&event->hw.prev_count, now);
8196 delta = now - prev;
8197 local64_add(delta, &event->count);
8198}
5c92d124 8199
a4eaf7f1 8200static void task_clock_event_start(struct perf_event *event, int flags)
b0a873eb 8201{
a4eaf7f1 8202 local64_set(&event->hw.prev_count, event->ctx->time);
b0a873eb 8203 perf_swevent_start_hrtimer(event);
b0a873eb
PZ
8204}
8205
a4eaf7f1 8206static void task_clock_event_stop(struct perf_event *event, int flags)
b0a873eb
PZ
8207{
8208 perf_swevent_cancel_hrtimer(event);
8209 task_clock_event_update(event, event->ctx->time);
a4eaf7f1
PZ
8210}
8211
8212static int task_clock_event_add(struct perf_event *event, int flags)
8213{
8214 if (flags & PERF_EF_START)
8215 task_clock_event_start(event, flags);
6a694a60 8216 perf_event_update_userpage(event);
b0a873eb 8217
a4eaf7f1
PZ
8218 return 0;
8219}
8220
8221static void task_clock_event_del(struct perf_event *event, int flags)
8222{
8223 task_clock_event_stop(event, PERF_EF_UPDATE);
b0a873eb
PZ
8224}
8225
8226static void task_clock_event_read(struct perf_event *event)
8227{
768a06e2
PZ
8228 u64 now = perf_clock();
8229 u64 delta = now - event->ctx->timestamp;
8230 u64 time = event->ctx->time + delta;
b0a873eb
PZ
8231
8232 task_clock_event_update(event, time);
8233}
8234
8235static int task_clock_event_init(struct perf_event *event)
6fb2915d 8236{
b0a873eb
PZ
8237 if (event->attr.type != PERF_TYPE_SOFTWARE)
8238 return -ENOENT;
8239
8240 if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
8241 return -ENOENT;
8242
2481c5fa
SE
8243 /*
8244 * no branch sampling for software events
8245 */
8246 if (has_branch_stack(event))
8247 return -EOPNOTSUPP;
8248
ba3dd36c
PZ
8249 perf_swevent_init_hrtimer(event);
8250
b0a873eb 8251 return 0;
6fb2915d
LZ
8252}
8253
b0a873eb 8254static struct pmu perf_task_clock = {
89a1e187
PZ
8255 .task_ctx_nr = perf_sw_context,
8256
34f43927
PZ
8257 .capabilities = PERF_PMU_CAP_NO_NMI,
8258
b0a873eb 8259 .event_init = task_clock_event_init,
a4eaf7f1
PZ
8260 .add = task_clock_event_add,
8261 .del = task_clock_event_del,
8262 .start = task_clock_event_start,
8263 .stop = task_clock_event_stop,
b0a873eb
PZ
8264 .read = task_clock_event_read,
8265};
6fb2915d 8266
ad5133b7 8267static void perf_pmu_nop_void(struct pmu *pmu)
e077df4f 8268{
e077df4f 8269}
6fb2915d 8270
fbbe0701
SB
8271static void perf_pmu_nop_txn(struct pmu *pmu, unsigned int flags)
8272{
8273}
8274
ad5133b7 8275static int perf_pmu_nop_int(struct pmu *pmu)
6fb2915d 8276{
ad5133b7 8277 return 0;
6fb2915d
LZ
8278}
8279
18ab2cd3 8280static DEFINE_PER_CPU(unsigned int, nop_txn_flags);
fbbe0701
SB
8281
8282static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags)
6fb2915d 8283{
fbbe0701
SB
8284 __this_cpu_write(nop_txn_flags, flags);
8285
8286 if (flags & ~PERF_PMU_TXN_ADD)
8287 return;
8288
ad5133b7 8289 perf_pmu_disable(pmu);
6fb2915d
LZ
8290}
8291
ad5133b7
PZ
8292static int perf_pmu_commit_txn(struct pmu *pmu)
8293{
fbbe0701
SB
8294 unsigned int flags = __this_cpu_read(nop_txn_flags);
8295
8296 __this_cpu_write(nop_txn_flags, 0);
8297
8298 if (flags & ~PERF_PMU_TXN_ADD)
8299 return 0;
8300
ad5133b7
PZ
8301 perf_pmu_enable(pmu);
8302 return 0;
8303}
e077df4f 8304
ad5133b7 8305static void perf_pmu_cancel_txn(struct pmu *pmu)
24f1e32c 8306{
fbbe0701
SB
8307 unsigned int flags = __this_cpu_read(nop_txn_flags);
8308
8309 __this_cpu_write(nop_txn_flags, 0);
8310
8311 if (flags & ~PERF_PMU_TXN_ADD)
8312 return;
8313
ad5133b7 8314 perf_pmu_enable(pmu);
24f1e32c
FW
8315}
8316
35edc2a5
PZ
8317static int perf_event_idx_default(struct perf_event *event)
8318{
c719f560 8319 return 0;
35edc2a5
PZ
8320}
8321
8dc85d54
PZ
8322/*
8323 * Ensures all contexts with the same task_ctx_nr have the same
8324 * pmu_cpu_context too.
8325 */
9e317041 8326static struct perf_cpu_context __percpu *find_pmu_context(int ctxn)
24f1e32c 8327{
8dc85d54 8328 struct pmu *pmu;
b326e956 8329
8dc85d54
PZ
8330 if (ctxn < 0)
8331 return NULL;
24f1e32c 8332
8dc85d54
PZ
8333 list_for_each_entry(pmu, &pmus, entry) {
8334 if (pmu->task_ctx_nr == ctxn)
8335 return pmu->pmu_cpu_context;
8336 }
24f1e32c 8337
8dc85d54 8338 return NULL;
24f1e32c
FW
8339}
8340
51676957 8341static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu)
24f1e32c 8342{
51676957
PZ
8343 int cpu;
8344
8345 for_each_possible_cpu(cpu) {
8346 struct perf_cpu_context *cpuctx;
8347
8348 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
8349
3f1f3320
PZ
8350 if (cpuctx->unique_pmu == old_pmu)
8351 cpuctx->unique_pmu = pmu;
51676957
PZ
8352 }
8353}
8354
8355static void free_pmu_context(struct pmu *pmu)
8356{
8357 struct pmu *i;
f5ffe02e 8358
8dc85d54 8359 mutex_lock(&pmus_lock);
0475f9ea 8360 /*
8dc85d54 8361 * Like a real lame refcount.
0475f9ea 8362 */
51676957
PZ
8363 list_for_each_entry(i, &pmus, entry) {
8364 if (i->pmu_cpu_context == pmu->pmu_cpu_context) {
8365 update_pmu_context(i, pmu);
8dc85d54 8366 goto out;
51676957 8367 }
8dc85d54 8368 }
d6d020e9 8369
51676957 8370 free_percpu(pmu->pmu_cpu_context);
8dc85d54
PZ
8371out:
8372 mutex_unlock(&pmus_lock);
24f1e32c 8373}
6e855cd4
AS
8374
8375/*
8376 * Let userspace know that this PMU supports address range filtering:
8377 */
8378static ssize_t nr_addr_filters_show(struct device *dev,
8379 struct device_attribute *attr,
8380 char *page)
8381{
8382 struct pmu *pmu = dev_get_drvdata(dev);
8383
8384 return snprintf(page, PAGE_SIZE - 1, "%d\n", pmu->nr_addr_filters);
8385}
8386DEVICE_ATTR_RO(nr_addr_filters);
8387
2e80a82a 8388static struct idr pmu_idr;
d6d020e9 8389
abe43400
PZ
8390static ssize_t
8391type_show(struct device *dev, struct device_attribute *attr, char *page)
8392{
8393 struct pmu *pmu = dev_get_drvdata(dev);
8394
8395 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type);
8396}
90826ca7 8397static DEVICE_ATTR_RO(type);
abe43400 8398
62b85639
SE
8399static ssize_t
8400perf_event_mux_interval_ms_show(struct device *dev,
8401 struct device_attribute *attr,
8402 char *page)
8403{
8404 struct pmu *pmu = dev_get_drvdata(dev);
8405
8406 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->hrtimer_interval_ms);
8407}
8408
272325c4
PZ
8409static DEFINE_MUTEX(mux_interval_mutex);
8410
62b85639
SE
8411static ssize_t
8412perf_event_mux_interval_ms_store(struct device *dev,
8413 struct device_attribute *attr,
8414 const char *buf, size_t count)
8415{
8416 struct pmu *pmu = dev_get_drvdata(dev);
8417 int timer, cpu, ret;
8418
8419 ret = kstrtoint(buf, 0, &timer);
8420 if (ret)
8421 return ret;
8422
8423 if (timer < 1)
8424 return -EINVAL;
8425
8426	/* same value, nothing to do */
8427 if (timer == pmu->hrtimer_interval_ms)
8428 return count;
8429
272325c4 8430 mutex_lock(&mux_interval_mutex);
62b85639
SE
8431 pmu->hrtimer_interval_ms = timer;
8432
8433 /* update all cpuctx for this PMU */
272325c4
PZ
8434 get_online_cpus();
8435 for_each_online_cpu(cpu) {
62b85639
SE
8436 struct perf_cpu_context *cpuctx;
8437 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
8438 cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer);
8439
272325c4
PZ
8440 cpu_function_call(cpu,
8441 (remote_function_f)perf_mux_hrtimer_restart, cpuctx);
62b85639 8442 }
272325c4
PZ
8443 put_online_cpus();
8444 mutex_unlock(&mux_interval_mutex);
62b85639
SE
8445
8446 return count;
8447}
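
/*
 * Illustrative only: the store method above backs the writable sysfs
 * attribute declared below, so the multiplexing interval of a PMU can
 * typically be tuned from user space with something like:
 *
 *	echo 2 > /sys/bus/event_source/devices/<pmu>/perf_event_mux_interval_ms
 *
 * (value in milliseconds, minimum 1).
 */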
90826ca7 8448static DEVICE_ATTR_RW(perf_event_mux_interval_ms);
62b85639 8449
90826ca7
GKH
8450static struct attribute *pmu_dev_attrs[] = {
8451 &dev_attr_type.attr,
8452 &dev_attr_perf_event_mux_interval_ms.attr,
8453 NULL,
abe43400 8454};
90826ca7 8455ATTRIBUTE_GROUPS(pmu_dev);
abe43400
PZ
8456
8457static int pmu_bus_running;
8458static struct bus_type pmu_bus = {
8459 .name = "event_source",
90826ca7 8460 .dev_groups = pmu_dev_groups,
abe43400
PZ
8461};
8462
8463static void pmu_dev_release(struct device *dev)
8464{
8465 kfree(dev);
8466}
8467
8468static int pmu_dev_alloc(struct pmu *pmu)
8469{
8470 int ret = -ENOMEM;
8471
8472 pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
8473 if (!pmu->dev)
8474 goto out;
8475
0c9d42ed 8476 pmu->dev->groups = pmu->attr_groups;
abe43400
PZ
8477 device_initialize(pmu->dev);
8478 ret = dev_set_name(pmu->dev, "%s", pmu->name);
8479 if (ret)
8480 goto free_dev;
8481
8482 dev_set_drvdata(pmu->dev, pmu);
8483 pmu->dev->bus = &pmu_bus;
8484 pmu->dev->release = pmu_dev_release;
8485 ret = device_add(pmu->dev);
8486 if (ret)
8487 goto free_dev;
8488
6e855cd4
AS
8489 /* For PMUs with address filters, throw in an extra attribute: */
8490 if (pmu->nr_addr_filters)
8491 ret = device_create_file(pmu->dev, &dev_attr_nr_addr_filters);
8492
8493 if (ret)
8494 goto del_dev;
8495
abe43400
PZ
8496out:
8497 return ret;
8498
6e855cd4
AS
8499del_dev:
8500 device_del(pmu->dev);
8501
abe43400
PZ
8502free_dev:
8503 put_device(pmu->dev);
8504 goto out;
8505}
8506
547e9fd7 8507static struct lock_class_key cpuctx_mutex;
facc4307 8508static struct lock_class_key cpuctx_lock;
547e9fd7 8509
03d8e80b 8510int perf_pmu_register(struct pmu *pmu, const char *name, int type)
24f1e32c 8511{
108b02cf 8512 int cpu, ret;
24f1e32c 8513
b0a873eb 8514 mutex_lock(&pmus_lock);
33696fc0
PZ
8515 ret = -ENOMEM;
8516 pmu->pmu_disable_count = alloc_percpu(int);
8517 if (!pmu->pmu_disable_count)
8518 goto unlock;
f29ac756 8519
2e80a82a
PZ
8520 pmu->type = -1;
8521 if (!name)
8522 goto skip_type;
8523 pmu->name = name;
8524
8525 if (type < 0) {
0e9c3be2
TH
8526 type = idr_alloc(&pmu_idr, pmu, PERF_TYPE_MAX, 0, GFP_KERNEL);
8527 if (type < 0) {
8528 ret = type;
2e80a82a
PZ
8529 goto free_pdc;
8530 }
8531 }
8532 pmu->type = type;
8533
abe43400
PZ
8534 if (pmu_bus_running) {
8535 ret = pmu_dev_alloc(pmu);
8536 if (ret)
8537 goto free_idr;
8538 }
8539
2e80a82a 8540skip_type:
26657848
PZ
8541 if (pmu->task_ctx_nr == perf_hw_context) {
8542 static int hw_context_taken = 0;
8543
5101ef20
MR
8544 /*
8545 * Other than systems with heterogeneous CPUs, it never makes
8546 * sense for two PMUs to share perf_hw_context. PMUs which are
8547 * uncore must use perf_invalid_context.
8548 */
8549 if (WARN_ON_ONCE(hw_context_taken &&
8550 !(pmu->capabilities & PERF_PMU_CAP_HETEROGENEOUS_CPUS)))
26657848
PZ
8551 pmu->task_ctx_nr = perf_invalid_context;
8552
8553 hw_context_taken = 1;
8554 }
8555
8dc85d54
PZ
8556 pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
8557 if (pmu->pmu_cpu_context)
8558 goto got_cpu_context;
f29ac756 8559
c4814202 8560 ret = -ENOMEM;
108b02cf
PZ
8561 pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
8562 if (!pmu->pmu_cpu_context)
abe43400 8563 goto free_dev;
f344011c 8564
108b02cf
PZ
8565 for_each_possible_cpu(cpu) {
8566 struct perf_cpu_context *cpuctx;
8567
8568 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
eb184479 8569 __perf_event_init_context(&cpuctx->ctx);
547e9fd7 8570 lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
facc4307 8571 lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
108b02cf 8572 cpuctx->ctx.pmu = pmu;
9e630205 8573
272325c4 8574 __perf_mux_hrtimer_init(cpuctx, cpu);
9e630205 8575
3f1f3320 8576 cpuctx->unique_pmu = pmu;
108b02cf 8577 }
76e1d904 8578
8dc85d54 8579got_cpu_context:
ad5133b7
PZ
8580 if (!pmu->start_txn) {
8581 if (pmu->pmu_enable) {
8582 /*
8583 * If we have pmu_enable/pmu_disable calls, install
8584 * transaction stubs that use that to try and batch
8585 * hardware accesses.
8586 */
8587 pmu->start_txn = perf_pmu_start_txn;
8588 pmu->commit_txn = perf_pmu_commit_txn;
8589 pmu->cancel_txn = perf_pmu_cancel_txn;
8590 } else {
fbbe0701 8591 pmu->start_txn = perf_pmu_nop_txn;
ad5133b7
PZ
8592 pmu->commit_txn = perf_pmu_nop_int;
8593 pmu->cancel_txn = perf_pmu_nop_void;
f344011c 8594 }
5c92d124 8595 }
15dbf27c 8596
ad5133b7
PZ
8597 if (!pmu->pmu_enable) {
8598 pmu->pmu_enable = perf_pmu_nop_void;
8599 pmu->pmu_disable = perf_pmu_nop_void;
8600 }
8601
35edc2a5
PZ
8602 if (!pmu->event_idx)
8603 pmu->event_idx = perf_event_idx_default;
8604
b0a873eb 8605 list_add_rcu(&pmu->entry, &pmus);
bed5b25a 8606 atomic_set(&pmu->exclusive_cnt, 0);
33696fc0
PZ
8607 ret = 0;
8608unlock:
b0a873eb
PZ
8609 mutex_unlock(&pmus_lock);
8610
33696fc0 8611 return ret;
108b02cf 8612
abe43400
PZ
8613free_dev:
8614 device_del(pmu->dev);
8615 put_device(pmu->dev);
8616
2e80a82a
PZ
8617free_idr:
8618 if (pmu->type >= PERF_TYPE_MAX)
8619 idr_remove(&pmu_idr, pmu->type);
8620
108b02cf
PZ
8621free_pdc:
8622 free_percpu(pmu->pmu_disable_count);
8623 goto unlock;
f29ac756 8624}
c464c76e 8625EXPORT_SYMBOL_GPL(perf_pmu_register);
f29ac756 8626
b0a873eb 8627void perf_pmu_unregister(struct pmu *pmu)
5c92d124 8628{
b0a873eb
PZ
8629 mutex_lock(&pmus_lock);
8630 list_del_rcu(&pmu->entry);
8631 mutex_unlock(&pmus_lock);
5c92d124 8632
0475f9ea 8633 /*
cde8e884
PZ
8634 * We dereference the pmu list under both SRCU and regular RCU, so
8635 * synchronize against both of those.
0475f9ea 8636 */
b0a873eb 8637 synchronize_srcu(&pmus_srcu);
cde8e884 8638 synchronize_rcu();
d6d020e9 8639
33696fc0 8640 free_percpu(pmu->pmu_disable_count);
2e80a82a
PZ
8641 if (pmu->type >= PERF_TYPE_MAX)
8642 idr_remove(&pmu_idr, pmu->type);
6e855cd4
AS
8643 if (pmu->nr_addr_filters)
8644 device_remove_file(pmu->dev, &dev_attr_nr_addr_filters);
abe43400
PZ
8645 device_del(pmu->dev);
8646 put_device(pmu->dev);
51676957 8647 free_pmu_context(pmu);
b0a873eb 8648}
c464c76e 8649EXPORT_SYMBOL_GPL(perf_pmu_unregister);
d6d020e9 8650
cc34b98b
MR
8651static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
8652{
ccd41c86 8653 struct perf_event_context *ctx = NULL;
cc34b98b
MR
8654 int ret;
8655
8656 if (!try_module_get(pmu->module))
8657 return -ENODEV;
ccd41c86
PZ
8658
8659 if (event->group_leader != event) {
8b10c5e2
PZ
8660 /*
8661 * This ctx->mutex can nest when we're called through
8662 * inheritance. See the perf_event_ctx_lock_nested() comment.
8663 */
8664 ctx = perf_event_ctx_lock_nested(event->group_leader,
8665 SINGLE_DEPTH_NESTING);
ccd41c86
PZ
8666 BUG_ON(!ctx);
8667 }
8668
cc34b98b
MR
8669 event->pmu = pmu;
8670 ret = pmu->event_init(event);
ccd41c86
PZ
8671
8672 if (ctx)
8673 perf_event_ctx_unlock(event->group_leader, ctx);
8674
cc34b98b
MR
8675 if (ret)
8676 module_put(pmu->module);
8677
8678 return ret;
8679}
8680
18ab2cd3 8681static struct pmu *perf_init_event(struct perf_event *event)
b0a873eb
PZ
8682{
8683 struct pmu *pmu = NULL;
8684 int idx;
940c5b29 8685 int ret;
b0a873eb
PZ
8686
8687 idx = srcu_read_lock(&pmus_srcu);
2e80a82a
PZ
8688
8689 rcu_read_lock();
8690 pmu = idr_find(&pmu_idr, event->attr.type);
8691 rcu_read_unlock();
940c5b29 8692 if (pmu) {
cc34b98b 8693 ret = perf_try_init_event(pmu, event);
940c5b29
LM
8694 if (ret)
8695 pmu = ERR_PTR(ret);
2e80a82a 8696 goto unlock;
940c5b29 8697 }
2e80a82a 8698
b0a873eb 8699 list_for_each_entry_rcu(pmu, &pmus, entry) {
cc34b98b 8700 ret = perf_try_init_event(pmu, event);
b0a873eb 8701 if (!ret)
e5f4d339 8702 goto unlock;
76e1d904 8703
b0a873eb
PZ
8704 if (ret != -ENOENT) {
8705 pmu = ERR_PTR(ret);
e5f4d339 8706 goto unlock;
f344011c 8707 }
5c92d124 8708 }
e5f4d339
PZ
8709 pmu = ERR_PTR(-ENOENT);
8710unlock:
b0a873eb 8711 srcu_read_unlock(&pmus_srcu, idx);
15dbf27c 8712
4aeb0b42 8713 return pmu;
5c92d124
IM
8714}
8715
f2fb6bef
KL
8716static void attach_sb_event(struct perf_event *event)
8717{
8718 struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu);
8719
8720 raw_spin_lock(&pel->lock);
8721 list_add_rcu(&event->sb_list, &pel->list);
8722 raw_spin_unlock(&pel->lock);
8723}
8724
aab5b71e
PZ
8725/*
8726 * We keep a list of all !task (and therefore per-cpu) events
8727 * that need to receive side-band records.
8728 *
8729 * This avoids having to scan all the various PMU per-cpu contexts
8730 * looking for them.
8731 */
f2fb6bef
KL
8732static void account_pmu_sb_event(struct perf_event *event)
8733{
a4f144eb 8734 if (is_sb_event(event))
f2fb6bef
KL
8735 attach_sb_event(event);
8736}
8737
4beb31f3
FW
8738static void account_event_cpu(struct perf_event *event, int cpu)
8739{
8740 if (event->parent)
8741 return;
8742
4beb31f3
FW
8743 if (is_cgroup_event(event))
8744 atomic_inc(&per_cpu(perf_cgroup_events, cpu));
8745}
8746
555e0c1e
FW
8747/* Freq events need the tick to stay alive (see perf_event_task_tick). */
8748static void account_freq_event_nohz(void)
8749{
8750#ifdef CONFIG_NO_HZ_FULL
8751 /* Lock so we don't race with concurrent unaccount */
8752 spin_lock(&nr_freq_lock);
8753 if (atomic_inc_return(&nr_freq_events) == 1)
8754 tick_nohz_dep_set(TICK_DEP_BIT_PERF_EVENTS);
8755 spin_unlock(&nr_freq_lock);
8756#endif
8757}
8758
8759static void account_freq_event(void)
8760{
8761 if (tick_nohz_full_enabled())
8762 account_freq_event_nohz();
8763 else
8764 atomic_inc(&nr_freq_events);
8765}
8766
8767
766d6c07
FW
8768static void account_event(struct perf_event *event)
8769{
25432ae9
PZ
8770 bool inc = false;
8771
4beb31f3
FW
8772 if (event->parent)
8773 return;
8774
766d6c07 8775 if (event->attach_state & PERF_ATTACH_TASK)
25432ae9 8776 inc = true;
766d6c07
FW
8777 if (event->attr.mmap || event->attr.mmap_data)
8778 atomic_inc(&nr_mmap_events);
8779 if (event->attr.comm)
8780 atomic_inc(&nr_comm_events);
8781 if (event->attr.task)
8782 atomic_inc(&nr_task_events);
555e0c1e
FW
8783 if (event->attr.freq)
8784 account_freq_event();
45ac1403
AH
8785 if (event->attr.context_switch) {
8786 atomic_inc(&nr_switch_events);
25432ae9 8787 inc = true;
45ac1403 8788 }
4beb31f3 8789 if (has_branch_stack(event))
25432ae9 8790 inc = true;
4beb31f3 8791 if (is_cgroup_event(event))
25432ae9
PZ
8792 inc = true;
8793
9107c89e
PZ
8794 if (inc) {
8795 if (atomic_inc_not_zero(&perf_sched_count))
8796 goto enabled;
8797
8798 mutex_lock(&perf_sched_mutex);
8799 if (!atomic_read(&perf_sched_count)) {
8800 static_branch_enable(&perf_sched_events);
8801 /*
8802 * Guarantee that all CPUs observe they key change and
8803 * call the perf scheduling hooks before proceeding to
8804 * install events that need them.
8805 */
8806 synchronize_sched();
8807 }
8808 /*
8809 * Now that we have waited for the sync_sched(), allow further
8810 * increments to by-pass the mutex.
8811 */
8812 atomic_inc(&perf_sched_count);
8813 mutex_unlock(&perf_sched_mutex);
8814 }
8815enabled:
4beb31f3
FW
8816
8817 account_event_cpu(event, event->cpu);
f2fb6bef
KL
8818
8819 account_pmu_sb_event(event);
766d6c07
FW
8820}
8821
0793a61d 8822/*
cdd6c482 8823 * Allocate and initialize an event structure
0793a61d 8824 */
cdd6c482 8825static struct perf_event *
c3f00c70 8826perf_event_alloc(struct perf_event_attr *attr, int cpu,
d580ff86
PZ
8827 struct task_struct *task,
8828 struct perf_event *group_leader,
8829 struct perf_event *parent_event,
4dc0da86 8830 perf_overflow_handler_t overflow_handler,
79dff51e 8831 void *context, int cgroup_fd)
0793a61d 8832{
51b0fe39 8833 struct pmu *pmu;
cdd6c482
IM
8834 struct perf_event *event;
8835 struct hw_perf_event *hwc;
90983b16 8836 long err = -EINVAL;
0793a61d 8837
66832eb4
ON
8838 if ((unsigned)cpu >= nr_cpu_ids) {
8839 if (!task || cpu != -1)
8840 return ERR_PTR(-EINVAL);
8841 }
8842
c3f00c70 8843 event = kzalloc(sizeof(*event), GFP_KERNEL);
cdd6c482 8844 if (!event)
d5d2bc0d 8845 return ERR_PTR(-ENOMEM);
0793a61d 8846
04289bb9 8847 /*
cdd6c482 8848 * Single events are their own group leaders, with an
04289bb9
IM
8849 * empty sibling list:
8850 */
8851 if (!group_leader)
cdd6c482 8852 group_leader = event;
04289bb9 8853
cdd6c482
IM
8854 mutex_init(&event->child_mutex);
8855 INIT_LIST_HEAD(&event->child_list);
fccc714b 8856
cdd6c482
IM
8857 INIT_LIST_HEAD(&event->group_entry);
8858 INIT_LIST_HEAD(&event->event_entry);
8859 INIT_LIST_HEAD(&event->sibling_list);
10c6db11 8860 INIT_LIST_HEAD(&event->rb_entry);
71ad88ef 8861 INIT_LIST_HEAD(&event->active_entry);
375637bc 8862 INIT_LIST_HEAD(&event->addr_filters.list);
f3ae75de
SE
8863 INIT_HLIST_NODE(&event->hlist_entry);
8864
10c6db11 8865
cdd6c482 8866 init_waitqueue_head(&event->waitq);
e360adbe 8867 init_irq_work(&event->pending, perf_pending_event);
0793a61d 8868
cdd6c482 8869 mutex_init(&event->mmap_mutex);
375637bc 8870 raw_spin_lock_init(&event->addr_filters.lock);
7b732a75 8871
a6fa941d 8872 atomic_long_set(&event->refcount, 1);
cdd6c482
IM
8873 event->cpu = cpu;
8874 event->attr = *attr;
8875 event->group_leader = group_leader;
8876 event->pmu = NULL;
cdd6c482 8877 event->oncpu = -1;
a96bbc16 8878
cdd6c482 8879 event->parent = parent_event;
b84fbc9f 8880
17cf22c3 8881 event->ns = get_pid_ns(task_active_pid_ns(current));
cdd6c482 8882 event->id = atomic64_inc_return(&perf_event_id);
a96bbc16 8883
cdd6c482 8884 event->state = PERF_EVENT_STATE_INACTIVE;
329d876d 8885
d580ff86
PZ
8886 if (task) {
8887 event->attach_state = PERF_ATTACH_TASK;
d580ff86 8888 /*
50f16a8b
PZ
8889 * XXX pmu::event_init needs to know what task to account to
8890 * and we cannot use the ctx information because we need the
8891 * pmu before we get a ctx.
d580ff86 8892 */
50f16a8b 8893 event->hw.target = task;
d580ff86
PZ
8894 }
8895
34f43927
PZ
8896 event->clock = &local_clock;
8897 if (parent_event)
8898 event->clock = parent_event->clock;
8899
4dc0da86 8900 if (!overflow_handler && parent_event) {
b326e956 8901 overflow_handler = parent_event->overflow_handler;
4dc0da86
AK
8902 context = parent_event->overflow_handler_context;
8903 }
66832eb4 8904
1879445d
WN
8905 if (overflow_handler) {
8906 event->overflow_handler = overflow_handler;
8907 event->overflow_handler_context = context;
9ecda41a
WN
8908	} else if (is_write_backward(event)) {
8909 event->overflow_handler = perf_event_output_backward;
8910 event->overflow_handler_context = NULL;
1879445d 8911 } else {
9ecda41a 8912 event->overflow_handler = perf_event_output_forward;
1879445d
WN
8913 event->overflow_handler_context = NULL;
8914 }
97eaf530 8915
0231bb53 8916 perf_event__state_init(event);
a86ed508 8917
4aeb0b42 8918 pmu = NULL;
b8e83514 8919
cdd6c482 8920 hwc = &event->hw;
bd2b5b12 8921 hwc->sample_period = attr->sample_period;
0d48696f 8922 if (attr->freq && attr->sample_freq)
bd2b5b12 8923 hwc->sample_period = 1;
eced1dfc 8924 hwc->last_period = hwc->sample_period;
bd2b5b12 8925
e7850595 8926 local64_set(&hwc->period_left, hwc->sample_period);
60db5e09 8927
2023b359 8928 /*
cdd6c482 8929 * we currently do not support PERF_FORMAT_GROUP on inherited events
2023b359 8930 */
3dab77fb 8931 if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
90983b16 8932 goto err_ns;
a46a2300
YZ
8933
8934 if (!has_branch_stack(event))
8935 event->attr.branch_sample_type = 0;
2023b359 8936
79dff51e
MF
8937 if (cgroup_fd != -1) {
8938 err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader);
8939 if (err)
8940 goto err_ns;
8941 }
8942
b0a873eb 8943 pmu = perf_init_event(event);
4aeb0b42 8944 if (!pmu)
90983b16
FW
8945 goto err_ns;
8946 else if (IS_ERR(pmu)) {
4aeb0b42 8947 err = PTR_ERR(pmu);
90983b16 8948 goto err_ns;
621a01ea 8949 }
d5d2bc0d 8950
bed5b25a
AS
8951 err = exclusive_event_init(event);
8952 if (err)
8953 goto err_pmu;
8954
375637bc
AS
8955 if (has_addr_filter(event)) {
8956 event->addr_filters_offs = kcalloc(pmu->nr_addr_filters,
8957 sizeof(unsigned long),
8958 GFP_KERNEL);
8959 if (!event->addr_filters_offs)
8960 goto err_per_task;
8961
8962 /* force hw sync on the address filters */
8963 event->addr_filters_gen = 1;
8964 }
8965
cdd6c482 8966 if (!event->parent) {
927c7a9e 8967 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
97c79a38 8968 err = get_callchain_buffers(attr->sample_max_stack);
90983b16 8969 if (err)
375637bc 8970 goto err_addr_filters;
d010b332 8971 }
f344011c 8972 }
9ee318a7 8973
927a5570
AS
8974 /* symmetric to unaccount_event() in _free_event() */
8975 account_event(event);
8976
cdd6c482 8977 return event;
90983b16 8978
375637bc
AS
8979err_addr_filters:
8980 kfree(event->addr_filters_offs);
8981
bed5b25a
AS
8982err_per_task:
8983 exclusive_event_destroy(event);
8984
90983b16
FW
8985err_pmu:
8986 if (event->destroy)
8987 event->destroy(event);
c464c76e 8988 module_put(pmu->module);
90983b16 8989err_ns:
79dff51e
MF
8990 if (is_cgroup_event(event))
8991 perf_detach_cgroup(event);
90983b16
FW
8992 if (event->ns)
8993 put_pid_ns(event->ns);
8994 kfree(event);
8995
8996 return ERR_PTR(err);
0793a61d
TG
8997}
8998
cdd6c482
IM
8999static int perf_copy_attr(struct perf_event_attr __user *uattr,
9000 struct perf_event_attr *attr)
974802ea 9001{
974802ea 9002 u32 size;
cdf8073d 9003 int ret;
974802ea
PZ
9004
9005 if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
9006 return -EFAULT;
9007
9008 /*
9009 * zero the full structure, so that a short copy will be nice.
9010 */
9011 memset(attr, 0, sizeof(*attr));
9012
9013 ret = get_user(size, &uattr->size);
9014 if (ret)
9015 return ret;
9016
9017 if (size > PAGE_SIZE) /* silly large */
9018 goto err_size;
9019
9020 if (!size) /* abi compat */
9021 size = PERF_ATTR_SIZE_VER0;
9022
9023 if (size < PERF_ATTR_SIZE_VER0)
9024 goto err_size;
9025
9026 /*
9027 * If we're handed a bigger struct than we know of,
cdf8073d
IS
9028 * ensure all the unknown bits are 0 - i.e. new
9029 * user-space does not rely on any kernel feature
9030	 * extensions we don't know about yet.
974802ea
PZ
9031 */
9032 if (size > sizeof(*attr)) {
cdf8073d
IS
9033 unsigned char __user *addr;
9034 unsigned char __user *end;
9035 unsigned char val;
974802ea 9036
cdf8073d
IS
9037 addr = (void __user *)uattr + sizeof(*attr);
9038 end = (void __user *)uattr + size;
974802ea 9039
cdf8073d 9040 for (; addr < end; addr++) {
974802ea
PZ
9041 ret = get_user(val, addr);
9042 if (ret)
9043 return ret;
9044 if (val)
9045 goto err_size;
9046 }
b3e62e35 9047 size = sizeof(*attr);
974802ea
PZ
9048 }
9049
9050 ret = copy_from_user(attr, uattr, size);
9051 if (ret)
9052 return -EFAULT;
9053
cd757645 9054 if (attr->__reserved_1)
974802ea
PZ
9055 return -EINVAL;
9056
9057 if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
9058 return -EINVAL;
9059
9060 if (attr->read_format & ~(PERF_FORMAT_MAX-1))
9061 return -EINVAL;
9062
bce38cd5
SE
9063 if (attr->sample_type & PERF_SAMPLE_BRANCH_STACK) {
9064 u64 mask = attr->branch_sample_type;
9065
9066 /* only using defined bits */
9067 if (mask & ~(PERF_SAMPLE_BRANCH_MAX-1))
9068 return -EINVAL;
9069
9070 /* at least one branch bit must be set */
9071 if (!(mask & ~PERF_SAMPLE_BRANCH_PLM_ALL))
9072 return -EINVAL;
9073
bce38cd5
SE
9074 /* propagate priv level, when not set for branch */
9075 if (!(mask & PERF_SAMPLE_BRANCH_PLM_ALL)) {
9076
9077 /* exclude_kernel checked on syscall entry */
9078 if (!attr->exclude_kernel)
9079 mask |= PERF_SAMPLE_BRANCH_KERNEL;
9080
9081 if (!attr->exclude_user)
9082 mask |= PERF_SAMPLE_BRANCH_USER;
9083
9084 if (!attr->exclude_hv)
9085 mask |= PERF_SAMPLE_BRANCH_HV;
9086 /*
9087 * adjust user setting (for HW filter setup)
9088 */
9089 attr->branch_sample_type = mask;
9090 }
e712209a
SE
9091 /* privileged levels capture (kernel, hv): check permissions */
9092 if ((mask & PERF_SAMPLE_BRANCH_PERM_PLM)
2b923c8f
SE
9093 && perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
9094 return -EACCES;
bce38cd5 9095 }
4018994f 9096
c5ebcedb 9097 if (attr->sample_type & PERF_SAMPLE_REGS_USER) {
4018994f 9098 ret = perf_reg_validate(attr->sample_regs_user);
c5ebcedb
JO
9099 if (ret)
9100 return ret;
9101 }
9102
9103 if (attr->sample_type & PERF_SAMPLE_STACK_USER) {
9104 if (!arch_perf_have_user_stack_dump())
9105 return -ENOSYS;
9106
9107 /*
9108 * We have __u32 type for the size, but so far
9109 * we can only use __u16 as maximum due to the
9110 * __u16 sample size limit.
9111 */
9112 if (attr->sample_stack_user >= USHRT_MAX)
9113 ret = -EINVAL;
9114 else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64)))
9115 ret = -EINVAL;
9116 }
4018994f 9117
60e2364e
SE
9118 if (attr->sample_type & PERF_SAMPLE_REGS_INTR)
9119 ret = perf_reg_validate(attr->sample_regs_intr);
974802ea
PZ
9120out:
9121 return ret;
9122
9123err_size:
9124 put_user(sizeof(*attr), &uattr->size);
9125 ret = -E2BIG;
9126 goto out;
9127}
9128
ac9721f3
PZ
9129static int
9130perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
a4be7c27 9131{
b69cf536 9132 struct ring_buffer *rb = NULL;
a4be7c27
PZ
9133 int ret = -EINVAL;
9134
ac9721f3 9135 if (!output_event)
a4be7c27
PZ
9136 goto set;
9137
ac9721f3
PZ
9138 /* don't allow circular references */
9139 if (event == output_event)
a4be7c27
PZ
9140 goto out;
9141
0f139300
PZ
9142 /*
9143 * Don't allow cross-cpu buffers
9144 */
9145 if (output_event->cpu != event->cpu)
9146 goto out;
9147
9148 /*
76369139 9149 * If its not a per-cpu rb, it must be the same task.
0f139300
PZ
9150 */
9151 if (output_event->cpu == -1 && output_event->ctx != event->ctx)
9152 goto out;
9153
34f43927
PZ
9154 /*
9155 * Mixing clocks in the same buffer is trouble you don't need.
9156 */
9157 if (output_event->clock != event->clock)
9158 goto out;
9159
9ecda41a
WN
9160 /*
9161 * Either writing ring buffer from beginning or from end.
9162 * Mixing is not allowed.
9163 */
9164 if (is_write_backward(output_event) != is_write_backward(event))
9165 goto out;
9166
45bfb2e5
PZ
9167 /*
9168 * If both events generate aux data, they must be on the same PMU
9169 */
9170 if (has_aux(event) && has_aux(output_event) &&
9171 event->pmu != output_event->pmu)
9172 goto out;
9173
a4be7c27 9174set:
cdd6c482 9175 mutex_lock(&event->mmap_mutex);
ac9721f3
PZ
9176 /* Can't redirect output if we've got an active mmap() */
9177 if (atomic_read(&event->mmap_count))
9178 goto unlock;
a4be7c27 9179
ac9721f3 9180 if (output_event) {
76369139
FW
9181 /* get the rb we want to redirect to */
9182 rb = ring_buffer_get(output_event);
9183 if (!rb)
ac9721f3 9184 goto unlock;
a4be7c27
PZ
9185 }
9186
b69cf536 9187 ring_buffer_attach(event, rb);
9bb5d40c 9188
a4be7c27 9189 ret = 0;
ac9721f3
PZ
9190unlock:
9191 mutex_unlock(&event->mmap_mutex);
9192
a4be7c27 9193out:
a4be7c27
PZ
9194 return ret;
9195}
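/*
 * Usage note (illustrative sketch): besides PERF_FLAG_FD_OUTPUT at open time,
 * userspace typically reaches this path through the PERF_EVENT_IOC_SET_OUTPUT
 * ioctl, e.g.:
 *
 *	ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output_fd);
 *
 * which redirects fd's samples into output_fd's ring buffer, subject to the
 * same-CPU / same-task / same-clock / same-direction checks above.
 */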
9196
f63a8daa
PZ
9197static void mutex_lock_double(struct mutex *a, struct mutex *b)
9198{
9199 if (b < a)
9200 swap(a, b);
9201
9202 mutex_lock(a);
9203 mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
9204}
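/*
 * Taking the two mutexes in address order gives every caller the same global
 * ordering, so two tasks locking the same pair of contexts from opposite
 * directions cannot ABBA-deadlock; the _nested() annotation keeps lockdep
 * happy since both mutexes share the same lock class.
 */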
9205
34f43927
PZ
9206static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id)
9207{
9208 bool nmi_safe = false;
9209
9210 switch (clk_id) {
9211 case CLOCK_MONOTONIC:
9212 event->clock = &ktime_get_mono_fast_ns;
9213 nmi_safe = true;
9214 break;
9215
9216 case CLOCK_MONOTONIC_RAW:
9217 event->clock = &ktime_get_raw_fast_ns;
9218 nmi_safe = true;
9219 break;
9220
9221 case CLOCK_REALTIME:
9222 event->clock = &ktime_get_real_ns;
9223 break;
9224
9225 case CLOCK_BOOTTIME:
9226 event->clock = &ktime_get_boot_ns;
9227 break;
9228
9229 case CLOCK_TAI:
9230 event->clock = &ktime_get_tai_ns;
9231 break;
9232
9233 default:
9234 return -EINVAL;
9235 }
9236
9237 if (!nmi_safe && !(event->pmu->capabilities & PERF_PMU_CAP_NO_NMI))
9238 return -EINVAL;
9239
9240 return 0;
9241}
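/*
 * Illustrative userspace sketch (not part of this file): selecting the clock
 * used for PERF_SAMPLE_TIME and the mmap time fields:
 *
 *	struct perf_event_attr attr = { .size = sizeof(attr) };
 *	attr.use_clockid = 1;
 *	attr.clockid     = CLOCK_MONOTONIC_RAW;
 *
 * perf_event_open() then routes the attribute through this function; note
 * that clocks which are not NMI-safe are only accepted on PMUs flagged with
 * PERF_PMU_CAP_NO_NMI.
 */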
9242
0793a61d 9243/**
cdd6c482 9244 * sys_perf_event_open - open a performance event, associate it to a task/cpu
9f66a381 9245 *
cdd6c482 9246 * @attr_uptr: event_id type attributes for monitoring/sampling
0793a61d 9247 * @pid: target pid
9f66a381 9248 * @cpu: target cpu
cdd6c482 9249 * @group_fd: group leader event fd
0793a61d 9250 */
cdd6c482
IM
9251SYSCALL_DEFINE5(perf_event_open,
9252 struct perf_event_attr __user *, attr_uptr,
2743a5b0 9253 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
0793a61d 9254{
b04243ef
PZ
9255 struct perf_event *group_leader = NULL, *output_event = NULL;
9256 struct perf_event *event, *sibling;
cdd6c482 9257 struct perf_event_attr attr;
f63a8daa 9258 struct perf_event_context *ctx, *uninitialized_var(gctx);
cdd6c482 9259 struct file *event_file = NULL;
2903ff01 9260 struct fd group = {NULL, 0};
38a81da2 9261 struct task_struct *task = NULL;
89a1e187 9262 struct pmu *pmu;
ea635c64 9263 int event_fd;
b04243ef 9264 int move_group = 0;
dc86cabe 9265 int err;
a21b0b35 9266 int f_flags = O_RDWR;
79dff51e 9267 int cgroup_fd = -1;
0793a61d 9268
2743a5b0 9269 /* for future expandability... */
e5d1367f 9270 if (flags & ~PERF_FLAG_ALL)
2743a5b0
PM
9271 return -EINVAL;
9272
dc86cabe
IM
9273 err = perf_copy_attr(attr_uptr, &attr);
9274 if (err)
9275 return err;
eab656ae 9276
0764771d
PZ
9277 if (!attr.exclude_kernel) {
9278 if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
9279 return -EACCES;
9280 }
9281
df58ab24 9282 if (attr.freq) {
cdd6c482 9283 if (attr.sample_freq > sysctl_perf_event_sample_rate)
df58ab24 9284 return -EINVAL;
0819b2e3
PZ
9285 } else {
9286 if (attr.sample_period & (1ULL << 63))
9287 return -EINVAL;
df58ab24
PZ
9288 }
9289
97c79a38
ACM
9290 if (!attr.sample_max_stack)
9291 attr.sample_max_stack = sysctl_perf_event_max_stack;
9292
e5d1367f
SE
9293 /*
9294 * In cgroup mode, the pid argument is used to pass the fd
9295 * opened to the cgroup directory in cgroupfs. The cpu argument
9296 * designates the cpu on which to monitor threads from that
9297 * cgroup.
9298 */
9299 if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1))
9300 return -EINVAL;
9301
a21b0b35
YD
9302 if (flags & PERF_FLAG_FD_CLOEXEC)
9303 f_flags |= O_CLOEXEC;
9304
9305 event_fd = get_unused_fd_flags(f_flags);
ea635c64
AV
9306 if (event_fd < 0)
9307 return event_fd;
9308
ac9721f3 9309 if (group_fd != -1) {
2903ff01
AV
9310 err = perf_fget_light(group_fd, &group);
9311 if (err)
d14b12d7 9312 goto err_fd;
2903ff01 9313 group_leader = group.file->private_data;
ac9721f3
PZ
9314 if (flags & PERF_FLAG_FD_OUTPUT)
9315 output_event = group_leader;
9316 if (flags & PERF_FLAG_FD_NO_GROUP)
9317 group_leader = NULL;
9318 }
9319
e5d1367f 9320 if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) {
c6be5a5c
PZ
9321 task = find_lively_task_by_vpid(pid);
9322 if (IS_ERR(task)) {
9323 err = PTR_ERR(task);
9324 goto err_group_fd;
9325 }
9326 }
9327
1f4ee503
PZ
9328 if (task && group_leader &&
9329 group_leader->attr.inherit != attr.inherit) {
9330 err = -EINVAL;
9331 goto err_task;
9332 }
9333
fbfc623f
YZ
9334 get_online_cpus();
9335
79c9ce57
PZ
9336 if (task) {
9337 err = mutex_lock_interruptible(&task->signal->cred_guard_mutex);
9338 if (err)
9339 goto err_cpus;
9340
9341 /*
9342 * Reuse ptrace permission checks for now.
9343 *
9344 * We must hold cred_guard_mutex across this and any potential
9345 * perf_install_in_context() call for this new event to
9346 * serialize against exec() altering our credentials (and the
9347 * perf_event_exit_task() that could imply).
9348 */
9349 err = -EACCES;
9350 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
9351 goto err_cred;
9352 }
9353
79dff51e
MF
9354 if (flags & PERF_FLAG_PID_CGROUP)
9355 cgroup_fd = pid;
9356
4dc0da86 9357 event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
79dff51e 9358 NULL, NULL, cgroup_fd);
d14b12d7
SE
9359 if (IS_ERR(event)) {
9360 err = PTR_ERR(event);
79c9ce57 9361 goto err_cred;
d14b12d7
SE
9362 }
9363
53b25335
VW
9364 if (is_sampling_event(event)) {
9365 if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) {
a1396555 9366 err = -EOPNOTSUPP;
53b25335
VW
9367 goto err_alloc;
9368 }
9369 }
9370
89a1e187
PZ
9371 /*
9372 * Special case software events and allow them to be part of
9373 * any hardware group.
9374 */
9375 pmu = event->pmu;
b04243ef 9376
34f43927
PZ
9377 if (attr.use_clockid) {
9378 err = perf_event_set_clock(event, attr.clockid);
9379 if (err)
9380 goto err_alloc;
9381 }
9382
b04243ef
PZ
9383 if (group_leader &&
9384 (is_software_event(event) != is_software_event(group_leader))) {
9385 if (is_software_event(event)) {
9386 /*
9387 * If event and group_leader are not both a software
9388 * event, and event is, then group leader is not.
9389 *
9390 * Allow the addition of software events to !software
9391 * groups, this is safe because software events never
9392 * fail to schedule.
9393 */
9394 pmu = group_leader->pmu;
9395 } else if (is_software_event(group_leader) &&
9396 (group_leader->group_flags & PERF_GROUP_SOFTWARE)) {
9397 /*
9398 * In case the group is a pure software group, and we
9399 * try to add a hardware event, move the whole group to
9400 * the hardware context.
9401 */
9402 move_group = 1;
9403 }
9404 }
89a1e187
PZ
9405
9406 /*
9407 * Get the target context (task or percpu):
9408 */
4af57ef2 9409 ctx = find_get_context(pmu, task, event);
89a1e187
PZ
9410 if (IS_ERR(ctx)) {
9411 err = PTR_ERR(ctx);
c6be5a5c 9412 goto err_alloc;
89a1e187
PZ
9413 }
9414
bed5b25a
AS
9415 if ((pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE) && group_leader) {
9416 err = -EBUSY;
9417 goto err_context;
9418 }
9419
ccff286d 9420 /*
cdd6c482 9421 * Look up the group leader (we will attach this event to it):
04289bb9 9422 */
ac9721f3 9423 if (group_leader) {
dc86cabe 9424 err = -EINVAL;
04289bb9 9425
04289bb9 9426 /*
ccff286d
IM
9427 * Do not allow a recursive hierarchy (this new sibling
9428 * becoming part of another group-sibling):
9429 */
9430 if (group_leader->group_leader != group_leader)
c3f00c70 9431 goto err_context;
34f43927
PZ
9432
9433 /* All events in a group should have the same clock */
9434 if (group_leader->clock != event->clock)
9435 goto err_context;
9436
ccff286d
IM
9437 /*
9438 * Do not allow to attach to a group in a different
9439 * task or CPU context:
04289bb9 9440 */
b04243ef 9441 if (move_group) {
c3c87e77
PZ
9442 /*
9443 * Make sure we're both on the same task, or both
9444 * per-cpu events.
9445 */
9446 if (group_leader->ctx->task != ctx->task)
9447 goto err_context;
9448
9449 /*
9450 * Make sure we're both events for the same CPU;
 9452			 * grouping events for different CPUs is broken, since
9452 * you can never concurrently schedule them anyhow.
9453 */
9454 if (group_leader->cpu != event->cpu)
b04243ef
PZ
9455 goto err_context;
9456 } else {
9457 if (group_leader->ctx != ctx)
9458 goto err_context;
9459 }
9460
3b6f9e5c
PM
9461 /*
9462 * Only a group leader can be exclusive or pinned
9463 */
0d48696f 9464 if (attr.exclusive || attr.pinned)
c3f00c70 9465 goto err_context;
ac9721f3
PZ
9466 }
9467
9468 if (output_event) {
9469 err = perf_event_set_output(event, output_event);
9470 if (err)
c3f00c70 9471 goto err_context;
ac9721f3 9472 }
0793a61d 9473
a21b0b35
YD
9474 event_file = anon_inode_getfile("[perf_event]", &perf_fops, event,
9475 f_flags);
ea635c64
AV
9476 if (IS_ERR(event_file)) {
9477 err = PTR_ERR(event_file);
201c2f85 9478 event_file = NULL;
c3f00c70 9479 goto err_context;
ea635c64 9480 }
9b51f66d 9481
b04243ef 9482 if (move_group) {
f63a8daa 9483 gctx = group_leader->ctx;
f55fc2a5 9484 mutex_lock_double(&gctx->mutex, &ctx->mutex);
84c4e620
PZ
9485 if (gctx->task == TASK_TOMBSTONE) {
9486 err = -ESRCH;
9487 goto err_locked;
9488 }
f55fc2a5
PZ
9489 } else {
9490 mutex_lock(&ctx->mutex);
9491 }
9492
84c4e620
PZ
9493 if (ctx->task == TASK_TOMBSTONE) {
9494 err = -ESRCH;
9495 goto err_locked;
9496 }
9497
a723968c
PZ
9498 if (!perf_event_validate_size(event)) {
9499 err = -E2BIG;
9500 goto err_locked;
9501 }
9502
f55fc2a5
PZ
9503 /*
9504 * Must be under the same ctx::mutex as perf_install_in_context(),
9505 * because we need to serialize with concurrent event creation.
9506 */
9507 if (!exclusive_event_installable(event, ctx)) {
9508 /* exclusive and group stuff are assumed mutually exclusive */
9509 WARN_ON_ONCE(move_group);
f63a8daa 9510
f55fc2a5
PZ
9511 err = -EBUSY;
9512 goto err_locked;
9513 }
f63a8daa 9514
f55fc2a5
PZ
9515 WARN_ON_ONCE(ctx->parent_ctx);
9516
79c9ce57
PZ
9517 /*
9518 * This is the point on no return; we cannot fail hereafter. This is
9519 * where we start modifying current state.
9520 */
9521
f55fc2a5 9522 if (move_group) {
f63a8daa
PZ
9523 /*
9524 * See perf_event_ctx_lock() for comments on the details
9525 * of swizzling perf_event::ctx.
9526 */
45a0e07a 9527 perf_remove_from_context(group_leader, 0);
0231bb53 9528
b04243ef
PZ
9529 list_for_each_entry(sibling, &group_leader->sibling_list,
9530 group_entry) {
45a0e07a 9531 perf_remove_from_context(sibling, 0);
b04243ef
PZ
9532 put_ctx(gctx);
9533 }
b04243ef 9534
f63a8daa
PZ
9535 /*
9536 * Wait for everybody to stop referencing the events through
 9537		 * the old lists, before installing them on the new lists.
9538 */
0cda4c02 9539 synchronize_rcu();
f63a8daa 9540
8f95b435
PZI
9541 /*
9542 * Install the group siblings before the group leader.
9543 *
9544 * Because a group leader will try and install the entire group
 9545		 * (through the sibling list, which is still intact), we can
9546 * end up with siblings installed in the wrong context.
9547 *
9548 * By installing siblings first we NO-OP because they're not
9549 * reachable through the group lists.
9550 */
b04243ef
PZ
9551 list_for_each_entry(sibling, &group_leader->sibling_list,
9552 group_entry) {
8f95b435 9553 perf_event__state_init(sibling);
9fc81d87 9554 perf_install_in_context(ctx, sibling, sibling->cpu);
b04243ef
PZ
9555 get_ctx(ctx);
9556 }
8f95b435
PZI
9557
9558 /*
 9559		 * Removing from the context ends up with a disabled
 9560		 * event. What we want here is the event in its initial
 9561		 * startup state, ready to be added into the new context.
9562 */
9563 perf_event__state_init(group_leader);
9564 perf_install_in_context(ctx, group_leader, group_leader->cpu);
9565 get_ctx(ctx);
b04243ef 9566
f55fc2a5
PZ
9567 /*
9568 * Now that all events are installed in @ctx, nothing
9569 * references @gctx anymore, so drop the last reference we have
9570 * on it.
9571 */
9572 put_ctx(gctx);
bed5b25a
AS
9573 }
9574
f73e22ab
PZ
9575 /*
9576 * Precalculate sample_data sizes; do while holding ctx::mutex such
9577 * that we're serialized against further additions and before
9578 * perf_install_in_context() which is the point the event is active and
9579 * can use these values.
9580 */
9581 perf_event__header_size(event);
9582 perf_event__id_header_size(event);
9583
78cd2c74
PZ
9584 event->owner = current;
9585
e2d37cd2 9586 perf_install_in_context(ctx, event, event->cpu);
fe4b04fa 9587 perf_unpin_context(ctx);
f63a8daa 9588
f55fc2a5 9589 if (move_group)
f63a8daa 9590 mutex_unlock(&gctx->mutex);
d859e29f 9591 mutex_unlock(&ctx->mutex);
9b51f66d 9592
79c9ce57
PZ
9593 if (task) {
9594 mutex_unlock(&task->signal->cred_guard_mutex);
9595 put_task_struct(task);
9596 }
9597
fbfc623f
YZ
9598 put_online_cpus();
9599
cdd6c482
IM
9600 mutex_lock(&current->perf_event_mutex);
9601 list_add_tail(&event->owner_entry, &current->perf_event_list);
9602 mutex_unlock(&current->perf_event_mutex);
082ff5a2 9603
8a49542c
PZ
9604 /*
9605 * Drop the reference on the group_event after placing the
9606 * new event on the sibling_list. This ensures destruction
9607 * of the group leader will find the pointer to itself in
9608 * perf_group_detach().
9609 */
2903ff01 9610 fdput(group);
ea635c64
AV
9611 fd_install(event_fd, event_file);
9612 return event_fd;
0793a61d 9613
f55fc2a5
PZ
9614err_locked:
9615 if (move_group)
9616 mutex_unlock(&gctx->mutex);
9617 mutex_unlock(&ctx->mutex);
9618/* err_file: */
9619 fput(event_file);
c3f00c70 9620err_context:
fe4b04fa 9621 perf_unpin_context(ctx);
ea635c64 9622 put_ctx(ctx);
c6be5a5c 9623err_alloc:
13005627
PZ
9624 /*
9625 * If event_file is set, the fput() above will have called ->release()
9626 * and that will take care of freeing the event.
9627 */
9628 if (!event_file)
9629 free_event(event);
79c9ce57
PZ
9630err_cred:
9631 if (task)
9632 mutex_unlock(&task->signal->cred_guard_mutex);
1f4ee503 9633err_cpus:
fbfc623f 9634 put_online_cpus();
1f4ee503 9635err_task:
e7d0bc04
PZ
9636 if (task)
9637 put_task_struct(task);
89a1e187 9638err_group_fd:
2903ff01 9639 fdput(group);
ea635c64
AV
9640err_fd:
9641 put_unused_fd(event_fd);
dc86cabe 9642 return err;
0793a61d
TG
9643}
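/*
 * Minimal userspace sketch of the syscall above (illustrative only, in the
 * style of the perf_event_open(2) man page example):
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_HARDWARE,
 *		.size		= sizeof(attr),
 *		.config		= PERF_COUNT_HW_INSTRUCTIONS,
 *		.disabled	= 1,
 *		.exclude_kernel	= 1,
 *	};
 *	long long count;
 *	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *
 *	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
 *	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *	// ... run the workload ...
 *	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 *	read(fd, &count, sizeof(count));
 *
 * pid == 0 / cpu == -1 measures the calling thread on any CPU; group_fd == -1
 * creates a new group with this event as leader.
 */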
9644
fb0459d7
AV
9645/**
9646 * perf_event_create_kernel_counter
9647 *
9648 * @attr: attributes of the counter to create
 9649 * @cpu: cpu on which the counter is bound
38a81da2 9650 * @task: task to profile (NULL for percpu)
fb0459d7
AV
9651 */
9652struct perf_event *
9653perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
38a81da2 9654 struct task_struct *task,
4dc0da86
AK
9655 perf_overflow_handler_t overflow_handler,
9656 void *context)
fb0459d7 9657{
fb0459d7 9658 struct perf_event_context *ctx;
c3f00c70 9659 struct perf_event *event;
fb0459d7 9660 int err;
d859e29f 9661
fb0459d7
AV
9662 /*
9663 * Get the target context (task or percpu):
9664 */
d859e29f 9665
4dc0da86 9666 event = perf_event_alloc(attr, cpu, task, NULL, NULL,
79dff51e 9667 overflow_handler, context, -1);
c3f00c70
PZ
9668 if (IS_ERR(event)) {
9669 err = PTR_ERR(event);
9670 goto err;
9671 }
d859e29f 9672
f8697762 9673 /* Mark owner so we could distinguish it from user events. */
63b6da39 9674 event->owner = TASK_TOMBSTONE;
f8697762 9675
4af57ef2 9676 ctx = find_get_context(event->pmu, task, event);
c6567f64
FW
9677 if (IS_ERR(ctx)) {
9678 err = PTR_ERR(ctx);
c3f00c70 9679 goto err_free;
d859e29f 9680 }
fb0459d7 9681
fb0459d7
AV
9682 WARN_ON_ONCE(ctx->parent_ctx);
9683 mutex_lock(&ctx->mutex);
84c4e620
PZ
9684 if (ctx->task == TASK_TOMBSTONE) {
9685 err = -ESRCH;
9686 goto err_unlock;
9687 }
9688
bed5b25a 9689 if (!exclusive_event_installable(event, ctx)) {
bed5b25a 9690 err = -EBUSY;
84c4e620 9691 goto err_unlock;
bed5b25a
AS
9692 }
9693
fb0459d7 9694 perf_install_in_context(ctx, event, cpu);
fe4b04fa 9695 perf_unpin_context(ctx);
fb0459d7
AV
9696 mutex_unlock(&ctx->mutex);
9697
fb0459d7
AV
9698 return event;
9699
84c4e620
PZ
9700err_unlock:
9701 mutex_unlock(&ctx->mutex);
9702 perf_unpin_context(ctx);
9703 put_ctx(ctx);
c3f00c70
PZ
9704err_free:
9705 free_event(event);
9706err:
c6567f64 9707 return ERR_PTR(err);
9b51f66d 9708}
fb0459d7 9709EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
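/*
 * In-kernel usage sketch (illustrative; the handler name is hypothetical):
 *
 *	static struct perf_event_attr wd_attr = {
 *		.type		= PERF_TYPE_HARDWARE,
 *		.config		= PERF_COUNT_HW_CPU_CYCLES,
 *		.size		= sizeof(struct perf_event_attr),
 *		.pinned		= 1,
 *		.disabled	= 1,
 *	};
 *
 *	event = perf_event_create_kernel_counter(&wd_attr, cpu, NULL,
 *						 my_overflow_handler, NULL);
 *	if (IS_ERR(event))
 *		return PTR_ERR(event);
 *
 * is roughly how in-kernel users such as the hardlockup watchdog create
 * per-CPU counters without going through the syscall / fd interface.
 */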
9b51f66d 9710
0cda4c02
YZ
9711void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
9712{
9713 struct perf_event_context *src_ctx;
9714 struct perf_event_context *dst_ctx;
9715 struct perf_event *event, *tmp;
9716 LIST_HEAD(events);
9717
9718 src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx;
9719 dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx;
9720
f63a8daa
PZ
9721 /*
9722 * See perf_event_ctx_lock() for comments on the details
9723 * of swizzling perf_event::ctx.
9724 */
9725 mutex_lock_double(&src_ctx->mutex, &dst_ctx->mutex);
0cda4c02
YZ
9726 list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
9727 event_entry) {
45a0e07a 9728 perf_remove_from_context(event, 0);
9a545de0 9729 unaccount_event_cpu(event, src_cpu);
0cda4c02 9730 put_ctx(src_ctx);
9886167d 9731 list_add(&event->migrate_entry, &events);
0cda4c02 9732 }
0cda4c02 9733
8f95b435
PZI
9734 /*
9735 * Wait for the events to quiesce before re-instating them.
9736 */
0cda4c02
YZ
9737 synchronize_rcu();
9738
8f95b435
PZI
9739 /*
9740 * Re-instate events in 2 passes.
9741 *
9742 * Skip over group leaders and only install siblings on this first
 9743	 * pass; siblings will not get enabled without a leader, however a
9744 * leader will enable its siblings, even if those are still on the old
9745 * context.
9746 */
9747 list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
9748 if (event->group_leader == event)
9749 continue;
9750
9751 list_del(&event->migrate_entry);
9752 if (event->state >= PERF_EVENT_STATE_OFF)
9753 event->state = PERF_EVENT_STATE_INACTIVE;
9754 account_event_cpu(event, dst_cpu);
9755 perf_install_in_context(dst_ctx, event, dst_cpu);
9756 get_ctx(dst_ctx);
9757 }
9758
9759 /*
9760 * Once all the siblings are setup properly, install the group leaders
9761 * to make it go.
9762 */
9886167d
PZ
9763 list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
9764 list_del(&event->migrate_entry);
0cda4c02
YZ
9765 if (event->state >= PERF_EVENT_STATE_OFF)
9766 event->state = PERF_EVENT_STATE_INACTIVE;
9a545de0 9767 account_event_cpu(event, dst_cpu);
0cda4c02
YZ
9768 perf_install_in_context(dst_ctx, event, dst_cpu);
9769 get_ctx(dst_ctx);
9770 }
9771 mutex_unlock(&dst_ctx->mutex);
f63a8daa 9772 mutex_unlock(&src_ctx->mutex);
0cda4c02
YZ
9773}
9774EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
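/*
 * Typical caller (illustrative): a package-scope / uncore PMU driver whose
 * designated reader CPU goes offline calls
 *
 *	perf_pmu_migrate_context(pmu, dying_cpu, new_cpu);
 *
 * so that its events keep counting on a surviving CPU of the same package.
 */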
9775
cdd6c482 9776static void sync_child_event(struct perf_event *child_event,
38b200d6 9777 struct task_struct *child)
d859e29f 9778{
cdd6c482 9779 struct perf_event *parent_event = child_event->parent;
8bc20959 9780 u64 child_val;
d859e29f 9781
cdd6c482
IM
9782 if (child_event->attr.inherit_stat)
9783 perf_event_read_event(child_event, child);
38b200d6 9784
b5e58793 9785 child_val = perf_event_count(child_event);
d859e29f
PM
9786
9787 /*
9788 * Add back the child's count to the parent's count:
9789 */
a6e6dea6 9790 atomic64_add(child_val, &parent_event->child_count);
cdd6c482
IM
9791 atomic64_add(child_event->total_time_enabled,
9792 &parent_event->child_total_time_enabled);
9793 atomic64_add(child_event->total_time_running,
9794 &parent_event->child_total_time_running);
d859e29f
PM
9795}
9796
9b51f66d 9797static void
8ba289b8
PZ
9798perf_event_exit_event(struct perf_event *child_event,
9799 struct perf_event_context *child_ctx,
9800 struct task_struct *child)
9b51f66d 9801{
8ba289b8
PZ
9802 struct perf_event *parent_event = child_event->parent;
9803
1903d50c
PZ
9804 /*
9805 * Do not destroy the 'original' grouping; because of the context
9806 * switch optimization the original events could've ended up in a
9807 * random child task.
9808 *
9809 * If we were to destroy the original group, all group related
9810 * operations would cease to function properly after this random
9811 * child dies.
9812 *
 9813	 * Do destroy all inherited groups; we don't care about those
9814 * and being thorough is better.
9815 */
32132a3d
PZ
9816 raw_spin_lock_irq(&child_ctx->lock);
9817 WARN_ON_ONCE(child_ctx->is_active);
9818
8ba289b8 9819 if (parent_event)
32132a3d
PZ
9820 perf_group_detach(child_event);
9821 list_del_event(child_event, child_ctx);
a69b0ca4 9822 child_event->state = PERF_EVENT_STATE_EXIT; /* is_event_hup() */
32132a3d 9823 raw_spin_unlock_irq(&child_ctx->lock);
0cc0c027 9824
9b51f66d 9825 /*
8ba289b8 9826 * Parent events are governed by their filedesc, retain them.
9b51f66d 9827 */
8ba289b8 9828 if (!parent_event) {
179033b3 9829 perf_event_wakeup(child_event);
8ba289b8 9830 return;
4bcf349a 9831 }
8ba289b8
PZ
9832 /*
9833 * Child events can be cleaned up.
9834 */
9835
9836 sync_child_event(child_event, child);
9837
9838 /*
9839 * Remove this event from the parent's list
9840 */
9841 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
9842 mutex_lock(&parent_event->child_mutex);
9843 list_del_init(&child_event->child_list);
9844 mutex_unlock(&parent_event->child_mutex);
9845
9846 /*
9847 * Kick perf_poll() for is_event_hup().
9848 */
9849 perf_event_wakeup(parent_event);
9850 free_event(child_event);
9851 put_event(parent_event);
9b51f66d
IM
9852}
9853
8dc85d54 9854static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
9b51f66d 9855{
211de6eb 9856 struct perf_event_context *child_ctx, *clone_ctx = NULL;
63b6da39 9857 struct perf_event *child_event, *next;
63b6da39
PZ
9858
9859 WARN_ON_ONCE(child != current);
9b51f66d 9860
6a3351b6 9861 child_ctx = perf_pin_task_context(child, ctxn);
63b6da39 9862 if (!child_ctx)
9b51f66d
IM
9863 return;
9864
ad3a37de 9865 /*
6a3351b6
PZ
 9866	 * In order to reduce the amount of trickery in ctx tear-down, we hold
9867 * ctx::mutex over the entire thing. This serializes against almost
9868 * everything that wants to access the ctx.
9869 *
9870 * The exception is sys_perf_event_open() /
 9871	 * perf_event_create_kernel_counter() which does find_get_context()
9872 * without ctx::mutex (it cannot because of the move_group double mutex
9873 * lock thing). See the comments in perf_install_in_context().
ad3a37de 9874 */
6a3351b6 9875 mutex_lock(&child_ctx->mutex);
c93f7669
PM
9876
9877 /*
6a3351b6
PZ
9878 * In a single ctx::lock section, de-schedule the events and detach the
9879 * context from the task such that we cannot ever get it scheduled back
9880 * in.
c93f7669 9881 */
6a3351b6 9882 raw_spin_lock_irq(&child_ctx->lock);
63b6da39 9883 task_ctx_sched_out(__get_cpu_context(child_ctx), child_ctx);
4a1c0f26 9884
71a851b4 9885 /*
63b6da39
PZ
9886 * Now that the context is inactive, destroy the task <-> ctx relation
9887 * and mark the context dead.
71a851b4 9888 */
63b6da39
PZ
9889 RCU_INIT_POINTER(child->perf_event_ctxp[ctxn], NULL);
9890 put_ctx(child_ctx); /* cannot be last */
9891 WRITE_ONCE(child_ctx->task, TASK_TOMBSTONE);
9892 put_task_struct(current); /* cannot be last */
4a1c0f26 9893
211de6eb 9894 clone_ctx = unclone_ctx(child_ctx);
6a3351b6 9895 raw_spin_unlock_irq(&child_ctx->lock);
9f498cc5 9896
211de6eb
PZ
9897 if (clone_ctx)
9898 put_ctx(clone_ctx);
4a1c0f26 9899
9f498cc5 9900 /*
cdd6c482
IM
9901 * Report the task dead after unscheduling the events so that we
9902 * won't get any samples after PERF_RECORD_EXIT. We can however still
9903 * get a few PERF_RECORD_READ events.
9f498cc5 9904 */
cdd6c482 9905 perf_event_task(child, child_ctx, 0);
a63eaf34 9906
ebf905fc 9907 list_for_each_entry_safe(child_event, next, &child_ctx->event_list, event_entry)
8ba289b8 9908 perf_event_exit_event(child_event, child_ctx, child);
8bc20959 9909
a63eaf34
PM
9910 mutex_unlock(&child_ctx->mutex);
9911
9912 put_ctx(child_ctx);
9b51f66d
IM
9913}
9914
8dc85d54
PZ
9915/*
9916 * When a child task exits, feed back event values to parent events.
79c9ce57
PZ
9917 *
9918 * Can be called with cred_guard_mutex held when called from
9919 * install_exec_creds().
8dc85d54
PZ
9920 */
9921void perf_event_exit_task(struct task_struct *child)
9922{
8882135b 9923 struct perf_event *event, *tmp;
8dc85d54
PZ
9924 int ctxn;
9925
8882135b
PZ
9926 mutex_lock(&child->perf_event_mutex);
9927 list_for_each_entry_safe(event, tmp, &child->perf_event_list,
9928 owner_entry) {
9929 list_del_init(&event->owner_entry);
9930
9931 /*
9932 * Ensure the list deletion is visible before we clear
9933 * the owner, closes a race against perf_release() where
9934 * we need to serialize on the owner->perf_event_mutex.
9935 */
f47c02c0 9936 smp_store_release(&event->owner, NULL);
8882135b
PZ
9937 }
9938 mutex_unlock(&child->perf_event_mutex);
9939
8dc85d54
PZ
9940 for_each_task_context_nr(ctxn)
9941 perf_event_exit_task_context(child, ctxn);
4e93ad60
JO
9942
9943 /*
 9944	 * The perf_event_exit_task_context() calls above invoke perf_event_task()
 9945	 * with the child's task_ctx, which generates EXIT events for
9946 * child contexts and sets child->perf_event_ctxp[] to NULL.
9947 * At this point we need to send EXIT events to cpu contexts.
9948 */
9949 perf_event_task(child, NULL, 0);
8dc85d54
PZ
9950}
9951
889ff015
FW
9952static void perf_free_event(struct perf_event *event,
9953 struct perf_event_context *ctx)
9954{
9955 struct perf_event *parent = event->parent;
9956
9957 if (WARN_ON_ONCE(!parent))
9958 return;
9959
9960 mutex_lock(&parent->child_mutex);
9961 list_del_init(&event->child_list);
9962 mutex_unlock(&parent->child_mutex);
9963
a6fa941d 9964 put_event(parent);
889ff015 9965
652884fe 9966 raw_spin_lock_irq(&ctx->lock);
8a49542c 9967 perf_group_detach(event);
889ff015 9968 list_del_event(event, ctx);
652884fe 9969 raw_spin_unlock_irq(&ctx->lock);
889ff015
FW
9970 free_event(event);
9971}
9972
bbbee908 9973/*
652884fe 9974 * Free an unexposed, unused context as created by inheritance by
8dc85d54 9975 * perf_event_init_task below, used by fork() in case of fail.
652884fe
PZ
9976 *
9977 * Not all locks are strictly required, but take them anyway to be nice and
9978 * help out with the lockdep assertions.
bbbee908 9979 */
cdd6c482 9980void perf_event_free_task(struct task_struct *task)
bbbee908 9981{
8dc85d54 9982 struct perf_event_context *ctx;
cdd6c482 9983 struct perf_event *event, *tmp;
8dc85d54 9984 int ctxn;
bbbee908 9985
8dc85d54
PZ
9986 for_each_task_context_nr(ctxn) {
9987 ctx = task->perf_event_ctxp[ctxn];
9988 if (!ctx)
9989 continue;
bbbee908 9990
8dc85d54 9991 mutex_lock(&ctx->mutex);
bbbee908 9992again:
8dc85d54
PZ
9993 list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
9994 group_entry)
9995 perf_free_event(event, ctx);
bbbee908 9996
8dc85d54
PZ
9997 list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
9998 group_entry)
9999 perf_free_event(event, ctx);
bbbee908 10000
8dc85d54
PZ
10001 if (!list_empty(&ctx->pinned_groups) ||
10002 !list_empty(&ctx->flexible_groups))
10003 goto again;
bbbee908 10004
8dc85d54 10005 mutex_unlock(&ctx->mutex);
bbbee908 10006
8dc85d54
PZ
10007 put_ctx(ctx);
10008 }
889ff015
FW
10009}
10010
4e231c79
PZ
10011void perf_event_delayed_put(struct task_struct *task)
10012{
10013 int ctxn;
10014
10015 for_each_task_context_nr(ctxn)
10016 WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
10017}
10018
e03e7ee3 10019struct file *perf_event_get(unsigned int fd)
ffe8690c 10020{
e03e7ee3 10021 struct file *file;
ffe8690c 10022
e03e7ee3
AS
10023 file = fget_raw(fd);
10024 if (!file)
10025 return ERR_PTR(-EBADF);
ffe8690c 10026
e03e7ee3
AS
10027 if (file->f_op != &perf_fops) {
10028 fput(file);
10029 return ERR_PTR(-EBADF);
10030 }
ffe8690c 10031
e03e7ee3 10032 return file;
ffe8690c
KX
10033}
10034
10035const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
10036{
10037 if (!event)
10038 return ERR_PTR(-EINVAL);
10039
10040 return &event->attr;
10041}
10042
97dee4f3
PZ
10043/*
10044 * inherit an event from parent task to child task:
10045 */
10046static struct perf_event *
10047inherit_event(struct perf_event *parent_event,
10048 struct task_struct *parent,
10049 struct perf_event_context *parent_ctx,
10050 struct task_struct *child,
10051 struct perf_event *group_leader,
10052 struct perf_event_context *child_ctx)
10053{
1929def9 10054 enum perf_event_active_state parent_state = parent_event->state;
97dee4f3 10055 struct perf_event *child_event;
cee010ec 10056 unsigned long flags;
97dee4f3
PZ
10057
10058 /*
10059 * Instead of creating recursive hierarchies of events,
10060 * we link inherited events back to the original parent,
10061	 * which is guaranteed to have a filp, which we use as the reference
10062 * count:
10063 */
10064 if (parent_event->parent)
10065 parent_event = parent_event->parent;
10066
10067 child_event = perf_event_alloc(&parent_event->attr,
10068 parent_event->cpu,
d580ff86 10069 child,
97dee4f3 10070 group_leader, parent_event,
79dff51e 10071 NULL, NULL, -1);
97dee4f3
PZ
10072 if (IS_ERR(child_event))
10073 return child_event;
a6fa941d 10074
c6e5b732
PZ
10075 /*
10076 * is_orphaned_event() and list_add_tail(&parent_event->child_list)
10077 * must be under the same lock in order to serialize against
10078 * perf_event_release_kernel(), such that either we must observe
10079 * is_orphaned_event() or they will observe us on the child_list.
10080 */
10081 mutex_lock(&parent_event->child_mutex);
fadfe7be
JO
10082 if (is_orphaned_event(parent_event) ||
10083 !atomic_long_inc_not_zero(&parent_event->refcount)) {
c6e5b732 10084 mutex_unlock(&parent_event->child_mutex);
a6fa941d
AV
10085 free_event(child_event);
10086 return NULL;
10087 }
10088
97dee4f3
PZ
10089 get_ctx(child_ctx);
10090
10091 /*
10092 * Make the child state follow the state of the parent event,
10093 * not its attr.disabled bit. We hold the parent's mutex,
10094 * so we won't race with perf_event_{en, dis}able_family.
10095 */
1929def9 10096 if (parent_state >= PERF_EVENT_STATE_INACTIVE)
97dee4f3
PZ
10097 child_event->state = PERF_EVENT_STATE_INACTIVE;
10098 else
10099 child_event->state = PERF_EVENT_STATE_OFF;
10100
10101 if (parent_event->attr.freq) {
10102 u64 sample_period = parent_event->hw.sample_period;
10103 struct hw_perf_event *hwc = &child_event->hw;
10104
10105 hwc->sample_period = sample_period;
10106 hwc->last_period = sample_period;
10107
10108 local64_set(&hwc->period_left, sample_period);
10109 }
10110
10111 child_event->ctx = child_ctx;
10112 child_event->overflow_handler = parent_event->overflow_handler;
4dc0da86
AK
10113 child_event->overflow_handler_context
10114 = parent_event->overflow_handler_context;
97dee4f3 10115
614b6780
TG
10116 /*
10117 * Precalculate sample_data sizes
10118 */
10119 perf_event__header_size(child_event);
6844c09d 10120 perf_event__id_header_size(child_event);
614b6780 10121
97dee4f3
PZ
10122 /*
10123 * Link it up in the child's context:
10124 */
cee010ec 10125 raw_spin_lock_irqsave(&child_ctx->lock, flags);
97dee4f3 10126 add_event_to_ctx(child_event, child_ctx);
cee010ec 10127 raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
97dee4f3 10128
97dee4f3
PZ
10129 /*
10130 * Link this into the parent event's child list
10131 */
97dee4f3
PZ
10132 list_add_tail(&child_event->child_list, &parent_event->child_list);
10133 mutex_unlock(&parent_event->child_mutex);
10134
10135 return child_event;
10136}
10137
10138static int inherit_group(struct perf_event *parent_event,
10139 struct task_struct *parent,
10140 struct perf_event_context *parent_ctx,
10141 struct task_struct *child,
10142 struct perf_event_context *child_ctx)
10143{
10144 struct perf_event *leader;
10145 struct perf_event *sub;
10146 struct perf_event *child_ctr;
10147
10148 leader = inherit_event(parent_event, parent, parent_ctx,
10149 child, NULL, child_ctx);
10150 if (IS_ERR(leader))
10151 return PTR_ERR(leader);
10152 list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
10153 child_ctr = inherit_event(sub, parent, parent_ctx,
10154 child, leader, child_ctx);
10155 if (IS_ERR(child_ctr))
10156 return PTR_ERR(child_ctr);
10157 }
10158 return 0;
889ff015
FW
10159}
10160
10161static int
10162inherit_task_group(struct perf_event *event, struct task_struct *parent,
10163 struct perf_event_context *parent_ctx,
8dc85d54 10164 struct task_struct *child, int ctxn,
889ff015
FW
10165 int *inherited_all)
10166{
10167 int ret;
8dc85d54 10168 struct perf_event_context *child_ctx;
889ff015
FW
10169
10170 if (!event->attr.inherit) {
10171 *inherited_all = 0;
10172 return 0;
bbbee908
PZ
10173 }
10174
fe4b04fa 10175 child_ctx = child->perf_event_ctxp[ctxn];
889ff015
FW
10176 if (!child_ctx) {
10177 /*
10178 * This is executed from the parent task context, so
10179 * inherit events that have been marked for cloning.
10180 * First allocate and initialize a context for the
10181 * child.
10182 */
bbbee908 10183
734df5ab 10184 child_ctx = alloc_perf_context(parent_ctx->pmu, child);
889ff015
FW
10185 if (!child_ctx)
10186 return -ENOMEM;
bbbee908 10187
8dc85d54 10188 child->perf_event_ctxp[ctxn] = child_ctx;
889ff015
FW
10189 }
10190
10191 ret = inherit_group(event, parent, parent_ctx,
10192 child, child_ctx);
10193
10194 if (ret)
10195 *inherited_all = 0;
10196
10197 return ret;
bbbee908
PZ
10198}
10199
9b51f66d 10200/*
cdd6c482 10201 * Initialize the perf_event context in task_struct
9b51f66d 10202 */
985c8dcb 10203static int perf_event_init_context(struct task_struct *child, int ctxn)
9b51f66d 10204{
889ff015 10205 struct perf_event_context *child_ctx, *parent_ctx;
cdd6c482
IM
10206 struct perf_event_context *cloned_ctx;
10207 struct perf_event *event;
9b51f66d 10208 struct task_struct *parent = current;
564c2b21 10209 int inherited_all = 1;
dddd3379 10210 unsigned long flags;
6ab423e0 10211 int ret = 0;
9b51f66d 10212
8dc85d54 10213 if (likely(!parent->perf_event_ctxp[ctxn]))
6ab423e0
PZ
10214 return 0;
10215
ad3a37de 10216 /*
25346b93
PM
10217 * If the parent's context is a clone, pin it so it won't get
10218 * swapped under us.
ad3a37de 10219 */
8dc85d54 10220 parent_ctx = perf_pin_task_context(parent, ctxn);
ffb4ef21
PZ
10221 if (!parent_ctx)
10222 return 0;
25346b93 10223
ad3a37de
PM
10224 /*
10225 * No need to check if parent_ctx != NULL here; since we saw
10226 * it non-NULL earlier, the only reason for it to become NULL
10227 * is if we exit, and since we're currently in the middle of
10228 * a fork we can't be exiting at the same time.
10229 */
ad3a37de 10230
9b51f66d
IM
10231 /*
10232 * Lock the parent list. No need to lock the child - not PID
10233 * hashed yet and not running, so nobody can access it.
10234 */
d859e29f 10235 mutex_lock(&parent_ctx->mutex);
9b51f66d
IM
10236
10237 /*
10238	 * We don't have to disable NMIs - we are only looking at
10239 * the list, not manipulating it:
10240 */
889ff015 10241 list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
8dc85d54
PZ
10242 ret = inherit_task_group(event, parent, parent_ctx,
10243 child, ctxn, &inherited_all);
889ff015
FW
10244 if (ret)
10245 break;
10246 }
b93f7978 10247
dddd3379
TG
10248 /*
10249	 * We can't hold ctx->lock when iterating the ->flexible_groups list due
10250 * to allocations, but we need to prevent rotation because
10251 * rotate_ctx() will change the list from interrupt context.
10252 */
10253 raw_spin_lock_irqsave(&parent_ctx->lock, flags);
10254 parent_ctx->rotate_disable = 1;
10255 raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
10256
889ff015 10257 list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
8dc85d54
PZ
10258 ret = inherit_task_group(event, parent, parent_ctx,
10259 child, ctxn, &inherited_all);
889ff015 10260 if (ret)
9b51f66d 10261 break;
564c2b21
PM
10262 }
10263
dddd3379
TG
10264 raw_spin_lock_irqsave(&parent_ctx->lock, flags);
10265 parent_ctx->rotate_disable = 0;
dddd3379 10266
8dc85d54 10267 child_ctx = child->perf_event_ctxp[ctxn];
889ff015 10268
05cbaa28 10269 if (child_ctx && inherited_all) {
564c2b21
PM
10270 /*
10271 * Mark the child context as a clone of the parent
10272 * context, or of whatever the parent is a clone of.
c5ed5145
PZ
10273 *
10274 * Note that if the parent is a clone, the holding of
10275 * parent_ctx->lock avoids it from being uncloned.
564c2b21 10276 */
c5ed5145 10277 cloned_ctx = parent_ctx->parent_ctx;
ad3a37de
PM
10278 if (cloned_ctx) {
10279 child_ctx->parent_ctx = cloned_ctx;
25346b93 10280 child_ctx->parent_gen = parent_ctx->parent_gen;
564c2b21
PM
10281 } else {
10282 child_ctx->parent_ctx = parent_ctx;
10283 child_ctx->parent_gen = parent_ctx->generation;
10284 }
10285 get_ctx(child_ctx->parent_ctx);
9b51f66d
IM
10286 }
10287
c5ed5145 10288 raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
d859e29f 10289 mutex_unlock(&parent_ctx->mutex);
6ab423e0 10290
25346b93 10291 perf_unpin_context(parent_ctx);
fe4b04fa 10292 put_ctx(parent_ctx);
ad3a37de 10293
6ab423e0 10294 return ret;
9b51f66d
IM
10295}
10296
8dc85d54
PZ
10297/*
10298 * Initialize the perf_event context in task_struct
10299 */
10300int perf_event_init_task(struct task_struct *child)
10301{
10302 int ctxn, ret;
10303
8550d7cb
ON
10304 memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
10305 mutex_init(&child->perf_event_mutex);
10306 INIT_LIST_HEAD(&child->perf_event_list);
10307
8dc85d54
PZ
10308 for_each_task_context_nr(ctxn) {
10309 ret = perf_event_init_context(child, ctxn);
6c72e350
PZ
10310 if (ret) {
10311 perf_event_free_task(child);
8dc85d54 10312 return ret;
6c72e350 10313 }
8dc85d54
PZ
10314 }
10315
10316 return 0;
10317}
10318
220b140b
PM
10319static void __init perf_event_init_all_cpus(void)
10320{
b28ab83c 10321 struct swevent_htable *swhash;
220b140b 10322 int cpu;
220b140b
PM
10323
10324 for_each_possible_cpu(cpu) {
b28ab83c
PZ
10325 swhash = &per_cpu(swevent_htable, cpu);
10326 mutex_init(&swhash->hlist_mutex);
2fde4f94 10327 INIT_LIST_HEAD(&per_cpu(active_ctx_list, cpu));
f2fb6bef
KL
10328
10329 INIT_LIST_HEAD(&per_cpu(pmu_sb_events.list, cpu));
10330 raw_spin_lock_init(&per_cpu(pmu_sb_events.lock, cpu));
220b140b
PM
10331 }
10332}
10333
0db0628d 10334static void perf_event_init_cpu(int cpu)
0793a61d 10335{
108b02cf 10336 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
0793a61d 10337
b28ab83c 10338 mutex_lock(&swhash->hlist_mutex);
059fcd8c 10339 if (swhash->hlist_refcount > 0 && !swevent_hlist_deref(swhash)) {
76e1d904
FW
10340 struct swevent_hlist *hlist;
10341
b28ab83c
PZ
10342 hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
10343 WARN_ON(!hlist);
10344 rcu_assign_pointer(swhash->swevent_hlist, hlist);
76e1d904 10345 }
b28ab83c 10346 mutex_unlock(&swhash->hlist_mutex);
0793a61d
TG
10347}
10348
2965faa5 10349#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
108b02cf 10350static void __perf_event_exit_context(void *__info)
0793a61d 10351{
108b02cf 10352 struct perf_event_context *ctx = __info;
fae3fde6
PZ
10353 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
10354 struct perf_event *event;
0793a61d 10355
fae3fde6
PZ
10356 raw_spin_lock(&ctx->lock);
10357 list_for_each_entry(event, &ctx->event_list, event_entry)
45a0e07a 10358 __perf_remove_from_context(event, cpuctx, ctx, (void *)DETACH_GROUP);
fae3fde6 10359 raw_spin_unlock(&ctx->lock);
0793a61d 10360}
108b02cf
PZ
10361
10362static void perf_event_exit_cpu_context(int cpu)
10363{
10364 struct perf_event_context *ctx;
10365 struct pmu *pmu;
10366 int idx;
10367
10368 idx = srcu_read_lock(&pmus_srcu);
10369 list_for_each_entry_rcu(pmu, &pmus, entry) {
917bdd1c 10370 ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
108b02cf
PZ
10371
10372 mutex_lock(&ctx->mutex);
10373 smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
10374 mutex_unlock(&ctx->mutex);
10375 }
10376 srcu_read_unlock(&pmus_srcu, idx);
108b02cf
PZ
10377}
10378
cdd6c482 10379static void perf_event_exit_cpu(int cpu)
0793a61d 10380{
e3703f8c 10381 perf_event_exit_cpu_context(cpu);
0793a61d
TG
10382}
10383#else
cdd6c482 10384static inline void perf_event_exit_cpu(int cpu) { }
0793a61d
TG
10385#endif
10386
c277443c
PZ
10387static int
10388perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
10389{
10390 int cpu;
10391
10392 for_each_online_cpu(cpu)
10393 perf_event_exit_cpu(cpu);
10394
10395 return NOTIFY_OK;
10396}
10397
10398/*
10399 * Run the perf reboot notifier at the very last possible moment so that
10400 * the generic watchdog code runs as long as possible.
10401 */
10402static struct notifier_block perf_reboot_notifier = {
10403 .notifier_call = perf_reboot,
10404 .priority = INT_MIN,
10405};
10406
0db0628d 10407static int
0793a61d
TG
10408perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
10409{
10410 unsigned int cpu = (long)hcpu;
10411
4536e4d1 10412 switch (action & ~CPU_TASKS_FROZEN) {
0793a61d
TG
10413
10414 case CPU_UP_PREPARE:
1dcaac1c
PZ
10415 /*
10416 * This must be done before the CPU comes alive, because the
10417 * moment we can run tasks we can encounter (software) events.
10418 *
10419 * Specifically, someone can have inherited events on kthreadd
10420 * or a pre-existing worker thread that gets re-bound.
10421 */
cdd6c482 10422 perf_event_init_cpu(cpu);
0793a61d
TG
10423 break;
10424
10425 case CPU_DOWN_PREPARE:
1dcaac1c
PZ
10426 /*
10427 * This must be done before the CPU dies because after that an
10428 * active event might want to IPI the CPU and that'll not work
10429 * so great for dead CPUs.
10430 *
10431 * XXX smp_call_function_single() return -ENXIO without a warn
10432 * so we could possibly deal with this.
10433 *
10434 * This is safe against new events arriving because
10435 * sys_perf_event_open() serializes against hotplug using
10436 * get_online_cpus().
10437 */
cdd6c482 10438 perf_event_exit_cpu(cpu);
0793a61d 10439 break;
0793a61d
TG
10440 default:
10441 break;
10442 }
10443
10444 return NOTIFY_OK;
10445}
10446
cdd6c482 10447void __init perf_event_init(void)
0793a61d 10448{
3c502e7a
JW
10449 int ret;
10450
2e80a82a
PZ
10451 idr_init(&pmu_idr);
10452
220b140b 10453 perf_event_init_all_cpus();
b0a873eb 10454 init_srcu_struct(&pmus_srcu);
2e80a82a
PZ
10455 perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
10456 perf_pmu_register(&perf_cpu_clock, NULL, -1);
10457 perf_pmu_register(&perf_task_clock, NULL, -1);
b0a873eb
PZ
10458 perf_tp_register();
10459 perf_cpu_notifier(perf_cpu_notify);
c277443c 10460 register_reboot_notifier(&perf_reboot_notifier);
3c502e7a
JW
10461
10462 ret = init_hw_breakpoint();
10463 WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
b2029520 10464
b01c3a00
JO
10465 /*
10466 * Build time assertion that we keep the data_head at the intended
10467 * location. IOW, validation we got the __reserved[] size right.
10468 */
10469 BUILD_BUG_ON((offsetof(struct perf_event_mmap_page, data_head))
10470 != 1024);
0793a61d 10471}
abe43400 10472
fd979c01
CS
10473ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
10474 char *page)
10475{
10476 struct perf_pmu_events_attr *pmu_attr =
10477 container_of(attr, struct perf_pmu_events_attr, attr);
10478
10479 if (pmu_attr->event_str)
10480 return sprintf(page, "%s\n", pmu_attr->event_str);
10481
10482 return 0;
10483}
675965b0 10484EXPORT_SYMBOL_GPL(perf_event_sysfs_show);
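/*
 * The string returned above backs files such as
 * /sys/bus/event_source/devices/<pmu>/events/<event> (path layout is
 * illustrative), which tools like perf parse to translate symbolic event
 * names into attr.config values; the backing bus is registered in
 * perf_event_sysfs_init() below.
 */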
fd979c01 10485
abe43400
PZ
10486static int __init perf_event_sysfs_init(void)
10487{
10488 struct pmu *pmu;
10489 int ret;
10490
10491 mutex_lock(&pmus_lock);
10492
10493 ret = bus_register(&pmu_bus);
10494 if (ret)
10495 goto unlock;
10496
10497 list_for_each_entry(pmu, &pmus, entry) {
10498 if (!pmu->name || pmu->type < 0)
10499 continue;
10500
10501 ret = pmu_dev_alloc(pmu);
10502 WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
10503 }
10504 pmu_bus_running = 1;
10505 ret = 0;
10506
10507unlock:
10508 mutex_unlock(&pmus_lock);
10509
10510 return ret;
10511}
10512device_initcall(perf_event_sysfs_init);
e5d1367f
SE
10513
10514#ifdef CONFIG_CGROUP_PERF
eb95419b
TH
10515static struct cgroup_subsys_state *
10516perf_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
e5d1367f
SE
10517{
10518 struct perf_cgroup *jc;
e5d1367f 10519
1b15d055 10520 jc = kzalloc(sizeof(*jc), GFP_KERNEL);
e5d1367f
SE
10521 if (!jc)
10522 return ERR_PTR(-ENOMEM);
10523
e5d1367f
SE
10524 jc->info = alloc_percpu(struct perf_cgroup_info);
10525 if (!jc->info) {
10526 kfree(jc);
10527 return ERR_PTR(-ENOMEM);
10528 }
10529
e5d1367f
SE
10530 return &jc->css;
10531}
10532
eb95419b 10533static void perf_cgroup_css_free(struct cgroup_subsys_state *css)
e5d1367f 10534{
eb95419b
TH
10535 struct perf_cgroup *jc = container_of(css, struct perf_cgroup, css);
10536
e5d1367f
SE
10537 free_percpu(jc->info);
10538 kfree(jc);
10539}
10540
10541static int __perf_cgroup_move(void *info)
10542{
10543 struct task_struct *task = info;
ddaaf4e2 10544 rcu_read_lock();
e5d1367f 10545 perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
ddaaf4e2 10546 rcu_read_unlock();
e5d1367f
SE
10547 return 0;
10548}
10549
1f7dd3e5 10550static void perf_cgroup_attach(struct cgroup_taskset *tset)
e5d1367f 10551{
bb9d97b6 10552 struct task_struct *task;
1f7dd3e5 10553 struct cgroup_subsys_state *css;
bb9d97b6 10554
1f7dd3e5 10555 cgroup_taskset_for_each(task, css, tset)
bb9d97b6 10556 task_function_call(task, __perf_cgroup_move, task);
e5d1367f
SE
10557}
10558
073219e9 10559struct cgroup_subsys perf_event_cgrp_subsys = {
92fb9748
TH
10560 .css_alloc = perf_cgroup_css_alloc,
10561 .css_free = perf_cgroup_css_free,
bb9d97b6 10562 .attach = perf_cgroup_attach,
e5d1367f
SE
10563};
10564#endif /* CONFIG_CGROUP_PERF */