/*
 * Performance events core code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright  ©  2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/sysfs.h>
#include <linux/dcache.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/vmstat.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/ftrace_event.h>
#include <linux/hw_breakpoint.h>

#include <asm/irq_regs.h>
/*
 * Each CPU has a list of per CPU events:
 */
static DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);

int perf_max_events __read_mostly = 1;
static int perf_reserved_percpu __read_mostly;
static int perf_overcommit __read_mostly = 1;

static atomic_t nr_events __read_mostly;
static atomic_t nr_mmap_events __read_mostly;
static atomic_t nr_comm_events __read_mostly;
static atomic_t nr_task_events __read_mostly;

/*
 * perf event paranoia level:
 *  -1 - not paranoid at all
 *   0 - disallow raw tracepoint access for unpriv
 *   1 - disallow cpu events for unpriv
 *   2 - disallow kernel profiling for unpriv
 */
int sysctl_perf_event_paranoid __read_mostly = 1;

int sysctl_perf_event_mlock __read_mostly = 512; /* 'free' kb per user */

/*
 * max perf event sample rate
 */
int sysctl_perf_event_sample_rate __read_mostly = 100000;

static atomic64_t perf_event_id;

/*
 * Lock for (sysadmin-configurable) event reservations:
 */
static DEFINE_SPINLOCK(perf_resource_lock);
/*
 * Architecture provided APIs - weak aliases:
 */
extern __weak const struct pmu *hw_perf_event_init(struct perf_event *event)
{
	return NULL;
}

void __weak hw_perf_disable(void)		{ barrier(); }
void __weak hw_perf_enable(void)		{ barrier(); }

int __weak
hw_perf_group_sched_in(struct perf_event *group_leader,
	       struct perf_cpu_context *cpuctx,
	       struct perf_event_context *ctx)
{
	return 0;
}

void __weak perf_event_print_debug(void)	{ }

static DEFINE_PER_CPU(int, perf_disable_count);

void perf_disable(void)
{
	if (!__get_cpu_var(perf_disable_count)++)
		hw_perf_disable();
}

void perf_enable(void)
{
	if (!--__get_cpu_var(perf_disable_count))
		hw_perf_enable();
}
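/*
 * Note (informal): perf_disable()/perf_enable() nest by design; the
 * per-cpu perf_disable_count only calls down into hw_perf_disable() on
 * the 0 -> 1 transition and into hw_perf_enable() on the 1 -> 0 one.
 * A purely illustrative caller can therefore safely do:
 *
 *	perf_disable();
 *	...	poke at PMU state	...
 *	perf_disable();		// nested section
 *	...
 *	perf_enable();
 *	perf_enable();		// hardware re-enabled here
 */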
static void get_ctx(struct perf_event_context *ctx)
{
	WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
}

static void free_ctx(struct rcu_head *head)
{
	struct perf_event_context *ctx;

	ctx = container_of(head, struct perf_event_context, rcu_head);
	kfree(ctx);
}

static void put_ctx(struct perf_event_context *ctx)
{
	if (atomic_dec_and_test(&ctx->refcount)) {
		if (ctx->parent_ctx)
			put_ctx(ctx->parent_ctx);
		if (ctx->task)
			put_task_struct(ctx->task);
		call_rcu(&ctx->rcu_head, free_ctx);
	}
}

static void unclone_ctx(struct perf_event_context *ctx)
{
	if (ctx->parent_ctx) {
		put_ctx(ctx->parent_ctx);
		ctx->parent_ctx = NULL;
	}
}

/*
 * If we inherit events we want to return the parent event id
 * to userspace.
 */
static u64 primary_event_id(struct perf_event *event)
{
	u64 id = event->id;

	if (event->parent)
		id = event->parent->id;

	return id;
}
/*
 * Get the perf_event_context for a task and lock it.
 * This has to cope with the fact that until it is locked,
 * the context could get moved to another task.
 */
static struct perf_event_context *
perf_lock_task_context(struct task_struct *task, unsigned long *flags)
{
	struct perf_event_context *ctx;

	rcu_read_lock();
retry:
	ctx = rcu_dereference(task->perf_event_ctxp);
	if (ctx) {
		/*
		 * If this context is a clone of another, it might
		 * get swapped for another underneath us by
		 * perf_event_task_sched_out, though the
		 * rcu_read_lock() protects us from any context
		 * getting freed.  Lock the context and check if it
		 * got swapped before we could get the lock, and retry
		 * if so.  If we locked the right context, then it
		 * can't get swapped on us any more.
		 */
		raw_spin_lock_irqsave(&ctx->lock, *flags);
		if (ctx != rcu_dereference(task->perf_event_ctxp)) {
			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
			goto retry;
		}

		if (!atomic_inc_not_zero(&ctx->refcount)) {
			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
			ctx = NULL;
		}
	}
	rcu_read_unlock();
	return ctx;
}
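/*
 * Illustrative note: the atomic_inc_not_zero() above is what makes the
 * RCU lookup safe against a concurrent free.  A context whose refcount
 * already hit zero may be sitting out an RCU grace period waiting for
 * free_ctx(); taking a new reference on it would be a use-after-free,
 * so the lookup simply fails (returns NULL) in that case and callers
 * fall back to allocating a fresh context.
 */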
/*
 * Get the context for a task and increment its pin_count so it
 * can't get swapped to another task.  This also increments its
 * reference count so that the context can't get freed.
 */
static struct perf_event_context *perf_pin_task_context(struct task_struct *task)
{
	struct perf_event_context *ctx;
	unsigned long flags;

	ctx = perf_lock_task_context(task, &flags);
	if (ctx) {
		++ctx->pin_count;
		raw_spin_unlock_irqrestore(&ctx->lock, flags);
	}
	return ctx;
}

static void perf_unpin_context(struct perf_event_context *ctx)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ctx->lock, flags);
	--ctx->pin_count;
	raw_spin_unlock_irqrestore(&ctx->lock, flags);
	put_ctx(ctx);
}
static inline u64 perf_clock(void)
{
	return cpu_clock(raw_smp_processor_id());
}

/*
 * Update the record of the current time in a context.
 */
static void update_context_time(struct perf_event_context *ctx)
{
	u64 now = perf_clock();

	ctx->time += now - ctx->timestamp;
	ctx->timestamp = now;
}
/*
 * Update the total_time_enabled and total_time_running fields for an event.
 */
static void update_event_times(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	u64 run_end;

	if (event->state < PERF_EVENT_STATE_INACTIVE ||
	    event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
		return;

	if (ctx->is_active)
		run_end = ctx->time;
	else
		run_end = event->tstamp_stopped;

	event->total_time_enabled = run_end - event->tstamp_enabled;

	if (event->state == PERF_EVENT_STATE_INACTIVE)
		run_end = event->tstamp_stopped;
	else
		run_end = ctx->time;

	event->total_time_running = run_end - event->tstamp_running;
}

static struct list_head *
ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
{
	if (event->attr.pinned)
		return &ctx->pinned_groups;

	return &ctx->flexible_groups;
}
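/*
 * Reading guide (informal): tstamp_enabled is when the event was last
 * put on a context, tstamp_running when it last actually went onto a
 * PMU, and tstamp_stopped when it last came off.  total_time_enabled
 * and total_time_running are derived from these against ctx->time and
 * are what the PERF_FORMAT_TOTAL_TIME_* read formats report, so users
 * can scale the counts of multiplexed events.
 */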
/*
 * Add an event to the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_add_event(struct perf_event *event, struct perf_event_context *ctx)
{
	struct perf_event *group_leader = event->group_leader;

	/*
	 * Depending on whether it is a standalone or sibling event,
	 * add it straight to the context's event list, or to the group
	 * leader's sibling list:
	 */
	if (group_leader == event) {
		struct list_head *list;

		if (is_software_event(event))
			event->group_flags |= PERF_GROUP_SOFTWARE;

		list = ctx_group_list(event, ctx);
		list_add_tail(&event->group_entry, list);
	} else {
		if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
		    !is_software_event(event))
			group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;

		list_add_tail(&event->group_entry, &group_leader->sibling_list);
		group_leader->nr_siblings++;
	}

	list_add_rcu(&event->event_entry, &ctx->event_list);
	ctx->nr_events++;
	if (event->attr.inherit_stat)
		ctx->nr_stat++;
}
/*
 * Remove an event from the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_del_event(struct perf_event *event, struct perf_event_context *ctx)
{
	struct perf_event *sibling, *tmp;

	if (list_empty(&event->group_entry))
		return;
	ctx->nr_events--;
	if (event->attr.inherit_stat)
		ctx->nr_stat--;

	list_del_init(&event->group_entry);
	list_del_rcu(&event->event_entry);

	if (event->group_leader != event)
		event->group_leader->nr_siblings--;

	update_event_times(event);

	/*
	 * If event was in error state, then keep it
	 * that way, otherwise bogus counts will be
	 * returned on read(). The only way to get out
	 * of error state is by explicit re-enabling
	 * of the event
	 */
	if (event->state > PERF_EVENT_STATE_OFF)
		event->state = PERF_EVENT_STATE_OFF;

	if (event->state > PERF_EVENT_STATE_FREE)
		return;

	/*
	 * If this was a group event with sibling events then
	 * upgrade the siblings to singleton events by adding them
	 * to the context list directly:
	 */
	list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
		struct list_head *list;

		list = ctx_group_list(event, ctx);
		list_move_tail(&sibling->group_entry, list);
		sibling->group_leader = sibling;

		/* Inherit group flags from the previous leader */
		sibling->group_flags = event->group_flags;
	}
}
static void
event_sched_out(struct perf_event *event,
		  struct perf_cpu_context *cpuctx,
		  struct perf_event_context *ctx)
{
	if (event->state != PERF_EVENT_STATE_ACTIVE)
		return;

	event->state = PERF_EVENT_STATE_INACTIVE;
	if (event->pending_disable) {
		event->pending_disable = 0;
		event->state = PERF_EVENT_STATE_OFF;
	}
	event->tstamp_stopped = ctx->time;
	event->pmu->disable(event);
	event->oncpu = -1;

	if (!is_software_event(event))
		cpuctx->active_oncpu--;
	ctx->nr_active--;
	if (event->attr.exclusive || !cpuctx->active_oncpu)
		cpuctx->exclusive = 0;
}

static void
group_sched_out(struct perf_event *group_event,
		struct perf_cpu_context *cpuctx,
		struct perf_event_context *ctx)
{
	struct perf_event *event;

	if (group_event->state != PERF_EVENT_STATE_ACTIVE)
		return;

	event_sched_out(group_event, cpuctx, ctx);

	/*
	 * Schedule out siblings (if any):
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry)
		event_sched_out(event, cpuctx, ctx);

	if (group_event->attr.exclusive)
		cpuctx->exclusive = 0;
}
/*
 * Cross CPU call to remove a performance event
 *
 * We disable the event on the hardware level first. After that we
 * remove it from the context list.
 */
static void __perf_event_remove_from_context(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	raw_spin_lock(&ctx->lock);
	/*
	 * Protect the list operation against NMI by disabling the
	 * events on a global level.
	 */
	perf_disable();

	event_sched_out(event, cpuctx, ctx);

	list_del_event(event, ctx);

	if (!ctx->task) {
		/*
		 * Allow more per task events with respect to the
		 * reservation:
		 */
		cpuctx->max_pertask =
			min(perf_max_events - ctx->nr_events,
			    perf_max_events - perf_reserved_percpu);
	}

	perf_enable();
	raw_spin_unlock(&ctx->lock);
}
/*
 * Remove the event from a task's (or a CPU's) list of events.
 *
 * Must be called with ctx->mutex held.
 *
 * CPU events are removed with a smp call. For task events we only
 * call when the task is on a CPU.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid.  This is OK when called from perf_release since
 * that only calls us on the top-level context, which can't be a clone.
 * When called from perf_event_exit_task, it's OK because the
 * context has been detached from its task.
 */
static void perf_event_remove_from_context(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Per cpu events are removed via an smp call and
		 * the removal is always successful.
		 */
		smp_call_function_single(event->cpu,
					 __perf_event_remove_from_context,
					 event, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_event_remove_from_context,
				 event);

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If the context is active we need to retry the smp call.
	 */
	if (ctx->nr_active && !list_empty(&event->group_entry)) {
		raw_spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * The lock prevents that this context is scheduled in so we
	 * can remove the event safely, if the call above did not
	 * succeed.
	 */
	if (!list_empty(&event->group_entry))
		list_del_event(event, ctx);
	raw_spin_unlock_irq(&ctx->lock);
}
/*
 * Update total_time_enabled and total_time_running for all events in a group.
 */
static void update_group_times(struct perf_event *leader)
{
	struct perf_event *event;

	update_event_times(leader);
	list_for_each_entry(event, &leader->sibling_list, group_entry)
		update_event_times(event);
}
/*
 * Cross CPU call to disable a performance event
 */
static void __perf_event_disable(void *info)
{
	struct perf_event *event = info;
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_event_context *ctx = event->ctx;

	/*
	 * If this is a per-task event, need to check whether this
	 * event's task is the current task on this cpu.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	raw_spin_lock(&ctx->lock);

	/*
	 * If the event is on, turn it off.
	 * If it is in error state, leave it in error state.
	 */
	if (event->state >= PERF_EVENT_STATE_INACTIVE) {
		update_context_time(ctx);
		update_group_times(event);
		if (event == event->group_leader)
			group_sched_out(event, cpuctx, ctx);
		else
			event_sched_out(event, cpuctx, ctx);
		event->state = PERF_EVENT_STATE_OFF;
	}

	raw_spin_unlock(&ctx->lock);
}
/*
 * Disable an event.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid.  This condition is satisfied when called through
 * perf_event_for_each_child or perf_event_for_each because they
 * hold the top-level event's child_mutex, so any descendant that
 * goes to exit will block in sync_child_event.
 * When called from perf_pending_event it's OK because event->ctx
 * is the current context on this CPU and preemption is disabled,
 * hence we can't get into perf_event_task_sched_out for this context.
 */
void perf_event_disable(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Disable the event on the cpu that it's on
		 */
		smp_call_function_single(event->cpu, __perf_event_disable,
					 event, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_event_disable, event);

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If the event is still active, we need to retry the cross-call.
	 */
	if (event->state == PERF_EVENT_STATE_ACTIVE) {
		raw_spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (event->state == PERF_EVENT_STATE_INACTIVE) {
		update_group_times(event);
		event->state = PERF_EVENT_STATE_OFF;
	}

	raw_spin_unlock_irq(&ctx->lock);
}
static int
event_sched_in(struct perf_event *event,
		 struct perf_cpu_context *cpuctx,
		 struct perf_event_context *ctx)
{
	if (event->state <= PERF_EVENT_STATE_OFF)
		return 0;

	event->state = PERF_EVENT_STATE_ACTIVE;
	event->oncpu = smp_processor_id();
	/*
	 * The new state must be visible before we turn it on in the hardware:
	 */
	smp_wmb();

	if (event->pmu->enable(event)) {
		event->state = PERF_EVENT_STATE_INACTIVE;
		event->oncpu = -1;
		return -EAGAIN;
	}

	event->tstamp_running += ctx->time - event->tstamp_stopped;

	if (!is_software_event(event))
		cpuctx->active_oncpu++;
	ctx->nr_active++;

	if (event->attr.exclusive)
		cpuctx->exclusive = 1;

	return 0;
}
static int
group_sched_in(struct perf_event *group_event,
	       struct perf_cpu_context *cpuctx,
	       struct perf_event_context *ctx)
{
	struct perf_event *event, *partial_group;
	int ret;

	if (group_event->state == PERF_EVENT_STATE_OFF)
		return 0;

	ret = hw_perf_group_sched_in(group_event, cpuctx, ctx);
	if (ret)
		return ret < 0 ? ret : 0;

	if (event_sched_in(group_event, cpuctx, ctx))
		return -EAGAIN;

	/*
	 * Schedule in siblings as one group (if any):
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
		if (event_sched_in(event, cpuctx, ctx)) {
			partial_group = event;
			goto group_error;
		}
	}

	return 0;

group_error:
	/*
	 * Groups can be scheduled in as one unit only, so undo any
	 * partial group before returning:
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
		if (event == partial_group)
			break;
		event_sched_out(event, cpuctx, ctx);
	}
	event_sched_out(group_event, cpuctx, ctx);

	return -EAGAIN;
}
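/*
 * In short: a group either goes onto the PMU complete or not at all.
 * For example (illustrative), with a leader plus two siblings where the
 * second sibling fails pmu->enable(), the leader and the first sibling
 * are scheduled back out again and group_sched_in() returns -EAGAIN,
 * so the caller treats the whole group as not scheduled.
 */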
/*
 * Work out whether we can put this event group on the CPU now.
 */
static int group_can_go_on(struct perf_event *event,
			   struct perf_cpu_context *cpuctx,
			   int can_add_hw)
{
	/*
	 * Groups consisting entirely of software events can always go on.
	 */
	if (event->group_flags & PERF_GROUP_SOFTWARE)
		return 1;
	/*
	 * If an exclusive group is already on, no other hardware
	 * groups can go on.
	 */
	if (cpuctx->exclusive)
		return 0;
	/*
	 * If this group is exclusive and there are already
	 * events on the CPU, it can't go on.
	 */
	if (event->attr.exclusive && cpuctx->active_oncpu)
		return 0;
	/*
	 * Otherwise, try to add it if all previous groups were able
	 * to go on.
	 */
	return can_add_hw;
}
static void add_event_to_ctx(struct perf_event *event,
			       struct perf_event_context *ctx)
{
	list_add_event(event, ctx);
	event->tstamp_enabled = ctx->time;
	event->tstamp_running = ctx->time;
	event->tstamp_stopped = ctx->time;
}
/*
 * Cross CPU call to install and enable a performance event
 *
 * Must be called with ctx->mutex held
 */
static void __perf_install_in_context(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_event *leader = event->group_leader;
	int err;

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived.
	 * Or possibly this is the right context but it isn't
	 * on this cpu because it had no events.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx) {
		if (cpuctx->task_ctx || ctx->task != current)
			return;
		cpuctx->task_ctx = ctx;
	}

	raw_spin_lock(&ctx->lock);
	ctx->is_active = 1;
	update_context_time(ctx);

	/*
	 * Protect the list operation against NMI by disabling the
	 * events on a global level. NOP for non NMI based events.
	 */
	perf_disable();

	add_event_to_ctx(event, ctx);

	if (event->cpu != -1 && event->cpu != smp_processor_id())
		goto unlock;

	/*
	 * Don't put the event on if it is disabled or if
	 * it is in a group and the group isn't on.
	 */
	if (event->state != PERF_EVENT_STATE_INACTIVE ||
	    (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE))
		goto unlock;

	/*
	 * An exclusive event can't go on if there are already active
	 * hardware events, and no hardware event can go on if there
	 * is already an exclusive event on.
	 */
	if (!group_can_go_on(event, cpuctx, 1))
		err = -EEXIST;
	else
		err = event_sched_in(event, cpuctx, ctx);

	if (err) {
		/*
		 * This event couldn't go on. If it is in a group
		 * then we have to pull the whole group off.
		 * If the event group is pinned then put it in error state.
		 */
		if (leader != event)
			group_sched_out(leader, cpuctx, ctx);
		if (leader->attr.pinned) {
			update_group_times(leader);
			leader->state = PERF_EVENT_STATE_ERROR;
		}
	}

	if (!err && !ctx->task && cpuctx->max_pertask)
		cpuctx->max_pertask--;

unlock:
	perf_enable();

	raw_spin_unlock(&ctx->lock);
}
/*
 * Attach a performance event to a context
 *
 * First we add the event to the list with the hardware enable bit
 * in event->hw_config cleared.
 *
 * If the event is attached to a task which is on a CPU we use a smp
 * call to enable it in the task context. The task might have been
 * scheduled away, but we check this in the smp call again.
 *
 * Must be called with ctx->mutex held.
 */
static void
perf_install_in_context(struct perf_event_context *ctx,
			struct perf_event *event,
			int cpu)
{
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Per cpu events are installed via an smp call and
		 * the install is always successful.
		 */
		smp_call_function_single(cpu, __perf_install_in_context,
					 event, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_install_in_context,
				 event);

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * we need to retry the smp call.
	 */
	if (ctx->is_active && list_empty(&event->group_entry)) {
		raw_spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * The lock prevents that this context is scheduled in so we
	 * can add the event safely, if the call above did not
	 * succeed.
	 */
	if (list_empty(&event->group_entry))
		add_event_to_ctx(event, ctx);
	raw_spin_unlock_irq(&ctx->lock);
}
/*
 * Put an event into inactive state and update time fields.
 * Enabling the leader of a group effectively enables all
 * the group members that aren't explicitly disabled, so we
 * have to update their ->tstamp_enabled also.
 * Note: this works for group members as well as group leaders
 * since the non-leader members' sibling_lists will be empty.
 */
static void __perf_event_mark_enabled(struct perf_event *event,
					struct perf_event_context *ctx)
{
	struct perf_event *sub;

	event->state = PERF_EVENT_STATE_INACTIVE;
	event->tstamp_enabled = ctx->time - event->total_time_enabled;
	list_for_each_entry(sub, &event->sibling_list, group_entry)
		if (sub->state >= PERF_EVENT_STATE_INACTIVE)
			sub->tstamp_enabled =
				ctx->time - sub->total_time_enabled;
}
/*
 * Cross CPU call to enable a performance event
 */
static void __perf_event_enable(void *info)
{
	struct perf_event *event = info;
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_event_context *ctx = event->ctx;
	struct perf_event *leader = event->group_leader;
	int err;

	/*
	 * If this is a per-task event, need to check whether this
	 * event's task is the current task on this cpu.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx) {
		if (cpuctx->task_ctx || ctx->task != current)
			return;
		cpuctx->task_ctx = ctx;
	}

	raw_spin_lock(&ctx->lock);
	ctx->is_active = 1;
	update_context_time(ctx);

	if (event->state >= PERF_EVENT_STATE_INACTIVE)
		goto unlock;
	__perf_event_mark_enabled(event, ctx);

	if (event->cpu != -1 && event->cpu != smp_processor_id())
		goto unlock;

	/*
	 * If the event is in a group and isn't the group leader,
	 * then don't put it on unless the group is on.
	 */
	if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
		goto unlock;

	if (!group_can_go_on(event, cpuctx, 1)) {
		err = -EEXIST;
	} else {
		perf_disable();
		if (event == leader)
			err = group_sched_in(event, cpuctx, ctx);
		else
			err = event_sched_in(event, cpuctx, ctx);
		perf_enable();
	}

	if (err) {
		/*
		 * If this event can't go on and it's part of a
		 * group, then the whole group has to come off.
		 */
		if (leader != event)
			group_sched_out(leader, cpuctx, ctx);
		if (leader->attr.pinned) {
			update_group_times(leader);
			leader->state = PERF_EVENT_STATE_ERROR;
		}
	}

unlock:
	raw_spin_unlock(&ctx->lock);
}
/*
 * Enable an event.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid.  This condition is satisfied when called through
 * perf_event_for_each_child or perf_event_for_each as described
 * for perf_event_disable.
 */
void perf_event_enable(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Enable the event on the cpu that it's on
		 */
		smp_call_function_single(event->cpu, __perf_event_enable,
					 event, 1);
		return;
	}

	raw_spin_lock_irq(&ctx->lock);
	if (event->state >= PERF_EVENT_STATE_INACTIVE)
		goto out;

	/*
	 * If the event is in error state, clear that first.
	 * That way, if we see the event in error state below, we
	 * know that it has gone back into error state, as distinct
	 * from the task having been scheduled away before the
	 * cross-call arrived.
	 */
	if (event->state == PERF_EVENT_STATE_ERROR)
		event->state = PERF_EVENT_STATE_OFF;

retry:
	raw_spin_unlock_irq(&ctx->lock);
	task_oncpu_function_call(task, __perf_event_enable, event);

	raw_spin_lock_irq(&ctx->lock);

	/*
	 * If the context is active and the event is still off,
	 * we need to retry the cross-call.
	 */
	if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF)
		goto retry;

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (event->state == PERF_EVENT_STATE_OFF)
		__perf_event_mark_enabled(event, ctx);

out:
	raw_spin_unlock_irq(&ctx->lock);
}
static int perf_event_refresh(struct perf_event *event, int refresh)
{
	/*
	 * not supported on inherited events
	 */
	if (event->attr.inherit)
		return -EINVAL;

	atomic_add(refresh, &event->event_limit);
	perf_event_enable(event);

	return 0;
}
enum event_type_t {
	EVENT_FLEXIBLE = 0x1,
	EVENT_PINNED = 0x2,
	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
};
static void ctx_sched_out(struct perf_event_context *ctx,
			  struct perf_cpu_context *cpuctx,
			  enum event_type_t event_type)
{
	struct perf_event *event;

	raw_spin_lock(&ctx->lock);
	ctx->is_active = 0;
	if (likely(!ctx->nr_events))
		goto out;
	update_context_time(ctx);

	perf_disable();
	if (!ctx->nr_active)
		goto out_enable;

	if (event_type & EVENT_PINNED)
		list_for_each_entry(event, &ctx->pinned_groups, group_entry)
			group_sched_out(event, cpuctx, ctx);

	if (event_type & EVENT_FLEXIBLE)
		list_for_each_entry(event, &ctx->flexible_groups, group_entry)
			group_sched_out(event, cpuctx, ctx);

out_enable:
	perf_enable();
out:
	raw_spin_unlock(&ctx->lock);
}
/*
 * Test whether two contexts are equivalent, i.e. whether they
 * have both been cloned from the same version of the same context
 * and they both have the same number of enabled events.
 * If the number of enabled events is the same, then the set
 * of enabled events should be the same, because these are both
 * inherited contexts, therefore we can't access individual events
 * in them directly with an fd; we can only enable/disable all
 * events via prctl, or enable/disable all events in a family
 * via ioctl, which will have the same effect on both contexts.
 */
static int context_equiv(struct perf_event_context *ctx1,
			 struct perf_event_context *ctx2)
{
	return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
		&& ctx1->parent_gen == ctx2->parent_gen
		&& !ctx1->pin_count && !ctx2->pin_count;
}
static void __perf_event_sync_stat(struct perf_event *event,
				     struct perf_event *next_event)
{
	u64 value;

	if (!event->attr.inherit_stat)
		return;

	/*
	 * Update the event value, we cannot use perf_event_read()
	 * because we're in the middle of a context switch and have IRQs
	 * disabled, which upsets smp_call_function_single(), however
	 * we know the event must be on the current CPU, therefore we
	 * don't need to use it.
	 */
	switch (event->state) {
	case PERF_EVENT_STATE_ACTIVE:
		event->pmu->read(event);
		/* fall-through */

	case PERF_EVENT_STATE_INACTIVE:
		update_event_times(event);
		break;

	default:
		break;
	}

	/*
	 * In order to keep per-task stats reliable we need to flip the event
	 * values when we flip the contexts.
	 */
	value = atomic64_read(&next_event->count);
	value = atomic64_xchg(&event->count, value);
	atomic64_set(&next_event->count, value);

	swap(event->total_time_enabled, next_event->total_time_enabled);
	swap(event->total_time_running, next_event->total_time_running);

	/*
	 * Since we swizzled the values, update the user visible data too.
	 */
	perf_event_update_userpage(event);
	perf_event_update_userpage(next_event);
}
#define list_next_entry(pos, member) \
	list_entry(pos->member.next, typeof(*pos), member)

static void perf_event_sync_stat(struct perf_event_context *ctx,
				   struct perf_event_context *next_ctx)
{
	struct perf_event *event, *next_event;

	if (!ctx->nr_stat)
		return;

	update_context_time(ctx);

	event = list_first_entry(&ctx->event_list,
				   struct perf_event, event_entry);

	next_event = list_first_entry(&next_ctx->event_list,
					struct perf_event, event_entry);

	while (&event->event_entry != &ctx->event_list &&
	       &next_event->event_entry != &next_ctx->event_list) {

		__perf_event_sync_stat(event, next_event);

		event = list_next_entry(event, event_entry);
		next_event = list_next_entry(next_event, event_entry);
	}
}
/*
 * Called from scheduler to remove the events of the current task,
 * with interrupts disabled.
 *
 * We stop each event and update the event value in event->count.
 *
 * This does not protect us against NMI, but disable()
 * sets the disabled bit in the control field of event _before_
 * accessing the event control register. If a NMI hits, then it will
 * not restart the event.
 */
void perf_event_task_sched_out(struct task_struct *task,
				 struct task_struct *next)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_event_context *ctx = task->perf_event_ctxp;
	struct perf_event_context *next_ctx;
	struct perf_event_context *parent;
	int do_switch = 1;

	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);

	if (likely(!ctx || !cpuctx->task_ctx))
		return;

	rcu_read_lock();
	parent = rcu_dereference(ctx->parent_ctx);
	next_ctx = next->perf_event_ctxp;
	if (parent && next_ctx &&
	    rcu_dereference(next_ctx->parent_ctx) == parent) {
		/*
		 * Looks like the two contexts are clones, so we might be
		 * able to optimize the context switch.  We lock both
		 * contexts and check that they are clones under the
		 * lock (including re-checking that neither has been
		 * uncloned in the meantime).  It doesn't matter which
		 * order we take the locks because no other cpu could
		 * be trying to lock both of these tasks.
		 */
		raw_spin_lock(&ctx->lock);
		raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
		if (context_equiv(ctx, next_ctx)) {
			/*
			 * XXX do we need a memory barrier of sorts
			 * wrt to rcu_dereference() of perf_event_ctxp
			 */
			task->perf_event_ctxp = next_ctx;
			next->perf_event_ctxp = ctx;
			ctx->task = next;
			next_ctx->task = task;
			do_switch = 0;

			perf_event_sync_stat(ctx, next_ctx);
		}
		raw_spin_unlock(&next_ctx->lock);
		raw_spin_unlock(&ctx->lock);
	}
	rcu_read_unlock();

	if (do_switch) {
		ctx_sched_out(ctx, cpuctx, EVENT_ALL);
		cpuctx->task_ctx = NULL;
	}
}
static void task_ctx_sched_out(struct perf_event_context *ctx,
			       enum event_type_t event_type)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);

	if (!cpuctx->task_ctx)
		return;

	if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
		return;

	ctx_sched_out(ctx, cpuctx, event_type);
	cpuctx->task_ctx = NULL;
}

/*
 * Called with IRQs disabled
 */
static void __perf_event_task_sched_out(struct perf_event_context *ctx)
{
	task_ctx_sched_out(ctx, EVENT_ALL);
}

/*
 * Called with IRQs disabled
 */
static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
			      enum event_type_t event_type)
{
	ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
}
static void
ctx_pinned_sched_in(struct perf_event_context *ctx,
		    struct perf_cpu_context *cpuctx)
{
	struct perf_event *event;

	list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
		if (event->state <= PERF_EVENT_STATE_OFF)
			continue;
		if (event->cpu != -1 && event->cpu != smp_processor_id())
			continue;

		if (group_can_go_on(event, cpuctx, 1))
			group_sched_in(event, cpuctx, ctx);

		/*
		 * If this pinned group hasn't been scheduled,
		 * put it in error state.
		 */
		if (event->state == PERF_EVENT_STATE_INACTIVE) {
			update_group_times(event);
			event->state = PERF_EVENT_STATE_ERROR;
		}
	}
}

static void
ctx_flexible_sched_in(struct perf_event_context *ctx,
		      struct perf_cpu_context *cpuctx)
{
	struct perf_event *event;
	int can_add_hw = 1;

	list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
		/* Ignore events in OFF or ERROR state */
		if (event->state <= PERF_EVENT_STATE_OFF)
			continue;
		/*
		 * Listen to the 'cpu' scheduling filter constraint
		 * of events:
		 */
		if (event->cpu != -1 && event->cpu != smp_processor_id())
			continue;

		if (group_can_go_on(event, cpuctx, can_add_hw))
			if (group_sched_in(event, cpuctx, ctx))
				can_add_hw = 0;
	}
}
static void
ctx_sched_in(struct perf_event_context *ctx,
	     struct perf_cpu_context *cpuctx,
	     enum event_type_t event_type)
{
	raw_spin_lock(&ctx->lock);
	ctx->is_active = 1;
	if (likely(!ctx->nr_events))
		goto out;

	ctx->timestamp = perf_clock();

	perf_disable();

	/*
	 * First go through the list and put on any pinned groups
	 * in order to give them the best chance of going on.
	 */
	if (event_type & EVENT_PINNED)
		ctx_pinned_sched_in(ctx, cpuctx);

	/* Then walk through the lower prio flexible groups */
	if (event_type & EVENT_FLEXIBLE)
		ctx_flexible_sched_in(ctx, cpuctx);

	perf_enable();
out:
	raw_spin_unlock(&ctx->lock);
}
static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
			     enum event_type_t event_type)
{
	struct perf_event_context *ctx = &cpuctx->ctx;

	ctx_sched_in(ctx, cpuctx, event_type);
}

static void task_ctx_sched_in(struct task_struct *task,
			      enum event_type_t event_type)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_event_context *ctx = task->perf_event_ctxp;

	if (likely(!ctx))
		return;
	if (cpuctx->task_ctx == ctx)
		return;
	ctx_sched_in(ctx, cpuctx, event_type);
	cpuctx->task_ctx = ctx;
}
/*
 * Called from scheduler to add the events of the current task
 * with interrupts disabled.
 *
 * We restore the event value and then enable it.
 *
 * This does not protect us against NMI, but enable()
 * sets the enabled bit in the control field of event _before_
 * accessing the event control register. If a NMI hits, then it will
 * keep the event running.
 */
void perf_event_task_sched_in(struct task_struct *task)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_event_context *ctx = task->perf_event_ctxp;

	if (likely(!ctx))
		return;

	if (cpuctx->task_ctx == ctx)
		return;

	perf_disable();

	/*
	 * We want to keep the following priority order:
	 * cpu pinned (that don't need to move), task pinned,
	 * cpu flexible, task flexible.
	 */
	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);

	ctx_sched_in(ctx, cpuctx, EVENT_PINNED);
	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
	ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE);

	cpuctx->task_ctx = ctx;

	perf_enable();
}
#define MAX_INTERRUPTS (~0ULL)

static void perf_log_throttle(struct perf_event *event, int enable);

static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
{
	u64 frequency = event->attr.sample_freq;
	u64 sec = NSEC_PER_SEC;
	u64 divisor, dividend;

	int count_fls, nsec_fls, frequency_fls, sec_fls;

	count_fls = fls64(count);
	nsec_fls = fls64(nsec);
	frequency_fls = fls64(frequency);
	sec_fls = 30;

	/*
	 * We got @count in @nsec, with a target of sample_freq HZ
	 * the target period becomes:
	 *
	 *             @count * 10^9
	 * period = -------------------
	 *          @nsec * sample_freq
	 *
	 */

	/*
	 * Reduce accuracy by one bit such that @a and @b converge
	 * to a similar magnitude.
	 */
#define REDUCE_FLS(a, b) 		\
do {					\
	if (a##_fls > b##_fls) {	\
		a >>= 1;		\
		a##_fls--;		\
	} else {			\
		b >>= 1;		\
		b##_fls--;		\
	}				\
} while (0)

	/*
	 * Reduce accuracy until either term fits in a u64, then proceed with
	 * the other, so that finally we can do a u64/u64 division.
	 */
	while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
		REDUCE_FLS(nsec, frequency);
		REDUCE_FLS(sec, count);
	}

	if (count_fls + sec_fls > 64) {
		divisor = nsec * frequency;

		while (count_fls + sec_fls > 64) {
			REDUCE_FLS(count, sec);
			divisor >>= 1;
		}

		dividend = count * sec;
	} else {
		dividend = count * sec;

		while (nsec_fls + frequency_fls > 64) {
			REDUCE_FLS(nsec, frequency);
			dividend >>= 1;
		}

		divisor = nsec * frequency;
	}

	return div64_u64(dividend, divisor);
}
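/*
 * Worked example (informal, numbers only for illustration): with
 * sample_freq = 1000 Hz, if the event counted roughly 2,000,000 in the
 * last 10 ms (nsec = 10,000,000), then
 *
 *	period = count * 10^9 / (nsec * sample_freq)
 *	       = 2e6 * 1e9 / (1e7 * 1e3) = 200,000
 *
 * i.e. a sample would be taken about every 200,000 events to hit the
 * requested frequency; the bit-reduction above only exists to keep the
 * intermediate products inside 64 bits.
 */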
static void perf_event_stop(struct perf_event *event)
{
	if (!event->pmu->stop)
		return event->pmu->disable(event);

	return event->pmu->stop(event);
}

static int perf_event_start(struct perf_event *event)
{
	if (!event->pmu->start)
		return event->pmu->enable(event);

	return event->pmu->start(event);
}
static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 period, sample_period;
	s64 delta;

	period = perf_calculate_period(event, nsec, count);

	delta = (s64)(period - hwc->sample_period);
	delta = (delta + 7) / 8; /* low pass filter */

	sample_period = hwc->sample_period + delta;

	if (!sample_period)
		sample_period = 1;

	hwc->sample_period = sample_period;

	if (atomic64_read(&hwc->period_left) > 8*sample_period) {
		perf_disable();
		perf_event_stop(event);
		atomic64_set(&hwc->period_left, 0);
		perf_event_start(event);
		perf_enable();
	}
}
static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
{
	struct perf_event *event;
	struct hw_perf_event *hwc;
	u64 interrupts, now;
	s64 delta;

	raw_spin_lock(&ctx->lock);
	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
		if (event->state != PERF_EVENT_STATE_ACTIVE)
			continue;

		if (event->cpu != -1 && event->cpu != smp_processor_id())
			continue;

		hwc = &event->hw;

		interrupts = hwc->interrupts;
		hwc->interrupts = 0;

		/*
		 * unthrottle events on the tick
		 */
		if (interrupts == MAX_INTERRUPTS) {
			perf_log_throttle(event, 1);
			perf_disable();
			event->pmu->unthrottle(event);
			perf_enable();
		}

		if (!event->attr.freq || !event->attr.sample_freq)
			continue;

		perf_disable();
		event->pmu->read(event);
		now = atomic64_read(&event->count);
		delta = now - hwc->freq_count_stamp;
		hwc->freq_count_stamp = now;

		if (delta > 0)
			perf_adjust_period(event, TICK_NSEC, delta);
		perf_enable();
	}
	raw_spin_unlock(&ctx->lock);
}
/*
 * Round-robin a context's events:
 */
static void rotate_ctx(struct perf_event_context *ctx)
{
	raw_spin_lock(&ctx->lock);

	/* Rotate the first entry last of non-pinned groups */
	list_rotate_left(&ctx->flexible_groups);

	raw_spin_unlock(&ctx->lock);
}
void perf_event_task_tick(struct task_struct *curr)
{
	struct perf_cpu_context *cpuctx;
	struct perf_event_context *ctx;
	int rotate = 0;

	if (!atomic_read(&nr_events))
		return;

	cpuctx = &__get_cpu_var(perf_cpu_context);
	if (cpuctx->ctx.nr_events &&
	    cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
		rotate = 1;

	ctx = curr->perf_event_ctxp;
	if (ctx && ctx->nr_events && ctx->nr_events != ctx->nr_active)
		rotate = 1;

	perf_ctx_adjust_freq(&cpuctx->ctx);
	if (ctx)
		perf_ctx_adjust_freq(ctx);

	if (!rotate)
		return;

	perf_disable();
	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
	if (ctx)
		task_ctx_sched_out(ctx, EVENT_FLEXIBLE);

	rotate_ctx(&cpuctx->ctx);
	if (ctx)
		rotate_ctx(ctx);

	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
	if (ctx)
		task_ctx_sched_in(curr, EVENT_FLEXIBLE);
	perf_enable();
}
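/*
 * Rough picture of what one timer tick does for multiplexed events:
 * adjust sampling periods for freq-based events, then, only if some
 * group could not fit on the PMU, schedule the flexible groups out,
 * rotate each flexible list left by one, and schedule back in.  Pinned
 * groups are never rotated, which is what gives them whole-run coverage
 * while flexible groups time-share the remaining counters.
 */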
static int event_enable_on_exec(struct perf_event *event,
				struct perf_event_context *ctx)
{
	if (!event->attr.enable_on_exec)
		return 0;

	event->attr.enable_on_exec = 0;
	if (event->state >= PERF_EVENT_STATE_INACTIVE)
		return 0;

	__perf_event_mark_enabled(event, ctx);

	return 1;
}
/*
 * Enable all of a task's events that have been marked enable-on-exec.
 * This expects task == current.
 */
static void perf_event_enable_on_exec(struct task_struct *task)
{
	struct perf_event_context *ctx;
	struct perf_event *event;
	unsigned long flags;
	int enabled = 0;
	int ret;

	local_irq_save(flags);
	ctx = task->perf_event_ctxp;
	if (!ctx || !ctx->nr_events)
		goto out;

	__perf_event_task_sched_out(ctx);

	raw_spin_lock(&ctx->lock);

	list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
		ret = event_enable_on_exec(event, ctx);
		if (ret)
			enabled = 1;
	}

	list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
		ret = event_enable_on_exec(event, ctx);
		if (ret)
			enabled = 1;
	}

	/*
	 * Unclone this context if we enabled any event.
	 */
	if (enabled)
		unclone_ctx(ctx);

	raw_spin_unlock(&ctx->lock);

	perf_event_task_sched_in(task);
out:
	local_irq_restore(flags);
}
/*
 * Cross CPU call to read the hardware event
 */
static void __perf_event_read(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu.  If not it has been
	 * scheduled out before the smp call arrived.  In that case
	 * event->count would have been updated to a recent sample
	 * when the event was scheduled out.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	raw_spin_lock(&ctx->lock);
	update_context_time(ctx);
	update_event_times(event);
	raw_spin_unlock(&ctx->lock);

	event->pmu->read(event);
}
static u64 perf_event_read(struct perf_event *event)
{
	/*
	 * If event is enabled and currently active on a CPU, update the
	 * value in the event structure:
	 */
	if (event->state == PERF_EVENT_STATE_ACTIVE) {
		smp_call_function_single(event->oncpu,
					 __perf_event_read, event, 1);
	} else if (event->state == PERF_EVENT_STATE_INACTIVE) {
		struct perf_event_context *ctx = event->ctx;
		unsigned long flags;

		raw_spin_lock_irqsave(&ctx->lock, flags);
		update_context_time(ctx);
		update_event_times(event);
		raw_spin_unlock_irqrestore(&ctx->lock, flags);
	}

	return atomic64_read(&event->count);
}
/*
 * Initialize the perf_event context in a task_struct:
 */
static void
__perf_event_init_context(struct perf_event_context *ctx,
			    struct task_struct *task)
{
	raw_spin_lock_init(&ctx->lock);
	mutex_init(&ctx->mutex);
	INIT_LIST_HEAD(&ctx->pinned_groups);
	INIT_LIST_HEAD(&ctx->flexible_groups);
	INIT_LIST_HEAD(&ctx->event_list);
	atomic_set(&ctx->refcount, 1);
	ctx->task = task;
}
static struct perf_event_context *find_get_context(pid_t pid, int cpu)
{
	struct perf_event_context *ctx;
	struct perf_cpu_context *cpuctx;
	struct task_struct *task;
	unsigned long flags;
	int err;

	if (pid == -1 && cpu != -1) {
		/* Must be root to operate on a CPU event: */
		if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
			return ERR_PTR(-EACCES);

		if (cpu < 0 || cpu >= nr_cpumask_bits)
			return ERR_PTR(-EINVAL);

		/*
		 * We could be clever and allow to attach an event to an
		 * offline CPU and activate it when the CPU comes up, but
		 * that's for later.
		 */
		if (!cpu_online(cpu))
			return ERR_PTR(-ENODEV);

		cpuctx = &per_cpu(perf_cpu_context, cpu);
		ctx = &cpuctx->ctx;
		get_ctx(ctx);

		return ctx;
	}

	rcu_read_lock();
	if (!pid)
		task = current;
	else
		task = find_task_by_vpid(pid);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();

	if (!task)
		return ERR_PTR(-ESRCH);

	/*
	 * Can't attach events to a dying task.
	 */
	err = -ESRCH;
	if (task->flags & PF_EXITING)
		goto errout;

	/* Reuse ptrace permission checks for now. */
	err = -EACCES;
	if (!ptrace_may_access(task, PTRACE_MODE_READ))
		goto errout;

retry:
	ctx = perf_lock_task_context(task, &flags);
	if (ctx) {
		unclone_ctx(ctx);
		raw_spin_unlock_irqrestore(&ctx->lock, flags);
	}

	if (!ctx) {
		ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
		err = -ENOMEM;
		if (!ctx)
			goto errout;
		__perf_event_init_context(ctx, task);
		get_ctx(ctx);
		if (cmpxchg(&task->perf_event_ctxp, NULL, ctx)) {
			/*
			 * We raced with some other task; use
			 * the context they set.
			 */
			kfree(ctx);
			goto retry;
		}
		get_task_struct(task);
	}

	put_task_struct(task);
	return ctx;

errout:
	put_task_struct(task);
	return ERR_PTR(err);
}
static void perf_event_free_filter(struct perf_event *event);

static void free_event_rcu(struct rcu_head *head)
{
	struct perf_event *event;

	event = container_of(head, struct perf_event, rcu_head);
	if (event->ns)
		put_pid_ns(event->ns);
	perf_event_free_filter(event);
	kfree(event);
}
static void perf_pending_sync(struct perf_event *event);

static void free_event(struct perf_event *event)
{
	perf_pending_sync(event);

	if (!event->parent) {
		atomic_dec(&nr_events);
		if (event->attr.mmap)
			atomic_dec(&nr_mmap_events);
		if (event->attr.comm)
			atomic_dec(&nr_comm_events);
		if (event->attr.task)
			atomic_dec(&nr_task_events);
	}

	if (event->output) {
		fput(event->output->filp);
		event->output = NULL;
	}

	if (event->destroy)
		event->destroy(event);

	put_ctx(event->ctx);
	call_rcu(&event->rcu_head, free_event_rcu);
}
int perf_event_release_kernel(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;

	event->state = PERF_EVENT_STATE_FREE;

	WARN_ON_ONCE(ctx->parent_ctx);
	mutex_lock(&ctx->mutex);
	perf_event_remove_from_context(event);
	mutex_unlock(&ctx->mutex);

	mutex_lock(&event->owner->perf_event_mutex);
	list_del_init(&event->owner_entry);
	mutex_unlock(&event->owner->perf_event_mutex);
	put_task_struct(event->owner);

	free_event(event);

	return 0;
}
EXPORT_SYMBOL_GPL(perf_event_release_kernel);
/*
 * Called when the last reference to the file is gone.
 */
static int perf_release(struct inode *inode, struct file *file)
{
	struct perf_event *event = file->private_data;

	file->private_data = NULL;

	return perf_event_release_kernel(event);
}
static int perf_event_read_size(struct perf_event *event)
{
	int entry = sizeof(u64); /* value */
	int size = 0;
	int nr = 1;

	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		size += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		size += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_ID)
		entry += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_GROUP) {
		nr += event->group_leader->nr_siblings;
		size += sizeof(u64);
	}

	size += entry * nr;

	return size;
}
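/*
 * Example (illustrative): with PERF_FORMAT_GROUP | PERF_FORMAT_ID and a
 * leader with two siblings, a read() returns, in u64 units:
 *
 *	{ nr = 3 }
 *	{ value, id }	for the leader
 *	{ value, id }	for sibling 1
 *	{ value, id }	for sibling 2
 *
 * which is what the nr/entry/size arithmetic above accounts for.
 */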
u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
{
	struct perf_event *child;
	u64 total = 0;

	*enabled = 0;
	*running = 0;

	mutex_lock(&event->child_mutex);
	total += perf_event_read(event);
	*enabled += event->total_time_enabled +
			atomic64_read(&event->child_total_time_enabled);
	*running += event->total_time_running +
			atomic64_read(&event->child_total_time_running);

	list_for_each_entry(child, &event->child_list, child_list) {
		total += perf_event_read(child);
		*enabled += child->total_time_enabled;
		*running += child->total_time_running;
	}
	mutex_unlock(&event->child_mutex);

	return total;
}
EXPORT_SYMBOL_GPL(perf_event_read_value);
static int perf_event_read_group(struct perf_event *event,
				   u64 read_format, char __user *buf)
{
	struct perf_event *leader = event->group_leader, *sub;
	int n = 0, size = 0, ret = -EFAULT;
	struct perf_event_context *ctx = leader->ctx;
	u64 values[5];
	u64 count, enabled, running;

	mutex_lock(&ctx->mutex);
	count = perf_event_read_value(leader, &enabled, &running);

	values[n++] = 1 + leader->nr_siblings;
	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		values[n++] = enabled;
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		values[n++] = running;
	values[n++] = count;
	if (read_format & PERF_FORMAT_ID)
		values[n++] = primary_event_id(leader);

	size = n * sizeof(u64);

	if (copy_to_user(buf, values, size))
		goto unlock;

	ret = size;

	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
		n = 0;

		values[n++] = perf_event_read_value(sub, &enabled, &running);
		if (read_format & PERF_FORMAT_ID)
			values[n++] = primary_event_id(sub);

		size = n * sizeof(u64);

		if (copy_to_user(buf + ret, values, size)) {
			ret = -EFAULT;
			goto unlock;
		}

		ret += size;
	}
unlock:
	mutex_unlock(&ctx->mutex);

	return ret;
}
static int perf_event_read_one(struct perf_event *event,
				 u64 read_format, char __user *buf)
{
	u64 enabled, running;
	u64 values[4];
	int n = 0;

	values[n++] = perf_event_read_value(event, &enabled, &running);
	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		values[n++] = enabled;
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		values[n++] = running;
	if (read_format & PERF_FORMAT_ID)
		values[n++] = primary_event_id(event);

	if (copy_to_user(buf, values, n * sizeof(u64)))
		return -EFAULT;

	return n * sizeof(u64);
}
/*
 * Read the performance event - simple non blocking version for now
 */
static ssize_t
perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
{
	u64 read_format = event->attr.read_format;
	int ret;

	/*
	 * Return end-of-file for a read on an event that is in
	 * error state (i.e. because it was pinned but it couldn't be
	 * scheduled on to the CPU at some point).
	 */
	if (event->state == PERF_EVENT_STATE_ERROR)
		return 0;

	if (count < perf_event_read_size(event))
		return -ENOSPC;

	WARN_ON_ONCE(event->ctx->parent_ctx);
	if (read_format & PERF_FORMAT_GROUP)
		ret = perf_event_read_group(event, read_format, buf);
	else
		ret = perf_event_read_one(event, read_format, buf);

	return ret;
}
static ssize_t
perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct perf_event *event = file->private_data;

	return perf_read_hw(event, buf, count);
}
static unsigned int perf_poll(struct file *file, poll_table *wait)
{
	struct perf_event *event = file->private_data;
	struct perf_mmap_data *data;
	unsigned int events = POLL_HUP;

	rcu_read_lock();
	data = rcu_dereference(event->data);
	if (data)
		events = atomic_xchg(&data->poll, 0);
	rcu_read_unlock();

	poll_wait(file, &event->waitq, wait);

	return events;
}
static void perf_event_reset(struct perf_event *event)
{
	(void)perf_event_read(event);
	atomic64_set(&event->count, 0);
	perf_event_update_userpage(event);
}
/*
 * Holding the top-level event's child_mutex means that any
 * descendant process that has inherited this event will block
 * in sync_child_event if it goes to exit, thus satisfying the
 * task existence requirements of perf_event_enable/disable.
 */
static void perf_event_for_each_child(struct perf_event *event,
					void (*func)(struct perf_event *))
{
	struct perf_event *child;

	WARN_ON_ONCE(event->ctx->parent_ctx);
	mutex_lock(&event->child_mutex);
	func(event);
	list_for_each_entry(child, &event->child_list, child_list)
		func(child);
	mutex_unlock(&event->child_mutex);
}
static void perf_event_for_each(struct perf_event *event,
				  void (*func)(struct perf_event *))
{
	struct perf_event_context *ctx = event->ctx;
	struct perf_event *sibling;

	WARN_ON_ONCE(ctx->parent_ctx);
	mutex_lock(&ctx->mutex);
	event = event->group_leader;

	perf_event_for_each_child(event, func);
	list_for_each_entry(sibling, &event->sibling_list, group_entry)
		perf_event_for_each_child(event, func);
	mutex_unlock(&ctx->mutex);
}
static int perf_event_period(struct perf_event *event, u64 __user *arg)
{
	struct perf_event_context *ctx = event->ctx;
	unsigned long size;
	int ret = 0;
	u64 value;

	if (!event->attr.sample_period)
		return -EINVAL;

	size = copy_from_user(&value, arg, sizeof(value));
	if (size != sizeof(value))
		return -EFAULT;

	if (!value)
		return -EINVAL;

	raw_spin_lock_irq(&ctx->lock);
	if (event->attr.freq) {
		if (value > sysctl_perf_event_sample_rate) {
			ret = -EINVAL;
			goto unlock;
		}

		event->attr.sample_freq = value;
	} else {
		event->attr.sample_period = value;
		event->hw.sample_period = value;
	}
unlock:
	raw_spin_unlock_irq(&ctx->lock);

	return ret;
}
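/*
 * Userspace view (illustrative sketch only): the new period/frequency
 * is passed by pointer, e.g.
 *
 *	u64 period = 100000;
 *	ioctl(perf_fd, PERF_EVENT_IOC_PERIOD, &period);
 *
 * For freq-based events the value is interpreted as a new sample_freq
 * and bounded by sysctl_perf_event_sample_rate, otherwise it becomes
 * the new sample_period.
 */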
static int perf_event_set_output(struct perf_event *event, int output_fd);
static int perf_event_set_filter(struct perf_event *event, void __user *arg);

static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct perf_event *event = file->private_data;
	void (*func)(struct perf_event *);
	u32 flags = arg;

	switch (cmd) {
	case PERF_EVENT_IOC_ENABLE:
		func = perf_event_enable;
		break;
	case PERF_EVENT_IOC_DISABLE:
		func = perf_event_disable;
		break;
	case PERF_EVENT_IOC_RESET:
		func = perf_event_reset;
		break;

	case PERF_EVENT_IOC_REFRESH:
		return perf_event_refresh(event, arg);

	case PERF_EVENT_IOC_PERIOD:
		return perf_event_period(event, (u64 __user *)arg);

	case PERF_EVENT_IOC_SET_OUTPUT:
		return perf_event_set_output(event, arg);

	case PERF_EVENT_IOC_SET_FILTER:
		return perf_event_set_filter(event, (void __user *)arg);

	default:
		return -ENOTTY;
	}

	if (flags & PERF_IOC_FLAG_GROUP)
		perf_event_for_each(event, func);
	else
		perf_event_for_each_child(event, func);

	return 0;
}
int perf_event_task_enable(void)
{
	struct perf_event *event;

	mutex_lock(&current->perf_event_mutex);
	list_for_each_entry(event, &current->perf_event_list, owner_entry)
		perf_event_for_each_child(event, perf_event_enable);
	mutex_unlock(&current->perf_event_mutex);

	return 0;
}

int perf_event_task_disable(void)
{
	struct perf_event *event;

	mutex_lock(&current->perf_event_mutex);
	list_for_each_entry(event, &current->perf_event_list, owner_entry)
		perf_event_for_each_child(event, perf_event_disable);
	mutex_unlock(&current->perf_event_mutex);

	return 0;
}
#ifndef PERF_EVENT_INDEX_OFFSET
# define PERF_EVENT_INDEX_OFFSET 0
#endif

static int perf_event_index(struct perf_event *event)
{
	if (event->state != PERF_EVENT_STATE_ACTIVE)
		return 0;

	return event->hw.idx + 1 - PERF_EVENT_INDEX_OFFSET;
}
/*
 * Callers need to ensure there can be no nesting of this function, otherwise
 * the seqlock logic goes bad. We can not serialize this because the arch
 * code calls this from NMI context.
 */
void perf_event_update_userpage(struct perf_event *event)
{
	struct perf_event_mmap_page *userpg;
	struct perf_mmap_data *data;

	rcu_read_lock();
	data = rcu_dereference(event->data);
	if (!data)
		goto unlock;

	userpg = data->user_page;

	/*
	 * Disable preemption so as to not let the corresponding user-space
	 * spin too long if we get preempted.
	 */
	preempt_disable();
	++userpg->lock;
	barrier();
	userpg->index = perf_event_index(event);
	userpg->offset = atomic64_read(&event->count);
	if (event->state == PERF_EVENT_STATE_ACTIVE)
		userpg->offset -= atomic64_read(&event->hw.prev_count);

	userpg->time_enabled = event->total_time_enabled +
			atomic64_read(&event->child_total_time_enabled);

	userpg->time_running = event->total_time_running +
			atomic64_read(&event->child_total_time_running);

	barrier();
	++userpg->lock;
	preempt_enable();
unlock:
	rcu_read_unlock();
}
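/*
 * The matching user-space read is a classic seqcount loop; roughly
 * (illustrative only, error handling omitted):
 *
 *	do {
 *		seq = pg->lock;		// even outside a writer section
 *		barrier();
 *		idx = pg->index; off = pg->offset;
 *		barrier();
 *	} while (pg->lock != seq || (seq & 1));
 *
 * which is why nesting perf_event_update_userpage() would break the
 * odd/even protocol mentioned above.
 */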
static unsigned long perf_data_size(struct perf_mmap_data *data)
{
	return data->nr_pages << (PAGE_SHIFT + data->data_order);
}
#ifndef CONFIG_PERF_USE_VMALLOC

/*
 * Back perf_mmap() with regular GFP_KERNEL-0 pages.
 */

static struct page *
perf_mmap_to_page(struct perf_mmap_data *data, unsigned long pgoff)
{
	if (pgoff > data->nr_pages)
		return NULL;

	if (pgoff == 0)
		return virt_to_page(data->user_page);

	return virt_to_page(data->data_pages[pgoff - 1]);
}

static struct perf_mmap_data *
perf_mmap_data_alloc(struct perf_event *event, int nr_pages)
{
	struct perf_mmap_data *data;
	unsigned long size;
	int i;

	WARN_ON(atomic_read(&event->mmap_count));

	size = sizeof(struct perf_mmap_data);
	size += nr_pages * sizeof(void *);

	data = kzalloc(size, GFP_KERNEL);
	if (!data)
		goto fail;

	data->user_page = (void *)get_zeroed_page(GFP_KERNEL);
	if (!data->user_page)
		goto fail_user_page;

	for (i = 0; i < nr_pages; i++) {
		data->data_pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
		if (!data->data_pages[i])
			goto fail_data_pages;
	}

	data->data_order = 0;
	data->nr_pages = nr_pages;

	return data;

fail_data_pages:
	for (i--; i >= 0; i--)
		free_page((unsigned long)data->data_pages[i]);

	free_page((unsigned long)data->user_page);
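fail_user_page:
	kfree(data);

fail:
	return NULL;
}

/*
 * Layout reminder (informal): the buffer a process mmap()s is always
 * one control page (struct perf_event_mmap_page) followed by nr_pages
 * data pages, so pgoff 0 maps user_page and pgoff N maps
 * data_pages[N - 1] above; with the vmalloc variant further below the
 * same offsets land inside one contiguous vmalloc area instead.
 */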
static void perf_mmap_free_page(unsigned long addr)
{
	struct page *page = virt_to_page((void *)addr);

	page->mapping = NULL;
	__free_page(page);
}

static void perf_mmap_data_free(struct perf_mmap_data *data)
{
	int i;

	perf_mmap_free_page((unsigned long)data->user_page);
	for (i = 0; i < data->nr_pages; i++)
		perf_mmap_free_page((unsigned long)data->data_pages[i]);
	kfree(data);
}
#else

/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */

static struct page *
perf_mmap_to_page(struct perf_mmap_data *data, unsigned long pgoff)
{
	if (pgoff > (1UL << data->data_order))
		return NULL;

	return vmalloc_to_page((void *)data->user_page + pgoff * PAGE_SIZE);
}

static void perf_mmap_unmark_page(void *addr)
{
	struct page *page = vmalloc_to_page(addr);

	page->mapping = NULL;
}

static void perf_mmap_data_free_work(struct work_struct *work)
{
	struct perf_mmap_data *data;
	void *base;
	int i, nr;

	data = container_of(work, struct perf_mmap_data, work);
	nr = 1 << data->data_order;

	base = data->user_page;
	for (i = 0; i < nr + 1; i++)
		perf_mmap_unmark_page(base + (i * PAGE_SIZE));

	vfree(base);
	kfree(data);
}

static void perf_mmap_data_free(struct perf_mmap_data *data)
{
	schedule_work(&data->work);
}
static struct perf_mmap_data *
perf_mmap_data_alloc(struct perf_event *event, int nr_pages)
{
	struct perf_mmap_data *data;
	unsigned long size;
	void *all_buf;

	WARN_ON(atomic_read(&event->mmap_count));

	size = sizeof(struct perf_mmap_data);
	size += sizeof(void *);

	data = kzalloc(size, GFP_KERNEL);
	if (!data)
		goto fail;

	INIT_WORK(&data->work, perf_mmap_data_free_work);

	all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
	if (!all_buf)
		goto fail_all_buf;

	data->user_page = all_buf;
	data->data_pages[0] = all_buf + PAGE_SIZE;
	data->data_order = ilog2(nr_pages);
	data->nr_pages = 1;

	return data;

fail_all_buf:
	kfree(data);

fail:
	return NULL;
}

#endif
static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct perf_event *event = vma->vm_file->private_data;
	struct perf_mmap_data *data;
	int ret = VM_FAULT_SIGBUS;

	if (vmf->flags & FAULT_FLAG_MKWRITE) {
		if (vmf->pgoff == 0)
			ret = 0;
		return ret;
	}

	rcu_read_lock();
	data = rcu_dereference(event->data);
	if (!data)
		goto unlock;

	if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
		goto unlock;

	vmf->page = perf_mmap_to_page(data, vmf->pgoff);
	if (!vmf->page)
		goto unlock;

	get_page(vmf->page);
	vmf->page->mapping = vma->vm_file->f_mapping;
	vmf->page->index   = vmf->pgoff;

	ret = 0;
unlock:
	rcu_read_unlock();

	return ret;
}
static void
perf_mmap_data_init(struct perf_event *event, struct perf_mmap_data *data)
{
	long max_size = perf_data_size(data);

	atomic_set(&data->lock, -1);

	if (event->attr.watermark) {
		data->watermark = min_t(long, max_size,
					event->attr.wakeup_watermark);
	}

	if (!data->watermark)
		data->watermark = max_size / 2;

	rcu_assign_pointer(event->data, data);
}
static void perf_mmap_data_free_rcu(struct rcu_head *rcu_head)
{
	struct perf_mmap_data *data;

	data = container_of(rcu_head, struct perf_mmap_data, rcu_head);
	perf_mmap_data_free(data);
	kfree(data);
}

static void perf_mmap_data_release(struct perf_event *event)
{
	struct perf_mmap_data *data = event->data;

	WARN_ON(atomic_read(&event->mmap_count));

	rcu_assign_pointer(event->data, NULL);
	call_rcu(&data->rcu_head, perf_mmap_data_free_rcu);
}
static void perf_mmap_open(struct vm_area_struct *vma)
{
	struct perf_event *event = vma->vm_file->private_data;

	atomic_inc(&event->mmap_count);
}

static void perf_mmap_close(struct vm_area_struct *vma)
{
	struct perf_event *event = vma->vm_file->private_data;

	WARN_ON_ONCE(event->ctx->parent_ctx);
	if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
		unsigned long size = perf_data_size(event->data);
		struct user_struct *user = current_user();

		atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
		vma->vm_mm->locked_vm -= event->data->nr_locked;
		perf_mmap_data_release(event);
		mutex_unlock(&event->mmap_mutex);
	}
}

static const struct vm_operations_struct perf_mmap_vmops = {
	.open		= perf_mmap_open,
	.close		= perf_mmap_close,
	.fault		= perf_mmap_fault,
	.page_mkwrite	= perf_mmap_fault,
};
static int perf_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct perf_event *event = file->private_data;
	unsigned long user_locked, user_lock_limit;
	struct user_struct *user = current_user();
	unsigned long locked, lock_limit;
	struct perf_mmap_data *data;
	unsigned long vma_size;
	unsigned long nr_pages;
	long user_extra, extra;
	int ret = 0;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma_size = vma->vm_end - vma->vm_start;
	nr_pages = (vma_size / PAGE_SIZE) - 1;

	/*
	 * If we have data pages ensure they're a power-of-two number, so we
	 * can do bitmasks instead of modulo.
	 */
	if (nr_pages != 0 && !is_power_of_2(nr_pages))
		return -EINVAL;

	if (vma_size != PAGE_SIZE * (1 + nr_pages))
		return -EINVAL;

	if (vma->vm_pgoff != 0)
		return -EINVAL;

	WARN_ON_ONCE(event->ctx->parent_ctx);
	mutex_lock(&event->mmap_mutex);
	if (event->output) {
		ret = -EINVAL;
		goto unlock;
	}

	if (atomic_inc_not_zero(&event->mmap_count)) {
		if (nr_pages != event->data->nr_pages)
			ret = -EINVAL;
		goto unlock;
	}

	user_extra = nr_pages + 1;
	user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);

	/*
	 * Increase the limit linearly with more CPUs:
	 */
	user_lock_limit *= num_online_cpus();

	user_locked = atomic_long_read(&user->locked_vm) + user_extra;

	extra = 0;
	if (user_locked > user_lock_limit)
		extra = user_locked - user_lock_limit;

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;
	locked = vma->vm_mm->locked_vm + extra;

	if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
		!capable(CAP_IPC_LOCK)) {
		ret = -EPERM;
		goto unlock;
	}

	WARN_ON(event->data);

	data = perf_mmap_data_alloc(event, nr_pages);
	ret = -ENOMEM;
	if (!data)
		goto unlock;

	ret = 0;
	perf_mmap_data_init(event, data);

	atomic_set(&event->mmap_count, 1);
	atomic_long_add(user_extra, &user->locked_vm);
	vma->vm_mm->locked_vm += extra;
	event->data->nr_locked = extra;
	if (vma->vm_flags & VM_WRITE)
		event->data->writable = 1;

unlock:
	mutex_unlock(&event->mmap_mutex);

	vma->vm_flags |= VM_RESERVED;
	vma->vm_ops = &perf_mmap_vmops;

	return ret;
}
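/*
 * Illustrative user-space sketch, not part of the original file: the length
 * handed to mmap() must be one control page plus a power-of-two number of
 * data pages, matching the checks in perf_mmap() above.  "fd" is assumed to
 * be a descriptor returned by sys_perf_event_open(); the helper name
 * map_perf_buffer() is made up for this example.
 */
#if 0	/* user-space example, shown for illustration only */
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

static void *map_perf_buffer(int fd, unsigned int data_pages)
{
	size_t page = (size_t)sysconf(_SC_PAGESIZE);
	size_t len = (1 + data_pages) * page;	/* data_pages must be 2^n */
	void *base;

	base = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (base == MAP_FAILED) {
		perror("mmap");
		return NULL;
	}
	/* base is the control page; sample data starts at base + page */
	return base;
}
#endif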
static int perf_fasync(int fd, struct file *filp, int on)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct perf_event *event = filp->private_data;
	int retval;

	mutex_lock(&inode->i_mutex);
	retval = fasync_helper(fd, filp, on, &event->fasync);
	mutex_unlock(&inode->i_mutex);

	if (retval < 0)
		return retval;

	return 0;
}

static const struct file_operations perf_fops = {
	.llseek			= no_llseek,
	.release		= perf_release,
	.read			= perf_read,
	.poll			= perf_poll,
	.unlocked_ioctl		= perf_ioctl,
	.compat_ioctl		= perf_ioctl,
	.mmap			= perf_mmap,
	.fasync			= perf_fasync,
};
/*
 * If there's data, ensure we set the poll() state and publish everything
 * to user-space before waking everybody up.
 */

void perf_event_wakeup(struct perf_event *event)
{
	wake_up_all(&event->waitq);

	if (event->pending_kill) {
		kill_fasync(&event->fasync, SIGIO, event->pending_kill);
		event->pending_kill = 0;
	}
}

/*
 * Handle the case where we need to wake up from NMI (or rq->lock) context.
 *
 * The NMI bit means we cannot possibly take locks. Therefore, maintain a
 * single linked list and use cmpxchg() to add entries lockless.
 */

static void perf_pending_event(struct perf_pending_entry *entry)
{
	struct perf_event *event = container_of(entry,
			struct perf_event, pending);

	if (event->pending_disable) {
		event->pending_disable = 0;
		__perf_event_disable(event);
	}

	if (event->pending_wakeup) {
		event->pending_wakeup = 0;
		perf_event_wakeup(event);
	}
}
#define PENDING_TAIL ((struct perf_pending_entry *)-1UL)

static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = {
	PENDING_TAIL,
};

static void perf_pending_queue(struct perf_pending_entry *entry,
			       void (*func)(struct perf_pending_entry *))
{
	struct perf_pending_entry **head;

	if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL)
		return;

	entry->func = func;

	head = &get_cpu_var(perf_pending_head);

	do {
		entry->next = *head;
	} while (cmpxchg(head, entry->next, entry) != entry->next);

	set_perf_event_pending();

	put_cpu_var(perf_pending_head);
}
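/*
 * Illustrative sketch, not part of the original file: the same lock-free
 * push pattern used above, written as a stand-alone user-space helper with
 * a GCC builtin standing in for cmpxchg().  Names are made up for the
 * example.
 */
#if 0
struct node {
	struct node *next;
};

/* Point the new node at the current head and only commit with a
 * compare-and-swap, so a concurrent push from interrupt context is never
 * lost; retry if someone else won the race. */
static void lockless_push(struct node **head, struct node *node)
{
	struct node *old;

	do {
		old = *head;
		node->next = old;
	} while (__sync_val_compare_and_swap(head, old, node) != old);
}
#endif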
static int __perf_pending_run(void)
{
	struct perf_pending_entry *list;
	int nr = 0;

	list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL);
	while (list != PENDING_TAIL) {
		void (*func)(struct perf_pending_entry *);
		struct perf_pending_entry *entry = list;

		list = list->next;

		func = entry->func;
		entry->next = NULL;
		/*
		 * Ensure we observe the unqueue before we issue the wakeup,
		 * so that we won't be waiting forever.
		 * -- see perf_not_pending().
		 */
		smp_wmb();

		func(entry);
		nr++;
	}

	return nr;
}

static inline int perf_not_pending(struct perf_event *event)
{
	/*
	 * If we flush on whatever cpu we run, there is a chance we don't
	 * need to wait.
	 */
	get_cpu();
	__perf_pending_run();
	put_cpu();

	/*
	 * Ensure we see the proper queue state before going to sleep
	 * so that we do not miss the wakeup. -- see perf_pending_handle()
	 */
	smp_rmb();
	return event->pending.next == NULL;
}

static void perf_pending_sync(struct perf_event *event)
{
	wait_event(event->waitq, perf_not_pending(event));
}

void perf_event_do_pending(void)
{
	__perf_pending_run();
}
/*
 * Callchain support -- arch specific
 */

__weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
{
	return NULL;
}

__weak
void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
{
}


/*
 * We assume there is only KVM supporting the callbacks.
 * Later on, we might change it to a list if there is
 * another virtualization implementation supporting the callbacks.
 */
struct perf_guest_info_callbacks *perf_guest_cbs;

int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
{
	perf_guest_cbs = cbs;
	return 0;
}
EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);

int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
{
	perf_guest_cbs = NULL;
	return 0;
}
EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
static bool perf_output_space(struct perf_mmap_data *data, unsigned long tail,
			      unsigned long offset, unsigned long head)
{
	unsigned long mask;

	if (!data->writable)
		return true;

	mask = perf_data_size(data) - 1;

	offset = (offset - tail) & mask;
	head   = (head   - tail) & mask;

	if ((int)(head - offset) < 0)
		return false;

	return true;
}
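/*
 * Worked example, not part of the original file: why the masked distances
 * above detect a writer about to overrun data user-space has not consumed
 * yet.  All numbers are made up; the buffer size just needs to be a power
 * of two so the mask works.
 */
#if 0	/* stand-alone user-space illustration */
#include <stdio.h>

int main(void)
{
	unsigned long size = 8 * 4096;			/* power-of-two data area */
	unsigned long mask = size - 1;
	unsigned long tail = 5 * 4096;			/* last position read by user-space */
	unsigned long offset = 4 * 4096 + size;		/* writer has already wrapped once */
	unsigned long head = offset + 3 * 4096;		/* proposed end of the new record */

	/* distances from the reader, modulo the buffer size */
	unsigned long off_d  = (offset - tail) & mask;
	unsigned long head_d = (head   - tail) & mask;

	/* if head would pass the reader, the record must be dropped */
	printf("space %s\n", (long)(head_d - off_d) < 0 ? "exhausted" : "ok");
	return 0;
}
#endif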
static void perf_output_wakeup(struct perf_output_handle *handle)
{
	atomic_set(&handle->data->poll, POLL_IN);

	if (handle->nmi) {
		handle->event->pending_wakeup = 1;
		perf_pending_queue(&handle->event->pending,
				   perf_pending_event);
	} else
		perf_event_wakeup(handle->event);
}
/*
 * Curious locking construct.
 *
 * We need to ensure a later event_id doesn't publish a head when a former
 * event_id isn't done writing. However since we need to deal with NMIs we
 * cannot fully serialize things.
 *
 * What we do is serialize between CPUs so we only have to deal with NMI
 * nesting on a single CPU.
 *
 * We only publish the head (and generate a wakeup) when the outer-most
 * event_id completes.
 */
static void perf_output_lock(struct perf_output_handle *handle)
{
	struct perf_mmap_data *data = handle->data;
	int cur, cpu = get_cpu();

	handle->locked = 0;

	for (;;) {
		cur = atomic_cmpxchg(&data->lock, -1, cpu);
		if (cur == -1) {
			handle->locked = 1;
			break;
		}
		if (cur == cpu)
			break;

		cpu_relax();
	}
}

static void perf_output_unlock(struct perf_output_handle *handle)
{
	struct perf_mmap_data *data = handle->data;
	unsigned long head;
	int cpu;

	data->done_head = data->head;

	if (!handle->locked)
		goto out;

again:
	/*
	 * The xchg implies a full barrier that ensures all writes are done
	 * before we publish the new head, matched by a rmb() in userspace when
	 * reading this position.
	 */
	while ((head = atomic_long_xchg(&data->done_head, 0)))
		data->user_page->data_head = head;

	/*
	 * NMI can happen here, which means we can miss a done_head update.
	 */

	cpu = atomic_xchg(&data->lock, -1);
	WARN_ON_ONCE(cpu != smp_processor_id());

	/*
	 * Therefore we have to validate we did not indeed do so.
	 */
	if (unlikely(atomic_long_read(&data->done_head))) {
		/*
		 * Since we had it locked, we can lock it again.
		 */
		while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
			cpu_relax();

		goto again;
	}

	if (atomic_xchg(&data->wakeup, 0))
		perf_output_wakeup(handle);
out:
	put_cpu();
}
void perf_output_copy(struct perf_output_handle *handle,
		      const void *buf, unsigned int len)
{
	unsigned int pages_mask;
	unsigned long offset;
	unsigned int size;
	void **pages;

	offset		= handle->offset;
	pages_mask	= handle->data->nr_pages - 1;
	pages		= handle->data->data_pages;

	do {
		unsigned long page_offset;
		unsigned long page_size;
		int nr;

		nr	    = (offset >> PAGE_SHIFT) & pages_mask;
		page_size   = 1UL << (handle->data->data_order + PAGE_SHIFT);
		page_offset = offset & (page_size - 1);
		size	    = min_t(unsigned int, page_size - page_offset, len);

		memcpy(pages[nr] + page_offset, buf, size);

		len	-= size;
		buf	+= size;
		offset	+= size;
	} while (len);

	handle->offset = offset;

	/*
	 * Check we didn't copy past our reservation window, taking the
	 * possible unsigned int wrap into account.
	 */
	WARN_ON_ONCE(((long)(handle->head - handle->offset)) < 0);
}
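/*
 * Illustrative sketch, not part of the original file: the copy above is
 * chunked at page boundaries; the stand-alone loop below just shows how a
 * record length gets split once the write position nears the end of a page.
 * All values are made up.
 */
#if 0	/* stand-alone user-space illustration */
#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096;
	unsigned long offset = 4000;	/* current write position */
	unsigned int len = 200;		/* record crossing a page boundary */

	while (len) {
		unsigned long page_offset = offset & (page_size - 1);
		unsigned int chunk = page_size - page_offset;

		if (chunk > len)
			chunk = len;

		printf("copy %u bytes at page offset %lu\n", chunk, page_offset);
		offset += chunk;
		len -= chunk;
	}
	return 0;
}
#endif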
int perf_output_begin(struct perf_output_handle *handle,
		      struct perf_event *event, unsigned int size,
		      int nmi, int sample)
{
	struct perf_event *output_event;
	struct perf_mmap_data *data;
	unsigned long tail, offset, head;
	int have_lost;
	struct {
		struct perf_event_header header;
		u64			 id;
		u64			 lost;
	} lost_event;

	rcu_read_lock();
	/*
	 * For inherited events we send all the output towards the parent.
	 */
	if (event->parent)
		event = event->parent;

	output_event = rcu_dereference(event->output);
	if (output_event)
		event = output_event;

	data = rcu_dereference(event->data);
	if (!data)
		goto out;

	handle->data	= data;
	handle->event	= event;
	handle->nmi	= nmi;
	handle->sample	= sample;

	if (!data->nr_pages)
		goto fail;

	have_lost = atomic_read(&data->lost);
	if (have_lost)
		size += sizeof(lost_event);

	perf_output_lock(handle);

	do {
		/*
		 * Userspace could choose to issue a mb() before updating the
		 * tail pointer. So that all reads will be completed before the
		 * write is issued.
		 */
		tail = ACCESS_ONCE(data->user_page->data_tail);
		smp_rmb();
		offset = head = atomic_long_read(&data->head);
		head += size;
		if (unlikely(!perf_output_space(data, tail, offset, head)))
			goto fail;
	} while (atomic_long_cmpxchg(&data->head, offset, head) != offset);

	handle->offset	= offset;
	handle->head	= head;

	if (head - tail > data->watermark)
		atomic_set(&data->wakeup, 1);

	if (have_lost) {
		lost_event.header.type = PERF_RECORD_LOST;
		lost_event.header.misc = 0;
		lost_event.header.size = sizeof(lost_event);
		lost_event.id          = event->id;
		lost_event.lost        = atomic_xchg(&data->lost, 0);

		perf_output_put(handle, lost_event);
	}

	return 0;

fail:
	atomic_inc(&data->lost);
	perf_output_unlock(handle);
out:
	rcu_read_unlock();

	return -ENOSPC;
}
void perf_output_end(struct perf_output_handle *handle)
{
	struct perf_event *event = handle->event;
	struct perf_mmap_data *data = handle->data;

	int wakeup_events = event->attr.wakeup_events;

	if (handle->sample && wakeup_events) {
		int events = atomic_inc_return(&data->events);
		if (events >= wakeup_events) {
			atomic_sub(wakeup_events, &data->events);
			atomic_set(&data->wakeup, 1);
		}
	}

	perf_output_unlock(handle);
	rcu_read_unlock();
}
static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
{
	/*
	 * only top level events have the pid namespace they were created in
	 */
	if (event->parent)
		event = event->parent;

	return task_tgid_nr_ns(p, event->ns);
}

static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
{
	/*
	 * only top level events have the pid namespace they were created in
	 */
	if (event->parent)
		event = event->parent;

	return task_pid_nr_ns(p, event->ns);
}
static void perf_output_read_one(struct perf_output_handle *handle,
				 struct perf_event *event)
{
	u64 read_format = event->attr.read_format;
	u64 values[4];
	int n = 0;

	values[n++] = atomic64_read(&event->count);
	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
		values[n++] = event->total_time_enabled +
			atomic64_read(&event->child_total_time_enabled);
	}
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
		values[n++] = event->total_time_running +
			atomic64_read(&event->child_total_time_running);
	}
	if (read_format & PERF_FORMAT_ID)
		values[n++] = primary_event_id(event);

	perf_output_copy(handle, values, n * sizeof(u64));
}

/*
 * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
 */
static void perf_output_read_group(struct perf_output_handle *handle,
			    struct perf_event *event)
{
	struct perf_event *leader = event->group_leader, *sub;
	u64 read_format = event->attr.read_format;
	u64 values[5];
	int n = 0;

	values[n++] = 1 + leader->nr_siblings;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		values[n++] = leader->total_time_enabled;

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		values[n++] = leader->total_time_running;

	if (leader != event)
		leader->pmu->read(leader);

	values[n++] = atomic64_read(&leader->count);
	if (read_format & PERF_FORMAT_ID)
		values[n++] = primary_event_id(leader);

	perf_output_copy(handle, values, n * sizeof(u64));

	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
		n = 0;

		if (sub != event)
			sub->pmu->read(sub);

		values[n++] = atomic64_read(&sub->count);
		if (read_format & PERF_FORMAT_ID)
			values[n++] = primary_event_id(sub);

		perf_output_copy(handle, values, n * sizeof(u64));
	}
}

static void perf_output_read(struct perf_output_handle *handle,
			     struct perf_event *event)
{
	if (event->attr.read_format & PERF_FORMAT_GROUP)
		perf_output_read_group(handle, event);
	else
		perf_output_read_one(handle, event);
}
void perf_output_sample(struct perf_output_handle *handle,
			struct perf_event_header *header,
			struct perf_sample_data *data,
			struct perf_event *event)
{
	u64 sample_type = data->type;

	perf_output_put(handle, *header);

	if (sample_type & PERF_SAMPLE_IP)
		perf_output_put(handle, data->ip);

	if (sample_type & PERF_SAMPLE_TID)
		perf_output_put(handle, data->tid_entry);

	if (sample_type & PERF_SAMPLE_TIME)
		perf_output_put(handle, data->time);

	if (sample_type & PERF_SAMPLE_ADDR)
		perf_output_put(handle, data->addr);

	if (sample_type & PERF_SAMPLE_ID)
		perf_output_put(handle, data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		perf_output_put(handle, data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		perf_output_put(handle, data->cpu_entry);

	if (sample_type & PERF_SAMPLE_PERIOD)
		perf_output_put(handle, data->period);

	if (sample_type & PERF_SAMPLE_READ)
		perf_output_read(handle, event);

	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
		if (data->callchain) {
			int size = 1;

			if (data->callchain)
				size += data->callchain->nr;

			size *= sizeof(u64);

			perf_output_copy(handle, data->callchain, size);
		} else {
			u64 nr = 0;
			perf_output_put(handle, nr);
		}
	}

	if (sample_type & PERF_SAMPLE_RAW) {
		if (data->raw) {
			perf_output_put(handle, data->raw->size);
			perf_output_copy(handle, data->raw->data,
					 data->raw->size);
		} else {
			struct {
				u32	size;
				u32	data;
			} raw = {
				.size = sizeof(u32),
				.data = 0,
			};
			perf_output_put(handle, raw);
		}
	}
}
void perf_prepare_sample(struct perf_event_header *header,
			 struct perf_sample_data *data,
			 struct perf_event *event,
			 struct pt_regs *regs)
{
	u64 sample_type = event->attr.sample_type;

	data->type = sample_type;

	header->type = PERF_RECORD_SAMPLE;
	header->size = sizeof(*header);

	header->misc = 0;
	header->misc |= perf_misc_flags(regs);

	if (sample_type & PERF_SAMPLE_IP) {
		data->ip = perf_instruction_pointer(regs);

		header->size += sizeof(data->ip);
	}

	if (sample_type & PERF_SAMPLE_TID) {
		/* namespace issues */
		data->tid_entry.pid = perf_event_pid(event, current);
		data->tid_entry.tid = perf_event_tid(event, current);

		header->size += sizeof(data->tid_entry);
	}

	if (sample_type & PERF_SAMPLE_TIME) {
		data->time = perf_clock();

		header->size += sizeof(data->time);
	}

	if (sample_type & PERF_SAMPLE_ADDR)
		header->size += sizeof(data->addr);

	if (sample_type & PERF_SAMPLE_ID) {
		data->id = primary_event_id(event);

		header->size += sizeof(data->id);
	}

	if (sample_type & PERF_SAMPLE_STREAM_ID) {
		data->stream_id = event->id;

		header->size += sizeof(data->stream_id);
	}

	if (sample_type & PERF_SAMPLE_CPU) {
		data->cpu_entry.cpu		= raw_smp_processor_id();
		data->cpu_entry.reserved	= 0;

		header->size += sizeof(data->cpu_entry);
	}

	if (sample_type & PERF_SAMPLE_PERIOD)
		header->size += sizeof(data->period);

	if (sample_type & PERF_SAMPLE_READ)
		header->size += perf_event_read_size(event);

	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
		int size = 1;

		data->callchain = perf_callchain(regs);

		if (data->callchain)
			size += data->callchain->nr;

		header->size += size * sizeof(u64);
	}

	if (sample_type & PERF_SAMPLE_RAW) {
		int size = sizeof(u32);

		if (data->raw)
			size += data->raw->size;
		else
			size += sizeof(u32);

		WARN_ON_ONCE(size & (sizeof(u64)-1));
		header->size += size;
	}
}
static void perf_event_output(struct perf_event *event, int nmi,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	struct perf_output_handle handle;
	struct perf_event_header header;

	perf_prepare_sample(&header, data, event, regs);

	if (perf_output_begin(&handle, event, header.size, nmi, 1))
		return;

	perf_output_sample(&handle, &header, data, event);

	perf_output_end(&handle);
}
struct perf_read_event {
	struct perf_event_header	header;

	u32				pid;
	u32				tid;
};

static void
perf_event_read_event(struct perf_event *event,
			struct task_struct *task)
{
	struct perf_output_handle handle;
	struct perf_read_event read_event = {
		.header = {
			.type = PERF_RECORD_READ,
			.misc = 0,
			.size = sizeof(read_event) + perf_event_read_size(event),
		},
		.pid = perf_event_pid(event, task),
		.tid = perf_event_tid(event, task),
	};
	int ret;

	ret = perf_output_begin(&handle, event, read_event.header.size, 0, 0);
	if (ret)
		return;

	perf_output_put(&handle, read_event);
	perf_output_read(&handle, event);

	perf_output_end(&handle);
}
/*
 * task tracking -- fork/exit
 *
 * enabled by: attr.comm | attr.mmap | attr.task
 */

struct perf_task_event {
	struct task_struct		*task;
	struct perf_event_context	*task_ctx;

	struct {
		struct perf_event_header	header;

		u32				pid;
		u32				ppid;
		u32				tid;
		u32				ptid;
		u64				time;
	} event_id;
};

static void perf_event_task_output(struct perf_event *event,
				     struct perf_task_event *task_event)
{
	struct perf_output_handle handle;
	struct task_struct *task = task_event->task;
	unsigned long flags;
	int size, ret;

	/*
	 * If this CPU attempts to acquire an rq lock held by a CPU spinning
	 * in perf_output_lock() from interrupt context, it's game over.
	 */
	local_irq_save(flags);

	size  = task_event->event_id.header.size;
	ret = perf_output_begin(&handle, event, size, 0, 0);

	if (ret) {
		local_irq_restore(flags);
		return;
	}

	task_event->event_id.pid = perf_event_pid(event, task);
	task_event->event_id.ppid = perf_event_pid(event, current);

	task_event->event_id.tid = perf_event_tid(event, task);
	task_event->event_id.ptid = perf_event_tid(event, current);

	perf_output_put(&handle, task_event->event_id);

	perf_output_end(&handle);
	local_irq_restore(flags);
}
static int perf_event_task_match(struct perf_event *event)
{
	if (event->state < PERF_EVENT_STATE_INACTIVE)
		return 0;

	if (event->cpu != -1 && event->cpu != smp_processor_id())
		return 0;

	if (event->attr.comm || event->attr.mmap || event->attr.task)
		return 1;

	return 0;
}

static void perf_event_task_ctx(struct perf_event_context *ctx,
				  struct perf_task_event *task_event)
{
	struct perf_event *event;

	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
		if (perf_event_task_match(event))
			perf_event_task_output(event, task_event);
	}
}

static void perf_event_task_event(struct perf_task_event *task_event)
{
	struct perf_cpu_context *cpuctx;
	struct perf_event_context *ctx = task_event->task_ctx;

	rcu_read_lock();
	cpuctx = &get_cpu_var(perf_cpu_context);
	perf_event_task_ctx(&cpuctx->ctx, task_event);
	if (!ctx)
		ctx = rcu_dereference(current->perf_event_ctxp);
	if (ctx)
		perf_event_task_ctx(ctx, task_event);
	put_cpu_var(perf_cpu_context);
	rcu_read_unlock();
}

static void perf_event_task(struct task_struct *task,
			      struct perf_event_context *task_ctx,
			      int new)
{
	struct perf_task_event task_event;

	if (!atomic_read(&nr_comm_events) &&
	    !atomic_read(&nr_mmap_events) &&
	    !atomic_read(&nr_task_events))
		return;

	task_event = (struct perf_task_event){
		.task	  = task,
		.task_ctx = task_ctx,
		.event_id    = {
			.header = {
				.type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
				.misc = 0,
				.size = sizeof(task_event.event_id),
			},
			/* .pid  */
			/* .ppid */
			/* .tid  */
			/* .ptid */
			.time = perf_clock(),
		},
	};

	perf_event_task_event(&task_event);
}

void perf_event_fork(struct task_struct *task)
{
	perf_event_task(task, NULL, 1);
}
/*
 * comm tracking
 */

struct perf_comm_event {
	struct task_struct	*task;
	char			*comm;
	int			comm_size;

	struct {
		struct perf_event_header	header;

		u32				pid;
		u32				tid;
	} event_id;
};

static void perf_event_comm_output(struct perf_event *event,
				     struct perf_comm_event *comm_event)
{
	struct perf_output_handle handle;
	int size = comm_event->event_id.header.size;
	int ret = perf_output_begin(&handle, event, size, 0, 0);

	if (ret)
		return;

	comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
	comm_event->event_id.tid = perf_event_tid(event, comm_event->task);

	perf_output_put(&handle, comm_event->event_id);
	perf_output_copy(&handle, comm_event->comm,
				   comm_event->comm_size);
	perf_output_end(&handle);
}

static int perf_event_comm_match(struct perf_event *event)
{
	if (event->state < PERF_EVENT_STATE_INACTIVE)
		return 0;

	if (event->cpu != -1 && event->cpu != smp_processor_id())
		return 0;

	if (event->attr.comm)
		return 1;

	return 0;
}

static void perf_event_comm_ctx(struct perf_event_context *ctx,
				  struct perf_comm_event *comm_event)
{
	struct perf_event *event;

	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
		if (perf_event_comm_match(event))
			perf_event_comm_output(event, comm_event);
	}
}

static void perf_event_comm_event(struct perf_comm_event *comm_event)
{
	struct perf_cpu_context *cpuctx;
	struct perf_event_context *ctx;
	unsigned int size;
	char comm[TASK_COMM_LEN];

	memset(comm, 0, sizeof(comm));
	strlcpy(comm, comm_event->task->comm, sizeof(comm));
	size = ALIGN(strlen(comm)+1, sizeof(u64));

	comm_event->comm = comm;
	comm_event->comm_size = size;

	comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;

	rcu_read_lock();
	cpuctx = &get_cpu_var(perf_cpu_context);
	perf_event_comm_ctx(&cpuctx->ctx, comm_event);
	ctx = rcu_dereference(current->perf_event_ctxp);
	if (ctx)
		perf_event_comm_ctx(ctx, comm_event);
	put_cpu_var(perf_cpu_context);
	rcu_read_unlock();
}

void perf_event_comm(struct task_struct *task)
{
	struct perf_comm_event comm_event;

	if (task->perf_event_ctxp)
		perf_event_enable_on_exec(task);

	if (!atomic_read(&nr_comm_events))
		return;

	comm_event = (struct perf_comm_event){
		.task	= task,
		/* .comm      */
		/* .comm_size */
		.event_id  = {
			.header = {
				.type = PERF_RECORD_COMM,
				.misc = 0,
				/* .size */
			},
			/* .pid */
			/* .tid */
		},
	};

	perf_event_comm_event(&comm_event);
}
/*
 * mmap tracking
 */

struct perf_mmap_event {
	struct vm_area_struct	*vma;

	const char		*file_name;
	int			file_size;

	struct {
		struct perf_event_header	header;

		u32				pid;
		u32				tid;
		u64				start;
		u64				len;
		u64				pgoff;
	} event_id;
};

static void perf_event_mmap_output(struct perf_event *event,
				     struct perf_mmap_event *mmap_event)
{
	struct perf_output_handle handle;
	int size = mmap_event->event_id.header.size;
	int ret = perf_output_begin(&handle, event, size, 0, 0);

	if (ret)
		return;

	mmap_event->event_id.pid = perf_event_pid(event, current);
	mmap_event->event_id.tid = perf_event_tid(event, current);

	perf_output_put(&handle, mmap_event->event_id);
	perf_output_copy(&handle, mmap_event->file_name,
				   mmap_event->file_size);
	perf_output_end(&handle);
}

static int perf_event_mmap_match(struct perf_event *event,
				   struct perf_mmap_event *mmap_event)
{
	if (event->state < PERF_EVENT_STATE_INACTIVE)
		return 0;

	if (event->cpu != -1 && event->cpu != smp_processor_id())
		return 0;

	if (event->attr.mmap)
		return 1;

	return 0;
}

static void perf_event_mmap_ctx(struct perf_event_context *ctx,
				  struct perf_mmap_event *mmap_event)
{
	struct perf_event *event;

	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
		if (perf_event_mmap_match(event, mmap_event))
			perf_event_mmap_output(event, mmap_event);
	}
}

static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
{
	struct perf_cpu_context *cpuctx;
	struct perf_event_context *ctx;
	struct vm_area_struct *vma = mmap_event->vma;
	struct file *file = vma->vm_file;
	unsigned int size;
	char tmp[16];
	char *buf = NULL;
	const char *name;

	memset(tmp, 0, sizeof(tmp));

	if (file) {
		/*
		 * d_path works from the end of the buffer backwards, so we
		 * need to add enough zero bytes after the string to handle
		 * the 64bit alignment we do later.
		 */
		buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
		if (!buf) {
			name = strncpy(tmp, "//enomem", sizeof(tmp));
			goto got_name;
		}
		name = d_path(&file->f_path, buf, PATH_MAX);
		if (IS_ERR(name)) {
			name = strncpy(tmp, "//toolong", sizeof(tmp));
			goto got_name;
		}
	} else {
		if (arch_vma_name(mmap_event->vma)) {
			name = strncpy(tmp, arch_vma_name(mmap_event->vma),
				       sizeof(tmp));
			goto got_name;
		}

		if (!vma->vm_mm) {
			name = strncpy(tmp, "[vdso]", sizeof(tmp));
			goto got_name;
		}

		name = strncpy(tmp, "//anon", sizeof(tmp));
		goto got_name;
	}

got_name:
	size = ALIGN(strlen(name)+1, sizeof(u64));

	mmap_event->file_name = name;
	mmap_event->file_size = size;

	mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;

	rcu_read_lock();
	cpuctx = &get_cpu_var(perf_cpu_context);
	perf_event_mmap_ctx(&cpuctx->ctx, mmap_event);
	ctx = rcu_dereference(current->perf_event_ctxp);
	if (ctx)
		perf_event_mmap_ctx(ctx, mmap_event);
	put_cpu_var(perf_cpu_context);
	rcu_read_unlock();

	kfree(buf);
}

void __perf_event_mmap(struct vm_area_struct *vma)
{
	struct perf_mmap_event mmap_event;

	if (!atomic_read(&nr_mmap_events))
		return;

	mmap_event = (struct perf_mmap_event){
		.vma	= vma,
		/* .file_name */
		/* .file_size */
		.event_id  = {
			.header = {
				.type = PERF_RECORD_MMAP,
				.misc = PERF_RECORD_MISC_USER,
				/* .size */
			},
			/* .pid */
			/* .tid */
			.start  = vma->vm_start,
			.len    = vma->vm_end - vma->vm_start,
			.pgoff  = (u64)vma->vm_pgoff << PAGE_SHIFT,
		},
	};

	perf_event_mmap_event(&mmap_event);
}
/*
 * IRQ throttle logging
 */

static void perf_log_throttle(struct perf_event *event, int enable)
{
	struct perf_output_handle handle;
	int ret;

	struct {
		struct perf_event_header	header;
		u64				time;
		u64				id;
		u64				stream_id;
	} throttle_event = {
		.header = {
			.type = PERF_RECORD_THROTTLE,
			.misc = 0,
			.size = sizeof(throttle_event),
		},
		.time		= perf_clock(),
		.id		= primary_event_id(event),
		.stream_id	= event->id,
	};

	if (enable)
		throttle_event.header.type = PERF_RECORD_UNTHROTTLE;

	ret = perf_output_begin(&handle, event, sizeof(throttle_event), 1, 0);
	if (ret)
		return;

	perf_output_put(&handle, throttle_event);
	perf_output_end(&handle);
}
/*
 * Generic event overflow handling, sampling.
 */

static int __perf_event_overflow(struct perf_event *event, int nmi,
				   int throttle, struct perf_sample_data *data,
				   struct pt_regs *regs)
{
	int events = atomic_read(&event->event_limit);
	struct hw_perf_event *hwc = &event->hw;
	int ret = 0;

	throttle = (throttle && event->pmu->unthrottle != NULL);

	if (!throttle) {
		hwc->interrupts++;
	} else {
		if (hwc->interrupts != MAX_INTERRUPTS) {
			hwc->interrupts++;
			if (HZ * hwc->interrupts >
					(u64)sysctl_perf_event_sample_rate) {
				hwc->interrupts = MAX_INTERRUPTS;
				perf_log_throttle(event, 0);
				ret = 1;
			}
		} else {
			/*
			 * Keep re-disabling events even though on the previous
			 * pass we disabled it - just in case we raced with a
			 * sched-in and the event got enabled again:
			 */
			ret = 1;
		}
	}

	if (event->attr.freq) {
		u64 now = perf_clock();
		s64 delta = now - hwc->freq_time_stamp;

		hwc->freq_time_stamp = now;

		if (delta > 0 && delta < 2*TICK_NSEC)
			perf_adjust_period(event, delta, hwc->last_period);
	}

	/*
	 * XXX event_limit might not quite work as expected on inherited
	 * events
	 */

	event->pending_kill = POLL_IN;
	if (events && atomic_dec_and_test(&event->event_limit)) {
		ret = 1;
		event->pending_kill = POLL_HUP;
		if (nmi) {
			event->pending_disable = 1;
			perf_pending_queue(&event->pending,
					   perf_pending_event);
		} else
			perf_event_disable(event);
	}

	if (event->overflow_handler)
		event->overflow_handler(event, nmi, data, regs);
	else
		perf_event_output(event, nmi, data, regs);

	return ret;
}

int perf_event_overflow(struct perf_event *event, int nmi,
			  struct perf_sample_data *data,
			  struct pt_regs *regs)
{
	return __perf_event_overflow(event, nmi, 1, data, regs);
}
/*
 * Generic software event infrastructure
 */

/*
 * We directly increment event->count and keep a second value in
 * event->hw.period_left to count intervals. This period event
 * is kept in the range [-sample_period, 0] so that we can use the
 * sign as trigger.
 */

static u64 perf_swevent_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 period = hwc->last_period;
	u64 nr, offset;
	s64 old, val;

	hwc->last_period = hwc->sample_period;

again:
	old = val = atomic64_read(&hwc->period_left);
	if (val < 0)
		return 0;

	nr = div64_u64(period + val, period);
	offset = nr * period;
	val -= offset;
	if (atomic64_cmpxchg(&hwc->period_left, old, val) != old)
		goto again;

	return nr;
}

static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
				    int nmi, struct perf_sample_data *data,
				    struct pt_regs *regs)
{
	struct hw_perf_event *hwc = &event->hw;
	int throttle = 0;

	data->period = event->hw.last_period;
	if (!overflow)
		overflow = perf_swevent_set_period(event);

	if (hwc->interrupts == MAX_INTERRUPTS)
		return;

	for (; overflow; overflow--) {
		if (__perf_event_overflow(event, nmi, throttle,
					    data, regs)) {
			/*
			 * We inhibit the overflow from happening when
			 * hwc->interrupts == MAX_INTERRUPTS.
			 */
			break;
		}
		throttle = 1;
	}
}

static void perf_swevent_unthrottle(struct perf_event *event)
{
	/*
	 * Nothing to do, we already reset hwc->interrupts.
	 */
}
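/*
 * Worked example, not part of the original file: the period bookkeeping
 * described above keeps period_left in [-sample_period, 0] and reports an
 * overflow each time it crosses zero.  The numbers below are made up.
 */
#if 0	/* stand-alone user-space illustration */
#include <stdio.h>

int main(void)
{
	long long sample_period = 4;
	long long period_left = -4;	/* starts at -sample_period */
	long long i;

	/* count 11 events one at a time; whenever period_left becomes
	 * non-negative, report whole periods and rewind it back into range */
	for (i = 0; i < 11; i++) {
		period_left += 1;
		if (period_left >= 0) {
			long long nr = (sample_period + period_left) / sample_period;

			period_left -= nr * sample_period;
			printf("event %lld: %lld overflow(s), period_left=%lld\n",
			       i, nr, period_left);
		}
	}
	return 0;
}
#endif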
static void perf_swevent_add(struct perf_event *event, u64 nr,
			       int nmi, struct perf_sample_data *data,
			       struct pt_regs *regs)
{
	struct hw_perf_event *hwc = &event->hw;

	atomic64_add(nr, &event->count);

	if (!regs)
		return;

	if (!hwc->sample_period)
		return;

	if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
		return perf_swevent_overflow(event, 1, nmi, data, regs);

	if (atomic64_add_negative(nr, &hwc->period_left))
		return;

	perf_swevent_overflow(event, 0, nmi, data, regs);
}

static int perf_tp_event_match(struct perf_event *event,
				struct perf_sample_data *data);

static int perf_exclude_event(struct perf_event *event,
			      struct pt_regs *regs)
{
	if (regs) {
		if (event->attr.exclude_user && user_mode(regs))
			return 1;

		if (event->attr.exclude_kernel && !user_mode(regs))
			return 1;
	}

	return 0;
}

static int perf_swevent_match(struct perf_event *event,
				enum perf_type_id type,
				u32 event_id,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	if (event->attr.type != type)
		return 0;

	if (event->attr.config != event_id)
		return 0;

	if (perf_exclude_event(event, regs))
		return 0;

	if (event->attr.type == PERF_TYPE_TRACEPOINT &&
	    !perf_tp_event_match(event, data))
		return 0;

	return 1;
}
static inline u64 swevent_hash(u64 type, u32 event_id)
{
	u64 val = event_id | (type << 32);

	return hash_64(val, SWEVENT_HLIST_BITS);
}

static struct hlist_head *
find_swevent_head(struct perf_cpu_context *ctx, u64 type, u32 event_id)
{
	u64 hash;
	struct swevent_hlist *hlist;

	hash = swevent_hash(type, event_id);

	hlist = rcu_dereference(ctx->swevent_hlist);
	if (!hlist)
		return NULL;

	return &hlist->heads[hash];
}
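/*
 * Illustrative sketch, not part of the original file: the hash key above
 * packs the event type into the upper 32 bits and the config into the lower
 * 32 bits before hashing, so distinct (type, id) pairs hash from distinct
 * 64-bit keys.  The values below are made up.
 */
#if 0	/* stand-alone user-space illustration */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t type = 1;		/* e.g. PERF_TYPE_SOFTWARE */
	uint32_t event_id = 3;		/* e.g. a software event config value */
	uint64_t key = (uint64_t)event_id | (type << 32);

	printf("hash key = %#llx\n", (unsigned long long)key);
	return 0;
}
#endif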
static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
				    u64 nr, int nmi,
				    struct perf_sample_data *data,
				    struct pt_regs *regs)
{
	struct perf_cpu_context *cpuctx;
	struct perf_event *event;
	struct hlist_node *node;
	struct hlist_head *head;

	cpuctx = &__get_cpu_var(perf_cpu_context);

	rcu_read_lock();

	head = find_swevent_head(cpuctx, type, event_id);

	if (!head)
		goto end;

	hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
		if (perf_swevent_match(event, type, event_id, data, regs))
			perf_swevent_add(event, nr, nmi, data, regs);
	}
end:
	rcu_read_unlock();
}

int perf_swevent_get_recursion_context(void)
{
	struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
	int rctx;

	if (in_nmi())
		rctx = 3;
	else if (in_irq())
		rctx = 2;
	else if (in_softirq())
		rctx = 1;
	else
		rctx = 0;

	if (cpuctx->recursion[rctx]) {
		put_cpu_var(perf_cpu_context);
		return -1;
	}

	cpuctx->recursion[rctx]++;
	barrier();

	return rctx;
}
EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);

void perf_swevent_put_recursion_context(int rctx)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	barrier();
	cpuctx->recursion[rctx]--;
	put_cpu_var(perf_cpu_context);
}
EXPORT_SYMBOL_GPL(perf_swevent_put_recursion_context);
void __perf_sw_event(u32 event_id, u64 nr, int nmi,
			    struct pt_regs *regs, u64 addr)
{
	struct perf_sample_data data;
	int rctx;

	rctx = perf_swevent_get_recursion_context();
	if (rctx < 0)
		return;

	perf_sample_data_init(&data, addr);

	do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, &data, regs);

	perf_swevent_put_recursion_context(rctx);
}

static void perf_swevent_read(struct perf_event *event)
{
}

static int perf_swevent_enable(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct perf_cpu_context *cpuctx;
	struct hlist_head *head;

	cpuctx = &__get_cpu_var(perf_cpu_context);

	if (hwc->sample_period) {
		hwc->last_period = hwc->sample_period;
		perf_swevent_set_period(event);
	}

	head = find_swevent_head(cpuctx, event->attr.type, event->attr.config);
	if (WARN_ON_ONCE(!head))
		return -EINVAL;

	hlist_add_head_rcu(&event->hlist_entry, head);

	return 0;
}

static void perf_swevent_disable(struct perf_event *event)
{
	hlist_del_rcu(&event->hlist_entry);
}

static const struct pmu perf_ops_generic = {
	.enable		= perf_swevent_enable,
	.disable	= perf_swevent_disable,
	.read		= perf_swevent_read,
	.unthrottle	= perf_swevent_unthrottle,
};
/*
 * hrtimer based swevent callback
 */

static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
{
	enum hrtimer_restart ret = HRTIMER_RESTART;
	struct perf_sample_data data;
	struct pt_regs *regs;
	struct perf_event *event;
	u64 period;

	event = container_of(hrtimer, struct perf_event, hw.hrtimer);
	event->pmu->read(event);

	perf_sample_data_init(&data, 0);
	data.period = event->hw.last_period;
	regs = get_irq_regs();

	if (regs && !perf_exclude_event(event, regs)) {
		if (!(event->attr.exclude_idle && current->pid == 0))
			if (perf_event_overflow(event, 0, &data, regs))
				ret = HRTIMER_NORESTART;
	}

	period = max_t(u64, 10000, event->hw.sample_period);
	hrtimer_forward_now(hrtimer, ns_to_ktime(period));

	return ret;
}

static void perf_swevent_start_hrtimer(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hwc->hrtimer.function = perf_swevent_hrtimer;
	if (hwc->sample_period) {
		u64 period;

		if (hwc->remaining) {
			if (hwc->remaining < 0)
				period = 10000;
			else
				period = hwc->remaining;
			hwc->remaining = 0;
		} else {
			period = max_t(u64, 10000, hwc->sample_period);
		}
		__hrtimer_start_range_ns(&hwc->hrtimer,
				ns_to_ktime(period), 0,
				HRTIMER_MODE_REL, 0);
	}
}

static void perf_swevent_cancel_hrtimer(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->sample_period) {
		ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
		hwc->remaining = ktime_to_ns(remaining);

		hrtimer_cancel(&hwc->hrtimer);
	}
}
/*
 * Software event: cpu wall time clock
 */

static void cpu_clock_perf_event_update(struct perf_event *event)
{
	int cpu = raw_smp_processor_id();
	s64 prev;
	u64 now;

	now = cpu_clock(cpu);
	prev = atomic64_xchg(&event->hw.prev_count, now);
	atomic64_add(now - prev, &event->count);
}

static int cpu_clock_perf_event_enable(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int cpu = raw_smp_processor_id();

	atomic64_set(&hwc->prev_count, cpu_clock(cpu));
	perf_swevent_start_hrtimer(event);

	return 0;
}

static void cpu_clock_perf_event_disable(struct perf_event *event)
{
	perf_swevent_cancel_hrtimer(event);
	cpu_clock_perf_event_update(event);
}

static void cpu_clock_perf_event_read(struct perf_event *event)
{
	cpu_clock_perf_event_update(event);
}

static const struct pmu perf_ops_cpu_clock = {
	.enable		= cpu_clock_perf_event_enable,
	.disable	= cpu_clock_perf_event_disable,
	.read		= cpu_clock_perf_event_read,
};

/*
 * Software event: task time clock
 */

static void task_clock_perf_event_update(struct perf_event *event, u64 now)
{
	u64 prev;
	s64 delta;

	prev = atomic64_xchg(&event->hw.prev_count, now);
	delta = now - prev;
	atomic64_add(delta, &event->count);
}

static int task_clock_perf_event_enable(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 now;

	now = event->ctx->time;

	atomic64_set(&hwc->prev_count, now);

	perf_swevent_start_hrtimer(event);

	return 0;
}

static void task_clock_perf_event_disable(struct perf_event *event)
{
	perf_swevent_cancel_hrtimer(event);
	task_clock_perf_event_update(event, event->ctx->time);
}

static void task_clock_perf_event_read(struct perf_event *event)
{
	u64 time;

	if (!in_nmi()) {
		update_context_time(event->ctx);
		time = event->ctx->time;
	} else {
		u64 now = perf_clock();
		u64 delta = now - event->ctx->timestamp;
		time = event->ctx->time + delta;
	}

	task_clock_perf_event_update(event, time);
}

static const struct pmu perf_ops_task_clock = {
	.enable		= task_clock_perf_event_enable,
	.disable	= task_clock_perf_event_disable,
	.read		= task_clock_perf_event_read,
};
static void swevent_hlist_release_rcu(struct rcu_head *rcu_head)
{
	struct swevent_hlist *hlist;

	hlist = container_of(rcu_head, struct swevent_hlist, rcu_head);
	kfree(hlist);
}

static void swevent_hlist_release(struct perf_cpu_context *cpuctx)
{
	struct swevent_hlist *hlist;

	if (!cpuctx->swevent_hlist)
		return;

	hlist = cpuctx->swevent_hlist;
	rcu_assign_pointer(cpuctx->swevent_hlist, NULL);
	call_rcu(&hlist->rcu_head, swevent_hlist_release_rcu);
}

static void swevent_hlist_put_cpu(struct perf_event *event, int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);

	mutex_lock(&cpuctx->hlist_mutex);

	if (!--cpuctx->hlist_refcount)
		swevent_hlist_release(cpuctx);

	mutex_unlock(&cpuctx->hlist_mutex);
}

static void swevent_hlist_put(struct perf_event *event)
{
	int cpu;

	if (event->cpu != -1) {
		swevent_hlist_put_cpu(event, event->cpu);
		return;
	}

	for_each_possible_cpu(cpu)
		swevent_hlist_put_cpu(event, cpu);
}

static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	int err = 0;

	mutex_lock(&cpuctx->hlist_mutex);

	if (!cpuctx->swevent_hlist && cpu_online(cpu)) {
		struct swevent_hlist *hlist;

		hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
		if (!hlist) {
			err = -ENOMEM;
			goto exit;
		}
		rcu_assign_pointer(cpuctx->swevent_hlist, hlist);
	}
	cpuctx->hlist_refcount++;
exit:
	mutex_unlock(&cpuctx->hlist_mutex);

	return err;
}

static int swevent_hlist_get(struct perf_event *event)
{
	int err;
	int cpu, failed_cpu;

	if (event->cpu != -1)
		return swevent_hlist_get_cpu(event, event->cpu);

	get_online_cpus();
	for_each_possible_cpu(cpu) {
		err = swevent_hlist_get_cpu(event, cpu);
		if (err) {
			failed_cpu = cpu;
			goto fail;
		}
	}
	put_online_cpus();

	return 0;
fail:
	for_each_possible_cpu(cpu) {
		if (cpu == failed_cpu)
			break;
		swevent_hlist_put_cpu(event, cpu);
	}

	put_online_cpus();
	return err;
}
#ifdef CONFIG_EVENT_TRACING

void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
		   int entry_size, struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct perf_raw_record raw = {
		.size = entry_size,
		.data = record,
	};

	perf_sample_data_init(&data, addr);
	data.raw = &raw;

	/* Trace events already protected against recursion */
	do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1,
			 &data, regs);
}
EXPORT_SYMBOL_GPL(perf_tp_event);

static int perf_tp_event_match(struct perf_event *event,
				struct perf_sample_data *data)
{
	void *record = data->raw->data;

	if (likely(!event->filter) || filter_match_preds(event->filter, record))
		return 1;
	return 0;
}

static void tp_perf_event_destroy(struct perf_event *event)
{
	perf_trace_disable(event->attr.config);
	swevent_hlist_put(event);
}

static const struct pmu *tp_perf_event_init(struct perf_event *event)
{
	int err;

	/*
	 * Raw tracepoint data is a severe data leak, only allow root to
	 * have these.
	 */
	if ((event->attr.sample_type & PERF_SAMPLE_RAW) &&
			perf_paranoid_tracepoint_raw() &&
			!capable(CAP_SYS_ADMIN))
		return ERR_PTR(-EPERM);

	if (perf_trace_enable(event->attr.config))
		return NULL;

	event->destroy = tp_perf_event_destroy;
	err = swevent_hlist_get(event);
	if (err) {
		perf_trace_disable(event->attr.config);
		return ERR_PTR(err);
	}

	return &perf_ops_generic;
}

static int perf_event_set_filter(struct perf_event *event, void __user *arg)
{
	char *filter_str;
	int ret;

	if (event->attr.type != PERF_TYPE_TRACEPOINT)
		return -EINVAL;

	filter_str = strndup_user(arg, PAGE_SIZE);
	if (IS_ERR(filter_str))
		return PTR_ERR(filter_str);

	ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);

	kfree(filter_str);
	return ret;
}

static void perf_event_free_filter(struct perf_event *event)
{
	ftrace_profile_free_filter(event);
}

#else

static int perf_tp_event_match(struct perf_event *event,
				struct perf_sample_data *data)
{
	return 1;
}

static const struct pmu *tp_perf_event_init(struct perf_event *event)
{
	return NULL;
}

static int perf_event_set_filter(struct perf_event *event, void __user *arg)
{
	return -ENOENT;
}

static void perf_event_free_filter(struct perf_event *event)
{
}

#endif /* CONFIG_EVENT_TRACING */
#ifdef CONFIG_HAVE_HW_BREAKPOINT
static void bp_perf_event_destroy(struct perf_event *event)
{
	release_bp_slot(event);
}

static const struct pmu *bp_perf_event_init(struct perf_event *bp)
{
	int err;

	err = register_perf_hw_breakpoint(bp);
	if (err)
		return ERR_PTR(err);

	bp->destroy = bp_perf_event_destroy;

	return &perf_ops_bp;
}

void perf_bp_event(struct perf_event *bp, void *data)
{
	struct perf_sample_data sample;
	struct pt_regs *regs = data;

	perf_sample_data_init(&sample, bp->attr.bp_addr);

	if (!perf_exclude_event(bp, regs))
		perf_swevent_add(bp, 1, 1, &sample, regs);
}
#else
static const struct pmu *bp_perf_event_init(struct perf_event *bp)
{
	return NULL;
}

void perf_bp_event(struct perf_event *bp, void *regs)
{
}
#endif
atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];

static void sw_perf_event_destroy(struct perf_event *event)
{
	u64 event_id = event->attr.config;

	WARN_ON(event->parent);

	atomic_dec(&perf_swevent_enabled[event_id]);
	swevent_hlist_put(event);
}

static const struct pmu *sw_perf_event_init(struct perf_event *event)
{
	const struct pmu *pmu = NULL;
	u64 event_id = event->attr.config;

	/*
	 * Software events (currently) can't in general distinguish
	 * between user, kernel and hypervisor events.
	 * However, context switches and cpu migrations are considered
	 * to be kernel events, and page faults are never hypervisor
	 * events.
	 */
	switch (event_id) {
	case PERF_COUNT_SW_CPU_CLOCK:
		pmu = &perf_ops_cpu_clock;

		break;
	case PERF_COUNT_SW_TASK_CLOCK:
		/*
		 * If the user instantiates this as a per-cpu event,
		 * use the cpu_clock event instead.
		 */
		if (event->ctx->task)
			pmu = &perf_ops_task_clock;
		else
			pmu = &perf_ops_cpu_clock;

		break;
	case PERF_COUNT_SW_PAGE_FAULTS:
	case PERF_COUNT_SW_PAGE_FAULTS_MIN:
	case PERF_COUNT_SW_PAGE_FAULTS_MAJ:
	case PERF_COUNT_SW_CONTEXT_SWITCHES:
	case PERF_COUNT_SW_CPU_MIGRATIONS:
	case PERF_COUNT_SW_ALIGNMENT_FAULTS:
	case PERF_COUNT_SW_EMULATION_FAULTS:
		if (!event->parent) {
			int err;

			err = swevent_hlist_get(event);
			if (err)
				return ERR_PTR(err);

			atomic_inc(&perf_swevent_enabled[event_id]);
			event->destroy = sw_perf_event_destroy;
		}
		pmu = &perf_ops_generic;
		break;
	}

	return pmu;
}
/*
 * Allocate and initialize a event structure
 */
static struct perf_event *
perf_event_alloc(struct perf_event_attr *attr,
		   int cpu,
		   struct perf_event_context *ctx,
		   struct perf_event *group_leader,
		   struct perf_event *parent_event,
		   perf_overflow_handler_t overflow_handler,
		   gfp_t gfpflags)
{
	const struct pmu *pmu;
	struct perf_event *event;
	struct hw_perf_event *hwc;
	long err;

	event = kzalloc(sizeof(*event), gfpflags);
	if (!event)
		return ERR_PTR(-ENOMEM);

	/*
	 * Single events are their own group leaders, with an
	 * empty sibling list:
	 */
	if (!group_leader)
		group_leader = event;

	mutex_init(&event->child_mutex);
	INIT_LIST_HEAD(&event->child_list);

	INIT_LIST_HEAD(&event->group_entry);
	INIT_LIST_HEAD(&event->event_entry);
	INIT_LIST_HEAD(&event->sibling_list);
	init_waitqueue_head(&event->waitq);

	mutex_init(&event->mmap_mutex);

	event->cpu		= cpu;
	event->attr		= *attr;
	event->group_leader	= group_leader;
	event->ctx		= ctx;

	event->parent		= parent_event;

	event->ns		= get_pid_ns(current->nsproxy->pid_ns);
	event->id		= atomic64_inc_return(&perf_event_id);

	event->state		= PERF_EVENT_STATE_INACTIVE;

	if (!overflow_handler && parent_event)
		overflow_handler = parent_event->overflow_handler;

	event->overflow_handler	= overflow_handler;

	if (attr->disabled)
		event->state = PERF_EVENT_STATE_OFF;

	pmu = NULL;

	hwc = &event->hw;
	hwc->sample_period = attr->sample_period;
	if (attr->freq && attr->sample_freq)
		hwc->sample_period = 1;
	hwc->last_period = hwc->sample_period;

	atomic64_set(&hwc->period_left, hwc->sample_period);

	/*
	 * we currently do not support PERF_FORMAT_GROUP on inherited events
	 */
	if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
		goto done;

	switch (attr->type) {
	case PERF_TYPE_RAW:
	case PERF_TYPE_HARDWARE:
	case PERF_TYPE_HW_CACHE:
		pmu = hw_perf_event_init(event);
		break;

	case PERF_TYPE_SOFTWARE:
		pmu = sw_perf_event_init(event);
		break;

	case PERF_TYPE_TRACEPOINT:
		pmu = tp_perf_event_init(event);
		break;

	case PERF_TYPE_BREAKPOINT:
		pmu = bp_perf_event_init(event);
		break;

	default:
		break;
	}
done:
	err = 0;
	if (!pmu)
		err = -EINVAL;
	else if (IS_ERR(pmu))
		err = PTR_ERR(pmu);

	if (err) {
		if (event->ns)
			put_pid_ns(event->ns);
		kfree(event);
		return ERR_PTR(err);
	}

	event->pmu = pmu;

	if (!event->parent) {
		atomic_inc(&nr_events);
		if (event->attr.mmap)
			atomic_inc(&nr_mmap_events);
		if (event->attr.comm)
			atomic_inc(&nr_comm_events);
		if (event->attr.task)
			atomic_inc(&nr_task_events);
	}

	return event;
}
static int perf_copy_attr(struct perf_event_attr __user *uattr,
			  struct perf_event_attr *attr)
{
	u32 size;
	int ret;

	if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
		return -EFAULT;

	/*
	 * zero the full structure, so that a short copy will be nice.
	 */
	memset(attr, 0, sizeof(*attr));

	ret = get_user(size, &uattr->size);
	if (ret)
		return ret;

	if (size > PAGE_SIZE)	/* silly large */
		goto err_size;

	if (!size)		/* abi compat */
		size = PERF_ATTR_SIZE_VER0;

	if (size < PERF_ATTR_SIZE_VER0)
		goto err_size;

	/*
	 * If we're handed a bigger struct than we know of,
	 * ensure all the unknown bits are 0 - i.e. new
	 * user-space does not rely on any kernel feature
	 * extensions we dont know about yet.
	 */
	if (size > sizeof(*attr)) {
		unsigned char __user *addr;
		unsigned char __user *end;
		unsigned char val;

		addr = (void __user *)uattr + sizeof(*attr);
		end  = (void __user *)uattr + size;

		for (; addr < end; addr++) {
			ret = get_user(val, addr);
			if (ret)
				return ret;
			if (val)
				goto err_size;
		}
		size = sizeof(*attr);
	}

	ret = copy_from_user(attr, uattr, size);
	if (ret)
		return -EFAULT;

	/*
	 * If the type exists, the corresponding creation will verify
	 * the attr->config.
	 */
	if (attr->type >= PERF_TYPE_MAX)
		return -EINVAL;

	if (attr->__reserved_1)
		return -EINVAL;

	if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
		return -EINVAL;

	if (attr->read_format & ~(PERF_FORMAT_MAX-1))
		return -EINVAL;

out:
	return ret;

err_size:
	put_user(sizeof(*attr), &uattr->size);
	ret = -E2BIG;
	goto out;
}
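/*
 * Illustrative user-space sketch, not part of the original file: the
 * size-based ABI handshake above means user-space only needs to zero the
 * structure and advertise the size it was compiled against.  The helper
 * name init_attr() is made up for this example.
 */
#if 0	/* user-space example, shown for illustration only */
#include <string.h>
#include <linux/perf_event.h>

static void init_attr(struct perf_event_attr *attr)
{
	/* fields this binary does not know about stay zero, so an older or
	 * newer kernel can still interpret the structure consistently */
	memset(attr, 0, sizeof(*attr));
	attr->size = sizeof(*attr);
	attr->type = PERF_TYPE_SOFTWARE;
	attr->config = PERF_COUNT_SW_CPU_CLOCK;
}
#endif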
static int perf_event_set_output(struct perf_event *event, int output_fd)
{
	struct perf_event *output_event = NULL;
	struct file *output_file = NULL;
	struct perf_event *old_output;
	int fput_needed = 0;
	int ret = -EINVAL;

	if (!output_fd)
		goto set;

	output_file = fget_light(output_fd, &fput_needed);
	if (!output_file)
		return -EBADF;

	if (output_file->f_op != &perf_fops)
		goto out;

	output_event = output_file->private_data;

	/* Don't chain output fds */
	if (output_event->output)
		goto out;

	/* Don't set an output fd when we already have an output channel */
	if (event->data)
		goto out;

	atomic_long_inc(&output_file->f_count);

set:
	mutex_lock(&event->mmap_mutex);
	old_output = event->output;
	rcu_assign_pointer(event->output, output_event);
	mutex_unlock(&event->mmap_mutex);

	if (old_output) {
		/*
		 * we need to make sure no existing perf_output_*()
		 * is still referencing this event.
		 */
		synchronize_rcu();
		fput(old_output->filp);
	}

	ret = 0;
out:
	fput_light(output_file, fput_needed);
	return ret;
}
/**
 * sys_perf_event_open - open a performance event, associate it to a task/cpu
 *
 * @attr_uptr:	event_id type attributes for monitoring/sampling
 * @pid:		target pid
 * @cpu:		target cpu
 * @group_fd:		group leader event fd
 */
SYSCALL_DEFINE5(perf_event_open,
		struct perf_event_attr __user *, attr_uptr,
		pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
{
	struct perf_event *event, *group_leader;
	struct perf_event_attr attr;
	struct perf_event_context *ctx;
	struct file *event_file = NULL;
	struct file *group_file = NULL;
	int fput_needed = 0;
	int fput_needed2 = 0;
	int err;

	/* for future expandability... */
	if (flags & ~(PERF_FLAG_FD_NO_GROUP | PERF_FLAG_FD_OUTPUT))
		return -EINVAL;

	err = perf_copy_attr(attr_uptr, &attr);
	if (err)
		return err;

	if (!attr.exclude_kernel) {
		if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
			return -EACCES;
	}

	if (attr.freq) {
		if (attr.sample_freq > sysctl_perf_event_sample_rate)
			return -EINVAL;
	}

	/*
	 * Get the target context (task or percpu):
	 */
	ctx = find_get_context(pid, cpu);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	/*
	 * Look up the group leader (we will attach this event to it):
	 */
	group_leader = NULL;
	if (group_fd != -1 && !(flags & PERF_FLAG_FD_NO_GROUP)) {
		err = -EINVAL;
		group_file = fget_light(group_fd, &fput_needed);
		if (!group_file)
			goto err_put_context;
		if (group_file->f_op != &perf_fops)
			goto err_put_context;

		group_leader = group_file->private_data;
		/*
		 * Do not allow a recursive hierarchy (this new sibling
		 * becoming part of another group-sibling):
		 */
		if (group_leader->group_leader != group_leader)
			goto err_put_context;
		/*
		 * Do not allow to attach to a group in a different
		 * task or CPU context:
		 */
		if (group_leader->ctx != ctx)
			goto err_put_context;
		/*
		 * Only a group leader can be exclusive or pinned
		 */
		if (attr.exclusive || attr.pinned)
			goto err_put_context;
	}

	event = perf_event_alloc(&attr, cpu, ctx, group_leader,
				     NULL, NULL, GFP_KERNEL);
	err = PTR_ERR(event);
	if (IS_ERR(event))
		goto err_put_context;

	err = anon_inode_getfd("[perf_event]", &perf_fops, event, O_RDWR);
	if (err < 0)
		goto err_free_put_context;

	event_file = fget_light(err, &fput_needed2);
	if (!event_file)
		goto err_free_put_context;

	if (flags & PERF_FLAG_FD_OUTPUT) {
		err = perf_event_set_output(event, group_fd);
		if (err)
			goto err_fput_free_put_context;
	}

	event->filp = event_file;
	WARN_ON_ONCE(ctx->parent_ctx);
	mutex_lock(&ctx->mutex);
	perf_install_in_context(ctx, event, cpu);
	++ctx->generation;
	mutex_unlock(&ctx->mutex);

	event->owner = current;
	get_task_struct(current);
	mutex_lock(&current->perf_event_mutex);
	list_add_tail(&event->owner_entry, &current->perf_event_list);
	mutex_unlock(&current->perf_event_mutex);

err_fput_free_put_context:
	fput_light(event_file, fput_needed2);

err_free_put_context:
	if (err < 0)
		free_event(event);

err_put_context:
	if (err < 0)
		put_ctx(ctx);

	fput_light(group_file, fput_needed);

	return err;
}
/**
 * perf_event_create_kernel_counter
 *
 * @attr: attributes of the counter to create
 * @cpu: cpu in which the counter is bound
 * @pid: task to profile
 */
struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
				 pid_t pid,
				 perf_overflow_handler_t overflow_handler)
{
	struct perf_event *event;
	struct perf_event_context *ctx;
	int err;

	/*
	 * Get the target context (task or percpu):
	 */
	ctx = find_get_context(pid, cpu);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto err_exit;
	}

	event = perf_event_alloc(attr, cpu, ctx, NULL,
				 NULL, overflow_handler, GFP_KERNEL);
	if (IS_ERR(event)) {
		err = PTR_ERR(event);
		goto err_put_context;
	}

	event->filp = NULL;
	WARN_ON_ONCE(ctx->parent_ctx);
	mutex_lock(&ctx->mutex);
	perf_install_in_context(ctx, event, cpu);
	++ctx->generation;
	mutex_unlock(&ctx->mutex);

	event->owner = current;
	get_task_struct(current);
	mutex_lock(&current->perf_event_mutex);
	list_add_tail(&event->owner_entry, &current->perf_event_list);
	mutex_unlock(&current->perf_event_mutex);

	return event;

err_put_context:
	put_ctx(ctx);
err_exit:
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
/*
 * inherit a event from parent task to child task:
 */
static struct perf_event *
inherit_event(struct perf_event *parent_event,
	      struct task_struct *parent,
	      struct perf_event_context *parent_ctx,
	      struct task_struct *child,
	      struct perf_event *group_leader,
	      struct perf_event_context *child_ctx)
{
	struct perf_event *child_event;

	/*
	 * Instead of creating recursive hierarchies of events,
	 * we link inherited events back to the original parent,
	 * which has a filp for sure, which we use as the reference
	 * count:
	 */
	if (parent_event->parent)
		parent_event = parent_event->parent;

	child_event = perf_event_alloc(&parent_event->attr,
					   parent_event->cpu, child_ctx,
					   group_leader, parent_event,
					   NULL, GFP_KERNEL);
	if (IS_ERR(child_event))
		return child_event;
	get_ctx(child_ctx);

	/*
	 * Make the child state follow the state of the parent event,
	 * not its attr.disabled bit.  We hold the parent's mutex,
	 * so we won't race with perf_event_{en, dis}able_family.
	 */
	if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
		child_event->state = PERF_EVENT_STATE_INACTIVE;
	else
		child_event->state = PERF_EVENT_STATE_OFF;

	if (parent_event->attr.freq) {
		u64 sample_period = parent_event->hw.sample_period;
		struct hw_perf_event *hwc = &child_event->hw;

		hwc->sample_period = sample_period;
		hwc->last_period   = sample_period;

		atomic64_set(&hwc->period_left, sample_period);
	}

	child_event->overflow_handler = parent_event->overflow_handler;

	/*
	 * Link it up in the child's context:
	 */
	add_event_to_ctx(child_event, child_ctx);

	/*
	 * Get a reference to the parent filp - we will fput it
	 * when the child event exits. This is safe to do because
	 * we are in the parent and we know that the filp still
	 * exists and has a nonzero count:
	 */
	atomic_long_inc(&parent_event->filp->f_count);

	/*
	 * Link this into the parent event's child list
	 */
	WARN_ON_ONCE(parent_event->ctx->parent_ctx);
	mutex_lock(&parent_event->child_mutex);
	list_add_tail(&child_event->child_list, &parent_event->child_list);
	mutex_unlock(&parent_event->child_mutex);

	return child_event;
}
static int inherit_group(struct perf_event *parent_event,
	      struct task_struct *parent,
	      struct perf_event_context *parent_ctx,
	      struct task_struct *child,
	      struct perf_event_context *child_ctx)
{
	struct perf_event *leader;
	struct perf_event *sub;
	struct perf_event *child_ctr;

	leader = inherit_event(parent_event, parent, parent_ctx,
			 child, NULL, child_ctx);
	if (IS_ERR(leader))
		return PTR_ERR(leader);
	list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
		child_ctr = inherit_event(sub, parent, parent_ctx,
					    child, leader, child_ctx);
		if (IS_ERR(child_ctr))
			return PTR_ERR(child_ctr);
	}
	return 0;
}
static void sync_child_event(struct perf_event *child_event,
			       struct task_struct *child)
{
	struct perf_event *parent_event = child_event->parent;
	u64 child_val;

	if (child_event->attr.inherit_stat)
		perf_event_read_event(child_event, child);

	child_val = atomic64_read(&child_event->count);

	/*
	 * Add back the child's count to the parent's count:
	 */
	atomic64_add(child_val, &parent_event->count);
	atomic64_add(child_event->total_time_enabled,
		     &parent_event->child_total_time_enabled);
	atomic64_add(child_event->total_time_running,
		     &parent_event->child_total_time_running);

	/*
	 * Remove this event from the parent's list
	 */
	WARN_ON_ONCE(parent_event->ctx->parent_ctx);
	mutex_lock(&parent_event->child_mutex);
	list_del_init(&child_event->child_list);
	mutex_unlock(&parent_event->child_mutex);

	/*
	 * Release the parent event, if this was the last
	 * reference to it.
	 */
	fput(parent_event->filp);
}
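/*
 * Worked example (hypothetical numbers, added for illustration): if the
 * parent event has counted 1000 events itself and two exiting children
 * feed back 300 and 200 through the atomic64_add() above, the parent's
 * count reads 1000 + 300 + 200 = 1500 afterwards; enabled/running times
 * accumulate the same way into child_total_time_*.
 */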
static void
__perf_event_exit_task(struct perf_event *child_event,
			 struct perf_event_context *child_ctx,
			 struct task_struct *child)
{
	struct perf_event *parent_event;

	perf_event_remove_from_context(child_event);

	parent_event = child_event->parent;
	/*
	 * It can happen that parent exits first, and has events
	 * that are still around due to the child reference. These
	 * events need to be zapped - but otherwise linger.
	 */
	if (parent_event) {
		sync_child_event(child_event, child);
		free_event(child_event);
	}
}
/*
 * When a child task exits, feed back event values to parent events.
 */
void perf_event_exit_task(struct task_struct *child)
{
	struct perf_event *child_event, *tmp;
	struct perf_event_context *child_ctx;
	unsigned long flags;

	if (likely(!child->perf_event_ctxp)) {
		perf_event_task(child, NULL, 0);
		return;
	}

	local_irq_save(flags);
	/*
	 * We can't reschedule here because interrupts are disabled,
	 * and either child is current or it is a task that can't be
	 * scheduled, so we are now safe from rescheduling changing
	 * our context.
	 */
	child_ctx = child->perf_event_ctxp;
	__perf_event_task_sched_out(child_ctx);

	/*
	 * Take the context lock here so that if find_get_context is
	 * reading child->perf_event_ctxp, we wait until it has
	 * incremented the context's refcount before we do put_ctx below.
	 */
	raw_spin_lock(&child_ctx->lock);
	child->perf_event_ctxp = NULL;
	/*
	 * If this context is a clone, unclone it so it can't get
	 * swapped to another process while we're removing all
	 * the events from it.
	 */
	unclone_ctx(child_ctx);
	update_context_time(child_ctx);
	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);

	/*
	 * Report the task dead after unscheduling the events so that we
	 * won't get any samples after PERF_RECORD_EXIT. We can however still
	 * get a few PERF_RECORD_READ events.
	 */
	perf_event_task(child, child_ctx, 0);

	/*
	 * We can recurse on the same lock type through:
	 *
	 *   __perf_event_exit_task()
	 *     sync_child_event()
	 *       fput(parent_event->filp)
	 *         perf_release()
	 *           mutex_lock(&ctx->mutex)
	 *
	 * But since it's the parent context it won't be the same instance.
	 */
	mutex_lock_nested(&child_ctx->mutex, SINGLE_DEPTH_NESTING);

again:
	list_for_each_entry_safe(child_event, tmp, &child_ctx->pinned_groups,
				 group_entry)
		__perf_event_exit_task(child_event, child_ctx, child);

	list_for_each_entry_safe(child_event, tmp, &child_ctx->flexible_groups,
				 group_entry)
		__perf_event_exit_task(child_event, child_ctx, child);

	/*
	 * If the last event was a group event, it will have appended all
	 * its siblings to the list, but we obtained 'tmp' before that which
	 * will still point to the list head terminating the iteration.
	 */
	if (!list_empty(&child_ctx->pinned_groups) ||
	    !list_empty(&child_ctx->flexible_groups))
		goto again;

	mutex_unlock(&child_ctx->mutex);

	put_ctx(child_ctx);
}
static void perf_free_event(struct perf_event *event,
			    struct perf_event_context *ctx)
{
	struct perf_event *parent = event->parent;

	if (WARN_ON_ONCE(!parent))
		return;

	mutex_lock(&parent->child_mutex);
	list_del_init(&event->child_list);
	mutex_unlock(&parent->child_mutex);

	fput(parent->filp);

	list_del_event(event, ctx);
	free_event(event);
}
/*
 * Free an unexposed, unused context as created by inheritance in
 * perf_event_init_task() below, used by fork() in case of failure.
 */
void perf_event_free_task(struct task_struct *task)
{
	struct perf_event_context *ctx = task->perf_event_ctxp;
	struct perf_event *event, *tmp;

	if (!ctx)
		return;

	mutex_lock(&ctx->mutex);
again:
	list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
		perf_free_event(event, ctx);

	list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
				 group_entry)
		perf_free_event(event, ctx);

	if (!list_empty(&ctx->pinned_groups) ||
	    !list_empty(&ctx->flexible_groups))
		goto again;

	mutex_unlock(&ctx->mutex);

	put_ctx(ctx);
}
static int
inherit_task_group(struct perf_event *event, struct task_struct *parent,
		   struct perf_event_context *parent_ctx,
		   struct task_struct *child,
		   int *inherited_all)
{
	int ret;
	struct perf_event_context *child_ctx = child->perf_event_ctxp;

	if (!event->attr.inherit) {
		*inherited_all = 0;
		return 0;
	}

	if (!child_ctx) {
		/*
		 * This is executed from the parent task context, so
		 * inherit events that have been marked for cloning.
		 * First allocate and initialize a context for the
		 * child.
		 */

		child_ctx = kzalloc(sizeof(struct perf_event_context),
				    GFP_KERNEL);
		if (!child_ctx)
			return -ENOMEM;

		__perf_event_init_context(child_ctx, child);
		child->perf_event_ctxp = child_ctx;
		get_task_struct(child);
	}

	ret = inherit_group(event, parent, parent_ctx,
			    child, child_ctx);

	if (ret)
		*inherited_all = 0;

	return ret;
}
/*
 * Initialize the perf_event context in task_struct
 */
int perf_event_init_task(struct task_struct *child)
{
	struct perf_event_context *child_ctx, *parent_ctx;
	struct perf_event_context *cloned_ctx;
	struct perf_event *event;
	struct task_struct *parent = current;
	int inherited_all = 1;
	int ret = 0;

	child->perf_event_ctxp = NULL;

	mutex_init(&child->perf_event_mutex);
	INIT_LIST_HEAD(&child->perf_event_list);

	if (likely(!parent->perf_event_ctxp))
		return 0;

	/*
	 * If the parent's context is a clone, pin it so it won't get
	 * swapped under us.
	 */
	parent_ctx = perf_pin_task_context(parent);

	/*
	 * No need to check if parent_ctx != NULL here; since we saw
	 * it non-NULL earlier, the only reason for it to become NULL
	 * is if we exit, and since we're currently in the middle of
	 * a fork we can't be exiting at the same time.
	 */

	/*
	 * Lock the parent list. No need to lock the child - not PID
	 * hashed yet and not running, so nobody can access it.
	 */
	mutex_lock(&parent_ctx->mutex);

	/*
	 * We don't have to disable NMIs - we are only looking at
	 * the list, not manipulating it:
	 */
	list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
		ret = inherit_task_group(event, parent, parent_ctx, child,
					 &inherited_all);
		if (ret)
			break;
	}

	list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
		ret = inherit_task_group(event, parent, parent_ctx, child,
					 &inherited_all);
		if (ret)
			break;
	}

	child_ctx = child->perf_event_ctxp;

	if (child_ctx && inherited_all) {
		/*
		 * Mark the child context as a clone of the parent
		 * context, or of whatever the parent is a clone of.
		 * Note that if the parent is a clone, it could get
		 * uncloned at any point, but that doesn't matter
		 * because the list of events and the generation
		 * count can't have changed since we took the mutex.
		 */
		cloned_ctx = rcu_dereference(parent_ctx->parent_ctx);
		if (cloned_ctx) {
			child_ctx->parent_ctx = cloned_ctx;
			child_ctx->parent_gen = parent_ctx->parent_gen;
		} else {
			child_ctx->parent_ctx = parent_ctx;
			child_ctx->parent_gen = parent_ctx->generation;
		}
		get_ctx(child_ctx->parent_ctx);
	}

	mutex_unlock(&parent_ctx->mutex);

	perf_unpin_context(parent_ctx);

	return ret;
}
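/*
 * Illustrative userspace sketch (editorial addition, hypothetical values):
 * the inheritance path above is what services a counter opened with
 * attr.inherit set; children forked afterwards get a cloned context and
 * their counts are folded back on exit via sync_child_event():
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_HARDWARE,
 *		.config		= PERF_COUNT_HW_INSTRUCTIONS,
 *		.size		= sizeof(attr),
 *		.inherit	= 1,
 *	};
 *	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 */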
static void __init perf_event_init_all_cpus(void)
{
	int cpu;
	struct perf_cpu_context *cpuctx;

	for_each_possible_cpu(cpu) {
		cpuctx = &per_cpu(perf_cpu_context, cpu);
		mutex_init(&cpuctx->hlist_mutex);
		__perf_event_init_context(&cpuctx->ctx, NULL);
	}
}
static void __cpuinit perf_event_init_cpu(int cpu)
{
	struct perf_cpu_context *cpuctx;

	cpuctx = &per_cpu(perf_cpu_context, cpu);

	spin_lock(&perf_resource_lock);
	cpuctx->max_pertask = perf_max_events - perf_reserved_percpu;
	spin_unlock(&perf_resource_lock);

	mutex_lock(&cpuctx->hlist_mutex);
	if (cpuctx->hlist_refcount > 0) {
		struct swevent_hlist *hlist;

		hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
		WARN_ON_ONCE(!hlist);
		rcu_assign_pointer(cpuctx->swevent_hlist, hlist);
	}
	mutex_unlock(&cpuctx->hlist_mutex);
}
#ifdef CONFIG_HOTPLUG_CPU
static void __perf_event_exit_cpu(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_event_context *ctx = &cpuctx->ctx;
	struct perf_event *event, *tmp;

	list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
		__perf_event_remove_from_context(event);
	list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
		__perf_event_remove_from_context(event);
}
static void perf_event_exit_cpu(int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_event_context *ctx = &cpuctx->ctx;

	mutex_lock(&cpuctx->hlist_mutex);
	swevent_hlist_release(cpuctx);
	mutex_unlock(&cpuctx->hlist_mutex);

	mutex_lock(&ctx->mutex);
	smp_call_function_single(cpu, __perf_event_exit_cpu, NULL, 1);
	mutex_unlock(&ctx->mutex);
}
#else
static inline void perf_event_exit_cpu(int cpu) { }
#endif
static int __cpuinit
perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action) {

	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		perf_event_init_cpu(cpu);
		break;

	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		perf_event_exit_cpu(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

/*
 * This has to have a higher priority than migration_notifier in sched.c.
 */
static struct notifier_block __cpuinitdata perf_cpu_nb = {
	.notifier_call		= perf_cpu_notify,
	.priority		= 20,
};

void __init perf_event_init(void)
{
	perf_event_init_all_cpus();
	perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
			(void *)(long)smp_processor_id());
	perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE,
			(void *)(long)smp_processor_id());
	register_cpu_notifier(&perf_cpu_nb);
}
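/*
 * Descriptive note (editorial addition): the notifier is invoked by hand
 * for the boot CPU (CPU_UP_PREPARE, then CPU_ONLINE) before it is
 * registered, because that CPU is already up by the time perf_event_init()
 * runs and would otherwise never receive those callbacks.
 */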
static ssize_t perf_show_reserve_percpu(struct sysdev_class *class,
					struct sysdev_class_attribute *attr,
					char *buf)
{
	return sprintf(buf, "%d\n", perf_reserved_percpu);
}

static ssize_t
perf_set_reserve_percpu(struct sysdev_class *class,
			struct sysdev_class_attribute *attr,
			const char *buf,
			size_t count)
{
	struct perf_cpu_context *cpuctx;
	unsigned long val;
	int err, cpu, mpt;

	err = strict_strtoul(buf, 10, &val);
	if (err)
		return err;
	if (val > perf_max_events)
		return -EINVAL;

	spin_lock(&perf_resource_lock);
	perf_reserved_percpu = val;
	for_each_online_cpu(cpu) {
		cpuctx = &per_cpu(perf_cpu_context, cpu);
		raw_spin_lock_irq(&cpuctx->ctx.lock);
		mpt = min(perf_max_events - cpuctx->ctx.nr_events,
			  perf_max_events - perf_reserved_percpu);
		cpuctx->max_pertask = mpt;
		raw_spin_unlock_irq(&cpuctx->ctx.lock);
	}
	spin_unlock(&perf_resource_lock);

	return count;
}
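/*
 * Worked example (hypothetical numbers, added for illustration): with
 * perf_max_events = 64, a CPU whose context already holds 8 events and a
 * newly written reserve_percpu value of 16, the loop above computes
 * mpt = min(64 - 8, 64 - 16) = 48, so that CPU's max_pertask becomes 48.
 */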
static ssize_t perf_show_overcommit(struct sysdev_class *class,
				    struct sysdev_class_attribute *attr,
				    char *buf)
{
	return sprintf(buf, "%d\n", perf_overcommit);
}

static ssize_t
perf_set_overcommit(struct sysdev_class *class,
		    struct sysdev_class_attribute *attr,
		    const char *buf, size_t count)
{
	unsigned long val;
	int err;

	err = strict_strtoul(buf, 10, &val);
	if (err)
		return err;
	if (val > 1)
		return -EINVAL;

	spin_lock(&perf_resource_lock);
	perf_overcommit = val;
	spin_unlock(&perf_resource_lock);

	return count;
}
static SYSDEV_CLASS_ATTR(
				reserve_percpu,
				0644,
				perf_show_reserve_percpu,
				perf_set_reserve_percpu
			);

static SYSDEV_CLASS_ATTR(
				overcommit,
				0644,
				perf_show_overcommit,
				perf_set_overcommit
			);

static struct attribute *perfclass_attrs[] = {
	&attr_reserve_percpu.attr,
	&attr_overcommit.attr,
	NULL
};

static struct attribute_group perfclass_attr_group = {
	.attrs			= perfclass_attrs,
	.name			= "perf_events",
};
static int __init perf_event_sysfs_init(void)
{
	return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
				  &perfclass_attr_group);
}
device_initcall(perf_event_sysfs_init);
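/*
 * Editorial note (an assumption, not taken from this file): since the
 * group is attached to cpu_sysdev_class with .name = "perf_events", the
 * two attributes are expected to appear as
 * /sys/devices/system/cpu/perf_events/reserve_percpu and
 * /sys/devices/system/cpu/perf_events/overcommit, writable by root.
 */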