/*
 * Performance counter core code
 *
 *  Copyright(C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2008 Red Hat, Inc., Ingo Molnar
 *
 *  For licensing details see kernel-base/COPYING
 */
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/sysfs.h>
#include <linux/ptrace.h>
#include <linux/percpu.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/perf_counter.h>
#include <linux/vmstat.h>
#include <linux/rculist.h>
/*
 * Each CPU has a list of per CPU counters:
 */
DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);

int perf_max_counters __read_mostly = 1;
static int perf_reserved_percpu __read_mostly;
static int perf_overcommit __read_mostly = 1;

/*
 * Mutex for (sysadmin-configurable) counter reservations:
 */
static DEFINE_MUTEX(perf_resource_mutex);
/*
 * Architecture provided APIs - weak aliases:
 */
extern __weak const struct hw_perf_counter_ops *
hw_perf_counter_init(struct perf_counter *counter)
{
	return NULL;
}

u64 __weak hw_perf_save_disable(void)		{ return 0; }
void __weak hw_perf_restore(u64 ctrl)		{ barrier(); }
void __weak hw_perf_counter_setup(int cpu)	{ barrier(); }
int __weak hw_perf_group_sched_in(struct perf_counter *group_leader,
	       struct perf_cpu_context *cpuctx,
	       struct perf_counter_context *ctx, int cpu)
{
	return 0;
}

void __weak perf_counter_print_debug(void)	{ }
static void
list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
{
	struct perf_counter *group_leader = counter->group_leader;

	/*
	 * Depending on whether it is a standalone or sibling counter,
	 * add it straight to the context's counter list, or to the group
	 * leader's sibling list:
	 */
	if (counter->group_leader == counter)
		list_add_tail(&counter->list_entry, &ctx->counter_list);
	else
		list_add_tail(&counter->list_entry, &group_leader->sibling_list);

	list_add_rcu(&counter->event_entry, &ctx->event_list);
}

static void
list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
{
	struct perf_counter *sibling, *tmp;

	list_del_init(&counter->list_entry);
	list_del_rcu(&counter->event_entry);

	/*
	 * If this was a group counter with sibling counters then
	 * upgrade the siblings to singleton counters by adding them
	 * to the context list directly:
	 */
	list_for_each_entry_safe(sibling, tmp,
				 &counter->sibling_list, list_entry) {

		list_move_tail(&sibling->list_entry, &ctx->counter_list);
		sibling->group_leader = sibling;
	}
}
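/*
 * Illustrative note (not original code): a group with leader L and siblings
 * A and B is kept as L on ctx->counter_list, with A and B linked on
 * L->sibling_list.  If L is removed, list_del_counter() above moves A and B
 * onto ctx->counter_list and each one becomes its own group leader.
 */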
static void
counter_sched_out(struct perf_counter *counter,
		  struct perf_cpu_context *cpuctx,
		  struct perf_counter_context *ctx)
{
	if (counter->state != PERF_COUNTER_STATE_ACTIVE)
		return;

	counter->state = PERF_COUNTER_STATE_INACTIVE;
	counter->hw_ops->disable(counter);
	counter->oncpu = -1;

	if (!is_software_counter(counter))
		cpuctx->active_oncpu--;
	ctx->nr_active--;
	if (counter->hw_event.exclusive || !cpuctx->active_oncpu)
		cpuctx->exclusive = 0;
}

static void
group_sched_out(struct perf_counter *group_counter,
		struct perf_cpu_context *cpuctx,
		struct perf_counter_context *ctx)
{
	struct perf_counter *counter;

	if (group_counter->state != PERF_COUNTER_STATE_ACTIVE)
		return;

	counter_sched_out(group_counter, cpuctx, ctx);

	/*
	 * Schedule out siblings (if any):
	 */
	list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
		counter_sched_out(counter, cpuctx, ctx);

	if (group_counter->hw_event.exclusive)
		cpuctx->exclusive = 0;
}
/*
 * Cross CPU call to remove a performance counter
 *
 * We disable the counter on the hardware level first. After that we
 * remove it from the context list.
 */
static void __perf_counter_remove_from_context(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter *counter = info;
	struct perf_counter_context *ctx = counter->ctx;
	unsigned long flags;
	u64 perf_flags;

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	curr_rq_lock_irq_save(&flags);
	spin_lock(&ctx->lock);

	counter_sched_out(counter, cpuctx, ctx);

	counter->task = NULL;
	ctx->nr_counters--;

	/*
	 * Protect the list operation against NMI by disabling the
	 * counters on a global level. NOP for non NMI based counters.
	 */
	perf_flags = hw_perf_save_disable();
	list_del_counter(counter, ctx);
	hw_perf_restore(perf_flags);

	if (!ctx->task) {
		/*
		 * Allow more per task counters with respect to the
		 * reservation:
		 */
		cpuctx->max_pertask =
			min(perf_max_counters - ctx->nr_counters,
			    perf_max_counters - perf_reserved_percpu);
	}

	spin_unlock(&ctx->lock);
	curr_rq_unlock_irq_restore(&flags);
}
/*
 * Remove the counter from a task's (or a CPU's) list of counters.
 *
 * Must be called with counter->mutex and ctx->mutex held.
 *
 * CPU counters are removed with a smp call. For task counters we only
 * call when the task is on a CPU.
 */
static void perf_counter_remove_from_context(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Per cpu counters are removed via an smp call and
		 * the removal is always successful.
		 */
		smp_call_function_single(counter->cpu,
					 __perf_counter_remove_from_context,
					 counter, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_counter_remove_from_context,
				 counter);

	spin_lock_irq(&ctx->lock);
	/*
	 * If the context is active we need to retry the smp call.
	 */
	if (ctx->nr_active && !list_empty(&counter->list_entry)) {
		spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * The lock prevents that this context is scheduled in so we
	 * can remove the counter safely, if the call above did not
	 * succeed.
	 */
	if (!list_empty(&counter->list_entry)) {
		ctx->nr_counters--;
		list_del_counter(counter, ctx);
		counter->task = NULL;
	}
	spin_unlock_irq(&ctx->lock);
}
/*
 * Cross CPU call to disable a performance counter
 */
static void __perf_counter_disable(void *info)
{
	struct perf_counter *counter = info;
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter_context *ctx = counter->ctx;
	unsigned long flags;

	/*
	 * If this is a per-task counter, need to check whether this
	 * counter's task is the current task on this cpu.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	curr_rq_lock_irq_save(&flags);
	spin_lock(&ctx->lock);

	/*
	 * If the counter is on, turn it off.
	 * If it is in error state, leave it in error state.
	 */
	if (counter->state >= PERF_COUNTER_STATE_INACTIVE) {
		if (counter == counter->group_leader)
			group_sched_out(counter, cpuctx, ctx);
		else
			counter_sched_out(counter, cpuctx, ctx);
		counter->state = PERF_COUNTER_STATE_OFF;
	}

	spin_unlock(&ctx->lock);
	curr_rq_unlock_irq_restore(&flags);
}
static void perf_counter_disable(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Disable the counter on the cpu that it's on
		 */
		smp_call_function_single(counter->cpu, __perf_counter_disable,
					 counter, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_counter_disable, counter);

	spin_lock_irq(&ctx->lock);
	/*
	 * If the counter is still active, we need to retry the cross-call.
	 */
	if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
		spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (counter->state == PERF_COUNTER_STATE_INACTIVE)
		counter->state = PERF_COUNTER_STATE_OFF;

	spin_unlock_irq(&ctx->lock);
}
/*
 * Disable a counter and all its children.
 */
static void perf_counter_disable_family(struct perf_counter *counter)
{
	struct perf_counter *child;

	perf_counter_disable(counter);

	/*
	 * Lock the mutex to protect the list of children
	 */
	mutex_lock(&counter->mutex);
	list_for_each_entry(child, &counter->child_list, child_list)
		perf_counter_disable(child);
	mutex_unlock(&counter->mutex);
}

static int
counter_sched_in(struct perf_counter *counter,
		 struct perf_cpu_context *cpuctx,
		 struct perf_counter_context *ctx,
		 int cpu)
{
	if (counter->state <= PERF_COUNTER_STATE_OFF)
		return 0;

	counter->state = PERF_COUNTER_STATE_ACTIVE;
	counter->oncpu = cpu;	/* TODO: put 'cpu' into cpuctx->cpu */
	/*
	 * The new state must be visible before we turn it on in the hardware:
	 */
	smp_wmb();

	if (counter->hw_ops->enable(counter)) {
		counter->state = PERF_COUNTER_STATE_INACTIVE;
		counter->oncpu = -1;
		return -EAGAIN;
	}

	if (!is_software_counter(counter))
		cpuctx->active_oncpu++;
	ctx->nr_active++;

	if (counter->hw_event.exclusive)
		cpuctx->exclusive = 1;

	return 0;
}
/*
 * Return 1 for a group consisting entirely of software counters,
 * 0 if the group contains any hardware counters.
 */
static int is_software_only_group(struct perf_counter *leader)
{
	struct perf_counter *counter;

	if (!is_software_counter(leader))
		return 0;

	list_for_each_entry(counter, &leader->sibling_list, list_entry)
		if (!is_software_counter(counter))
			return 0;

	return 1;
}

/*
 * Work out whether we can put this counter group on the CPU now.
 */
static int group_can_go_on(struct perf_counter *counter,
			   struct perf_cpu_context *cpuctx,
			   int can_add_hw)
{
	/*
	 * Groups consisting entirely of software counters can always go on.
	 */
	if (is_software_only_group(counter))
		return 1;
	/*
	 * If an exclusive group is already on, no other hardware
	 * counters can go on.
	 */
	if (cpuctx->exclusive)
		return 0;
	/*
	 * If this group is exclusive and there are already
	 * counters on the CPU, it can't go on.
	 */
	if (counter->hw_event.exclusive && cpuctx->active_oncpu)
		return 0;
	/*
	 * Otherwise, try to add it if all previous groups were able
	 * to go on.
	 */
	return can_add_hw;
}
/*
 * Cross CPU call to install and enable a performance counter
 */
static void __perf_install_in_context(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter *counter = info;
	struct perf_counter_context *ctx = counter->ctx;
	struct perf_counter *leader = counter->group_leader;
	int cpu = smp_processor_id();
	unsigned long flags;
	u64 perf_flags;
	int err;

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	curr_rq_lock_irq_save(&flags);
	spin_lock(&ctx->lock);

	/*
	 * Protect the list operation against NMI by disabling the
	 * counters on a global level. NOP for non NMI based counters.
	 */
	perf_flags = hw_perf_save_disable();

	list_add_counter(counter, ctx);
	ctx->nr_counters++;
	counter->prev_state = PERF_COUNTER_STATE_OFF;

	/*
	 * Don't put the counter on if it is disabled or if
	 * it is in a group and the group isn't on.
	 */
	if (counter->state != PERF_COUNTER_STATE_INACTIVE ||
	    (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE))
		goto unlock;

	/*
	 * An exclusive counter can't go on if there are already active
	 * hardware counters, and no hardware counter can go on if there
	 * is already an exclusive counter on.
	 */
	if (!group_can_go_on(counter, cpuctx, 1))
		err = -EEXIST;
	else
		err = counter_sched_in(counter, cpuctx, ctx, cpu);

	if (err) {
		/*
		 * This counter couldn't go on. If it is in a group
		 * then we have to pull the whole group off.
		 * If the counter group is pinned then put it in error state.
		 */
		if (leader != counter)
			group_sched_out(leader, cpuctx, ctx);
		if (leader->hw_event.pinned)
			leader->state = PERF_COUNTER_STATE_ERROR;
	}

	if (!err && !ctx->task && cpuctx->max_pertask)
		cpuctx->max_pertask--;

unlock:
	hw_perf_restore(perf_flags);

	spin_unlock(&ctx->lock);
	curr_rq_unlock_irq_restore(&flags);
}
/*
 * Attach a performance counter to a context
 *
 * First we add the counter to the list with the hardware enable bit
 * in counter->hw_config cleared.
 *
 * If the counter is attached to a task which is on a CPU we use a smp
 * call to enable it in the task context. The task might have been
 * scheduled away, but we check this in the smp call again.
 *
 * Must be called with ctx->mutex held.
 */
static void
perf_install_in_context(struct perf_counter_context *ctx,
			struct perf_counter *counter,
			int cpu)
{
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Per cpu counters are installed via an smp call and
		 * the install is always successful.
		 */
		smp_call_function_single(cpu, __perf_install_in_context,
					 counter, 1);
		return;
	}

	counter->task = task;
retry:
	task_oncpu_function_call(task, __perf_install_in_context,
				 counter);

	spin_lock_irq(&ctx->lock);
	/*
	 * If the context is active and the counter has not been added
	 * we need to retry the smp call.
	 */
	if (ctx->is_active && list_empty(&counter->list_entry)) {
		spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * The lock prevents that this context is scheduled in so we
	 * can add the counter safely, if the call above did not
	 * succeed.
	 */
	if (list_empty(&counter->list_entry)) {
		list_add_counter(counter, ctx);
		ctx->nr_counters++;
	}
	spin_unlock_irq(&ctx->lock);
}
/*
 * Cross CPU call to enable a performance counter
 */
static void __perf_counter_enable(void *info)
{
	struct perf_counter *counter = info;
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter_context *ctx = counter->ctx;
	struct perf_counter *leader = counter->group_leader;
	unsigned long flags;
	int err;

	/*
	 * If this is a per-task counter, need to check whether this
	 * counter's task is the current task on this cpu.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	curr_rq_lock_irq_save(&flags);
	spin_lock(&ctx->lock);

	counter->prev_state = counter->state;
	if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
		goto unlock;
	counter->state = PERF_COUNTER_STATE_INACTIVE;

	/*
	 * If the counter is in a group and isn't the group leader,
	 * then don't put it on unless the group is on.
	 */
	if (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE)
		goto unlock;

	if (!group_can_go_on(counter, cpuctx, 1))
		err = -EEXIST;
	else
		err = counter_sched_in(counter, cpuctx, ctx,
				       smp_processor_id());

	if (err) {
		/*
		 * If this counter can't go on and it's part of a
		 * group, then the whole group has to come off.
		 */
		if (leader != counter)
			group_sched_out(leader, cpuctx, ctx);
		if (leader->hw_event.pinned)
			leader->state = PERF_COUNTER_STATE_ERROR;
	}

unlock:
	spin_unlock(&ctx->lock);
	curr_rq_unlock_irq_restore(&flags);
}
static void perf_counter_enable(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Enable the counter on the cpu that it's on
		 */
		smp_call_function_single(counter->cpu, __perf_counter_enable,
					 counter, 1);
		return;
	}

	spin_lock_irq(&ctx->lock);
	if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
		goto out;

	/*
	 * If the counter is in error state, clear that first.
	 * That way, if we see the counter in error state below, we
	 * know that it has gone back into error state, as distinct
	 * from the task having been scheduled away before the
	 * cross-call arrived.
	 */
	if (counter->state == PERF_COUNTER_STATE_ERROR)
		counter->state = PERF_COUNTER_STATE_OFF;

retry:
	spin_unlock_irq(&ctx->lock);
	task_oncpu_function_call(task, __perf_counter_enable, counter);

	spin_lock_irq(&ctx->lock);

	/*
	 * If the context is active and the counter is still off,
	 * we need to retry the cross-call.
	 */
	if (ctx->is_active && counter->state == PERF_COUNTER_STATE_OFF)
		goto retry;

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (counter->state == PERF_COUNTER_STATE_OFF)
		counter->state = PERF_COUNTER_STATE_INACTIVE;

out:
	spin_unlock_irq(&ctx->lock);
}
/*
 * Enable a counter and all its children.
 */
static void perf_counter_enable_family(struct perf_counter *counter)
{
	struct perf_counter *child;

	perf_counter_enable(counter);

	/*
	 * Lock the mutex to protect the list of children
	 */
	mutex_lock(&counter->mutex);
	list_for_each_entry(child, &counter->child_list, child_list)
		perf_counter_enable(child);
	mutex_unlock(&counter->mutex);
}
void __perf_counter_sched_out(struct perf_counter_context *ctx,
			      struct perf_cpu_context *cpuctx)
{
	struct perf_counter *counter;
	u64 flags;

	spin_lock(&ctx->lock);
	ctx->is_active = 0;
	if (likely(!ctx->nr_counters))
		goto out;

	flags = hw_perf_save_disable();
	if (ctx->nr_active) {
		list_for_each_entry(counter, &ctx->counter_list, list_entry)
			group_sched_out(counter, cpuctx, ctx);
	}
	hw_perf_restore(flags);
out:
	spin_unlock(&ctx->lock);
}
/*
 * Called from scheduler to remove the counters of the current task,
 * with interrupts disabled.
 *
 * We stop each counter and update the counter value in counter->count.
 *
 * This does not protect us against NMI, but disable()
 * sets the disabled bit in the control field of counter _before_
 * accessing the counter control register. If a NMI hits, then it will
 * not restart the counter.
 */
void perf_counter_task_sched_out(struct task_struct *task, int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_counter_context *ctx = &task->perf_counter_ctx;

	if (likely(!cpuctx->task_ctx))
		return;

	__perf_counter_sched_out(ctx, cpuctx);

	cpuctx->task_ctx = NULL;
}

static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx)
{
	__perf_counter_sched_out(&cpuctx->ctx, cpuctx);
}
static int
group_sched_in(struct perf_counter *group_counter,
	       struct perf_cpu_context *cpuctx,
	       struct perf_counter_context *ctx,
	       int cpu)
{
	struct perf_counter *counter, *partial_group;
	int ret;

	if (group_counter->state == PERF_COUNTER_STATE_OFF)
		return 0;

	ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
	if (ret)
		return ret < 0 ? ret : 0;

	group_counter->prev_state = group_counter->state;
	if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
		return -EAGAIN;

	/*
	 * Schedule in siblings as one group (if any):
	 */
	list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
		counter->prev_state = counter->state;
		if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
			partial_group = counter;
			goto group_error;
		}
	}

	return 0;

group_error:
	/*
	 * Groups can be scheduled in as one unit only, so undo any
	 * partial group before returning:
	 */
	list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
		if (counter == partial_group)
			break;
		counter_sched_out(counter, cpuctx, ctx);
	}
	counter_sched_out(group_counter, cpuctx, ctx);

	return -EAGAIN;
}
static void
__perf_counter_sched_in(struct perf_counter_context *ctx,
			struct perf_cpu_context *cpuctx, int cpu)
{
	struct perf_counter *counter;
	u64 flags;
	int can_add_hw = 1;

	spin_lock(&ctx->lock);
	ctx->is_active = 1;
	if (likely(!ctx->nr_counters))
		goto out;

	flags = hw_perf_save_disable();

	/*
	 * First go through the list and put on any pinned groups
	 * in order to give them the best chance of going on.
	 */
	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		if (counter->state <= PERF_COUNTER_STATE_OFF ||
		    !counter->hw_event.pinned)
			continue;
		if (counter->cpu != -1 && counter->cpu != cpu)
			continue;

		if (group_can_go_on(counter, cpuctx, 1))
			group_sched_in(counter, cpuctx, ctx, cpu);

		/*
		 * If this pinned group hasn't been scheduled,
		 * put it in error state.
		 */
		if (counter->state == PERF_COUNTER_STATE_INACTIVE)
			counter->state = PERF_COUNTER_STATE_ERROR;
	}

	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		/*
		 * Ignore counters in OFF or ERROR state, and
		 * ignore pinned counters since we did them already.
		 */
		if (counter->state <= PERF_COUNTER_STATE_OFF ||
		    counter->hw_event.pinned)
			continue;

		/*
		 * Listen to the 'cpu' scheduling filter constraint
		 * of counters:
		 */
		if (counter->cpu != -1 && counter->cpu != cpu)
			continue;

		if (group_can_go_on(counter, cpuctx, can_add_hw)) {
			if (group_sched_in(counter, cpuctx, ctx, cpu))
				can_add_hw = 0;
		}
	}
	hw_perf_restore(flags);
out:
	spin_unlock(&ctx->lock);
}
/*
 * Called from scheduler to add the counters of the current task
 * with interrupts disabled.
 *
 * We restore the counter value and then enable it.
 *
 * This does not protect us against NMI, but enable()
 * sets the enabled bit in the control field of counter _before_
 * accessing the counter control register. If a NMI hits, then it will
 * keep the counter running.
 */
void perf_counter_task_sched_in(struct task_struct *task, int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_counter_context *ctx = &task->perf_counter_ctx;

	__perf_counter_sched_in(ctx, cpuctx, cpu);
	cpuctx->task_ctx = ctx;
}

static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
{
	struct perf_counter_context *ctx = &cpuctx->ctx;

	__perf_counter_sched_in(ctx, cpuctx, cpu);
}
int perf_counter_task_disable(void)
{
	struct task_struct *curr = current;
	struct perf_counter_context *ctx = &curr->perf_counter_ctx;
	struct perf_counter *counter;
	unsigned long flags;
	u64 perf_flags;
	int cpu;

	if (likely(!ctx->nr_counters))
		return 0;

	curr_rq_lock_irq_save(&flags);
	cpu = smp_processor_id();

	/* force the update of the task clock: */
	__task_delta_exec(curr, 1);

	perf_counter_task_sched_out(curr, cpu);

	spin_lock(&ctx->lock);

	/*
	 * Disable all the counters:
	 */
	perf_flags = hw_perf_save_disable();

	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		if (counter->state != PERF_COUNTER_STATE_ERROR)
			counter->state = PERF_COUNTER_STATE_OFF;
	}

	hw_perf_restore(perf_flags);

	spin_unlock(&ctx->lock);

	curr_rq_unlock_irq_restore(&flags);

	return 0;
}
int perf_counter_task_enable(void)
{
	struct task_struct *curr = current;
	struct perf_counter_context *ctx = &curr->perf_counter_ctx;
	struct perf_counter *counter;
	unsigned long flags;
	u64 perf_flags;
	int cpu;

	if (likely(!ctx->nr_counters))
		return 0;

	curr_rq_lock_irq_save(&flags);
	cpu = smp_processor_id();

	/* force the update of the task clock: */
	__task_delta_exec(curr, 1);

	perf_counter_task_sched_out(curr, cpu);

	spin_lock(&ctx->lock);

	/*
	 * Enable all the counters:
	 */
	perf_flags = hw_perf_save_disable();

	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		if (counter->state > PERF_COUNTER_STATE_OFF)
			continue;
		counter->state = PERF_COUNTER_STATE_INACTIVE;
		counter->hw_event.disabled = 0;
	}
	hw_perf_restore(perf_flags);

	spin_unlock(&ctx->lock);

	perf_counter_task_sched_in(curr, cpu);

	curr_rq_unlock_irq_restore(&flags);

	return 0;
}
/*
 * Round-robin a context's counters:
 */
static void rotate_ctx(struct perf_counter_context *ctx)
{
	struct perf_counter *counter;
	u64 perf_flags;

	if (!ctx->nr_counters)
		return;

	spin_lock(&ctx->lock);
	/*
	 * Rotate the first entry last (works just fine for group counters too):
	 */
	perf_flags = hw_perf_save_disable();
	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		list_move_tail(&counter->list_entry, &ctx->counter_list);
		break;
	}
	hw_perf_restore(perf_flags);

	spin_unlock(&ctx->lock);
}
void perf_counter_task_tick(struct task_struct *curr, int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_counter_context *ctx = &curr->perf_counter_ctx;
	const int rotate_percpu = 0;

	if (rotate_percpu)
		perf_counter_cpu_sched_out(cpuctx);
	perf_counter_task_sched_out(curr, cpu);

	if (rotate_percpu)
		rotate_ctx(&cpuctx->ctx);
	rotate_ctx(ctx);

	if (rotate_percpu)
		perf_counter_cpu_sched_in(cpuctx, cpu);
	perf_counter_task_sched_in(curr, cpu);
}
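/*
 * Note (editorial): the per-tick rotation above is what multiplexes counters
 * when a context holds more counters than the hardware can schedule at once -
 * rotating the list gives groups that could not go on this time a chance to
 * be scheduled in on a later tick.
 */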
/*
 * Cross CPU call to read the hardware counter
 */
static void __read(void *info)
{
	struct perf_counter *counter = info;
	unsigned long flags;

	curr_rq_lock_irq_save(&flags);
	counter->hw_ops->read(counter);
	curr_rq_unlock_irq_restore(&flags);
}

static u64 perf_counter_read(struct perf_counter *counter)
{
	/*
	 * If counter is enabled and currently active on a CPU, update the
	 * value in the counter structure:
	 */
	if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
		smp_call_function_single(counter->oncpu,
					 __read, counter, 1);
	}

	return atomic64_read(&counter->count);
}
/*
 * Cross CPU call to switch performance data pointers
 */
static void __perf_switch_irq_data(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter *counter = info;
	struct perf_counter_context *ctx = counter->ctx;
	struct perf_data *oldirqdata = counter->irqdata;

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived.
	 */
	if (ctx->task) {
		if (cpuctx->task_ctx != ctx)
			return;
		spin_lock(&ctx->lock);
	}

	/* Change the pointer NMI safe */
	atomic_long_set((atomic_long_t *)&counter->irqdata,
			(unsigned long) counter->usrdata);
	counter->usrdata = oldirqdata;

	if (ctx->task)
		spin_unlock(&ctx->lock);
}
static struct perf_data *perf_switch_irq_data(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	struct perf_data *oldirqdata = counter->irqdata;
	struct task_struct *task = ctx->task;

	if (!task) {
		smp_call_function_single(counter->cpu,
					 __perf_switch_irq_data,
					 counter, 1);
		return counter->usrdata;
	}

retry:
	spin_lock_irq(&ctx->lock);
	if (counter->state != PERF_COUNTER_STATE_ACTIVE) {
		counter->irqdata = counter->usrdata;
		counter->usrdata = oldirqdata;
		spin_unlock_irq(&ctx->lock);
		return oldirqdata;
	}
	spin_unlock_irq(&ctx->lock);
	task_oncpu_function_call(task, __perf_switch_irq_data, counter);
	/* Might have failed, because task was scheduled out */
	if (counter->irqdata == oldirqdata)
		goto retry;

	return counter->usrdata;
}
static void put_context(struct perf_counter_context *ctx)
{
	if (ctx->task)
		put_task_struct(ctx->task);
}

static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
{
	struct perf_cpu_context *cpuctx;
	struct perf_counter_context *ctx;
	struct task_struct *task;

	/*
	 * If cpu is not a wildcard then this is a percpu counter:
	 */
	if (cpu != -1) {
		/* Must be root to operate on a CPU counter: */
		if (!capable(CAP_SYS_ADMIN))
			return ERR_PTR(-EACCES);

		if (cpu < 0 || cpu > num_possible_cpus())
			return ERR_PTR(-EINVAL);

		/*
		 * We could be clever and allow to attach a counter to an
		 * offline CPU and activate it when the CPU comes up, but
		 * that's for later.
		 */
		if (!cpu_isset(cpu, cpu_online_map))
			return ERR_PTR(-ENODEV);

		cpuctx = &per_cpu(perf_cpu_context, cpu);
		ctx = &cpuctx->ctx;

		return ctx;
	}

	rcu_read_lock();
	if (!pid)
		task = current;
	else
		task = find_task_by_vpid(pid);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();

	if (!task)
		return ERR_PTR(-ESRCH);

	ctx = &task->perf_counter_ctx;
	ctx->task = task;

	/* Reuse ptrace permission checks for now. */
	if (!ptrace_may_access(task, PTRACE_MODE_READ)) {
		put_context(ctx);
		return ERR_PTR(-EACCES);
	}

	return ctx;
}
static void free_counter_rcu(struct rcu_head *head)
{
	struct perf_counter *counter;

	counter = container_of(head, struct perf_counter, rcu_head);
	kfree(counter);
}

/*
 * Called when the last reference to the file is gone.
 */
static int perf_release(struct inode *inode, struct file *file)
{
	struct perf_counter *counter = file->private_data;
	struct perf_counter_context *ctx = counter->ctx;

	file->private_data = NULL;

	mutex_lock(&ctx->mutex);
	mutex_lock(&counter->mutex);

	perf_counter_remove_from_context(counter);

	mutex_unlock(&counter->mutex);
	mutex_unlock(&ctx->mutex);

	call_rcu(&counter->rcu_head, free_counter_rcu);

	put_context(ctx);

	return 0;
}
/*
 * Read the performance counter - simple non blocking version for now
 */
static ssize_t
perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
{
	u64 cntval;

	if (count != sizeof(cntval))
		return -EINVAL;

	/*
	 * Return end-of-file for a read on a counter that is in
	 * error state (i.e. because it was pinned but it couldn't be
	 * scheduled on to the CPU at some point).
	 */
	if (counter->state == PERF_COUNTER_STATE_ERROR)
		return 0;

	mutex_lock(&counter->mutex);
	cntval = perf_counter_read(counter);
	mutex_unlock(&counter->mutex);

	return put_user(cntval, (u64 __user *) buf) ? -EFAULT : sizeof(cntval);
}

static ssize_t
perf_copy_usrdata(struct perf_data *usrdata, char __user *buf, size_t count)
{
	if (!usrdata->len)
		return 0;

	count = min(count, (size_t)usrdata->len);
	if (copy_to_user(buf, usrdata->data + usrdata->rd_idx, count))
		return -EFAULT;

	/* Adjust the counters */
	usrdata->len -= count;
	if (!usrdata->len)
		usrdata->rd_idx = 0;
	else
		usrdata->rd_idx += count;

	return count;
}
static ssize_t
perf_read_irq_data(struct perf_counter *counter,
		   char __user *buf,
		   size_t count,
		   int nonblocking)
{
	struct perf_data *irqdata, *usrdata;
	DECLARE_WAITQUEUE(wait, current);
	ssize_t res, res2;

	irqdata = counter->irqdata;
	usrdata = counter->usrdata;

	if (usrdata->len + irqdata->len >= count)
		goto read_pending;

	if (nonblocking)
		return -EAGAIN;

	spin_lock_irq(&counter->waitq.lock);
	__add_wait_queue(&counter->waitq, &wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (usrdata->len + irqdata->len >= count)
			break;

		if (signal_pending(current))
			break;

		if (counter->state == PERF_COUNTER_STATE_ERROR)
			break;

		spin_unlock_irq(&counter->waitq.lock);
		schedule();
		spin_lock_irq(&counter->waitq.lock);
	}
	__remove_wait_queue(&counter->waitq, &wait);
	__set_current_state(TASK_RUNNING);
	spin_unlock_irq(&counter->waitq.lock);

	if (usrdata->len + irqdata->len < count &&
	    counter->state != PERF_COUNTER_STATE_ERROR)
		return -ERESTARTSYS;
read_pending:
	mutex_lock(&counter->mutex);

	/* Drain pending data first: */
	res = perf_copy_usrdata(usrdata, buf, count);
	if (res < 0 || res == count)
		goto out;

	/* Switch irq buffer: */
	usrdata = perf_switch_irq_data(counter);
	res2 = perf_copy_usrdata(usrdata, buf + res, count - res);
	if (res2 < 0) {
		if (!res)
			res = -EFAULT;
	} else {
		res += res2;
	}
out:
	mutex_unlock(&counter->mutex);

	return res;
}
static ssize_t
perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct perf_counter *counter = file->private_data;

	switch (counter->hw_event.record_type) {
	case PERF_RECORD_SIMPLE:
		return perf_read_hw(counter, buf, count);

	case PERF_RECORD_IRQ:
	case PERF_RECORD_GROUP:
		return perf_read_irq_data(counter, buf, count,
					  file->f_flags & O_NONBLOCK);
	}

	return -EINVAL;
}
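/*
 * Userspace usage sketch (illustrative only, not part of this file): a
 * counter opened with record_type == PERF_RECORD_SIMPLE is read back as a
 * single u64, e.g.:
 *
 *	u64 count;
 *	if (read(counter_fd, &count, sizeof(count)) == sizeof(count))
 *		printf("%llu\n", (unsigned long long)count);
 *
 * where counter_fd is assumed to come from sys_perf_counter_open() below.
 */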
static unsigned int perf_poll(struct file *file, poll_table *wait)
{
	struct perf_counter *counter = file->private_data;
	unsigned int events = 0;
	unsigned long flags;

	poll_wait(file, &counter->waitq, wait);

	spin_lock_irqsave(&counter->waitq.lock, flags);
	if (counter->usrdata->len || counter->irqdata->len)
		events |= POLLIN;
	spin_unlock_irqrestore(&counter->waitq.lock, flags);

	return events;
}
static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct perf_counter *counter = file->private_data;
	int err = 0;

	switch (cmd) {
	case PERF_COUNTER_IOC_ENABLE:
		perf_counter_enable_family(counter);
		break;
	case PERF_COUNTER_IOC_DISABLE:
		perf_counter_disable_family(counter);
		break;
	default:
		err = -ENOTTY;
	}
	return err;
}
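/*
 * Userspace usage sketch (illustrative): counters created with
 * hw_event.disabled set can be started and stopped around a region of
 * interest via the ioctls handled above, e.g.:
 *
 *	ioctl(counter_fd, PERF_COUNTER_IOC_ENABLE);
 *	run_workload();
 *	ioctl(counter_fd, PERF_COUNTER_IOC_DISABLE);
 */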
static const struct file_operations perf_fops = {
	.release		= perf_release,
	.read			= perf_read,
	.poll			= perf_poll,
	.unlocked_ioctl		= perf_ioctl,
	.compat_ioctl		= perf_ioctl,
};
/*
 * Generic software counter infrastructure
 */

static void perf_swcounter_update(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	u64 prev, now;
	s64 delta;

again:
	prev = atomic64_read(&hwc->prev_count);
	now = atomic64_read(&hwc->count);
	if (atomic64_cmpxchg(&hwc->prev_count, prev, now) != prev)
		goto again;

	delta = now - prev;

	atomic64_add(delta, &counter->count);
	atomic64_sub(delta, &hwc->period_left);
}

static void perf_swcounter_set_period(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	s64 left = atomic64_read(&hwc->period_left);
	s64 period = hwc->irq_period;

	if (unlikely(left <= -period)) {
		left = period;
		atomic64_set(&hwc->period_left, left);
	}

	if (unlikely(left <= 0)) {
		left += period;
		atomic64_add(period, &hwc->period_left);
	}

	atomic64_set(&hwc->prev_count, -left);
	atomic64_set(&hwc->count, -left);
}

static void perf_swcounter_save_and_restart(struct perf_counter *counter)
{
	perf_swcounter_update(counter);
	perf_swcounter_set_period(counter);
}
static void perf_swcounter_store_irq(struct perf_counter *counter, u64 data)
{
	struct perf_data *irqdata = counter->irqdata;

	if (irqdata->len > PERF_DATA_BUFLEN - sizeof(u64)) {
		irqdata->overrun++;
	} else {
		u64 *p = (u64 *) &irqdata->data[irqdata->len];

		*p = data;
		irqdata->len += sizeof(u64);
	}
}

static void perf_swcounter_handle_group(struct perf_counter *sibling)
{
	struct perf_counter *counter, *group_leader = sibling->group_leader;

	list_for_each_entry(counter, &group_leader->sibling_list, list_entry) {
		counter->hw_ops->read(counter);
		perf_swcounter_store_irq(sibling, counter->hw_event.type);
		perf_swcounter_store_irq(sibling, atomic64_read(&counter->count));
	}
}

static void perf_swcounter_interrupt(struct perf_counter *counter,
				     int nmi, struct pt_regs *regs)
{
	switch (counter->hw_event.record_type) {
	case PERF_RECORD_SIMPLE:
		break;

	case PERF_RECORD_IRQ:
		perf_swcounter_store_irq(counter, instruction_pointer(regs));
		break;

	case PERF_RECORD_GROUP:
		perf_swcounter_handle_group(counter);
		break;
	}

	if (nmi) {
		counter->wakeup_pending = 1;
		set_tsk_thread_flag(current, TIF_PERF_COUNTERS);
	} else
		wake_up(&counter->waitq);
}
static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
{
	struct perf_counter *counter;
	struct pt_regs *regs;

	counter = container_of(hrtimer, struct perf_counter, hw.hrtimer);
	counter->hw_ops->read(counter);

	regs = get_irq_regs();
	/*
	 * In case we exclude kernel IPs or are somehow not in interrupt
	 * context, provide the next best thing, the user IP.
	 */
	if ((counter->hw_event.exclude_kernel || !regs) &&
	    !counter->hw_event.exclude_user)
		regs = task_pt_regs(current);

	if (regs)
		perf_swcounter_interrupt(counter, 0, regs);

	hrtimer_forward_now(hrtimer, ns_to_ktime(counter->hw.irq_period));

	return HRTIMER_RESTART;
}

static void perf_swcounter_overflow(struct perf_counter *counter,
				    int nmi, struct pt_regs *regs)
{
	perf_swcounter_save_and_restart(counter);
	perf_swcounter_interrupt(counter, nmi, regs);
}
static int perf_swcounter_match(struct perf_counter *counter,
				enum hw_event_types event,
				struct pt_regs *regs)
{
	if (counter->state != PERF_COUNTER_STATE_ACTIVE)
		return 0;

	if (counter->hw_event.raw)
		return 0;

	if (counter->hw_event.type != event)
		return 0;

	if (counter->hw_event.exclude_user && user_mode(regs))
		return 0;

	if (counter->hw_event.exclude_kernel && !user_mode(regs))
		return 0;

	return 1;
}

static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
			       int nmi, struct pt_regs *regs)
{
	int neg = atomic64_add_negative(nr, &counter->hw.count);
	if (counter->hw.irq_period && !neg)
		perf_swcounter_overflow(counter, nmi, regs);
}

static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
				     enum hw_event_types event, u64 nr,
				     int nmi, struct pt_regs *regs)
{
	struct perf_counter *counter;

	if (list_empty(&ctx->event_list))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
		if (perf_swcounter_match(counter, event, regs))
			perf_swcounter_add(counter, nr, nmi, regs);
	}
	rcu_read_unlock();
}
void perf_swcounter_event(enum hw_event_types event, u64 nr,
			  int nmi, struct pt_regs *regs)
{
	struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);

	perf_swcounter_ctx_event(&cpuctx->ctx, event, nr, nmi, regs);
	if (cpuctx->task_ctx)
		perf_swcounter_ctx_event(cpuctx->task_ctx, event, nr, nmi, regs);

	put_cpu_var(perf_cpu_context);
}
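/*
 * Call-site sketch (illustrative): architecture or core kernel code reports
 * software events by calling, for example from a fault handler:
 *
 *	perf_swcounter_event(PERF_COUNT_PAGE_FAULTS, 1, 0, regs);
 *
 * Every counter in the current CPU and task contexts that matches the event
 * type (see perf_swcounter_match()) then has 'nr' added to it.
 */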
static void perf_swcounter_read(struct perf_counter *counter)
{
	perf_swcounter_update(counter);
}

static int perf_swcounter_enable(struct perf_counter *counter)
{
	perf_swcounter_set_period(counter);
	return 0;
}

static void perf_swcounter_disable(struct perf_counter *counter)
{
	perf_swcounter_update(counter);
}

static const struct hw_perf_counter_ops perf_ops_generic = {
	.enable		= perf_swcounter_enable,
	.disable	= perf_swcounter_disable,
	.read		= perf_swcounter_read,
};
/*
 * Software counter: cpu wall time clock
 */

static void cpu_clock_perf_counter_update(struct perf_counter *counter)
{
	int cpu = raw_smp_processor_id();
	s64 prev;
	u64 now;

	now = cpu_clock(cpu);
	prev = atomic64_read(&counter->hw.prev_count);
	atomic64_set(&counter->hw.prev_count, now);
	atomic64_add(now - prev, &counter->count);
}

static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	int cpu = raw_smp_processor_id();

	atomic64_set(&hwc->prev_count, cpu_clock(cpu));
	if (hwc->irq_period) {
		hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		hwc->hrtimer.function = perf_swcounter_hrtimer;
		__hrtimer_start_range_ns(&hwc->hrtimer,
				ns_to_ktime(hwc->irq_period), 0,
				HRTIMER_MODE_REL, 0);
	}

	return 0;
}

static void cpu_clock_perf_counter_disable(struct perf_counter *counter)
{
	hrtimer_cancel(&counter->hw.hrtimer);
	cpu_clock_perf_counter_update(counter);
}

static void cpu_clock_perf_counter_read(struct perf_counter *counter)
{
	cpu_clock_perf_counter_update(counter);
}

static const struct hw_perf_counter_ops perf_ops_cpu_clock = {
	.enable		= cpu_clock_perf_counter_enable,
	.disable	= cpu_clock_perf_counter_disable,
	.read		= cpu_clock_perf_counter_read,
};
/*
 * Software counter: task time clock
 */

/*
 * Called from within the scheduler:
 */
static u64 task_clock_perf_counter_val(struct perf_counter *counter, int update)
{
	struct task_struct *curr = counter->task;
	u64 delta;

	delta = __task_delta_exec(curr, update);

	return curr->se.sum_exec_runtime + delta;
}

static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now)
{
	u64 prev;
	s64 delta;

	prev = atomic64_read(&counter->hw.prev_count);

	atomic64_set(&counter->hw.prev_count, now);

	delta = now - prev;

	atomic64_add(delta, &counter->count);
}

static int task_clock_perf_counter_enable(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;

	atomic64_set(&hwc->prev_count, task_clock_perf_counter_val(counter, 0));
	if (hwc->irq_period) {
		hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		hwc->hrtimer.function = perf_swcounter_hrtimer;
		__hrtimer_start_range_ns(&hwc->hrtimer,
				ns_to_ktime(hwc->irq_period), 0,
				HRTIMER_MODE_REL, 0);
	}

	return 0;
}

static void task_clock_perf_counter_disable(struct perf_counter *counter)
{
	hrtimer_cancel(&counter->hw.hrtimer);
	task_clock_perf_counter_update(counter,
				       task_clock_perf_counter_val(counter, 0));
}

static void task_clock_perf_counter_read(struct perf_counter *counter)
{
	task_clock_perf_counter_update(counter,
				       task_clock_perf_counter_val(counter, 1));
}

static const struct hw_perf_counter_ops perf_ops_task_clock = {
	.enable		= task_clock_perf_counter_enable,
	.disable	= task_clock_perf_counter_disable,
	.read		= task_clock_perf_counter_read,
};
/*
 * Software counter: context switches
 */

static u64 get_context_switches(struct perf_counter *counter)
{
	struct task_struct *curr = counter->ctx->task;

	if (curr)
		return curr->nvcsw + curr->nivcsw;
	return cpu_nr_switches(smp_processor_id());
}

static void context_switches_perf_counter_update(struct perf_counter *counter)
{
	u64 prev, now;
	s64 delta;

	prev = atomic64_read(&counter->hw.prev_count);
	now = get_context_switches(counter);

	atomic64_set(&counter->hw.prev_count, now);

	delta = now - prev;

	atomic64_add(delta, &counter->count);
}

static void context_switches_perf_counter_read(struct perf_counter *counter)
{
	context_switches_perf_counter_update(counter);
}

static int context_switches_perf_counter_enable(struct perf_counter *counter)
{
	if (counter->prev_state <= PERF_COUNTER_STATE_OFF)
		atomic64_set(&counter->hw.prev_count,
			     get_context_switches(counter));
	return 0;
}

static void context_switches_perf_counter_disable(struct perf_counter *counter)
{
	context_switches_perf_counter_update(counter);
}

static const struct hw_perf_counter_ops perf_ops_context_switches = {
	.enable		= context_switches_perf_counter_enable,
	.disable	= context_switches_perf_counter_disable,
	.read		= context_switches_perf_counter_read,
};

/*
 * Software counter: cpu migrations
 */

static inline u64 get_cpu_migrations(struct perf_counter *counter)
{
	struct task_struct *curr = counter->ctx->task;

	if (curr)
		return curr->se.nr_migrations;
	return cpu_nr_migrations(smp_processor_id());
}

static void cpu_migrations_perf_counter_update(struct perf_counter *counter)
{
	u64 prev, now;
	s64 delta;

	prev = atomic64_read(&counter->hw.prev_count);
	now = get_cpu_migrations(counter);

	atomic64_set(&counter->hw.prev_count, now);

	delta = now - prev;

	atomic64_add(delta, &counter->count);
}

static void cpu_migrations_perf_counter_read(struct perf_counter *counter)
{
	cpu_migrations_perf_counter_update(counter);
}

static int cpu_migrations_perf_counter_enable(struct perf_counter *counter)
{
	if (counter->prev_state <= PERF_COUNTER_STATE_OFF)
		atomic64_set(&counter->hw.prev_count,
			     get_cpu_migrations(counter));
	return 0;
}

static void cpu_migrations_perf_counter_disable(struct perf_counter *counter)
{
	cpu_migrations_perf_counter_update(counter);
}

static const struct hw_perf_counter_ops perf_ops_cpu_migrations = {
	.enable		= cpu_migrations_perf_counter_enable,
	.disable	= cpu_migrations_perf_counter_disable,
	.read		= cpu_migrations_perf_counter_read,
};
static const struct hw_perf_counter_ops *
sw_perf_counter_init(struct perf_counter *counter)
{
	struct perf_counter_hw_event *hw_event = &counter->hw_event;
	const struct hw_perf_counter_ops *hw_ops = NULL;
	struct hw_perf_counter *hwc = &counter->hw;

	/*
	 * Software counters (currently) can't in general distinguish
	 * between user, kernel and hypervisor events.
	 * However, context switches and cpu migrations are considered
	 * to be kernel events, and page faults are never hypervisor
	 * events.
	 */
	switch (counter->hw_event.type) {
	case PERF_COUNT_CPU_CLOCK:
		hw_ops = &perf_ops_cpu_clock;

		if (hw_event->irq_period && hw_event->irq_period < 10000)
			hw_event->irq_period = 10000;
		break;
	case PERF_COUNT_TASK_CLOCK:
		/*
		 * If the user instantiates this as a per-cpu counter,
		 * use the cpu_clock counter instead.
		 */
		if (counter->ctx->task)
			hw_ops = &perf_ops_task_clock;
		else
			hw_ops = &perf_ops_cpu_clock;

		if (hw_event->irq_period && hw_event->irq_period < 10000)
			hw_event->irq_period = 10000;
		break;
	case PERF_COUNT_PAGE_FAULTS:
	case PERF_COUNT_PAGE_FAULTS_MIN:
	case PERF_COUNT_PAGE_FAULTS_MAJ:
		hw_ops = &perf_ops_generic;
		break;
	case PERF_COUNT_CONTEXT_SWITCHES:
		if (!counter->hw_event.exclude_kernel)
			hw_ops = &perf_ops_context_switches;
		break;
	case PERF_COUNT_CPU_MIGRATIONS:
		if (!counter->hw_event.exclude_kernel)
			hw_ops = &perf_ops_cpu_migrations;
		break;
	}

	if (hw_ops)
		hwc->irq_period = hw_event->irq_period;

	return hw_ops;
}
/*
 * Allocate and initialize a counter structure
 */
static struct perf_counter *
perf_counter_alloc(struct perf_counter_hw_event *hw_event,
		   int cpu,
		   struct perf_counter_context *ctx,
		   struct perf_counter *group_leader,
		   gfp_t gfpflags)
{
	const struct hw_perf_counter_ops *hw_ops;
	struct perf_counter *counter;

	counter = kzalloc(sizeof(*counter), gfpflags);
	if (!counter)
		return NULL;

	/*
	 * Single counters are their own group leaders, with an
	 * empty sibling list:
	 */
	if (!group_leader)
		group_leader = counter;

	mutex_init(&counter->mutex);
	INIT_LIST_HEAD(&counter->list_entry);
	INIT_LIST_HEAD(&counter->event_entry);
	INIT_LIST_HEAD(&counter->sibling_list);
	init_waitqueue_head(&counter->waitq);

	INIT_LIST_HEAD(&counter->child_list);

	counter->irqdata		= &counter->data[0];
	counter->usrdata		= &counter->data[1];
	counter->cpu			= cpu;
	counter->hw_event		= *hw_event;
	counter->wakeup_pending		= 0;
	counter->group_leader		= group_leader;
	counter->hw_ops			= NULL;
	counter->ctx			= ctx;

	counter->state = PERF_COUNTER_STATE_INACTIVE;
	if (hw_event->disabled)
		counter->state = PERF_COUNTER_STATE_OFF;

	hw_ops = NULL;
	if (!hw_event->raw && hw_event->type < 0)
		hw_ops = sw_perf_counter_init(counter);
	else
		hw_ops = hw_perf_counter_init(counter);

	if (!hw_ops) {
		kfree(counter);
		return NULL;
	}
	counter->hw_ops = hw_ops;

	return counter;
}
/**
 * sys_perf_counter_open - open a performance counter, associate it to a task/cpu
 *
 * @hw_event_uptr:	event type attributes for monitoring/sampling
 * @group_fd:		group leader counter fd
 */
SYSCALL_DEFINE5(perf_counter_open,
		const struct perf_counter_hw_event __user *, hw_event_uptr,
		pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
{
	struct perf_counter *counter, *group_leader;
	struct perf_counter_hw_event hw_event;
	struct perf_counter_context *ctx;
	struct file *counter_file = NULL;
	struct file *group_file = NULL;
	int fput_needed = 0;
	int fput_needed2 = 0;
	int ret;

	/* for future expandability... */
	if (flags)
		return -EINVAL;

	if (copy_from_user(&hw_event, hw_event_uptr, sizeof(hw_event)) != 0)
		return -EFAULT;

	/*
	 * Get the target context (task or percpu):
	 */
	ctx = find_get_context(pid, cpu);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	/*
	 * Look up the group leader (we will attach this counter to it):
	 */
	group_leader = NULL;
	if (group_fd != -1) {
		ret = -EINVAL;
		group_file = fget_light(group_fd, &fput_needed);
		if (!group_file)
			goto err_put_context;
		if (group_file->f_op != &perf_fops)
			goto err_put_context;

		group_leader = group_file->private_data;
		/*
		 * Do not allow a recursive hierarchy (this new sibling
		 * becoming part of another group-sibling):
		 */
		if (group_leader->group_leader != group_leader)
			goto err_put_context;
		/*
		 * Do not allow to attach to a group in a different
		 * task or CPU context:
		 */
		if (group_leader->ctx != ctx)
			goto err_put_context;
		/*
		 * Only a group leader can be exclusive or pinned
		 */
		if (hw_event.exclusive || hw_event.pinned)
			goto err_put_context;
	}

	ret = -EINVAL;
	counter = perf_counter_alloc(&hw_event, cpu, ctx, group_leader,
				     GFP_KERNEL);
	if (!counter)
		goto err_put_context;

	ret = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0);
	if (ret < 0)
		goto err_free_put_context;

	counter_file = fget_light(ret, &fput_needed2);
	if (!counter_file)
		goto err_free_put_context;

	counter->filp = counter_file;
	mutex_lock(&ctx->mutex);
	perf_install_in_context(ctx, counter, cpu);
	mutex_unlock(&ctx->mutex);

	fput_light(counter_file, fput_needed2);

out_fput:
	fput_light(group_file, fput_needed);

	return ret;

err_free_put_context:
	kfree(counter);

err_put_context:
	put_context(ctx);

	goto out_fput;
}
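/*
 * Userspace usage sketch (illustrative only; only fields that appear in this
 * file are shown, and the syscall number name assumes the architecture wires
 * up __NR_perf_counter_open). Counting the current task's CPU clock:
 *
 *	struct perf_counter_hw_event hw_event = {
 *		.type		= PERF_COUNT_CPU_CLOCK,
 *		.record_type	= PERF_RECORD_SIMPLE,
 *	};
 *	int fd = syscall(__NR_perf_counter_open, &hw_event,
 *			 0, -1, -1, 0);
 *
 * with pid 0 meaning the current task, cpu -1 meaning any cpu, group_fd -1
 * meaning no group leader, and flags 0.
 */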
/*
 * Initialize the perf_counter context in a task_struct:
 */
static void
__perf_counter_init_context(struct perf_counter_context *ctx,
			    struct task_struct *task)
{
	memset(ctx, 0, sizeof(*ctx));
	spin_lock_init(&ctx->lock);
	mutex_init(&ctx->mutex);
	INIT_LIST_HEAD(&ctx->counter_list);
	INIT_LIST_HEAD(&ctx->event_list);
	ctx->task = task;
}

/*
 * inherit a counter from parent task to child task:
 */
static struct perf_counter *
inherit_counter(struct perf_counter *parent_counter,
	      struct task_struct *parent,
	      struct perf_counter_context *parent_ctx,
	      struct task_struct *child,
	      struct perf_counter *group_leader,
	      struct perf_counter_context *child_ctx)
{
	struct perf_counter *child_counter;

	/*
	 * Instead of creating recursive hierarchies of counters,
	 * we link inherited counters back to the original parent,
	 * which has a filp for sure, which we use as the reference
	 * count:
	 */
	if (parent_counter->parent)
		parent_counter = parent_counter->parent;

	child_counter = perf_counter_alloc(&parent_counter->hw_event,
					   parent_counter->cpu, child_ctx,
					   group_leader, GFP_KERNEL);
	if (!child_counter)
		return NULL;

	/*
	 * Link it up in the child's context:
	 */
	child_counter->task = child;
	list_add_counter(child_counter, child_ctx);
	child_ctx->nr_counters++;

	child_counter->parent = parent_counter;
	/*
	 * inherit into child's child as well:
	 */
	child_counter->hw_event.inherit = 1;

	/*
	 * Get a reference to the parent filp - we will fput it
	 * when the child counter exits. This is safe to do because
	 * we are in the parent and we know that the filp still
	 * exists and has a nonzero count:
	 */
	atomic_long_inc(&parent_counter->filp->f_count);

	/*
	 * Link this into the parent counter's child list
	 */
	mutex_lock(&parent_counter->mutex);
	list_add_tail(&child_counter->child_list, &parent_counter->child_list);

	/*
	 * Make the child state follow the state of the parent counter,
	 * not its hw_event.disabled bit.  We hold the parent's mutex,
	 * so we won't race with perf_counter_{en,dis}able_family.
	 */
	if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE)
		child_counter->state = PERF_COUNTER_STATE_INACTIVE;
	else
		child_counter->state = PERF_COUNTER_STATE_OFF;

	mutex_unlock(&parent_counter->mutex);

	return child_counter;
}
static int inherit_group(struct perf_counter *parent_counter,
	      struct task_struct *parent,
	      struct perf_counter_context *parent_ctx,
	      struct task_struct *child,
	      struct perf_counter_context *child_ctx)
{
	struct perf_counter *leader;
	struct perf_counter *sub;

	leader = inherit_counter(parent_counter, parent, parent_ctx,
				 child, NULL, child_ctx);
	if (!leader)
		return -ENOMEM;
	list_for_each_entry(sub, &parent_counter->sibling_list, list_entry) {
		if (!inherit_counter(sub, parent, parent_ctx,
				     child, leader, child_ctx))
			return -ENOMEM;
	}
	return 0;
}

static void sync_child_counter(struct perf_counter *child_counter,
			       struct perf_counter *parent_counter)
{
	u64 parent_val, child_val;

	parent_val = atomic64_read(&parent_counter->count);
	child_val = atomic64_read(&child_counter->count);

	/*
	 * Add back the child's count to the parent's count:
	 */
	atomic64_add(child_val, &parent_counter->count);

	/*
	 * Remove this counter from the parent's list
	 */
	mutex_lock(&parent_counter->mutex);
	list_del_init(&child_counter->child_list);
	mutex_unlock(&parent_counter->mutex);

	/*
	 * Release the parent counter, if this was the last
	 * reference to it.
	 */
	fput(parent_counter->filp);
}
static void
__perf_counter_exit_task(struct task_struct *child,
			 struct perf_counter *child_counter,
			 struct perf_counter_context *child_ctx)
{
	struct perf_counter *parent_counter;
	struct perf_counter *sub, *tmp;

	/*
	 * If we do not self-reap then we have to wait for the
	 * child task to unschedule (it will happen for sure),
	 * so that its counter is at its final count. (This
	 * condition triggers rarely - child tasks usually get
	 * off their CPU before the parent has a chance to
	 * get this far into the reaping action)
	 */
	if (child != current) {
		wait_task_inactive(child, 0);
		list_del_init(&child_counter->list_entry);
	} else {
		struct perf_cpu_context *cpuctx;
		unsigned long flags;
		u64 perf_flags;

		/*
		 * Disable and unlink this counter.
		 *
		 * Be careful about zapping the list - IRQ/NMI context
		 * could still be processing it:
		 */
		curr_rq_lock_irq_save(&flags);
		perf_flags = hw_perf_save_disable();

		cpuctx = &__get_cpu_var(perf_cpu_context);

		group_sched_out(child_counter, cpuctx, child_ctx);

		list_del_init(&child_counter->list_entry);

		child_ctx->nr_counters--;

		hw_perf_restore(perf_flags);
		curr_rq_unlock_irq_restore(&flags);
	}
	parent_counter = child_counter->parent;
	/*
	 * It can happen that parent exits first, and has counters
	 * that are still around due to the child reference. These
	 * counters need to be zapped - but otherwise linger.
	 */
	if (parent_counter) {
		sync_child_counter(child_counter, parent_counter);
		list_for_each_entry_safe(sub, tmp, &child_counter->sibling_list,
					 list_entry) {
			if (sub->parent)
				sync_child_counter(sub, sub->parent);
			kfree(sub);
		}
		kfree(child_counter);
	}
}
/*
 * When a child task exits, feed back counter values to parent counters.
 *
 * Note: we may be running in child context, but the PID is not hashed
 * anymore so new counters will not be added.
 */
void perf_counter_exit_task(struct task_struct *child)
{
	struct perf_counter *child_counter, *tmp;
	struct perf_counter_context *child_ctx;

	child_ctx = &child->perf_counter_ctx;

	if (likely(!child_ctx->nr_counters))
		return;

	list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list,
				 list_entry)
		__perf_counter_exit_task(child, child_counter, child_ctx);
}
/*
 * Initialize the perf_counter context in task_struct
 */
void perf_counter_init_task(struct task_struct *child)
{
	struct perf_counter_context *child_ctx, *parent_ctx;
	struct perf_counter *counter;
	struct task_struct *parent = current;

	child_ctx  =  &child->perf_counter_ctx;
	parent_ctx = &parent->perf_counter_ctx;

	__perf_counter_init_context(child_ctx, child);

	/*
	 * This is executed from the parent task context, so inherit
	 * counters that have been marked for cloning:
	 */
	if (likely(!parent_ctx->nr_counters))
		return;

	/*
	 * Lock the parent list. No need to lock the child - not PID
	 * hashed yet and not running, so nobody can access it.
	 */
	mutex_lock(&parent_ctx->mutex);

	/*
	 * We dont have to disable NMIs - we are only looking at
	 * the list, not manipulating it:
	 */
	list_for_each_entry(counter, &parent_ctx->counter_list, list_entry) {
		if (!counter->hw_event.inherit)
			continue;

		if (inherit_group(counter, parent,
				  parent_ctx, child, child_ctx))
			break;
	}

	mutex_unlock(&parent_ctx->mutex);
}
static void __cpuinit perf_counter_init_cpu(int cpu)
{
	struct perf_cpu_context *cpuctx;

	cpuctx = &per_cpu(perf_cpu_context, cpu);
	__perf_counter_init_context(&cpuctx->ctx, NULL);

	mutex_lock(&perf_resource_mutex);
	cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu;
	mutex_unlock(&perf_resource_mutex);

	hw_perf_counter_setup(cpu);
}

#ifdef CONFIG_HOTPLUG_CPU
static void __perf_counter_exit_cpu(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter_context *ctx = &cpuctx->ctx;
	struct perf_counter *counter, *tmp;

	list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry)
		__perf_counter_remove_from_context(counter);
}
static void perf_counter_exit_cpu(int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_counter_context *ctx = &cpuctx->ctx;

	mutex_lock(&ctx->mutex);
	smp_call_function_single(cpu, __perf_counter_exit_cpu, NULL, 1);
	mutex_unlock(&ctx->mutex);
}
#else
static inline void perf_counter_exit_cpu(int cpu) { }
#endif

static int __cpuinit
perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action) {

	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		perf_counter_init_cpu(cpu);
		break;

	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		perf_counter_exit_cpu(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata perf_cpu_nb = {
	.notifier_call		= perf_cpu_notify,
};

static int __init perf_counter_init(void)
{
	perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
			(void *)(long)smp_processor_id());
	register_cpu_notifier(&perf_cpu_nb);

	return 0;
}
early_initcall(perf_counter_init);
static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf)
{
	return sprintf(buf, "%d\n", perf_reserved_percpu);
}

static ssize_t
perf_set_reserve_percpu(struct sysdev_class *class,
			const char *buf,
			size_t count)
{
	struct perf_cpu_context *cpuctx;
	unsigned long val;
	int err, cpu, mpt;

	err = strict_strtoul(buf, 10, &val);
	if (err)
		return err;
	if (val > perf_max_counters)
		return -EINVAL;

	mutex_lock(&perf_resource_mutex);
	perf_reserved_percpu = val;
	for_each_online_cpu(cpu) {
		cpuctx = &per_cpu(perf_cpu_context, cpu);
		spin_lock_irq(&cpuctx->ctx.lock);
		mpt = min(perf_max_counters - cpuctx->ctx.nr_counters,
			  perf_max_counters - perf_reserved_percpu);
		cpuctx->max_pertask = mpt;
		spin_unlock_irq(&cpuctx->ctx.lock);
	}
	mutex_unlock(&perf_resource_mutex);

	return count;
}

static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf)
{
	return sprintf(buf, "%d\n", perf_overcommit);
}

static ssize_t
perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count)
{
	unsigned long val;
	int err;

	err = strict_strtoul(buf, 10, &val);
	if (err)
		return err;
	if (val > 1)
		return -EINVAL;

	mutex_lock(&perf_resource_mutex);
	perf_overcommit = val;
	mutex_unlock(&perf_resource_mutex);

	return count;
}
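/*
 * Admin usage sketch (illustrative): with the attribute group below
 * registered under the cpu sysdev class, these knobs would typically show up
 * as /sys/devices/system/cpu/perf_counters/{reserve_percpu,overcommit}, e.g.:
 *
 *	echo 2 > /sys/devices/system/cpu/perf_counters/reserve_percpu
 */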
static SYSDEV_CLASS_ATTR(
				reserve_percpu,
				0644,
				perf_show_reserve_percpu,
				perf_set_reserve_percpu
			);

static SYSDEV_CLASS_ATTR(
				overcommit,
				0644,
				perf_show_overcommit,
				perf_set_overcommit
			);

static struct attribute *perfclass_attrs[] = {
	&attr_reserve_percpu.attr,
	&attr_overcommit.attr,
	NULL
};

static struct attribute_group perfclass_attr_group = {
	.attrs			= perfclass_attrs,
	.name			= "perf_counters",
};

static int __init perf_counter_sysfs_init(void)
{
	return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
				  &perfclass_attr_group);
}
device_initcall(perf_counter_sysfs_init);