/*
 * Performance counter core code
 *
 *  Copyright(C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2008 Red Hat, Inc., Ingo Molnar
 *
 *  For licensing details see kernel-base/COPYING
 */
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/sysfs.h>
#include <linux/ptrace.h>
#include <linux/percpu.h>
#include <linux/vmstat.h>
#include <linux/hardirq.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/perf_counter.h>

#include <asm/irq_regs.h>
/*
 * Each CPU has a list of per CPU counters:
 */
DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);

int perf_max_counters __read_mostly = 1;
static int perf_reserved_percpu __read_mostly;
static int perf_overcommit __read_mostly = 1;

/*
 * Mutex for (sysadmin-configurable) counter reservations:
 */
static DEFINE_MUTEX(perf_resource_mutex);
/*
 * Architecture provided APIs - weak aliases:
 */
extern __weak const struct hw_perf_counter_ops *
hw_perf_counter_init(struct perf_counter *counter)
{
	return NULL;
}

u64 __weak hw_perf_save_disable(void)		{ return 0; }
void __weak hw_perf_restore(u64 ctrl)		{ barrier(); }
void __weak hw_perf_counter_setup(int cpu)	{ barrier(); }
int __weak hw_perf_group_sched_in(struct perf_counter *group_leader,
	       struct perf_cpu_context *cpuctx,
	       struct perf_counter_context *ctx, int cpu)
{
	return 0;
}

void __weak perf_counter_print_debug(void)	{ }
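
/*
 * These __weak stubs are the defaults for architectures without PMU
 * support: the scheduling hooks become no-ops, and hw_perf_counter_init()
 * returning NULL makes perf_counter_alloc() below reject hardware
 * counters while software counters keep working.
 */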
static void
list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
{
	struct perf_counter *group_leader = counter->group_leader;

	/*
	 * Depending on whether it is a standalone or sibling counter,
	 * add it straight to the context's counter list, or to the group
	 * leader's sibling list:
	 */
	if (counter->group_leader == counter)
		list_add_tail(&counter->list_entry, &ctx->counter_list);
	else {
		list_add_tail(&counter->list_entry, &group_leader->sibling_list);
		group_leader->nr_siblings++;
	}

	list_add_rcu(&counter->event_entry, &ctx->event_list);
}
static void
list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
{
	struct perf_counter *sibling, *tmp;

	list_del_init(&counter->list_entry);
	list_del_rcu(&counter->event_entry);

	if (counter->group_leader != counter)
		counter->group_leader->nr_siblings--;

	/*
	 * If this was a group counter with sibling counters then
	 * upgrade the siblings to singleton counters by adding them
	 * to the context list directly:
	 */
	list_for_each_entry_safe(sibling, tmp,
				 &counter->sibling_list, list_entry) {

		list_move_tail(&sibling->list_entry, &ctx->counter_list);
		sibling->group_leader = sibling;
	}
}
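
/*
 * The resulting two-level structure, for a group leader L with siblings
 * s1 and s2:
 *
 *	ctx->counter_list ---> L ---> (other leaders/singletons)
 *	L->sibling_list   ---> s1 ---> s2
 *
 * Deleting L with list_del_counter() above promotes s1 and s2 onto
 * ctx->counter_list as singleton counters.
 */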
static void
counter_sched_out(struct perf_counter *counter,
		  struct perf_cpu_context *cpuctx,
		  struct perf_counter_context *ctx)
{
	if (counter->state != PERF_COUNTER_STATE_ACTIVE)
		return;

	counter->state = PERF_COUNTER_STATE_INACTIVE;
	counter->hw_ops->disable(counter);
	counter->oncpu = -1;

	if (!is_software_counter(counter))
		cpuctx->active_oncpu--;
	ctx->nr_active--;
	if (counter->hw_event.exclusive || !cpuctx->active_oncpu)
		cpuctx->exclusive = 0;
}
static void
group_sched_out(struct perf_counter *group_counter,
		struct perf_cpu_context *cpuctx,
		struct perf_counter_context *ctx)
{
	struct perf_counter *counter;

	if (group_counter->state != PERF_COUNTER_STATE_ACTIVE)
		return;

	counter_sched_out(group_counter, cpuctx, ctx);

	/*
	 * Schedule out siblings (if any):
	 */
	list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
		counter_sched_out(counter, cpuctx, ctx);

	if (group_counter->hw_event.exclusive)
		cpuctx->exclusive = 0;
}
/*
 * Cross CPU call to remove a performance counter
 *
 * We disable the counter on the hardware level first. After that we
 * remove it from the context list.
 */
static void __perf_counter_remove_from_context(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter *counter = info;
	struct perf_counter_context *ctx = counter->ctx;
	unsigned long flags;
	u64 perf_flags;

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	curr_rq_lock_irq_save(&flags);
	spin_lock(&ctx->lock);

	counter_sched_out(counter, cpuctx, ctx);

	counter->task = NULL;
	ctx->nr_counters--;

	/*
	 * Protect the list operation against NMI by disabling the
	 * counters on a global level. NOP for non NMI based counters.
	 */
	perf_flags = hw_perf_save_disable();
	list_del_counter(counter, ctx);
	hw_perf_restore(perf_flags);

	if (!ctx->task) {
		/*
		 * Allow more per task counters with respect to the
		 * reservation:
		 */
		cpuctx->max_pertask =
			min(perf_max_counters - ctx->nr_counters,
			    perf_max_counters - perf_reserved_percpu);
	}

	spin_unlock(&ctx->lock);
	curr_rq_unlock_irq_restore(&flags);
}
/*
 * Remove the counter from a task's (or a CPU's) list of counters.
 *
 * Must be called with counter->mutex and ctx->mutex held.
 *
 * CPU counters are removed with a smp call. For task counters we only
 * call when the task is on a CPU.
 */
static void perf_counter_remove_from_context(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Per cpu counters are removed via an smp call and
		 * the removal is always successful.
		 */
		smp_call_function_single(counter->cpu,
					 __perf_counter_remove_from_context,
					 counter, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_counter_remove_from_context,
				 counter);

	spin_lock_irq(&ctx->lock);
	/*
	 * If the context is active we need to retry the smp call.
	 */
	if (ctx->nr_active && !list_empty(&counter->list_entry)) {
		spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * The lock prevents that this context is scheduled in so we
	 * can remove the counter safely, if the call above did not
	 * succeed.
	 */
	if (!list_empty(&counter->list_entry)) {
		ctx->nr_counters--;
		list_del_counter(counter, ctx);
		counter->task = NULL;
	}
	spin_unlock_irq(&ctx->lock);
}
/*
 * Cross CPU call to disable a performance counter
 */
static void __perf_counter_disable(void *info)
{
	struct perf_counter *counter = info;
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter_context *ctx = counter->ctx;
	unsigned long flags;

	/*
	 * If this is a per-task counter, need to check whether this
	 * counter's task is the current task on this cpu.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	curr_rq_lock_irq_save(&flags);
	spin_lock(&ctx->lock);

	/*
	 * If the counter is on, turn it off.
	 * If it is in error state, leave it in error state.
	 */
	if (counter->state >= PERF_COUNTER_STATE_INACTIVE) {
		if (counter == counter->group_leader)
			group_sched_out(counter, cpuctx, ctx);
		else
			counter_sched_out(counter, cpuctx, ctx);
		counter->state = PERF_COUNTER_STATE_OFF;
	}

	spin_unlock(&ctx->lock);
	curr_rq_unlock_irq_restore(&flags);
}
static void perf_counter_disable(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Disable the counter on the cpu that it's on
		 */
		smp_call_function_single(counter->cpu, __perf_counter_disable,
					 counter, 1);
		return;
	}

retry:
	task_oncpu_function_call(task, __perf_counter_disable, counter);

	spin_lock_irq(&ctx->lock);
	/*
	 * If the counter is still active, we need to retry the cross-call.
	 */
	if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
		spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (counter->state == PERF_COUNTER_STATE_INACTIVE)
		counter->state = PERF_COUNTER_STATE_OFF;

	spin_unlock_irq(&ctx->lock);
}
/*
 * Disable a counter and all its children.
 */
static void perf_counter_disable_family(struct perf_counter *counter)
{
	struct perf_counter *child;

	perf_counter_disable(counter);

	/*
	 * Lock the mutex to protect the list of children
	 */
	mutex_lock(&counter->mutex);
	list_for_each_entry(child, &counter->child_list, child_list)
		perf_counter_disable(child);
	mutex_unlock(&counter->mutex);
}
static int
counter_sched_in(struct perf_counter *counter,
		 struct perf_cpu_context *cpuctx,
		 struct perf_counter_context *ctx,
		 int cpu)
{
	if (counter->state <= PERF_COUNTER_STATE_OFF)
		return 0;

	counter->state = PERF_COUNTER_STATE_ACTIVE;
	counter->oncpu = cpu;	/* TODO: put 'cpu' into cpuctx->cpu */
	/*
	 * The new state must be visible before we turn it on in the hardware:
	 */
	smp_wmb();

	if (counter->hw_ops->enable(counter)) {
		counter->state = PERF_COUNTER_STATE_INACTIVE;
		counter->oncpu = -1;
		return -EAGAIN;
	}

	if (!is_software_counter(counter))
		cpuctx->active_oncpu++;
	ctx->nr_active++;

	if (counter->hw_event.exclusive)
		cpuctx->exclusive = 1;

	return 0;
}
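
/*
 * The state checks in the scheduling code rely on the numeric ordering
 * of the state values (ERROR < OFF < INACTIVE < ACTIVE): e.g. the
 * "state <= PERF_COUNTER_STATE_OFF" test above skips both disabled and
 * error-state counters with a single comparison.
 */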
/*
 * Return 1 for a group consisting entirely of software counters,
 * 0 if the group contains any hardware counters.
 */
static int is_software_only_group(struct perf_counter *leader)
{
	struct perf_counter *counter;

	if (!is_software_counter(leader))
		return 0;

	list_for_each_entry(counter, &leader->sibling_list, list_entry)
		if (!is_software_counter(counter))
			return 0;

	return 1;
}
/*
 * Work out whether we can put this counter group on the CPU now.
 */
static int group_can_go_on(struct perf_counter *counter,
			   struct perf_cpu_context *cpuctx,
			   int can_add_hw)
{
	/*
	 * Groups consisting entirely of software counters can always go on.
	 */
	if (is_software_only_group(counter))
		return 1;
	/*
	 * If an exclusive group is already on, no other hardware
	 * counters can go on.
	 */
	if (cpuctx->exclusive)
		return 0;
	/*
	 * If this group is exclusive and there are already
	 * counters on the CPU, it can't go on.
	 */
	if (counter->hw_event.exclusive && cpuctx->active_oncpu)
		return 0;
	/*
	 * Otherwise, try to add it if all previous groups were able
	 * to go on.
	 */
	return can_add_hw;
}
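
/*
 * In short: software-only groups always succeed; otherwise an already
 * active exclusive group blocks everyone, an exclusive candidate needs
 * an idle PMU, and anything else defers to the caller's 'can_add_hw'.
 */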
/*
 * Cross CPU call to install and enable a performance counter
 */
static void __perf_install_in_context(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter *counter = info;
	struct perf_counter_context *ctx = counter->ctx;
	struct perf_counter *leader = counter->group_leader;
	int cpu = smp_processor_id();
	unsigned long flags;
	u64 perf_flags;
	int err;

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu. If not it has been
	 * scheduled out before the smp call arrived.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	curr_rq_lock_irq_save(&flags);
	spin_lock(&ctx->lock);

	/*
	 * Protect the list operation against NMI by disabling the
	 * counters on a global level. NOP for non NMI based counters.
	 */
	perf_flags = hw_perf_save_disable();

	list_add_counter(counter, ctx);
	ctx->nr_counters++;
	counter->prev_state = PERF_COUNTER_STATE_OFF;

	/*
	 * Don't put the counter on if it is disabled or if
	 * it is in a group and the group isn't on.
	 */
	if (counter->state != PERF_COUNTER_STATE_INACTIVE ||
	    (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE))
		goto unlock;

	/*
	 * An exclusive counter can't go on if there are already active
	 * hardware counters, and no hardware counter can go on if there
	 * is already an exclusive counter on.
	 */
	if (!group_can_go_on(counter, cpuctx, 1))
		goto unlock;

	err = counter_sched_in(counter, cpuctx, ctx, cpu);
	if (err) {
		/*
		 * This counter couldn't go on. If it is in a group
		 * then we have to pull the whole group off.
		 * If the counter group is pinned then put it in error state.
		 */
		if (leader != counter)
			group_sched_out(leader, cpuctx, ctx);
		if (leader->hw_event.pinned)
			leader->state = PERF_COUNTER_STATE_ERROR;
	}

	if (!err && !ctx->task && cpuctx->max_pertask)
		cpuctx->max_pertask--;

unlock:
	hw_perf_restore(perf_flags);

	spin_unlock(&ctx->lock);
	curr_rq_unlock_irq_restore(&flags);
}
/*
 * Attach a performance counter to a context
 *
 * First we add the counter to the list with the hardware enable bit
 * in counter->hw_config cleared.
 *
 * If the counter is attached to a task which is on a CPU we use a smp
 * call to enable it in the task context. The task might have been
 * scheduled away, but we check this in the smp call again.
 *
 * Must be called with ctx->mutex held.
 */
static void
perf_install_in_context(struct perf_counter_context *ctx,
			struct perf_counter *counter,
			int cpu)
{
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Per cpu counters are installed via an smp call and
		 * the install is always successful.
		 */
		smp_call_function_single(cpu, __perf_install_in_context,
					 counter, 1);
		return;
	}

	counter->task = task;
retry:
	task_oncpu_function_call(task, __perf_install_in_context,
				 counter);

	spin_lock_irq(&ctx->lock);
	/*
	 * we need to retry the smp call.
	 */
	if (ctx->is_active && list_empty(&counter->list_entry)) {
		spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * The lock prevents that this context is scheduled in so we
	 * can add the counter safely, if the call above did not
	 * succeed.
	 */
	if (list_empty(&counter->list_entry)) {
		list_add_counter(counter, ctx);
		ctx->nr_counters++;
	}
	spin_unlock_irq(&ctx->lock);
}
/*
 * Cross CPU call to enable a performance counter
 */
static void __perf_counter_enable(void *info)
{
	struct perf_counter *counter = info;
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter_context *ctx = counter->ctx;
	struct perf_counter *leader = counter->group_leader;
	unsigned long flags;
	int err;

	/*
	 * If this is a per-task counter, need to check whether this
	 * counter's task is the current task on this cpu.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	curr_rq_lock_irq_save(&flags);
	spin_lock(&ctx->lock);

	counter->prev_state = counter->state;
	if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
		goto unlock;
	counter->state = PERF_COUNTER_STATE_INACTIVE;

	/*
	 * If the counter is in a group and isn't the group leader,
	 * then don't put it on unless the group is on.
	 */
	if (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE)
		goto unlock;

	if (!group_can_go_on(counter, cpuctx, 1))
		err = -EEXIST;
	else
		err = counter_sched_in(counter, cpuctx, ctx,
				       smp_processor_id());

	if (err) {
		/*
		 * If this counter can't go on and it's part of a
		 * group, then the whole group has to come off.
		 */
		if (leader != counter)
			group_sched_out(leader, cpuctx, ctx);
		if (leader->hw_event.pinned)
			leader->state = PERF_COUNTER_STATE_ERROR;
	}

unlock:
	spin_unlock(&ctx->lock);
	curr_rq_unlock_irq_restore(&flags);
}
static void perf_counter_enable(struct perf_counter *counter)
{
	struct perf_counter_context *ctx = counter->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Enable the counter on the cpu that it's on
		 */
		smp_call_function_single(counter->cpu, __perf_counter_enable,
					 counter, 1);
		return;
	}

	spin_lock_irq(&ctx->lock);
	if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
		goto out;

	/*
	 * If the counter is in error state, clear that first.
	 * That way, if we see the counter in error state below, we
	 * know that it has gone back into error state, as distinct
	 * from the task having been scheduled away before the
	 * cross-call arrived.
	 */
	if (counter->state == PERF_COUNTER_STATE_ERROR)
		counter->state = PERF_COUNTER_STATE_OFF;

retry:
	spin_unlock_irq(&ctx->lock);
	task_oncpu_function_call(task, __perf_counter_enable, counter);

	spin_lock_irq(&ctx->lock);

	/*
	 * If the context is active and the counter is still off,
	 * we need to retry the cross-call.
	 */
	if (ctx->is_active && counter->state == PERF_COUNTER_STATE_OFF)
		goto retry;

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (counter->state == PERF_COUNTER_STATE_OFF)
		counter->state = PERF_COUNTER_STATE_INACTIVE;

out:
	spin_unlock_irq(&ctx->lock);
}
/*
 * Enable a counter and all its children.
 */
static void perf_counter_enable_family(struct perf_counter *counter)
{
	struct perf_counter *child;

	perf_counter_enable(counter);

	/*
	 * Lock the mutex to protect the list of children
	 */
	mutex_lock(&counter->mutex);
	list_for_each_entry(child, &counter->child_list, child_list)
		perf_counter_enable(child);
	mutex_unlock(&counter->mutex);
}
void __perf_counter_sched_out(struct perf_counter_context *ctx,
			      struct perf_cpu_context *cpuctx)
{
	struct perf_counter *counter;
	u64 flags;

	spin_lock(&ctx->lock);
	ctx->is_active = 0;
	if (likely(!ctx->nr_counters))
		goto out;

	flags = hw_perf_save_disable();
	if (ctx->nr_active) {
		list_for_each_entry(counter, &ctx->counter_list, list_entry)
			group_sched_out(counter, cpuctx, ctx);
	}
	hw_perf_restore(flags);
out:
	spin_unlock(&ctx->lock);
}
/*
 * Called from scheduler to remove the counters of the current task,
 * with interrupts disabled.
 *
 * We stop each counter and update the counter value in counter->count.
 *
 * This does not protect us against NMI, but disable()
 * sets the disabled bit in the control field of counter _before_
 * accessing the counter control register. If a NMI hits, then it will
 * not restart the counter.
 */
void perf_counter_task_sched_out(struct task_struct *task, int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_counter_context *ctx = &task->perf_counter_ctx;
	struct pt_regs *regs;

	if (likely(!cpuctx->task_ctx))
		return;

	regs = task_pt_regs(task);
	perf_swcounter_event(PERF_COUNT_CONTEXT_SWITCHES, 1, 1, regs);
	__perf_counter_sched_out(ctx, cpuctx);

	cpuctx->task_ctx = NULL;
}
static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx)
{
	__perf_counter_sched_out(&cpuctx->ctx, cpuctx);
}
static int
group_sched_in(struct perf_counter *group_counter,
	       struct perf_cpu_context *cpuctx,
	       struct perf_counter_context *ctx,
	       int cpu)
{
	struct perf_counter *counter, *partial_group;
	int ret;

	if (group_counter->state == PERF_COUNTER_STATE_OFF)
		return 0;

	ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
	if (ret)
		return ret < 0 ? ret : 0;

	group_counter->prev_state = group_counter->state;
	if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
		return -EAGAIN;

	/*
	 * Schedule in siblings as one group (if any):
	 */
	list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
		counter->prev_state = counter->state;
		if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
			partial_group = counter;
			goto group_error;
		}
	}

	return 0;

group_error:
	/*
	 * Groups can be scheduled in as one unit only, so undo any
	 * partial group before returning:
	 */
	list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
		if (counter == partial_group)
			break;
		counter_sched_out(counter, cpuctx, ctx);
	}
	counter_sched_out(group_counter, cpuctx, ctx);

	return -EAGAIN;
}
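
/*
 * Groups are all-or-nothing: a sibling failing counter_sched_in()
 * rolls back every sibling scheduled before it (and the leader), so a
 * group is never left partially counting.
 */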
static void
__perf_counter_sched_in(struct perf_counter_context *ctx,
			struct perf_cpu_context *cpuctx, int cpu)
{
	struct perf_counter *counter;
	u64 flags;
	int can_add_hw = 1;

	spin_lock(&ctx->lock);
	ctx->is_active = 1;
	if (likely(!ctx->nr_counters))
		goto out;

	flags = hw_perf_save_disable();

	/*
	 * First go through the list and put on any pinned groups
	 * in order to give them the best chance of going on.
	 */
	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		if (counter->state <= PERF_COUNTER_STATE_OFF ||
		    !counter->hw_event.pinned)
			continue;
		if (counter->cpu != -1 && counter->cpu != cpu)
			continue;

		if (group_can_go_on(counter, cpuctx, 1))
			group_sched_in(counter, cpuctx, ctx, cpu);

		/*
		 * If this pinned group hasn't been scheduled,
		 * put it in error state.
		 */
		if (counter->state == PERF_COUNTER_STATE_INACTIVE)
			counter->state = PERF_COUNTER_STATE_ERROR;
	}

	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		/*
		 * Ignore counters in OFF or ERROR state, and
		 * ignore pinned counters since we did them already.
		 */
		if (counter->state <= PERF_COUNTER_STATE_OFF ||
		    counter->hw_event.pinned)
			continue;

		/*
		 * Listen to the 'cpu' scheduling filter constraint
		 * of counters:
		 */
		if (counter->cpu != -1 && counter->cpu != cpu)
			continue;

		if (group_can_go_on(counter, cpuctx, can_add_hw)) {
			if (group_sched_in(counter, cpuctx, ctx, cpu))
				can_add_hw = 0;
		}
	}
	hw_perf_restore(flags);
out:
	spin_unlock(&ctx->lock);
}
/*
 * Called from scheduler to add the counters of the current task
 * with interrupts disabled.
 *
 * We restore the counter value and then enable it.
 *
 * This does not protect us against NMI, but enable()
 * sets the enabled bit in the control field of counter _before_
 * accessing the counter control register. If a NMI hits, then it will
 * keep the counter running.
 */
void perf_counter_task_sched_in(struct task_struct *task, int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_counter_context *ctx = &task->perf_counter_ctx;

	__perf_counter_sched_in(ctx, cpuctx, cpu);
	cpuctx->task_ctx = ctx;
}
static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
{
	struct perf_counter_context *ctx = &cpuctx->ctx;

	__perf_counter_sched_in(ctx, cpuctx, cpu);
}
int perf_counter_task_disable(void)
{
	struct task_struct *curr = current;
	struct perf_counter_context *ctx = &curr->perf_counter_ctx;
	struct perf_counter *counter;
	unsigned long flags;
	u64 perf_flags;
	int cpu;

	if (likely(!ctx->nr_counters))
		return 0;

	curr_rq_lock_irq_save(&flags);
	cpu = smp_processor_id();

	/* force the update of the task clock: */
	__task_delta_exec(curr, 1);

	perf_counter_task_sched_out(curr, cpu);

	spin_lock(&ctx->lock);

	/*
	 * Disable all the counters:
	 */
	perf_flags = hw_perf_save_disable();

	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		if (counter->state != PERF_COUNTER_STATE_ERROR)
			counter->state = PERF_COUNTER_STATE_OFF;
	}

	hw_perf_restore(perf_flags);

	spin_unlock(&ctx->lock);

	curr_rq_unlock_irq_restore(&flags);

	return 0;
}
int perf_counter_task_enable(void)
{
	struct task_struct *curr = current;
	struct perf_counter_context *ctx = &curr->perf_counter_ctx;
	struct perf_counter *counter;
	unsigned long flags;
	u64 perf_flags;
	int cpu;

	if (likely(!ctx->nr_counters))
		return 0;

	curr_rq_lock_irq_save(&flags);
	cpu = smp_processor_id();

	/* force the update of the task clock: */
	__task_delta_exec(curr, 1);

	perf_counter_task_sched_out(curr, cpu);

	spin_lock(&ctx->lock);

	/*
	 * Disable all the counters:
	 */
	perf_flags = hw_perf_save_disable();

	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		if (counter->state > PERF_COUNTER_STATE_OFF)
			continue;
		counter->state = PERF_COUNTER_STATE_INACTIVE;
		counter->hw_event.disabled = 0;
	}
	hw_perf_restore(perf_flags);

	spin_unlock(&ctx->lock);

	perf_counter_task_sched_in(curr, cpu);

	curr_rq_unlock_irq_restore(&flags);

	return 0;
}
/*
 * Round-robin a context's counters:
 */
static void rotate_ctx(struct perf_counter_context *ctx)
{
	struct perf_counter *counter;
	u64 perf_flags;

	if (!ctx->nr_counters)
		return;

	spin_lock(&ctx->lock);
	/*
	 * Rotate the first entry last (works just fine for group counters too):
	 */
	perf_flags = hw_perf_save_disable();
	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
		list_move_tail(&counter->list_entry, &ctx->counter_list);
		break;
	}
	hw_perf_restore(perf_flags);

	spin_unlock(&ctx->lock);
}
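
/*
 * Example: with counters A, B, C queued in that order, one rotation
 * turns the list into B, C, A - so counters that lost PMU arbitration
 * this time move up and eventually get scheduled.
 */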
void perf_counter_task_tick(struct task_struct *curr, int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_counter_context *ctx = &curr->perf_counter_ctx;
	const int rotate_percpu = 0;

	if (rotate_percpu)
		perf_counter_cpu_sched_out(cpuctx);
	perf_counter_task_sched_out(curr, cpu);

	if (rotate_percpu)
		rotate_ctx(&cpuctx->ctx);
	rotate_ctx(ctx);

	if (rotate_percpu)
		perf_counter_cpu_sched_in(cpuctx, cpu);
	perf_counter_task_sched_in(curr, cpu);
}
/*
 * Cross CPU call to read the hardware counter
 */
static void __read(void *info)
{
	struct perf_counter *counter = info;
	unsigned long flags;

	curr_rq_lock_irq_save(&flags);
	counter->hw_ops->read(counter);
	curr_rq_unlock_irq_restore(&flags);
}
static u64 perf_counter_read(struct perf_counter *counter)
{
	/*
	 * If counter is enabled and currently active on a CPU, update the
	 * value in the counter structure:
	 */
	if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
		smp_call_function_single(counter->oncpu,
					 __read, counter, 1);
	}

	return atomic64_read(&counter->count);
}
static void put_context(struct perf_counter_context *ctx)
{
	if (ctx->task)
		put_task_struct(ctx->task);
}
static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
{
	struct perf_cpu_context *cpuctx;
	struct perf_counter_context *ctx;
	struct task_struct *task;

	/*
	 * If cpu is not a wildcard then this is a percpu counter:
	 */
	if (cpu != -1) {
		/* Must be root to operate on a CPU counter: */
		if (!capable(CAP_SYS_ADMIN))
			return ERR_PTR(-EACCES);

		if (cpu < 0 || cpu > num_possible_cpus())
			return ERR_PTR(-EINVAL);

		/*
		 * We could be clever and allow to attach a counter to an
		 * offline CPU and activate it when the CPU comes up, but
		 * that's for later.
		 */
		if (!cpu_isset(cpu, cpu_online_map))
			return ERR_PTR(-ENODEV);

		cpuctx = &per_cpu(perf_cpu_context, cpu);
		ctx = &cpuctx->ctx;

		return ctx;
	}

	rcu_read_lock();
	if (!pid)
		task = current;
	else
		task = find_task_by_vpid(pid);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();

	if (!task)
		return ERR_PTR(-ESRCH);

	ctx = &task->perf_counter_ctx;
	ctx->task = task;

	/* Reuse ptrace permission checks for now. */
	if (!ptrace_may_access(task, PTRACE_MODE_READ)) {
		put_context(ctx);
		return ERR_PTR(-EACCES);
	}

	return ctx;
}
static void free_counter_rcu(struct rcu_head *head)
{
	struct perf_counter *counter;

	counter = container_of(head, struct perf_counter, rcu_head);
	kfree(counter);
}
static void free_counter(struct perf_counter *counter)
{
	if (counter->destroy)
		counter->destroy(counter);

	call_rcu(&counter->rcu_head, free_counter_rcu);
}
/*
 * Called when the last reference to the file is gone.
 */
static int perf_release(struct inode *inode, struct file *file)
{
	struct perf_counter *counter = file->private_data;
	struct perf_counter_context *ctx = counter->ctx;

	file->private_data = NULL;

	mutex_lock(&ctx->mutex);
	mutex_lock(&counter->mutex);

	perf_counter_remove_from_context(counter);

	mutex_unlock(&counter->mutex);
	mutex_unlock(&ctx->mutex);

	free_counter(counter);
	put_context(ctx);

	return 0;
}
/*
 * Read the performance counter - simple non blocking version for now
 */
static ssize_t
perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
{
	u64 cntval;

	if (count < sizeof(cntval))
		return -EINVAL;

	/*
	 * Return end-of-file for a read on a counter that is in
	 * error state (i.e. because it was pinned but it couldn't be
	 * scheduled on to the CPU at some point).
	 */
	if (counter->state == PERF_COUNTER_STATE_ERROR)
		return 0;

	mutex_lock(&counter->mutex);
	cntval = perf_counter_read(counter);
	mutex_unlock(&counter->mutex);

	return put_user(cntval, (u64 __user *) buf) ? -EFAULT : sizeof(cntval);
}
static ssize_t
perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct perf_counter *counter = file->private_data;

	return perf_read_hw(counter, buf, count);
}
static unsigned int perf_poll(struct file *file, poll_table *wait)
{
	struct perf_counter *counter = file->private_data;
	struct perf_mmap_data *data;
	unsigned int events;

	rcu_read_lock();
	data = rcu_dereference(counter->data);
	if (data)
		events = atomic_xchg(&data->wakeup, 0);
	else
		events = POLL_IN;
	rcu_read_unlock();

	poll_wait(file, &counter->waitq, wait);

	return events;
}
static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct perf_counter *counter = file->private_data;
	int err = 0;

	switch (cmd) {
	case PERF_COUNTER_IOC_ENABLE:
		perf_counter_enable_family(counter);
		break;
	case PERF_COUNTER_IOC_DISABLE:
		perf_counter_disable_family(counter);
		break;
	default:
		err = -ENOTTY;
	}
	return err;
}
static void __perf_counter_update_userpage(struct perf_counter *counter,
					   struct perf_mmap_data *data)
{
	struct perf_counter_mmap_page *userpg = data->user_page;

	/*
	 * Disable preemption so as to not let the corresponding user-space
	 * spin too long if we get preempted.
	 */
	preempt_disable();
	++userpg->lock;
	smp_wmb();
	userpg->index = counter->hw.idx;
	userpg->offset = atomic64_read(&counter->count);
	if (counter->state == PERF_COUNTER_STATE_ACTIVE)
		userpg->offset -= atomic64_read(&counter->hw.prev_count);

	userpg->data_head = atomic_read(&data->head);
	smp_wmb();
	++userpg->lock;
	preempt_enable();
}
void perf_counter_update_userpage(struct perf_counter *counter)
{
	struct perf_mmap_data *data;

	rcu_read_lock();
	data = rcu_dereference(counter->data);
	if (data)
		__perf_counter_update_userpage(counter, data);
	rcu_read_unlock();
}
static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct perf_counter *counter = vma->vm_file->private_data;
	struct perf_mmap_data *data;
	int ret = VM_FAULT_SIGBUS;

	rcu_read_lock();
	data = rcu_dereference(counter->data);
	if (!data)
		goto unlock;

	if (vmf->pgoff == 0) {
		vmf->page = virt_to_page(data->user_page);
	} else {
		int nr = vmf->pgoff - 1;

		if ((unsigned)nr > data->nr_pages)
			goto unlock;

		vmf->page = virt_to_page(data->data_pages[nr]);
	}
	get_page(vmf->page);
	ret = 0;
unlock:
	rcu_read_unlock();

	return ret;
}
static int perf_mmap_data_alloc(struct perf_counter *counter, int nr_pages)
{
	struct perf_mmap_data *data;
	unsigned long size;
	int i;

	WARN_ON(atomic_read(&counter->mmap_count));

	size = sizeof(struct perf_mmap_data);
	size += nr_pages * sizeof(void *);

	data = kzalloc(size, GFP_KERNEL);
	if (!data)
		goto fail;

	data->user_page = (void *)get_zeroed_page(GFP_KERNEL);
	if (!data->user_page)
		goto fail_user_page;

	for (i = 0; i < nr_pages; i++) {
		data->data_pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
		if (!data->data_pages[i])
			goto fail_data_pages;
	}

	data->nr_pages = nr_pages;

	rcu_assign_pointer(counter->data, data);

	return 0;

fail_data_pages:
	for (i--; i >= 0; i--)
		free_page((unsigned long)data->data_pages[i]);

	free_page((unsigned long)data->user_page);

fail_user_page:
	kfree(data);

fail:
	return -ENOMEM;
}
static void __perf_mmap_data_free(struct rcu_head *rcu_head)
{
	struct perf_mmap_data *data = container_of(rcu_head,
			struct perf_mmap_data, rcu_head);
	int i;

	free_page((unsigned long)data->user_page);
	for (i = 0; i < data->nr_pages; i++)
		free_page((unsigned long)data->data_pages[i]);
	kfree(data);
}
static void perf_mmap_data_free(struct perf_counter *counter)
{
	struct perf_mmap_data *data = counter->data;

	WARN_ON(atomic_read(&counter->mmap_count));

	rcu_assign_pointer(counter->data, NULL);
	call_rcu(&data->rcu_head, __perf_mmap_data_free);
}
static void perf_mmap_open(struct vm_area_struct *vma)
{
	struct perf_counter *counter = vma->vm_file->private_data;

	atomic_inc(&counter->mmap_count);
}

static void perf_mmap_close(struct vm_area_struct *vma)
{
	struct perf_counter *counter = vma->vm_file->private_data;

	if (atomic_dec_and_mutex_lock(&counter->mmap_count,
				      &counter->mmap_mutex)) {
		perf_mmap_data_free(counter);
		mutex_unlock(&counter->mmap_mutex);
	}
}

static struct vm_operations_struct perf_mmap_vmops = {
	.open  = perf_mmap_open,
	.close = perf_mmap_close,
	.fault = perf_mmap_fault,
};
static int perf_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct perf_counter *counter = file->private_data;
	unsigned long vma_size;
	unsigned long nr_pages;
	unsigned long locked, lock_limit;
	int ret = 0;

	if (!(vma->vm_flags & VM_SHARED) || (vma->vm_flags & VM_WRITE))
		return -EINVAL;

	vma_size = vma->vm_end - vma->vm_start;
	nr_pages = (vma_size / PAGE_SIZE) - 1;

	/*
	 * If we have data pages ensure they're a power-of-two number, so we
	 * can do bitmasks instead of modulo.
	 */
	if (nr_pages != 0 && !is_power_of_2(nr_pages))
		return -EINVAL;

	if (vma_size != PAGE_SIZE * (1 + nr_pages))
		return -EINVAL;

	if (vma->vm_pgoff != 0)
		return -EINVAL;

	locked = vma_size >> PAGE_SHIFT;
	locked += vma->vm_mm->locked_vm;

	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
	lock_limit >>= PAGE_SHIFT;

	if ((locked > lock_limit) && !capable(CAP_IPC_LOCK))
		return -EPERM;

	mutex_lock(&counter->mmap_mutex);
	if (atomic_inc_not_zero(&counter->mmap_count))
		goto out;

	WARN_ON(counter->data);
	ret = perf_mmap_data_alloc(counter, nr_pages);
	if (!ret)
		atomic_set(&counter->mmap_count, 1);
out:
	mutex_unlock(&counter->mmap_mutex);

	vma->vm_flags &= ~VM_MAYWRITE;
	vma->vm_flags |= VM_RESERVED;
	vma->vm_ops = &perf_mmap_vmops;

	return ret;
}
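
/*
 * Sketch of the corresponding user-space mapping (illustrative only;
 * 'fd' is a counter fd from sys_perf_counter_open()): one control page
 * plus a power-of-two number of data pages, mapped shared, read-only:
 *
 *	struct perf_counter_mmap_page *pg;
 *
 *	pg = mmap(NULL, (1 + 8) * page_size, PROT_READ, MAP_SHARED, fd, 0);
 *
 * Anything else (writable, offset != 0, non-power-of-two data pages)
 * is rejected above with -EINVAL.
 */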
static const struct file_operations perf_fops = {
	.release		= perf_release,
	.read			= perf_read,
	.poll			= perf_poll,
	.unlocked_ioctl		= perf_ioctl,
	.compat_ioctl		= perf_ioctl,
	.mmap			= perf_mmap,
};
struct perf_output_handle {
	struct perf_counter	*counter;
	struct perf_mmap_data	*data;
	unsigned int		offset;
	unsigned int		head;
	int			wakeup;
};

static int perf_output_begin(struct perf_output_handle *handle,
			     struct perf_counter *counter, unsigned int size)
{
	struct perf_mmap_data *data;
	unsigned int offset, head;

	rcu_read_lock();
	data = rcu_dereference(counter->data);
	if (!data)
		goto out;

	if (!data->nr_pages)
		goto out;

	do {
		offset = head = atomic_read(&data->head);
		head += size;
	} while (atomic_cmpxchg(&data->head, offset, head) != offset);

	handle->counter	= counter;
	handle->data	= data;
	handle->offset	= offset;
	handle->head	= head;
	handle->wakeup	= (offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT);

	return 0;

out:
	rcu_read_unlock();

	return -ENOSPC;
}
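
/*
 * The cmpxchg loop above is a lock-free reservation: each writer
 * advances data->head by 'size' and thereby owns the byte range
 * [offset, offset+size) of the buffer. For example, two concurrent
 * 24-byte writers starting at head == 0 end up with the disjoint
 * ranges [0,24) and [24,48); whichever loses the cmpxchg simply
 * retries with the updated head.
 */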
static void perf_output_copy(struct perf_output_handle *handle,
			     void *buf, unsigned int len)
{
	unsigned int pages_mask;
	unsigned int offset;
	unsigned int size;
	void **pages;

	offset		= handle->offset;
	pages_mask	= handle->data->nr_pages - 1;
	pages		= handle->data->data_pages;

	do {
		unsigned int page_offset;
		int nr;

		nr	    = (offset >> PAGE_SHIFT) & pages_mask;
		page_offset = offset & (PAGE_SIZE - 1);
		size	    = min_t(unsigned int, PAGE_SIZE - page_offset, len);

		memcpy(pages[nr] + page_offset, buf, size);

		len	-= size;
		buf	+= size;
		offset	+= size;
	} while (len);

	handle->offset = offset;

	WARN_ON_ONCE(handle->offset > handle->head);
}
#define perf_output_put(handle, x) \
	perf_output_copy((handle), &(x), sizeof(x))
static void perf_output_end(struct perf_output_handle *handle, int nmi)
{
	if (handle->wakeup) {
		(void)atomic_xchg(&handle->data->wakeup, POLL_IN);
		__perf_counter_update_userpage(handle->counter, handle->data);
		if (nmi) {
			handle->counter->wakeup_pending = 1;
			set_perf_counter_pending();
		} else
			wake_up(&handle->counter->waitq);
	}
	rcu_read_unlock();
}
static int perf_output_write(struct perf_counter *counter, int nmi,
			     void *buf, ssize_t size)
{
	struct perf_output_handle handle;
	int ret;

	ret = perf_output_begin(&handle, counter, size);
	if (ret)
		goto out;

	perf_output_copy(&handle, buf, size);
	perf_output_end(&handle, nmi);

out:
	return ret;
}
static void perf_output_simple(struct perf_counter *counter,
			       int nmi, struct pt_regs *regs)
{
	unsigned int size;
	struct {
		struct perf_event_header header;
		u64 ip;
		u32 pid, tid;
	} event;

	event.header.type = PERF_EVENT_IP;
	event.ip = instruction_pointer(regs);

	size = sizeof(event);

	if (counter->hw_event.include_tid) {
		/* namespace issues */
		event.pid = current->group_leader->pid;
		event.tid = current->pid;

		event.header.type |= __PERF_EVENT_TID;
	} else
		size -= sizeof(u64);

	event.header.size = size;

	perf_output_write(counter, nmi, &event, size);
}
static void perf_output_group(struct perf_counter *counter, int nmi)
{
	struct perf_output_handle handle;
	struct perf_event_header header;
	struct perf_counter *leader, *sub;
	unsigned int size;
	struct {
		u64 event;
		u64 counter;
	} entry;
	int ret;

	size = sizeof(header) + counter->nr_siblings * sizeof(entry);

	ret = perf_output_begin(&handle, counter, size);
	if (ret)
		return;

	header.type = PERF_EVENT_GROUP;
	header.size = size;

	perf_output_put(&handle, header);

	leader = counter->group_leader;
	list_for_each_entry(sub, &leader->sibling_list, list_entry) {
		if (sub != counter)
			sub->hw_ops->read(sub);

		entry.event = sub->hw_event.config;
		entry.counter = atomic64_read(&sub->count);

		perf_output_put(&handle, entry);
	}

	perf_output_end(&handle, nmi);
}
void perf_counter_output(struct perf_counter *counter,
			 int nmi, struct pt_regs *regs)
{
	switch (counter->hw_event.record_type) {
	case PERF_RECORD_SIMPLE:
		return;

	case PERF_RECORD_IRQ:
		perf_output_simple(counter, nmi, regs);
		break;

	case PERF_RECORD_GROUP:
		perf_output_group(counter, nmi);
		break;
	}
}
/*
 * Generic software counter infrastructure
 */

static void perf_swcounter_update(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	u64 prev, now;
	s64 delta;

again:
	prev = atomic64_read(&hwc->prev_count);
	now = atomic64_read(&hwc->count);
	if (atomic64_cmpxchg(&hwc->prev_count, prev, now) != prev)
		goto again;

	delta = now - prev;

	atomic64_add(delta, &counter->count);
	atomic64_sub(delta, &hwc->period_left);
}
static void perf_swcounter_set_period(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	s64 left = atomic64_read(&hwc->period_left);
	s64 period = hwc->irq_period;

	if (unlikely(left <= -period)) {
		left = period;
		atomic64_set(&hwc->period_left, left);
	}

	if (unlikely(left <= 0)) {
		left += period;
		atomic64_add(period, &hwc->period_left);
	}

	atomic64_set(&hwc->prev_count, -left);
	atomic64_set(&hwc->count, -left);
}
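
/*
 * Worked example: with irq_period == 100 and period_left == -3 (the
 * counter overshot the last period by 3), the second branch above
 * leaves period_left at 97 and primes hw.count/prev_count to -97, so
 * the next overflow fires after 97 more events - keeping the long-run
 * average at one overflow per period.
 */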
static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
{
	struct perf_counter *counter;
	struct pt_regs *regs;

	counter = container_of(hrtimer, struct perf_counter, hw.hrtimer);
	counter->hw_ops->read(counter);

	regs = get_irq_regs();
	/*
	 * In case we exclude kernel IPs or are somehow not in interrupt
	 * context, provide the next best thing, the user IP.
	 */
	if ((counter->hw_event.exclude_kernel || !regs) &&
			!counter->hw_event.exclude_user)
		regs = task_pt_regs(current);

	if (regs)
		perf_counter_output(counter, 0, regs);

	hrtimer_forward_now(hrtimer, ns_to_ktime(counter->hw.irq_period));

	return HRTIMER_RESTART;
}
static void perf_swcounter_overflow(struct perf_counter *counter,
				    int nmi, struct pt_regs *regs)
{
	perf_swcounter_update(counter);
	perf_swcounter_set_period(counter);
	perf_counter_output(counter, nmi, regs);
}
static int perf_swcounter_match(struct perf_counter *counter,
				enum perf_event_types type,
				u32 event, struct pt_regs *regs)
{
	if (counter->state != PERF_COUNTER_STATE_ACTIVE)
		return 0;

	if (perf_event_raw(&counter->hw_event))
		return 0;

	if (perf_event_type(&counter->hw_event) != type)
		return 0;

	if (perf_event_id(&counter->hw_event) != event)
		return 0;

	if (counter->hw_event.exclude_user && user_mode(regs))
		return 0;

	if (counter->hw_event.exclude_kernel && !user_mode(regs))
		return 0;

	return 1;
}
static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
			       int nmi, struct pt_regs *regs)
{
	int neg = atomic64_add_negative(nr, &counter->hw.count);
	if (counter->hw.irq_period && !neg)
		perf_swcounter_overflow(counter, nmi, regs);
}
static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
				     enum perf_event_types type, u32 event,
				     u64 nr, int nmi, struct pt_regs *regs)
{
	struct perf_counter *counter;

	if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
		if (perf_swcounter_match(counter, type, event, regs))
			perf_swcounter_add(counter, nr, nmi, regs);
	}
	rcu_read_unlock();
}
static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx)
{
	if (in_nmi())
		return &cpuctx->recursion[3];

	if (in_irq())
		return &cpuctx->recursion[2];

	if (in_softirq())
		return &cpuctx->recursion[1];

	return &cpuctx->recursion[0];
}
static void __perf_swcounter_event(enum perf_event_types type, u32 event,
				   u64 nr, int nmi, struct pt_regs *regs)
{
	struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
	int *recursion = perf_swcounter_recursion_context(cpuctx);

	if (*recursion)
		goto out;

	(*recursion)++;
	barrier();

	perf_swcounter_ctx_event(&cpuctx->ctx, type, event, nr, nmi, regs);
	if (cpuctx->task_ctx) {
		perf_swcounter_ctx_event(cpuctx->task_ctx, type, event,
				nr, nmi, regs);
	}

	barrier();
	(*recursion)--;

out:
	put_cpu_var(perf_cpu_context);
}
void perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs)
{
	__perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi, regs);
}
static void perf_swcounter_read(struct perf_counter *counter)
{
	perf_swcounter_update(counter);
}

static int perf_swcounter_enable(struct perf_counter *counter)
{
	perf_swcounter_set_period(counter);
	return 0;
}

static void perf_swcounter_disable(struct perf_counter *counter)
{
	perf_swcounter_update(counter);
}

static const struct hw_perf_counter_ops perf_ops_generic = {
	.enable		= perf_swcounter_enable,
	.disable	= perf_swcounter_disable,
	.read		= perf_swcounter_read,
};
/*
 * Software counter: cpu wall time clock
 */

static void cpu_clock_perf_counter_update(struct perf_counter *counter)
{
	int cpu = raw_smp_processor_id();
	s64 prev;
	u64 now;

	now = cpu_clock(cpu);
	prev = atomic64_read(&counter->hw.prev_count);
	atomic64_set(&counter->hw.prev_count, now);
	atomic64_add(now - prev, &counter->count);
}

static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	int cpu = raw_smp_processor_id();

	atomic64_set(&hwc->prev_count, cpu_clock(cpu));
	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hwc->hrtimer.function = perf_swcounter_hrtimer;
	if (hwc->irq_period) {
		__hrtimer_start_range_ns(&hwc->hrtimer,
				ns_to_ktime(hwc->irq_period), 0,
				HRTIMER_MODE_REL, 0);
	}

	return 0;
}

static void cpu_clock_perf_counter_disable(struct perf_counter *counter)
{
	hrtimer_cancel(&counter->hw.hrtimer);
	cpu_clock_perf_counter_update(counter);
}

static void cpu_clock_perf_counter_read(struct perf_counter *counter)
{
	cpu_clock_perf_counter_update(counter);
}

static const struct hw_perf_counter_ops perf_ops_cpu_clock = {
	.enable		= cpu_clock_perf_counter_enable,
	.disable	= cpu_clock_perf_counter_disable,
	.read		= cpu_clock_perf_counter_read,
};
/*
 * Software counter: task time clock
 */

/*
 * Called from within the scheduler:
 */
static u64 task_clock_perf_counter_val(struct perf_counter *counter, int update)
{
	struct task_struct *curr = counter->task;
	u64 delta;

	delta = __task_delta_exec(curr, update);

	return curr->se.sum_exec_runtime + delta;
}

static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now)
{
	u64 prev;
	s64 delta;

	prev = atomic64_read(&counter->hw.prev_count);

	atomic64_set(&counter->hw.prev_count, now);

	delta = now - prev;

	atomic64_add(delta, &counter->count);
}

static int task_clock_perf_counter_enable(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;

	atomic64_set(&hwc->prev_count, task_clock_perf_counter_val(counter, 0));
	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hwc->hrtimer.function = perf_swcounter_hrtimer;
	if (hwc->irq_period) {
		__hrtimer_start_range_ns(&hwc->hrtimer,
				ns_to_ktime(hwc->irq_period), 0,
				HRTIMER_MODE_REL, 0);
	}

	return 0;
}

static void task_clock_perf_counter_disable(struct perf_counter *counter)
{
	hrtimer_cancel(&counter->hw.hrtimer);
	task_clock_perf_counter_update(counter,
			task_clock_perf_counter_val(counter, 0));
}

static void task_clock_perf_counter_read(struct perf_counter *counter)
{
	task_clock_perf_counter_update(counter,
			task_clock_perf_counter_val(counter, 1));
}

static const struct hw_perf_counter_ops perf_ops_task_clock = {
	.enable		= task_clock_perf_counter_enable,
	.disable	= task_clock_perf_counter_disable,
	.read		= task_clock_perf_counter_read,
};
/*
 * Software counter: cpu migrations
 */

static inline u64 get_cpu_migrations(struct perf_counter *counter)
{
	struct task_struct *curr = counter->ctx->task;

	if (curr)
		return curr->se.nr_migrations;
	return cpu_nr_migrations(smp_processor_id());
}

static void cpu_migrations_perf_counter_update(struct perf_counter *counter)
{
	u64 prev, now;
	s64 delta;

	prev = atomic64_read(&counter->hw.prev_count);
	now = get_cpu_migrations(counter);

	atomic64_set(&counter->hw.prev_count, now);

	delta = now - prev;

	atomic64_add(delta, &counter->count);
}

static void cpu_migrations_perf_counter_read(struct perf_counter *counter)
{
	cpu_migrations_perf_counter_update(counter);
}

static int cpu_migrations_perf_counter_enable(struct perf_counter *counter)
{
	if (counter->prev_state <= PERF_COUNTER_STATE_OFF)
		atomic64_set(&counter->hw.prev_count,
			     get_cpu_migrations(counter));
	return 0;
}

static void cpu_migrations_perf_counter_disable(struct perf_counter *counter)
{
	cpu_migrations_perf_counter_update(counter);
}

static const struct hw_perf_counter_ops perf_ops_cpu_migrations = {
	.enable		= cpu_migrations_perf_counter_enable,
	.disable	= cpu_migrations_perf_counter_disable,
	.read		= cpu_migrations_perf_counter_read,
};
#ifdef CONFIG_EVENT_PROFILE
void perf_tpcounter_event(int event_id)
{
	struct pt_regs *regs = get_irq_regs();

	if (!regs)
		regs = task_pt_regs(current);

	__perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, regs);
}

extern int ftrace_profile_enable(int);
extern void ftrace_profile_disable(int);

static void tp_perf_counter_destroy(struct perf_counter *counter)
{
	ftrace_profile_disable(perf_event_id(&counter->hw_event));
}

static const struct hw_perf_counter_ops *
tp_perf_counter_init(struct perf_counter *counter)
{
	int event_id = perf_event_id(&counter->hw_event);
	int ret;

	ret = ftrace_profile_enable(event_id);
	if (ret)
		return NULL;

	counter->destroy = tp_perf_counter_destroy;
	counter->hw.irq_period = counter->hw_event.irq_period;

	return &perf_ops_generic;
}
#else
static const struct hw_perf_counter_ops *
tp_perf_counter_init(struct perf_counter *counter)
{
	return NULL;
}
#endif
static const struct hw_perf_counter_ops *
sw_perf_counter_init(struct perf_counter *counter)
{
	struct perf_counter_hw_event *hw_event = &counter->hw_event;
	const struct hw_perf_counter_ops *hw_ops = NULL;
	struct hw_perf_counter *hwc = &counter->hw;

	/*
	 * Software counters (currently) can't in general distinguish
	 * between user, kernel and hypervisor events.
	 * However, context switches and cpu migrations are considered
	 * to be kernel events, and page faults are never hypervisor
	 * events.
	 */
	switch (perf_event_id(&counter->hw_event)) {
	case PERF_COUNT_CPU_CLOCK:
		hw_ops = &perf_ops_cpu_clock;

		if (hw_event->irq_period && hw_event->irq_period < 10000)
			hw_event->irq_period = 10000;
		break;
	case PERF_COUNT_TASK_CLOCK:
		/*
		 * If the user instantiates this as a per-cpu counter,
		 * use the cpu_clock counter instead.
		 */
		if (counter->ctx->task)
			hw_ops = &perf_ops_task_clock;
		else
			hw_ops = &perf_ops_cpu_clock;

		if (hw_event->irq_period && hw_event->irq_period < 10000)
			hw_event->irq_period = 10000;
		break;
	case PERF_COUNT_PAGE_FAULTS:
	case PERF_COUNT_PAGE_FAULTS_MIN:
	case PERF_COUNT_PAGE_FAULTS_MAJ:
	case PERF_COUNT_CONTEXT_SWITCHES:
		hw_ops = &perf_ops_generic;
		break;
	case PERF_COUNT_CPU_MIGRATIONS:
		if (!counter->hw_event.exclude_kernel)
			hw_ops = &perf_ops_cpu_migrations;
		break;
	}

	if (hw_ops)
		hwc->irq_period = hw_event->irq_period;

	return hw_ops;
}
/*
 * Allocate and initialize a counter structure
 */
static struct perf_counter *
perf_counter_alloc(struct perf_counter_hw_event *hw_event,
		   int cpu,
		   struct perf_counter_context *ctx,
		   struct perf_counter *group_leader,
		   gfp_t gfpflags)
{
	const struct hw_perf_counter_ops *hw_ops;
	struct perf_counter *counter;

	counter = kzalloc(sizeof(*counter), gfpflags);
	if (!counter)
		return NULL;

	/*
	 * Single counters are their own group leaders, with an
	 * empty sibling list:
	 */
	if (!group_leader)
		group_leader = counter;

	mutex_init(&counter->mutex);
	INIT_LIST_HEAD(&counter->list_entry);
	INIT_LIST_HEAD(&counter->event_entry);
	INIT_LIST_HEAD(&counter->sibling_list);
	init_waitqueue_head(&counter->waitq);

	mutex_init(&counter->mmap_mutex);

	INIT_LIST_HEAD(&counter->child_list);

	counter->cpu			= cpu;
	counter->hw_event		= *hw_event;
	counter->wakeup_pending		= 0;
	counter->group_leader		= group_leader;
	counter->hw_ops			= NULL;
	counter->ctx			= ctx;

	counter->state = PERF_COUNTER_STATE_INACTIVE;
	if (hw_event->disabled)
		counter->state = PERF_COUNTER_STATE_OFF;

	hw_ops = NULL;

	if (perf_event_raw(hw_event)) {
		hw_ops = hw_perf_counter_init(counter);
		goto done;
	}

	switch (perf_event_type(hw_event)) {
	case PERF_TYPE_HARDWARE:
		hw_ops = hw_perf_counter_init(counter);
		break;

	case PERF_TYPE_SOFTWARE:
		hw_ops = sw_perf_counter_init(counter);
		break;

	case PERF_TYPE_TRACEPOINT:
		hw_ops = tp_perf_counter_init(counter);
		break;
	}

	if (!hw_ops) {
		kfree(counter);
		return NULL;
	}
done:
	counter->hw_ops = hw_ops;

	return counter;
}
/**
 * sys_perf_counter_open - open a performance counter, associate it to a task/cpu
 *
 * @hw_event_uptr:	event type attributes for monitoring/sampling
 * @pid:		target pid
 * @cpu:		target cpu
 * @group_fd:		group leader counter fd
 */
SYSCALL_DEFINE5(perf_counter_open,
		const struct perf_counter_hw_event __user *, hw_event_uptr,
		pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
{
	struct perf_counter *counter, *group_leader;
	struct perf_counter_hw_event hw_event;
	struct perf_counter_context *ctx;
	struct file *counter_file = NULL;
	struct file *group_file = NULL;
	int fput_needed = 0;
	int fput_needed2 = 0;
	int ret;

	/* for future expandability... */
	if (flags)
		return -EINVAL;

	if (copy_from_user(&hw_event, hw_event_uptr, sizeof(hw_event)) != 0)
		return -EFAULT;

	/*
	 * Get the target context (task or percpu):
	 */
	ctx = find_get_context(pid, cpu);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	/*
	 * Look up the group leader (we will attach this counter to it):
	 */
	group_leader = NULL;
	if (group_fd != -1) {
		ret = -EINVAL;
		group_file = fget_light(group_fd, &fput_needed);
		if (!group_file)
			goto err_put_context;
		if (group_file->f_op != &perf_fops)
			goto err_put_context;

		group_leader = group_file->private_data;
		/*
		 * Do not allow a recursive hierarchy (this new sibling
		 * becoming part of another group-sibling):
		 */
		if (group_leader->group_leader != group_leader)
			goto err_put_context;
		/*
		 * Do not allow to attach to a group in a different
		 * task or CPU context:
		 */
		if (group_leader->ctx != ctx)
			goto err_put_context;
		/*
		 * Only a group leader can be exclusive or pinned
		 */
		if (hw_event.exclusive || hw_event.pinned)
			goto err_put_context;
	}

	ret = -EINVAL;
	counter = perf_counter_alloc(&hw_event, cpu, ctx, group_leader,
				     GFP_KERNEL);
	if (!counter)
		goto err_put_context;

	ret = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0);
	if (ret < 0)
		goto err_free_put_context;

	counter_file = fget_light(ret, &fput_needed2);
	if (!counter_file)
		goto err_free_put_context;

	counter->filp = counter_file;
	mutex_lock(&ctx->mutex);
	perf_install_in_context(ctx, counter, cpu);
	mutex_unlock(&ctx->mutex);

	fput_light(counter_file, fput_needed2);

out_fput:
	fput_light(group_file, fput_needed);

	return ret;

err_free_put_context:
	kfree(counter);

err_put_context:
	put_context(ctx);

	goto out_fput;
}
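
/*
 * Minimal user-space usage sketch (illustrative only; the raw syscall
 * invocation is an assumption, nothing below is defined by this file):
 *
 *	struct perf_counter_hw_event hw_event = { 0 };
 *	u64 count;
 *	int fd;
 *
 *	hw_event.config = ...;	encoded type+id, cf. perf_event_type()
 *	fd = syscall(__NR_perf_counter_open, &hw_event,
 *		     getpid(), -1, -1, 0);
 *	read(fd, &count, sizeof(count));
 *
 * pid == getpid() with cpu == -1 requests a per-task counter for the
 * calling task; group_fd == -1 makes the new counter its own group
 * leader.
 */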
/*
 * Initialize the perf_counter context in a task_struct:
 */
static void
__perf_counter_init_context(struct perf_counter_context *ctx,
			    struct task_struct *task)
{
	memset(ctx, 0, sizeof(*ctx));
	spin_lock_init(&ctx->lock);
	mutex_init(&ctx->mutex);
	INIT_LIST_HEAD(&ctx->counter_list);
	INIT_LIST_HEAD(&ctx->event_list);
	ctx->task = task;
}
/*
 * inherit a counter from parent task to child task:
 */
static struct perf_counter *
inherit_counter(struct perf_counter *parent_counter,
	      struct task_struct *parent,
	      struct perf_counter_context *parent_ctx,
	      struct task_struct *child,
	      struct perf_counter *group_leader,
	      struct perf_counter_context *child_ctx)
{
	struct perf_counter *child_counter;

	/*
	 * Instead of creating recursive hierarchies of counters,
	 * we link inherited counters back to the original parent,
	 * which has a filp for sure, which we use as the reference
	 * count:
	 */
	if (parent_counter->parent)
		parent_counter = parent_counter->parent;

	child_counter = perf_counter_alloc(&parent_counter->hw_event,
					   parent_counter->cpu, child_ctx,
					   group_leader, GFP_KERNEL);
	if (!child_counter)
		return NULL;

	/*
	 * Link it up in the child's context:
	 */
	child_counter->task = child;
	list_add_counter(child_counter, child_ctx);
	child_ctx->nr_counters++;

	child_counter->parent = parent_counter;
	/*
	 * inherit into child's child as well:
	 */
	child_counter->hw_event.inherit = 1;

	/*
	 * Get a reference to the parent filp - we will fput it
	 * when the child counter exits. This is safe to do because
	 * we are in the parent and we know that the filp still
	 * exists and has a nonzero count:
	 */
	atomic_long_inc(&parent_counter->filp->f_count);

	/*
	 * Link this into the parent counter's child list
	 */
	mutex_lock(&parent_counter->mutex);
	list_add_tail(&child_counter->child_list, &parent_counter->child_list);

	/*
	 * Make the child state follow the state of the parent counter,
	 * not its hw_event.disabled bit. We hold the parent's mutex,
	 * so we won't race with perf_counter_{en,dis}able_family.
	 */
	if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE)
		child_counter->state = PERF_COUNTER_STATE_INACTIVE;
	else
		child_counter->state = PERF_COUNTER_STATE_OFF;

	mutex_unlock(&parent_counter->mutex);

	return child_counter;
}
static int inherit_group(struct perf_counter *parent_counter,
	      struct task_struct *parent,
	      struct perf_counter_context *parent_ctx,
	      struct task_struct *child,
	      struct perf_counter_context *child_ctx)
{
	struct perf_counter *leader;
	struct perf_counter *sub;

	leader = inherit_counter(parent_counter, parent, parent_ctx,
				 child, NULL, child_ctx);
	if (!leader)
		return -ENOMEM;
	list_for_each_entry(sub, &parent_counter->sibling_list, list_entry) {
		if (!inherit_counter(sub, parent, parent_ctx,
				     child, leader, child_ctx))
			return -ENOMEM;
	}
	return 0;
}
static void sync_child_counter(struct perf_counter *child_counter,
			       struct perf_counter *parent_counter)
{
	u64 parent_val, child_val;

	parent_val = atomic64_read(&parent_counter->count);
	child_val = atomic64_read(&child_counter->count);

	/*
	 * Add back the child's count to the parent's count:
	 */
	atomic64_add(child_val, &parent_counter->count);

	/*
	 * Remove this counter from the parent's list
	 */
	mutex_lock(&parent_counter->mutex);
	list_del_init(&child_counter->child_list);
	mutex_unlock(&parent_counter->mutex);

	/*
	 * Release the parent counter, if this was the last
	 * reference to it.
	 */
	fput(parent_counter->filp);
}
static void
__perf_counter_exit_task(struct task_struct *child,
			 struct perf_counter *child_counter,
			 struct perf_counter_context *child_ctx)
{
	struct perf_counter *parent_counter;
	struct perf_counter *sub, *tmp;

	/*
	 * If we do not self-reap then we have to wait for the
	 * child task to unschedule (it will happen for sure),
	 * so that its counter is at its final count. (This
	 * condition triggers rarely - child tasks usually get
	 * off their CPU before the parent has a chance to
	 * get this far into the reaping action)
	 */
	if (child != current) {
		wait_task_inactive(child, 0);
		list_del_init(&child_counter->list_entry);
	} else {
		struct perf_cpu_context *cpuctx;
		unsigned long flags;
		u64 perf_flags;

		/*
		 * Disable and unlink this counter.
		 *
		 * Be careful about zapping the list - IRQ/NMI context
		 * could still be processing it:
		 */
		curr_rq_lock_irq_save(&flags);
		perf_flags = hw_perf_save_disable();

		cpuctx = &__get_cpu_var(perf_cpu_context);

		group_sched_out(child_counter, cpuctx, child_ctx);

		list_del_init(&child_counter->list_entry);

		child_ctx->nr_counters--;

		hw_perf_restore(perf_flags);
		curr_rq_unlock_irq_restore(&flags);
	}

	parent_counter = child_counter->parent;
	/*
	 * It can happen that parent exits first, and has counters
	 * that are still around due to the child reference. These
	 * counters need to be zapped - but otherwise linger.
	 */
	if (parent_counter) {
		sync_child_counter(child_counter, parent_counter);
		list_for_each_entry_safe(sub, tmp, &child_counter->sibling_list,
					 list_entry) {
			if (sub->parent) {
				sync_child_counter(sub, sub->parent);
				free_counter(sub);
			}
		}
		free_counter(child_counter);
	}
}
/*
 * When a child task exits, feed back counter values to parent counters.
 *
 * Note: we may be running in child context, but the PID is not hashed
 * anymore so new counters will not be added.
 */
void perf_counter_exit_task(struct task_struct *child)
{
	struct perf_counter *child_counter, *tmp;
	struct perf_counter_context *child_ctx;

	child_ctx = &child->perf_counter_ctx;

	if (likely(!child_ctx->nr_counters))
		return;

	list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list,
				 list_entry)
		__perf_counter_exit_task(child, child_counter, child_ctx);
}
/*
 * Initialize the perf_counter context in task_struct
 */
void perf_counter_init_task(struct task_struct *child)
{
	struct perf_counter_context *child_ctx, *parent_ctx;
	struct perf_counter *counter;
	struct task_struct *parent = current;

	child_ctx  =  &child->perf_counter_ctx;
	parent_ctx = &parent->perf_counter_ctx;

	__perf_counter_init_context(child_ctx, child);

	/*
	 * This is executed from the parent task context, so inherit
	 * counters that have been marked for cloning:
	 */
	if (likely(!parent_ctx->nr_counters))
		return;

	/*
	 * Lock the parent list. No need to lock the child - not PID
	 * hashed yet and not running, so nobody can access it.
	 */
	mutex_lock(&parent_ctx->mutex);

	/*
	 * We dont have to disable NMIs - we are only looking at
	 * the list, not manipulating it:
	 */
	list_for_each_entry(counter, &parent_ctx->counter_list, list_entry) {
		if (!counter->hw_event.inherit)
			continue;

		if (inherit_group(counter, parent,
				  parent_ctx, child, child_ctx))
			break;
	}

	mutex_unlock(&parent_ctx->mutex);
}
static void __cpuinit perf_counter_init_cpu(int cpu)
{
	struct perf_cpu_context *cpuctx;

	cpuctx = &per_cpu(perf_cpu_context, cpu);
	__perf_counter_init_context(&cpuctx->ctx, NULL);

	mutex_lock(&perf_resource_mutex);
	cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu;
	mutex_unlock(&perf_resource_mutex);

	hw_perf_counter_setup(cpu);
}
#ifdef CONFIG_HOTPLUG_CPU
static void __perf_counter_exit_cpu(void *info)
{
	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
	struct perf_counter_context *ctx = &cpuctx->ctx;
	struct perf_counter *counter, *tmp;

	list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry)
		__perf_counter_remove_from_context(counter);
}
static void perf_counter_exit_cpu(int cpu)
{
	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
	struct perf_counter_context *ctx = &cpuctx->ctx;

	mutex_lock(&ctx->mutex);
	smp_call_function_single(cpu, __perf_counter_exit_cpu, NULL, 1);
	mutex_unlock(&ctx->mutex);
}
#else
static inline void perf_counter_exit_cpu(int cpu) { }
#endif
static int __cpuinit
perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action) {

	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		perf_counter_init_cpu(cpu);
		break;

	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		perf_counter_exit_cpu(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata perf_cpu_nb = {
	.notifier_call		= perf_cpu_notify,
};

static int __init perf_counter_init(void)
{
	perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
			(void *)(long)smp_processor_id());
	register_cpu_notifier(&perf_cpu_nb);

	return 0;
}
early_initcall(perf_counter_init);
static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf)
{
	return sprintf(buf, "%d\n", perf_reserved_percpu);
}

static ssize_t
perf_set_reserve_percpu(struct sysdev_class *class,
			const char *buf,
			size_t count)
{
	struct perf_cpu_context *cpuctx;
	unsigned long val;
	int err, cpu, mpt;

	err = strict_strtoul(buf, 10, &val);
	if (err)
		return err;
	if (val > perf_max_counters)
		return -EINVAL;

	mutex_lock(&perf_resource_mutex);
	perf_reserved_percpu = val;
	for_each_online_cpu(cpu) {
		cpuctx = &per_cpu(perf_cpu_context, cpu);
		spin_lock_irq(&cpuctx->ctx.lock);
		mpt = min(perf_max_counters - cpuctx->ctx.nr_counters,
			  perf_max_counters - perf_reserved_percpu);
		cpuctx->max_pertask = mpt;
		spin_unlock_irq(&cpuctx->ctx.lock);
	}
	mutex_unlock(&perf_resource_mutex);

	return count;
}
static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf)
{
	return sprintf(buf, "%d\n", perf_overcommit);
}

static ssize_t
perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count)
{
	unsigned long val;
	int err;

	err = strict_strtoul(buf, 10, &val);
	if (err)
		return err;
	if (val > 1)
		return -EINVAL;

	mutex_lock(&perf_resource_mutex);
	perf_overcommit = val;
	mutex_unlock(&perf_resource_mutex);

	return count;
}
static SYSDEV_CLASS_ATTR(
				reserve_percpu,
				0644,
				perf_show_reserve_percpu,
				perf_set_reserve_percpu
			);

static SYSDEV_CLASS_ATTR(
				overcommit,
				0644,
				perf_show_overcommit,
				perf_set_overcommit
			);

static struct attribute *perfclass_attrs[] = {
	&attr_reserve_percpu.attr,
	&attr_overcommit.attr,
	NULL
};

static struct attribute_group perfclass_attr_group = {
	.attrs			= perfclass_attrs,
	.name			= "perf_counters",
};

static int __init perf_counter_sysfs_init(void)
{
	return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
				  &perfclass_attr_group);
}
device_initcall(perf_counter_sysfs_init);