perf_counter: fix race in perf_output_*
kernel/perf_counter.c
1 /*
2 * Performance counter core code
3 *
4 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
6 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
7 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
8 *
9 * For licensing details see kernel-base/COPYING
10 */
11
12 #include <linux/fs.h>
13 #include <linux/mm.h>
14 #include <linux/cpu.h>
15 #include <linux/smp.h>
16 #include <linux/file.h>
17 #include <linux/poll.h>
18 #include <linux/sysfs.h>
19 #include <linux/ptrace.h>
20 #include <linux/percpu.h>
21 #include <linux/vmstat.h>
22 #include <linux/hardirq.h>
23 #include <linux/rculist.h>
24 #include <linux/uaccess.h>
25 #include <linux/syscalls.h>
26 #include <linux/anon_inodes.h>
27 #include <linux/kernel_stat.h>
28 #include <linux/perf_counter.h>
29 #include <linux/dcache.h>
30
31 #include <asm/irq_regs.h>
32
33 /*
34 * Each CPU has a list of per CPU counters:
35 */
36 DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);
37
38 int perf_max_counters __read_mostly = 1;
39 static int perf_reserved_percpu __read_mostly;
40 static int perf_overcommit __read_mostly = 1;
41
42 static atomic_t nr_mmap_tracking __read_mostly;
43 static atomic_t nr_munmap_tracking __read_mostly;
44 static atomic_t nr_comm_tracking __read_mostly;
45
46 int sysctl_perf_counter_priv __read_mostly; /* do we need to be privileged */
47
48 /*
49 * Mutex for (sysadmin-configurable) counter reservations:
50 */
51 static DEFINE_MUTEX(perf_resource_mutex);
52
53 /*
54 * Architecture provided APIs - weak aliases:
55 */
56 extern __weak const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
57 {
58 return NULL;
59 }
60
61 u64 __weak hw_perf_save_disable(void) { return 0; }
62 void __weak hw_perf_restore(u64 ctrl) { barrier(); }
63 void __weak hw_perf_counter_setup(int cpu) { barrier(); }
64 int __weak hw_perf_group_sched_in(struct perf_counter *group_leader,
65 struct perf_cpu_context *cpuctx,
66 struct perf_counter_context *ctx, int cpu)
67 {
68 return 0;
69 }
70
71 void __weak perf_counter_print_debug(void) { }
72
73 static void
74 list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
75 {
76 struct perf_counter *group_leader = counter->group_leader;
77
78 /*
79 * Depending on whether it is a standalone or sibling counter,
80 * add it straight to the context's counter list, or to the group
81 * leader's sibling list:
82 */
83 if (counter->group_leader == counter)
84 list_add_tail(&counter->list_entry, &ctx->counter_list);
85 else {
86 list_add_tail(&counter->list_entry, &group_leader->sibling_list);
87 group_leader->nr_siblings++;
88 }
89
90 list_add_rcu(&counter->event_entry, &ctx->event_list);
91 }
92
93 static void
94 list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
95 {
96 struct perf_counter *sibling, *tmp;
97
98 list_del_init(&counter->list_entry);
99 list_del_rcu(&counter->event_entry);
100
101 if (counter->group_leader != counter)
102 counter->group_leader->nr_siblings--;
103
104 /*
105 * If this was a group counter with sibling counters then
106 * upgrade the siblings to singleton counters by adding them
107 * to the context list directly:
108 */
109 list_for_each_entry_safe(sibling, tmp,
110 &counter->sibling_list, list_entry) {
111
112 list_move_tail(&sibling->list_entry, &ctx->counter_list);
113 sibling->group_leader = sibling;
114 }
115 }
116
117 static void
118 counter_sched_out(struct perf_counter *counter,
119 struct perf_cpu_context *cpuctx,
120 struct perf_counter_context *ctx)
121 {
122 if (counter->state != PERF_COUNTER_STATE_ACTIVE)
123 return;
124
125 counter->state = PERF_COUNTER_STATE_INACTIVE;
126 counter->tstamp_stopped = ctx->time;
127 counter->pmu->disable(counter);
128 counter->oncpu = -1;
129
130 if (!is_software_counter(counter))
131 cpuctx->active_oncpu--;
132 ctx->nr_active--;
133 if (counter->hw_event.exclusive || !cpuctx->active_oncpu)
134 cpuctx->exclusive = 0;
135 }
136
137 static void
138 group_sched_out(struct perf_counter *group_counter,
139 struct perf_cpu_context *cpuctx,
140 struct perf_counter_context *ctx)
141 {
142 struct perf_counter *counter;
143
144 if (group_counter->state != PERF_COUNTER_STATE_ACTIVE)
145 return;
146
147 counter_sched_out(group_counter, cpuctx, ctx);
148
149 /*
150 * Schedule out siblings (if any):
151 */
152 list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
153 counter_sched_out(counter, cpuctx, ctx);
154
155 if (group_counter->hw_event.exclusive)
156 cpuctx->exclusive = 0;
157 }
158
159 /*
160 * Cross CPU call to remove a performance counter
161 *
162 * We disable the counter on the hardware level first. After that we
163 * remove it from the context list.
164 */
165 static void __perf_counter_remove_from_context(void *info)
166 {
167 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
168 struct perf_counter *counter = info;
169 struct perf_counter_context *ctx = counter->ctx;
170 unsigned long flags;
171 u64 perf_flags;
172
173 /*
174 * If this is a task context, we need to check whether it is
175 * the current task context of this cpu. If not it has been
176 * scheduled out before the smp call arrived.
177 */
178 if (ctx->task && cpuctx->task_ctx != ctx)
179 return;
180
181 spin_lock_irqsave(&ctx->lock, flags);
182
183 counter_sched_out(counter, cpuctx, ctx);
184
185 counter->task = NULL;
186 ctx->nr_counters--;
187
188 /*
189 * Protect the list operation against NMI by disabling the
190 * counters on a global level. NOP for non NMI based counters.
191 */
192 perf_flags = hw_perf_save_disable();
193 list_del_counter(counter, ctx);
194 hw_perf_restore(perf_flags);
195
196 if (!ctx->task) {
197 /*
198 * Allow more per task counters with respect to the
199 * reservation:
200 */
201 cpuctx->max_pertask =
202 min(perf_max_counters - ctx->nr_counters,
203 perf_max_counters - perf_reserved_percpu);
204 }
205
206 spin_unlock_irqrestore(&ctx->lock, flags);
207 }
208
209
210 /*
211 * Remove the counter from a task's (or a CPU's) list of counters.
212 *
213 * Must be called with counter->mutex and ctx->mutex held.
214 *
215 * CPU counters are removed with a smp call. For task counters we only
216 * call when the task is on a CPU.
217 */
218 static void perf_counter_remove_from_context(struct perf_counter *counter)
219 {
220 struct perf_counter_context *ctx = counter->ctx;
221 struct task_struct *task = ctx->task;
222
223 if (!task) {
224 /*
225 * Per cpu counters are removed via an smp call and
226 * the removal is always successful.
227 */
228 smp_call_function_single(counter->cpu,
229 __perf_counter_remove_from_context,
230 counter, 1);
231 return;
232 }
233
234 retry:
235 task_oncpu_function_call(task, __perf_counter_remove_from_context,
236 counter);
237
238 spin_lock_irq(&ctx->lock);
239 /*
240 * If the context is active we need to retry the smp call.
241 */
242 if (ctx->nr_active && !list_empty(&counter->list_entry)) {
243 spin_unlock_irq(&ctx->lock);
244 goto retry;
245 }
246
247 /*
248 * The lock prevents this context from being scheduled in, so we
249 * can remove the counter safely, if the call above did not
250 * succeed.
251 */
252 if (!list_empty(&counter->list_entry)) {
253 ctx->nr_counters--;
254 list_del_counter(counter, ctx);
255 counter->task = NULL;
256 }
257 spin_unlock_irq(&ctx->lock);
258 }
259
260 static inline u64 perf_clock(void)
261 {
262 return cpu_clock(smp_processor_id());
263 }
264
265 /*
266 * Update the record of the current time in a context.
267 */
268 static void update_context_time(struct perf_counter_context *ctx)
269 {
270 u64 now = perf_clock();
271
272 ctx->time += now - ctx->timestamp;
273 ctx->timestamp = now;
274 }
275
276 /*
277 * Update the total_time_enabled and total_time_running fields for a counter.
278 */
279 static void update_counter_times(struct perf_counter *counter)
280 {
281 struct perf_counter_context *ctx = counter->ctx;
282 u64 run_end;
283
284 if (counter->state < PERF_COUNTER_STATE_INACTIVE)
285 return;
286
287 counter->total_time_enabled = ctx->time - counter->tstamp_enabled;
288
289 if (counter->state == PERF_COUNTER_STATE_INACTIVE)
290 run_end = counter->tstamp_stopped;
291 else
292 run_end = ctx->time;
293
294 counter->total_time_running = run_end - counter->tstamp_running;
295 }
296
297 /*
298 * Update total_time_enabled and total_time_running for all counters in a group.
299 */
300 static void update_group_times(struct perf_counter *leader)
301 {
302 struct perf_counter *counter;
303
304 update_counter_times(leader);
305 list_for_each_entry(counter, &leader->sibling_list, list_entry)
306 update_counter_times(counter);
307 }
308
309 /*
310 * Cross CPU call to disable a performance counter
311 */
312 static void __perf_counter_disable(void *info)
313 {
314 struct perf_counter *counter = info;
315 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
316 struct perf_counter_context *ctx = counter->ctx;
317 unsigned long flags;
318
319 /*
320 * If this is a per-task counter, need to check whether this
321 * counter's task is the current task on this cpu.
322 */
323 if (ctx->task && cpuctx->task_ctx != ctx)
324 return;
325
326 spin_lock_irqsave(&ctx->lock, flags);
327
328 /*
329 * If the counter is on, turn it off.
330 * If it is in error state, leave it in error state.
331 */
332 if (counter->state >= PERF_COUNTER_STATE_INACTIVE) {
333 update_context_time(ctx);
334 update_counter_times(counter);
335 if (counter == counter->group_leader)
336 group_sched_out(counter, cpuctx, ctx);
337 else
338 counter_sched_out(counter, cpuctx, ctx);
339 counter->state = PERF_COUNTER_STATE_OFF;
340 }
341
342 spin_unlock_irqrestore(&ctx->lock, flags);
343 }
344
345 /*
346 * Disable a counter.
347 */
348 static void perf_counter_disable(struct perf_counter *counter)
349 {
350 struct perf_counter_context *ctx = counter->ctx;
351 struct task_struct *task = ctx->task;
352
353 if (!task) {
354 /*
355 * Disable the counter on the cpu that it's on
356 */
357 smp_call_function_single(counter->cpu, __perf_counter_disable,
358 counter, 1);
359 return;
360 }
361
362 retry:
363 task_oncpu_function_call(task, __perf_counter_disable, counter);
364
365 spin_lock_irq(&ctx->lock);
366 /*
367 * If the counter is still active, we need to retry the cross-call.
368 */
369 if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
370 spin_unlock_irq(&ctx->lock);
371 goto retry;
372 }
373
374 /*
375 * Since we have the lock this context can't be scheduled
376 * in, so we can change the state safely.
377 */
378 if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
379 update_counter_times(counter);
380 counter->state = PERF_COUNTER_STATE_OFF;
381 }
382
383 spin_unlock_irq(&ctx->lock);
384 }
385
386 /*
387 * Disable a counter and all its children.
388 */
389 static void perf_counter_disable_family(struct perf_counter *counter)
390 {
391 struct perf_counter *child;
392
393 perf_counter_disable(counter);
394
395 /*
396 * Lock the mutex to protect the list of children
397 */
398 mutex_lock(&counter->mutex);
399 list_for_each_entry(child, &counter->child_list, child_list)
400 perf_counter_disable(child);
401 mutex_unlock(&counter->mutex);
402 }
403
404 static int
405 counter_sched_in(struct perf_counter *counter,
406 struct perf_cpu_context *cpuctx,
407 struct perf_counter_context *ctx,
408 int cpu)
409 {
410 if (counter->state <= PERF_COUNTER_STATE_OFF)
411 return 0;
412
413 counter->state = PERF_COUNTER_STATE_ACTIVE;
414 counter->oncpu = cpu; /* TODO: put 'cpu' into cpuctx->cpu */
415 /*
416 * The new state must be visible before we turn it on in the hardware:
417 */
418 smp_wmb();
419
420 if (counter->pmu->enable(counter)) {
421 counter->state = PERF_COUNTER_STATE_INACTIVE;
422 counter->oncpu = -1;
423 return -EAGAIN;
424 }
425
426 counter->tstamp_running += ctx->time - counter->tstamp_stopped;
427
428 if (!is_software_counter(counter))
429 cpuctx->active_oncpu++;
430 ctx->nr_active++;
431
432 if (counter->hw_event.exclusive)
433 cpuctx->exclusive = 1;
434
435 return 0;
436 }
437
438 /*
439 * Return 1 for a group consisting entirely of software counters,
440 * 0 if the group contains any hardware counters.
441 */
442 static int is_software_only_group(struct perf_counter *leader)
443 {
444 struct perf_counter *counter;
445
446 if (!is_software_counter(leader))
447 return 0;
448
449 list_for_each_entry(counter, &leader->sibling_list, list_entry)
450 if (!is_software_counter(counter))
451 return 0;
452
453 return 1;
454 }
455
456 /*
457 * Work out whether we can put this counter group on the CPU now.
458 */
459 static int group_can_go_on(struct perf_counter *counter,
460 struct perf_cpu_context *cpuctx,
461 int can_add_hw)
462 {
463 /*
464 * Groups consisting entirely of software counters can always go on.
465 */
466 if (is_software_only_group(counter))
467 return 1;
468 /*
469 * If an exclusive group is already on, no other hardware
470 * counters can go on.
471 */
472 if (cpuctx->exclusive)
473 return 0;
474 /*
475 * If this group is exclusive and there are already
476 * counters on the CPU, it can't go on.
477 */
478 if (counter->hw_event.exclusive && cpuctx->active_oncpu)
479 return 0;
480 /*
481 * Otherwise, try to add it if all previous groups were able
482 * to go on.
483 */
484 return can_add_hw;
485 }
486
487 static void add_counter_to_ctx(struct perf_counter *counter,
488 struct perf_counter_context *ctx)
489 {
490 list_add_counter(counter, ctx);
491 ctx->nr_counters++;
492 counter->prev_state = PERF_COUNTER_STATE_OFF;
493 counter->tstamp_enabled = ctx->time;
494 counter->tstamp_running = ctx->time;
495 counter->tstamp_stopped = ctx->time;
496 }
497
498 /*
499 * Cross CPU call to install and enable a performance counter
500 */
501 static void __perf_install_in_context(void *info)
502 {
503 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
504 struct perf_counter *counter = info;
505 struct perf_counter_context *ctx = counter->ctx;
506 struct perf_counter *leader = counter->group_leader;
507 int cpu = smp_processor_id();
508 unsigned long flags;
509 u64 perf_flags;
510 int err;
511
512 /*
513 * If this is a task context, we need to check whether it is
514 * the current task context of this cpu. If not it has been
515 * scheduled out before the smp call arrived.
516 */
517 if (ctx->task && cpuctx->task_ctx != ctx)
518 return;
519
520 spin_lock_irqsave(&ctx->lock, flags);
521 update_context_time(ctx);
522
523 /*
524 * Protect the list operation against NMI by disabling the
525 * counters on a global level. NOP for non NMI based counters.
526 */
527 perf_flags = hw_perf_save_disable();
528
529 add_counter_to_ctx(counter, ctx);
530
531 /*
532 * Don't put the counter on if it is disabled or if
533 * it is in a group and the group isn't on.
534 */
535 if (counter->state != PERF_COUNTER_STATE_INACTIVE ||
536 (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE))
537 goto unlock;
538
539 /*
540 * An exclusive counter can't go on if there are already active
541 * hardware counters, and no hardware counter can go on if there
542 * is already an exclusive counter on.
543 */
544 if (!group_can_go_on(counter, cpuctx, 1))
545 err = -EEXIST;
546 else
547 err = counter_sched_in(counter, cpuctx, ctx, cpu);
548
549 if (err) {
550 /*
551 * This counter couldn't go on. If it is in a group
552 * then we have to pull the whole group off.
553 * If the counter group is pinned then put it in error state.
554 */
555 if (leader != counter)
556 group_sched_out(leader, cpuctx, ctx);
557 if (leader->hw_event.pinned) {
558 update_group_times(leader);
559 leader->state = PERF_COUNTER_STATE_ERROR;
560 }
561 }
562
563 if (!err && !ctx->task && cpuctx->max_pertask)
564 cpuctx->max_pertask--;
565
566 unlock:
567 hw_perf_restore(perf_flags);
568
569 spin_unlock_irqrestore(&ctx->lock, flags);
570 }
571
572 /*
573 * Attach a performance counter to a context
574 *
575 * First we add the counter to the list with the hardware enable bit
576 * in counter->hw_config cleared.
577 *
578 * If the counter is attached to a task which is on a CPU we use a smp
579 * call to enable it in the task context. The task might have been
580 * scheduled away, but we check this in the smp call again.
581 *
582 * Must be called with ctx->mutex held.
583 */
584 static void
585 perf_install_in_context(struct perf_counter_context *ctx,
586 struct perf_counter *counter,
587 int cpu)
588 {
589 struct task_struct *task = ctx->task;
590
591 if (!task) {
592 /*
593 * Per cpu counters are installed via an smp call and
594 * the install is always successful.
595 */
596 smp_call_function_single(cpu, __perf_install_in_context,
597 counter, 1);
598 return;
599 }
600
601 counter->task = task;
602 retry:
603 task_oncpu_function_call(task, __perf_install_in_context,
604 counter);
605
606 spin_lock_irq(&ctx->lock);
607 /*
608 * If the context is active we need to retry the smp call.
609 */
610 if (ctx->is_active && list_empty(&counter->list_entry)) {
611 spin_unlock_irq(&ctx->lock);
612 goto retry;
613 }
614
615 /*
616 * The lock prevents this context from being scheduled in, so we
617 * can add the counter safely, if the call above did not
618 * succeed.
619 */
620 if (list_empty(&counter->list_entry))
621 add_counter_to_ctx(counter, ctx);
622 spin_unlock_irq(&ctx->lock);
623 }
624
625 /*
626 * Cross CPU call to enable a performance counter
627 */
628 static void __perf_counter_enable(void *info)
629 {
630 struct perf_counter *counter = info;
631 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
632 struct perf_counter_context *ctx = counter->ctx;
633 struct perf_counter *leader = counter->group_leader;
634 unsigned long flags;
635 int err;
636
637 /*
638 * If this is a per-task counter, need to check whether this
639 * counter's task is the current task on this cpu.
640 */
641 if (ctx->task && cpuctx->task_ctx != ctx)
642 return;
643
644 spin_lock_irqsave(&ctx->lock, flags);
645 update_context_time(ctx);
646
647 counter->prev_state = counter->state;
648 if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
649 goto unlock;
650 counter->state = PERF_COUNTER_STATE_INACTIVE;
651 counter->tstamp_enabled = ctx->time - counter->total_time_enabled;
652
653 /*
654 * If the counter is in a group and isn't the group leader,
655 * then don't put it on unless the group is on.
656 */
657 if (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE)
658 goto unlock;
659
660 if (!group_can_go_on(counter, cpuctx, 1))
661 err = -EEXIST;
662 else
663 err = counter_sched_in(counter, cpuctx, ctx,
664 smp_processor_id());
665
666 if (err) {
667 /*
668 * If this counter can't go on and it's part of a
669 * group, then the whole group has to come off.
670 */
671 if (leader != counter)
672 group_sched_out(leader, cpuctx, ctx);
673 if (leader->hw_event.pinned) {
674 update_group_times(leader);
675 leader->state = PERF_COUNTER_STATE_ERROR;
676 }
677 }
678
679 unlock:
680 spin_unlock_irqrestore(&ctx->lock, flags);
681 }
682
683 /*
684 * Enable a counter.
685 */
686 static void perf_counter_enable(struct perf_counter *counter)
687 {
688 struct perf_counter_context *ctx = counter->ctx;
689 struct task_struct *task = ctx->task;
690
691 if (!task) {
692 /*
693 * Enable the counter on the cpu that it's on
694 */
695 smp_call_function_single(counter->cpu, __perf_counter_enable,
696 counter, 1);
697 return;
698 }
699
700 spin_lock_irq(&ctx->lock);
701 if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
702 goto out;
703
704 /*
705 * If the counter is in error state, clear that first.
706 * That way, if we see the counter in error state below, we
707 * know that it has gone back into error state, as distinct
708 * from the task having been scheduled away before the
709 * cross-call arrived.
710 */
711 if (counter->state == PERF_COUNTER_STATE_ERROR)
712 counter->state = PERF_COUNTER_STATE_OFF;
713
714 retry:
715 spin_unlock_irq(&ctx->lock);
716 task_oncpu_function_call(task, __perf_counter_enable, counter);
717
718 spin_lock_irq(&ctx->lock);
719
720 /*
721 * If the context is active and the counter is still off,
722 * we need to retry the cross-call.
723 */
724 if (ctx->is_active && counter->state == PERF_COUNTER_STATE_OFF)
725 goto retry;
726
727 /*
728 * Since we have the lock this context can't be scheduled
729 * in, so we can change the state safely.
730 */
731 if (counter->state == PERF_COUNTER_STATE_OFF) {
732 counter->state = PERF_COUNTER_STATE_INACTIVE;
733 counter->tstamp_enabled =
734 ctx->time - counter->total_time_enabled;
735 }
736 out:
737 spin_unlock_irq(&ctx->lock);
738 }
739
740 static void perf_counter_refresh(struct perf_counter *counter, int refresh)
741 {
742 atomic_add(refresh, &counter->event_limit);
743 perf_counter_enable(counter);
744 }
745
746 /*
747 * Enable a counter and all its children.
748 */
749 static void perf_counter_enable_family(struct perf_counter *counter)
750 {
751 struct perf_counter *child;
752
753 perf_counter_enable(counter);
754
755 /*
756 * Lock the mutex to protect the list of children
757 */
758 mutex_lock(&counter->mutex);
759 list_for_each_entry(child, &counter->child_list, child_list)
760 perf_counter_enable(child);
761 mutex_unlock(&counter->mutex);
762 }
763
764 void __perf_counter_sched_out(struct perf_counter_context *ctx,
765 struct perf_cpu_context *cpuctx)
766 {
767 struct perf_counter *counter;
768 u64 flags;
769
770 spin_lock(&ctx->lock);
771 ctx->is_active = 0;
772 if (likely(!ctx->nr_counters))
773 goto out;
774 update_context_time(ctx);
775
776 flags = hw_perf_save_disable();
777 if (ctx->nr_active) {
778 list_for_each_entry(counter, &ctx->counter_list, list_entry)
779 group_sched_out(counter, cpuctx, ctx);
780 }
781 hw_perf_restore(flags);
782 out:
783 spin_unlock(&ctx->lock);
784 }
785
786 /*
787 * Called from scheduler to remove the counters of the current task,
788 * with interrupts disabled.
789 *
790 * We stop each counter and update the counter value in counter->count.
791 *
792 * This does not protect us against NMI, but disable()
793 * sets the disabled bit in the control field of counter _before_
794 * accessing the counter control register. If an NMI hits, then it will
795 * not restart the counter.
796 */
797 void perf_counter_task_sched_out(struct task_struct *task, int cpu)
798 {
799 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
800 struct perf_counter_context *ctx = &task->perf_counter_ctx;
801 struct pt_regs *regs;
802
803 if (likely(!cpuctx->task_ctx))
804 return;
805
806 update_context_time(ctx);
807
808 regs = task_pt_regs(task);
809 perf_swcounter_event(PERF_COUNT_CONTEXT_SWITCHES, 1, 1, regs, 0);
810 __perf_counter_sched_out(ctx, cpuctx);
811
812 cpuctx->task_ctx = NULL;
813 }
814
815 static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx)
816 {
817 __perf_counter_sched_out(&cpuctx->ctx, cpuctx);
818 }
819
820 static int
821 group_sched_in(struct perf_counter *group_counter,
822 struct perf_cpu_context *cpuctx,
823 struct perf_counter_context *ctx,
824 int cpu)
825 {
826 struct perf_counter *counter, *partial_group;
827 int ret;
828
829 if (group_counter->state == PERF_COUNTER_STATE_OFF)
830 return 0;
831
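/*
 * Let the architecture hook try to schedule the whole group at once:
 * a negative return is an error, a positive return means it already
 * took care of the group, zero falls through to the generic code below.
 */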
832 ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
833 if (ret)
834 return ret < 0 ? ret : 0;
835
836 group_counter->prev_state = group_counter->state;
837 if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
838 return -EAGAIN;
839
840 /*
841 * Schedule in siblings as one group (if any):
842 */
843 list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
844 counter->prev_state = counter->state;
845 if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
846 partial_group = counter;
847 goto group_error;
848 }
849 }
850
851 return 0;
852
853 group_error:
854 /*
855 * Groups can be scheduled in as one unit only, so undo any
856 * partial group before returning:
857 */
858 list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
859 if (counter == partial_group)
860 break;
861 counter_sched_out(counter, cpuctx, ctx);
862 }
863 counter_sched_out(group_counter, cpuctx, ctx);
864
865 return -EAGAIN;
866 }
867
868 static void
869 __perf_counter_sched_in(struct perf_counter_context *ctx,
870 struct perf_cpu_context *cpuctx, int cpu)
871 {
872 struct perf_counter *counter;
873 u64 flags;
874 int can_add_hw = 1;
875
876 spin_lock(&ctx->lock);
877 ctx->is_active = 1;
878 if (likely(!ctx->nr_counters))
879 goto out;
880
881 ctx->timestamp = perf_clock();
882
883 flags = hw_perf_save_disable();
884
885 /*
886 * First go through the list and put on any pinned groups
887 * in order to give them the best chance of going on.
888 */
889 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
890 if (counter->state <= PERF_COUNTER_STATE_OFF ||
891 !counter->hw_event.pinned)
892 continue;
893 if (counter->cpu != -1 && counter->cpu != cpu)
894 continue;
895
896 if (group_can_go_on(counter, cpuctx, 1))
897 group_sched_in(counter, cpuctx, ctx, cpu);
898
899 /*
900 * If this pinned group hasn't been scheduled,
901 * put it in error state.
902 */
903 if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
904 update_group_times(counter);
905 counter->state = PERF_COUNTER_STATE_ERROR;
906 }
907 }
908
909 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
910 /*
911 * Ignore counters in OFF or ERROR state, and
912 * ignore pinned counters since we did them already.
913 */
914 if (counter->state <= PERF_COUNTER_STATE_OFF ||
915 counter->hw_event.pinned)
916 continue;
917
918 /*
919 * Listen to the 'cpu' scheduling filter constraint
920 * of counters:
921 */
922 if (counter->cpu != -1 && counter->cpu != cpu)
923 continue;
924
925 if (group_can_go_on(counter, cpuctx, can_add_hw)) {
926 if (group_sched_in(counter, cpuctx, ctx, cpu))
927 can_add_hw = 0;
928 }
929 }
930 hw_perf_restore(flags);
931 out:
932 spin_unlock(&ctx->lock);
933 }
934
935 /*
936 * Called from scheduler to add the counters of the current task
937 * with interrupts disabled.
938 *
939 * We restore the counter value and then enable it.
940 *
941 * This does not protect us against NMI, but enable()
942 * sets the enabled bit in the control field of counter _before_
943 * accessing the counter control register. If an NMI hits, then it will
944 * keep the counter running.
945 */
946 void perf_counter_task_sched_in(struct task_struct *task, int cpu)
947 {
948 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
949 struct perf_counter_context *ctx = &task->perf_counter_ctx;
950
951 __perf_counter_sched_in(ctx, cpuctx, cpu);
952 cpuctx->task_ctx = ctx;
953 }
954
955 static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
956 {
957 struct perf_counter_context *ctx = &cpuctx->ctx;
958
959 __perf_counter_sched_in(ctx, cpuctx, cpu);
960 }
961
962 int perf_counter_task_disable(void)
963 {
964 struct task_struct *curr = current;
965 struct perf_counter_context *ctx = &curr->perf_counter_ctx;
966 struct perf_counter *counter;
967 unsigned long flags;
968 u64 perf_flags;
969 int cpu;
970
971 if (likely(!ctx->nr_counters))
972 return 0;
973
974 local_irq_save(flags);
975 cpu = smp_processor_id();
976
977 perf_counter_task_sched_out(curr, cpu);
978
979 spin_lock(&ctx->lock);
980
981 /*
982 * Disable all the counters:
983 */
984 perf_flags = hw_perf_save_disable();
985
986 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
987 if (counter->state != PERF_COUNTER_STATE_ERROR) {
988 update_group_times(counter);
989 counter->state = PERF_COUNTER_STATE_OFF;
990 }
991 }
992
993 hw_perf_restore(perf_flags);
994
995 spin_unlock_irqrestore(&ctx->lock, flags);
996
997 return 0;
998 }
999
1000 int perf_counter_task_enable(void)
1001 {
1002 struct task_struct *curr = current;
1003 struct perf_counter_context *ctx = &curr->perf_counter_ctx;
1004 struct perf_counter *counter;
1005 unsigned long flags;
1006 u64 perf_flags;
1007 int cpu;
1008
1009 if (likely(!ctx->nr_counters))
1010 return 0;
1011
1012 local_irq_save(flags);
1013 cpu = smp_processor_id();
1014
1015 perf_counter_task_sched_out(curr, cpu);
1016
1017 spin_lock(&ctx->lock);
1018
1019 /*
1020 * Enable all the counters:
1021 */
1022 perf_flags = hw_perf_save_disable();
1023
1024 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
1025 if (counter->state > PERF_COUNTER_STATE_OFF)
1026 continue;
1027 counter->state = PERF_COUNTER_STATE_INACTIVE;
1028 counter->tstamp_enabled =
1029 ctx->time - counter->total_time_enabled;
1030 counter->hw_event.disabled = 0;
1031 }
1032 hw_perf_restore(perf_flags);
1033
1034 spin_unlock(&ctx->lock);
1035
1036 perf_counter_task_sched_in(curr, cpu);
1037
1038 local_irq_restore(flags);
1039
1040 return 0;
1041 }
1042
1043 /*
1044 * Round-robin a context's counters:
1045 */
1046 static void rotate_ctx(struct perf_counter_context *ctx)
1047 {
1048 struct perf_counter *counter;
1049 u64 perf_flags;
1050
1051 if (!ctx->nr_counters)
1052 return;
1053
1054 spin_lock(&ctx->lock);
1055 /*
1056 * Rotate the first entry last (works just fine for group counters too):
1057 */
1058 perf_flags = hw_perf_save_disable();
1059 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
1060 list_move_tail(&counter->list_entry, &ctx->counter_list);
1061 break;
1062 }
1063 hw_perf_restore(perf_flags);
1064
1065 spin_unlock(&ctx->lock);
1066 }
1067
1068 void perf_counter_task_tick(struct task_struct *curr, int cpu)
1069 {
1070 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
1071 struct perf_counter_context *ctx = &curr->perf_counter_ctx;
1072 const int rotate_percpu = 0;
1073
1074 if (rotate_percpu)
1075 perf_counter_cpu_sched_out(cpuctx);
1076 perf_counter_task_sched_out(curr, cpu);
1077
1078 if (rotate_percpu)
1079 rotate_ctx(&cpuctx->ctx);
1080 rotate_ctx(ctx);
1081
1082 if (rotate_percpu)
1083 perf_counter_cpu_sched_in(cpuctx, cpu);
1084 perf_counter_task_sched_in(curr, cpu);
1085 }
1086
1087 /*
1088 * Cross CPU call to read the hardware counter
1089 */
1090 static void __read(void *info)
1091 {
1092 struct perf_counter *counter = info;
1093 struct perf_counter_context *ctx = counter->ctx;
1094 unsigned long flags;
1095
1096 local_irq_save(flags);
1097 if (ctx->is_active)
1098 update_context_time(ctx);
1099 counter->pmu->read(counter);
1100 update_counter_times(counter);
1101 local_irq_restore(flags);
1102 }
1103
1104 static u64 perf_counter_read(struct perf_counter *counter)
1105 {
1106 /*
1107 * If counter is enabled and currently active on a CPU, update the
1108 * value in the counter structure:
1109 */
1110 if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
1111 smp_call_function_single(counter->oncpu,
1112 __read, counter, 1);
1113 } else if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
1114 update_counter_times(counter);
1115 }
1116
1117 return atomic64_read(&counter->count);
1118 }
1119
1120 static void put_context(struct perf_counter_context *ctx)
1121 {
1122 if (ctx->task)
1123 put_task_struct(ctx->task);
1124 }
1125
1126 static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
1127 {
1128 struct perf_cpu_context *cpuctx;
1129 struct perf_counter_context *ctx;
1130 struct task_struct *task;
1131
1132 /*
1133 * If cpu is not a wildcard then this is a percpu counter:
1134 */
1135 if (cpu != -1) {
1136 /* Must be root to operate on a CPU counter: */
1137 if (sysctl_perf_counter_priv && !capable(CAP_SYS_ADMIN))
1138 return ERR_PTR(-EACCES);
1139
1140 if (cpu < 0 || cpu > num_possible_cpus())
1141 return ERR_PTR(-EINVAL);
1142
1143 /*
1144 * We could be clever and allow to attach a counter to an
1145 * offline CPU and activate it when the CPU comes up, but
1146 * that's for later.
1147 */
1148 if (!cpu_isset(cpu, cpu_online_map))
1149 return ERR_PTR(-ENODEV);
1150
1151 cpuctx = &per_cpu(perf_cpu_context, cpu);
1152 ctx = &cpuctx->ctx;
1153
1154 return ctx;
1155 }
1156
1157 rcu_read_lock();
1158 if (!pid)
1159 task = current;
1160 else
1161 task = find_task_by_vpid(pid);
1162 if (task)
1163 get_task_struct(task);
1164 rcu_read_unlock();
1165
1166 if (!task)
1167 return ERR_PTR(-ESRCH);
1168
1169 ctx = &task->perf_counter_ctx;
1170 ctx->task = task;
1171
1172 /* Reuse ptrace permission checks for now. */
1173 if (!ptrace_may_access(task, PTRACE_MODE_READ)) {
1174 put_context(ctx);
1175 return ERR_PTR(-EACCES);
1176 }
1177
1178 return ctx;
1179 }
1180
1181 static void free_counter_rcu(struct rcu_head *head)
1182 {
1183 struct perf_counter *counter;
1184
1185 counter = container_of(head, struct perf_counter, rcu_head);
1186 kfree(counter);
1187 }
1188
1189 static void perf_pending_sync(struct perf_counter *counter);
1190
1191 static void free_counter(struct perf_counter *counter)
1192 {
1193 perf_pending_sync(counter);
1194
1195 if (counter->hw_event.mmap)
1196 atomic_dec(&nr_mmap_tracking);
1197 if (counter->hw_event.munmap)
1198 atomic_dec(&nr_munmap_tracking);
1199 if (counter->hw_event.comm)
1200 atomic_dec(&nr_comm_tracking);
1201
1202 if (counter->destroy)
1203 counter->destroy(counter);
1204
1205 call_rcu(&counter->rcu_head, free_counter_rcu);
1206 }
1207
1208 /*
1209 * Called when the last reference to the file is gone.
1210 */
1211 static int perf_release(struct inode *inode, struct file *file)
1212 {
1213 struct perf_counter *counter = file->private_data;
1214 struct perf_counter_context *ctx = counter->ctx;
1215
1216 file->private_data = NULL;
1217
1218 mutex_lock(&ctx->mutex);
1219 mutex_lock(&counter->mutex);
1220
1221 perf_counter_remove_from_context(counter);
1222
1223 mutex_unlock(&counter->mutex);
1224 mutex_unlock(&ctx->mutex);
1225
1226 free_counter(counter);
1227 put_context(ctx);
1228
1229 return 0;
1230 }
1231
1232 /*
1233 * Read the performance counter - simple non blocking version for now
1234 */
1235 static ssize_t
1236 perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
1237 {
1238 u64 values[3];
1239 int n;
1240
1241 /*
1242 * Return end-of-file for a read on a counter that is in
1243 * error state (i.e. because it was pinned but it couldn't be
1244 * scheduled on to the CPU at some point).
1245 */
1246 if (counter->state == PERF_COUNTER_STATE_ERROR)
1247 return 0;
1248
1249 mutex_lock(&counter->mutex);
1250 values[0] = perf_counter_read(counter);
1251 n = 1;
1252 if (counter->hw_event.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1253 values[n++] = counter->total_time_enabled +
1254 atomic64_read(&counter->child_total_time_enabled);
1255 if (counter->hw_event.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1256 values[n++] = counter->total_time_running +
1257 atomic64_read(&counter->child_total_time_running);
1258 mutex_unlock(&counter->mutex);
1259
1260 if (count < n * sizeof(u64))
1261 return -EINVAL;
1262 count = n * sizeof(u64);
1263
1264 if (copy_to_user(buf, values, count))
1265 return -EFAULT;
1266
1267 return count;
1268 }
1269
1270 static ssize_t
1271 perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
1272 {
1273 struct perf_counter *counter = file->private_data;
1274
1275 return perf_read_hw(counter, buf, count);
1276 }
1277
1278 static unsigned int perf_poll(struct file *file, poll_table *wait)
1279 {
1280 struct perf_counter *counter = file->private_data;
1281 struct perf_mmap_data *data;
1282 unsigned int events = POLL_HUP;
1283
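/*
 * Consume the poll bits published by perf_output_wakeup(); a counter
 * without an mmap()ed buffer only ever reports POLL_HUP.
 */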
1284 rcu_read_lock();
1285 data = rcu_dereference(counter->data);
1286 if (data)
1287 events = atomic_xchg(&data->poll, 0);
1288 rcu_read_unlock();
1289
1290 poll_wait(file, &counter->waitq, wait);
1291
1292 return events;
1293 }
1294
1295 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1296 {
1297 struct perf_counter *counter = file->private_data;
1298 int err = 0;
1299
1300 switch (cmd) {
1301 case PERF_COUNTER_IOC_ENABLE:
1302 perf_counter_enable_family(counter);
1303 break;
1304 case PERF_COUNTER_IOC_DISABLE:
1305 perf_counter_disable_family(counter);
1306 break;
1307 case PERF_COUNTER_IOC_REFRESH:
1308 perf_counter_refresh(counter, arg);
1309 break;
1310 default:
1311 err = -ENOTTY;
1312 }
1313 return err;
1314 }
1315
1316 /*
1317 * Callers need to ensure there can be no nesting of this function, otherwise
1318 * the seqlock logic goes bad. We cannot serialize this because the arch
1319 * code calls this from NMI context.
1320 */
1321 void perf_counter_update_userpage(struct perf_counter *counter)
1322 {
1323 struct perf_mmap_data *data;
1324 struct perf_counter_mmap_page *userpg;
1325
1326 rcu_read_lock();
1327 data = rcu_dereference(counter->data);
1328 if (!data)
1329 goto unlock;
1330
1331 userpg = data->user_page;
1332
1333 /*
1334 * Disable preemption so as to not let the corresponding user-space
1335 * spin too long if we get preempted.
1336 */
1337 preempt_disable();
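/*
 * userpg->lock is bumped before and after the update, so it is odd
 * while an update is in progress; user-space re-reads the fields if
 * it observes the value changing.
 */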
1338 ++userpg->lock;
1339 barrier();
1340 userpg->index = counter->hw.idx;
1341 userpg->offset = atomic64_read(&counter->count);
1342 if (counter->state == PERF_COUNTER_STATE_ACTIVE)
1343 userpg->offset -= atomic64_read(&counter->hw.prev_count);
1344
1345 barrier();
1346 ++userpg->lock;
1347 preempt_enable();
1348 unlock:
1349 rcu_read_unlock();
1350 }
1351
1352 static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1353 {
1354 struct perf_counter *counter = vma->vm_file->private_data;
1355 struct perf_mmap_data *data;
1356 int ret = VM_FAULT_SIGBUS;
1357
1358 rcu_read_lock();
1359 data = rcu_dereference(counter->data);
1360 if (!data)
1361 goto unlock;
1362
1363 if (vmf->pgoff == 0) {
1364 vmf->page = virt_to_page(data->user_page);
1365 } else {
1366 int nr = vmf->pgoff - 1;
1367
1368 if ((unsigned)nr > data->nr_pages)
1369 goto unlock;
1370
1371 vmf->page = virt_to_page(data->data_pages[nr]);
1372 }
1373 get_page(vmf->page);
1374 ret = 0;
1375 unlock:
1376 rcu_read_unlock();
1377
1378 return ret;
1379 }
1380
1381 static int perf_mmap_data_alloc(struct perf_counter *counter, int nr_pages)
1382 {
1383 struct perf_mmap_data *data;
1384 unsigned long size;
1385 int i;
1386
1387 WARN_ON(atomic_read(&counter->mmap_count));
1388
1389 size = sizeof(struct perf_mmap_data);
1390 size += nr_pages * sizeof(void *);
1391
1392 data = kzalloc(size, GFP_KERNEL);
1393 if (!data)
1394 goto fail;
1395
1396 data->user_page = (void *)get_zeroed_page(GFP_KERNEL);
1397 if (!data->user_page)
1398 goto fail_user_page;
1399
1400 for (i = 0; i < nr_pages; i++) {
1401 data->data_pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
1402 if (!data->data_pages[i])
1403 goto fail_data_pages;
1404 }
1405
1406 data->nr_pages = nr_pages;
1407
1408 rcu_assign_pointer(counter->data, data);
1409
1410 return 0;
1411
1412 fail_data_pages:
1413 for (i--; i >= 0; i--)
1414 free_page((unsigned long)data->data_pages[i]);
1415
1416 free_page((unsigned long)data->user_page);
1417
1418 fail_user_page:
1419 kfree(data);
1420
1421 fail:
1422 return -ENOMEM;
1423 }
1424
1425 static void __perf_mmap_data_free(struct rcu_head *rcu_head)
1426 {
1427 struct perf_mmap_data *data = container_of(rcu_head,
1428 struct perf_mmap_data, rcu_head);
1429 int i;
1430
1431 free_page((unsigned long)data->user_page);
1432 for (i = 0; i < data->nr_pages; i++)
1433 free_page((unsigned long)data->data_pages[i]);
1434 kfree(data);
1435 }
1436
1437 static void perf_mmap_data_free(struct perf_counter *counter)
1438 {
1439 struct perf_mmap_data *data = counter->data;
1440
1441 WARN_ON(atomic_read(&counter->mmap_count));
1442
1443 rcu_assign_pointer(counter->data, NULL);
1444 call_rcu(&data->rcu_head, __perf_mmap_data_free);
1445 }
1446
1447 static void perf_mmap_open(struct vm_area_struct *vma)
1448 {
1449 struct perf_counter *counter = vma->vm_file->private_data;
1450
1451 atomic_inc(&counter->mmap_count);
1452 }
1453
1454 static void perf_mmap_close(struct vm_area_struct *vma)
1455 {
1456 struct perf_counter *counter = vma->vm_file->private_data;
1457
1458 if (atomic_dec_and_mutex_lock(&counter->mmap_count,
1459 &counter->mmap_mutex)) {
1460 vma->vm_mm->locked_vm -= counter->data->nr_pages + 1;
1461 perf_mmap_data_free(counter);
1462 mutex_unlock(&counter->mmap_mutex);
1463 }
1464 }
1465
1466 static struct vm_operations_struct perf_mmap_vmops = {
1467 .open = perf_mmap_open,
1468 .close = perf_mmap_close,
1469 .fault = perf_mmap_fault,
1470 };
1471
1472 static int perf_mmap(struct file *file, struct vm_area_struct *vma)
1473 {
1474 struct perf_counter *counter = file->private_data;
1475 unsigned long vma_size;
1476 unsigned long nr_pages;
1477 unsigned long locked, lock_limit;
1478 int ret = 0;
1479
1480 if (!(vma->vm_flags & VM_SHARED) || (vma->vm_flags & VM_WRITE))
1481 return -EINVAL;
1482
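/*
 * The first mapped page is the control page (struct
 * perf_counter_mmap_page); only the remaining pages carry event data.
 */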
1483 vma_size = vma->vm_end - vma->vm_start;
1484 nr_pages = (vma_size / PAGE_SIZE) - 1;
1485
1486 /*
1487 * If we have data pages ensure they're a power-of-two number, so we
1488 * can do bitmasks instead of modulo.
1489 */
1490 if (nr_pages != 0 && !is_power_of_2(nr_pages))
1491 return -EINVAL;
1492
1493 if (vma_size != PAGE_SIZE * (1 + nr_pages))
1494 return -EINVAL;
1495
1496 if (vma->vm_pgoff != 0)
1497 return -EINVAL;
1498
1499 mutex_lock(&counter->mmap_mutex);
1500 if (atomic_inc_not_zero(&counter->mmap_count)) {
1501 if (nr_pages != counter->data->nr_pages)
1502 ret = -EINVAL;
1503 goto unlock;
1504 }
1505
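/*
 * The control page plus the data pages are effectively pinned,
 * so charge them against the task's RLIMIT_MEMLOCK.
 */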
1506 locked = vma->vm_mm->locked_vm;
1507 locked += nr_pages + 1;
1508
1509 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
1510 lock_limit >>= PAGE_SHIFT;
1511
1512 if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
1513 ret = -EPERM;
1514 goto unlock;
1515 }
1516
1517 WARN_ON(counter->data);
1518 ret = perf_mmap_data_alloc(counter, nr_pages);
1519 if (ret)
1520 goto unlock;
1521
1522 atomic_set(&counter->mmap_count, 1);
1523 vma->vm_mm->locked_vm += nr_pages + 1;
1524 unlock:
1525 mutex_unlock(&counter->mmap_mutex);
1526
1527 vma->vm_flags &= ~VM_MAYWRITE;
1528 vma->vm_flags |= VM_RESERVED;
1529 vma->vm_ops = &perf_mmap_vmops;
1530
1531 return ret;
1532 }
1533
1534 static int perf_fasync(int fd, struct file *filp, int on)
1535 {
1536 struct perf_counter *counter = filp->private_data;
1537 struct inode *inode = filp->f_path.dentry->d_inode;
1538 int retval;
1539
1540 mutex_lock(&inode->i_mutex);
1541 retval = fasync_helper(fd, filp, on, &counter->fasync);
1542 mutex_unlock(&inode->i_mutex);
1543
1544 if (retval < 0)
1545 return retval;
1546
1547 return 0;
1548 }
1549
1550 static const struct file_operations perf_fops = {
1551 .release = perf_release,
1552 .read = perf_read,
1553 .poll = perf_poll,
1554 .unlocked_ioctl = perf_ioctl,
1555 .compat_ioctl = perf_ioctl,
1556 .mmap = perf_mmap,
1557 .fasync = perf_fasync,
1558 };
1559
1560 /*
1561 * Perf counter wakeup
1562 *
1563 * If there's data, ensure we set the poll() state and publish everything
1564 * to user-space before waking everybody up.
1565 */
1566
1567 void perf_counter_wakeup(struct perf_counter *counter)
1568 {
1569 wake_up_all(&counter->waitq);
1570
1571 if (counter->pending_kill) {
1572 kill_fasync(&counter->fasync, SIGIO, counter->pending_kill);
1573 counter->pending_kill = 0;
1574 }
1575 }
1576
1577 /*
1578 * Pending wakeups
1579 *
1580 * Handle the case where we need to wake up from NMI (or rq->lock) context.
1581 *
1582 * The NMI bit means we cannot possibly take locks. Therefore, maintain a
1583 * single linked list and use cmpxchg() to add entries lockless.
1584 */
1585
1586 static void perf_pending_counter(struct perf_pending_entry *entry)
1587 {
1588 struct perf_counter *counter = container_of(entry,
1589 struct perf_counter, pending);
1590
1591 if (counter->pending_disable) {
1592 counter->pending_disable = 0;
1593 perf_counter_disable(counter);
1594 }
1595
1596 if (counter->pending_wakeup) {
1597 counter->pending_wakeup = 0;
1598 perf_counter_wakeup(counter);
1599 }
1600 }
1601
1602 #define PENDING_TAIL ((struct perf_pending_entry *)-1UL)
1603
1604 static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = {
1605 PENDING_TAIL,
1606 };
1607
1608 static void perf_pending_queue(struct perf_pending_entry *entry,
1609 void (*func)(struct perf_pending_entry *))
1610 {
1611 struct perf_pending_entry **head;
1612
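/*
 * Mark the entry as queued by swapping a non-NULL sentinel into
 * ->next; if it was already non-NULL the entry is pending and must
 * not be queued twice.
 */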
1613 if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL)
1614 return;
1615
1616 entry->func = func;
1617
1618 head = &get_cpu_var(perf_pending_head);
1619
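/*
 * Lockless push onto this CPU's single-linked pending list: retry
 * the cmpxchg() until we have made ourselves the new list head.
 */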
1620 do {
1621 entry->next = *head;
1622 } while (cmpxchg(head, entry->next, entry) != entry->next);
1623
1624 set_perf_counter_pending();
1625
1626 put_cpu_var(perf_pending_head);
1627 }
1628
1629 static int __perf_pending_run(void)
1630 {
1631 struct perf_pending_entry *list;
1632 int nr = 0;
1633
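/*
 * Atomically detach the whole pending list, leaving the head in the
 * empty (PENDING_TAIL) state, then run each queued callback.
 */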
1634 list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL);
1635 while (list != PENDING_TAIL) {
1636 void (*func)(struct perf_pending_entry *);
1637 struct perf_pending_entry *entry = list;
1638
1639 list = list->next;
1640
1641 func = entry->func;
1642 entry->next = NULL;
1643 /*
1644 * Ensure we observe the unqueue before we issue the wakeup,
1645 * so that we won't be waiting forever.
1646 * -- see perf_not_pending().
1647 */
1648 smp_wmb();
1649
1650 func(entry);
1651 nr++;
1652 }
1653
1654 return nr;
1655 }
1656
1657 static inline int perf_not_pending(struct perf_counter *counter)
1658 {
1659 /*
1660 * If we flush on whatever cpu we run, there is a chance we don't
1661 * need to wait.
1662 */
1663 get_cpu();
1664 __perf_pending_run();
1665 put_cpu();
1666
1667 /*
1668 * Ensure we see the proper queue state before going to sleep
1669 * so that we do not miss the wakeup. -- see perf_pending_handle()
1670 */
1671 smp_rmb();
1672 return counter->pending.next == NULL;
1673 }
1674
1675 static void perf_pending_sync(struct perf_counter *counter)
1676 {
1677 wait_event(counter->waitq, perf_not_pending(counter));
1678 }
1679
1680 void perf_counter_do_pending(void)
1681 {
1682 __perf_pending_run();
1683 }
1684
1685 /*
1686 * Callchain support -- arch specific
1687 */
1688
1689 __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
1690 {
1691 return NULL;
1692 }
1693
1694 /*
1695 * Output
1696 */
1697
1698 struct perf_output_handle {
1699 struct perf_counter *counter;
1700 struct perf_mmap_data *data;
1701 unsigned int offset;
1702 unsigned int head;
1703 int wakeup;
1704 int nmi;
1705 int overflow;
1706 int locked;
1707 unsigned long flags;
1708 };
1709
1710 static void perf_output_wakeup(struct perf_output_handle *handle)
1711 {
1712 atomic_set(&handle->data->poll, POLL_IN);
1713
1714 if (handle->nmi) {
1715 handle->counter->pending_wakeup = 1;
1716 perf_pending_queue(&handle->counter->pending,
1717 perf_pending_counter);
1718 } else
1719 perf_counter_wakeup(handle->counter);
1720 }
1721
1722 /*
1723 * Curious locking construct.
1724 *
1725 * We need to ensure a later event doesn't publish a head when a former
1726 * event isn't done writing. However since we need to deal with NMIs we
1727 * cannot fully serialize things.
1728 *
1729 * What we do is serialize between CPUs so we only have to deal with NMI
1730 * nesting on a single CPU.
1731 *
1732 * We only publish the head (and generate a wakeup) when the outer-most
1733 * event completes.
1734 */
1735 static void perf_output_lock(struct perf_output_handle *handle)
1736 {
1737 struct perf_mmap_data *data = handle->data;
1738 int cpu;
1739
1740 handle->locked = 0;
1741
1742 local_irq_save(handle->flags);
1743 cpu = smp_processor_id();
1744
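/*
 * A nested NMI on the CPU that already holds the lock must not spin
 * on itself; it runs unlocked and the outer owner publishes the new
 * head on its behalf.
 */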
1745 if (in_nmi() && atomic_read(&data->lock) == cpu)
1746 return;
1747
1748 while (atomic_cmpxchg(&data->lock, 0, cpu) != 0)
1749 cpu_relax();
1750
1751 handle->locked = 1;
1752 }
1753
1754 static void perf_output_unlock(struct perf_output_handle *handle)
1755 {
1756 struct perf_mmap_data *data = handle->data;
1757 int head, cpu;
1758
1759 if (handle->wakeup)
1760 data->wakeup_head = data->head;
1761
1762 if (!handle->locked)
1763 goto out;
1764
1765 again:
1766 /*
1767 * The xchg implies a full barrier that ensures all writes are done
1768 * before we publish the new head, matched by a rmb() in userspace when
1769 * reading this position.
1770 */
1771 while ((head = atomic_xchg(&data->wakeup_head, 0))) {
1772 data->user_page->data_head = head;
1773 handle->wakeup = 1;
1774 }
1775
1776 /*
1777 * NMI can happen here, which means we can miss a wakeup_head update.
1778 */
1779
1780 cpu = atomic_xchg(&data->lock, 0);
1781 WARN_ON_ONCE(cpu != smp_processor_id());
1782
1783 /*
1784 * Therefore we have to check that we did not in fact miss one.
1785 */
1786 if (unlikely(atomic_read(&data->wakeup_head))) {
1787 /*
1788 * Since we had it locked, we can lock it again.
1789 */
1790 while (atomic_cmpxchg(&data->lock, 0, cpu) != 0)
1791 cpu_relax();
1792
1793 goto again;
1794 }
1795
1796 if (handle->wakeup)
1797 perf_output_wakeup(handle);
1798 out:
1799 local_irq_restore(handle->flags);
1800 }
1801
1802 static int perf_output_begin(struct perf_output_handle *handle,
1803 struct perf_counter *counter, unsigned int size,
1804 int nmi, int overflow)
1805 {
1806 struct perf_mmap_data *data;
1807 unsigned int offset, head;
1808
1809 rcu_read_lock();
1810 data = rcu_dereference(counter->data);
1811 if (!data)
1812 goto out;
1813
1814 handle->data = data;
1815 handle->counter = counter;
1816 handle->nmi = nmi;
1817 handle->overflow = overflow;
1818
1819 if (!data->nr_pages)
1820 goto fail;
1821
1822 perf_output_lock(handle);
1823
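/*
 * Reserve 'size' bytes in the buffer by atomically advancing
 * data->head; concurrent (including NMI) writers each obtain a
 * disjoint [offset, head) range to write into.
 */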
1824 do {
1825 offset = head = atomic_read(&data->head);
1826 head += size;
1827 } while (atomic_cmpxchg(&data->head, offset, head) != offset);
1828
1829 handle->offset = offset;
1830 handle->head = head;
1831 handle->wakeup = (offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT);
1832
1833 return 0;
1834
1835 fail:
1836 perf_output_wakeup(handle);
1837 out:
1838 rcu_read_unlock();
1839
1840 return -ENOSPC;
1841 }
1842
1843 static void perf_output_copy(struct perf_output_handle *handle,
1844 void *buf, unsigned int len)
1845 {
1846 unsigned int pages_mask;
1847 unsigned int offset;
1848 unsigned int size;
1849 void **pages;
1850
1851 offset = handle->offset;
1852 pages_mask = handle->data->nr_pages - 1;
1853 pages = handle->data->data_pages;
1854
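/*
 * Copy in page-sized chunks; nr_pages is a power of two, so masking
 * with pages_mask wraps the offset around the ring buffer.
 */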
1855 do {
1856 unsigned int page_offset;
1857 int nr;
1858
1859 nr = (offset >> PAGE_SHIFT) & pages_mask;
1860 page_offset = offset & (PAGE_SIZE - 1);
1861 size = min_t(unsigned int, PAGE_SIZE - page_offset, len);
1862
1863 memcpy(pages[nr] + page_offset, buf, size);
1864
1865 len -= size;
1866 buf += size;
1867 offset += size;
1868 } while (len);
1869
1870 handle->offset = offset;
1871
1872 WARN_ON_ONCE(handle->offset > handle->head);
1873 }
1874
1875 #define perf_output_put(handle, x) \
1876 perf_output_copy((handle), &(x), sizeof(x))
1877
1878 static void perf_output_end(struct perf_output_handle *handle)
1879 {
1880 struct perf_counter *counter = handle->counter;
1881 struct perf_mmap_data *data = handle->data;
1882
1883 int wakeup_events = counter->hw_event.wakeup_events;
1884
1885 if (handle->overflow && wakeup_events) {
1886 int events = atomic_inc_return(&data->events);
1887 if (events >= wakeup_events) {
1888 atomic_sub(wakeup_events, &data->events);
1889 handle->wakeup = 1;
1890 }
1891 }
1892
1893 perf_output_unlock(handle);
1894 rcu_read_unlock();
1895 }
1896
1897 static void perf_counter_output(struct perf_counter *counter,
1898 int nmi, struct pt_regs *regs, u64 addr)
1899 {
1900 int ret;
1901 u64 record_type = counter->hw_event.record_type;
1902 struct perf_output_handle handle;
1903 struct perf_event_header header;
1904 u64 ip;
1905 struct {
1906 u32 pid, tid;
1907 } tid_entry;
1908 struct {
1909 u64 event;
1910 u64 counter;
1911 } group_entry;
1912 struct perf_callchain_entry *callchain = NULL;
1913 int callchain_size = 0;
1914 u64 time;
1915
1916 header.type = 0;
1917 header.size = sizeof(header);
1918
1919 header.misc = PERF_EVENT_MISC_OVERFLOW;
1920 header.misc |= user_mode(regs) ?
1921 PERF_EVENT_MISC_USER : PERF_EVENT_MISC_KERNEL;
1922
1923 if (record_type & PERF_RECORD_IP) {
1924 ip = instruction_pointer(regs);
1925 header.type |= PERF_RECORD_IP;
1926 header.size += sizeof(ip);
1927 }
1928
1929 if (record_type & PERF_RECORD_TID) {
1930 /* namespace issues */
1931 tid_entry.pid = current->group_leader->pid;
1932 tid_entry.tid = current->pid;
1933
1934 header.type |= PERF_RECORD_TID;
1935 header.size += sizeof(tid_entry);
1936 }
1937
1938 if (record_type & PERF_RECORD_TIME) {
1939 /*
1940 * Maybe do better on x86 and provide cpu_clock_nmi()
1941 */
1942 time = sched_clock();
1943
1944 header.type |= PERF_RECORD_TIME;
1945 header.size += sizeof(u64);
1946 }
1947
1948 if (record_type & PERF_RECORD_ADDR) {
1949 header.type |= PERF_RECORD_ADDR;
1950 header.size += sizeof(u64);
1951 }
1952
1953 if (record_type & PERF_RECORD_GROUP) {
1954 header.type |= PERF_RECORD_GROUP;
1955 header.size += sizeof(u64) +
1956 counter->nr_siblings * sizeof(group_entry);
1957 }
1958
1959 if (record_type & PERF_RECORD_CALLCHAIN) {
1960 callchain = perf_callchain(regs);
1961
1962 if (callchain) {
1963 callchain_size = (1 + callchain->nr) * sizeof(u64);
1964
1965 header.type |= PERF_RECORD_CALLCHAIN;
1966 header.size += callchain_size;
1967 }
1968 }
1969
1970 ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
1971 if (ret)
1972 return;
1973
1974 perf_output_put(&handle, header);
1975
1976 if (record_type & PERF_RECORD_IP)
1977 perf_output_put(&handle, ip);
1978
1979 if (record_type & PERF_RECORD_TID)
1980 perf_output_put(&handle, tid_entry);
1981
1982 if (record_type & PERF_RECORD_TIME)
1983 perf_output_put(&handle, time);
1984
1985 if (record_type & PERF_RECORD_ADDR)
1986 perf_output_put(&handle, addr);
1987
1988 if (record_type & PERF_RECORD_GROUP) {
1989 struct perf_counter *leader, *sub;
1990 u64 nr = counter->nr_siblings;
1991
1992 perf_output_put(&handle, nr);
1993
1994 leader = counter->group_leader;
1995 list_for_each_entry(sub, &leader->sibling_list, list_entry) {
1996 if (sub != counter)
1997 sub->pmu->read(sub);
1998
1999 group_entry.event = sub->hw_event.config;
2000 group_entry.counter = atomic64_read(&sub->count);
2001
2002 perf_output_put(&handle, group_entry);
2003 }
2004 }
2005
2006 if (callchain)
2007 perf_output_copy(&handle, callchain, callchain_size);
2008
2009 perf_output_end(&handle);
2010 }
2011
2012 /*
2013 * comm tracking
2014 */
2015
2016 struct perf_comm_event {
2017 struct task_struct *task;
2018 char *comm;
2019 int comm_size;
2020
2021 struct {
2022 struct perf_event_header header;
2023
2024 u32 pid;
2025 u32 tid;
2026 } event;
2027 };
2028
2029 static void perf_counter_comm_output(struct perf_counter *counter,
2030 struct perf_comm_event *comm_event)
2031 {
2032 struct perf_output_handle handle;
2033 int size = comm_event->event.header.size;
2034 int ret = perf_output_begin(&handle, counter, size, 0, 0);
2035
2036 if (ret)
2037 return;
2038
2039 perf_output_put(&handle, comm_event->event);
2040 perf_output_copy(&handle, comm_event->comm,
2041 comm_event->comm_size);
2042 perf_output_end(&handle);
2043 }
2044
2045 static int perf_counter_comm_match(struct perf_counter *counter,
2046 struct perf_comm_event *comm_event)
2047 {
2048 if (counter->hw_event.comm &&
2049 comm_event->event.header.type == PERF_EVENT_COMM)
2050 return 1;
2051
2052 return 0;
2053 }
2054
2055 static void perf_counter_comm_ctx(struct perf_counter_context *ctx,
2056 struct perf_comm_event *comm_event)
2057 {
2058 struct perf_counter *counter;
2059
2060 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
2061 return;
2062
2063 rcu_read_lock();
2064 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
2065 if (perf_counter_comm_match(counter, comm_event))
2066 perf_counter_comm_output(counter, comm_event);
2067 }
2068 rcu_read_unlock();
2069 }
2070
2071 static void perf_counter_comm_event(struct perf_comm_event *comm_event)
2072 {
2073 struct perf_cpu_context *cpuctx;
2074 unsigned int size;
2075 char *comm = comm_event->task->comm;
2076
2077 size = ALIGN(strlen(comm)+1, sizeof(u64));
2078
2079 comm_event->comm = comm;
2080 comm_event->comm_size = size;
2081
2082 comm_event->event.header.size = sizeof(comm_event->event) + size;
2083
2084 cpuctx = &get_cpu_var(perf_cpu_context);
2085 perf_counter_comm_ctx(&cpuctx->ctx, comm_event);
2086 put_cpu_var(perf_cpu_context);
2087
2088 perf_counter_comm_ctx(&current->perf_counter_ctx, comm_event);
2089 }
2090
2091 void perf_counter_comm(struct task_struct *task)
2092 {
2093 struct perf_comm_event comm_event;
2094
2095 if (!atomic_read(&nr_comm_tracking))
2096 return;
2097
2098 comm_event = (struct perf_comm_event){
2099 .task = task,
2100 .event = {
2101 .header = { .type = PERF_EVENT_COMM, },
2102 .pid = task->group_leader->pid,
2103 .tid = task->pid,
2104 },
2105 };
2106
2107 perf_counter_comm_event(&comm_event);
2108 }
2109
2110 /*
2111 * mmap tracking
2112 */
2113
2114 struct perf_mmap_event {
2115 struct file *file;
2116 char *file_name;
2117 int file_size;
2118
2119 struct {
2120 struct perf_event_header header;
2121
2122 u32 pid;
2123 u32 tid;
2124 u64 start;
2125 u64 len;
2126 u64 pgoff;
2127 } event;
2128 };
2129
2130 static void perf_counter_mmap_output(struct perf_counter *counter,
2131 struct perf_mmap_event *mmap_event)
2132 {
2133 struct perf_output_handle handle;
2134 int size = mmap_event->event.header.size;
2135 int ret = perf_output_begin(&handle, counter, size, 0, 0);
2136
2137 if (ret)
2138 return;
2139
2140 perf_output_put(&handle, mmap_event->event);
2141 perf_output_copy(&handle, mmap_event->file_name,
2142 mmap_event->file_size);
2143 perf_output_end(&handle);
2144 }
2145
2146 static int perf_counter_mmap_match(struct perf_counter *counter,
2147 struct perf_mmap_event *mmap_event)
2148 {
2149 if (counter->hw_event.mmap &&
2150 mmap_event->event.header.type == PERF_EVENT_MMAP)
2151 return 1;
2152
2153 if (counter->hw_event.munmap &&
2154 mmap_event->event.header.type == PERF_EVENT_MUNMAP)
2155 return 1;
2156
2157 return 0;
2158 }
2159
2160 static void perf_counter_mmap_ctx(struct perf_counter_context *ctx,
2161 struct perf_mmap_event *mmap_event)
2162 {
2163 struct perf_counter *counter;
2164
2165 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
2166 return;
2167
2168 rcu_read_lock();
2169 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
2170 if (perf_counter_mmap_match(counter, mmap_event))
2171 perf_counter_mmap_output(counter, mmap_event);
2172 }
2173 rcu_read_unlock();
2174 }
2175
2176 static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event)
2177 {
2178 struct perf_cpu_context *cpuctx;
2179 struct file *file = mmap_event->file;
2180 unsigned int size;
2181 char tmp[16];
2182 char *buf = NULL;
2183 char *name;
2184
2185 if (file) {
2186 buf = kzalloc(PATH_MAX, GFP_KERNEL);
2187 if (!buf) {
2188 name = strncpy(tmp, "//enomem", sizeof(tmp));
2189 goto got_name;
2190 }
2191 name = d_path(&file->f_path, buf, PATH_MAX);
2192 if (IS_ERR(name)) {
2193 name = strncpy(tmp, "//toolong", sizeof(tmp));
2194 goto got_name;
2195 }
2196 } else {
2197 name = strncpy(tmp, "//anon", sizeof(tmp));
2198 goto got_name;
2199 }
2200
2201 got_name:
2202 size = ALIGN(strlen(name)+1, sizeof(u64));
2203
2204 mmap_event->file_name = name;
2205 mmap_event->file_size = size;
2206
2207 mmap_event->event.header.size = sizeof(mmap_event->event) + size;
2208
2209 cpuctx = &get_cpu_var(perf_cpu_context);
2210 perf_counter_mmap_ctx(&cpuctx->ctx, mmap_event);
2211 put_cpu_var(perf_cpu_context);
2212
2213 perf_counter_mmap_ctx(&current->perf_counter_ctx, mmap_event);
2214
2215 kfree(buf);
2216 }
2217
2218 void perf_counter_mmap(unsigned long addr, unsigned long len,
2219 unsigned long pgoff, struct file *file)
2220 {
2221 struct perf_mmap_event mmap_event;
2222
2223 if (!atomic_read(&nr_mmap_tracking))
2224 return;
2225
2226 mmap_event = (struct perf_mmap_event){
2227 .file = file,
2228 .event = {
2229 .header = { .type = PERF_EVENT_MMAP, },
2230 .pid = current->group_leader->pid,
2231 .tid = current->pid,
2232 .start = addr,
2233 .len = len,
2234 .pgoff = pgoff,
2235 },
2236 };
2237
2238 perf_counter_mmap_event(&mmap_event);
2239 }
2240
2241 void perf_counter_munmap(unsigned long addr, unsigned long len,
2242 unsigned long pgoff, struct file *file)
2243 {
2244 struct perf_mmap_event mmap_event;
2245
2246 if (!atomic_read(&nr_munmap_tracking))
2247 return;
2248
2249 mmap_event = (struct perf_mmap_event){
2250 .file = file,
2251 .event = {
2252 .header = { .type = PERF_EVENT_MUNMAP, },
2253 .pid = current->group_leader->pid,
2254 .tid = current->pid,
2255 .start = addr,
2256 .len = len,
2257 .pgoff = pgoff,
2258 },
2259 };
2260
2261 perf_counter_mmap_event(&mmap_event);
2262 }
2263
2264 /*
2265 * Generic counter overflow handling.
2266 */
2267
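/*
 * Emit an overflow record for the counter.  If an event_limit is set it is
 * decremented here; when it reaches zero the counter gets disabled
 * (deferred via the pending queue when called from NMI context, where
 * perf_counter_disable() cannot be called directly) and 1 is returned so
 * the caller can stop further sampling.
 */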
2268 int perf_counter_overflow(struct perf_counter *counter,
2269 int nmi, struct pt_regs *regs, u64 addr)
2270 {
2271 int events = atomic_read(&counter->event_limit);
2272 int ret = 0;
2273
2274 counter->pending_kill = POLL_IN;
2275 if (events && atomic_dec_and_test(&counter->event_limit)) {
2276 ret = 1;
2277 counter->pending_kill = POLL_HUP;
2278 if (nmi) {
2279 counter->pending_disable = 1;
2280 perf_pending_queue(&counter->pending,
2281 perf_pending_counter);
2282 } else
2283 perf_counter_disable(counter);
2284 }
2285
2286 perf_counter_output(counter, nmi, regs, addr);
2287 return ret;
2288 }
2289
2290 /*
2291 * Generic software counter infrastructure
2292 */
2293
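/*
 * Sampling scheme: hw.count is primed to -(remaining period) by
 * perf_swcounter_set_period(), so an overflow is detected as soon as
 * adding events drives it non-negative (atomic64_add_negative() returning
 * false in perf_swcounter_add()).  For example, with irq_period == 1000
 * the counter starts at -1000 and the 1000th event triggers the overflow
 * path, which folds the delta into counter->count and re-arms the period.
 */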
2294 static void perf_swcounter_update(struct perf_counter *counter)
2295 {
2296 struct hw_perf_counter *hwc = &counter->hw;
2297 u64 prev, now;
2298 s64 delta;
2299
2300 again:
2301 prev = atomic64_read(&hwc->prev_count);
2302 now = atomic64_read(&hwc->count);
2303 if (atomic64_cmpxchg(&hwc->prev_count, prev, now) != prev)
2304 goto again;
2305
2306 delta = now - prev;
2307
2308 atomic64_add(delta, &counter->count);
2309 atomic64_sub(delta, &hwc->period_left);
2310 }
2311
2312 static void perf_swcounter_set_period(struct perf_counter *counter)
2313 {
2314 struct hw_perf_counter *hwc = &counter->hw;
2315 s64 left = atomic64_read(&hwc->period_left);
2316 s64 period = hwc->irq_period;
2317
2318 if (unlikely(left <= -period)) {
2319 left = period;
2320 atomic64_set(&hwc->period_left, left);
2321 }
2322
2323 if (unlikely(left <= 0)) {
2324 left += period;
2325 atomic64_add(period, &hwc->period_left);
2326 }
2327
2328 atomic64_set(&hwc->prev_count, -left);
2329 atomic64_set(&hwc->count, -left);
2330 }
2331
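/*
 * hrtimer callback used by the clock based software counters: it fires
 * every hw.irq_period ns, updates the count via pmu->read() and feeds the
 * interrupted registers (or the user regs as a fallback) into
 * perf_counter_overflow().  Returning HRTIMER_NORESTART once the event
 * limit is exhausted stops further sampling.
 */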
2332 static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
2333 {
2334 enum hrtimer_restart ret = HRTIMER_RESTART;
2335 struct perf_counter *counter;
2336 struct pt_regs *regs;
2337
2338 counter = container_of(hrtimer, struct perf_counter, hw.hrtimer);
2339 counter->pmu->read(counter);
2340
2341 regs = get_irq_regs();
2342 /*
2343 * In case we exclude kernel IPs or are somehow not in interrupt
2344 * context, provide the next best thing, the user IP.
2345 */
2346 if ((counter->hw_event.exclude_kernel || !regs) &&
2347 !counter->hw_event.exclude_user)
2348 regs = task_pt_regs(current);
2349
2350 if (regs) {
2351 if (perf_counter_overflow(counter, 0, regs, 0))
2352 ret = HRTIMER_NORESTART;
2353 }
2354
2355 hrtimer_forward_now(hrtimer, ns_to_ktime(counter->hw.irq_period));
2356
2357 return ret;
2358 }
2359
2360 static void perf_swcounter_overflow(struct perf_counter *counter,
2361 int nmi, struct pt_regs *regs, u64 addr)
2362 {
2363 perf_swcounter_update(counter);
2364 perf_swcounter_set_period(counter);
2365 if (perf_counter_overflow(counter, nmi, regs, addr))
2366 /* soft-disable the counter */
2367 ;
2368
2369 }
2370
2371 static int perf_swcounter_match(struct perf_counter *counter,
2372 enum perf_event_types type,
2373 u32 event, struct pt_regs *regs)
2374 {
2375 if (counter->state != PERF_COUNTER_STATE_ACTIVE)
2376 return 0;
2377
2378 if (perf_event_raw(&counter->hw_event))
2379 return 0;
2380
2381 if (perf_event_type(&counter->hw_event) != type)
2382 return 0;
2383
2384 if (perf_event_id(&counter->hw_event) != event)
2385 return 0;
2386
2387 if (counter->hw_event.exclude_user && user_mode(regs))
2388 return 0;
2389
2390 if (counter->hw_event.exclude_kernel && !user_mode(regs))
2391 return 0;
2392
2393 return 1;
2394 }
2395
2396 static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
2397 int nmi, struct pt_regs *regs, u64 addr)
2398 {
2399 int neg = atomic64_add_negative(nr, &counter->hw.count);
2400 if (counter->hw.irq_period && !neg)
2401 perf_swcounter_overflow(counter, nmi, regs, addr);
2402 }
2403
2404 static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
2405 enum perf_event_types type, u32 event,
2406 u64 nr, int nmi, struct pt_regs *regs,
2407 u64 addr)
2408 {
2409 struct perf_counter *counter;
2410
2411 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
2412 return;
2413
2414 rcu_read_lock();
2415 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
2416 if (perf_swcounter_match(counter, type, event, regs))
2417 perf_swcounter_add(counter, nr, nmi, regs, addr);
2418 }
2419 rcu_read_unlock();
2420 }
2421
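/*
 * One recursion counter per execution context (task, softirq, hardirq,
 * NMI): a software event that fires while another one is already being
 * processed in the same context is silently dropped instead of recursing.
 */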
2422 static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx)
2423 {
2424 if (in_nmi())
2425 return &cpuctx->recursion[3];
2426
2427 if (in_irq())
2428 return &cpuctx->recursion[2];
2429
2430 if (in_softirq())
2431 return &cpuctx->recursion[1];
2432
2433 return &cpuctx->recursion[0];
2434 }
2435
2436 static void __perf_swcounter_event(enum perf_event_types type, u32 event,
2437 u64 nr, int nmi, struct pt_regs *regs,
2438 u64 addr)
2439 {
2440 struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
2441 int *recursion = perf_swcounter_recursion_context(cpuctx);
2442
2443 if (*recursion)
2444 goto out;
2445
2446 (*recursion)++;
2447 barrier();
2448
2449 perf_swcounter_ctx_event(&cpuctx->ctx, type, event,
2450 nr, nmi, regs, addr);
2451 if (cpuctx->task_ctx) {
2452 perf_swcounter_ctx_event(cpuctx->task_ctx, type, event,
2453 nr, nmi, regs, addr);
2454 }
2455
2456 barrier();
2457 (*recursion)--;
2458
2459 out:
2460 put_cpu_var(perf_cpu_context);
2461 }
2462
2463 void
2464 perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
2465 {
2466 __perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi, regs, addr);
2467 }
2468
2469 static void perf_swcounter_read(struct perf_counter *counter)
2470 {
2471 perf_swcounter_update(counter);
2472 }
2473
2474 static int perf_swcounter_enable(struct perf_counter *counter)
2475 {
2476 perf_swcounter_set_period(counter);
2477 return 0;
2478 }
2479
2480 static void perf_swcounter_disable(struct perf_counter *counter)
2481 {
2482 perf_swcounter_update(counter);
2483 }
2484
2485 static const struct pmu perf_ops_generic = {
2486 .enable = perf_swcounter_enable,
2487 .disable = perf_swcounter_disable,
2488 .read = perf_swcounter_read,
2489 };
2490
2491 /*
2492 * Software counter: cpu wall time clock
2493 */
2494
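/*
 * Counts wall-clock time on the local CPU using cpu_clock(); when an
 * irq_period is set, an hrtimer generates the periodic samples.
 */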
2495 static void cpu_clock_perf_counter_update(struct perf_counter *counter)
2496 {
2497 int cpu = raw_smp_processor_id();
2498 s64 prev;
2499 u64 now;
2500
2501 now = cpu_clock(cpu);
2502 prev = atomic64_read(&counter->hw.prev_count);
2503 atomic64_set(&counter->hw.prev_count, now);
2504 atomic64_add(now - prev, &counter->count);
2505 }
2506
2507 static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
2508 {
2509 struct hw_perf_counter *hwc = &counter->hw;
2510 int cpu = raw_smp_processor_id();
2511
2512 atomic64_set(&hwc->prev_count, cpu_clock(cpu));
2513 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2514 hwc->hrtimer.function = perf_swcounter_hrtimer;
2515 if (hwc->irq_period) {
2516 __hrtimer_start_range_ns(&hwc->hrtimer,
2517 ns_to_ktime(hwc->irq_period), 0,
2518 HRTIMER_MODE_REL, 0);
2519 }
2520
2521 return 0;
2522 }
2523
2524 static void cpu_clock_perf_counter_disable(struct perf_counter *counter)
2525 {
2526 hrtimer_cancel(&counter->hw.hrtimer);
2527 cpu_clock_perf_counter_update(counter);
2528 }
2529
2530 static void cpu_clock_perf_counter_read(struct perf_counter *counter)
2531 {
2532 cpu_clock_perf_counter_update(counter);
2533 }
2534
2535 static const struct pmu perf_ops_cpu_clock = {
2536 .enable = cpu_clock_perf_counter_enable,
2537 .disable = cpu_clock_perf_counter_disable,
2538 .read = cpu_clock_perf_counter_read,
2539 };
2540
2541 /*
2542 * Software counter: task time clock
2543 */
2544
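/*
 * Counts time while the monitored task is running, based on ctx->time.
 * The read path avoids update_context_time() from NMI context and instead
 * extrapolates from ctx->timestamp using perf_clock().
 */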
2545 static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now)
2546 {
2547 u64 prev;
2548 s64 delta;
2549
2550 prev = atomic64_xchg(&counter->hw.prev_count, now);
2551 delta = now - prev;
2552 atomic64_add(delta, &counter->count);
2553 }
2554
2555 static int task_clock_perf_counter_enable(struct perf_counter *counter)
2556 {
2557 struct hw_perf_counter *hwc = &counter->hw;
2558 u64 now;
2559
2560 now = counter->ctx->time;
2561
2562 atomic64_set(&hwc->prev_count, now);
2563 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2564 hwc->hrtimer.function = perf_swcounter_hrtimer;
2565 if (hwc->irq_period) {
2566 __hrtimer_start_range_ns(&hwc->hrtimer,
2567 ns_to_ktime(hwc->irq_period), 0,
2568 HRTIMER_MODE_REL, 0);
2569 }
2570
2571 return 0;
2572 }
2573
2574 static void task_clock_perf_counter_disable(struct perf_counter *counter)
2575 {
2576 hrtimer_cancel(&counter->hw.hrtimer);
2577 task_clock_perf_counter_update(counter, counter->ctx->time);
2578
2579 }
2580
2581 static void task_clock_perf_counter_read(struct perf_counter *counter)
2582 {
2583 u64 time;
2584
2585 if (!in_nmi()) {
2586 update_context_time(counter->ctx);
2587 time = counter->ctx->time;
2588 } else {
2589 u64 now = perf_clock();
2590 u64 delta = now - counter->ctx->timestamp;
2591 time = counter->ctx->time + delta;
2592 }
2593
2594 task_clock_perf_counter_update(counter, time);
2595 }
2596
2597 static const struct pmu perf_ops_task_clock = {
2598 .enable = task_clock_perf_counter_enable,
2599 .disable = task_clock_perf_counter_disable,
2600 .read = task_clock_perf_counter_read,
2601 };
2602
2603 /*
2604 * Software counter: cpu migrations
2605 */
2606
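/*
 * Pure count of scheduler migrations: per-task counters read
 * se.nr_migrations, per-cpu counters read cpu_nr_migrations().  No
 * hrtimer is involved; the count is refreshed on enable/read/disable.
 */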
2607 static inline u64 get_cpu_migrations(struct perf_counter *counter)
2608 {
2609 struct task_struct *curr = counter->ctx->task;
2610
2611 if (curr)
2612 return curr->se.nr_migrations;
2613 return cpu_nr_migrations(smp_processor_id());
2614 }
2615
2616 static void cpu_migrations_perf_counter_update(struct perf_counter *counter)
2617 {
2618 u64 prev, now;
2619 s64 delta;
2620
2621 prev = atomic64_read(&counter->hw.prev_count);
2622 now = get_cpu_migrations(counter);
2623
2624 atomic64_set(&counter->hw.prev_count, now);
2625
2626 delta = now - prev;
2627
2628 atomic64_add(delta, &counter->count);
2629 }
2630
2631 static void cpu_migrations_perf_counter_read(struct perf_counter *counter)
2632 {
2633 cpu_migrations_perf_counter_update(counter);
2634 }
2635
2636 static int cpu_migrations_perf_counter_enable(struct perf_counter *counter)
2637 {
2638 if (counter->prev_state <= PERF_COUNTER_STATE_OFF)
2639 atomic64_set(&counter->hw.prev_count,
2640 get_cpu_migrations(counter));
2641 return 0;
2642 }
2643
2644 static void cpu_migrations_perf_counter_disable(struct perf_counter *counter)
2645 {
2646 cpu_migrations_perf_counter_update(counter);
2647 }
2648
2649 static const struct pmu perf_ops_cpu_migrations = {
2650 .enable = cpu_migrations_perf_counter_enable,
2651 .disable = cpu_migrations_perf_counter_disable,
2652 .read = cpu_migrations_perf_counter_read,
2653 };
2654
2655 #ifdef CONFIG_EVENT_PROFILE
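/*
 * Tracepoint counters piggy-back on ftrace's event profiling:
 * ftrace_profile_enable() arms the tracepoint and each hit is fed through
 * __perf_swcounter_event() as a PERF_TYPE_TRACEPOINT event.
 */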
2656 void perf_tpcounter_event(int event_id)
2657 {
2658 struct pt_regs *regs = get_irq_regs();
2659
2660 if (!regs)
2661 regs = task_pt_regs(current);
2662
2663 __perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, regs, 0);
2664 }
2665 EXPORT_SYMBOL_GPL(perf_tpcounter_event);
2666
2667 extern int ftrace_profile_enable(int);
2668 extern void ftrace_profile_disable(int);
2669
2670 static void tp_perf_counter_destroy(struct perf_counter *counter)
2671 {
2672 ftrace_profile_disable(perf_event_id(&counter->hw_event));
2673 }
2674
2675 static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
2676 {
2677 int event_id = perf_event_id(&counter->hw_event);
2678 int ret;
2679
2680 ret = ftrace_profile_enable(event_id);
2681 if (ret)
2682 return NULL;
2683
2684 counter->destroy = tp_perf_counter_destroy;
2685 counter->hw.irq_period = counter->hw_event.irq_period;
2686
2687 return &perf_ops_generic;
2688 }
2689 #else
2690 static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
2691 {
2692 return NULL;
2693 }
2694 #endif
2695
2696 static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
2697 {
2698 struct perf_counter_hw_event *hw_event = &counter->hw_event;
2699 const struct pmu *pmu = NULL;
2700 struct hw_perf_counter *hwc = &counter->hw;
2701
2702 /*
2703 * Software counters (currently) can't in general distinguish
2704 * between user, kernel and hypervisor events.
2705 * However, context switches and cpu migrations are considered
2706 * to be kernel events, and page faults are never hypervisor
2707 * events.
2708 */
2709 switch (perf_event_id(&counter->hw_event)) {
2710 case PERF_COUNT_CPU_CLOCK:
2711 pmu = &perf_ops_cpu_clock;
2712
2713 if (hw_event->irq_period && hw_event->irq_period < 10000)
2714 hw_event->irq_period = 10000;
2715 break;
2716 case PERF_COUNT_TASK_CLOCK:
2717 /*
2718 * If the user instantiates this as a per-cpu counter,
2719 * use the cpu_clock counter instead.
2720 */
2721 if (counter->ctx->task)
2722 pmu = &perf_ops_task_clock;
2723 else
2724 pmu = &perf_ops_cpu_clock;
2725
2726 if (hw_event->irq_period && hw_event->irq_period < 10000)
2727 hw_event->irq_period = 10000;
2728 break;
2729 case PERF_COUNT_PAGE_FAULTS:
2730 case PERF_COUNT_PAGE_FAULTS_MIN:
2731 case PERF_COUNT_PAGE_FAULTS_MAJ:
2732 case PERF_COUNT_CONTEXT_SWITCHES:
2733 pmu = &perf_ops_generic;
2734 break;
2735 case PERF_COUNT_CPU_MIGRATIONS:
2736 if (!counter->hw_event.exclude_kernel)
2737 pmu = &perf_ops_cpu_migrations;
2738 break;
2739 }
2740
2741 if (pmu)
2742 hwc->irq_period = hw_event->irq_period;
2743
2744 return pmu;
2745 }
2746
2747 /*
2748 * Allocate and initialize a counter structure
2749 */
2750 static struct perf_counter *
2751 perf_counter_alloc(struct perf_counter_hw_event *hw_event,
2752 int cpu,
2753 struct perf_counter_context *ctx,
2754 struct perf_counter *group_leader,
2755 gfp_t gfpflags)
2756 {
2757 const struct pmu *pmu;
2758 struct perf_counter *counter;
2759 long err;
2760
2761 counter = kzalloc(sizeof(*counter), gfpflags);
2762 if (!counter)
2763 return ERR_PTR(-ENOMEM);
2764
2765 /*
2766 * Single counters are their own group leaders, with an
2767 * empty sibling list:
2768 */
2769 if (!group_leader)
2770 group_leader = counter;
2771
2772 mutex_init(&counter->mutex);
2773 INIT_LIST_HEAD(&counter->list_entry);
2774 INIT_LIST_HEAD(&counter->event_entry);
2775 INIT_LIST_HEAD(&counter->sibling_list);
2776 init_waitqueue_head(&counter->waitq);
2777
2778 mutex_init(&counter->mmap_mutex);
2779
2780 INIT_LIST_HEAD(&counter->child_list);
2781
2782 counter->cpu = cpu;
2783 counter->hw_event = *hw_event;
2784 counter->group_leader = group_leader;
2785 counter->pmu = NULL;
2786 counter->ctx = ctx;
2787
2788 counter->state = PERF_COUNTER_STATE_INACTIVE;
2789 if (hw_event->disabled)
2790 counter->state = PERF_COUNTER_STATE_OFF;
2791
2792 pmu = NULL;
2793
2794 if (perf_event_raw(hw_event)) {
2795 pmu = hw_perf_counter_init(counter);
2796 goto done;
2797 }
2798
2799 switch (perf_event_type(hw_event)) {
2800 case PERF_TYPE_HARDWARE:
2801 pmu = hw_perf_counter_init(counter);
2802 break;
2803
2804 case PERF_TYPE_SOFTWARE:
2805 pmu = sw_perf_counter_init(counter);
2806 break;
2807
2808 case PERF_TYPE_TRACEPOINT:
2809 pmu = tp_perf_counter_init(counter);
2810 break;
2811 }
2812 done:
2813 err = 0;
2814 if (!pmu)
2815 err = -EINVAL;
2816 else if (IS_ERR(pmu))
2817 err = PTR_ERR(pmu);
2818
2819 if (err) {
2820 kfree(counter);
2821 return ERR_PTR(err);
2822 }
2823
2824 counter->pmu = pmu;
2825
2826 if (counter->hw_event.mmap)
2827 atomic_inc(&nr_mmap_tracking);
2828 if (counter->hw_event.munmap)
2829 atomic_inc(&nr_munmap_tracking);
2830 if (counter->hw_event.comm)
2831 atomic_inc(&nr_comm_tracking);
2832
2833 return counter;
2834 }
2835
2836 /**
2837 * sys_perf_counter_open - open a performance counter, associate it with a task/cpu
2838 *
2839 * @hw_event_uptr: event type attributes for monitoring/sampling
2840 * @pid: target pid
2841 * @cpu: target cpu
2842 * @group_fd: group leader counter fd
2843 */
2844 SYSCALL_DEFINE5(perf_counter_open,
2845 const struct perf_counter_hw_event __user *, hw_event_uptr,
2846 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
2847 {
2848 struct perf_counter *counter, *group_leader;
2849 struct perf_counter_hw_event hw_event;
2850 struct perf_counter_context *ctx;
2851 struct file *counter_file = NULL;
2852 struct file *group_file = NULL;
2853 int fput_needed = 0;
2854 int fput_needed2 = 0;
2855 int ret;
2856
2857 /* for future expandability... */
2858 if (flags)
2859 return -EINVAL;
2860
2861 if (copy_from_user(&hw_event, hw_event_uptr, sizeof(hw_event)) != 0)
2862 return -EFAULT;
2863
2864 /*
2865 * Get the target context (task or percpu):
2866 */
2867 ctx = find_get_context(pid, cpu);
2868 if (IS_ERR(ctx))
2869 return PTR_ERR(ctx);
2870
2871 /*
2872 * Look up the group leader (we will attach this counter to it):
2873 */
2874 group_leader = NULL;
2875 if (group_fd != -1) {
2876 ret = -EINVAL;
2877 group_file = fget_light(group_fd, &fput_needed);
2878 if (!group_file)
2879 goto err_put_context;
2880 if (group_file->f_op != &perf_fops)
2881 goto err_put_context;
2882
2883 group_leader = group_file->private_data;
2884 /*
2885 * Do not allow a recursive hierarchy (the requested group
2886 * leader must not itself be a sibling of another group):
2887 */
2888 if (group_leader->group_leader != group_leader)
2889 goto err_put_context;
2890 /*
2891 * Do not allow attaching to a group in a different
2892 * task or CPU context:
2893 */
2894 if (group_leader->ctx != ctx)
2895 goto err_put_context;
2896 /*
2897 * Only a group leader can be exclusive or pinned
2898 */
2899 if (hw_event.exclusive || hw_event.pinned)
2900 goto err_put_context;
2901 }
2902
2903 counter = perf_counter_alloc(&hw_event, cpu, ctx, group_leader,
2904 GFP_KERNEL);
2905 ret = PTR_ERR(counter);
2906 if (IS_ERR(counter))
2907 goto err_put_context;
2908
2909 ret = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0);
2910 if (ret < 0)
2911 goto err_free_put_context;
2912
2913 counter_file = fget_light(ret, &fput_needed2);
2914 if (!counter_file)
2915 goto err_free_put_context;
2916
2917 counter->filp = counter_file;
2918 mutex_lock(&ctx->mutex);
2919 perf_install_in_context(ctx, counter, cpu);
2920 mutex_unlock(&ctx->mutex);
2921
2922 fput_light(counter_file, fput_needed2);
2923
2924 out_fput:
2925 fput_light(group_file, fput_needed);
2926
2927 return ret;
2928
2929 err_free_put_context:
2930 kfree(counter);
2931
2932 err_put_context:
2933 put_context(ctx);
2934
2935 goto out_fput;
2936 }
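
/*
 * Illustrative userspace usage of the syscall above - a sketch only; there
 * is no libc wrapper and the __NR_perf_counter_open syscall number macro
 * is an assumption here:
 *
 *	struct perf_counter_hw_event ev;
 *	int fd;
 *
 *	memset(&ev, 0, sizeof(ev));
 *	... fill in ev: event type/id, irq_period, exclude_* bits ...
 *	fd = syscall(__NR_perf_counter_open, &ev, 0, -1, -1, 0);
 *
 * pid 0 selects the current task and cpu -1 requests a per-task (any cpu)
 * context; group_fd -1 starts a new group and flags must currently be 0.
 * The returned fd can be read() for the current count, poll()ed for
 * overflow wakeups and mmap()ed to access the sampling buffer.
 */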
2937
2938 /*
2939 * Initialize the perf_counter context in a task_struct:
2940 */
2941 static void
2942 __perf_counter_init_context(struct perf_counter_context *ctx,
2943 struct task_struct *task)
2944 {
2945 memset(ctx, 0, sizeof(*ctx));
2946 spin_lock_init(&ctx->lock);
2947 mutex_init(&ctx->mutex);
2948 INIT_LIST_HEAD(&ctx->counter_list);
2949 INIT_LIST_HEAD(&ctx->event_list);
2950 ctx->task = task;
2951 }
2952
2953 /*
2954 * inherit a counter from parent task to child task:
2955 */
2956 static struct perf_counter *
2957 inherit_counter(struct perf_counter *parent_counter,
2958 struct task_struct *parent,
2959 struct perf_counter_context *parent_ctx,
2960 struct task_struct *child,
2961 struct perf_counter *group_leader,
2962 struct perf_counter_context *child_ctx)
2963 {
2964 struct perf_counter *child_counter;
2965
2966 /*
2967 * Instead of creating recursive hierarchies of counters,
2968 * we link inherited counters back to the original parent,
2969 * which is guaranteed to have a filp that we use as the
2970 * reference count:
2971 */
2972 if (parent_counter->parent)
2973 parent_counter = parent_counter->parent;
2974
2975 child_counter = perf_counter_alloc(&parent_counter->hw_event,
2976 parent_counter->cpu, child_ctx,
2977 group_leader, GFP_KERNEL);
2978 if (IS_ERR(child_counter))
2979 return child_counter;
2980
2981 /*
2982 * Link it up in the child's context:
2983 */
2984 child_counter->task = child;
2985 add_counter_to_ctx(child_counter, child_ctx);
2986
2987 child_counter->parent = parent_counter;
2988 /*
2989 * inherit into child's child as well:
2990 */
2991 child_counter->hw_event.inherit = 1;
2992
2993 /*
2994 * Get a reference to the parent filp - we will fput it
2995 * when the child counter exits. This is safe to do because
2996 * we are in the parent and we know that the filp still
2997 * exists and has a nonzero count:
2998 */
2999 atomic_long_inc(&parent_counter->filp->f_count);
3000
3001 /*
3002 * Link this into the parent counter's child list
3003 */
3004 mutex_lock(&parent_counter->mutex);
3005 list_add_tail(&child_counter->child_list, &parent_counter->child_list);
3006
3007 /*
3008 * Make the child state follow the state of the parent counter,
3009 * not its hw_event.disabled bit. We hold the parent's mutex,
3010 * so we won't race with perf_counter_{en,dis}able_family.
3011 */
3012 if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE)
3013 child_counter->state = PERF_COUNTER_STATE_INACTIVE;
3014 else
3015 child_counter->state = PERF_COUNTER_STATE_OFF;
3016
3017 mutex_unlock(&parent_counter->mutex);
3018
3019 return child_counter;
3020 }
3021
3022 static int inherit_group(struct perf_counter *parent_counter,
3023 struct task_struct *parent,
3024 struct perf_counter_context *parent_ctx,
3025 struct task_struct *child,
3026 struct perf_counter_context *child_ctx)
3027 {
3028 struct perf_counter *leader;
3029 struct perf_counter *sub;
3030 struct perf_counter *child_ctr;
3031
3032 leader = inherit_counter(parent_counter, parent, parent_ctx,
3033 child, NULL, child_ctx);
3034 if (IS_ERR(leader))
3035 return PTR_ERR(leader);
3036 list_for_each_entry(sub, &parent_counter->sibling_list, list_entry) {
3037 child_ctr = inherit_counter(sub, parent, parent_ctx,
3038 child, leader, child_ctx);
3039 if (IS_ERR(child_ctr))
3040 return PTR_ERR(child_ctr);
3041 }
3042 return 0;
3043 }
3044
3045 static void sync_child_counter(struct perf_counter *child_counter,
3046 struct perf_counter *parent_counter)
3047 {
3048 u64 parent_val, child_val;
3049
3050 parent_val = atomic64_read(&parent_counter->count);
3051 child_val = atomic64_read(&child_counter->count);
3052
3053 /*
3054 * Add back the child's count to the parent's count:
3055 */
3056 atomic64_add(child_val, &parent_counter->count);
3057 atomic64_add(child_counter->total_time_enabled,
3058 &parent_counter->child_total_time_enabled);
3059 atomic64_add(child_counter->total_time_running,
3060 &parent_counter->child_total_time_running);
3061
3062 /*
3063 * Remove this counter from the parent's list
3064 */
3065 mutex_lock(&parent_counter->mutex);
3066 list_del_init(&child_counter->child_list);
3067 mutex_unlock(&parent_counter->mutex);
3068
3069 /*
3070 * Release the parent counter, if this was the last
3071 * reference to it.
3072 */
3073 fput(parent_counter->filp);
3074 }
3075
3076 static void
3077 __perf_counter_exit_task(struct task_struct *child,
3078 struct perf_counter *child_counter,
3079 struct perf_counter_context *child_ctx)
3080 {
3081 struct perf_counter *parent_counter;
3082 struct perf_counter *sub, *tmp;
3083
3084 /*
3085 * If we do not self-reap then we have to wait for the
3086 * child task to unschedule (it will happen for sure),
3087 * so that its counter is at its final count. (This
3088 * condition triggers rarely - child tasks usually get
3089 * off their CPU before the parent has a chance to
3090 * get this far into the reaping action)
3091 */
3092 if (child != current) {
3093 wait_task_inactive(child, 0);
3094 list_del_init(&child_counter->list_entry);
3095 update_counter_times(child_counter);
3096 } else {
3097 struct perf_cpu_context *cpuctx;
3098 unsigned long flags;
3099 u64 perf_flags;
3100
3101 /*
3102 * Disable and unlink this counter.
3103 *
3104 * Be careful about zapping the list - IRQ/NMI context
3105 * could still be processing it:
3106 */
3107 local_irq_save(flags);
3108 perf_flags = hw_perf_save_disable();
3109
3110 cpuctx = &__get_cpu_var(perf_cpu_context);
3111
3112 group_sched_out(child_counter, cpuctx, child_ctx);
3113 update_counter_times(child_counter);
3114
3115 list_del_init(&child_counter->list_entry);
3116
3117 child_ctx->nr_counters--;
3118
3119 hw_perf_restore(perf_flags);
3120 local_irq_restore(flags);
3121 }
3122
3123 parent_counter = child_counter->parent;
3124 /*
3125 * It can happen that the parent exits first, and that its
3126 * counters are only still around due to the child's reference.
3127 * Those counters need to be zapped here - otherwise they linger.
3128 */
3129 if (parent_counter) {
3130 sync_child_counter(child_counter, parent_counter);
3131 list_for_each_entry_safe(sub, tmp, &child_counter->sibling_list,
3132 list_entry) {
3133 if (sub->parent) {
3134 sync_child_counter(sub, sub->parent);
3135 free_counter(sub);
3136 }
3137 }
3138 free_counter(child_counter);
3139 }
3140 }
3141
3142 /*
3143 * When a child task exits, feed back counter values to parent counters.
3144 *
3145 * Note: we may be running in child context, but the PID is not hashed
3146 * anymore so new counters will not be added.
3147 */
3148 void perf_counter_exit_task(struct task_struct *child)
3149 {
3150 struct perf_counter *child_counter, *tmp;
3151 struct perf_counter_context *child_ctx;
3152
3153 child_ctx = &child->perf_counter_ctx;
3154
3155 if (likely(!child_ctx->nr_counters))
3156 return;
3157
3158 list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list,
3159 list_entry)
3160 __perf_counter_exit_task(child, child_counter, child_ctx);
3161 }
3162
3163 /*
3164 * Initialize the perf_counter context in task_struct
3165 */
3166 void perf_counter_init_task(struct task_struct *child)
3167 {
3168 struct perf_counter_context *child_ctx, *parent_ctx;
3169 struct perf_counter *counter;
3170 struct task_struct *parent = current;
3171
3172 child_ctx = &child->perf_counter_ctx;
3173 parent_ctx = &parent->perf_counter_ctx;
3174
3175 __perf_counter_init_context(child_ctx, child);
3176
3177 /*
3178 * This is executed from the parent task context, so inherit
3179 * counters that have been marked for cloning:
3180 */
3181
3182 if (likely(!parent_ctx->nr_counters))
3183 return;
3184
3185 /*
3186 * Lock the parent list. No need to lock the child - not PID
3187 * hashed yet and not running, so nobody can access it.
3188 */
3189 mutex_lock(&parent_ctx->mutex);
3190
3191 /*
3192 * We don't have to disable NMIs - we are only looking at
3193 * the list, not manipulating it:
3194 */
3195 list_for_each_entry(counter, &parent_ctx->counter_list, list_entry) {
3196 if (!counter->hw_event.inherit)
3197 continue;
3198
3199 if (inherit_group(counter, parent,
3200 parent_ctx, child, child_ctx))
3201 break;
3202 }
3203
3204 mutex_unlock(&parent_ctx->mutex);
3205 }
3206
3207 static void __cpuinit perf_counter_init_cpu(int cpu)
3208 {
3209 struct perf_cpu_context *cpuctx;
3210
3211 cpuctx = &per_cpu(perf_cpu_context, cpu);
3212 __perf_counter_init_context(&cpuctx->ctx, NULL);
3213
3214 mutex_lock(&perf_resource_mutex);
3215 cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu;
3216 mutex_unlock(&perf_resource_mutex);
3217
3218 hw_perf_counter_setup(cpu);
3219 }
3220
3221 #ifdef CONFIG_HOTPLUG_CPU
3222 static void __perf_counter_exit_cpu(void *info)
3223 {
3224 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
3225 struct perf_counter_context *ctx = &cpuctx->ctx;
3226 struct perf_counter *counter, *tmp;
3227
3228 list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry)
3229 __perf_counter_remove_from_context(counter);
3230 }
3231 static void perf_counter_exit_cpu(int cpu)
3232 {
3233 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
3234 struct perf_counter_context *ctx = &cpuctx->ctx;
3235
3236 mutex_lock(&ctx->mutex);
3237 smp_call_function_single(cpu, __perf_counter_exit_cpu, NULL, 1);
3238 mutex_unlock(&ctx->mutex);
3239 }
3240 #else
3241 static inline void perf_counter_exit_cpu(int cpu) { }
3242 #endif
3243
3244 static int __cpuinit
3245 perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
3246 {
3247 unsigned int cpu = (long)hcpu;
3248
3249 switch (action) {
3250
3251 case CPU_UP_PREPARE:
3252 case CPU_UP_PREPARE_FROZEN:
3253 perf_counter_init_cpu(cpu);
3254 break;
3255
3256 case CPU_DOWN_PREPARE:
3257 case CPU_DOWN_PREPARE_FROZEN:
3258 perf_counter_exit_cpu(cpu);
3259 break;
3260
3261 default:
3262 break;
3263 }
3264
3265 return NOTIFY_OK;
3266 }
3267
3268 static struct notifier_block __cpuinitdata perf_cpu_nb = {
3269 .notifier_call = perf_cpu_notify,
3270 };
3271
3272 static int __init perf_counter_init(void)
3273 {
3274 perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
3275 (void *)(long)smp_processor_id());
3276 register_cpu_notifier(&perf_cpu_nb);
3277
3278 return 0;
3279 }
3280 early_initcall(perf_counter_init);
3281
3282 static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf)
3283 {
3284 return sprintf(buf, "%d\n", perf_reserved_percpu);
3285 }
3286
3287 static ssize_t
3288 perf_set_reserve_percpu(struct sysdev_class *class,
3289 const char *buf,
3290 size_t count)
3291 {
3292 struct perf_cpu_context *cpuctx;
3293 unsigned long val;
3294 int err, cpu, mpt;
3295
3296 err = strict_strtoul(buf, 10, &val);
3297 if (err)
3298 return err;
3299 if (val > perf_max_counters)
3300 return -EINVAL;
3301
3302 mutex_lock(&perf_resource_mutex);
3303 perf_reserved_percpu = val;
3304 for_each_online_cpu(cpu) {
3305 cpuctx = &per_cpu(perf_cpu_context, cpu);
3306 spin_lock_irq(&cpuctx->ctx.lock);
3307 mpt = min(perf_max_counters - cpuctx->ctx.nr_counters,
3308 perf_max_counters - perf_reserved_percpu);
3309 cpuctx->max_pertask = mpt;
3310 spin_unlock_irq(&cpuctx->ctx.lock);
3311 }
3312 mutex_unlock(&perf_resource_mutex);
3313
3314 return count;
3315 }
3316
3317 static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf)
3318 {
3319 return sprintf(buf, "%d\n", perf_overcommit);
3320 }
3321
3322 static ssize_t
3323 perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count)
3324 {
3325 unsigned long val;
3326 int err;
3327
3328 err = strict_strtoul(buf, 10, &val);
3329 if (err)
3330 return err;
3331 if (val > 1)
3332 return -EINVAL;
3333
3334 mutex_lock(&perf_resource_mutex);
3335 perf_overcommit = val;
3336 mutex_unlock(&perf_resource_mutex);
3337
3338 return count;
3339 }
3340
3341 static SYSDEV_CLASS_ATTR(
3342 reserve_percpu,
3343 0644,
3344 perf_show_reserve_percpu,
3345 perf_set_reserve_percpu
3346 );
3347
3348 static SYSDEV_CLASS_ATTR(
3349 overcommit,
3350 0644,
3351 perf_show_overcommit,
3352 perf_set_overcommit
3353 );
3354
3355 static struct attribute *perfclass_attrs[] = {
3356 &attr_reserve_percpu.attr,
3357 &attr_overcommit.attr,
3358 NULL
3359 };
3360
3361 static struct attribute_group perfclass_attr_group = {
3362 .attrs = perfclass_attrs,
3363 .name = "perf_counters",
3364 };
3365
3366 static int __init perf_counter_sysfs_init(void)
3367 {
3368 return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
3369 &perfclass_attr_group);
3370 }
3371 device_initcall(perf_counter_sysfs_init);