perf_counter: Optimize sched in/out of counters
kernel/perf_counter.c
1 /*
2 * Performance counter core code
3 *
4 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
6 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
7 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
8 *
9 * For licensing details see kernel-base/COPYING
10 */
11
12 #include <linux/fs.h>
13 #include <linux/mm.h>
14 #include <linux/cpu.h>
15 #include <linux/smp.h>
16 #include <linux/file.h>
17 #include <linux/poll.h>
18 #include <linux/sysfs.h>
19 #include <linux/ptrace.h>
20 #include <linux/percpu.h>
21 #include <linux/vmstat.h>
22 #include <linux/hardirq.h>
23 #include <linux/rculist.h>
24 #include <linux/uaccess.h>
25 #include <linux/syscalls.h>
26 #include <linux/anon_inodes.h>
27 #include <linux/kernel_stat.h>
28 #include <linux/perf_counter.h>
29 #include <linux/dcache.h>
30
31 #include <asm/irq_regs.h>
32
33 /*
34 * Each CPU has a list of per CPU counters:
35 */
36 DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);
37
38 int perf_max_counters __read_mostly = 1;
39 static int perf_reserved_percpu __read_mostly;
40 static int perf_overcommit __read_mostly = 1;
41
42 static atomic_t nr_counters __read_mostly;
43 static atomic_t nr_mmap_tracking __read_mostly;
44 static atomic_t nr_munmap_tracking __read_mostly;
45 static atomic_t nr_comm_tracking __read_mostly;
46
47 int sysctl_perf_counter_priv __read_mostly; /* do we need to be privileged */
48 int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */
49
50 /*
51 * Lock for (sysadmin-configurable) counter reservations:
52 */
53 static DEFINE_SPINLOCK(perf_resource_lock);
54
55 /*
56 * Architecture provided APIs - weak aliases:
57 */
58 extern __weak const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
59 {
60 return NULL;
61 }
62
63 void __weak hw_perf_disable(void) { barrier(); }
64 void __weak hw_perf_enable(void) { barrier(); }
65
66 void __weak hw_perf_counter_setup(int cpu) { barrier(); }
67 int __weak hw_perf_group_sched_in(struct perf_counter *group_leader,
68 struct perf_cpu_context *cpuctx,
69 struct perf_counter_context *ctx, int cpu)
70 {
71 return 0;
72 }
73
74 void __weak perf_counter_print_debug(void) { }
75
76 static DEFINE_PER_CPU(int, disable_count);
77
78 void __perf_disable(void)
79 {
80 __get_cpu_var(disable_count)++;
81 }
82
83 bool __perf_enable(void)
84 {
85 return !--__get_cpu_var(disable_count);
86 }
87
88 void perf_disable(void)
89 {
90 __perf_disable();
91 hw_perf_disable();
92 }
93
94 void perf_enable(void)
95 {
96 if (__perf_enable())
97 hw_perf_enable();
98 }
99
100 static void
101 list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
102 {
103 struct perf_counter *group_leader = counter->group_leader;
104
105 /*
106 * Depending on whether it is a standalone or sibling counter,
107 * add it straight to the context's counter list, or to the group
108 * leader's sibling list:
109 */
110 if (group_leader == counter)
111 list_add_tail(&counter->list_entry, &ctx->counter_list);
112 else {
113 list_add_tail(&counter->list_entry, &group_leader->sibling_list);
114 group_leader->nr_siblings++;
115 }
116
117 list_add_rcu(&counter->event_entry, &ctx->event_list);
118 ctx->nr_counters++;
119 }
120
121 static void
122 list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
123 {
124 struct perf_counter *sibling, *tmp;
125
126 ctx->nr_counters--;
127
128 list_del_init(&counter->list_entry);
129 list_del_rcu(&counter->event_entry);
130
131 if (counter->group_leader != counter)
132 counter->group_leader->nr_siblings--;
133
134 /*
135 * If this was a group counter with sibling counters then
136 * upgrade the siblings to singleton counters by adding them
137 * to the context list directly:
138 */
139 list_for_each_entry_safe(sibling, tmp,
140 &counter->sibling_list, list_entry) {
141
142 list_move_tail(&sibling->list_entry, &ctx->counter_list);
143 sibling->group_leader = sibling;
144 }
145 }
146
147 static void
148 counter_sched_out(struct perf_counter *counter,
149 struct perf_cpu_context *cpuctx,
150 struct perf_counter_context *ctx)
151 {
152 if (counter->state != PERF_COUNTER_STATE_ACTIVE)
153 return;
154
155 counter->state = PERF_COUNTER_STATE_INACTIVE;
156 counter->tstamp_stopped = ctx->time;
157 counter->pmu->disable(counter);
158 counter->oncpu = -1;
159
160 if (!is_software_counter(counter))
161 cpuctx->active_oncpu--;
162 ctx->nr_active--;
163 if (counter->hw_event.exclusive || !cpuctx->active_oncpu)
164 cpuctx->exclusive = 0;
165 }
166
167 static void
168 group_sched_out(struct perf_counter *group_counter,
169 struct perf_cpu_context *cpuctx,
170 struct perf_counter_context *ctx)
171 {
172 struct perf_counter *counter;
173
174 if (group_counter->state != PERF_COUNTER_STATE_ACTIVE)
175 return;
176
177 counter_sched_out(group_counter, cpuctx, ctx);
178
179 /*
180 * Schedule out siblings (if any):
181 */
182 list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
183 counter_sched_out(counter, cpuctx, ctx);
184
185 if (group_counter->hw_event.exclusive)
186 cpuctx->exclusive = 0;
187 }
188
189 /*
190 * Cross CPU call to remove a performance counter
191 *
192 * We disable the counter on the hardware level first. After that we
193 * remove it from the context list.
194 */
195 static void __perf_counter_remove_from_context(void *info)
196 {
197 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
198 struct perf_counter *counter = info;
199 struct perf_counter_context *ctx = counter->ctx;
200 unsigned long flags;
201
202 /*
203 * If this is a task context, we need to check whether it is
204 * the current task context of this cpu. If not it has been
205 * scheduled out before the smp call arrived.
206 */
207 if (ctx->task && cpuctx->task_ctx != ctx)
208 return;
209
210 spin_lock_irqsave(&ctx->lock, flags);
211
212 counter_sched_out(counter, cpuctx, ctx);
213
214 counter->task = NULL;
215
216 /*
217 * Protect the list operation against NMI by disabling the
218 * counters on a global level. NOP for non NMI based counters.
219 */
220 perf_disable();
221 list_del_counter(counter, ctx);
222 perf_enable();
223
224 if (!ctx->task) {
225 /*
226 * Allow more per task counters with respect to the
227 * reservation:
228 */
229 cpuctx->max_pertask =
230 min(perf_max_counters - ctx->nr_counters,
231 perf_max_counters - perf_reserved_percpu);
232 }
233
234 spin_unlock_irqrestore(&ctx->lock, flags);
235 }
236
237
238 /*
239 * Remove the counter from a task's (or a CPU's) list of counters.
240 *
241 * Must be called with counter->mutex and ctx->mutex held.
242 *
243 * CPU counters are removed with a smp call. For task counters we only
244 * call when the task is on a CPU.
245 */
246 static void perf_counter_remove_from_context(struct perf_counter *counter)
247 {
248 struct perf_counter_context *ctx = counter->ctx;
249 struct task_struct *task = ctx->task;
250
251 if (!task) {
252 /*
253 * Per cpu counters are removed via an smp call and
254 * the removal is always successful.
255 */
256 smp_call_function_single(counter->cpu,
257 __perf_counter_remove_from_context,
258 counter, 1);
259 return;
260 }
261
262 retry:
263 task_oncpu_function_call(task, __perf_counter_remove_from_context,
264 counter);
265
266 spin_lock_irq(&ctx->lock);
267 /*
268 * If the context is active we need to retry the smp call.
269 */
270 if (ctx->nr_active && !list_empty(&counter->list_entry)) {
271 spin_unlock_irq(&ctx->lock);
272 goto retry;
273 }
274
275 /*
276 * The lock prevents this context from being scheduled in, so we
277 * can remove the counter safely if the call above did not
278 * succeed.
279 */
280 if (!list_empty(&counter->list_entry)) {
281 list_del_counter(counter, ctx);
282 counter->task = NULL;
283 }
284 spin_unlock_irq(&ctx->lock);
285 }
286
287 static inline u64 perf_clock(void)
288 {
289 return cpu_clock(smp_processor_id());
290 }
291
292 /*
293 * Update the record of the current time in a context.
294 */
295 static void update_context_time(struct perf_counter_context *ctx)
296 {
297 u64 now = perf_clock();
298
299 ctx->time += now - ctx->timestamp;
300 ctx->timestamp = now;
301 }
302
303 /*
304 * Update the total_time_enabled and total_time_running fields for a counter.
305 */
306 static void update_counter_times(struct perf_counter *counter)
307 {
308 struct perf_counter_context *ctx = counter->ctx;
309 u64 run_end;
310
311 if (counter->state < PERF_COUNTER_STATE_INACTIVE)
312 return;
313
314 counter->total_time_enabled = ctx->time - counter->tstamp_enabled;
315
316 if (counter->state == PERF_COUNTER_STATE_INACTIVE)
317 run_end = counter->tstamp_stopped;
318 else
319 run_end = ctx->time;
320
321 counter->total_time_running = run_end - counter->tstamp_running;
322 }
323
324 /*
325 * Update total_time_enabled and total_time_running for all counters in a group.
326 */
327 static void update_group_times(struct perf_counter *leader)
328 {
329 struct perf_counter *counter;
330
331 update_counter_times(leader);
332 list_for_each_entry(counter, &leader->sibling_list, list_entry)
333 update_counter_times(counter);
334 }
335
336 /*
337 * Cross CPU call to disable a performance counter
338 */
339 static void __perf_counter_disable(void *info)
340 {
341 struct perf_counter *counter = info;
342 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
343 struct perf_counter_context *ctx = counter->ctx;
344 unsigned long flags;
345
346 /*
347 * If this is a per-task counter, need to check whether this
348 * counter's task is the current task on this cpu.
349 */
350 if (ctx->task && cpuctx->task_ctx != ctx)
351 return;
352
353 spin_lock_irqsave(&ctx->lock, flags);
354
355 /*
356 * If the counter is on, turn it off.
357 * If it is in error state, leave it in error state.
358 */
359 if (counter->state >= PERF_COUNTER_STATE_INACTIVE) {
360 update_context_time(ctx);
361 update_counter_times(counter);
362 if (counter == counter->group_leader)
363 group_sched_out(counter, cpuctx, ctx);
364 else
365 counter_sched_out(counter, cpuctx, ctx);
366 counter->state = PERF_COUNTER_STATE_OFF;
367 }
368
369 spin_unlock_irqrestore(&ctx->lock, flags);
370 }
371
372 /*
373 * Disable a counter.
374 */
375 static void perf_counter_disable(struct perf_counter *counter)
376 {
377 struct perf_counter_context *ctx = counter->ctx;
378 struct task_struct *task = ctx->task;
379
380 if (!task) {
381 /*
382 * Disable the counter on the cpu that it's on
383 */
384 smp_call_function_single(counter->cpu, __perf_counter_disable,
385 counter, 1);
386 return;
387 }
388
389 retry:
390 task_oncpu_function_call(task, __perf_counter_disable, counter);
391
392 spin_lock_irq(&ctx->lock);
393 /*
394 * If the counter is still active, we need to retry the cross-call.
395 */
396 if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
397 spin_unlock_irq(&ctx->lock);
398 goto retry;
399 }
400
401 /*
402 * Since we have the lock this context can't be scheduled
403 * in, so we can change the state safely.
404 */
405 if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
406 update_counter_times(counter);
407 counter->state = PERF_COUNTER_STATE_OFF;
408 }
409
410 spin_unlock_irq(&ctx->lock);
411 }
412
413 static int
414 counter_sched_in(struct perf_counter *counter,
415 struct perf_cpu_context *cpuctx,
416 struct perf_counter_context *ctx,
417 int cpu)
418 {
419 if (counter->state <= PERF_COUNTER_STATE_OFF)
420 return 0;
421
422 counter->state = PERF_COUNTER_STATE_ACTIVE;
423 counter->oncpu = cpu; /* TODO: put 'cpu' into cpuctx->cpu */
424 /*
425 * The new state must be visible before we turn it on in the hardware:
426 */
427 smp_wmb();
428
429 if (counter->pmu->enable(counter)) {
430 counter->state = PERF_COUNTER_STATE_INACTIVE;
431 counter->oncpu = -1;
432 return -EAGAIN;
433 }
434
435 counter->tstamp_running += ctx->time - counter->tstamp_stopped;
436
437 if (!is_software_counter(counter))
438 cpuctx->active_oncpu++;
439 ctx->nr_active++;
440
441 if (counter->hw_event.exclusive)
442 cpuctx->exclusive = 1;
443
444 return 0;
445 }
446
447 static int
448 group_sched_in(struct perf_counter *group_counter,
449 struct perf_cpu_context *cpuctx,
450 struct perf_counter_context *ctx,
451 int cpu)
452 {
453 struct perf_counter *counter, *partial_group;
454 int ret;
455
456 if (group_counter->state == PERF_COUNTER_STATE_OFF)
457 return 0;
458
459 ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
460 if (ret)
461 return ret < 0 ? ret : 0;
462
463 group_counter->prev_state = group_counter->state;
464 if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
465 return -EAGAIN;
466
467 /*
468 * Schedule in siblings as one group (if any):
469 */
470 list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
471 counter->prev_state = counter->state;
472 if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
473 partial_group = counter;
474 goto group_error;
475 }
476 }
477
478 return 0;
479
480 group_error:
481 /*
482 * Groups can be scheduled in as one unit only, so undo any
483 * partial group before returning:
484 */
485 list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
486 if (counter == partial_group)
487 break;
488 counter_sched_out(counter, cpuctx, ctx);
489 }
490 counter_sched_out(group_counter, cpuctx, ctx);
491
492 return -EAGAIN;
493 }
494
495 /*
496 * Return 1 for a group consisting entirely of software counters,
497 * 0 if the group contains any hardware counters.
498 */
499 static int is_software_only_group(struct perf_counter *leader)
500 {
501 struct perf_counter *counter;
502
503 if (!is_software_counter(leader))
504 return 0;
505
506 list_for_each_entry(counter, &leader->sibling_list, list_entry)
507 if (!is_software_counter(counter))
508 return 0;
509
510 return 1;
511 }
512
513 /*
514 * Work out whether we can put this counter group on the CPU now.
515 */
516 static int group_can_go_on(struct perf_counter *counter,
517 struct perf_cpu_context *cpuctx,
518 int can_add_hw)
519 {
520 /*
521 * Groups consisting entirely of software counters can always go on.
522 */
523 if (is_software_only_group(counter))
524 return 1;
525 /*
526 * If an exclusive group is already on, no other hardware
527 * counters can go on.
528 */
529 if (cpuctx->exclusive)
530 return 0;
531 /*
532 * If this group is exclusive and there are already
533 * counters on the CPU, it can't go on.
534 */
535 if (counter->hw_event.exclusive && cpuctx->active_oncpu)
536 return 0;
537 /*
538 * Otherwise, try to add it if all previous groups were able
539 * to go on.
540 */
541 return can_add_hw;
542 }
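/*
 * Sketch of the attributes this decision is driven by, as a counter
 * creator would set them.  The struct name is assumed from this tree's
 * perf_counter headers and the creation syscall is not part of this file:
 *
 *	struct perf_counter_hw_event hw_event = {
 *		.pinned    = 1,		// must always be on the PMU, else goes to ERROR
 *		.exclusive = 1,		// no other hw counters while this group is on
 *	};
 */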
543
544 static void add_counter_to_ctx(struct perf_counter *counter,
545 struct perf_counter_context *ctx)
546 {
547 list_add_counter(counter, ctx);
548 counter->prev_state = PERF_COUNTER_STATE_OFF;
549 counter->tstamp_enabled = ctx->time;
550 counter->tstamp_running = ctx->time;
551 counter->tstamp_stopped = ctx->time;
552 }
553
554 /*
555 * Cross CPU call to install and enable a performance counter
556 */
557 static void __perf_install_in_context(void *info)
558 {
559 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
560 struct perf_counter *counter = info;
561 struct perf_counter_context *ctx = counter->ctx;
562 struct perf_counter *leader = counter->group_leader;
563 int cpu = smp_processor_id();
564 unsigned long flags;
565 int err;
566
567 /*
568 * If this is a task context, we need to check whether it is
569 * the current task context of this cpu. If not it has been
570 * scheduled out before the smp call arrived.
571 */
572 if (ctx->task && cpuctx->task_ctx != ctx)
573 return;
574
575 spin_lock_irqsave(&ctx->lock, flags);
576 update_context_time(ctx);
577
578 /*
579 * Protect the list operation against NMI by disabling the
580 * counters on a global level. NOP for non NMI based counters.
581 */
582 perf_disable();
583
584 add_counter_to_ctx(counter, ctx);
585
586 /*
587 * Don't put the counter on if it is disabled or if
588 * it is in a group and the group isn't on.
589 */
590 if (counter->state != PERF_COUNTER_STATE_INACTIVE ||
591 (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE))
592 goto unlock;
593
594 /*
595 * An exclusive counter can't go on if there are already active
596 * hardware counters, and no hardware counter can go on if there
597 * is already an exclusive counter on.
598 */
599 if (!group_can_go_on(counter, cpuctx, 1))
600 err = -EEXIST;
601 else
602 err = counter_sched_in(counter, cpuctx, ctx, cpu);
603
604 if (err) {
605 /*
606 * This counter couldn't go on. If it is in a group
607 * then we have to pull the whole group off.
608 * If the counter group is pinned then put it in error state.
609 */
610 if (leader != counter)
611 group_sched_out(leader, cpuctx, ctx);
612 if (leader->hw_event.pinned) {
613 update_group_times(leader);
614 leader->state = PERF_COUNTER_STATE_ERROR;
615 }
616 }
617
618 if (!err && !ctx->task && cpuctx->max_pertask)
619 cpuctx->max_pertask--;
620
621 unlock:
622 perf_enable();
623
624 spin_unlock_irqrestore(&ctx->lock, flags);
625 }
626
627 /*
628 * Attach a performance counter to a context
629 *
630 * First we add the counter to the list with the hardware enable bit
631 * in counter->hw_config cleared.
632 *
633 * If the counter is attached to a task which is on a CPU we use a smp
634 * call to enable it in the task context. The task might have been
635 * scheduled away, but we check this in the smp call again.
636 *
637 * Must be called with ctx->mutex held.
638 */
639 static void
640 perf_install_in_context(struct perf_counter_context *ctx,
641 struct perf_counter *counter,
642 int cpu)
643 {
644 struct task_struct *task = ctx->task;
645
646 if (!task) {
647 /*
648 * Per cpu counters are installed via an smp call and
649 * the install is always successful.
650 */
651 smp_call_function_single(cpu, __perf_install_in_context,
652 counter, 1);
653 return;
654 }
655
656 counter->task = task;
657 retry:
658 task_oncpu_function_call(task, __perf_install_in_context,
659 counter);
660
661 spin_lock_irq(&ctx->lock);
662 /*
663 * If the context is active we need to retry the smp call.
664 */
665 if (ctx->is_active && list_empty(&counter->list_entry)) {
666 spin_unlock_irq(&ctx->lock);
667 goto retry;
668 }
669
670 /*
671 * The lock prevents this context from being scheduled in, so we
672 * can add the counter safely if the call above did not
673 * succeed.
674 */
675 if (list_empty(&counter->list_entry))
676 add_counter_to_ctx(counter, ctx);
677 spin_unlock_irq(&ctx->lock);
678 }
679
680 /*
681 * Cross CPU call to enable a performance counter
682 */
683 static void __perf_counter_enable(void *info)
684 {
685 struct perf_counter *counter = info;
686 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
687 struct perf_counter_context *ctx = counter->ctx;
688 struct perf_counter *leader = counter->group_leader;
689 unsigned long flags;
690 int err;
691
692 /*
693 * If this is a per-task counter, need to check whether this
694 * counter's task is the current task on this cpu.
695 */
696 if (ctx->task && cpuctx->task_ctx != ctx)
697 return;
698
699 spin_lock_irqsave(&ctx->lock, flags);
700 update_context_time(ctx);
701
702 counter->prev_state = counter->state;
703 if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
704 goto unlock;
705 counter->state = PERF_COUNTER_STATE_INACTIVE;
706 counter->tstamp_enabled = ctx->time - counter->total_time_enabled;
707
708 /*
709 * If the counter is in a group and isn't the group leader,
710 * then don't put it on unless the group is on.
711 */
712 if (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE)
713 goto unlock;
714
715 if (!group_can_go_on(counter, cpuctx, 1)) {
716 err = -EEXIST;
717 } else {
718 perf_disable();
719 if (counter == leader)
720 err = group_sched_in(counter, cpuctx, ctx,
721 smp_processor_id());
722 else
723 err = counter_sched_in(counter, cpuctx, ctx,
724 smp_processor_id());
725 perf_enable();
726 }
727
728 if (err) {
729 /*
730 * If this counter can't go on and it's part of a
731 * group, then the whole group has to come off.
732 */
733 if (leader != counter)
734 group_sched_out(leader, cpuctx, ctx);
735 if (leader->hw_event.pinned) {
736 update_group_times(leader);
737 leader->state = PERF_COUNTER_STATE_ERROR;
738 }
739 }
740
741 unlock:
742 spin_unlock_irqrestore(&ctx->lock, flags);
743 }
744
745 /*
746 * Enable a counter.
747 */
748 static void perf_counter_enable(struct perf_counter *counter)
749 {
750 struct perf_counter_context *ctx = counter->ctx;
751 struct task_struct *task = ctx->task;
752
753 if (!task) {
754 /*
755 * Enable the counter on the cpu that it's on
756 */
757 smp_call_function_single(counter->cpu, __perf_counter_enable,
758 counter, 1);
759 return;
760 }
761
762 spin_lock_irq(&ctx->lock);
763 if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
764 goto out;
765
766 /*
767 * If the counter is in error state, clear that first.
768 * That way, if we see the counter in error state below, we
769 * know that it has gone back into error state, as distinct
770 * from the task having been scheduled away before the
771 * cross-call arrived.
772 */
773 if (counter->state == PERF_COUNTER_STATE_ERROR)
774 counter->state = PERF_COUNTER_STATE_OFF;
775
776 retry:
777 spin_unlock_irq(&ctx->lock);
778 task_oncpu_function_call(task, __perf_counter_enable, counter);
779
780 spin_lock_irq(&ctx->lock);
781
782 /*
783 * If the context is active and the counter is still off,
784 * we need to retry the cross-call.
785 */
786 if (ctx->is_active && counter->state == PERF_COUNTER_STATE_OFF)
787 goto retry;
788
789 /*
790 * Since we have the lock this context can't be scheduled
791 * in, so we can change the state safely.
792 */
793 if (counter->state == PERF_COUNTER_STATE_OFF) {
794 counter->state = PERF_COUNTER_STATE_INACTIVE;
795 counter->tstamp_enabled =
796 ctx->time - counter->total_time_enabled;
797 }
798 out:
799 spin_unlock_irq(&ctx->lock);
800 }
801
802 static int perf_counter_refresh(struct perf_counter *counter, int refresh)
803 {
804 /*
805 * not supported on inherited counters
806 */
807 if (counter->hw_event.inherit)
808 return -EINVAL;
809
810 atomic_add(refresh, &counter->event_limit);
811 perf_counter_enable(counter);
812
813 return 0;
814 }
815
816 void __perf_counter_sched_out(struct perf_counter_context *ctx,
817 struct perf_cpu_context *cpuctx)
818 {
819 struct perf_counter *counter;
820
821 spin_lock(&ctx->lock);
822 ctx->is_active = 0;
823 if (likely(!ctx->nr_counters))
824 goto out;
825 update_context_time(ctx);
826
827 perf_disable();
828 if (ctx->nr_active) {
829 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
830 if (counter != counter->group_leader)
831 counter_sched_out(counter, cpuctx, ctx);
832 else
833 group_sched_out(counter, cpuctx, ctx);
834 }
835 }
836 perf_enable();
837 out:
838 spin_unlock(&ctx->lock);
839 }
840
841 /*
842 * Called from scheduler to remove the counters of the current task,
843 * with interrupts disabled.
844 *
845 * We stop each counter and update the counter value in counter->count.
846 *
847 * This does not protect us against NMI, but disable()
848 * sets the disabled bit in the control field of counter _before_
849 * accessing the counter control register. If an NMI hits, then it will
850 * not restart the counter.
851 */
852 void perf_counter_task_sched_out(struct task_struct *task, int cpu)
853 {
854 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
855 struct perf_counter_context *ctx = &task->perf_counter_ctx;
856 struct pt_regs *regs;
857
858 if (likely(!cpuctx->task_ctx))
859 return;
860
861 update_context_time(ctx);
862
863 regs = task_pt_regs(task);
864 perf_swcounter_event(PERF_COUNT_CONTEXT_SWITCHES, 1, 1, regs, 0);
865 __perf_counter_sched_out(ctx, cpuctx);
866
867 cpuctx->task_ctx = NULL;
868 }
869
870 static void __perf_counter_task_sched_out(struct perf_counter_context *ctx)
871 {
872 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
873
874 __perf_counter_sched_out(ctx, cpuctx);
875 cpuctx->task_ctx = NULL;
876 }
877
878 static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx)
879 {
880 __perf_counter_sched_out(&cpuctx->ctx, cpuctx);
881 }
882
883 static void
884 __perf_counter_sched_in(struct perf_counter_context *ctx,
885 struct perf_cpu_context *cpuctx, int cpu)
886 {
887 struct perf_counter *counter;
888 int can_add_hw = 1;
889
890 spin_lock(&ctx->lock);
891 ctx->is_active = 1;
892 if (likely(!ctx->nr_counters))
893 goto out;
894
895 ctx->timestamp = perf_clock();
896
897 perf_disable();
898
899 /*
900 * First go through the list and put on any pinned groups
901 * in order to give them the best chance of going on.
902 */
903 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
904 if (counter->state <= PERF_COUNTER_STATE_OFF ||
905 !counter->hw_event.pinned)
906 continue;
907 if (counter->cpu != -1 && counter->cpu != cpu)
908 continue;
909
910 if (counter != counter->group_leader)
911 counter_sched_in(counter, cpuctx, ctx, cpu);
912 else {
913 if (group_can_go_on(counter, cpuctx, 1))
914 group_sched_in(counter, cpuctx, ctx, cpu);
915 }
916
917 /*
918 * If this pinned group hasn't been scheduled,
919 * put it in error state.
920 */
921 if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
922 update_group_times(counter);
923 counter->state = PERF_COUNTER_STATE_ERROR;
924 }
925 }
926
927 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
928 /*
929 * Ignore counters in OFF or ERROR state, and
930 * ignore pinned counters since we did them already.
931 */
932 if (counter->state <= PERF_COUNTER_STATE_OFF ||
933 counter->hw_event.pinned)
934 continue;
935
936 /*
937 * Listen to the 'cpu' scheduling filter constraint
938 * of counters:
939 */
940 if (counter->cpu != -1 && counter->cpu != cpu)
941 continue;
942
943 if (counter != counter->group_leader) {
944 if (counter_sched_in(counter, cpuctx, ctx, cpu))
945 can_add_hw = 0;
946 } else {
947 if (group_can_go_on(counter, cpuctx, can_add_hw)) {
948 if (group_sched_in(counter, cpuctx, ctx, cpu))
949 can_add_hw = 0;
950 }
951 }
952 }
953 perf_enable();
954 out:
955 spin_unlock(&ctx->lock);
956 }
957
958 /*
959 * Called from scheduler to add the counters of the current task
960 * with interrupts disabled.
961 *
962 * We restore the counter value and then enable it.
963 *
964 * This does not protect us against NMI, but enable()
965 * sets the enabled bit in the control field of counter _before_
966 * accessing the counter control register. If an NMI hits, then it will
967 * keep the counter running.
968 */
969 void perf_counter_task_sched_in(struct task_struct *task, int cpu)
970 {
971 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
972 struct perf_counter_context *ctx = &task->perf_counter_ctx;
973
974 __perf_counter_sched_in(ctx, cpuctx, cpu);
975 cpuctx->task_ctx = ctx;
976 }
977
978 static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
979 {
980 struct perf_counter_context *ctx = &cpuctx->ctx;
981
982 __perf_counter_sched_in(ctx, cpuctx, cpu);
983 }
984
985 int perf_counter_task_disable(void)
986 {
987 struct task_struct *curr = current;
988 struct perf_counter_context *ctx = &curr->perf_counter_ctx;
989 struct perf_counter *counter;
990 unsigned long flags;
991
992 if (likely(!ctx->nr_counters))
993 return 0;
994
995 local_irq_save(flags);
996
997 __perf_counter_task_sched_out(ctx);
998
999 spin_lock(&ctx->lock);
1000
1001 /*
1002 * Disable all the counters:
1003 */
1004 perf_disable();
1005
1006 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
1007 if (counter->state != PERF_COUNTER_STATE_ERROR) {
1008 update_group_times(counter);
1009 counter->state = PERF_COUNTER_STATE_OFF;
1010 }
1011 }
1012
1013 perf_enable();
1014
1015 spin_unlock_irqrestore(&ctx->lock, flags);
1016
1017 return 0;
1018 }
1019
1020 int perf_counter_task_enable(void)
1021 {
1022 struct task_struct *curr = current;
1023 struct perf_counter_context *ctx = &curr->perf_counter_ctx;
1024 struct perf_counter *counter;
1025 unsigned long flags;
1026 int cpu;
1027
1028 if (likely(!ctx->nr_counters))
1029 return 0;
1030
1031 local_irq_save(flags);
1032 cpu = smp_processor_id();
1033
1034 __perf_counter_task_sched_out(ctx);
1035
1036 spin_lock(&ctx->lock);
1037
1038 /*
1039 * Disable the PMU while we re-enable the counters:
1040 */
1041 perf_disable();
1042
1043 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
1044 if (counter->state > PERF_COUNTER_STATE_OFF)
1045 continue;
1046 counter->state = PERF_COUNTER_STATE_INACTIVE;
1047 counter->tstamp_enabled =
1048 ctx->time - counter->total_time_enabled;
1049 counter->hw_event.disabled = 0;
1050 }
1051 perf_enable();
1052
1053 spin_unlock(&ctx->lock);
1054
1055 perf_counter_task_sched_in(curr, cpu);
1056
1057 local_irq_restore(flags);
1058
1059 return 0;
1060 }
1061
1062 static void perf_log_period(struct perf_counter *counter, u64 period);
1063
1064 static void perf_adjust_freq(struct perf_counter_context *ctx)
1065 {
1066 struct perf_counter *counter;
1067 u64 irq_period;
1068 u64 events, period;
1069 s64 delta;
1070
1071 spin_lock(&ctx->lock);
1072 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
1073 if (counter->state != PERF_COUNTER_STATE_ACTIVE)
1074 continue;
1075
1076 if (!counter->hw_event.freq || !counter->hw_event.irq_freq)
1077 continue;
1078
1079 events = HZ * counter->hw.interrupts * counter->hw.irq_period;
1080 period = div64_u64(events, counter->hw_event.irq_freq);
1081
1082 delta = (s64)(1 + period - counter->hw.irq_period);
1083 delta >>= 1;
1084
1085 irq_period = counter->hw.irq_period + delta;
1086
1087 if (!irq_period)
1088 irq_period = 1;
1089
1090 perf_log_period(counter, irq_period);
1091
1092 counter->hw.irq_period = irq_period;
1093 counter->hw.interrupts = 0;
1094 }
1095 spin_unlock(&ctx->lock);
1096 }
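/*
 * Worked example of the adjustment above (a sketch; HZ, the requested
 * frequency and the starting period are made-up numbers): with HZ=1000,
 * hw_event.irq_freq=1000 (desired samples/sec), hw.irq_period=10000 and
 * 2 interrupts seen during the last tick:
 *
 *	events     = 1000 * 2 * 10000         = 20,000,000
 *	period     = 20,000,000 / 1000        = 20,000
 *	delta      = (1 + 20,000 - 10,000)/2  = 5,000
 *	irq_period = 10,000 + 5,000           = 15,000
 *
 * i.e. the counter fired twice as often as requested, so the period is
 * moved halfway towards the value that would have produced the requested
 * rate, which damps oscillations.
 */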
1097
1098 /*
1099 * Round-robin a context's counters:
1100 */
1101 static void rotate_ctx(struct perf_counter_context *ctx)
1102 {
1103 struct perf_counter *counter;
1104
1105 if (!ctx->nr_counters)
1106 return;
1107
1108 spin_lock(&ctx->lock);
1109 /*
1110 * Rotate the first entry last (works just fine for group counters too):
1111 */
1112 perf_disable();
1113 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
1114 list_move_tail(&counter->list_entry, &ctx->counter_list);
1115 break;
1116 }
1117 perf_enable();
1118
1119 spin_unlock(&ctx->lock);
1120 }
1121
1122 void perf_counter_task_tick(struct task_struct *curr, int cpu)
1123 {
1124 struct perf_cpu_context *cpuctx;
1125 struct perf_counter_context *ctx;
1126
1127 if (!atomic_read(&nr_counters))
1128 return;
1129
1130 cpuctx = &per_cpu(perf_cpu_context, cpu);
1131 ctx = &curr->perf_counter_ctx;
1132
1133 perf_adjust_freq(&cpuctx->ctx);
1134 perf_adjust_freq(ctx);
1135
1136 perf_counter_cpu_sched_out(cpuctx);
1137 __perf_counter_task_sched_out(ctx);
1138
1139 rotate_ctx(&cpuctx->ctx);
1140 rotate_ctx(ctx);
1141
1142 perf_counter_cpu_sched_in(cpuctx, cpu);
1143 perf_counter_task_sched_in(curr, cpu);
1144 }
1145
1146 /*
1147 * Cross CPU call to read the hardware counter
1148 */
1149 static void __read(void *info)
1150 {
1151 struct perf_counter *counter = info;
1152 struct perf_counter_context *ctx = counter->ctx;
1153 unsigned long flags;
1154
1155 local_irq_save(flags);
1156 if (ctx->is_active)
1157 update_context_time(ctx);
1158 counter->pmu->read(counter);
1159 update_counter_times(counter);
1160 local_irq_restore(flags);
1161 }
1162
1163 static u64 perf_counter_read(struct perf_counter *counter)
1164 {
1165 /*
1166 * If counter is enabled and currently active on a CPU, update the
1167 * value in the counter structure:
1168 */
1169 if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
1170 smp_call_function_single(counter->oncpu,
1171 __read, counter, 1);
1172 } else if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
1173 update_counter_times(counter);
1174 }
1175
1176 return atomic64_read(&counter->count);
1177 }
1178
1179 static void put_context(struct perf_counter_context *ctx)
1180 {
1181 if (ctx->task)
1182 put_task_struct(ctx->task);
1183 }
1184
1185 static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
1186 {
1187 struct perf_cpu_context *cpuctx;
1188 struct perf_counter_context *ctx;
1189 struct task_struct *task;
1190
1191 /*
1192 * If cpu is not a wildcard then this is a percpu counter:
1193 */
1194 if (cpu != -1) {
1195 /* Must be root to operate on a CPU counter: */
1196 if (sysctl_perf_counter_priv && !capable(CAP_SYS_ADMIN))
1197 return ERR_PTR(-EACCES);
1198
1199 if (cpu < 0 || cpu > num_possible_cpus())
1200 return ERR_PTR(-EINVAL);
1201
1202 /*
1203 * We could be clever and allow attaching a counter to an
1204 * offline CPU and activate it when the CPU comes up, but
1205 * that's for later.
1206 */
1207 if (!cpu_isset(cpu, cpu_online_map))
1208 return ERR_PTR(-ENODEV);
1209
1210 cpuctx = &per_cpu(perf_cpu_context, cpu);
1211 ctx = &cpuctx->ctx;
1212
1213 return ctx;
1214 }
1215
1216 rcu_read_lock();
1217 if (!pid)
1218 task = current;
1219 else
1220 task = find_task_by_vpid(pid);
1221 if (task)
1222 get_task_struct(task);
1223 rcu_read_unlock();
1224
1225 if (!task)
1226 return ERR_PTR(-ESRCH);
1227
1228 ctx = &task->perf_counter_ctx;
1229 ctx->task = task;
1230
1231 /* Reuse ptrace permission checks for now. */
1232 if (!ptrace_may_access(task, PTRACE_MODE_READ)) {
1233 put_context(ctx);
1234 return ERR_PTR(-EACCES);
1235 }
1236
1237 return ctx;
1238 }
1239
1240 static void free_counter_rcu(struct rcu_head *head)
1241 {
1242 struct perf_counter *counter;
1243
1244 counter = container_of(head, struct perf_counter, rcu_head);
1245 kfree(counter);
1246 }
1247
1248 static void perf_pending_sync(struct perf_counter *counter);
1249
1250 static void free_counter(struct perf_counter *counter)
1251 {
1252 perf_pending_sync(counter);
1253
1254 atomic_dec(&nr_counters);
1255 if (counter->hw_event.mmap)
1256 atomic_dec(&nr_mmap_tracking);
1257 if (counter->hw_event.munmap)
1258 atomic_dec(&nr_munmap_tracking);
1259 if (counter->hw_event.comm)
1260 atomic_dec(&nr_comm_tracking);
1261
1262 if (counter->destroy)
1263 counter->destroy(counter);
1264
1265 call_rcu(&counter->rcu_head, free_counter_rcu);
1266 }
1267
1268 /*
1269 * Called when the last reference to the file is gone.
1270 */
1271 static int perf_release(struct inode *inode, struct file *file)
1272 {
1273 struct perf_counter *counter = file->private_data;
1274 struct perf_counter_context *ctx = counter->ctx;
1275
1276 file->private_data = NULL;
1277
1278 mutex_lock(&ctx->mutex);
1279 mutex_lock(&counter->mutex);
1280
1281 perf_counter_remove_from_context(counter);
1282
1283 mutex_unlock(&counter->mutex);
1284 mutex_unlock(&ctx->mutex);
1285
1286 free_counter(counter);
1287 put_context(ctx);
1288
1289 return 0;
1290 }
1291
1292 /*
1293 * Read the performance counter - simple non blocking version for now
1294 */
1295 static ssize_t
1296 perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
1297 {
1298 u64 values[3];
1299 int n;
1300
1301 /*
1302 * Return end-of-file for a read on a counter that is in
1303 * error state (i.e. because it was pinned but it couldn't be
1304 * scheduled on to the CPU at some point).
1305 */
1306 if (counter->state == PERF_COUNTER_STATE_ERROR)
1307 return 0;
1308
1309 mutex_lock(&counter->mutex);
1310 values[0] = perf_counter_read(counter);
1311 n = 1;
1312 if (counter->hw_event.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1313 values[n++] = counter->total_time_enabled +
1314 atomic64_read(&counter->child_total_time_enabled);
1315 if (counter->hw_event.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1316 values[n++] = counter->total_time_running +
1317 atomic64_read(&counter->child_total_time_running);
1318 mutex_unlock(&counter->mutex);
1319
1320 if (count < n * sizeof(u64))
1321 return -EINVAL;
1322 count = n * sizeof(u64);
1323
1324 if (copy_to_user(buf, values, count))
1325 return -EFAULT;
1326
1327 return count;
1328 }
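/*
 * A minimal user-space sketch of the read() layout produced above.
 * 'counter_fd' is hypothetical -- it would come from the counter
 * creation syscall, which is not part of this file:
 *
 *	#include <stdint.h>
 *	#include <unistd.h>
 *
 *	uint64_t buf[3];
 *	ssize_t n = read(counter_fd, buf, sizeof(buf));
 *
 *	// buf[0] is always the counter value; buf[1] and buf[2] are only
 *	// present if PERF_FORMAT_TOTAL_TIME_ENABLED / _RUNNING were set
 *	// in hw_event.read_format, and n tells us how many were written.
 *
 * A return value of 0 means the counter is in error state (see above).
 */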
1329
1330 static ssize_t
1331 perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
1332 {
1333 struct perf_counter *counter = file->private_data;
1334
1335 return perf_read_hw(counter, buf, count);
1336 }
1337
1338 static unsigned int perf_poll(struct file *file, poll_table *wait)
1339 {
1340 struct perf_counter *counter = file->private_data;
1341 struct perf_mmap_data *data;
1342 unsigned int events = POLLHUP;
1343
1344 rcu_read_lock();
1345 data = rcu_dereference(counter->data);
1346 if (data)
1347 events = atomic_xchg(&data->poll, 0);
1348 rcu_read_unlock();
1349
1350 poll_wait(file, &counter->waitq, wait);
1351
1352 return events;
1353 }
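/*
 * Matching user-space side, as a sketch ('counter_fd' is hypothetical and
 * the counter is assumed to have been mmap()ed so that data->poll gets
 * set when output is produced):
 *
 *	#include <poll.h>
 *
 *	struct pollfd pfd = { .fd = counter_fd, .events = POLLIN };
 *	poll(&pfd, 1, -1);	// wakes up when new output is signalled
 */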
1354
1355 static void perf_counter_reset(struct perf_counter *counter)
1356 {
1357 (void)perf_counter_read(counter);
1358 atomic64_set(&counter->count, 0);
1359 perf_counter_update_userpage(counter);
1360 }
1361
1362 static void perf_counter_for_each_sibling(struct perf_counter *counter,
1363 void (*func)(struct perf_counter *))
1364 {
1365 struct perf_counter_context *ctx = counter->ctx;
1366 struct perf_counter *sibling;
1367
1368 spin_lock_irq(&ctx->lock);
1369 counter = counter->group_leader;
1370
1371 func(counter);
1372 list_for_each_entry(sibling, &counter->sibling_list, list_entry)
1373 func(sibling);
1374 spin_unlock_irq(&ctx->lock);
1375 }
1376
1377 static void perf_counter_for_each_child(struct perf_counter *counter,
1378 void (*func)(struct perf_counter *))
1379 {
1380 struct perf_counter *child;
1381
1382 mutex_lock(&counter->mutex);
1383 func(counter);
1384 list_for_each_entry(child, &counter->child_list, child_list)
1385 func(child);
1386 mutex_unlock(&counter->mutex);
1387 }
1388
1389 static void perf_counter_for_each(struct perf_counter *counter,
1390 void (*func)(struct perf_counter *))
1391 {
1392 struct perf_counter *child;
1393
1394 mutex_lock(&counter->mutex);
1395 perf_counter_for_each_sibling(counter, func);
1396 list_for_each_entry(child, &counter->child_list, child_list)
1397 perf_counter_for_each_sibling(child, func);
1398 mutex_unlock(&counter->mutex);
1399 }
1400
1401 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1402 {
1403 struct perf_counter *counter = file->private_data;
1404 void (*func)(struct perf_counter *);
1405 u32 flags = arg;
1406
1407 switch (cmd) {
1408 case PERF_COUNTER_IOC_ENABLE:
1409 func = perf_counter_enable;
1410 break;
1411 case PERF_COUNTER_IOC_DISABLE:
1412 func = perf_counter_disable;
1413 break;
1414 case PERF_COUNTER_IOC_RESET:
1415 func = perf_counter_reset;
1416 break;
1417
1418 case PERF_COUNTER_IOC_REFRESH:
1419 return perf_counter_refresh(counter, arg);
1420 default:
1421 return -ENOTTY;
1422 }
1423
1424 if (flags & PERF_IOC_FLAG_GROUP)
1425 perf_counter_for_each(counter, func);
1426 else
1427 perf_counter_for_each_child(counter, func);
1428
1429 return 0;
1430 }
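/*
 * Usage sketch for the ioctls above ('fd' is a hypothetical counter file
 * descriptor for a group leader).  Passing PERF_IOC_FLAG_GROUP as the
 * argument makes the operation fan out to all counters of the group:
 *
 *	ioctl(fd, PERF_COUNTER_IOC_RESET,  PERF_IOC_FLAG_GROUP);
 *	ioctl(fd, PERF_COUNTER_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
 *
 * PERF_COUNTER_IOC_REFRESH instead treats the argument as a count added
 * to the counter's event_limit before re-enabling it (see
 * perf_counter_refresh() above).
 */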
1431
1432 /*
1433 * Callers need to ensure there can be no nesting of this function, otherwise
1434 * the seqlock logic goes bad. We cannot serialize this because the arch
1435 * code calls this from NMI context.
1436 */
1437 void perf_counter_update_userpage(struct perf_counter *counter)
1438 {
1439 struct perf_mmap_data *data;
1440 struct perf_counter_mmap_page *userpg;
1441
1442 rcu_read_lock();
1443 data = rcu_dereference(counter->data);
1444 if (!data)
1445 goto unlock;
1446
1447 userpg = data->user_page;
1448
1449 /*
1450 * Disable preemption so as to not let the corresponding user-space
1451 * spin too long if we get preempted.
1452 */
1453 preempt_disable();
1454 ++userpg->lock;
1455 barrier();
1456 userpg->index = counter->hw.idx;
1457 userpg->offset = atomic64_read(&counter->count);
1458 if (counter->state == PERF_COUNTER_STATE_ACTIVE)
1459 userpg->offset -= atomic64_read(&counter->hw.prev_count);
1460
1461 barrier();
1462 ++userpg->lock;
1463 preempt_enable();
1464 unlock:
1465 rcu_read_unlock();
1466 }
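/*
 * A user-space reader of the page updated above has to treat ->lock as a
 * sequence count: it is incremented once before and once after the update,
 * so an odd value means an update is in flight.  A minimal sketch, where
 * 'pg' is assumed to point at the mmap()ed perf_counter_mmap_page:
 *
 *	uint32_t seq;
 *	uint32_t idx;
 *	int64_t  offset;
 *
 *	do {
 *		seq = pg->lock;
 *		__sync_synchronize();	// pairs with the barrier()s above
 *		idx    = pg->index;
 *		offset = pg->offset;
 *		__sync_synchronize();
 *	} while (pg->lock != seq || (seq & 1));
 *
 * 'offset' can then be combined with a direct hardware read of counter
 * 'idx' (where the architecture allows that from user space) to obtain an
 * up-to-date count without a system call.
 */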
1467
1468 static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1469 {
1470 struct perf_counter *counter = vma->vm_file->private_data;
1471 struct perf_mmap_data *data;
1472 int ret = VM_FAULT_SIGBUS;
1473
1474 rcu_read_lock();
1475 data = rcu_dereference(counter->data);
1476 if (!data)
1477 goto unlock;
1478
1479 if (vmf->pgoff == 0) {
1480 vmf->page = virt_to_page(data->user_page);
1481 } else {
1482 int nr = vmf->pgoff - 1;
1483
1484 if ((unsigned)nr > data->nr_pages)
1485 goto unlock;
1486
1487 vmf->page = virt_to_page(data->data_pages[nr]);
1488 }
1489 get_page(vmf->page);
1490 ret = 0;
1491 unlock:
1492 rcu_read_unlock();
1493
1494 return ret;
1495 }
1496
1497 static int perf_mmap_data_alloc(struct perf_counter *counter, int nr_pages)
1498 {
1499 struct perf_mmap_data *data;
1500 unsigned long size;
1501 int i;
1502
1503 WARN_ON(atomic_read(&counter->mmap_count));
1504
1505 size = sizeof(struct perf_mmap_data);
1506 size += nr_pages * sizeof(void *);
1507
1508 data = kzalloc(size, GFP_KERNEL);
1509 if (!data)
1510 goto fail;
1511
1512 data->user_page = (void *)get_zeroed_page(GFP_KERNEL);
1513 if (!data->user_page)
1514 goto fail_user_page;
1515
1516 for (i = 0; i < nr_pages; i++) {
1517 data->data_pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
1518 if (!data->data_pages[i])
1519 goto fail_data_pages;
1520 }
1521
1522 data->nr_pages = nr_pages;
1523 atomic_set(&data->lock, -1);
1524
1525 rcu_assign_pointer(counter->data, data);
1526
1527 return 0;
1528
1529 fail_data_pages:
1530 for (i--; i >= 0; i--)
1531 free_page((unsigned long)data->data_pages[i]);
1532
1533 free_page((unsigned long)data->user_page);
1534
1535 fail_user_page:
1536 kfree(data);
1537
1538 fail:
1539 return -ENOMEM;
1540 }
1541
1542 static void __perf_mmap_data_free(struct rcu_head *rcu_head)
1543 {
1544 struct perf_mmap_data *data = container_of(rcu_head,
1545 struct perf_mmap_data, rcu_head);
1546 int i;
1547
1548 free_page((unsigned long)data->user_page);
1549 for (i = 0; i < data->nr_pages; i++)
1550 free_page((unsigned long)data->data_pages[i]);
1551 kfree(data);
1552 }
1553
1554 static void perf_mmap_data_free(struct perf_counter *counter)
1555 {
1556 struct perf_mmap_data *data = counter->data;
1557
1558 WARN_ON(atomic_read(&counter->mmap_count));
1559
1560 rcu_assign_pointer(counter->data, NULL);
1561 call_rcu(&data->rcu_head, __perf_mmap_data_free);
1562 }
1563
1564 static void perf_mmap_open(struct vm_area_struct *vma)
1565 {
1566 struct perf_counter *counter = vma->vm_file->private_data;
1567
1568 atomic_inc(&counter->mmap_count);
1569 }
1570
1571 static void perf_mmap_close(struct vm_area_struct *vma)
1572 {
1573 struct perf_counter *counter = vma->vm_file->private_data;
1574
1575 if (atomic_dec_and_mutex_lock(&counter->mmap_count,
1576 &counter->mmap_mutex)) {
1577 struct user_struct *user = current_user();
1578
1579 atomic_long_sub(counter->data->nr_pages + 1, &user->locked_vm);
1580 vma->vm_mm->locked_vm -= counter->data->nr_locked;
1581 perf_mmap_data_free(counter);
1582 mutex_unlock(&counter->mmap_mutex);
1583 }
1584 }
1585
1586 static struct vm_operations_struct perf_mmap_vmops = {
1587 .open = perf_mmap_open,
1588 .close = perf_mmap_close,
1589 .fault = perf_mmap_fault,
1590 };
1591
1592 static int perf_mmap(struct file *file, struct vm_area_struct *vma)
1593 {
1594 struct perf_counter *counter = file->private_data;
1595 struct user_struct *user = current_user();
1596 unsigned long vma_size;
1597 unsigned long nr_pages;
1598 unsigned long user_locked, user_lock_limit;
1599 unsigned long locked, lock_limit;
1600 long user_extra, extra;
1601 int ret = 0;
1602
1603 if (!(vma->vm_flags & VM_SHARED) || (vma->vm_flags & VM_WRITE))
1604 return -EINVAL;
1605
1606 vma_size = vma->vm_end - vma->vm_start;
1607 nr_pages = (vma_size / PAGE_SIZE) - 1;
1608
1609 /*
1610 * If we have data pages ensure they're a power-of-two number, so we
1611 * can do bitmasks instead of modulo.
1612 */
1613 if (nr_pages != 0 && !is_power_of_2(nr_pages))
1614 return -EINVAL;
1615
1616 if (vma_size != PAGE_SIZE * (1 + nr_pages))
1617 return -EINVAL;
1618
1619 if (vma->vm_pgoff != 0)
1620 return -EINVAL;
1621
1622 mutex_lock(&counter->mmap_mutex);
1623 if (atomic_inc_not_zero(&counter->mmap_count)) {
1624 if (nr_pages != counter->data->nr_pages)
1625 ret = -EINVAL;
1626 goto unlock;
1627 }
1628
1629 user_extra = nr_pages + 1;
1630 user_lock_limit = sysctl_perf_counter_mlock >> (PAGE_SHIFT - 10);
1631 user_locked = atomic_long_read(&user->locked_vm) + user_extra;
1632
1633 extra = 0;
1634 if (user_locked > user_lock_limit)
1635 extra = user_locked - user_lock_limit;
1636
1637 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
1638 lock_limit >>= PAGE_SHIFT;
1639 locked = vma->vm_mm->locked_vm + extra;
1640
1641 if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
1642 ret = -EPERM;
1643 goto unlock;
1644 }
1645
1646 WARN_ON(counter->data);
1647 ret = perf_mmap_data_alloc(counter, nr_pages);
1648 if (ret)
1649 goto unlock;
1650
1651 atomic_set(&counter->mmap_count, 1);
1652 atomic_long_add(user_extra, &user->locked_vm);
1653 vma->vm_mm->locked_vm += extra;
1654 counter->data->nr_locked = extra;
1655 unlock:
1656 mutex_unlock(&counter->mmap_mutex);
1657
1658 vma->vm_flags &= ~VM_MAYWRITE;
1659 vma->vm_flags |= VM_RESERVED;
1660 vma->vm_ops = &perf_mmap_vmops;
1661
1662 return ret;
1663 }
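/*
 * A user-space mmap() sketch matching the checks above: the mapping must
 * be MAP_SHARED and read-only, cover one metadata page plus a power-of-two
 * number of data pages, and start at offset 0.  'counter_fd' is again a
 * hypothetical counter file descriptor:
 *
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	long page_size = sysconf(_SC_PAGESIZE);
 *	size_t data_pages = 8;			// must be a power of two
 *	void *base = mmap(NULL, (data_pages + 1) * page_size,
 *			  PROT_READ, MAP_SHARED, counter_fd, 0);
 *
 * base then points at the perf_counter_mmap_page, with the data pages
 * following it.
 */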
1664
1665 static int perf_fasync(int fd, struct file *filp, int on)
1666 {
1667 struct perf_counter *counter = filp->private_data;
1668 struct inode *inode = filp->f_path.dentry->d_inode;
1669 int retval;
1670
1671 mutex_lock(&inode->i_mutex);
1672 retval = fasync_helper(fd, filp, on, &counter->fasync);
1673 mutex_unlock(&inode->i_mutex);
1674
1675 if (retval < 0)
1676 return retval;
1677
1678 return 0;
1679 }
1680
1681 static const struct file_operations perf_fops = {
1682 .release = perf_release,
1683 .read = perf_read,
1684 .poll = perf_poll,
1685 .unlocked_ioctl = perf_ioctl,
1686 .compat_ioctl = perf_ioctl,
1687 .mmap = perf_mmap,
1688 .fasync = perf_fasync,
1689 };
1690
1691 /*
1692 * Perf counter wakeup
1693 *
1694 * If there's data, ensure we set the poll() state and publish everything
1695 * to user-space before waking everybody up.
1696 */
1697
1698 void perf_counter_wakeup(struct perf_counter *counter)
1699 {
1700 wake_up_all(&counter->waitq);
1701
1702 if (counter->pending_kill) {
1703 kill_fasync(&counter->fasync, SIGIO, counter->pending_kill);
1704 counter->pending_kill = 0;
1705 }
1706 }
1707
1708 /*
1709 * Pending wakeups
1710 *
1711 * Handle the case where we need to wake up from NMI (or rq->lock) context.
1712 *
1713 * The NMI bit means we cannot possibly take locks. Therefore, maintain a
1714 * single linked list and use cmpxchg() to add entries lockless.
1715 */
1716
1717 static void perf_pending_counter(struct perf_pending_entry *entry)
1718 {
1719 struct perf_counter *counter = container_of(entry,
1720 struct perf_counter, pending);
1721
1722 if (counter->pending_disable) {
1723 counter->pending_disable = 0;
1724 perf_counter_disable(counter);
1725 }
1726
1727 if (counter->pending_wakeup) {
1728 counter->pending_wakeup = 0;
1729 perf_counter_wakeup(counter);
1730 }
1731 }
1732
1733 #define PENDING_TAIL ((struct perf_pending_entry *)-1UL)
1734
1735 static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = {
1736 PENDING_TAIL,
1737 };
1738
1739 static void perf_pending_queue(struct perf_pending_entry *entry,
1740 void (*func)(struct perf_pending_entry *))
1741 {
1742 struct perf_pending_entry **head;
1743
1744 if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL)
1745 return;
1746
1747 entry->func = func;
1748
1749 head = &get_cpu_var(perf_pending_head);
1750
1751 do {
1752 entry->next = *head;
1753 } while (cmpxchg(head, entry->next, entry) != entry->next);
1754
1755 set_perf_counter_pending();
1756
1757 put_cpu_var(perf_pending_head);
1758 }
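/*
 * The queueing above is a classic lock-free LIFO push.  A minimal
 * user-space analogue using a GCC builtin (the kernel version additionally
 * uses the PENDING_TAIL sentinel, a per-CPU head, and kicks the CPU via
 * set_perf_counter_pending()):
 *
 *	struct entry {
 *		struct entry *next;
 *	};
 *
 *	static void push(struct entry **head, struct entry *e)
 *	{
 *		struct entry *old;
 *
 *		do {
 *			old = *head;
 *			e->next = old;
 *		} while (!__sync_bool_compare_and_swap(head, old, e));
 *	}
 */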
1759
1760 static int __perf_pending_run(void)
1761 {
1762 struct perf_pending_entry *list;
1763 int nr = 0;
1764
1765 list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL);
1766 while (list != PENDING_TAIL) {
1767 void (*func)(struct perf_pending_entry *);
1768 struct perf_pending_entry *entry = list;
1769
1770 list = list->next;
1771
1772 func = entry->func;
1773 entry->next = NULL;
1774 /*
1775 * Ensure we observe the unqueue before we issue the wakeup,
1776 * so that we won't be waiting forever.
1777 * -- see perf_not_pending().
1778 */
1779 smp_wmb();
1780
1781 func(entry);
1782 nr++;
1783 }
1784
1785 return nr;
1786 }
1787
1788 static inline int perf_not_pending(struct perf_counter *counter)
1789 {
1790 /*
1791 * If we flush on whatever cpu we run, there is a chance we don't
1792 * need to wait.
1793 */
1794 get_cpu();
1795 __perf_pending_run();
1796 put_cpu();
1797
1798 /*
1799 * Ensure we see the proper queue state before going to sleep
1800 * so that we do not miss the wakeup. -- see perf_pending_handle()
1801 */
1802 smp_rmb();
1803 return counter->pending.next == NULL;
1804 }
1805
1806 static void perf_pending_sync(struct perf_counter *counter)
1807 {
1808 wait_event(counter->waitq, perf_not_pending(counter));
1809 }
1810
1811 void perf_counter_do_pending(void)
1812 {
1813 __perf_pending_run();
1814 }
1815
1816 /*
1817 * Callchain support -- arch specific
1818 */
1819
1820 __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
1821 {
1822 return NULL;
1823 }
1824
1825 /*
1826 * Output
1827 */
1828
1829 struct perf_output_handle {
1830 struct perf_counter *counter;
1831 struct perf_mmap_data *data;
1832 unsigned int offset;
1833 unsigned int head;
1834 int nmi;
1835 int overflow;
1836 int locked;
1837 unsigned long flags;
1838 };
1839
1840 static void perf_output_wakeup(struct perf_output_handle *handle)
1841 {
1842 atomic_set(&handle->data->poll, POLL_IN);
1843
1844 if (handle->nmi) {
1845 handle->counter->pending_wakeup = 1;
1846 perf_pending_queue(&handle->counter->pending,
1847 perf_pending_counter);
1848 } else
1849 perf_counter_wakeup(handle->counter);
1850 }
1851
1852 /*
1853 * Curious locking construct.
1854 *
1855 * We need to ensure a later event doesn't publish a head when a former
1856 * event isn't done writing. However since we need to deal with NMIs we
1857 * cannot fully serialize things.
1858 *
1859 * What we do is serialize between CPUs so we only have to deal with NMI
1860 * nesting on a single CPU.
1861 *
1862 * We only publish the head (and generate a wakeup) when the outer-most
1863 * event completes.
1864 */
1865 static void perf_output_lock(struct perf_output_handle *handle)
1866 {
1867 struct perf_mmap_data *data = handle->data;
1868 int cpu;
1869
1870 handle->locked = 0;
1871
1872 local_irq_save(handle->flags);
1873 cpu = smp_processor_id();
1874
1875 if (in_nmi() && atomic_read(&data->lock) == cpu)
1876 return;
1877
1878 while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
1879 cpu_relax();
1880
1881 handle->locked = 1;
1882 }
1883
1884 static void perf_output_unlock(struct perf_output_handle *handle)
1885 {
1886 struct perf_mmap_data *data = handle->data;
1887 int head, cpu;
1888
1889 data->done_head = data->head;
1890
1891 if (!handle->locked)
1892 goto out;
1893
1894 again:
1895 /*
1896 * The xchg implies a full barrier that ensures all writes are done
1897 * before we publish the new head, matched by a rmb() in userspace when
1898 * reading this position.
1899 */
1900 while ((head = atomic_xchg(&data->done_head, 0)))
1901 data->user_page->data_head = head;
1902
1903 /*
1904 * NMI can happen here, which means we can miss a done_head update.
1905 */
1906
1907 cpu = atomic_xchg(&data->lock, -1);
1908 WARN_ON_ONCE(cpu != smp_processor_id());
1909
1910 /*
1911 * Therefore we have to check that this did not in fact happen.
1912 */
1913 if (unlikely(atomic_read(&data->done_head))) {
1914 /*
1915 * Since we had it locked, we can lock it again.
1916 */
1917 while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
1918 cpu_relax();
1919
1920 goto again;
1921 }
1922
1923 if (atomic_xchg(&data->wakeup, 0))
1924 perf_output_wakeup(handle);
1925 out:
1926 local_irq_restore(handle->flags);
1927 }
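/*
 * The data_head publish above is paired with a read barrier on the
 * user-space side.  Sketch of a consumer, with 'pg' assumed to point at
 * the mmap()ed metadata page:
 *
 *	static uint64_t tail;	// consumer's own read position
 *
 *	uint64_t head = pg->data_head;
 *	__sync_synchronize();	// pairs with the xchg() barrier above
 *	// bytes between 'tail' and 'head' in the data pages are now
 *	// completely written and safe to parse
 *	tail = head;
 *
 * Without the barrier the consumer could see the new head but stale
 * event bytes.
 */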
1928
1929 static int perf_output_begin(struct perf_output_handle *handle,
1930 struct perf_counter *counter, unsigned int size,
1931 int nmi, int overflow)
1932 {
1933 struct perf_mmap_data *data;
1934 unsigned int offset, head;
1935
1936 /*
1937 * For inherited counters we send all the output towards the parent.
1938 */
1939 if (counter->parent)
1940 counter = counter->parent;
1941
1942 rcu_read_lock();
1943 data = rcu_dereference(counter->data);
1944 if (!data)
1945 goto out;
1946
1947 handle->data = data;
1948 handle->counter = counter;
1949 handle->nmi = nmi;
1950 handle->overflow = overflow;
1951
1952 if (!data->nr_pages)
1953 goto fail;
1954
1955 perf_output_lock(handle);
1956
1957 do {
1958 offset = head = atomic_read(&data->head);
1959 head += size;
1960 } while (atomic_cmpxchg(&data->head, offset, head) != offset);
1961
1962 handle->offset = offset;
1963 handle->head = head;
1964
1965 if ((offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT))
1966 atomic_set(&data->wakeup, 1);
1967
1968 return 0;
1969
1970 fail:
1971 perf_output_wakeup(handle);
1972 out:
1973 rcu_read_unlock();
1974
1975 return -ENOSPC;
1976 }
1977
1978 static void perf_output_copy(struct perf_output_handle *handle,
1979 void *buf, unsigned int len)
1980 {
1981 unsigned int pages_mask;
1982 unsigned int offset;
1983 unsigned int size;
1984 void **pages;
1985
1986 offset = handle->offset;
1987 pages_mask = handle->data->nr_pages - 1;
1988 pages = handle->data->data_pages;
1989
1990 do {
1991 unsigned int page_offset;
1992 int nr;
1993
1994 nr = (offset >> PAGE_SHIFT) & pages_mask;
1995 page_offset = offset & (PAGE_SIZE - 1);
1996 size = min_t(unsigned int, PAGE_SIZE - page_offset, len);
1997
1998 memcpy(pages[nr] + page_offset, buf, size);
1999
2000 len -= size;
2001 buf += size;
2002 offset += size;
2003 } while (len);
2004
2005 handle->offset = offset;
2006
2007 /*
2008 * Check we didn't copy past our reservation window, taking the
2009 * possible unsigned int wrap into account.
2010 */
2011 WARN_ON_ONCE(((int)(handle->head - handle->offset)) < 0);
2012 }
2013
2014 #define perf_output_put(handle, x) \
2015 perf_output_copy((handle), &(x), sizeof(x))
2016
2017 static void perf_output_end(struct perf_output_handle *handle)
2018 {
2019 struct perf_counter *counter = handle->counter;
2020 struct perf_mmap_data *data = handle->data;
2021
2022 int wakeup_events = counter->hw_event.wakeup_events;
2023
2024 if (handle->overflow && wakeup_events) {
2025 int events = atomic_inc_return(&data->events);
2026 if (events >= wakeup_events) {
2027 atomic_sub(wakeup_events, &data->events);
2028 atomic_set(&data->wakeup, 1);
2029 }
2030 }
2031
2032 perf_output_unlock(handle);
2033 rcu_read_unlock();
2034 }
2035
2036 static void perf_counter_output(struct perf_counter *counter,
2037 int nmi, struct pt_regs *regs, u64 addr)
2038 {
2039 int ret;
2040 u64 record_type = counter->hw_event.record_type;
2041 struct perf_output_handle handle;
2042 struct perf_event_header header;
2043 u64 ip;
2044 struct {
2045 u32 pid, tid;
2046 } tid_entry;
2047 struct {
2048 u64 event;
2049 u64 counter;
2050 } group_entry;
2051 struct perf_callchain_entry *callchain = NULL;
2052 int callchain_size = 0;
2053 u64 time;
2054 struct {
2055 u32 cpu, reserved;
2056 } cpu_entry;
2057
2058 header.type = 0;
2059 header.size = sizeof(header);
2060
2061 header.misc = PERF_EVENT_MISC_OVERFLOW;
2062 header.misc |= perf_misc_flags(regs);
2063
2064 if (record_type & PERF_RECORD_IP) {
2065 ip = perf_instruction_pointer(regs);
2066 header.type |= PERF_RECORD_IP;
2067 header.size += sizeof(ip);
2068 }
2069
2070 if (record_type & PERF_RECORD_TID) {
2071 /* namespace issues */
2072 tid_entry.pid = current->group_leader->pid;
2073 tid_entry.tid = current->pid;
2074
2075 header.type |= PERF_RECORD_TID;
2076 header.size += sizeof(tid_entry);
2077 }
2078
2079 if (record_type & PERF_RECORD_TIME) {
2080 /*
2081 * Maybe do better on x86 and provide cpu_clock_nmi()
2082 */
2083 time = sched_clock();
2084
2085 header.type |= PERF_RECORD_TIME;
2086 header.size += sizeof(u64);
2087 }
2088
2089 if (record_type & PERF_RECORD_ADDR) {
2090 header.type |= PERF_RECORD_ADDR;
2091 header.size += sizeof(u64);
2092 }
2093
2094 if (record_type & PERF_RECORD_CONFIG) {
2095 header.type |= PERF_RECORD_CONFIG;
2096 header.size += sizeof(u64);
2097 }
2098
2099 if (record_type & PERF_RECORD_CPU) {
2100 header.type |= PERF_RECORD_CPU;
2101 header.size += sizeof(cpu_entry);
2102
2103 cpu_entry.cpu = raw_smp_processor_id();
2104 }
2105
2106 if (record_type & PERF_RECORD_GROUP) {
2107 header.type |= PERF_RECORD_GROUP;
2108 header.size += sizeof(u64) +
2109 counter->nr_siblings * sizeof(group_entry);
2110 }
2111
2112 if (record_type & PERF_RECORD_CALLCHAIN) {
2113 callchain = perf_callchain(regs);
2114
2115 if (callchain) {
2116 callchain_size = (1 + callchain->nr) * sizeof(u64);
2117
2118 header.type |= PERF_RECORD_CALLCHAIN;
2119 header.size += callchain_size;
2120 }
2121 }
2122
2123 ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
2124 if (ret)
2125 return;
2126
2127 perf_output_put(&handle, header);
2128
2129 if (record_type & PERF_RECORD_IP)
2130 perf_output_put(&handle, ip);
2131
2132 if (record_type & PERF_RECORD_TID)
2133 perf_output_put(&handle, tid_entry);
2134
2135 if (record_type & PERF_RECORD_TIME)
2136 perf_output_put(&handle, time);
2137
2138 if (record_type & PERF_RECORD_ADDR)
2139 perf_output_put(&handle, addr);
2140
2141 if (record_type & PERF_RECORD_CONFIG)
2142 perf_output_put(&handle, counter->hw_event.config);
2143
2144 if (record_type & PERF_RECORD_CPU)
2145 perf_output_put(&handle, cpu_entry);
2146
2147 /*
2148 * XXX PERF_RECORD_GROUP vs inherited counters seems difficult.
2149 */
2150 if (record_type & PERF_RECORD_GROUP) {
2151 struct perf_counter *leader, *sub;
2152 u64 nr = counter->nr_siblings;
2153
2154 perf_output_put(&handle, nr);
2155
2156 leader = counter->group_leader;
2157 list_for_each_entry(sub, &leader->sibling_list, list_entry) {
2158 if (sub != counter)
2159 sub->pmu->read(sub);
2160
2161 group_entry.event = sub->hw_event.config;
2162 group_entry.counter = atomic64_read(&sub->count);
2163
2164 perf_output_put(&handle, group_entry);
2165 }
2166 }
2167
2168 if (callchain)
2169 perf_output_copy(&handle, callchain, callchain_size);
2170
2171 perf_output_end(&handle);
2172 }
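/*
 * perf_counter_output() works in two passes: the first pass ORs the
 * requested PERF_RECORD_* bits into header.type and accumulates
 * header.size, the second pass emits the fields in the same order.  The
 * two passes must stay in sync or the output will not match the space
 * reserved by perf_output_begin().
 */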
2173
2174 /*
2175 * comm tracking
2176 */
2177
2178 struct perf_comm_event {
2179 struct task_struct *task;
2180 char *comm;
2181 int comm_size;
2182
2183 struct {
2184 struct perf_event_header header;
2185
2186 u32 pid;
2187 u32 tid;
2188 } event;
2189 };
2190
2191 static void perf_counter_comm_output(struct perf_counter *counter,
2192 struct perf_comm_event *comm_event)
2193 {
2194 struct perf_output_handle handle;
2195 int size = comm_event->event.header.size;
2196 int ret = perf_output_begin(&handle, counter, size, 0, 0);
2197
2198 if (ret)
2199 return;
2200
2201 perf_output_put(&handle, comm_event->event);
2202 perf_output_copy(&handle, comm_event->comm,
2203 comm_event->comm_size);
2204 perf_output_end(&handle);
2205 }
2206
2207 static int perf_counter_comm_match(struct perf_counter *counter,
2208 struct perf_comm_event *comm_event)
2209 {
2210 if (counter->hw_event.comm &&
2211 comm_event->event.header.type == PERF_EVENT_COMM)
2212 return 1;
2213
2214 return 0;
2215 }
2216
2217 static void perf_counter_comm_ctx(struct perf_counter_context *ctx,
2218 struct perf_comm_event *comm_event)
2219 {
2220 struct perf_counter *counter;
2221
2222 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
2223 return;
2224
2225 rcu_read_lock();
2226 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
2227 if (perf_counter_comm_match(counter, comm_event))
2228 perf_counter_comm_output(counter, comm_event);
2229 }
2230 rcu_read_unlock();
2231 }
2232
2233 static void perf_counter_comm_event(struct perf_comm_event *comm_event)
2234 {
2235 struct perf_cpu_context *cpuctx;
2236 unsigned int size;
2237 char *comm = comm_event->task->comm;
2238
2239 size = ALIGN(strlen(comm)+1, sizeof(u64));
2240
2241 comm_event->comm = comm;
2242 comm_event->comm_size = size;
2243
2244 comm_event->event.header.size = sizeof(comm_event->event) + size;
2245
2246 cpuctx = &get_cpu_var(perf_cpu_context);
2247 perf_counter_comm_ctx(&cpuctx->ctx, comm_event);
2248 put_cpu_var(perf_cpu_context);
2249
2250 perf_counter_comm_ctx(&current->perf_counter_ctx, comm_event);
2251 }
2252
2253 void perf_counter_comm(struct task_struct *task)
2254 {
2255 struct perf_comm_event comm_event;
2256
2257 if (!atomic_read(&nr_comm_tracking))
2258 return;
2259
2260 comm_event = (struct perf_comm_event){
2261 .task = task,
2262 .event = {
2263 .header = { .type = PERF_EVENT_COMM, },
2264 .pid = task->group_leader->pid,
2265 .tid = task->pid,
2266 },
2267 };
2268
2269 perf_counter_comm_event(&comm_event);
2270 }
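/*
 * perf_counter_comm() is meant to be called when a task's comm name
 * changes (the call sites live outside this file, e.g. on the exec
 * path); the nr_comm_tracking check keeps it a cheap no-op unless some
 * counter actually asked for comm events.
 */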
2271
2272 /*
2273 * mmap tracking
2274 */
2275
2276 struct perf_mmap_event {
2277 struct file *file;
2278 char *file_name;
2279 int file_size;
2280
2281 struct {
2282 struct perf_event_header header;
2283
2284 u32 pid;
2285 u32 tid;
2286 u64 start;
2287 u64 len;
2288 u64 pgoff;
2289 } event;
2290 };
2291
2292 static void perf_counter_mmap_output(struct perf_counter *counter,
2293 struct perf_mmap_event *mmap_event)
2294 {
2295 struct perf_output_handle handle;
2296 int size = mmap_event->event.header.size;
2297 int ret = perf_output_begin(&handle, counter, size, 0, 0);
2298
2299 if (ret)
2300 return;
2301
2302 perf_output_put(&handle, mmap_event->event);
2303 perf_output_copy(&handle, mmap_event->file_name,
2304 mmap_event->file_size);
2305 perf_output_end(&handle);
2306 }
2307
2308 static int perf_counter_mmap_match(struct perf_counter *counter,
2309 struct perf_mmap_event *mmap_event)
2310 {
2311 if (counter->hw_event.mmap &&
2312 mmap_event->event.header.type == PERF_EVENT_MMAP)
2313 return 1;
2314
2315 if (counter->hw_event.munmap &&
2316 mmap_event->event.header.type == PERF_EVENT_MUNMAP)
2317 return 1;
2318
2319 return 0;
2320 }
2321
2322 static void perf_counter_mmap_ctx(struct perf_counter_context *ctx,
2323 struct perf_mmap_event *mmap_event)
2324 {
2325 struct perf_counter *counter;
2326
2327 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
2328 return;
2329
2330 rcu_read_lock();
2331 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
2332 if (perf_counter_mmap_match(counter, mmap_event))
2333 perf_counter_mmap_output(counter, mmap_event);
2334 }
2335 rcu_read_unlock();
2336 }
2337
2338 static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event)
2339 {
2340 struct perf_cpu_context *cpuctx;
2341 struct file *file = mmap_event->file;
2342 unsigned int size;
2343 char tmp[16];
2344 char *buf = NULL;
2345 char *name;
2346
2347 if (file) {
2348 buf = kzalloc(PATH_MAX, GFP_KERNEL);
2349 if (!buf) {
2350 name = strncpy(tmp, "//enomem", sizeof(tmp));
2351 goto got_name;
2352 }
2353 name = d_path(&file->f_path, buf, PATH_MAX);
2354 if (IS_ERR(name)) {
2355 name = strncpy(tmp, "//toolong", sizeof(tmp));
2356 goto got_name;
2357 }
2358 } else {
2359 name = strncpy(tmp, "//anon", sizeof(tmp));
2360 goto got_name;
2361 }
2362
2363 got_name:
2364 size = ALIGN(strlen(name)+1, sizeof(u64));
2365
2366 mmap_event->file_name = name;
2367 mmap_event->file_size = size;
2368
2369 mmap_event->event.header.size = sizeof(mmap_event->event) + size;
2370
2371 cpuctx = &get_cpu_var(perf_cpu_context);
2372 perf_counter_mmap_ctx(&cpuctx->ctx, mmap_event);
2373 put_cpu_var(perf_cpu_context);
2374
2375 perf_counter_mmap_ctx(&current->perf_counter_ctx, mmap_event);
2376
2377 kfree(buf);
2378 }
2379
2380 void perf_counter_mmap(unsigned long addr, unsigned long len,
2381 unsigned long pgoff, struct file *file)
2382 {
2383 struct perf_mmap_event mmap_event;
2384
2385 if (!atomic_read(&nr_mmap_tracking))
2386 return;
2387
2388 mmap_event = (struct perf_mmap_event){
2389 .file = file,
2390 .event = {
2391 .header = { .type = PERF_EVENT_MMAP, },
2392 .pid = current->group_leader->pid,
2393 .tid = current->pid,
2394 .start = addr,
2395 .len = len,
2396 .pgoff = pgoff,
2397 },
2398 };
2399
2400 perf_counter_mmap_event(&mmap_event);
2401 }
2402
2403 void perf_counter_munmap(unsigned long addr, unsigned long len,
2404 unsigned long pgoff, struct file *file)
2405 {
2406 struct perf_mmap_event mmap_event;
2407
2408 if (!atomic_read(&nr_munmap_tracking))
2409 return;
2410
2411 mmap_event = (struct perf_mmap_event){
2412 .file = file,
2413 .event = {
2414 .header = { .type = PERF_EVENT_MUNMAP, },
2415 .pid = current->group_leader->pid,
2416 .tid = current->pid,
2417 .start = addr,
2418 .len = len,
2419 .pgoff = pgoff,
2420 },
2421 };
2422
2423 perf_counter_mmap_event(&mmap_event);
2424 }
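/*
 * perf_counter_mmap() and perf_counter_munmap() share perf_mmap_event;
 * they differ only in the event header type and in which tracking count
 * (nr_mmap_tracking vs nr_munmap_tracking) gates them.
 */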
2425
2426 /*
2427  * Log irq_period changes so that analysis tools can re-normalize samples.
2428  */
2429
2430 static void perf_log_period(struct perf_counter *counter, u64 period)
2431 {
2432 struct perf_output_handle handle;
2433 int ret;
2434
2435 struct {
2436 struct perf_event_header header;
2437 u64 time;
2438 u64 period;
2439 } freq_event = {
2440 .header = {
2441 .type = PERF_EVENT_PERIOD,
2442 .misc = 0,
2443 .size = sizeof(freq_event),
2444 },
2445 .time = sched_clock(),
2446 .period = period,
2447 };
2448
2449 if (counter->hw.irq_period == period)
2450 return;
2451
2452 ret = perf_output_begin(&handle, counter, sizeof(freq_event), 0, 0);
2453 if (ret)
2454 return;
2455
2456 perf_output_put(&handle, freq_event);
2457 perf_output_end(&handle);
2458 }
2459
2460 /*
2461 * Generic counter overflow handling.
2462 */
2463
2464 int perf_counter_overflow(struct perf_counter *counter,
2465 int nmi, struct pt_regs *regs, u64 addr)
2466 {
2467 int events = atomic_read(&counter->event_limit);
2468 int ret = 0;
2469
2470 counter->hw.interrupts++;
2471
2472 /*
2473 * XXX event_limit might not quite work as expected on inherited
2474 * counters
2475 */
2476
2477 counter->pending_kill = POLL_IN;
2478 if (events && atomic_dec_and_test(&counter->event_limit)) {
2479 ret = 1;
2480 counter->pending_kill = POLL_HUP;
2481 if (nmi) {
2482 counter->pending_disable = 1;
2483 perf_pending_queue(&counter->pending,
2484 perf_pending_counter);
2485 } else
2486 perf_counter_disable(counter);
2487 }
2488
2489 perf_counter_output(counter, nmi, regs, addr);
2490 return ret;
2491 }
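/*
 * A non-zero return from perf_counter_overflow() asks the caller (an
 * arch PMU interrupt handler, or perf_swcounter_hrtimer() below) to stop
 * the counter: its event_limit is exhausted and pending_kill is set to
 * POLL_HUP.  From NMI context the disable is deferred via the pending
 * queue, since perf_counter_disable() is not NMI-safe.
 */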
2492
2493 /*
2494 * Generic software counter infrastructure
2495 */
2496
2497 static void perf_swcounter_update(struct perf_counter *counter)
2498 {
2499 struct hw_perf_counter *hwc = &counter->hw;
2500 u64 prev, now;
2501 s64 delta;
2502
2503 again:
2504 prev = atomic64_read(&hwc->prev_count);
2505 now = atomic64_read(&hwc->count);
2506 if (atomic64_cmpxchg(&hwc->prev_count, prev, now) != prev)
2507 goto again;
2508
2509 delta = now - prev;
2510
2511 atomic64_add(delta, &counter->count);
2512 atomic64_sub(delta, &hwc->period_left);
2513 }
2514
2515 static void perf_swcounter_set_period(struct perf_counter *counter)
2516 {
2517 struct hw_perf_counter *hwc = &counter->hw;
2518 s64 left = atomic64_read(&hwc->period_left);
2519 s64 period = hwc->irq_period;
2520
2521 if (unlikely(left <= -period)) {
2522 left = period;
2523 atomic64_set(&hwc->period_left, left);
2524 }
2525
2526 if (unlikely(left <= 0)) {
2527 left += period;
2528 atomic64_add(period, &hwc->period_left);
2529 }
2530
2531 atomic64_set(&hwc->prev_count, -left);
2532 atomic64_set(&hwc->count, -left);
2533 }
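/*
 * Software counters count upwards from -left: hw.prev_count and hw.count
 * are (re)set to -left above, so the counter goes non-negative after
 * 'left' more events.  perf_swcounter_add() below detects that with
 * atomic64_add_negative() and then calls perf_swcounter_overflow().
 */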
2534
2535 static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
2536 {
2537 enum hrtimer_restart ret = HRTIMER_RESTART;
2538 struct perf_counter *counter;
2539 struct pt_regs *regs;
2540 u64 period;
2541
2542 counter = container_of(hrtimer, struct perf_counter, hw.hrtimer);
2543 counter->pmu->read(counter);
2544
2545 regs = get_irq_regs();
2546 /*
2547 * In case we exclude kernel IPs or are somehow not in interrupt
2548 * context, provide the next best thing, the user IP.
2549 */
2550 if ((counter->hw_event.exclude_kernel || !regs) &&
2551 !counter->hw_event.exclude_user)
2552 regs = task_pt_regs(current);
2553
2554 if (regs) {
2555 if (perf_counter_overflow(counter, 0, regs, 0))
2556 ret = HRTIMER_NORESTART;
2557 }
2558
2559 period = max_t(u64, 10000, counter->hw.irq_period);
2560 hrtimer_forward_now(hrtimer, ns_to_ktime(period));
2561
2562 return ret;
2563 }
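/*
 * The period is clamped to at least 10000 ns here (and when the clock
 * counters below start their hrtimers), presumably to keep a very small
 * irq_period from re-arming the timer at an excessive rate.
 */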
2564
2565 static void perf_swcounter_overflow(struct perf_counter *counter,
2566 int nmi, struct pt_regs *regs, u64 addr)
2567 {
2568 perf_swcounter_update(counter);
2569 perf_swcounter_set_period(counter);
2570 if (perf_counter_overflow(counter, nmi, regs, addr))
2571 /* soft-disable the counter */
2572 ;
2573
2574 }
2575
2576 static int perf_swcounter_match(struct perf_counter *counter,
2577 enum perf_event_types type,
2578 u32 event, struct pt_regs *regs)
2579 {
2580 if (counter->state != PERF_COUNTER_STATE_ACTIVE)
2581 return 0;
2582
2583 if (perf_event_raw(&counter->hw_event))
2584 return 0;
2585
2586 if (perf_event_type(&counter->hw_event) != type)
2587 return 0;
2588
2589 if (perf_event_id(&counter->hw_event) != event)
2590 return 0;
2591
2592 if (counter->hw_event.exclude_user && user_mode(regs))
2593 return 0;
2594
2595 if (counter->hw_event.exclude_kernel && !user_mode(regs))
2596 return 0;
2597
2598 return 1;
2599 }
2600
2601 static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
2602 int nmi, struct pt_regs *regs, u64 addr)
2603 {
2604 int neg = atomic64_add_negative(nr, &counter->hw.count);
2605 if (counter->hw.irq_period && !neg)
2606 perf_swcounter_overflow(counter, nmi, regs, addr);
2607 }
2608
2609 static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
2610 enum perf_event_types type, u32 event,
2611 u64 nr, int nmi, struct pt_regs *regs,
2612 u64 addr)
2613 {
2614 struct perf_counter *counter;
2615
2616 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
2617 return;
2618
2619 rcu_read_lock();
2620 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
2621 if (perf_swcounter_match(counter, type, event, regs))
2622 perf_swcounter_add(counter, nr, nmi, regs, addr);
2623 }
2624 rcu_read_unlock();
2625 }
2626
2627 static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx)
2628 {
2629 if (in_nmi())
2630 return &cpuctx->recursion[3];
2631
2632 if (in_irq())
2633 return &cpuctx->recursion[2];
2634
2635 if (in_softirq())
2636 return &cpuctx->recursion[1];
2637
2638 return &cpuctx->recursion[0];
2639 }
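/*
 * Each perf_cpu_context keeps four recursion flags, one per execution
 * context level (task, softirq, hardirq, NMI).  A software event raised
 * while another one is already being processed at the same level is
 * dropped by __perf_swcounter_event() below, while events from a higher
 * level (e.g. an NMI hitting an IRQ handler) still get through.
 */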
2640
2641 static void __perf_swcounter_event(enum perf_event_types type, u32 event,
2642 u64 nr, int nmi, struct pt_regs *regs,
2643 u64 addr)
2644 {
2645 struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
2646 int *recursion = perf_swcounter_recursion_context(cpuctx);
2647
2648 if (*recursion)
2649 goto out;
2650
2651 (*recursion)++;
2652 barrier();
2653
2654 perf_swcounter_ctx_event(&cpuctx->ctx, type, event,
2655 nr, nmi, regs, addr);
2656 if (cpuctx->task_ctx) {
2657 perf_swcounter_ctx_event(cpuctx->task_ctx, type, event,
2658 nr, nmi, regs, addr);
2659 }
2660
2661 barrier();
2662 (*recursion)--;
2663
2664 out:
2665 put_cpu_var(perf_cpu_context);
2666 }
2667
2668 void
2669 perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
2670 {
2671 __perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi, regs, addr);
2672 }
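/*
 * Illustrative sketch of how a software event source elsewhere in the
 * kernel would feed this (the exact call sites and arguments live
 * outside this file):
 *
 *	perf_swcounter_event(PERF_COUNT_PAGE_FAULTS, 1, 0, regs, address);
 */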
2673
2674 static void perf_swcounter_read(struct perf_counter *counter)
2675 {
2676 perf_swcounter_update(counter);
2677 }
2678
2679 static int perf_swcounter_enable(struct perf_counter *counter)
2680 {
2681 perf_swcounter_set_period(counter);
2682 return 0;
2683 }
2684
2685 static void perf_swcounter_disable(struct perf_counter *counter)
2686 {
2687 perf_swcounter_update(counter);
2688 }
2689
2690 static const struct pmu perf_ops_generic = {
2691 .enable = perf_swcounter_enable,
2692 .disable = perf_swcounter_disable,
2693 .read = perf_swcounter_read,
2694 };
2695
2696 /*
2697 * Software counter: cpu wall time clock
2698 */
2699
2700 static void cpu_clock_perf_counter_update(struct perf_counter *counter)
2701 {
2702 int cpu = raw_smp_processor_id();
2703 s64 prev;
2704 u64 now;
2705
2706 now = cpu_clock(cpu);
2707 prev = atomic64_read(&counter->hw.prev_count);
2708 atomic64_set(&counter->hw.prev_count, now);
2709 atomic64_add(now - prev, &counter->count);
2710 }
2711
2712 static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
2713 {
2714 struct hw_perf_counter *hwc = &counter->hw;
2715 int cpu = raw_smp_processor_id();
2716
2717 atomic64_set(&hwc->prev_count, cpu_clock(cpu));
2718 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2719 hwc->hrtimer.function = perf_swcounter_hrtimer;
2720 if (hwc->irq_period) {
2721 u64 period = max_t(u64, 10000, hwc->irq_period);
2722 __hrtimer_start_range_ns(&hwc->hrtimer,
2723 ns_to_ktime(period), 0,
2724 HRTIMER_MODE_REL, 0);
2725 }
2726
2727 return 0;
2728 }
2729
2730 static void cpu_clock_perf_counter_disable(struct perf_counter *counter)
2731 {
2732 if (counter->hw.irq_period)
2733 hrtimer_cancel(&counter->hw.hrtimer);
2734 cpu_clock_perf_counter_update(counter);
2735 }
2736
2737 static void cpu_clock_perf_counter_read(struct perf_counter *counter)
2738 {
2739 cpu_clock_perf_counter_update(counter);
2740 }
2741
2742 static const struct pmu perf_ops_cpu_clock = {
2743 .enable = cpu_clock_perf_counter_enable,
2744 .disable = cpu_clock_perf_counter_disable,
2745 .read = cpu_clock_perf_counter_read,
2746 };
2747
2748 /*
2749 * Software counter: task time clock
2750 */
2751
2752 static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now)
2753 {
2754 u64 prev;
2755 s64 delta;
2756
2757 prev = atomic64_xchg(&counter->hw.prev_count, now);
2758 delta = now - prev;
2759 atomic64_add(delta, &counter->count);
2760 }
2761
2762 static int task_clock_perf_counter_enable(struct perf_counter *counter)
2763 {
2764 struct hw_perf_counter *hwc = &counter->hw;
2765 u64 now;
2766
2767 now = counter->ctx->time;
2768
2769 atomic64_set(&hwc->prev_count, now);
2770 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2771 hwc->hrtimer.function = perf_swcounter_hrtimer;
2772 if (hwc->irq_period) {
2773 u64 period = max_t(u64, 10000, hwc->irq_period);
2774 __hrtimer_start_range_ns(&hwc->hrtimer,
2775 ns_to_ktime(period), 0,
2776 HRTIMER_MODE_REL, 0);
2777 }
2778
2779 return 0;
2780 }
2781
2782 static void task_clock_perf_counter_disable(struct perf_counter *counter)
2783 {
2784 if (counter->hw.irq_period)
2785 hrtimer_cancel(&counter->hw.hrtimer);
2786 task_clock_perf_counter_update(counter, counter->ctx->time);
2787
2788 }
2789
2790 static void task_clock_perf_counter_read(struct perf_counter *counter)
2791 {
2792 u64 time;
2793
2794 if (!in_nmi()) {
2795 update_context_time(counter->ctx);
2796 time = counter->ctx->time;
2797 } else {
2798 u64 now = perf_clock();
2799 u64 delta = now - counter->ctx->timestamp;
2800 time = counter->ctx->time + delta;
2801 }
2802
2803 task_clock_perf_counter_update(counter, time);
2804 }
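/*
 * The in_nmi() check above avoids touching the shared context time from
 * NMI context; there the task clock is approximated as the last recorded
 * ctx->time plus the raw perf_clock() delta since ctx->timestamp.
 */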
2805
2806 static const struct pmu perf_ops_task_clock = {
2807 .enable = task_clock_perf_counter_enable,
2808 .disable = task_clock_perf_counter_disable,
2809 .read = task_clock_perf_counter_read,
2810 };
2811
2812 /*
2813 * Software counter: cpu migrations
2814 */
2815
2816 static inline u64 get_cpu_migrations(struct perf_counter *counter)
2817 {
2818 struct task_struct *curr = counter->ctx->task;
2819
2820 if (curr)
2821 return curr->se.nr_migrations;
2822 return cpu_nr_migrations(smp_processor_id());
2823 }
2824
2825 static void cpu_migrations_perf_counter_update(struct perf_counter *counter)
2826 {
2827 u64 prev, now;
2828 s64 delta;
2829
2830 prev = atomic64_read(&counter->hw.prev_count);
2831 now = get_cpu_migrations(counter);
2832
2833 atomic64_set(&counter->hw.prev_count, now);
2834
2835 delta = now - prev;
2836
2837 atomic64_add(delta, &counter->count);
2838 }
2839
2840 static void cpu_migrations_perf_counter_read(struct perf_counter *counter)
2841 {
2842 cpu_migrations_perf_counter_update(counter);
2843 }
2844
2845 static int cpu_migrations_perf_counter_enable(struct perf_counter *counter)
2846 {
2847 if (counter->prev_state <= PERF_COUNTER_STATE_OFF)
2848 atomic64_set(&counter->hw.prev_count,
2849 get_cpu_migrations(counter));
2850 return 0;
2851 }
2852
2853 static void cpu_migrations_perf_counter_disable(struct perf_counter *counter)
2854 {
2855 cpu_migrations_perf_counter_update(counter);
2856 }
2857
2858 static const struct pmu perf_ops_cpu_migrations = {
2859 .enable = cpu_migrations_perf_counter_enable,
2860 .disable = cpu_migrations_perf_counter_disable,
2861 .read = cpu_migrations_perf_counter_read,
2862 };
2863
2864 #ifdef CONFIG_EVENT_PROFILE
2865 void perf_tpcounter_event(int event_id)
2866 {
2867 struct pt_regs *regs = get_irq_regs();
2868
2869 if (!regs)
2870 regs = task_pt_regs(current);
2871
2872 __perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, regs, 0);
2873 }
2874 EXPORT_SYMBOL_GPL(perf_tpcounter_event);
2875
2876 extern int ftrace_profile_enable(int);
2877 extern void ftrace_profile_disable(int);
2878
2879 static void tp_perf_counter_destroy(struct perf_counter *counter)
2880 {
2881 ftrace_profile_disable(perf_event_id(&counter->hw_event));
2882 }
2883
2884 static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
2885 {
2886 int event_id = perf_event_id(&counter->hw_event);
2887 int ret;
2888
2889 ret = ftrace_profile_enable(event_id);
2890 if (ret)
2891 return NULL;
2892
2893 counter->destroy = tp_perf_counter_destroy;
2894 counter->hw.irq_period = counter->hw_event.irq_period;
2895
2896 return &perf_ops_generic;
2897 }
2898 #else
2899 static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
2900 {
2901 return NULL;
2902 }
2903 #endif
2904
2905 static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
2906 {
2907 const struct pmu *pmu = NULL;
2908
2909 /*
2910 * Software counters (currently) can't in general distinguish
2911 * between user, kernel and hypervisor events.
2912 * However, context switches and cpu migrations are considered
2913 * to be kernel events, and page faults are never hypervisor
2914 * events.
2915 */
2916 switch (perf_event_id(&counter->hw_event)) {
2917 case PERF_COUNT_CPU_CLOCK:
2918 pmu = &perf_ops_cpu_clock;
2919
2920 break;
2921 case PERF_COUNT_TASK_CLOCK:
2922 /*
2923 * If the user instantiates this as a per-cpu counter,
2924 * use the cpu_clock counter instead.
2925 */
2926 if (counter->ctx->task)
2927 pmu = &perf_ops_task_clock;
2928 else
2929 pmu = &perf_ops_cpu_clock;
2930
2931 break;
2932 case PERF_COUNT_PAGE_FAULTS:
2933 case PERF_COUNT_PAGE_FAULTS_MIN:
2934 case PERF_COUNT_PAGE_FAULTS_MAJ:
2935 case PERF_COUNT_CONTEXT_SWITCHES:
2936 pmu = &perf_ops_generic;
2937 break;
2938 case PERF_COUNT_CPU_MIGRATIONS:
2939 if (!counter->hw_event.exclude_kernel)
2940 pmu = &perf_ops_cpu_migrations;
2941 break;
2942 }
2943
2944 return pmu;
2945 }
2946
2947 /*
2948 * Allocate and initialize a counter structure
2949 */
2950 static struct perf_counter *
2951 perf_counter_alloc(struct perf_counter_hw_event *hw_event,
2952 int cpu,
2953 struct perf_counter_context *ctx,
2954 struct perf_counter *group_leader,
2955 gfp_t gfpflags)
2956 {
2957 const struct pmu *pmu;
2958 struct perf_counter *counter;
2959 struct hw_perf_counter *hwc;
2960 long err;
2961
2962 counter = kzalloc(sizeof(*counter), gfpflags);
2963 if (!counter)
2964 return ERR_PTR(-ENOMEM);
2965
2966 /*
2967 * Single counters are their own group leaders, with an
2968 * empty sibling list:
2969 */
2970 if (!group_leader)
2971 group_leader = counter;
2972
2973 mutex_init(&counter->mutex);
2974 INIT_LIST_HEAD(&counter->list_entry);
2975 INIT_LIST_HEAD(&counter->event_entry);
2976 INIT_LIST_HEAD(&counter->sibling_list);
2977 init_waitqueue_head(&counter->waitq);
2978
2979 mutex_init(&counter->mmap_mutex);
2980
2981 INIT_LIST_HEAD(&counter->child_list);
2982
2983 counter->cpu = cpu;
2984 counter->hw_event = *hw_event;
2985 counter->group_leader = group_leader;
2986 counter->pmu = NULL;
2987 counter->ctx = ctx;
2988
2989 counter->state = PERF_COUNTER_STATE_INACTIVE;
2990 if (hw_event->disabled)
2991 counter->state = PERF_COUNTER_STATE_OFF;
2992
2993 pmu = NULL;
2994
2995 hwc = &counter->hw;
2996 if (hw_event->freq && hw_event->irq_freq)
2997 hwc->irq_period = div64_u64(TICK_NSEC, hw_event->irq_freq);
2998 else
2999 hwc->irq_period = hw_event->irq_period;
3000
3001 /*
3002 * we currently do not support PERF_RECORD_GROUP on inherited counters
3003 */
3004 if (hw_event->inherit && (hw_event->record_type & PERF_RECORD_GROUP))
3005 goto done;
3006
3007 if (perf_event_raw(hw_event)) {
3008 pmu = hw_perf_counter_init(counter);
3009 goto done;
3010 }
3011
3012 switch (perf_event_type(hw_event)) {
3013 case PERF_TYPE_HARDWARE:
3014 pmu = hw_perf_counter_init(counter);
3015 break;
3016
3017 case PERF_TYPE_SOFTWARE:
3018 pmu = sw_perf_counter_init(counter);
3019 break;
3020
3021 case PERF_TYPE_TRACEPOINT:
3022 pmu = tp_perf_counter_init(counter);
3023 break;
3024 }
3025 done:
3026 err = 0;
3027 if (!pmu)
3028 err = -EINVAL;
3029 else if (IS_ERR(pmu))
3030 err = PTR_ERR(pmu);
3031
3032 if (err) {
3033 kfree(counter);
3034 return ERR_PTR(err);
3035 }
3036
3037 counter->pmu = pmu;
3038
3039 atomic_inc(&nr_counters);
3040 if (counter->hw_event.mmap)
3041 atomic_inc(&nr_mmap_tracking);
3042 if (counter->hw_event.munmap)
3043 atomic_inc(&nr_munmap_tracking);
3044 if (counter->hw_event.comm)
3045 atomic_inc(&nr_comm_tracking);
3046
3047 return counter;
3048 }
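/*
 * For frequency-based counters (hw_event->freq with a non-zero irq_freq)
 * the irq_period set above is only an initial seed (TICK_NSEC/irq_freq);
 * subsequent period adjustments are what perf_log_period() records.
 */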
3049
3050 /**
3051 * sys_perf_counter_open - open a performance counter, associate it to a task/cpu
3052 *
3053 * @hw_event_uptr: event type attributes for monitoring/sampling
3054 * @pid: target pid
3055 * @cpu: target cpu
3056 * @group_fd: group leader counter fd
3057 */
3058 SYSCALL_DEFINE5(perf_counter_open,
3059 const struct perf_counter_hw_event __user *, hw_event_uptr,
3060 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
3061 {
3062 struct perf_counter *counter, *group_leader;
3063 struct perf_counter_hw_event hw_event;
3064 struct perf_counter_context *ctx;
3065 struct file *counter_file = NULL;
3066 struct file *group_file = NULL;
3067 int fput_needed = 0;
3068 int fput_needed2 = 0;
3069 int ret;
3070
3071 /* for future expandability... */
3072 if (flags)
3073 return -EINVAL;
3074
3075 if (copy_from_user(&hw_event, hw_event_uptr, sizeof(hw_event)) != 0)
3076 return -EFAULT;
3077
3078 /*
3079 * Get the target context (task or percpu):
3080 */
3081 ctx = find_get_context(pid, cpu);
3082 if (IS_ERR(ctx))
3083 return PTR_ERR(ctx);
3084
3085 /*
3086 * Look up the group leader (we will attach this counter to it):
3087 */
3088 group_leader = NULL;
3089 if (group_fd != -1) {
3090 ret = -EINVAL;
3091 group_file = fget_light(group_fd, &fput_needed);
3092 if (!group_file)
3093 goto err_put_context;
3094 if (group_file->f_op != &perf_fops)
3095 goto err_put_context;
3096
3097 group_leader = group_file->private_data;
3098 /*
3099 * Do not allow a recursive hierarchy (this new sibling
3100 * becoming part of another group-sibling):
3101 */
3102 if (group_leader->group_leader != group_leader)
3103 goto err_put_context;
3104 /*
3105 		 * Do not allow attaching to a group in a different
3106 		 * task or CPU context:
3107 */
3108 if (group_leader->ctx != ctx)
3109 goto err_put_context;
3110 /*
3111 * Only a group leader can be exclusive or pinned
3112 */
3113 if (hw_event.exclusive || hw_event.pinned)
3114 goto err_put_context;
3115 }
3116
3117 counter = perf_counter_alloc(&hw_event, cpu, ctx, group_leader,
3118 GFP_KERNEL);
3119 ret = PTR_ERR(counter);
3120 if (IS_ERR(counter))
3121 goto err_put_context;
3122
3123 ret = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0);
3124 if (ret < 0)
3125 goto err_free_put_context;
3126
3127 counter_file = fget_light(ret, &fput_needed2);
3128 if (!counter_file)
3129 goto err_free_put_context;
3130
3131 counter->filp = counter_file;
3132 mutex_lock(&ctx->mutex);
3133 perf_install_in_context(ctx, counter, cpu);
3134 mutex_unlock(&ctx->mutex);
3135
3136 fput_light(counter_file, fput_needed2);
3137
3138 out_fput:
3139 fput_light(group_file, fput_needed);
3140
3141 return ret;
3142
3143 err_free_put_context:
3144 kfree(counter);
3145
3146 err_put_context:
3147 put_context(ctx);
3148
3149 goto out_fput;
3150 }
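/*
 * Minimal user-space sketch of this syscall (illustrative only; assumes
 * the architecture defines __NR_perf_counter_open):
 *
 *	struct perf_counter_hw_event ev = { 0 };
 *	// fill in ev.config, ev.irq_period or ev.irq_freq, ...
 *	int fd = syscall(__NR_perf_counter_open, &ev, 0, -1, -1, 0UL);
 *
 * pid 0 selects the calling task, cpu -1 means "not a per-cpu counter",
 * group_fd -1 makes the new counter its own group leader, and flags must
 * currently be 0.
 */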
3151
3152 /*
3153 * Initialize the perf_counter context in a task_struct:
3154 */
3155 static void
3156 __perf_counter_init_context(struct perf_counter_context *ctx,
3157 struct task_struct *task)
3158 {
3159 memset(ctx, 0, sizeof(*ctx));
3160 spin_lock_init(&ctx->lock);
3161 mutex_init(&ctx->mutex);
3162 INIT_LIST_HEAD(&ctx->counter_list);
3163 INIT_LIST_HEAD(&ctx->event_list);
3164 ctx->task = task;
3165 }
3166
3167 /*
3168 * inherit a counter from parent task to child task:
3169 */
3170 static struct perf_counter *
3171 inherit_counter(struct perf_counter *parent_counter,
3172 struct task_struct *parent,
3173 struct perf_counter_context *parent_ctx,
3174 struct task_struct *child,
3175 struct perf_counter *group_leader,
3176 struct perf_counter_context *child_ctx)
3177 {
3178 struct perf_counter *child_counter;
3179
3180 /*
3181 * Instead of creating recursive hierarchies of counters,
3182 * we link inherited counters back to the original parent,
3183 	 * which is guaranteed to have a filp that we use as the
3184 	 * reference count:
3185 */
3186 if (parent_counter->parent)
3187 parent_counter = parent_counter->parent;
3188
3189 child_counter = perf_counter_alloc(&parent_counter->hw_event,
3190 parent_counter->cpu, child_ctx,
3191 group_leader, GFP_KERNEL);
3192 if (IS_ERR(child_counter))
3193 return child_counter;
3194
3195 /*
3196 * Link it up in the child's context:
3197 */
3198 child_counter->task = child;
3199 add_counter_to_ctx(child_counter, child_ctx);
3200
3201 child_counter->parent = parent_counter;
3202 /*
3203 * inherit into child's child as well:
3204 */
3205 child_counter->hw_event.inherit = 1;
3206
3207 /*
3208 * Get a reference to the parent filp - we will fput it
3209 * when the child counter exits. This is safe to do because
3210 * we are in the parent and we know that the filp still
3211 * exists and has a nonzero count:
3212 */
3213 atomic_long_inc(&parent_counter->filp->f_count);
3214
3215 /*
3216 * Link this into the parent counter's child list
3217 */
3218 mutex_lock(&parent_counter->mutex);
3219 list_add_tail(&child_counter->child_list, &parent_counter->child_list);
3220
3221 /*
3222 * Make the child state follow the state of the parent counter,
3223 * not its hw_event.disabled bit. We hold the parent's mutex,
3224 * so we won't race with perf_counter_{en,dis}able_family.
3225 */
3226 if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE)
3227 child_counter->state = PERF_COUNTER_STATE_INACTIVE;
3228 else
3229 child_counter->state = PERF_COUNTER_STATE_OFF;
3230
3231 mutex_unlock(&parent_counter->mutex);
3232
3233 return child_counter;
3234 }
3235
3236 static int inherit_group(struct perf_counter *parent_counter,
3237 struct task_struct *parent,
3238 struct perf_counter_context *parent_ctx,
3239 struct task_struct *child,
3240 struct perf_counter_context *child_ctx)
3241 {
3242 struct perf_counter *leader;
3243 struct perf_counter *sub;
3244 struct perf_counter *child_ctr;
3245
3246 leader = inherit_counter(parent_counter, parent, parent_ctx,
3247 child, NULL, child_ctx);
3248 if (IS_ERR(leader))
3249 return PTR_ERR(leader);
3250 list_for_each_entry(sub, &parent_counter->sibling_list, list_entry) {
3251 child_ctr = inherit_counter(sub, parent, parent_ctx,
3252 child, leader, child_ctx);
3253 if (IS_ERR(child_ctr))
3254 return PTR_ERR(child_ctr);
3255 }
3256 return 0;
3257 }
3258
3259 static void sync_child_counter(struct perf_counter *child_counter,
3260 struct perf_counter *parent_counter)
3261 {
3262 u64 child_val;
3263
3264 child_val = atomic64_read(&child_counter->count);
3265
3266 /*
3267 * Add back the child's count to the parent's count:
3268 */
3269 atomic64_add(child_val, &parent_counter->count);
3270 atomic64_add(child_counter->total_time_enabled,
3271 &parent_counter->child_total_time_enabled);
3272 atomic64_add(child_counter->total_time_running,
3273 &parent_counter->child_total_time_running);
3274
3275 /*
3276 * Remove this counter from the parent's list
3277 */
3278 mutex_lock(&parent_counter->mutex);
3279 list_del_init(&child_counter->child_list);
3280 mutex_unlock(&parent_counter->mutex);
3281
3282 /*
3283 * Release the parent counter, if this was the last
3284 * reference to it.
3285 */
3286 fput(parent_counter->filp);
3287 }
3288
3289 static void
3290 __perf_counter_exit_task(struct task_struct *child,
3291 struct perf_counter *child_counter,
3292 struct perf_counter_context *child_ctx)
3293 {
3294 struct perf_counter *parent_counter;
3295
3296 /*
3297 * If we do not self-reap then we have to wait for the
3298 * child task to unschedule (it will happen for sure),
3299 * so that its counter is at its final count. (This
3300 * condition triggers rarely - child tasks usually get
3301 * off their CPU before the parent has a chance to
3302 * get this far into the reaping action)
3303 */
3304 if (child != current) {
3305 wait_task_inactive(child, 0);
3306 update_counter_times(child_counter);
3307 list_del_counter(child_counter, child_ctx);
3308 } else {
3309 struct perf_cpu_context *cpuctx;
3310 unsigned long flags;
3311
3312 /*
3313 * Disable and unlink this counter.
3314 *
3315 * Be careful about zapping the list - IRQ/NMI context
3316 * could still be processing it:
3317 */
3318 local_irq_save(flags);
3319 perf_disable();
3320
3321 cpuctx = &__get_cpu_var(perf_cpu_context);
3322
3323 group_sched_out(child_counter, cpuctx, child_ctx);
3324 update_counter_times(child_counter);
3325
3326 list_del_counter(child_counter, child_ctx);
3327
3328 perf_enable();
3329 local_irq_restore(flags);
3330 }
3331
3332 parent_counter = child_counter->parent;
3333 /*
3334 * It can happen that parent exits first, and has counters
3335 * that are still around due to the child reference. These
3336 * counters need to be zapped - but otherwise linger.
3337 */
3338 if (parent_counter) {
3339 sync_child_counter(child_counter, parent_counter);
3340 free_counter(child_counter);
3341 }
3342 }
3343
3344 /*
3345 * When a child task exits, feed back counter values to parent counters.
3346 *
3347 * Note: we may be running in child context, but the PID is not hashed
3348 * anymore so new counters will not be added.
3349 */
3350 void perf_counter_exit_task(struct task_struct *child)
3351 {
3352 struct perf_counter *child_counter, *tmp;
3353 struct perf_counter_context *child_ctx;
3354
3355 WARN_ON_ONCE(child != current);
3356
3357 child_ctx = &child->perf_counter_ctx;
3358
3359 if (likely(!child_ctx->nr_counters))
3360 return;
3361
3362 again:
3363 list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list,
3364 list_entry)
3365 __perf_counter_exit_task(child, child_counter, child_ctx);
3366
3367 /*
3368 * If the last counter was a group counter, it will have appended all
3369 * its siblings to the list, but we obtained 'tmp' before that which
3370 * will still point to the list head terminating the iteration.
3371 */
3372 if (!list_empty(&child_ctx->counter_list))
3373 goto again;
3374 }
3375
3376 /*
3377 * Initialize the perf_counter context in task_struct
3378 */
3379 void perf_counter_init_task(struct task_struct *child)
3380 {
3381 struct perf_counter_context *child_ctx, *parent_ctx;
3382 struct perf_counter *counter;
3383 struct task_struct *parent = current;
3384
3385 child_ctx = &child->perf_counter_ctx;
3386 parent_ctx = &parent->perf_counter_ctx;
3387
3388 __perf_counter_init_context(child_ctx, child);
3389
3390 /*
3391 * This is executed from the parent task context, so inherit
3392 * counters that have been marked for cloning:
3393 */
3394
3395 if (likely(!parent_ctx->nr_counters))
3396 return;
3397
3398 /*
3399 * Lock the parent list. No need to lock the child - not PID
3400 * hashed yet and not running, so nobody can access it.
3401 */
3402 mutex_lock(&parent_ctx->mutex);
3403
3404 /*
3405 	 * We don't have to disable NMIs - we are only looking at
3406 * the list, not manipulating it:
3407 */
3408 list_for_each_entry_rcu(counter, &parent_ctx->event_list, event_entry) {
3409 if (counter != counter->group_leader)
3410 continue;
3411
3412 if (!counter->hw_event.inherit)
3413 continue;
3414
3415 if (inherit_group(counter, parent,
3416 parent_ctx, child, child_ctx))
3417 break;
3418 }
3419
3420 mutex_unlock(&parent_ctx->mutex);
3421 }
3422
3423 static void __cpuinit perf_counter_init_cpu(int cpu)
3424 {
3425 struct perf_cpu_context *cpuctx;
3426
3427 cpuctx = &per_cpu(perf_cpu_context, cpu);
3428 __perf_counter_init_context(&cpuctx->ctx, NULL);
3429
3430 spin_lock(&perf_resource_lock);
3431 cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu;
3432 spin_unlock(&perf_resource_lock);
3433
3434 hw_perf_counter_setup(cpu);
3435 }
3436
3437 #ifdef CONFIG_HOTPLUG_CPU
3438 static void __perf_counter_exit_cpu(void *info)
3439 {
3440 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
3441 struct perf_counter_context *ctx = &cpuctx->ctx;
3442 struct perf_counter *counter, *tmp;
3443
3444 list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry)
3445 __perf_counter_remove_from_context(counter);
3446 }
3447 static void perf_counter_exit_cpu(int cpu)
3448 {
3449 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
3450 struct perf_counter_context *ctx = &cpuctx->ctx;
3451
3452 mutex_lock(&ctx->mutex);
3453 smp_call_function_single(cpu, __perf_counter_exit_cpu, NULL, 1);
3454 mutex_unlock(&ctx->mutex);
3455 }
3456 #else
3457 static inline void perf_counter_exit_cpu(int cpu) { }
3458 #endif
3459
3460 static int __cpuinit
3461 perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
3462 {
3463 unsigned int cpu = (long)hcpu;
3464
3465 switch (action) {
3466
3467 case CPU_UP_PREPARE:
3468 case CPU_UP_PREPARE_FROZEN:
3469 perf_counter_init_cpu(cpu);
3470 break;
3471
3472 case CPU_DOWN_PREPARE:
3473 case CPU_DOWN_PREPARE_FROZEN:
3474 perf_counter_exit_cpu(cpu);
3475 break;
3476
3477 default:
3478 break;
3479 }
3480
3481 return NOTIFY_OK;
3482 }
3483
3484 static struct notifier_block __cpuinitdata perf_cpu_nb = {
3485 .notifier_call = perf_cpu_notify,
3486 };
3487
3488 void __init perf_counter_init(void)
3489 {
3490 perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
3491 (void *)(long)smp_processor_id());
3492 register_cpu_notifier(&perf_cpu_nb);
3493 }
3494
3495 static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf)
3496 {
3497 return sprintf(buf, "%d\n", perf_reserved_percpu);
3498 }
3499
3500 static ssize_t
3501 perf_set_reserve_percpu(struct sysdev_class *class,
3502 const char *buf,
3503 size_t count)
3504 {
3505 struct perf_cpu_context *cpuctx;
3506 unsigned long val;
3507 int err, cpu, mpt;
3508
3509 err = strict_strtoul(buf, 10, &val);
3510 if (err)
3511 return err;
3512 if (val > perf_max_counters)
3513 return -EINVAL;
3514
3515 spin_lock(&perf_resource_lock);
3516 perf_reserved_percpu = val;
3517 for_each_online_cpu(cpu) {
3518 cpuctx = &per_cpu(perf_cpu_context, cpu);
3519 spin_lock_irq(&cpuctx->ctx.lock);
3520 mpt = min(perf_max_counters - cpuctx->ctx.nr_counters,
3521 perf_max_counters - perf_reserved_percpu);
3522 cpuctx->max_pertask = mpt;
3523 spin_unlock_irq(&cpuctx->ctx.lock);
3524 }
3525 spin_unlock(&perf_resource_lock);
3526
3527 return count;
3528 }
3529
3530 static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf)
3531 {
3532 return sprintf(buf, "%d\n", perf_overcommit);
3533 }
3534
3535 static ssize_t
3536 perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count)
3537 {
3538 unsigned long val;
3539 int err;
3540
3541 err = strict_strtoul(buf, 10, &val);
3542 if (err)
3543 return err;
3544 if (val > 1)
3545 return -EINVAL;
3546
3547 spin_lock(&perf_resource_lock);
3548 perf_overcommit = val;
3549 spin_unlock(&perf_resource_lock);
3550
3551 return count;
3552 }
3553
3554 static SYSDEV_CLASS_ATTR(
3555 reserve_percpu,
3556 0644,
3557 perf_show_reserve_percpu,
3558 perf_set_reserve_percpu
3559 );
3560
3561 static SYSDEV_CLASS_ATTR(
3562 overcommit,
3563 0644,
3564 perf_show_overcommit,
3565 perf_set_overcommit
3566 );
3567
3568 static struct attribute *perfclass_attrs[] = {
3569 &attr_reserve_percpu.attr,
3570 &attr_overcommit.attr,
3571 NULL
3572 };
3573
3574 static struct attribute_group perfclass_attr_group = {
3575 .attrs = perfclass_attrs,
3576 .name = "perf_counters",
3577 };
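/*
 * Registered against cpu_sysdev_class below, this group should show up
 * as /sys/devices/system/cpu/perf_counters/{reserve_percpu,overcommit},
 * writable by root given the 0644 modes above.
 */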
3578
3579 static int __init perf_counter_sysfs_init(void)
3580 {
3581 return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
3582 &perfclass_attr_group);
3583 }
3584 device_initcall(perf_counter_sysfs_init);