kernel/perf_counter.c
1 /*
2 * Performance counter core code
3 *
4 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
6 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
7 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
8 *
9 * For licensing details see kernel-base/COPYING
10 */
11
12 #include <linux/fs.h>
13 #include <linux/mm.h>
14 #include <linux/cpu.h>
15 #include <linux/smp.h>
16 #include <linux/file.h>
17 #include <linux/poll.h>
18 #include <linux/sysfs.h>
19 #include <linux/dcache.h>
20 #include <linux/percpu.h>
21 #include <linux/ptrace.h>
22 #include <linux/vmstat.h>
23 #include <linux/hardirq.h>
24 #include <linux/rculist.h>
25 #include <linux/uaccess.h>
26 #include <linux/syscalls.h>
27 #include <linux/anon_inodes.h>
28 #include <linux/kernel_stat.h>
29 #include <linux/perf_counter.h>
30
31 #include <asm/irq_regs.h>
32
33 /*
34 * Each CPU has a list of per CPU counters:
35 */
36 DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);
37
38 int perf_max_counters __read_mostly = 1;
39 static int perf_reserved_percpu __read_mostly;
40 static int perf_overcommit __read_mostly = 1;
41
42 static atomic_t nr_counters __read_mostly;
43 static atomic_t nr_mmap_counters __read_mostly;
44 static atomic_t nr_comm_counters __read_mostly;
45
46 /*
47 * perf counter paranoia level:
48 * 0 - not paranoid
49 * 1 - disallow cpu counters to unpriv
50 * 2 - disallow kernel profiling to unpriv
51 */
52 int sysctl_perf_counter_paranoid __read_mostly;
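/*
 * This is tunable at runtime through the corresponding sysctl (assuming the
 * usual "perf_counter_paranoid" entry is wired up in kernel/sysctl.c), e.g.:
 *
 *	echo 2 > /proc/sys/kernel/perf_counter_paranoid
 */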
53
54 static inline bool perf_paranoid_cpu(void)
55 {
56 return sysctl_perf_counter_paranoid > 0;
57 }
58
59 static inline bool perf_paranoid_kernel(void)
60 {
61 return sysctl_perf_counter_paranoid > 1;
62 }
63
64 int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */
65
66 /*
67 * max perf counter sample rate
68 */
69 int sysctl_perf_counter_sample_rate __read_mostly = 100000;
70
71 static atomic64_t perf_counter_id;
72
73 /*
74 * Lock for (sysadmin-configurable) counter reservations:
75 */
76 static DEFINE_SPINLOCK(perf_resource_lock);
77
78 /*
79 * Architecture provided APIs - weak aliases:
80 */
81 extern __weak const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
82 {
83 return NULL;
84 }
85
86 void __weak hw_perf_disable(void) { barrier(); }
87 void __weak hw_perf_enable(void) { barrier(); }
88
89 void __weak hw_perf_counter_setup(int cpu) { barrier(); }
90
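/*
 * hw_perf_group_sched_in() lets the architecture schedule an entire counter
 * group in one go: a positive return value means the group has been scheduled,
 * 0 means fall back to scheduling the counters one by one in group_sched_in(),
 * and a negative value is an error (see the callers below).
 */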
91 int __weak
92 hw_perf_group_sched_in(struct perf_counter *group_leader,
93 struct perf_cpu_context *cpuctx,
94 struct perf_counter_context *ctx, int cpu)
95 {
96 return 0;
97 }
98
99 void __weak perf_counter_print_debug(void) { }
100
101 static DEFINE_PER_CPU(int, disable_count);
102
103 void __perf_disable(void)
104 {
105 __get_cpu_var(disable_count)++;
106 }
107
108 bool __perf_enable(void)
109 {
110 return !--__get_cpu_var(disable_count);
111 }
112
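/*
 * perf_disable()/perf_enable() calls nest per CPU: hw_perf_disable() is
 * invoked on every perf_disable(), while hw_perf_enable() only runs once the
 * per-CPU disable_count drops back to zero.
 */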
113 void perf_disable(void)
114 {
115 __perf_disable();
116 hw_perf_disable();
117 }
118
119 void perf_enable(void)
120 {
121 if (__perf_enable())
122 hw_perf_enable();
123 }
124
125 static void get_ctx(struct perf_counter_context *ctx)
126 {
127 WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
128 }
129
130 static void free_ctx(struct rcu_head *head)
131 {
132 struct perf_counter_context *ctx;
133
134 ctx = container_of(head, struct perf_counter_context, rcu_head);
135 kfree(ctx);
136 }
137
138 static void put_ctx(struct perf_counter_context *ctx)
139 {
140 if (atomic_dec_and_test(&ctx->refcount)) {
141 if (ctx->parent_ctx)
142 put_ctx(ctx->parent_ctx);
143 if (ctx->task)
144 put_task_struct(ctx->task);
145 call_rcu(&ctx->rcu_head, free_ctx);
146 }
147 }
148
149 static void unclone_ctx(struct perf_counter_context *ctx)
150 {
151 if (ctx->parent_ctx) {
152 put_ctx(ctx->parent_ctx);
153 ctx->parent_ctx = NULL;
154 }
155 }
156
157 /*
158 * Get the perf_counter_context for a task and lock it.
159 * This has to cope with the fact that until it is locked,
160 * the context could get moved to another task.
161 */
162 static struct perf_counter_context *
163 perf_lock_task_context(struct task_struct *task, unsigned long *flags)
164 {
165 struct perf_counter_context *ctx;
166
167 rcu_read_lock();
168 retry:
169 ctx = rcu_dereference(task->perf_counter_ctxp);
170 if (ctx) {
171 /*
172 * If this context is a clone of another, it might
173 * get swapped for another underneath us by
174 * perf_counter_task_sched_out, though the
175 * rcu_read_lock() protects us from any context
176 * getting freed. Lock the context and check if it
177 * got swapped before we could get the lock, and retry
178 * if so. If we locked the right context, then it
179 * can't get swapped on us any more.
180 */
181 spin_lock_irqsave(&ctx->lock, *flags);
182 if (ctx != rcu_dereference(task->perf_counter_ctxp)) {
183 spin_unlock_irqrestore(&ctx->lock, *flags);
184 goto retry;
185 }
186
187 if (!atomic_inc_not_zero(&ctx->refcount)) {
188 spin_unlock_irqrestore(&ctx->lock, *flags);
189 ctx = NULL;
190 }
191 }
192 rcu_read_unlock();
193 return ctx;
194 }
195
196 /*
197 * Get the context for a task and increment its pin_count so it
198 * can't get swapped to another task. This also increments its
199 * reference count so that the context can't get freed.
200 */
201 static struct perf_counter_context *perf_pin_task_context(struct task_struct *task)
202 {
203 struct perf_counter_context *ctx;
204 unsigned long flags;
205
206 ctx = perf_lock_task_context(task, &flags);
207 if (ctx) {
208 ++ctx->pin_count;
209 spin_unlock_irqrestore(&ctx->lock, flags);
210 }
211 return ctx;
212 }
213
214 static void perf_unpin_context(struct perf_counter_context *ctx)
215 {
216 unsigned long flags;
217
218 spin_lock_irqsave(&ctx->lock, flags);
219 --ctx->pin_count;
220 spin_unlock_irqrestore(&ctx->lock, flags);
221 put_ctx(ctx);
222 }
223
224 /*
225 * Add a counter to the lists for its context.
226 * Must be called with ctx->mutex and ctx->lock held.
227 */
228 static void
229 list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
230 {
231 struct perf_counter *group_leader = counter->group_leader;
232
233 /*
234 * Depending on whether it is a standalone or sibling counter,
235 * add it straight to the context's counter list, or to the group
236 * leader's sibling list:
237 */
238 if (group_leader == counter)
239 list_add_tail(&counter->list_entry, &ctx->counter_list);
240 else {
241 list_add_tail(&counter->list_entry, &group_leader->sibling_list);
242 group_leader->nr_siblings++;
243 }
244
245 list_add_rcu(&counter->event_entry, &ctx->event_list);
246 ctx->nr_counters++;
247 if (counter->attr.inherit_stat)
248 ctx->nr_stat++;
249 }
250
251 /*
252 * Remove a counter from the lists for its context.
253 * Must be called with ctx->mutex and ctx->lock held.
254 */
255 static void
256 list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
257 {
258 struct perf_counter *sibling, *tmp;
259
260 if (list_empty(&counter->list_entry))
261 return;
262 ctx->nr_counters--;
263 if (counter->attr.inherit_stat)
264 ctx->nr_stat--;
265
266 list_del_init(&counter->list_entry);
267 list_del_rcu(&counter->event_entry);
268
269 if (counter->group_leader != counter)
270 counter->group_leader->nr_siblings--;
271
272 /*
273 * If this was a group counter with sibling counters then
274 * upgrade the siblings to singleton counters by adding them
275 * to the context list directly:
276 */
277 list_for_each_entry_safe(sibling, tmp,
278 &counter->sibling_list, list_entry) {
279
280 list_move_tail(&sibling->list_entry, &ctx->counter_list);
281 sibling->group_leader = sibling;
282 }
283 }
284
285 static void
286 counter_sched_out(struct perf_counter *counter,
287 struct perf_cpu_context *cpuctx,
288 struct perf_counter_context *ctx)
289 {
290 if (counter->state != PERF_COUNTER_STATE_ACTIVE)
291 return;
292
293 counter->state = PERF_COUNTER_STATE_INACTIVE;
294 counter->tstamp_stopped = ctx->time;
295 counter->pmu->disable(counter);
296 counter->oncpu = -1;
297
298 if (!is_software_counter(counter))
299 cpuctx->active_oncpu--;
300 ctx->nr_active--;
301 if (counter->attr.exclusive || !cpuctx->active_oncpu)
302 cpuctx->exclusive = 0;
303 }
304
305 static void
306 group_sched_out(struct perf_counter *group_counter,
307 struct perf_cpu_context *cpuctx,
308 struct perf_counter_context *ctx)
309 {
310 struct perf_counter *counter;
311
312 if (group_counter->state != PERF_COUNTER_STATE_ACTIVE)
313 return;
314
315 counter_sched_out(group_counter, cpuctx, ctx);
316
317 /*
318 * Schedule out siblings (if any):
319 */
320 list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
321 counter_sched_out(counter, cpuctx, ctx);
322
323 if (group_counter->attr.exclusive)
324 cpuctx->exclusive = 0;
325 }
326
327 /*
328 * Cross CPU call to remove a performance counter
329 *
330 * We disable the counter on the hardware level first. After that we
331 * remove it from the context list.
332 */
333 static void __perf_counter_remove_from_context(void *info)
334 {
335 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
336 struct perf_counter *counter = info;
337 struct perf_counter_context *ctx = counter->ctx;
338
339 /*
340 * If this is a task context, we need to check whether it is
341 * the current task context of this cpu. If not it has been
342 * scheduled out before the smp call arrived.
343 */
344 if (ctx->task && cpuctx->task_ctx != ctx)
345 return;
346
347 spin_lock(&ctx->lock);
348 /*
349 * Protect the list operation against NMI by disabling the
350 * counters on a global level.
351 */
352 perf_disable();
353
354 counter_sched_out(counter, cpuctx, ctx);
355
356 list_del_counter(counter, ctx);
357
358 if (!ctx->task) {
359 /*
360 * Allow more per task counters with respect to the
361 * reservation:
362 */
363 cpuctx->max_pertask =
364 min(perf_max_counters - ctx->nr_counters,
365 perf_max_counters - perf_reserved_percpu);
366 }
367
368 perf_enable();
369 spin_unlock(&ctx->lock);
370 }
371
372
373 /*
374 * Remove the counter from a task's (or a CPU's) list of counters.
375 *
376 * Must be called with ctx->mutex held.
377 *
378 * CPU counters are removed with a smp call. For task counters we only
379 * call when the task is on a CPU.
380 *
381 * If counter->ctx is a cloned context, callers must make sure that
382 * every task struct that counter->ctx->task could possibly point to
383 * remains valid. This is OK when called from perf_release since
384 * that only calls us on the top-level context, which can't be a clone.
385 * When called from perf_counter_exit_task, it's OK because the
386 * context has been detached from its task.
387 */
388 static void perf_counter_remove_from_context(struct perf_counter *counter)
389 {
390 struct perf_counter_context *ctx = counter->ctx;
391 struct task_struct *task = ctx->task;
392
393 if (!task) {
394 /*
395 * Per cpu counters are removed via an smp call and
396 * the removal is always sucessful.
397 */
398 smp_call_function_single(counter->cpu,
399 __perf_counter_remove_from_context,
400 counter, 1);
401 return;
402 }
403
404 retry:
405 task_oncpu_function_call(task, __perf_counter_remove_from_context,
406 counter);
407
408 spin_lock_irq(&ctx->lock);
409 /*
410 * If the context is active we need to retry the smp call.
411 */
412 if (ctx->nr_active && !list_empty(&counter->list_entry)) {
413 spin_unlock_irq(&ctx->lock);
414 goto retry;
415 }
416
417 /*
418 * The lock prevents this context from being scheduled in, so we
419 * can remove the counter safely if the call above did not
420 * succeed.
421 */
422 if (!list_empty(&counter->list_entry)) {
423 list_del_counter(counter, ctx);
424 }
425 spin_unlock_irq(&ctx->lock);
426 }
427
428 static inline u64 perf_clock(void)
429 {
430 return cpu_clock(smp_processor_id());
431 }
432
433 /*
434 * Update the record of the current time in a context.
435 */
436 static void update_context_time(struct perf_counter_context *ctx)
437 {
438 u64 now = perf_clock();
439
440 ctx->time += now - ctx->timestamp;
441 ctx->timestamp = now;
442 }
443
444 /*
445 * Update the total_time_enabled and total_time_running fields for a counter.
446 */
447 static void update_counter_times(struct perf_counter *counter)
448 {
449 struct perf_counter_context *ctx = counter->ctx;
450 u64 run_end;
451
452 if (counter->state < PERF_COUNTER_STATE_INACTIVE)
453 return;
454
455 counter->total_time_enabled = ctx->time - counter->tstamp_enabled;
456
457 if (counter->state == PERF_COUNTER_STATE_INACTIVE)
458 run_end = counter->tstamp_stopped;
459 else
460 run_end = ctx->time;
461
462 counter->total_time_running = run_end - counter->tstamp_running;
463 }
464
465 /*
466 * Update total_time_enabled and total_time_running for all counters in a group.
467 */
468 static void update_group_times(struct perf_counter *leader)
469 {
470 struct perf_counter *counter;
471
472 update_counter_times(leader);
473 list_for_each_entry(counter, &leader->sibling_list, list_entry)
474 update_counter_times(counter);
475 }
476
477 /*
478 * Cross CPU call to disable a performance counter
479 */
480 static void __perf_counter_disable(void *info)
481 {
482 struct perf_counter *counter = info;
483 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
484 struct perf_counter_context *ctx = counter->ctx;
485
486 /*
487 * If this is a per-task counter, need to check whether this
488 * counter's task is the current task on this cpu.
489 */
490 if (ctx->task && cpuctx->task_ctx != ctx)
491 return;
492
493 spin_lock(&ctx->lock);
494
495 /*
496 * If the counter is on, turn it off.
497 * If it is in error state, leave it in error state.
498 */
499 if (counter->state >= PERF_COUNTER_STATE_INACTIVE) {
500 update_context_time(ctx);
501 update_counter_times(counter);
502 if (counter == counter->group_leader)
503 group_sched_out(counter, cpuctx, ctx);
504 else
505 counter_sched_out(counter, cpuctx, ctx);
506 counter->state = PERF_COUNTER_STATE_OFF;
507 }
508
509 spin_unlock(&ctx->lock);
510 }
511
512 /*
513 * Disable a counter.
514 *
515 * If counter->ctx is a cloned context, callers must make sure that
516 * every task struct that counter->ctx->task could possibly point to
517 * remains valid. This condition is satisfied when called through
518 * perf_counter_for_each_child or perf_counter_for_each because they
519 * hold the top-level counter's child_mutex, so any descendant that
520 * goes to exit will block in sync_child_counter.
521 * When called from perf_pending_counter it's OK because counter->ctx
522 * is the current context on this CPU and preemption is disabled,
523 * hence we can't get into perf_counter_task_sched_out for this context.
524 */
525 static void perf_counter_disable(struct perf_counter *counter)
526 {
527 struct perf_counter_context *ctx = counter->ctx;
528 struct task_struct *task = ctx->task;
529
530 if (!task) {
531 /*
532 * Disable the counter on the cpu that it's on
533 */
534 smp_call_function_single(counter->cpu, __perf_counter_disable,
535 counter, 1);
536 return;
537 }
538
539 retry:
540 task_oncpu_function_call(task, __perf_counter_disable, counter);
541
542 spin_lock_irq(&ctx->lock);
543 /*
544 * If the counter is still active, we need to retry the cross-call.
545 */
546 if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
547 spin_unlock_irq(&ctx->lock);
548 goto retry;
549 }
550
551 /*
552 * Since we have the lock this context can't be scheduled
553 * in, so we can change the state safely.
554 */
555 if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
556 update_counter_times(counter);
557 counter->state = PERF_COUNTER_STATE_OFF;
558 }
559
560 spin_unlock_irq(&ctx->lock);
561 }
562
563 static int
564 counter_sched_in(struct perf_counter *counter,
565 struct perf_cpu_context *cpuctx,
566 struct perf_counter_context *ctx,
567 int cpu)
568 {
569 if (counter->state <= PERF_COUNTER_STATE_OFF)
570 return 0;
571
572 counter->state = PERF_COUNTER_STATE_ACTIVE;
573 counter->oncpu = cpu; /* TODO: put 'cpu' into cpuctx->cpu */
574 /*
575 * The new state must be visible before we turn it on in the hardware:
576 */
577 smp_wmb();
578
579 if (counter->pmu->enable(counter)) {
580 counter->state = PERF_COUNTER_STATE_INACTIVE;
581 counter->oncpu = -1;
582 return -EAGAIN;
583 }
584
585 counter->tstamp_running += ctx->time - counter->tstamp_stopped;
586
587 if (!is_software_counter(counter))
588 cpuctx->active_oncpu++;
589 ctx->nr_active++;
590
591 if (counter->attr.exclusive)
592 cpuctx->exclusive = 1;
593
594 return 0;
595 }
596
597 static int
598 group_sched_in(struct perf_counter *group_counter,
599 struct perf_cpu_context *cpuctx,
600 struct perf_counter_context *ctx,
601 int cpu)
602 {
603 struct perf_counter *counter, *partial_group;
604 int ret;
605
606 if (group_counter->state == PERF_COUNTER_STATE_OFF)
607 return 0;
608
609 ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
610 if (ret)
611 return ret < 0 ? ret : 0;
612
613 if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
614 return -EAGAIN;
615
616 /*
617 * Schedule in siblings as one group (if any):
618 */
619 list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
620 if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
621 partial_group = counter;
622 goto group_error;
623 }
624 }
625
626 return 0;
627
628 group_error:
629 /*
630 * Groups can be scheduled in as one unit only, so undo any
631 * partial group before returning:
632 */
633 list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
634 if (counter == partial_group)
635 break;
636 counter_sched_out(counter, cpuctx, ctx);
637 }
638 counter_sched_out(group_counter, cpuctx, ctx);
639
640 return -EAGAIN;
641 }
642
643 /*
644 * Return 1 for a group consisting entirely of software counters,
645 * 0 if the group contains any hardware counters.
646 */
647 static int is_software_only_group(struct perf_counter *leader)
648 {
649 struct perf_counter *counter;
650
651 if (!is_software_counter(leader))
652 return 0;
653
654 list_for_each_entry(counter, &leader->sibling_list, list_entry)
655 if (!is_software_counter(counter))
656 return 0;
657
658 return 1;
659 }
660
661 /*
662 * Work out whether we can put this counter group on the CPU now.
663 */
664 static int group_can_go_on(struct perf_counter *counter,
665 struct perf_cpu_context *cpuctx,
666 int can_add_hw)
667 {
668 /*
669 * Groups consisting entirely of software counters can always go on.
670 */
671 if (is_software_only_group(counter))
672 return 1;
673 /*
674 * If an exclusive group is already on, no other hardware
675 * counters can go on.
676 */
677 if (cpuctx->exclusive)
678 return 0;
679 /*
680 * If this group is exclusive and there are already
681 * counters on the CPU, it can't go on.
682 */
683 if (counter->attr.exclusive && cpuctx->active_oncpu)
684 return 0;
685 /*
686 * Otherwise, try to add it if all previous groups were able
687 * to go on.
688 */
689 return can_add_hw;
690 }
691
692 static void add_counter_to_ctx(struct perf_counter *counter,
693 struct perf_counter_context *ctx)
694 {
695 list_add_counter(counter, ctx);
696 counter->tstamp_enabled = ctx->time;
697 counter->tstamp_running = ctx->time;
698 counter->tstamp_stopped = ctx->time;
699 }
700
701 /*
702 * Cross CPU call to install and enable a performance counter
703 *
704 * Must be called with ctx->mutex held
705 */
706 static void __perf_install_in_context(void *info)
707 {
708 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
709 struct perf_counter *counter = info;
710 struct perf_counter_context *ctx = counter->ctx;
711 struct perf_counter *leader = counter->group_leader;
712 int cpu = smp_processor_id();
713 int err;
714
715 /*
716 * If this is a task context, we need to check whether it is
717 * the current task context of this cpu. If not it has been
718 * scheduled out before the smp call arrived.
719 * Or possibly this is the right context but it isn't
720 * on this cpu because it had no counters.
721 */
722 if (ctx->task && cpuctx->task_ctx != ctx) {
723 if (cpuctx->task_ctx || ctx->task != current)
724 return;
725 cpuctx->task_ctx = ctx;
726 }
727
728 spin_lock(&ctx->lock);
729 ctx->is_active = 1;
730 update_context_time(ctx);
731
732 /*
733 * Protect the list operation against NMI by disabling the
734 * counters on a global level. NOP for non NMI based counters.
735 */
736 perf_disable();
737
738 add_counter_to_ctx(counter, ctx);
739
740 /*
741 * Don't put the counter on if it is disabled or if
742 * it is in a group and the group isn't on.
743 */
744 if (counter->state != PERF_COUNTER_STATE_INACTIVE ||
745 (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE))
746 goto unlock;
747
748 /*
749 * An exclusive counter can't go on if there are already active
750 * hardware counters, and no hardware counter can go on if there
751 * is already an exclusive counter on.
752 */
753 if (!group_can_go_on(counter, cpuctx, 1))
754 err = -EEXIST;
755 else
756 err = counter_sched_in(counter, cpuctx, ctx, cpu);
757
758 if (err) {
759 /*
760 * This counter couldn't go on. If it is in a group
761 * then we have to pull the whole group off.
762 * If the counter group is pinned then put it in error state.
763 */
764 if (leader != counter)
765 group_sched_out(leader, cpuctx, ctx);
766 if (leader->attr.pinned) {
767 update_group_times(leader);
768 leader->state = PERF_COUNTER_STATE_ERROR;
769 }
770 }
771
772 if (!err && !ctx->task && cpuctx->max_pertask)
773 cpuctx->max_pertask--;
774
775 unlock:
776 perf_enable();
777
778 spin_unlock(&ctx->lock);
779 }
780
781 /*
782 * Attach a performance counter to a context
783 *
784 * First we add the counter to the list with the hardware enable bit
785 * in counter->hw_config cleared.
786 *
787 * If the counter is attached to a task which is on a CPU we use a smp
788 * call to enable it in the task context. The task might have been
789 * scheduled away, but we check this in the smp call again.
790 *
791 * Must be called with ctx->mutex held.
792 */
793 static void
794 perf_install_in_context(struct perf_counter_context *ctx,
795 struct perf_counter *counter,
796 int cpu)
797 {
798 struct task_struct *task = ctx->task;
799
800 if (!task) {
801 /*
802 * Per cpu counters are installed via an smp call and
803 * the install is always successful.
804 */
805 smp_call_function_single(cpu, __perf_install_in_context,
806 counter, 1);
807 return;
808 }
809
810 retry:
811 task_oncpu_function_call(task, __perf_install_in_context,
812 counter);
813
814 spin_lock_irq(&ctx->lock);
815 /*
816 * If the context is active and the counter has not been added yet,
816 * we need to retry the smp call.
817 */
818 if (ctx->is_active && list_empty(&counter->list_entry)) {
819 spin_unlock_irq(&ctx->lock);
820 goto retry;
821 }
822
823 /*
824 * The lock prevents this context from being scheduled in, so we
825 * can add the counter safely if the call above did not
826 * succeed.
827 */
828 if (list_empty(&counter->list_entry))
829 add_counter_to_ctx(counter, ctx);
830 spin_unlock_irq(&ctx->lock);
831 }
832
833 /*
834 * Cross CPU call to enable a performance counter
835 */
836 static void __perf_counter_enable(void *info)
837 {
838 struct perf_counter *counter = info;
839 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
840 struct perf_counter_context *ctx = counter->ctx;
841 struct perf_counter *leader = counter->group_leader;
842 int err;
843
844 /*
845 * If this is a per-task counter, need to check whether this
846 * counter's task is the current task on this cpu.
847 */
848 if (ctx->task && cpuctx->task_ctx != ctx) {
849 if (cpuctx->task_ctx || ctx->task != current)
850 return;
851 cpuctx->task_ctx = ctx;
852 }
853
854 spin_lock(&ctx->lock);
855 ctx->is_active = 1;
856 update_context_time(ctx);
857
858 if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
859 goto unlock;
860 counter->state = PERF_COUNTER_STATE_INACTIVE;
861 counter->tstamp_enabled = ctx->time - counter->total_time_enabled;
862
863 /*
864 * If the counter is in a group and isn't the group leader,
865 * then don't put it on unless the group is on.
866 */
867 if (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE)
868 goto unlock;
869
870 if (!group_can_go_on(counter, cpuctx, 1)) {
871 err = -EEXIST;
872 } else {
873 perf_disable();
874 if (counter == leader)
875 err = group_sched_in(counter, cpuctx, ctx,
876 smp_processor_id());
877 else
878 err = counter_sched_in(counter, cpuctx, ctx,
879 smp_processor_id());
880 perf_enable();
881 }
882
883 if (err) {
884 /*
885 * If this counter can't go on and it's part of a
886 * group, then the whole group has to come off.
887 */
888 if (leader != counter)
889 group_sched_out(leader, cpuctx, ctx);
890 if (leader->attr.pinned) {
891 update_group_times(leader);
892 leader->state = PERF_COUNTER_STATE_ERROR;
893 }
894 }
895
896 unlock:
897 spin_unlock(&ctx->lock);
898 }
899
900 /*
901 * Enable a counter.
902 *
903 * If counter->ctx is a cloned context, callers must make sure that
904 * every task struct that counter->ctx->task could possibly point to
905 * remains valid. This condition is satisfied when called through
906 * perf_counter_for_each_child or perf_counter_for_each as described
907 * for perf_counter_disable.
908 */
909 static void perf_counter_enable(struct perf_counter *counter)
910 {
911 struct perf_counter_context *ctx = counter->ctx;
912 struct task_struct *task = ctx->task;
913
914 if (!task) {
915 /*
916 * Enable the counter on the cpu that it's on
917 */
918 smp_call_function_single(counter->cpu, __perf_counter_enable,
919 counter, 1);
920 return;
921 }
922
923 spin_lock_irq(&ctx->lock);
924 if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
925 goto out;
926
927 /*
928 * If the counter is in error state, clear that first.
929 * That way, if we see the counter in error state below, we
930 * know that it has gone back into error state, as distinct
931 * from the task having been scheduled away before the
932 * cross-call arrived.
933 */
934 if (counter->state == PERF_COUNTER_STATE_ERROR)
935 counter->state = PERF_COUNTER_STATE_OFF;
936
937 retry:
938 spin_unlock_irq(&ctx->lock);
939 task_oncpu_function_call(task, __perf_counter_enable, counter);
940
941 spin_lock_irq(&ctx->lock);
942
943 /*
944 * If the context is active and the counter is still off,
945 * we need to retry the cross-call.
946 */
947 if (ctx->is_active && counter->state == PERF_COUNTER_STATE_OFF)
948 goto retry;
949
950 /*
951 * Since we have the lock this context can't be scheduled
952 * in, so we can change the state safely.
953 */
954 if (counter->state == PERF_COUNTER_STATE_OFF) {
955 counter->state = PERF_COUNTER_STATE_INACTIVE;
956 counter->tstamp_enabled =
957 ctx->time - counter->total_time_enabled;
958 }
959 out:
960 spin_unlock_irq(&ctx->lock);
961 }
962
963 static int perf_counter_refresh(struct perf_counter *counter, int refresh)
964 {
965 /*
966 * not supported on inherited counters
967 */
968 if (counter->attr.inherit)
969 return -EINVAL;
970
971 atomic_add(refresh, &counter->event_limit);
972 perf_counter_enable(counter);
973
974 return 0;
975 }
976
977 void __perf_counter_sched_out(struct perf_counter_context *ctx,
978 struct perf_cpu_context *cpuctx)
979 {
980 struct perf_counter *counter;
981
982 spin_lock(&ctx->lock);
983 ctx->is_active = 0;
984 if (likely(!ctx->nr_counters))
985 goto out;
986 update_context_time(ctx);
987
988 perf_disable();
989 if (ctx->nr_active) {
990 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
991 if (counter != counter->group_leader)
992 counter_sched_out(counter, cpuctx, ctx);
993 else
994 group_sched_out(counter, cpuctx, ctx);
995 }
996 }
997 perf_enable();
998 out:
999 spin_unlock(&ctx->lock);
1000 }
1001
1002 /*
1003 * Test whether two contexts are equivalent, i.e. whether they
1004 * have both been cloned from the same version of the same context
1005 * and they both have the same number of enabled counters.
1006 * If the number of enabled counters is the same, then the set
1007 * of enabled counters should be the same, because these are both
1008 * inherited contexts, therefore we can't access individual counters
1009 * in them directly with an fd; we can only enable/disable all
1010 * counters via prctl, or enable/disable all counters in a family
1011 * via ioctl, which will have the same effect on both contexts.
1012 */
1013 static int context_equiv(struct perf_counter_context *ctx1,
1014 struct perf_counter_context *ctx2)
1015 {
1016 return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
1017 && ctx1->parent_gen == ctx2->parent_gen
1018 && !ctx1->pin_count && !ctx2->pin_count;
1019 }
1020
1021 static void __perf_counter_read(void *counter);
1022
1023 static void __perf_counter_sync_stat(struct perf_counter *counter,
1024 struct perf_counter *next_counter)
1025 {
1026 u64 value;
1027
1028 if (!counter->attr.inherit_stat)
1029 return;
1030
1031 /*
1032 * Update the counter value, we cannot use perf_counter_read()
1033 * because we're in the middle of a context switch and have IRQs
1034 * disabled, which upsets smp_call_function_single(), however
1035 * we know the counter must be on the current CPU, therefore we
1036 * don't need to use it.
1037 */
1038 switch (counter->state) {
1039 case PERF_COUNTER_STATE_ACTIVE:
1040 __perf_counter_read(counter);
1041 break;
1042
1043 case PERF_COUNTER_STATE_INACTIVE:
1044 update_counter_times(counter);
1045 break;
1046
1047 default:
1048 break;
1049 }
1050
1051 /*
1052 * In order to keep per-task stats reliable we need to flip the counter
1053 * values when we flip the contexts.
1054 */
1055 value = atomic64_read(&next_counter->count);
1056 value = atomic64_xchg(&counter->count, value);
1057 atomic64_set(&next_counter->count, value);
1058
1059 swap(counter->total_time_enabled, next_counter->total_time_enabled);
1060 swap(counter->total_time_running, next_counter->total_time_running);
1061
1062 /*
1063 * Since we swizzled the values, update the user visible data too.
1064 */
1065 perf_counter_update_userpage(counter);
1066 perf_counter_update_userpage(next_counter);
1067 }
1068
1069 #define list_next_entry(pos, member) \
1070 list_entry(pos->member.next, typeof(*pos), member)
1071
1072 static void perf_counter_sync_stat(struct perf_counter_context *ctx,
1073 struct perf_counter_context *next_ctx)
1074 {
1075 struct perf_counter *counter, *next_counter;
1076
1077 if (!ctx->nr_stat)
1078 return;
1079
1080 counter = list_first_entry(&ctx->event_list,
1081 struct perf_counter, event_entry);
1082
1083 next_counter = list_first_entry(&next_ctx->event_list,
1084 struct perf_counter, event_entry);
1085
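/*
 * Walk both (cloned) event lists in lock step; because the contexts are
 * equivalent clones, the counters pair up positionally. Pairs without
 * inherit_stat set are skipped in __perf_counter_sync_stat().
 */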
1086 while (&counter->event_entry != &ctx->event_list &&
1087 &next_counter->event_entry != &next_ctx->event_list) {
1088
1089 __perf_counter_sync_stat(counter, next_counter);
1090
1091 counter = list_next_entry(counter, event_entry);
1092 next_counter = list_next_entry(next_counter, event_entry);
1093 }
1094 }
1095
1096 /*
1097 * Called from scheduler to remove the counters of the current task,
1098 * with interrupts disabled.
1099 *
1100 * We stop each counter and update the counter value in counter->count.
1101 *
1102 * This does not protect us against NMI, but disable()
1103 * sets the disabled bit in the control field of counter _before_
1104 * accessing the counter control register. If a NMI hits, then it will
1105 * not restart the counter.
1106 */
1107 void perf_counter_task_sched_out(struct task_struct *task,
1108 struct task_struct *next, int cpu)
1109 {
1110 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
1111 struct perf_counter_context *ctx = task->perf_counter_ctxp;
1112 struct perf_counter_context *next_ctx;
1113 struct perf_counter_context *parent;
1114 struct pt_regs *regs;
1115 int do_switch = 1;
1116
1117 regs = task_pt_regs(task);
1118 perf_swcounter_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, regs, 0);
1119
1120 if (likely(!ctx || !cpuctx->task_ctx))
1121 return;
1122
1123 update_context_time(ctx);
1124
1125 rcu_read_lock();
1126 parent = rcu_dereference(ctx->parent_ctx);
1127 next_ctx = next->perf_counter_ctxp;
1128 if (parent && next_ctx &&
1129 rcu_dereference(next_ctx->parent_ctx) == parent) {
1130 /*
1131 * Looks like the two contexts are clones, so we might be
1132 * able to optimize the context switch. We lock both
1133 * contexts and check that they are clones under the
1134 * lock (including re-checking that neither has been
1135 * uncloned in the meantime). It doesn't matter which
1136 * order we take the locks because no other cpu could
1137 * be trying to lock both of these tasks.
1138 */
1139 spin_lock(&ctx->lock);
1140 spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
1141 if (context_equiv(ctx, next_ctx)) {
1142 /*
1143 * XXX do we need a memory barrier of sorts
1144 * wrt to rcu_dereference() of perf_counter_ctxp
1145 */
1146 task->perf_counter_ctxp = next_ctx;
1147 next->perf_counter_ctxp = ctx;
1148 ctx->task = next;
1149 next_ctx->task = task;
1150 do_switch = 0;
1151
1152 perf_counter_sync_stat(ctx, next_ctx);
1153 }
1154 spin_unlock(&next_ctx->lock);
1155 spin_unlock(&ctx->lock);
1156 }
1157 rcu_read_unlock();
1158
1159 if (do_switch) {
1160 __perf_counter_sched_out(ctx, cpuctx);
1161 cpuctx->task_ctx = NULL;
1162 }
1163 }
1164
1165 /*
1166 * Called with IRQs disabled
1167 */
1168 static void __perf_counter_task_sched_out(struct perf_counter_context *ctx)
1169 {
1170 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1171
1172 if (!cpuctx->task_ctx)
1173 return;
1174
1175 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
1176 return;
1177
1178 __perf_counter_sched_out(ctx, cpuctx);
1179 cpuctx->task_ctx = NULL;
1180 }
1181
1182 /*
1183 * Called with IRQs disabled
1184 */
1185 static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx)
1186 {
1187 __perf_counter_sched_out(&cpuctx->ctx, cpuctx);
1188 }
1189
1190 static void
1191 __perf_counter_sched_in(struct perf_counter_context *ctx,
1192 struct perf_cpu_context *cpuctx, int cpu)
1193 {
1194 struct perf_counter *counter;
1195 int can_add_hw = 1;
1196
1197 spin_lock(&ctx->lock);
1198 ctx->is_active = 1;
1199 if (likely(!ctx->nr_counters))
1200 goto out;
1201
1202 ctx->timestamp = perf_clock();
1203
1204 perf_disable();
1205
1206 /*
1207 * First go through the list and put on any pinned groups
1208 * in order to give them the best chance of going on.
1209 */
1210 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
1211 if (counter->state <= PERF_COUNTER_STATE_OFF ||
1212 !counter->attr.pinned)
1213 continue;
1214 if (counter->cpu != -1 && counter->cpu != cpu)
1215 continue;
1216
1217 if (counter != counter->group_leader)
1218 counter_sched_in(counter, cpuctx, ctx, cpu);
1219 else {
1220 if (group_can_go_on(counter, cpuctx, 1))
1221 group_sched_in(counter, cpuctx, ctx, cpu);
1222 }
1223
1224 /*
1225 * If this pinned group hasn't been scheduled,
1226 * put it in error state.
1227 */
1228 if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
1229 update_group_times(counter);
1230 counter->state = PERF_COUNTER_STATE_ERROR;
1231 }
1232 }
1233
1234 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
1235 /*
1236 * Ignore counters in OFF or ERROR state, and
1237 * ignore pinned counters since we did them already.
1238 */
1239 if (counter->state <= PERF_COUNTER_STATE_OFF ||
1240 counter->attr.pinned)
1241 continue;
1242
1243 /*
1244 * Listen to the 'cpu' scheduling filter constraint
1245 * of counters:
1246 */
1247 if (counter->cpu != -1 && counter->cpu != cpu)
1248 continue;
1249
1250 if (counter != counter->group_leader) {
1251 if (counter_sched_in(counter, cpuctx, ctx, cpu))
1252 can_add_hw = 0;
1253 } else {
1254 if (group_can_go_on(counter, cpuctx, can_add_hw)) {
1255 if (group_sched_in(counter, cpuctx, ctx, cpu))
1256 can_add_hw = 0;
1257 }
1258 }
1259 }
1260 perf_enable();
1261 out:
1262 spin_unlock(&ctx->lock);
1263 }
1264
1265 /*
1266 * Called from scheduler to add the counters of the current task
1267 * with interrupts disabled.
1268 *
1269 * We restore the counter value and then enable it.
1270 *
1271 * This does not protect us against NMI, but enable()
1272 * sets the enabled bit in the control field of counter _before_
1273 * accessing the counter control register. If a NMI hits, then it will
1274 * keep the counter running.
1275 */
1276 void perf_counter_task_sched_in(struct task_struct *task, int cpu)
1277 {
1278 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
1279 struct perf_counter_context *ctx = task->perf_counter_ctxp;
1280
1281 if (likely(!ctx))
1282 return;
1283 if (cpuctx->task_ctx == ctx)
1284 return;
1285 __perf_counter_sched_in(ctx, cpuctx, cpu);
1286 cpuctx->task_ctx = ctx;
1287 }
1288
1289 static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
1290 {
1291 struct perf_counter_context *ctx = &cpuctx->ctx;
1292
1293 __perf_counter_sched_in(ctx, cpuctx, cpu);
1294 }
1295
1296 #define MAX_INTERRUPTS (~0ULL)
1297
1298 static void perf_log_throttle(struct perf_counter *counter, int enable);
1299 static void perf_log_period(struct perf_counter *counter, u64 period);
1300
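/*
 * Re-derive the sample period from the observed interrupt rate:
 *
 *	period = rate * current_period / attr.sample_freq
 *
 * and then move hwc->sample_period only an eighth of the way towards that
 * target, so the period adapts smoothly rather than oscillating.
 *
 * Illustrative numbers only: with sample_freq = 1000, a current period of
 * 100000 and twice the expected rate, the target period is 200000 and the
 * period is nudged up by roughly 12500 on this adjustment.
 */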
1301 static void perf_adjust_period(struct perf_counter *counter, u64 events)
1302 {
1303 struct hw_perf_counter *hwc = &counter->hw;
1304 u64 period, sample_period;
1305 s64 delta;
1306
1307 events *= hwc->sample_period;
1308 period = div64_u64(events, counter->attr.sample_freq);
1309
1310 delta = (s64)(period - hwc->sample_period);
1311 delta = (delta + 7) / 8; /* low pass filter */
1312
1313 sample_period = hwc->sample_period + delta;
1314
1315 if (!sample_period)
1316 sample_period = 1;
1317
1318 perf_log_period(counter, sample_period);
1319
1320 hwc->sample_period = sample_period;
1321 }
1322
1323 static void perf_ctx_adjust_freq(struct perf_counter_context *ctx)
1324 {
1325 struct perf_counter *counter;
1326 struct hw_perf_counter *hwc;
1327 u64 interrupts, freq;
1328
1329 spin_lock(&ctx->lock);
1330 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
1331 if (counter->state != PERF_COUNTER_STATE_ACTIVE)
1332 continue;
1333
1334 hwc = &counter->hw;
1335
1336 interrupts = hwc->interrupts;
1337 hwc->interrupts = 0;
1338
1339 /*
1340 * unthrottle counters on the tick
1341 */
1342 if (interrupts == MAX_INTERRUPTS) {
1343 perf_log_throttle(counter, 1);
1344 counter->pmu->unthrottle(counter);
1345 interrupts = 2*sysctl_perf_counter_sample_rate/HZ;
1346 }
1347
1348 if (!counter->attr.freq || !counter->attr.sample_freq)
1349 continue;
1350
1351 /*
1352 * if the specified freq < HZ then we need to skip ticks
1353 */
1354 if (counter->attr.sample_freq < HZ) {
1355 freq = counter->attr.sample_freq;
1356
1357 hwc->freq_count += freq;
1358 hwc->freq_interrupts += interrupts;
1359
1360 if (hwc->freq_count < HZ)
1361 continue;
1362
1363 interrupts = hwc->freq_interrupts;
1364 hwc->freq_interrupts = 0;
1365 hwc->freq_count -= HZ;
1366 } else
1367 freq = HZ;
1368
1369 perf_adjust_period(counter, freq * interrupts);
1370
1371 /*
1372 * In order to avoid being stalled by an (accidental) huge
1373 * sample period, force reset the sample period if we didn't
1374 * get any events in this freq period.
1375 */
1376 if (!interrupts) {
1377 perf_disable();
1378 counter->pmu->disable(counter);
1379 atomic64_set(&hwc->period_left, 0);
1380 counter->pmu->enable(counter);
1381 perf_enable();
1382 }
1383 }
1384 spin_unlock(&ctx->lock);
1385 }
1386
1387 /*
1388 * Round-robin a context's counters:
1389 */
1390 static void rotate_ctx(struct perf_counter_context *ctx)
1391 {
1392 struct perf_counter *counter;
1393
1394 if (!ctx->nr_counters)
1395 return;
1396
1397 spin_lock(&ctx->lock);
1398 /*
1399 * Rotate the first entry last (works just fine for group counters too):
1400 */
1401 perf_disable();
1402 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
1403 list_move_tail(&counter->list_entry, &ctx->counter_list);
1404 break;
1405 }
1406 perf_enable();
1407
1408 spin_unlock(&ctx->lock);
1409 }
1410
1411 void perf_counter_task_tick(struct task_struct *curr, int cpu)
1412 {
1413 struct perf_cpu_context *cpuctx;
1414 struct perf_counter_context *ctx;
1415
1416 if (!atomic_read(&nr_counters))
1417 return;
1418
1419 cpuctx = &per_cpu(perf_cpu_context, cpu);
1420 ctx = curr->perf_counter_ctxp;
1421
1422 perf_ctx_adjust_freq(&cpuctx->ctx);
1423 if (ctx)
1424 perf_ctx_adjust_freq(ctx);
1425
1426 perf_counter_cpu_sched_out(cpuctx);
1427 if (ctx)
1428 __perf_counter_task_sched_out(ctx);
1429
1430 rotate_ctx(&cpuctx->ctx);
1431 if (ctx)
1432 rotate_ctx(ctx);
1433
1434 perf_counter_cpu_sched_in(cpuctx, cpu);
1435 if (ctx)
1436 perf_counter_task_sched_in(curr, cpu);
1437 }
1438
1439 /*
1440 * Enable all of a task's counters that have been marked enable-on-exec.
1441 * This expects task == current.
1442 */
1443 static void perf_counter_enable_on_exec(struct task_struct *task)
1444 {
1445 struct perf_counter_context *ctx;
1446 struct perf_counter *counter;
1447 unsigned long flags;
1448 int enabled = 0;
1449
1450 local_irq_save(flags);
1451 ctx = task->perf_counter_ctxp;
1452 if (!ctx || !ctx->nr_counters)
1453 goto out;
1454
1455 __perf_counter_task_sched_out(ctx);
1456
1457 spin_lock(&ctx->lock);
1458
1459 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
1460 if (!counter->attr.enable_on_exec)
1461 continue;
1462 counter->attr.enable_on_exec = 0;
1463 if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
1464 continue;
1465 counter->state = PERF_COUNTER_STATE_INACTIVE;
1466 counter->tstamp_enabled =
1467 ctx->time - counter->total_time_enabled;
1468 enabled = 1;
1469 }
1470
1471 /*
1472 * Unclone this context if we enabled any counter.
1473 */
1474 if (enabled)
1475 unclone_ctx(ctx);
1476
1477 spin_unlock(&ctx->lock);
1478
1479 perf_counter_task_sched_in(task, smp_processor_id());
1480 out:
1481 local_irq_restore(flags);
1482 }
1483
1484 /*
1485 * Cross CPU call to read the hardware counter
1486 */
1487 static void __perf_counter_read(void *info)
1488 {
1489 struct perf_counter *counter = info;
1490 struct perf_counter_context *ctx = counter->ctx;
1491 unsigned long flags;
1492
1493 local_irq_save(flags);
1494 if (ctx->is_active)
1495 update_context_time(ctx);
1496 counter->pmu->read(counter);
1497 update_counter_times(counter);
1498 local_irq_restore(flags);
1499 }
1500
1501 static u64 perf_counter_read(struct perf_counter *counter)
1502 {
1503 /*
1504 * If counter is enabled and currently active on a CPU, update the
1505 * value in the counter structure:
1506 */
1507 if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
1508 smp_call_function_single(counter->oncpu,
1509 __perf_counter_read, counter, 1);
1510 } else if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
1511 update_counter_times(counter);
1512 }
1513
1514 return atomic64_read(&counter->count);
1515 }
1516
1517 /*
1518 * Initialize the perf_counter context in a task_struct:
1519 */
1520 static void
1521 __perf_counter_init_context(struct perf_counter_context *ctx,
1522 struct task_struct *task)
1523 {
1524 memset(ctx, 0, sizeof(*ctx));
1525 spin_lock_init(&ctx->lock);
1526 mutex_init(&ctx->mutex);
1527 INIT_LIST_HEAD(&ctx->counter_list);
1528 INIT_LIST_HEAD(&ctx->event_list);
1529 atomic_set(&ctx->refcount, 1);
1530 ctx->task = task;
1531 }
1532
1533 static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
1534 {
1535 struct perf_counter_context *ctx;
1536 struct perf_cpu_context *cpuctx;
1537 struct task_struct *task;
1538 unsigned long flags;
1539 int err;
1540
1541 /*
1542 * If cpu is not a wildcard then this is a percpu counter:
1543 */
1544 if (cpu != -1) {
1545 /* Must be root to operate on a CPU counter: */
1546 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
1547 return ERR_PTR(-EACCES);
1548
1549 if (cpu < 0 || cpu > num_possible_cpus())
1550 return ERR_PTR(-EINVAL);
1551
1552 /*
1553 * We could be clever and allow to attach a counter to an
1554 * offline CPU and activate it when the CPU comes up, but
1555 * that's for later.
1556 */
1557 if (!cpu_isset(cpu, cpu_online_map))
1558 return ERR_PTR(-ENODEV);
1559
1560 cpuctx = &per_cpu(perf_cpu_context, cpu);
1561 ctx = &cpuctx->ctx;
1562 get_ctx(ctx);
1563
1564 return ctx;
1565 }
1566
1567 rcu_read_lock();
1568 if (!pid)
1569 task = current;
1570 else
1571 task = find_task_by_vpid(pid);
1572 if (task)
1573 get_task_struct(task);
1574 rcu_read_unlock();
1575
1576 if (!task)
1577 return ERR_PTR(-ESRCH);
1578
1579 /*
1580 * Can't attach counters to a dying task.
1581 */
1582 err = -ESRCH;
1583 if (task->flags & PF_EXITING)
1584 goto errout;
1585
1586 /* Reuse ptrace permission checks for now. */
1587 err = -EACCES;
1588 if (!ptrace_may_access(task, PTRACE_MODE_READ))
1589 goto errout;
1590
1591 retry:
1592 ctx = perf_lock_task_context(task, &flags);
1593 if (ctx) {
1594 unclone_ctx(ctx);
1595 spin_unlock_irqrestore(&ctx->lock, flags);
1596 }
1597
1598 if (!ctx) {
1599 ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL);
1600 err = -ENOMEM;
1601 if (!ctx)
1602 goto errout;
1603 __perf_counter_init_context(ctx, task);
1604 get_ctx(ctx);
1605 if (cmpxchg(&task->perf_counter_ctxp, NULL, ctx)) {
1606 /*
1607 * We raced with some other task; use
1608 * the context they set.
1609 */
1610 kfree(ctx);
1611 goto retry;
1612 }
1613 get_task_struct(task);
1614 }
1615
1616 put_task_struct(task);
1617 return ctx;
1618
1619 errout:
1620 put_task_struct(task);
1621 return ERR_PTR(err);
1622 }
1623
1624 static void free_counter_rcu(struct rcu_head *head)
1625 {
1626 struct perf_counter *counter;
1627
1628 counter = container_of(head, struct perf_counter, rcu_head);
1629 if (counter->ns)
1630 put_pid_ns(counter->ns);
1631 kfree(counter);
1632 }
1633
1634 static void perf_pending_sync(struct perf_counter *counter);
1635
1636 static void free_counter(struct perf_counter *counter)
1637 {
1638 perf_pending_sync(counter);
1639
1640 if (!counter->parent) {
1641 atomic_dec(&nr_counters);
1642 if (counter->attr.mmap)
1643 atomic_dec(&nr_mmap_counters);
1644 if (counter->attr.comm)
1645 atomic_dec(&nr_comm_counters);
1646 }
1647
1648 if (counter->destroy)
1649 counter->destroy(counter);
1650
1651 put_ctx(counter->ctx);
1652 call_rcu(&counter->rcu_head, free_counter_rcu);
1653 }
1654
1655 /*
1656 * Called when the last reference to the file is gone.
1657 */
1658 static int perf_release(struct inode *inode, struct file *file)
1659 {
1660 struct perf_counter *counter = file->private_data;
1661 struct perf_counter_context *ctx = counter->ctx;
1662
1663 file->private_data = NULL;
1664
1665 WARN_ON_ONCE(ctx->parent_ctx);
1666 mutex_lock(&ctx->mutex);
1667 perf_counter_remove_from_context(counter);
1668 mutex_unlock(&ctx->mutex);
1669
1670 mutex_lock(&counter->owner->perf_counter_mutex);
1671 list_del_init(&counter->owner_entry);
1672 mutex_unlock(&counter->owner->perf_counter_mutex);
1673 put_task_struct(counter->owner);
1674
1675 free_counter(counter);
1676
1677 return 0;
1678 }
1679
1680 /*
1681 * Read the performance counter - simple non blocking version for now
1682 */
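/*
 * The record returned to user space is an array of u64s: the counter value,
 * optionally followed by the enabled time, the running time and the counter
 * ID, depending on which PERF_FORMAT_* bits are set in attr.read_format.
 */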
1683 static ssize_t
1684 perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
1685 {
1686 u64 values[4];
1687 int n;
1688
1689 /*
1690 * Return end-of-file for a read on a counter that is in
1691 * error state (i.e. because it was pinned but it couldn't be
1692 * scheduled on to the CPU at some point).
1693 */
1694 if (counter->state == PERF_COUNTER_STATE_ERROR)
1695 return 0;
1696
1697 WARN_ON_ONCE(counter->ctx->parent_ctx);
1698 mutex_lock(&counter->child_mutex);
1699 values[0] = perf_counter_read(counter);
1700 n = 1;
1701 if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1702 values[n++] = counter->total_time_enabled +
1703 atomic64_read(&counter->child_total_time_enabled);
1704 if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1705 values[n++] = counter->total_time_running +
1706 atomic64_read(&counter->child_total_time_running);
1707 if (counter->attr.read_format & PERF_FORMAT_ID)
1708 values[n++] = counter->id;
1709 mutex_unlock(&counter->child_mutex);
1710
1711 if (count < n * sizeof(u64))
1712 return -EINVAL;
1713 count = n * sizeof(u64);
1714
1715 if (copy_to_user(buf, values, count))
1716 return -EFAULT;
1717
1718 return count;
1719 }
1720
1721 static ssize_t
1722 perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
1723 {
1724 struct perf_counter *counter = file->private_data;
1725
1726 return perf_read_hw(counter, buf, count);
1727 }
1728
1729 static unsigned int perf_poll(struct file *file, poll_table *wait)
1730 {
1731 struct perf_counter *counter = file->private_data;
1732 struct perf_mmap_data *data;
1733 unsigned int events = POLLHUP;
1734
1735 rcu_read_lock();
1736 data = rcu_dereference(counter->data);
1737 if (data)
1738 events = atomic_xchg(&data->poll, 0);
1739 rcu_read_unlock();
1740
1741 poll_wait(file, &counter->waitq, wait);
1742
1743 return events;
1744 }
1745
1746 static void perf_counter_reset(struct perf_counter *counter)
1747 {
1748 (void)perf_counter_read(counter);
1749 atomic64_set(&counter->count, 0);
1750 perf_counter_update_userpage(counter);
1751 }
1752
1753 /*
1754 * Holding the top-level counter's child_mutex means that any
1755 * descendant process that has inherited this counter will block
1756 * in sync_child_counter if it goes to exit, thus satisfying the
1757 * task existence requirements of perf_counter_enable/disable.
1758 */
1759 static void perf_counter_for_each_child(struct perf_counter *counter,
1760 void (*func)(struct perf_counter *))
1761 {
1762 struct perf_counter *child;
1763
1764 WARN_ON_ONCE(counter->ctx->parent_ctx);
1765 mutex_lock(&counter->child_mutex);
1766 func(counter);
1767 list_for_each_entry(child, &counter->child_list, child_list)
1768 func(child);
1769 mutex_unlock(&counter->child_mutex);
1770 }
1771
1772 static void perf_counter_for_each(struct perf_counter *counter,
1773 void (*func)(struct perf_counter *))
1774 {
1775 struct perf_counter_context *ctx = counter->ctx;
1776 struct perf_counter *sibling;
1777
1778 WARN_ON_ONCE(ctx->parent_ctx);
1779 mutex_lock(&ctx->mutex);
1780 counter = counter->group_leader;
1781
1782 perf_counter_for_each_child(counter, func);
1783 func(counter);
1784 list_for_each_entry(sibling, &counter->sibling_list, list_entry)
1785 perf_counter_for_each_child(sibling, func);
1786 mutex_unlock(&ctx->mutex);
1787 }
1788
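/*
 * PERF_COUNTER_IOC_PERIOD takes a pointer to a u64 holding the new sample
 * period (or, for freq-based counters, the new sample frequency), e.g.:
 *
 *	u64 period = 4000;
 *	ioctl(counter_fd, PERF_COUNTER_IOC_PERIOD, &period);
 */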
1789 static int perf_counter_period(struct perf_counter *counter, u64 __user *arg)
1790 {
1791 struct perf_counter_context *ctx = counter->ctx;
1792 unsigned long size;
1793 int ret = 0;
1794 u64 value;
1795
1796 if (!counter->attr.sample_period)
1797 return -EINVAL;
1798
1799 size = copy_from_user(&value, arg, sizeof(value));
1800 if (size != sizeof(value))
1801 return -EFAULT;
1802
1803 if (!value)
1804 return -EINVAL;
1805
1806 spin_lock_irq(&ctx->lock);
1807 if (counter->attr.freq) {
1808 if (value > sysctl_perf_counter_sample_rate) {
1809 ret = -EINVAL;
1810 goto unlock;
1811 }
1812
1813 counter->attr.sample_freq = value;
1814 } else {
1815 perf_log_period(counter, value);
1816
1817 counter->attr.sample_period = value;
1818 counter->hw.sample_period = value;
1819 }
1820 unlock:
1821 spin_unlock_irq(&ctx->lock);
1822
1823 return ret;
1824 }
1825
1826 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1827 {
1828 struct perf_counter *counter = file->private_data;
1829 void (*func)(struct perf_counter *);
1830 u32 flags = arg;
1831
1832 switch (cmd) {
1833 case PERF_COUNTER_IOC_ENABLE:
1834 func = perf_counter_enable;
1835 break;
1836 case PERF_COUNTER_IOC_DISABLE:
1837 func = perf_counter_disable;
1838 break;
1839 case PERF_COUNTER_IOC_RESET:
1840 func = perf_counter_reset;
1841 break;
1842
1843 case PERF_COUNTER_IOC_REFRESH:
1844 return perf_counter_refresh(counter, arg);
1845
1846 case PERF_COUNTER_IOC_PERIOD:
1847 return perf_counter_period(counter, (u64 __user *)arg);
1848
1849 default:
1850 return -ENOTTY;
1851 }
1852
1853 if (flags & PERF_IOC_FLAG_GROUP)
1854 perf_counter_for_each(counter, func);
1855 else
1856 perf_counter_for_each_child(counter, func);
1857
1858 return 0;
1859 }
1860
1861 int perf_counter_task_enable(void)
1862 {
1863 struct perf_counter *counter;
1864
1865 mutex_lock(&current->perf_counter_mutex);
1866 list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
1867 perf_counter_for_each_child(counter, perf_counter_enable);
1868 mutex_unlock(&current->perf_counter_mutex);
1869
1870 return 0;
1871 }
1872
1873 int perf_counter_task_disable(void)
1874 {
1875 struct perf_counter *counter;
1876
1877 mutex_lock(&current->perf_counter_mutex);
1878 list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
1879 perf_counter_for_each_child(counter, perf_counter_disable);
1880 mutex_unlock(&current->perf_counter_mutex);
1881
1882 return 0;
1883 }
1884
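/*
 * Hardware counter index as published in the mmap control page: zero means
 * the counter is not currently on the PMU; otherwise it is the hardware index
 * biased by 1 - PERF_COUNTER_INDEX_OFFSET so that zero stays reserved.
 */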
1885 static int perf_counter_index(struct perf_counter *counter)
1886 {
1887 if (counter->state != PERF_COUNTER_STATE_ACTIVE)
1888 return 0;
1889
1890 return counter->hw.idx + 1 - PERF_COUNTER_INDEX_OFFSET;
1891 }
1892
1893 /*
1894 * Callers need to ensure there can be no nesting of this function, otherwise
1895 * the seqlock logic goes bad. We can not serialize this because the arch
1896 * code calls this from NMI context.
1897 */
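/*
 * User space pairs with the ->lock increments below by re-checking the lock
 * word around its reads, roughly (a sketch of the expected reader side):
 *
 *	do {
 *		seq = pg->lock;
 *		barrier();
 *		... read index, offset, time_enabled, time_running ...
 *		barrier();
 *	} while (pg->lock != seq || (seq & 1));
 */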
1898 void perf_counter_update_userpage(struct perf_counter *counter)
1899 {
1900 struct perf_counter_mmap_page *userpg;
1901 struct perf_mmap_data *data;
1902
1903 rcu_read_lock();
1904 data = rcu_dereference(counter->data);
1905 if (!data)
1906 goto unlock;
1907
1908 userpg = data->user_page;
1909
1910 /*
1911 * Disable preemption so as to not let the corresponding user-space
1912 * spin too long if we get preempted.
1913 */
1914 preempt_disable();
1915 ++userpg->lock;
1916 barrier();
1917 userpg->index = perf_counter_index(counter);
1918 userpg->offset = atomic64_read(&counter->count);
1919 if (counter->state == PERF_COUNTER_STATE_ACTIVE)
1920 userpg->offset -= atomic64_read(&counter->hw.prev_count);
1921
1922 userpg->time_enabled = counter->total_time_enabled +
1923 atomic64_read(&counter->child_total_time_enabled);
1924
1925 userpg->time_running = counter->total_time_running +
1926 atomic64_read(&counter->child_total_time_running);
1927
1928 barrier();
1929 ++userpg->lock;
1930 preempt_enable();
1931 unlock:
1932 rcu_read_unlock();
1933 }
1934
1935 static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1936 {
1937 struct perf_counter *counter = vma->vm_file->private_data;
1938 struct perf_mmap_data *data;
1939 int ret = VM_FAULT_SIGBUS;
1940
1941 if (vmf->flags & FAULT_FLAG_MKWRITE) {
1942 if (vmf->pgoff == 0)
1943 ret = 0;
1944 return ret;
1945 }
1946
1947 rcu_read_lock();
1948 data = rcu_dereference(counter->data);
1949 if (!data)
1950 goto unlock;
1951
1952 if (vmf->pgoff == 0) {
1953 vmf->page = virt_to_page(data->user_page);
1954 } else {
1955 int nr = vmf->pgoff - 1;
1956
1957 if ((unsigned)nr > data->nr_pages)
1958 goto unlock;
1959
1960 if (vmf->flags & FAULT_FLAG_WRITE)
1961 goto unlock;
1962
1963 vmf->page = virt_to_page(data->data_pages[nr]);
1964 }
1965
1966 get_page(vmf->page);
1967 vmf->page->mapping = vma->vm_file->f_mapping;
1968 vmf->page->index = vmf->pgoff;
1969
1970 ret = 0;
1971 unlock:
1972 rcu_read_unlock();
1973
1974 return ret;
1975 }
1976
1977 static int perf_mmap_data_alloc(struct perf_counter *counter, int nr_pages)
1978 {
1979 struct perf_mmap_data *data;
1980 unsigned long size;
1981 int i;
1982
1983 WARN_ON(atomic_read(&counter->mmap_count));
1984
1985 size = sizeof(struct perf_mmap_data);
1986 size += nr_pages * sizeof(void *);
1987
1988 data = kzalloc(size, GFP_KERNEL);
1989 if (!data)
1990 goto fail;
1991
1992 data->user_page = (void *)get_zeroed_page(GFP_KERNEL);
1993 if (!data->user_page)
1994 goto fail_user_page;
1995
1996 for (i = 0; i < nr_pages; i++) {
1997 data->data_pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
1998 if (!data->data_pages[i])
1999 goto fail_data_pages;
2000 }
2001
2002 data->nr_pages = nr_pages;
2003 atomic_set(&data->lock, -1);
2004
2005 rcu_assign_pointer(counter->data, data);
2006
2007 return 0;
2008
2009 fail_data_pages:
2010 for (i--; i >= 0; i--)
2011 free_page((unsigned long)data->data_pages[i]);
2012
2013 free_page((unsigned long)data->user_page);
2014
2015 fail_user_page:
2016 kfree(data);
2017
2018 fail:
2019 return -ENOMEM;
2020 }
2021
2022 static void perf_mmap_free_page(unsigned long addr)
2023 {
2024 struct page *page = virt_to_page(addr);
2025
2026 page->mapping = NULL;
2027 __free_page(page);
2028 }
2029
2030 static void __perf_mmap_data_free(struct rcu_head *rcu_head)
2031 {
2032 struct perf_mmap_data *data;
2033 int i;
2034
2035 data = container_of(rcu_head, struct perf_mmap_data, rcu_head);
2036
2037 perf_mmap_free_page((unsigned long)data->user_page);
2038 for (i = 0; i < data->nr_pages; i++)
2039 perf_mmap_free_page((unsigned long)data->data_pages[i]);
2040
2041 kfree(data);
2042 }
2043
2044 static void perf_mmap_data_free(struct perf_counter *counter)
2045 {
2046 struct perf_mmap_data *data = counter->data;
2047
2048 WARN_ON(atomic_read(&counter->mmap_count));
2049
2050 rcu_assign_pointer(counter->data, NULL);
2051 call_rcu(&data->rcu_head, __perf_mmap_data_free);
2052 }
2053
2054 static void perf_mmap_open(struct vm_area_struct *vma)
2055 {
2056 struct perf_counter *counter = vma->vm_file->private_data;
2057
2058 atomic_inc(&counter->mmap_count);
2059 }
2060
2061 static void perf_mmap_close(struct vm_area_struct *vma)
2062 {
2063 struct perf_counter *counter = vma->vm_file->private_data;
2064
2065 WARN_ON_ONCE(counter->ctx->parent_ctx);
2066 if (atomic_dec_and_mutex_lock(&counter->mmap_count, &counter->mmap_mutex)) {
2067 struct user_struct *user = current_user();
2068
2069 atomic_long_sub(counter->data->nr_pages + 1, &user->locked_vm);
2070 vma->vm_mm->locked_vm -= counter->data->nr_locked;
2071 perf_mmap_data_free(counter);
2072 mutex_unlock(&counter->mmap_mutex);
2073 }
2074 }
2075
2076 static struct vm_operations_struct perf_mmap_vmops = {
2077 .open = perf_mmap_open,
2078 .close = perf_mmap_close,
2079 .fault = perf_mmap_fault,
2080 .page_mkwrite = perf_mmap_fault,
2081 };
2082
2083 static int perf_mmap(struct file *file, struct vm_area_struct *vma)
2084 {
2085 struct perf_counter *counter = file->private_data;
2086 unsigned long user_locked, user_lock_limit;
2087 struct user_struct *user = current_user();
2088 unsigned long locked, lock_limit;
2089 unsigned long vma_size;
2090 unsigned long nr_pages;
2091 long user_extra, extra;
2092 int ret = 0;
2093
2094 if (!(vma->vm_flags & VM_SHARED))
2095 return -EINVAL;
2096
2097 vma_size = vma->vm_end - vma->vm_start;
2098 nr_pages = (vma_size / PAGE_SIZE) - 1;
2099
2100 /*
2101 * If we have data pages ensure they're a power-of-two number, so we
2102 * can do bitmasks instead of modulo.
2103 */
2104 if (nr_pages != 0 && !is_power_of_2(nr_pages))
2105 return -EINVAL;
2106
2107 if (vma_size != PAGE_SIZE * (1 + nr_pages))
2108 return -EINVAL;
2109
2110 if (vma->vm_pgoff != 0)
2111 return -EINVAL;
2112
2113 WARN_ON_ONCE(counter->ctx->parent_ctx);
2114 mutex_lock(&counter->mmap_mutex);
2115 if (atomic_inc_not_zero(&counter->mmap_count)) {
2116 if (nr_pages != counter->data->nr_pages)
2117 ret = -EINVAL;
2118 goto unlock;
2119 }
2120
2121 user_extra = nr_pages + 1;
2122 user_lock_limit = sysctl_perf_counter_mlock >> (PAGE_SHIFT - 10);
2123
2124 /*
2125 * Increase the limit linearly with more CPUs:
2126 */
2127 user_lock_limit *= num_online_cpus();
2128
2129 user_locked = atomic_long_read(&user->locked_vm) + user_extra;
2130
2131 extra = 0;
2132 if (user_locked > user_lock_limit)
2133 extra = user_locked - user_lock_limit;
2134
2135 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
2136 lock_limit >>= PAGE_SHIFT;
2137 locked = vma->vm_mm->locked_vm + extra;
2138
2139 if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
2140 ret = -EPERM;
2141 goto unlock;
2142 }
2143
2144 WARN_ON(counter->data);
2145 ret = perf_mmap_data_alloc(counter, nr_pages);
2146 if (ret)
2147 goto unlock;
2148
2149 atomic_set(&counter->mmap_count, 1);
2150 atomic_long_add(user_extra, &user->locked_vm);
2151 vma->vm_mm->locked_vm += extra;
2152 counter->data->nr_locked = extra;
2153 if (vma->vm_flags & VM_WRITE)
2154 counter->data->writable = 1;
2155
2156 unlock:
2157 mutex_unlock(&counter->mmap_mutex);
2158
2159 vma->vm_flags |= VM_RESERVED;
2160 vma->vm_ops = &perf_mmap_vmops;
2161
2162 return ret;
2163 }
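
/*
 * Example (illustrative sketch only): given the checks above, a
 * user-space mapping of a counter fd would look roughly like:
 *
 *	size_t page = sysconf(_SC_PAGESIZE);
 *	int nr_data_pages = 8;		// 0 or a power of two
 *	void *base = mmap(NULL, (1 + nr_data_pages) * page,
 *			  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * where fd is assumed to come from sys_perf_counter_open().  Page 0 is
 * the perf_counter_mmap_page control page and the remaining pages form
 * the sample buffer; PROT_WRITE is what makes ->writable true above,
 * allowing user space to advance ->data_tail.
 */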
2164
2165 static int perf_fasync(int fd, struct file *filp, int on)
2166 {
2167 struct inode *inode = filp->f_path.dentry->d_inode;
2168 struct perf_counter *counter = filp->private_data;
2169 int retval;
2170
2171 mutex_lock(&inode->i_mutex);
2172 retval = fasync_helper(fd, filp, on, &counter->fasync);
2173 mutex_unlock(&inode->i_mutex);
2174
2175 if (retval < 0)
2176 return retval;
2177
2178 return 0;
2179 }
2180
2181 static const struct file_operations perf_fops = {
2182 .release = perf_release,
2183 .read = perf_read,
2184 .poll = perf_poll,
2185 .unlocked_ioctl = perf_ioctl,
2186 .compat_ioctl = perf_ioctl,
2187 .mmap = perf_mmap,
2188 .fasync = perf_fasync,
2189 };
2190
2191 /*
2192 * Perf counter wakeup
2193 *
2194 * If there's data, ensure we set the poll() state and publish everything
2195 * to user-space before waking everybody up.
2196 */
2197
2198 void perf_counter_wakeup(struct perf_counter *counter)
2199 {
2200 wake_up_all(&counter->waitq);
2201
2202 if (counter->pending_kill) {
2203 kill_fasync(&counter->fasync, SIGIO, counter->pending_kill);
2204 counter->pending_kill = 0;
2205 }
2206 }
2207
2208 /*
2209 * Pending wakeups
2210 *
2211 * Handle the case where we need to wake up from NMI (or rq->lock) context.
2212 *
2213 * The NMI bit means we cannot possibly take locks. Therefore, maintain a
2214 * single linked list and use cmpxchg() to add entries lockless.
2215 */
2216
2217 static void perf_pending_counter(struct perf_pending_entry *entry)
2218 {
2219 struct perf_counter *counter = container_of(entry,
2220 struct perf_counter, pending);
2221
2222 if (counter->pending_disable) {
2223 counter->pending_disable = 0;
2224 perf_counter_disable(counter);
2225 }
2226
2227 if (counter->pending_wakeup) {
2228 counter->pending_wakeup = 0;
2229 perf_counter_wakeup(counter);
2230 }
2231 }
2232
2233 #define PENDING_TAIL ((struct perf_pending_entry *)-1UL)
2234
2235 static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = {
2236 PENDING_TAIL,
2237 };
2238
2239 static void perf_pending_queue(struct perf_pending_entry *entry,
2240 void (*func)(struct perf_pending_entry *))
2241 {
2242 struct perf_pending_entry **head;
2243
2244 if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL)
2245 return;
2246
2247 entry->func = func;
2248
2249 head = &get_cpu_var(perf_pending_head);
2250
2251 do {
2252 entry->next = *head;
2253 } while (cmpxchg(head, entry->next, entry) != entry->next);
2254
2255 set_perf_counter_pending();
2256
2257 put_cpu_var(perf_pending_head);
2258 }
2259
2260 static int __perf_pending_run(void)
2261 {
2262 struct perf_pending_entry *list;
2263 int nr = 0;
2264
2265 list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL);
2266 while (list != PENDING_TAIL) {
2267 void (*func)(struct perf_pending_entry *);
2268 struct perf_pending_entry *entry = list;
2269
2270 list = list->next;
2271
2272 func = entry->func;
2273 entry->next = NULL;
2274 /*
2275 * Ensure we observe the unqueue before we issue the wakeup,
2276 * so that we won't be waiting forever.
2277 * -- see perf_not_pending().
2278 */
2279 smp_wmb();
2280
2281 func(entry);
2282 nr++;
2283 }
2284
2285 return nr;
2286 }
2287
2288 static inline int perf_not_pending(struct perf_counter *counter)
2289 {
2290 /*
2291 * If we flush on whatever cpu we happen to run on, there is a chance we don't
2292 * need to wait.
2293 */
2294 get_cpu();
2295 __perf_pending_run();
2296 put_cpu();
2297
2298 /*
2299 * Ensure we see the proper queue state before going to sleep
2300 * so that we do not miss the wakeup. -- see __perf_pending_run()
2301 */
2302 smp_rmb();
2303 return counter->pending.next == NULL;
2304 }
2305
2306 static void perf_pending_sync(struct perf_counter *counter)
2307 {
2308 wait_event(counter->waitq, perf_not_pending(counter));
2309 }
2310
2311 void perf_counter_do_pending(void)
2312 {
2313 __perf_pending_run();
2314 }
2315
2316 /*
2317 * Callchain support -- arch specific
2318 */
2319
2320 __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
2321 {
2322 return NULL;
2323 }
2324
2325 /*
2326 * Output
2327 */
2328
2329 struct perf_output_handle {
2330 struct perf_counter *counter;
2331 struct perf_mmap_data *data;
2332 unsigned long head;
2333 unsigned long offset;
2334 int nmi;
2335 int sample;
2336 int locked;
2337 unsigned long flags;
2338 };
2339
2340 static bool perf_output_space(struct perf_mmap_data *data,
2341 unsigned int offset, unsigned int head)
2342 {
2343 unsigned long tail;
2344 unsigned long mask;
2345
2346 if (!data->writable)
2347 return true;
2348
2349 mask = (data->nr_pages << PAGE_SHIFT) - 1;
2350 /*
2351 * Userspace could choose to issue an mb() before updating the tail
2352 * pointer, so that all reads are completed before the write is
2353 * issued.
2354 */
2355 tail = ACCESS_ONCE(data->user_page->data_tail);
2356 smp_rmb();
2357
2358 offset = (offset - tail) & mask;
2359 head = (head - tail) & mask;
2360
2361 if ((int)(head - offset) < 0)
2362 return false;
2363
2364 return true;
2365 }
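
/*
 * Example (illustrative sketch only): the user-space consumer that the
 * ->data_tail check above pairs with is expected to do something like:
 *
 *	u64 head = pc->data_head;
 *	rmb();				// pairs with the kernel side
 *	consume_records(tail, head);	// read everything in [tail, head)
 *	mb();				// finish reads before publishing
 *	pc->data_tail = head;
 *	tail = head;
 *
 * Here pc is the mapped perf_counter_mmap_page; rmb(), mb() and
 * consume_records() stand in for whatever barrier and parsing helpers
 * the tooling provides and are assumptions of this sketch.
 */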
2366
2367 static void perf_output_wakeup(struct perf_output_handle *handle)
2368 {
2369 atomic_set(&handle->data->poll, POLL_IN);
2370
2371 if (handle->nmi) {
2372 handle->counter->pending_wakeup = 1;
2373 perf_pending_queue(&handle->counter->pending,
2374 perf_pending_counter);
2375 } else
2376 perf_counter_wakeup(handle->counter);
2377 }
2378
2379 /*
2380 * Curious locking construct.
2381 *
2382 * We need to ensure a later event doesn't publish a head when a former
2383 * event isn't done writing. However, since we need to deal with NMIs we
2384 * cannot fully serialize things.
2385 *
2386 * What we do is serialize between CPUs so we only have to deal with NMI
2387 * nesting on a single CPU.
2388 *
2389 * We only publish the head (and generate a wakeup) when the outer-most
2390 * event completes.
2391 */
2392 static void perf_output_lock(struct perf_output_handle *handle)
2393 {
2394 struct perf_mmap_data *data = handle->data;
2395 int cpu;
2396
2397 handle->locked = 0;
2398
2399 local_irq_save(handle->flags);
2400 cpu = smp_processor_id();
2401
2402 if (in_nmi() && atomic_read(&data->lock) == cpu)
2403 return;
2404
2405 while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
2406 cpu_relax();
2407
2408 handle->locked = 1;
2409 }
2410
2411 static void perf_output_unlock(struct perf_output_handle *handle)
2412 {
2413 struct perf_mmap_data *data = handle->data;
2414 unsigned long head;
2415 int cpu;
2416
2417 data->done_head = data->head;
2418
2419 if (!handle->locked)
2420 goto out;
2421
2422 again:
2423 /*
2424 * The xchg implies a full barrier that ensures all writes are done
2425 * before we publish the new head, matched by a rmb() in userspace when
2426 * reading this position.
2427 */
2428 while ((head = atomic_long_xchg(&data->done_head, 0)))
2429 data->user_page->data_head = head;
2430
2431 /*
2432 * NMI can happen here, which means we can miss a done_head update.
2433 */
2434
2435 cpu = atomic_xchg(&data->lock, -1);
2436 WARN_ON_ONCE(cpu != smp_processor_id());
2437
2438 /*
2439 * Therefore we have to check that we did not in fact miss one.
2440 */
2441 if (unlikely(atomic_long_read(&data->done_head))) {
2442 /*
2443 * Since we had it locked, we can lock it again.
2444 */
2445 while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
2446 cpu_relax();
2447
2448 goto again;
2449 }
2450
2451 if (atomic_xchg(&data->wakeup, 0))
2452 perf_output_wakeup(handle);
2453 out:
2454 local_irq_restore(handle->flags);
2455 }
2456
2457 static void perf_output_copy(struct perf_output_handle *handle,
2458 const void *buf, unsigned int len)
2459 {
2460 unsigned int pages_mask;
2461 unsigned int offset;
2462 unsigned int size;
2463 void **pages;
2464
2465 offset = handle->offset;
2466 pages_mask = handle->data->nr_pages - 1;
2467 pages = handle->data->data_pages;
2468
2469 do {
2470 unsigned int page_offset;
2471 int nr;
2472
2473 nr = (offset >> PAGE_SHIFT) & pages_mask;
2474 page_offset = offset & (PAGE_SIZE - 1);
2475 size = min_t(unsigned int, PAGE_SIZE - page_offset, len);
2476
2477 memcpy(pages[nr] + page_offset, buf, size);
2478
2479 len -= size;
2480 buf += size;
2481 offset += size;
2482 } while (len);
2483
2484 handle->offset = offset;
2485
2486 /*
2487 * Check we didn't copy past our reservation window, taking the
2488 * possible unsigned int wrap into account.
2489 */
2490 WARN_ON_ONCE(((long)(handle->head - handle->offset)) < 0);
2491 }
2492
2493 #define perf_output_put(handle, x) \
2494 perf_output_copy((handle), &(x), sizeof(x))
2495
2496 static int perf_output_begin(struct perf_output_handle *handle,
2497 struct perf_counter *counter, unsigned int size,
2498 int nmi, int sample)
2499 {
2500 struct perf_mmap_data *data;
2501 unsigned int offset, head;
2502 int have_lost;
2503 struct {
2504 struct perf_event_header header;
2505 u64 id;
2506 u64 lost;
2507 } lost_event;
2508
2509 /*
2510 * For inherited counters we send all the output towards the parent.
2511 */
2512 if (counter->parent)
2513 counter = counter->parent;
2514
2515 rcu_read_lock();
2516 data = rcu_dereference(counter->data);
2517 if (!data)
2518 goto out;
2519
2520 handle->data = data;
2521 handle->counter = counter;
2522 handle->nmi = nmi;
2523 handle->sample = sample;
2524
2525 if (!data->nr_pages)
2526 goto fail;
2527
2528 have_lost = atomic_read(&data->lost);
2529 if (have_lost)
2530 size += sizeof(lost_event);
2531
2532 perf_output_lock(handle);
2533
2534 do {
2535 offset = head = atomic_long_read(&data->head);
2536 head += size;
2537 if (unlikely(!perf_output_space(data, offset, head)))
2538 goto fail;
2539 } while (atomic_long_cmpxchg(&data->head, offset, head) != offset);
2540
2541 handle->offset = offset;
2542 handle->head = head;
2543
2544 if ((offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT))
2545 atomic_set(&data->wakeup, 1);
2546
2547 if (have_lost) {
2548 lost_event.header.type = PERF_EVENT_LOST;
2549 lost_event.header.misc = 0;
2550 lost_event.header.size = sizeof(lost_event);
2551 lost_event.id = counter->id;
2552 lost_event.lost = atomic_xchg(&data->lost, 0);
2553
2554 perf_output_put(handle, lost_event);
2555 }
2556
2557 return 0;
2558
2559 fail:
2560 atomic_inc(&data->lost);
2561 perf_output_unlock(handle);
2562 out:
2563 rcu_read_unlock();
2564
2565 return -ENOSPC;
2566 }
2567
2568 static void perf_output_end(struct perf_output_handle *handle)
2569 {
2570 struct perf_counter *counter = handle->counter;
2571 struct perf_mmap_data *data = handle->data;
2572
2573 int wakeup_events = counter->attr.wakeup_events;
2574
2575 if (handle->sample && wakeup_events) {
2576 int events = atomic_inc_return(&data->events);
2577 if (events >= wakeup_events) {
2578 atomic_sub(wakeup_events, &data->events);
2579 atomic_set(&data->wakeup, 1);
2580 }
2581 }
2582
2583 perf_output_unlock(handle);
2584 rcu_read_unlock();
2585 }
2586
2587 static u32 perf_counter_pid(struct perf_counter *counter, struct task_struct *p)
2588 {
2589 /*
2590 * only top level counters have the pid namespace they were created in
2591 */
2592 if (counter->parent)
2593 counter = counter->parent;
2594
2595 return task_tgid_nr_ns(p, counter->ns);
2596 }
2597
2598 static u32 perf_counter_tid(struct perf_counter *counter, struct task_struct *p)
2599 {
2600 /*
2601 * only top level counters have the pid namespace they were created in
2602 */
2603 if (counter->parent)
2604 counter = counter->parent;
2605
2606 return task_pid_nr_ns(p, counter->ns);
2607 }
2608
2609 static void perf_counter_output(struct perf_counter *counter, int nmi,
2610 struct perf_sample_data *data)
2611 {
2612 int ret;
2613 u64 sample_type = counter->attr.sample_type;
2614 struct perf_output_handle handle;
2615 struct perf_event_header header;
2616 u64 ip;
2617 struct {
2618 u32 pid, tid;
2619 } tid_entry;
2620 struct {
2621 u64 id;
2622 u64 counter;
2623 } group_entry;
2624 struct perf_callchain_entry *callchain = NULL;
2625 int callchain_size = 0;
2626 u64 time;
2627 struct {
2628 u32 cpu, reserved;
2629 } cpu_entry;
2630
2631 header.type = PERF_EVENT_SAMPLE;
2632 header.size = sizeof(header);
2633
2634 header.misc = 0;
2635 header.misc |= perf_misc_flags(data->regs);
2636
2637 if (sample_type & PERF_SAMPLE_IP) {
2638 ip = perf_instruction_pointer(data->regs);
2639 header.size += sizeof(ip);
2640 }
2641
2642 if (sample_type & PERF_SAMPLE_TID) {
2643 /* namespace issues */
2644 tid_entry.pid = perf_counter_pid(counter, current);
2645 tid_entry.tid = perf_counter_tid(counter, current);
2646
2647 header.size += sizeof(tid_entry);
2648 }
2649
2650 if (sample_type & PERF_SAMPLE_TIME) {
2651 /*
2652 * Maybe do better on x86 and provide cpu_clock_nmi()
2653 */
2654 time = sched_clock();
2655
2656 header.size += sizeof(u64);
2657 }
2658
2659 if (sample_type & PERF_SAMPLE_ADDR)
2660 header.size += sizeof(u64);
2661
2662 if (sample_type & PERF_SAMPLE_ID)
2663 header.size += sizeof(u64);
2664
2665 if (sample_type & PERF_SAMPLE_CPU) {
2666 header.size += sizeof(cpu_entry);
2667
2668 cpu_entry.cpu = raw_smp_processor_id();
2669 }
2670
2671 if (sample_type & PERF_SAMPLE_PERIOD)
2672 header.size += sizeof(u64);
2673
2674 if (sample_type & PERF_SAMPLE_GROUP) {
2675 header.size += sizeof(u64) +
2676 counter->nr_siblings * sizeof(group_entry);
2677 }
2678
2679 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
2680 callchain = perf_callchain(data->regs);
2681
2682 if (callchain) {
2683 callchain_size = (1 + callchain->nr) * sizeof(u64);
2684 header.size += callchain_size;
2685 } else
2686 header.size += sizeof(u64);
2687 }
2688
2689 ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
2690 if (ret)
2691 return;
2692
2693 perf_output_put(&handle, header);
2694
2695 if (sample_type & PERF_SAMPLE_IP)
2696 perf_output_put(&handle, ip);
2697
2698 if (sample_type & PERF_SAMPLE_TID)
2699 perf_output_put(&handle, tid_entry);
2700
2701 if (sample_type & PERF_SAMPLE_TIME)
2702 perf_output_put(&handle, time);
2703
2704 if (sample_type & PERF_SAMPLE_ADDR)
2705 perf_output_put(&handle, data->addr);
2706
2707 if (sample_type & PERF_SAMPLE_ID)
2708 perf_output_put(&handle, counter->id);
2709
2710 if (sample_type & PERF_SAMPLE_CPU)
2711 perf_output_put(&handle, cpu_entry);
2712
2713 if (sample_type & PERF_SAMPLE_PERIOD)
2714 perf_output_put(&handle, data->period);
2715
2716 /*
2717 * XXX PERF_SAMPLE_GROUP vs inherited counters seems difficult.
2718 */
2719 if (sample_type & PERF_SAMPLE_GROUP) {
2720 struct perf_counter *leader, *sub;
2721 u64 nr = counter->nr_siblings;
2722
2723 perf_output_put(&handle, nr);
2724
2725 leader = counter->group_leader;
2726 list_for_each_entry(sub, &leader->sibling_list, list_entry) {
2727 if (sub != counter)
2728 sub->pmu->read(sub);
2729
2730 group_entry.id = sub->id;
2731 group_entry.counter = atomic64_read(&sub->count);
2732
2733 perf_output_put(&handle, group_entry);
2734 }
2735 }
2736
2737 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
2738 if (callchain)
2739 perf_output_copy(&handle, callchain, callchain_size);
2740 else {
2741 u64 nr = 0;
2742 perf_output_put(&handle, nr);
2743 }
2744 }
2745
2746 perf_output_end(&handle);
2747 }
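
/*
 * Example (illustrative sketch only): a user-space parser of the
 * PERF_EVENT_SAMPLE records emitted above has to walk the optional
 * fields in exactly the order they are written, e.g.:
 *
 *	u64 *p = (u64 *)(header + 1);	// header: the record's perf_event_header
 *
 *	if (sample_type & PERF_SAMPLE_IP)
 *		ip = *p++;
 *	if (sample_type & PERF_SAMPLE_TID)
 *		pid_tid = *p++;		// two u32s packed in one u64
 *	if (sample_type & PERF_SAMPLE_TIME)
 *		time = *p++;
 *	// ... and so on for ADDR, ID, CPU, PERIOD, GROUP, CALLCHAIN
 *
 * with sample_type taken from the perf_counter_attr the counter was
 * created with; the variable names are assumptions of this sketch.
 */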
2748
2749 /*
2750 * read event
2751 */
2752
2753 struct perf_read_event {
2754 struct perf_event_header header;
2755
2756 u32 pid;
2757 u32 tid;
2758 u64 value;
2759 u64 format[3];
2760 };
2761
2762 static void
2763 perf_counter_read_event(struct perf_counter *counter,
2764 struct task_struct *task)
2765 {
2766 struct perf_output_handle handle;
2767 struct perf_read_event event = {
2768 .header = {
2769 .type = PERF_EVENT_READ,
2770 .misc = 0,
2771 .size = sizeof(event) - sizeof(event.format),
2772 },
2773 .pid = perf_counter_pid(counter, task),
2774 .tid = perf_counter_tid(counter, task),
2775 .value = atomic64_read(&counter->count),
2776 };
2777 int ret, i = 0;
2778
2779 if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
2780 event.header.size += sizeof(u64);
2781 event.format[i++] = counter->total_time_enabled;
2782 }
2783
2784 if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
2785 event.header.size += sizeof(u64);
2786 event.format[i++] = counter->total_time_running;
2787 }
2788
2789 if (counter->attr.read_format & PERF_FORMAT_ID) {
2790 u64 id;
2791
2792 event.header.size += sizeof(u64);
2793 if (counter->parent)
2794 id = counter->parent->id;
2795 else
2796 id = counter->id;
2797
2798 event.format[i++] = id;
2799 }
2800
2801 ret = perf_output_begin(&handle, counter, event.header.size, 0, 0);
2802 if (ret)
2803 return;
2804
2805 perf_output_copy(&handle, &event, event.header.size);
2806 perf_output_end(&handle);
2807 }
2808
2809 /*
2810 * fork tracking
2811 */
2812
2813 struct perf_fork_event {
2814 struct task_struct *task;
2815
2816 struct {
2817 struct perf_event_header header;
2818
2819 u32 pid;
2820 u32 ppid;
2821 } event;
2822 };
2823
2824 static void perf_counter_fork_output(struct perf_counter *counter,
2825 struct perf_fork_event *fork_event)
2826 {
2827 struct perf_output_handle handle;
2828 int size = fork_event->event.header.size;
2829 struct task_struct *task = fork_event->task;
2830 int ret = perf_output_begin(&handle, counter, size, 0, 0);
2831
2832 if (ret)
2833 return;
2834
2835 fork_event->event.pid = perf_counter_pid(counter, task);
2836 fork_event->event.ppid = perf_counter_pid(counter, task->real_parent);
2837
2838 perf_output_put(&handle, fork_event->event);
2839 perf_output_end(&handle);
2840 }
2841
2842 static int perf_counter_fork_match(struct perf_counter *counter)
2843 {
2844 if (counter->attr.comm || counter->attr.mmap)
2845 return 1;
2846
2847 return 0;
2848 }
2849
2850 static void perf_counter_fork_ctx(struct perf_counter_context *ctx,
2851 struct perf_fork_event *fork_event)
2852 {
2853 struct perf_counter *counter;
2854
2855 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
2856 return;
2857
2858 rcu_read_lock();
2859 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
2860 if (perf_counter_fork_match(counter))
2861 perf_counter_fork_output(counter, fork_event);
2862 }
2863 rcu_read_unlock();
2864 }
2865
2866 static void perf_counter_fork_event(struct perf_fork_event *fork_event)
2867 {
2868 struct perf_cpu_context *cpuctx;
2869 struct perf_counter_context *ctx;
2870
2871 cpuctx = &get_cpu_var(perf_cpu_context);
2872 perf_counter_fork_ctx(&cpuctx->ctx, fork_event);
2873 put_cpu_var(perf_cpu_context);
2874
2875 rcu_read_lock();
2876 /*
2877 * It doesn't really matter which of the child contexts the
2878 * events end up in.
2879 */
2880 ctx = rcu_dereference(current->perf_counter_ctxp);
2881 if (ctx)
2882 perf_counter_fork_ctx(ctx, fork_event);
2883 rcu_read_unlock();
2884 }
2885
2886 void perf_counter_fork(struct task_struct *task)
2887 {
2888 struct perf_fork_event fork_event;
2889
2890 if (!atomic_read(&nr_comm_counters) &&
2891 !atomic_read(&nr_mmap_counters))
2892 return;
2893
2894 fork_event = (struct perf_fork_event){
2895 .task = task,
2896 .event = {
2897 .header = {
2898 .type = PERF_EVENT_FORK,
2899 .size = sizeof(fork_event.event),
2900 },
2901 },
2902 };
2903
2904 perf_counter_fork_event(&fork_event);
2905 }
2906
2907 /*
2908 * comm tracking
2909 */
2910
2911 struct perf_comm_event {
2912 struct task_struct *task;
2913 char *comm;
2914 int comm_size;
2915
2916 struct {
2917 struct perf_event_header header;
2918
2919 u32 pid;
2920 u32 tid;
2921 } event;
2922 };
2923
2924 static void perf_counter_comm_output(struct perf_counter *counter,
2925 struct perf_comm_event *comm_event)
2926 {
2927 struct perf_output_handle handle;
2928 int size = comm_event->event.header.size;
2929 int ret = perf_output_begin(&handle, counter, size, 0, 0);
2930
2931 if (ret)
2932 return;
2933
2934 comm_event->event.pid = perf_counter_pid(counter, comm_event->task);
2935 comm_event->event.tid = perf_counter_tid(counter, comm_event->task);
2936
2937 perf_output_put(&handle, comm_event->event);
2938 perf_output_copy(&handle, comm_event->comm,
2939 comm_event->comm_size);
2940 perf_output_end(&handle);
2941 }
2942
2943 static int perf_counter_comm_match(struct perf_counter *counter)
2944 {
2945 if (counter->attr.comm)
2946 return 1;
2947
2948 return 0;
2949 }
2950
2951 static void perf_counter_comm_ctx(struct perf_counter_context *ctx,
2952 struct perf_comm_event *comm_event)
2953 {
2954 struct perf_counter *counter;
2955
2956 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
2957 return;
2958
2959 rcu_read_lock();
2960 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
2961 if (perf_counter_comm_match(counter))
2962 perf_counter_comm_output(counter, comm_event);
2963 }
2964 rcu_read_unlock();
2965 }
2966
2967 static void perf_counter_comm_event(struct perf_comm_event *comm_event)
2968 {
2969 struct perf_cpu_context *cpuctx;
2970 struct perf_counter_context *ctx;
2971 unsigned int size;
2972 char comm[TASK_COMM_LEN];
2973
2974 memset(comm, 0, sizeof(comm));
2975 strncpy(comm, comm_event->task->comm, sizeof(comm));
2976 size = ALIGN(strlen(comm)+1, sizeof(u64));
2977
2978 comm_event->comm = comm;
2979 comm_event->comm_size = size;
2980
2981 comm_event->event.header.size = sizeof(comm_event->event) + size;
2982
2983 cpuctx = &get_cpu_var(perf_cpu_context);
2984 perf_counter_comm_ctx(&cpuctx->ctx, comm_event);
2985 put_cpu_var(perf_cpu_context);
2986
2987 rcu_read_lock();
2988 /*
2989 * It doesn't really matter which of the child contexts the
2990 * events end up in.
2991 */
2992 ctx = rcu_dereference(current->perf_counter_ctxp);
2993 if (ctx)
2994 perf_counter_comm_ctx(ctx, comm_event);
2995 rcu_read_unlock();
2996 }
2997
2998 void perf_counter_comm(struct task_struct *task)
2999 {
3000 struct perf_comm_event comm_event;
3001
3002 if (task->perf_counter_ctxp)
3003 perf_counter_enable_on_exec(task);
3004
3005 if (!atomic_read(&nr_comm_counters))
3006 return;
3007
3008 comm_event = (struct perf_comm_event){
3009 .task = task,
3010 .event = {
3011 .header = { .type = PERF_EVENT_COMM, },
3012 },
3013 };
3014
3015 perf_counter_comm_event(&comm_event);
3016 }
3017
3018 /*
3019 * mmap tracking
3020 */
3021
3022 struct perf_mmap_event {
3023 struct vm_area_struct *vma;
3024
3025 const char *file_name;
3026 int file_size;
3027
3028 struct {
3029 struct perf_event_header header;
3030
3031 u32 pid;
3032 u32 tid;
3033 u64 start;
3034 u64 len;
3035 u64 pgoff;
3036 } event;
3037 };
3038
3039 static void perf_counter_mmap_output(struct perf_counter *counter,
3040 struct perf_mmap_event *mmap_event)
3041 {
3042 struct perf_output_handle handle;
3043 int size = mmap_event->event.header.size;
3044 int ret = perf_output_begin(&handle, counter, size, 0, 0);
3045
3046 if (ret)
3047 return;
3048
3049 mmap_event->event.pid = perf_counter_pid(counter, current);
3050 mmap_event->event.tid = perf_counter_tid(counter, current);
3051
3052 perf_output_put(&handle, mmap_event->event);
3053 perf_output_copy(&handle, mmap_event->file_name,
3054 mmap_event->file_size);
3055 perf_output_end(&handle);
3056 }
3057
3058 static int perf_counter_mmap_match(struct perf_counter *counter,
3059 struct perf_mmap_event *mmap_event)
3060 {
3061 if (counter->attr.mmap)
3062 return 1;
3063
3064 return 0;
3065 }
3066
3067 static void perf_counter_mmap_ctx(struct perf_counter_context *ctx,
3068 struct perf_mmap_event *mmap_event)
3069 {
3070 struct perf_counter *counter;
3071
3072 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
3073 return;
3074
3075 rcu_read_lock();
3076 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
3077 if (perf_counter_mmap_match(counter, mmap_event))
3078 perf_counter_mmap_output(counter, mmap_event);
3079 }
3080 rcu_read_unlock();
3081 }
3082
3083 static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event)
3084 {
3085 struct perf_cpu_context *cpuctx;
3086 struct perf_counter_context *ctx;
3087 struct vm_area_struct *vma = mmap_event->vma;
3088 struct file *file = vma->vm_file;
3089 unsigned int size;
3090 char tmp[16];
3091 char *buf = NULL;
3092 const char *name;
3093
3094 memset(tmp, 0, sizeof(tmp));
3095
3096 if (file) {
3097 /*
3098 * d_path works from the end of the buffer backwards, so we
3099 * need to add enough zero bytes after the string to handle
3100 * the 64bit alignment we do later.
3101 */
3102 buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
3103 if (!buf) {
3104 name = strncpy(tmp, "//enomem", sizeof(tmp));
3105 goto got_name;
3106 }
3107 name = d_path(&file->f_path, buf, PATH_MAX);
3108 if (IS_ERR(name)) {
3109 name = strncpy(tmp, "//toolong", sizeof(tmp));
3110 goto got_name;
3111 }
3112 } else {
3113 if (arch_vma_name(mmap_event->vma)) {
3114 name = strncpy(tmp, arch_vma_name(mmap_event->vma),
3115 sizeof(tmp));
3116 goto got_name;
3117 }
3118
3119 if (!vma->vm_mm) {
3120 name = strncpy(tmp, "[vdso]", sizeof(tmp));
3121 goto got_name;
3122 }
3123
3124 name = strncpy(tmp, "//anon", sizeof(tmp));
3125 goto got_name;
3126 }
3127
3128 got_name:
3129 size = ALIGN(strlen(name)+1, sizeof(u64));
3130
3131 mmap_event->file_name = name;
3132 mmap_event->file_size = size;
3133
3134 mmap_event->event.header.size = sizeof(mmap_event->event) + size;
3135
3136 cpuctx = &get_cpu_var(perf_cpu_context);
3137 perf_counter_mmap_ctx(&cpuctx->ctx, mmap_event);
3138 put_cpu_var(perf_cpu_context);
3139
3140 rcu_read_lock();
3141 /*
3142 * It doesn't really matter which of the child contexts the
3143 * events end up in.
3144 */
3145 ctx = rcu_dereference(current->perf_counter_ctxp);
3146 if (ctx)
3147 perf_counter_mmap_ctx(ctx, mmap_event);
3148 rcu_read_unlock();
3149
3150 kfree(buf);
3151 }
3152
3153 void __perf_counter_mmap(struct vm_area_struct *vma)
3154 {
3155 struct perf_mmap_event mmap_event;
3156
3157 if (!atomic_read(&nr_mmap_counters))
3158 return;
3159
3160 mmap_event = (struct perf_mmap_event){
3161 .vma = vma,
3162 .event = {
3163 .header = { .type = PERF_EVENT_MMAP, },
3164 .start = vma->vm_start,
3165 .len = vma->vm_end - vma->vm_start,
3166 .pgoff = vma->vm_pgoff,
3167 },
3168 };
3169
3170 perf_counter_mmap_event(&mmap_event);
3171 }
3172
3173 /*
3174 * Log sample_period changes so that analyzing tools can re-normalize the
3175 * event flow.
3176 */
3177
3178 struct freq_event {
3179 struct perf_event_header header;
3180 u64 time;
3181 u64 id;
3182 u64 period;
3183 };
3184
3185 static void perf_log_period(struct perf_counter *counter, u64 period)
3186 {
3187 struct perf_output_handle handle;
3188 struct freq_event event;
3189 int ret;
3190
3191 if (counter->hw.sample_period == period)
3192 return;
3193
3194 if (counter->attr.sample_type & PERF_SAMPLE_PERIOD)
3195 return;
3196
3197 event = (struct freq_event) {
3198 .header = {
3199 .type = PERF_EVENT_PERIOD,
3200 .misc = 0,
3201 .size = sizeof(event),
3202 },
3203 .time = sched_clock(),
3204 .id = counter->id,
3205 .period = period,
3206 };
3207
3208 ret = perf_output_begin(&handle, counter, sizeof(event), 1, 0);
3209 if (ret)
3210 return;
3211
3212 perf_output_put(&handle, event);
3213 perf_output_end(&handle);
3214 }
3215
3216 /*
3217 * IRQ throttle logging
3218 */
3219
3220 static void perf_log_throttle(struct perf_counter *counter, int enable)
3221 {
3222 struct perf_output_handle handle;
3223 int ret;
3224
3225 struct {
3226 struct perf_event_header header;
3227 u64 time;
3228 u64 id;
3229 } throttle_event = {
3230 .header = {
3231 .type = PERF_EVENT_THROTTLE + enable,
3232 .misc = 0,
3233 .size = sizeof(throttle_event),
3234 },
3235 .time = sched_clock(),
3236 .id = counter->id,
3237 };
3238
3239 ret = perf_output_begin(&handle, counter, sizeof(throttle_event), 1, 0);
3240 if (ret)
3241 return;
3242
3243 perf_output_put(&handle, throttle_event);
3244 perf_output_end(&handle);
3245 }
3246
3247 /*
3248 * Generic counter overflow handling, sampling.
3249 */
3250
3251 int perf_counter_overflow(struct perf_counter *counter, int nmi,
3252 struct perf_sample_data *data)
3253 {
3254 int events = atomic_read(&counter->event_limit);
3255 int throttle = counter->pmu->unthrottle != NULL;
3256 struct hw_perf_counter *hwc = &counter->hw;
3257 int ret = 0;
3258
3259 if (!throttle) {
3260 hwc->interrupts++;
3261 } else {
3262 if (hwc->interrupts != MAX_INTERRUPTS) {
3263 hwc->interrupts++;
3264 if (HZ * hwc->interrupts >
3265 (u64)sysctl_perf_counter_sample_rate) {
3266 hwc->interrupts = MAX_INTERRUPTS;
3267 perf_log_throttle(counter, 0);
3268 ret = 1;
3269 }
3270 } else {
3271 /*
3272 * Keep re-disabling the counter even though we disabled it on
3273 * the previous pass - just in case we raced with a sched-in
3274 * and the counter got enabled again:
3275 */
3276 ret = 1;
3277 }
3278 }
3279
3280 if (counter->attr.freq) {
3281 u64 now = sched_clock();
3282 s64 delta = now - hwc->freq_stamp;
3283
3284 hwc->freq_stamp = now;
3285
3286 if (delta > 0 && delta < TICK_NSEC)
3287 perf_adjust_period(counter, NSEC_PER_SEC / (int)delta);
3288 }
3289
3290 /*
3291 * XXX event_limit might not quite work as expected on inherited
3292 * counters
3293 */
3294
3295 counter->pending_kill = POLL_IN;
3296 if (events && atomic_dec_and_test(&counter->event_limit)) {
3297 ret = 1;
3298 counter->pending_kill = POLL_HUP;
3299 if (nmi) {
3300 counter->pending_disable = 1;
3301 perf_pending_queue(&counter->pending,
3302 perf_pending_counter);
3303 } else
3304 perf_counter_disable(counter);
3305 }
3306
3307 perf_counter_output(counter, nmi, data);
3308 return ret;
3309 }
3310
3311 /*
3312 * Generic software counter infrastructure
3313 */
3314
3315 static void perf_swcounter_update(struct perf_counter *counter)
3316 {
3317 struct hw_perf_counter *hwc = &counter->hw;
3318 u64 prev, now;
3319 s64 delta;
3320
3321 again:
3322 prev = atomic64_read(&hwc->prev_count);
3323 now = atomic64_read(&hwc->count);
3324 if (atomic64_cmpxchg(&hwc->prev_count, prev, now) != prev)
3325 goto again;
3326
3327 delta = now - prev;
3328
3329 atomic64_add(delta, &counter->count);
3330 atomic64_sub(delta, &hwc->period_left);
3331 }
3332
3333 static void perf_swcounter_set_period(struct perf_counter *counter)
3334 {
3335 struct hw_perf_counter *hwc = &counter->hw;
3336 s64 left = atomic64_read(&hwc->period_left);
3337 s64 period = hwc->sample_period;
3338
3339 if (unlikely(left <= -period)) {
3340 left = period;
3341 atomic64_set(&hwc->period_left, left);
3342 hwc->last_period = period;
3343 }
3344
3345 if (unlikely(left <= 0)) {
3346 left += period;
3347 atomic64_add(period, &hwc->period_left);
3348 hwc->last_period = period;
3349 }
3350
3351 atomic64_set(&hwc->prev_count, -left);
3352 atomic64_set(&hwc->count, -left);
3353 }
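
/*
 * Worked example (period value assumed): with a sample_period of 1000
 * and period_left of 1000, both ->prev_count and ->count start out at
 * -1000.  Each perf_swcounter_add() then moves ->count towards zero,
 * and the atomic64_add_negative() test there stops returning true on
 * the 1000th event, which is what triggers perf_swcounter_overflow().
 */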
3354
3355 static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
3356 {
3357 enum hrtimer_restart ret = HRTIMER_RESTART;
3358 struct perf_sample_data data;
3359 struct perf_counter *counter;
3360 u64 period;
3361
3362 counter = container_of(hrtimer, struct perf_counter, hw.hrtimer);
3363 counter->pmu->read(counter);
3364
3365 data.addr = 0;
3366 data.regs = get_irq_regs();
3367 /*
3368 * In case we exclude kernel IPs or are somehow not in interrupt
3369 * context, provide the next best thing, the user IP.
3370 */
3371 if ((counter->attr.exclude_kernel || !data.regs) &&
3372 !counter->attr.exclude_user)
3373 data.regs = task_pt_regs(current);
3374
3375 if (data.regs) {
3376 if (perf_counter_overflow(counter, 0, &data))
3377 ret = HRTIMER_NORESTART;
3378 }
3379
3380 period = max_t(u64, 10000, counter->hw.sample_period);
3381 hrtimer_forward_now(hrtimer, ns_to_ktime(period));
3382
3383 return ret;
3384 }
3385
3386 static void perf_swcounter_overflow(struct perf_counter *counter,
3387 int nmi, struct perf_sample_data *data)
3388 {
3389 data->period = counter->hw.last_period;
3390
3391 perf_swcounter_update(counter);
3392 perf_swcounter_set_period(counter);
3393 if (perf_counter_overflow(counter, nmi, data))
3394 /* soft-disable the counter */
3395 ;
3396 }
3397
3398 static int perf_swcounter_is_counting(struct perf_counter *counter)
3399 {
3400 struct perf_counter_context *ctx;
3401 unsigned long flags;
3402 int count;
3403
3404 if (counter->state == PERF_COUNTER_STATE_ACTIVE)
3405 return 1;
3406
3407 if (counter->state != PERF_COUNTER_STATE_INACTIVE)
3408 return 0;
3409
3410 /*
3411 * If the counter is inactive, it could be just because
3412 * its task is scheduled out, or because it's in a group
3413 * which could not go on the PMU. We want to count in
3414 * the first case but not the second. If the context is
3415 * currently active then an inactive software counter must
3416 * be the second case. If it's not currently active then
3417 * we need to know whether the counter was active when the
3418 * context was last active, which we can determine by
3419 * comparing counter->tstamp_stopped with ctx->time.
3420 *
3421 * We are within an RCU read-side critical section,
3422 * which protects the existence of *ctx.
3423 */
3424 ctx = counter->ctx;
3425 spin_lock_irqsave(&ctx->lock, flags);
3426 count = 1;
3427 /* Re-check state now we have the lock */
3428 if (counter->state < PERF_COUNTER_STATE_INACTIVE ||
3429 counter->ctx->is_active ||
3430 counter->tstamp_stopped < ctx->time)
3431 count = 0;
3432 spin_unlock_irqrestore(&ctx->lock, flags);
3433 return count;
3434 }
3435
3436 static int perf_swcounter_match(struct perf_counter *counter,
3437 enum perf_type_id type,
3438 u32 event, struct pt_regs *regs)
3439 {
3440 if (!perf_swcounter_is_counting(counter))
3441 return 0;
3442
3443 if (counter->attr.type != type)
3444 return 0;
3445 if (counter->attr.config != event)
3446 return 0;
3447
3448 if (regs) {
3449 if (counter->attr.exclude_user && user_mode(regs))
3450 return 0;
3451
3452 if (counter->attr.exclude_kernel && !user_mode(regs))
3453 return 0;
3454 }
3455
3456 return 1;
3457 }
3458
3459 static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
3460 int nmi, struct perf_sample_data *data)
3461 {
3462 int neg = atomic64_add_negative(nr, &counter->hw.count);
3463
3464 if (counter->hw.sample_period && !neg && data->regs)
3465 perf_swcounter_overflow(counter, nmi, data);
3466 }
3467
3468 static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
3469 enum perf_type_id type,
3470 u32 event, u64 nr, int nmi,
3471 struct perf_sample_data *data)
3472 {
3473 struct perf_counter *counter;
3474
3475 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
3476 return;
3477
3478 rcu_read_lock();
3479 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
3480 if (perf_swcounter_match(counter, type, event, data->regs))
3481 perf_swcounter_add(counter, nr, nmi, data);
3482 }
3483 rcu_read_unlock();
3484 }
3485
3486 static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx)
3487 {
3488 if (in_nmi())
3489 return &cpuctx->recursion[3];
3490
3491 if (in_irq())
3492 return &cpuctx->recursion[2];
3493
3494 if (in_softirq())
3495 return &cpuctx->recursion[1];
3496
3497 return &cpuctx->recursion[0];
3498 }
3499
3500 static void do_perf_swcounter_event(enum perf_type_id type, u32 event,
3501 u64 nr, int nmi,
3502 struct perf_sample_data *data)
3503 {
3504 struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
3505 int *recursion = perf_swcounter_recursion_context(cpuctx);
3506 struct perf_counter_context *ctx;
3507
3508 if (*recursion)
3509 goto out;
3510
3511 (*recursion)++;
3512 barrier();
3513
3514 perf_swcounter_ctx_event(&cpuctx->ctx, type, event,
3515 nr, nmi, data);
3516 rcu_read_lock();
3517 /*
3518 * It doesn't really matter which of the child contexts the
3519 * events end up in.
3520 */
3521 ctx = rcu_dereference(current->perf_counter_ctxp);
3522 if (ctx)
3523 perf_swcounter_ctx_event(ctx, type, event, nr, nmi, data);
3524 rcu_read_unlock();
3525
3526 barrier();
3527 (*recursion)--;
3528
3529 out:
3530 put_cpu_var(perf_cpu_context);
3531 }
3532
3533 void __perf_swcounter_event(u32 event, u64 nr, int nmi,
3534 struct pt_regs *regs, u64 addr)
3535 {
3536 struct perf_sample_data data = {
3537 .regs = regs,
3538 .addr = addr,
3539 };
3540
3541 do_perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi, &data);
3542 }
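
/*
 * Example (illustrative sketch only): a software event call site, for
 * instance in a context-switch or page-fault path, feeds the machinery
 * above with something like:
 *
 *	__perf_swcounter_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0,
 *			       task_pt_regs(current), 0);
 *
 * The in-tree call sites normally go through an inline wrapper that
 * first checks perf_swcounter_enabled[]; the exact call shown here is
 * an assumption of this sketch.
 */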
3543
3544 static void perf_swcounter_read(struct perf_counter *counter)
3545 {
3546 perf_swcounter_update(counter);
3547 }
3548
3549 static int perf_swcounter_enable(struct perf_counter *counter)
3550 {
3551 perf_swcounter_set_period(counter);
3552 return 0;
3553 }
3554
3555 static void perf_swcounter_disable(struct perf_counter *counter)
3556 {
3557 perf_swcounter_update(counter);
3558 }
3559
3560 static const struct pmu perf_ops_generic = {
3561 .enable = perf_swcounter_enable,
3562 .disable = perf_swcounter_disable,
3563 .read = perf_swcounter_read,
3564 };
3565
3566 /*
3567 * Software counter: cpu wall time clock
3568 */
3569
3570 static void cpu_clock_perf_counter_update(struct perf_counter *counter)
3571 {
3572 int cpu = raw_smp_processor_id();
3573 s64 prev;
3574 u64 now;
3575
3576 now = cpu_clock(cpu);
3577 prev = atomic64_read(&counter->hw.prev_count);
3578 atomic64_set(&counter->hw.prev_count, now);
3579 atomic64_add(now - prev, &counter->count);
3580 }
3581
3582 static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
3583 {
3584 struct hw_perf_counter *hwc = &counter->hw;
3585 int cpu = raw_smp_processor_id();
3586
3587 atomic64_set(&hwc->prev_count, cpu_clock(cpu));
3588 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3589 hwc->hrtimer.function = perf_swcounter_hrtimer;
3590 if (hwc->sample_period) {
3591 u64 period = max_t(u64, 10000, hwc->sample_period);
3592 __hrtimer_start_range_ns(&hwc->hrtimer,
3593 ns_to_ktime(period), 0,
3594 HRTIMER_MODE_REL, 0);
3595 }
3596
3597 return 0;
3598 }
3599
3600 static void cpu_clock_perf_counter_disable(struct perf_counter *counter)
3601 {
3602 if (counter->hw.sample_period)
3603 hrtimer_cancel(&counter->hw.hrtimer);
3604 cpu_clock_perf_counter_update(counter);
3605 }
3606
3607 static void cpu_clock_perf_counter_read(struct perf_counter *counter)
3608 {
3609 cpu_clock_perf_counter_update(counter);
3610 }
3611
3612 static const struct pmu perf_ops_cpu_clock = {
3613 .enable = cpu_clock_perf_counter_enable,
3614 .disable = cpu_clock_perf_counter_disable,
3615 .read = cpu_clock_perf_counter_read,
3616 };
3617
3618 /*
3619 * Software counter: task time clock
3620 */
3621
3622 static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now)
3623 {
3624 u64 prev;
3625 s64 delta;
3626
3627 prev = atomic64_xchg(&counter->hw.prev_count, now);
3628 delta = now - prev;
3629 atomic64_add(delta, &counter->count);
3630 }
3631
3632 static int task_clock_perf_counter_enable(struct perf_counter *counter)
3633 {
3634 struct hw_perf_counter *hwc = &counter->hw;
3635 u64 now;
3636
3637 now = counter->ctx->time;
3638
3639 atomic64_set(&hwc->prev_count, now);
3640 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3641 hwc->hrtimer.function = perf_swcounter_hrtimer;
3642 if (hwc->sample_period) {
3643 u64 period = max_t(u64, 10000, hwc->sample_period);
3644 __hrtimer_start_range_ns(&hwc->hrtimer,
3645 ns_to_ktime(period), 0,
3646 HRTIMER_MODE_REL, 0);
3647 }
3648
3649 return 0;
3650 }
3651
3652 static void task_clock_perf_counter_disable(struct perf_counter *counter)
3653 {
3654 if (counter->hw.sample_period)
3655 hrtimer_cancel(&counter->hw.hrtimer);
3656 task_clock_perf_counter_update(counter, counter->ctx->time);
3657
3658 }
3659
3660 static void task_clock_perf_counter_read(struct perf_counter *counter)
3661 {
3662 u64 time;
3663
3664 if (!in_nmi()) {
3665 update_context_time(counter->ctx);
3666 time = counter->ctx->time;
3667 } else {
3668 u64 now = perf_clock();
3669 u64 delta = now - counter->ctx->timestamp;
3670 time = counter->ctx->time + delta;
3671 }
3672
3673 task_clock_perf_counter_update(counter, time);
3674 }
3675
3676 static const struct pmu perf_ops_task_clock = {
3677 .enable = task_clock_perf_counter_enable,
3678 .disable = task_clock_perf_counter_disable,
3679 .read = task_clock_perf_counter_read,
3680 };
3681
3682 #ifdef CONFIG_EVENT_PROFILE
3683 void perf_tpcounter_event(int event_id)
3684 {
3685 struct perf_sample_data data = {
3686 .regs = get_irq_regs(),
3687 .addr = 0,
3688 };
3689
3690 if (!data.regs)
3691 data.regs = task_pt_regs(current);
3692
3693 do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, &data);
3694 }
3695 EXPORT_SYMBOL_GPL(perf_tpcounter_event);
3696
3697 extern int ftrace_profile_enable(int);
3698 extern void ftrace_profile_disable(int);
3699
3700 static void tp_perf_counter_destroy(struct perf_counter *counter)
3701 {
3702 ftrace_profile_disable(counter->attr.config);
3703 }
3704
3705 static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
3706 {
3707 if (ftrace_profile_enable(counter->attr.config))
3708 return NULL;
3709
3710 counter->destroy = tp_perf_counter_destroy;
3711
3712 return &perf_ops_generic;
3713 }
3714 #else
3715 static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
3716 {
3717 return NULL;
3718 }
3719 #endif
3720
3721 atomic_t perf_swcounter_enabled[PERF_COUNT_SW_MAX];
3722
3723 static void sw_perf_counter_destroy(struct perf_counter *counter)
3724 {
3725 u64 event = counter->attr.config;
3726
3727 WARN_ON(counter->parent);
3728
3729 atomic_dec(&perf_swcounter_enabled[event]);
3730 }
3731
3732 static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
3733 {
3734 const struct pmu *pmu = NULL;
3735 u64 event = counter->attr.config;
3736
3737 /*
3738 * Software counters (currently) can't in general distinguish
3739 * between user, kernel and hypervisor events.
3740 * However, context switches and cpu migrations are considered
3741 * to be kernel events, and page faults are never hypervisor
3742 * events.
3743 */
3744 switch (event) {
3745 case PERF_COUNT_SW_CPU_CLOCK:
3746 pmu = &perf_ops_cpu_clock;
3747
3748 break;
3749 case PERF_COUNT_SW_TASK_CLOCK:
3750 /*
3751 * If the user instantiates this as a per-cpu counter,
3752 * use the cpu_clock counter instead.
3753 */
3754 if (counter->ctx->task)
3755 pmu = &perf_ops_task_clock;
3756 else
3757 pmu = &perf_ops_cpu_clock;
3758
3759 break;
3760 case PERF_COUNT_SW_PAGE_FAULTS:
3761 case PERF_COUNT_SW_PAGE_FAULTS_MIN:
3762 case PERF_COUNT_SW_PAGE_FAULTS_MAJ:
3763 case PERF_COUNT_SW_CONTEXT_SWITCHES:
3764 case PERF_COUNT_SW_CPU_MIGRATIONS:
3765 if (!counter->parent) {
3766 atomic_inc(&perf_swcounter_enabled[event]);
3767 counter->destroy = sw_perf_counter_destroy;
3768 }
3769 pmu = &perf_ops_generic;
3770 break;
3771 }
3772
3773 return pmu;
3774 }
3775
3776 /*
3777 * Allocate and initialize a counter structure
3778 */
3779 static struct perf_counter *
3780 perf_counter_alloc(struct perf_counter_attr *attr,
3781 int cpu,
3782 struct perf_counter_context *ctx,
3783 struct perf_counter *group_leader,
3784 struct perf_counter *parent_counter,
3785 gfp_t gfpflags)
3786 {
3787 const struct pmu *pmu;
3788 struct perf_counter *counter;
3789 struct hw_perf_counter *hwc;
3790 long err;
3791
3792 counter = kzalloc(sizeof(*counter), gfpflags);
3793 if (!counter)
3794 return ERR_PTR(-ENOMEM);
3795
3796 /*
3797 * Single counters are their own group leaders, with an
3798 * empty sibling list:
3799 */
3800 if (!group_leader)
3801 group_leader = counter;
3802
3803 mutex_init(&counter->child_mutex);
3804 INIT_LIST_HEAD(&counter->child_list);
3805
3806 INIT_LIST_HEAD(&counter->list_entry);
3807 INIT_LIST_HEAD(&counter->event_entry);
3808 INIT_LIST_HEAD(&counter->sibling_list);
3809 init_waitqueue_head(&counter->waitq);
3810
3811 mutex_init(&counter->mmap_mutex);
3812
3813 counter->cpu = cpu;
3814 counter->attr = *attr;
3815 counter->group_leader = group_leader;
3816 counter->pmu = NULL;
3817 counter->ctx = ctx;
3818 counter->oncpu = -1;
3819
3820 counter->parent = parent_counter;
3821
3822 counter->ns = get_pid_ns(current->nsproxy->pid_ns);
3823 counter->id = atomic64_inc_return(&perf_counter_id);
3824
3825 counter->state = PERF_COUNTER_STATE_INACTIVE;
3826
3827 if (attr->disabled)
3828 counter->state = PERF_COUNTER_STATE_OFF;
3829
3830 pmu = NULL;
3831
3832 hwc = &counter->hw;
3833 hwc->sample_period = attr->sample_period;
3834 if (attr->freq && attr->sample_freq)
3835 hwc->sample_period = 1;
3836
3837 atomic64_set(&hwc->period_left, hwc->sample_period);
3838
3839 /*
3840 * we currently do not support PERF_SAMPLE_GROUP on inherited counters
3841 */
3842 if (attr->inherit && (attr->sample_type & PERF_SAMPLE_GROUP))
3843 goto done;
3844
3845 switch (attr->type) {
3846 case PERF_TYPE_RAW:
3847 case PERF_TYPE_HARDWARE:
3848 case PERF_TYPE_HW_CACHE:
3849 pmu = hw_perf_counter_init(counter);
3850 break;
3851
3852 case PERF_TYPE_SOFTWARE:
3853 pmu = sw_perf_counter_init(counter);
3854 break;
3855
3856 case PERF_TYPE_TRACEPOINT:
3857 pmu = tp_perf_counter_init(counter);
3858 break;
3859
3860 default:
3861 break;
3862 }
3863 done:
3864 err = 0;
3865 if (!pmu)
3866 err = -EINVAL;
3867 else if (IS_ERR(pmu))
3868 err = PTR_ERR(pmu);
3869
3870 if (err) {
3871 if (counter->ns)
3872 put_pid_ns(counter->ns);
3873 kfree(counter);
3874 return ERR_PTR(err);
3875 }
3876
3877 counter->pmu = pmu;
3878
3879 if (!counter->parent) {
3880 atomic_inc(&nr_counters);
3881 if (counter->attr.mmap)
3882 atomic_inc(&nr_mmap_counters);
3883 if (counter->attr.comm)
3884 atomic_inc(&nr_comm_counters);
3885 }
3886
3887 return counter;
3888 }
3889
3890 static int perf_copy_attr(struct perf_counter_attr __user *uattr,
3891 struct perf_counter_attr *attr)
3892 {
3893 int ret;
3894 u32 size;
3895
3896 if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
3897 return -EFAULT;
3898
3899 /*
3900 * Zero the full structure, so that a short copy leaves the remaining fields zeroed.
3901 */
3902 memset(attr, 0, sizeof(*attr));
3903
3904 ret = get_user(size, &uattr->size);
3905 if (ret)
3906 return ret;
3907
3908 if (size > PAGE_SIZE) /* silly large */
3909 goto err_size;
3910
3911 if (!size) /* abi compat */
3912 size = PERF_ATTR_SIZE_VER0;
3913
3914 if (size < PERF_ATTR_SIZE_VER0)
3915 goto err_size;
3916
3917 /*
3918 * If we're handed a bigger struct than we know of,
3919 * ensure all the unknown bits are 0.
3920 */
3921 if (size > sizeof(*attr)) {
3922 unsigned long val;
3923 unsigned long __user *addr;
3924 unsigned long __user *end;
3925
3926 addr = PTR_ALIGN((void __user *)uattr + sizeof(*attr),
3927 sizeof(unsigned long));
3928 end = PTR_ALIGN((void __user *)uattr + size,
3929 sizeof(unsigned long));
3930
3931 for (; addr < end; addr += sizeof(unsigned long)) {
3932 ret = get_user(val, addr);
3933 if (ret)
3934 return ret;
3935 if (val)
3936 goto err_size;
3937 }
3938 }
3939
3940 ret = copy_from_user(attr, uattr, size);
3941 if (ret)
3942 return -EFAULT;
3943
3944 /*
3945 * If the type is valid, the corresponding init routine will verify
3946 * attr->config.
3947 */
3948 if (attr->type >= PERF_TYPE_MAX)
3949 return -EINVAL;
3950
3951 if (attr->__reserved_1 || attr->__reserved_2 || attr->__reserved_3)
3952 return -EINVAL;
3953
3954 if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
3955 return -EINVAL;
3956
3957 if (attr->read_format & ~(PERF_FORMAT_MAX-1))
3958 return -EINVAL;
3959
3960 out:
3961 return ret;
3962
3963 err_size:
3964 put_user(sizeof(*attr), &uattr->size);
3965 ret = -E2BIG;
3966 goto out;
3967 }
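
/*
 * Example (illustrative sketch only): the size-versioned copy above is
 * what lets a user-space perf_counter_attr be older, the same size or
 * newer than the kernel's.  A minimal caller fills it in like:
 *
 *	struct perf_counter_attr attr;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.size     = sizeof(attr);
 *	attr.type     = PERF_TYPE_HARDWARE;
 *	attr.config   = PERF_COUNT_HW_CPU_CYCLES;
 *	attr.disabled = 1;
 *
 * A larger user-space struct is still accepted, as long as every bit
 * the kernel does not know about is zero; the specific event chosen
 * here is only an example.
 */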
3968
3969 /**
3970 * sys_perf_counter_open - open a performance counter, associate it to a task/cpu
3971 *
3972 * @attr_uptr: event type attributes for monitoring/sampling
3973 * @pid: target pid
3974 * @cpu: target cpu
3975 * @group_fd: group leader counter fd
3976 */
3977 SYSCALL_DEFINE5(perf_counter_open,
3978 struct perf_counter_attr __user *, attr_uptr,
3979 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
3980 {
3981 struct perf_counter *counter, *group_leader;
3982 struct perf_counter_attr attr;
3983 struct perf_counter_context *ctx;
3984 struct file *counter_file = NULL;
3985 struct file *group_file = NULL;
3986 int fput_needed = 0;
3987 int fput_needed2 = 0;
3988 int ret;
3989
3990 /* for future expandability... */
3991 if (flags)
3992 return -EINVAL;
3993
3994 ret = perf_copy_attr(attr_uptr, &attr);
3995 if (ret)
3996 return ret;
3997
3998 if (!attr.exclude_kernel) {
3999 if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
4000 return -EACCES;
4001 }
4002
4003 if (attr.freq) {
4004 if (attr.sample_freq > sysctl_perf_counter_sample_rate)
4005 return -EINVAL;
4006 }
4007
4008 /*
4009 * Get the target context (task or percpu):
4010 */
4011 ctx = find_get_context(pid, cpu);
4012 if (IS_ERR(ctx))
4013 return PTR_ERR(ctx);
4014
4015 /*
4016 * Look up the group leader (we will attach this counter to it):
4017 */
4018 group_leader = NULL;
4019 if (group_fd != -1) {
4020 ret = -EINVAL;
4021 group_file = fget_light(group_fd, &fput_needed);
4022 if (!group_file)
4023 goto err_put_context;
4024 if (group_file->f_op != &perf_fops)
4025 goto err_put_context;
4026
4027 group_leader = group_file->private_data;
4028 /*
4029 * Do not allow a recursive hierarchy (this new sibling
4030 * becoming part of another group-sibling):
4031 */
4032 if (group_leader->group_leader != group_leader)
4033 goto err_put_context;
4034 /*
4035 * Do not allow to attach to a group in a different
4036 * task or CPU context:
4037 */
4038 if (group_leader->ctx != ctx)
4039 goto err_put_context;
4040 /*
4041 * Only a group leader can be exclusive or pinned
4042 */
4043 if (attr.exclusive || attr.pinned)
4044 goto err_put_context;
4045 }
4046
4047 counter = perf_counter_alloc(&attr, cpu, ctx, group_leader,
4048 NULL, GFP_KERNEL);
4049 ret = PTR_ERR(counter);
4050 if (IS_ERR(counter))
4051 goto err_put_context;
4052
4053 ret = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0);
4054 if (ret < 0)
4055 goto err_free_put_context;
4056
4057 counter_file = fget_light(ret, &fput_needed2);
4058 if (!counter_file)
4059 goto err_free_put_context;
4060
4061 counter->filp = counter_file;
4062 WARN_ON_ONCE(ctx->parent_ctx);
4063 mutex_lock(&ctx->mutex);
4064 perf_install_in_context(ctx, counter, cpu);
4065 ++ctx->generation;
4066 mutex_unlock(&ctx->mutex);
4067
4068 counter->owner = current;
4069 get_task_struct(current);
4070 mutex_lock(&current->perf_counter_mutex);
4071 list_add_tail(&counter->owner_entry, &current->perf_counter_list);
4072 mutex_unlock(&current->perf_counter_mutex);
4073
4074 fput_light(counter_file, fput_needed2);
4075
4076 out_fput:
4077 fput_light(group_file, fput_needed);
4078
4079 return ret;
4080
4081 err_free_put_context:
4082 kfree(counter);
4083
4084 err_put_context:
4085 put_ctx(ctx);
4086
4087 goto out_fput;
4088 }
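
/*
 * Example (illustrative sketch only): from user space the typical
 * sequence against this syscall is open, (optionally) mmap, then read:
 *
 *	// pid 0: current task, cpu -1: any cpu,
 *	// group_fd -1: no group, flags 0
 *	int fd = syscall(__NR_perf_counter_open, &attr, 0, -1, -1, 0);
 *	u64 count;
 *
 *	if (fd >= 0 && read(fd, &count, sizeof(count)) == sizeof(count))
 *		printf("count: %llu\n", (unsigned long long)count);
 *
 * The syscall() wrapper and the __NR_perf_counter_open number are
 * user-space/arch details and are assumptions of this sketch; attr is
 * filled in as sketched after perf_copy_attr() above.
 */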
4089
4090 /*
4091 * inherit a counter from parent task to child task:
4092 */
4093 static struct perf_counter *
4094 inherit_counter(struct perf_counter *parent_counter,
4095 struct task_struct *parent,
4096 struct perf_counter_context *parent_ctx,
4097 struct task_struct *child,
4098 struct perf_counter *group_leader,
4099 struct perf_counter_context *child_ctx)
4100 {
4101 struct perf_counter *child_counter;
4102
4103 /*
4104 * Instead of creating recursive hierarchies of counters,
4105 * we link inherited counters back to the original parent,
4106 * which is guaranteed to have a filp that we use as the
4107 * reference count:
4108 */
4109 if (parent_counter->parent)
4110 parent_counter = parent_counter->parent;
4111
4112 child_counter = perf_counter_alloc(&parent_counter->attr,
4113 parent_counter->cpu, child_ctx,
4114 group_leader, parent_counter,
4115 GFP_KERNEL);
4116 if (IS_ERR(child_counter))
4117 return child_counter;
4118 get_ctx(child_ctx);
4119
4120 /*
4121 * Make the child state follow the state of the parent counter,
4122 * not its attr.disabled bit. We hold the parent's mutex,
4123 * so we won't race with perf_counter_{en, dis}able_family.
4124 */
4125 if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE)
4126 child_counter->state = PERF_COUNTER_STATE_INACTIVE;
4127 else
4128 child_counter->state = PERF_COUNTER_STATE_OFF;
4129
4130 if (parent_counter->attr.freq)
4131 child_counter->hw.sample_period = parent_counter->hw.sample_period;
4132
4133 /*
4134 * Link it up in the child's context:
4135 */
4136 add_counter_to_ctx(child_counter, child_ctx);
4137
4138 /*
4139 * Get a reference to the parent filp - we will fput it
4140 * when the child counter exits. This is safe to do because
4141 * we are in the parent and we know that the filp still
4142 * exists and has a nonzero count:
4143 */
4144 atomic_long_inc(&parent_counter->filp->f_count);
4145
4146 /*
4147 * Link this into the parent counter's child list
4148 */
4149 WARN_ON_ONCE(parent_counter->ctx->parent_ctx);
4150 mutex_lock(&parent_counter->child_mutex);
4151 list_add_tail(&child_counter->child_list, &parent_counter->child_list);
4152 mutex_unlock(&parent_counter->child_mutex);
4153
4154 return child_counter;
4155 }
4156
4157 static int inherit_group(struct perf_counter *parent_counter,
4158 struct task_struct *parent,
4159 struct perf_counter_context *parent_ctx,
4160 struct task_struct *child,
4161 struct perf_counter_context *child_ctx)
4162 {
4163 struct perf_counter *leader;
4164 struct perf_counter *sub;
4165 struct perf_counter *child_ctr;
4166
4167 leader = inherit_counter(parent_counter, parent, parent_ctx,
4168 child, NULL, child_ctx);
4169 if (IS_ERR(leader))
4170 return PTR_ERR(leader);
4171 list_for_each_entry(sub, &parent_counter->sibling_list, list_entry) {
4172 child_ctr = inherit_counter(sub, parent, parent_ctx,
4173 child, leader, child_ctx);
4174 if (IS_ERR(child_ctr))
4175 return PTR_ERR(child_ctr);
4176 }
4177 return 0;
4178 }
4179
4180 static void sync_child_counter(struct perf_counter *child_counter,
4181 struct task_struct *child)
4182 {
4183 struct perf_counter *parent_counter = child_counter->parent;
4184 u64 child_val;
4185
4186 if (child_counter->attr.inherit_stat)
4187 perf_counter_read_event(child_counter, child);
4188
4189 child_val = atomic64_read(&child_counter->count);
4190
4191 /*
4192 * Add back the child's count to the parent's count:
4193 */
4194 atomic64_add(child_val, &parent_counter->count);
4195 atomic64_add(child_counter->total_time_enabled,
4196 &parent_counter->child_total_time_enabled);
4197 atomic64_add(child_counter->total_time_running,
4198 &parent_counter->child_total_time_running);
4199
4200 /*
4201 * Remove this counter from the parent's list
4202 */
4203 WARN_ON_ONCE(parent_counter->ctx->parent_ctx);
4204 mutex_lock(&parent_counter->child_mutex);
4205 list_del_init(&child_counter->child_list);
4206 mutex_unlock(&parent_counter->child_mutex);
4207
4208 /*
4209 * Release the parent counter, if this was the last
4210 * reference to it.
4211 */
4212 fput(parent_counter->filp);
4213 }
4214
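/*
 * Remove a counter from the exiting child's context; if it was
 * inherited from a parent counter, sync the counts back and free it.
 */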
4215 static void
4216 __perf_counter_exit_task(struct perf_counter *child_counter,
4217 struct perf_counter_context *child_ctx,
4218 struct task_struct *child)
4219 {
4220 struct perf_counter *parent_counter;
4221
4222 update_counter_times(child_counter);
4223 perf_counter_remove_from_context(child_counter);
4224
4225 parent_counter = child_counter->parent;
4226 /*
4227 * It can happen that the parent exits first and still has
4228 * counters around due to the child reference. These counters
4229 * need to be zapped here; otherwise they would linger.
4230 */
4231 if (parent_counter) {
4232 sync_child_counter(child_counter, child);
4233 free_counter(child_counter);
4234 }
4235 }
4236
4237 /*
4238 * When a child task exits, feed back counter values to parent counters.
4239 */
4240 void perf_counter_exit_task(struct task_struct *child)
4241 {
4242 struct perf_counter *child_counter, *tmp;
4243 struct perf_counter_context *child_ctx;
4244 unsigned long flags;
4245
4246 if (likely(!child->perf_counter_ctxp))
4247 return;
4248
4249 local_irq_save(flags);
4250 /*
4251 * We can't reschedule here because interrupts are disabled,
4252 * and the child is either current or a task that cannot be
4253 * scheduled, so rescheduling cannot change our context out
4254 * from under us.
4255 */
4256 child_ctx = child->perf_counter_ctxp;
4257 __perf_counter_task_sched_out(child_ctx);
4258
4259 /*
4260 * Take the context lock here so that if find_get_context is
4261 * reading child->perf_counter_ctxp, we wait until it has
4262 * incremented the context's refcount before we do put_ctx below.
4263 */
4264 spin_lock(&child_ctx->lock);
4265 child->perf_counter_ctxp = NULL;
4266 /*
4267 * If this context is a clone, unclone it so it can't get
4268 * swapped to another process while we're removing all
4269 * the counters from it.
4270 */
4271 unclone_ctx(child_ctx);
4272 spin_unlock(&child_ctx->lock);
4273 local_irq_restore(flags);
4274
4275 /*
4276 * We can recurse on the same lock type through:
4277 *
4278 * __perf_counter_exit_task()
4279 * sync_child_counter()
4280 * fput(parent_counter->filp)
4281 * perf_release()
4282 * mutex_lock(&ctx->mutex)
4283 *
4284 * But since it's the parent context, it won't be the same instance.
4285 */
4286 mutex_lock_nested(&child_ctx->mutex, SINGLE_DEPTH_NESTING);
4287
4288 again:
4289 list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list,
4290 list_entry)
4291 __perf_counter_exit_task(child_counter, child_ctx, child);
4292
4293 /*
4294 * If the last counter was a group counter, it will have appended all
4295 * its siblings to the list, but we obtained 'tmp' before that, so it
4296 * still points to the list head and ends the iteration too early.
4297 */
4298 if (!list_empty(&child_ctx->counter_list))
4299 goto again;
4300
4301 mutex_unlock(&child_ctx->mutex);
4302
4303 put_ctx(child_ctx);
4304 }
4305
4306 /*
4307 * Free an unexposed, unused context, as created by inheritance in
4308 * perf_counter_init_task() below; used by fork() in case of failure.
4309 */
4310 void perf_counter_free_task(struct task_struct *task)
4311 {
4312 struct perf_counter_context *ctx = task->perf_counter_ctxp;
4313 struct perf_counter *counter, *tmp;
4314
4315 if (!ctx)
4316 return;
4317
4318 mutex_lock(&ctx->mutex);
4319 again:
4320 list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry) {
4321 struct perf_counter *parent = counter->parent;
4322
4323 if (WARN_ON_ONCE(!parent))
4324 continue;
4325
4326 mutex_lock(&parent->child_mutex);
4327 list_del_init(&counter->child_list);
4328 mutex_unlock(&parent->child_mutex);
4329
4330 fput(parent->filp);
4331
4332 list_del_counter(counter, ctx);
4333 free_counter(counter);
4334 }
4335
4336 if (!list_empty(&ctx->counter_list))
4337 goto again;
4338
4339 mutex_unlock(&ctx->mutex);
4340
4341 put_ctx(ctx);
4342 }
4343
4344 /*
4345 * Initialize the perf_counter context in task_struct
4346 */
4347 int perf_counter_init_task(struct task_struct *child)
4348 {
4349 struct perf_counter_context *child_ctx, *parent_ctx;
4350 struct perf_counter_context *cloned_ctx;
4351 struct perf_counter *counter;
4352 struct task_struct *parent = current;
4353 int inherited_all = 1;
4354 int ret = 0;
4355
4356 child->perf_counter_ctxp = NULL;
4357
4358 mutex_init(&child->perf_counter_mutex);
4359 INIT_LIST_HEAD(&child->perf_counter_list);
4360
4361 if (likely(!parent->perf_counter_ctxp))
4362 return 0;
4363
4364 /*
4365 * This is executed from the parent task context, so inherit
4366 * counters that have been marked for cloning.
4367 * First allocate and initialize a context for the child.
4368 */
4369
4370 child_ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL);
4371 if (!child_ctx)
4372 return -ENOMEM;
4373
4374 __perf_counter_init_context(child_ctx, child);
4375 child->perf_counter_ctxp = child_ctx;
4376 get_task_struct(child);
4377
4378 /*
4379 * If the parent's context is a clone, pin it so it won't get
4380 * swapped under us.
4381 */
4382 parent_ctx = perf_pin_task_context(parent);
4383
4384 /*
4385 * No need to check if parent_ctx != NULL here; since we saw
4386 * it non-NULL earlier, the only reason for it to become NULL
4387 * is if we exit, and since we're currently in the middle of
4388 * a fork we can't be exiting at the same time.
4389 */
4390
4391 /*
4392 * Lock the parent list. No need to lock the child - it is not
4393 * PID-hashed yet and not running, so nobody else can access it.
4394 */
4395 mutex_lock(&parent_ctx->mutex);
4396
4397 /*
4398 * We don't have to disable NMIs - we are only looking at
4399 * the list, not manipulating it:
4400 */
4401 list_for_each_entry_rcu(counter, &parent_ctx->event_list, event_entry) {
4402 if (counter != counter->group_leader)
4403 continue;
4404
4405 if (!counter->attr.inherit) {
4406 inherited_all = 0;
4407 continue;
4408 }
4409
4410 ret = inherit_group(counter, parent, parent_ctx,
4411 child, child_ctx);
4412 if (ret) {
4413 inherited_all = 0;
4414 break;
4415 }
4416 }
4417
4418 if (inherited_all) {
4419 /*
4420 * Mark the child context as a clone of the parent
4421 * context, or of whatever the parent is a clone of.
4422 * Note that if the parent is a clone, it could get
4423 * uncloned at any point, but that doesn't matter
4424 * because the list of counters and the generation
4425 * count can't have changed since we took the mutex.
4426 */
4427 cloned_ctx = rcu_dereference(parent_ctx->parent_ctx);
4428 if (cloned_ctx) {
4429 child_ctx->parent_ctx = cloned_ctx;
4430 child_ctx->parent_gen = parent_ctx->parent_gen;
4431 } else {
4432 child_ctx->parent_ctx = parent_ctx;
4433 child_ctx->parent_gen = parent_ctx->generation;
4434 }
4435 get_ctx(child_ctx->parent_ctx);
4436 }
4437
4438 mutex_unlock(&parent_ctx->mutex);
4439
4440 perf_unpin_context(parent_ctx);
4441
4442 return ret;
4443 }
4444
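/*
 * Initialize the per-CPU context and hardware counter state for a CPU
 * that is coming online (or for the boot CPU at init time).
 */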
4445 static void __cpuinit perf_counter_init_cpu(int cpu)
4446 {
4447 struct perf_cpu_context *cpuctx;
4448
4449 cpuctx = &per_cpu(perf_cpu_context, cpu);
4450 __perf_counter_init_context(&cpuctx->ctx, NULL);
4451
4452 spin_lock(&perf_resource_lock);
4453 cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu;
4454 spin_unlock(&perf_resource_lock);
4455
4456 hw_perf_counter_setup(cpu);
4457 }
4458
4459 #ifdef CONFIG_HOTPLUG_CPU
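/*
 * Remove all counters from the context of a CPU that is going offline.
 * Runs on the target CPU via smp_call_function_single().
 */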
4460 static void __perf_counter_exit_cpu(void *info)
4461 {
4462 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
4463 struct perf_counter_context *ctx = &cpuctx->ctx;
4464 struct perf_counter *counter, *tmp;
4465
4466 list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry)
4467 __perf_counter_remove_from_context(counter);
4468 }
4469 static void perf_counter_exit_cpu(int cpu)
4470 {
4471 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
4472 struct perf_counter_context *ctx = &cpuctx->ctx;
4473
4474 mutex_lock(&ctx->mutex);
4475 smp_call_function_single(cpu, __perf_counter_exit_cpu, NULL, 1);
4476 mutex_unlock(&ctx->mutex);
4477 }
4478 #else
4479 static inline void perf_counter_exit_cpu(int cpu) { }
4480 #endif
4481
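/*
 * CPU hotplug callback: set up counters on CPUs coming up and tear
 * them down on CPUs going down.
 */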
4482 static int __cpuinit
4483 perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
4484 {
4485 unsigned int cpu = (long)hcpu;
4486
4487 switch (action) {
4488
4489 case CPU_UP_PREPARE:
4490 case CPU_UP_PREPARE_FROZEN:
4491 perf_counter_init_cpu(cpu);
4492 break;
4493
4494 case CPU_DOWN_PREPARE:
4495 case CPU_DOWN_PREPARE_FROZEN:
4496 perf_counter_exit_cpu(cpu);
4497 break;
4498
4499 default:
4500 break;
4501 }
4502
4503 return NOTIFY_OK;
4504 }
4505
4506 /*
4507 * This has to have a higher priority than migration_notifier in sched.c.
4508 */
4509 static struct notifier_block __cpuinitdata perf_cpu_nb = {
4510 .notifier_call = perf_cpu_notify,
4511 .priority = 20,
4512 };
4513
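/*
 * Called at boot: initialize the boot CPU's context and register the
 * CPU hotplug notifier.
 */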
4514 void __init perf_counter_init(void)
4515 {
4516 perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
4517 (void *)(long)smp_processor_id());
4518 register_cpu_notifier(&perf_cpu_nb);
4519 }
4520
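/*
 * sysfs handlers for the "reserve_percpu" attribute: the number of
 * counters reserved on each CPU for per-CPU use. Raising it lowers
 * each CPU's max_pertask budget accordingly.
 */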
4521 static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf)
4522 {
4523 return sprintf(buf, "%d\n", perf_reserved_percpu);
4524 }
4525
4526 static ssize_t
4527 perf_set_reserve_percpu(struct sysdev_class *class,
4528 const char *buf,
4529 size_t count)
4530 {
4531 struct perf_cpu_context *cpuctx;
4532 unsigned long val;
4533 int err, cpu, mpt;
4534
4535 err = strict_strtoul(buf, 10, &val);
4536 if (err)
4537 return err;
4538 if (val > perf_max_counters)
4539 return -EINVAL;
4540
4541 spin_lock(&perf_resource_lock);
4542 perf_reserved_percpu = val;
4543 for_each_online_cpu(cpu) {
4544 cpuctx = &per_cpu(perf_cpu_context, cpu);
4545 spin_lock_irq(&cpuctx->ctx.lock);
4546 mpt = min(perf_max_counters - cpuctx->ctx.nr_counters,
4547 perf_max_counters - perf_reserved_percpu);
4548 cpuctx->max_pertask = mpt;
4549 spin_unlock_irq(&cpuctx->ctx.lock);
4550 }
4551 spin_unlock(&perf_resource_lock);
4552
4553 return count;
4554 }
4555
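/*
 * sysfs handlers for the "overcommit" attribute (0 or 1).
 */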
4556 static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf)
4557 {
4558 return sprintf(buf, "%d\n", perf_overcommit);
4559 }
4560
4561 static ssize_t
4562 perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count)
4563 {
4564 unsigned long val;
4565 int err;
4566
4567 err = strict_strtoul(buf, 10, &val);
4568 if (err)
4569 return err;
4570 if (val > 1)
4571 return -EINVAL;
4572
4573 spin_lock(&perf_resource_lock);
4574 perf_overcommit = val;
4575 spin_unlock(&perf_resource_lock);
4576
4577 return count;
4578 }
4579
4580 static SYSDEV_CLASS_ATTR(
4581 reserve_percpu,
4582 0644,
4583 perf_show_reserve_percpu,
4584 perf_set_reserve_percpu
4585 );
4586
4587 static SYSDEV_CLASS_ATTR(
4588 overcommit,
4589 0644,
4590 perf_show_overcommit,
4591 perf_set_overcommit
4592 );
4593
4594 static struct attribute *perfclass_attrs[] = {
4595 &attr_reserve_percpu.attr,
4596 &attr_overcommit.attr,
4597 NULL
4598 };
4599
4600 static struct attribute_group perfclass_attr_group = {
4601 .attrs = perfclass_attrs,
4602 .name = "perf_counters",
4603 };
4604
4605 static int __init perf_counter_sysfs_init(void)
4606 {
4607 return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
4608 &perfclass_attr_group);
4609 }
4610 device_initcall(perf_counter_sysfs_init);