perf_counter/x86: Fix the model number of Intel Core2 processors
[deliverable/linux.git] / kernel / perf_counter.c
1 /*
2 * Performance counter core code
3 *
4 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
6 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
7 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
8 *
9 * For licensing details see kernel-base/COPYING
10 */
11
12 #include <linux/fs.h>
13 #include <linux/mm.h>
14 #include <linux/cpu.h>
15 #include <linux/smp.h>
16 #include <linux/file.h>
17 #include <linux/poll.h>
18 #include <linux/sysfs.h>
19 #include <linux/dcache.h>
20 #include <linux/percpu.h>
21 #include <linux/ptrace.h>
22 #include <linux/vmstat.h>
23 #include <linux/hardirq.h>
24 #include <linux/rculist.h>
25 #include <linux/uaccess.h>
26 #include <linux/syscalls.h>
27 #include <linux/anon_inodes.h>
28 #include <linux/kernel_stat.h>
29 #include <linux/perf_counter.h>
30
31 #include <asm/irq_regs.h>
32
33 /*
34 * Each CPU has a list of per CPU counters:
35 */
36 DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);
37
38 int perf_max_counters __read_mostly = 1;
39 static int perf_reserved_percpu __read_mostly;
40 static int perf_overcommit __read_mostly = 1;
41
42 static atomic_t nr_counters __read_mostly;
43 static atomic_t nr_mmap_counters __read_mostly;
44 static atomic_t nr_comm_counters __read_mostly;
45
46 int sysctl_perf_counter_priv __read_mostly; /* do we need to be privileged */
47 int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */
48 int sysctl_perf_counter_limit __read_mostly = 100000; /* max NMIs per second */
49
50 static atomic64_t perf_counter_id;
51
52 /*
53 * Lock for (sysadmin-configurable) counter reservations:
54 */
55 static DEFINE_SPINLOCK(perf_resource_lock);
56
57 /*
58 * Architecture provided APIs - weak aliases:
59 */
60 extern __weak const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
61 {
62 return NULL;
63 }
64
65 void __weak hw_perf_disable(void) { barrier(); }
66 void __weak hw_perf_enable(void) { barrier(); }
67
68 void __weak hw_perf_counter_setup(int cpu) { barrier(); }
69
70 int __weak
71 hw_perf_group_sched_in(struct perf_counter *group_leader,
72 struct perf_cpu_context *cpuctx,
73 struct perf_counter_context *ctx, int cpu)
74 {
75 return 0;
76 }
77
78 void __weak perf_counter_print_debug(void) { }
79
80 static DEFINE_PER_CPU(int, disable_count);
81
82 void __perf_disable(void)
83 {
84 __get_cpu_var(disable_count)++;
85 }
86
87 bool __perf_enable(void)
88 {
89 return !--__get_cpu_var(disable_count);
90 }
91
92 void perf_disable(void)
93 {
94 __perf_disable();
95 hw_perf_disable();
96 }
97
98 void perf_enable(void)
99 {
100 if (__perf_enable())
101 hw_perf_enable();
102 }
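/*
 * perf_disable()/perf_enable() pairs may nest on a CPU: disable_count is
 * a per-CPU depth counter and only the outermost perf_enable() (the one
 * that brings the count back to zero) re-enables the PMU via
 * hw_perf_enable(). Illustrative use (sketch):
 *
 *	perf_disable();
 *	... poke at counter/group state without PMU interference ...
 *	perf_enable();
 */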
103
104 static void get_ctx(struct perf_counter_context *ctx)
105 {
106 atomic_inc(&ctx->refcount);
107 }
108
109 static void free_ctx(struct rcu_head *head)
110 {
111 struct perf_counter_context *ctx;
112
113 ctx = container_of(head, struct perf_counter_context, rcu_head);
114 kfree(ctx);
115 }
116
117 static void put_ctx(struct perf_counter_context *ctx)
118 {
119 if (atomic_dec_and_test(&ctx->refcount)) {
120 if (ctx->parent_ctx)
121 put_ctx(ctx->parent_ctx);
122 if (ctx->task)
123 put_task_struct(ctx->task);
124 call_rcu(&ctx->rcu_head, free_ctx);
125 }
126 }
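/*
 * Context lifetime: get_ctx()/put_ctx() reference-count a
 * perf_counter_context. The final put_ctx() also drops the references it
 * holds on its parent context and owning task, and defers the kfree() to
 * an RCU grace period, so lockless readers under rcu_read_lock() (such as
 * perf_lock_task_context() below) never dereference freed memory.
 */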
127
128 /*
129 * Get the perf_counter_context for a task and lock it.
130  * This has to cope with the fact that until it is locked,
131 * the context could get moved to another task.
132 */
133 static struct perf_counter_context *
134 perf_lock_task_context(struct task_struct *task, unsigned long *flags)
135 {
136 struct perf_counter_context *ctx;
137
138 rcu_read_lock();
139 retry:
140 ctx = rcu_dereference(task->perf_counter_ctxp);
141 if (ctx) {
142 /*
143 * If this context is a clone of another, it might
144 * get swapped for another underneath us by
145 * perf_counter_task_sched_out, though the
146 * rcu_read_lock() protects us from any context
147 * getting freed. Lock the context and check if it
148 * got swapped before we could get the lock, and retry
149 * if so. If we locked the right context, then it
150 * can't get swapped on us any more.
151 */
152 spin_lock_irqsave(&ctx->lock, *flags);
153 if (ctx != rcu_dereference(task->perf_counter_ctxp)) {
154 spin_unlock_irqrestore(&ctx->lock, *flags);
155 goto retry;
156 }
157 }
158 rcu_read_unlock();
159 return ctx;
160 }
161
162 /*
163 * Get the context for a task and increment its pin_count so it
164 * can't get swapped to another task. This also increments its
165 * reference count so that the context can't get freed.
166 */
167 static struct perf_counter_context *perf_pin_task_context(struct task_struct *task)
168 {
169 struct perf_counter_context *ctx;
170 unsigned long flags;
171
172 ctx = perf_lock_task_context(task, &flags);
173 if (ctx) {
174 ++ctx->pin_count;
175 get_ctx(ctx);
176 spin_unlock_irqrestore(&ctx->lock, flags);
177 }
178 return ctx;
179 }
180
181 static void perf_unpin_context(struct perf_counter_context *ctx)
182 {
183 unsigned long flags;
184
185 spin_lock_irqsave(&ctx->lock, flags);
186 --ctx->pin_count;
187 spin_unlock_irqrestore(&ctx->lock, flags);
188 put_ctx(ctx);
189 }
190
191 /*
192  * Add a counter to the lists for its context.
193 * Must be called with ctx->mutex and ctx->lock held.
194 */
195 static void
196 list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
197 {
198 struct perf_counter *group_leader = counter->group_leader;
199
200 /*
201 * Depending on whether it is a standalone or sibling counter,
202 * add it straight to the context's counter list, or to the group
203 * leader's sibling list:
204 */
205 if (group_leader == counter)
206 list_add_tail(&counter->list_entry, &ctx->counter_list);
207 else {
208 list_add_tail(&counter->list_entry, &group_leader->sibling_list);
209 group_leader->nr_siblings++;
210 }
211
212 list_add_rcu(&counter->event_entry, &ctx->event_list);
213 ctx->nr_counters++;
214 }
215
216 /*
217 * Remove a counter from the lists for its context.
218 * Must be called with ctx->mutex and ctx->lock held.
219 */
220 static void
221 list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
222 {
223 struct perf_counter *sibling, *tmp;
224
225 if (list_empty(&counter->list_entry))
226 return;
227 ctx->nr_counters--;
228
229 list_del_init(&counter->list_entry);
230 list_del_rcu(&counter->event_entry);
231
232 if (counter->group_leader != counter)
233 counter->group_leader->nr_siblings--;
234
235 /*
236 * If this was a group counter with sibling counters then
237 * upgrade the siblings to singleton counters by adding them
238 * to the context list directly:
239 */
240 list_for_each_entry_safe(sibling, tmp,
241 &counter->sibling_list, list_entry) {
242
243 list_move_tail(&sibling->list_entry, &ctx->counter_list);
244 sibling->group_leader = sibling;
245 }
246 }
247
248 static void
249 counter_sched_out(struct perf_counter *counter,
250 struct perf_cpu_context *cpuctx,
251 struct perf_counter_context *ctx)
252 {
253 if (counter->state != PERF_COUNTER_STATE_ACTIVE)
254 return;
255
256 counter->state = PERF_COUNTER_STATE_INACTIVE;
257 counter->tstamp_stopped = ctx->time;
258 counter->pmu->disable(counter);
259 counter->oncpu = -1;
260
261 if (!is_software_counter(counter))
262 cpuctx->active_oncpu--;
263 ctx->nr_active--;
264 if (counter->attr.exclusive || !cpuctx->active_oncpu)
265 cpuctx->exclusive = 0;
266 }
267
268 static void
269 group_sched_out(struct perf_counter *group_counter,
270 struct perf_cpu_context *cpuctx,
271 struct perf_counter_context *ctx)
272 {
273 struct perf_counter *counter;
274
275 if (group_counter->state != PERF_COUNTER_STATE_ACTIVE)
276 return;
277
278 counter_sched_out(group_counter, cpuctx, ctx);
279
280 /*
281 * Schedule out siblings (if any):
282 */
283 list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
284 counter_sched_out(counter, cpuctx, ctx);
285
286 if (group_counter->attr.exclusive)
287 cpuctx->exclusive = 0;
288 }
289
290 /*
291 * Cross CPU call to remove a performance counter
292 *
293 * We disable the counter on the hardware level first. After that we
294 * remove it from the context list.
295 */
296 static void __perf_counter_remove_from_context(void *info)
297 {
298 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
299 struct perf_counter *counter = info;
300 struct perf_counter_context *ctx = counter->ctx;
301
302 /*
303 * If this is a task context, we need to check whether it is
304 * the current task context of this cpu. If not it has been
305 * scheduled out before the smp call arrived.
306 */
307 if (ctx->task && cpuctx->task_ctx != ctx)
308 return;
309
310 spin_lock(&ctx->lock);
311 /*
312 * Protect the list operation against NMI by disabling the
313 * counters on a global level.
314 */
315 perf_disable();
316
317 counter_sched_out(counter, cpuctx, ctx);
318
319 list_del_counter(counter, ctx);
320
321 if (!ctx->task) {
322 /*
323 * Allow more per task counters with respect to the
324 * reservation:
325 */
326 cpuctx->max_pertask =
327 min(perf_max_counters - ctx->nr_counters,
328 perf_max_counters - perf_reserved_percpu);
329 }
330
331 perf_enable();
332 spin_unlock(&ctx->lock);
333 }
334
335
336 /*
337 * Remove the counter from a task's (or a CPU's) list of counters.
338 *
339 * Must be called with ctx->mutex held.
340 *
341 * CPU counters are removed with a smp call. For task counters we only
342 * call when the task is on a CPU.
343 *
344 * If counter->ctx is a cloned context, callers must make sure that
345 * every task struct that counter->ctx->task could possibly point to
346 * remains valid. This is OK when called from perf_release since
347 * that only calls us on the top-level context, which can't be a clone.
348 * When called from perf_counter_exit_task, it's OK because the
349 * context has been detached from its task.
350 */
351 static void perf_counter_remove_from_context(struct perf_counter *counter)
352 {
353 struct perf_counter_context *ctx = counter->ctx;
354 struct task_struct *task = ctx->task;
355
356 if (!task) {
357 /*
358 * Per cpu counters are removed via an smp call and
359  * the removal is always successful.
360 */
361 smp_call_function_single(counter->cpu,
362 __perf_counter_remove_from_context,
363 counter, 1);
364 return;
365 }
366
367 retry:
368 task_oncpu_function_call(task, __perf_counter_remove_from_context,
369 counter);
370
371 spin_lock_irq(&ctx->lock);
372 /*
373 * If the context is active we need to retry the smp call.
374 */
375 if (ctx->nr_active && !list_empty(&counter->list_entry)) {
376 spin_unlock_irq(&ctx->lock);
377 goto retry;
378 }
379
380 /*
381  * The lock prevents this context from being scheduled in, so we
382  * can remove the counter safely here if the call above did not
383  * succeed.
384 */
385 if (!list_empty(&counter->list_entry)) {
386 list_del_counter(counter, ctx);
387 }
388 spin_unlock_irq(&ctx->lock);
389 }
390
391 static inline u64 perf_clock(void)
392 {
393 return cpu_clock(smp_processor_id());
394 }
395
396 /*
397 * Update the record of the current time in a context.
398 */
399 static void update_context_time(struct perf_counter_context *ctx)
400 {
401 u64 now = perf_clock();
402
403 ctx->time += now - ctx->timestamp;
404 ctx->timestamp = now;
405 }
406
407 /*
408 * Update the total_time_enabled and total_time_running fields for a counter.
409 */
410 static void update_counter_times(struct perf_counter *counter)
411 {
412 struct perf_counter_context *ctx = counter->ctx;
413 u64 run_end;
414
415 if (counter->state < PERF_COUNTER_STATE_INACTIVE)
416 return;
417
418 counter->total_time_enabled = ctx->time - counter->tstamp_enabled;
419
420 if (counter->state == PERF_COUNTER_STATE_INACTIVE)
421 run_end = counter->tstamp_stopped;
422 else
423 run_end = ctx->time;
424
425 counter->total_time_running = run_end - counter->tstamp_running;
426 }
427
428 /*
429 * Update total_time_enabled and total_time_running for all counters in a group.
430 */
431 static void update_group_times(struct perf_counter *leader)
432 {
433 struct perf_counter *counter;
434
435 update_counter_times(leader);
436 list_for_each_entry(counter, &leader->sibling_list, list_entry)
437 update_counter_times(counter);
438 }
439
440 /*
441 * Cross CPU call to disable a performance counter
442 */
443 static void __perf_counter_disable(void *info)
444 {
445 struct perf_counter *counter = info;
446 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
447 struct perf_counter_context *ctx = counter->ctx;
448
449 /*
450 * If this is a per-task counter, need to check whether this
451 * counter's task is the current task on this cpu.
452 */
453 if (ctx->task && cpuctx->task_ctx != ctx)
454 return;
455
456 spin_lock(&ctx->lock);
457
458 /*
459 * If the counter is on, turn it off.
460 * If it is in error state, leave it in error state.
461 */
462 if (counter->state >= PERF_COUNTER_STATE_INACTIVE) {
463 update_context_time(ctx);
464 update_counter_times(counter);
465 if (counter == counter->group_leader)
466 group_sched_out(counter, cpuctx, ctx);
467 else
468 counter_sched_out(counter, cpuctx, ctx);
469 counter->state = PERF_COUNTER_STATE_OFF;
470 }
471
472 spin_unlock(&ctx->lock);
473 }
474
475 /*
476 * Disable a counter.
477 *
478 * If counter->ctx is a cloned context, callers must make sure that
479 * every task struct that counter->ctx->task could possibly point to
480  * remains valid. This condition is satisfied when called through
481 * perf_counter_for_each_child or perf_counter_for_each because they
482 * hold the top-level counter's child_mutex, so any descendant that
483 * goes to exit will block in sync_child_counter.
484 * When called from perf_pending_counter it's OK because counter->ctx
485 * is the current context on this CPU and preemption is disabled,
486 * hence we can't get into perf_counter_task_sched_out for this context.
487 */
488 static void perf_counter_disable(struct perf_counter *counter)
489 {
490 struct perf_counter_context *ctx = counter->ctx;
491 struct task_struct *task = ctx->task;
492
493 if (!task) {
494 /*
495 * Disable the counter on the cpu that it's on
496 */
497 smp_call_function_single(counter->cpu, __perf_counter_disable,
498 counter, 1);
499 return;
500 }
501
502 retry:
503 task_oncpu_function_call(task, __perf_counter_disable, counter);
504
505 spin_lock_irq(&ctx->lock);
506 /*
507 * If the counter is still active, we need to retry the cross-call.
508 */
509 if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
510 spin_unlock_irq(&ctx->lock);
511 goto retry;
512 }
513
514 /*
515 * Since we have the lock this context can't be scheduled
516 * in, so we can change the state safely.
517 */
518 if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
519 update_counter_times(counter);
520 counter->state = PERF_COUNTER_STATE_OFF;
521 }
522
523 spin_unlock_irq(&ctx->lock);
524 }
525
526 static int
527 counter_sched_in(struct perf_counter *counter,
528 struct perf_cpu_context *cpuctx,
529 struct perf_counter_context *ctx,
530 int cpu)
531 {
532 if (counter->state <= PERF_COUNTER_STATE_OFF)
533 return 0;
534
535 counter->state = PERF_COUNTER_STATE_ACTIVE;
536 counter->oncpu = cpu; /* TODO: put 'cpu' into cpuctx->cpu */
537 /*
538 * The new state must be visible before we turn it on in the hardware:
539 */
540 smp_wmb();
541
542 if (counter->pmu->enable(counter)) {
543 counter->state = PERF_COUNTER_STATE_INACTIVE;
544 counter->oncpu = -1;
545 return -EAGAIN;
546 }
547
548 counter->tstamp_running += ctx->time - counter->tstamp_stopped;
549
550 if (!is_software_counter(counter))
551 cpuctx->active_oncpu++;
552 ctx->nr_active++;
553
554 if (counter->attr.exclusive)
555 cpuctx->exclusive = 1;
556
557 return 0;
558 }
559
560 static int
561 group_sched_in(struct perf_counter *group_counter,
562 struct perf_cpu_context *cpuctx,
563 struct perf_counter_context *ctx,
564 int cpu)
565 {
566 struct perf_counter *counter, *partial_group;
567 int ret;
568
569 if (group_counter->state == PERF_COUNTER_STATE_OFF)
570 return 0;
571
572 ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
573 if (ret)
574 return ret < 0 ? ret : 0;
575
576 if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
577 return -EAGAIN;
578
579 /*
580 * Schedule in siblings as one group (if any):
581 */
582 list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
583 if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
584 partial_group = counter;
585 goto group_error;
586 }
587 }
588
589 return 0;
590
591 group_error:
592 /*
593 * Groups can be scheduled in as one unit only, so undo any
594 * partial group before returning:
595 */
596 list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
597 if (counter == partial_group)
598 break;
599 counter_sched_out(counter, cpuctx, ctx);
600 }
601 counter_sched_out(group_counter, cpuctx, ctx);
602
603 return -EAGAIN;
604 }
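/*
 * Group scheduling is all-or-nothing: either the leader and every sibling
 * go onto the PMU together or the whole group stays off, so the members'
 * counts always cover the same interval. hw_perf_group_sched_in() gives
 * the architecture a chance to place the entire group at once; a negative
 * return is an error, a positive return means the arch code handled the
 * group itself.
 */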
605
606 /*
607 * Return 1 for a group consisting entirely of software counters,
608 * 0 if the group contains any hardware counters.
609 */
610 static int is_software_only_group(struct perf_counter *leader)
611 {
612 struct perf_counter *counter;
613
614 if (!is_software_counter(leader))
615 return 0;
616
617 list_for_each_entry(counter, &leader->sibling_list, list_entry)
618 if (!is_software_counter(counter))
619 return 0;
620
621 return 1;
622 }
623
624 /*
625 * Work out whether we can put this counter group on the CPU now.
626 */
627 static int group_can_go_on(struct perf_counter *counter,
628 struct perf_cpu_context *cpuctx,
629 int can_add_hw)
630 {
631 /*
632 * Groups consisting entirely of software counters can always go on.
633 */
634 if (is_software_only_group(counter))
635 return 1;
636 /*
637 * If an exclusive group is already on, no other hardware
638 * counters can go on.
639 */
640 if (cpuctx->exclusive)
641 return 0;
642 /*
643 * If this group is exclusive and there are already
644 * counters on the CPU, it can't go on.
645 */
646 if (counter->attr.exclusive && cpuctx->active_oncpu)
647 return 0;
648 /*
649 * Otherwise, try to add it if all previous groups were able
650 * to go on.
651 */
652 return can_add_hw;
653 }
654
655 static void add_counter_to_ctx(struct perf_counter *counter,
656 struct perf_counter_context *ctx)
657 {
658 list_add_counter(counter, ctx);
659 counter->tstamp_enabled = ctx->time;
660 counter->tstamp_running = ctx->time;
661 counter->tstamp_stopped = ctx->time;
662 }
663
664 /*
665 * Cross CPU call to install and enable a performance counter
666 *
667 * Must be called with ctx->mutex held
668 */
669 static void __perf_install_in_context(void *info)
670 {
671 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
672 struct perf_counter *counter = info;
673 struct perf_counter_context *ctx = counter->ctx;
674 struct perf_counter *leader = counter->group_leader;
675 int cpu = smp_processor_id();
676 int err;
677
678 /*
679 * If this is a task context, we need to check whether it is
680 * the current task context of this cpu. If not it has been
681 * scheduled out before the smp call arrived.
682 * Or possibly this is the right context but it isn't
683 * on this cpu because it had no counters.
684 */
685 if (ctx->task && cpuctx->task_ctx != ctx) {
686 if (cpuctx->task_ctx || ctx->task != current)
687 return;
688 cpuctx->task_ctx = ctx;
689 }
690
691 spin_lock(&ctx->lock);
692 ctx->is_active = 1;
693 update_context_time(ctx);
694
695 /*
696 * Protect the list operation against NMI by disabling the
697 * counters on a global level. NOP for non NMI based counters.
698 */
699 perf_disable();
700
701 add_counter_to_ctx(counter, ctx);
702
703 /*
704 * Don't put the counter on if it is disabled or if
705 * it is in a group and the group isn't on.
706 */
707 if (counter->state != PERF_COUNTER_STATE_INACTIVE ||
708 (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE))
709 goto unlock;
710
711 /*
712 * An exclusive counter can't go on if there are already active
713 * hardware counters, and no hardware counter can go on if there
714 * is already an exclusive counter on.
715 */
716 if (!group_can_go_on(counter, cpuctx, 1))
717 err = -EEXIST;
718 else
719 err = counter_sched_in(counter, cpuctx, ctx, cpu);
720
721 if (err) {
722 /*
723 * This counter couldn't go on. If it is in a group
724 * then we have to pull the whole group off.
725 * If the counter group is pinned then put it in error state.
726 */
727 if (leader != counter)
728 group_sched_out(leader, cpuctx, ctx);
729 if (leader->attr.pinned) {
730 update_group_times(leader);
731 leader->state = PERF_COUNTER_STATE_ERROR;
732 }
733 }
734
735 if (!err && !ctx->task && cpuctx->max_pertask)
736 cpuctx->max_pertask--;
737
738 unlock:
739 perf_enable();
740
741 spin_unlock(&ctx->lock);
742 }
743
744 /*
745 * Attach a performance counter to a context
746 *
747 * First we add the counter to the list with the hardware enable bit
748 * in counter->hw_config cleared.
749 *
750 * If the counter is attached to a task which is on a CPU we use a smp
751 * call to enable it in the task context. The task might have been
752 * scheduled away, but we check this in the smp call again.
753 *
754 * Must be called with ctx->mutex held.
755 */
756 static void
757 perf_install_in_context(struct perf_counter_context *ctx,
758 struct perf_counter *counter,
759 int cpu)
760 {
761 struct task_struct *task = ctx->task;
762
763 if (!task) {
764 /*
765 * Per cpu counters are installed via an smp call and
766  * the install is always successful.
767 */
768 smp_call_function_single(cpu, __perf_install_in_context,
769 counter, 1);
770 return;
771 }
772
773 retry:
774 task_oncpu_function_call(task, __perf_install_in_context,
775 counter);
776
777 spin_lock_irq(&ctx->lock);
778 /*
779  * Retry the smp call if the context is active and the counter was not added.
780 */
781 if (ctx->is_active && list_empty(&counter->list_entry)) {
782 spin_unlock_irq(&ctx->lock);
783 goto retry;
784 }
785
786 /*
787  * The lock prevents this context from being scheduled in, so we
788  * can add the counter safely here if the call above did not
789  * succeed.
790 */
791 if (list_empty(&counter->list_entry))
792 add_counter_to_ctx(counter, ctx);
793 spin_unlock_irq(&ctx->lock);
794 }
795
796 /*
797 * Cross CPU call to enable a performance counter
798 */
799 static void __perf_counter_enable(void *info)
800 {
801 struct perf_counter *counter = info;
802 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
803 struct perf_counter_context *ctx = counter->ctx;
804 struct perf_counter *leader = counter->group_leader;
805 int err;
806
807 /*
808 * If this is a per-task counter, need to check whether this
809 * counter's task is the current task on this cpu.
810 */
811 if (ctx->task && cpuctx->task_ctx != ctx) {
812 if (cpuctx->task_ctx || ctx->task != current)
813 return;
814 cpuctx->task_ctx = ctx;
815 }
816
817 spin_lock(&ctx->lock);
818 ctx->is_active = 1;
819 update_context_time(ctx);
820
821 if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
822 goto unlock;
823 counter->state = PERF_COUNTER_STATE_INACTIVE;
824 counter->tstamp_enabled = ctx->time - counter->total_time_enabled;
825
826 /*
827 * If the counter is in a group and isn't the group leader,
828 * then don't put it on unless the group is on.
829 */
830 if (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE)
831 goto unlock;
832
833 if (!group_can_go_on(counter, cpuctx, 1)) {
834 err = -EEXIST;
835 } else {
836 perf_disable();
837 if (counter == leader)
838 err = group_sched_in(counter, cpuctx, ctx,
839 smp_processor_id());
840 else
841 err = counter_sched_in(counter, cpuctx, ctx,
842 smp_processor_id());
843 perf_enable();
844 }
845
846 if (err) {
847 /*
848 * If this counter can't go on and it's part of a
849 * group, then the whole group has to come off.
850 */
851 if (leader != counter)
852 group_sched_out(leader, cpuctx, ctx);
853 if (leader->attr.pinned) {
854 update_group_times(leader);
855 leader->state = PERF_COUNTER_STATE_ERROR;
856 }
857 }
858
859 unlock:
860 spin_unlock(&ctx->lock);
861 }
862
863 /*
864 * Enable a counter.
865 *
866 * If counter->ctx is a cloned context, callers must make sure that
867 * every task struct that counter->ctx->task could possibly point to
868 * remains valid. This condition is satisfied when called through
869 * perf_counter_for_each_child or perf_counter_for_each as described
870 * for perf_counter_disable.
871 */
872 static void perf_counter_enable(struct perf_counter *counter)
873 {
874 struct perf_counter_context *ctx = counter->ctx;
875 struct task_struct *task = ctx->task;
876
877 if (!task) {
878 /*
879 * Enable the counter on the cpu that it's on
880 */
881 smp_call_function_single(counter->cpu, __perf_counter_enable,
882 counter, 1);
883 return;
884 }
885
886 spin_lock_irq(&ctx->lock);
887 if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
888 goto out;
889
890 /*
891 * If the counter is in error state, clear that first.
892 * That way, if we see the counter in error state below, we
893 * know that it has gone back into error state, as distinct
894 * from the task having been scheduled away before the
895 * cross-call arrived.
896 */
897 if (counter->state == PERF_COUNTER_STATE_ERROR)
898 counter->state = PERF_COUNTER_STATE_OFF;
899
900 retry:
901 spin_unlock_irq(&ctx->lock);
902 task_oncpu_function_call(task, __perf_counter_enable, counter);
903
904 spin_lock_irq(&ctx->lock);
905
906 /*
907 * If the context is active and the counter is still off,
908 * we need to retry the cross-call.
909 */
910 if (ctx->is_active && counter->state == PERF_COUNTER_STATE_OFF)
911 goto retry;
912
913 /*
914 * Since we have the lock this context can't be scheduled
915 * in, so we can change the state safely.
916 */
917 if (counter->state == PERF_COUNTER_STATE_OFF) {
918 counter->state = PERF_COUNTER_STATE_INACTIVE;
919 counter->tstamp_enabled =
920 ctx->time - counter->total_time_enabled;
921 }
922 out:
923 spin_unlock_irq(&ctx->lock);
924 }
925
926 static int perf_counter_refresh(struct perf_counter *counter, int refresh)
927 {
928 /*
929 * not supported on inherited counters
930 */
931 if (counter->attr.inherit)
932 return -EINVAL;
933
934 atomic_add(refresh, &counter->event_limit);
935 perf_counter_enable(counter);
936
937 return 0;
938 }
939
940 void __perf_counter_sched_out(struct perf_counter_context *ctx,
941 struct perf_cpu_context *cpuctx)
942 {
943 struct perf_counter *counter;
944
945 spin_lock(&ctx->lock);
946 ctx->is_active = 0;
947 if (likely(!ctx->nr_counters))
948 goto out;
949 update_context_time(ctx);
950
951 perf_disable();
952 if (ctx->nr_active) {
953 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
954 if (counter != counter->group_leader)
955 counter_sched_out(counter, cpuctx, ctx);
956 else
957 group_sched_out(counter, cpuctx, ctx);
958 }
959 }
960 perf_enable();
961 out:
962 spin_unlock(&ctx->lock);
963 }
964
965 /*
966 * Test whether two contexts are equivalent, i.e. whether they
967 * have both been cloned from the same version of the same context
968 * and they both have the same number of enabled counters.
969 * If the number of enabled counters is the same, then the set
970 * of enabled counters should be the same, because these are both
971 * inherited contexts, therefore we can't access individual counters
972 * in them directly with an fd; we can only enable/disable all
973 * counters via prctl, or enable/disable all counters in a family
974 * via ioctl, which will have the same effect on both contexts.
975 */
976 static int context_equiv(struct perf_counter_context *ctx1,
977 struct perf_counter_context *ctx2)
978 {
979 return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
980 && ctx1->parent_gen == ctx2->parent_gen
981 && !ctx1->pin_count && !ctx2->pin_count;
982 }
983
984 /*
985 * Called from scheduler to remove the counters of the current task,
986 * with interrupts disabled.
987 *
988 * We stop each counter and update the counter value in counter->count.
989 *
990 * This does not protect us against NMI, but disable()
991 * sets the disabled bit in the control field of counter _before_
992 * accessing the counter control register. If a NMI hits, then it will
993 * not restart the counter.
994 */
995 void perf_counter_task_sched_out(struct task_struct *task,
996 struct task_struct *next, int cpu)
997 {
998 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
999 struct perf_counter_context *ctx = task->perf_counter_ctxp;
1000 struct perf_counter_context *next_ctx;
1001 struct perf_counter_context *parent;
1002 struct pt_regs *regs;
1003 int do_switch = 1;
1004
1005 regs = task_pt_regs(task);
1006 perf_swcounter_event(PERF_COUNT_CONTEXT_SWITCHES, 1, 1, regs, 0);
1007
1008 if (likely(!ctx || !cpuctx->task_ctx))
1009 return;
1010
1011 update_context_time(ctx);
1012
1013 rcu_read_lock();
1014 parent = rcu_dereference(ctx->parent_ctx);
1015 next_ctx = next->perf_counter_ctxp;
1016 if (parent && next_ctx &&
1017 rcu_dereference(next_ctx->parent_ctx) == parent) {
1018 /*
1019 * Looks like the two contexts are clones, so we might be
1020 * able to optimize the context switch. We lock both
1021 * contexts and check that they are clones under the
1022 * lock (including re-checking that neither has been
1023 * uncloned in the meantime). It doesn't matter which
1024 * order we take the locks because no other cpu could
1025 * be trying to lock both of these tasks.
1026 */
1027 spin_lock(&ctx->lock);
1028 spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
1029 if (context_equiv(ctx, next_ctx)) {
1030 /*
1031 * XXX do we need a memory barrier of sorts
1032 * wrt to rcu_dereference() of perf_counter_ctxp
1033 */
1034 task->perf_counter_ctxp = next_ctx;
1035 next->perf_counter_ctxp = ctx;
1036 ctx->task = next;
1037 next_ctx->task = task;
1038 do_switch = 0;
1039 }
1040 spin_unlock(&next_ctx->lock);
1041 spin_unlock(&ctx->lock);
1042 }
1043 rcu_read_unlock();
1044
1045 if (do_switch) {
1046 __perf_counter_sched_out(ctx, cpuctx);
1047 cpuctx->task_ctx = NULL;
1048 }
1049 }
1050
1051 /*
1052 * Called with IRQs disabled
1053 */
1054 static void __perf_counter_task_sched_out(struct perf_counter_context *ctx)
1055 {
1056 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1057
1058 if (!cpuctx->task_ctx)
1059 return;
1060
1061 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
1062 return;
1063
1064 __perf_counter_sched_out(ctx, cpuctx);
1065 cpuctx->task_ctx = NULL;
1066 }
1067
1068 /*
1069 * Called with IRQs disabled
1070 */
1071 static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx)
1072 {
1073 __perf_counter_sched_out(&cpuctx->ctx, cpuctx);
1074 }
1075
1076 static void
1077 __perf_counter_sched_in(struct perf_counter_context *ctx,
1078 struct perf_cpu_context *cpuctx, int cpu)
1079 {
1080 struct perf_counter *counter;
1081 int can_add_hw = 1;
1082
1083 spin_lock(&ctx->lock);
1084 ctx->is_active = 1;
1085 if (likely(!ctx->nr_counters))
1086 goto out;
1087
1088 ctx->timestamp = perf_clock();
1089
1090 perf_disable();
1091
1092 /*
1093 * First go through the list and put on any pinned groups
1094 * in order to give them the best chance of going on.
1095 */
1096 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
1097 if (counter->state <= PERF_COUNTER_STATE_OFF ||
1098 !counter->attr.pinned)
1099 continue;
1100 if (counter->cpu != -1 && counter->cpu != cpu)
1101 continue;
1102
1103 if (counter != counter->group_leader)
1104 counter_sched_in(counter, cpuctx, ctx, cpu);
1105 else {
1106 if (group_can_go_on(counter, cpuctx, 1))
1107 group_sched_in(counter, cpuctx, ctx, cpu);
1108 }
1109
1110 /*
1111 * If this pinned group hasn't been scheduled,
1112 * put it in error state.
1113 */
1114 if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
1115 update_group_times(counter);
1116 counter->state = PERF_COUNTER_STATE_ERROR;
1117 }
1118 }
1119
1120 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
1121 /*
1122 * Ignore counters in OFF or ERROR state, and
1123 * ignore pinned counters since we did them already.
1124 */
1125 if (counter->state <= PERF_COUNTER_STATE_OFF ||
1126 counter->attr.pinned)
1127 continue;
1128
1129 /*
1130 * Listen to the 'cpu' scheduling filter constraint
1131 * of counters:
1132 */
1133 if (counter->cpu != -1 && counter->cpu != cpu)
1134 continue;
1135
1136 if (counter != counter->group_leader) {
1137 if (counter_sched_in(counter, cpuctx, ctx, cpu))
1138 can_add_hw = 0;
1139 } else {
1140 if (group_can_go_on(counter, cpuctx, can_add_hw)) {
1141 if (group_sched_in(counter, cpuctx, ctx, cpu))
1142 can_add_hw = 0;
1143 }
1144 }
1145 }
1146 perf_enable();
1147 out:
1148 spin_unlock(&ctx->lock);
1149 }
1150
1151 /*
1152 * Called from scheduler to add the counters of the current task
1153 * with interrupts disabled.
1154 *
1155 * We restore the counter value and then enable it.
1156 *
1157 * This does not protect us against NMI, but enable()
1158 * sets the enabled bit in the control field of counter _before_
1159 * accessing the counter control register. If a NMI hits, then it will
1160 * keep the counter running.
1161 */
1162 void perf_counter_task_sched_in(struct task_struct *task, int cpu)
1163 {
1164 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
1165 struct perf_counter_context *ctx = task->perf_counter_ctxp;
1166
1167 if (likely(!ctx))
1168 return;
1169 if (cpuctx->task_ctx == ctx)
1170 return;
1171 __perf_counter_sched_in(ctx, cpuctx, cpu);
1172 cpuctx->task_ctx = ctx;
1173 }
1174
1175 static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
1176 {
1177 struct perf_counter_context *ctx = &cpuctx->ctx;
1178
1179 __perf_counter_sched_in(ctx, cpuctx, cpu);
1180 }
1181
1182 #define MAX_INTERRUPTS (~0ULL)
1183
1184 static void perf_log_throttle(struct perf_counter *counter, int enable);
1185 static void perf_log_period(struct perf_counter *counter, u64 period);
1186
1187 static void perf_adjust_freq(struct perf_counter_context *ctx)
1188 {
1189 struct perf_counter *counter;
1190 struct hw_perf_counter *hwc;
1191 u64 interrupts, sample_period;
1192 u64 events, period, freq;
1193 s64 delta;
1194
1195 spin_lock(&ctx->lock);
1196 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
1197 if (counter->state != PERF_COUNTER_STATE_ACTIVE)
1198 continue;
1199
1200 hwc = &counter->hw;
1201
1202 interrupts = hwc->interrupts;
1203 hwc->interrupts = 0;
1204
1205 if (interrupts == MAX_INTERRUPTS) {
1206 perf_log_throttle(counter, 1);
1207 counter->pmu->unthrottle(counter);
1208 interrupts = 2*sysctl_perf_counter_limit/HZ;
1209 }
1210
1211 if (!counter->attr.freq || !counter->attr.sample_freq)
1212 continue;
1213
1214 if (counter->attr.sample_freq < HZ) {
1215 freq = counter->attr.sample_freq;
1216
1217 hwc->freq_count += freq;
1218 hwc->freq_interrupts += interrupts;
1219
1220 if (hwc->freq_count < HZ)
1221 continue;
1222
1223 interrupts = hwc->freq_interrupts;
1224 hwc->freq_interrupts = 0;
1225 hwc->freq_count -= HZ;
1226 } else
1227 freq = HZ;
1228
1229 events = freq * interrupts * hwc->sample_period;
1230 period = div64_u64(events, counter->attr.sample_freq);
1231
1232 delta = (s64)(1 + period - hwc->sample_period);
1233 delta >>= 1;
1234
1235 sample_period = hwc->sample_period + delta;
1236
1237 if (!sample_period)
1238 sample_period = 1;
1239
1240 perf_log_period(counter, sample_period);
1241
1242 hwc->sample_period = sample_period;
1243 }
1244 spin_unlock(&ctx->lock);
1245 }
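/*
 * Worked example of the adjustment above, with illustrative numbers:
 * HZ = 1000, hwc->sample_period = 10000 and 4 overflows seen during the
 * last tick give
 *	events = 1000 * 4 * 10000 = 40,000,000 events/sec.
 * For attr.sample_freq = 1000 the ideal period is
 *	period = 40,000,000 / 1000 = 40,000,
 * so delta = (1 + 40000 - 10000) >> 1 = 15000 and the period moves
 * half-way, to 25000, rather than jumping straight to the target; the
 * halving damps oscillation when the event rate fluctuates.
 */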
1246
1247 /*
1248 * Round-robin a context's counters:
1249 */
1250 static void rotate_ctx(struct perf_counter_context *ctx)
1251 {
1252 struct perf_counter *counter;
1253
1254 if (!ctx->nr_counters)
1255 return;
1256
1257 spin_lock(&ctx->lock);
1258 /*
1259 * Rotate the first entry last (works just fine for group counters too):
1260 */
1261 perf_disable();
1262 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
1263 list_move_tail(&counter->list_entry, &ctx->counter_list);
1264 break;
1265 }
1266 perf_enable();
1267
1268 spin_unlock(&ctx->lock);
1269 }
1270
1271 void perf_counter_task_tick(struct task_struct *curr, int cpu)
1272 {
1273 struct perf_cpu_context *cpuctx;
1274 struct perf_counter_context *ctx;
1275
1276 if (!atomic_read(&nr_counters))
1277 return;
1278
1279 cpuctx = &per_cpu(perf_cpu_context, cpu);
1280 ctx = curr->perf_counter_ctxp;
1281
1282 perf_adjust_freq(&cpuctx->ctx);
1283 if (ctx)
1284 perf_adjust_freq(ctx);
1285
1286 perf_counter_cpu_sched_out(cpuctx);
1287 if (ctx)
1288 __perf_counter_task_sched_out(ctx);
1289
1290 rotate_ctx(&cpuctx->ctx);
1291 if (ctx)
1292 rotate_ctx(ctx);
1293
1294 perf_counter_cpu_sched_in(cpuctx, cpu);
1295 if (ctx)
1296 perf_counter_task_sched_in(curr, cpu);
1297 }
1298
1299 /*
1300 * Cross CPU call to read the hardware counter
1301 */
1302 static void __read(void *info)
1303 {
1304 struct perf_counter *counter = info;
1305 struct perf_counter_context *ctx = counter->ctx;
1306 unsigned long flags;
1307
1308 local_irq_save(flags);
1309 if (ctx->is_active)
1310 update_context_time(ctx);
1311 counter->pmu->read(counter);
1312 update_counter_times(counter);
1313 local_irq_restore(flags);
1314 }
1315
1316 static u64 perf_counter_read(struct perf_counter *counter)
1317 {
1318 /*
1319 * If counter is enabled and currently active on a CPU, update the
1320 * value in the counter structure:
1321 */
1322 if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
1323 smp_call_function_single(counter->oncpu,
1324 __read, counter, 1);
1325 } else if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
1326 update_counter_times(counter);
1327 }
1328
1329 return atomic64_read(&counter->count);
1330 }
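/*
 * Note that reading an ACTIVE counter requires an IPI to the CPU it is
 * currently running on: only counter->pmu->read() executed there can fold
 * the live hardware value into counter->count. For an INACTIVE counter
 * the stored value is already current and only the time fields need to be
 * brought up to date.
 */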
1331
1332 /*
1333 * Initialize the perf_counter context in a task_struct:
1334 */
1335 static void
1336 __perf_counter_init_context(struct perf_counter_context *ctx,
1337 struct task_struct *task)
1338 {
1339 memset(ctx, 0, sizeof(*ctx));
1340 spin_lock_init(&ctx->lock);
1341 mutex_init(&ctx->mutex);
1342 INIT_LIST_HEAD(&ctx->counter_list);
1343 INIT_LIST_HEAD(&ctx->event_list);
1344 atomic_set(&ctx->refcount, 1);
1345 ctx->task = task;
1346 }
1347
1348 static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
1349 {
1350 struct perf_counter_context *parent_ctx;
1351 struct perf_counter_context *ctx;
1352 struct perf_cpu_context *cpuctx;
1353 struct task_struct *task;
1354 unsigned long flags;
1355 int err;
1356
1357 /*
1358 * If cpu is not a wildcard then this is a percpu counter:
1359 */
1360 if (cpu != -1) {
1361 /* Must be root to operate on a CPU counter: */
1362 if (sysctl_perf_counter_priv && !capable(CAP_SYS_ADMIN))
1363 return ERR_PTR(-EACCES);
1364
1365 if (cpu < 0 || cpu > num_possible_cpus())
1366 return ERR_PTR(-EINVAL);
1367
1368 /*
1369  * We could be clever and allow attaching a counter to an
1370 * offline CPU and activate it when the CPU comes up, but
1371 * that's for later.
1372 */
1373 if (!cpu_isset(cpu, cpu_online_map))
1374 return ERR_PTR(-ENODEV);
1375
1376 cpuctx = &per_cpu(perf_cpu_context, cpu);
1377 ctx = &cpuctx->ctx;
1378 get_ctx(ctx);
1379
1380 return ctx;
1381 }
1382
1383 rcu_read_lock();
1384 if (!pid)
1385 task = current;
1386 else
1387 task = find_task_by_vpid(pid);
1388 if (task)
1389 get_task_struct(task);
1390 rcu_read_unlock();
1391
1392 if (!task)
1393 return ERR_PTR(-ESRCH);
1394
1395 /*
1396 * Can't attach counters to a dying task.
1397 */
1398 err = -ESRCH;
1399 if (task->flags & PF_EXITING)
1400 goto errout;
1401
1402 /* Reuse ptrace permission checks for now. */
1403 err = -EACCES;
1404 if (!ptrace_may_access(task, PTRACE_MODE_READ))
1405 goto errout;
1406
1407 retry:
1408 ctx = perf_lock_task_context(task, &flags);
1409 if (ctx) {
1410 parent_ctx = ctx->parent_ctx;
1411 if (parent_ctx) {
1412 put_ctx(parent_ctx);
1413 ctx->parent_ctx = NULL; /* no longer a clone */
1414 }
1415 /*
1416 * Get an extra reference before dropping the lock so that
1417 * this context won't get freed if the task exits.
1418 */
1419 get_ctx(ctx);
1420 spin_unlock_irqrestore(&ctx->lock, flags);
1421 }
1422
1423 if (!ctx) {
1424 ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL);
1425 err = -ENOMEM;
1426 if (!ctx)
1427 goto errout;
1428 __perf_counter_init_context(ctx, task);
1429 get_ctx(ctx);
1430 if (cmpxchg(&task->perf_counter_ctxp, NULL, ctx)) {
1431 /*
1432 * We raced with some other task; use
1433 * the context they set.
1434 */
1435 kfree(ctx);
1436 goto retry;
1437 }
1438 get_task_struct(task);
1439 }
1440
1441 put_task_struct(task);
1442 return ctx;
1443
1444 errout:
1445 put_task_struct(task);
1446 return ERR_PTR(err);
1447 }
1448
1449 static void free_counter_rcu(struct rcu_head *head)
1450 {
1451 struct perf_counter *counter;
1452
1453 counter = container_of(head, struct perf_counter, rcu_head);
1454 if (counter->ns)
1455 put_pid_ns(counter->ns);
1456 kfree(counter);
1457 }
1458
1459 static void perf_pending_sync(struct perf_counter *counter);
1460
1461 static void free_counter(struct perf_counter *counter)
1462 {
1463 perf_pending_sync(counter);
1464
1465 atomic_dec(&nr_counters);
1466 if (counter->attr.mmap)
1467 atomic_dec(&nr_mmap_counters);
1468 if (counter->attr.comm)
1469 atomic_dec(&nr_comm_counters);
1470
1471 if (counter->destroy)
1472 counter->destroy(counter);
1473
1474 put_ctx(counter->ctx);
1475 call_rcu(&counter->rcu_head, free_counter_rcu);
1476 }
1477
1478 /*
1479 * Called when the last reference to the file is gone.
1480 */
1481 static int perf_release(struct inode *inode, struct file *file)
1482 {
1483 struct perf_counter *counter = file->private_data;
1484 struct perf_counter_context *ctx = counter->ctx;
1485
1486 file->private_data = NULL;
1487
1488 WARN_ON_ONCE(ctx->parent_ctx);
1489 mutex_lock(&ctx->mutex);
1490 perf_counter_remove_from_context(counter);
1491 mutex_unlock(&ctx->mutex);
1492
1493 mutex_lock(&counter->owner->perf_counter_mutex);
1494 list_del_init(&counter->owner_entry);
1495 mutex_unlock(&counter->owner->perf_counter_mutex);
1496 put_task_struct(counter->owner);
1497
1498 free_counter(counter);
1499
1500 return 0;
1501 }
1502
1503 /*
1504 * Read the performance counter - simple non blocking version for now
1505 */
1506 static ssize_t
1507 perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
1508 {
1509 	u64 values[4];
1510 int n;
1511
1512 /*
1513 * Return end-of-file for a read on a counter that is in
1514 * error state (i.e. because it was pinned but it couldn't be
1515 * scheduled on to the CPU at some point).
1516 */
1517 if (counter->state == PERF_COUNTER_STATE_ERROR)
1518 return 0;
1519
1520 WARN_ON_ONCE(counter->ctx->parent_ctx);
1521 mutex_lock(&counter->child_mutex);
1522 values[0] = perf_counter_read(counter);
1523 n = 1;
1524 if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1525 values[n++] = counter->total_time_enabled +
1526 atomic64_read(&counter->child_total_time_enabled);
1527 if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1528 values[n++] = counter->total_time_running +
1529 atomic64_read(&counter->child_total_time_running);
1530 if (counter->attr.read_format & PERF_FORMAT_ID)
1531 values[n++] = counter->id;
1532 mutex_unlock(&counter->child_mutex);
1533
1534 if (count < n * sizeof(u64))
1535 return -EINVAL;
1536 count = n * sizeof(u64);
1537
1538 if (copy_to_user(buf, values, count))
1539 return -EFAULT;
1540
1541 return count;
1542 }
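/*
 * What user space gets back from read() on a counter fd, given the layout
 * built above (sketch, error handling omitted):
 *
 *	u64 buf[4];
 *	ssize_t n = read(fd, buf, sizeof(buf));
 *
 * buf[0] is always the counter value; a total-time-enabled value, a
 * total-time-running value and the counter id follow, in that order, but
 * each only if the corresponding PERF_FORMAT_* bit was set in
 * attr.read_format when the counter was created.
 */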
1543
1544 static ssize_t
1545 perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
1546 {
1547 struct perf_counter *counter = file->private_data;
1548
1549 return perf_read_hw(counter, buf, count);
1550 }
1551
1552 static unsigned int perf_poll(struct file *file, poll_table *wait)
1553 {
1554 struct perf_counter *counter = file->private_data;
1555 struct perf_mmap_data *data;
1556 	unsigned int events = POLLHUP;
1557
1558 rcu_read_lock();
1559 data = rcu_dereference(counter->data);
1560 if (data)
1561 events = atomic_xchg(&data->poll, 0);
1562 rcu_read_unlock();
1563
1564 poll_wait(file, &counter->waitq, wait);
1565
1566 return events;
1567 }
1568
1569 static void perf_counter_reset(struct perf_counter *counter)
1570 {
1571 (void)perf_counter_read(counter);
1572 atomic64_set(&counter->count, 0);
1573 perf_counter_update_userpage(counter);
1574 }
1575
1576 static void perf_counter_for_each_sibling(struct perf_counter *counter,
1577 void (*func)(struct perf_counter *))
1578 {
1579 struct perf_counter_context *ctx = counter->ctx;
1580 struct perf_counter *sibling;
1581
1582 WARN_ON_ONCE(ctx->parent_ctx);
1583 mutex_lock(&ctx->mutex);
1584 counter = counter->group_leader;
1585
1586 func(counter);
1587 list_for_each_entry(sibling, &counter->sibling_list, list_entry)
1588 func(sibling);
1589 mutex_unlock(&ctx->mutex);
1590 }
1591
1592 /*
1593 * Holding the top-level counter's child_mutex means that any
1594 * descendant process that has inherited this counter will block
1595 * in sync_child_counter if it goes to exit, thus satisfying the
1596 * task existence requirements of perf_counter_enable/disable.
1597 */
1598 static void perf_counter_for_each_child(struct perf_counter *counter,
1599 void (*func)(struct perf_counter *))
1600 {
1601 struct perf_counter *child;
1602
1603 WARN_ON_ONCE(counter->ctx->parent_ctx);
1604 mutex_lock(&counter->child_mutex);
1605 func(counter);
1606 list_for_each_entry(child, &counter->child_list, child_list)
1607 func(child);
1608 mutex_unlock(&counter->child_mutex);
1609 }
1610
1611 static void perf_counter_for_each(struct perf_counter *counter,
1612 void (*func)(struct perf_counter *))
1613 {
1614 struct perf_counter *child;
1615
1616 WARN_ON_ONCE(counter->ctx->parent_ctx);
1617 mutex_lock(&counter->child_mutex);
1618 perf_counter_for_each_sibling(counter, func);
1619 list_for_each_entry(child, &counter->child_list, child_list)
1620 perf_counter_for_each_sibling(child, func);
1621 mutex_unlock(&counter->child_mutex);
1622 }
1623
1624 static int perf_counter_period(struct perf_counter *counter, u64 __user *arg)
1625 {
1626 struct perf_counter_context *ctx = counter->ctx;
1627 unsigned long size;
1628 int ret = 0;
1629 u64 value;
1630
1631 if (!counter->attr.sample_period)
1632 return -EINVAL;
1633
1634 size = copy_from_user(&value, arg, sizeof(value));
1635 if (size != sizeof(value))
1636 return -EFAULT;
1637
1638 if (!value)
1639 return -EINVAL;
1640
1641 spin_lock_irq(&ctx->lock);
1642 if (counter->attr.freq) {
1643 if (value > sysctl_perf_counter_limit) {
1644 ret = -EINVAL;
1645 goto unlock;
1646 }
1647
1648 counter->attr.sample_freq = value;
1649 } else {
1650 counter->attr.sample_period = value;
1651 counter->hw.sample_period = value;
1652
1653 perf_log_period(counter, value);
1654 }
1655 unlock:
1656 spin_unlock_irq(&ctx->lock);
1657
1658 return ret;
1659 }
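/*
 * Illustrative user-space use of PERF_COUNTER_IOC_PERIOD (sketch):
 *
 *	u64 new_period = 100000;
 *	ioctl(fd, PERF_COUNTER_IOC_PERIOD, &new_period);
 *
 * For a counter created with attr.freq set, the same call reinterprets
 * the value as a new sample_freq, capped at sysctl_perf_counter_limit.
 */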
1660
1661 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1662 {
1663 struct perf_counter *counter = file->private_data;
1664 void (*func)(struct perf_counter *);
1665 u32 flags = arg;
1666
1667 switch (cmd) {
1668 case PERF_COUNTER_IOC_ENABLE:
1669 func = perf_counter_enable;
1670 break;
1671 case PERF_COUNTER_IOC_DISABLE:
1672 func = perf_counter_disable;
1673 break;
1674 case PERF_COUNTER_IOC_RESET:
1675 func = perf_counter_reset;
1676 break;
1677
1678 case PERF_COUNTER_IOC_REFRESH:
1679 return perf_counter_refresh(counter, arg);
1680
1681 case PERF_COUNTER_IOC_PERIOD:
1682 return perf_counter_period(counter, (u64 __user *)arg);
1683
1684 default:
1685 return -ENOTTY;
1686 }
1687
1688 if (flags & PERF_IOC_FLAG_GROUP)
1689 perf_counter_for_each(counter, func);
1690 else
1691 perf_counter_for_each_child(counter, func);
1692
1693 return 0;
1694 }
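/*
 * Sketch of the enable/disable ioctls from user space:
 *
 *	ioctl(fd, PERF_COUNTER_IOC_DISABLE, 0);
 *	ioctl(fd, PERF_COUNTER_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
 *
 * The argument is treated as the flags word tested at the bottom of
 * perf_ioctl(): with PERF_IOC_FLAG_GROUP the operation is applied to the
 * whole group (and its children), otherwise to this counter and its
 * children only. IOC_REFRESH instead interprets the argument as a count
 * and IOC_PERIOD as a pointer to a u64.
 */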
1695
1696 int perf_counter_task_enable(void)
1697 {
1698 struct perf_counter *counter;
1699
1700 mutex_lock(&current->perf_counter_mutex);
1701 list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
1702 perf_counter_for_each_child(counter, perf_counter_enable);
1703 mutex_unlock(&current->perf_counter_mutex);
1704
1705 return 0;
1706 }
1707
1708 int perf_counter_task_disable(void)
1709 {
1710 struct perf_counter *counter;
1711
1712 mutex_lock(&current->perf_counter_mutex);
1713 list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
1714 perf_counter_for_each_child(counter, perf_counter_disable);
1715 mutex_unlock(&current->perf_counter_mutex);
1716
1717 return 0;
1718 }
1719
1720 /*
1721 * Callers need to ensure there can be no nesting of this function, otherwise
1722 * the seqlock logic goes bad. We can not serialize this because the arch
1723 * code calls this from NMI context.
1724 */
1725 void perf_counter_update_userpage(struct perf_counter *counter)
1726 {
1727 struct perf_counter_mmap_page *userpg;
1728 struct perf_mmap_data *data;
1729
1730 rcu_read_lock();
1731 data = rcu_dereference(counter->data);
1732 if (!data)
1733 goto unlock;
1734
1735 userpg = data->user_page;
1736
1737 /*
1738 * Disable preemption so as to not let the corresponding user-space
1739 * spin too long if we get preempted.
1740 */
1741 preempt_disable();
1742 ++userpg->lock;
1743 barrier();
1744 userpg->index = counter->hw.idx;
1745 userpg->offset = atomic64_read(&counter->count);
1746 if (counter->state == PERF_COUNTER_STATE_ACTIVE)
1747 userpg->offset -= atomic64_read(&counter->hw.prev_count);
1748
1749 barrier();
1750 ++userpg->lock;
1751 preempt_enable();
1752 unlock:
1753 rcu_read_unlock();
1754 }
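/*
 * The ->lock field updated above behaves like a seqcount: it is odd while
 * an update is in progress and even once it has completed. A user-space
 * reader of the mmap()ed control page (pg pointing at the mapped
 * struct perf_counter_mmap_page) might do, as a sketch:
 *
 *	do {
 *		seq = pg->lock;
 *		barrier();
 *		idx = pg->index;
 *		off = pg->offset;
 *		barrier();
 *	} while (pg->lock != seq || (seq & 1));
 */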
1755
1756 static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1757 {
1758 struct perf_counter *counter = vma->vm_file->private_data;
1759 struct perf_mmap_data *data;
1760 int ret = VM_FAULT_SIGBUS;
1761
1762 rcu_read_lock();
1763 data = rcu_dereference(counter->data);
1764 if (!data)
1765 goto unlock;
1766
1767 if (vmf->pgoff == 0) {
1768 vmf->page = virt_to_page(data->user_page);
1769 } else {
1770 int nr = vmf->pgoff - 1;
1771
1772 		if ((unsigned)nr >= data->nr_pages)
1773 goto unlock;
1774
1775 vmf->page = virt_to_page(data->data_pages[nr]);
1776 }
1777 get_page(vmf->page);
1778 ret = 0;
1779 unlock:
1780 rcu_read_unlock();
1781
1782 return ret;
1783 }
1784
1785 static int perf_mmap_data_alloc(struct perf_counter *counter, int nr_pages)
1786 {
1787 struct perf_mmap_data *data;
1788 unsigned long size;
1789 int i;
1790
1791 WARN_ON(atomic_read(&counter->mmap_count));
1792
1793 size = sizeof(struct perf_mmap_data);
1794 size += nr_pages * sizeof(void *);
1795
1796 data = kzalloc(size, GFP_KERNEL);
1797 if (!data)
1798 goto fail;
1799
1800 data->user_page = (void *)get_zeroed_page(GFP_KERNEL);
1801 if (!data->user_page)
1802 goto fail_user_page;
1803
1804 for (i = 0; i < nr_pages; i++) {
1805 data->data_pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
1806 if (!data->data_pages[i])
1807 goto fail_data_pages;
1808 }
1809
1810 data->nr_pages = nr_pages;
1811 atomic_set(&data->lock, -1);
1812
1813 rcu_assign_pointer(counter->data, data);
1814
1815 return 0;
1816
1817 fail_data_pages:
1818 for (i--; i >= 0; i--)
1819 free_page((unsigned long)data->data_pages[i]);
1820
1821 free_page((unsigned long)data->user_page);
1822
1823 fail_user_page:
1824 kfree(data);
1825
1826 fail:
1827 return -ENOMEM;
1828 }
1829
1830 static void __perf_mmap_data_free(struct rcu_head *rcu_head)
1831 {
1832 struct perf_mmap_data *data;
1833 int i;
1834
1835 data = container_of(rcu_head, struct perf_mmap_data, rcu_head);
1836
1837 free_page((unsigned long)data->user_page);
1838 for (i = 0; i < data->nr_pages; i++)
1839 free_page((unsigned long)data->data_pages[i]);
1840 kfree(data);
1841 }
1842
1843 static void perf_mmap_data_free(struct perf_counter *counter)
1844 {
1845 struct perf_mmap_data *data = counter->data;
1846
1847 WARN_ON(atomic_read(&counter->mmap_count));
1848
1849 rcu_assign_pointer(counter->data, NULL);
1850 call_rcu(&data->rcu_head, __perf_mmap_data_free);
1851 }
1852
1853 static void perf_mmap_open(struct vm_area_struct *vma)
1854 {
1855 struct perf_counter *counter = vma->vm_file->private_data;
1856
1857 atomic_inc(&counter->mmap_count);
1858 }
1859
1860 static void perf_mmap_close(struct vm_area_struct *vma)
1861 {
1862 struct perf_counter *counter = vma->vm_file->private_data;
1863
1864 WARN_ON_ONCE(counter->ctx->parent_ctx);
1865 if (atomic_dec_and_mutex_lock(&counter->mmap_count, &counter->mmap_mutex)) {
1866 struct user_struct *user = current_user();
1867
1868 atomic_long_sub(counter->data->nr_pages + 1, &user->locked_vm);
1869 vma->vm_mm->locked_vm -= counter->data->nr_locked;
1870 perf_mmap_data_free(counter);
1871 mutex_unlock(&counter->mmap_mutex);
1872 }
1873 }
1874
1875 static struct vm_operations_struct perf_mmap_vmops = {
1876 .open = perf_mmap_open,
1877 .close = perf_mmap_close,
1878 .fault = perf_mmap_fault,
1879 };
1880
1881 static int perf_mmap(struct file *file, struct vm_area_struct *vma)
1882 {
1883 struct perf_counter *counter = file->private_data;
1884 unsigned long user_locked, user_lock_limit;
1885 struct user_struct *user = current_user();
1886 unsigned long locked, lock_limit;
1887 unsigned long vma_size;
1888 unsigned long nr_pages;
1889 long user_extra, extra;
1890 int ret = 0;
1891
1892 if (!(vma->vm_flags & VM_SHARED) || (vma->vm_flags & VM_WRITE))
1893 return -EINVAL;
1894
1895 vma_size = vma->vm_end - vma->vm_start;
1896 nr_pages = (vma_size / PAGE_SIZE) - 1;
1897
1898 /*
1899 * If we have data pages ensure they're a power-of-two number, so we
1900 * can do bitmasks instead of modulo.
1901 */
1902 if (nr_pages != 0 && !is_power_of_2(nr_pages))
1903 return -EINVAL;
1904
1905 if (vma_size != PAGE_SIZE * (1 + nr_pages))
1906 return -EINVAL;
1907
1908 if (vma->vm_pgoff != 0)
1909 return -EINVAL;
1910
1911 WARN_ON_ONCE(counter->ctx->parent_ctx);
1912 mutex_lock(&counter->mmap_mutex);
1913 if (atomic_inc_not_zero(&counter->mmap_count)) {
1914 if (nr_pages != counter->data->nr_pages)
1915 ret = -EINVAL;
1916 goto unlock;
1917 }
1918
1919 user_extra = nr_pages + 1;
1920 user_lock_limit = sysctl_perf_counter_mlock >> (PAGE_SHIFT - 10);
1921
1922 /*
1923 * Increase the limit linearly with more CPUs:
1924 */
1925 user_lock_limit *= num_online_cpus();
1926
1927 user_locked = atomic_long_read(&user->locked_vm) + user_extra;
1928
1929 extra = 0;
1930 if (user_locked > user_lock_limit)
1931 extra = user_locked - user_lock_limit;
1932
1933 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
1934 lock_limit >>= PAGE_SHIFT;
1935 locked = vma->vm_mm->locked_vm + extra;
1936
1937 if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
1938 ret = -EPERM;
1939 goto unlock;
1940 }
1941
1942 WARN_ON(counter->data);
1943 ret = perf_mmap_data_alloc(counter, nr_pages);
1944 if (ret)
1945 goto unlock;
1946
1947 atomic_set(&counter->mmap_count, 1);
1948 atomic_long_add(user_extra, &user->locked_vm);
1949 vma->vm_mm->locked_vm += extra;
1950 counter->data->nr_locked = extra;
1951 unlock:
1952 mutex_unlock(&counter->mmap_mutex);
1953
1954 vma->vm_flags &= ~VM_MAYWRITE;
1955 vma->vm_flags |= VM_RESERVED;
1956 vma->vm_ops = &perf_mmap_vmops;
1957
1958 return ret;
1959 }
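/*
 * Matching user-space mapping, as a sketch: the area must be MAP_SHARED,
 * read-only, start at file offset 0 and span 1 + 2^n pages (one control
 * page plus a power-of-two data buffer), e.g.:
 *
 *	size_t len = (1 + 8) * sysconf(_SC_PAGESIZE);
 *	void *base = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
 *
 * Anything else fails the -EINVAL checks at the top of perf_mmap(), and
 * the locked-memory accounting above may refuse the mapping with -EPERM.
 */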
1960
1961 static int perf_fasync(int fd, struct file *filp, int on)
1962 {
1963 struct inode *inode = filp->f_path.dentry->d_inode;
1964 struct perf_counter *counter = filp->private_data;
1965 int retval;
1966
1967 mutex_lock(&inode->i_mutex);
1968 retval = fasync_helper(fd, filp, on, &counter->fasync);
1969 mutex_unlock(&inode->i_mutex);
1970
1971 if (retval < 0)
1972 return retval;
1973
1974 return 0;
1975 }
1976
1977 static const struct file_operations perf_fops = {
1978 .release = perf_release,
1979 .read = perf_read,
1980 .poll = perf_poll,
1981 .unlocked_ioctl = perf_ioctl,
1982 .compat_ioctl = perf_ioctl,
1983 .mmap = perf_mmap,
1984 .fasync = perf_fasync,
1985 };
1986
1987 /*
1988 * Perf counter wakeup
1989 *
1990 * If there's data, ensure we set the poll() state and publish everything
1991 * to user-space before waking everybody up.
1992 */
1993
1994 void perf_counter_wakeup(struct perf_counter *counter)
1995 {
1996 wake_up_all(&counter->waitq);
1997
1998 if (counter->pending_kill) {
1999 kill_fasync(&counter->fasync, SIGIO, counter->pending_kill);
2000 counter->pending_kill = 0;
2001 }
2002 }
2003
2004 /*
2005 * Pending wakeups
2006 *
2007  * Handle the case where we need to wake up from NMI (or rq->lock) context.
2008 *
2009 * The NMI bit means we cannot possibly take locks. Therefore, maintain a
2010 * single linked list and use cmpxchg() to add entries lockless.
2011 */
2012
2013 static void perf_pending_counter(struct perf_pending_entry *entry)
2014 {
2015 struct perf_counter *counter = container_of(entry,
2016 struct perf_counter, pending);
2017
2018 if (counter->pending_disable) {
2019 counter->pending_disable = 0;
2020 perf_counter_disable(counter);
2021 }
2022
2023 if (counter->pending_wakeup) {
2024 counter->pending_wakeup = 0;
2025 perf_counter_wakeup(counter);
2026 }
2027 }
2028
2029 #define PENDING_TAIL ((struct perf_pending_entry *)-1UL)
2030
2031 static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = {
2032 PENDING_TAIL,
2033 };
2034
2035 static void perf_pending_queue(struct perf_pending_entry *entry,
2036 void (*func)(struct perf_pending_entry *))
2037 {
2038 struct perf_pending_entry **head;
2039
2040 if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL)
2041 return;
2042
2043 entry->func = func;
2044
2045 head = &get_cpu_var(perf_pending_head);
2046
2047 do {
2048 entry->next = *head;
2049 } while (cmpxchg(head, entry->next, entry) != entry->next);
2050
2051 set_perf_counter_pending();
2052
2053 put_cpu_var(perf_pending_head);
2054 }
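/*
 * Note: entry->next == NULL means "not queued"; the cmpxchg() above claims
 * the entry, and PENDING_TAIL (rather than NULL) terminates the per-CPU
 * list so that a drained entry can be told apart from the list tail --
 * see __perf_pending_run() and perf_not_pending() below.
 */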
2055
2056 static int __perf_pending_run(void)
2057 {
2058 struct perf_pending_entry *list;
2059 int nr = 0;
2060
2061 list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL);
2062 while (list != PENDING_TAIL) {
2063 void (*func)(struct perf_pending_entry *);
2064 struct perf_pending_entry *entry = list;
2065
2066 list = list->next;
2067
2068 func = entry->func;
2069 entry->next = NULL;
2070 /*
2071 * Ensure we observe the unqueue before we issue the wakeup,
2072 * so that we won't be waiting forever.
2073 * -- see perf_not_pending().
2074 */
2075 smp_wmb();
2076
2077 func(entry);
2078 nr++;
2079 }
2080
2081 return nr;
2082 }
2083
2084 static inline int perf_not_pending(struct perf_counter *counter)
2085 {
2086 /*
2087 * If we flush on whatever cpu we run, there is a chance we don't
2088 * need to wait.
2089 */
2090 get_cpu();
2091 __perf_pending_run();
2092 put_cpu();
2093
2094 /*
2095 * Ensure we see the proper queue state before going to sleep
2096  * so that we do not miss the wakeup. -- see __perf_pending_run()
2097 */
2098 smp_rmb();
2099 return counter->pending.next == NULL;
2100 }
2101
2102 static void perf_pending_sync(struct perf_counter *counter)
2103 {
2104 wait_event(counter->waitq, perf_not_pending(counter));
2105 }
2106
2107 void perf_counter_do_pending(void)
2108 {
2109 __perf_pending_run();
2110 }
2111
2112 /*
2113 * Callchain support -- arch specific
2114 */
2115
2116 __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
2117 {
2118 return NULL;
2119 }
2120
2121 /*
2122 * Output
2123 */
2124
2125 struct perf_output_handle {
2126 struct perf_counter *counter;
2127 struct perf_mmap_data *data;
2128 unsigned long head;
2129 unsigned long offset;
2130 int nmi;
2131 int overflow;
2132 int locked;
2133 unsigned long flags;
2134 };
2135
2136 static void perf_output_wakeup(struct perf_output_handle *handle)
2137 {
2138 atomic_set(&handle->data->poll, POLL_IN);
2139
2140 if (handle->nmi) {
2141 handle->counter->pending_wakeup = 1;
2142 perf_pending_queue(&handle->counter->pending,
2143 perf_pending_counter);
2144 } else
2145 perf_counter_wakeup(handle->counter);
2146 }
2147
2148 /*
2149 * Curious locking construct.
2150 *
2151 * We need to ensure a later event doesn't publish a head when a former
2152  * event isn't done writing. However, since we need to deal with NMIs we
2153 * cannot fully serialize things.
2154 *
2155 * What we do is serialize between CPUs so we only have to deal with NMI
2156 * nesting on a single CPU.
2157 *
2158 * We only publish the head (and generate a wakeup) when the outer-most
2159 * event completes.
2160 */
2161 static void perf_output_lock(struct perf_output_handle *handle)
2162 {
2163 struct perf_mmap_data *data = handle->data;
2164 int cpu;
2165
2166 handle->locked = 0;
2167
2168 local_irq_save(handle->flags);
2169 cpu = smp_processor_id();
2170
2171 if (in_nmi() && atomic_read(&data->lock) == cpu)
2172 return;
2173
2174 while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
2175 cpu_relax();
2176
2177 handle->locked = 1;
2178 }
2179
2180 static void perf_output_unlock(struct perf_output_handle *handle)
2181 {
2182 struct perf_mmap_data *data = handle->data;
2183 unsigned long head;
2184 int cpu;
2185
2186 data->done_head = data->head;
2187
2188 if (!handle->locked)
2189 goto out;
2190
2191 again:
2192 /*
2193 * The xchg implies a full barrier that ensures all writes are done
2194 * before we publish the new head, matched by a rmb() in userspace when
2195 * reading this position.
2196 */
2197 while ((head = atomic_long_xchg(&data->done_head, 0)))
2198 data->user_page->data_head = head;
2199
2200 /*
2201 * NMI can happen here, which means we can miss a done_head update.
2202 */
2203
2204 cpu = atomic_xchg(&data->lock, -1);
2205 WARN_ON_ONCE(cpu != smp_processor_id());
2206
2207 /*
2208 * Therefore we have to validate we did not indeed do so.
2209 */
2210 if (unlikely(atomic_long_read(&data->done_head))) {
2211 /*
2212 * Since we had it locked, we can lock it again.
2213 */
2214 while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
2215 cpu_relax();
2216
2217 goto again;
2218 }
2219
2220 if (atomic_xchg(&data->wakeup, 0))
2221 perf_output_wakeup(handle);
2222 out:
2223 local_irq_restore(handle->flags);
2224 }
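/*
 * Illustrative read side in user space (sketch, not part of this file),
 * assuming 'pc' is the mapped control page:
 *
 *   head = pc->data_head;
 *   rmb();                     // pairs with the xchg() barrier above
 *   ... consume records in the buffer up to 'head' ...
 */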
2225
2226 static int perf_output_begin(struct perf_output_handle *handle,
2227 struct perf_counter *counter, unsigned int size,
2228 int nmi, int overflow)
2229 {
2230 struct perf_mmap_data *data;
2231 unsigned int offset, head;
2232
2233 /*
2234 * For inherited counters we send all the output towards the parent.
2235 */
2236 if (counter->parent)
2237 counter = counter->parent;
2238
2239 rcu_read_lock();
2240 data = rcu_dereference(counter->data);
2241 if (!data)
2242 goto out;
2243
2244 handle->data = data;
2245 handle->counter = counter;
2246 handle->nmi = nmi;
2247 handle->overflow = overflow;
2248
2249 if (!data->nr_pages)
2250 goto fail;
2251
2252 perf_output_lock(handle);
2253
2254 do {
2255 offset = head = atomic_long_read(&data->head);
2256 head += size;
2257 } while (atomic_long_cmpxchg(&data->head, offset, head) != offset);
2258
2259 handle->offset = offset;
2260 handle->head = head;
2261
2262 if ((offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT))
2263 atomic_set(&data->wakeup, 1);
2264
2265 return 0;
2266
2267 fail:
2268 perf_output_wakeup(handle);
2269 out:
2270 rcu_read_unlock();
2271
2272 return -ENOSPC;
2273 }
2274
2275 static void perf_output_copy(struct perf_output_handle *handle,
2276 const void *buf, unsigned int len)
2277 {
2278 unsigned int pages_mask;
2279 unsigned int offset;
2280 unsigned int size;
2281 void **pages;
2282
2283 offset = handle->offset;
2284 pages_mask = handle->data->nr_pages - 1;
2285 pages = handle->data->data_pages;
2286
2287 do {
2288 unsigned int page_offset;
2289 int nr;
2290
2291 nr = (offset >> PAGE_SHIFT) & pages_mask;
2292 page_offset = offset & (PAGE_SIZE - 1);
2293 size = min_t(unsigned int, PAGE_SIZE - page_offset, len);
2294
2295 memcpy(pages[nr] + page_offset, buf, size);
2296
2297 len -= size;
2298 buf += size;
2299 offset += size;
2300 } while (len);
2301
2302 handle->offset = offset;
2303
2304 /*
2305 * Check we didn't copy past our reservation window, taking the
2306 * possible unsigned int wrap into account.
2307 */
2308 WARN_ON_ONCE(((long)(handle->head - handle->offset)) < 0);
2309 }
2310
2311 #define perf_output_put(handle, x) \
2312 perf_output_copy((handle), &(x), sizeof(x))
2313
2314 static void perf_output_end(struct perf_output_handle *handle)
2315 {
2316 struct perf_counter *counter = handle->counter;
2317 struct perf_mmap_data *data = handle->data;
2318
2319 int wakeup_events = counter->attr.wakeup_events;
2320
2321 if (handle->overflow && wakeup_events) {
2322 int events = atomic_inc_return(&data->events);
2323 if (events >= wakeup_events) {
2324 atomic_sub(wakeup_events, &data->events);
2325 atomic_set(&data->wakeup, 1);
2326 }
2327 }
2328
2329 perf_output_unlock(handle);
2330 rcu_read_unlock();
2331 }
2332
2333 static u32 perf_counter_pid(struct perf_counter *counter, struct task_struct *p)
2334 {
2335 /*
2336 * only top level counters have the pid namespace they were created in
2337 */
2338 if (counter->parent)
2339 counter = counter->parent;
2340
2341 return task_tgid_nr_ns(p, counter->ns);
2342 }
2343
2344 static u32 perf_counter_tid(struct perf_counter *counter, struct task_struct *p)
2345 {
2346 /*
2347 * only top level counters have the pid namespace they were created in
2348 */
2349 if (counter->parent)
2350 counter = counter->parent;
2351
2352 return task_pid_nr_ns(p, counter->ns);
2353 }
2354
2355 static void perf_counter_output(struct perf_counter *counter,
2356 int nmi, struct pt_regs *regs, u64 addr)
2357 {
2358 int ret;
2359 u64 sample_type = counter->attr.sample_type;
2360 struct perf_output_handle handle;
2361 struct perf_event_header header;
2362 u64 ip;
2363 struct {
2364 u32 pid, tid;
2365 } tid_entry;
2366 struct {
2367 u64 id;
2368 u64 counter;
2369 } group_entry;
2370 struct perf_callchain_entry *callchain = NULL;
2371 int callchain_size = 0;
2372 u64 time;
2373 struct {
2374 u32 cpu, reserved;
2375 } cpu_entry;
2376
2377 header.type = 0;
2378 header.size = sizeof(header);
2379
2380 header.misc = PERF_EVENT_MISC_OVERFLOW;
2381 header.misc |= perf_misc_flags(regs);
2382
2383 if (sample_type & PERF_SAMPLE_IP) {
2384 ip = perf_instruction_pointer(regs);
2385 header.type |= PERF_SAMPLE_IP;
2386 header.size += sizeof(ip);
2387 }
2388
2389 if (sample_type & PERF_SAMPLE_TID) {
2390 /* namespace issues */
2391 tid_entry.pid = perf_counter_pid(counter, current);
2392 tid_entry.tid = perf_counter_tid(counter, current);
2393
2394 header.type |= PERF_SAMPLE_TID;
2395 header.size += sizeof(tid_entry);
2396 }
2397
2398 if (sample_type & PERF_SAMPLE_TIME) {
2399 /*
2400 * Maybe do better on x86 and provide cpu_clock_nmi()
2401 */
2402 time = sched_clock();
2403
2404 header.type |= PERF_SAMPLE_TIME;
2405 header.size += sizeof(u64);
2406 }
2407
2408 if (sample_type & PERF_SAMPLE_ADDR) {
2409 header.type |= PERF_SAMPLE_ADDR;
2410 header.size += sizeof(u64);
2411 }
2412
2413 if (sample_type & PERF_SAMPLE_ID) {
2414 header.type |= PERF_SAMPLE_ID;
2415 header.size += sizeof(u64);
2416 }
2417
2418 if (sample_type & PERF_SAMPLE_CPU) {
2419 header.type |= PERF_SAMPLE_CPU;
2420 header.size += sizeof(cpu_entry);
2421
2422 cpu_entry.cpu = raw_smp_processor_id();
2423 }
2424
2425 if (sample_type & PERF_SAMPLE_PERIOD) {
2426 header.type |= PERF_SAMPLE_PERIOD;
2427 header.size += sizeof(u64);
2428 }
2429
2430 if (sample_type & PERF_SAMPLE_GROUP) {
2431 header.type |= PERF_SAMPLE_GROUP;
2432 header.size += sizeof(u64) +
2433 counter->nr_siblings * sizeof(group_entry);
2434 }
2435
2436 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
2437 callchain = perf_callchain(regs);
2438
2439 if (callchain) {
2440 callchain_size = (1 + callchain->nr) * sizeof(u64);
2441
2442 header.type |= PERF_SAMPLE_CALLCHAIN;
2443 header.size += callchain_size;
2444 }
2445 }
2446
2447 ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
2448 if (ret)
2449 return;
2450
2451 perf_output_put(&handle, header);
2452
2453 if (sample_type & PERF_SAMPLE_IP)
2454 perf_output_put(&handle, ip);
2455
2456 if (sample_type & PERF_SAMPLE_TID)
2457 perf_output_put(&handle, tid_entry);
2458
2459 if (sample_type & PERF_SAMPLE_TIME)
2460 perf_output_put(&handle, time);
2461
2462 if (sample_type & PERF_SAMPLE_ADDR)
2463 perf_output_put(&handle, addr);
2464
2465 if (sample_type & PERF_SAMPLE_ID)
2466 perf_output_put(&handle, counter->id);
2467
2468 if (sample_type & PERF_SAMPLE_CPU)
2469 perf_output_put(&handle, cpu_entry);
2470
2471 if (sample_type & PERF_SAMPLE_PERIOD)
2472 perf_output_put(&handle, counter->hw.sample_period);
2473
2474 /*
2475 * XXX PERF_SAMPLE_GROUP vs inherited counters seems difficult.
2476 */
2477 if (sample_type & PERF_SAMPLE_GROUP) {
2478 struct perf_counter *leader, *sub;
2479 u64 nr = counter->nr_siblings;
2480
2481 perf_output_put(&handle, nr);
2482
2483 leader = counter->group_leader;
2484 list_for_each_entry(sub, &leader->sibling_list, list_entry) {
2485 if (sub != counter)
2486 sub->pmu->read(sub);
2487
2488 group_entry.id = sub->id;
2489 group_entry.counter = atomic64_read(&sub->count);
2490
2491 perf_output_put(&handle, group_entry);
2492 }
2493 }
2494
2495 if (callchain)
2496 perf_output_copy(&handle, callchain, callchain_size);
2497
2498 perf_output_end(&handle);
2499 }
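/*
 * Example of the record layout produced above (sketch): with
 * sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID the output is
 *
 *   struct perf_event_header header;   // .type has the IP and TID bits set
 *   u64 ip;
 *   u32 pid, tid;
 *
 * header.size covers the whole record, so readers can skip sample types
 * they do not understand.
 */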
2500
2501 /*
2502 * fork tracking
2503 */
2504
2505 struct perf_fork_event {
2506 struct task_struct *task;
2507
2508 struct {
2509 struct perf_event_header header;
2510
2511 u32 pid;
2512 u32 ppid;
2513 } event;
2514 };
2515
2516 static void perf_counter_fork_output(struct perf_counter *counter,
2517 struct perf_fork_event *fork_event)
2518 {
2519 struct perf_output_handle handle;
2520 int size = fork_event->event.header.size;
2521 struct task_struct *task = fork_event->task;
2522 int ret = perf_output_begin(&handle, counter, size, 0, 0);
2523
2524 if (ret)
2525 return;
2526
2527 fork_event->event.pid = perf_counter_pid(counter, task);
2528 fork_event->event.ppid = perf_counter_pid(counter, task->real_parent);
2529
2530 perf_output_put(&handle, fork_event->event);
2531 perf_output_end(&handle);
2532 }
2533
2534 static int perf_counter_fork_match(struct perf_counter *counter)
2535 {
2536 if (counter->attr.comm || counter->attr.mmap)
2537 return 1;
2538
2539 return 0;
2540 }
2541
2542 static void perf_counter_fork_ctx(struct perf_counter_context *ctx,
2543 struct perf_fork_event *fork_event)
2544 {
2545 struct perf_counter *counter;
2546
2547 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
2548 return;
2549
2550 rcu_read_lock();
2551 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
2552 if (perf_counter_fork_match(counter))
2553 perf_counter_fork_output(counter, fork_event);
2554 }
2555 rcu_read_unlock();
2556 }
2557
2558 static void perf_counter_fork_event(struct perf_fork_event *fork_event)
2559 {
2560 struct perf_cpu_context *cpuctx;
2561 struct perf_counter_context *ctx;
2562
2563 cpuctx = &get_cpu_var(perf_cpu_context);
2564 perf_counter_fork_ctx(&cpuctx->ctx, fork_event);
2565 put_cpu_var(perf_cpu_context);
2566
2567 rcu_read_lock();
2568 /*
2569 * doesn't really matter which of the child contexts the
2570  * event ends up in.
2571 */
2572 ctx = rcu_dereference(current->perf_counter_ctxp);
2573 if (ctx)
2574 perf_counter_fork_ctx(ctx, fork_event);
2575 rcu_read_unlock();
2576 }
2577
2578 void perf_counter_fork(struct task_struct *task)
2579 {
2580 struct perf_fork_event fork_event;
2581
2582 if (!atomic_read(&nr_comm_counters) &&
2583 !atomic_read(&nr_mmap_counters))
2584 return;
2585
2586 fork_event = (struct perf_fork_event){
2587 .task = task,
2588 .event = {
2589 .header = {
2590 .type = PERF_EVENT_FORK,
2591 .size = sizeof(fork_event.event),
2592 },
2593 },
2594 };
2595
2596 perf_counter_fork_event(&fork_event);
2597 }
2598
2599 /*
2600 * comm tracking
2601 */
2602
2603 struct perf_comm_event {
2604 struct task_struct *task;
2605 char *comm;
2606 int comm_size;
2607
2608 struct {
2609 struct perf_event_header header;
2610
2611 u32 pid;
2612 u32 tid;
2613 } event;
2614 };
2615
2616 static void perf_counter_comm_output(struct perf_counter *counter,
2617 struct perf_comm_event *comm_event)
2618 {
2619 struct perf_output_handle handle;
2620 int size = comm_event->event.header.size;
2621 int ret = perf_output_begin(&handle, counter, size, 0, 0);
2622
2623 if (ret)
2624 return;
2625
2626 comm_event->event.pid = perf_counter_pid(counter, comm_event->task);
2627 comm_event->event.tid = perf_counter_tid(counter, comm_event->task);
2628
2629 perf_output_put(&handle, comm_event->event);
2630 perf_output_copy(&handle, comm_event->comm,
2631 comm_event->comm_size);
2632 perf_output_end(&handle);
2633 }
2634
2635 static int perf_counter_comm_match(struct perf_counter *counter)
2636 {
2637 if (counter->attr.comm)
2638 return 1;
2639
2640 return 0;
2641 }
2642
2643 static void perf_counter_comm_ctx(struct perf_counter_context *ctx,
2644 struct perf_comm_event *comm_event)
2645 {
2646 struct perf_counter *counter;
2647
2648 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
2649 return;
2650
2651 rcu_read_lock();
2652 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
2653 if (perf_counter_comm_match(counter))
2654 perf_counter_comm_output(counter, comm_event);
2655 }
2656 rcu_read_unlock();
2657 }
2658
2659 static void perf_counter_comm_event(struct perf_comm_event *comm_event)
2660 {
2661 struct perf_cpu_context *cpuctx;
2662 struct perf_counter_context *ctx;
2663 unsigned int size;
2664 char *comm = comm_event->task->comm;
2665
2666 size = ALIGN(strlen(comm)+1, sizeof(u64));
2667
2668 comm_event->comm = comm;
2669 comm_event->comm_size = size;
2670
2671 comm_event->event.header.size = sizeof(comm_event->event) + size;
2672
2673 cpuctx = &get_cpu_var(perf_cpu_context);
2674 perf_counter_comm_ctx(&cpuctx->ctx, comm_event);
2675 put_cpu_var(perf_cpu_context);
2676
2677 rcu_read_lock();
2678 /*
2679 * doesn't really matter which of the child contexts the
2680  * event ends up in.
2681 */
2682 ctx = rcu_dereference(current->perf_counter_ctxp);
2683 if (ctx)
2684 perf_counter_comm_ctx(ctx, comm_event);
2685 rcu_read_unlock();
2686 }
2687
2688 void perf_counter_comm(struct task_struct *task)
2689 {
2690 struct perf_comm_event comm_event;
2691
2692 if (!atomic_read(&nr_comm_counters))
2693 return;
2694
2695 comm_event = (struct perf_comm_event){
2696 .task = task,
2697 .event = {
2698 .header = { .type = PERF_EVENT_COMM, },
2699 },
2700 };
2701
2702 perf_counter_comm_event(&comm_event);
2703 }
2704
2705 /*
2706 * mmap tracking
2707 */
2708
2709 struct perf_mmap_event {
2710 struct vm_area_struct *vma;
2711
2712 const char *file_name;
2713 int file_size;
2714
2715 struct {
2716 struct perf_event_header header;
2717
2718 u32 pid;
2719 u32 tid;
2720 u64 start;
2721 u64 len;
2722 u64 pgoff;
2723 } event;
2724 };
2725
2726 static void perf_counter_mmap_output(struct perf_counter *counter,
2727 struct perf_mmap_event *mmap_event)
2728 {
2729 struct perf_output_handle handle;
2730 int size = mmap_event->event.header.size;
2731 int ret = perf_output_begin(&handle, counter, size, 0, 0);
2732
2733 if (ret)
2734 return;
2735
2736 mmap_event->event.pid = perf_counter_pid(counter, current);
2737 mmap_event->event.tid = perf_counter_tid(counter, current);
2738
2739 perf_output_put(&handle, mmap_event->event);
2740 perf_output_copy(&handle, mmap_event->file_name,
2741 mmap_event->file_size);
2742 perf_output_end(&handle);
2743 }
2744
2745 static int perf_counter_mmap_match(struct perf_counter *counter,
2746 struct perf_mmap_event *mmap_event)
2747 {
2748 if (counter->attr.mmap)
2749 return 1;
2750
2751 return 0;
2752 }
2753
2754 static void perf_counter_mmap_ctx(struct perf_counter_context *ctx,
2755 struct perf_mmap_event *mmap_event)
2756 {
2757 struct perf_counter *counter;
2758
2759 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
2760 return;
2761
2762 rcu_read_lock();
2763 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
2764 if (perf_counter_mmap_match(counter, mmap_event))
2765 perf_counter_mmap_output(counter, mmap_event);
2766 }
2767 rcu_read_unlock();
2768 }
2769
2770 static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event)
2771 {
2772 struct perf_cpu_context *cpuctx;
2773 struct perf_counter_context *ctx;
2774 struct vm_area_struct *vma = mmap_event->vma;
2775 struct file *file = vma->vm_file;
2776 unsigned int size;
2777 char tmp[16];
2778 char *buf = NULL;
2779 const char *name;
2780
2781 if (file) {
2782 buf = kzalloc(PATH_MAX, GFP_KERNEL);
2783 if (!buf) {
2784 name = strncpy(tmp, "//enomem", sizeof(tmp));
2785 goto got_name;
2786 }
2787 name = d_path(&file->f_path, buf, PATH_MAX);
2788 if (IS_ERR(name)) {
2789 name = strncpy(tmp, "//toolong", sizeof(tmp));
2790 goto got_name;
2791 }
2792 } else {
2793 name = arch_vma_name(mmap_event->vma);
2794 if (name)
2795 goto got_name;
2796
2797 if (!vma->vm_mm) {
2798 name = strncpy(tmp, "[vdso]", sizeof(tmp));
2799 goto got_name;
2800 }
2801
2802 name = strncpy(tmp, "//anon", sizeof(tmp));
2803 goto got_name;
2804 }
2805
2806 got_name:
2807 size = ALIGN(strlen(name)+1, sizeof(u64));
2808
2809 mmap_event->file_name = name;
2810 mmap_event->file_size = size;
2811
2812 mmap_event->event.header.size = sizeof(mmap_event->event) + size;
2813
2814 cpuctx = &get_cpu_var(perf_cpu_context);
2815 perf_counter_mmap_ctx(&cpuctx->ctx, mmap_event);
2816 put_cpu_var(perf_cpu_context);
2817
2818 rcu_read_lock();
2819 /*
2820 * doesn't really matter which of the child contexts the
2821  * event ends up in.
2822 */
2823 ctx = rcu_dereference(current->perf_counter_ctxp);
2824 if (ctx)
2825 perf_counter_mmap_ctx(ctx, mmap_event);
2826 rcu_read_unlock();
2827
2828 kfree(buf);
2829 }
2830
2831 void __perf_counter_mmap(struct vm_area_struct *vma)
2832 {
2833 struct perf_mmap_event mmap_event;
2834
2835 if (!atomic_read(&nr_mmap_counters))
2836 return;
2837
2838 mmap_event = (struct perf_mmap_event){
2839 .vma = vma,
2840 .event = {
2841 .header = { .type = PERF_EVENT_MMAP, },
2842 .start = vma->vm_start,
2843 .len = vma->vm_end - vma->vm_start,
2844 .pgoff = vma->vm_pgoff,
2845 },
2846 };
2847
2848 perf_counter_mmap_event(&mmap_event);
2849 }
2850
2851 /*
2852 * Log sample_period changes so that analyzing tools can re-normalize the
2853 * event flow.
2854 */
2855
2856 static void perf_log_period(struct perf_counter *counter, u64 period)
2857 {
2858 struct perf_output_handle handle;
2859 int ret;
2860
2861 struct {
2862 struct perf_event_header header;
2863 u64 time;
2864 u64 id;
2865 u64 period;
2866 } freq_event = {
2867 .header = {
2868 .type = PERF_EVENT_PERIOD,
2869 .misc = 0,
2870 .size = sizeof(freq_event),
2871 },
2872 .time = sched_clock(),
2873 .id = counter->id,
2874 .period = period,
2875 };
2876
2877 if (counter->hw.sample_period == period)
2878 return;
2879
2880 ret = perf_output_begin(&handle, counter, sizeof(freq_event), 0, 0);
2881 if (ret)
2882 return;
2883
2884 perf_output_put(&handle, freq_event);
2885 perf_output_end(&handle);
2886 }
2887
2888 /*
2889 * IRQ throttle logging
2890 */
2891
2892 static void perf_log_throttle(struct perf_counter *counter, int enable)
2893 {
2894 struct perf_output_handle handle;
2895 int ret;
2896
2897 struct {
2898 struct perf_event_header header;
2899 u64 time;
2900 } throttle_event = {
2901 .header = {
2902 .type = PERF_EVENT_THROTTLE + 1,
2903 .misc = 0,
2904 .size = sizeof(throttle_event),
2905 },
2906 .time = sched_clock(),
2907 };
2908
2909 ret = perf_output_begin(&handle, counter, sizeof(throttle_event), 1, 0);
2910 if (ret)
2911 return;
2912
2913 perf_output_put(&handle, throttle_event);
2914 perf_output_end(&handle);
2915 }
2916
2917 /*
2918 * Generic counter overflow handling.
2919 */
2920
2921 int perf_counter_overflow(struct perf_counter *counter,
2922 int nmi, struct pt_regs *regs, u64 addr)
2923 {
2924 int events = atomic_read(&counter->event_limit);
2925 int throttle = counter->pmu->unthrottle != NULL;
2926 int ret = 0;
2927
2928 if (!throttle) {
2929 counter->hw.interrupts++;
2930 } else {
2931 if (counter->hw.interrupts != MAX_INTERRUPTS) {
2932 counter->hw.interrupts++;
2933 if (HZ*counter->hw.interrupts > (u64)sysctl_perf_counter_limit) {
2934 counter->hw.interrupts = MAX_INTERRUPTS;
2935 perf_log_throttle(counter, 0);
2936 ret = 1;
2937 }
2938 } else {
2939 /*
2940  * Keep re-disabling the counter even though we disabled it on the
2941  * previous pass - just in case we raced with a sched-in and the
2942  * counter got enabled again:
2943 */
2944 ret = 1;
2945 }
2946 }
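	/*
	 * Worked example (sketch): the condition HZ * hw.interrupts >
	 * sysctl_perf_counter_limit above trips once hw.interrupts exceeds
	 * limit/HZ -- e.g. with HZ == 1000 and a limit of 100000 that is
	 * 100 interrupts, after which the counter is marked MAX_INTERRUPTS
	 * and a throttle event is logged.
	 */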
2947
2948 /*
2949 * XXX event_limit might not quite work as expected on inherited
2950 * counters
2951 */
2952
2953 counter->pending_kill = POLL_IN;
2954 if (events && atomic_dec_and_test(&counter->event_limit)) {
2955 ret = 1;
2956 counter->pending_kill = POLL_HUP;
2957 if (nmi) {
2958 counter->pending_disable = 1;
2959 perf_pending_queue(&counter->pending,
2960 perf_pending_counter);
2961 } else
2962 perf_counter_disable(counter);
2963 }
2964
2965 perf_counter_output(counter, nmi, regs, addr);
2966 return ret;
2967 }
2968
2969 /*
2970 * Generic software counter infrastructure
2971 */
2972
2973 static void perf_swcounter_update(struct perf_counter *counter)
2974 {
2975 struct hw_perf_counter *hwc = &counter->hw;
2976 u64 prev, now;
2977 s64 delta;
2978
2979 again:
2980 prev = atomic64_read(&hwc->prev_count);
2981 now = atomic64_read(&hwc->count);
2982 if (atomic64_cmpxchg(&hwc->prev_count, prev, now) != prev)
2983 goto again;
2984
2985 delta = now - prev;
2986
2987 atomic64_add(delta, &counter->count);
2988 atomic64_sub(delta, &hwc->period_left);
2989 }
2990
2991 static void perf_swcounter_set_period(struct perf_counter *counter)
2992 {
2993 struct hw_perf_counter *hwc = &counter->hw;
2994 s64 left = atomic64_read(&hwc->period_left);
2995 s64 period = hwc->sample_period;
2996
2997 if (unlikely(left <= -period)) {
2998 left = period;
2999 atomic64_set(&hwc->period_left, left);
3000 }
3001
3002 if (unlikely(left <= 0)) {
3003 left += period;
3004 atomic64_add(period, &hwc->period_left);
3005 }
3006
3007 atomic64_set(&hwc->prev_count, -left);
3008 atomic64_set(&hwc->count, -left);
3009 }
3010
3011 static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
3012 {
3013 enum hrtimer_restart ret = HRTIMER_RESTART;
3014 struct perf_counter *counter;
3015 struct pt_regs *regs;
3016 u64 period;
3017
3018 counter = container_of(hrtimer, struct perf_counter, hw.hrtimer);
3019 counter->pmu->read(counter);
3020
3021 regs = get_irq_regs();
3022 /*
3023 * In case we exclude kernel IPs or are somehow not in interrupt
3024 * context, provide the next best thing, the user IP.
3025 */
3026 if ((counter->attr.exclude_kernel || !regs) &&
3027 !counter->attr.exclude_user)
3028 regs = task_pt_regs(current);
3029
3030 if (regs) {
3031 if (perf_counter_overflow(counter, 0, regs, 0))
3032 ret = HRTIMER_NORESTART;
3033 }
3034
3035 period = max_t(u64, 10000, counter->hw.sample_period);
3036 hrtimer_forward_now(hrtimer, ns_to_ktime(period));
3037
3038 return ret;
3039 }
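/*
 * Note: the 10000 above is in nanoseconds, so hrtimer-driven software
 * counters re-arm no faster than every 10 usec (~100 kHz), however small
 * the requested sample_period.
 */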
3040
3041 static void perf_swcounter_overflow(struct perf_counter *counter,
3042 int nmi, struct pt_regs *regs, u64 addr)
3043 {
3044 perf_swcounter_update(counter);
3045 perf_swcounter_set_period(counter);
3046 if (perf_counter_overflow(counter, nmi, regs, addr))
3047 /* soft-disable the counter */
3048 ;
3049
3050 }
3051
3052 static int perf_swcounter_is_counting(struct perf_counter *counter)
3053 {
3054 struct perf_counter_context *ctx;
3055 unsigned long flags;
3056 int count;
3057
3058 if (counter->state == PERF_COUNTER_STATE_ACTIVE)
3059 return 1;
3060
3061 if (counter->state != PERF_COUNTER_STATE_INACTIVE)
3062 return 0;
3063
3064 /*
3065 * If the counter is inactive, it could be just because
3066 * its task is scheduled out, or because it's in a group
3067 * which could not go on the PMU. We want to count in
3068 * the first case but not the second. If the context is
3069 * currently active then an inactive software counter must
3070 * be the second case. If it's not currently active then
3071 * we need to know whether the counter was active when the
3072 * context was last active, which we can determine by
3073 * comparing counter->tstamp_stopped with ctx->time.
3074 *
3075 * We are within an RCU read-side critical section,
3076 * which protects the existence of *ctx.
3077 */
3078 ctx = counter->ctx;
3079 spin_lock_irqsave(&ctx->lock, flags);
3080 count = 1;
3081 /* Re-check state now we have the lock */
3082 if (counter->state < PERF_COUNTER_STATE_INACTIVE ||
3083 counter->ctx->is_active ||
3084 counter->tstamp_stopped < ctx->time)
3085 count = 0;
3086 spin_unlock_irqrestore(&ctx->lock, flags);
3087 return count;
3088 }
3089
3090 static int perf_swcounter_match(struct perf_counter *counter,
3091 enum perf_event_types type,
3092 u32 event, struct pt_regs *regs)
3093 {
3094 if (!perf_swcounter_is_counting(counter))
3095 return 0;
3096
3097 if (counter->attr.type != type)
3098 return 0;
3099 if (counter->attr.config != event)
3100 return 0;
3101
3102 if (regs) {
3103 if (counter->attr.exclude_user && user_mode(regs))
3104 return 0;
3105
3106 if (counter->attr.exclude_kernel && !user_mode(regs))
3107 return 0;
3108 }
3109
3110 return 1;
3111 }
3112
3113 static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
3114 int nmi, struct pt_regs *regs, u64 addr)
3115 {
3116 int neg = atomic64_add_negative(nr, &counter->hw.count);
3117
3118 if (counter->hw.sample_period && !neg && regs)
3119 perf_swcounter_overflow(counter, nmi, regs, addr);
3120 }
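/*
 * Worked example (sketch): with sample_period == 100,
 * perf_swcounter_set_period() parks hw.count at -100; each event adds to
 * it, and atomic64_add_negative() stops returning true exactly when the
 * count crosses zero -- i.e. after 100 events -- which is when
 * perf_swcounter_overflow() fires and re-arms the period.
 */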
3121
3122 static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
3123 enum perf_event_types type, u32 event,
3124 u64 nr, int nmi, struct pt_regs *regs,
3125 u64 addr)
3126 {
3127 struct perf_counter *counter;
3128
3129 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
3130 return;
3131
3132 rcu_read_lock();
3133 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
3134 if (perf_swcounter_match(counter, type, event, regs))
3135 perf_swcounter_add(counter, nr, nmi, regs, addr);
3136 }
3137 rcu_read_unlock();
3138 }
3139
3140 static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx)
3141 {
3142 if (in_nmi())
3143 return &cpuctx->recursion[3];
3144
3145 if (in_irq())
3146 return &cpuctx->recursion[2];
3147
3148 if (in_softirq())
3149 return &cpuctx->recursion[1];
3150
3151 return &cpuctx->recursion[0];
3152 }
3153
3154 static void __perf_swcounter_event(enum perf_event_types type, u32 event,
3155 u64 nr, int nmi, struct pt_regs *regs,
3156 u64 addr)
3157 {
3158 struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
3159 int *recursion = perf_swcounter_recursion_context(cpuctx);
3160 struct perf_counter_context *ctx;
3161
3162 if (*recursion)
3163 goto out;
3164
3165 (*recursion)++;
3166 barrier();
3167
3168 perf_swcounter_ctx_event(&cpuctx->ctx, type, event,
3169 nr, nmi, regs, addr);
3170 rcu_read_lock();
3171 /*
3172 * doesn't really matter which of the child contexts the
3173  * event ends up in.
3174 */
3175 ctx = rcu_dereference(current->perf_counter_ctxp);
3176 if (ctx)
3177 perf_swcounter_ctx_event(ctx, type, event, nr, nmi, regs, addr);
3178 rcu_read_unlock();
3179
3180 barrier();
3181 (*recursion)--;
3182
3183 out:
3184 put_cpu_var(perf_cpu_context);
3185 }
3186
3187 void
3188 perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
3189 {
3190 __perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi, regs, addr);
3191 }
3192
3193 static void perf_swcounter_read(struct perf_counter *counter)
3194 {
3195 perf_swcounter_update(counter);
3196 }
3197
3198 static int perf_swcounter_enable(struct perf_counter *counter)
3199 {
3200 perf_swcounter_set_period(counter);
3201 return 0;
3202 }
3203
3204 static void perf_swcounter_disable(struct perf_counter *counter)
3205 {
3206 perf_swcounter_update(counter);
3207 }
3208
3209 static const struct pmu perf_ops_generic = {
3210 .enable = perf_swcounter_enable,
3211 .disable = perf_swcounter_disable,
3212 .read = perf_swcounter_read,
3213 };
3214
3215 /*
3216 * Software counter: cpu wall time clock
3217 */
3218
3219 static void cpu_clock_perf_counter_update(struct perf_counter *counter)
3220 {
3221 int cpu = raw_smp_processor_id();
3222 s64 prev;
3223 u64 now;
3224
3225 now = cpu_clock(cpu);
3226 prev = atomic64_read(&counter->hw.prev_count);
3227 atomic64_set(&counter->hw.prev_count, now);
3228 atomic64_add(now - prev, &counter->count);
3229 }
3230
3231 static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
3232 {
3233 struct hw_perf_counter *hwc = &counter->hw;
3234 int cpu = raw_smp_processor_id();
3235
3236 atomic64_set(&hwc->prev_count, cpu_clock(cpu));
3237 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3238 hwc->hrtimer.function = perf_swcounter_hrtimer;
3239 if (hwc->sample_period) {
3240 u64 period = max_t(u64, 10000, hwc->sample_period);
3241 __hrtimer_start_range_ns(&hwc->hrtimer,
3242 ns_to_ktime(period), 0,
3243 HRTIMER_MODE_REL, 0);
3244 }
3245
3246 return 0;
3247 }
3248
3249 static void cpu_clock_perf_counter_disable(struct perf_counter *counter)
3250 {
3251 if (counter->hw.sample_period)
3252 hrtimer_cancel(&counter->hw.hrtimer);
3253 cpu_clock_perf_counter_update(counter);
3254 }
3255
3256 static void cpu_clock_perf_counter_read(struct perf_counter *counter)
3257 {
3258 cpu_clock_perf_counter_update(counter);
3259 }
3260
3261 static const struct pmu perf_ops_cpu_clock = {
3262 .enable = cpu_clock_perf_counter_enable,
3263 .disable = cpu_clock_perf_counter_disable,
3264 .read = cpu_clock_perf_counter_read,
3265 };
3266
3267 /*
3268 * Software counter: task time clock
3269 */
3270
3271 static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now)
3272 {
3273 u64 prev;
3274 s64 delta;
3275
3276 prev = atomic64_xchg(&counter->hw.prev_count, now);
3277 delta = now - prev;
3278 atomic64_add(delta, &counter->count);
3279 }
3280
3281 static int task_clock_perf_counter_enable(struct perf_counter *counter)
3282 {
3283 struct hw_perf_counter *hwc = &counter->hw;
3284 u64 now;
3285
3286 now = counter->ctx->time;
3287
3288 atomic64_set(&hwc->prev_count, now);
3289 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3290 hwc->hrtimer.function = perf_swcounter_hrtimer;
3291 if (hwc->sample_period) {
3292 u64 period = max_t(u64, 10000, hwc->sample_period);
3293 __hrtimer_start_range_ns(&hwc->hrtimer,
3294 ns_to_ktime(period), 0,
3295 HRTIMER_MODE_REL, 0);
3296 }
3297
3298 return 0;
3299 }
3300
3301 static void task_clock_perf_counter_disable(struct perf_counter *counter)
3302 {
3303 if (counter->hw.sample_period)
3304 hrtimer_cancel(&counter->hw.hrtimer);
3305 task_clock_perf_counter_update(counter, counter->ctx->time);
3306
3307 }
3308
3309 static void task_clock_perf_counter_read(struct perf_counter *counter)
3310 {
3311 u64 time;
3312
3313 if (!in_nmi()) {
3314 update_context_time(counter->ctx);
3315 time = counter->ctx->time;
3316 } else {
3317 u64 now = perf_clock();
3318 u64 delta = now - counter->ctx->timestamp;
3319 time = counter->ctx->time + delta;
3320 }
3321
3322 task_clock_perf_counter_update(counter, time);
3323 }
3324
3325 static const struct pmu perf_ops_task_clock = {
3326 .enable = task_clock_perf_counter_enable,
3327 .disable = task_clock_perf_counter_disable,
3328 .read = task_clock_perf_counter_read,
3329 };
3330
3331 /*
3332 * Software counter: cpu migrations
3333 */
3334 void perf_counter_task_migration(struct task_struct *task, int cpu)
3335 {
3336 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
3337 struct perf_counter_context *ctx;
3338
3339 perf_swcounter_ctx_event(&cpuctx->ctx, PERF_TYPE_SOFTWARE,
3340 PERF_COUNT_CPU_MIGRATIONS,
3341 1, 1, NULL, 0);
3342
3343 ctx = perf_pin_task_context(task);
3344 if (ctx) {
3345 perf_swcounter_ctx_event(ctx, PERF_TYPE_SOFTWARE,
3346 PERF_COUNT_CPU_MIGRATIONS,
3347 1, 1, NULL, 0);
3348 perf_unpin_context(ctx);
3349 }
3350 }
3351
3352 #ifdef CONFIG_EVENT_PROFILE
3353 void perf_tpcounter_event(int event_id)
3354 {
3355 struct pt_regs *regs = get_irq_regs();
3356
3357 if (!regs)
3358 regs = task_pt_regs(current);
3359
3360 __perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, regs, 0);
3361 }
3362 EXPORT_SYMBOL_GPL(perf_tpcounter_event);
3363
3364 extern int ftrace_profile_enable(int);
3365 extern void ftrace_profile_disable(int);
3366
3367 static void tp_perf_counter_destroy(struct perf_counter *counter)
3368 {
3369 ftrace_profile_disable(perf_event_id(&counter->attr));
3370 }
3371
3372 static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
3373 {
3374 int event_id = perf_event_id(&counter->attr);
3375 int ret;
3376
3377 ret = ftrace_profile_enable(event_id);
3378 if (ret)
3379 return NULL;
3380
3381 counter->destroy = tp_perf_counter_destroy;
3382 counter->hw.sample_period = counter->attr.sample_period;
3383
3384 return &perf_ops_generic;
3385 }
3386 #else
3387 static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
3388 {
3389 return NULL;
3390 }
3391 #endif
3392
3393 static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
3394 {
3395 const struct pmu *pmu = NULL;
3396
3397 /*
3398 * Software counters (currently) can't in general distinguish
3399 * between user, kernel and hypervisor events.
3400 * However, context switches and cpu migrations are considered
3401 * to be kernel events, and page faults are never hypervisor
3402 * events.
3403 */
3404 switch (counter->attr.config) {
3405 case PERF_COUNT_CPU_CLOCK:
3406 pmu = &perf_ops_cpu_clock;
3407
3408 break;
3409 case PERF_COUNT_TASK_CLOCK:
3410 /*
3411 * If the user instantiates this as a per-cpu counter,
3412 * use the cpu_clock counter instead.
3413 */
3414 if (counter->ctx->task)
3415 pmu = &perf_ops_task_clock;
3416 else
3417 pmu = &perf_ops_cpu_clock;
3418
3419 break;
3420 case PERF_COUNT_PAGE_FAULTS:
3421 case PERF_COUNT_PAGE_FAULTS_MIN:
3422 case PERF_COUNT_PAGE_FAULTS_MAJ:
3423 case PERF_COUNT_CONTEXT_SWITCHES:
3424 case PERF_COUNT_CPU_MIGRATIONS:
3425 pmu = &perf_ops_generic;
3426 break;
3427 }
3428
3429 return pmu;
3430 }
3431
3432 /*
3433 * Allocate and initialize a counter structure
3434 */
3435 static struct perf_counter *
3436 perf_counter_alloc(struct perf_counter_attr *attr,
3437 int cpu,
3438 struct perf_counter_context *ctx,
3439 struct perf_counter *group_leader,
3440 gfp_t gfpflags)
3441 {
3442 const struct pmu *pmu;
3443 struct perf_counter *counter;
3444 struct hw_perf_counter *hwc;
3445 long err;
3446
3447 counter = kzalloc(sizeof(*counter), gfpflags);
3448 if (!counter)
3449 return ERR_PTR(-ENOMEM);
3450
3451 /*
3452 * Single counters are their own group leaders, with an
3453 * empty sibling list:
3454 */
3455 if (!group_leader)
3456 group_leader = counter;
3457
3458 mutex_init(&counter->child_mutex);
3459 INIT_LIST_HEAD(&counter->child_list);
3460
3461 INIT_LIST_HEAD(&counter->list_entry);
3462 INIT_LIST_HEAD(&counter->event_entry);
3463 INIT_LIST_HEAD(&counter->sibling_list);
3464 init_waitqueue_head(&counter->waitq);
3465
3466 mutex_init(&counter->mmap_mutex);
3467
3468 counter->cpu = cpu;
3469 counter->attr = *attr;
3470 counter->group_leader = group_leader;
3471 counter->pmu = NULL;
3472 counter->ctx = ctx;
3473 counter->oncpu = -1;
3474
3475 counter->ns = get_pid_ns(current->nsproxy->pid_ns);
3476 counter->id = atomic64_inc_return(&perf_counter_id);
3477
3478 counter->state = PERF_COUNTER_STATE_INACTIVE;
3479
3480 if (attr->disabled)
3481 counter->state = PERF_COUNTER_STATE_OFF;
3482
3483 pmu = NULL;
3484
3485 hwc = &counter->hw;
3486 if (attr->freq && attr->sample_freq)
3487 hwc->sample_period = div64_u64(TICK_NSEC, attr->sample_freq);
3488 else
3489 hwc->sample_period = attr->sample_period;
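	/*
	 * Example (sketch): with HZ == 1000 (TICK_NSEC ~= 10^6 ns) and
	 * attr->sample_freq == 1000, the starting sample_period is ~1000.
	 * This is only an initial estimate; with attr->freq set, the period
	 * is expected to be re-tuned at runtime.
	 */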
3490
3491 /*
3492 * we currently do not support PERF_SAMPLE_GROUP on inherited counters
3493 */
3494 if (attr->inherit && (attr->sample_type & PERF_SAMPLE_GROUP))
3495 goto done;
3496
3497 if (attr->type == PERF_TYPE_RAW) {
3498 pmu = hw_perf_counter_init(counter);
3499 goto done;
3500 }
3501
3502 switch (attr->type) {
3503 case PERF_TYPE_HARDWARE:
3504 case PERF_TYPE_HW_CACHE:
3505 pmu = hw_perf_counter_init(counter);
3506 break;
3507
3508 case PERF_TYPE_SOFTWARE:
3509 pmu = sw_perf_counter_init(counter);
3510 break;
3511
3512 case PERF_TYPE_TRACEPOINT:
3513 pmu = tp_perf_counter_init(counter);
3514 break;
3515 }
3516 done:
3517 err = 0;
3518 if (!pmu)
3519 err = -EINVAL;
3520 else if (IS_ERR(pmu))
3521 err = PTR_ERR(pmu);
3522
3523 if (err) {
3524 if (counter->ns)
3525 put_pid_ns(counter->ns);
3526 kfree(counter);
3527 return ERR_PTR(err);
3528 }
3529
3530 counter->pmu = pmu;
3531
3532 atomic_inc(&nr_counters);
3533 if (counter->attr.mmap)
3534 atomic_inc(&nr_mmap_counters);
3535 if (counter->attr.comm)
3536 atomic_inc(&nr_comm_counters);
3537
3538 return counter;
3539 }
3540
3541 /**
3542 * sys_perf_counter_open - open a performance counter, associate it to a task/cpu
3543 *
3544 * @attr_uptr: event type attributes for monitoring/sampling
3545 * @pid: target pid
3546 * @cpu: target cpu
3547 * @group_fd: group leader counter fd
3548 */
3549 SYSCALL_DEFINE5(perf_counter_open,
3550 const struct perf_counter_attr __user *, attr_uptr,
3551 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
3552 {
3553 struct perf_counter *counter, *group_leader;
3554 struct perf_counter_attr attr;
3555 struct perf_counter_context *ctx;
3556 struct file *counter_file = NULL;
3557 struct file *group_file = NULL;
3558 int fput_needed = 0;
3559 int fput_needed2 = 0;
3560 int ret;
3561
3562 /* for future expandability... */
3563 if (flags)
3564 return -EINVAL;
3565
3566 if (copy_from_user(&attr, attr_uptr, sizeof(attr)) != 0)
3567 return -EFAULT;
3568
3569 /*
3570 * Get the target context (task or percpu):
3571 */
3572 ctx = find_get_context(pid, cpu);
3573 if (IS_ERR(ctx))
3574 return PTR_ERR(ctx);
3575
3576 /*
3577 * Look up the group leader (we will attach this counter to it):
3578 */
3579 group_leader = NULL;
3580 if (group_fd != -1) {
3581 ret = -EINVAL;
3582 group_file = fget_light(group_fd, &fput_needed);
3583 if (!group_file)
3584 goto err_put_context;
3585 if (group_file->f_op != &perf_fops)
3586 goto err_put_context;
3587
3588 group_leader = group_file->private_data;
3589 /*
3590 * Do not allow a recursive hierarchy (this new sibling
3591 * becoming part of another group-sibling):
3592 */
3593 if (group_leader->group_leader != group_leader)
3594 goto err_put_context;
3595 /*
3596 * Do not allow to attach to a group in a different
3597 * task or CPU context:
3598 */
3599 if (group_leader->ctx != ctx)
3600 goto err_put_context;
3601 /*
3602 * Only a group leader can be exclusive or pinned
3603 */
3604 if (attr.exclusive || attr.pinned)
3605 goto err_put_context;
3606 }
3607
3608 counter = perf_counter_alloc(&attr, cpu, ctx, group_leader,
3609 GFP_KERNEL);
3610 ret = PTR_ERR(counter);
3611 if (IS_ERR(counter))
3612 goto err_put_context;
3613
3614 ret = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0);
3615 if (ret < 0)
3616 goto err_free_put_context;
3617
3618 counter_file = fget_light(ret, &fput_needed2);
3619 if (!counter_file)
3620 goto err_free_put_context;
3621
3622 counter->filp = counter_file;
3623 WARN_ON_ONCE(ctx->parent_ctx);
3624 mutex_lock(&ctx->mutex);
3625 perf_install_in_context(ctx, counter, cpu);
3626 ++ctx->generation;
3627 mutex_unlock(&ctx->mutex);
3628
3629 counter->owner = current;
3630 get_task_struct(current);
3631 mutex_lock(&current->perf_counter_mutex);
3632 list_add_tail(&counter->owner_entry, &current->perf_counter_list);
3633 mutex_unlock(&current->perf_counter_mutex);
3634
3635 fput_light(counter_file, fput_needed2);
3636
3637 out_fput:
3638 fput_light(group_file, fput_needed);
3639
3640 return ret;
3641
3642 err_free_put_context:
3643 kfree(counter);
3644
3645 err_put_context:
3646 put_ctx(ctx);
3647
3648 goto out_fput;
3649 }
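/*
 * Illustrative usage from user space (sketch, not part of this file;
 * assumes a thin wrapper around this syscall named perf_counter_open()):
 *
 *   struct perf_counter_attr attr = {
 *       .type          = PERF_TYPE_SOFTWARE,
 *       .config        = PERF_COUNT_CPU_CLOCK,
 *       .sample_period = 1000000,
 *   };
 *   int fd = perf_counter_open(&attr, 0, -1, -1, 0);
 *   // pid 0 = current task, cpu -1 = any cpu, group_fd -1 = no group,
 *   // flags 0; read() the fd for counts or mmap() it for samples.
 */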
3650
3651 /*
3652 * inherit a counter from parent task to child task:
3653 */
3654 static struct perf_counter *
3655 inherit_counter(struct perf_counter *parent_counter,
3656 struct task_struct *parent,
3657 struct perf_counter_context *parent_ctx,
3658 struct task_struct *child,
3659 struct perf_counter *group_leader,
3660 struct perf_counter_context *child_ctx)
3661 {
3662 struct perf_counter *child_counter;
3663
3664 /*
3665 * Instead of creating recursive hierarchies of counters,
3666 * we link inherited counters back to the original parent,
3667 * which has a filp for sure, which we use as the reference
3668 * count:
3669 */
3670 if (parent_counter->parent)
3671 parent_counter = parent_counter->parent;
3672
3673 child_counter = perf_counter_alloc(&parent_counter->attr,
3674 parent_counter->cpu, child_ctx,
3675 group_leader, GFP_KERNEL);
3676 if (IS_ERR(child_counter))
3677 return child_counter;
3678 get_ctx(child_ctx);
3679
3680 /*
3681 * Make the child state follow the state of the parent counter,
3682 * not its attr.disabled bit. We hold the parent's mutex,
3683 * so we won't race with perf_counter_{en, dis}able_family.
3684 */
3685 if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE)
3686 child_counter->state = PERF_COUNTER_STATE_INACTIVE;
3687 else
3688 child_counter->state = PERF_COUNTER_STATE_OFF;
3689
3690 /*
3691 * Link it up in the child's context:
3692 */
3693 add_counter_to_ctx(child_counter, child_ctx);
3694
3695 child_counter->parent = parent_counter;
3696 /*
3697 * inherit into child's child as well:
3698 */
3699 child_counter->attr.inherit = 1;
3700
3701 /*
3702 * Get a reference to the parent filp - we will fput it
3703 * when the child counter exits. This is safe to do because
3704 * we are in the parent and we know that the filp still
3705 * exists and has a nonzero count:
3706 */
3707 atomic_long_inc(&parent_counter->filp->f_count);
3708
3709 /*
3710 * Link this into the parent counter's child list
3711 */
3712 WARN_ON_ONCE(parent_counter->ctx->parent_ctx);
3713 mutex_lock(&parent_counter->child_mutex);
3714 list_add_tail(&child_counter->child_list, &parent_counter->child_list);
3715 mutex_unlock(&parent_counter->child_mutex);
3716
3717 return child_counter;
3718 }
3719
3720 static int inherit_group(struct perf_counter *parent_counter,
3721 struct task_struct *parent,
3722 struct perf_counter_context *parent_ctx,
3723 struct task_struct *child,
3724 struct perf_counter_context *child_ctx)
3725 {
3726 struct perf_counter *leader;
3727 struct perf_counter *sub;
3728 struct perf_counter *child_ctr;
3729
3730 leader = inherit_counter(parent_counter, parent, parent_ctx,
3731 child, NULL, child_ctx);
3732 if (IS_ERR(leader))
3733 return PTR_ERR(leader);
3734 list_for_each_entry(sub, &parent_counter->sibling_list, list_entry) {
3735 child_ctr = inherit_counter(sub, parent, parent_ctx,
3736 child, leader, child_ctx);
3737 if (IS_ERR(child_ctr))
3738 return PTR_ERR(child_ctr);
3739 }
3740 return 0;
3741 }
3742
3743 static void sync_child_counter(struct perf_counter *child_counter,
3744 struct perf_counter *parent_counter)
3745 {
3746 u64 child_val;
3747
3748 child_val = atomic64_read(&child_counter->count);
3749
3750 /*
3751 * Add back the child's count to the parent's count:
3752 */
3753 atomic64_add(child_val, &parent_counter->count);
3754 atomic64_add(child_counter->total_time_enabled,
3755 &parent_counter->child_total_time_enabled);
3756 atomic64_add(child_counter->total_time_running,
3757 &parent_counter->child_total_time_running);
3758
3759 /*
3760 * Remove this counter from the parent's list
3761 */
3762 WARN_ON_ONCE(parent_counter->ctx->parent_ctx);
3763 mutex_lock(&parent_counter->child_mutex);
3764 list_del_init(&child_counter->child_list);
3765 mutex_unlock(&parent_counter->child_mutex);
3766
3767 /*
3768 * Release the parent counter, if this was the last
3769 * reference to it.
3770 */
3771 fput(parent_counter->filp);
3772 }
3773
3774 static void
3775 __perf_counter_exit_task(struct perf_counter *child_counter,
3776 struct perf_counter_context *child_ctx)
3777 {
3778 struct perf_counter *parent_counter;
3779
3780 update_counter_times(child_counter);
3781 perf_counter_remove_from_context(child_counter);
3782
3783 parent_counter = child_counter->parent;
3784 /*
3785 * It can happen that parent exits first, and has counters
3786 * that are still around due to the child reference. These
3787 * counters need to be zapped - but otherwise linger.
3788 */
3789 if (parent_counter) {
3790 sync_child_counter(child_counter, parent_counter);
3791 free_counter(child_counter);
3792 }
3793 }
3794
3795 /*
3796 * When a child task exits, feed back counter values to parent counters.
3797 */
3798 void perf_counter_exit_task(struct task_struct *child)
3799 {
3800 struct perf_counter *child_counter, *tmp;
3801 struct perf_counter_context *child_ctx;
3802 unsigned long flags;
3803
3804 if (likely(!child->perf_counter_ctxp))
3805 return;
3806
3807 local_irq_save(flags);
3808 /*
3809 * We can't reschedule here because interrupts are disabled,
3810 * and either child is current or it is a task that can't be
3811 * scheduled, so we are now safe from rescheduling changing
3812 * our context.
3813 */
3814 child_ctx = child->perf_counter_ctxp;
3815 __perf_counter_task_sched_out(child_ctx);
3816
3817 /*
3818 * Take the context lock here so that if find_get_context is
3819 * reading child->perf_counter_ctxp, we wait until it has
3820 * incremented the context's refcount before we do put_ctx below.
3821 */
3822 spin_lock(&child_ctx->lock);
3823 child->perf_counter_ctxp = NULL;
3824 if (child_ctx->parent_ctx) {
3825 /*
3826 * This context is a clone; unclone it so it can't get
3827 * swapped to another process while we're removing all
3828 * the counters from it.
3829 */
3830 put_ctx(child_ctx->parent_ctx);
3831 child_ctx->parent_ctx = NULL;
3832 }
3833 spin_unlock(&child_ctx->lock);
3834 local_irq_restore(flags);
3835
3836 mutex_lock(&child_ctx->mutex);
3837
3838 again:
3839 list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list,
3840 list_entry)
3841 __perf_counter_exit_task(child_counter, child_ctx);
3842
3843 /*
3844 * If the last counter was a group counter, it will have appended all
3845 * its siblings to the list, but we obtained 'tmp' before that which
3846 * will still point to the list head terminating the iteration.
3847 */
3848 if (!list_empty(&child_ctx->counter_list))
3849 goto again;
3850
3851 mutex_unlock(&child_ctx->mutex);
3852
3853 put_ctx(child_ctx);
3854 }
3855
3856 /*
3857  * Free an unexposed, unused context, as created by inheritance by
3858  * perf_counter_init_task() below; used by fork() in case of failure.
3859 */
3860 void perf_counter_free_task(struct task_struct *task)
3861 {
3862 struct perf_counter_context *ctx = task->perf_counter_ctxp;
3863 struct perf_counter *counter, *tmp;
3864
3865 if (!ctx)
3866 return;
3867
3868 mutex_lock(&ctx->mutex);
3869 again:
3870 list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry) {
3871 struct perf_counter *parent = counter->parent;
3872
3873 if (WARN_ON_ONCE(!parent))
3874 continue;
3875
3876 mutex_lock(&parent->child_mutex);
3877 list_del_init(&counter->child_list);
3878 mutex_unlock(&parent->child_mutex);
3879
3880 fput(parent->filp);
3881
3882 list_del_counter(counter, ctx);
3883 free_counter(counter);
3884 }
3885
3886 if (!list_empty(&ctx->counter_list))
3887 goto again;
3888
3889 mutex_unlock(&ctx->mutex);
3890
3891 put_ctx(ctx);
3892 }
3893
3894 /*
3895 * Initialize the perf_counter context in task_struct
3896 */
3897 int perf_counter_init_task(struct task_struct *child)
3898 {
3899 struct perf_counter_context *child_ctx, *parent_ctx;
3900 struct perf_counter_context *cloned_ctx;
3901 struct perf_counter *counter;
3902 struct task_struct *parent = current;
3903 int inherited_all = 1;
3904 int ret = 0;
3905
3906 child->perf_counter_ctxp = NULL;
3907
3908 mutex_init(&child->perf_counter_mutex);
3909 INIT_LIST_HEAD(&child->perf_counter_list);
3910
3911 if (likely(!parent->perf_counter_ctxp))
3912 return 0;
3913
3914 /*
3915 * This is executed from the parent task context, so inherit
3916 * counters that have been marked for cloning.
3917 * First allocate and initialize a context for the child.
3918 */
3919
3920 child_ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL);
3921 if (!child_ctx)
3922 return -ENOMEM;
3923
3924 __perf_counter_init_context(child_ctx, child);
3925 child->perf_counter_ctxp = child_ctx;
3926 get_task_struct(child);
3927
3928 /*
3929 * If the parent's context is a clone, pin it so it won't get
3930 * swapped under us.
3931 */
3932 parent_ctx = perf_pin_task_context(parent);
3933
3934 /*
3935 * No need to check if parent_ctx != NULL here; since we saw
3936 * it non-NULL earlier, the only reason for it to become NULL
3937 * is if we exit, and since we're currently in the middle of
3938 * a fork we can't be exiting at the same time.
3939 */
3940
3941 /*
3942 * Lock the parent list. No need to lock the child - not PID
3943 * hashed yet and not running, so nobody can access it.
3944 */
3945 mutex_lock(&parent_ctx->mutex);
3946
3947 /*
3948  * We don't have to disable NMIs - we are only looking at
3949 * the list, not manipulating it:
3950 */
3951 list_for_each_entry_rcu(counter, &parent_ctx->event_list, event_entry) {
3952 if (counter != counter->group_leader)
3953 continue;
3954
3955 if (!counter->attr.inherit) {
3956 inherited_all = 0;
3957 continue;
3958 }
3959
3960 ret = inherit_group(counter, parent, parent_ctx,
3961 child, child_ctx);
3962 if (ret) {
3963 inherited_all = 0;
3964 break;
3965 }
3966 }
3967
3968 if (inherited_all) {
3969 /*
3970 * Mark the child context as a clone of the parent
3971 * context, or of whatever the parent is a clone of.
3972 * Note that if the parent is a clone, it could get
3973 * uncloned at any point, but that doesn't matter
3974 * because the list of counters and the generation
3975 * count can't have changed since we took the mutex.
3976 */
3977 cloned_ctx = rcu_dereference(parent_ctx->parent_ctx);
3978 if (cloned_ctx) {
3979 child_ctx->parent_ctx = cloned_ctx;
3980 child_ctx->parent_gen = parent_ctx->parent_gen;
3981 } else {
3982 child_ctx->parent_ctx = parent_ctx;
3983 child_ctx->parent_gen = parent_ctx->generation;
3984 }
3985 get_ctx(child_ctx->parent_ctx);
3986 }
3987
3988 mutex_unlock(&parent_ctx->mutex);
3989
3990 perf_unpin_context(parent_ctx);
3991
3992 return ret;
3993 }
3994
3995 static void __cpuinit perf_counter_init_cpu(int cpu)
3996 {
3997 struct perf_cpu_context *cpuctx;
3998
3999 cpuctx = &per_cpu(perf_cpu_context, cpu);
4000 __perf_counter_init_context(&cpuctx->ctx, NULL);
4001
4002 spin_lock(&perf_resource_lock);
4003 cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu;
4004 spin_unlock(&perf_resource_lock);
4005
4006 hw_perf_counter_setup(cpu);
4007 }
4008
4009 #ifdef CONFIG_HOTPLUG_CPU
4010 static void __perf_counter_exit_cpu(void *info)
4011 {
4012 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
4013 struct perf_counter_context *ctx = &cpuctx->ctx;
4014 struct perf_counter *counter, *tmp;
4015
4016 list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry)
4017 __perf_counter_remove_from_context(counter);
4018 }
4019 static void perf_counter_exit_cpu(int cpu)
4020 {
4021 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
4022 struct perf_counter_context *ctx = &cpuctx->ctx;
4023
4024 mutex_lock(&ctx->mutex);
4025 smp_call_function_single(cpu, __perf_counter_exit_cpu, NULL, 1);
4026 mutex_unlock(&ctx->mutex);
4027 }
4028 #else
4029 static inline void perf_counter_exit_cpu(int cpu) { }
4030 #endif
4031
4032 static int __cpuinit
4033 perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
4034 {
4035 unsigned int cpu = (long)hcpu;
4036
4037 switch (action) {
4038
4039 case CPU_UP_PREPARE:
4040 case CPU_UP_PREPARE_FROZEN:
4041 perf_counter_init_cpu(cpu);
4042 break;
4043
4044 case CPU_DOWN_PREPARE:
4045 case CPU_DOWN_PREPARE_FROZEN:
4046 perf_counter_exit_cpu(cpu);
4047 break;
4048
4049 default:
4050 break;
4051 }
4052
4053 return NOTIFY_OK;
4054 }
4055
4056 /*
4057 * This has to have a higher priority than migration_notifier in sched.c.
4058 */
4059 static struct notifier_block __cpuinitdata perf_cpu_nb = {
4060 .notifier_call = perf_cpu_notify,
4061 .priority = 20,
4062 };
4063
4064 void __init perf_counter_init(void)
4065 {
4066 perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
4067 (void *)(long)smp_processor_id());
4068 register_cpu_notifier(&perf_cpu_nb);
4069 }
4070
4071 static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf)
4072 {
4073 return sprintf(buf, "%d\n", perf_reserved_percpu);
4074 }
4075
4076 static ssize_t
4077 perf_set_reserve_percpu(struct sysdev_class *class,
4078 const char *buf,
4079 size_t count)
4080 {
4081 struct perf_cpu_context *cpuctx;
4082 unsigned long val;
4083 int err, cpu, mpt;
4084
4085 err = strict_strtoul(buf, 10, &val);
4086 if (err)
4087 return err;
4088 if (val > perf_max_counters)
4089 return -EINVAL;
4090
4091 spin_lock(&perf_resource_lock);
4092 perf_reserved_percpu = val;
4093 for_each_online_cpu(cpu) {
4094 cpuctx = &per_cpu(perf_cpu_context, cpu);
4095 spin_lock_irq(&cpuctx->ctx.lock);
4096 mpt = min(perf_max_counters - cpuctx->ctx.nr_counters,
4097 perf_max_counters - perf_reserved_percpu);
4098 cpuctx->max_pertask = mpt;
4099 spin_unlock_irq(&cpuctx->ctx.lock);
4100 }
4101 spin_unlock(&perf_resource_lock);
4102
4103 return count;
4104 }
4105
4106 static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf)
4107 {
4108 return sprintf(buf, "%d\n", perf_overcommit);
4109 }
4110
4111 static ssize_t
4112 perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count)
4113 {
4114 unsigned long val;
4115 int err;
4116
4117 err = strict_strtoul(buf, 10, &val);
4118 if (err)
4119 return err;
4120 if (val > 1)
4121 return -EINVAL;
4122
4123 spin_lock(&perf_resource_lock);
4124 perf_overcommit = val;
4125 spin_unlock(&perf_resource_lock);
4126
4127 return count;
4128 }
4129
4130 static SYSDEV_CLASS_ATTR(
4131 reserve_percpu,
4132 0644,
4133 perf_show_reserve_percpu,
4134 perf_set_reserve_percpu
4135 );
4136
4137 static SYSDEV_CLASS_ATTR(
4138 overcommit,
4139 0644,
4140 perf_show_overcommit,
4141 perf_set_overcommit
4142 );
4143
4144 static struct attribute *perfclass_attrs[] = {
4145 &attr_reserve_percpu.attr,
4146 &attr_overcommit.attr,
4147 NULL
4148 };
4149
4150 static struct attribute_group perfclass_attr_group = {
4151 .attrs = perfclass_attrs,
4152 .name = "perf_counters",
4153 };
4154
4155 static int __init perf_counter_sysfs_init(void)
4156 {
4157 return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
4158 &perfclass_attr_group);
4159 }
4160 device_initcall(perf_counter_sysfs_init);