/*
 * Performance events callchain code, extracted from core.c:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/slab.h>
#include "internal.h"

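/*
 * One bundle of callchain buffers for the whole machine: an RCU head
 * for deferred freeing plus a flexible array holding one buffer
 * pointer per possible CPU.
 */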
struct callchain_cpus_entries {
	struct rcu_head			rcu_head;
	struct perf_callchain_entry	*cpu_entries[0];
};

int sysctl_perf_event_max_stack __read_mostly = PERF_MAX_STACK_DEPTH;
int sysctl_perf_event_max_contexts_per_stack __read_mostly = PERF_MAX_CONTEXTS_PER_STACK;

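/*
 * Size of one callchain buffer: the entry header plus one __u64 slot
 * for each stack frame and each context marker. With the default
 * limits (PERF_MAX_STACK_DEPTH = 127, PERF_MAX_CONTEXTS_PER_STACK = 8)
 * that is 135 slots after the header.
 */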
static inline size_t perf_callchain_entry__sizeof(void)
{
	return (sizeof(struct perf_callchain_entry) +
		sizeof(__u64) * (sysctl_perf_event_max_stack +
				 sysctl_perf_event_max_contexts_per_stack));
}

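/*
 * Callchains can be sampled from NMI context, so plain locking is not
 * an option on the fast path. Each CPU instead keeps one recursion
 * counter per context level (task, softirq, hardirq, NMI) to detect
 * reentrancy, and the buffer pointer is published via RCU.
 */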
static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
static atomic_t nr_callchain_events;
static DEFINE_MUTEX(callchain_mutex);
static struct callchain_cpus_entries *callchain_cpus_entries;

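/*
 * Weak stubs: architectures that support callchain sampling override
 * these with their real unwinders; everywhere else they are no-ops.
 */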
__weak void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
				  struct pt_regs *regs)
{
}

__weak void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
				struct pt_regs *regs)
{
}

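/*
 * RCU callback: by the time this runs, no CPU can still hold a
 * reference to the old buffers, so free each per-CPU buffer and then
 * the containing structure itself.
 */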
static void release_callchain_buffers_rcu(struct rcu_head *head)
{
	struct callchain_cpus_entries *entries;
	int cpu;

	entries = container_of(head, struct callchain_cpus_entries, rcu_head);

	for_each_possible_cpu(cpu)
		kfree(entries->cpu_entries[cpu]);

	kfree(entries);
}

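/*
 * Unpublish the buffers first, then defer the actual freeing to an RCU
 * grace period, so a concurrent (e.g. NMI) reader that already
 * dereferenced the pointer can finish safely.
 */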
static void release_callchain_buffers(void)
{
	struct callchain_cpus_entries *entries;

	entries = callchain_cpus_entries;
	RCU_INIT_POINTER(callchain_cpus_entries, NULL);
	call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
}

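/*
 * Allocate the pointer array sized for every possible CPU, then one
 * buffer per CPU (PERF_NR_CONTEXTS entries each) on that CPU's NUMA
 * node. On any failure, everything allocated so far is freed again.
 */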
static int alloc_callchain_buffers(void)
{
	int cpu;
	int size;
	struct callchain_cpus_entries *entries;

	/*
	 * We can't use the percpu allocation API for data that can be
	 * accessed from NMI. Use a temporary manual per cpu allocation
	 * until that gets sorted out.
	 */
	size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);

	entries = kzalloc(size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	size = perf_callchain_entry__sizeof() * PERF_NR_CONTEXTS;

	for_each_possible_cpu(cpu) {
		entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
							 cpu_to_node(cpu));
		if (!entries->cpu_entries[cpu])
			goto fail;
	}

	rcu_assign_pointer(callchain_cpus_entries, entries);

	return 0;

fail:
	for_each_possible_cpu(cpu)
		kfree(entries->cpu_entries[cpu]);
	kfree(entries);

	return -ENOMEM;
}

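/*
 * Take a reference on the callchain buffers. The first user allocates
 * them; later users just bump the count. On failure the count is
 * dropped again so a later attempt can retry the allocation.
 */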
int get_callchain_buffers(void)
{
	int err = 0;
	int count;

	mutex_lock(&callchain_mutex);

	count = atomic_inc_return(&nr_callchain_events);
	if (WARN_ON_ONCE(count < 1)) {
		err = -EINVAL;
		goto exit;
	}

	if (count > 1) {
		/* If the allocation failed, give up */
		if (!callchain_cpus_entries)
			err = -ENOMEM;
		goto exit;
	}

	err = alloc_callchain_buffers();
exit:
	if (err)
		atomic_dec(&nr_callchain_events);

	mutex_unlock(&callchain_mutex);

	return err;
}

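/*
 * Drop a reference; the last user releases the buffers.
 * atomic_dec_and_mutex_lock() only takes the mutex when the count hits
 * zero, which serializes against a concurrent get_callchain_buffers().
 */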
void put_callchain_buffers(void)
{
	if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
		release_callchain_buffers();
		mutex_unlock(&callchain_mutex);
	}
}

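/*
 * Claim the recursion slot for the current context level and return
 * the matching buffer: each CPU owns PERF_NR_CONTEXTS consecutive
 * entries, indexed by the recursion context *rctx.
 */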
static struct perf_callchain_entry *get_callchain_entry(int *rctx)
{
	int cpu;
	struct callchain_cpus_entries *entries;

	*rctx = get_recursion_context(this_cpu_ptr(callchain_recursion));
	if (*rctx == -1)
		return NULL;

	entries = rcu_dereference(callchain_cpus_entries);
	if (!entries)
		return NULL;

	cpu = smp_processor_id();

	return (((void *)entries->cpu_entries[cpu]) +
		(*rctx * perf_callchain_entry__sizeof()));
}

static void
put_callchain_entry(int rctx)
{
	put_recursion_context(this_cpu_ptr(callchain_recursion), rctx);
}

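/*
 * Main sampling entry point: honour the event's exclude_callchain_*
 * attributes and hand off to get_perf_callchain() with the global
 * stack-depth limit.
 */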
struct perf_callchain_entry *
perf_callchain(struct perf_event *event, struct pt_regs *regs)
{
	bool kernel = !event->attr.exclude_callchain_kernel;
	bool user   = !event->attr.exclude_callchain_user;
	/* Disallow cross-task user callchains. */
	bool crosstask = event->ctx->task && event->ctx->task != current;

	if (!kernel && !user)
		return NULL;

	return get_perf_callchain(regs, 0, kernel, user,
				  sysctl_perf_event_max_stack, crosstask, true);
}

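/*
 * Build the callchain: grab a per-context buffer, optionally store
 * PERF_CONTEXT_KERNEL / PERF_CONTEXT_USER markers, and invoke the
 * architecture unwinders for the requested sides. User unwinding is
 * skipped for cross-task events and for tasks without an mm
 * (kernel threads).
 */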
struct perf_callchain_entry *
get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
		   u32 max_stack, bool crosstask, bool add_mark)
{
	struct perf_callchain_entry *entry;
	struct perf_callchain_entry_ctx ctx;
	int rctx;

	entry = get_callchain_entry(&rctx);
	if (rctx == -1)
		return NULL;

	if (!entry)
		goto exit_put;

	ctx.entry	   = entry;
	ctx.max_stack	   = max_stack;
	ctx.nr		   = entry->nr = init_nr;
	ctx.contexts	   = 0;
	ctx.contexts_maxed = false;

	if (kernel && !user_mode(regs)) {
		if (add_mark)
			perf_callchain_store_context(&ctx, PERF_CONTEXT_KERNEL);
		perf_callchain_kernel(&ctx, regs);
	}

	if (user) {
		if (!user_mode(regs)) {
			if (current->mm)
				regs = task_pt_regs(current);
			else
				regs = NULL;
		}

		if (regs) {
			if (crosstask)
				goto exit_put;

			if (add_mark)
				perf_callchain_store_context(&ctx, PERF_CONTEXT_USER);
			perf_callchain_user(&ctx, regs);
		}
	}

exit_put:
	put_callchain_entry(rctx);

	return entry;
}

/*
 * Used for sysctl_perf_event_max_stack and
 * sysctl_perf_event_max_contexts_per_stack.
 */
int perf_event_max_stack_handler(struct ctl_table *table, int write,
				 void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int *value = table->data;
	int new_value = *value, ret;
	struct ctl_table new_table = *table;

	new_table.data = &new_value;
	ret = proc_dointvec_minmax(&new_table, write, buffer, lenp, ppos);
	if (ret || !write)
		return ret;

	/*
	 * The new value is only committed if no callchain buffers are
	 * currently allocated, since their size depends on it.
	 */
	mutex_lock(&callchain_mutex);
	if (atomic_read(&nr_callchain_events))
		ret = -EBUSY;
	else
		*value = new_value;

	mutex_unlock(&callchain_mutex);

	return ret;
}

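/*
 * The knobs handled above are exposed as sysctls, e.g.:
 *
 *   # sysctl kernel.perf_event_max_stack=256
 *
 * which fails with EBUSY while any event with callchain sampling is
 * active. (The exact sysctl name assumes the standard registration in
 * kernel/sysctl.c.)
 */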