/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"

static char __percpu *perf_trace_buf[PERF_NR_CONTEXTS];

/*
 * Force it to be aligned to unsigned long to avoid misaligned access
 * surprises
 */
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
	perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int total_ref_count;

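/*
 * Decide whether the current task may attach perf event @p_event to
 * trace event @tp_event.  Children of an already-allowed event are
 * accepted as-is; otherwise access to raw samples, user callchains and
 * user stack dumps is restricted as commented below.
 */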
static int perf_trace_event_perm(struct ftrace_event_call *tp_event,
				 struct perf_event *p_event)
{
	if (tp_event->perf_perm) {
		int ret = tp_event->perf_perm(tp_event, p_event);
		if (ret)
			return ret;
	}

	/*
	 * We checked and allowed creating the parent,
	 * so allow children without checking.
	 */
	if (p_event->parent)
		return 0;

	/*
	 * It's ok to check current process (owner) permissions in here,
	 * because code below is called only via perf_event_open syscall.
	 */

	/* The ftrace function trace is allowed only for root. */
	if (ftrace_event_is_function(tp_event)) {
		if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
			return -EPERM;

		/*
		 * We don't allow user space callchains for function trace
		 * events, due to issues with page faults while tracing the
		 * page fault handler and their overall tricky nature.
		 */
		if (!p_event->attr.exclude_callchain_user)
			return -EINVAL;

		/*
		 * Same reason to disable user stack dump as for user space
		 * callchains above.
		 */
		if (p_event->attr.sample_type & PERF_SAMPLE_STACK_USER)
			return -EINVAL;
	}

	/* No tracing, just counting, so no obvious leak */
	if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW))
		return 0;

	/* Some events are ok to be traced by non-root users... */
	if (p_event->attach_state == PERF_ATTACH_TASK) {
		if (tp_event->flags & TRACE_EVENT_FL_CAP_ANY)
			return 0;
	}

	/*
	 * ...otherwise raw tracepoint data can be a severe data leak,
	 * only allow root to have these.
	 */
	if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	return 0;
}

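/*
 * Register @tp_event with perf and take a reference on it.  The first
 * user of an event allocates its per-cpu hlist of attached perf events;
 * the first user overall also allocates the shared perf_trace_buf
 * scratch buffers, one per recursion context.
 */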
static int perf_trace_event_reg(struct ftrace_event_call *tp_event,
				struct perf_event *p_event)
{
	struct hlist_head __percpu *list;
	int ret = -ENOMEM;
	int cpu;

	p_event->tp_event = tp_event;
	if (tp_event->perf_refcount++ > 0)
		return 0;

	list = alloc_percpu(struct hlist_head);
	if (!list)
		goto fail;

	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));

	tp_event->perf_events = list;

	if (!total_ref_count) {
		char __percpu *buf;
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			buf = (char __percpu *)alloc_percpu(perf_trace_t);
			if (!buf)
				goto fail;

			perf_trace_buf[i] = buf;
		}
	}

	ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER, NULL);
	if (ret)
		goto fail;

	total_ref_count++;
	return 0;

fail:
	if (!total_ref_count) {
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}

	if (!--tp_event->perf_refcount) {
		free_percpu(tp_event->perf_events);
		tp_event->perf_events = NULL;
	}

	return ret;
}

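/*
 * Drop the reference taken by perf_trace_event_reg() and, for the last
 * user of the event, tear down its per-cpu list; the last user overall
 * also frees the shared buffers.  The tracepoint is synchronized first
 * so no callback can still be using what gets freed.
 */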
static void perf_trace_event_unreg(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	int i;

	if (--tp_event->perf_refcount > 0)
		goto out;

	tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER, NULL);

	/*
	 * Ensure our callback won't be called anymore. The buffers
	 * will be freed after that.
	 */
	tracepoint_synchronize_unregister();

	free_percpu(tp_event->perf_events);
	tp_event->perf_events = NULL;

	if (!--total_ref_count) {
		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}
out:
	module_put(tp_event->mod);
}

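/*
 * TRACE_REG_PERF_OPEN and TRACE_REG_PERF_CLOSE are simply forwarded to
 * the trace event's ->reg() callback; the function trace event below,
 * for instance, uses them to register and unregister its ftrace_ops.
 */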
static int perf_trace_event_open(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	return tp_event->class->reg(tp_event, TRACE_REG_PERF_OPEN, p_event);
}

static void perf_trace_event_close(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	tp_event->class->reg(tp_event, TRACE_REG_PERF_CLOSE, p_event);
}

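/*
 * Full per-event setup: permission check, registration, then the OPEN
 * callback, undoing the registration if the open fails.
 */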
static int perf_trace_event_init(struct ftrace_event_call *tp_event,
				 struct perf_event *p_event)
{
	int ret;

	ret = perf_trace_event_perm(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_reg(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_open(p_event);
	if (ret) {
		perf_trace_event_unreg(p_event);
		return ret;
	}

	return 0;
}

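/*
 * Entry point from the perf core: find the trace event whose id matches
 * @p_event->attr.config, pin the module that provides it and initialize
 * the perf event, all under event_mutex.
 */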
int perf_trace_init(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event;
	u64 event_id = p_event->attr.config;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(tp_event, &ftrace_events, list) {
		if (tp_event->event.type == event_id &&
		    tp_event->class && tp_event->class->reg &&
		    try_module_get(tp_event->mod)) {
			ret = perf_trace_event_init(tp_event, p_event);
			if (ret)
				module_put(tp_event->mod);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}

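/*
 * Counterpart of perf_trace_init(): close the event and drop its
 * registration, with event_mutex serializing against init and other
 * destroy calls.
 */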
void perf_trace_destroy(struct perf_event *p_event)
{
	mutex_lock(&event_mutex);
	perf_trace_event_close(p_event);
	perf_trace_event_unreg(p_event);
	mutex_unlock(&event_mutex);
}

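/*
 * Called when the perf core schedules the event in: add it to this
 * cpu's list of active events (seen by the tracepoint callback) and
 * forward TRACE_REG_PERF_ADD to the trace event's ->reg() hook.
 */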
int perf_trace_add(struct perf_event *p_event, int flags)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	struct hlist_head __percpu *pcpu_list;
	struct hlist_head *list;

	pcpu_list = tp_event->perf_events;
	if (WARN_ON_ONCE(!pcpu_list))
		return -EINVAL;

	if (!(flags & PERF_EF_START))
		p_event->hw.state = PERF_HES_STOPPED;

	list = this_cpu_ptr(pcpu_list);
	hlist_add_head_rcu(&p_event->hlist_entry, list);

	return tp_event->class->reg(tp_event, TRACE_REG_PERF_ADD, p_event);
}

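/*
 * Called when the perf core schedules the event out: the mirror image
 * of perf_trace_add().
 */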
void perf_trace_del(struct perf_event *p_event, int flags)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	hlist_del_rcu(&p_event->hlist_entry);
	tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event);
}

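/*
 * Hand out a per-cpu scratch buffer of @size bytes (at most
 * PERF_MAX_TRACE_SIZE), with an initialized struct trace_entry of the
 * given @type at its head.  On success, *rctxp holds the recursion
 * context that must be given back via perf_trace_buf_submit().
 *
 * A typical caller (see perf_ftrace_function_call() below) does
 * roughly:
 *
 *	entry = perf_trace_buf_prepare(size, type, NULL, &rctx);
 *	if (!entry)
 *		return;
 *	entry->field = ...;	(fill in the payload past trace_entry)
 *	perf_trace_buf_submit(entry, size, rctx, addr, count,
 *			      regs, head, task);
 */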
void *perf_trace_buf_prepare(int size, unsigned short type,
			     struct pt_regs *regs, int *rctxp)
{
	struct trace_entry *entry;
	unsigned long flags;
	char *raw_data;
	int pc;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));

	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		      "perf buffer not large enough"))
		return NULL;

	pc = preempt_count();

	*rctxp = perf_swevent_get_recursion_context();
	if (*rctxp < 0)
		return NULL;

	raw_data = this_cpu_ptr(perf_trace_buf[*rctxp]);

	/* zero the dead bytes from alignment to avoid leaking stack to user */
	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));

	entry = (struct trace_entry *)raw_data;
	local_save_flags(flags);
	tracing_generic_entry_update(entry, flags, pc);
	entry->type = type;

	return raw_data;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);
NOKPROBE_SYMBOL(perf_trace_buf_prepare);

#ifdef CONFIG_FUNCTION_TRACER
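/*
 * ftrace callback for perf function-trace events: snapshot the caller's
 * registers, build a struct ftrace_entry in the perf scratch buffer and
 * submit it.  ENTRY_SIZE pads the entry so that the record stays
 * u64-aligned after perf prepends its u32 size field to the raw sample.
 */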
static void
perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *ops, struct pt_regs *pt_regs)
{
	struct ftrace_entry *entry;
	struct hlist_head *head;
	struct pt_regs regs;
	int rctx;

	head = this_cpu_ptr(event_function.perf_events);
	if (hlist_empty(head))
		return;

#define ENTRY_SIZE (ALIGN(sizeof(struct ftrace_entry) + sizeof(u32), \
		    sizeof(u64)) - sizeof(u32))

	BUILD_BUG_ON(ENTRY_SIZE > PERF_MAX_TRACE_SIZE);

	perf_fetch_caller_regs(&regs);

	entry = perf_trace_buf_prepare(ENTRY_SIZE, TRACE_FN, NULL, &rctx);
	if (!entry)
		return;

	entry->ip = ip;
	entry->parent_ip = parent_ip;
	perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, 0,
			      1, &regs, head, NULL);

#undef ENTRY_SIZE
}

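/*
 * Point this event's private ftrace_ops at the callback above and hook
 * it into the function tracer.  FTRACE_OPS_FL_CONTROL allows the ops to
 * be enabled and disabled per cpu, which the add/del callbacks rely on.
 */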
static int perf_ftrace_function_register(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;

	ops->flags |= FTRACE_OPS_FL_CONTROL;
	ops->func = perf_ftrace_function_call;
	return register_ftrace_function(ops);
}

static int perf_ftrace_function_unregister(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;
	int ret = unregister_ftrace_function(ops);
	ftrace_free_filter(ops);
	return ret;
}

static void perf_ftrace_function_enable(struct perf_event *event)
{
	ftrace_function_local_enable(&event->ftrace_ops);
}

static void perf_ftrace_function_disable(struct perf_event *event)
{
	ftrace_function_local_disable(&event->ftrace_ops);
}

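/*
 * ->reg() callback of the function trace event.  The perf
 * register/unregister steps need no work here; everything happens in
 * the open/close and add/del callbacks instead.
 */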
int perf_ftrace_event_register(struct ftrace_event_call *call,
			       enum trace_reg type, void *data)
{
	switch (type) {
	case TRACE_REG_REGISTER:
	case TRACE_REG_UNREGISTER:
		break;
	case TRACE_REG_PERF_REGISTER:
	case TRACE_REG_PERF_UNREGISTER:
		return 0;
	case TRACE_REG_PERF_OPEN:
		return perf_ftrace_function_register(data);
	case TRACE_REG_PERF_CLOSE:
		return perf_ftrace_function_unregister(data);
	case TRACE_REG_PERF_ADD:
		perf_ftrace_function_enable(data);
		return 0;
	case TRACE_REG_PERF_DEL:
		perf_ftrace_function_disable(data);
		return 0;
	}

	return -EINVAL;
}
#endif /* CONFIG_FUNCTION_TRACER */