/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"

static char __percpu *perf_trace_buf[PERF_NR_CONTEXTS];

/*
 * Force it to be aligned to unsigned long to avoid misaligned accesses.
 */
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
	perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int	total_ref_count;
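
/*
 * Check whether this perf event may use the tracepoint. Counting-only
 * events (no PERF_SAMPLE_RAW) are always allowed; raw tracepoint data
 * and the function trace event are restricted to privileged users
 * unless the event is flagged TRACE_EVENT_FL_CAP_ANY and attached to
 * a single task.
 */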
static int perf_trace_event_perm(struct ftrace_event_call *tp_event,
				 struct perf_event *p_event)
{
	if (tp_event->perf_perm) {
		int ret = tp_event->perf_perm(tp_event, p_event);
		if (ret)
			return ret;
	}

	/* The ftrace function trace is allowed only for root. */
	if (ftrace_event_is_function(tp_event)) {
		if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
			return -EPERM;

		/*
		 * We don't allow user space callchains for the function
		 * trace event, due to issues with page faults while tracing
		 * the page fault handler, and its overall trickiness.
		 */
		if (!p_event->attr.exclude_callchain_user)
			return -EINVAL;

		/*
		 * Same reason to disable user stack dump as for user space
		 * callchains above.
		 */
		if (p_event->attr.sample_type & PERF_SAMPLE_STACK_USER)
			return -EINVAL;
	}

	/* No tracing, just counting, so no obvious leak */
	if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW))
		return 0;

	/* Some events are ok to be traced by non-root users... */
	if (p_event->attach_state == PERF_ATTACH_TASK) {
		if (tp_event->flags & TRACE_EVENT_FL_CAP_ANY)
			return 0;
	}

	/*
	 * ...otherwise raw tracepoint data can be a severe data leak,
	 * only allow root to have these.
	 */
	if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	return 0;
}
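
/*
 * Register the event with its class and, on first use of the event id,
 * allocate the per-CPU hlist of attached perf events. The shared
 * per-context trace buffers are allocated once for all events, guarded
 * by total_ref_count.
 */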
static int perf_trace_event_reg(struct ftrace_event_call *tp_event,
				struct perf_event *p_event)
{
	struct hlist_head __percpu *list;
	int ret = -ENOMEM;
	int cpu;

	p_event->tp_event = tp_event;
	if (tp_event->perf_refcount++ > 0)
		return 0;

	list = alloc_percpu(struct hlist_head);
	if (!list)
		goto fail;

	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));

	tp_event->perf_events = list;

	if (!total_ref_count) {
		char __percpu *buf;
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			buf = (char __percpu *)alloc_percpu(perf_trace_t);
			if (!buf)
				goto fail;

			perf_trace_buf[i] = buf;
		}
	}

	ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER, NULL);
	if (ret)
		goto fail;

	total_ref_count++;
	return 0;

fail:
	if (!total_ref_count) {
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}

	if (!--tp_event->perf_refcount) {
		free_percpu(tp_event->perf_events);
		tp_event->perf_events = NULL;
	}

	return ret;
}
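
/*
 * Drop one reference on the event id; on the last one, unregister the
 * tracepoint callback, wait for in-flight probes to finish, then free
 * the per-CPU list and, once no trace events remain at all, the shared
 * trace buffers.
 */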
static void perf_trace_event_unreg(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	int i;

	if (--tp_event->perf_refcount > 0)
		goto out;

	tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER, NULL);

	/*
	 * Ensure our callback won't be called anymore. The buffers
	 * will be freed after that.
	 */
	tracepoint_synchronize_unregister();

	free_percpu(tp_event->perf_events);
	tp_event->perf_events = NULL;

	if (!--total_ref_count) {
		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}
out:
	module_put(tp_event->mod);
}
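
/* Forward perf open/close requests to the event class. */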
static int perf_trace_event_open(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;

	return tp_event->class->reg(tp_event, TRACE_REG_PERF_OPEN, p_event);
}
static void perf_trace_event_close(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;

	tp_event->class->reg(tp_event, TRACE_REG_PERF_CLOSE, p_event);
}
static int perf_trace_event_init(struct ftrace_event_call *tp_event,
				 struct perf_event *p_event)
{
	int ret;

	ret = perf_trace_event_perm(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_reg(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_open(p_event);
	if (ret)
		perf_trace_event_unreg(p_event);

	return ret;
}
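
/*
 * Entry point from the perf core: look up the trace event matching
 * attr.config under event_mutex, pin its module and initialize it.
 */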
int perf_trace_init(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event;
	u64 event_id = p_event->attr.config;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(tp_event, &ftrace_events, list) {
		if (tp_event->event.type == event_id &&
		    tp_event->class && tp_event->class->reg &&
		    try_module_get(tp_event->mod)) {
			ret = perf_trace_event_init(tp_event, p_event);
			if (ret)
				module_put(tp_event->mod);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}
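
/* Tear down a perf tracepoint event: close it and drop its references. */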
void perf_trace_destroy(struct perf_event *p_event)
{
	mutex_lock(&event_mutex);
	perf_trace_event_close(p_event);
	perf_trace_event_unreg(p_event);
	mutex_unlock(&event_mutex);
}
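
/*
 * Called when the event is scheduled in on this CPU: hook it into the
 * per-CPU list so the tracepoint probe will see it.
 */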
int perf_trace_add(struct perf_event *p_event, int flags)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	struct hlist_head __percpu *pcpu_list;
	struct hlist_head *list;

	pcpu_list = tp_event->perf_events;
	if (WARN_ON_ONCE(!pcpu_list))
		return -EINVAL;

	if (!(flags & PERF_EF_START))
		p_event->hw.state = PERF_HES_STOPPED;

	list = this_cpu_ptr(pcpu_list);
	hlist_add_head_rcu(&p_event->hlist_entry, list);

	return tp_event->class->reg(tp_event, TRACE_REG_PERF_ADD, p_event);
}
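
/* Called when the event is scheduled out: unhook it from the per-CPU list. */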
void perf_trace_del(struct perf_event *p_event, int flags)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;

	hlist_del_rcu(&p_event->hlist_entry);
	tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event);
}
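
/*
 * Grab the per-CPU trace buffer for the current recursion context and
 * initialize its generic trace entry header. Returns NULL if the
 * requested size exceeds PERF_MAX_TRACE_SIZE or no recursion context
 * is available; on success the caller must pair this with
 * perf_trace_buf_submit(), which releases the context.
 */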
__kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
				       struct pt_regs *regs, int *rctxp)
{
	struct trace_entry *entry;
	unsigned long flags;
	char *raw_data;
	int pc;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));

	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		      "perf buffer not large enough"))
		return NULL;

	pc = preempt_count();

	*rctxp = perf_swevent_get_recursion_context();
	if (*rctxp < 0)
		return NULL;

	raw_data = this_cpu_ptr(perf_trace_buf[*rctxp]);

	/* zero the padding bytes from alignment so we don't leak stack to user */
	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));

	entry = (struct trace_entry *)raw_data;
	local_save_flags(flags);
	tracing_generic_entry_update(entry, flags, pc);
	entry->type = type;

	return raw_data;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);

#ifdef CONFIG_FUNCTION_TRACER
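/*
 * Invoked by the function tracer for every traced function: build an
 * ftrace_entry in the perf trace buffer and submit it to all events
 * hooked on this CPU's list.
 */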
static void
perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *ops, struct pt_regs *pt_regs)
{
	struct ftrace_entry *entry;
	struct hlist_head *head;
	struct pt_regs regs;
	int rctx;

	head = this_cpu_ptr(event_function.perf_events);
	if (hlist_empty(head))
		return;

#define ENTRY_SIZE (ALIGN(sizeof(struct ftrace_entry) + sizeof(u32), \
		    sizeof(u64)) - sizeof(u32))

	BUILD_BUG_ON(ENTRY_SIZE > PERF_MAX_TRACE_SIZE);

	perf_fetch_caller_regs(&regs);

	entry = perf_trace_buf_prepare(ENTRY_SIZE, TRACE_FN, NULL, &rctx);
	if (!entry)
		return;

	entry->ip = ip;
	entry->parent_ip = parent_ip;
	perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, 0,
			      1, &regs, head, NULL);

#undef ENTRY_SIZE
}
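
/* Attach/detach the per-event ftrace_ops to the function tracer. */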
static int perf_ftrace_function_register(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;

	ops->flags |= FTRACE_OPS_FL_CONTROL;
	ops->func = perf_ftrace_function_call;

	return register_ftrace_function(ops);
}
static int perf_ftrace_function_unregister(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;
	int ret = unregister_ftrace_function(ops);

	ftrace_free_filter(ops);

	return ret;
}
static void perf_ftrace_function_enable(struct perf_event *event)
{
	ftrace_function_local_enable(&event->ftrace_ops);
}
static void perf_ftrace_function_disable(struct perf_event *event)
{
	ftrace_function_local_disable(&event->ftrace_ops);
}
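
/*
 * reg() callback for the function trace event: dispatch perf
 * open/close/add/del requests to the ftrace helpers above.
 */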
int perf_ftrace_event_register(struct ftrace_event_call *call,
			       enum trace_reg type, void *data)
{
	switch (type) {
	case TRACE_REG_REGISTER:
	case TRACE_REG_UNREGISTER:
		return 0;
	case TRACE_REG_PERF_REGISTER:
	case TRACE_REG_PERF_UNREGISTER:
		return 0;
	case TRACE_REG_PERF_OPEN:
		return perf_ftrace_function_register(data);
	case TRACE_REG_PERF_CLOSE:
		return perf_ftrace_function_unregister(data);
	case TRACE_REG_PERF_ADD:
		perf_ftrace_function_enable(data);
		return 1;
	case TRACE_REG_PERF_DEL:
		perf_ftrace_function_disable(data);
		return 1;
	}

	return -EINVAL;
}
#endif /* CONFIG_FUNCTION_TRACER */