kernel/trace/trace_functions.c
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

/* function tracing enabled */
static int ftrace_function_enabled;

static struct trace_array *func_trace;

static void tracing_start_function_trace(void);
static void tracing_stop_function_trace(void);

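/*
 * Called when the "function" tracer is selected: remember which
 * trace_array we are writing into, start recording task comms so PIDs
 * can be resolved to names, and register the function entry callback.
 */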
static int function_trace_init(struct trace_array *tr)
{
        func_trace = tr;
        tr->cpu = get_cpu();
        put_cpu();

        tracing_start_cmdline_record();
        tracing_start_function_trace();
        return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
        tracing_stop_function_trace();
        tracing_stop_cmdline_record();
}

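/*
 * .start callback: wipe the per-CPU ring buffers so a restarted trace
 * does not mix old and new data.
 */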
static void function_trace_start(struct trace_array *tr)
{
        tracing_reset_online_cpus(tr);
}

/* Our option */
enum {
        TRACE_FUNC_OPT_STACK    = 0x1,
};

static struct tracer_flags func_flags;

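/*
 * Callback invoked by the ftrace core for every traced function when
 * stack tracing is off: guard against recursion, then record a function
 * entry event for the current CPU unless tracing on that CPU has been
 * disabled.
 */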
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
                    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
        struct trace_array *tr = func_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        int bit;
        int cpu;
        int pc;

        if (unlikely(!ftrace_function_enabled))
                return;

        pc = preempt_count();
        preempt_disable_notrace();

        bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
        if (bit < 0)
                goto out;

        cpu = smp_processor_id();
        data = tr->data[cpu];
        if (!atomic_read(&data->disabled)) {
                local_save_flags(flags);
                trace_function(tr, ip, parent_ip, flags, pc);
        }
        trace_clear_recursion(bit);

 out:
        preempt_enable_notrace();
}

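/*
 * Variant used when the func_stack_trace option is set: interrupts are
 * disabled and the per-CPU "disabled" counter serves as the recursion
 * guard, and each function entry event is followed by a stack trace.
 */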
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
                          struct ftrace_ops *op, struct pt_regs *pt_regs)
{
        struct trace_array *tr = func_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        if (unlikely(!ftrace_function_enabled))
                return;

        /*
         * Need to use raw, since this must be called before the
         * recursive protection is performed.
         */
        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1)) {
                pc = preempt_count();
                trace_function(tr, ip, parent_ip, flags, pc);
                /*
                 * skip over 5 funcs:
                 *    __ftrace_trace_stack,
                 *    __trace_stack,
                 *    function_stack_trace_call
                 *    ftrace_list_func
                 *    ftrace_call
                 */
                __trace_stack(tr, flags, 5, pc);
        }

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}


static struct ftrace_ops trace_ops __read_mostly =
{
        .func = function_trace_call,
        .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops trace_stack_ops __read_mostly =
{
        .func = function_stack_trace_call,
        .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
        { TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
        { } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
        .val = 0, /* By default: all flags disabled */
        .opts = func_opts
};

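/*
 * Pick which callback to register based on the current func_stack_trace
 * option.  ftrace_function_enabled is cleared first so the callbacks
 * ignore any calls that arrive while the switch is still in progress.
 */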
static void tracing_start_function_trace(void)
{
        ftrace_function_enabled = 0;

        if (func_flags.val & TRACE_FUNC_OPT_STACK)
                register_ftrace_function(&trace_stack_ops);
        else
                register_ftrace_function(&trace_ops);

        ftrace_function_enabled = 1;
}

static void tracing_stop_function_trace(void)
{
        ftrace_function_enabled = 0;

        if (func_flags.val & TRACE_FUNC_OPT_STACK)
                unregister_ftrace_function(&trace_stack_ops);
        else
                unregister_ftrace_function(&trace_ops);
}

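/*
 * Called when the user toggles the func_stack_trace option while the
 * tracer is active: swap the plain callback for the stack-tracing one
 * (or back) by unregistering one ftrace_ops and registering the other.
 */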
static int func_set_flag(u32 old_flags, u32 bit, int set)
{
        switch (bit) {
        case TRACE_FUNC_OPT_STACK:
                /* do nothing if already set */
                if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
                        break;

                if (set) {
                        unregister_ftrace_function(&trace_ops);
                        register_ftrace_function(&trace_stack_ops);
                } else {
                        unregister_ftrace_function(&trace_stack_ops);
                        register_ftrace_function(&trace_ops);
                }

                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static struct tracer function_trace __read_mostly =
{
        .name           = "function",
        .init           = function_trace_init,
        .reset          = function_trace_reset,
        .start          = function_trace_start,
        .wait_pipe      = poll_wait_pipe,
        .flags          = &func_flags,
        .set_flag       = func_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_function,
#endif
};

#ifdef CONFIG_DYNAMIC_FTRACE
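/*
 * Probe callbacks armed through set_ftrace_filter: ftrace_traceon()
 * turns the ring buffer on when the marked function is hit,
 * ftrace_traceoff() turns it off.  *data carries the remaining hit
 * count; -1 means unlimited, otherwise it is decremented until it
 * reaches zero, after which the probe does nothing.
 */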
static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
{
        long *count = (long *)data;

        if (tracing_is_on())
                return;

        if (!*count)
                return;

        if (*count != -1)
                (*count)--;

        tracing_on();
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
{
        long *count = (long *)data;

        if (!tracing_is_on())
                return;

        if (!*count)
                return;

        if (*count != -1)
                (*count)--;

        tracing_off();
}

static int
ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
                         struct ftrace_probe_ops *ops, void *data);

static struct ftrace_probe_ops traceon_probe_ops = {
        .func = ftrace_traceon,
        .print = ftrace_trace_onoff_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
        .func = ftrace_traceoff,
        .print = ftrace_trace_onoff_print,
};

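/*
 * .print callback shared by both probes: emits one line per armed probe
 * when set_ftrace_filter is read, in the form
 * "<function>:traceon|traceoff:unlimited" or ":count=<n>".
 */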
static int
ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
                         struct ftrace_probe_ops *ops, void *data)
{
        long count = (long)data;

        seq_printf(m, "%ps:", (void *)ip);

        if (ops == &traceon_probe_ops)
                seq_printf(m, "traceon");
        else
                seq_printf(m, "traceoff");

        if (count == -1)
                seq_printf(m, ":unlimited\n");
        else
                seq_printf(m, ":count=%ld\n", count);

        return 0;
}

static int
ftrace_trace_onoff_unreg(char *glob, char *cmd, char *param)
{
        struct ftrace_probe_ops *ops;

        /* we register both traceon and traceoff to this callback */
        if (strcmp(cmd, "traceon") == 0)
                ops = &traceon_probe_ops;
        else
                ops = &traceoff_probe_ops;

        unregister_ftrace_function_probe_func(glob, ops);

        return 0;
}

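/*
 * Parse a "traceon"/"traceoff" command written to set_ftrace_filter and
 * attach (or, with a leading '!', detach) the matching probe.  An
 * optional ":count" limits how many times the probe fires.  For example
 * (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *   echo 'schedule:traceoff:1' > /sys/kernel/debug/tracing/set_ftrace_filter
 *
 * turns the ring buffer off the first time schedule() is hit.
 */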
static int
ftrace_trace_onoff_callback(struct ftrace_hash *hash,
                            char *glob, char *cmd, char *param, int enable)
{
        struct ftrace_probe_ops *ops;
        void *count = (void *)-1;
        char *number;
        int ret;

        /* hash funcs only work with set_ftrace_filter */
        if (!enable)
                return -EINVAL;

        if (glob[0] == '!')
                return ftrace_trace_onoff_unreg(glob+1, cmd, param);

        /* we register both traceon and traceoff to this callback */
        if (strcmp(cmd, "traceon") == 0)
                ops = &traceon_probe_ops;
        else
                ops = &traceoff_probe_ops;

        if (!param)
                goto out_reg;

        number = strsep(&param, ":");

        if (!strlen(number))
                goto out_reg;

        /*
         * We use the callback data field (which is a pointer)
         * as our counter.
         */
        ret = kstrtoul(number, 0, (unsigned long *)&count);
        if (ret)
                return ret;

 out_reg:
        ret = register_ftrace_function_probe(glob, ops, count);

        return ret < 0 ? ret : 0;
}

static struct ftrace_func_command ftrace_traceon_cmd = {
        .name           = "traceon",
        .func           = ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
        .name           = "traceoff",
        .func           = ftrace_trace_onoff_callback,
};

static int __init init_func_cmd_traceon(void)
{
        int ret;

        ret = register_ftrace_command(&ftrace_traceoff_cmd);
        if (ret)
                return ret;

        ret = register_ftrace_command(&ftrace_traceon_cmd);
        if (ret)
                unregister_ftrace_command(&ftrace_traceoff_cmd);
        return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
        return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

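/* Register the traceon/traceoff commands and then the tracer itself. */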
static __init int init_function_trace(void)
{
        init_func_cmd_traceon();
        return register_tracer(&function_trace);
}
core_initcall(init_function_trace);