kernel/trace/trace_functions.c
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

/* function tracing enabled */
static int ftrace_function_enabled;

static struct trace_array *func_trace;

static void tracing_start_function_trace(void);
static void tracing_stop_function_trace(void);

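/*
 * Set up the "function" tracer: remember the trace array and the CPU
 * it was started on, then enable cmdline recording and the function
 * entry callback.
 */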
static int function_trace_init(struct trace_array *tr)
{
        func_trace = tr;
        tr->cpu = get_cpu();
        put_cpu();

        tracing_start_cmdline_record();
        tracing_start_function_trace();
        return 0;
}

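/* Tear down in the reverse order of function_trace_init() */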
static void function_trace_reset(struct trace_array *tr)
{
        tracing_stop_function_trace();
        tracing_stop_cmdline_record();
}

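/* Clear the per-cpu buffers when the tracer is (re)started */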
static void function_trace_start(struct trace_array *tr)
{
        tracing_reset_online_cpus(tr);
}

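/*
 * Lightweight callback used when the TRACE_ITER_PREEMPTONLY flag is
 * set: only preemption is disabled while recording, not interrupts.
 * The per-cpu "disabled" counter drops any event that would nest
 * inside one already being recorded on this CPU.
 */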
static void
function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
{
        struct trace_array *tr = func_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu, resched;
        int pc;

        if (unlikely(!ftrace_function_enabled))
                return;

        pc = preempt_count();
        resched = ftrace_preempt_disable();
        local_save_flags(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1))
                trace_function(tr, ip, parent_ip, flags, pc);

        atomic_dec(&data->disabled);
        ftrace_preempt_enable(resched);
}

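/*
 * Default callback: disables interrupts around the record so an IRQ
 * arriving mid-record cannot recurse into the tracer on this CPU.
 */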
static void
function_trace_call(unsigned long ip, unsigned long parent_ip)
{
        struct trace_array *tr = func_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        if (unlikely(!ftrace_function_enabled))
                return;

        /*
         * Need to use raw, since this must be called before the
         * recursive protection is performed.
         */
        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1)) {
                pc = preempt_count();
                trace_function(tr, ip, parent_ip, flags, pc);
        }

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}

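/*
 * Like function_trace_call(), but also records a stack trace for
 * each function entry (the func_stack_trace tracer option).
 */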
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
        struct trace_array *tr = func_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        if (unlikely(!ftrace_function_enabled))
                return;

        /*
         * Need to use raw, since this must be called before the
         * recursive protection is performed.
         */
        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1)) {
                pc = preempt_count();
                trace_function(tr, ip, parent_ip, flags, pc);
                /*
                 * skip over 5 funcs:
                 *   __ftrace_trace_stack,
                 *   __trace_stack,
                 *   function_stack_trace_call,
                 *   ftrace_list_func,
                 *   ftrace_call
                 */
                __trace_stack(tr, flags, 5, pc);
        }

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}

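/* The ops registered with ftrace; trace_ops.func is chosen at start time */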
static struct ftrace_ops trace_ops __read_mostly =
{
        .func = function_trace_call,
};

static struct ftrace_ops trace_stack_ops __read_mostly =
{
        .func = function_stack_trace_call,
};

/* Our options */
enum {
        TRACE_FUNC_OPT_STACK = 0x1,
};

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
        { TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
        { } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
        .val = 0, /* By default: all flags disabled */
        .opts = func_opts
};

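/*
 * Register the ftrace callback: the TRACE_ITER_PREEMPTONLY trace
 * option selects the preempt-only variant, and the func_stack_trace
 * tracer option selects the stack-tracing ops instead.
 */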
static void tracing_start_function_trace(void)
{
        ftrace_function_enabled = 0;

        if (trace_flags & TRACE_ITER_PREEMPTONLY)
                trace_ops.func = function_trace_call_preempt_only;
        else
                trace_ops.func = function_trace_call;

        if (func_flags.val & TRACE_FUNC_OPT_STACK)
                register_ftrace_function(&trace_stack_ops);
        else
                register_ftrace_function(&trace_ops);

        ftrace_function_enabled = 1;
}

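/* Unregister whichever ops tracing_start_function_trace() installed */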
static void tracing_stop_function_trace(void)
{
        ftrace_function_enabled = 0;

        if (func_flags.val & TRACE_FUNC_OPT_STACK)
                unregister_ftrace_function(&trace_stack_ops);
        else
                unregister_ftrace_function(&trace_ops);
}

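/*
 * Toggle the func_stack_trace option at runtime by swapping the
 * registered ftrace ops.
 */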
static int func_set_flag(u32 old_flags, u32 bit, int set)
{
        if (bit == TRACE_FUNC_OPT_STACK) {
                /* do nothing if already set */
                if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
                        return 0;

                if (set) {
                        unregister_ftrace_function(&trace_ops);
                        register_ftrace_function(&trace_stack_ops);
                } else {
                        unregister_ftrace_function(&trace_stack_ops);
                        register_ftrace_function(&trace_ops);
                }

                return 0;
        }

        return -EINVAL;
}

static struct tracer function_trace __read_mostly =
{
        .name = "function",
        .init = function_trace_init,
        .reset = function_trace_reset,
        .start = function_trace_start,
        .wait_pipe = poll_wait_pipe,
        .flags = &func_flags,
        .set_flag = func_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest = trace_selftest_startup_function,
#endif
};

#ifdef CONFIG_DYNAMIC_FTRACE
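/*
 * Probe callbacks for the traceon/traceoff triggers set through
 * set_ftrace_filter.  The probe data pointer is used as a countdown
 * of how many times the trigger may fire; -1 means unlimited.
 */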
static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
{
        long *count = (long *)data;

        if (tracing_is_on())
                return;

        if (!*count)
                return;

        if (*count != -1)
                (*count)--;

        tracing_on();
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
{
        long *count = (long *)data;

        if (!tracing_is_on())
                return;

        if (!*count)
                return;

        if (*count != -1)
                (*count)--;

        tracing_off();
}

static int
ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
                         struct ftrace_probe_ops *ops, void *data);

static struct ftrace_probe_ops traceon_probe_ops = {
        .func = ftrace_traceon,
        .print = ftrace_trace_onoff_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
        .func = ftrace_traceoff,
        .print = ftrace_trace_onoff_print,
};

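/*
 * Display a registered trigger as "<func>:traceon" or
 * "<func>:traceoff", followed by ":unlimited" or the remaining
 * ":count=<n>".
 */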
static int
ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
                         struct ftrace_probe_ops *ops, void *data)
{
        long count = (long)data;

        seq_printf(m, "%ps:", (void *)ip);

        if (ops == &traceon_probe_ops)
                seq_printf(m, "traceon");
        else
                seq_printf(m, "traceoff");

        if (count == -1)
                seq_printf(m, ":unlimited\n");
        else
                seq_printf(m, ":count=%ld\n", count);

        return 0;
}

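/* Handle "!<glob>:traceon" / "!<glob>:traceoff" removal requests */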
static int
ftrace_trace_onoff_unreg(char *glob, char *cmd, char *param)
{
        struct ftrace_probe_ops *ops;

        /* we register both traceon and traceoff to this callback */
        if (strcmp(cmd, "traceon") == 0)
                ops = &traceon_probe_ops;
        else
                ops = &traceoff_probe_ops;

        unregister_ftrace_function_probe_func(glob, ops);

        return 0;
}

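/*
 * Parse "<glob>:traceon[:count]" or "<glob>:traceoff[:count]" written
 * to set_ftrace_filter and register the matching probe, stashing the
 * parsed count directly in the probe data pointer.
 */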
static int
ftrace_trace_onoff_callback(char *glob, char *cmd, char *param, int enable)
{
        struct ftrace_probe_ops *ops;
        void *count = (void *)-1;
        char *number;
        int ret;

        /* hash funcs only work with set_ftrace_filter */
        if (!enable)
                return -EINVAL;

        if (glob[0] == '!')
                return ftrace_trace_onoff_unreg(glob+1, cmd, param);

        /* we register both traceon and traceoff to this callback */
        if (strcmp(cmd, "traceon") == 0)
                ops = &traceon_probe_ops;
        else
                ops = &traceoff_probe_ops;

        if (!param)
                goto out_reg;

        number = strsep(&param, ":");

        if (!strlen(number))
                goto out_reg;

        /*
         * We use the callback data field (which is a pointer)
         * as our counter.
         */
        ret = strict_strtoul(number, 0, (unsigned long *)&count);
        if (ret)
                return ret;

 out_reg:
        ret = register_ftrace_function_probe(glob, ops, count);

        return ret < 0 ? ret : 0;
}

static struct ftrace_func_command ftrace_traceon_cmd = {
        .name = "traceon",
        .func = ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
        .name = "traceoff",
        .func = ftrace_trace_onoff_callback,
};

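/*
 * Register the traceon/traceoff commands; if the second registration
 * fails, back out the first.
 */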
static int __init init_func_cmd_traceon(void)
{
        int ret;

        ret = register_ftrace_command(&ftrace_traceoff_cmd);
        if (ret)
                return ret;

        ret = register_ftrace_command(&ftrace_traceon_cmd);
        if (ret)
                unregister_ftrace_command(&ftrace_traceoff_cmd);
        return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
        return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

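/* Register the function tracer and its ftrace commands at boot */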
static __init int init_function_trace(void)
{
        init_func_cmd_traceon();
        return register_tracer(&function_trace);
}
device_initcall(init_function_trace);