/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"
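
/*
 * At runtime this tracer is selected through the usual tracing
 * debugfs files (paths assume debugfs is mounted at
 * /sys/kernel/debug):
 *
 *   echo function > /sys/kernel/debug/tracing/current_tracer
 *   cat /sys/kernel/debug/tracing/trace
 */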

/* function tracing enabled */
static int ftrace_function_enabled;

static struct trace_array *func_trace;

static void tracing_start_function_trace(void);
static void tracing_stop_function_trace(void);

static int function_trace_init(struct trace_array *tr)
{
	func_trace = tr;
	tr->cpu = get_cpu();
	put_cpu();

	tracing_start_cmdline_record();
	tracing_start_function_trace();
	return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace();
	tracing_stop_cmdline_record();
}

static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(tr);
}
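
/*
 * Two flavors of the function entry callback follow: the
 * "preempt_only" variant disables only preemption around the record
 * (via ftrace_preempt_disable()), while the default variant disables
 * interrupts with local_irq_save().  tracing_start_function_trace()
 * picks one based on the TRACE_ITER_PREEMPTONLY trace flag.  In both
 * cases the per-cpu data->disabled counter acts as a recursion guard:
 * the event is recorded only when this callback is the sole writer
 * on the cpu.
 */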

static void
function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu, resched;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	pc = preempt_count();
	resched = ftrace_preempt_disable();
	local_save_flags(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		trace_function(tr, ip, parent_ip, flags, pc);

	atomic_dec(&data->disabled);
	ftrace_preempt_enable(resched);
}

static void
function_trace_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
		/*
		 * skip over 5 funcs:
		 *    __ftrace_trace_stack,
		 *    __trace_stack,
		 *    function_stack_trace_call
		 *    ftrace_list_func
		 *    ftrace_call
		 */
		__trace_stack(tr, flags, 5, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = function_trace_call,
};

static struct ftrace_ops trace_stack_ops __read_mostly =
{
	.func = function_stack_trace_call,
};

/* Our two options */
enum {
	TRACE_FUNC_OPT_STACK = 0x1,
};

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = func_opts
};
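
/*
 * On CONFIG_STACKTRACE kernels, the func_stack_trace option defined
 * above is expected to be toggled through the trace_options file
 * (path as assumed in the comment near the top of this file):
 *
 *   echo func_stack_trace > /sys/kernel/debug/tracing/trace_options
 *   echo nofunc_stack_trace > /sys/kernel/debug/tracing/trace_options
 *
 * Either write lands in func_set_flag() below, which swaps the
 * registered ftrace_ops between trace_ops and trace_stack_ops.
 */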

static void tracing_start_function_trace(void)
{
	ftrace_function_enabled = 0;

	if (trace_flags & TRACE_ITER_PREEMPTONLY)
		trace_ops.func = function_trace_call_preempt_only;
	else
		trace_ops.func = function_trace_call;

	if (func_flags.val & TRACE_FUNC_OPT_STACK)
		register_ftrace_function(&trace_stack_ops);
	else
		register_ftrace_function(&trace_ops);

	ftrace_function_enabled = 1;
}
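
/*
 * Note the ordering in the stop path below: ftrace_function_enabled
 * is cleared before unregistering, so a callback that is already
 * running sees the flag and bails out early.
 */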

static void tracing_stop_function_trace(void)
{
	ftrace_function_enabled = 0;
	/* OK if they are not registered */
	unregister_ftrace_function(&trace_stack_ops);
	unregister_ftrace_function(&trace_ops);
}

static int func_set_flag(u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_FUNC_OPT_STACK) {
		/* do nothing if already set */
		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
			return 0;

		if (set) {
			unregister_ftrace_function(&trace_ops);
			register_ftrace_function(&trace_stack_ops);
		} else {
			unregister_ftrace_function(&trace_stack_ops);
			register_ftrace_function(&trace_ops);
		}

		return 0;
	}

	return -EINVAL;
}

static struct tracer function_trace __read_mostly =
{
	.name = "function",
	.init = function_trace_init,
	.reset = function_trace_reset,
	.start = function_trace_start,
	.flags = &func_flags,
	.set_flag = func_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest = trace_selftest_startup_function,
#endif
};

#ifdef CONFIG_DYNAMIC_FTRACE
static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
{
	long *count = (long *)data;

	if (tracing_is_on())
		return;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_on();
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
{
	long *count = (long *)data;

	if (!tracing_is_on())
		return;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_off();
}
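
/*
 * Both hooks reuse the hook's data pointer itself as a counter:
 * -1 means "no limit"; any other value is decremented on each hit
 * and the hook stops firing once it reaches zero.  The counter is
 * parsed out of the command string in ftrace_trace_onoff_callback()
 * below.
 */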

static struct ftrace_hook_ops traceon_hook_ops = {
	.func = ftrace_traceon,
};

static struct ftrace_hook_ops traceoff_hook_ops = {
	.func = ftrace_traceoff,
};

static int
ftrace_trace_onoff_unreg(char *glob, char *cmd, char *param)
{
	struct ftrace_hook_ops *ops;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = &traceon_hook_ops;
	else
		ops = &traceoff_hook_ops;

	unregister_ftrace_function_hook_func(glob, ops);

	return 0;
}

static int
ftrace_trace_onoff_callback(char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_hook_ops *ops;
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!')
		return ftrace_trace_onoff_unreg(glob+1, cmd, param);

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = &traceon_hook_ops;
	else
		ops = &traceoff_hook_ops;

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = strict_strtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_hook(glob, ops, count);

	return ret;
}
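
/*
 * These commands are used through set_ftrace_filter (see the
 * !enable check above).  A sketch of the syntax the callback
 * implements; "schedule" is only an example function name:
 *
 *   echo 'schedule:traceoff' > set_ftrace_filter
 *   echo 'schedule:traceon:5' > set_ftrace_filter
 *   echo '!schedule:traceoff' > set_ftrace_filter
 *
 * The optional ":count" limits how many times the hook fires; a
 * leading '!' removes a previously installed hook.
 */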

static struct ftrace_func_command ftrace_traceon_cmd = {
	.name = "traceon",
	.func = ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name = "traceoff",
	.func = ftrace_trace_onoff_callback,
};

static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		unregister_ftrace_command(&ftrace_traceoff_cmd);
	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

static __init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}

device_initcall(init_function_trace);