kernel/trace/trace_functions.c
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/pstore.h>
#include <linux/fs.h>

#include "trace.h"

/* function tracing enabled */
static int ftrace_function_enabled;

static struct trace_array *func_trace;

static void tracing_start_function_trace(void);
static void tracing_stop_function_trace(void);

static int function_trace_init(struct trace_array *tr)
{
	func_trace = tr;
	tr->cpu = get_cpu();
	put_cpu();

	tracing_start_cmdline_record();
	tracing_start_function_trace();
	return 0;
}

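/*
 * Usage sketch: the function tracer is selected from userspace through
 * the tracing debugfs interface (usually mounted at /sys/kernel/debug):
 *
 *	echo function > /sys/kernel/debug/tracing/current_tracer
 *	cat /sys/kernel/debug/tracing/trace
 *
 * Writing "function" to current_tracer is what ends up calling
 * function_trace_init() above.
 */
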
static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace();
	tracing_stop_cmdline_record();
}

static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(tr);
}

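/*
 * Note on the callbacks below: data->disabled is a per-cpu re-entrancy
 * guard.  Only the outermost call on a CPU (the one that sees
 * disabled == 1 after atomic_inc_return()) records an event; nested or
 * recursive calls on the same CPU bail out without touching the buffer.
 */
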
static void
function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	pc = preempt_count();
	preempt_disable_notrace();
	local_save_flags(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		trace_function(tr, ip, parent_ip, flags, pc);

	atomic_dec(&data->disabled);
	preempt_enable_notrace();
}

/* Our two options */
enum {
	TRACE_FUNC_OPT_STACK	= 0x1,
	TRACE_FUNC_OPT_PSTORE	= 0x2,
};

static struct tracer_flags func_flags;

static void
function_trace_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		/*
		 * So far tracing doesn't support multiple buffers, so
		 * we make an explicit call for now.
		 */
		if (unlikely(func_flags.val & TRACE_FUNC_OPT_PSTORE))
			pstore_ftrace_call(ip, parent_ip);
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
		/*
		 * skip over 5 funcs:
		 *    __ftrace_trace_stack,
		 *    __trace_stack,
		 *    function_stack_trace_call
		 *    ftrace_list_func
		 *    ftrace_call
		 */
		__trace_stack(tr, flags, 5, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}


static struct ftrace_ops trace_ops __read_mostly =
{
	.func = function_trace_call,
	.flags = FTRACE_OPS_FL_GLOBAL,
};

static struct ftrace_ops trace_stack_ops __read_mostly =
{
	.func = function_stack_trace_call,
	.flags = FTRACE_OPS_FL_GLOBAL,
};

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
#ifdef CONFIG_PSTORE_FTRACE
	{ TRACER_OPT(func_pstore, TRACE_FUNC_OPT_PSTORE) },
#endif
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = func_opts
};

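/*
 * Usage sketch: while the function tracer is current, the flags above
 * show up as tracer options and can be toggled at runtime, e.g.:
 *
 *	echo 1 > /sys/kernel/debug/tracing/options/func_stack_trace
 *	echo func_stack_trace > /sys/kernel/debug/tracing/trace_options
 *
 * Toggling an option lands in func_set_flag() below.
 */
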
static void tracing_start_function_trace(void)
{
	ftrace_function_enabled = 0;

	if (trace_flags & TRACE_ITER_PREEMPTONLY)
		trace_ops.func = function_trace_call_preempt_only;
	else
		trace_ops.func = function_trace_call;

	if (func_flags.val & TRACE_FUNC_OPT_STACK)
		register_ftrace_function(&trace_stack_ops);
	else
		register_ftrace_function(&trace_ops);

	ftrace_function_enabled = 1;
}

static void tracing_stop_function_trace(void)
{
	ftrace_function_enabled = 0;

	if (func_flags.val & TRACE_FUNC_OPT_STACK)
		unregister_ftrace_function(&trace_stack_ops);
	else
		unregister_ftrace_function(&trace_ops);
}

static int func_set_flag(u32 old_flags, u32 bit, int set)
{
	switch (bit) {
	case TRACE_FUNC_OPT_STACK:
		/* do nothing if already set */
		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
			break;

		if (set) {
			unregister_ftrace_function(&trace_ops);
			register_ftrace_function(&trace_stack_ops);
		} else {
			unregister_ftrace_function(&trace_stack_ops);
			register_ftrace_function(&trace_ops);
		}

		break;
	case TRACE_FUNC_OPT_PSTORE:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static struct tracer function_trace __read_mostly =
{
	.name		= "function",
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.start		= function_trace_start,
	.wait_pipe	= poll_wait_pipe,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function,
#endif
};

#ifdef CONFIG_DYNAMIC_FTRACE
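/*
 * traceon/traceoff function probes.  The probe data pointer carries the
 * remaining trigger count: -1 means unlimited, otherwise each hit
 * decrements it and the probe stops doing anything once it reaches zero.
 */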
static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
{
	long *count = (long *)data;

	if (tracing_is_on())
		return;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_on();
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
{
	long *count = (long *)data;

	if (!tracing_is_on())
		return;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_off();
}

static int
ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
			 struct ftrace_probe_ops *ops, void *data);

static struct ftrace_probe_ops traceon_probe_ops = {
	.func = ftrace_traceon,
	.print = ftrace_trace_onoff_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func = ftrace_traceoff,
	.print = ftrace_trace_onoff_print,
};

static int
ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
			 struct ftrace_probe_ops *ops, void *data)
{
	long count = (long)data;

	seq_printf(m, "%ps:", (void *)ip);

	if (ops == &traceon_probe_ops)
		seq_printf(m, "traceon");
	else
		seq_printf(m, "traceoff");

	if (count == -1)
		seq_printf(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", count);

	return 0;
}

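/*
 * The ->print callback above is what formats a registered probe when
 * set_ftrace_filter is read, producing lines such as
 * "schedule:traceoff:count=5" or "do_fork:traceon:unlimited".
 */
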
static int
ftrace_trace_onoff_unreg(char *glob, char *cmd, char *param)
{
	struct ftrace_probe_ops *ops;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = &traceon_probe_ops;
	else
		ops = &traceoff_probe_ops;

	unregister_ftrace_function_probe_func(glob, ops);

	return 0;
}

static int
ftrace_trace_onoff_callback(struct ftrace_hash *hash,
			    char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!')
		return ftrace_trace_onoff_unreg(glob+1, cmd, param);

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = &traceon_probe_ops;
	else
		ops = &traceoff_probe_ops;

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = strict_strtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, ops, count);

	return ret < 0 ? ret : 0;
}

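/*
 * Usage sketch (see Documentation/trace/ftrace.txt): the traceon/traceoff
 * commands are attached to functions through set_ftrace_filter, e.g.:
 *
 *	echo 'schedule:traceoff' > set_ftrace_filter
 *	echo 'do_fork:traceon:5' > set_ftrace_filter	(trigger at most 5 times)
 *	echo '!do_fork:traceon' > set_ftrace_filter	(remove the probe again)
 *
 * The optional ":count" is parsed above and stored in the probe data.
 */
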
static struct ftrace_func_command ftrace_traceon_cmd = {
	.name = "traceon",
	.func = ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name = "traceoff",
	.func = ftrace_trace_onoff_callback,
};

static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		unregister_ftrace_command(&ftrace_traceoff_cmd);
	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

static __init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}
device_initcall(init_function_trace);