/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/fs.h>

#include <asm/setup.h>

#include "trace.h"

#define STACK_TRACE_ENTRIES 500

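/*
 * With -mfentry (CC_USING_FENTRY) the compiler emits the tracer call
 * at the very start of the function, before a stack frame is set up;
 * stack_trace_call() below uses this flag to pick a useful ip.
 */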
#ifdef CC_USING_FENTRY
# define fentry		1
#else
# define fentry		0
#endif

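/*
 * stack_dump_trace[] holds the return addresses of the deepest stack
 * seen so far, terminated by ULONG_MAX; stack_dump_index[] holds the
 * depth in bytes at which each address was found on the stack.
 */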
static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
static unsigned stack_dump_index[STACK_TRACE_ENTRIES];

/*
 * Reserve one entry for the passed in ip. This will allow
 * us to remove most or all of the stack size overhead
 * added by the stack tracer itself.
 */
static struct stack_trace max_stack_trace = {
	.max_entries		= STACK_TRACE_ENTRIES - 1,
	.entries		= &stack_dump_trace[1],
};

static unsigned long max_stack_size;
static arch_spinlock_t max_stack_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

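/* Per-cpu recursion guard: non-zero while this cpu is inside the tracer */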
static DEFINE_PER_CPU(int, trace_active);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;

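/* Dump the recorded max stack to the console (used on stack corruption) */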
static inline void print_max_stack(void)
{
	long i;
	int size;

	pr_emerg("        Depth    Size   Location    (%d entries)\n"
		 "        -----    ----   --------\n",
		 max_stack_trace.nr_entries - 1);

	for (i = 0; i < max_stack_trace.nr_entries; i++) {
		if (stack_dump_trace[i] == ULONG_MAX)
			break;
		if (i+1 == max_stack_trace.nr_entries ||
		    stack_dump_trace[i+1] == ULONG_MAX)
			size = stack_dump_index[i];
		else
			size = stack_dump_index[i] - stack_dump_index[i+1];

		pr_emerg("%3ld) %8d   %5d   %pS\n", i, stack_dump_index[i],
			 size, (void *)stack_dump_trace[i]);
	}
}

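/*
 * Called with the traced function's ip and a pointer into the current
 * stack. If stack usage is a new maximum, record every return address
 * and its depth under max_stack_lock.
 */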
static inline void
check_stack(unsigned long ip, unsigned long *stack)
{
	unsigned long this_size, flags;
	unsigned long *p, *top, *start;
	static int tracer_frame;
	int frame_size = ACCESS_ONCE(tracer_frame);
	int i;

	this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;
	/* Remove the frame of the tracer */
	this_size -= frame_size;

	if (this_size <= max_stack_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(stack))
		return;

	local_irq_save(flags);
	arch_spin_lock(&max_stack_lock);

	/* In case another CPU set the tracer_frame on us */
	if (unlikely(!frame_size))
		this_size -= tracer_frame;

	/* a race could have already updated it */
	if (this_size <= max_stack_size)
		goto out;

	max_stack_size = this_size;

	max_stack_trace.nr_entries = 0;

	if (using_ftrace_ops_list_func())
		max_stack_trace.skip = 4;
	else
		max_stack_trace.skip = 3;

	save_stack_trace(&max_stack_trace);

	/*
	 * Add the passed in ip from the function tracer.
	 * Searching for this on the stack will skip over
	 * most of the overhead from the stack tracer itself.
	 */
	stack_dump_trace[0] = ip;
	max_stack_trace.nr_entries++;

	/*
	 * Now find where in the stack these are.
	 */
	i = 0;
	start = stack;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. One of the entries may
	 * for some reason be missed on the stack, so we may
	 * have to account for them. If they are all there, this
	 * loop will only happen once. This code only takes place
	 * on a new max, so it is far from a fast path.
	 */
	while (i < max_stack_trace.nr_entries) {
		int found = 0;

		stack_dump_index[i] = this_size;
		p = start;

		for (; p < top && i < max_stack_trace.nr_entries; p++) {
			if (*p == stack_dump_trace[i]) {
				this_size = stack_dump_index[i++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
				/*
				 * We do not want to show the overhead
				 * of the stack tracer stack in the
				 * max stack. If we haven't figured
				 * out what that is, then figure it out
				 * now.
				 */
				if (unlikely(!tracer_frame) && i == 1) {
					tracer_frame = (p - stack) *
						sizeof(unsigned long);
					max_stack_size -= tracer_frame;
				}
			}
		}

		if (!found)
			i++;
	}

	if (task_stack_end_corrupted(current)) {
		print_max_stack();
		BUG();
	}

 out:
	arch_spin_unlock(&max_stack_lock);
	local_irq_restore(flags);
}

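/* ftrace callback, invoked at the entry of (nearly) every traced function */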
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
		 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	unsigned long stack;
	int cpu;

	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	/* no atomic needed, we only modify this variable by this cpu */
	if (per_cpu(trace_active, cpu)++ != 0)
		goto out;

	/*
	 * When fentry is used, the traced function does not get
	 * its stack frame set up, and we lose the parent.
	 * The ip is pretty useless because the function tracer
	 * was called before that function set up its stack frame.
	 * In this case, we use the parent ip.
	 *
	 * By adding the return address of either the parent ip
	 * or the current ip we can disregard most of the stack usage
	 * caused by the stack tracer itself.
	 *
	 * The function tracer always reports the address of where the
	 * mcount call was, but the stack will hold the return address.
	 */
	if (fentry)
		ip = parent_ip;
	else
		ip += MCOUNT_INSN_SIZE;

	check_stack(ip, &stack);

 out:
	per_cpu(trace_active, cpu)--;
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

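/* Handlers for the 'stack_max_size' debugfs file created below */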
static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	int ret;
	int cpu;

	ret = kstrtoul_from_user(ubuf, count, 10, &val);
	if (ret)
		return ret;

	local_irq_save(flags);

	/*
	 * In case we trace inside arch_spin_lock() or after (NMI),
	 * we will cause circular lock, so we also need to increase
	 * the percpu trace_active here.
	 */
	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);
	*ptr = val;
	arch_spin_unlock(&max_stack_lock);

	per_cpu(trace_active, cpu)--;
	local_irq_restore(flags);

	return count;
}

static const struct file_operations stack_max_size_fops = {
	.open = tracing_open_generic,
	.read = stack_max_size_read,
	.write = stack_max_size_write,
	.llseek = default_llseek,
};

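/*
 * seq_file iterator for the 'stack_trace' debugfs file: *pos == 0 is
 * the header (SEQ_START_TOKEN) and entry n is found at *pos == n + 1.
 */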
static void *
__next(struct seq_file *m, loff_t *pos)
{
	long n = *pos - 1;

	if (n >= max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX)
		return NULL;

	m->private = (void *)n;
	return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __next(m, pos);
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	int cpu;

	local_irq_disable();

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
	int cpu;

	arch_spin_unlock(&max_stack_lock);

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)--;

	local_irq_enable();
}

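/* Print the symbol for one recorded return address */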
static int trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];

	return seq_printf(m, "%pS\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "# Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}

static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth    Size   Location"
			   "    (%d entries)\n"
			   "        -----    ----   --------\n",
			   max_stack_trace.nr_entries - 1);

		if (!stack_tracer_enabled && !max_stack_size)
			print_disabled(m);

		return 0;
	}

	i = *(long *)v;

	if (i >= max_stack_trace.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return 0;

	if (i+1 == max_stack_trace.nr_entries ||
	    stack_dump_trace[i+1] == ULONG_MAX)
		size = stack_dump_index[i];
	else
		size = stack_dump_index[i] - stack_dump_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_dump_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}

static const struct seq_operations stack_trace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
	.open = stack_trace_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

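/*
 * 'stack_trace_filter' selects which functions the stack tracer
 * checks, using the standard ftrace filter syntax.
 */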
static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(&trace_ops, FTRACE_ITER_FILTER,
				 inode, file);
}

static const struct file_operations stack_trace_filter_fops = {
	.open = stack_trace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = tracing_lseek,
	.release = ftrace_regex_release,
};

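/*
 * Handler for /proc/sys/kernel/stack_tracer_enabled: register or
 * unregister the ftrace callback only when the value actually changes.
 */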
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos)
{
	int ret;

	mutex_lock(&stack_sysctl_mutex);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write ||
	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
		goto out;

	last_stack_tracer_enabled = !!stack_tracer_enabled;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);

 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}

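/*
 * Holds a "stacktrace_filter=" boot-time filter; it is applied in
 * stack_trace_init() once ftrace is set up.
 */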
static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

static __init int enable_stacktrace(char *str)
{
	if (strncmp(str, "_filter=", 8) == 0)
		strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);

	stack_tracer_enabled = 1;
	last_stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);

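/* Create the debugfs files and start tracing if enabled on the command line */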
static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	trace_create_file("stack_max_size", 0644, d_tracer,
			  &max_stack_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			  NULL, &stack_trace_fops);

	trace_create_file("stack_trace_filter", 0444, d_tracer,
			  NULL, &stack_trace_filter_fops);

	if (stack_trace_filter_buf[0])
		ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);