/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>

#include <asm/setup.h>

#include "trace.h"

#define STACK_TRACE_ENTRIES 500

static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
static unsigned stack_dump_index[STACK_TRACE_ENTRIES];

/*
 * Reserve one entry for the passed in ip. This will allow
 * us to remove most or all of the stack size overhead
 * added by the stack tracer itself.
 */
static struct stack_trace max_stack_trace = {
	.max_entries		= STACK_TRACE_ENTRIES - 1,
	.entries		= &stack_dump_trace[0],
};

static unsigned long max_stack_size;
static arch_spinlock_t max_stack_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static DEFINE_PER_CPU(int, trace_active);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;

static inline void print_max_stack(void)
{
	long i;
	int size;

	pr_emerg("        Depth    Size   Location    (%d entries)\n"
			   "        -----    ----   --------\n",
			   max_stack_trace.nr_entries);

	for (i = 0; i < max_stack_trace.nr_entries; i++) {
		if (stack_dump_trace[i] == ULONG_MAX)
			break;
		if (i+1 == max_stack_trace.nr_entries ||
		    stack_dump_trace[i+1] == ULONG_MAX)
			size = stack_dump_index[i];
		else
			size = stack_dump_index[i] - stack_dump_index[i+1];

		pr_emerg("%3ld) %8d   %5d   %pS\n", i, stack_dump_index[i],
			 size, (void *)stack_dump_trace[i]);
	}
}

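/*
 * check_stack() is called from the function tracer callback with the
 * address of the traced function (ip) and the address of a local
 * variable (stack) that marks the current stack depth. If the depth
 * exceeds the recorded maximum, it takes a fresh stack trace and walks
 * the raw stack to match each saved entry to its frame, so that the
 * per-function stack usage can be reported via the stack_trace file.
 */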
static inline void
check_stack(unsigned long ip, unsigned long *stack)
{
	unsigned long this_size, flags; unsigned long *p, *top, *start;
	static int tracer_frame;
	int frame_size = ACCESS_ONCE(tracer_frame);
	int i, x;

	this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;
	/* Remove the frame of the tracer */
	this_size -= frame_size;

	if (this_size <= max_stack_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(stack))
		return;

	/* Can't do this from NMI context (can cause deadlocks) */
	if (in_nmi())
		return;

	local_irq_save(flags);
	arch_spin_lock(&max_stack_lock);

	/*
	 * RCU may not be watching, make it see us.
	 * The stack trace code uses rcu_sched.
	 */
	rcu_irq_enter();

	/* In case another CPU set the tracer_frame on us */
	if (unlikely(!frame_size))
		this_size -= tracer_frame;

	/* a race could have already updated it */
	if (this_size <= max_stack_size)
		goto out;

	max_stack_size = this_size;

	max_stack_trace.nr_entries = 0;
	max_stack_trace.skip = 3;

	save_stack_trace(&max_stack_trace);

	/* Skip over the overhead of the stack tracer itself */
	for (i = 0; i < max_stack_trace.nr_entries; i++) {
		if (stack_dump_trace[i] == ip)
			break;
	}

	/*
	 * Now find where in the stack these are.
	 */
	x = 0;
	start = stack;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. One of the entries may
	 * for some reason be missed on the stack, so we may
	 * have to account for them. If they are all there, this
	 * loop will only happen once. This code only takes place
	 * on a new max, so it is far from a fast path.
	 */
	while (i < max_stack_trace.nr_entries) {
		int found = 0;

		stack_dump_index[x] = this_size;
		p = start;

		for (; p < top && i < max_stack_trace.nr_entries; p++) {
			if (stack_dump_trace[i] == ULONG_MAX)
				break;
			if (*p == stack_dump_trace[i]) {
				stack_dump_trace[x] = stack_dump_trace[i++];
				this_size = stack_dump_index[x++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
				/*
				 * We do not want to show the overhead
				 * of the stack tracer stack in the
				 * max stack. If we haven't figured
				 * out what that is, then figure it out
				 * now.
				 */
				if (unlikely(!tracer_frame)) {
					tracer_frame = (p - stack) *
						sizeof(unsigned long);
					max_stack_size -= tracer_frame;
				}
			}
		}

		if (!found)
			i++;
	}

	max_stack_trace.nr_entries = x;
	for (; x < i; x++)
		stack_dump_trace[x] = ULONG_MAX;

	if (task_stack_end_corrupted(current)) {
		print_max_stack();
		BUG();
	}

 out:
	rcu_irq_exit();
	arch_spin_unlock(&max_stack_lock);
	local_irq_restore(flags);
}

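/*
 * stack_trace_call() is the ftrace callback invoked on every traced
 * function. The per-cpu trace_active counter guards against recursion:
 * the stack check is skipped when this CPU is already inside the
 * tracer. The ip is advanced past the mcount call site before being
 * handed to check_stack().
 */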
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
		 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	unsigned long stack;
	int cpu;

	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	/* no atomic needed, we only modify this variable by this cpu */
	if (per_cpu(trace_active, cpu)++ != 0)
		goto out;

	ip += MCOUNT_INSN_SIZE;

	check_stack(ip, &stack);

 out:
	per_cpu(trace_active, cpu)--;
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

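/*
 * Handlers for the "stack_max_size" file in the tracing directory.
 * Reads report the recorded maximum stack usage; writes reset (or
 * preset) it. The writer, like the seq_file iterator below, takes
 * max_stack_lock and bumps the per-cpu trace_active counter first so
 * the tracer does not recurse on the same lock.
 */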
static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	int ret;
	int cpu;

	ret = kstrtoul_from_user(ubuf, count, 10, &val);
	if (ret)
		return ret;

	local_irq_save(flags);

	/*
	 * In case we trace inside arch_spin_lock() or after (NMI),
	 * we will cause circular lock, so we also need to increase
	 * the percpu trace_active here.
	 */
	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);
	*ptr = val;
	arch_spin_unlock(&max_stack_lock);

	per_cpu(trace_active, cpu)--;
	local_irq_restore(flags);

	return count;
}

static const struct file_operations stack_max_size_fops = {
	.open		= tracing_open_generic,
	.read		= stack_max_size_read,
	.write		= stack_max_size_write,
	.llseek		= default_llseek,
};

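/*
 * seq_file iterator for the "stack_trace" file. t_start() disables
 * interrupts, marks this CPU as active in the tracer and takes
 * max_stack_lock so the snapshot cannot change while it is printed;
 * t_stop() undoes that in reverse order. Each position indexes one
 * entry of stack_dump_trace[]/stack_dump_index[].
 */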
static void *
__next(struct seq_file *m, loff_t *pos)
{
	long n = *pos - 1;

	if (n > max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX)
		return NULL;

	m->private = (void *)n;
	return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __next(m, pos);
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	int cpu;

	local_irq_disable();

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
	int cpu;

	arch_spin_unlock(&max_stack_lock);

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)--;

	local_irq_enable();
}

static void trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];

	seq_printf(m, "%pS\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "#  Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}

static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth    Size   Location"
			   "    (%d entries)\n"
			   "        -----    ----   --------\n",
			   max_stack_trace.nr_entries);

		if (!stack_tracer_enabled && !max_stack_size)
			print_disabled(m);

		return 0;
	}

	i = *(long *)v;

	if (i >= max_stack_trace.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return 0;

	if (i+1 == max_stack_trace.nr_entries ||
	    stack_dump_trace[i+1] == ULONG_MAX)
		size = stack_dump_index[i];
	else
		size = stack_dump_index[i] - stack_dump_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_dump_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}

static const struct seq_operations stack_trace_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
	.open		= stack_trace_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(&trace_ops, FTRACE_ITER_FILTER,
				 inode, file);
}

static const struct file_operations stack_trace_filter_fops = {
	.open = stack_trace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = tracing_lseek,
	.release = ftrace_regex_release,
};

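/*
 * Handler for /proc/sys/kernel/stack_tracer_enabled. When the value
 * actually changes, the stack tracer's ftrace_ops is registered or
 * unregistered accordingly; stack_sysctl_mutex serializes concurrent
 * writers.
 */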
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos)
{
	int ret;

	mutex_lock(&stack_sysctl_mutex);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write ||
	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
		goto out;

	last_stack_tracer_enabled = !!stack_tracer_enabled;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);

 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}

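/*
 * "stacktrace" on the kernel command line enables the stack tracer at
 * boot; "stacktrace_filter=<funcs>" additionally limits it to the given
 * functions (the filter string is applied from stack_trace_init()).
 */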
static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

static __init int enable_stacktrace(char *str)
{
	if (strncmp(str, "_filter=", 8) == 0)
		strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);

	stack_tracer_enabled = 1;
	last_stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);

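/*
 * Create the stack_max_size, stack_trace and stack_trace_filter files
 * in the tracing directory, apply any boot-time filter, and register
 * the callback if the tracer was enabled on the command line.
 */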
static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("stack_max_size", 0644, d_tracer,
			&max_stack_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			NULL, &stack_trace_fops);

	trace_create_file("stack_trace_filter", 0444, d_tracer,
			NULL, &stack_trace_filter_fops);

	if (stack_trace_filter_buf[0])
		ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);