/* kernel/trace/trace_stack.c */
/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>

#include <asm/setup.h>

#include "trace.h"

#define STACK_TRACE_ENTRIES 500

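/*
 * stack_dump_trace[] holds the return addresses of the deepest stack seen
 * so far, terminated by ULONG_MAX.  For each entry, stack_dump_index[]
 * records the cumulative stack usage at the point where that return
 * address was found (the "Depth" column of the output); the difference
 * between neighbouring entries is the size of the individual frame.
 */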
static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
         { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
static unsigned stack_dump_index[STACK_TRACE_ENTRIES];

/*
 * Reserve one entry for the passed in ip. This will allow
 * us to remove most or all of the stack size overhead
 * added by the stack tracer itself.
 */
static struct stack_trace max_stack_trace = {
        .max_entries = STACK_TRACE_ENTRIES - 1,
        .entries = &stack_dump_trace[0],
};

static unsigned long max_stack_size;
static arch_spinlock_t max_stack_lock =
        (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static DEFINE_PER_CPU(int, trace_active);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;

static inline void print_max_stack(void)
{
        long i;
        int size;

        pr_emerg("        Depth    Size   Location    (%d entries)\n"
                 "        -----    ----   --------\n",
                 max_stack_trace.nr_entries);

        for (i = 0; i < max_stack_trace.nr_entries; i++) {
                if (stack_dump_trace[i] == ULONG_MAX)
                        break;
                if (i+1 == max_stack_trace.nr_entries ||
                    stack_dump_trace[i+1] == ULONG_MAX)
                        size = stack_dump_index[i];
                else
                        size = stack_dump_index[i] - stack_dump_index[i+1];

                pr_emerg("%3ld) %8d   %5d   %pS\n", i, stack_dump_index[i],
                         size, (void *)stack_dump_trace[i]);
        }
}

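/*
 * check_stack() runs from the function tracer callback.  @ip is the
 * address of the traced function and @stack points at a local variable
 * of the caller, which serves as an approximation of the current stack
 * position.  When stack usage exceeds the recorded maximum, the new
 * backtrace and per-frame sizes are captured under max_stack_lock.
 */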
static inline void
check_stack(unsigned long ip, unsigned long *stack)
{
        unsigned long this_size, flags; unsigned long *p, *top, *start;
        static int tracer_frame;
        int frame_size = ACCESS_ONCE(tracer_frame);
        int i, x;

        this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
        this_size = THREAD_SIZE - this_size;
        /* Remove the frame of the tracer */
        this_size -= frame_size;

        if (this_size <= max_stack_size)
                return;

        /* we do not handle interrupt stacks yet */
        if (!object_is_on_stack(stack))
                return;

        local_irq_save(flags);
        arch_spin_lock(&max_stack_lock);

        /* In case another CPU set the tracer_frame on us */
        if (unlikely(!frame_size))
                this_size -= tracer_frame;

        /* a race could have already updated it */
        if (this_size <= max_stack_size)
                goto out;

        max_stack_size = this_size;

        max_stack_trace.nr_entries = 0;
        max_stack_trace.skip = 3;

        save_stack_trace(&max_stack_trace);

        /* Skip over the overhead of the stack tracer itself */
        for (i = 0; i < max_stack_trace.nr_entries; i++) {
                if (stack_dump_trace[i] == ip)
                        break;
        }

        /*
         * Now find where in the stack these are.
         */
        x = 0;
        start = stack;
        top = (unsigned long *)
                (((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

        /*
         * Loop through all the entries. One of the entries may
         * for some reason be missed on the stack, so we may
         * have to account for them. If they are all there, this
         * loop will only happen once. This code only takes place
         * on a new max, so it is far from a fast path.
         */
        while (i < max_stack_trace.nr_entries) {
                int found = 0;

                stack_dump_index[x] = this_size;
                p = start;

                for (; p < top && i < max_stack_trace.nr_entries; p++) {
                        if (stack_dump_trace[i] == ULONG_MAX)
                                break;
                        if (*p == stack_dump_trace[i]) {
                                stack_dump_trace[x] = stack_dump_trace[i++];
                                this_size = stack_dump_index[x++] =
                                        (top - p) * sizeof(unsigned long);
                                found = 1;
                                /* Start the search from here */
                                start = p + 1;
                                /*
                                 * We do not want to show the overhead
                                 * of the stack tracer stack in the
                                 * max stack. If we haven't figured
                                 * out what that is, then figure it out
                                 * now.
                                 */
                                if (unlikely(!tracer_frame)) {
                                        tracer_frame = (p - stack) *
                                                sizeof(unsigned long);
                                        max_stack_size -= tracer_frame;
                                }
                        }
                }

                if (!found)
                        i++;
        }

        max_stack_trace.nr_entries = x;
        for (; x < i; x++)
                stack_dump_trace[x] = ULONG_MAX;

        if (task_stack_end_corrupted(current)) {
                print_max_stack();
                BUG();
        }

 out:
        arch_spin_unlock(&max_stack_lock);
        local_irq_restore(flags);
}

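/*
 * stack_trace_call() is the ftrace callback invoked on function entry.
 * The per-cpu trace_active counter guards against recursion, and the
 * address of the local variable 'stack' is handed to check_stack() as
 * the current stack position.
 */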
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
                 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
        unsigned long stack;
        int cpu;

        preempt_disable_notrace();

        cpu = raw_smp_processor_id();
        /* no atomic needed, we only modify this variable by this cpu */
        if (per_cpu(trace_active, cpu)++ != 0)
                goto out;

        ip += MCOUNT_INSN_SIZE;

        check_stack(ip, &stack);

 out:
        per_cpu(trace_active, cpu)--;
        /* prevent recursion in schedule */
        preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
        .func = stack_trace_call,
        .flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
                    size_t count, loff_t *ppos)
{
        unsigned long *ptr = filp->private_data;
        char buf[64];
        int r;

        r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
        if (r > sizeof(buf))
                r = sizeof(buf);
        return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
                     size_t count, loff_t *ppos)
{
        long *ptr = filp->private_data;
        unsigned long val, flags;
        int ret;
        int cpu;

        ret = kstrtoul_from_user(ubuf, count, 10, &val);
        if (ret)
                return ret;

        local_irq_save(flags);

        /*
         * In case we trace inside arch_spin_lock() or after (NMI),
         * we will cause circular lock, so we also need to increase
         * the percpu trace_active here.
         */
        cpu = smp_processor_id();
        per_cpu(trace_active, cpu)++;

        arch_spin_lock(&max_stack_lock);
        *ptr = val;
        arch_spin_unlock(&max_stack_lock);

        per_cpu(trace_active, cpu)--;
        local_irq_restore(flags);

        return count;
}

static const struct file_operations stack_max_size_fops = {
        .open = tracing_open_generic,
        .read = stack_max_size_read,
        .write = stack_max_size_write,
        .llseek = default_llseek,
};

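/*
 * seq_file iterator behind the "stack_trace" file in the tracing
 * directory: t_start() disables interrupts, bumps the per-cpu
 * trace_active counter and takes max_stack_lock so the saved trace
 * cannot change while it is being printed; __next()/t_next() step
 * through stack_dump_trace[] and t_stop() releases everything in
 * reverse order.
 */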
static void *
__next(struct seq_file *m, loff_t *pos)
{
        long n = *pos - 1;

        if (n > max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX)
                return NULL;

        m->private = (void *)n;
        return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        (*pos)++;
        return __next(m, pos);
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
        int cpu;

        local_irq_disable();

        cpu = smp_processor_id();
        per_cpu(trace_active, cpu)++;

        arch_spin_lock(&max_stack_lock);

        if (*pos == 0)
                return SEQ_START_TOKEN;

        return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
        int cpu;

        arch_spin_unlock(&max_stack_lock);

        cpu = smp_processor_id();
        per_cpu(trace_active, cpu)--;

        local_irq_enable();
}

static void trace_lookup_stack(struct seq_file *m, long i)
{
        unsigned long addr = stack_dump_trace[i];

        seq_printf(m, "%pS\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
        seq_puts(m, "#\n"
                 "#  Stack tracer disabled\n"
                 "#\n"
                 "# To enable the stack tracer, either add 'stacktrace' to the\n"
                 "# kernel command line\n"
                 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
                 "#\n");
}

static int t_show(struct seq_file *m, void *v)
{
        long i;
        int size;

        if (v == SEQ_START_TOKEN) {
                seq_printf(m, "        Depth    Size   Location"
                           "    (%d entries)\n"
                           "        -----    ----   --------\n",
                           max_stack_trace.nr_entries);

                if (!stack_tracer_enabled && !max_stack_size)
                        print_disabled(m);

                return 0;
        }

        i = *(long *)v;

        if (i >= max_stack_trace.nr_entries ||
            stack_dump_trace[i] == ULONG_MAX)
                return 0;

        if (i+1 == max_stack_trace.nr_entries ||
            stack_dump_trace[i+1] == ULONG_MAX)
                size = stack_dump_index[i];
        else
                size = stack_dump_index[i] - stack_dump_index[i+1];

        seq_printf(m, "%3ld) %8d   %5d   ", i, stack_dump_index[i], size);

        trace_lookup_stack(m, i);

        return 0;
}

static const struct seq_operations stack_trace_seq_ops = {
        .start = t_start,
        .next = t_next,
        .stop = t_stop,
        .show = t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
        .open = stack_trace_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release,
};

static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
        return ftrace_regex_open(&trace_ops, FTRACE_ITER_FILTER,
                                 inode, file);
}

static const struct file_operations stack_trace_filter_fops = {
        .open = stack_trace_filter_open,
        .read = seq_read,
        .write = ftrace_filter_write,
        .llseek = tracing_lseek,
        .release = ftrace_regex_release,
};

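/*
 * Handler for the stack_tracer_enabled sysctl
 * (/proc/sys/kernel/stack_tracer_enabled): when a write changes the
 * value, the ftrace callback is registered or unregistered accordingly,
 * serialized by stack_sysctl_mutex.
 */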
int
stack_trace_sysctl(struct ctl_table *table, int write,
                   void __user *buffer, size_t *lenp,
                   loff_t *ppos)
{
        int ret;

        mutex_lock(&stack_sysctl_mutex);

        ret = proc_dointvec(table, write, buffer, lenp, ppos);

        if (ret || !write ||
            (last_stack_tracer_enabled == !!stack_tracer_enabled))
                goto out;

        last_stack_tracer_enabled = !!stack_tracer_enabled;

        if (stack_tracer_enabled)
                register_ftrace_function(&trace_ops);
        else
                unregister_ftrace_function(&trace_ops);

 out:
        mutex_unlock(&stack_sysctl_mutex);
        return ret;
}

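/*
 * "stacktrace" on the kernel command line enables the stack tracer at
 * boot.  Since __setup() matches on the "stacktrace" prefix,
 * "stacktrace_filter=<functions>" is handled here as well: the text
 * after "_filter=" is saved and later passed to
 * ftrace_set_early_filter() by stack_trace_init() so that only the
 * listed functions are traced.
 */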
static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

static __init int enable_stacktrace(char *str)
{
        if (strncmp(str, "_filter=", 8) == 0)
                strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);

        stack_tracer_enabled = 1;
        last_stack_tracer_enabled = 1;
        return 1;
}
__setup("stacktrace", enable_stacktrace);

static __init int stack_trace_init(void)
{
        struct dentry *d_tracer;

        d_tracer = tracing_init_dentry();
        if (IS_ERR(d_tracer))
                return 0;

        trace_create_file("stack_max_size", 0644, d_tracer,
                        &max_stack_size, &stack_max_size_fops);

        trace_create_file("stack_trace", 0444, d_tracer,
                        NULL, &stack_trace_fops);

        trace_create_file("stack_trace_filter", 0444, d_tracer,
                        NULL, &stack_trace_filter_fops);

        if (stack_trace_filter_buf[0])
                ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

        if (stack_tracer_enabled)
                register_ftrace_function(&trace_ops);

        return 0;
}

device_initcall(stack_trace_init);