[PATCH] x86: Some preparatory cleanup for stack trace
/*
 * arch/i386/kernel/stacktrace.c
 *
 * Stack trace management functions
 *
 * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 */
#include <linux/sched.h>
#include <linux/stacktrace.h>
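
/*
 * A pointer is a valid stack address for this task if it lies within
 * the task's thread_info/stack area; the "- 3" keeps a full 32-bit
 * word readable at @p without running past the end of the stack.
 */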
static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
{
	return	p > (void *)tinfo &&
		p < (void *)tinfo + THREAD_SIZE - 3;
}

/*
 * Save stack-backtrace addresses into a stack_trace buffer:
 */
static inline unsigned long
save_context_stack(struct stack_trace *trace, unsigned int skip,
		   struct thread_info *tinfo, unsigned long *stack,
		   unsigned long ebp)
{
	unsigned long addr;

#ifdef CONFIG_FRAME_POINTER
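	/*
	 * With frame pointers, each stack frame starts with the saved
	 * caller %ebp, and the return address sits right above it at
	 * ebp + 4; following the saved-%ebp chain gives a reliable trace.
	 */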
	while (valid_stack_ptr(tinfo, (void *)ebp)) {
		addr = *(unsigned long *)(ebp + 4);
		if (!skip)
			trace->entries[trace->nr_entries++] = addr;
		else
			skip--;
		if (trace->nr_entries >= trace->max_entries)
			break;
		/*
		 * break out of recursive entries (such as
		 * end_of_stack_stop_unwind_function):
		 */
		if (ebp == *(unsigned long *)ebp)
			break;

		ebp = *(unsigned long *)ebp;
	}
#else
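	/*
	 * Without frame pointers, conservatively scan the whole stack,
	 * recording every word that looks like a kernel text address.
	 */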
	while (valid_stack_ptr(tinfo, stack)) {
		addr = *stack++;
		if (__kernel_text_address(addr)) {
			if (!skip)
				trace->entries[trace->nr_entries++] = addr;
			else
				skip--;
			if (trace->nr_entries >= trace->max_entries)
				break;
		}
	}
#endif

	return ebp;
}

/*
 * Save stack-backtrace addresses into a stack_trace buffer.
 */
void save_stack_trace(struct stack_trace *trace, struct task_struct *task)
{
	unsigned long ebp;
	unsigned long *stack = &ebp;

	WARN_ON(trace->nr_entries || !trace->max_entries);

	if (!task || task == current) {
		/* Grab ebp right from our regs: */
		asm ("movl %%ebp, %0" : "=r" (ebp));
	} else {
		/* ebp is the last reg pushed by switch_to(): */
		ebp = *(unsigned long *) task->thread.esp;
	}
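
	/*
	 * Walk every stack the task has used: previous_esp links the
	 * current stack to the one it interrupted (such as the hard-
	 * and soft-irq stacks), and an ULONG_MAX entry separates the
	 * stacks in the output buffer.
	 */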
	while (1) {
		struct thread_info *context = (struct thread_info *)
				((unsigned long)stack & (~(THREAD_SIZE - 1)));

		ebp = save_context_stack(trace, trace->skip, context, stack, ebp);
		stack = (unsigned long *)context->previous_esp;
		if (!stack || trace->nr_entries >= trace->max_entries)
			break;
		trace->entries[trace->nr_entries++] = ULONG_MAX;
		if (trace->nr_entries >= trace->max_entries)
			break;
	}
}
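For context, here is a minimal usage sketch (not part of the patch): the caller owns the entry buffer and limits, and save_stack_trace() fills them in. The helper name demo_dump_stack, the buffer size of 16, and the printk format are illustrative assumptions, not kernel API.

/*
 * Usage sketch (illustrative only, not part of this file): the caller
 * supplies the entries[] buffer and limits via struct stack_trace.
 */
#include <linux/kernel.h>
#include <linux/stacktrace.h>

static void demo_dump_stack(void)		/* hypothetical helper */
{
	unsigned long entries[16];		/* buffer size is arbitrary */
	struct stack_trace trace = {
		.nr_entries	= 0,
		.max_entries	= ARRAY_SIZE(entries),
		.entries	= entries,
		.skip		= 0,		/* do not skip any inner frames */
	};
	unsigned int i;

	save_stack_trace(&trace, NULL);		/* NULL: trace the current task */

	for (i = 0; i < trace.nr_entries; i++)
		printk(" [<%08lx>]\n", trace.entries[i]);
}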