/*
 * Stack trace management functions
 *
 *  Copyright (C) 2006-2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 */
#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <asm/stacktrace.h>

/*
 * dump_trace() callback invoked at stack boundaries (@name identifies the
 * stack).  We only collect addresses, so stack transitions are ignored;
 * returning 0 lets the walk continue.
 */
static int save_stack_stack(void *data, char *name)
{
	return 0;
}
21b32bbf | 16 | |
568b329a | 17 | static int |
018378c5 | 18 | __save_stack_address(void *data, unsigned long addr, bool reliable, bool nosched) |
c0b766f1 | 19 | { |
ade1af77 | 20 | struct stack_trace *trace = data; |
147ec4d2 | 21 | #ifdef CONFIG_FRAME_POINTER |
1650743c | 22 | if (!reliable) |
568b329a | 23 | return 0; |
147ec4d2 | 24 | #endif |
018378c5 | 25 | if (nosched && in_sched_functions(addr)) |
568b329a | 26 | return 0; |
c0b766f1 AK |
27 | if (trace->skip > 0) { |
28 | trace->skip--; | |
568b329a | 29 | return 0; |
21b32bbf | 30 | } |
568b329a | 31 | if (trace->nr_entries < trace->max_entries) { |
c0b766f1 | 32 | trace->entries[trace->nr_entries++] = addr; |
568b329a AS |
33 | return 0; |
34 | } else { | |
35 | return -1; /* no more room, stop walking the stack */ | |
36 | } | |
21b32bbf IM |
37 | } |
38 | ||
/* dump_trace() address callback: record every address, including sched code */
static int save_stack_address(void *data, unsigned long addr, int reliable)
{
	return __save_stack_address(data, addr, reliable, false);
}
43 | ||
/* dump_trace() address callback that filters out scheduler functions */
static int
save_stack_address_nosched(void *data, unsigned long addr, int reliable)
{
	return __save_stack_address(data, addr, reliable, true);
}
49 | ||
/* callbacks used by save_stack_trace() / save_stack_trace_regs() */
static const struct stacktrace_ops save_stack_ops = {
	.stack = save_stack_stack,
	.address = save_stack_address,
	.walk_stack = print_context_stack,
};
21b32bbf | 55 | |
/* like save_stack_ops, but skips scheduler functions (for blocked tasks) */
static const struct stacktrace_ops save_stack_ops_nosched = {
	.stack = save_stack_stack,
	.address = save_stack_address_nosched,
	.walk_stack = print_context_stack,
};
61 | ||
21b32bbf IM |
62 | /* |
63 | * Save stack-backtrace addresses into a stack_trace buffer. | |
21b32bbf | 64 | */ |
ab1b6f03 | 65 | void save_stack_trace(struct stack_trace *trace) |
21b32bbf | 66 | { |
e8e999cf | 67 | dump_trace(current, NULL, NULL, 0, &save_stack_ops, trace); |
006e84ee CM |
68 | if (trace->nr_entries < trace->max_entries) |
69 | trace->entries[trace->nr_entries++] = ULONG_MAX; | |
21b32bbf | 70 | } |
8594698e | 71 | EXPORT_SYMBOL_GPL(save_stack_trace); |
9745512c | 72 | |
/* Save a backtrace starting from the given register state. */
void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
	dump_trace(current, regs, NULL, 0, &save_stack_ops, trace);
	/* terminate the trace with ULONG_MAX if there is room left */
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
79 | ||
/*
 * Save a backtrace of @tsk's kernel stack, using the nosched ops so that
 * scheduler-internal frames are filtered out of the result.
 */
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	dump_trace(tsk, NULL, NULL, 0, &save_stack_ops_nosched, trace);
	/* terminate the trace with ULONG_MAX if there is room left */
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
/* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */

/*
 * Layout of a user-space stack frame as laid down by the frame-pointer
 * calling convention: saved frame pointer followed by the return address.
 */
struct stack_frame_user {
	const void __user *next_fp;	/* caller's frame pointer */
	unsigned long ret_addr;		/* return address into the caller */
};
94 | ||
c9cf4dbb FW |
95 | static int |
96 | copy_stack_frame(const void __user *fp, struct stack_frame_user *frame) | |
02b67518 TE |
97 | { |
98 | int ret; | |
99 | ||
100 | if (!access_ok(VERIFY_READ, fp, sizeof(*frame))) | |
101 | return 0; | |
102 | ||
103 | ret = 1; | |
104 | pagefault_disable(); | |
105 | if (__copy_from_user_inatomic(frame, fp, sizeof(*frame))) | |
106 | ret = 0; | |
107 | pagefault_enable(); | |
108 | ||
109 | return ret; | |
110 | } | |
111 | ||
/*
 * Walk the current task's user-space stack by following the frame-pointer
 * chain starting at regs->bp, recording saved return addresses into @trace.
 */
static inline void __save_stack_trace_user(struct stack_trace *trace)
{
	const struct pt_regs *regs = task_pt_regs(current);
	const void __user *fp = (const void __user *)regs->bp;

	/* the user-space instruction pointer is the first entry */
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = regs->ip;

	while (trace->nr_entries < trace->max_entries) {
		struct stack_frame_user frame;

		frame.next_fp = NULL;
		frame.ret_addr = 0;
		if (!copy_stack_frame(fp, &frame))
			break;
		/* a valid frame cannot live below the current stack pointer */
		if ((unsigned long)fp < regs->sp)
			break;
		if (frame.ret_addr) {
			trace->entries[trace->nr_entries++] =
				frame.ret_addr;
		}
		/* a self-referencing frame pointer would loop forever */
		if (fp == frame.next_fp)
			break;
		fp = frame.next_fp;
	}
}
138 | ||
02b67518 TE |
139 | void save_stack_trace_user(struct stack_trace *trace) |
140 | { | |
141 | /* | |
142 | * Trace user stack if we are not a kernel thread | |
143 | */ | |
144 | if (current->mm) { | |
8d7c6a96 | 145 | __save_stack_trace_user(trace); |
02b67518 TE |
146 | } |
147 | if (trace->nr_entries < trace->max_entries) | |
148 | trace->entries[trace->nr_entries++] = ULONG_MAX; | |
149 | } | |
150 |