arch/x86/kernel/stacktrace.c
/*
 * Stack trace management functions
 *
 *  Copyright (C) 2006-2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 */
#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <asm/stacktrace.h>

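/*
 * Added note: .stack callback for stacktrace_ops, invoked by dump_trace()
 * when the walk crosses onto another stack (e.g. an IRQ or exception
 * stack).  Nothing is recorded for the stack name itself; returning 0
 * lets the walk continue.
 */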
static int save_stack_stack(void *data, char *name)
{
        return 0;
}

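/*
 * Added note: common .address callback, recording one return address into
 * the stack_trace buffer.  With CONFIG_FRAME_POINTER, only addresses the
 * unwinder verified against the frame-pointer chain are kept; the nosched
 * variant additionally drops addresses inside scheduler functions, and
 * trace->skip discards the innermost frames the caller does not want.
 */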
static void
__save_stack_address(void *data, unsigned long addr, bool reliable, bool nosched)
{
        struct stack_trace *trace = data;
#ifdef CONFIG_FRAME_POINTER
        if (!reliable)
                return;
#endif
        if (nosched && in_sched_functions(addr))
                return;
        if (trace->skip > 0) {
                trace->skip--;
                return;
        }
        if (trace->nr_entries < trace->max_entries)
                trace->entries[trace->nr_entries++] = addr;
}

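/* Added note: thin adapters matching the stacktrace_ops .address signature. */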
static void save_stack_address(void *data, unsigned long addr, int reliable)
{
        return __save_stack_address(data, addr, reliable, false);
}

static void
save_stack_address_nosched(void *data, unsigned long addr, int reliable)
{
        return __save_stack_address(data, addr, reliable, true);
}

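/*
 * Added note: callback tables handed to dump_trace().  print_context_stack()
 * is the generic x86 frame walker that feeds each address it finds into the
 * .address callback above.
 */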
static const struct stacktrace_ops save_stack_ops = {
        .stack          = save_stack_stack,
        .address        = save_stack_address,
        .walk_stack     = print_context_stack,
};

static const struct stacktrace_ops save_stack_ops_nosched = {
        .stack          = save_stack_stack,
        .address        = save_stack_address_nosched,
        .walk_stack     = print_context_stack,
};

/*
 * Save stack-backtrace addresses into a stack_trace buffer.
 */
void save_stack_trace(struct stack_trace *trace)
{
        dump_trace(current, NULL, NULL, 0, &save_stack_ops, trace);
        if (trace->nr_entries < trace->max_entries)
                trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace);

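/*
 * Illustrative usage sketch (not part of the original file): the caller
 * owns the entries[] buffer and sizes the trace before calling in, e.g.
 *
 *      unsigned long entries[8];
 *      struct stack_trace trace = {
 *              .entries        = entries,
 *              .max_entries    = ARRAY_SIZE(entries),
 *              .skip           = 0,
 *      };
 *      save_stack_trace(&trace);
 */

/*
 * Added note: as above, but start the walk from a caller-supplied register
 * set (typically captured in an interrupt or exception context).
 */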
void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
        dump_trace(current, regs, NULL, 0, &save_stack_ops, trace);
        if (trace->nr_entries < trace->max_entries)
                trace->entries[trace->nr_entries++] = ULONG_MAX;
}

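/*
 * Added note: capture a trace of another task's kernel stack; the nosched
 * ops skip frames inside scheduler functions.
 */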
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
        dump_trace(tsk, NULL, NULL, 0, &save_stack_ops_nosched, trace);
        if (trace->nr_entries < trace->max_entries)
                trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);

/* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */

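/*
 * Added note: mirrors the frame layout produced by a conventional function
 * prologue (push %bp; mov %sp, %bp): the saved frame pointer followed by
 * the return address.
 */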
struct stack_frame_user {
        const void __user       *next_fp;
        unsigned long           ret_addr;
};

static int
copy_stack_frame(const void __user *fp, struct stack_frame_user *frame)
{
        int ret;

        if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
                return 0;

        ret = 1;
        pagefault_disable();
        if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
                ret = 0;
        pagefault_enable();

        return ret;
}

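/*
 * Added note: walk the user stack by chasing saved frame pointers, starting
 * from the user ip/bp captured in the task's pt_regs.  The walk stops when a
 * frame cannot be copied, when the frame pointer drops below the user stack
 * pointer, or when it fails to advance.
 */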
static inline void __save_stack_trace_user(struct stack_trace *trace)
{
        const struct pt_regs *regs = task_pt_regs(current);
        const void __user *fp = (const void __user *)regs->bp;

        if (trace->nr_entries < trace->max_entries)
                trace->entries[trace->nr_entries++] = regs->ip;

        while (trace->nr_entries < trace->max_entries) {
                struct stack_frame_user frame;

                frame.next_fp = NULL;
                frame.ret_addr = 0;
                if (!copy_stack_frame(fp, &frame))
                        break;
                if ((unsigned long)fp < regs->sp)
                        break;
                if (frame.ret_addr) {
                        trace->entries[trace->nr_entries++] =
                                frame.ret_addr;
                }
                if (fp == frame.next_fp)
                        break;
                fp = frame.next_fp;
        }
}

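/*
 * Added note: like the kernel-side savers above, the trace is terminated
 * with ULONG_MAX when room remains in the buffer.
 */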
void save_stack_trace_user(struct stack_trace *trace)
{
        /*
         * Trace user stack if we are not a kernel thread
         */
        if (current->mm) {
                __save_stack_trace_user(trace);
        }
        if (trace->nr_entries < trace->max_entries)
                trace->entries[trace->nr_entries++] = ULONG_MAX;
}