/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 */
5 #include <linux/kallsyms.h>
6 #include <linux/kprobes.h>
7 #include <linux/uaccess.h>
8 #include <linux/utsname.h>
9 #include <linux/hardirq.h>
10 #include <linux/kdebug.h>
11 #include <linux/module.h>
12 #include <linux/ptrace.h>
13 #include <linux/ftrace.h>
14 #include <linux/kexec.h>
15 #include <linux/bug.h>
16 #include <linux/nmi.h>
17 #include <linux/sysfs.h>
19 #include <asm/stacktrace.h>
/* NOTE(review): presumably makes an unrecoverable NMI panic the machine —
 * set/consumed outside this chunk, confirm against the NMI handlers. */
int panic_on_unrecovered_nmi;
/* Bytes of code dumped around the faulting instruction; "code_bytes=" boot
 * param overrides, clamped to 8192 in code_bytes_setup(). */
unsigned int code_bytes = 64;
/* Max stack words printed per backtrace; "kstack=" boot param overrides
 * (see kstack_setup()). */
int kstack_depth_to_print = 3 * STACKSLOTS_PER_LINE;
/* Monotonic oops counter, shown as [#N] in the __die() banner. */
static int die_counter;
/*
 * Print one backtrace entry: the log-level prefix passed in @data, the raw
 * address, and its %pB-decoded symbol.  Entries not proven by the frame
 * pointer chain (@reliable == 0) are flagged with a leading "? ".
 */
static void printk_stack_address(unsigned long address, int reliable,
		void *data)
{
	printk("%s [<%p>] %s%pB\n",
		(char *)data, (void *)address, reliable ? "" : "? ",
		(void *)address);
}
/*
 * Continue the current console line with " [<addr>] symbol" for @address.
 * Uses %pS so the symbol is resolved with offset information.
 */
void printk_address(unsigned long address)
{
	pr_cont(" [<%p>] %pS\n", (void *)address, (void *)address);
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * With the function graph tracer active, return addresses on the stack are
 * replaced by return_to_handler.  Recover the real return address from
 * task->ret_stack and feed it to the ops->address callback as reliable.
 * @graph counts how many ret_stack entries this trace has consumed so far.
 */
static void
print_ftrace_graph_addr(unsigned long addr, void *data,
			const struct stacktrace_ops *ops,
			struct task_struct *task, int *graph)
{
	unsigned long ret_addr;
	int index;

	if (addr != (unsigned long)return_to_handler)
		return;

	index = task->curr_ret_stack;

	if (!task->ret_stack || index < *graph)
		return;

	index -= *graph;
	ret_addr = task->ret_stack[index].ret;

	ops->address(data, ret_addr, 1);

	(*graph)++;
}
#else
/* Graph tracer disabled: nothing to translate. */
static inline void
print_ftrace_graph_addr(unsigned long addr, void *data,
			const struct stacktrace_ops *ops,
			struct task_struct *task, int *graph)
{ }
#endif
/*
 * x86-64 can have up to three kernel stacks:
 * process stack
 * interrupt stack
 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
 */
80 static inline int valid_stack_ptr(struct task_struct
*task
,
81 void *p
, unsigned int size
, void *end
)
83 void *t
= task_stack_page(task
);
85 if (p
< end
&& p
>= (end
-THREAD_SIZE
))
90 return p
>= t
&& p
< t
+ THREAD_SIZE
- size
;
94 print_context_stack(struct task_struct
*task
,
95 unsigned long *stack
, unsigned long bp
,
96 const struct stacktrace_ops
*ops
, void *data
,
97 unsigned long *end
, int *graph
)
99 struct stack_frame
*frame
= (struct stack_frame
*)bp
;
102 * If we overflowed the stack into a guard page, jump back to the
103 * bottom of the usable stack.
105 if ((unsigned long)task_stack_page(task
) - (unsigned long)stack
<
107 stack
= (unsigned long *)task_stack_page(task
);
109 while (valid_stack_ptr(task
, stack
, sizeof(*stack
), end
)) {
113 if (__kernel_text_address(addr
)) {
114 if ((unsigned long) stack
== bp
+ sizeof(long)) {
115 ops
->address(data
, addr
, 1);
116 frame
= frame
->next_frame
;
117 bp
= (unsigned long) frame
;
119 ops
->address(data
, addr
, 0);
121 print_ftrace_graph_addr(addr
, data
, ops
, task
, graph
);
127 EXPORT_SYMBOL_GPL(print_context_stack
);
130 print_context_stack_bp(struct task_struct
*task
,
131 unsigned long *stack
, unsigned long bp
,
132 const struct stacktrace_ops
*ops
, void *data
,
133 unsigned long *end
, int *graph
)
135 struct stack_frame
*frame
= (struct stack_frame
*)bp
;
136 unsigned long *ret_addr
= &frame
->return_address
;
138 while (valid_stack_ptr(task
, ret_addr
, sizeof(*ret_addr
), end
)) {
139 unsigned long addr
= *ret_addr
;
141 if (!__kernel_text_address(addr
))
144 if (ops
->address(data
, addr
, 1))
146 frame
= frame
->next_frame
;
147 ret_addr
= &frame
->return_address
;
148 print_ftrace_graph_addr(addr
, data
, ops
, task
, graph
);
151 return (unsigned long)frame
;
153 EXPORT_SYMBOL_GPL(print_context_stack_bp
);
/*
 * stacktrace_ops .stack callback: announce entry onto a named stack
 * (irq, NMI, ...).  Returning 0 lets the walk continue.
 */
static int print_trace_stack(void *data, char *name)
{
	printk("%s <%s> ", (char *)data, name);
	return 0;
}
/*
 * Print one address/symbol entries per line.
 */
/*
 * stacktrace_ops .address callback: print one entry via
 * printk_stack_address().  Pets the NMI watchdog first, since dumping a
 * long trace to a slow console can otherwise trigger it.
 */
static int print_trace_address(void *data, unsigned long addr, int reliable)
{
	touch_nmi_watchdog();
	printk_stack_address(addr, reliable, data);
	return 0;
}
171 static const struct stacktrace_ops print_trace_ops
= {
172 .stack
= print_trace_stack
,
173 .address
= print_trace_address
,
174 .walk_stack
= print_context_stack
,
178 show_trace_log_lvl(struct task_struct
*task
, struct pt_regs
*regs
,
179 unsigned long *stack
, unsigned long bp
, char *log_lvl
)
181 printk("%sCall Trace:\n", log_lvl
);
182 dump_trace(task
, regs
, stack
, bp
, &print_trace_ops
, log_lvl
);
/* show_trace_log_lvl() with the default (empty) log-level prefix. */
void show_trace(struct task_struct *task, struct pt_regs *regs,
		unsigned long *stack, unsigned long bp)
{
	show_trace_log_lvl(task, regs, stack, bp, "");
}
191 void show_stack(struct task_struct
*task
, unsigned long *sp
)
193 unsigned long bp
= 0;
197 * Stack frames below this one aren't interesting. Don't show them
198 * if we're printing for %current.
200 if (!sp
&& (!task
|| task
== current
)) {
202 bp
= stack_frame(current
, NULL
);
205 show_stack_log_lvl(task
, NULL
, sp
, bp
, "");
208 void show_stack_regs(struct pt_regs
*regs
)
210 show_stack_log_lvl(current
, regs
, (unsigned long *)regs
->sp
, regs
->bp
, "");
/* Serializes oops output across CPUs; taken in oops_begin(), released in
 * oops_end() when the nest count drops to zero. */
static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
/* CPU currently holding die_lock, or -1; lets a nested oops on the same
 * CPU proceed instead of self-deadlocking. */
static int die_owner = -1;
/* Depth of nested oopses on this lock. */
static unsigned int die_nest_count;
217 unsigned long oops_begin(void)
224 /* racy, but better than risking deadlock. */
225 raw_local_irq_save(flags
);
226 cpu
= smp_processor_id();
227 if (!arch_spin_trylock(&die_lock
)) {
228 if (cpu
== die_owner
)
229 /* nested oops. should stop eventually */;
231 arch_spin_lock(&die_lock
);
239 EXPORT_SYMBOL_GPL(oops_begin
);
240 NOKPROBE_SYMBOL(oops_begin
);
/*
 * Switches off a possibly-exhausted (or IST) stack back onto the task
 * stack and terminates the task with @signr.  Never returns.
 * NOTE(review): implementation lives outside this file (assembly entry
 * code) — confirm location.
 */
void __noreturn rewind_stack_do_exit(int signr);
244 void oops_end(unsigned long flags
, struct pt_regs
*regs
, int signr
)
246 if (regs
&& kexec_should_crash(current
))
251 add_taint(TAINT_DIE
, LOCKDEP_NOW_UNRELIABLE
);
254 /* Nest count reaches zero, release the lock. */
255 arch_spin_unlock(&die_lock
);
256 raw_local_irq_restore(flags
);
262 panic("Fatal exception in interrupt");
264 panic("Fatal exception");
267 * We're not going to return, but we might be on an IST stack or
268 * have very little stack space left. Rewind the stack and kill
271 rewind_stack_do_exit(signr
);
273 NOKPROBE_SYMBOL(oops_end
);
275 int __die(const char *str
, struct pt_regs
*regs
, long err
)
282 "%s: %04lx [#%d]%s%s%s%s\n", str
, err
& 0xffff, ++die_counter
,
283 IS_ENABLED(CONFIG_PREEMPT
) ? " PREEMPT" : "",
284 IS_ENABLED(CONFIG_SMP
) ? " SMP" : "",
285 debug_pagealloc_enabled() ? " DEBUG_PAGEALLOC" : "",
286 IS_ENABLED(CONFIG_KASAN
) ? " KASAN" : "");
288 if (notify_die(DIE_OOPS
, str
, regs
, err
,
289 current
->thread
.trap_nr
, SIGSEGV
) == NOTIFY_STOP
)
295 if (user_mode(regs
)) {
297 ss
= regs
->ss
& 0xffff;
299 sp
= kernel_stack_pointer(regs
);
302 printk(KERN_EMERG
"EIP: [<%08lx>] ", regs
->ip
);
303 print_symbol("%s", regs
->ip
);
304 printk(" SS:ESP %04x:%08lx\n", ss
, sp
);
306 /* Executive summary in case the oops scrolled away */
307 printk(KERN_ALERT
"RIP ");
308 printk_address(regs
->ip
);
309 printk(" RSP <%016lx>\n", regs
->sp
);
313 NOKPROBE_SYMBOL(__die
);
/*
 * This is gone through when something in the kernel has done something bad
 * and is about to be terminated:
 */
319 void die(const char *str
, struct pt_regs
*regs
, long err
)
321 unsigned long flags
= oops_begin();
324 if (!user_mode(regs
))
325 report_bug(regs
->ip
, regs
);
327 if (__die(str
, regs
, err
))
329 oops_end(flags
, regs
, sig
);
332 static int __init
kstack_setup(char *s
)
340 ret
= kstrtoul(s
, 0, &val
);
343 kstack_depth_to_print
= val
;
346 early_param("kstack", kstack_setup
);
348 static int __init
code_bytes_setup(char *s
)
356 ret
= kstrtoul(s
, 0, &val
);
361 if (code_bytes
> 8192)
366 __setup("code_bytes=", code_bytes_setup
);