oprofile, x86: Fix race in nmi handler while starting counters
[deliverable/linux.git] / arch / x86 / oprofile / backtrace.c
CommitLineData
1da177e4
LT
1/**
2 * @file backtrace.c
3 *
4 * @remark Copyright 2002 OProfile authors
5 * @remark Read the file COPYING
6 *
7 * @author John Levon
8 * @author David Smith
9 */
10
11#include <linux/oprofile.h>
12#include <linux/sched.h>
13#include <linux/mm.h>
14#include <asm/ptrace.h>
c34d1b4d 15#include <asm/uaccess.h>
574a6042 16#include <asm/stacktrace.h>
f6dedecc 17#include <linux/compat.h>
1da177e4 18
574a6042
JB
/*
 * dump_trace() callback invoked when the walker crosses onto a new
 * stack (IRQ, exception, ...).  Returning zero means "keep walking" —
 * oprofile wants addresses from every stack type.
 */
static int backtrace_stack(void *data, char *name)
{
	/* Never prune: descend into every stack we encounter. */
	return 0;
}
30379440 24
/*
 * dump_trace() callback invoked once per return address found on the
 * kernel stack.  @data points at the remaining-depth counter supplied
 * by x86_backtrace().
 */
static void backtrace_address(void *data, unsigned long addr, int reliable)
{
	unsigned int *remaining = data;
	unsigned int before = (*remaining)--;

	/* Record the address only while the requested depth is unused. */
	if (before)
		oprofile_add_trace(addr);
}
32
574a6042 33static struct stacktrace_ops backtrace_ops = {
61c1917f
FW
34 .stack = backtrace_stack,
35 .address = backtrace_address,
36 .walk_stack = print_context_stack,
574a6042
JB
37};
38
f6dedecc
JO
39#ifdef CONFIG_COMPAT
40static struct stack_frame_ia32 *
41dump_user_backtrace_32(struct stack_frame_ia32 *head)
42{
43 struct stack_frame_ia32 bufhead[2];
44 struct stack_frame_ia32 *fp;
45
46 /* Also check accessibility of one struct frame_head beyond */
47 if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
48 return NULL;
49 if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
50 return NULL;
51
52 fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
53
54 oprofile_add_trace(bufhead[0].return_address);
55
56 /* frame pointers should strictly progress back up the stack
57 * (towards higher addresses) */
58 if (head >= fp)
59 return NULL;
60
61 return fp;
62}
63
64static inline int
65x86_backtrace_32(struct pt_regs * const regs, unsigned int depth)
66{
67 struct stack_frame_ia32 *head;
68
69 /* User process is 32-bit */
70 if (!current || !test_thread_flag(TIF_IA32))
71 return 0;
72
73 head = (struct stack_frame_ia32 *) regs->bp;
74 while (depth-- && head)
75 head = dump_user_backtrace_32(head);
76
77 return 1;
78}
79
80#else
81static inline int
82x86_backtrace_32(struct pt_regs * const regs, unsigned int depth)
83{
84 return 0;
85}
86#endif /* CONFIG_COMPAT */
87
40c6b3cb 88static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
1da177e4 89{
40c6b3cb 90 struct stack_frame bufhead[2];
1da177e4 91
40c6b3cb 92 /* Also check accessibility of one struct stack_frame beyond */
c34d1b4d
HD
93 if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
94 return NULL;
95 if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
1da177e4
LT
96 return NULL;
97
40c6b3cb 98 oprofile_add_trace(bufhead[0].return_address);
1da177e4 99
c34d1b4d
HD
100 /* frame pointers should strictly progress back up the stack
101 * (towards higher addresses) */
40c6b3cb 102 if (head >= bufhead[0].next_frame)
c34d1b4d 103 return NULL;
1da177e4 104
40c6b3cb 105 return bufhead[0].next_frame;
1da177e4
LT
106}
107
1da177e4
LT
108void
109x86_backtrace(struct pt_regs * const regs, unsigned int depth)
110{
40c6b3cb 111 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
1da177e4 112
fa1e1bdf 113 if (!user_mode_vm(regs)) {
7b6c6c77 114 unsigned long stack = kernel_stack_pointer(regs);
574a6042 115 if (depth)
e8e999cf 116 dump_trace(NULL, regs, (unsigned long *)stack, 0,
574a6042 117 &backtrace_ops, &depth);
1da177e4
LT
118 return;
119 }
120
f6dedecc
JO
121 if (x86_backtrace_32(regs, depth))
122 return;
123
c34d1b4d 124 while (depth-- && head)
30379440 125 head = dump_user_backtrace(head);
1da177e4 126}
This page took 0.512338 seconds and 5 git commands to generate.