x86/asm/entry: Use user_mode_ignore_vm86() where appropriate
arch/x86/include/asm/ptrace.h
#ifndef _ASM_X86_PTRACE_H
#define _ASM_X86_PTRACE_H

#include <asm/segment.h>
#include <asm/page_types.h>
#include <uapi/asm/ptrace.h>

#ifndef __ASSEMBLY__
#ifdef __i386__

struct pt_regs {
	unsigned long bx;
	unsigned long cx;
	unsigned long dx;
	unsigned long si;
	unsigned long di;
	unsigned long bp;
	unsigned long ax;
	unsigned long ds;
	unsigned long es;
	unsigned long fs;
	unsigned long gs;
	unsigned long orig_ax;
	unsigned long ip;
	unsigned long cs;
	unsigned long flags;
	unsigned long sp;
	unsigned long ss;
};

#else /* __i386__ */

struct pt_regs {
/*
 * C ABI says these regs are callee-preserved. They aren't saved on kernel entry
 * unless the syscall needs a complete, fully filled "struct pt_regs".
 */
	unsigned long r15;
	unsigned long r14;
	unsigned long r13;
	unsigned long r12;
	unsigned long bp;
	unsigned long bx;
/* These regs are callee-clobbered. Always saved on kernel entry. */
	unsigned long r11;
	unsigned long r10;
	unsigned long r9;
	unsigned long r8;
	unsigned long ax;
	unsigned long cx;
	unsigned long dx;
	unsigned long si;
	unsigned long di;
/*
 * On syscall entry, this is the syscall number. On a CPU exception, it is
 * the error code. On a hardware interrupt, it is the IRQ number:
 */
	unsigned long orig_ax;
/* Return frame for iretq */
	unsigned long ip;
	unsigned long cs;
	unsigned long flags;
	unsigned long sp;
	unsigned long ss;
/* top of stack page */
};

#endif /* !__i386__ */

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt_types.h>
#endif

struct cpuinfo_x86;
struct task_struct;

extern unsigned long profile_pc(struct pt_regs *regs);
#define profile_pc profile_pc

extern unsigned long
convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs);
extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
			 int error_code, int si_code);

extern unsigned long syscall_trace_enter_phase1(struct pt_regs *, u32 arch);
extern long syscall_trace_enter_phase2(struct pt_regs *, u32 arch,
				       unsigned long phase1_result);

extern long syscall_trace_enter(struct pt_regs *);
extern void syscall_trace_leave(struct pt_regs *);

static inline unsigned long regs_return_value(struct pt_regs *regs)
{
	return regs->ax;
}

/*
 * user_mode_vm(regs) determines whether a register set came from user mode.
 * This is true if V8086 mode was enabled OR if the register set was from
 * protected mode with an RPL-3 CS value. This tricky test checks that with
 * one comparison. Many places in the kernel can bypass this full check
 * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
 */
static inline int user_mode(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
#else
	return !!(regs->cs & 3);
#endif
}

static inline int user_mode_vm(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
		USER_RPL;
#else
	return user_mode(regs);
#endif
}
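
/*
 * Worked example of the one-comparison test in user_mode_vm() above
 * (an illustration added here, using the usual definitions
 * SEGMENT_RPL_MASK == 0x3, USER_RPL == 3 and
 * X86_VM_MASK == X86_EFLAGS_VM == 0x00020000):
 *
 *   kernel:          cs RPL == 0, VM clear -> 0 | 0       ==       0 <  3
 *   user protected:  cs RPL == 3, VM clear -> 3 | 0       ==       3 >= 3
 *   vm86:            cs RPL == x, VM set   -> x | 0x20000 >= 0x20000 >= 3
 *
 * ORing the VM flag into the RPL bits folds "RPL == 3 OR V8086 mode"
 * into a single unsigned comparison against USER_RPL.
 */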

/*
 * This is the fastest way to check whether regs come from user space.
 * It is unsafe if regs might come from vm86 mode, though -- in vm86
 * mode, all bits of CS and SS are completely under the user's control.
 * The CPU considers vm86 mode to be CPL 3 regardless of CS and SS.
 *
 * Do NOT use this function unless you have already ruled out the
 * possibility that regs came from vm86 mode.
 *
 * We check for RPL != 0 instead of RPL == 3 because we don't use rings
 * 1 or 2 and this is more efficient.
 */
static inline int user_mode_ignore_vm86(struct pt_regs *regs)
{
	return (regs->cs & SEGMENT_RPL_MASK) != 0;
}

static inline int v8086_mode(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	return (regs->flags & X86_VM_MASK);
#else
	return 0;	/* No V86 mode support in long mode */
#endif
}

#ifdef CONFIG_X86_64
static inline bool user_64bit_mode(struct pt_regs *regs)
{
#ifndef CONFIG_PARAVIRT
	/*
	 * On non-paravirt systems, this is the only long mode CPL 3
	 * selector. We do not allow long mode selectors in the LDT.
	 */
	return regs->cs == __USER_CS;
#else
	/* Headers are too twisted for this to go in paravirt.h. */
	return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
#endif
}
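
/*
 * Illustrative sketch, not part of the original header: one common use
 * of user_64bit_mode() is deciding whether a user-space context uses
 * full 64-bit addresses or is a 32-bit (compat) context. The helper
 * name example_user_addr_width() is hypothetical.
 */
#if 0	/* example only */
static inline unsigned int example_user_addr_width(struct pt_regs *regs)
{
	return user_64bit_mode(regs) ? 64 : 32;
}
#endif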

#define current_user_stack_pointer()	current_pt_regs()->sp
#define compat_user_stack_pointer()	current_pt_regs()->sp
#endif

#ifdef CONFIG_X86_32
extern unsigned long kernel_stack_pointer(struct pt_regs *regs);
#else
static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
{
	return regs->sp;
}
#endif

#define GET_IP(regs)	((regs)->ip)
#define GET_FP(regs)	((regs)->bp)
#define GET_USP(regs)	((regs)->sp)

#include <asm-generic/ptrace.h>

/* Query offset/name of register from its name/offset */
extern int regs_query_register_offset(const char *name);
extern const char *regs_query_register_name(unsigned int offset);
#define MAX_REG_OFFSET (offsetof(struct pt_regs, ss))

/**
 * regs_get_register() - get register value from its offset
 * @regs:	pt_regs from which the register value is read.
 * @offset:	offset of the register.
 *
 * regs_get_register() returns the value of a register. @offset is the
 * offset of the register within the struct pt_regs pointed to by @regs.
 * If @offset is bigger than MAX_REG_OFFSET, this returns 0.
 */
static inline unsigned long regs_get_register(struct pt_regs *regs,
					      unsigned int offset)
{
	if (unlikely(offset > MAX_REG_OFFSET))
		return 0;
#ifdef CONFIG_X86_32
	/*
	 * Traps from the kernel do not save sp and ss.
	 * Use the helper function to retrieve sp.
	 */
	if (offset == offsetof(struct pt_regs, sp) &&
	    regs->cs == __KERNEL_CS)
		return kernel_stack_pointer(regs);
#endif
	return *(unsigned long *)((unsigned long)regs + offset);
}
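
/*
 * Illustrative sketch, not part of the original header: a kprobes
 * pre-handler can pair regs_query_register_offset() with
 * regs_get_register() to read a register by name. Assumes a
 * kprobes-enabled kernel; example_pre_handler() is a hypothetical name.
 */
#if 0	/* example only */
static int example_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	int off = regs_query_register_offset("ax");

	if (off >= 0)
		pr_info("ax at probe point: %lx\n",
			regs_get_register(regs, off));
	return 0;
}
#endif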

/**
 * regs_within_kernel_stack() - check the address in the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @addr:	address which is checked.
 *
 * regs_within_kernel_stack() checks whether @addr is within the kernel stack
 * page(s). If @addr is within the kernel stack, it returns true. If not,
 * returns false.
 */
static inline int regs_within_kernel_stack(struct pt_regs *regs,
					   unsigned long addr)
{
	return ((addr & ~(THREAD_SIZE - 1)) ==
		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
}

/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @n:		stack entry number.
 *
 * regs_get_kernel_stack_nth() returns the @n-th entry of the kernel stack
 * which is specified by @regs. If the @n-th entry is NOT in the kernel stack,
 * this returns 0.
 */
static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
						      unsigned int n)
{
	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);

	addr += n;
	if (regs_within_kernel_stack(regs, (unsigned long)addr))
		return *addr;
	else
		return 0;
}
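
/*
 * Illustrative sketch, not part of the original header: at a
 * function-entry kprobe on x86_64, the first word on the kernel stack
 * is the return address pushed by CALL, so it can be read with
 * regs_get_kernel_stack_nth(regs, 0). Out-of-range entries read as 0
 * by construction.
 */
#if 0	/* example only */
static void example_dump_return_address(struct pt_regs *regs)
{
	pr_info("stack[0] (return address): %lx\n",
		regs_get_kernel_stack_nth(regs, 0));
}
#endif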

#define arch_has_single_step()	(1)
#ifdef CONFIG_X86_DEBUGCTLMSR
#define arch_has_block_step()	(1)
#else
#define arch_has_block_step()	(boot_cpu_data.x86 >= 6)
#endif

#define ARCH_HAS_USER_SINGLE_STEP_INFO

/*
 * When hitting ptrace_stop(), we cannot return using SYSRET because
 * that does not restore the full CPU state, only a minimal set. The
 * ptracer can change arbitrary register values, which is usually okay
 * because the usual ptrace stops run off the signal delivery path, which
 * forces IRET; however, ptrace_event() stops happen in arbitrary places
 * in the kernel and do not force the IRET path.
 *
 * So force the IRET path after a ptrace stop.
 */
#define arch_ptrace_stop_needed(code, info)				\
({									\
	force_iret();							\
	false;								\
})

struct user_desc;
extern int do_get_thread_area(struct task_struct *p, int idx,
			      struct user_desc __user *info);
extern int do_set_thread_area(struct task_struct *p, int idx,
			      struct user_desc __user *info, int can_allocate);

#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_PTRACE_H */