/*
 * Compatibility mode system call entry point for x86-64.
 *
 * Copyright 2000-2002 Andi Kleen, SuSE Labs.
 */

#include <asm/dwarf2.h>
#include <asm/calling.h>
#include <asm/asm-offsets.h>
#include <asm/current.h>
#include <asm/errno.h>
#include <asm/ia32_unistd.h>
#include <asm/thread_info.h>
#include <asm/segment.h>
#include <asm/irqflags.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <linux/linkage.h>
#include <linux/err.h>

/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
#include <linux/elf-em.h>
#define AUDIT_ARCH_I386		(EM_386|__AUDIT_ARCH_LE)
#define __AUDIT_ARCH_LE		0x40000000

#ifndef CONFIG_AUDITSYSCALL
#define sysexit_audit		ia32_ret_from_sys_call
#define sysretl_audit		ia32_ret_from_sys_call
#endif

        .section .entry.text, "ax"

        .macro IA32_ARG_FIXUP noebp=0
        movl %edi,%r8d
        .if \noebp
        .else
        movl %ebp,%r9d
        .endif
        xchg %ecx,%esi
        movl %ebx,%edi
        movl %edx,%edx		/* zero extension */
        .endm

/* clobbers %eax */
        .macro CLEAR_RREGS offset=0, _r9=rax
        xorl %eax,%eax
        movq %rax,\offset+R11(%rsp)
        movq %rax,\offset+R10(%rsp)
        movq %\_r9,\offset+R9(%rsp)
        movq %rax,\offset+R8(%rsp)
        .endm

/*
 * Reload arg registers from stack in case ptrace changed them.
 * We don't reload %eax because syscall_trace_enter() returned
 * the %rax value we should see.  Instead, we just truncate that
 * value to 32 bits again as we did on entry from user mode.
 * If it's a new value set by user_regset during entry tracing,
 * this matches the normal truncation of the user-mode value.
 * If it's -1 to make us punt the syscall, then (u32)-1 is still
 * an appropriately invalid value.
 */
        .macro LOAD_ARGS32 offset, _r9=0
        .if \_r9
        movl \offset+R9(%rsp),%r9d
        .endif
        movl \offset+RCX(%rsp),%ecx
        movl \offset+RDX(%rsp),%edx
        movl \offset+RSI(%rsp),%esi
        movl \offset+RDI(%rsp),%edi
        movl %eax,%eax		/* zero extension */
        .endm

        .macro CFI_STARTPROC32 simple
        CFI_STARTPROC \simple
        CFI_UNDEFINED r8
        CFI_UNDEFINED r9
        CFI_UNDEFINED r10
        CFI_UNDEFINED r11
        CFI_UNDEFINED r12
        CFI_UNDEFINED r13
        CFI_UNDEFINED r14
        CFI_UNDEFINED r15
        .endm

#ifdef CONFIG_PARAVIRT
ENTRY(native_usergs_sysret32)
        swapgs
        sysretl
ENDPROC(native_usergs_sysret32)

ENTRY(native_irq_enable_sysexit)
        swapgs
        sti
        sysexit
ENDPROC(native_irq_enable_sysexit)
#endif

/*
 * 32bit SYSENTER instruction entry.
 *
 * Arguments:
 * %eax	System call number.
 * %ebx	Arg1
 * %ecx	Arg2
 * %edx	Arg3
 * %esi	Arg4
 * %edi	Arg5
 * %ebp	user stack
 * 0(%ebp) Arg6
 *
 * Interrupts off.
 *
 * This is purely a fast path. For anything complicated we use the int 0x80
 * path below. Set up a complete hardware stack frame to share code
 * with the int 0x80 path.
 */
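/*
 * Illustration only (not part of the kernel build): a 32-bit process
 * normally reaches this entry through the vDSO's __kernel_vsyscall stub
 * rather than by issuing SYSENTER itself.  A minimal, hedged sketch of the
 * caller side, assuming the usual i386 glibc layout where the vDSO entry
 * pointer lives at %gs:0x10 and "buf"/"len" are hypothetical symbols:
 *
 *	movl	$4,%eax			# i386 __NR_write
 *	movl	$1,%ebx			# Arg1: fd = stdout
 *	movl	$buf,%ecx		# Arg2: buffer
 *	movl	$len,%edx		# Arg3: count
 *	call	*%gs:0x10		# into __kernel_vsyscall
 *
 * The stub pushes the caller's %ebp and sets %ebp = %esp before SYSENTER,
 * which is how 0(%ebp) comes to hold Arg6 in the layout listed above.
 */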
ENTRY(ia32_sysenter_target)
        CFI_STARTPROC32 simple
        CFI_SIGNAL_FRAME
        CFI_DEF_CFA rsp,0
        CFI_REGISTER rsp,rbp
        SWAPGS_UNSAFE_STACK
        movq PER_CPU_VAR(kernel_stack), %rsp
        addq $(KERNEL_STACK_OFFSET),%rsp
        /*
         * No need to follow this irqs on/off section: the syscall
         * disabled irqs; here we enable them straight after entry:
         */
        ENABLE_INTERRUPTS(CLBR_NONE)
        movl %ebp,%ebp		/* zero extension */
        pushq_cfi $__USER32_DS
        /*CFI_REL_OFFSET ss,0*/
        pushq_cfi %rbp
        CFI_REL_OFFSET rsp,0
        pushfq_cfi
        /*CFI_REL_OFFSET rflags,0*/
        movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
        CFI_REGISTER rip,r10
        pushq_cfi $__USER32_CS
        /*CFI_REL_OFFSET cs,0*/
        movl %eax, %eax
        pushq_cfi %r10
        CFI_REL_OFFSET rip,0
        pushq_cfi %rax
        cld
        ALLOC_PT_GPREGS_ON_STACK
        SAVE_C_REGS_EXCEPT_R891011
        /* No need to do an access_ok check here because rbp has been
           32-bit zero extended. */
        ASM_STAC
1:      movl (%rbp),%ebp
        _ASM_EXTABLE(1b,ia32_badarg)
        ASM_CLAC

        /*
         * Sysenter doesn't filter flags, so we need to clear NT
         * ourselves.  To save a few cycles, we can check whether
         * NT was set instead of doing an unconditional popfq.
         */
        testl $X86_EFLAGS_NT,EFLAGS-ARGOFFSET(%rsp)
        jnz sysenter_fix_flags
sysenter_flags_fixed:

        orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
        testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
        CFI_REMEMBER_STATE
        jnz sysenter_tracesys
        cmpq $(IA32_NR_syscalls-1),%rax
        ja ia32_badsys
sysenter_do_call:
        IA32_ARG_FIXUP
sysenter_dispatch:
        call *ia32_sys_call_table(,%rax,8)
        movq %rax,RAX-ARGOFFSET(%rsp)
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
        jnz sysexit_audit
sysexit_from_sys_call:
        andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
        /* clear IF, so that popfq doesn't enable interrupts early */
        andl $~0x200,EFLAGS-ARGOFFSET(%rsp)
        movl RIP-ARGOFFSET(%rsp),%edx		/* User %eip */
        CFI_REGISTER rip,rdx
        RESTORE_RSI_RDI
        REMOVE_PT_GPREGS_FROM_STACK 3*8
        xorq %r8,%r8
        xorq %r9,%r9
        xorq %r10,%r10
        xorq %r11,%r11
        popfq_cfi
        /*CFI_RESTORE rflags*/
        popq_cfi %rcx				/* User %esp */
        CFI_REGISTER rsp,rcx
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS_SYSEXIT32

        CFI_RESTORE_STATE

#ifdef CONFIG_AUDITSYSCALL
        .macro auditsys_entry_common
        movl %esi,%r8d			/* 5th arg: 4th syscall arg */
        movl %ecx,%r9d			/* swap with edx */
        movl %edx,%ecx			/* 4th arg: 3rd syscall arg */
        movl %r9d,%edx			/* 3rd arg: 2nd syscall arg */
        movl %ebx,%esi			/* 2nd arg: 1st syscall arg */
        movl %eax,%edi			/* 1st arg: syscall number */
        call __audit_syscall_entry
        movl RAX-ARGOFFSET(%rsp),%eax	/* reload syscall number */
        cmpq $(IA32_NR_syscalls-1),%rax
        ja ia32_badsys
        movl %ebx,%edi			/* reload 1st syscall arg */
        movl RCX-ARGOFFSET(%rsp),%esi	/* reload 2nd syscall arg */
        movl RDX-ARGOFFSET(%rsp),%edx	/* reload 3rd syscall arg */
        movl RSI-ARGOFFSET(%rsp),%ecx	/* reload 4th syscall arg */
        movl RDI-ARGOFFSET(%rsp),%r8d	/* reload 5th syscall arg */
        .endm

        .macro auditsys_exit exit
        testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
        jnz ia32_ret_from_sys_call
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_NONE)
        movl %eax,%esi			/* second arg, syscall return value */
        cmpl $-MAX_ERRNO,%eax		/* is it an error? */
        jbe 1f
        movslq %eax, %rsi		/* if error sign extend to 64 bits */
1:      setbe %al			/* 1 if error, 0 if not */
        movzbl %al,%edi			/* zero-extend that into %edi */
        call __audit_syscall_exit
        movq RAX-ARGOFFSET(%rsp),%rax	/* reload syscall return value */
        movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
        jz \exit
        CLEAR_RREGS -ARGOFFSET
        jmp int_with_check
        .endm

sysenter_auditsys:
        auditsys_entry_common
        movl %ebp,%r9d			/* reload 6th syscall arg */
        jmp sysenter_dispatch

sysexit_audit:
        auditsys_exit sysexit_from_sys_call
#endif

sysenter_fix_flags:
        pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_FIXED)
        popfq_cfi
        jmp sysenter_flags_fixed

sysenter_tracesys:
#ifdef CONFIG_AUDITSYSCALL
        testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
        jz sysenter_auditsys
#endif
        SAVE_EXTRA_REGS
        CLEAR_RREGS
        movq $-ENOSYS,RAX(%rsp)	/* ptrace can change this for a bad syscall */
        movq %rsp,%rdi		/* &pt_regs -> arg1 */
        call syscall_trace_enter
        LOAD_ARGS32 ARGOFFSET	/* reload args from stack in case ptrace changed them */
        RESTORE_EXTRA_REGS
        cmpq $(IA32_NR_syscalls-1),%rax
        ja int_ret_from_sys_call	/* sysenter_tracesys has set RAX(%rsp) */
        jmp sysenter_do_call
        CFI_ENDPROC
ENDPROC(ia32_sysenter_target)

/*
 * 32bit SYSCALL instruction entry.
 *
 * Arguments:
 * %eax	System call number.
 * %ebx	Arg1
 * %ecx	return EIP
 * %edx	Arg3
 * %esi	Arg4
 * %edi	Arg5
 * %ebp	Arg2	[note: not saved in the stack frame, should not be touched]
 * %esp	user stack
 * 0(%esp) Arg6
 *
 * Interrupts off.
 *
 * This is purely a fast path. For anything complicated we use the int 0x80
 * path below. Set up a complete hardware stack frame to share code
 * with the int 0x80 path.
 */
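/*
 * Illustration only (not part of the kernel build): on AMD CPUs the 32-bit
 * vDSO __kernel_vsyscall stub is what produces the layout above.  A hedged
 * sketch of roughly what that stub does (details may differ per kernel
 * version):
 *
 *	push	%ebp
 *	movl	%ecx,%ebp	# Arg2 moves to %ebp; SYSCALL clobbers %ecx
 *	syscall			# CPU puts return EIP in %ecx, RFLAGS in %r11
 *	movl	%ebp,%ecx	# restore Arg2
 *	pop	%ebp
 *	ret
 *
 * Arg6 stays at 0(%esp) on the user stack, which is why the entry below
 * fetches it through the saved, zero-extended user %esp in %r8.
 */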
ENTRY(ia32_cstar_target)
        CFI_STARTPROC32 simple
        CFI_SIGNAL_FRAME
        CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
        CFI_REGISTER rip,rcx
        /*CFI_REGISTER rflags,r11*/
        SWAPGS_UNSAFE_STACK
        movl %esp,%r8d
        CFI_REGISTER rsp,r8
        movq PER_CPU_VAR(kernel_stack),%rsp
        /*
         * No need to follow this irqs on/off section: the syscall
         * disabled irqs and here we enable them straight after entry:
         */
        ENABLE_INTERRUPTS(CLBR_NONE)
        ALLOC_PT_GPREGS_ON_STACK 8
        SAVE_C_REGS_EXCEPT_RCX_R891011
        movl %eax,%eax		/* zero extension */
        movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
        movq %rcx,RIP-ARGOFFSET(%rsp)
        CFI_REL_OFFSET rip,RIP-ARGOFFSET
        movq %rbp,RCX-ARGOFFSET(%rsp)	/* this lies slightly to ptrace */
        movl %ebp,%ecx
        movq $__USER32_CS,CS-ARGOFFSET(%rsp)
        movq $__USER32_DS,SS-ARGOFFSET(%rsp)
        movq %r11,EFLAGS-ARGOFFSET(%rsp)
        /*CFI_REL_OFFSET rflags,EFLAGS-ARGOFFSET*/
        movq %r8,RSP-ARGOFFSET(%rsp)
        CFI_REL_OFFSET rsp,RSP-ARGOFFSET
        /* No need to do an access_ok check here because r8 has been
           32-bit zero extended. */
        /* hardware stack frame is complete now */
        ASM_STAC
1:      movl (%r8),%r9d
        _ASM_EXTABLE(1b,ia32_badarg)
        ASM_CLAC
        orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
        testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
        CFI_REMEMBER_STATE
        jnz cstar_tracesys
        cmpq $IA32_NR_syscalls-1,%rax
        ja ia32_badsys
cstar_do_call:
        IA32_ARG_FIXUP 1
cstar_dispatch:
        call *ia32_sys_call_table(,%rax,8)
        movq %rax,RAX-ARGOFFSET(%rsp)
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
        jnz sysretl_audit
sysretl_from_sys_call:
        andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
        RESTORE_RSI_RDI_RDX
        movl RIP-ARGOFFSET(%rsp),%ecx
        CFI_REGISTER rip,rcx
        movl EFLAGS-ARGOFFSET(%rsp),%r11d
        /*CFI_REGISTER rflags,r11*/
        xorq %r10,%r10
        xorq %r9,%r9
        xorq %r8,%r8
        TRACE_IRQS_ON
        movl RSP-ARGOFFSET(%rsp),%esp
        CFI_RESTORE rsp
        USERGS_SYSRET32

#ifdef CONFIG_AUDITSYSCALL
cstar_auditsys:
        CFI_RESTORE_STATE
        movl %r9d,R9-ARGOFFSET(%rsp)	/* register to be clobbered by call */
        auditsys_entry_common
        movl R9-ARGOFFSET(%rsp),%r9d	/* reload 6th syscall arg */
        jmp cstar_dispatch

sysretl_audit:
        auditsys_exit sysretl_from_sys_call
#endif

cstar_tracesys:
#ifdef CONFIG_AUDITSYSCALL
        testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
        jz cstar_auditsys
#endif
        xchgl %r9d,%ebp
        SAVE_EXTRA_REGS
        CLEAR_RREGS 0, r9
        movq $-ENOSYS,RAX(%rsp)	/* ptrace can change this for a bad syscall */
        movq %rsp,%rdi		/* &pt_regs -> arg1 */
        call syscall_trace_enter
        LOAD_ARGS32 ARGOFFSET, 1	/* reload args from stack in case ptrace changed them */
        RESTORE_EXTRA_REGS
        xchgl %ebp,%r9d
        cmpq $(IA32_NR_syscalls-1),%rax
        ja int_ret_from_sys_call	/* cstar_tracesys has set RAX(%rsp) */
        jmp cstar_do_call
END(ia32_cstar_target)

ia32_badarg:
        ASM_CLAC
        movq $-EFAULT,%rax
        jmp ia32_sysret
        CFI_ENDPROC

/*
 * Emulated IA32 system calls via int 0x80.
 *
 * Arguments:
 * %eax	System call number.
 * %ebx	Arg1
 * %ecx	Arg2
 * %edx	Arg3
 * %esi	Arg4
 * %edi	Arg5
 * %ebp	Arg6	[note: not saved in the stack frame, should not be touched]
 *
 * Notes:
 * Uses the same stack frame as the x86-64 version.
 * All registers except %eax must be saved (but ptrace may violate that).
 * Arguments are zero extended. For system calls that want sign extension and
 * take long arguments a wrapper is needed. Most calls can just be called
 * directly.
 * Assumes it is only called from user space and entered with interrupts off.
 */
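/*
 * Illustration only (not part of the kernel build): a minimal sketch of a
 * 32-bit caller using this convention directly, where "msg" and "len" are
 * hypothetical symbols.  The return value (or a negative errno) comes back
 * in %eax:
 *
 *	movl	$4,%eax		# i386 __NR_write
 *	movl	$1,%ebx		# Arg1: fd = stdout
 *	movl	$msg,%ecx	# Arg2: buffer
 *	movl	$len,%edx	# Arg3: count
 *	int	$0x80
 */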

ENTRY(ia32_syscall)
        CFI_STARTPROC32 simple
        CFI_SIGNAL_FRAME
        CFI_DEF_CFA rsp,SS+8-RIP
        /*CFI_REL_OFFSET ss,SS-RIP*/
        CFI_REL_OFFSET rsp,RSP-RIP
        /*CFI_REL_OFFSET rflags,EFLAGS-RIP*/
        /*CFI_REL_OFFSET cs,CS-RIP*/
        CFI_REL_OFFSET rip,RIP-RIP
        PARAVIRT_ADJUST_EXCEPTION_FRAME
        SWAPGS
        /*
         * No need to follow this irqs on/off section: the syscall
         * disabled irqs and here we enable them straight after entry:
         */
        ENABLE_INTERRUPTS(CLBR_NONE)
        movl %eax,%eax
        pushq_cfi %rax
        cld
        /* Note: the registers are not zero-extended to the stack frame;
           this could be a problem. */
        ALLOC_PT_GPREGS_ON_STACK
        SAVE_C_REGS_EXCEPT_R891011
        orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
        testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
        jnz ia32_tracesys
        cmpq $(IA32_NR_syscalls-1),%rax
        ja ia32_badsys
ia32_do_call:
        IA32_ARG_FIXUP
        call *ia32_sys_call_table(,%rax,8)	# xxx: rip relative
ia32_sysret:
        movq %rax,RAX-ARGOFFSET(%rsp)
ia32_ret_from_sys_call:
        CLEAR_RREGS -ARGOFFSET
        jmp int_ret_from_sys_call

ia32_tracesys:
        SAVE_EXTRA_REGS
        CLEAR_RREGS
        movq $-ENOSYS,RAX(%rsp)	/* ptrace can change this for a bad syscall */
        movq %rsp,%rdi		/* &pt_regs -> arg1 */
        call syscall_trace_enter
        LOAD_ARGS32 ARGOFFSET	/* reload args from stack in case ptrace changed them */
        RESTORE_EXTRA_REGS
        cmpq $(IA32_NR_syscalls-1),%rax
        ja int_ret_from_sys_call	/* ia32_tracesys has set RAX(%rsp) */
        jmp ia32_do_call
END(ia32_syscall)

ia32_badsys:
        movq $0,ORIG_RAX-ARGOFFSET(%rsp)
        movq $-ENOSYS,%rax
        jmp ia32_sysret

        CFI_ENDPROC

        .macro PTREGSCALL label, func
        ALIGN
GLOBAL(\label)
        leaq \func(%rip),%rax
        jmp ia32_ptregs_common
        .endm

        CFI_STARTPROC32

        PTREGSCALL stub32_rt_sigreturn, sys32_rt_sigreturn
        PTREGSCALL stub32_sigreturn, sys32_sigreturn
        PTREGSCALL stub32_execve, compat_sys_execve
        PTREGSCALL stub32_execveat, compat_sys_execveat
        PTREGSCALL stub32_fork, sys_fork
        PTREGSCALL stub32_vfork, sys_vfork

        ALIGN
GLOBAL(stub32_clone)
        leaq sys_clone(%rip),%rax
        mov %r8, %rcx
        jmp ia32_ptregs_common

        ALIGN
ia32_ptregs_common:
        CFI_ENDPROC
        CFI_STARTPROC32 simple
        CFI_SIGNAL_FRAME
        CFI_DEF_CFA rsp,SS+8-ARGOFFSET
        CFI_REL_OFFSET rax,RAX-ARGOFFSET
        CFI_REL_OFFSET rcx,RCX-ARGOFFSET
        CFI_REL_OFFSET rdx,RDX-ARGOFFSET
        CFI_REL_OFFSET rsi,RSI-ARGOFFSET
        CFI_REL_OFFSET rdi,RDI-ARGOFFSET
        CFI_REL_OFFSET rip,RIP-ARGOFFSET
/*      CFI_REL_OFFSET cs,CS-ARGOFFSET*/
/*      CFI_REL_OFFSET rflags,EFLAGS-ARGOFFSET*/
        CFI_REL_OFFSET rsp,RSP-ARGOFFSET
/*      CFI_REL_OFFSET ss,SS-ARGOFFSET*/
        SAVE_EXTRA_REGS 8
        call *%rax
        RESTORE_EXTRA_REGS 8
        ret
        CFI_ENDPROC
END(ia32_ptregs_common)