/*
 * Compatibility mode system call entry point for x86-64.
 *
 * Copyright 2000-2002 Andi Kleen, SuSE Labs.
 */
#include "calling.h"
#include <asm/asm-offsets.h>
#include <asm/current.h>
#include <asm/errno.h>
#include <asm/ia32_unistd.h>
#include <asm/thread_info.h>
#include <asm/segment.h>
#include <asm/irqflags.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <linux/linkage.h>
#include <linux/err.h>

/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
#include <linux/elf-em.h>
#define AUDIT_ARCH_I386		(EM_386|__AUDIT_ARCH_LE)
#define __AUDIT_ARCH_LE		0x40000000

#ifndef CONFIG_AUDITSYSCALL
# define sysexit_audit		ia32_ret_from_sys_call
# define sysretl_audit		ia32_ret_from_sys_call
#endif

	.section .entry.text, "ax"

#ifdef CONFIG_PARAVIRT
ENTRY(native_usergs_sysret32)
	swapgs
	sysretl
ENDPROC(native_usergs_sysret32)
#endif
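
/*
 * USERGS_SYSRET32, used at the end of the SYSENTER and SYSCALL fast paths
 * below, is expected to expand to exactly this "swapgs; sysretl" pair on
 * native (non-paravirt) builds; with CONFIG_PARAVIRT it goes through a
 * paravirt op whose native backend is native_usergs_sysret32 above.
 */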

/*
 * 32-bit SYSENTER instruction entry.
 *
 * SYSENTER loads ss, rsp, cs, and rip from previously programmed MSRs.
 * IF and VM in rflags are cleared (IOW: interrupts are off).
 * SYSENTER does not save anything on the stack,
 * and does not save old rip (!!!) and rflags.
 *
 * Arguments:
 * eax  system call number
 * ebx  arg1
 * ecx  arg2
 * edx  arg3
 * esi  arg4
 * edi  arg5
 * ebp  user stack
 * 0(%ebp) arg6
 *
 * This is purely a fast path. For anything complicated we use the int 0x80
 * path below. We set up a complete hardware stack frame to share code
 * with the int 0x80 path.
 */
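/*
 * For reference (a sketch of the expected caller, not part of this file):
 * the 32-bit vDSO's __kernel_vsyscall saves %ecx, %edx and %ebp on the user
 * stack and points %ebp at that save area before executing SYSENTER, roughly:
 *
 *	push %ecx
 *	push %edx
 *	push %ebp
 *	movl %esp, %ebp
 *	sysenter
 *
 * which is why arg6 (the caller's original %ebp) is found at 0(%ebp) here.
 */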
ENTRY(ia32_sysenter_target)
	/*
	 * Interrupts are off on entry.
	 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
	 * it is too small to ever cause noticeable irq latency.
	 */
	SWAPGS_UNSAFE_STACK
	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
	ENABLE_INTERRUPTS(CLBR_NONE)

	/* Zero-extending 32-bit regs, do not remove */
	movl	%ebp, %ebp
	movl	%eax, %eax
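	/*
	 * (Writing a 32-bit register clears bits 63:32 of the full 64-bit
	 * register, so these seemingly redundant moves guarantee that the
	 * upper halves of %rbp and %rax contain no stale bits.)
	 */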

	movl	ASM_THREAD_INFO(TI_sysenter_return, %rsp, 0), %r10d

	/* Construct struct pt_regs on stack */
	pushq	$__USER32_DS		/* pt_regs->ss */
	pushq	%rbp			/* pt_regs->sp */
	pushfq				/* pt_regs->flags */
	pushq	$__USER32_CS		/* pt_regs->cs */
	pushq	%r10			/* pt_regs->ip = thread_info->sysenter_return */
	pushq	%rax			/* pt_regs->orig_ax */
	pushq	%rdi			/* pt_regs->di */
	pushq	%rsi			/* pt_regs->si */
	pushq	%rdx			/* pt_regs->dx */
	pushq	%rcx			/* pt_regs->cx */
	pushq	$-ENOSYS		/* pt_regs->ax */
	cld
	sub	$(10*8), %rsp		/* pt_regs->r8-11, bp, bx, r12-15 not saved */
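	/*
	 * The ten slots skipped here (r8-r11, bp, bx, r12-r15) are left
	 * uninitialized; the exit paths below zero the ones that would
	 * otherwise reach user-visible registers ("Do not leak kernel
	 * information").
	 */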

	/*
	 * no need to do an access_ok check here because rbp has been
	 * 32-bit zero extended
	 */
	ASM_STAC
1:	movl	(%rbp), %ebp
	_ASM_EXTABLE(1b, ia32_badarg)
	ASM_CLAC

	/*
	 * Sysenter doesn't filter flags, so we need to clear NT
	 * ourselves.  To save a few cycles, we can check whether
	 * NT was set instead of doing an unconditional popfq.
	 */
	testl	$X86_EFLAGS_NT, EFLAGS(%rsp)
	jnz	sysenter_fix_flags
sysenter_flags_fixed:

	orl	$TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
	testl	$_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
	jnz	sysenter_tracesys

sysenter_do_call:
	/* 32-bit syscall -> 64-bit C ABI argument conversion */
	movl	%edi, %r8d		/* arg5 */
	movl	%ebp, %r9d		/* arg6 */
	xchg	%ecx, %esi		/* rsi:arg2, rcx:arg4 */
	movl	%ebx, %edi		/* arg1 */
	movl	%edx, %edx		/* arg3 (zero extension) */
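	/*
	 * Net effect of the five instructions above (i386 syscall ABI ->
	 * x86-64 C calling convention), all zero-extended to 64 bits:
	 *	ebx -> rdi (arg1), ecx -> rsi (arg2), edx -> rdx (arg3),
	 *	esi -> rcx (arg4), edi -> r8 (arg5),  ebp -> r9 (arg6)
	 */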
sysenter_dispatch:
	cmpq	$(IA32_NR_syscalls-1), %rax
	ja	1f
	call	*ia32_sys_call_table(, %rax, 8)
	movq	%rax, RAX(%rsp)
1:
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	testl	$_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
	jnz	sysexit_audit
sysexit_from_sys_call:
	/*
	 * NB: SYSEXIT is not obviously safe for 64-bit kernels -- an
	 * NMI between STI and SYSEXIT has poorly specified behavior,
	 * and an NMI followed by an IRQ with usergs is fatal.  So we
	 * just pretend we're using SYSEXIT but we really use
	 * SYSRETL instead.
	 *
	 * This code path is still called 'sysexit' because it pairs
	 * with 'sysenter' and it uses the SYSENTER calling convention.
	 */
	andl	$~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
	movl	RIP(%rsp), %ecx		/* User %eip */
	RESTORE_RSI_RDI
	xorl	%edx, %edx		/* Do not leak kernel information */
	xorq	%r8, %r8
	xorq	%r9, %r9
	xorq	%r10, %r10
	movl	EFLAGS(%rsp), %r11d	/* User eflags */
	TRACE_IRQS_ON

	/*
	 * SYSRETL works even on Intel CPUs.  Use it in preference to SYSEXIT,
	 * since it avoids a dicey window with interrupts enabled.
	 */
	movl	RSP(%rsp), %esp

	/*
	 * USERGS_SYSRET32 does:
	 *	gsbase = user's gs base
	 *	eip = ecx
	 *	rflags = r11
	 *	cs = __USER32_CS
	 *	ss = __USER_DS
	 *
	 * The prologue set RIP(%rsp) to VDSO32_SYSENTER_RETURN, which does:
	 *
	 *	pop %ebp
	 *	pop %edx
	 *	pop %ecx
	 *
	 * Therefore, we invoke SYSRETL with EDX and R8-R10 zeroed to
	 * avoid info leaks.  R11 ends up with VDSO32_SYSENTER_RETURN's
	 * address (already known to user code), and R12-R15 are
	 * callee-saved and therefore don't contain any interesting
	 * kernel data.
	 */
	USERGS_SYSRET32

#ifdef CONFIG_AUDITSYSCALL
	.macro auditsys_entry_common
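	/*
	 * Argument marshalling below follows
	 * __audit_syscall_entry(syscall number, arg1, arg2, arg3, arg4)
	 * in the 64-bit C calling convention (rdi, rsi, rdx, rcx, r8);
	 * the reloads after the call restore the syscall number and the
	 * first five syscall arguments the call may have clobbered.
	 */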
	movl	%esi, %r8d		/* 5th arg: 4th syscall arg */
	movl	%ecx, %r9d		/* swap with edx */
	movl	%edx, %ecx		/* 4th arg: 3rd syscall arg */
	movl	%r9d, %edx		/* 3rd arg: 2nd syscall arg */
	movl	%ebx, %esi		/* 2nd arg: 1st syscall arg */
	movl	%eax, %edi		/* 1st arg: syscall number */
	call	__audit_syscall_entry
	movl	ORIG_RAX(%rsp), %eax	/* reload syscall number */
	movl	%ebx, %edi		/* reload 1st syscall arg */
	movl	RCX(%rsp), %esi		/* reload 2nd syscall arg */
	movl	RDX(%rsp), %edx		/* reload 3rd syscall arg */
	movl	RSI(%rsp), %ecx		/* reload 4th syscall arg */
	movl	RDI(%rsp), %r8d		/* reload 5th syscall arg */
	.endm

	.macro auditsys_exit exit
	testl	$(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
	jnz	ia32_ret_from_sys_call
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	movl	%eax, %esi		/* second arg, syscall return value */
	cmpl	$-MAX_ERRNO, %eax	/* is it an error ? */
	jbe	1f
	movslq	%eax, %rsi		/* if error sign extend to 64 bits */
1:	setbe	%al			/* 1 if error, 0 if not */
	movzbl	%al, %edi		/* zero-extend that into %edi */
	call	__audit_syscall_exit
	movq	RAX(%rsp), %rax		/* reload syscall return value */
	movl	$(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %edi
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	testl	%edi, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
	jz	\exit
	xorl	%eax, %eax		/* Do not leak kernel information */
	movq	%rax, R11(%rsp)
	movq	%rax, R10(%rsp)
	movq	%rax, R9(%rsp)
	movq	%rax, R8(%rsp)
	jmp	int_with_check
	.endm

sysenter_auditsys:
	auditsys_entry_common
	movl	%ebp, %r9d		/* reload 6th syscall arg */
	jmp	sysenter_dispatch

sysexit_audit:
	auditsys_exit sysexit_from_sys_call
#endif

sysenter_fix_flags:
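	/*
	 * Load a clean flags image: keep only IF and the always-set
	 * reserved bit 1 (X86_EFLAGS_FIXED), which in particular clears NT.
	 */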
	pushq	$(X86_EFLAGS_IF|X86_EFLAGS_FIXED)
	popfq
	jmp	sysenter_flags_fixed

sysenter_tracesys:
#ifdef CONFIG_AUDITSYSCALL
	testl	$(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
	jz	sysenter_auditsys
#endif
	SAVE_EXTRA_REGS
	xorl	%eax, %eax		/* Do not leak kernel information */
	movq	%rax, R11(%rsp)
	movq	%rax, R10(%rsp)
	movq	%rax, R9(%rsp)
	movq	%rax, R8(%rsp)
	movq	%rsp, %rdi		/* &pt_regs -> arg1 */
	call	syscall_trace_enter

	/* Reload arg registers from stack. (see sysenter_tracesys) */
	movl	RCX(%rsp), %ecx
	movl	RDX(%rsp), %edx
	movl	RSI(%rsp), %esi
	movl	RDI(%rsp), %edi
	movl	%eax, %eax		/* zero extension */

	RESTORE_EXTRA_REGS
	jmp	sysenter_do_call
ENDPROC(ia32_sysenter_target)

/*
 * 32-bit SYSCALL instruction entry.
 *
 * 32-bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11,
 * then loads new ss, cs, and rip from previously programmed MSRs.
 * rflags gets masked by a value from another MSR (so CLD and CLAC
 * are not needed).  SYSCALL does not save anything on the stack
 * and does not change rsp.
 *
 * Note: rflags saving+masking-with-MSR happens only in Long mode
 * (in legacy 32-bit mode, IF, RF and VM bits are cleared and that's it).
 * Don't get confused: rflags saving+masking depends on Long Mode Active bit
 * (EFER.LMA=1), NOT on bitness of userspace where SYSCALL executes
 * or target CS descriptor's L bit (SYSCALL does not read segment descriptors).
 *
 * Arguments:
 * eax  system call number
 * ecx  return address
 * ebx  arg1
 * ebp  arg2	(note: not saved in the stack frame, should not be touched)
 * edx  arg3
 * esi  arg4
 * edi  arg5
 * esp  user stack
 * 0(%esp) arg6
 *
 * This is purely a fast path. For anything complicated we use the int 0x80
 * path below. We set up a complete hardware stack frame to share code
 * with the int 0x80 path.
 */
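/*
 * For reference (a sketch of the expected caller, not part of this file):
 * SYSCALL itself overwrites %ecx with the return address, so the 32-bit
 * vDSO's SYSCALL flavor of __kernel_vsyscall stashes the original %ecx
 * (arg2) in %ebp first, roughly:
 *
 *	push %ebp
 *	movl %ecx, %ebp
 *	syscall
 *	...
 *	movl %ebp, %ecx
 *	popl %ebp
 *	ret
 *
 * which is why arg2 arrives here in %ebp and the real arg6 sits on the
 * user stack at 0(%esp).
 */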
ENTRY(entry_SYSCALL_compat)
	/*
	 * Interrupts are off on entry.
	 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
	 * it is too small to ever cause noticeable irq latency.
	 */
	SWAPGS_UNSAFE_STACK
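	/*
	 * SYSCALL does not switch stacks, so %esp still points at the user
	 * stack: stash it in %r8 before loading the kernel stack pointer.
	 */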
	movl	%esp, %r8d
	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
	ENABLE_INTERRUPTS(CLBR_NONE)

	/* Zero-extending 32-bit regs, do not remove */
	movl	%eax, %eax

	/* Construct struct pt_regs on stack */
	pushq	$__USER32_DS		/* pt_regs->ss */
	pushq	%r8			/* pt_regs->sp */
	pushq	%r11			/* pt_regs->flags */
	pushq	$__USER32_CS		/* pt_regs->cs */
	pushq	%rcx			/* pt_regs->ip */
	pushq	%rax			/* pt_regs->orig_ax */
	pushq	%rdi			/* pt_regs->di */
	pushq	%rsi			/* pt_regs->si */
	pushq	%rdx			/* pt_regs->dx */
	pushq	%rbp			/* pt_regs->cx */
	movl	%ebp, %ecx
	pushq	$-ENOSYS		/* pt_regs->ax */
	sub	$(10*8), %rsp		/* pt_regs->r8-11, bp, bx, r12-15 not saved */

	/*
	 * No need to do an access_ok check here because r8 has been
	 * 32-bit zero extended:
	 */
	ASM_STAC
1:	movl	(%r8), %ebp
	_ASM_EXTABLE(1b, ia32_badarg)
	ASM_CLAC
	orl	$TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
	testl	$_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
	jnz	cstar_tracesys

cstar_do_call:
	/* 32-bit syscall -> 64-bit C ABI argument conversion */
	movl	%edi, %r8d		/* arg5 */
	movl	%ebp, %r9d		/* arg6 */
	xchg	%ecx, %esi		/* rsi:arg2, rcx:arg4 */
	movl	%ebx, %edi		/* arg1 */
	movl	%edx, %edx		/* arg3 (zero extension) */

cstar_dispatch:
	cmpq	$(IA32_NR_syscalls-1), %rax
	ja	1f

	call	*ia32_sys_call_table(, %rax, 8)
	movq	%rax, RAX(%rsp)
1:
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	testl	$_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
	jnz	sysretl_audit

sysretl_from_sys_call:
	andl	$~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
	movl	RCX(%rsp), %ebp
	RESTORE_RSI_RDI_RDX
	movl	RIP(%rsp), %ecx
	movl	EFLAGS(%rsp), %r11d
	xorq	%r10, %r10
	xorq	%r9, %r9
	xorq	%r8, %r8
	TRACE_IRQS_ON
	movl	RSP(%rsp), %esp
	/*
	 * 64-bit->32-bit SYSRET restores eip from ecx,
	 * eflags from r11 (but RF and VM bits are forced to 0),
	 * cs and ss are loaded from MSRs.
	 * (Note: 32-bit->32-bit SYSRET is different: since r11
	 * does not exist, it merely sets eflags.IF=1).
	 *
	 * NB: On AMD CPUs with the X86_BUG_SYSRET_SS_ATTRS bug, the ss
	 * descriptor is not reinitialized.  This means that we must
	 * avoid SYSRET with SS == NULL, which could happen if we schedule,
	 * exit the kernel, and re-enter using an interrupt vector.  (All
	 * interrupt entries on x86_64 set SS to NULL.)  We prevent that
	 * from happening by reloading SS in __switch_to.
	 */
	USERGS_SYSRET32

#ifdef CONFIG_AUDITSYSCALL
cstar_auditsys:
	auditsys_entry_common
	movl	%ebp, %r9d		/* reload 6th syscall arg */
	jmp	cstar_dispatch

sysretl_audit:
	auditsys_exit sysretl_from_sys_call
#endif

cstar_tracesys:
#ifdef CONFIG_AUDITSYSCALL
	testl	$(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
	jz	cstar_auditsys
#endif
	SAVE_EXTRA_REGS
	xorl	%eax, %eax		/* Do not leak kernel information */
	movq	%rax, R11(%rsp)
	movq	%rax, R10(%rsp)
	movq	%rax, R9(%rsp)
	movq	%rax, R8(%rsp)
	movq	%rsp, %rdi		/* &pt_regs -> arg1 */
	call	syscall_trace_enter

	/* Reload arg registers from stack. (see sysenter_tracesys) */
	movl	RCX(%rsp), %ecx
	movl	RDX(%rsp), %edx
	movl	RSI(%rsp), %esi
	movl	RDI(%rsp), %edi
	movl	%eax, %eax		/* zero extension */

	RESTORE_EXTRA_REGS
	jmp	cstar_do_call
END(entry_SYSCALL_compat)
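
/*
 * Fault target for the "load arg6 from the user stack" instructions above
 * (reached via their _ASM_EXTABLE entries): fail the syscall with -EFAULT.
 */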
ia32_badarg:
	ASM_CLAC
	movq	$-EFAULT, %rax
	jmp	ia32_sysret

ia32_ret_from_sys_call:
	xorl	%eax, %eax		/* Do not leak kernel information */
	movq	%rax, R11(%rsp)
	movq	%rax, R10(%rsp)
	movq	%rax, R9(%rsp)
	movq	%rax, R8(%rsp)
	jmp	int_ret_from_sys_call

/*
 * Emulated IA32 system calls via int 0x80.
 *
 * Arguments:
 * eax  system call number
 * ebx  arg1
 * ecx  arg2
 * edx  arg3
 * esi  arg4
 * edi  arg5
 * ebp  arg6	(note: not saved in the stack frame, should not be touched)
 *
 * Notes:
 * Uses the same stack frame as the x86-64 version.
 * All registers except eax must be saved (but ptrace may violate that).
 * Arguments are zero extended. For system calls that want sign extension and
 * take long arguments a wrapper is needed. Most calls can just be called
 * directly.
 * Assumes it is only called from user space and entered with interrupts off.
 */
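/*
 * For example, a 32-bit write(1, buf, len) issued through this path executes
 * "int $0x80" with eax = __NR_write (4 in the 32-bit syscall table), ebx = 1,
 * ecx = buf, edx = len, and gets the result (byte count or -errno) back in eax.
 */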

ENTRY(entry_INT80_compat)
	/*
	 * Interrupts are off on entry.
	 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
	 * it is too small to ever cause noticeable irq latency.
	 */
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	SWAPGS
	ENABLE_INTERRUPTS(CLBR_NONE)

	/* Zero-extending 32-bit regs, do not remove */
	movl	%eax, %eax

	/* Construct struct pt_regs on stack (iret frame is already on stack) */
	pushq	%rax			/* pt_regs->orig_ax */
	pushq	%rdi			/* pt_regs->di */
	pushq	%rsi			/* pt_regs->si */
	pushq	%rdx			/* pt_regs->dx */
	pushq	%rcx			/* pt_regs->cx */
	pushq	$-ENOSYS		/* pt_regs->ax */
	pushq	$0			/* pt_regs->r8 */
	pushq	$0			/* pt_regs->r9 */
	pushq	$0			/* pt_regs->r10 */
	pushq	$0			/* pt_regs->r11 */
	cld
	sub	$(6*8), %rsp		/* pt_regs->bp, bx, r12-15 not saved */

	orl	$TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
	testl	$_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
	jnz	ia32_tracesys

ia32_do_call:
	/* 32-bit syscall -> 64-bit C ABI argument conversion */
	movl	%edi, %r8d		/* arg5 */
	movl	%ebp, %r9d		/* arg6 */
	xchg	%ecx, %esi		/* rsi:arg2, rcx:arg4 */
	movl	%ebx, %edi		/* arg1 */
	movl	%edx, %edx		/* arg3 (zero extension) */
	cmpq	$(IA32_NR_syscalls-1), %rax
	ja	1f

	call	*ia32_sys_call_table(, %rax, 8)	/* RIP relative */

ia32_sysret:
	movq	%rax, RAX(%rsp)
1:
	jmp	int_ret_from_sys_call

ia32_tracesys:
	SAVE_EXTRA_REGS
	movq	%rsp, %rdi		/* &pt_regs -> arg1 */
	call	syscall_trace_enter
	/*
	 * Reload arg registers from stack in case ptrace changed them.
	 * Don't reload %eax because syscall_trace_enter() returned
	 * the %rax value we should see.  But do truncate it to 32 bits.
	 * If it's -1 to make us punt the syscall, then (u32)-1 is still
	 * an appropriately invalid value.
	 */
	movl	RCX(%rsp), %ecx
	movl	RDX(%rsp), %edx
	movl	RSI(%rsp), %esi
	movl	RDI(%rsp), %edi
	movl	%eax, %eax		/* zero extension */
	RESTORE_EXTRA_REGS
	jmp	ia32_do_call
END(entry_INT80_compat)

	.macro PTREGSCALL label, func
	ALIGN
GLOBAL(\label)
	leaq	\func(%rip), %rax
	jmp	ia32_ptregs_common
	.endm
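
/*
 * Each PTREGSCALL stub below loads the address of its handler into %rax and
 * jumps to ia32_ptregs_common, which saves the remaining callee-saved
 * registers (SAVE_EXTRA_REGS) so the handler sees a complete struct pt_regs,
 * then makes the call.
 */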

	PTREGSCALL stub32_rt_sigreturn, sys32_rt_sigreturn
	PTREGSCALL stub32_sigreturn, sys32_sigreturn
	PTREGSCALL stub32_fork, sys_fork
	PTREGSCALL stub32_vfork, sys_vfork

	ALIGN
GLOBAL(stub32_clone)
	leaq	sys_clone(%rip), %rax
	/*
	 * The 32-bit clone ABI is: clone(..., int tls_val, int *child_tidptr).
	 * The 64-bit clone ABI is: clone(..., int *child_tidptr, int tls_val).
	 *
	 * The native 64-bit kernel's sys_clone() implements the latter,
	 * so we need to swap arguments here before calling it:
	 */
	xchg	%r8, %rcx
	jmp	ia32_ptregs_common

	ALIGN
ia32_ptregs_common:
	SAVE_EXTRA_REGS 8
	call	*%rax
	RESTORE_EXTRA_REGS 8
	ret
END(ia32_ptregs_common)