ftrace/x86_32: Simplify parameter setup for ftrace_regs_caller
arch/x86/kernel/entry_32.S
/*
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 * I changed all the .align's to 4 (16 byte alignment), as that's faster
 * on a 486.
 *
 * Stack layout in 'syscall_exit':
 *	ptrace needs to have all regs on the stack.
 *	if the order here is changed, it needs to be
 *	updated in fork.c:copy_process, signal.c:do_signal,
 *	ptrace.c and ptrace.h
 *
 *	 0(%esp) - %ebx
 *	 4(%esp) - %ecx
 *	 8(%esp) - %edx
 *	 C(%esp) - %esi
 *	10(%esp) - %edi
 *	14(%esp) - %ebp
 *	18(%esp) - %eax
 *	1C(%esp) - %ds
 *	20(%esp) - %es
 *	24(%esp) - %fs
 *	28(%esp) - %gs		saved iff !CONFIG_X86_32_LAZY_GS
 *	2C(%esp) - orig_eax
 *	30(%esp) - %eip
 *	34(%esp) - %cs
 *	38(%esp) - %eflags
 *	3C(%esp) - %oldesp
 *	40(%esp) - %oldss
 *
 * "current" is in register %ebx during any slow entries.
 */
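/*
 * For reference (illustrative, simply restating the offsets listed above):
 * these are the values the PT_* asm-offsets used throughout this file
 * resolve to, e.g. PT_EBX = 0x00, PT_EAX = 0x18, PT_GS = 0x28,
 * PT_ORIG_EAX = 0x2C, PT_EIP = 0x30, PT_OLDESP = 0x3C and PT_OLDSS = 0x40.
 */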

#include <linux/linkage.h>
#include <linux/err.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/smp.h>
#include <asm/page_types.h>
#include <asm/percpu.h>
#include <asm/dwarf2.h>
#include <asm/processor-flags.h>
#include <asm/ftrace.h>
#include <asm/irq_vectors.h>
#include <asm/cpufeature.h>
#include <asm/alternative-asm.h>
#include <asm/asm.h>

/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
#include <linux/elf-em.h>
#define AUDIT_ARCH_I386		(EM_386|__AUDIT_ARCH_LE)
#define __AUDIT_ARCH_LE		0x40000000

#ifndef CONFIG_AUDITSYSCALL
#define sysenter_audit	syscall_trace_entry
#define sysexit_audit	syscall_exit_work
#endif

	.section .entry.text, "ax"

/*
 * We use macros for low-level operations which need to be overridden
 * for paravirtualization.  The following will never clobber any registers:
 *   INTERRUPT_RETURN (aka. "iret")
 *   GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
 *   ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
 *
 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
 * Allowing a register to be clobbered can shrink the paravirt replacement
 * enough to patch inline, increasing performance.
 */

#ifdef CONFIG_PREEMPT
#define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
#else
#define preempt_stop(clobbers)
#define resume_kernel		restore_all
#endif

.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
	testl $X86_EFLAGS_IF,PT_EFLAGS(%esp)	# interrupts off?
	jz 1f
	TRACE_IRQS_ON
1:
#endif
.endm

/*
 * User gs save/restore
 *
 * %gs is used for userland TLS and kernel only uses it for stack
 * canary which is required to be at %gs:20 by gcc.  Read the comment
 * at the top of stackprotector.h for more info.
 *
 * Local labels 98 and 99 are used.
 */
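/*
 * Illustrative only (assumed compiler output, not part of this file): with
 * the stack protector enabled, gcc references the canary roughly as
 *
 *	movl %gs:20, %eax		# prologue: copy canary into the frame
 *	movl %eax, -4(%ebp)
 *	...
 *	movl -4(%ebp), %eax		# epilogue: recheck it
 *	xorl %gs:20, %eax
 *	jne  <slow path that calls __stack_chk_fail>
 *
 * which is why SET_KERNEL_GS below must always leave %gs pointing at a
 * per-cpu area whose offset 20 holds the canary.
 */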
#ifdef CONFIG_X86_32_LAZY_GS

 /* unfortunately push/pop can't be no-op */
.macro PUSH_GS
	pushl_cfi $0
.endm
.macro POP_GS pop=0
	addl $(4 + \pop), %esp
	CFI_ADJUST_CFA_OFFSET -(4 + \pop)
.endm
.macro POP_GS_EX
.endm

 /* all the rest are no-op */
.macro PTGS_TO_GS
.endm
.macro PTGS_TO_GS_EX
.endm
.macro GS_TO_REG reg
.endm
.macro REG_TO_PTGS reg
.endm
.macro SET_KERNEL_GS reg
.endm

#else	/* CONFIG_X86_32_LAZY_GS */

.macro PUSH_GS
	pushl_cfi %gs
	/*CFI_REL_OFFSET gs, 0*/
.endm

.macro POP_GS pop=0
98:	popl_cfi %gs
	/*CFI_RESTORE gs*/
	.if \pop <> 0
	add $\pop, %esp
	CFI_ADJUST_CFA_OFFSET -\pop
	.endif
.endm
.macro POP_GS_EX
.pushsection .fixup, "ax"
99:	movl $0, (%esp)
	jmp 98b
.popsection
	_ASM_EXTABLE(98b,99b)
.endm

.macro PTGS_TO_GS
98:	mov PT_GS(%esp), %gs
.endm
.macro PTGS_TO_GS_EX
.pushsection .fixup, "ax"
99:	movl $0, PT_GS(%esp)
	jmp 98b
.popsection
	_ASM_EXTABLE(98b,99b)
.endm

.macro GS_TO_REG reg
	movl %gs, \reg
	/*CFI_REGISTER gs, \reg*/
.endm
.macro REG_TO_PTGS reg
	movl \reg, PT_GS(%esp)
	/*CFI_REL_OFFSET gs, PT_GS*/
.endm
.macro SET_KERNEL_GS reg
	movl $(__KERNEL_STACK_CANARY), \reg
	movl \reg, %gs
.endm

#endif	/* CONFIG_X86_32_LAZY_GS */

.macro SAVE_ALL
	cld
	PUSH_GS
	pushl_cfi %fs
	/*CFI_REL_OFFSET fs, 0;*/
	pushl_cfi %es
	/*CFI_REL_OFFSET es, 0;*/
	pushl_cfi %ds
	/*CFI_REL_OFFSET ds, 0;*/
	pushl_cfi %eax
	CFI_REL_OFFSET eax, 0
	pushl_cfi %ebp
	CFI_REL_OFFSET ebp, 0
	pushl_cfi %edi
	CFI_REL_OFFSET edi, 0
	pushl_cfi %esi
	CFI_REL_OFFSET esi, 0
	pushl_cfi %edx
	CFI_REL_OFFSET edx, 0
	pushl_cfi %ecx
	CFI_REL_OFFSET ecx, 0
	pushl_cfi %ebx
	CFI_REL_OFFSET ebx, 0
	movl $(__USER_DS), %edx
	movl %edx, %ds
	movl %edx, %es
	movl $(__KERNEL_PERCPU), %edx
	movl %edx, %fs
	SET_KERNEL_GS %edx
.endm

.macro RESTORE_INT_REGS
	popl_cfi %ebx
	CFI_RESTORE ebx
	popl_cfi %ecx
	CFI_RESTORE ecx
	popl_cfi %edx
	CFI_RESTORE edx
	popl_cfi %esi
	CFI_RESTORE esi
	popl_cfi %edi
	CFI_RESTORE edi
	popl_cfi %ebp
	CFI_RESTORE ebp
	popl_cfi %eax
	CFI_RESTORE eax
.endm

.macro RESTORE_REGS pop=0
	RESTORE_INT_REGS
1:	popl_cfi %ds
	/*CFI_RESTORE ds;*/
2:	popl_cfi %es
	/*CFI_RESTORE es;*/
3:	popl_cfi %fs
	/*CFI_RESTORE fs;*/
	POP_GS \pop
.pushsection .fixup, "ax"
4:	movl $0, (%esp)
	jmp 1b
5:	movl $0, (%esp)
	jmp 2b
6:	movl $0, (%esp)
	jmp 3b
.popsection
	_ASM_EXTABLE(1b,4b)
	_ASM_EXTABLE(2b,5b)
	_ASM_EXTABLE(3b,6b)
	POP_GS_EX
.endm

.macro RING0_INT_FRAME
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, 3*4
	/*CFI_OFFSET cs, -2*4;*/
	CFI_OFFSET eip, -3*4
.endm

.macro RING0_EC_FRAME
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, 4*4
	/*CFI_OFFSET cs, -2*4;*/
	CFI_OFFSET eip, -3*4
.endm

.macro RING0_PTREGS_FRAME
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, PT_OLDESP-PT_EBX
	/*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/
	CFI_OFFSET eip, PT_EIP-PT_OLDESP
	/*CFI_OFFSET es, PT_ES-PT_OLDESP;*/
	/*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/
	CFI_OFFSET eax, PT_EAX-PT_OLDESP
	CFI_OFFSET ebp, PT_EBP-PT_OLDESP
	CFI_OFFSET edi, PT_EDI-PT_OLDESP
	CFI_OFFSET esi, PT_ESI-PT_OLDESP
	CFI_OFFSET edx, PT_EDX-PT_OLDESP
	CFI_OFFSET ecx, PT_ECX-PT_OLDESP
	CFI_OFFSET ebx, PT_EBX-PT_OLDESP
.endm

ENTRY(ret_from_fork)
	CFI_STARTPROC
	pushl_cfi %eax
	call schedule_tail
	GET_THREAD_INFO(%ebp)
	popl_cfi %eax
	pushl_cfi $0x0202		# Reset kernel eflags
	popfl_cfi
	jmp syscall_exit
	CFI_ENDPROC
END(ret_from_fork)

/*
 * Interrupt exit functions should be protected against kprobes
 */
	.pushsection .kprobes.text, "ax"
/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible which is why some of this is
 * less clear than it otherwise should be.
 */

	# userspace resumption stub bypassing syscall exit tracing
	ALIGN
	RING0_PTREGS_FRAME
ret_from_exception:
	preempt_stop(CLBR_ANY)
ret_from_intr:
	GET_THREAD_INFO(%ebp)
#ifdef CONFIG_VM86
	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS and CS
	movb PT_CS(%esp), %al
	andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
#else
	/*
	 * We can be coming here from a syscall done in the kernel space,
	 * e.g. a failed kernel_execve().
	 */
	movl PT_CS(%esp), %eax
	andl $SEGMENT_RPL_MASK, %eax
#endif
	cmpl $USER_RPL, %eax
	jb resume_kernel		# not returning to v8086 or userspace

ENTRY(resume_userspace)
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done on
					# int/exception return?
	jne work_pending
	jmp restore_all
END(ret_from_exception)

#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
	DISABLE_INTERRUPTS(CLBR_ANY)
	cmpl $0,TI_preempt_count(%ebp)	# non-zero preempt_count ?
	jnz restore_all
need_resched:
	movl TI_flags(%ebp), %ecx	# need_resched set ?
	testb $_TIF_NEED_RESCHED, %cl
	jz restore_all
	testl $X86_EFLAGS_IF,PT_EFLAGS(%esp)	# interrupts off (exception path) ?
	jz restore_all
	call preempt_schedule_irq
	jmp need_resched
END(resume_kernel)
#endif
	CFI_ENDPROC
/*
 * End of kprobes section
 */
	.popsection

/* SYSENTER_RETURN points to after the "sysenter" instruction in
   the vsyscall page.  See vsyscall-sysentry.S, which defines the symbol.  */

	# sysenter call handler stub
ENTRY(ia32_sysenter_target)
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, 0
	CFI_REGISTER esp, ebp
	movl TSS_sysenter_sp0(%esp),%esp
sysenter_past_esp:
	/*
	 * Interrupts are disabled here, but we can't trace it until
	 * enough kernel state to call TRACE_IRQS_OFF can be called - but
	 * we immediately enable interrupts at that point anyway.
	 */
	pushl_cfi $__USER_DS
	/*CFI_REL_OFFSET ss, 0*/
	pushl_cfi %ebp
	CFI_REL_OFFSET esp, 0
	pushfl_cfi
	orl $X86_EFLAGS_IF, (%esp)
	pushl_cfi $__USER_CS
	/*CFI_REL_OFFSET cs, 0*/
	/*
	 * Push current_thread_info()->sysenter_return to the stack.
	 * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
	 * pushed above; +8 corresponds to copy_thread's esp0 setting.
	 */
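	/*
	 * Worked out (illustrative): %esp here is tss.sp0 - 4*4, copy_thread()
	 * sets sp0 to the top of the kernel stack minus 8, and thread_info
	 * lives THREAD_SIZE below that top, so the field's address is
	 * %esp + 4*4 + 8 - THREAD_SIZE + TI_sysenter_return, which is exactly
	 * the operand used below.
	 */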
	pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
	CFI_REL_OFFSET eip, 0

	pushl_cfi %eax
	SAVE_ALL
	ENABLE_INTERRUPTS(CLBR_NONE)

/*
 * Load the potential sixth argument from user stack.
 * Careful about security: the '-3' in the bound check below keeps the
 * 4-byte load at (%ebp) entirely below __PAGE_OFFSET, i.e. in user space.
 */
	cmpl $__PAGE_OFFSET-3,%ebp
	jae syscall_fault
1:	movl (%ebp),%ebp
	movl %ebp,PT_EBP(%esp)
	_ASM_EXTABLE(1b,syscall_fault)

	GET_THREAD_INFO(%ebp)

	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
	jnz sysenter_audit
sysenter_do_call:
	cmpl $(NR_syscalls), %eax
	jae syscall_badsys
	call *sys_call_table(,%eax,4)
	movl %eax,PT_EAX(%esp)
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testl $_TIF_ALLWORK_MASK, %ecx
	jne sysexit_audit
sysenter_exit:
/* if something modifies registers it must also disable sysexit */
	movl PT_EIP(%esp), %edx
	movl PT_OLDESP(%esp), %ecx
	xorl %ebp,%ebp
	TRACE_IRQS_ON
1:	mov PT_FS(%esp), %fs
	PTGS_TO_GS
	ENABLE_INTERRUPTS_SYSEXIT

#ifdef CONFIG_AUDITSYSCALL
sysenter_audit:
	testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
	jnz syscall_trace_entry
	addl $4,%esp
	CFI_ADJUST_CFA_OFFSET -4
	/* %esi already in 8(%esp)	   6th arg: 4th syscall arg */
	/* %edx already in 4(%esp)	   5th arg: 3rd syscall arg */
	/* %ecx already in 0(%esp)	   4th arg: 2nd syscall arg */
	movl %ebx,%ecx			/* 3rd arg: 1st syscall arg */
	movl %eax,%edx			/* 2nd arg: syscall number */
	movl $AUDIT_ARCH_I386,%eax	/* 1st arg: audit arch */
	call __audit_syscall_entry
	pushl_cfi %ebx
	movl PT_EAX(%esp),%eax		/* reload syscall number */
	jmp sysenter_do_call

sysexit_audit:
	testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
	jne syscall_exit_work
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_ANY)
	movl %eax,%edx		/* second arg, syscall return value */
	cmpl $-MAX_ERRNO,%eax	/* is it an error ? */
	setbe %al		/* 1 if so, 0 if not */
	movzbl %al,%eax		/* zero-extend that */
	call __audit_syscall_exit
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
	jne syscall_exit_work
	movl PT_EAX(%esp),%eax	/* reload syscall return value */
	jmp sysenter_exit
#endif

	CFI_ENDPROC
.pushsection .fixup,"ax"
2:	movl $0,PT_FS(%esp)
	jmp 1b
.popsection
	_ASM_EXTABLE(1b,2b)
	PTGS_TO_GS_EX
ENDPROC(ia32_sysenter_target)

/*
 * syscall stub including irq exit should be protected against kprobes
 */
	.pushsection .kprobes.text, "ax"
	# system call handler stub
ENTRY(system_call)
	RING0_INT_FRAME			# can't unwind into user space anyway
	pushl_cfi %eax			# save orig_eax
	SAVE_ALL
	GET_THREAD_INFO(%ebp)
					# system call tracing in operation / emulation
	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
	jnz syscall_trace_entry
	cmpl $(NR_syscalls), %eax
	jae syscall_badsys
syscall_call:
	call *sys_call_table(,%eax,4)
	movl %eax,PT_EAX(%esp)		# store the return value
syscall_exit:
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testl $_TIF_ALLWORK_MASK, %ecx	# current->work
	jne syscall_exit_work

restore_all:
	TRACE_IRQS_IRET
restore_all_notrace:
	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS, SS and CS
	# Warning: PT_OLDSS(%esp) contains the wrong/random values if we
	# are returning to the kernel.
	# See comments in process.c:copy_thread() for details.
	movb PT_OLDSS(%esp), %ah
	movb PT_CS(%esp), %al
	andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
	cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
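	# Illustrative, assuming the usual segment.h values: the mask keeps
	# EFLAGS.VM (bit 17), SS.TI (bit 2, now in %ah) and CS.RPL (in %al);
	# the compare value (SEGMENT_LDT << 8) | USER_RPL == 0x403 thus means
	# "VM off, SS refers to the LDT, and we are returning to CPL 3".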
	CFI_REMEMBER_STATE
	je ldt_ss			# returning to user-space with LDT SS
restore_nocheck:
	RESTORE_REGS 4			# skip orig_eax/error_code
irq_return:
	INTERRUPT_RETURN
.section .fixup,"ax"
ENTRY(iret_exc)
	pushl $0			# no error code
	pushl $do_iret_error
	jmp error_code
.previous
	_ASM_EXTABLE(irq_return,iret_exc)

	CFI_RESTORE_STATE
ldt_ss:
	larl PT_OLDSS(%esp), %eax
	jnz restore_nocheck
	testl $0x00400000, %eax		# returning to 32bit stack?
	jnz restore_nocheck		# all right, normal return

#ifdef CONFIG_PARAVIRT
	/*
	 * The kernel can't run on a non-flat stack if paravirt mode
	 * is active.  Rather than try to fix up the high bits of
	 * ESP, bypass this code entirely.  This may break DOSemu
	 * and/or Wine support in a paravirt VM, although the option
	 * is still available to implement the setting of the high
	 * 16-bits in the INTERRUPT_RETURN paravirt-op.
	 */
	cmpl $0, pv_info+PARAVIRT_enabled
	jne restore_nocheck
#endif

/*
 * Set up and switch to the ESPFIX stack
 *
 * We're returning to userspace with a 16 bit stack. The CPU will not
 * restore the high word of ESP for us on executing iret... This is an
 * "official" bug of all the x86-compatible CPUs, which we can work
 * around to make dosemu and wine happy. We do this by preloading the
 * high word of ESP with the high word of the userspace ESP while
 * compensating for the offset by changing to the ESPFIX segment with
 * a base address that matches for the difference.
 */
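/*
 * Worked example (illustrative numbers): with a userspace ESP of 0x0012fff0
 * and a kernel ESP of 0xc1a3ffac at this point, the code below computes
 *	%eax = 0x0012ffac		(user high word, kernel low word)
 *	%edx = 0xc1a3ffac - 0x0012ffac = 0xc1910000, shifted to 0xc191
 * The ESPFIX segment then gets base 0xc1910000, so the new ESP 0x0012ffac
 * still resolves to the kernel stack (0xc1910000 + 0x0012ffac = 0xc1a3ffac)
 * while its high word already holds the value the 16-bit user stack needs.
 */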
#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
	mov %esp, %edx			/* load kernel esp */
	mov PT_OLDESP(%esp), %eax	/* load userspace esp */
	mov %dx, %ax			/* eax: new kernel esp */
	sub %eax, %edx			/* offset (low word is 0) */
	shr $16, %edx
	mov %dl, GDT_ESPFIX_SS + 4	/* bits 16..23 */
	mov %dh, GDT_ESPFIX_SS + 7	/* bits 24..31 */
	pushl_cfi $__ESPFIX_SS
	pushl_cfi %eax			/* new kernel esp */
	/* Disable interrupts, but do not irqtrace this section: we
	 * will soon execute iret and the tracer was already set to
	 * the irqstate after the iret */
	DISABLE_INTERRUPTS(CLBR_EAX)
	lss (%esp), %esp		/* switch to espfix segment */
	CFI_ADJUST_CFA_OFFSET -8
	jmp restore_nocheck
	CFI_ENDPROC
ENDPROC(system_call)

	# perform work that needs to be done immediately before resumption
	ALIGN
	RING0_PTREGS_FRAME		# can't unwind into user space anyway
work_pending:
	testb $_TIF_NEED_RESCHED, %cl
	jz work_notifysig
work_resched:
	call schedule
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done other
					# than syscall tracing?
	jz restore_all
	testb $_TIF_NEED_RESCHED, %cl
	jnz work_resched

work_notifysig:				# deal with pending signals and
					# notify-resume requests
#ifdef CONFIG_VM86
	testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
	movl %esp, %eax
	jne work_notifysig_v86		# returning to kernel-space or
					# vm86-space
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	movb PT_CS(%esp), %bl
	andb $SEGMENT_RPL_MASK, %bl
	cmpb $USER_RPL, %bl
	jb resume_kernel
	xorl %edx, %edx
	call do_notify_resume
	jmp resume_userspace

	ALIGN
work_notifysig_v86:
	pushl_cfi %ecx			# save ti_flags for do_notify_resume
	call save_v86_state		# %eax contains pt_regs pointer
	popl_cfi %ecx
	movl %eax, %esp
#else
	movl %esp, %eax
#endif
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	movb PT_CS(%esp), %bl
	andb $SEGMENT_RPL_MASK, %bl
	cmpb $USER_RPL, %bl
	jb resume_kernel
	xorl %edx, %edx
	call do_notify_resume
	jmp resume_userspace
END(work_pending)

	# perform syscall exit tracing
	ALIGN
syscall_trace_entry:
	movl $-ENOSYS,PT_EAX(%esp)
	movl %esp, %eax
	call syscall_trace_enter
	/* What it returned is what we'll actually use.  */
	cmpl $(NR_syscalls), %eax
	jnae syscall_call
	jmp syscall_exit
END(syscall_trace_entry)

	# perform syscall exit tracing
	ALIGN
syscall_exit_work:
	testl $_TIF_WORK_SYSCALL_EXIT, %ecx
	jz work_pending
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_ANY)	# could let syscall_trace_leave() call
					# schedule() instead
	movl %esp, %eax
	call syscall_trace_leave
	jmp resume_userspace
END(syscall_exit_work)
	CFI_ENDPROC

	RING0_INT_FRAME			# can't unwind into user space anyway
syscall_fault:
	GET_THREAD_INFO(%ebp)
	movl $-EFAULT,PT_EAX(%esp)
	jmp resume_userspace
END(syscall_fault)

syscall_badsys:
	movl $-ENOSYS,PT_EAX(%esp)
	jmp resume_userspace
END(syscall_badsys)
	CFI_ENDPROC
/*
 * End of kprobes section
 */
	.popsection

/*
 * System calls that need a pt_regs pointer.
 */
#define PTREGSCALL0(name) \
ENTRY(ptregs_##name) ;  \
	leal 4(%esp),%eax; \
	jmp sys_##name;	\
ENDPROC(ptregs_##name)

#define PTREGSCALL1(name) \
ENTRY(ptregs_##name) ; \
	leal 4(%esp),%edx; \
	movl (PT_EBX+4)(%esp),%eax; \
	jmp sys_##name;	\
ENDPROC(ptregs_##name)

#define PTREGSCALL2(name) \
ENTRY(ptregs_##name) ; \
	leal 4(%esp),%ecx; \
	movl (PT_ECX+4)(%esp),%edx; \
	movl (PT_EBX+4)(%esp),%eax; \
	jmp sys_##name;	\
ENDPROC(ptregs_##name)

#define PTREGSCALL3(name) \
ENTRY(ptregs_##name) ; \
	CFI_STARTPROC; \
	leal 4(%esp),%eax; \
	pushl_cfi %eax; \
	movl PT_EDX(%eax),%ecx; \
	movl PT_ECX(%eax),%edx; \
	movl PT_EBX(%eax),%eax; \
	call sys_##name; \
	addl $4,%esp; \
	CFI_ADJUST_CFA_OFFSET -4; \
	ret; \
	CFI_ENDPROC; \
ENDPROC(ptregs_##name)

PTREGSCALL1(iopl)
PTREGSCALL0(fork)
PTREGSCALL0(vfork)
PTREGSCALL3(execve)
PTREGSCALL2(sigaltstack)
PTREGSCALL0(sigreturn)
PTREGSCALL0(rt_sigreturn)
PTREGSCALL2(vm86)
PTREGSCALL1(vm86old)

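/*
 * For reference (this is just the PTREGSCALL1 macro above written out, not
 * additional code): PTREGSCALL1(iopl) expands to a stub that rebuilds the
 * C calling convention from the saved frame, with %eax holding the first
 * syscall argument (saved %ebx) and %edx the pt_regs pointer:
 *
 *	ENTRY(ptregs_iopl)
 *		leal 4(%esp),%edx		# &pt_regs, skipping the return address
 *		movl (PT_EBX+4)(%esp),%eax	# first syscall argument
 *		jmp sys_iopl
 *	ENDPROC(ptregs_iopl)
 */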
/* Clone is an oddball.  The 4th arg is in %edi */
ENTRY(ptregs_clone)
	CFI_STARTPROC
	leal 4(%esp),%eax
	pushl_cfi %eax
	pushl_cfi PT_EDI(%eax)
	movl PT_EDX(%eax),%ecx
	movl PT_ECX(%eax),%edx
	movl PT_EBX(%eax),%eax
	call sys_clone
	addl $8,%esp
	CFI_ADJUST_CFA_OFFSET -8
	ret
	CFI_ENDPROC
ENDPROC(ptregs_clone)

.macro FIXUP_ESPFIX_STACK
/*
 * Switch back from the ESPFIX stack to the normal zero-based stack
 *
 * We can't call C functions using the ESPFIX stack. This code reads
 * the high word of the segment base from the GDT and switches to the
 * normal stack and adjusts ESP with the matching offset.
 */
	/* fixup the stack */
	mov GDT_ESPFIX_SS + 4, %al	/* bits 16..23 */
	mov GDT_ESPFIX_SS + 7, %ah	/* bits 24..31 */
	shl $16, %eax
	addl %esp, %eax			/* the adjusted stack pointer */
	pushl_cfi $__KERNEL_DS
	pushl_cfi %eax
	lss (%esp), %esp		/* switch to the normal stack segment */
	CFI_ADJUST_CFA_OFFSET -8
.endm
.macro UNWIND_ESPFIX_STACK
	movl %ss, %eax
	/* see if on espfix stack */
	cmpw $__ESPFIX_SS, %ax
	jne 27f
	movl $__KERNEL_DS, %eax
	movl %eax, %ds
	movl %eax, %es
	/* switch to normal stack */
	FIXUP_ESPFIX_STACK
27:
.endm

/*
 * Build the entry stubs and pointer table with some assembler magic.
 * We pack 7 stubs into a single 32-byte chunk, which will fit in a
 * single cache line on all modern x86 implementations.
 */
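/*
 * Worked example (illustrative): for vector 0x20 the generated stub is
 *	push $0x5f		# $(~0x20 + 0x80), fits in a signed byte
 *	jmp  common_interrupt
 * The one-byte immediate keeps each stub small enough for the 7-per-32-byte
 * packing; common_interrupt's "addl $-0x80" below turns 0x5f back into
 * ~0x20, the encoding do_IRQ expects in orig_eax.
 */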
.section .init.rodata,"a"
ENTRY(interrupt)
.section .entry.text, "ax"
	.p2align 5
	.p2align CONFIG_X86_L1_CACHE_SHIFT
ENTRY(irq_entries_start)
	RING0_INT_FRAME
vector=FIRST_EXTERNAL_VECTOR
.rept (NR_VECTORS-FIRST_EXTERNAL_VECTOR+6)/7
	.balign 32
  .rept	7
    .if vector < NR_VECTORS
      .if vector <> FIRST_EXTERNAL_VECTOR
	CFI_ADJUST_CFA_OFFSET -4
      .endif
1:	pushl_cfi $(~vector+0x80)	/* Note: always in signed byte range */
      .if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6
	jmp 2f
      .endif
      .previous
	.long 1b
      .section .entry.text, "ax"
vector=vector+1
    .endif
  .endr
2:	jmp common_interrupt
.endr
END(irq_entries_start)

.previous
END(interrupt)
.previous

/*
 * the CPU automatically disables interrupts when executing an IRQ vector,
 * so IRQ-flags tracing has to follow that:
 */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
	addl $-0x80,(%esp)	/* Adjust vector into the [-256,-1] range */
	SAVE_ALL
	TRACE_IRQS_OFF
	movl %esp,%eax
	call do_IRQ
	jmp ret_from_intr
ENDPROC(common_interrupt)
	CFI_ENDPROC

/*
 * Irq entries should be protected against kprobes
 */
	.pushsection .kprobes.text, "ax"
#define BUILD_INTERRUPT3(name, nr, fn)	\
ENTRY(name)				\
	RING0_INT_FRAME;		\
	pushl_cfi $~(nr);		\
	SAVE_ALL;			\
	TRACE_IRQS_OFF			\
	movl %esp,%eax;			\
	call fn;			\
	jmp ret_from_intr;		\
	CFI_ENDPROC;			\
ENDPROC(name)

#define BUILD_INTERRUPT(name, nr)	BUILD_INTERRUPT3(name, nr, smp_##name)

/* The include is where all of the SMP etc. interrupts come from */
#include <asm/entry_arch.h>

ENTRY(coprocessor_error)
	RING0_INT_FRAME
	pushl_cfi $0
	pushl_cfi $do_coprocessor_error
	jmp error_code
	CFI_ENDPROC
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
	RING0_INT_FRAME
	pushl_cfi $0
#ifdef CONFIG_X86_INVD_BUG
	/* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
661:	pushl_cfi $do_general_protection
662:
.section .altinstructions,"a"
	altinstruction_entry 661b, 663f, X86_FEATURE_XMM, 662b-661b, 664f-663f
.previous
.section .altinstr_replacement,"ax"
663:	pushl $do_simd_coprocessor_error
664:
.previous
#else
	pushl_cfi $do_simd_coprocessor_error
#endif
	jmp error_code
	CFI_ENDPROC
END(simd_coprocessor_error)

ENTRY(device_not_available)
	RING0_INT_FRAME
	pushl_cfi $-1			# mark this as an int
	pushl_cfi $do_device_not_available
	jmp error_code
	CFI_ENDPROC
END(device_not_available)

#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
	iret
	_ASM_EXTABLE(native_iret, iret_exc)
END(native_iret)

ENTRY(native_irq_enable_sysexit)
	sti
	sysexit
END(native_irq_enable_sysexit)
#endif

ENTRY(overflow)
	RING0_INT_FRAME
	pushl_cfi $0
	pushl_cfi $do_overflow
	jmp error_code
	CFI_ENDPROC
END(overflow)

ENTRY(bounds)
	RING0_INT_FRAME
	pushl_cfi $0
	pushl_cfi $do_bounds
	jmp error_code
	CFI_ENDPROC
END(bounds)

ENTRY(invalid_op)
	RING0_INT_FRAME
	pushl_cfi $0
	pushl_cfi $do_invalid_op
	jmp error_code
	CFI_ENDPROC
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
	RING0_INT_FRAME
	pushl_cfi $0
	pushl_cfi $do_coprocessor_segment_overrun
	jmp error_code
	CFI_ENDPROC
END(coprocessor_segment_overrun)

ENTRY(invalid_TSS)
	RING0_EC_FRAME
	pushl_cfi $do_invalid_TSS
	jmp error_code
	CFI_ENDPROC
END(invalid_TSS)

ENTRY(segment_not_present)
	RING0_EC_FRAME
	pushl_cfi $do_segment_not_present
	jmp error_code
	CFI_ENDPROC
END(segment_not_present)

ENTRY(stack_segment)
	RING0_EC_FRAME
	pushl_cfi $do_stack_segment
	jmp error_code
	CFI_ENDPROC
END(stack_segment)

ENTRY(alignment_check)
	RING0_EC_FRAME
	pushl_cfi $do_alignment_check
	jmp error_code
	CFI_ENDPROC
END(alignment_check)

ENTRY(divide_error)
	RING0_INT_FRAME
	pushl_cfi $0			# no error code
	pushl_cfi $do_divide_error
	jmp error_code
	CFI_ENDPROC
END(divide_error)

#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
	RING0_INT_FRAME
	pushl_cfi $0
	pushl_cfi machine_check_vector
	jmp error_code
	CFI_ENDPROC
END(machine_check)
#endif

ENTRY(spurious_interrupt_bug)
	RING0_INT_FRAME
	pushl_cfi $0
	pushl_cfi $do_spurious_interrupt_bug
	jmp error_code
	CFI_ENDPROC
END(spurious_interrupt_bug)
/*
 * End of kprobes section
 */
	.popsection

ENTRY(kernel_thread_helper)
	pushl $0		# fake return address for unwinder
	CFI_STARTPROC
	movl %edi,%eax
	call *%esi
	call do_exit
	ud2			# padding for call trace
	CFI_ENDPROC
ENDPROC(kernel_thread_helper)

#ifdef CONFIG_XEN
/* Xen doesn't set %esp to be precisely what the normal sysenter
   entrypoint expects, so fix it up before using the normal path. */
ENTRY(xen_sysenter_target)
	RING0_INT_FRAME
	addl $5*4, %esp		/* remove xen-provided frame */
	CFI_ADJUST_CFA_OFFSET -5*4
	jmp sysenter_past_esp
	CFI_ENDPROC

ENTRY(xen_hypervisor_callback)
	CFI_STARTPROC
	pushl_cfi $0
	SAVE_ALL
	TRACE_IRQS_OFF

	/* Check to see if we got the event in the critical
	   region in xen_iret_direct, after we've reenabled
	   events and checked for pending events.  This simulates
	   iret instruction's behaviour where it delivers a
	   pending interrupt when enabling interrupts. */
	movl PT_EIP(%esp),%eax
	cmpl $xen_iret_start_crit,%eax
	jb   1f
	cmpl $xen_iret_end_crit,%eax
	jae  1f

	jmp  xen_iret_crit_fixup

ENTRY(xen_do_upcall)
1:	mov %esp, %eax
	call xen_evtchn_do_upcall
	jmp  ret_from_intr
	CFI_ENDPROC
ENDPROC(xen_hypervisor_callback)

# Hypervisor uses this for application faults while it executes.
# We get here for two reasons:
#  1. Fault while reloading DS, ES, FS or GS
#  2. Fault while executing IRET
# Category 1 we fix up by reattempting the load, and zeroing the segment
# register if the load fails.
# Category 2 we fix up by jumping to do_iret_error. We cannot use the
# normal Linux return path in this case because if we use the IRET hypercall
# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
# We distinguish between categories by maintaining a status value in EAX.
ENTRY(xen_failsafe_callback)
	CFI_STARTPROC
	pushl_cfi %eax
	movl $1,%eax
1:	mov 4(%esp),%ds
2:	mov 8(%esp),%es
3:	mov 12(%esp),%fs
4:	mov 16(%esp),%gs
	testl %eax,%eax
	popl_cfi %eax
	lea 16(%esp),%esp
	CFI_ADJUST_CFA_OFFSET -16
	jz 5f
	addl $16,%esp
	jmp iret_exc		# EAX != 0 => Category 2 (Bad IRET)
5:	pushl_cfi $0		# EAX == 0 => Category 1 (Bad segment)
	SAVE_ALL
	jmp ret_from_exception
	CFI_ENDPROC

.section .fixup,"ax"
6:	xorl %eax,%eax
	movl %eax,4(%esp)
	jmp 1b
7:	xorl %eax,%eax
	movl %eax,8(%esp)
	jmp 2b
8:	xorl %eax,%eax
	movl %eax,12(%esp)
	jmp 3b
9:	xorl %eax,%eax
	movl %eax,16(%esp)
	jmp 4b
.previous
	_ASM_EXTABLE(1b,6b)
	_ASM_EXTABLE(2b,7b)
	_ASM_EXTABLE(3b,8b)
	_ASM_EXTABLE(4b,9b)
ENDPROC(xen_failsafe_callback)

BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
		xen_evtchn_do_upcall)

#endif	/* CONFIG_XEN */

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE

ENTRY(mcount)
	ret
END(mcount)

ENTRY(ftrace_caller)
	cmpl $0, function_trace_stop
	jne  ftrace_stub

	pushl %eax
	pushl %ecx
	pushl %edx
	pushl $0	/* Pass NULL as regs pointer */
	movl 4*4(%esp), %eax
	movl 0x4(%ebp), %edx
	leal function_trace_op, %ecx
	subl $MCOUNT_INSN_SIZE, %eax

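/*
 * Calling-convention note (illustrative; assumes the usual -mregparm=3
 * kernel build): the callee patched in at ftrace_call below receives the
 * traced ip in %eax, the parent ip in %edx and the ftrace_ops pointer in
 * %ecx, with the (NULL) pt_regs pointer as the only stack argument.
 */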
.globl ftrace_call
ftrace_call:
	call ftrace_stub

	addl $4,%esp	/* skip NULL pointer */
	popl %edx
	popl %ecx
	popl %eax
ftrace_ret:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
	jmp ftrace_stub
#endif

.globl ftrace_stub
ftrace_stub:
	ret
END(ftrace_caller)

ENTRY(ftrace_regs_caller)
	pushf	/* push flags before compare (in cs location) */
	cmpl $0, function_trace_stop
	jne ftrace_restore_flags

	/*
	 * i386 does not save SS and ESP when coming from kernel.
	 * Instead, to get sp, &regs->sp is used (see ptrace.h).
	 * Unfortunately, that means eflags must be at the same location
	 * as the current return ip is. We move the return ip into the
	 * ip location, and move flags into the return ip location.
	 */
	pushl 4(%esp)	/* save return ip into ip slot */
	subl $MCOUNT_INSN_SIZE, (%esp)	/* Adjust ip */

	pushl $0	/* Load 0 into orig_ax */
	pushl %gs
	pushl %fs
	pushl %es
	pushl %ds
	pushl %eax
	pushl %ebp
	pushl %edi
	pushl %esi
	pushl %edx
	pushl %ecx
	pushl %ebx

	movl 13*4(%esp), %eax	/* Get the saved flags */
	movl %eax, 14*4(%esp)	/* Move saved flags into regs->flags location */
				/* clobbering return ip */
	movl $__KERNEL_CS,13*4(%esp)

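	/*
	 * Layout at this point (illustrative, offsets from %esp), matching
	 * struct pt_regs: 0*4 bx, 1*4 cx, 2*4 dx, 3*4 si, 4*4 di, 5*4 bp,
	 * 6*4 ax, 7*4 ds, 8*4 es, 9*4 fs, 10*4 gs, 11*4 orig_ax (0),
	 * 12*4 ip (adjusted return ip), 13*4 cs (__KERNEL_CS) and 14*4 flags
	 * (saved by the pushf above). The word that originally held the
	 * mcount return address was overwritten and is rebuilt on the way out.
	 */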
	movl 12*4(%esp), %eax	/* Load ip (1st parameter) */
	movl 0x4(%ebp), %edx	/* Load parent ip (2nd parameter) */
	leal function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */
	pushl %esp		/* Save pt_regs as 4th parameter */

GLOBAL(ftrace_regs_call)
	call ftrace_stub

	addl $4, %esp		/* Skip pt_regs */
	movl 14*4(%esp), %eax	/* Move flags back into cs */
	movl %eax, 13*4(%esp)	/* Needed to keep addl from modifying flags */
	movl 12*4(%esp), %eax	/* Get return ip from regs->ip */
	addl $MCOUNT_INSN_SIZE, %eax
	movl %eax, 14*4(%esp)	/* Put return ip back for ret */

	popl %ebx
	popl %ecx
	popl %edx
	popl %esi
	popl %edi
	popl %ebp
	popl %eax
	popl %ds
	popl %es
	popl %fs
	popl %gs
	addl $8, %esp		/* Skip orig_ax and ip */
	popf			/* Pop flags at end (no addl to corrupt flags) */
	jmp ftrace_ret

ftrace_restore_flags:
	popf
	jmp  ftrace_stub
#else /* ! CONFIG_DYNAMIC_FTRACE */

ENTRY(mcount)
	cmpl $0, function_trace_stop
	jne  ftrace_stub

	cmpl $ftrace_stub, ftrace_trace_function
	jnz trace
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	cmpl $ftrace_stub, ftrace_graph_return
	jnz ftrace_graph_caller

	cmpl $ftrace_graph_entry_stub, ftrace_graph_entry
	jnz ftrace_graph_caller
#endif
.globl ftrace_stub
ftrace_stub:
	ret

	/* taken from glibc */
trace:
	pushl %eax
	pushl %ecx
	pushl %edx
	movl 0xc(%esp), %eax
	movl 0x4(%ebp), %edx
	subl $MCOUNT_INSN_SIZE, %eax

	call *ftrace_trace_function

	popl %edx
	popl %ecx
	popl %eax
	jmp ftrace_stub
END(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
	cmpl $0, function_trace_stop
	jne ftrace_stub

	pushl %eax
	pushl %ecx
	pushl %edx
	movl 0xc(%esp), %edx
	lea 0x4(%ebp), %eax
	movl (%ebp), %ecx
	subl $MCOUNT_INSN_SIZE, %edx
	call prepare_ftrace_return
	popl %edx
	popl %ecx
	popl %eax
	ret
END(ftrace_graph_caller)

.globl return_to_handler
return_to_handler:
	pushl %eax
	pushl %edx
	movl %ebp, %eax
	call ftrace_return_to_handler
	movl %eax, %ecx
	popl %edx
	popl %eax
	jmp *%ecx
#endif

/*
 * Some functions should be protected against kprobes
 */
	.pushsection .kprobes.text, "ax"

ENTRY(page_fault)
	RING0_EC_FRAME
	pushl_cfi $do_page_fault
	ALIGN
error_code:
	/* the function address is in %gs's slot on the stack */
	pushl_cfi %fs
	/*CFI_REL_OFFSET fs, 0*/
	pushl_cfi %es
	/*CFI_REL_OFFSET es, 0*/
	pushl_cfi %ds
	/*CFI_REL_OFFSET ds, 0*/
	pushl_cfi %eax
	CFI_REL_OFFSET eax, 0
	pushl_cfi %ebp
	CFI_REL_OFFSET ebp, 0
	pushl_cfi %edi
	CFI_REL_OFFSET edi, 0
	pushl_cfi %esi
	CFI_REL_OFFSET esi, 0
	pushl_cfi %edx
	CFI_REL_OFFSET edx, 0
	pushl_cfi %ecx
	CFI_REL_OFFSET ecx, 0
	pushl_cfi %ebx
	CFI_REL_OFFSET ebx, 0
	cld
	movl $(__KERNEL_PERCPU), %ecx
	movl %ecx, %fs
	UNWIND_ESPFIX_STACK
	GS_TO_REG %ecx
	movl PT_GS(%esp), %edi		# get the function address
	movl PT_ORIG_EAX(%esp), %edx	# get the error code
	movl $-1, PT_ORIG_EAX(%esp)	# no syscall to restart
	REG_TO_PTGS %ecx
	SET_KERNEL_GS %ecx
	movl $(__USER_DS), %ecx
	movl %ecx, %ds
	movl %ecx, %es
	TRACE_IRQS_OFF
	movl %esp,%eax			# pt_regs pointer
	call *%edi
	jmp ret_from_exception
	CFI_ENDPROC
END(page_fault)

/*
 * Debug traps and NMI can happen at the one SYSENTER instruction
 * that sets up the real kernel stack. Check here, since we can't
 * allow the wrong stack to be used.
 *
 * "TSS_sysenter_sp0+12" is because the NMI/debug handler will have
 * already pushed 3 words if it hits on the sysenter instruction:
 * eflags, cs and eip.
 *
 * We just load the right stack, and push the three (known) values
 * by hand onto the new stack - while updating the return eip past
 * the instruction that would have done it for sysenter.
 */
.macro FIX_STACK offset ok label
	cmpw $__KERNEL_CS, 4(%esp)
	jne \ok
\label:
	movl TSS_sysenter_sp0 + \offset(%esp), %esp
	CFI_DEF_CFA esp, 0
	CFI_UNDEFINED eip
	pushfl_cfi
	pushl_cfi $__KERNEL_CS
	pushl_cfi $sysenter_past_esp
	CFI_REL_OFFSET eip, 0
.endm

ENTRY(debug)
	RING0_INT_FRAME
	cmpl $ia32_sysenter_target,(%esp)
	jne debug_stack_correct
	FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
debug_stack_correct:
	pushl_cfi $-1			# mark this as an int
	SAVE_ALL
	TRACE_IRQS_OFF
	xorl %edx,%edx			# error code 0
	movl %esp,%eax			# pt_regs pointer
	call do_debug
	jmp ret_from_exception
	CFI_ENDPROC
END(debug)

/*
 * NMI is doubly nasty. It can happen _while_ we're handling
 * a debug fault, and the debug fault hasn't yet been able to
 * clear up the stack. So we first check whether we got an
 * NMI on the sysenter entry path, but after that we need to
 * check whether we got an NMI on the debug path where the debug
 * fault happened on the sysenter path.
 */
ENTRY(nmi)
	RING0_INT_FRAME
	pushl_cfi %eax
	movl %ss, %eax
	cmpw $__ESPFIX_SS, %ax
	popl_cfi %eax
	je nmi_espfix_stack
	cmpl $ia32_sysenter_target,(%esp)
	je nmi_stack_fixup
	pushl_cfi %eax
	movl %esp,%eax
	/* Do not access memory above the end of our stack page,
	 * it might not exist.
	 */
	andl $(THREAD_SIZE-1),%eax
	cmpl $(THREAD_SIZE-20),%eax
	popl_cfi %eax
	jae nmi_stack_correct
	cmpl $ia32_sysenter_target,12(%esp)
	je nmi_debug_stack_check
nmi_stack_correct:
	/* We have a RING0_INT_FRAME here */
	pushl_cfi %eax
	SAVE_ALL
	xorl %edx,%edx		# zero error code
	movl %esp,%eax		# pt_regs pointer
	call do_nmi
	jmp restore_all_notrace
	CFI_ENDPROC

nmi_stack_fixup:
	RING0_INT_FRAME
	FIX_STACK 12, nmi_stack_correct, 1
	jmp nmi_stack_correct

nmi_debug_stack_check:
	/* We have a RING0_INT_FRAME here */
	cmpw $__KERNEL_CS,16(%esp)
	jne nmi_stack_correct
	cmpl $debug,(%esp)
	jb nmi_stack_correct
	cmpl $debug_esp_fix_insn,(%esp)
	ja nmi_stack_correct
	FIX_STACK 24, nmi_stack_correct, 1
	jmp nmi_stack_correct

nmi_espfix_stack:
	/* We have a RING0_INT_FRAME here.
	 *
	 * create the pointer to lss back
	 */
	pushl_cfi %ss
	pushl_cfi %esp
	addl $4, (%esp)
	/* copy the iret frame of 12 bytes */
	.rept 3
	pushl_cfi 16(%esp)
	.endr
	pushl_cfi %eax
	SAVE_ALL
	FIXUP_ESPFIX_STACK		# %eax == %esp
	xorl %edx,%edx			# zero error code
	call do_nmi
	RESTORE_REGS
	lss 12+4(%esp), %esp		# back to espfix stack
	CFI_ADJUST_CFA_OFFSET -24
	jmp irq_return
	CFI_ENDPROC
END(nmi)

ENTRY(int3)
	RING0_INT_FRAME
	pushl_cfi $-1			# mark this as an int
	SAVE_ALL
	TRACE_IRQS_OFF
	xorl %edx,%edx			# zero error code
	movl %esp,%eax			# pt_regs pointer
	call do_int3
	jmp ret_from_exception
	CFI_ENDPROC
END(int3)

ENTRY(general_protection)
	RING0_EC_FRAME
	pushl_cfi $do_general_protection
	jmp error_code
	CFI_ENDPROC
END(general_protection)

#ifdef CONFIG_KVM_GUEST
ENTRY(async_page_fault)
	RING0_EC_FRAME
	pushl_cfi $do_async_page_fault
	jmp error_code
	CFI_ENDPROC
END(async_page_fault)
#endif

/*
 * End of kprobes section
 */
	.popsection