[PATCH] x86-64: miscellaneous entry.S adjustments
/*
 *  linux/arch/i386/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal recognition, which happens every time
 * after a timer interrupt and after each system call.
 *
 * I changed all the .align's to 4 (16 byte alignment), as that's faster
 * on a 486.
 *
 * Stack layout in 'ret_from_system_call':
 *	ptrace needs to have all regs on the stack.
 *	if the order here is changed, it needs to be
 *	updated in fork.c:copy_process, signal.c:do_signal,
 *	ptrace.c and ptrace.h
 *
 *	 0(%esp) - %ebx
 *	 4(%esp) - %ecx
 *	 8(%esp) - %edx
 *	 C(%esp) - %esi
 *	10(%esp) - %edi
 *	14(%esp) - %ebp
 *	18(%esp) - %eax
 *	1C(%esp) - %ds
 *	20(%esp) - %es
 *	24(%esp) - orig_eax
 *	28(%esp) - %eip
 *	2C(%esp) - %cs
 *	30(%esp) - %eflags
 *	34(%esp) - %oldesp
 *	38(%esp) - %oldss
 *
 * "current" is in register %ebx during any slow entries.
 */

#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/smp.h>
#include <asm/page.h>
#include <asm/desc.h>
#include <asm/percpu.h>
#include <asm/dwarf2.h>
#include "irq_vectors.h"

#define nr_syscalls ((syscall_table_size)/4)

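/*
 * Offsets of the registers pushed by SAVE_ALL; they mirror the stack
 * layout documented above (struct pt_regs in include/asm-i386/ptrace.h).
 */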
EBX		= 0x00
ECX		= 0x04
EDX		= 0x08
ESI		= 0x0C
EDI		= 0x10
EBP		= 0x14
EAX		= 0x18
DS		= 0x1C
ES		= 0x20
ORIG_EAX	= 0x24
EIP		= 0x28
CS		= 0x2C
EFLAGS		= 0x30
OLDESP		= 0x34
OLDSS		= 0x38

CF_MASK		= 0x00000001
TF_MASK		= 0x00000100
IF_MASK		= 0x00000200
DF_MASK		= 0x00000400
NT_MASK		= 0x00004000
VM_MASK		= 0x00020000

/* These are replacements for paravirtualization */
#define DISABLE_INTERRUPTS		cli
#define ENABLE_INTERRUPTS		sti
#define ENABLE_INTERRUPTS_SYSEXIT	sti; sysexit
#define INTERRUPT_RETURN		iret
#define GET_CR0_INTO_EAX		movl %cr0, %eax

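/*
 * (Here these expand to the bare native instructions; the indirection
 * gives a paravirtualized kernel one place to substitute hypervisor-
 * safe sequences instead of patching every call site individually.)
 */
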
#ifdef CONFIG_PREEMPT
#define preempt_stop		DISABLE_INTERRUPTS; TRACE_IRQS_OFF
#else
#define preempt_stop
#define resume_kernel		restore_nocheck
#endif

.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
	testl $IF_MASK,EFLAGS(%esp)	# interrupts off?
	jz 1f
	TRACE_IRQS_ON
1:
#endif
.endm

#ifdef CONFIG_VM86
#define resume_userspace_sig	check_userspace
#else
#define resume_userspace_sig	resume_userspace
#endif

#define SAVE_ALL \
	cld; \
	pushl %es; \
	CFI_ADJUST_CFA_OFFSET 4;\
	/*CFI_REL_OFFSET es, 0;*/\
	pushl %ds; \
	CFI_ADJUST_CFA_OFFSET 4;\
	/*CFI_REL_OFFSET ds, 0;*/\
	pushl %eax; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET eax, 0;\
	pushl %ebp; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET ebp, 0;\
	pushl %edi; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET edi, 0;\
	pushl %esi; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET esi, 0;\
	pushl %edx; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET edx, 0;\
	pushl %ecx; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET ecx, 0;\
	pushl %ebx; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET ebx, 0;\
	movl $(__USER_DS), %edx; \
	movl %edx, %ds; \
	movl %edx, %es;

#define RESTORE_INT_REGS \
	popl %ebx;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE ebx;\
	popl %ecx;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE ecx;\
	popl %edx;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE edx;\
	popl %esi;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE esi;\
	popl %edi;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE edi;\
	popl %ebp;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE ebp;\
	popl %eax;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE eax

#define RESTORE_REGS \
	RESTORE_INT_REGS; \
1:	popl %ds;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	/*CFI_RESTORE ds;*/\
2:	popl %es;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	/*CFI_RESTORE es;*/\
.section .fixup,"ax";	\
3:	movl $0,(%esp);	\
	jmp 1b;		\
4:	movl $0,(%esp);	\
	jmp 2b;		\
.previous;		\
.section __ex_table,"a";\
	.align 4;	\
	.long 1b,3b;	\
	.long 2b,4b;	\
.previous

#define RING0_INT_FRAME \
	CFI_STARTPROC simple;\
	CFI_SIGNAL_FRAME;\
	CFI_DEF_CFA esp, 3*4;\
	/*CFI_OFFSET cs, -2*4;*/\
	CFI_OFFSET eip, -3*4

#define RING0_EC_FRAME \
	CFI_STARTPROC simple;\
	CFI_SIGNAL_FRAME;\
	CFI_DEF_CFA esp, 4*4;\
	/*CFI_OFFSET cs, -2*4;*/\
	CFI_OFFSET eip, -3*4

#define RING0_PTREGS_FRAME \
	CFI_STARTPROC simple;\
	CFI_SIGNAL_FRAME;\
	CFI_DEF_CFA esp, OLDESP-EBX;\
	/*CFI_OFFSET cs, CS-OLDESP;*/\
	CFI_OFFSET eip, EIP-OLDESP;\
	/*CFI_OFFSET es, ES-OLDESP;*/\
	/*CFI_OFFSET ds, DS-OLDESP;*/\
	CFI_OFFSET eax, EAX-OLDESP;\
	CFI_OFFSET ebp, EBP-OLDESP;\
	CFI_OFFSET edi, EDI-OLDESP;\
	CFI_OFFSET esi, ESI-OLDESP;\
	CFI_OFFSET edx, EDX-OLDESP;\
	CFI_OFFSET ecx, ECX-OLDESP;\
	CFI_OFFSET ebx, EBX-OLDESP

ENTRY(ret_from_fork)
	CFI_STARTPROC
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	call schedule_tail
	GET_THREAD_INFO(%ebp)
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	pushl $0x0202			# Reset kernel eflags: 0x0202 is IF
	CFI_ADJUST_CFA_OFFSET 4		# plus the always-set reserved bit 1
	popfl
	CFI_ADJUST_CFA_OFFSET -4
	jmp syscall_exit
	CFI_ENDPROC

/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible, which is why some of this is
 * less clear than it otherwise should be.
 */

	# userspace resumption stub bypassing syscall exit tracing
	ALIGN
	RING0_PTREGS_FRAME
ret_from_exception:
	preempt_stop
ret_from_intr:
	GET_THREAD_INFO(%ebp)
check_userspace:
	movl EFLAGS(%esp), %eax		# mix EFLAGS and CS
	movb CS(%esp), %al
	andl $(VM_MASK | SEGMENT_RPL_MASK), %eax
	cmpl $USER_RPL, %eax
	jb resume_kernel		# not returning to v8086 or userspace
ENTRY(resume_userspace)
	DISABLE_INTERRUPTS		# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done on
					# int/exception return?
	jne work_pending
	jmp restore_all

#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
	DISABLE_INTERRUPTS
	cmpl $0,TI_preempt_count(%ebp)	# non-zero preempt_count ?
	jnz restore_nocheck
need_resched:
	movl TI_flags(%ebp), %ecx	# need_resched set ?
	testb $_TIF_NEED_RESCHED, %cl
	jz restore_all
	testl $IF_MASK,EFLAGS(%esp)	# interrupts off (exception path) ?
	jz restore_all
	call preempt_schedule_irq
	jmp need_resched
#endif
	CFI_ENDPROC

/* SYSENTER_RETURN points to after the "sysenter" instruction in
   the vsyscall page. See vsyscall-sysenter.S, which defines the symbol. */

	# sysenter call handler stub
ENTRY(sysenter_entry)
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, 0
	CFI_REGISTER esp, ebp
	movl TSS_sysenter_esp0(%esp),%esp
sysenter_past_esp:
	/*
	 * No need to follow this irqs on/off section: sysenter disabled
	 * irqs, and we enable them again straight after entry:
	 */
	ENABLE_INTERRUPTS
	pushl $(__USER_DS)
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET ss, 0*/
	pushl %ebp
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET esp, 0
	pushfl
	CFI_ADJUST_CFA_OFFSET 4
	pushl $(__USER_CS)
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET cs, 0*/
	/*
	 * Push current_thread_info()->sysenter_return to the stack.
	 * A tiny bit of offset fixup is necessary: 4*4 means the 4 words
	 * pushed above, and +8 corresponds to copy_thread's esp0 setting.
	 */
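	/*
	 * (Worked out: esp0 sits 8 bytes below the top of the task stack,
	 * i.e. thread_info + THREAD_SIZE - 8, and the 4 pushes above moved
	 * %esp down another 16 bytes, so %esp - THREAD_SIZE + 8 + 4*4 is
	 * the thread_info base and adding TI_sysenter_return reaches the
	 * field itself.)
	 */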
	pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET eip, 0

/*
 * Load the potential sixth argument from the user stack.
 * Careful about security.
 */
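/*
 * (The "-3": the movl below reads 4 bytes at %ebp, so the last byte
 * touched is %ebp + 3, which must still be below __PAGE_OFFSET; hence
 * fault on anything at or above __PAGE_OFFSET - 3.)
 */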
	cmpl $__PAGE_OFFSET-3,%ebp
	jae syscall_fault
1:	movl (%ebp),%ebp
.section __ex_table,"a"
	.align 4
	.long 1b,syscall_fault
.previous

	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	GET_THREAD_INFO(%ebp)

	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
	testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
	jnz syscall_trace_entry
	cmpl $(nr_syscalls), %eax
	jae syscall_badsys
	call *sys_call_table(,%eax,4)
	movl %eax,EAX(%esp)
	DISABLE_INTERRUPTS
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testw $_TIF_ALLWORK_MASK, %cx
	jne syscall_exit_work
/* if something modifies registers it must also disable sysexit */
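/* (sysexit resumes userspace with %edx as the new EIP and %ecx as the
   new ESP, which is exactly what the two loads below set up) */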
	movl EIP(%esp), %edx
	movl OLDESP(%esp), %ecx
	xorl %ebp,%ebp
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS_SYSEXIT
	CFI_ENDPROC


	# system call handler stub
ENTRY(system_call)
	RING0_INT_FRAME			# can't unwind into user space anyway
	pushl %eax			# save orig_eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	GET_THREAD_INFO(%ebp)
	testl $TF_MASK,EFLAGS(%esp)
	jz no_singlestep
	orl $_TIF_SINGLESTEP,TI_flags(%ebp)
no_singlestep:
					# system call tracing in operation / emulation
	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
	testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
	jnz syscall_trace_entry
	cmpl $(nr_syscalls), %eax
	jae syscall_badsys
syscall_call:
	call *sys_call_table(,%eax,4)
	movl %eax,EAX(%esp)		# store the return value
syscall_exit:
	DISABLE_INTERRUPTS		# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testw $_TIF_ALLWORK_MASK, %cx	# current->work
	jne syscall_exit_work

restore_all:
	movl EFLAGS(%esp), %eax		# mix EFLAGS, SS and CS
	# Warning: OLDSS(%esp) contains the wrong/random values if we
	# are returning to the kernel.
	# See comments in process.c:copy_thread() for details.
	movb OLDSS(%esp), %ah
	movb CS(%esp), %al
	andl $(VM_MASK | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
	cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
	CFI_REMEMBER_STATE
	je ldt_ss			# returning to user-space with LDT SS
restore_nocheck:
	TRACE_IRQS_IRET
restore_nocheck_notrace:
	RESTORE_REGS
	addl $4, %esp			# skip the orig_eax slot
	CFI_ADJUST_CFA_OFFSET -4
1:	INTERRUPT_RETURN
.section .fixup,"ax"
iret_exc:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS
	pushl $0			# no error code
	pushl $do_iret_error
	jmp error_code
.previous
.section __ex_table,"a"
	.align 4
	.long 1b,iret_exc
.previous

	CFI_RESTORE_STATE
ldt_ss:
	larl OLDSS(%esp), %eax
	jnz restore_nocheck
	testl $0x00400000, %eax		# returning to 32bit stack?
	jnz restore_nocheck		# all right, normal return
	/* If returning to userspace with a 16bit stack,
	 * try to fix the higher word of ESP, as the CPU
	 * won't restore it.
	 * This is an "official" bug of all the x86-compatible
	 * CPUs, which we can try to work around to make
	 * dosemu and wine happy. */
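	/* (The underlying problem: an iret through a 16-bit SS restores
	 * only the low 16 bits of ESP, so the high word keeps stale
	 * kernel-stack bits. The code below therefore switches onto the
	 * special __ESPFIX_SS segment, set up by patch_espfix_desc,
	 * before doing the normal return.) */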
	movl OLDESP(%esp), %eax
	movl %esp, %edx
	call patch_espfix_desc
	pushl $__ESPFIX_SS
	CFI_ADJUST_CFA_OFFSET 4
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	DISABLE_INTERRUPTS
	TRACE_IRQS_OFF
	lss (%esp), %esp
	CFI_ADJUST_CFA_OFFSET -8
	jmp restore_nocheck
	CFI_ENDPROC

	# perform work that needs to be done immediately before resumption
	ALIGN
	RING0_PTREGS_FRAME		# can't unwind into user space anyway
work_pending:
	testb $_TIF_NEED_RESCHED, %cl
	jz work_notifysig
work_resched:
	call schedule
	DISABLE_INTERRUPTS		# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done other
					# than syscall tracing?
	jz restore_all
	testb $_TIF_NEED_RESCHED, %cl
	jnz work_resched

work_notifysig:				# deal with pending signals and
					# notify-resume requests
	testl $VM_MASK, EFLAGS(%esp)
	movl %esp, %eax
	jne work_notifysig_v86		# returning to kernel-space or
					# vm86-space
	xorl %edx, %edx
	call do_notify_resume
	jmp resume_userspace_sig

	ALIGN
work_notifysig_v86:
#ifdef CONFIG_VM86
	pushl %ecx			# save ti_flags for do_notify_resume
	CFI_ADJUST_CFA_OFFSET 4
	call save_v86_state		# %eax contains pt_regs pointer
	popl %ecx
	CFI_ADJUST_CFA_OFFSET -4
	movl %eax, %esp
	xorl %edx, %edx
	call do_notify_resume
	jmp resume_userspace_sig
#endif

	# perform syscall entry tracing
	ALIGN
syscall_trace_entry:
	movl $-ENOSYS,EAX(%esp)
	movl %esp, %eax
	xorl %edx,%edx
	call do_syscall_trace
	cmpl $0, %eax
	jne resume_userspace		# ret != 0 -> running under PTRACE_SYSEMU,
					# so must skip actual syscall
	movl ORIG_EAX(%esp), %eax
	cmpl $(nr_syscalls), %eax
	jnae syscall_call
	jmp syscall_exit

	# perform syscall exit tracing
	ALIGN
syscall_exit_work:
	testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
	jz work_pending
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS		# could let do_syscall_trace() call
					# schedule() instead
	movl %esp, %eax
	movl $1, %edx
	call do_syscall_trace
	jmp resume_userspace
	CFI_ENDPROC

	RING0_INT_FRAME			# can't unwind into user space anyway
syscall_fault:
	pushl %eax			# save orig_eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	GET_THREAD_INFO(%ebp)
	movl $-EFAULT,EAX(%esp)
	jmp resume_userspace

syscall_badsys:
	movl $-ENOSYS,EAX(%esp)
	jmp resume_userspace
	CFI_ENDPROC

#define FIXUP_ESPFIX_STACK \
	/* since we are on the wrong stack, we can't do this in C :( */ \
	GET_THREAD_INFO(%ebp); \
	movl TI_cpu(%ebp), %ebx; \
	PER_CPU(cpu_gdt_descr, %ebx); \
	movl GDS_address(%ebx), %ebx; \
	GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah); \
	addl %esp, %eax; \
	pushl $__KERNEL_DS; \
	CFI_ADJUST_CFA_OFFSET 4; \
	pushl %eax; \
	CFI_ADJUST_CFA_OFFSET 4; \
	lss (%esp), %esp; \
	CFI_ADJUST_CFA_OFFSET -8;
#define UNWIND_ESPFIX_STACK \
	movl %ss, %eax; \
	/* see if on espfix stack */ \
	cmpw $__ESPFIX_SS, %ax; \
	jne 27f; \
	movl $__KERNEL_DS, %eax; \
	movl %eax, %ds; \
	movl %eax, %es; \
	/* switch to normal stack */ \
	FIXUP_ESPFIX_STACK; \
27:;

/*
 * Build the entry stubs and pointer table with
 * some assembler magic.
 */
.data
ENTRY(interrupt)
.text

vector=0
ENTRY(irq_entries_start)
	RING0_INT_FRAME
.rept NR_IRQS
	ALIGN
	.if vector
	CFI_ADJUST_CFA_OFFSET -4
	.endif
1:	pushl $~(vector)
	CFI_ADJUST_CFA_OFFSET 4
	jmp common_interrupt
.data
	.long 1b
.text
vector=vector+1
.endr
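
/*
 * (Each .rept pass emits one stub, in effect
 *	1: pushl $~(vector); jmp common_interrupt
 * and records its address with ".long 1b" in the interrupt[] table,
 * so interrupt[n] points at the stub that pushes ~n and the common
 * code can recover the vector number from the stack.)
 */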

/*
 * the CPU automatically disables interrupts when executing an IRQ vector,
 * so IRQ-flags tracing has to follow that:
 */
	ALIGN
common_interrupt:
	SAVE_ALL
	TRACE_IRQS_OFF
	movl %esp,%eax
	call do_IRQ
	jmp ret_from_intr
	CFI_ENDPROC

#define BUILD_INTERRUPT(name, nr)	\
ENTRY(name)				\
	RING0_INT_FRAME;		\
	pushl $~(nr);			\
	CFI_ADJUST_CFA_OFFSET 4;	\
	SAVE_ALL;			\
	TRACE_IRQS_OFF			\
	movl %esp,%eax;			\
	call smp_/**/name;		\
	jmp ret_from_intr;		\
	CFI_ENDPROC

/* The include is where all of the SMP etc. interrupts come from */
#include "entry_arch.h"

KPROBE_ENTRY(page_fault)
	RING0_EC_FRAME
	pushl $do_page_fault
	CFI_ADJUST_CFA_OFFSET 4
	ALIGN
error_code:
	pushl %ds
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET ds, 0*/
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET eax, 0
	pushl %ebp
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ebp, 0
	pushl %edi
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET edi, 0
	pushl %esi
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET esi, 0
	pushl %edx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET edx, 0
	pushl %ecx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ecx, 0
	pushl %ebx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ebx, 0
	cld
	pushl %es
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET es, 0*/
	UNWIND_ESPFIX_STACK
	popl %ecx
	CFI_ADJUST_CFA_OFFSET -4
	/*CFI_REGISTER es, ecx*/
	movl ES(%esp), %edi		# get the function address
	movl ORIG_EAX(%esp), %edx	# get the error code
	movl $-1, ORIG_EAX(%esp)
	movl %ecx, ES(%esp)
	/*CFI_REL_OFFSET es, ES*/
	movl $(__USER_DS), %ecx
	movl %ecx, %ds
	movl %ecx, %es
	movl %esp,%eax			# pt_regs pointer
	call *%edi
	jmp ret_from_exception
	CFI_ENDPROC
KPROBE_END(page_fault)

ENTRY(coprocessor_error)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_coprocessor_error
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

ENTRY(simd_coprocessor_error)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_simd_coprocessor_error
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

ENTRY(device_not_available)
	RING0_INT_FRAME
	pushl $-1			# mark this as an int
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	GET_CR0_INTO_EAX
	testl $0x4, %eax		# EM (math emulation bit)
	jne device_not_available_emulate
	preempt_stop
	call math_state_restore
	jmp ret_from_exception
device_not_available_emulate:
	pushl $0			# temporary storage for ORIG_EIP
	CFI_ADJUST_CFA_OFFSET 4
	call math_emulate
	addl $4, %esp
	CFI_ADJUST_CFA_OFFSET -4
	jmp ret_from_exception
	CFI_ENDPROC

/*
 * Debug traps and NMI can happen at the one SYSENTER instruction
 * that sets up the real kernel stack. Check here, since we can't
 * allow the wrong stack to be used.
 *
 * "TSS_sysenter_esp0+12" is because the NMI/debug handler will have
 * already pushed 3 words if it hits on the sysenter instruction:
 * eflags, cs and eip.
 *
 * We just load the right stack, and push the three (known) values
 * by hand onto the new stack - while updating the return eip past
 * the instruction that would have done it for sysenter.
 */
#define FIX_STACK(offset, ok, label) \
	cmpw $__KERNEL_CS,4(%esp); \
	jne ok; \
label: \
	movl TSS_sysenter_esp0+offset(%esp),%esp; \
	CFI_DEF_CFA esp, 0; \
	CFI_UNDEFINED eip; \
	pushfl; \
	CFI_ADJUST_CFA_OFFSET 4; \
	pushl $__KERNEL_CS; \
	CFI_ADJUST_CFA_OFFSET 4; \
	pushl $sysenter_past_esp; \
	CFI_ADJUST_CFA_OFFSET 4; \
	CFI_REL_OFFSET eip, 0

KPROBE_ENTRY(debug)
	RING0_INT_FRAME
	cmpl $sysenter_entry,(%esp)
	jne debug_stack_correct
	FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
debug_stack_correct:
	pushl $-1			# mark this as an int
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	xorl %edx,%edx			# error code 0
	movl %esp,%eax			# pt_regs pointer
	call do_debug
	jmp ret_from_exception
	CFI_ENDPROC
KPROBE_END(debug)

/*
 * NMI is doubly nasty. It can happen _while_ we're handling
 * a debug fault, and the debug fault hasn't yet been able to
 * clear up the stack. So we first check whether we got an
 * NMI on the sysenter entry path, but after that we need to
 * check whether we got an NMI on the debug path where the debug
 * fault happened on the sysenter path.
 */