[PATCH] i386: don't taint UP K7's running SMP kernels.
arch/i386/kernel/entry.S
/*
 * linux/arch/i386/kernel/entry.S
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 * I changed all the .align's to 4 (16 byte alignment), as that's faster
 * on a 486.
 *
 * Stack layout in 'ret_from_system_call':
 * 	ptrace needs to have all regs on the stack.
 *	if the order here is changed, it needs to be
 *	updated in fork.c:copy_process, signal.c:do_signal,
 *	ptrace.c and ptrace.h
 *
 *	 0(%esp) - %ebx
 *	 4(%esp) - %ecx
 *	 8(%esp) - %edx
 *	 C(%esp) - %esi
 *	10(%esp) - %edi
 *	14(%esp) - %ebp
 *	18(%esp) - %eax
 *	1C(%esp) - %ds
 *	20(%esp) - %es
 *	24(%esp) - orig_eax
 *	28(%esp) - %eip
 *	2C(%esp) - %cs
 *	30(%esp) - %eflags
 *	34(%esp) - %oldesp
 *	38(%esp) - %oldss
 *
 * "current" is in register %ebx during any slow entries.
 */

#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/smp.h>
#include <asm/page.h>
#include <asm/desc.h>
#include <asm/dwarf2.h>
#include "irq_vectors.h"

#define nr_syscalls ((syscall_table_size)/4)

EBX		= 0x00
ECX		= 0x04
EDX		= 0x08
ESI		= 0x0C
EDI		= 0x10
EBP		= 0x14
EAX		= 0x18
DS		= 0x1C
ES		= 0x20
ORIG_EAX	= 0x24
EIP		= 0x28
CS		= 0x2C
EFLAGS		= 0x30
OLDESP		= 0x34
OLDSS		= 0x38

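/*
 * Illustration (an annotation, not part of the original flow): with
 * the layout above, code that has a SAVE_ALL-style frame at %esp can
 * address any saved register symbolically, e.g.:
 *
 *	movl EAX(%esp), %eax		# fetch saved user %eax (0x18(%esp))
 *	movl $-ENOSYS, EAX(%esp)	# overwrite the value user space sees
 *
 * which is exactly how the syscall paths below store return values.
 */
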
CF_MASK		= 0x00000001
TF_MASK		= 0x00000100
IF_MASK		= 0x00000200
DF_MASK		= 0x00000400
NT_MASK		= 0x00004000
VM_MASK		= 0x00020000

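/*
 * For reference (standard EFLAGS bit positions, stated here as an
 * annotation): CF is bit 0, TF (trap flag) bit 8, IF (interrupt
 * enable) bit 9, DF (direction) bit 10, NT (nested task) bit 14 and
 * VM (virtual-8086 mode) bit 17 - matching the masks above.
 */
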
#ifdef CONFIG_PREEMPT
#define preempt_stop		cli; TRACE_IRQS_OFF
#else
#define preempt_stop
#define resume_kernel		restore_nocheck
#endif

.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
	testl $IF_MASK,EFLAGS(%esp)	# interrupts off?
	jz 1f
	TRACE_IRQS_ON
1:
#endif
.endm

#ifdef CONFIG_VM86
#define resume_userspace_sig	check_userspace
#else
#define resume_userspace_sig	resume_userspace
#endif

#define SAVE_ALL \
	cld; \
	pushl %es; \
	CFI_ADJUST_CFA_OFFSET 4;\
	/*CFI_REL_OFFSET es, 0;*/\
	pushl %ds; \
	CFI_ADJUST_CFA_OFFSET 4;\
	/*CFI_REL_OFFSET ds, 0;*/\
	pushl %eax; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET eax, 0;\
	pushl %ebp; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET ebp, 0;\
	pushl %edi; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET edi, 0;\
	pushl %esi; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET esi, 0;\
	pushl %edx; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET edx, 0;\
	pushl %ecx; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET ecx, 0;\
	pushl %ebx; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET ebx, 0;\
	movl $(__USER_DS), %edx; \
	movl %edx, %ds; \
	movl %edx, %es;

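/*
 * Annotation: SAVE_ALL lays down the frame described by the offsets
 * at the top of this file (pushing in reverse order, so %ebx ends up
 * at 0(%esp)), reloads %ds/%es with __USER_DS, and keeps the DWARF2
 * unwind info in step with each push via the CFI_* macros from
 * <asm/dwarf2.h> (which collapse to nothing when unwind info is not
 * configured).
 */
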
#define RESTORE_INT_REGS \
	popl %ebx;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE ebx;\
	popl %ecx;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE ecx;\
	popl %edx;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE edx;\
	popl %esi;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE esi;\
	popl %edi;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE edi;\
	popl %ebp;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE ebp;\
	popl %eax;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE eax

#define RESTORE_REGS	\
	RESTORE_INT_REGS; \
1:	popl %ds;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	/*CFI_RESTORE ds;*/\
2:	popl %es;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	/*CFI_RESTORE es;*/\
.section .fixup,"ax";	\
3:	movl $0,(%esp);	\
	jmp 1b;		\
4:	movl $0,(%esp);	\
	jmp 2b;		\
.previous;		\
.section __ex_table,"a";\
	.align 4;	\
	.long 1b,3b;	\
	.long 2b,4b;	\
.previous

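/*
 * Annotation on the fixup above: popping a segment register can
 * fault if the saved selector is no longer valid (e.g. the LDT
 * changed underneath us).  Each __ex_table pair maps a potentially
 * faulting instruction (1b, 2b) to its recovery code (3b, 4b), which
 * replaces the bad selector on the stack with 0 and retries the pop;
 * loading a null selector into %ds/%es cannot fault.
 */
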
#define RING0_INT_FRAME \
	CFI_STARTPROC simple;\
	CFI_DEF_CFA esp, 3*4;\
	/*CFI_OFFSET cs, -2*4;*/\
	CFI_OFFSET eip, -3*4

#define RING0_EC_FRAME \
	CFI_STARTPROC simple;\
	CFI_DEF_CFA esp, 4*4;\
	/*CFI_OFFSET cs, -2*4;*/\
	CFI_OFFSET eip, -3*4

#define RING0_PTREGS_FRAME \
	CFI_STARTPROC simple;\
	CFI_DEF_CFA esp, OLDESP-EBX;\
	/*CFI_OFFSET cs, CS-OLDESP;*/\
	CFI_OFFSET eip, EIP-OLDESP;\
	/*CFI_OFFSET es, ES-OLDESP;*/\
	/*CFI_OFFSET ds, DS-OLDESP;*/\
	CFI_OFFSET eax, EAX-OLDESP;\
	CFI_OFFSET ebp, EBP-OLDESP;\
	CFI_OFFSET edi, EDI-OLDESP;\
	CFI_OFFSET esi, ESI-OLDESP;\
	CFI_OFFSET edx, EDX-OLDESP;\
	CFI_OFFSET ecx, ECX-OLDESP;\
	CFI_OFFSET ebx, EBX-OLDESP

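/*
 * Reading of the frame macros above (an annotation; the numbers come
 * straight from the definitions): on a same-privilege interrupt the
 * CPU pushes eip, cs and eflags, so the CFA sits 3*4 bytes above
 * %esp; exceptions that also push an error code make it 4*4.
 * RING0_PTREGS_FRAME describes a full SAVE_ALL frame using the
 * pt_regs offsets defined at the top of this file.
 */
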
ENTRY(ret_from_fork)
	CFI_STARTPROC
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	call schedule_tail
	GET_THREAD_INFO(%ebp)
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	pushl $0x0202			# Reset kernel eflags
	CFI_ADJUST_CFA_OFFSET 4
	popfl
	CFI_ADJUST_CFA_OFFSET -4
	jmp syscall_exit
	CFI_ENDPROC

/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible, which is why some of this is
 * less clear than it otherwise should be.
 */

	# userspace resumption stub bypassing syscall exit tracing
	ALIGN
	RING0_PTREGS_FRAME
ret_from_exception:
	preempt_stop
ret_from_intr:
	GET_THREAD_INFO(%ebp)
check_userspace:
	movl EFLAGS(%esp), %eax		# mix EFLAGS and CS
	movb CS(%esp), %al
	testl $(VM_MASK | 3), %eax
	jz resume_kernel
ENTRY(resume_userspace)
	cli				# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done on
					# int/exception return?
	jne work_pending
	jmp restore_all

#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
	cli
	cmpl $0,TI_preempt_count(%ebp)	# non-zero preempt_count ?
	jnz restore_nocheck
need_resched:
	movl TI_flags(%ebp), %ecx	# need_resched set ?
	testb $_TIF_NEED_RESCHED, %cl
	jz restore_all
	testl $IF_MASK,EFLAGS(%esp)	# interrupts off (exception path) ?
	jz restore_all
	call preempt_schedule_irq
	jmp need_resched
#endif
	CFI_ENDPROC

/* SYSENTER_RETURN points to after the "sysenter" instruction in
   the vsyscall page.  See vsyscall-sysenter.S, which defines the symbol.  */

	# sysenter call handler stub
ENTRY(sysenter_entry)
	CFI_STARTPROC simple
	CFI_DEF_CFA esp, 0
	CFI_REGISTER esp, ebp
	movl TSS_sysenter_esp0(%esp),%esp
sysenter_past_esp:
	/*
	 * No need to follow this irqs on/off section: sysenter
	 * disabled irqs and we enable them straight after entry:
	 */
	sti
	pushl $(__USER_DS)
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET ss, 0*/
	pushl %ebp
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET esp, 0
	pushfl
	CFI_ADJUST_CFA_OFFSET 4
	pushl $(__USER_CS)
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET cs, 0*/
	/*
	 * Push current_thread_info()->sysenter_return to the stack.
	 * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
	 * pushed above; +8 corresponds to copy_thread's esp0 setting.
	 */
	pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET eip, 0

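	/*
	 * Worked example of the operand above (annotation only):
	 * copy_thread() sets esp0 to 8 bytes below the top of the
	 * thread stack, and the four pushes above moved %esp down
	 * another 4*4 bytes, so %esp == thread_info + THREAD_SIZE
	 * - 8 - 4*4.  Adding TI_sysenter_return - THREAD_SIZE + 8 + 4*4
	 * therefore lands exactly on
	 * current_thread_info()->sysenter_return.
	 */
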
/*
 * Load the potential sixth argument from user stack.
 * Careful about security.
 */
	cmpl $__PAGE_OFFSET-3,%ebp
	jae syscall_fault
1:	movl (%ebp),%ebp
.section __ex_table,"a"
	.align 4
	.long 1b,syscall_fault
.previous

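/*
 * Annotation: the bound is __PAGE_OFFSET-3 because the movl reads 4
 * bytes at (%ebp); %ebp may be at most __PAGE_OFFSET-4 for the whole
 * access to stay in user space.  If the read faults anyway (unmapped
 * page), the __ex_table entry above redirects to syscall_fault
 * instead of oopsing.
 */
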
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	GET_THREAD_INFO(%ebp)

	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
	testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
	jnz syscall_trace_entry
	cmpl $(nr_syscalls), %eax
	jae syscall_badsys
	call *sys_call_table(,%eax,4)
	movl %eax,EAX(%esp)
	cli
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testw $_TIF_ALLWORK_MASK, %cx
	jne syscall_exit_work
/* if something modifies registers it must also disable sysexit */
	movl EIP(%esp), %edx
	movl OLDESP(%esp), %ecx
	xorl %ebp,%ebp
	TRACE_IRQS_ON
	sti
	sysexit
	CFI_ENDPROC

	# system call handler stub
ENTRY(system_call)
	RING0_INT_FRAME			# can't unwind into user space anyway
	pushl %eax			# save orig_eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	GET_THREAD_INFO(%ebp)
	testl $TF_MASK,EFLAGS(%esp)
	jz no_singlestep
	orl $_TIF_SINGLESTEP,TI_flags(%ebp)
no_singlestep:
					# system call tracing in operation / emulation
	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
	testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
	jnz syscall_trace_entry
	cmpl $(nr_syscalls), %eax
	jae syscall_badsys
syscall_call:
	call *sys_call_table(,%eax,4)
	movl %eax,EAX(%esp)		# store the return value
syscall_exit:
	cli				# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testw $_TIF_ALLWORK_MASK, %cx	# current->work
	jne syscall_exit_work

restore_all:
	movl EFLAGS(%esp), %eax		# mix EFLAGS, SS and CS
	# Warning: OLDSS(%esp) contains the wrong/random values if we
	# are returning to the kernel.
	# See comments in process.c:copy_thread() for details.
	movb OLDSS(%esp), %ah
	movb CS(%esp), %al
	andl $(VM_MASK | (4 << 8) | 3), %eax
	cmpl $((4 << 8) | 3), %eax
	CFI_REMEMBER_STATE
	je ldt_ss			# returning to user-space with LDT SS
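	# Annotation on the test above: after the two movb's, %al holds
	# the low byte of the saved CS and %ah the low byte of the saved
	# SS, with the saved EFLAGS in the upper bits.  The mask keeps
	# VM_MASK (vm86 mode), bit 2 of SS shifted to bit 10 (the TI
	# bit - set for LDT selectors) and the CS RPL.  Only "not vm86,
	# LDT stack segment, ring-3 code" compares equal, so only that
	# case takes the ldt_ss slow path.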
restore_nocheck:
	TRACE_IRQS_IRET
restore_nocheck_notrace:
	RESTORE_REGS
	addl $4, %esp
	CFI_ADJUST_CFA_OFFSET -4
1:	iret
.section .fixup,"ax"
iret_exc:
	TRACE_IRQS_ON
	sti
	pushl $0			# no error code
	pushl $do_iret_error
	jmp error_code
.previous
.section __ex_table,"a"
	.align 4
	.long 1b,iret_exc
.previous

	CFI_RESTORE_STATE
ldt_ss:
	larl OLDSS(%esp), %eax
	jnz restore_nocheck
	testl $0x00400000, %eax		# returning to 32bit stack?
	jnz restore_nocheck		# all right, normal return
	/* If returning to userspace with 16bit stack,
	 * try to fix the higher word of ESP, as the CPU
	 * won't restore it.
	 * This is an "official" bug of all the x86-compatible
	 * CPUs, which we can try to work around to make
	 * dosemu and wine happy. */
	subl $8, %esp			# reserve space for switch16 pointer
	CFI_ADJUST_CFA_OFFSET 8
	cli
	TRACE_IRQS_OFF
	movl %esp, %eax
	/* Set up the 16bit stack frame with switch32 pointer on top,
	 * and a switch16 pointer on top of the current frame. */
	call setup_x86_bogus_stack
	CFI_ADJUST_CFA_OFFSET -8	# frame has moved
	TRACE_IRQS_IRET
	RESTORE_REGS
	lss 20+4(%esp), %esp		# switch to 16bit stack
1:	iret
.section __ex_table,"a"
	.align 4
	.long 1b,iret_exc
.previous
	CFI_ENDPROC

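/*
 * Annotation (the mechanism as far as this file shows it): an "iret"
 * to a 16bit stack segment restores only %sp and leaves the high
 * word of %esp holding whatever the kernel last had there, which
 * both leaks kernel stack bits and confuses 16bit programs.
 * setup_x86_bogus_stack copies the return frame onto a per-CPU 16bit
 * stack (see CPU_16BIT_STACK_SIZE in FIXUP_ESPFIX_STACK below), and
 * the final iret is issued from there rather than from the normal
 * kernel stack.
 */
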
	# perform work that needs to be done immediately before resumption
	ALIGN
	RING0_PTREGS_FRAME		# can't unwind into user space anyway
work_pending:
	testb $_TIF_NEED_RESCHED, %cl
	jz work_notifysig
work_resched:
	call schedule
	cli				# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done other
					# than syscall tracing?
	jz restore_all
	testb $_TIF_NEED_RESCHED, %cl
	jnz work_resched

work_notifysig:				# deal with pending signals and
					# notify-resume requests
	testl $VM_MASK, EFLAGS(%esp)
	movl %esp, %eax
	jne work_notifysig_v86		# returning to kernel-space or
					# vm86-space
	xorl %edx, %edx
	call do_notify_resume
	jmp resume_userspace_sig

	ALIGN
work_notifysig_v86:
#ifdef CONFIG_VM86
	pushl %ecx			# save ti_flags for do_notify_resume
	CFI_ADJUST_CFA_OFFSET 4
	call save_v86_state		# %eax contains pt_regs pointer
	popl %ecx
	CFI_ADJUST_CFA_OFFSET -4
	movl %eax, %esp
	xorl %edx, %edx
	call do_notify_resume
	jmp resume_userspace_sig
#endif

	# perform syscall entry tracing
	ALIGN
syscall_trace_entry:
	movl $-ENOSYS,EAX(%esp)
	movl %esp, %eax
	xorl %edx,%edx
	call do_syscall_trace
	cmpl $0, %eax
	jne resume_userspace		# ret != 0 -> running under PTRACE_SYSEMU,
					# so must skip actual syscall
	movl ORIG_EAX(%esp), %eax
	cmpl $(nr_syscalls), %eax
	jnae syscall_call
	jmp syscall_exit

	# perform syscall exit tracing
	ALIGN
syscall_exit_work:
	testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
	jz work_pending
	TRACE_IRQS_ON
	sti				# could let do_syscall_trace() call
					# schedule() instead
	movl %esp, %eax
	movl $1, %edx
	call do_syscall_trace
	jmp resume_userspace
	CFI_ENDPROC

	RING0_INT_FRAME			# can't unwind into user space anyway
syscall_fault:
	pushl %eax			# save orig_eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	GET_THREAD_INFO(%ebp)
	movl $-EFAULT,EAX(%esp)
	jmp resume_userspace

syscall_badsys:
	movl $-ENOSYS,EAX(%esp)
	jmp resume_userspace
	CFI_ENDPROC

#define FIXUP_ESPFIX_STACK \
	movl %esp, %eax; \
	/* switch to 32bit stack using the pointer on top of 16bit stack */ \
	lss %ss:CPU_16BIT_STACK_SIZE-8, %esp; \
	/* copy data from 16bit stack to 32bit stack */ \
	call fixup_x86_bogus_stack; \
	/* put ESP to the proper location */ \
	movl %eax, %esp;
#define UNWIND_ESPFIX_STACK \
	pushl %eax; \
	CFI_ADJUST_CFA_OFFSET 4; \
	movl %ss, %eax; \
	/* see if on 16bit stack */ \
	cmpw $__ESPFIX_SS, %ax; \
	je 28f; \
27:	popl %eax; \
	CFI_ADJUST_CFA_OFFSET -4; \
.section .fixup,"ax"; \
28:	movl $__KERNEL_DS, %eax; \
	movl %eax, %ds; \
	movl %eax, %es; \
	/* switch to 32bit stack */ \
	FIXUP_ESPFIX_STACK; \
	jmp 27b; \
.previous

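/*
 * Annotation: these are the two halves of the 16bit-stack handling.
 * UNWIND_ESPFIX_STACK runs on exception entry (see error_code
 * above): if %ss is __ESPFIX_SS we were interrupted while on the
 * 16bit stack, so it reloads sane data segments and invokes
 * FIXUP_ESPFIX_STACK, which uses the saved pointer at the top of the
 * 16bit stack to get back onto the real 32bit stack and lets
 * fixup_x86_bogus_stack copy the frame across.
 */
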
/*
 * Build the entry stubs and pointer table with
 * some assembler magic.
 */
.data
ENTRY(interrupt)
.text

vector=0
ENTRY(irq_entries_start)
	RING0_INT_FRAME
.rept NR_IRQS
	ALIGN
 .if vector
	CFI_ADJUST_CFA_OFFSET -4
 .endif
1:	pushl $~(vector)
	CFI_ADJUST_CFA_OFFSET 4
	jmp common_interrupt
.data
	.long 1b
.text
vector=vector+1
.endr

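/*
 * Annotation: each stub pushes $~(vector), the one's complement of
 * its vector number, into the orig_eax slot.  That guarantees a
 * negative value, so common code can tell "hardware interrupt" apart
 * from "system call" (which stores the positive syscall number
 * there); the C handler can recover the vector by complementing
 * again with ~.
 */
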
/*
 * the CPU automatically disables interrupts when executing an IRQ vector,
 * so IRQ-flags tracing has to follow that:
 */
	ALIGN
common_interrupt:
	SAVE_ALL
	TRACE_IRQS_OFF
	movl %esp,%eax
	call do_IRQ
	jmp ret_from_intr
	CFI_ENDPROC

#define BUILD_INTERRUPT(name, nr)	\
ENTRY(name)				\
	RING0_INT_FRAME;		\
	pushl $~(nr);			\
	CFI_ADJUST_CFA_OFFSET 4;	\
	SAVE_ALL;			\
	TRACE_IRQS_OFF			\
	movl %esp,%eax;			\
	call smp_/**/name;		\
	jmp ret_from_intr;		\
	CFI_ENDPROC

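/*
 * Usage sketch (an assumed example of how entry_arch.h consumes this
 * macro, not code that runs here):
 *
 *	BUILD_INTERRUPT(reschedule_interrupt, RESCHEDULE_VECTOR)
 *
 * expands to an ENTRY(reschedule_interrupt) stub that pushes the
 * negated vector, saves registers, and calls the C handler
 * smp_reschedule_interrupt - the macro glues "smp_" onto the entry
 * name via an empty comment splice - with a pt_regs pointer in %eax.
 */
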
/* The include is where all of the SMP etc. interrupts come from */
#include "entry_arch.h"

ENTRY(divide_error)
	RING0_INT_FRAME
	pushl $0			# no error code
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_divide_error
	CFI_ADJUST_CFA_OFFSET 4
	ALIGN
error_code:
	pushl %ds
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET ds, 0*/
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET eax, 0
	xorl %eax, %eax
	pushl %ebp
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ebp, 0
	pushl %edi
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET edi, 0
	pushl %esi
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET esi, 0
	pushl %edx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET edx, 0
	decl %eax			# eax = -1
	pushl %ecx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ecx, 0
	pushl %ebx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ebx, 0
	cld
	pushl %es
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET es, 0*/
	UNWIND_ESPFIX_STACK
	popl %ecx
	CFI_ADJUST_CFA_OFFSET -4
	/*CFI_REGISTER es, ecx*/
	movl ES(%esp), %edi		# get the function address
	movl ORIG_EAX(%esp), %edx	# get the error code
	movl %eax, ORIG_EAX(%esp)
	movl %ecx, ES(%esp)
	/*CFI_REL_OFFSET es, ES*/
	movl $(__USER_DS), %ecx
	movl %ecx, %ds
	movl %ecx, %es
	movl %esp,%eax			# pt_regs pointer
	call *%edi
	jmp ret_from_exception
	CFI_ENDPROC

ENTRY(coprocessor_error)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_coprocessor_error
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

ENTRY(simd_coprocessor_error)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_simd_coprocessor_error
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

ENTRY(device_not_available)
	RING0_INT_FRAME
	pushl $-1			# mark this as an int
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	movl %cr0, %eax
	testl $0x4, %eax		# EM (math emulation bit)
	jne device_not_available_emulate
	preempt_stop
	call math_state_restore
	jmp ret_from_exception
device_not_available_emulate:
	pushl $0			# temporary storage for ORIG_EIP
	CFI_ADJUST_CFA_OFFSET 4
	call math_emulate
	addl $4, %esp
	CFI_ADJUST_CFA_OFFSET -4
	jmp ret_from_exception
	CFI_ENDPROC

/*
 * Debug traps and NMI can happen at the one SYSENTER instruction
 * that sets up the real kernel stack.  Check here, since we can't
 * allow the wrong stack to be used.
 *
 * "TSS_sysenter_esp0+12" is because the NMI/debug handler will have
 * already pushed 3 words if it hits on the sysenter instruction:
 * eflags, cs and eip.
 *
 * We just load the right stack, and push the three (known) values
 * by hand onto the new stack - while updating the return eip past
 * the instruction that would have done it for sysenter.
 */
#define FIX_STACK(offset, ok, label)		\
	cmpw $__KERNEL_CS,4(%esp);		\
	jne ok;					\
label:						\
	movl TSS_sysenter_esp0+offset(%esp),%esp;	\
	pushfl;					\
	pushl $__KERNEL_CS;			\
	pushl $sysenter_past_esp

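/*
 * Annotation on the offset parameter: the debug handler uses
 * FIX_STACK(12, ...) because the trap pushed three words (eflags,
 * cs, eip) on top of TSS_sysenter_esp0; the NMI-on-debug-path case
 * below uses FIX_STACK(24, ...) because by then two such three-word
 * frames are stacked.
 */
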
KPROBE_ENTRY(debug)
	RING0_INT_FRAME
	cmpl $sysenter_entry,(%esp)
	jne debug_stack_correct
	FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
debug_stack_correct:
	pushl $-1			# mark this as an int
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	xorl %edx,%edx			# error code 0
	movl %esp,%eax			# pt_regs pointer
	call do_debug
	jmp ret_from_exception
	CFI_ENDPROC
	.previous .text
/*
 * NMI is doubly nasty.  It can happen _while_ we're handling
 * a debug fault, and the debug fault hasn't yet been able to
 * clear up the stack.  So we first check whether we got an
 * NMI on the sysenter entry path, but after that we need to
 * check whether we got an NMI on the debug path where the debug
 * fault happened on the sysenter path.
 */
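/*
 * Sketch of the checks below (an annotation; the constants are taken
 * from the code): 1) %ss == __ESPFIX_SS means the NMI arrived while
 * on the 16bit stack, handled separately.  2) A saved eip equal to
 * sysenter_entry means we hit the esp-setup window directly.  3) The
 * THREAD_SIZE-20 test skips the deeper probe when %esp is within 20
 * bytes of the end of the stack page (20 = 16 + 4, the deepest
 * access being the cmpw at 16(%esp)), since probing there could
 * touch a page that might not exist; otherwise 12(%esp) is checked
 * for a debug-handler frame that itself hit sysenter.
 */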
ENTRY(nmi)
	RING0_INT_FRAME
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	movl %ss, %eax
	cmpw $__ESPFIX_SS, %ax
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	je nmi_16bit_stack
	cmpl $sysenter_entry,(%esp)
	je nmi_stack_fixup
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	movl %esp,%eax
	/* Do not access memory above the end of our stack page,
	 * it might not exist.
	 */
	andl $(THREAD_SIZE-1),%eax
	cmpl $(THREAD_SIZE-20),%eax
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	jae nmi_stack_correct
	cmpl $sysenter_entry,12(%esp)
	je nmi_debug_stack_check
nmi_stack_correct:
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	xorl %edx,%edx			# zero error code
	movl %esp,%eax			# pt_regs pointer
	call do_nmi
	jmp restore_nocheck_notrace
	CFI_ENDPROC

nmi_stack_fixup:
	FIX_STACK(12,nmi_stack_correct, 1)
	jmp nmi_stack_correct
nmi_debug_stack_check:
	cmpw $__KERNEL_CS,16(%esp)
	jne nmi_stack_correct
	cmpl $debug,(%esp)
	jb nmi_stack_correct
	cmpl $debug_esp_fix_insn,(%esp)
	ja nmi_stack_correct
	FIX_STACK(24,nmi_stack_correct, 1)
	jmp nmi_stack_correct

nmi_16bit_stack:
	RING0_INT_FRAME
	/* create the pointer to lss back */
	pushl %ss
	CFI_ADJUST_CFA_OFFSET 4
	pushl %esp
	CFI_ADJUST_CFA_OFFSET 4
	movzwl %sp, %esp
	addw $4, (%esp)
	/* copy the iret frame of 12 bytes */
	.rept 3
	pushl 16(%esp)
	CFI_ADJUST_CFA_OFFSET 4
	.endr
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	FIXUP_ESPFIX_STACK		# %eax == %esp
	CFI_ADJUST_CFA_OFFSET -20	# the frame has now moved
	xorl %edx,%edx			# zero error code
	call do_nmi
	RESTORE_REGS
	lss 12+4(%esp), %esp		# back to 16bit stack
1:	iret
	CFI_ENDPROC
.section __ex_table,"a"
	.align 4
	.long 1b,iret_exc
.previous

KPROBE_ENTRY(int3)
	RING0_INT_FRAME
	pushl $-1			# mark this as an int
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	xorl %edx,%edx			# zero error code
	movl %esp,%eax			# pt_regs pointer
	call do_int3
	jmp ret_from_exception
	CFI_ENDPROC
	.previous .text

ENTRY(overflow)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_overflow
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

ENTRY(bounds)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_bounds
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

ENTRY(invalid_op)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_invalid_op
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

ENTRY(coprocessor_segment_overrun)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_coprocessor_segment_overrun
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

ENTRY(invalid_TSS)
	RING0_EC_FRAME
	pushl $do_invalid_TSS
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

ENTRY(segment_not_present)
	RING0_EC_FRAME
	pushl $do_segment_not_present
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

ENTRY(stack_segment)
	RING0_EC_FRAME
	pushl $do_stack_segment
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

KPROBE_ENTRY(general_protection)
	RING0_EC_FRAME
	pushl $do_general_protection
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
	.previous .text

ENTRY(alignment_check)
	RING0_EC_FRAME
	pushl $do_alignment_check
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

KPROBE_ENTRY(page_fault)
	RING0_EC_FRAME
	pushl $do_page_fault
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
	.previous .text

#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl machine_check_vector
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
#endif

ENTRY(spurious_interrupt_bug)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_spurious_interrupt_bug
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

#ifdef CONFIG_STACK_UNWIND
ENTRY(arch_unwind_init_running)
	CFI_STARTPROC
	movl	4(%esp), %edx
	movl	(%esp), %ecx
	leal	4(%esp), %eax
	movl	%ebx, EBX(%edx)
	xorl	%ebx, %ebx
	movl	%ebx, ECX(%edx)
	movl	%ebx, EDX(%edx)
	movl	%esi, ESI(%edx)
	movl	%edi, EDI(%edx)
	movl	%ebp, EBP(%edx)
	movl	%ebx, EAX(%edx)
	movl	$__USER_DS, DS(%edx)
	movl	$__USER_DS, ES(%edx)
	movl	%ebx, ORIG_EAX(%edx)
	movl	%ecx, EIP(%edx)
	movl	12(%esp), %ecx
	movl	$__KERNEL_CS, CS(%edx)
	movl	%ebx, EFLAGS(%edx)
	movl	%eax, OLDESP(%edx)
	movl	8(%esp), %eax
	movl	%ecx, 8(%esp)
	movl	EBX(%edx), %ebx
	movl	$__KERNEL_DS, OLDSS(%edx)
	jmpl	*%eax
	CFI_ENDPROC
ENDPROC(arch_unwind_init_running)
#endif

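/*
 * Annotation: syscall_table.S defines sys_call_table, the array of
 * handler pointers indexed by "call *sys_call_table(,%eax,4)" above;
 * placing it in .rodata keeps it read-only, and syscall_table_size
 * below feeds the nr_syscalls bound checked on every entry path.
 */
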
.section .rodata,"a"
#include "syscall_table.S"

syscall_table_size=(.-sys_call_table)