[PATCH] i386: Fix entry.S code with !CONFIG_VM86
arch/i386/kernel/entry.S
/*
 *  linux/arch/i386/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 * I changed all the .align's to 4 (16 byte alignment), as that's faster
 * on a 486.
 *
 * Stack layout in 'ret_from_system_call':
 *	ptrace needs to have all regs on the stack.
 *	if the order here is changed, it needs to be
 *	updated in fork.c:copy_process, signal.c:do_signal,
 *	ptrace.c and ptrace.h
 *
 *	 0(%esp) - %ebx
 *	 4(%esp) - %ecx
 *	 8(%esp) - %edx
 *	 C(%esp) - %esi
 *	10(%esp) - %edi
 *	14(%esp) - %ebp
 *	18(%esp) - %eax
 *	1C(%esp) - %ds
 *	20(%esp) - %es
 *	24(%esp) - %gs
 *	28(%esp) - orig_eax
 *	2C(%esp) - %eip
 *	30(%esp) - %cs
 *	34(%esp) - %eflags
 *	38(%esp) - %oldesp
 *	3C(%esp) - %oldss
 *
 * "current" is in register %ebx during any slow entries.
 */

#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/smp.h>
#include <asm/page.h>
#include <asm/desc.h>
#include <asm/percpu.h>
#include <asm/dwarf2.h>
#include "irq_vectors.h"

#define nr_syscalls ((syscall_table_size)/4)

CF_MASK		= 0x00000001
TF_MASK		= 0x00000100
IF_MASK		= 0x00000200
DF_MASK		= 0x00000400
NT_MASK		= 0x00004000
VM_MASK		= 0x00020000

/* These are replacements for paravirtualization */
#define DISABLE_INTERRUPTS		cli
#define ENABLE_INTERRUPTS		sti
#define ENABLE_INTERRUPTS_SYSEXIT	sti; sysexit
#define INTERRUPT_RETURN		iret
#define GET_CR0_INTO_EAX		movl %cr0, %eax
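
/*
 * Note on the native sysexit path: sysexit returns to user mode with
 * %eip taken from %edx and %esp taken from %ecx, so the exit code
 * below must load those two registers before ENABLE_INTERRUPTS_SYSEXIT.
 * The "sti; sysexit" pair is safe because sti delays interrupt
 * delivery until after the following instruction has executed.
 */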

#ifdef CONFIG_PREEMPT
#define preempt_stop		DISABLE_INTERRUPTS; TRACE_IRQS_OFF
#else
#define preempt_stop
#define resume_kernel		restore_nocheck
#endif

.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
	testl $IF_MASK,PT_EFLAGS(%esp)	# interrupts off?
	jz 1f
	TRACE_IRQS_ON
1:
#endif
.endm

#ifdef CONFIG_VM86
#define resume_userspace_sig	check_userspace
#else
#define resume_userspace_sig	resume_userspace
#endif

#define SAVE_ALL \
	cld; \
	pushl %gs; \
	CFI_ADJUST_CFA_OFFSET 4;\
	/*CFI_REL_OFFSET gs, 0;*/\
	pushl %es; \
	CFI_ADJUST_CFA_OFFSET 4;\
	/*CFI_REL_OFFSET es, 0;*/\
	pushl %ds; \
	CFI_ADJUST_CFA_OFFSET 4;\
	/*CFI_REL_OFFSET ds, 0;*/\
	pushl %eax; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET eax, 0;\
	pushl %ebp; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET ebp, 0;\
	pushl %edi; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET edi, 0;\
	pushl %esi; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET esi, 0;\
	pushl %edx; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET edx, 0;\
	pushl %ecx; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET ecx, 0;\
	pushl %ebx; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET ebx, 0;\
	movl $(__USER_DS), %edx; \
	movl %edx, %ds; \
	movl %edx, %es; \
	movl $(__KERNEL_PDA), %edx; \
	movl %edx, %gs
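
/*
 * The pushes above build a struct pt_regs on the stack, top-down, in
 * exactly the layout documented at the head of this file; the PT_*
 * offsets (generated by asm-offsets.c) index into that frame.  %ds/%es
 * are then reloaded with the flat user data segment and %gs with the
 * per-CPU PDA segment, so kernel code always runs with known segments.
 */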

#define RESTORE_INT_REGS \
	popl %ebx; \
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE ebx;\
	popl %ecx; \
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE ecx;\
	popl %edx; \
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE edx;\
	popl %esi; \
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE esi;\
	popl %edi; \
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE edi;\
	popl %ebp; \
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE ebp;\
	popl %eax; \
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE eax

#define RESTORE_REGS \
	RESTORE_INT_REGS; \
1:	popl %ds; \
	CFI_ADJUST_CFA_OFFSET -4;\
	/*CFI_RESTORE ds;*/\
2:	popl %es; \
	CFI_ADJUST_CFA_OFFSET -4;\
	/*CFI_RESTORE es;*/\
3:	popl %gs; \
	CFI_ADJUST_CFA_OFFSET -4;\
	/*CFI_RESTORE gs;*/\
.pushsection .fixup,"ax"; \
4:	movl $0,(%esp); \
	jmp 1b; \
5:	movl $0,(%esp); \
	jmp 2b; \
6:	movl $0,(%esp); \
	jmp 3b; \
.section __ex_table,"a";\
	.align 4; \
	.long 1b,4b; \
	.long 2b,5b; \
	.long 3b,6b; \
.popsection
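
/*
 * Popping %ds/%es/%gs can fault if the saved selector is bogus (a
 * ptracer or a corrupted signal frame can put anything there).  The
 * fixup entries above (4b/5b/6b) overwrite the offending word on the
 * stack with 0 and retry the pop, so the task resumes with a null
 * segment instead of the kernel oopsing on the reload.
 */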

#define RING0_INT_FRAME \
	CFI_STARTPROC simple;\
	CFI_SIGNAL_FRAME;\
	CFI_DEF_CFA esp, 3*4;\
	/*CFI_OFFSET cs, -2*4;*/\
	CFI_OFFSET eip, -3*4

#define RING0_EC_FRAME \
	CFI_STARTPROC simple;\
	CFI_SIGNAL_FRAME;\
	CFI_DEF_CFA esp, 4*4;\
	/*CFI_OFFSET cs, -2*4;*/\
	CFI_OFFSET eip, -3*4

#define RING0_PTREGS_FRAME \
	CFI_STARTPROC simple;\
	CFI_SIGNAL_FRAME;\
	CFI_DEF_CFA esp, PT_OLDESP-PT_EBX;\
	/*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/\
	CFI_OFFSET eip, PT_EIP-PT_OLDESP;\
	/*CFI_OFFSET es, PT_ES-PT_OLDESP;*/\
	/*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/\
	CFI_OFFSET eax, PT_EAX-PT_OLDESP;\
	CFI_OFFSET ebp, PT_EBP-PT_OLDESP;\
	CFI_OFFSET edi, PT_EDI-PT_OLDESP;\
	CFI_OFFSET esi, PT_ESI-PT_OLDESP;\
	CFI_OFFSET edx, PT_EDX-PT_OLDESP;\
	CFI_OFFSET ecx, PT_ECX-PT_OLDESP;\
	CFI_OFFSET ebx, PT_EBX-PT_OLDESP
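
/*
 * The three RING0_*_FRAME macros emit only DWARF CFI annotations (see
 * asm/dwarf2.h, where they compile away when unwind info is disabled).
 * They describe the three frame shapes at kernel entry: a plain
 * interrupt frame (eip/cs/eflags, 3 words), an exception frame with a
 * hardware error code (4 words), and a full SAVE_ALL pt_regs frame.
 */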

ENTRY(ret_from_fork)
	CFI_STARTPROC
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	call schedule_tail
	GET_THREAD_INFO(%ebp)
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	pushl $0x0202			# Reset kernel eflags
	CFI_ADJUST_CFA_OFFSET 4
	popfl
	CFI_ADJUST_CFA_OFFSET -4
	jmp syscall_exit
	CFI_ENDPROC

/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible which is why some of this is
 * less clear than it otherwise should be.
 */

	# userspace resumption stub bypassing syscall exit tracing
	ALIGN
	RING0_PTREGS_FRAME
ret_from_exception:
	preempt_stop
ret_from_intr:
	GET_THREAD_INFO(%ebp)
check_userspace:
	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS and CS
	movb PT_CS(%esp), %al
	andl $(VM_MASK | SEGMENT_RPL_MASK), %eax
	cmpl $USER_RPL, %eax
	jb resume_kernel		# not returning to v8086 or userspace

ENTRY(resume_userspace)
	DISABLE_INTERRUPTS		# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done on
					# int/exception return?
	jne work_pending
	jmp restore_all

#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
	DISABLE_INTERRUPTS
	cmpl $0,TI_preempt_count(%ebp)	# non-zero preempt_count ?
	jnz restore_nocheck
need_resched:
	movl TI_flags(%ebp), %ecx	# need_resched set ?
	testb $_TIF_NEED_RESCHED, %cl
	jz restore_all
	testl $IF_MASK,PT_EFLAGS(%esp)	# interrupts off (exception path) ?
	jz restore_all
	call preempt_schedule_irq
	jmp need_resched
#endif
	CFI_ENDPROC

/* SYSENTER_RETURN points to after the "sysenter" instruction in
   the vsyscall page.  See vsyscall-sysenter.S, which defines the symbol.  */

	# sysenter call handler stub
ENTRY(sysenter_entry)
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, 0
	CFI_REGISTER esp, ebp
	movl TSS_sysenter_esp0(%esp),%esp
sysenter_past_esp:
	/*
	 * No need to follow this irqs on/off section: sysenter itself
	 * disabled irqs, and we enable them straight after entry:
	 */
	ENABLE_INTERRUPTS
	pushl $(__USER_DS)
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET ss, 0*/
	pushl %ebp
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET esp, 0
	pushfl
	CFI_ADJUST_CFA_OFFSET 4
	pushl $(__USER_CS)
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET cs, 0*/
	/*
	 * Push current_thread_info()->sysenter_return to the stack.
	 * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
	 * pushed above; +8 corresponds to copy_thread's esp0 setting.
	 */
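	/*
	 * Worked out: after the TSS_sysenter_esp0 load, %esp was
	 * stack_base + THREAD_SIZE - 8, and the 4 pushes above lowered
	 * it by another 16 bytes.  Adding back 8+4*4 and subtracting
	 * THREAD_SIZE therefore lands on stack_base + TI_sysenter_return,
	 * i.e. the sysenter_return field of the thread_info sitting at
	 * the bottom of the kernel stack.
	 */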
	pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET eip, 0

	/*
	 * Load the potential sixth argument from user stack.
	 * Careful about security.
	 */
	cmpl $__PAGE_OFFSET-3,%ebp
	jae syscall_fault
1:	movl (%ebp),%ebp
.section __ex_table,"a"
	.align 4
	.long 1b,syscall_fault
.previous
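	/*
	 * The __ex_table entry pairs the possibly-faulting load at 1:
	 * with a fixup address: if user %ebp points at an unmapped
	 * address, the page fault handler finds the entry and resumes
	 * at syscall_fault instead of oopsing.  The bounds check above
	 * already rejected kernel pointers (-3 so that the whole 4-byte
	 * word stays below __PAGE_OFFSET).
	 */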

	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	GET_THREAD_INFO(%ebp)

	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
	testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
	jnz syscall_trace_entry
	cmpl $(nr_syscalls), %eax
	jae syscall_badsys
	call *sys_call_table(,%eax,4)
	movl %eax,PT_EAX(%esp)
	DISABLE_INTERRUPTS
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testw $_TIF_ALLWORK_MASK, %cx
	jne syscall_exit_work
/* if something modifies registers it must also disable sysexit */
	movl PT_EIP(%esp), %edx
	movl PT_OLDESP(%esp), %ecx
	xorl %ebp,%ebp
	TRACE_IRQS_ON
1:	mov  PT_GS(%esp), %gs
	ENABLE_INTERRUPTS_SYSEXIT
	CFI_ENDPROC
.pushsection .fixup,"ax"
2:	movl $0,PT_GS(%esp)
	jmp 1b
.section __ex_table,"a"
	.align 4
	.long 1b,2b
.popsection

	# system call handler stub
ENTRY(system_call)
	RING0_INT_FRAME			# can't unwind into user space anyway
	pushl %eax			# save orig_eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	GET_THREAD_INFO(%ebp)
	testl $TF_MASK,PT_EFLAGS(%esp)
	jz no_singlestep
	orl $_TIF_SINGLESTEP,TI_flags(%ebp)
no_singlestep:
	# system call tracing in operation / emulation
	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
	testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
	jnz syscall_trace_entry
	cmpl $(nr_syscalls), %eax
	jae syscall_badsys
syscall_call:
	call *sys_call_table(,%eax,4)
	movl %eax,PT_EAX(%esp)		# store the return value
syscall_exit:
	DISABLE_INTERRUPTS		# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testw $_TIF_ALLWORK_MASK, %cx	# current->work
	jne syscall_exit_work

restore_all:
	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS, SS and CS
	# Warning: PT_OLDSS(%esp) contains the wrong/random values if we
	# are returning to the kernel.
	# See comments in process.c:copy_thread() for details.
	movb PT_OLDSS(%esp), %ah
	movb PT_CS(%esp), %al
	andl $(VM_MASK | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
	cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
	CFI_REMEMBER_STATE
	je ldt_ss			# returning to user-space with LDT SS
restore_nocheck:
	TRACE_IRQS_IRET
restore_nocheck_notrace:
	RESTORE_REGS
	addl $4, %esp			# skip orig_eax/error_code
	CFI_ADJUST_CFA_OFFSET -4
1:	INTERRUPT_RETURN
.section .fixup,"ax"
iret_exc:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS
	pushl $0			# no error code
	pushl $do_iret_error
	jmp error_code
.previous
.section __ex_table,"a"
	.align 4
	.long 1b,iret_exc
.previous

	CFI_RESTORE_STATE
ldt_ss:
	larl PT_OLDSS(%esp), %eax
	jnz restore_nocheck
	testl $0x00400000, %eax		# returning to 32bit stack?
	jnz restore_nocheck		# all right, normal return
	/* If returning to userspace with 16bit stack,
	 * try to fix the higher word of ESP, as the CPU
	 * won't restore it.
	 * This is an "official" bug of all the x86-compatible
	 * CPUs, which we can try to work around to make
	 * dosemu and wine happy. */
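	/*
	 * The workaround: patch_espfix_desc (fastcall, %eax = saved user
	 * esp, %edx = current kernel esp) rewrites this CPU's
	 * GDT_ENTRY_ESPFIX_SS descriptor and returns an offset within
	 * that segment pointing at the current kernel stack.  The lss
	 * below switches onto the espfix segment, so the final iret runs
	 * with an SS whose base compensates for the high word of %esp
	 * that the CPU will leave un-restored.
	 */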
	movl PT_OLDESP(%esp), %eax
	movl %esp, %edx
	call patch_espfix_desc
	pushl $__ESPFIX_SS
	CFI_ADJUST_CFA_OFFSET 4
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	DISABLE_INTERRUPTS
	TRACE_IRQS_OFF
	lss (%esp), %esp
	CFI_ADJUST_CFA_OFFSET -8
	jmp restore_nocheck
	CFI_ENDPROC

	# perform work that needs to be done immediately before resumption
	ALIGN
	RING0_PTREGS_FRAME		# can't unwind into user space anyway
work_pending:
	testb $_TIF_NEED_RESCHED, %cl
	jz work_notifysig
work_resched:
	call schedule
	DISABLE_INTERRUPTS		# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done other
					# than syscall tracing?
	jz restore_all
	testb $_TIF_NEED_RESCHED, %cl
	jnz work_resched

work_notifysig:				# deal with pending signals and
					# notify-resume requests
#ifdef CONFIG_VM86
	testl $VM_MASK, PT_EFLAGS(%esp)
	movl %esp, %eax
	jne work_notifysig_v86		# returning to kernel-space or
					# vm86-space
	xorl %edx, %edx
	call do_notify_resume
	jmp resume_userspace_sig

	ALIGN
work_notifysig_v86:
	pushl %ecx			# save ti_flags for do_notify_resume
	CFI_ADJUST_CFA_OFFSET 4
	call save_v86_state		# %eax contains pt_regs pointer
	popl %ecx
	CFI_ADJUST_CFA_OFFSET -4
	movl %eax, %esp
#else
	movl %esp, %eax
#endif
	xorl %edx, %edx
	call do_notify_resume
	jmp resume_userspace_sig

	# perform syscall entry tracing
	ALIGN
syscall_trace_entry:
	movl $-ENOSYS,PT_EAX(%esp)
	movl %esp, %eax
	xorl %edx,%edx
	call do_syscall_trace
	cmpl $0, %eax
	jne resume_userspace		# ret != 0 -> running under PTRACE_SYSEMU,
					# so must skip actual syscall
	movl PT_ORIG_EAX(%esp), %eax
	cmpl $(nr_syscalls), %eax
	jnae syscall_call
	jmp syscall_exit

	# perform syscall exit tracing
	ALIGN
syscall_exit_work:
	testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
	jz work_pending
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS		# could let do_syscall_trace() call
					# schedule() instead
	movl %esp, %eax
	movl $1, %edx
	call do_syscall_trace
	jmp resume_userspace
	CFI_ENDPROC

	RING0_INT_FRAME			# can't unwind into user space anyway
syscall_fault:
	pushl %eax			# save orig_eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	GET_THREAD_INFO(%ebp)
	movl $-EFAULT,PT_EAX(%esp)
	jmp resume_userspace

syscall_badsys:
	movl $-ENOSYS,PT_EAX(%esp)
	jmp resume_userspace
	CFI_ENDPROC

#define FIXUP_ESPFIX_STACK \
	/* since we are on the wrong stack, we can't write this in C :( */ \
	movl %gs:PDA_cpu, %ebx; \
	PER_CPU(cpu_gdt_descr, %ebx); \
	movl GDS_address(%ebx), %ebx; \
	GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah); \
	addl %esp, %eax; \
	pushl $__KERNEL_DS; \
	CFI_ADJUST_CFA_OFFSET 4; \
	pushl %eax; \
	CFI_ADJUST_CFA_OFFSET 4; \
	lss (%esp), %esp; \
	CFI_ADJUST_CFA_OFFSET -8;
#define UNWIND_ESPFIX_STACK \
	movl %ss, %eax; \
	/* see if on espfix stack */ \
	cmpw $__ESPFIX_SS, %ax; \
	jne 27f; \
	movl $__KERNEL_DS, %eax; \
	movl %eax, %ds; \
	movl %eax, %es; \
	/* switch to normal stack */ \
	FIXUP_ESPFIX_STACK; \
27:;
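
/*
 * FIXUP_ESPFIX_STACK recovers the linear stack address by reading the
 * espfix segment base out of this CPU's GDT (located through the PDA)
 * and adding the current %esp, then lss's back onto the flat
 * __KERNEL_DS stack.  UNWIND_ESPFIX_STACK is used on entry paths that
 * may interrupt code running on the tiny espfix stack: if %ss is
 * __ESPFIX_SS it restores flat segments and switches stacks first.
 */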

/*
 * Build the entry stubs and pointer table with
 * some assembler magic.
 */
.data
ENTRY(interrupt)
.text

vector=0
ENTRY(irq_entries_start)
	RING0_INT_FRAME
.rept NR_IRQS
	ALIGN
 .if vector
	CFI_ADJUST_CFA_OFFSET -4
 .endif
1:	pushl $~(vector)
	CFI_ADJUST_CFA_OFFSET 4
	jmp common_interrupt
 .data
	.long 1b
 .text
vector=vector+1
.endr
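
/*
 * Each stub pushes the one's complement of its vector number, so
 * orig_eax is always negative for a hardware interrupt and can never
 * be confused with a (non-negative) syscall number; the IRQ code
 * undoes the complement to recover the vector.  The .data/.text
 * flipping inside .rept simultaneously builds the interrupt[] array
 * of stub addresses used to set up the IDT.
 */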

/*
 * the CPU automatically disables interrupts when executing an IRQ vector,
 * so IRQ-flags tracing has to follow that:
 */
	ALIGN
common_interrupt:
	SAVE_ALL
	TRACE_IRQS_OFF
	movl %esp,%eax
	call do_IRQ
	jmp ret_from_intr
	CFI_ENDPROC

#define BUILD_INTERRUPT(name, nr)	\
ENTRY(name)				\
	RING0_INT_FRAME;		\
	pushl $~(nr);			\
	CFI_ADJUST_CFA_OFFSET 4;	\
	SAVE_ALL;			\
	TRACE_IRQS_OFF			\
	movl %esp,%eax;			\
	call smp_/**/name;		\
	jmp ret_from_intr;		\
	CFI_ENDPROC
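
/*
 * The "smp_/**/name" construct pastes the two tokens together using
 * the old traditional-cpp empty-comment trick (used here instead of
 * the ## operator because this file goes through the traditional
 * preprocessor), so that e.g. BUILD_INTERRUPT(reschedule_interrupt,
 * RESCHEDULE_VECTOR) ends up calling smp_reschedule_interrupt.
 */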

/* The include is where all of the SMP etc. interrupts come from */
#include "entry_arch.h"

KPROBE_ENTRY(page_fault)
	RING0_EC_FRAME
	pushl $do_page_fault
	CFI_ADJUST_CFA_OFFSET 4
	ALIGN
error_code:
	/* the function address is in %gs's slot on the stack */
	pushl %es
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET es, 0*/
	pushl %ds
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET ds, 0*/
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET eax, 0
	pushl %ebp
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ebp, 0
	pushl %edi
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET edi, 0
	pushl %esi
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET esi, 0
	pushl %edx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET edx, 0
	pushl %ecx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ecx, 0
	pushl %ebx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ebx, 0
	cld
	pushl %gs
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET gs, 0*/
	movl $(__KERNEL_PDA), %ecx
	movl %ecx, %gs
	UNWIND_ESPFIX_STACK
	popl %ecx
	CFI_ADJUST_CFA_OFFSET -4
	/*CFI_REGISTER es, ecx*/
	movl PT_GS(%esp), %edi		# get the function address
	movl PT_ORIG_EAX(%esp), %edx	# get the error code
	movl $-1, PT_ORIG_EAX(%esp)	# no syscall to restart
	mov  %ecx, PT_GS(%esp)
	/*CFI_REL_OFFSET gs, ES*/
	movl $(__USER_DS), %ecx
	movl %ecx, %ds
	movl %ecx, %es
	movl %esp,%eax			# pt_regs pointer
	call *%edi
	jmp ret_from_exception
	CFI_ENDPROC
KPROBE_END(page_fault)

ENTRY(coprocessor_error)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_coprocessor_error
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

ENTRY(simd_coprocessor_error)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_simd_coprocessor_error
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

ENTRY(device_not_available)
	RING0_INT_FRAME
	pushl $-1			# mark this as an int
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	GET_CR0_INTO_EAX
	testl $0x4, %eax		# EM (math emulation bit)
	jne device_not_available_emulate
	preempt_stop
	call math_state_restore
	jmp ret_from_exception
device_not_available_emulate:
	pushl $0			# temporary storage for ORIG_EIP
	CFI_ADJUST_CFA_OFFSET 4
	call math_emulate
	addl $4, %esp
	CFI_ADJUST_CFA_OFFSET -4
	jmp ret_from_exception
	CFI_ENDPROC

/*
 * Debug traps and NMI can happen at the one SYSENTER instruction
 * that sets up the real kernel stack.  Check here, since we can't
 * allow the wrong stack to be used.
 *
 * "TSS_sysenter_esp0+12" is because the NMI/debug handler will have
 * already pushed 3 words if it hits on the sysenter instruction:
 * eflags, cs and eip.
 *
 * We just load the right stack, and push the three (known) values
 * by hand onto the new stack - while updating the return eip past
 * the instruction that would have done it for sysenter.
 */
#define FIX_STACK(offset, ok, label) \
	cmpw $__KERNEL_CS,4(%esp); \
	jne ok; \
label: \
	movl TSS_sysenter_esp0+offset(%esp),%esp; \
	CFI_DEF_CFA esp, 0; \
	CFI_UNDEFINED eip; \
	pushfl; \
	CFI_ADJUST_CFA_OFFSET 4; \
	pushl $__KERNEL_CS; \
	CFI_ADJUST_CFA_OFFSET 4; \
	pushl $sysenter_past_esp; \
	CFI_ADJUST_CFA_OFFSET 4; \
	CFI_REL_OFFSET eip, 0

KPROBE_ENTRY(debug)
	RING0_INT_FRAME
	cmpl $sysenter_entry,(%esp)
	jne debug_stack_correct
	FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
debug_stack_correct:
	pushl $-1			# mark this as an int
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	xorl %edx,%edx			# error code 0
	movl %esp,%eax			# pt_regs pointer
	call do_debug
	jmp ret_from_exception
	CFI_ENDPROC
KPROBE_END(debug)

/*
 * NMI is doubly nasty.  It can happen _while_ we're handling
 * a debug fault, and the debug fault hasn't yet been able to
 * clear up the stack.  So we first check whether we got an
 * NMI on the sysenter entry path, but after that we need to
 * check whether we got an NMI on the debug path where the debug
 * fault happened on the sysenter path.
 */
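/*
 * Check sequence below: (1) %ss == __ESPFIX_SS means the NMI hit code
 * running on the espfix mini-stack; (2) saved eip == sysenter_entry
 * means it hit before the kernel stack was loaded; (3) if %esp is
 * within 20 bytes of the end of the stack page, stop probing - looking
 * at 12(%esp) might run off the page - and treat the stack as correct;
 * (4) otherwise see whether the NMI interrupted a debug trap that was
 * itself fixing up a sysenter stack (saved eip inside the FIX_STACK
 * window of the debug handler).
 */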
KPROBE_ENTRY(nmi)
	RING0_INT_FRAME
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	movl %ss, %eax
	cmpw $__ESPFIX_SS, %ax
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	je nmi_espfix_stack
	cmpl $sysenter_entry,(%esp)
	je nmi_stack_fixup
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	movl %esp,%eax
	/* Do not access memory above the end of our stack page,
	 * it might not exist.
	 */
	andl $(THREAD_SIZE-1),%eax
	cmpl $(THREAD_SIZE-20),%eax
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	jae nmi_stack_correct
	cmpl $sysenter_entry,12(%esp)
	je nmi_debug_stack_check
nmi_stack_correct:
	/* We have a RING0_INT_FRAME here */
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	xorl %edx,%edx			# zero error code
	movl %esp,%eax			# pt_regs pointer
	call do_nmi
	jmp restore_nocheck_notrace
	CFI_ENDPROC

nmi_stack_fixup:
	RING0_INT_FRAME
	FIX_STACK(12,nmi_stack_correct, 1)
	jmp nmi_stack_correct

nmi_debug_stack_check:
	/* We have a RING0_INT_FRAME here */
	cmpw $__KERNEL_CS,16(%esp)
	jne nmi_stack_correct
	cmpl $debug,(%esp)
	jb nmi_stack_correct
	cmpl $debug_esp_fix_insn,(%esp)
	ja nmi_stack_correct
	FIX_STACK(24,nmi_stack_correct, 1)
	jmp nmi_stack_correct

nmi_espfix_stack:
	/* We have a RING0_INT_FRAME here.
	 *
	 * Build the ss:esp pointer that lss will use to get
	 * back onto the espfix stack afterwards.
	 */
	pushl %ss
	CFI_ADJUST_CFA_OFFSET 4
	pushl %esp
	CFI_ADJUST_CFA_OFFSET 4
	addw $4, (%esp)
	/* copy the iret frame of 12 bytes */
	.rept 3
	pushl 16(%esp)
	CFI_ADJUST_CFA_OFFSET 4
	.endr
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	FIXUP_ESPFIX_STACK		# %eax == %esp
	xorl %edx,%edx			# zero error code
	call do_nmi
	RESTORE_REGS
	lss 12+4(%esp), %esp		# back to espfix stack
	CFI_ADJUST_CFA_OFFSET -24
1:	INTERRUPT_RETURN
	CFI_ENDPROC
.section __ex_table,"a"
	.align 4
	.long 1b,iret_exc
.previous
KPROBE_END(nmi)

KPROBE_ENTRY(int3)
	RING0_INT_FRAME
	pushl $-1			# mark this as an int
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	xorl %edx,%edx			# zero error code
	movl %esp,%eax			# pt_regs pointer
	call do_int3
	jmp ret_from_exception
	CFI_ENDPROC
KPROBE_END(int3)

ENTRY(overflow)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_overflow
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

ENTRY(bounds)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_bounds
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

ENTRY(invalid_op)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_invalid_op
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

ENTRY(coprocessor_segment_overrun)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_coprocessor_segment_overrun
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

ENTRY(invalid_TSS)
	RING0_EC_FRAME
	pushl $do_invalid_TSS
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

ENTRY(segment_not_present)
	RING0_EC_FRAME
	pushl $do_segment_not_present
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

ENTRY(stack_segment)
	RING0_EC_FRAME
	pushl $do_stack_segment
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

KPROBE_ENTRY(general_protection)
	RING0_EC_FRAME
	pushl $do_general_protection
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
KPROBE_END(general_protection)

ENTRY(alignment_check)
	RING0_EC_FRAME
	pushl $do_alignment_check
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

ENTRY(divide_error)
	RING0_INT_FRAME
	pushl $0			# no error code
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_divide_error
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl machine_check_vector
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
#endif

ENTRY(spurious_interrupt_bug)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_spurious_interrupt_bug
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

#ifdef CONFIG_STACK_UNWIND
ENTRY(arch_unwind_init_running)
	CFI_STARTPROC
	movl 4(%esp), %edx
	movl (%esp), %ecx
	leal 4(%esp), %eax
	movl %ebx, PT_EBX(%edx)
	xorl %ebx, %ebx
	movl %ebx, PT_ECX(%edx)
	movl %ebx, PT_EDX(%edx)
	movl %esi, PT_ESI(%edx)
	movl %edi, PT_EDI(%edx)
	movl %ebp, PT_EBP(%edx)
	movl %ebx, PT_EAX(%edx)
	movl $__USER_DS, PT_DS(%edx)
	movl $__USER_DS, PT_ES(%edx)
	movl $0, PT_GS(%edx)
	movl %ebx, PT_ORIG_EAX(%edx)
	movl %ecx, PT_EIP(%edx)
	movl 12(%esp), %ecx
	movl $__KERNEL_CS, PT_CS(%edx)
	movl %ebx, PT_EFLAGS(%edx)
	movl %eax, PT_OLDESP(%edx)
	movl 8(%esp), %eax
	movl %ecx, 8(%esp)
	movl PT_EBX(%edx), %ebx
	movl $__KERNEL_DS, PT_OLDSS(%edx)
	jmpl *%eax
	CFI_ENDPROC
ENDPROC(arch_unwind_init_running)
#endif

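/*
 * Kernel threads start here, via ret_from_fork: kernel_thread() in
 * process.c points the child's eip at this helper with %ebx = the
 * thread function and %edx = its argument.  The argument is supplied
 * both in %eax and on the stack so either calling convention works,
 * and a 0 return address is pushed so stack unwinding stops here.
 * Whatever the function returns becomes the do_exit() code.
 */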
ENTRY(kernel_thread_helper)
	pushl $0			# fake return address for unwinder
	CFI_STARTPROC
	movl %edx,%eax
	push %edx
	CFI_ADJUST_CFA_OFFSET 4
	call *%ebx
	push %eax
	CFI_ADJUST_CFA_OFFSET 4
	call do_exit
	CFI_ENDPROC
ENDPROC(kernel_thread_helper)

.section .rodata,"a"
#include "syscall_table.S"

syscall_table_size=(.-sys_call_table)