[PATCH] i386: Allow a kernel not to be in ring 0
arch/i386/kernel/entry.S
/*
 *  linux/arch/i386/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 * I changed all the .align's to 4 (16 byte alignment), as that's faster
 * on a 486.
 *
 * Stack layout in 'ret_from_system_call':
 *	ptrace needs to have all regs on the stack.
 *	if the order here is changed, it needs to be
 *	updated in fork.c:copy_process, signal.c:do_signal,
 *	ptrace.c and ptrace.h
 *
 *	 0(%esp) - %ebx
 *	 4(%esp) - %ecx
 *	 8(%esp) - %edx
 *	 C(%esp) - %esi
 *	10(%esp) - %edi
 *	14(%esp) - %ebp
 *	18(%esp) - %eax
 *	1C(%esp) - %ds
 *	20(%esp) - %es
 *	24(%esp) - orig_eax
 *	28(%esp) - %eip
 *	2C(%esp) - %cs
 *	30(%esp) - %eflags
 *	34(%esp) - %oldesp
 *	38(%esp) - %oldss
 *
 * "current" is in register %ebx during any slow entries.
 */

#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/smp.h>
#include <asm/page.h>
#include <asm/desc.h>
#include <asm/dwarf2.h>
#include "irq_vectors.h"

#define nr_syscalls ((syscall_table_size)/4)

EBX		= 0x00
ECX		= 0x04
EDX		= 0x08
ESI		= 0x0C
EDI		= 0x10
EBP		= 0x14
EAX		= 0x18
DS		= 0x1C
ES		= 0x20
ORIG_EAX	= 0x24
EIP		= 0x28
CS		= 0x2C
EFLAGS		= 0x30
OLDESP		= 0x34
OLDSS		= 0x38

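/*
 * For illustration only: a minimal C sketch of the register layout the
 * offsets above describe (this mirrors the era's struct pt_regs from
 * <asm/ptrace.h>; field names follow that convention):
 *
 *	struct pt_regs {
 *		long ebx, ecx, edx, esi, edi, ebp, eax;
 *		int  xds, xes;
 *		long orig_eax, eip;
 *		int  xcs;
 *		long eflags, esp;
 *		int  xss;
 *	};
 *
 * EBX..OLDSS above are simply the byte offsets of these fields, 4 apart.
 */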
CF_MASK		= 0x00000001
TF_MASK		= 0x00000100
IF_MASK		= 0x00000200
DF_MASK		= 0x00000400
NT_MASK		= 0x00004000
VM_MASK		= 0x00020000

/* These get replaced for paravirtualization */
#define DISABLE_INTERRUPTS		cli
#define ENABLE_INTERRUPTS		sti
#define ENABLE_INTERRUPTS_SYSEXIT	sti; sysexit
#define INTERRUPT_RETURN		iret
#define GET_CR0_INTO_EAX		movl %cr0, %eax

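/*
 * Sketch, not the actual mechanism: under a hypervisor these macros
 * would expand to calls through an operations table rather than raw
 * privileged instructions.  The names below are hypothetical:
 *
 *	struct paravirt_ops {
 *		void (*irq_disable)(void);
 *		void (*irq_enable)(void);
 *	};
 *	#define DISABLE_INTERRUPTS	call *paravirt_ops+PARAVIRT_irq_disable
 *	#define ENABLE_INTERRUPTS	call *paravirt_ops+PARAVIRT_irq_enable
 *
 * Indirecting these operations is what lets a kernel run outside ring 0.
 */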
#ifdef CONFIG_PREEMPT
#define preempt_stop		DISABLE_INTERRUPTS; TRACE_IRQS_OFF
#else
#define preempt_stop
#define resume_kernel		restore_nocheck
#endif

.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
	testl $IF_MASK,EFLAGS(%esp)	# interrupts off?
	jz 1f
	TRACE_IRQS_ON
1:
#endif
.endm

#ifdef CONFIG_VM86
#define resume_userspace_sig	check_userspace
#else
#define resume_userspace_sig	resume_userspace
#endif

#define SAVE_ALL \
	cld; \
	pushl %es; \
	CFI_ADJUST_CFA_OFFSET 4;\
	/*CFI_REL_OFFSET es, 0;*/\
	pushl %ds; \
	CFI_ADJUST_CFA_OFFSET 4;\
	/*CFI_REL_OFFSET ds, 0;*/\
	pushl %eax; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET eax, 0;\
	pushl %ebp; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET ebp, 0;\
	pushl %edi; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET edi, 0;\
	pushl %esi; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET esi, 0;\
	pushl %edx; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET edx, 0;\
	pushl %ecx; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET ecx, 0;\
	pushl %ebx; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET ebx, 0;\
	movl $(__USER_DS), %edx; \
	movl %edx, %ds; \
	movl %edx, %es;

#define RESTORE_INT_REGS \
	popl %ebx;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE ebx;\
	popl %ecx;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE ecx;\
	popl %edx;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE edx;\
	popl %esi;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE esi;\
	popl %edi;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE edi;\
	popl %ebp;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE ebp;\
	popl %eax;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE eax

#define RESTORE_REGS	\
	RESTORE_INT_REGS; \
1:	popl %ds;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	/*CFI_RESTORE ds;*/\
2:	popl %es;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	/*CFI_RESTORE es;*/\
.section .fixup,"ax";	\
3:	movl $0,(%esp);	\
	jmp 1b;		\
4:	movl $0,(%esp);	\
	jmp 2b;		\
.previous;		\
.section __ex_table,"a";\
	.align 4;	\
	.long 1b,3b;	\
	.long 2b,4b;	\
.previous

#define RING0_INT_FRAME \
	CFI_STARTPROC simple;\
	CFI_DEF_CFA esp, 3*4;\
	/*CFI_OFFSET cs, -2*4;*/\
	CFI_OFFSET eip, -3*4

#define RING0_EC_FRAME \
	CFI_STARTPROC simple;\
	CFI_DEF_CFA esp, 4*4;\
	/*CFI_OFFSET cs, -2*4;*/\
	CFI_OFFSET eip, -3*4

#define RING0_PTREGS_FRAME \
	CFI_STARTPROC simple;\
	CFI_DEF_CFA esp, OLDESP-EBX;\
	/*CFI_OFFSET cs, CS-OLDESP;*/\
	CFI_OFFSET eip, EIP-OLDESP;\
	/*CFI_OFFSET es, ES-OLDESP;*/\
	/*CFI_OFFSET ds, DS-OLDESP;*/\
	CFI_OFFSET eax, EAX-OLDESP;\
	CFI_OFFSET ebp, EBP-OLDESP;\
	CFI_OFFSET edi, EDI-OLDESP;\
	CFI_OFFSET esi, ESI-OLDESP;\
	CFI_OFFSET edx, EDX-OLDESP;\
	CFI_OFFSET ecx, ECX-OLDESP;\
	CFI_OFFSET ebx, EBX-OLDESP

ENTRY(ret_from_fork)
	CFI_STARTPROC
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	call schedule_tail
	GET_THREAD_INFO(%ebp)
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	pushl $0x0202			# Reset kernel eflags
	CFI_ADJUST_CFA_OFFSET 4
	popfl
	CFI_ADJUST_CFA_OFFSET -4
	jmp syscall_exit
	CFI_ENDPROC

/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible, which is why some of this is
 * less clear than it otherwise should be.
 */

	# userspace resumption stub bypassing syscall exit tracing
	ALIGN
	RING0_PTREGS_FRAME
ret_from_exception:
	preempt_stop
ret_from_intr:
	GET_THREAD_INFO(%ebp)
check_userspace:
	movl EFLAGS(%esp), %eax		# mix EFLAGS and CS
	movb CS(%esp), %al
	andl $(VM_MASK | SEGMENT_RPL_MASK), %eax
	cmpl $USER_RPL, %eax
	jb resume_kernel		# not returning to v8086 or userspace
ENTRY(resume_userspace)
	DISABLE_INTERRUPTS		# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done on
					# int/exception return?
	jne work_pending
	jmp restore_all

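/*
 * Illustrative C rendering of the check_userspace test above (field
 * names follow struct pt_regs; a sketch, not kernel code):
 *
 *	int to_user = ((regs->eflags & VM_MASK) |
 *		       (regs->xcs & SEGMENT_RPL_MASK)) >= USER_RPL;
 *
 * i.e. we take the userspace-resumption path either for v8086 mode
 * (VM set in eflags) or when the saved CS has a user RPL; anything
 * below USER_RPL returns to the kernel.
 */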
#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
	DISABLE_INTERRUPTS
	cmpl $0,TI_preempt_count(%ebp)	# non-zero preempt_count ?
	jnz restore_nocheck
need_resched:
	movl TI_flags(%ebp), %ecx	# need_resched set ?
	testb $_TIF_NEED_RESCHED, %cl
	jz restore_all
	testl $IF_MASK,EFLAGS(%esp)	# interrupts off (exception path) ?
	jz restore_all
	call preempt_schedule_irq
	jmp need_resched
#endif
	CFI_ENDPROC

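/*
 * The loop above, as a C sketch (illustrative only):
 *
 *	if (ti->preempt_count == 0)
 *		while ((ti->flags & _TIF_NEED_RESCHED) &&
 *		       (regs->eflags & IF_MASK))
 *			preempt_schedule_irq();
 *
 * The IF_MASK test skips preemption when the interrupted context had
 * interrupts disabled (the exception path).
 */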
/* SYSENTER_RETURN points to after the "sysenter" instruction in
   the vsyscall page.  See vsyscall-sysenter.S, which defines the symbol.  */

	# sysenter call handler stub
ENTRY(sysenter_entry)
	CFI_STARTPROC simple
	CFI_DEF_CFA esp, 0
	CFI_REGISTER esp, ebp
	movl TSS_sysenter_esp0(%esp),%esp
sysenter_past_esp:
	/*
	 * No need to follow this irqs on/off section: sysenter disabled
	 * irqs, and we re-enable them straight after entry:
	 */
	ENABLE_INTERRUPTS
	pushl $(__USER_DS)
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET ss, 0*/
	pushl %ebp
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET esp, 0
	pushfl
	CFI_ADJUST_CFA_OFFSET 4
	pushl $(__USER_CS)
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET cs, 0*/
	/*
	 * Push current_thread_info()->sysenter_return to the stack.
	 * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
	 * pushed above; +8 corresponds to copy_thread's esp0 setting.
	 */
	pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET eip, 0

	/*
	 * Load the potential sixth argument from user stack.
	 * Careful about security.
	 */
	cmpl $__PAGE_OFFSET-3,%ebp
	jae syscall_fault
1:	movl (%ebp),%ebp
.section __ex_table,"a"
	.align 4
	.long 1b,syscall_fault
.previous

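/*
 * What the fetch above does, sketched in C (illustrative; the real
 * protection against a bad pointer is the __ex_table fixup, which
 * redirects a fault in the movl to syscall_fault):
 *
 *	if ((unsigned long)user_ebp > __PAGE_OFFSET - 4)
 *		goto syscall_fault;	// 4-byte read must end below kernel space
 *	arg6 = *(unsigned long *)user_ebp;
 */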
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	GET_THREAD_INFO(%ebp)

	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
	testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
	jnz syscall_trace_entry
	cmpl $(nr_syscalls), %eax
	jae syscall_badsys
	call *sys_call_table(,%eax,4)
	movl %eax,EAX(%esp)
	DISABLE_INTERRUPTS
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testw $_TIF_ALLWORK_MASK, %cx
	jne syscall_exit_work
	/* if something modifies registers it must also disable sysexit */
	movl EIP(%esp), %edx
	movl OLDESP(%esp), %ecx
	xorl %ebp,%ebp
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS_SYSEXIT
	CFI_ENDPROC


	# system call handler stub
ENTRY(system_call)
	RING0_INT_FRAME			# can't unwind into user space anyway
	pushl %eax			# save orig_eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	GET_THREAD_INFO(%ebp)
	testl $TF_MASK,EFLAGS(%esp)
	jz no_singlestep
	orl $_TIF_SINGLESTEP,TI_flags(%ebp)
no_singlestep:
					# system call tracing in operation / emulation
	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
	testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
	jnz syscall_trace_entry
	cmpl $(nr_syscalls), %eax
	jae syscall_badsys
syscall_call:
	call *sys_call_table(,%eax,4)
	movl %eax,EAX(%esp)		# store the return value
syscall_exit:
	DISABLE_INTERRUPTS		# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testw $_TIF_ALLWORK_MASK, %cx	# current->work
	jne syscall_exit_work

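/*
 * The "call *sys_call_table(,%eax,4)" dispatch above, sketched in C
 * (illustrative; real syscall handlers take their arguments from the
 * saved registers via asmlinkage, not an explicit argument list):
 *
 *	typedef long (*syscall_fn)(long, long, long, long, long, long);
 *	extern syscall_fn sys_call_table[];
 *
 *	if (nr >= nr_syscalls)
 *		regs->eax = -ENOSYS;
 *	else
 *		regs->eax = sys_call_table[nr](regs->ebx, regs->ecx,
 *					       regs->edx, regs->esi,
 *					       regs->edi, regs->ebp);
 */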
restore_all:
	movl EFLAGS(%esp), %eax		# mix EFLAGS, SS and CS
	# Warning: OLDSS(%esp) contains the wrong/random values if we
	# are returning to the kernel.
	# See comments in process.c:copy_thread() for details.
	movb OLDSS(%esp), %ah
	movb CS(%esp), %al
	andl $(VM_MASK | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
	cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
	CFI_REMEMBER_STATE
	je ldt_ss			# returning to user-space with LDT SS
restore_nocheck:
	TRACE_IRQS_IRET
restore_nocheck_notrace:
	RESTORE_REGS
	addl $4, %esp
	CFI_ADJUST_CFA_OFFSET -4
1:	INTERRUPT_RETURN
.section .fixup,"ax"
iret_exc:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS
	pushl $0			# no error code
	pushl $do_iret_error
	jmp error_code
.previous
.section __ex_table,"a"
	.align 4
	.long 1b,iret_exc
.previous

	CFI_RESTORE_STATE
ldt_ss:
	larl OLDSS(%esp), %eax
	jnz restore_nocheck
	testl $0x00400000, %eax		# returning to 32bit stack?
	jnz restore_nocheck		# all right, normal return
	/* If returning to userspace with 16bit stack,
	 * try to fix the higher word of ESP, as the CPU
	 * won't restore it.
	 * This is an "official" bug of all the x86-compatible
	 * CPUs, which we can try to work around to make
	 * dosemu and wine happy. */
	subl $8, %esp			# reserve space for switch16 pointer
	CFI_ADJUST_CFA_OFFSET 8
	DISABLE_INTERRUPTS
	TRACE_IRQS_OFF
	movl %esp, %eax
	/* Set up the 16bit stack frame with switch32 pointer on top,
	 * and a switch16 pointer on top of the current frame. */
	call setup_x86_bogus_stack
	CFI_ADJUST_CFA_OFFSET -8	# frame has moved
	TRACE_IRQS_IRET
	RESTORE_REGS
	lss 20+4(%esp), %esp		# switch to 16bit stack
1:	INTERRUPT_RETURN
.section __ex_table,"a"
	.align 4
	.long 1b,iret_exc
.previous
	CFI_ENDPROC

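/*
 * The hardware behaviour being worked around, in pseudo-C (a sketch):
 * on an iret to a 16-bit stack segment the CPU restores only the low
 * word of %esp, so the high word of the kernel stack pointer leaks:
 *
 *	esp = (kernel_esp & 0xffff0000) | user_sp16;	// what the CPU does
 *
 * Hence the copy onto a small per-CPU 16-bit stack whose high ESP bits
 * are harmless to expose.
 */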
	# perform work that needs to be done immediately before resumption
	ALIGN
	RING0_PTREGS_FRAME		# can't unwind into user space anyway
work_pending:
	testb $_TIF_NEED_RESCHED, %cl
	jz work_notifysig
work_resched:
	call schedule
	DISABLE_INTERRUPTS		# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done other
					# than syscall tracing?
	jz restore_all
	testb $_TIF_NEED_RESCHED, %cl
	jnz work_resched

work_notifysig:				# deal with pending signals and
					# notify-resume requests
	testl $VM_MASK, EFLAGS(%esp)
	movl %esp, %eax
	jne work_notifysig_v86		# returning to kernel-space or
					# vm86-space
	xorl %edx, %edx
	call do_notify_resume
	jmp resume_userspace_sig

	ALIGN
work_notifysig_v86:
#ifdef CONFIG_VM86
	pushl %ecx			# save ti_flags for do_notify_resume
	CFI_ADJUST_CFA_OFFSET 4
	call save_v86_state		# %eax contains pt_regs pointer
	popl %ecx
	CFI_ADJUST_CFA_OFFSET -4
	movl %eax, %esp
	xorl %edx, %edx
	call do_notify_resume
	jmp resume_userspace_sig
#endif

	# perform syscall exit tracing
	ALIGN
syscall_trace_entry:
	movl $-ENOSYS,EAX(%esp)
	movl %esp, %eax
	xorl %edx,%edx
	call do_syscall_trace
	cmpl $0, %eax
	jne resume_userspace		# ret != 0 -> running under PTRACE_SYSEMU,
					# so must skip actual syscall
	movl ORIG_EAX(%esp), %eax
	cmpl $(nr_syscalls), %eax
	jnae syscall_call
	jmp syscall_exit

	# perform syscall exit tracing
	ALIGN
syscall_exit_work:
	testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
	jz work_pending
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS		# could let do_syscall_trace() call
					# schedule() instead
	movl %esp, %eax
	movl $1, %edx
	call do_syscall_trace
	jmp resume_userspace
	CFI_ENDPROC

	RING0_INT_FRAME			# can't unwind into user space anyway
syscall_fault:
	pushl %eax			# save orig_eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	GET_THREAD_INFO(%ebp)
	movl $-EFAULT,EAX(%esp)
	jmp resume_userspace

syscall_badsys:
	movl $-ENOSYS,EAX(%esp)
	jmp resume_userspace
	CFI_ENDPROC

#define FIXUP_ESPFIX_STACK \
	movl %esp, %eax; \
	/* switch to 32bit stack using the pointer on top of 16bit stack */ \
	lss %ss:CPU_16BIT_STACK_SIZE-8, %esp; \
	/* copy data from 16bit stack to 32bit stack */ \
	call fixup_x86_bogus_stack; \
	/* put ESP to the proper location */ \
	movl %eax, %esp;
#define UNWIND_ESPFIX_STACK \
	pushl %eax; \
	CFI_ADJUST_CFA_OFFSET 4; \
	movl %ss, %eax; \
	/* see if on 16bit stack */ \
	cmpw $__ESPFIX_SS, %ax; \
	je 28f; \
27:	popl %eax; \
	CFI_ADJUST_CFA_OFFSET -4; \
.section .fixup,"ax"; \
28:	movl $__KERNEL_DS, %eax; \
	movl %eax, %ds; \
	movl %eax, %es; \
	/* switch to 32bit stack */ \
	FIXUP_ESPFIX_STACK; \
	jmp 27b; \
.previous

/*
 * Build the entry stubs and pointer table with
 * some assembler magic.
 */
.data
ENTRY(interrupt)
.text

vector=0
ENTRY(irq_entries_start)
	RING0_INT_FRAME
.rept NR_IRQS
	ALIGN
 .if vector
	CFI_ADJUST_CFA_OFFSET -4
 .endif
1:	pushl $~(vector)
	CFI_ADJUST_CFA_OFFSET 4
	jmp common_interrupt
.data
	.long 1b
.text
vector=vector+1
.endr

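/*
 * Why each stub pushes ~vector rather than the raw vector number: the
 * value lands in the orig_eax slot of the saved frame, and a negative
 * value there tells the signal-restart code this entry was an interrupt,
 * not a syscall.  The handler recovers the vector by complementing
 * again, roughly (sketch of the C side):
 *
 *	int irq = ~regs->orig_eax;
 */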
/*
 * the CPU automatically disables interrupts when executing an IRQ vector,
 * so IRQ-flags tracing has to follow that:
 */
	ALIGN
common_interrupt:
	SAVE_ALL
	TRACE_IRQS_OFF
	movl %esp,%eax
	call do_IRQ
	jmp ret_from_intr
	CFI_ENDPROC

#define BUILD_INTERRUPT(name, nr)	\
ENTRY(name)				\
	RING0_INT_FRAME;		\
	pushl $~(nr);			\
	CFI_ADJUST_CFA_OFFSET 4;	\
	SAVE_ALL;			\
	TRACE_IRQS_OFF			\
	movl %esp,%eax;			\
	call smp_/**/name;		\
	jmp ret_from_intr;		\
	CFI_ENDPROC

/* The include is where all of the SMP etc. interrupts come from */
#include "entry_arch.h"

KPROBE_ENTRY(page_fault)
	RING0_EC_FRAME
	pushl $do_page_fault
	CFI_ADJUST_CFA_OFFSET 4
	ALIGN
error_code:
	pushl %ds
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET ds, 0*/
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET eax, 0
	xorl %eax, %eax
	pushl %ebp
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ebp, 0
	pushl %edi
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET edi, 0
	pushl %esi
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET esi, 0
	pushl %edx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET edx, 0
	decl %eax			# eax = -1
	pushl %ecx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ecx, 0
	pushl %ebx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ebx, 0
	cld
	pushl %es
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET es, 0*/
	UNWIND_ESPFIX_STACK
	popl %ecx
	CFI_ADJUST_CFA_OFFSET -4
	/*CFI_REGISTER es, ecx*/
	movl ES(%esp), %edi		# get the function address
	movl ORIG_EAX(%esp), %edx	# get the error code
	movl %eax, ORIG_EAX(%esp)
	movl %ecx, ES(%esp)
	/*CFI_REL_OFFSET es, ES*/
	movl $(__USER_DS), %ecx
	movl %ecx, %ds
	movl %ecx, %es
	movl %esp,%eax			# pt_regs pointer
	call *%edi
	jmp ret_from_exception
	CFI_ENDPROC
KPROBE_END(page_fault)

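/*
 * All handlers reached through "call *%edi" share one C-side shape.
 * With the regparm(3)/fastcall convention, %eax carries the first
 * argument and %edx the second, matching the two movl's above.  Sketch:
 *
 *	fastcall void do_page_fault(struct pt_regs *regs,
 *				    unsigned long error_code);
 */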
ENTRY(coprocessor_error)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_coprocessor_error
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

ENTRY(simd_coprocessor_error)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_simd_coprocessor_error
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

ENTRY(device_not_available)
	RING0_INT_FRAME
	pushl $-1			# mark this as an int
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	GET_CR0_INTO_EAX
	testl $0x4, %eax		# EM (math emulation bit)
	jne device_not_available_emulate
	preempt_stop
	call math_state_restore
	jmp ret_from_exception
device_not_available_emulate:
	pushl $0			# temporary storage for ORIG_EIP
	CFI_ADJUST_CFA_OFFSET 4
	call math_emulate
	addl $4, %esp
	CFI_ADJUST_CFA_OFFSET -4
	jmp ret_from_exception
	CFI_ENDPROC

/*
 * Debug traps and NMI can happen at the one SYSENTER instruction
 * that sets up the real kernel stack. Check here, since we can't
 * allow the wrong stack to be used.
 *
 * "TSS_sysenter_esp0+12" is because the NMI/debug handler will have
 * already pushed 3 words if it hits on the sysenter instruction:
 * eflags, cs and eip.
 *
 * We just load the right stack, and push the three (known) values
 * by hand onto the new stack - while updating the return eip past
 * the instruction that would have done it for sysenter.
 */
#define FIX_STACK(offset, ok, label)		\
	cmpw $__KERNEL_CS,4(%esp);		\
	jne ok;					\
label:						\
	movl TSS_sysenter_esp0+offset(%esp),%esp;	\
	CFI_DEF_CFA esp, 0;			\
	CFI_UNDEFINED eip;			\
	pushfl;					\
	CFI_ADJUST_CFA_OFFSET 4;		\
	pushl $__KERNEL_CS;			\
	CFI_ADJUST_CFA_OFFSET 4;		\
	pushl $sysenter_past_esp;		\
	CFI_ADJUST_CFA_OFFSET 4;		\
	CFI_REL_OFFSET eip, 0

KPROBE_ENTRY(debug)
	RING0_INT_FRAME
	cmpl $sysenter_entry,(%esp)
	jne debug_stack_correct
	FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
debug_stack_correct:
	pushl $-1			# mark this as an int
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	xorl %edx,%edx			# error code 0
	movl %esp,%eax			# pt_regs pointer
	call do_debug
	jmp ret_from_exception
	CFI_ENDPROC
KPROBE_END(debug)

/*
 * NMI is doubly nasty. It can happen _while_ we're handling
 * a debug fault, and the debug fault hasn't yet been able to
 * clear up the stack. So we first check whether we got an
 * NMI on the sysenter entry path, but after that we need to
 * check whether we got an NMI on the debug path where the debug
 * fault happened on the sysenter path.
 */
KPROBE_ENTRY(nmi)
	RING0_INT_FRAME
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	movl %ss, %eax
	cmpw $__ESPFIX_SS, %ax
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	je nmi_16bit_stack
	cmpl $sysenter_entry,(%esp)
	je nmi_stack_fixup
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	movl %esp,%eax
	/* Do not access memory above the end of our stack page,
	 * it might not exist.
	 */
	andl $(THREAD_SIZE-1),%eax
	cmpl $(THREAD_SIZE-20),%eax
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	jae nmi_stack_correct
	cmpl $sysenter_entry,12(%esp)
	je nmi_debug_stack_check
nmi_stack_correct:
	/* We have a RING0_INT_FRAME here */
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	xorl %edx,%edx			# zero error code
	movl %esp,%eax			# pt_regs pointer
	call do_nmi
	jmp restore_nocheck_notrace
	CFI_ENDPROC

nmi_stack_fixup:
	RING0_INT_FRAME
	FIX_STACK(12,nmi_stack_correct, 1)
	jmp nmi_stack_correct

nmi_debug_stack_check:
	/* We have a RING0_INT_FRAME here */
	cmpw $__KERNEL_CS,16(%esp)
	jne nmi_stack_correct
	cmpl $debug,(%esp)
	jb nmi_stack_correct
	cmpl $debug_esp_fix_insn,(%esp)
	ja nmi_stack_correct
	FIX_STACK(24,nmi_stack_correct, 1)
	jmp nmi_stack_correct

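/*
 * nmi_debug_stack_check restated as a sketch: the NMI needs the deeper
 * stack fixup only if it interrupted the debug handler inside its own
 * sysenter stack fixup, i.e. roughly:
 *
 *	if (eip >= (unsigned long)&debug &&
 *	    eip <= (unsigned long)&debug_esp_fix_insn)
 *		fix_stack();	// redo the fixup with the deeper offset (24)
 */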
nmi_16bit_stack:
	/* We have a RING0_INT_FRAME here.
	 *
	 * create the SS:ESP pointer used to lss back to the 16bit stack
	 */
	pushl %ss
	CFI_ADJUST_CFA_OFFSET 4
	pushl %esp
	CFI_ADJUST_CFA_OFFSET 4
	movzwl %sp, %esp
	addw $4, (%esp)
	/* copy the iret frame of 12 bytes */
	.rept 3
	pushl 16(%esp)
	CFI_ADJUST_CFA_OFFSET 4
	.endr
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	FIXUP_ESPFIX_STACK		# %eax == %esp
	CFI_ADJUST_CFA_OFFSET -20	# the frame has now moved
	xorl %edx,%edx			# zero error code
	call do_nmi
	RESTORE_REGS
	lss 12+4(%esp), %esp		# back to 16bit stack
1:	INTERRUPT_RETURN
	CFI_ENDPROC
.section __ex_table,"a"
	.align 4
	.long 1b,iret_exc
.previous
KPROBE_END(nmi)

KPROBE_ENTRY(int3)
	RING0_INT_FRAME
	pushl $-1			# mark this as an int
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	xorl %edx,%edx			# zero error code
	movl %esp,%eax			# pt_regs pointer
	call do_int3
	jmp ret_from_exception
	CFI_ENDPROC
KPROBE_END(int3)

ENTRY(overflow)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_overflow
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

ENTRY(bounds)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_bounds
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

ENTRY(invalid_op)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_invalid_op
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

ENTRY(coprocessor_segment_overrun)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_coprocessor_segment_overrun
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

ENTRY(invalid_TSS)
	RING0_EC_FRAME
	pushl $do_invalid_TSS
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

ENTRY(segment_not_present)
	RING0_EC_FRAME
	pushl $do_segment_not_present
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

ENTRY(stack_segment)
	RING0_EC_FRAME
	pushl $do_stack_segment
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

KPROBE_ENTRY(general_protection)
	RING0_EC_FRAME
	pushl $do_general_protection
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
KPROBE_END(general_protection)

ENTRY(alignment_check)
	RING0_EC_FRAME
	pushl $do_alignment_check
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

ENTRY(divide_error)
	RING0_INT_FRAME
	pushl $0			# no error code
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_divide_error
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl machine_check_vector
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
#endif

ENTRY(spurious_interrupt_bug)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_spurious_interrupt_bug
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

#ifdef CONFIG_STACK_UNWIND
ENTRY(arch_unwind_init_running)
	CFI_STARTPROC
	movl 4(%esp), %edx
	movl (%esp), %ecx
	leal 4(%esp), %eax
	movl %ebx, EBX(%edx)
	xorl %ebx, %ebx
	movl %ebx, ECX(%edx)
	movl %ebx, EDX(%edx)
	movl %esi, ESI(%edx)
	movl %edi, EDI(%edx)
	movl %ebp, EBP(%edx)
	movl %ebx, EAX(%edx)
	movl $__USER_DS, DS(%edx)
	movl $__USER_DS, ES(%edx)
	movl %ebx, ORIG_EAX(%edx)
	movl %ecx, EIP(%edx)
	movl 12(%esp), %ecx
	movl $__KERNEL_CS, CS(%edx)
	movl %ebx, EFLAGS(%edx)
	movl %eax, OLDESP(%edx)
	movl 8(%esp), %eax
	movl %ecx, 8(%esp)
	movl EBX(%edx), %ebx
	movl $__KERNEL_DS, OLDSS(%edx)
	jmpl *%eax
	CFI_ENDPROC
ENDPROC(arch_unwind_init_running)
#endif

ENTRY(kernel_thread_helper)
	pushl $0			# fake return address for unwinder
	CFI_STARTPROC
	movl %edx,%eax
	push %edx
	CFI_ADJUST_CFA_OFFSET 4
	call *%ebx
	push %eax
	CFI_ADJUST_CFA_OFFSET 4
	call do_exit
	CFI_ENDPROC
ENDPROC(kernel_thread_helper)

.section .rodata,"a"
#include "syscall_table.S"

syscall_table_size=(.-sys_call_table)