/*
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 * I changed all the .align's to 4 (16 byte alignment), as that's faster
 * on a 486.
 *
 * Stack layout in 'syscall_exit':
 *	ptrace needs to have all regs on the stack.
 *	if the order here is changed, it needs to be
 *	updated in fork.c:copy_process, signal.c:do_signal,
 *	ptrace.c and ptrace.h
 *
 *	 0(%esp) - %ebx
 *	 4(%esp) - %ecx
 *	 8(%esp) - %edx
 *	 C(%esp) - %esi
 *	10(%esp) - %edi
 *	14(%esp) - %ebp
 *	18(%esp) - %eax
 *	1C(%esp) - %ds
 *	20(%esp) - %es
 *	24(%esp) - %fs
 *	28(%esp) - %gs		saved iff !CONFIG_X86_32_LAZY_GS
 *	2C(%esp) - orig_eax
 *	30(%esp) - %eip
 *	34(%esp) - %cs
 *	38(%esp) - %eflags
 *	3C(%esp) - %oldesp
 *	40(%esp) - %oldss
 *
 * "current" is in register %ebx during any slow entries.
 */

#include <linux/linkage.h>
#include <linux/err.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/smp.h>
#include <asm/page_types.h>
#include <asm/percpu.h>
#include <asm/dwarf2.h>
#include <asm/processor-flags.h>
#include <asm/ftrace.h>
#include <asm/irq_vectors.h>
#include <asm/cpufeature.h>
#include <asm/alternative-asm.h>

/* Avoid __ASSEMBLY__'ifying <linux/audit.h> just for this.  */
#include <linux/elf-em.h>
#define AUDIT_ARCH_I386		(EM_386|__AUDIT_ARCH_LE)
#define __AUDIT_ARCH_LE		0x40000000

#ifndef CONFIG_AUDITSYSCALL
#define sysenter_audit		syscall_trace_entry
#define sysexit_audit		syscall_exit_work
#endif
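
/*
 * When CONFIG_AUDITSYSCALL is off, the two audit labels simply collapse
 * onto the generic slow paths, so the branches below stay #ifdef-free.
 * A rough C sketch of the resulting control flow (illustrative only,
 * not kernel code):
 *
 *	if (ti->flags & _TIF_WORK_SYSCALL_ENTRY)
 *		goto sysenter_audit;	// == syscall_trace_entry here
 */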

.section .entry.text, "ax"

/*
 * We use macros for low-level operations which need to be overridden
 * for paravirtualization.  The following will never clobber any registers:
 *   INTERRUPT_RETURN (aka. "iret")
 *   GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
 *   ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
 *
 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
 * Allowing a register to be clobbered can shrink the paravirt replacement
 * enough to patch inline, increasing performance.
 */

#define nr_syscalls ((syscall_table_size)/4)

#ifdef CONFIG_PREEMPT
#define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
#else
#define preempt_stop(clobbers)
#define resume_kernel		restore_all
#endif

.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
	testl $X86_EFLAGS_IF,PT_EFLAGS(%esp)	# interrupts off?
	jz 1f
	TRACE_IRQS_ON
1:
#endif
.endm

#ifdef CONFIG_VM86
#define resume_userspace_sig	check_userspace
#else
#define resume_userspace_sig	resume_userspace
#endif

/*
 * User gs save/restore
 *
 * %gs is used for userland TLS and kernel only uses it for stack
 * canary which is required to be at %gs:20 by gcc.  Read the comment
 * at the top of stackprotector.h for more info.
 *
 * Local labels 98 and 99 are used.
 */
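
/*
 * For orientation: with CONFIG_CC_STACKPROTECTOR, gcc emits %gs:20 loads
 * for the canary, which is why %gs must point at the per-cpu canary area
 * whenever lazy GS is not in effect.  The layout referred to above is
 * (as described in stackprotector.h):
 *
 *	struct stack_canary {
 *		char __pad[20];		// gcc hardcodes the %gs:20 offset
 *		unsigned long canary;
 *	};
 */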
#ifdef CONFIG_X86_32_LAZY_GS

 /* unfortunately push/pop can't be no-ops */
.macro PUSH_GS
	pushl_cfi $0
.endm
.macro POP_GS pop=0
	addl $(4 + \pop), %esp
	CFI_ADJUST_CFA_OFFSET -(4 + \pop)
.endm
.macro POP_GS_EX
.endm

 /* all the rest are no-ops */
.macro PTGS_TO_GS
.endm
.macro PTGS_TO_GS_EX
.endm
.macro GS_TO_REG reg
.endm
.macro REG_TO_PTGS reg
.endm
.macro SET_KERNEL_GS reg
.endm

#else	/* CONFIG_X86_32_LAZY_GS */

.macro PUSH_GS
	pushl_cfi %gs
	/*CFI_REL_OFFSET gs, 0*/
.endm

.macro POP_GS pop=0
98:	popl_cfi %gs
	/*CFI_RESTORE gs*/
	.if \pop <> 0
	add $\pop, %esp
	CFI_ADJUST_CFA_OFFSET -\pop
	.endif
.endm
.macro POP_GS_EX
.pushsection .fixup, "ax"
99:	movl $0, (%esp)
	jmp 98b
.section __ex_table, "a"
	.align 4
	.long 98b, 99b
.popsection
.endm

.macro PTGS_TO_GS
98:	mov PT_GS(%esp), %gs
.endm
.macro PTGS_TO_GS_EX
.pushsection .fixup, "ax"
99:	movl $0, PT_GS(%esp)
	jmp 98b
.section __ex_table, "a"
	.align 4
	.long 98b, 99b
.popsection
.endm

.macro GS_TO_REG reg
	movl %gs, \reg
	/*CFI_REGISTER gs, \reg*/
.endm
.macro REG_TO_PTGS reg
	movl \reg, PT_GS(%esp)
	/*CFI_REL_OFFSET gs, PT_GS*/
.endm
.macro SET_KERNEL_GS reg
	movl $(__KERNEL_STACK_CANARY), \reg
	movl \reg, %gs
.endm

#endif	/* CONFIG_X86_32_LAZY_GS */

.macro SAVE_ALL
	cld
	PUSH_GS
	pushl_cfi %fs
	/*CFI_REL_OFFSET fs, 0;*/
	pushl_cfi %es
	/*CFI_REL_OFFSET es, 0;*/
	pushl_cfi %ds
	/*CFI_REL_OFFSET ds, 0;*/
	pushl_cfi %eax
	CFI_REL_OFFSET eax, 0
	pushl_cfi %ebp
	CFI_REL_OFFSET ebp, 0
	pushl_cfi %edi
	CFI_REL_OFFSET edi, 0
	pushl_cfi %esi
	CFI_REL_OFFSET esi, 0
	pushl_cfi %edx
	CFI_REL_OFFSET edx, 0
	pushl_cfi %ecx
	CFI_REL_OFFSET ecx, 0
	pushl_cfi %ebx
	CFI_REL_OFFSET ebx, 0
	movl $(__USER_DS), %edx
	movl %edx, %ds
	movl %edx, %es
	movl $(__KERNEL_PERCPU), %edx
	movl %edx, %fs
	SET_KERNEL_GS %edx
.endm
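
/*
 * After SAVE_ALL the frame laid out above is exactly what C code sees as
 * a struct pt_regs.  For orientation, the 32-bit layout (roughly as in
 * asm/ptrace.h of this era) is:
 *
 *	struct pt_regs {
 *		unsigned long bx, cx, dx, si, di, bp, ax;
 *		unsigned long ds, es, fs, gs;
 *		unsigned long orig_ax, ip, cs, flags, sp, ss;
 *	};
 */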

.macro RESTORE_INT_REGS
	popl_cfi %ebx
	CFI_RESTORE ebx
	popl_cfi %ecx
	CFI_RESTORE ecx
	popl_cfi %edx
	CFI_RESTORE edx
	popl_cfi %esi
	CFI_RESTORE esi
	popl_cfi %edi
	CFI_RESTORE edi
	popl_cfi %ebp
	CFI_RESTORE ebp
	popl_cfi %eax
	CFI_RESTORE eax
.endm

.macro RESTORE_REGS pop=0
	RESTORE_INT_REGS
1:	popl_cfi %ds
	/*CFI_RESTORE ds;*/
2:	popl_cfi %es
	/*CFI_RESTORE es;*/
3:	popl_cfi %fs
	/*CFI_RESTORE fs;*/
	POP_GS \pop
.pushsection .fixup, "ax"
4:	movl $0, (%esp)
	jmp 1b
5:	movl $0, (%esp)
	jmp 2b
6:	movl $0, (%esp)
	jmp 3b
.section __ex_table, "a"
	.align 4
	.long 1b, 4b
	.long 2b, 5b
	.long 3b, 6b
.popsection
	POP_GS_EX
.endm
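
/*
 * The numbered labels above use the kernel's exception-table fixup: if
 * popping a stale selector faults, the fault handler looks the faulting
 * address up in __ex_table and resumes at the fixup code, which
 * substitutes a null selector.  Conceptually (a sketch of the mechanism):
 *
 *	struct exception_table_entry { unsigned long insn, fixup; };
 *	// fault at e->insn  =>  regs->ip = e->fixup, execution resumes
 */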

.macro RING0_INT_FRAME
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, 3*4
	/*CFI_OFFSET cs, -2*4;*/
	CFI_OFFSET eip, -3*4
.endm

.macro RING0_EC_FRAME
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, 4*4
	/*CFI_OFFSET cs, -2*4;*/
	CFI_OFFSET eip, -3*4
.endm

.macro RING0_PTREGS_FRAME
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, PT_OLDESP-PT_EBX
	/*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/
	CFI_OFFSET eip, PT_EIP-PT_OLDESP
	/*CFI_OFFSET es, PT_ES-PT_OLDESP;*/
	/*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/
	CFI_OFFSET eax, PT_EAX-PT_OLDESP
	CFI_OFFSET ebp, PT_EBP-PT_OLDESP
	CFI_OFFSET edi, PT_EDI-PT_OLDESP
	CFI_OFFSET esi, PT_ESI-PT_OLDESP
	CFI_OFFSET edx, PT_EDX-PT_OLDESP
	CFI_OFFSET ecx, PT_ECX-PT_OLDESP
	CFI_OFFSET ebx, PT_EBX-PT_OLDESP
.endm

ENTRY(ret_from_fork)
	CFI_STARTPROC
	pushl_cfi %eax
	call schedule_tail
	GET_THREAD_INFO(%ebp)
	popl_cfi %eax
	pushl_cfi $0x0202		# Reset kernel eflags
	popfl_cfi
	jmp syscall_exit
	CFI_ENDPROC
END(ret_from_fork)
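
/*
 * The $0x0202 pushed above is a pristine eflags image: X86_EFLAGS_IF
 * plus the architecturally always-set bit 1.  In other words (sketch):
 *
 *	#define CLEAN_KERNEL_EFLAGS	(X86_EFLAGS_IF | 0x2)	// == 0x0202
 */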

/*
 * Interrupt exit functions should be protected against kprobes
 */
	.pushsection .kprobes.text, "ax"
/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible which is why some of this is
 * less clear than it otherwise should be.
 */

	# userspace resumption stub bypassing syscall exit tracing
	ALIGN
	RING0_PTREGS_FRAME
ret_from_exception:
	preempt_stop(CLBR_ANY)
ret_from_intr:
	GET_THREAD_INFO(%ebp)
check_userspace:
	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS and CS
	movb PT_CS(%esp), %al
	andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
	cmpl $USER_RPL, %eax
	jb resume_kernel		# not returning to v8086 or userspace

ENTRY(resume_userspace)
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done on
					# int/exception return?
	jne work_pending
	jmp restore_all
END(ret_from_exception)

#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
	DISABLE_INTERRUPTS(CLBR_ANY)
	cmpl $0,TI_preempt_count(%ebp)	# non-zero preempt_count ?
	jnz restore_all
need_resched:
	movl TI_flags(%ebp), %ecx	# need_resched set ?
	testb $_TIF_NEED_RESCHED, %cl
	jz restore_all
	testl $X86_EFLAGS_IF,PT_EFLAGS(%esp)	# interrupts off (exception path) ?
	jz restore_all
	call preempt_schedule_irq
	jmp need_resched
END(resume_kernel)
#endif
	CFI_ENDPROC
/*
 * End of kprobes section
 */
	.popsection

/* SYSENTER_RETURN points to after the "sysenter" instruction in
   the vsyscall page.  See vsyscall-sysentry.S, which defines the symbol.  */

	# sysenter call handler stub
ENTRY(ia32_sysenter_target)
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, 0
	CFI_REGISTER esp, ebp
	movl TSS_sysenter_sp0(%esp),%esp
sysenter_past_esp:
	/*
	 * Interrupts are disabled here, but we can't trace that with
	 * TRACE_IRQS_OFF until enough kernel state has been saved to
	 * call it - and we re-enable interrupts immediately afterwards
	 * anyway.
	 */
	pushl_cfi $__USER_DS
	/*CFI_REL_OFFSET ss, 0*/
	pushl_cfi %ebp
	CFI_REL_OFFSET esp, 0
	pushfl_cfi
	orl $X86_EFLAGS_IF, (%esp)
	pushl_cfi $__USER_CS
	/*CFI_REL_OFFSET cs, 0*/
	/*
	 * Push current_thread_info()->sysenter_return to the stack.
	 * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
	 * pushed above; +8 corresponds to copy_thread's esp0 setting.
	 */
	pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
	CFI_REL_OFFSET eip, 0

	pushl_cfi %eax
	SAVE_ALL
	ENABLE_INTERRUPTS(CLBR_NONE)

/*
 * Load the potential sixth argument from user stack.
 * Careful about security.
 */
	cmpl $__PAGE_OFFSET-3,%ebp
	jae syscall_fault
1:	movl (%ebp),%ebp
	movl %ebp,PT_EBP(%esp)
.section __ex_table,"a"
	.align 4
	.long 1b,syscall_fault
.previous
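
/*
 * The bound check above permits a 4-byte read that ends at the last
 * user-visible byte.  In C terms (sketch):
 *
 *	if (user_ebp >= __PAGE_OFFSET - 3)	// bytes ebp..ebp+3 would
 *		goto syscall_fault;		// reach into kernel space
 */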

	GET_THREAD_INFO(%ebp)

	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
	jnz sysenter_audit
sysenter_do_call:
	cmpl $(nr_syscalls), %eax
	jae syscall_badsys
	call *sys_call_table(,%eax,4)
	movl %eax,PT_EAX(%esp)
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testl $_TIF_ALLWORK_MASK, %ecx
	jne sysexit_audit
sysenter_exit:
/* if something modifies registers it must also disable sysexit */
	movl PT_EIP(%esp), %edx
	movl PT_OLDESP(%esp), %ecx
	xorl %ebp,%ebp
	TRACE_IRQS_ON
1:	mov  PT_FS(%esp), %fs
	PTGS_TO_GS
	ENABLE_INTERRUPTS_SYSEXIT

#ifdef CONFIG_AUDITSYSCALL
sysenter_audit:
	testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
	jnz syscall_trace_entry
	addl $4,%esp
	CFI_ADJUST_CFA_OFFSET -4
	/* %esi already in 8(%esp)	   6th arg: 4th syscall arg */
	/* %edx already in 4(%esp)	   5th arg: 3rd syscall arg */
	/* %ecx already in 0(%esp)	   4th arg: 2nd syscall arg */
	movl %ebx,%ecx			/* 3rd arg: 1st syscall arg */
	movl %eax,%edx			/* 2nd arg: syscall number */
	movl $AUDIT_ARCH_I386,%eax	/* 1st arg: audit arch */
	call __audit_syscall_entry
	pushl_cfi %ebx
	movl PT_EAX(%esp),%eax		/* reload syscall number */
	jmp sysenter_do_call
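
/*
 * The register shuffle above follows the regparm(3) convention the
 * 32-bit kernel is compiled with: the first three C arguments travel in
 * %eax/%edx/%ecx and the rest on the stack.  The callee is the helper
 * introduced when audit_syscall_entry() was inlined; its shape is
 * roughly (argument names illustrative):
 *
 *	void __audit_syscall_entry(int arch, int major,
 *				   unsigned long a0, unsigned long a1,
 *				   unsigned long a2, unsigned long a3);
 */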

sysexit_audit:
	testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
	jne syscall_exit_work
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_ANY)
	movl %eax,%edx			/* second arg, syscall return value */
	cmpl $-MAX_ERRNO,%eax		/* in the -MAX_ERRNO..-1 error range? */
	setbe %al			/* 1 if not an error (success), 0 if error */
	movzbl %al,%eax			/* zero-extend that */
	call __audit_syscall_exit
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
	jne syscall_exit_work
	movl PT_EAX(%esp),%eax		/* reload syscall return value */
	jmp sysenter_exit
#endif
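
/*
 * The setbe sequence in sysexit_audit materializes the "success" flag
 * that the audit exit helper expects as its first argument:
 *
 *	void __audit_syscall_exit(int success, long return_code);
 *
 * i.e. roughly success = !IS_ERR_VALUE(eax).
 */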

	CFI_ENDPROC
.pushsection .fixup,"ax"
2:	movl $0,PT_FS(%esp)
	jmp 1b
.section __ex_table,"a"
	.align 4
	.long 1b,2b
.popsection
	PTGS_TO_GS_EX
ENDPROC(ia32_sysenter_target)

/*
 * syscall stub including irq exit should be protected against kprobes
 */
	.pushsection .kprobes.text, "ax"
	# system call handler stub
ENTRY(system_call)
	RING0_INT_FRAME			# can't unwind into user space anyway
	pushl_cfi %eax			# save orig_eax
	SAVE_ALL
	GET_THREAD_INFO(%ebp)
					# system call tracing in operation / emulation
	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
	jnz syscall_trace_entry
	cmpl $(nr_syscalls), %eax
	jae syscall_badsys
syscall_call:
	call *sys_call_table(,%eax,4)
	movl %eax,PT_EAX(%esp)		# store the return value
syscall_exit:
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testl $_TIF_ALLWORK_MASK, %ecx	# current->work
	jne syscall_exit_work

restore_all:
	TRACE_IRQS_IRET
restore_all_notrace:
	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS, SS and CS
	# Warning: PT_OLDSS(%esp) contains the wrong/random values if we
	# are returning to the kernel.
	# See comments in process.c:copy_thread() for details.
	movb PT_OLDSS(%esp), %ah
	movb PT_CS(%esp), %al
	andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
	cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
	CFI_REMEMBER_STATE
	je ldt_ss			# returning to user-space with LDT SS
restore_nocheck:
	RESTORE_REGS 4			# skip orig_eax/error_code
irq_return:
	INTERRUPT_RETURN
.section .fixup,"ax"
ENTRY(iret_exc)
	pushl $0			# no error code
	pushl $do_iret_error
	jmp error_code
.previous
.section __ex_table,"a"
	.align 4
	.long irq_return,iret_exc
.previous

	CFI_RESTORE_STATE
ldt_ss:
	larl PT_OLDSS(%esp), %eax
	jnz restore_nocheck
	testl $0x00400000, %eax		# returning to a 32-bit stack?
	jnz restore_nocheck		# all right, normal return

#ifdef CONFIG_PARAVIRT
/*
 * The kernel can't run on a non-flat stack if paravirt mode
 * is active.  Rather than try to fixup the high bits of
 * ESP, bypass this code entirely.  This may break DOSemu
 * and/or Wine support in a paravirt VM, although the option
 * is still available to implement the setting of the high
 * 16-bits in the INTERRUPT_RETURN paravirt-op.
 */
	cmpl $0, pv_info+PARAVIRT_enabled
	jne restore_nocheck
#endif

/*
 * Setup and switch to ESPFIX stack
 *
 * We're returning to userspace with a 16 bit stack. The CPU will not
 * restore the high word of ESP for us on executing iret... This is an
 * "official" bug of all the x86-compatible CPUs, which we can work
 * around to make dosemu and wine happy. We do this by preloading the
 * high word of ESP with the high word of the userspace ESP while
 * compensating for the offset by changing to the ESPFIX segment with
 * a base address that accounts for the difference.
 */
#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
	mov %esp, %edx			/* load kernel esp */
	mov PT_OLDESP(%esp), %eax	/* load userspace esp */
	mov %dx, %ax			/* eax: new kernel esp */
	sub %eax, %edx			/* offset (low word is 0) */
	shr $16, %edx
	mov %dl, GDT_ESPFIX_SS + 4	/* bits 16..23 */
	mov %dh, GDT_ESPFIX_SS + 7	/* bits 24..31 */
	pushl_cfi $__ESPFIX_SS
	pushl_cfi %eax			/* new kernel esp */
	/* Disable interrupts, but do not irqtrace this section: we
	 * will soon execute iret and the tracer was already set to
	 * the irqstate after the iret */
	DISABLE_INTERRUPTS(CLBR_EAX)
	lss (%esp), %esp		/* switch to espfix segment */
	CFI_ADJUST_CFA_OFFSET -8
	jmp restore_nocheck
	CFI_ENDPROC
ENDPROC(system_call)
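
/*
 * The espfix arithmetic above picks a new 32-bit ESP that shares its
 * high word with the user ESP, then patches the espfix GDT slot so the
 * segment base makes the linear address come out unchanged.  In C terms
 * (sketch):
 *
 *	new_esp = (user_esp & 0xffff0000) | (kern_esp & 0xffff);
 *	base    = kern_esp - new_esp;	// low word is zero
 *	// base + new_esp == kern_esp, while ESP's high word now matches
 *	// the stale value a 16-bit SS iret would leave for userspace
 */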

	# perform work that needs to be done immediately before resumption
	ALIGN
	RING0_PTREGS_FRAME		# can't unwind into user space anyway
work_pending:
	testb $_TIF_NEED_RESCHED, %cl
	jz work_notifysig
work_resched:
	call schedule
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done other
					# than syscall tracing?
	jz restore_all
	testb $_TIF_NEED_RESCHED, %cl
	jnz work_resched

work_notifysig:				# deal with pending signals and
					# notify-resume requests
#ifdef CONFIG_VM86
	testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
	movl %esp, %eax
	jne work_notifysig_v86		# returning to kernel-space or
					# vm86-space
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	xorl %edx, %edx
	call do_notify_resume
	jmp resume_userspace_sig

	ALIGN
work_notifysig_v86:
	pushl_cfi %ecx			# save ti_flags for do_notify_resume
	call save_v86_state		# %eax contains pt_regs pointer
	popl_cfi %ecx
	movl %eax, %esp
#else
	movl %esp, %eax
#endif
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	xorl %edx, %edx
	call do_notify_resume
	jmp resume_userspace_sig
END(work_pending)

	# perform syscall entry tracing
	ALIGN
syscall_trace_entry:
	movl $-ENOSYS,PT_EAX(%esp)
	movl %esp, %eax
	call syscall_trace_enter
	/* What it returned is what we'll actually use.  */
	cmpl $(nr_syscalls), %eax
	jnae syscall_call
	jmp syscall_exit
END(syscall_trace_entry)

	# perform syscall exit tracing
	ALIGN
syscall_exit_work:
	testl $_TIF_WORK_SYSCALL_EXIT, %ecx
	jz work_pending
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_ANY)	# could let syscall_trace_leave() call
					# schedule() instead
	movl %esp, %eax
	call syscall_trace_leave
	jmp resume_userspace
END(syscall_exit_work)
	CFI_ENDPROC

	RING0_INT_FRAME			# can't unwind into user space anyway
syscall_fault:
	GET_THREAD_INFO(%ebp)
	movl $-EFAULT,PT_EAX(%esp)
	jmp resume_userspace
END(syscall_fault)

syscall_badsys:
	movl $-ENOSYS,PT_EAX(%esp)
	jmp resume_userspace
END(syscall_badsys)
	CFI_ENDPROC
/*
 * End of kprobes section
 */
	.popsection

/*
 * System calls that need a pt_regs pointer.
 */
#define PTREGSCALL0(name) \
	ALIGN; \
ptregs_##name: \
	leal 4(%esp),%eax; \
	jmp sys_##name;

#define PTREGSCALL1(name) \
	ALIGN; \
ptregs_##name: \
	leal 4(%esp),%edx; \
	movl (PT_EBX+4)(%esp),%eax; \
	jmp sys_##name;

#define PTREGSCALL2(name) \
	ALIGN; \
ptregs_##name: \
	leal 4(%esp),%ecx; \
	movl (PT_ECX+4)(%esp),%edx; \
	movl (PT_EBX+4)(%esp),%eax; \
	jmp sys_##name;

#define PTREGSCALL3(name) \
	ALIGN; \
ptregs_##name: \
	CFI_STARTPROC; \
	leal 4(%esp),%eax; \
	pushl_cfi %eax; \
	movl PT_EDX(%eax),%ecx; \
	movl PT_ECX(%eax),%edx; \
	movl PT_EBX(%eax),%eax; \
	call sys_##name; \
	addl $4,%esp; \
	CFI_ADJUST_CFA_OFFSET -4; \
	ret; \
	CFI_ENDPROC; \
ENDPROC(ptregs_##name)

PTREGSCALL1(iopl)
PTREGSCALL0(fork)
PTREGSCALL0(vfork)
PTREGSCALL3(execve)
PTREGSCALL2(sigaltstack)
PTREGSCALL0(sigreturn)
PTREGSCALL0(rt_sigreturn)
PTREGSCALL2(vm86)
PTREGSCALL1(vm86old)

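/*
 * The PTREGSCALLn stubs reload the first n syscall arguments from the
 * saved frame and pass the pt_regs pointer as argument n+1, again per
 * regparm(3).  PTREGSCALL2(vm86), for instance, behaves like this C
 * sketch (the frame accessor is hypothetical):
 *
 *	long ptregs_vm86(void)
 *	{
 *		struct pt_regs *regs = current_syscall_frame(); // hypothetical
 *		return sys_vm86(regs->bx, regs->cx, regs);
 *	}
 */
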
/* Clone is an oddball.  The 4th arg is in %edi */
	ALIGN;
ptregs_clone:
	CFI_STARTPROC
	leal 4(%esp),%eax
	pushl_cfi %eax
	pushl_cfi PT_EDI(%eax)
	movl PT_EDX(%eax),%ecx
	movl PT_ECX(%eax),%edx
	movl PT_EBX(%eax),%eax
	call sys_clone
	addl $8,%esp
	CFI_ADJUST_CFA_OFFSET -8
	ret
	CFI_ENDPROC
ENDPROC(ptregs_clone)

.macro FIXUP_ESPFIX_STACK
/*
 * Switch back from the ESPFIX stack to the normal zero-based stack.
 *
 * We can't call C functions using the ESPFIX stack. This code reads
 * the high word of the segment base from the GDT and switches to the
 * normal stack, adjusting ESP with the matching offset.
 */
	/* fixup the stack */
	mov GDT_ESPFIX_SS + 4, %al	/* bits 16..23 */
	mov GDT_ESPFIX_SS + 7, %ah	/* bits 24..31 */
	shl $16, %eax
	addl %esp, %eax			/* the adjusted stack pointer */
	pushl_cfi $__KERNEL_DS
	pushl_cfi %eax
	lss (%esp), %esp		/* switch to the normal stack segment */
	CFI_ADJUST_CFA_OFFSET -8
.endm
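
/*
 * FIXUP_ESPFIX_STACK simply inverts the arithmetic done at ldt_ss: it
 * reads the patched base bytes back out of the espfix GDT entry and
 * adds them to the current ESP.  Sketch:
 *
 *	base_hi    = (gdt_byte7 << 24) | (gdt_byte4 << 16);
 *	normal_esp = base_hi + espfix_esp;
 */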
.macro UNWIND_ESPFIX_STACK
	movl %ss, %eax
	/* see if on espfix stack */
	cmpw $__ESPFIX_SS, %ax
	jne 27f
	movl $__KERNEL_DS, %eax
	movl %eax, %ds
	movl %eax, %es
	/* switch to normal stack */
	FIXUP_ESPFIX_STACK
27:
.endm

/*
 * Build the entry stubs and pointer table with some assembler magic.
 * We pack 7 stubs into a single 32-byte chunk, which will fit in a
 * single cache line on all modern x86 implementations.
 */
.section .init.rodata,"a"
ENTRY(interrupt)
.section .entry.text, "ax"
	.p2align 5
	.p2align CONFIG_X86_L1_CACHE_SHIFT
ENTRY(irq_entries_start)
	RING0_INT_FRAME
vector=FIRST_EXTERNAL_VECTOR
.rept (NR_VECTORS-FIRST_EXTERNAL_VECTOR+6)/7
	.balign 32
  .rept	7
    .if vector < NR_VECTORS
      .if vector <> FIRST_EXTERNAL_VECTOR
	CFI_ADJUST_CFA_OFFSET -4
      .endif
1:	pushl_cfi $(~vector+0x80)	/* Note: always in signed byte range */
      .if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6
	jmp 2f
      .endif
      .previous
	.long 1b
      .section .entry.text, "ax"
vector=vector+1
    .endif
  .endr
2:	jmp common_interrupt
.endr
END(irq_entries_start)

.previous
END(interrupt)
.previous
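
/*
 * Each stub is a two-byte "pushl imm8" plus a two-byte short jump, which
 * is how seven of them plus the shared "jmp common_interrupt" squeeze
 * into one 32-byte line.  The imm8 trick works because (sketch):
 *
 *	pushed = ~vector + 0x80;	// always in [-128, 127] => push imm8
 *	// common_interrupt then undoes the bias:
 *	orig_ax = pushed - 0x80;	// == ~vector, decoded by do_IRQ
 */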

/*
 * the CPU automatically disables interrupts when executing an IRQ vector,
 * so IRQ-flags tracing has to follow that:
 */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
	addl $-0x80,(%esp)	/* Adjust vector into the [-256,-1] range */
	SAVE_ALL
	TRACE_IRQS_OFF
	movl %esp,%eax
	call do_IRQ
	jmp ret_from_intr
ENDPROC(common_interrupt)
	CFI_ENDPROC

/*
 * Irq entries should be protected against kprobes
 */
	.pushsection .kprobes.text, "ax"
#define BUILD_INTERRUPT3(name, nr, fn)	\
ENTRY(name)				\
	RING0_INT_FRAME;		\
	pushl_cfi $~(nr);		\
	SAVE_ALL;			\
	TRACE_IRQS_OFF			\
	movl %esp,%eax;			\
	call fn;			\
	jmp ret_from_intr;		\
	CFI_ENDPROC;			\
ENDPROC(name)

#define BUILD_INTERRUPT(name, nr)	BUILD_INTERRUPT3(name, nr, smp_##name)

/* The include is where all of the SMP etc. interrupts come from */
#include <asm/entry_arch.h>

ENTRY(coprocessor_error)
	RING0_INT_FRAME
	pushl_cfi $0
	pushl_cfi $do_coprocessor_error
	jmp error_code
	CFI_ENDPROC
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
	RING0_INT_FRAME
	pushl_cfi $0
#ifdef CONFIG_X86_INVD_BUG
	/* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
661:	pushl_cfi $do_general_protection
662:
.section .altinstructions,"a"
	altinstruction_entry 661b, 663f, X86_FEATURE_XMM, 662b-661b, 664f-663f
.previous
.section .altinstr_replacement,"ax"
663:	pushl $do_simd_coprocessor_error
664:
.previous
#else
	pushl_cfi $do_simd_coprocessor_error
#endif
	jmp error_code
	CFI_ENDPROC
END(simd_coprocessor_error)

ENTRY(device_not_available)
	RING0_INT_FRAME
	pushl_cfi $-1			# mark this as an int
	pushl_cfi $do_device_not_available
	jmp error_code
	CFI_ENDPROC
END(device_not_available)

#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
	iret
.section __ex_table,"a"
	.align 4
	.long native_iret, iret_exc
.previous
END(native_iret)

ENTRY(native_irq_enable_sysexit)
	sti
	sysexit
END(native_irq_enable_sysexit)
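
/*
 * Reminder of the hardware contract relied on at sysenter_exit: SYSEXIT
 * resumes ring 3 with EIP taken from %edx and ESP from %ecx (CS/SS come
 * from the SYSENTER MSRs), which is why exactly those two registers are
 * loaded from the saved frame before ENABLE_INTERRUPTS_SYSEXIT:
 *
 *	// in effect:  eip = %edx;  esp = %ecx;  cpl = 3;
 */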
#endif

ENTRY(overflow)
	RING0_INT_FRAME
	pushl_cfi $0
	pushl_cfi $do_overflow
	jmp error_code
	CFI_ENDPROC
END(overflow)

ENTRY(bounds)
	RING0_INT_FRAME
	pushl_cfi $0
	pushl_cfi $do_bounds
	jmp error_code
	CFI_ENDPROC
END(bounds)

ENTRY(invalid_op)
	RING0_INT_FRAME
	pushl_cfi $0
	pushl_cfi $do_invalid_op
	jmp error_code
	CFI_ENDPROC
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
	RING0_INT_FRAME
	pushl_cfi $0
	pushl_cfi $do_coprocessor_segment_overrun
	jmp error_code
	CFI_ENDPROC
END(coprocessor_segment_overrun)

ENTRY(invalid_TSS)
	RING0_EC_FRAME
	pushl_cfi $do_invalid_TSS
	jmp error_code
	CFI_ENDPROC
END(invalid_TSS)

ENTRY(segment_not_present)
	RING0_EC_FRAME
	pushl_cfi $do_segment_not_present
	jmp error_code
	CFI_ENDPROC
END(segment_not_present)

ENTRY(stack_segment)
	RING0_EC_FRAME
	pushl_cfi $do_stack_segment
	jmp error_code
	CFI_ENDPROC
END(stack_segment)

ENTRY(alignment_check)
	RING0_EC_FRAME
	pushl_cfi $do_alignment_check
	jmp error_code
	CFI_ENDPROC
END(alignment_check)

ENTRY(divide_error)
	RING0_INT_FRAME
	pushl_cfi $0			# no error code
	pushl_cfi $do_divide_error
	jmp error_code
	CFI_ENDPROC
END(divide_error)

#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
	RING0_INT_FRAME
	pushl_cfi $0
	pushl_cfi machine_check_vector
	jmp error_code
	CFI_ENDPROC
END(machine_check)
#endif

ENTRY(spurious_interrupt_bug)
	RING0_INT_FRAME
	pushl_cfi $0
	pushl_cfi $do_spurious_interrupt_bug
	jmp error_code
	CFI_ENDPROC
END(spurious_interrupt_bug)
/*
 * End of kprobes section
 */
	.popsection

ENTRY(kernel_thread_helper)
	pushl $0			# fake return address for unwinder
	CFI_STARTPROC
	movl %edi,%eax
	call *%esi
	call do_exit
	ud2				# padding for call trace
	CFI_ENDPROC
ENDPROC(kernel_thread_helper)
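
/*
 * The C side (kernel_thread()) starts the child here with the thread
 * function in %esi and its argument in %edi, so under regparm(3) the
 * helper is equivalent to this sketch:
 *
 *	void kernel_thread_helper(void)
 *	{
 *		do_exit(fn(arg));	// fn = %esi, arg = %edi
 *	}
 */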

#ifdef CONFIG_XEN
/* Xen doesn't set %esp to be precisely what the normal sysenter
   entrypoint expects, so fix it up before using the normal path. */
ENTRY(xen_sysenter_target)
	RING0_INT_FRAME
	addl $5*4, %esp		/* remove xen-provided frame */
	CFI_ADJUST_CFA_OFFSET -5*4
	jmp sysenter_past_esp
	CFI_ENDPROC

ENTRY(xen_hypervisor_callback)
	CFI_STARTPROC
	pushl_cfi $0
	SAVE_ALL
	TRACE_IRQS_OFF

	/* Check to see if we got the event in the critical
	   region in xen_iret_direct, after we've reenabled
	   events and checked for pending events.  This simulates
	   iret instruction's behaviour where it delivers a
	   pending interrupt when enabling interrupts. */
	movl PT_EIP(%esp),%eax
	cmpl $xen_iret_start_crit,%eax
	jb   1f
	cmpl $xen_iret_end_crit,%eax
	jae  1f

	jmp  xen_iret_crit_fixup

ENTRY(xen_do_upcall)
1:	mov %esp, %eax
	call xen_evtchn_do_upcall
	jmp  ret_from_intr
	CFI_ENDPROC
ENDPROC(xen_hypervisor_callback)

# Hypervisor uses this for application faults while it executes.
# We get here for two reasons:
#  1. Fault while reloading DS, ES, FS or GS
#  2. Fault while executing IRET
# Category 1 we fix up by reattempting the load, and zeroing the segment
# register if the load fails.
# Category 2 we fix up by jumping to do_iret_error. We cannot use the
# normal Linux return path in this case because if we use the IRET hypercall
# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
# We distinguish between categories by maintaining a status value in EAX.
ENTRY(xen_failsafe_callback)
	CFI_STARTPROC
	pushl_cfi %eax
	movl $1,%eax
1:	mov 4(%esp),%ds
2:	mov 8(%esp),%es
3:	mov 12(%esp),%fs
4:	mov 16(%esp),%gs
	testl %eax,%eax
	popl_cfi %eax
	lea 16(%esp),%esp
	CFI_ADJUST_CFA_OFFSET -16
	jz 5f
	addl $16,%esp
	jmp iret_exc		# EAX != 0 => Category 2 (Bad IRET)
5:	pushl_cfi $0		# EAX == 0 => Category 1 (Bad segment)
	SAVE_ALL
	jmp ret_from_exception
	CFI_ENDPROC

.section .fixup,"ax"
6:	xorl %eax,%eax
	movl %eax,4(%esp)
	jmp 1b
7:	xorl %eax,%eax
	movl %eax,8(%esp)
	jmp 2b
8:	xorl %eax,%eax
	movl %eax,12(%esp)
	jmp 3b
9:	xorl %eax,%eax
	movl %eax,16(%esp)
	jmp 4b
.previous
.section __ex_table,"a"
	.align 4
	.long 1b,6b
	.long 2b,7b
	.long 3b,8b
	.long 4b,9b
.previous
ENDPROC(xen_failsafe_callback)

BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
		xen_evtchn_do_upcall)

#endif	/* CONFIG_XEN */

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE

ENTRY(mcount)
	ret
END(mcount)

ENTRY(ftrace_caller)
	cmpl $0, function_trace_stop
	jne  ftrace_stub

	pushl %eax
	pushl %ecx
	pushl %edx
	movl 0xc(%esp), %eax
	movl 0x4(%ebp), %edx
	subl $MCOUNT_INSN_SIZE, %eax

.globl ftrace_call
ftrace_call:
	call ftrace_stub

	popl %edx
	popl %ecx
	popl %eax
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
	jmp ftrace_stub
#endif

.globl ftrace_stub
ftrace_stub:
	ret
END(ftrace_caller)
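
/*
 * With dynamic ftrace, mcount is a bare "ret" and call sites are
 * nop-patched at boot; only the ftrace_call site above is live-patched
 * when tracing is enabled.  The tracer gets the instrumented function in
 * %eax (return address minus MCOUNT_INSN_SIZE) and its caller in %edx,
 * i.e. it is invoked as if declared (per the ftrace_func_t shape of
 * this era):
 *
 *	void tracer(unsigned long ip, unsigned long parent_ip);
 */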

#else /* ! CONFIG_DYNAMIC_FTRACE */

ENTRY(mcount)
	cmpl $0, function_trace_stop
	jne  ftrace_stub

	cmpl $ftrace_stub, ftrace_trace_function
	jnz trace
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	cmpl $ftrace_stub, ftrace_graph_return
	jnz ftrace_graph_caller

	cmpl $ftrace_graph_entry_stub, ftrace_graph_entry
	jnz ftrace_graph_caller
#endif
.globl ftrace_stub
ftrace_stub:
	ret

	/* taken from glibc */
trace:
	pushl %eax
	pushl %ecx
	pushl %edx
	movl 0xc(%esp), %eax
	movl 0x4(%ebp), %edx
	subl $MCOUNT_INSN_SIZE, %eax

	call *ftrace_trace_function

	popl %edx
	popl %ecx
	popl %eax
	jmp ftrace_stub
END(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
	cmpl $0, function_trace_stop
	jne ftrace_stub

	pushl %eax
	pushl %ecx
	pushl %edx
	movl 0xc(%esp), %edx
	lea 0x4(%ebp), %eax
	movl (%ebp), %ecx
	subl $MCOUNT_INSN_SIZE, %edx
	call prepare_ftrace_return
	popl %edx
	popl %ecx
	popl %eax
	ret
END(ftrace_graph_caller)

.globl return_to_handler
return_to_handler:
	pushl %eax
	pushl %edx
	movl %ebp, %eax
	call ftrace_return_to_handler
	movl %eax, %ecx
	popl %edx
	popl %eax
	jmp *%ecx
#endif
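
/*
 * The graph tracer works by having prepare_ftrace_return() replace the
 * real return address with return_to_handler; on function exit the C
 * helper hands the original address back and we jump to it via %ecx:
 *
 *	unsigned long ftrace_return_to_handler(unsigned long frame_pointer);
 */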

.section .rodata,"a"
#include "syscall_table_32.S"

syscall_table_size=(.-sys_call_table)
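
/*
 * sys_call_table is an array of 32-bit function pointers, which is what
 * the nr_syscalls macro near the top of this file divides by:
 *
 *	nr_syscalls = syscall_table_size / 4;	// 4 == sizeof(void *) on i386
 */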

/*
 * Some functions should be protected against kprobes
 */
	.pushsection .kprobes.text, "ax"

ENTRY(page_fault)
	RING0_EC_FRAME
	pushl_cfi $do_page_fault
	ALIGN
error_code:
	/* the function address is in %gs's slot on the stack */
	pushl_cfi %fs
	/*CFI_REL_OFFSET fs, 0*/
	pushl_cfi %es
	/*CFI_REL_OFFSET es, 0*/
	pushl_cfi %ds
	/*CFI_REL_OFFSET ds, 0*/
	pushl_cfi %eax
	CFI_REL_OFFSET eax, 0
	pushl_cfi %ebp
	CFI_REL_OFFSET ebp, 0
	pushl_cfi %edi
	CFI_REL_OFFSET edi, 0
	pushl_cfi %esi
	CFI_REL_OFFSET esi, 0
	pushl_cfi %edx
	CFI_REL_OFFSET edx, 0
	pushl_cfi %ecx
	CFI_REL_OFFSET ecx, 0
	pushl_cfi %ebx
	CFI_REL_OFFSET ebx, 0
	cld
	movl $(__KERNEL_PERCPU), %ecx
	movl %ecx, %fs
	UNWIND_ESPFIX_STACK
	GS_TO_REG %ecx
	movl PT_GS(%esp), %edi		# get the function address
	movl PT_ORIG_EAX(%esp), %edx	# get the error code
	movl $-1, PT_ORIG_EAX(%esp)	# no syscall to restart
	REG_TO_PTGS %ecx
	SET_KERNEL_GS %ecx
	movl $(__USER_DS), %ecx
	movl %ecx, %ds
	movl %ecx, %es
	TRACE_IRQS_OFF
	movl %esp,%eax			# pt_regs pointer
	call *%edi
	jmp ret_from_exception
	CFI_ENDPROC
END(page_fault)

/*
 * Debug traps and NMI can happen at the one SYSENTER instruction
 * that sets up the real kernel stack. Check here, since we can't
 * allow the wrong stack to be used.
 *
 * "TSS_sysenter_sp0+12" is because the NMI/debug handler will have
 * already pushed 3 words if it hits on the sysenter instruction:
 * eflags, cs and eip.
 *
 * We just load the right stack, and push the three (known) values
 * by hand onto the new stack - while updating the return eip past
 * the instruction that would have done it for sysenter.
 */
.macro FIX_STACK offset ok label
	cmpw $__KERNEL_CS, 4(%esp)
	jne \ok
\label:
	movl TSS_sysenter_sp0 + \offset(%esp), %esp
	CFI_DEF_CFA esp, 0
	CFI_UNDEFINED eip
	pushfl_cfi
	pushl_cfi $__KERNEL_CS
	pushl_cfi $sysenter_past_esp
	CFI_REL_OFFSET eip, 0
.endm

ENTRY(debug)
	RING0_INT_FRAME
	cmpl $ia32_sysenter_target,(%esp)
	jne debug_stack_correct
	FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
debug_stack_correct:
	pushl_cfi $-1			# mark this as an int
	SAVE_ALL
	TRACE_IRQS_OFF
	xorl %edx,%edx			# error code 0
	movl %esp,%eax			# pt_regs pointer
	call do_debug
	jmp ret_from_exception
	CFI_ENDPROC
END(debug)

/*
 * NMI is doubly nasty. It can happen _while_ we're handling
 * a debug fault, and the debug fault hasn't yet been able to
 * clear up the stack. So we first check whether we got an
 * NMI on the sysenter entry path, but after that we need to
 * check whether we got an NMI on the debug path where the debug
 * fault happened on the sysenter path.
 */
ENTRY(nmi)
	RING0_INT_FRAME
	pushl_cfi %eax
	movl %ss, %eax
	cmpw $__ESPFIX_SS, %ax
	popl_cfi %eax
	je nmi_espfix_stack
	cmpl $ia32_sysenter_target,(%esp)
	je nmi_stack_fixup
	pushl_cfi %eax
	movl %esp,%eax
	/* Do not access memory above the end of our stack page,
	 * it might not exist.
	 */
	andl $(THREAD_SIZE-1),%eax
	cmpl $(THREAD_SIZE-20),%eax
	popl_cfi %eax
	jae nmi_stack_correct
	cmpl $ia32_sysenter_target,12(%esp)
	je nmi_debug_stack_check
nmi_stack_correct:
	/* We have a RING0_INT_FRAME here */
	pushl_cfi %eax
	SAVE_ALL
	xorl %edx,%edx			# zero error code
	movl %esp,%eax			# pt_regs pointer
	call do_nmi
	jmp restore_all_notrace
	CFI_ENDPROC

nmi_stack_fixup:
	RING0_INT_FRAME
	FIX_STACK 12, nmi_stack_correct, 1
	jmp nmi_stack_correct

nmi_debug_stack_check:
	/* We have a RING0_INT_FRAME here */
	cmpw $__KERNEL_CS,16(%esp)
	jne nmi_stack_correct
	cmpl $debug,(%esp)
	jb nmi_stack_correct
	cmpl $debug_esp_fix_insn,(%esp)
	ja nmi_stack_correct
	FIX_STACK 24, nmi_stack_correct, 1
	jmp nmi_stack_correct

nmi_espfix_stack:
	/* We have a RING0_INT_FRAME here.
	 *
	 * Create the SS:ESP pointer that lss uses to switch back.
	 */
	pushl_cfi %ss
	pushl_cfi %esp
	addl $4, (%esp)
	/* copy the iret frame of 12 bytes */
	.rept 3
	pushl_cfi 16(%esp)
	.endr
	pushl_cfi %eax
	SAVE_ALL
	FIXUP_ESPFIX_STACK		# %eax == %esp
	xorl %edx,%edx			# zero error code
	call do_nmi
	RESTORE_REGS
	lss 12+4(%esp), %esp		# back to espfix stack
	CFI_ADJUST_CFA_OFFSET -24
	jmp irq_return
	CFI_ENDPROC
END(nmi)
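
/*
 * The THREAD_SIZE-20 test in the NMI entry guards the later peek at
 * 12(%esp): 20 bytes is one 5-word exception frame, so the check is
 * effectively (sketch):
 *
 *	if ((esp & (THREAD_SIZE - 1)) < THREAD_SIZE - 20)
 *		;	// safe to inspect the frame words on this page
 */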

ENTRY(int3)
	RING0_INT_FRAME
	pushl_cfi $-1			# mark this as an int
	SAVE_ALL
	TRACE_IRQS_OFF
	xorl %edx,%edx			# zero error code
	movl %esp,%eax			# pt_regs pointer
	call do_int3
	jmp ret_from_exception
	CFI_ENDPROC
END(int3)

ENTRY(general_protection)
	RING0_EC_FRAME
	pushl_cfi $do_general_protection
	jmp error_code
	CFI_ENDPROC
END(general_protection)

#ifdef CONFIG_KVM_GUEST
ENTRY(async_page_fault)
	RING0_EC_FRAME
	pushl_cfi $do_async_page_fault
	jmp error_code
	CFI_ENDPROC
END(async_page_fault)
#endif

/*
 * End of kprobes section
 */
	.popsection