arch/x86/kernel/entry_64.S
1 /*
2 * linux/arch/x86_64/entry.S
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
6 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
7 */
8
9 /*
10 * entry.S contains the system-call and fault low-level handling routines.
11 *
12 * NOTE: This code handles signal recognition, which happens every time
13 * after an interrupt and after each system call.
14 *
15 * Normal syscalls and interrupts don't save a full stack frame; this is
16 * only done for syscall tracing, signals, or fork/exec et al.
17 *
18 * A note on terminology:
19 * - top of stack: Architecture defined interrupt frame from SS to RIP
20 * at the top of the kernel process stack.
21 * - partial stack frame: partially saved registers up to R11.
22 * - full stack frame: Like the partial stack frame, but with all registers saved.
23 *
24 * Some macro usage:
25 * - CFI macros are used to generate dwarf2 unwind information for better
26 * backtraces. They don't change any code.
27 * - SAVE_ALL/RESTORE_ALL - Save/restore all registers
28 * - SAVE_ARGS/RESTORE_ARGS - Save/restore registers that C functions modify.
29 * There are unfortunately lots of special cases where some registers are
30 * not touched. The macro is a big mess that should be cleaned up.
31 * - SAVE_REST/RESTORE_REST - Handle the registers not saved by SAVE_ARGS.
32 * Gives a full stack frame.
33 * - ENTRY/END - Define functions in the symbol table.
34 * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - Fix up the hardware stack
35 * frame that is otherwise undefined after a SYSCALL
36 * - TRACE_IRQ_* - Trace hard interrupt state for lock debugging.
37 * - errorentry/paranoidentry/zeroentry - Define exception entry points.
38 */
39
40 #include <linux/linkage.h>
41 #include <asm/segment.h>
42 #include <asm/cache.h>
43 #include <asm/errno.h>
44 #include <asm/dwarf2.h>
45 #include <asm/calling.h>
46 #include <asm/asm-offsets.h>
47 #include <asm/msr.h>
48 #include <asm/unistd.h>
49 #include <asm/thread_info.h>
50 #include <asm/hw_irq.h>
51 #include <asm/page.h>
52 #include <asm/irqflags.h>
53 #include <asm/paravirt.h>
54 #include <asm/ftrace.h>
55
56 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
57 #include <linux/elf-em.h>
58 #define AUDIT_ARCH_X86_64 (EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
59 #define __AUDIT_ARCH_64BIT 0x80000000
60 #define __AUDIT_ARCH_LE 0x40000000
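/*
 * AUDIT_ARCH_X86_64 simply combines the ELF machine code for x86-64
 * (EM_X86_64) with the "64-bit" and "little-endian" audit flags defined
 * above; it is passed to audit_syscall_entry() below as the arch argument.
 */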
61
62 .code64
63
64 #ifdef CONFIG_FTRACE
65 #ifdef CONFIG_DYNAMIC_FTRACE
66 ENTRY(mcount)
67
68 subq $0x38, %rsp
69 movq %rax, (%rsp)
70 movq %rcx, 8(%rsp)
71 movq %rdx, 16(%rsp)
72 movq %rsi, 24(%rsp)
73 movq %rdi, 32(%rsp)
74 movq %r8, 40(%rsp)
75 movq %r9, 48(%rsp)
76
77 movq 0x38(%rsp), %rdi
78 subq $MCOUNT_INSN_SIZE, %rdi
79
80 .globl mcount_call
81 mcount_call:
82 call ftrace_stub
83
84 movq 48(%rsp), %r9
85 movq 40(%rsp), %r8
86 movq 32(%rsp), %rdi
87 movq 24(%rsp), %rsi
88 movq 16(%rsp), %rdx
89 movq 8(%rsp), %rcx
90 movq (%rsp), %rax
91 addq $0x38, %rsp
92
93 retq
94 END(mcount)
95
96 ENTRY(ftrace_caller)
97
98 /* taken from glibc */
99 subq $0x38, %rsp
100 movq %rax, (%rsp)
101 movq %rcx, 8(%rsp)
102 movq %rdx, 16(%rsp)
103 movq %rsi, 24(%rsp)
104 movq %rdi, 32(%rsp)
105 movq %r8, 40(%rsp)
106 movq %r9, 48(%rsp)
107
108 movq 0x38(%rsp), %rdi
109 movq 8(%rbp), %rsi
110 subq $MCOUNT_INSN_SIZE, %rdi
111
112 .globl ftrace_call
113 ftrace_call:
114 call ftrace_stub
115
116 movq 48(%rsp), %r9
117 movq 40(%rsp), %r8
118 movq 32(%rsp), %rdi
119 movq 24(%rsp), %rsi
120 movq 16(%rsp), %rdx
121 movq 8(%rsp), %rcx
122 movq (%rsp), %rax
123 addq $0x38, %rsp
124
125 .globl ftrace_stub
126 ftrace_stub:
127 retq
128 END(ftrace_caller)
129
130 #else /* ! CONFIG_DYNAMIC_FTRACE */
131 ENTRY(mcount)
132 cmpq $ftrace_stub, ftrace_trace_function
133 jnz trace
134 .globl ftrace_stub
135 ftrace_stub:
136 retq
137
138 trace:
139 /* taken from glibc */
140 subq $0x38, %rsp
141 movq %rax, (%rsp)
142 movq %rcx, 8(%rsp)
143 movq %rdx, 16(%rsp)
144 movq %rsi, 24(%rsp)
145 movq %rdi, 32(%rsp)
146 movq %r8, 40(%rsp)
147 movq %r9, 48(%rsp)
148
149 movq 0x38(%rsp), %rdi
150 movq 8(%rbp), %rsi
151 subq $MCOUNT_INSN_SIZE, %rdi
152
153 call *ftrace_trace_function
154
155 movq 48(%rsp), %r9
156 movq 40(%rsp), %r8
157 movq 32(%rsp), %rdi
158 movq 24(%rsp), %rsi
159 movq 16(%rsp), %rdx
160 movq 8(%rsp), %rcx
161 movq (%rsp), %rax
162 addq $0x38, %rsp
163
164 jmp ftrace_stub
165 END(mcount)
166 #endif /* CONFIG_DYNAMIC_FTRACE */
167 #endif /* CONFIG_FTRACE */
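/*
 * All of the mcount/ftrace_caller variants above share the same pattern:
 * 0x38 (7 * 8) bytes are reserved to spill the seven caller-saved
 * registers a C tracer may clobber (rax, rcx, rdx, rsi, rdi, r8, r9),
 * %rdi is loaded with the address of the traced call site (the return
 * address minus MCOUNT_INSN_SIZE), ftrace_caller and the non-dynamic
 * trace path additionally load %rsi with the parent's return address
 * from 8(%rbp), and all registers are restored before returning.
 * With CONFIG_DYNAMIC_FTRACE the calls at mcount_call and ftrace_call
 * initially point at ftrace_stub and are meant to be patched at run time.
 */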
168
169 #ifndef CONFIG_PREEMPT
170 #define retint_kernel retint_restore_args
171 #endif
172
173 #ifdef CONFIG_PARAVIRT
174 ENTRY(native_usergs_sysret64)
175 swapgs
176 sysretq
177 #endif /* CONFIG_PARAVIRT */
178
179
180 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
181 #ifdef CONFIG_TRACE_IRQFLAGS
182 bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */
183 jnc 1f
184 TRACE_IRQS_ON
185 1:
186 #endif
187 .endm
188
189 /*
190 * C code is not supposed to know about the undefined top of stack. Every time
191 * a C function with a pt_regs argument is called from the SYSCALL-based
192 * fast path, FIXUP_TOP_OF_STACK is needed.
193 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
194 * manipulation.
195 */
196
197 /* %rsp:at FRAMEEND */
198 .macro FIXUP_TOP_OF_STACK tmp
199 movq %gs:pda_oldrsp,\tmp
200 movq \tmp,RSP(%rsp)
201 movq $__USER_DS,SS(%rsp)
202 movq $__USER_CS,CS(%rsp)
203 movq $-1,RCX(%rsp)
204 movq R11(%rsp),\tmp /* get eflags */
205 movq \tmp,EFLAGS(%rsp)
206 .endm
207
208 .macro RESTORE_TOP_OF_STACK tmp,offset=0
209 movq RSP-\offset(%rsp),\tmp
210 movq \tmp,%gs:pda_oldrsp
211 movq EFLAGS-\offset(%rsp),\tmp
212 movq \tmp,R11-\offset(%rsp)
213 .endm
214
215 .macro FAKE_STACK_FRAME child_rip
216 /* push in order ss, rsp, eflags, cs, rip */
217 xorl %eax, %eax
218 pushq $__KERNEL_DS /* ss */
219 CFI_ADJUST_CFA_OFFSET 8
220 /*CFI_REL_OFFSET ss,0*/
221 pushq %rax /* rsp */
222 CFI_ADJUST_CFA_OFFSET 8
223 CFI_REL_OFFSET rsp,0
224 pushq $(1<<9) /* eflags - interrupts on */
225 CFI_ADJUST_CFA_OFFSET 8
226 /*CFI_REL_OFFSET rflags,0*/
227 pushq $__KERNEL_CS /* cs */
228 CFI_ADJUST_CFA_OFFSET 8
229 /*CFI_REL_OFFSET cs,0*/
230 pushq \child_rip /* rip */
231 CFI_ADJUST_CFA_OFFSET 8
232 CFI_REL_OFFSET rip,0
233 pushq %rax /* orig rax */
234 CFI_ADJUST_CFA_OFFSET 8
235 .endm
236
237 .macro UNFAKE_STACK_FRAME
238 addq $8*6, %rsp
239 CFI_ADJUST_CFA_OFFSET -(6*8)
240 .endm
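/*
 * FAKE_STACK_FRAME pushes six quadwords (ss, rsp, eflags, cs, rip and a
 * zero orig_rax), which is why UNFAKE_STACK_FRAME pops exactly 8*6 bytes.
 */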
241
242 .macro CFI_DEFAULT_STACK start=1
243 .if \start
244 CFI_STARTPROC simple
245 CFI_SIGNAL_FRAME
246 CFI_DEF_CFA rsp,SS+8
247 .else
248 CFI_DEF_CFA_OFFSET SS+8
249 .endif
250 CFI_REL_OFFSET r15,R15
251 CFI_REL_OFFSET r14,R14
252 CFI_REL_OFFSET r13,R13
253 CFI_REL_OFFSET r12,R12
254 CFI_REL_OFFSET rbp,RBP
255 CFI_REL_OFFSET rbx,RBX
256 CFI_REL_OFFSET r11,R11
257 CFI_REL_OFFSET r10,R10
258 CFI_REL_OFFSET r9,R9
259 CFI_REL_OFFSET r8,R8
260 CFI_REL_OFFSET rax,RAX
261 CFI_REL_OFFSET rcx,RCX
262 CFI_REL_OFFSET rdx,RDX
263 CFI_REL_OFFSET rsi,RSI
264 CFI_REL_OFFSET rdi,RDI
265 CFI_REL_OFFSET rip,RIP
266 /*CFI_REL_OFFSET cs,CS*/
267 /*CFI_REL_OFFSET rflags,EFLAGS*/
268 CFI_REL_OFFSET rsp,RSP
269 /*CFI_REL_OFFSET ss,SS*/
270 .endm
271 /*
272 * A newly forked process directly context switches into this.
273 */
274 /* rdi: prev */
275 ENTRY(ret_from_fork)
276 CFI_DEFAULT_STACK
277 push kernel_eflags(%rip)
278 CFI_ADJUST_CFA_OFFSET 8
279 popf # reset kernel eflags
280 CFI_ADJUST_CFA_OFFSET -8
281 call schedule_tail
282 GET_THREAD_INFO(%rcx)
283 testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
284 jnz rff_trace
285 rff_action:
286 RESTORE_REST
287 testl $3,CS-ARGOFFSET(%rsp) # from kernel_thread?
288 je int_ret_from_sys_call
289 testl $_TIF_IA32,TI_flags(%rcx)
290 jnz int_ret_from_sys_call
291 RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
292 jmp ret_from_sys_call
293 rff_trace:
294 movq %rsp,%rdi
295 call syscall_trace_leave
296 GET_THREAD_INFO(%rcx)
297 jmp rff_action
298 CFI_ENDPROC
299 END(ret_from_fork)
300
301 /*
302 * System call entry. Up to 6 arguments in registers are supported.
303 *
304 * SYSCALL does not save anything on the stack and does not change the
305 * stack pointer.
306 */
307
308 /*
309 * Register setup:
310 * rax system call number
311 * rdi arg0
312 * rcx return address for syscall/sysret, C arg3
313 * rsi arg1
314 * rdx arg2
315 * r10 arg3 (--> moved to rcx for C)
316 * r8 arg4
317 * r9 arg5
318 * r11 eflags for syscall/sysret, temporary for C
319 * r12-r15,rbp,rbx saved by C code, not touched.
320 *
321 * Interrupts are off on entry.
322 * Only called from user space.
323 *
324 * XXX if we had a free scratch register we could save the RSP into the stack frame
325 * and report it properly in ps. Unfortunately we don't have one.
326 *
327 * When the user can change the frames, always force IRET. That is because
328 * IRET deals with non-canonical addresses better. SYSRET has trouble
329 * with them due to bugs in both AMD and Intel CPUs.
330 */
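/*
 * Roughly speaking, the fast path below behaves like
 *
 *	if (rax <= __NR_syscall_max)
 *		rax = sys_call_table[rax](rdi, rsi, rdx, r10, r8, r9);
 *	else
 *		rax = -ENOSYS;
 *
 * with %r10 copied into %rcx first: SYSCALL itself clobbers %rcx/%r11,
 * so user space passes the fourth argument in %r10 and the kernel moves
 * it back into the C ABI's fourth argument register before the call.
 */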
331
332 ENTRY(system_call)
333 CFI_STARTPROC simple
334 CFI_SIGNAL_FRAME
335 CFI_DEF_CFA rsp,PDA_STACKOFFSET
336 CFI_REGISTER rip,rcx
337 /*CFI_REGISTER rflags,r11*/
338 SWAPGS_UNSAFE_STACK
339 /*
340 * A hypervisor implementation might want to use a label
341 * after the swapgs, so that it can do the swapgs
342 * for the guest and jump here on syscall.
343 */
344 ENTRY(system_call_after_swapgs)
345
346 movq %rsp,%gs:pda_oldrsp
347 movq %gs:pda_kernelstack,%rsp
348 /*
349 * No need to follow this irqs off/on section - it's straight
350 * and short:
351 */
352 ENABLE_INTERRUPTS(CLBR_NONE)
353 SAVE_ARGS 8,1
354 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
355 movq %rcx,RIP-ARGOFFSET(%rsp)
356 CFI_REL_OFFSET rip,RIP-ARGOFFSET
357 GET_THREAD_INFO(%rcx)
358 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
359 jnz tracesys
360 system_call_fastpath:
361 cmpq $__NR_syscall_max,%rax
362 ja badsys
363 movq %r10,%rcx
364 call *sys_call_table(,%rax,8) # XXX: rip relative
365 movq %rax,RAX-ARGOFFSET(%rsp)
366 /*
367 * Syscall return path ending with SYSRET (fast path)
368 * Has incomplete stack frame and undefined top of stack.
369 */
370 ret_from_sys_call:
371 movl $_TIF_ALLWORK_MASK,%edi
372 /* edi: flagmask */
373 sysret_check:
374 LOCKDEP_SYS_EXIT
375 GET_THREAD_INFO(%rcx)
376 DISABLE_INTERRUPTS(CLBR_NONE)
377 TRACE_IRQS_OFF
378 movl TI_flags(%rcx),%edx
379 andl %edi,%edx
380 jnz sysret_careful
381 CFI_REMEMBER_STATE
382 /*
383 * sysretq will re-enable interrupts:
384 */
385 TRACE_IRQS_ON
386 movq RIP-ARGOFFSET(%rsp),%rcx
387 CFI_REGISTER rip,rcx
388 RESTORE_ARGS 0,-ARG_SKIP,1
389 /*CFI_REGISTER rflags,r11*/
390 movq %gs:pda_oldrsp, %rsp
391 USERGS_SYSRET64
392
393 CFI_RESTORE_STATE
394 /* Handle reschedules */
395 /* edx: work, edi: workmask */
396 sysret_careful:
397 bt $TIF_NEED_RESCHED,%edx
398 jnc sysret_signal
399 TRACE_IRQS_ON
400 ENABLE_INTERRUPTS(CLBR_NONE)
401 pushq %rdi
402 CFI_ADJUST_CFA_OFFSET 8
403 call schedule
404 popq %rdi
405 CFI_ADJUST_CFA_OFFSET -8
406 jmp sysret_check
407
408 /* Handle a signal */
409 sysret_signal:
410 TRACE_IRQS_ON
411 ENABLE_INTERRUPTS(CLBR_NONE)
412 #ifdef CONFIG_AUDITSYSCALL
413 bt $TIF_SYSCALL_AUDIT,%edx
414 jc sysret_audit
415 #endif
416 /* edx: work flags (arg3) */
417 leaq do_notify_resume(%rip),%rax
418 leaq -ARGOFFSET(%rsp),%rdi # &pt_regs -> arg1
419 xorl %esi,%esi # oldset -> arg2
420 call ptregscall_common
421 movl $_TIF_WORK_MASK,%edi
422 /* Use IRET because the user could have changed the frame. This
423 works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
424 DISABLE_INTERRUPTS(CLBR_NONE)
425 TRACE_IRQS_OFF
426 jmp int_with_check
427
428 badsys:
429 movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
430 jmp ret_from_sys_call
431
432 #ifdef CONFIG_AUDITSYSCALL
433 /*
434 * Fast path for syscall audit without full syscall trace.
435 * We just call audit_syscall_entry() directly, and then
436 * jump back to the normal fast path.
437 */
438 auditsys:
439 movq %r10,%r9 /* 6th arg: 4th syscall arg */
440 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
441 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
442 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
443 movq %rax,%rsi /* 2nd arg: syscall number */
444 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
445 call audit_syscall_entry
446 LOAD_ARGS 0 /* reload call-clobbered registers */
447 jmp system_call_fastpath
448
449 /*
450 * Return fast path for syscall audit. Call audit_syscall_exit()
451 * directly and then jump back to the fast path with TIF_SYSCALL_AUDIT
452 * masked off.
453 */
454 sysret_audit:
455 movq %rax,%rsi /* second arg, syscall return value */
456 cmpq $0,%rax /* is it < 0? */
457 setl %al /* 1 if so, 0 if not */
458 movzbl %al,%edi /* zero-extend that into %edi */
459 inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
460 call audit_syscall_exit
461 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
462 jmp sysret_check
463 #endif /* CONFIG_AUDITSYSCALL */
464
465 /* Do syscall tracing */
466 tracesys:
467 #ifdef CONFIG_AUDITSYSCALL
468 testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
469 jz auditsys
470 #endif
471 SAVE_REST
472 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
473 FIXUP_TOP_OF_STACK %rdi
474 movq %rsp,%rdi
475 call syscall_trace_enter
476 /*
477 * Reload arg registers from stack in case ptrace changed them.
478 * We don't reload %rax because syscall_trace_enter() returned
479 * the value it wants us to use in the table lookup.
480 */
481 LOAD_ARGS ARGOFFSET, 1
482 RESTORE_REST
483 cmpq $__NR_syscall_max,%rax
484 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
485 movq %r10,%rcx /* fixup for C */
486 call *sys_call_table(,%rax,8)
487 movq %rax,RAX-ARGOFFSET(%rsp)
488 /* Use IRET because the user could have changed the frame */
489
490 /*
491 * Syscall return path ending with IRET.
492 * Has correct top of stack, but partial stack frame.
493 */
494 .globl int_ret_from_sys_call
495 .globl int_with_check
496 int_ret_from_sys_call:
497 DISABLE_INTERRUPTS(CLBR_NONE)
498 TRACE_IRQS_OFF
499 testl $3,CS-ARGOFFSET(%rsp)
500 je retint_restore_args
501 movl $_TIF_ALLWORK_MASK,%edi
502 /* edi: mask to check */
503 int_with_check:
504 LOCKDEP_SYS_EXIT_IRQ
505 GET_THREAD_INFO(%rcx)
506 movl TI_flags(%rcx),%edx
507 andl %edi,%edx
508 jnz int_careful
509 andl $~TS_COMPAT,TI_status(%rcx)
510 jmp retint_swapgs
511
512 /* Either reschedule or signal or syscall exit tracking needed. */
513 /* First do a reschedule test. */
514 /* edx: work, edi: workmask */
515 int_careful:
516 bt $TIF_NEED_RESCHED,%edx
517 jnc int_very_careful
518 TRACE_IRQS_ON
519 ENABLE_INTERRUPTS(CLBR_NONE)
520 pushq %rdi
521 CFI_ADJUST_CFA_OFFSET 8
522 call schedule
523 popq %rdi
524 CFI_ADJUST_CFA_OFFSET -8
525 DISABLE_INTERRUPTS(CLBR_NONE)
526 TRACE_IRQS_OFF
527 jmp int_with_check
528
529 /* handle signals and tracing -- both require a full stack frame */
530 int_very_careful:
531 TRACE_IRQS_ON
532 ENABLE_INTERRUPTS(CLBR_NONE)
533 SAVE_REST
534 /* Check for syscall exit trace */
535 testl $_TIF_WORK_SYSCALL_EXIT,%edx
536 jz int_signal
537 pushq %rdi
538 CFI_ADJUST_CFA_OFFSET 8
539 leaq 8(%rsp),%rdi # &ptregs -> arg1
540 call syscall_trace_leave
541 popq %rdi
542 CFI_ADJUST_CFA_OFFSET -8
543 andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi
544 jmp int_restore_rest
545
546 int_signal:
547 testl $_TIF_DO_NOTIFY_MASK,%edx
548 jz 1f
549 movq %rsp,%rdi # &ptregs -> arg1
550 xorl %esi,%esi # oldset -> arg2
551 call do_notify_resume
552 1: movl $_TIF_WORK_MASK,%edi
553 int_restore_rest:
554 RESTORE_REST
555 DISABLE_INTERRUPTS(CLBR_NONE)
556 TRACE_IRQS_OFF
557 jmp int_with_check
558 CFI_ENDPROC
559 END(system_call)
560
561 /*
562 * Certain special system calls need to save a complete full stack frame.
563 */
564
565 .macro PTREGSCALL label,func,arg
566 .globl \label
567 \label:
568 leaq \func(%rip),%rax
569 leaq -ARGOFFSET+8(%rsp),\arg /* 8 for return address */
570 jmp ptregscall_common
571 END(\label)
572 .endm
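/*
 * For example, "PTREGSCALL stub_clone, sys_clone, %r8" expands to roughly:
 *
 *	.globl stub_clone
 *	stub_clone:
 *		leaq sys_clone(%rip),%rax
 *		leaq -ARGOFFSET+8(%rsp),%r8	(8 for the return address)
 *		jmp ptregscall_common
 *	END(stub_clone)
 *
 * i.e. each stub just records which handler to call in %rax, points the
 * handler's pt_regs argument at the (not yet complete) frame, and lets
 * ptregscall_common build the full frame and make the call.
 */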
573
574 CFI_STARTPROC
575
576 PTREGSCALL stub_clone, sys_clone, %r8
577 PTREGSCALL stub_fork, sys_fork, %rdi
578 PTREGSCALL stub_vfork, sys_vfork, %rdi
579 PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
580 PTREGSCALL stub_iopl, sys_iopl, %rsi
581
582 ENTRY(ptregscall_common)
583 popq %r11
584 CFI_ADJUST_CFA_OFFSET -8
585 CFI_REGISTER rip, r11
586 SAVE_REST
587 movq %r11, %r15
588 CFI_REGISTER rip, r15
589 FIXUP_TOP_OF_STACK %r11
590 call *%rax
591 RESTORE_TOP_OF_STACK %r11
592 movq %r15, %r11
593 CFI_REGISTER rip, r11
594 RESTORE_REST
595 pushq %r11
596 CFI_ADJUST_CFA_OFFSET 8
597 CFI_REL_OFFSET rip, 0
598 ret
599 CFI_ENDPROC
600 END(ptregscall_common)
601
602 ENTRY(stub_execve)
603 CFI_STARTPROC
604 popq %r11
605 CFI_ADJUST_CFA_OFFSET -8
606 CFI_REGISTER rip, r11
607 SAVE_REST
608 FIXUP_TOP_OF_STACK %r11
609 movq %rsp, %rcx
610 call sys_execve
611 RESTORE_TOP_OF_STACK %r11
612 movq %rax,RAX(%rsp)
613 RESTORE_REST
614 jmp int_ret_from_sys_call
615 CFI_ENDPROC
616 END(stub_execve)
617
618 /*
619 * sigreturn is special because it needs to restore all registers on return.
620 * This cannot be done with SYSRET, so use the IRET return path instead.
621 */
622 ENTRY(stub_rt_sigreturn)
623 CFI_STARTPROC
624 addq $8, %rsp
625 CFI_ADJUST_CFA_OFFSET -8
626 SAVE_REST
627 movq %rsp,%rdi
628 FIXUP_TOP_OF_STACK %r11
629 call sys_rt_sigreturn
630 movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer
631 RESTORE_REST
632 jmp int_ret_from_sys_call
633 CFI_ENDPROC
634 END(stub_rt_sigreturn)
635
636 /*
637 * initial frame state for interrupts and exceptions
638 */
639 .macro _frame ref
640 CFI_STARTPROC simple
641 CFI_SIGNAL_FRAME
642 CFI_DEF_CFA rsp,SS+8-\ref
643 /*CFI_REL_OFFSET ss,SS-\ref*/
644 CFI_REL_OFFSET rsp,RSP-\ref
645 /*CFI_REL_OFFSET rflags,EFLAGS-\ref*/
646 /*CFI_REL_OFFSET cs,CS-\ref*/
647 CFI_REL_OFFSET rip,RIP-\ref
648 .endm
649
650 /* initial frame state for interrupts (and exceptions without error code) */
651 #define INTR_FRAME _frame RIP
652 /* initial frame state for exceptions with error code (and interrupts with
653 vector already pushed) */
654 #define XCPT_FRAME _frame ORIG_RAX
655
656 /*
657 * Interrupt entry/exit.
658 *
659 * Interrupt entry points save only callee-clobbered registers in the fast path.
660 *
661 * Entry runs with interrupts off.
662 */
663
664 /* 0(%rsp): interrupt number */
665 .macro interrupt func
666 cld
667 SAVE_ARGS
668 leaq -ARGOFFSET(%rsp),%rdi # arg1 for handler
669 pushq %rbp
670 /*
671 * Save rbp twice: One is for marking the stack frame, as usual, and the
672 * other, to fill pt_regs properly. This is because bx comes right
673 * before the last saved register in that structure, and not bp. If the
674 * base pointer were in the place bx is today, this would not be needed.
675 */
676 movq %rbp, -8(%rsp)
677 CFI_ADJUST_CFA_OFFSET 8
678 CFI_REL_OFFSET rbp, 0
679 movq %rsp,%rbp
680 CFI_DEF_CFA_REGISTER rbp
681 testl $3,CS(%rdi)
682 je 1f
683 SWAPGS
684 /* irqcount is used to check if a CPU is already on an interrupt
685 stack or not. While this is essentially redundant with preempt_count
686 it is a little cheaper to use a separate counter in the PDA
687 (short of moving irq_enter into assembly, which would be too
688 much work) */
689 1: incl %gs:pda_irqcount
690 cmoveq %gs:pda_irqstackptr,%rsp
691 push %rbp # backlink for old unwinder
692 /*
693 * We entered an interrupt context - irqs are off:
694 */
695 TRACE_IRQS_OFF
696 call \func
697 .endm
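/*
 * The incl/cmoveq pair above implements the switch to the per-CPU
 * interrupt stack: pda_irqcount starts out at -1, so the first (outermost)
 * interrupt increments it to zero and the cmoveq (which tests the ZF set
 * by incl) loads pda_irqstackptr into %rsp, while nested interrupts leave
 * %rsp alone and keep running on the stack they arrived on.
 */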
698
699 ENTRY(common_interrupt)
700 XCPT_FRAME
701 interrupt do_IRQ
702 /* 0(%rsp): oldrsp-ARGOFFSET */
703 ret_from_intr:
704 DISABLE_INTERRUPTS(CLBR_NONE)
705 TRACE_IRQS_OFF
706 decl %gs:pda_irqcount
707 leaveq
708 CFI_DEF_CFA_REGISTER rsp
709 CFI_ADJUST_CFA_OFFSET -8
710 exit_intr:
711 GET_THREAD_INFO(%rcx)
712 testl $3,CS-ARGOFFSET(%rsp)
713 je retint_kernel
714
715 /* Interrupt came from user space */
716 /*
717 * Has a correct top of stack, but a partial stack frame
718 * %rcx: thread info. Interrupts off.
719 */
720 retint_with_reschedule:
721 movl $_TIF_WORK_MASK,%edi
722 retint_check:
723 LOCKDEP_SYS_EXIT_IRQ
724 movl TI_flags(%rcx),%edx
725 andl %edi,%edx
726 CFI_REMEMBER_STATE
727 jnz retint_careful
728
729 retint_swapgs: /* return to user-space */
730 /*
731 * The iretq could re-enable interrupts:
732 */
733 DISABLE_INTERRUPTS(CLBR_ANY)
734 TRACE_IRQS_IRETQ
735 SWAPGS
736 jmp restore_args
737
738 retint_restore_args: /* return to kernel space */
739 DISABLE_INTERRUPTS(CLBR_ANY)
740 /*
741 * The iretq could re-enable interrupts:
742 */
743 TRACE_IRQS_IRETQ
744 restore_args:
745 RESTORE_ARGS 0,8,0
746
747 irq_return:
748 INTERRUPT_RETURN
749
750 .section __ex_table, "a"
751 .quad irq_return, bad_iret
752 .previous
753
754 #ifdef CONFIG_PARAVIRT
755 ENTRY(native_iret)
756 iretq
757
758 .section __ex_table,"a"
759 .quad native_iret, bad_iret
760 .previous
761 #endif
762
763 .section .fixup,"ax"
764 bad_iret:
765 /*
766 * The iret traps when the %cs or %ss being restored is bogus.
767 * We've lost the original trap vector and error code.
768 * #GPF is the most likely one to get for an invalid selector.
769 * So pretend we completed the iret and took the #GPF in user mode.
770 *
771 * We are now running with the kernel GS after exception recovery.
772 * But error_entry expects us to have user GS to match the user %cs,
773 * so swap back.
774 */
775 pushq $0
776
777 SWAPGS
778 jmp general_protection
779
780 .previous
781
782 /* edi: workmask, edx: work */
783 retint_careful:
784 CFI_RESTORE_STATE
785 bt $TIF_NEED_RESCHED,%edx
786 jnc retint_signal
787 TRACE_IRQS_ON
788 ENABLE_INTERRUPTS(CLBR_NONE)
789 pushq %rdi
790 CFI_ADJUST_CFA_OFFSET 8
791 call schedule
792 popq %rdi
793 CFI_ADJUST_CFA_OFFSET -8
794 GET_THREAD_INFO(%rcx)
795 DISABLE_INTERRUPTS(CLBR_NONE)
796 TRACE_IRQS_OFF
797 jmp retint_check
798
799 retint_signal:
800 testl $_TIF_DO_NOTIFY_MASK,%edx
801 jz retint_swapgs
802 TRACE_IRQS_ON
803 ENABLE_INTERRUPTS(CLBR_NONE)
804 SAVE_REST
805 movq $-1,ORIG_RAX(%rsp)
806 xorl %esi,%esi # oldset
807 movq %rsp,%rdi # &pt_regs
808 call do_notify_resume
809 RESTORE_REST
810 DISABLE_INTERRUPTS(CLBR_NONE)
811 TRACE_IRQS_OFF
812 GET_THREAD_INFO(%rcx)
813 jmp retint_with_reschedule
814
815 #ifdef CONFIG_PREEMPT
816 /* Returning to kernel space. Check if we need preemption */
817 /* rcx: threadinfo. interrupts off. */
818 ENTRY(retint_kernel)
819 cmpl $0,TI_preempt_count(%rcx)
820 jnz retint_restore_args
821 bt $TIF_NEED_RESCHED,TI_flags(%rcx)
822 jnc retint_restore_args
823 bt $9,EFLAGS-ARGOFFSET(%rsp) /* interrupts off? */
824 jnc retint_restore_args
825 call preempt_schedule_irq
826 jmp exit_intr
827 #endif
828
829 CFI_ENDPROC
830 END(common_interrupt)
831
832 /*
833 * APIC interrupts.
834 */
835 .macro apicinterrupt num,func
836 INTR_FRAME
837 pushq $~(\num)
838 CFI_ADJUST_CFA_OFFSET 8
839 interrupt \func
840 jmp ret_from_intr
841 CFI_ENDPROC
842 .endm
843
844 ENTRY(thermal_interrupt)
845 apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
846 END(thermal_interrupt)
847
848 ENTRY(threshold_interrupt)
849 apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
850 END(threshold_interrupt)
851
852 #ifdef CONFIG_SMP
853 ENTRY(reschedule_interrupt)
854 apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
855 END(reschedule_interrupt)
856
857 .macro INVALIDATE_ENTRY num
858 ENTRY(invalidate_interrupt\num)
859 apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
860 END(invalidate_interrupt\num)
861 .endm
862
863 INVALIDATE_ENTRY 0
864 INVALIDATE_ENTRY 1
865 INVALIDATE_ENTRY 2
866 INVALIDATE_ENTRY 3
867 INVALIDATE_ENTRY 4
868 INVALIDATE_ENTRY 5
869 INVALIDATE_ENTRY 6
870 INVALIDATE_ENTRY 7
871
872 ENTRY(call_function_interrupt)
873 apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
874 END(call_function_interrupt)
875 ENTRY(call_function_single_interrupt)
876 apicinterrupt CALL_FUNCTION_SINGLE_VECTOR,smp_call_function_single_interrupt
877 END(call_function_single_interrupt)
878 ENTRY(irq_move_cleanup_interrupt)
879 apicinterrupt IRQ_MOVE_CLEANUP_VECTOR,smp_irq_move_cleanup_interrupt
880 END(irq_move_cleanup_interrupt)
881 #endif
882
883 ENTRY(apic_timer_interrupt)
884 apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
885 END(apic_timer_interrupt)
886
887 ENTRY(uv_bau_message_intr1)
888 apicinterrupt 220,uv_bau_message_interrupt
889 END(uv_bau_message_intr1)
890
891 ENTRY(error_interrupt)
892 apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
893 END(error_interrupt)
894
895 ENTRY(spurious_interrupt)
896 apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
897 END(spurious_interrupt)
898
899 /*
900 * Exception entry points.
901 */
902 .macro zeroentry sym
903 INTR_FRAME
904 PARAVIRT_ADJUST_EXCEPTION_FRAME
905 pushq $0 /* push error code/oldrax */
906 CFI_ADJUST_CFA_OFFSET 8
907 pushq %rax /* push real oldrax to the rdi slot */
908 CFI_ADJUST_CFA_OFFSET 8
909 CFI_REL_OFFSET rax,0
910 leaq \sym(%rip),%rax
911 jmp error_entry
912 CFI_ENDPROC
913 .endm
914
915 .macro errorentry sym
916 XCPT_FRAME
917 PARAVIRT_ADJUST_EXCEPTION_FRAME
918 pushq %rax
919 CFI_ADJUST_CFA_OFFSET 8
920 CFI_REL_OFFSET rax,0
921 leaq \sym(%rip),%rax
922 jmp error_entry
923 CFI_ENDPROC
924 .endm
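/*
 * The difference between the two macros above is the error code:
 * zeroentry is for exceptions where the CPU pushes no error code, so it
 * pushes a 0 itself to keep the frame layout identical, while errorentry
 * relies on the hardware-pushed error code already being on the stack.
 * Both then stash the handler address in %rax and branch to error_entry.
 */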
925
926 /* error code is on the stack already */
927 /* handle NMI-like exceptions that can happen everywhere */
928 .macro paranoidentry sym, ist=0, irqtrace=1
929 SAVE_ALL
930 cld
931 movl $1,%ebx
932 movl $MSR_GS_BASE,%ecx
933 rdmsr
934 testl %edx,%edx
935 js 1f
936 SWAPGS
937 xorl %ebx,%ebx
938 1:
939 .if \ist
940 movq %gs:pda_data_offset, %rbp
941 .endif
942 .if \irqtrace
943 TRACE_IRQS_OFF
944 .endif
945 movq %rsp,%rdi
946 movq ORIG_RAX(%rsp),%rsi
947 movq $-1,ORIG_RAX(%rsp)
948 .if \ist
949 subq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
950 .endif
951 call \sym
952 .if \ist
953 addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
954 .endif
955 DISABLE_INTERRUPTS(CLBR_NONE)
956 .if \irqtrace
957 TRACE_IRQS_OFF
958 .endif
959 .endm
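/*
 * When an \ist argument is given, the corresponding IST entry in the
 * per-CPU TSS (%rbp holds the per-CPU data offset loaded above) is
 * temporarily moved down by EXCEPTION_STKSZ around the call to \sym,
 * presumably so that an exception of the same kind raised while the
 * handler runs gets a fresh stack area instead of clobbering the one
 * currently in use.
 */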
960
961 /*
962 * "Paranoid" exit path from exception stack.
963 * Paranoid because this is used by NMIs and cannot take
964 * any kernel state for granted.
965 * We don't do kernel preemption checks here, because only
966 * NMI should be common and it does not enable IRQs and
967 * cannot get reschedule ticks.
968 *
969 * "trace" is 0 for the NMI handler only, because irq-tracing
970 * is fundamentally NMI-unsafe. (we cannot change the soft and
971 * hard flags at once, atomically)
972 */
973 .macro paranoidexit trace=1
974 /* ebx: no swapgs flag */
975 paranoid_exit\trace:
976 testl %ebx,%ebx /* swapgs needed? */
977 jnz paranoid_restore\trace
978 testl $3,CS(%rsp)
979 jnz paranoid_userspace\trace
980 paranoid_swapgs\trace:
981 .if \trace
982 TRACE_IRQS_IRETQ 0
983 .endif
984 SWAPGS_UNSAFE_STACK
985 paranoid_restore\trace:
986 RESTORE_ALL 8
987 jmp irq_return
988 paranoid_userspace\trace:
989 GET_THREAD_INFO(%rcx)
990 movl TI_flags(%rcx),%ebx
991 andl $_TIF_WORK_MASK,%ebx
992 jz paranoid_swapgs\trace
993 movq %rsp,%rdi /* &pt_regs */
994 call sync_regs
995 movq %rax,%rsp /* switch stack for scheduling */
996 testl $_TIF_NEED_RESCHED,%ebx
997 jnz paranoid_schedule\trace
998 movl %ebx,%edx /* arg3: thread flags */
999 .if \trace
1000 TRACE_IRQS_ON
1001 .endif
1002 ENABLE_INTERRUPTS(CLBR_NONE)
1003 xorl %esi,%esi /* arg2: oldset */
1004 movq %rsp,%rdi /* arg1: &pt_regs */
1005 call do_notify_resume
1006 DISABLE_INTERRUPTS(CLBR_NONE)
1007 .if \trace
1008 TRACE_IRQS_OFF
1009 .endif
1010 jmp paranoid_userspace\trace
1011 paranoid_schedule\trace:
1012 .if \trace
1013 TRACE_IRQS_ON
1014 .endif
1015 ENABLE_INTERRUPTS(CLBR_ANY)
1016 call schedule
1017 DISABLE_INTERRUPTS(CLBR_ANY)
1018 .if \trace
1019 TRACE_IRQS_OFF
1020 .endif
1021 jmp paranoid_userspace\trace
1022 CFI_ENDPROC
1023 .endm
1024
1025 /*
1026 * Exception entry point. This expects an error code/orig_rax on the stack
1027 * and the exception handler in %rax.
1028 */
1029 KPROBE_ENTRY(error_entry)
1030 _frame RDI
1031 CFI_REL_OFFSET rax,0
1032 /* rdi slot contains rax, oldrax contains error code */
1033 cld
1034 subq $14*8,%rsp
1035 CFI_ADJUST_CFA_OFFSET (14*8)
1036 movq %rsi,13*8(%rsp)
1037 CFI_REL_OFFSET rsi,RSI
1038 movq 14*8(%rsp),%rsi /* load rax from rdi slot */
1039 CFI_REGISTER rax,rsi
1040 movq %rdx,12*8(%rsp)
1041 CFI_REL_OFFSET rdx,RDX
1042 movq %rcx,11*8(%rsp)
1043 CFI_REL_OFFSET rcx,RCX
1044 movq %rsi,10*8(%rsp) /* store rax */
1045 CFI_REL_OFFSET rax,RAX
1046 movq %r8, 9*8(%rsp)
1047 CFI_REL_OFFSET r8,R8
1048 movq %r9, 8*8(%rsp)
1049 CFI_REL_OFFSET r9,R9
1050 movq %r10,7*8(%rsp)
1051 CFI_REL_OFFSET r10,R10
1052 movq %r11,6*8(%rsp)
1053 CFI_REL_OFFSET r11,R11
1054 movq %rbx,5*8(%rsp)
1055 CFI_REL_OFFSET rbx,RBX
1056 movq %rbp,4*8(%rsp)
1057 CFI_REL_OFFSET rbp,RBP
1058 movq %r12,3*8(%rsp)
1059 CFI_REL_OFFSET r12,R12
1060 movq %r13,2*8(%rsp)
1061 CFI_REL_OFFSET r13,R13
1062 movq %r14,1*8(%rsp)
1063 CFI_REL_OFFSET r14,R14
1064 movq %r15,(%rsp)
1065 CFI_REL_OFFSET r15,R15
1066 xorl %ebx,%ebx
1067 testl $3,CS(%rsp)
1068 je error_kernelspace
1069 error_swapgs:
1070 SWAPGS
1071 error_sti:
1072 TRACE_IRQS_OFF
1073 movq %rdi,RDI(%rsp)
1074 CFI_REL_OFFSET rdi,RDI
1075 movq %rsp,%rdi
1076 movq ORIG_RAX(%rsp),%rsi /* get error code */
1077 movq $-1,ORIG_RAX(%rsp)
1078 call *%rax
1079 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
1080 error_exit:
1081 movl %ebx,%eax
1082 RESTORE_REST
1083 DISABLE_INTERRUPTS(CLBR_NONE)
1084 TRACE_IRQS_OFF
1085 GET_THREAD_INFO(%rcx)
1086 testl %eax,%eax
1087 jne retint_kernel
1088 LOCKDEP_SYS_EXIT_IRQ
1089 movl TI_flags(%rcx),%edx
1090 movl $_TIF_WORK_MASK,%edi
1091 andl %edi,%edx
1092 jnz retint_careful
1093 jmp retint_swapgs
1094 CFI_ENDPROC
1095
1096 error_kernelspace:
1097 incl %ebx
1098 /* There are two places in the kernel that can potentially fault with
1099 usergs. Handle them here. The exception handlers after
1100 iret run with kernel gs again, so don't set the user space flag.
1101 B stepping K8s sometimes report a truncated RIP for IRET
1102 exceptions returning to compat mode. Check for these here too. */
1103 leaq irq_return(%rip),%rcx
1104 cmpq %rcx,RIP(%rsp)
1105 je error_swapgs
1106 movl %ecx,%ecx /* zero extend */
1107 cmpq %rcx,RIP(%rsp)
1108 je error_swapgs
1109 cmpq $gs_change,RIP(%rsp)
1110 je error_swapgs
1111 jmp error_sti
1112 KPROBE_END(error_entry)
1113
1114 /* Reload gs selector with exception handling */
1115 /* edi: new selector */
1116 ENTRY(native_load_gs_index)
1117 CFI_STARTPROC
1118 pushf
1119 CFI_ADJUST_CFA_OFFSET 8
1120 DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI))
1121 SWAPGS
1122 gs_change:
1123 movl %edi,%gs
1124 2: mfence /* workaround */
1125 SWAPGS
1126 popf
1127 CFI_ADJUST_CFA_OFFSET -8
1128 ret
1129 CFI_ENDPROC
1130 ENDPROC(native_load_gs_index)
1131
1132 .section __ex_table,"a"
1133 .align 8
1134 .quad gs_change,bad_gs
1135 .previous
1136 .section .fixup,"ax"
1137 /* running with kernelgs */
1138 bad_gs:
1139 SWAPGS /* switch back to user gs */
1140 xorl %eax,%eax
1141 movl %eax,%gs
1142 jmp 2b
1143 .previous
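/*
 * The __ex_table entry above pairs the faulting instruction at gs_change
 * with the bad_gs fixup: if loading the new %gs selector faults, the
 * exception fixup machinery resumes execution at bad_gs, which swaps gs
 * back, loads a null selector into %gs instead, and then rejoins the
 * normal path at the 2: label above.
 */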
1144
1145 /*
1146 * Create a kernel thread.
1147 *
1148 * C extern interface:
1149 * extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
1150 *
1151 * asm input arguments:
1152 * rdi: fn, rsi: arg, rdx: flags
1153 */
1154 ENTRY(kernel_thread)
1155 CFI_STARTPROC
1156 FAKE_STACK_FRAME $child_rip
1157 SAVE_ALL
1158
1159 # rdi: flags, rsi: usp, rdx: will be &pt_regs
1160 movq %rdx,%rdi
1161 orq kernel_thread_flags(%rip),%rdi
1162 movq $-1, %rsi
1163 movq %rsp, %rdx
1164
1165 xorl %r8d,%r8d
1166 xorl %r9d,%r9d
1167
1168 # clone now
1169 call do_fork
1170 movq %rax,RAX(%rsp)
1171 xorl %edi,%edi
1172
1173 /*
1174 * It isn't worth checking for a reschedule here,
1175 * so internally to the x86_64 port you can rely on kernel_thread()
1176 * not rescheduling the child before returning; this avoids the need
1177 * for hacks, for example to fork off the per-CPU idle tasks.
1178 * [Hopefully no generic code relies on the reschedule -AK]
1178 * [Hopefully no generic code relies on the reschedule -AK]
1179 */
1180 RESTORE_ALL
1181 UNFAKE_STACK_FRAME
1182 ret
1183 CFI_ENDPROC
1184 ENDPROC(kernel_thread)
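/*
 * A minimal (hypothetical) C usage sketch of the interface above:
 *
 *	static int my_worker(void *arg)
 *	{
 *		do_some_work(arg);
 *		return 0;
 *	}
 *
 *	kernel_thread(my_worker, NULL, CLONE_FS | CLONE_FILES | SIGCHLD);
 *
 * my_worker/do_some_work are made-up names; the flags shown are just a
 * combination commonly used by in-kernel callers of kernel_thread().
 */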
1185
1186 child_rip:
1187 pushq $0 # fake return address
1188 CFI_STARTPROC
1189 /*
1190 * Here we are in the child and the registers are set as they were
1191 * at kernel_thread() invocation in the parent.
1192 */
1193 movq %rdi, %rax
1194 movq %rsi, %rdi
1195 call *%rax
1196 # exit
1197 mov %eax, %edi
1198 call do_exit
1199 CFI_ENDPROC
1200 ENDPROC(child_rip)
1201
1202 /*
1203 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
1204 *
1205 * C extern interface:
1206 * extern long execve(char *name, char **argv, char **envp)
1207 *
1208 * asm input arguments:
1209 * rdi: name, rsi: argv, rdx: envp
1210 *
1211 * We want to fall back into:
1212 * extern long sys_execve(char *name, char **argv,char **envp, struct pt_regs *regs)
1213 *
1214 * sys_execve asm fallback arguments:
1215 * rdi: name, rsi: argv, rdx: envp, rcx: fake frame on the stack
1216 */
1217 ENTRY(kernel_execve)
1218 CFI_STARTPROC
1219 FAKE_STACK_FRAME $0
1220 SAVE_ALL
1221 movq %rsp,%rcx
1222 call sys_execve
1223 movq %rax, RAX(%rsp)
1224 RESTORE_REST
1225 testq %rax,%rax
1226 je int_ret_from_sys_call
1227 RESTORE_ARGS
1228 UNFAKE_STACK_FRAME
1229 ret
1230 CFI_ENDPROC
1231 ENDPROC(kernel_execve)
1232
1233 KPROBE_ENTRY(page_fault)
1234 errorentry do_page_fault
1235 KPROBE_END(page_fault)
1236
1237 ENTRY(coprocessor_error)
1238 zeroentry do_coprocessor_error
1239 END(coprocessor_error)
1240
1241 ENTRY(simd_coprocessor_error)
1242 zeroentry do_simd_coprocessor_error
1243 END(simd_coprocessor_error)
1244
1245 ENTRY(device_not_available)
1246 zeroentry do_device_not_available
1247 END(device_not_available)
1248
1249 /* runs on exception stack */
1250 KPROBE_ENTRY(debug)
1251 INTR_FRAME
1252 PARAVIRT_ADJUST_EXCEPTION_FRAME
1253 pushq $0
1254 CFI_ADJUST_CFA_OFFSET 8
1255 paranoidentry do_debug, DEBUG_STACK
1256 paranoidexit
1257 KPROBE_END(debug)
1258
1259 /* runs on exception stack */
1260 KPROBE_ENTRY(nmi)
1261 INTR_FRAME
1262 PARAVIRT_ADJUST_EXCEPTION_FRAME
1263 pushq $-1
1264 CFI_ADJUST_CFA_OFFSET 8
1265 paranoidentry do_nmi, 0, 0
1266 #ifdef CONFIG_TRACE_IRQFLAGS
1267 paranoidexit 0
1268 #else
1269 jmp paranoid_exit1
1270 CFI_ENDPROC
1271 #endif
1272 KPROBE_END(nmi)
1273
1274 KPROBE_ENTRY(int3)
1275 INTR_FRAME
1276 PARAVIRT_ADJUST_EXCEPTION_FRAME
1277 pushq $0
1278 CFI_ADJUST_CFA_OFFSET 8
1279 paranoidentry do_int3, DEBUG_STACK
1280 jmp paranoid_exit1
1281 CFI_ENDPROC
1282 KPROBE_END(int3)
1283
1284 ENTRY(overflow)
1285 zeroentry do_overflow
1286 END(overflow)
1287
1288 ENTRY(bounds)
1289 zeroentry do_bounds
1290 END(bounds)
1291
1292 ENTRY(invalid_op)
1293 zeroentry do_invalid_op
1294 END(invalid_op)
1295
1296 ENTRY(coprocessor_segment_overrun)
1297 zeroentry do_coprocessor_segment_overrun
1298 END(coprocessor_segment_overrun)
1299
1300 /* runs on exception stack */
1301 ENTRY(double_fault)
1302 XCPT_FRAME
1303 PARAVIRT_ADJUST_EXCEPTION_FRAME
1304 paranoidentry do_double_fault
1305 jmp paranoid_exit1
1306 CFI_ENDPROC
1307 END(double_fault)
1308
1309 ENTRY(invalid_TSS)
1310 errorentry do_invalid_TSS
1311 END(invalid_TSS)
1312
1313 ENTRY(segment_not_present)
1314 errorentry do_segment_not_present
1315 END(segment_not_present)
1316
1317 /* runs on exception stack */
1318 ENTRY(stack_segment)
1319 XCPT_FRAME
1320 PARAVIRT_ADJUST_EXCEPTION_FRAME
1321 paranoidentry do_stack_segment
1322 jmp paranoid_exit1
1323 CFI_ENDPROC
1324 END(stack_segment)
1325
1326 KPROBE_ENTRY(general_protection)
1327 errorentry do_general_protection
1328 KPROBE_END(general_protection)
1329
1330 ENTRY(alignment_check)
1331 errorentry do_alignment_check
1332 END(alignment_check)
1333
1334 ENTRY(divide_error)
1335 zeroentry do_divide_error
1336 END(divide_error)
1337
1338 ENTRY(spurious_interrupt_bug)
1339 zeroentry do_spurious_interrupt_bug
1340 END(spurious_interrupt_bug)
1341
1342 #ifdef CONFIG_X86_MCE
1343 /* runs on exception stack */
1344 ENTRY(machine_check)
1345 INTR_FRAME
1346 PARAVIRT_ADJUST_EXCEPTION_FRAME
1347 pushq $0
1348 CFI_ADJUST_CFA_OFFSET 8
1349 paranoidentry do_machine_check
1350 jmp paranoid_exit1
1351 CFI_ENDPROC
1352 END(machine_check)
1353 #endif
1354
1355 /* Call softirq on interrupt stack. Interrupts are off. */
1356 ENTRY(call_softirq)
1357 CFI_STARTPROC
1358 push %rbp
1359 CFI_ADJUST_CFA_OFFSET 8
1360 CFI_REL_OFFSET rbp,0
1361 mov %rsp,%rbp
1362 CFI_DEF_CFA_REGISTER rbp
1363 incl %gs:pda_irqcount
1364 cmove %gs:pda_irqstackptr,%rsp
1365 push %rbp # backlink for old unwinder
1366 call __do_softirq
1367 leaveq
1368 CFI_DEF_CFA_REGISTER rsp
1369 CFI_ADJUST_CFA_OFFSET -8
1370 decl %gs:pda_irqcount
1371 ret
1372 CFI_ENDPROC
1373 ENDPROC(call_softirq)
1374
1375 KPROBE_ENTRY(ignore_sysret)
1376 CFI_STARTPROC
1377 mov $-ENOSYS,%eax
1378 sysret
1379 CFI_ENDPROC
1380 ENDPROC(ignore_sysret)
1381
1382 #ifdef CONFIG_XEN
1383 ENTRY(xen_hypervisor_callback)
1384 zeroentry xen_do_hypervisor_callback
1385 END(xen_hypervisor_callback)
1386
1387 /*
1388 # A note on the "critical region" in our callback handler.
1389 # We want to avoid stacking callback handlers due to events occurring
1390 # during handling of the last event. To do this, we keep events disabled
1391 # until we've done all processing. HOWEVER, we must enable events before
1392 # popping the stack frame (can't be done atomically) and so it would still
1393 # be possible to get enough handler activations to overflow the stack.
1394 # Although unlikely, bugs of that kind are hard to track down, so we'd
1395 # like to avoid the possibility.
1396 # So, on entry to the handler we detect whether we interrupted an
1397 # existing activation in its critical region -- if so, we pop the current
1398 # activation and restart the handler using the previous one.
1399 */
1400 ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct pt_regs *)
1401 CFI_STARTPROC
1402 /* Since we don't modify %rdi, xen_evtchn_do_upcall(struct pt_regs *) will
1403 see the correct pointer to the pt_regs */
1404 movq %rdi, %rsp # we don't return, adjust the stack frame
1405 CFI_ENDPROC
1406 CFI_DEFAULT_STACK
1407 11: incl %gs:pda_irqcount
1408 movq %rsp,%rbp
1409 CFI_DEF_CFA_REGISTER rbp
1410 cmovzq %gs:pda_irqstackptr,%rsp
1411 pushq %rbp # backlink for old unwinder
1412 call xen_evtchn_do_upcall
1413 popq %rsp
1414 CFI_DEF_CFA_REGISTER rsp
1415 decl %gs:pda_irqcount
1416 jmp error_exit
1417 CFI_ENDPROC
1418 END(xen_do_hypervisor_callback)
1419
1420 /*
1421 # Hypervisor uses this for application faults while it executes.
1422 # We get here for two reasons:
1423 # 1. Fault while reloading DS, ES, FS or GS
1424 # 2. Fault while executing IRET
1425 # Category 1 we do not need to fix up as Xen has already reloaded all segment
1426 # registers that could be reloaded and zeroed the others.
1427 # Category 2 we fix up by killing the current process. We cannot use the
1428 # normal Linux return path in this case because if we use the IRET hypercall
1429 # to pop the stack frame we end up in an infinite loop of failsafe callbacks.
1430 # We distinguish between categories by comparing each saved segment register
1431 # with its current contents: any discrepancy means we are in category 1.
1432 */
1433 ENTRY(xen_failsafe_callback)
1434 framesz = (RIP-0x30) /* workaround buggy gas */
1435 _frame framesz
1436 CFI_REL_OFFSET rcx, 0
1437 CFI_REL_OFFSET r11, 8
1438 movw %ds,%cx
1439 cmpw %cx,0x10(%rsp)
1440 CFI_REMEMBER_STATE
1441 jne 1f
1442 movw %es,%cx
1443 cmpw %cx,0x18(%rsp)
1444 jne 1f
1445 movw %fs,%cx
1446 cmpw %cx,0x20(%rsp)
1447 jne 1f
1448 movw %gs,%cx
1449 cmpw %cx,0x28(%rsp)
1450 jne 1f
1451 /* All segments match their saved values => Category 2 (Bad IRET). */
1452 movq (%rsp),%rcx
1453 CFI_RESTORE rcx
1454 movq 8(%rsp),%r11
1455 CFI_RESTORE r11
1456 addq $0x30,%rsp
1457 CFI_ADJUST_CFA_OFFSET -0x30
1458 pushq $0
1459 CFI_ADJUST_CFA_OFFSET 8
1460 pushq %r11
1461 CFI_ADJUST_CFA_OFFSET 8
1462 pushq %rcx
1463 CFI_ADJUST_CFA_OFFSET 8
1464 jmp general_protection
1465 CFI_RESTORE_STATE
1466 1: /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
1467 movq (%rsp),%rcx
1468 CFI_RESTORE rcx
1469 movq 8(%rsp),%r11
1470 CFI_RESTORE r11
1471 addq $0x30,%rsp
1472 CFI_ADJUST_CFA_OFFSET -0x30
1473 pushq $0
1474 CFI_ADJUST_CFA_OFFSET 8
1475 SAVE_ALL
1476 jmp error_exit
1477 CFI_ENDPROC
1478 END(xen_failsafe_callback)
1479
1480 #endif /* CONFIG_XEN */