x86: 64 bits: shrink and align IRQ stubs
[deliverable/linux.git] arch/x86/kernel/entry_64.S
1da177e4
LT
1/*
2 * linux/arch/x86_64/entry.S
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
6 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
1da177e4
LT
7 */
8
9/*
10 * entry.S contains the system-call and fault low-level handling routines.
11 *
12 * NOTE: This code handles signal recognition, which happens every time
13 * after an interrupt and after each system call.
14 *
15 * Normal syscalls and interrupts don't save a full stack frame; this is
16 * only done for syscall tracing, signals or fork/exec et al.
17 *
18 * A note on terminology:
19 * - top of stack: Architecture defined interrupt frame from SS to RIP
20 * at the top of the kernel process stack.
21 * - partial stack frame: partially saved registers up to R11.
22 * - full stack frame: Like partial stack frame, but all registers saved.
2e91a17b
AK
23 *
24 * Some macro usage:
25 * - CFI macros are used to generate dwarf2 unwind information for better
26 * backtraces. They don't change any code.
27 * - SAVE_ALL/RESTORE_ALL - Save/restore all registers
28 * - SAVE_ARGS/RESTORE_ARGS - Save/restore registers that C functions modify.
29 * There are unfortunately lots of special cases where some registers
30 * are not touched. The macro is a big mess that should be cleaned up.
31 * - SAVE_REST/RESTORE_REST - Handle the registers not saved by SAVE_ARGS.
32 * Gives a full stack frame.
33 * - ENTRY/END - Define functions in the symbol table.
34 * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - Fix up the hardware stack
35 * frame that is otherwise undefined after a SYSCALL
36 * - TRACE_IRQ_* - Trace hard interrupt state for lock debugging.
37 * - errorentry/paranoidentry/zeroentry - Define exception entry points.
1da177e4
LT
38 */
39
1da177e4
LT
40#include <linux/linkage.h>
41#include <asm/segment.h>
1da177e4
LT
42#include <asm/cache.h>
43#include <asm/errno.h>
44#include <asm/dwarf2.h>
45#include <asm/calling.h>
e2d5df93 46#include <asm/asm-offsets.h>
1da177e4
LT
47#include <asm/msr.h>
48#include <asm/unistd.h>
49#include <asm/thread_info.h>
50#include <asm/hw_irq.h>
5f8efbb9 51#include <asm/page.h>
2601e64d 52#include <asm/irqflags.h>
72fe4858 53#include <asm/paravirt.h>
395a59d0 54#include <asm/ftrace.h>
1da177e4 55
86a1c34a
RM
56/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
57#include <linux/elf-em.h>
58#define AUDIT_ARCH_X86_64 (EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
59#define __AUDIT_ARCH_64BIT 0x80000000
60#define __AUDIT_ARCH_LE 0x40000000
61
1da177e4
LT
62 .code64
63
606576ce 64#ifdef CONFIG_FUNCTION_TRACER
d61f82d0
SR
65#ifdef CONFIG_DYNAMIC_FTRACE
66ENTRY(mcount)
d61f82d0
SR
67 retq
68END(mcount)
69
70ENTRY(ftrace_caller)
71
72 /* taken from glibc */
73 subq $0x38, %rsp
74 movq %rax, (%rsp)
75 movq %rcx, 8(%rsp)
76 movq %rdx, 16(%rsp)
77 movq %rsi, 24(%rsp)
78 movq %rdi, 32(%rsp)
79 movq %r8, 40(%rsp)
80 movq %r9, 48(%rsp)
81
82 movq 0x38(%rsp), %rdi
83 movq 8(%rbp), %rsi
395a59d0 84 subq $MCOUNT_INSN_SIZE, %rdi
d61f82d0
SR
85
86.globl ftrace_call
87ftrace_call:
88 call ftrace_stub
89
90 movq 48(%rsp), %r9
91 movq 40(%rsp), %r8
92 movq 32(%rsp), %rdi
93 movq 24(%rsp), %rsi
94 movq 16(%rsp), %rdx
95 movq 8(%rsp), %rcx
96 movq (%rsp), %rax
97 addq $0x38, %rsp
98
99.globl ftrace_stub
100ftrace_stub:
101 retq
102END(ftrace_caller)
103
104#else /* ! CONFIG_DYNAMIC_FTRACE */
16444a8a
ACM
105ENTRY(mcount)
106 cmpq $ftrace_stub, ftrace_trace_function
107 jnz trace
108.globl ftrace_stub
109ftrace_stub:
110 retq
111
112trace:
113 /* taken from glibc */
114 subq $0x38, %rsp
115 movq %rax, (%rsp)
116 movq %rcx, 8(%rsp)
117 movq %rdx, 16(%rsp)
118 movq %rsi, 24(%rsp)
119 movq %rdi, 32(%rsp)
120 movq %r8, 40(%rsp)
121 movq %r9, 48(%rsp)
122
123 movq 0x38(%rsp), %rdi
124 movq 8(%rbp), %rsi
395a59d0 125 subq $MCOUNT_INSN_SIZE, %rdi
16444a8a
ACM
126
127 call *ftrace_trace_function
128
129 movq 48(%rsp), %r9
130 movq 40(%rsp), %r8
131 movq 32(%rsp), %rdi
132 movq 24(%rsp), %rsi
133 movq 16(%rsp), %rdx
134 movq 8(%rsp), %rcx
135 movq (%rsp), %rax
136 addq $0x38, %rsp
137
138 jmp ftrace_stub
139END(mcount)
d61f82d0 140#endif /* CONFIG_DYNAMIC_FTRACE */
606576ce 141#endif /* CONFIG_FUNCTION_TRACER */
16444a8a 142
dc37db4d 143#ifndef CONFIG_PREEMPT
1da177e4
LT
144#define retint_kernel retint_restore_args
145#endif
2601e64d 146
72fe4858 147#ifdef CONFIG_PARAVIRT
2be29982 148ENTRY(native_usergs_sysret64)
72fe4858
GOC
149 swapgs
150 sysretq
151#endif /* CONFIG_PARAVIRT */
152
2601e64d
IM
153
154.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
155#ifdef CONFIG_TRACE_IRQFLAGS
156 bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */
157 jnc 1f
158 TRACE_IRQS_ON
1591:
160#endif
161.endm
162
1da177e4
LT
163/*
164 * C code is not supposed to know about the undefined top of stack. Every time
165 * a C function with a pt_regs argument is called from the SYSCALL-based
166 * fast path, FIXUP_TOP_OF_STACK is needed.
167 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
168 * manipulation.
169 */
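/*
 * Concretely (a restatement of the two macros below, assuming the pt_regs
 * slot offsets from asm-offsets): SYSCALL never saved SS, RSP, CS or
 * EFLAGS, so FIXUP_TOP_OF_STACK fills those slots from %gs:pda_oldrsp,
 * the user segment selectors and the saved R11, while RESTORE_TOP_OF_STACK
 * copies any changes (e.g. by a ptracer) back into pda_oldrsp and the R11
 * slot before the SYSRET path consumes them.
 */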
170
171 /* %rsp:at FRAMEEND */
172 .macro FIXUP_TOP_OF_STACK tmp
173 movq %gs:pda_oldrsp,\tmp
174 movq \tmp,RSP(%rsp)
175 movq $__USER_DS,SS(%rsp)
176 movq $__USER_CS,CS(%rsp)
177 movq $-1,RCX(%rsp)
178 movq R11(%rsp),\tmp /* get eflags */
179 movq \tmp,EFLAGS(%rsp)
180 .endm
181
182 .macro RESTORE_TOP_OF_STACK tmp,offset=0
183 movq RSP-\offset(%rsp),\tmp
184 movq \tmp,%gs:pda_oldrsp
185 movq EFLAGS-\offset(%rsp),\tmp
186 movq \tmp,R11-\offset(%rsp)
187 .endm
188
189 .macro FAKE_STACK_FRAME child_rip
190 /* push in order ss, rsp, eflags, cs, rip */
3829ee6b 191 xorl %eax, %eax
e04e0a63 192 pushq $__KERNEL_DS /* ss */
1da177e4 193 CFI_ADJUST_CFA_OFFSET 8
7effaa88 194 /*CFI_REL_OFFSET ss,0*/
1da177e4
LT
195 pushq %rax /* rsp */
196 CFI_ADJUST_CFA_OFFSET 8
7effaa88 197 CFI_REL_OFFSET rsp,0
1da177e4
LT
198 pushq $(1<<9) /* eflags - interrupts on */
199 CFI_ADJUST_CFA_OFFSET 8
7effaa88 200 /*CFI_REL_OFFSET rflags,0*/
1da177e4
LT
201 pushq $__KERNEL_CS /* cs */
202 CFI_ADJUST_CFA_OFFSET 8
7effaa88 203 /*CFI_REL_OFFSET cs,0*/
1da177e4
LT
204 pushq \child_rip /* rip */
205 CFI_ADJUST_CFA_OFFSET 8
7effaa88 206 CFI_REL_OFFSET rip,0
1da177e4
LT
207 pushq %rax /* orig rax */
208 CFI_ADJUST_CFA_OFFSET 8
209 .endm
210
211 .macro UNFAKE_STACK_FRAME
212 addq $8*6, %rsp
213 CFI_ADJUST_CFA_OFFSET -(6*8)
214 .endm
215
7effaa88
JB
216 .macro CFI_DEFAULT_STACK start=1
217 .if \start
218 CFI_STARTPROC simple
adf14236 219 CFI_SIGNAL_FRAME
7effaa88
JB
220 CFI_DEF_CFA rsp,SS+8
221 .else
222 CFI_DEF_CFA_OFFSET SS+8
223 .endif
224 CFI_REL_OFFSET r15,R15
225 CFI_REL_OFFSET r14,R14
226 CFI_REL_OFFSET r13,R13
227 CFI_REL_OFFSET r12,R12
228 CFI_REL_OFFSET rbp,RBP
229 CFI_REL_OFFSET rbx,RBX
230 CFI_REL_OFFSET r11,R11
231 CFI_REL_OFFSET r10,R10
232 CFI_REL_OFFSET r9,R9
233 CFI_REL_OFFSET r8,R8
234 CFI_REL_OFFSET rax,RAX
235 CFI_REL_OFFSET rcx,RCX
236 CFI_REL_OFFSET rdx,RDX
237 CFI_REL_OFFSET rsi,RSI
238 CFI_REL_OFFSET rdi,RDI
239 CFI_REL_OFFSET rip,RIP
240 /*CFI_REL_OFFSET cs,CS*/
241 /*CFI_REL_OFFSET rflags,EFLAGS*/
242 CFI_REL_OFFSET rsp,RSP
243 /*CFI_REL_OFFSET ss,SS*/
1da177e4
LT
244 .endm
245/*
246 * A newly forked process directly context switches into this.
247 */
248/* rdi: prev */
249ENTRY(ret_from_fork)
1da177e4 250 CFI_DEFAULT_STACK
658fdbef 251 push kernel_eflags(%rip)
e0a5a5d9 252 CFI_ADJUST_CFA_OFFSET 8
658fdbef 253 popf # reset kernel eflags
e0a5a5d9 254 CFI_ADJUST_CFA_OFFSET -8
1da177e4
LT
255 call schedule_tail
256 GET_THREAD_INFO(%rcx)
26ccb8a7 257 testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
1da177e4
LT
258 jnz rff_trace
259rff_action:
260 RESTORE_REST
261 testl $3,CS-ARGOFFSET(%rsp) # from kernel_thread?
262 je int_ret_from_sys_call
26ccb8a7 263 testl $_TIF_IA32,TI_flags(%rcx)
1da177e4
LT
264 jnz int_ret_from_sys_call
265 RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
266 jmp ret_from_sys_call
267rff_trace:
268 movq %rsp,%rdi
269 call syscall_trace_leave
270 GET_THREAD_INFO(%rcx)
271 jmp rff_action
272 CFI_ENDPROC
4b787e0b 273END(ret_from_fork)
1da177e4
LT
274
275/*
276 * System call entry. Up to 6 arguments in registers are supported.
277 *
278 * SYSCALL does not save anything on the stack and does not change the
279 * stack pointer.
280 */
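/*
 * Background note (illustrative, not from the original comment): the
 * SYSCALL instruction itself only stashes the user return RIP in RCX and
 * RFLAGS in R11 and switches CS/SS; it does not switch stacks. That is
 * why the entry code below loads a kernel stack from pda_kernelstack and
 * why RCX and R11 appear in the register table as return address/eflags.
 */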
281
282/*
283 * Register setup:
284 * rax system call number
285 * rdi arg0
286 * rcx return address for syscall/sysret, C arg3
287 * rsi arg1
288 * rdx arg2
289 * r10 arg3 (--> moved to rcx for C)
290 * r8 arg4
291 * r9 arg5
292 * r11 eflags for syscall/sysret, temporary for C
293 * r12-r15,rbp,rbx saved by C code, not touched.
294 *
295 * Interrupts are off on entry.
296 * Only called from user space.
297 *
298 * XXX if we had a free scratch register we could save RSP into the stack frame
299 * and report it properly in ps. Unfortunately we don't have one.
7bf36bbc
AK
300 *
301 * When the user can change the frame, always force IRET. That is because
302 * IRET deals with non-canonical addresses better. SYSRET has trouble
303 * with them due to bugs in both AMD and Intel CPUs.
1da177e4
LT
304 */
305
306ENTRY(system_call)
7effaa88 307 CFI_STARTPROC simple
adf14236 308 CFI_SIGNAL_FRAME
dffead4e 309 CFI_DEF_CFA rsp,PDA_STACKOFFSET
7effaa88
JB
310 CFI_REGISTER rip,rcx
311 /*CFI_REGISTER rflags,r11*/
72fe4858
GOC
312 SWAPGS_UNSAFE_STACK
313 /*
314 * A hypervisor implementation might want to use a label
315 * after the swapgs, so that it can do the swapgs
316 * for the guest and jump here on syscall.
317 */
318ENTRY(system_call_after_swapgs)
319
1da177e4
LT
320 movq %rsp,%gs:pda_oldrsp
321 movq %gs:pda_kernelstack,%rsp
2601e64d
IM
322 /*
323 * No need to follow this irqs off/on section - it's straight
324 * and short:
325 */
72fe4858 326 ENABLE_INTERRUPTS(CLBR_NONE)
1da177e4
LT
327 SAVE_ARGS 8,1
328 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
7effaa88
JB
329 movq %rcx,RIP-ARGOFFSET(%rsp)
330 CFI_REL_OFFSET rip,RIP-ARGOFFSET
1da177e4 331 GET_THREAD_INFO(%rcx)
d4d67150 332 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
1da177e4 333 jnz tracesys
86a1c34a 334system_call_fastpath:
1da177e4
LT
335 cmpq $__NR_syscall_max,%rax
336 ja badsys
337 movq %r10,%rcx
338 call *sys_call_table(,%rax,8) # XXX: rip relative
339 movq %rax,RAX-ARGOFFSET(%rsp)
340/*
341 * Syscall return path ending with SYSRET (fast path)
342 * Has incomplete stack frame and undefined top of stack.
343 */
1da177e4 344ret_from_sys_call:
11b854b2 345 movl $_TIF_ALLWORK_MASK,%edi
1da177e4
LT
346 /* edi: flagmask */
347sysret_check:
10cd706d 348 LOCKDEP_SYS_EXIT
1da177e4 349 GET_THREAD_INFO(%rcx)
72fe4858 350 DISABLE_INTERRUPTS(CLBR_NONE)
2601e64d 351 TRACE_IRQS_OFF
26ccb8a7 352 movl TI_flags(%rcx),%edx
1da177e4
LT
353 andl %edi,%edx
354 jnz sysret_careful
bcddc015 355 CFI_REMEMBER_STATE
2601e64d
IM
356 /*
357 * sysretq will re-enable interrupts:
358 */
359 TRACE_IRQS_ON
1da177e4 360 movq RIP-ARGOFFSET(%rsp),%rcx
7effaa88 361 CFI_REGISTER rip,rcx
1da177e4 362 RESTORE_ARGS 0,-ARG_SKIP,1
7effaa88 363 /*CFI_REGISTER rflags,r11*/
c7245da6 364 movq %gs:pda_oldrsp, %rsp
2be29982 365 USERGS_SYSRET64
1da177e4 366
bcddc015 367 CFI_RESTORE_STATE
1da177e4
LT
368 /* Handle reschedules */
369 /* edx: work, edi: workmask */
370sysret_careful:
371 bt $TIF_NEED_RESCHED,%edx
372 jnc sysret_signal
2601e64d 373 TRACE_IRQS_ON
72fe4858 374 ENABLE_INTERRUPTS(CLBR_NONE)
1da177e4 375 pushq %rdi
7effaa88 376 CFI_ADJUST_CFA_OFFSET 8
1da177e4
LT
377 call schedule
378 popq %rdi
7effaa88 379 CFI_ADJUST_CFA_OFFSET -8
1da177e4
LT
380 jmp sysret_check
381
382 /* Handle a signal */
383sysret_signal:
2601e64d 384 TRACE_IRQS_ON
72fe4858 385 ENABLE_INTERRUPTS(CLBR_NONE)
86a1c34a
RM
386#ifdef CONFIG_AUDITSYSCALL
387 bt $TIF_SYSCALL_AUDIT,%edx
388 jc sysret_audit
389#endif
10ffdbb8 390 /* edx: work flags (arg3) */
1da177e4
LT
391 leaq do_notify_resume(%rip),%rax
392 leaq -ARGOFFSET(%rsp),%rdi # &pt_regs -> arg1
393 xorl %esi,%esi # oldset -> arg2
394 call ptregscall_common
15e8f348 395 movl $_TIF_WORK_MASK,%edi
7bf36bbc
AK
396 /* Use IRET because the user could have changed the frame. This
397 works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
72fe4858 398 DISABLE_INTERRUPTS(CLBR_NONE)
2601e64d 399 TRACE_IRQS_OFF
7bf36bbc 400 jmp int_with_check
1da177e4 401
7effaa88
JB
402badsys:
403 movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
404 jmp ret_from_sys_call
405
86a1c34a
RM
406#ifdef CONFIG_AUDITSYSCALL
407 /*
408 * Fast path for syscall audit without full syscall trace.
409 * We just call audit_syscall_entry() directly, and then
410 * jump back to the normal fast path.
411 */
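/*
 * Note on the shuffle below: it simply lines the six audit arguments up
 * with the x86-64 function-call convention (rdi, rsi, rdx, rcx, r8, r9);
 * each per-line comment names which audit_syscall_entry() parameter the
 * register ends up holding.
 */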
412auditsys:
413 movq %r10,%r9 /* 6th arg: 4th syscall arg */
414 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
415 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
416 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
417 movq %rax,%rsi /* 2nd arg: syscall number */
418 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
419 call audit_syscall_entry
420 LOAD_ARGS 0 /* reload call-clobbered registers */
421 jmp system_call_fastpath
422
423 /*
424 * Return fast path for syscall audit. Call audit_syscall_exit()
425 * directly and then jump back to the fast path with TIF_SYSCALL_AUDIT
426 * masked off.
427 */
428sysret_audit:
429 movq %rax,%rsi /* second arg, syscall return value */
430 cmpq $0,%rax /* is it < 0? */
431 setl %al /* 1 if so, 0 if not */
432 movzbl %al,%edi /* zero-extend that into %edi */
433 inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
434 call audit_syscall_exit
435 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
436 jmp sysret_check
437#endif /* CONFIG_AUDITSYSCALL */
438
1da177e4
LT
439 /* Do syscall tracing */
440tracesys:
86a1c34a
RM
441#ifdef CONFIG_AUDITSYSCALL
442 testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
443 jz auditsys
444#endif
1da177e4 445 SAVE_REST
a31f8dd7 446 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
1da177e4
LT
447 FIXUP_TOP_OF_STACK %rdi
448 movq %rsp,%rdi
449 call syscall_trace_enter
d4d67150
RM
450 /*
451 * Reload arg registers from stack in case ptrace changed them.
452 * We don't reload %rax because syscall_trace_enter() returned
453 * the value it wants us to use in the table lookup.
454 */
455 LOAD_ARGS ARGOFFSET, 1
1da177e4
LT
456 RESTORE_REST
457 cmpq $__NR_syscall_max,%rax
a31f8dd7 458 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
1da177e4
LT
459 movq %r10,%rcx /* fixup for C */
460 call *sys_call_table(,%rax,8)
a31f8dd7 461 movq %rax,RAX-ARGOFFSET(%rsp)
7bf36bbc 462 /* Use IRET because user could have changed frame */
1da177e4 463
1da177e4
LT
464/*
465 * Syscall return path ending with IRET.
466 * Has correct top of stack, but partial stack frame.
bcddc015
JB
467 */
468 .globl int_ret_from_sys_call
5cbf1565 469 .globl int_with_check
bcddc015 470int_ret_from_sys_call:
72fe4858 471 DISABLE_INTERRUPTS(CLBR_NONE)
2601e64d 472 TRACE_IRQS_OFF
1da177e4
LT
473 testl $3,CS-ARGOFFSET(%rsp)
474 je retint_restore_args
475 movl $_TIF_ALLWORK_MASK,%edi
476 /* edi: mask to check */
477int_with_check:
10cd706d 478 LOCKDEP_SYS_EXIT_IRQ
1da177e4 479 GET_THREAD_INFO(%rcx)
26ccb8a7 480 movl TI_flags(%rcx),%edx
1da177e4
LT
481 andl %edi,%edx
482 jnz int_careful
26ccb8a7 483 andl $~TS_COMPAT,TI_status(%rcx)
1da177e4
LT
484 jmp retint_swapgs
485
486 /* Either reschedule or signal or syscall exit tracking needed. */
487 /* First do a reschedule test. */
488 /* edx: work, edi: workmask */
489int_careful:
490 bt $TIF_NEED_RESCHED,%edx
491 jnc int_very_careful
2601e64d 492 TRACE_IRQS_ON
72fe4858 493 ENABLE_INTERRUPTS(CLBR_NONE)
1da177e4 494 pushq %rdi
7effaa88 495 CFI_ADJUST_CFA_OFFSET 8
1da177e4
LT
496 call schedule
497 popq %rdi
7effaa88 498 CFI_ADJUST_CFA_OFFSET -8
72fe4858 499 DISABLE_INTERRUPTS(CLBR_NONE)
2601e64d 500 TRACE_IRQS_OFF
1da177e4
LT
501 jmp int_with_check
502
503 /* handle signals and tracing -- both require a full stack frame */
504int_very_careful:
2601e64d 505 TRACE_IRQS_ON
72fe4858 506 ENABLE_INTERRUPTS(CLBR_NONE)
1da177e4
LT
507 SAVE_REST
508 /* Check for syscall exit trace */
d4d67150 509 testl $_TIF_WORK_SYSCALL_EXIT,%edx
1da177e4
LT
510 jz int_signal
511 pushq %rdi
7effaa88 512 CFI_ADJUST_CFA_OFFSET 8
1da177e4
LT
513 leaq 8(%rsp),%rdi # &ptregs -> arg1
514 call syscall_trace_leave
515 popq %rdi
7effaa88 516 CFI_ADJUST_CFA_OFFSET -8
d4d67150 517 andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi
1da177e4
LT
518 jmp int_restore_rest
519
520int_signal:
8f4d37ec 521 testl $_TIF_DO_NOTIFY_MASK,%edx
1da177e4
LT
522 jz 1f
523 movq %rsp,%rdi # &ptregs -> arg1
524 xorl %esi,%esi # oldset -> arg2
525 call do_notify_resume
eca91e78 5261: movl $_TIF_WORK_MASK,%edi
1da177e4
LT
527int_restore_rest:
528 RESTORE_REST
72fe4858 529 DISABLE_INTERRUPTS(CLBR_NONE)
2601e64d 530 TRACE_IRQS_OFF
1da177e4
LT
531 jmp int_with_check
532 CFI_ENDPROC
bcddc015 533END(system_call)
1da177e4
LT
534
535/*
536 * Certain special system calls that need to save a complete full stack frame.
537 */
538
539 .macro PTREGSCALL label,func,arg
540 .globl \label
541\label:
542 leaq \func(%rip),%rax
543 leaq -ARGOFFSET+8(%rsp),\arg /* 8 for return address */
544 jmp ptregscall_common
4b787e0b 545END(\label)
1da177e4
LT
546 .endm
547
7effaa88
JB
548 CFI_STARTPROC
549
1da177e4
LT
550 PTREGSCALL stub_clone, sys_clone, %r8
551 PTREGSCALL stub_fork, sys_fork, %rdi
552 PTREGSCALL stub_vfork, sys_vfork, %rdi
1da177e4
LT
553 PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
554 PTREGSCALL stub_iopl, sys_iopl, %rsi
555
556ENTRY(ptregscall_common)
1da177e4 557 popq %r11
7effaa88
JB
558 CFI_ADJUST_CFA_OFFSET -8
559 CFI_REGISTER rip, r11
1da177e4
LT
560 SAVE_REST
561 movq %r11, %r15
7effaa88 562 CFI_REGISTER rip, r15
1da177e4
LT
563 FIXUP_TOP_OF_STACK %r11
564 call *%rax
565 RESTORE_TOP_OF_STACK %r11
566 movq %r15, %r11
7effaa88 567 CFI_REGISTER rip, r11
1da177e4
LT
568 RESTORE_REST
569 pushq %r11
7effaa88
JB
570 CFI_ADJUST_CFA_OFFSET 8
571 CFI_REL_OFFSET rip, 0
1da177e4
LT
572 ret
573 CFI_ENDPROC
4b787e0b 574END(ptregscall_common)
1da177e4
LT
575
576ENTRY(stub_execve)
577 CFI_STARTPROC
578 popq %r11
7effaa88
JB
579 CFI_ADJUST_CFA_OFFSET -8
580 CFI_REGISTER rip, r11
1da177e4 581 SAVE_REST
1da177e4 582 FIXUP_TOP_OF_STACK %r11
5d119b2c 583 movq %rsp, %rcx
1da177e4 584 call sys_execve
1da177e4 585 RESTORE_TOP_OF_STACK %r11
1da177e4
LT
586 movq %rax,RAX(%rsp)
587 RESTORE_REST
588 jmp int_ret_from_sys_call
589 CFI_ENDPROC
4b787e0b 590END(stub_execve)
1da177e4
LT
591
592/*
593 * sigreturn is special because it needs to restore all registers on return.
594 * This cannot be done with SYSRET, so use the IRET return path instead.
595 */
596ENTRY(stub_rt_sigreturn)
597 CFI_STARTPROC
7effaa88
JB
598 addq $8, %rsp
599 CFI_ADJUST_CFA_OFFSET -8
1da177e4
LT
600 SAVE_REST
601 movq %rsp,%rdi
602 FIXUP_TOP_OF_STACK %r11
603 call sys_rt_sigreturn
604 movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer
605 RESTORE_REST
606 jmp int_ret_from_sys_call
607 CFI_ENDPROC
4b787e0b 608END(stub_rt_sigreturn)
1da177e4 609
7effaa88
JB
610/*
611 * initial frame state for interrupts and exceptions
612 */
613 .macro _frame ref
614 CFI_STARTPROC simple
adf14236 615 CFI_SIGNAL_FRAME
7effaa88
JB
616 CFI_DEF_CFA rsp,SS+8-\ref
617 /*CFI_REL_OFFSET ss,SS-\ref*/
618 CFI_REL_OFFSET rsp,RSP-\ref
619 /*CFI_REL_OFFSET rflags,EFLAGS-\ref*/
620 /*CFI_REL_OFFSET cs,CS-\ref*/
621 CFI_REL_OFFSET rip,RIP-\ref
622 .endm
623
624/* initial frame state for interrupts (and exceptions without error code) */
625#define INTR_FRAME _frame RIP
626/* initial frame state for exceptions with error code (and interrupts with
627 vector already pushed) */
628#define XCPT_FRAME _frame ORIG_RAX
629
939b7871
PA
630/*
631 * Build the entry stubs and pointer table with some assembler magic.
632 * We pack 7 stubs into a single 32-byte chunk, which will fit in a
633 * single cache line on all modern x86 implementations.
634 */
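/*
 * Size sketch (assuming the usual encodings: pushq imm8 = 2 bytes, short
 * jmp = 2 bytes, near jmp = 5 bytes): six stubs of push+jmp take 24 bytes,
 * the seventh stub needs only its 2-byte push and falls through, and the
 * shared "jmp common_interrupt" adds 5 more, so a 7-stub group is 31 bytes
 * and fits inside the 32-byte alignment requested below.
 */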
635 .section .init.rodata,"a"
636ENTRY(interrupt)
637 .text
638 .p2align 5
639 .p2align CONFIG_X86_L1_CACHE_SHIFT
640ENTRY(irq_entries_start)
641 INTR_FRAME
642vector=FIRST_EXTERNAL_VECTOR
643.rept (NR_VECTORS-FIRST_EXTERNAL_VECTOR+6)/7
644 .balign 32
645 .rept 7
646 .if vector < NR_VECTORS
647 .if vector != FIRST_EXTERNAL_VECTOR
648 CFI_ADJUST_CFA_OFFSET -8
649 .endif
6501: pushq $(~vector+0x80) /* Note: always in signed byte range */
651 CFI_ADJUST_CFA_OFFSET 8
652 .if ((vector-FIRST_EXTERNAL_VECTOR)%7) != 6
653 jmp 2f
654 .endif
655 .previous
656 .quad 1b
657 .text
658vector=vector+1
659 .endif
660 .endr
6612: jmp common_interrupt
662.endr
663 CFI_ENDPROC
664END(irq_entries_start)
665
666.previous
667END(interrupt)
668.previous
669
1da177e4
LT
670/*
671 * Interrupt entry/exit.
672 *
673 * Interrupt entry points save only callee-clobbered registers in the fast path.
674 *
675 * Entry runs with interrupts off.
676 */
677
939b7871 678/* 0(%rsp): ~(interrupt number)+0x80 */
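/*
 * Worked example of the encoding (illustrative): for vector 0x31 the stub
 * pushed ~0x31 + 0x80 = 0x4e, which fits in a signed byte and keeps the
 * pushq short; the addq $-0x80 below turns it back into ~0x31 = -0x32,
 * i.e. a value in the [-256,-1] range noted above, and complementing it
 * (~) yields the original vector 0x31 again.
 */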
1da177e4 679 .macro interrupt func
939b7871 680 addq $-0x80,(%rsp) /* Adjust vector to [-256,-1] range */
1da177e4 681 cld
1da177e4 682 SAVE_ARGS
939b7871 683 leaq -ARGOFFSET(%rsp),%rdi /* arg1 for handler */
1de9c3f6 684 pushq %rbp
097a0788
GC
685 /*
686 * Save rbp twice: one is for marking the stack frame, as usual, and the
687 * other is to fill pt_regs properly, because bx, not bp, comes right
688 * before the last saved register in that structure. If the base pointer
689 * were in the place bx is today, this would not be needed.
690 */
691 movq %rbp, -8(%rsp)
1de9c3f6
JB
692 CFI_ADJUST_CFA_OFFSET 8
693 CFI_REL_OFFSET rbp, 0
694 movq %rsp,%rbp
695 CFI_DEF_CFA_REGISTER rbp
1da177e4
LT
696 testl $3,CS(%rdi)
697 je 1f
72fe4858 698 SWAPGS
96e54049
AK
699 /* irqcount is used to check if a CPU is already on an interrupt
700 stack or not. While this is essentially redundant with preempt_count,
701 it is a little cheaper to use a separate counter in the PDA
702 (short of moving irq_enter into assembly, which would be too
703 much work) */
7041: incl %gs:pda_irqcount
1de9c3f6 705 cmoveq %gs:pda_irqstackptr,%rsp
2699500b 706 push %rbp # backlink for old unwinder
2601e64d
IM
707 /*
708 * We entered an interrupt context - irqs are off:
709 */
710 TRACE_IRQS_OFF
1da177e4
LT
711 call \func
712 .endm
713
939b7871
PA
714 .p2align CONFIG_X86_L1_CACHE_SHIFT
715common_interrupt:
7effaa88 716 XCPT_FRAME
1da177e4
LT
717 interrupt do_IRQ
718 /* 0(%rsp): oldrsp-ARGOFFSET */
7effaa88 719ret_from_intr:
72fe4858 720 DISABLE_INTERRUPTS(CLBR_NONE)
2601e64d 721 TRACE_IRQS_OFF
3829ee6b 722 decl %gs:pda_irqcount
1de9c3f6 723 leaveq
7effaa88 724 CFI_DEF_CFA_REGISTER rsp
1de9c3f6 725 CFI_ADJUST_CFA_OFFSET -8
7effaa88 726exit_intr:
1da177e4
LT
727 GET_THREAD_INFO(%rcx)
728 testl $3,CS-ARGOFFSET(%rsp)
729 je retint_kernel
730
731 /* Interrupt came from user space */
732 /*
733 * Has a correct top of stack, but a partial stack frame
734 * %rcx: thread info. Interrupts off.
735 */
736retint_with_reschedule:
737 movl $_TIF_WORK_MASK,%edi
7effaa88 738retint_check:
10cd706d 739 LOCKDEP_SYS_EXIT_IRQ
26ccb8a7 740 movl TI_flags(%rcx),%edx
1da177e4 741 andl %edi,%edx
7effaa88 742 CFI_REMEMBER_STATE
1da177e4 743 jnz retint_careful
10cd706d
PZ
744
745retint_swapgs: /* return to user-space */
2601e64d
IM
746 /*
747 * The iretq could re-enable interrupts:
748 */
72fe4858 749 DISABLE_INTERRUPTS(CLBR_ANY)
2601e64d 750 TRACE_IRQS_IRETQ
72fe4858 751 SWAPGS
2601e64d
IM
752 jmp restore_args
753
10cd706d 754retint_restore_args: /* return to kernel space */
72fe4858 755 DISABLE_INTERRUPTS(CLBR_ANY)
2601e64d
IM
756 /*
757 * The iretq could re-enable interrupts:
758 */
759 TRACE_IRQS_IRETQ
760restore_args:
3701d863
IM
761 RESTORE_ARGS 0,8,0
762
f7f3d791 763irq_return:
72fe4858 764 INTERRUPT_RETURN
3701d863
IM
765
766 .section __ex_table, "a"
767 .quad irq_return, bad_iret
768 .previous
769
770#ifdef CONFIG_PARAVIRT
72fe4858 771ENTRY(native_iret)
1da177e4
LT
772 iretq
773
774 .section __ex_table,"a"
72fe4858 775 .quad native_iret, bad_iret
1da177e4 776 .previous
3701d863
IM
777#endif
778
1da177e4 779 .section .fixup,"ax"
1da177e4 780bad_iret:
3aa4b37d
RM
781 /*
782 * The iret traps when the %cs or %ss being restored is bogus.
783 * We've lost the original trap vector and error code.
784 * #GPF is the most likely one to get for an invalid selector.
785 * So pretend we completed the iret and took the #GPF in user mode.
786 *
787 * We are now running with the kernel GS after exception recovery.
788 * But error_entry expects us to have user GS to match the user %cs,
789 * so swap back.
790 */
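 /*
  * The pushq $0 below supplies the missing error-code slot, since the
  * general_protection entry we jump to is built with errorentry and
  * expects an error code to be on the stack already.
  */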
791 pushq $0
792
793 SWAPGS
794 jmp general_protection
795
72fe4858
GOC
796 .previous
797
7effaa88 798 /* edi: workmask, edx: work */
1da177e4 799retint_careful:
7effaa88 800 CFI_RESTORE_STATE
1da177e4
LT
801 bt $TIF_NEED_RESCHED,%edx
802 jnc retint_signal
2601e64d 803 TRACE_IRQS_ON
72fe4858 804 ENABLE_INTERRUPTS(CLBR_NONE)
1da177e4 805 pushq %rdi
7effaa88 806 CFI_ADJUST_CFA_OFFSET 8
1da177e4
LT
807 call schedule
808 popq %rdi
7effaa88 809 CFI_ADJUST_CFA_OFFSET -8
1da177e4 810 GET_THREAD_INFO(%rcx)
72fe4858 811 DISABLE_INTERRUPTS(CLBR_NONE)
2601e64d 812 TRACE_IRQS_OFF
1da177e4
LT
813 jmp retint_check
814
815retint_signal:
8f4d37ec 816 testl $_TIF_DO_NOTIFY_MASK,%edx
10ffdbb8 817 jz retint_swapgs
2601e64d 818 TRACE_IRQS_ON
72fe4858 819 ENABLE_INTERRUPTS(CLBR_NONE)
1da177e4
LT
820 SAVE_REST
821 movq $-1,ORIG_RAX(%rsp)
3829ee6b 822 xorl %esi,%esi # oldset
1da177e4
LT
823 movq %rsp,%rdi # &pt_regs
824 call do_notify_resume
825 RESTORE_REST
72fe4858 826 DISABLE_INTERRUPTS(CLBR_NONE)
2601e64d 827 TRACE_IRQS_OFF
be9e6870 828 GET_THREAD_INFO(%rcx)
eca91e78 829 jmp retint_with_reschedule
1da177e4
LT
830
831#ifdef CONFIG_PREEMPT
832 /* Returning to kernel space. Check if we need preemption */
833 /* rcx: threadinfo. interrupts off. */
b06babac 834ENTRY(retint_kernel)
26ccb8a7 835 cmpl $0,TI_preempt_count(%rcx)
1da177e4 836 jnz retint_restore_args
26ccb8a7 837 bt $TIF_NEED_RESCHED,TI_flags(%rcx)
1da177e4
LT
838 jnc retint_restore_args
839 bt $9,EFLAGS-ARGOFFSET(%rsp) /* interrupts off? */
840 jnc retint_restore_args
841 call preempt_schedule_irq
842 jmp exit_intr
843#endif
4b787e0b 844
1da177e4 845 CFI_ENDPROC
4b787e0b 846END(common_interrupt)
1da177e4
LT
847
848/*
849 * APIC interrupts.
850 */
851 .macro apicinterrupt num,func
7effaa88 852 INTR_FRAME
19eadf98 853 pushq $~(\num)
7effaa88 854 CFI_ADJUST_CFA_OFFSET 8
1da177e4
LT
855 interrupt \func
856 jmp ret_from_intr
857 CFI_ENDPROC
858 .endm
859
860ENTRY(thermal_interrupt)
861 apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
4b787e0b 862END(thermal_interrupt)
1da177e4 863
89b831ef
JS
864ENTRY(threshold_interrupt)
865 apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
4b787e0b 866END(threshold_interrupt)
89b831ef 867
1da177e4
LT
868#ifdef CONFIG_SMP
869ENTRY(reschedule_interrupt)
870 apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
4b787e0b 871END(reschedule_interrupt)
1da177e4 872
e5bc8b6b
AK
873 .macro INVALIDATE_ENTRY num
874ENTRY(invalidate_interrupt\num)
875 apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
4b787e0b 876END(invalidate_interrupt\num)
e5bc8b6b
AK
877 .endm
878
879 INVALIDATE_ENTRY 0
880 INVALIDATE_ENTRY 1
881 INVALIDATE_ENTRY 2
882 INVALIDATE_ENTRY 3
883 INVALIDATE_ENTRY 4
884 INVALIDATE_ENTRY 5
885 INVALIDATE_ENTRY 6
886 INVALIDATE_ENTRY 7
1da177e4
LT
887
888ENTRY(call_function_interrupt)
889 apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
4b787e0b 890END(call_function_interrupt)
3b16cf87
JA
891ENTRY(call_function_single_interrupt)
892 apicinterrupt CALL_FUNCTION_SINGLE_VECTOR,smp_call_function_single_interrupt
893END(call_function_single_interrupt)
61014292
EB
894ENTRY(irq_move_cleanup_interrupt)
895 apicinterrupt IRQ_MOVE_CLEANUP_VECTOR,smp_irq_move_cleanup_interrupt
896END(irq_move_cleanup_interrupt)
1da177e4
LT
897#endif
898
1da177e4
LT
899ENTRY(apic_timer_interrupt)
900 apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
4b787e0b 901END(apic_timer_interrupt)
1da177e4 902
1812924b
CW
903ENTRY(uv_bau_message_intr1)
904 apicinterrupt 220,uv_bau_message_interrupt
905END(uv_bau_message_intr1)
906
1da177e4
LT
907ENTRY(error_interrupt)
908 apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
4b787e0b 909END(error_interrupt)
1da177e4
LT
910
911ENTRY(spurious_interrupt)
912 apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
4b787e0b 913END(spurious_interrupt)
1da177e4
LT
914
915/*
916 * Exception entry points.
917 */
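 /*
  * Two flavours, matching the two hardware cases: zeroentry pushes a 0
  * into the error-code slot for exceptions where the CPU supplies none,
  * while errorentry relies on the error code the CPU already pushed.
  * Both stash the handler address in %rax and share error_entry.
  */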
918 .macro zeroentry sym
7effaa88 919 INTR_FRAME
fab58420 920 PARAVIRT_ADJUST_EXCEPTION_FRAME
1da177e4 921 pushq $0 /* push error code/oldrax */
7effaa88 922 CFI_ADJUST_CFA_OFFSET 8
1da177e4 923 pushq %rax /* push real oldrax to the rdi slot */
7effaa88 924 CFI_ADJUST_CFA_OFFSET 8
37550907 925 CFI_REL_OFFSET rax,0
1da177e4
LT
926 leaq \sym(%rip),%rax
927 jmp error_entry
7effaa88 928 CFI_ENDPROC
1da177e4
LT
929 .endm
930
931 .macro errorentry sym
7effaa88 932 XCPT_FRAME
fab58420 933 PARAVIRT_ADJUST_EXCEPTION_FRAME
1da177e4 934 pushq %rax
7effaa88 935 CFI_ADJUST_CFA_OFFSET 8
37550907 936 CFI_REL_OFFSET rax,0
1da177e4
LT
937 leaq \sym(%rip),%rax
938 jmp error_entry
7effaa88 939 CFI_ENDPROC
1da177e4
LT
940 .endm
941
942 /* error code is on the stack already */
943 /* handle NMI like exceptions that can happen everywhere */
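 /*
  * Note on the GS check below: MSR_GS_BASE is read with rdmsr, and a set
  * sign bit in the high half (%edx) means GS already holds a kernel
  * address, so the swapgs is skipped. %ebx records the decision
  * (1 = no swapgs needed on exit, 0 = swapgs needed) for the paranoid
  * exit path to test later.
  */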
2601e64d 944 .macro paranoidentry sym, ist=0, irqtrace=1
1da177e4
LT
945 SAVE_ALL
946 cld
947 movl $1,%ebx
948 movl $MSR_GS_BASE,%ecx
949 rdmsr
950 testl %edx,%edx
951 js 1f
72fe4858 952 SWAPGS
1da177e4 953 xorl %ebx,%ebx
b556b35e
JB
9541:
955 .if \ist
956 movq %gs:pda_data_offset, %rbp
957 .endif
7e61a793
AH
958 .if \irqtrace
959 TRACE_IRQS_OFF
960 .endif
b556b35e 961 movq %rsp,%rdi
1da177e4
LT
962 movq ORIG_RAX(%rsp),%rsi
963 movq $-1,ORIG_RAX(%rsp)
b556b35e 964 .if \ist
5f8efbb9 965 subq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
b556b35e 966 .endif
1da177e4 967 call \sym
b556b35e 968 .if \ist
5f8efbb9 969 addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
b556b35e 970 .endif
72fe4858 971 DISABLE_INTERRUPTS(CLBR_NONE)
2601e64d
IM
972 .if \irqtrace
973 TRACE_IRQS_OFF
974 .endif
1da177e4 975 .endm
2601e64d
IM
976
977 /*
978 * "Paranoid" exit path from exception stack.
979 * Paranoid because this is used by NMIs and cannot take
980 * any kernel state for granted.
981 * We don't do kernel preemption checks here, because only
982 * NMI should be common and it does not enable IRQs and
983 * cannot get reschedule ticks.
984 *
985 * "trace" is 0 for the NMI handler only, because irq-tracing
986 * is fundamentally NMI-unsafe. (we cannot change the soft and
987 * hard flags at once, atomically)
988 */
989 .macro paranoidexit trace=1
990 /* ebx: no swapgs flag */
991paranoid_exit\trace:
992 testl %ebx,%ebx /* swapgs needed? */
993 jnz paranoid_restore\trace
994 testl $3,CS(%rsp)
995 jnz paranoid_userspace\trace
996paranoid_swapgs\trace:
7a0a2dff 997 .if \trace
2601e64d 998 TRACE_IRQS_IRETQ 0
7a0a2dff 999 .endif
72fe4858 1000 SWAPGS_UNSAFE_STACK
2601e64d
IM
1001paranoid_restore\trace:
1002 RESTORE_ALL 8
3701d863 1003 jmp irq_return
2601e64d
IM
1004paranoid_userspace\trace:
1005 GET_THREAD_INFO(%rcx)
26ccb8a7 1006 movl TI_flags(%rcx),%ebx
2601e64d
IM
1007 andl $_TIF_WORK_MASK,%ebx
1008 jz paranoid_swapgs\trace
1009 movq %rsp,%rdi /* &pt_regs */
1010 call sync_regs
1011 movq %rax,%rsp /* switch stack for scheduling */
1012 testl $_TIF_NEED_RESCHED,%ebx
1013 jnz paranoid_schedule\trace
1014 movl %ebx,%edx /* arg3: thread flags */
1015 .if \trace
1016 TRACE_IRQS_ON
1017 .endif
72fe4858 1018 ENABLE_INTERRUPTS(CLBR_NONE)
2601e64d
IM
1019 xorl %esi,%esi /* arg2: oldset */
1020 movq %rsp,%rdi /* arg1: &pt_regs */
1021 call do_notify_resume
72fe4858 1022 DISABLE_INTERRUPTS(CLBR_NONE)
2601e64d
IM
1023 .if \trace
1024 TRACE_IRQS_OFF
1025 .endif
1026 jmp paranoid_userspace\trace
1027paranoid_schedule\trace:
1028 .if \trace
1029 TRACE_IRQS_ON
1030 .endif
72fe4858 1031 ENABLE_INTERRUPTS(CLBR_ANY)
2601e64d 1032 call schedule
72fe4858 1033 DISABLE_INTERRUPTS(CLBR_ANY)
2601e64d
IM
1034 .if \trace
1035 TRACE_IRQS_OFF
1036 .endif
1037 jmp paranoid_userspace\trace
1038 CFI_ENDPROC
1039 .endm
1040
1da177e4
LT
1041/*
1042 * Exception entry point. This expects an error code/orig_rax on the stack
1043 * and the exception handler in %rax.
1044 */
d28c4393 1045KPROBE_ENTRY(error_entry)
7effaa88 1046 _frame RDI
37550907 1047 CFI_REL_OFFSET rax,0
1da177e4
LT
1048 /* rdi slot contains rax, oldrax contains error code */
1049 cld
1050 subq $14*8,%rsp
1051 CFI_ADJUST_CFA_OFFSET (14*8)
1052 movq %rsi,13*8(%rsp)
1053 CFI_REL_OFFSET rsi,RSI
1054 movq 14*8(%rsp),%rsi /* load rax from rdi slot */
37550907 1055 CFI_REGISTER rax,rsi
1da177e4
LT
1056 movq %rdx,12*8(%rsp)
1057 CFI_REL_OFFSET rdx,RDX
1058 movq %rcx,11*8(%rsp)
1059 CFI_REL_OFFSET rcx,RCX
1060 movq %rsi,10*8(%rsp) /* store rax */
1061 CFI_REL_OFFSET rax,RAX
1062 movq %r8, 9*8(%rsp)
1063 CFI_REL_OFFSET r8,R8
1064 movq %r9, 8*8(%rsp)
1065 CFI_REL_OFFSET r9,R9
1066 movq %r10,7*8(%rsp)
1067 CFI_REL_OFFSET r10,R10
1068 movq %r11,6*8(%rsp)
1069 CFI_REL_OFFSET r11,R11
1070 movq %rbx,5*8(%rsp)
1071 CFI_REL_OFFSET rbx,RBX
1072 movq %rbp,4*8(%rsp)
1073 CFI_REL_OFFSET rbp,RBP
1074 movq %r12,3*8(%rsp)
1075 CFI_REL_OFFSET r12,R12
1076 movq %r13,2*8(%rsp)
1077 CFI_REL_OFFSET r13,R13
1078 movq %r14,1*8(%rsp)
1079 CFI_REL_OFFSET r14,R14
1080 movq %r15,(%rsp)
1081 CFI_REL_OFFSET r15,R15
1082 xorl %ebx,%ebx
1083 testl $3,CS(%rsp)
1084 je error_kernelspace
1085error_swapgs:
72fe4858 1086 SWAPGS
6b11d4ef
AH
1087error_sti:
1088 TRACE_IRQS_OFF
1da177e4 1089 movq %rdi,RDI(%rsp)
37550907 1090 CFI_REL_OFFSET rdi,RDI
1da177e4
LT
1091 movq %rsp,%rdi
1092 movq ORIG_RAX(%rsp),%rsi /* get error code */
1093 movq $-1,ORIG_RAX(%rsp)
1094 call *%rax
10cd706d
PZ
1095 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
1096error_exit:
1097 movl %ebx,%eax
1da177e4 1098 RESTORE_REST
72fe4858 1099 DISABLE_INTERRUPTS(CLBR_NONE)
2601e64d 1100 TRACE_IRQS_OFF
1da177e4
LT
1101 GET_THREAD_INFO(%rcx)
1102 testl %eax,%eax
1103 jne retint_kernel
10cd706d 1104 LOCKDEP_SYS_EXIT_IRQ
26ccb8a7 1105 movl TI_flags(%rcx),%edx
1da177e4
LT
1106 movl $_TIF_WORK_MASK,%edi
1107 andl %edi,%edx
1108 jnz retint_careful
10cd706d 1109 jmp retint_swapgs
1da177e4
LT
1110 CFI_ENDPROC
1111
1112error_kernelspace:
1113 incl %ebx
1114 /* There are two places in the kernel that can potentially fault with
1115 usergs. Handle them here. The exception handlers after
1116 iret run with kernel gs again, so don't set the user space flag.
1117 B stepping K8s sometimes report a truncated RIP for IRET
1118 exceptions returning to compat mode. Check for these here too. */
9d8ad5d6
VN
1119 leaq irq_return(%rip),%rcx
1120 cmpq %rcx,RIP(%rsp)
1da177e4 1121 je error_swapgs
9d8ad5d6
VN
1122 movl %ecx,%ecx /* zero extend */
1123 cmpq %rcx,RIP(%rsp)
1da177e4
LT
1124 je error_swapgs
1125 cmpq $gs_change,RIP(%rsp)
1126 je error_swapgs
1127 jmp error_sti
d28c4393 1128KPROBE_END(error_entry)
1da177e4
LT
1129
1130 /* Reload gs selector with exception handling */
1131 /* edi: new selector */
9f9d489a 1132ENTRY(native_load_gs_index)
7effaa88 1133 CFI_STARTPROC
1da177e4 1134 pushf
7effaa88 1135 CFI_ADJUST_CFA_OFFSET 8
72fe4858
GOC
1136 DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI))
1137 SWAPGS
1da177e4
LT
1138gs_change:
1139 movl %edi,%gs
11402: mfence /* workaround */
72fe4858 1141 SWAPGS
1da177e4 1142 popf
7effaa88 1143 CFI_ADJUST_CFA_OFFSET -8
1da177e4 1144 ret
7effaa88 1145 CFI_ENDPROC
9f9d489a 1146ENDPROC(native_load_gs_index)
1da177e4
LT
1147
1148 .section __ex_table,"a"
1149 .align 8
1150 .quad gs_change,bad_gs
1151 .previous
1152 .section .fixup,"ax"
1153 /* running with kernelgs */
1154bad_gs:
72fe4858 1155 SWAPGS /* switch back to user gs */
1da177e4
LT
1156 xorl %eax,%eax
1157 movl %eax,%gs
1158 jmp 2b
1159 .previous
1160
1161/*
1162 * Create a kernel thread.
1163 *
1164 * C extern interface:
1165 * extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
1166 *
1167 * asm input arguments:
1168 * rdi: fn, rsi: arg, rdx: flags
1169 */
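 /*
  * Rough flow (as implemented below): FAKE_STACK_FRAME builds an
  * interrupt-style frame whose return address is child_rip, do_fork()
  * clones the current task with that frame, and the child later "returns"
  * into child_rip, which calls fn(arg) and then do_exit().
  */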
1170ENTRY(kernel_thread)
1171 CFI_STARTPROC
1172 FAKE_STACK_FRAME $child_rip
1173 SAVE_ALL
1174
1175 # rdi: flags, rsi: usp, rdx: will be &pt_regs
1176 movq %rdx,%rdi
1177 orq kernel_thread_flags(%rip),%rdi
1178 movq $-1, %rsi
1179 movq %rsp, %rdx
1180
1181 xorl %r8d,%r8d
1182 xorl %r9d,%r9d
1183
1184 # clone now
1185 call do_fork
1186 movq %rax,RAX(%rsp)
1187 xorl %edi,%edi
1188
1189 /*
1190 * It isn't worth checking for a reschedule here, so internally to the
1191 * x86_64 port you can rely on kernel_thread() not rescheduling the
1192 * child before returning; this avoids the need for hacks, for example
1193 * to fork off the per-CPU idle tasks.
1194 * [Hopefully no generic code relies on the reschedule -AK]
1195 */
1196 RESTORE_ALL
1197 UNFAKE_STACK_FRAME
1198 ret
1199 CFI_ENDPROC
4b787e0b 1200ENDPROC(kernel_thread)
1da177e4
LT
1201
1202child_rip:
c05991ed
AK
1203 pushq $0 # fake return address
1204 CFI_STARTPROC
1da177e4
LT
1205 /*
1206 * Here we are in the child and the registers are set as they were
1207 * at kernel_thread() invocation in the parent.
1208 */
1209 movq %rdi, %rax
1210 movq %rsi, %rdi
1211 call *%rax
1212 # exit
1c5b5cfd 1213 mov %eax, %edi
1da177e4 1214 call do_exit
c05991ed 1215 CFI_ENDPROC
4b787e0b 1216ENDPROC(child_rip)
1da177e4
LT
1217
1218/*
1219 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
1220 *
1221 * C extern interface:
1222 * extern long execve(char *name, char **argv, char **envp)
1223 *
1224 * asm input arguments:
1225 * rdi: name, rsi: argv, rdx: envp
1226 *
1227 * We want to fall back into:
5d119b2c 1228 * extern long sys_execve(char *name, char **argv,char **envp, struct pt_regs *regs)
1da177e4
LT
1229 *
1230 * do_sys_execve asm fallback arguments:
5d119b2c 1231 * rdi: name, rsi: argv, rdx: envp, rcx: fake frame on the stack
1da177e4 1232 */
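/*
 * Sketch of the flow below: build a fake frame, call sys_execve() with
 * that frame as the pt_regs argument, and on success (rax == 0) leave
 * through int_ret_from_sys_call so the freshly set up user context is
 * loaded with IRET rather than SYSRET.
 */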
3db03b4a 1233ENTRY(kernel_execve)
1da177e4
LT
1234 CFI_STARTPROC
1235 FAKE_STACK_FRAME $0
1236 SAVE_ALL
5d119b2c 1237 movq %rsp,%rcx
1da177e4
LT
1238 call sys_execve
1239 movq %rax, RAX(%rsp)
1240 RESTORE_REST
1241 testq %rax,%rax
1242 je int_ret_from_sys_call
1243 RESTORE_ARGS
1244 UNFAKE_STACK_FRAME
1245 ret
1246 CFI_ENDPROC
3db03b4a 1247ENDPROC(kernel_execve)
1da177e4 1248
0f2fbdcb 1249KPROBE_ENTRY(page_fault)
1da177e4 1250 errorentry do_page_fault
d28c4393 1251KPROBE_END(page_fault)
1da177e4
LT
1252
1253ENTRY(coprocessor_error)
1254 zeroentry do_coprocessor_error
4b787e0b 1255END(coprocessor_error)
1da177e4
LT
1256
1257ENTRY(simd_coprocessor_error)
1258 zeroentry do_simd_coprocessor_error
4b787e0b 1259END(simd_coprocessor_error)
1da177e4
LT
1260
1261ENTRY(device_not_available)
e407d620 1262 zeroentry do_device_not_available
4b787e0b 1263END(device_not_available)
1da177e4
LT
1264
1265 /* runs on exception stack */
0f2fbdcb 1266KPROBE_ENTRY(debug)
7effaa88 1267 INTR_FRAME
09402947 1268 PARAVIRT_ADJUST_EXCEPTION_FRAME
1da177e4
LT
1269 pushq $0
1270 CFI_ADJUST_CFA_OFFSET 8
5f8efbb9 1271 paranoidentry do_debug, DEBUG_STACK
2601e64d 1272 paranoidexit
d28c4393 1273KPROBE_END(debug)
1da177e4
LT
1274
1275 /* runs on exception stack */
eddb6fb9 1276KPROBE_ENTRY(nmi)
7effaa88 1277 INTR_FRAME
09402947 1278 PARAVIRT_ADJUST_EXCEPTION_FRAME
1da177e4 1279 pushq $-1
7effaa88 1280 CFI_ADJUST_CFA_OFFSET 8
2601e64d
IM
1281 paranoidentry do_nmi, 0, 0
1282#ifdef CONFIG_TRACE_IRQFLAGS
1283 paranoidexit 0
1284#else
1285 jmp paranoid_exit1
1286 CFI_ENDPROC
1287#endif
d28c4393 1288KPROBE_END(nmi)
6fefb0d1 1289
0f2fbdcb 1290KPROBE_ENTRY(int3)
b556b35e 1291 INTR_FRAME
09402947 1292 PARAVIRT_ADJUST_EXCEPTION_FRAME
b556b35e
JB
1293 pushq $0
1294 CFI_ADJUST_CFA_OFFSET 8
5f8efbb9 1295 paranoidentry do_int3, DEBUG_STACK
2601e64d 1296 jmp paranoid_exit1
b556b35e 1297 CFI_ENDPROC
d28c4393 1298KPROBE_END(int3)
1da177e4
LT
1299
1300ENTRY(overflow)
1301 zeroentry do_overflow
4b787e0b 1302END(overflow)
1da177e4
LT
1303
1304ENTRY(bounds)
1305 zeroentry do_bounds
4b787e0b 1306END(bounds)
1da177e4
LT
1307
1308ENTRY(invalid_op)
1309 zeroentry do_invalid_op
4b787e0b 1310END(invalid_op)
1da177e4
LT
1311
1312ENTRY(coprocessor_segment_overrun)
1313 zeroentry do_coprocessor_segment_overrun
4b787e0b 1314END(coprocessor_segment_overrun)
1da177e4 1315
1da177e4
LT
1316 /* runs on exception stack */
1317ENTRY(double_fault)
7effaa88 1318 XCPT_FRAME
09402947 1319 PARAVIRT_ADJUST_EXCEPTION_FRAME
1da177e4 1320 paranoidentry do_double_fault
2601e64d 1321 jmp paranoid_exit1
1da177e4 1322 CFI_ENDPROC
4b787e0b 1323END(double_fault)
1da177e4
LT
1324
1325ENTRY(invalid_TSS)
1326 errorentry do_invalid_TSS
4b787e0b 1327END(invalid_TSS)
1da177e4
LT
1328
1329ENTRY(segment_not_present)
1330 errorentry do_segment_not_present
4b787e0b 1331END(segment_not_present)
1da177e4
LT
1332
1333 /* runs on exception stack */
1334ENTRY(stack_segment)
7effaa88 1335 XCPT_FRAME
09402947 1336 PARAVIRT_ADJUST_EXCEPTION_FRAME
1da177e4 1337 paranoidentry do_stack_segment
2601e64d 1338 jmp paranoid_exit1
1da177e4 1339 CFI_ENDPROC
4b787e0b 1340END(stack_segment)
1da177e4 1341
0f2fbdcb 1342KPROBE_ENTRY(general_protection)
1da177e4 1343 errorentry do_general_protection
d28c4393 1344KPROBE_END(general_protection)
1da177e4
LT
1345
1346ENTRY(alignment_check)
1347 errorentry do_alignment_check
4b787e0b 1348END(alignment_check)
1da177e4
LT
1349
1350ENTRY(divide_error)
1351 zeroentry do_divide_error
4b787e0b 1352END(divide_error)
1da177e4
LT
1353
1354ENTRY(spurious_interrupt_bug)
1355 zeroentry do_spurious_interrupt_bug
4b787e0b 1356END(spurious_interrupt_bug)
1da177e4
LT
1357
1358#ifdef CONFIG_X86_MCE
1359 /* runs on exception stack */
1360ENTRY(machine_check)
7effaa88 1361 INTR_FRAME
09402947 1362 PARAVIRT_ADJUST_EXCEPTION_FRAME
1da177e4
LT
1363 pushq $0
1364 CFI_ADJUST_CFA_OFFSET 8
1365 paranoidentry do_machine_check
2601e64d 1366 jmp paranoid_exit1
1da177e4 1367 CFI_ENDPROC
4b787e0b 1368END(machine_check)
1da177e4
LT
1369#endif
1370
2699500b 1371/* Call softirq on interrupt stack. Interrupts are off. */
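/*
 * Same stack-switch pattern as the interrupt entry above: bump
 * pda_irqcount and, if this is the outermost level, move %rsp onto the
 * per-CPU IRQ stack so __do_softirq() does not eat into the task stack.
 */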
ed6b676c 1372ENTRY(call_softirq)
7effaa88 1373 CFI_STARTPROC
2699500b
AK
1374 push %rbp
1375 CFI_ADJUST_CFA_OFFSET 8
1376 CFI_REL_OFFSET rbp,0
1377 mov %rsp,%rbp
1378 CFI_DEF_CFA_REGISTER rbp
ed6b676c 1379 incl %gs:pda_irqcount
2699500b
AK
1380 cmove %gs:pda_irqstackptr,%rsp
1381 push %rbp # backlink for old unwinder
ed6b676c 1382 call __do_softirq
2699500b 1383 leaveq
7effaa88 1384 CFI_DEF_CFA_REGISTER rsp
2699500b 1385 CFI_ADJUST_CFA_OFFSET -8
ed6b676c 1386 decl %gs:pda_irqcount
ed6b676c 1387 ret
7effaa88 1388 CFI_ENDPROC
4b787e0b 1389ENDPROC(call_softirq)
75154f40
AK
1390
1391KPROBE_ENTRY(ignore_sysret)
1392 CFI_STARTPROC
1393 mov $-ENOSYS,%eax
1394 sysret
1395 CFI_ENDPROC
1396ENDPROC(ignore_sysret)
3d75e1b8
JF
1397
1398#ifdef CONFIG_XEN
1399ENTRY(xen_hypervisor_callback)
1400 zeroentry xen_do_hypervisor_callback
1401END(xen_hypervisor_callback)
1402
1403/*
1404# A note on the "critical region" in our callback handler.
1405# We want to avoid stacking callback handlers due to events occurring
1406# during handling of the last event. To do this, we keep events disabled
1407# until we've done all processing. HOWEVER, we must enable events before
1408# popping the stack frame (can't be done atomically) and so it would still
1409# be possible to get enough handler activations to overflow the stack.
1410# Although unlikely, bugs of that kind are hard to track down, so we'd
1411# like to avoid the possibility.
1412# So, on entry to the handler we detect whether we interrupted an
1413# existing activation in its critical region -- if so, we pop the current
1414# activation and restart the handler using the previous one.
1415*/
1416ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct pt_regs *)
1417 CFI_STARTPROC
1418/* Since we don't modify %rdi, xen_evtchn_do_upcall(struct pt_regs *) will
1419 see the correct pointer to the pt_regs */
1420 movq %rdi, %rsp # we don't return, adjust the stack frame
1421 CFI_ENDPROC
1422 CFI_DEFAULT_STACK
142311: incl %gs:pda_irqcount
1424 movq %rsp,%rbp
1425 CFI_DEF_CFA_REGISTER rbp
1426 cmovzq %gs:pda_irqstackptr,%rsp
1427 pushq %rbp # backlink for old unwinder
1428 call xen_evtchn_do_upcall
1429 popq %rsp
1430 CFI_DEF_CFA_REGISTER rsp
1431 decl %gs:pda_irqcount
1432 jmp error_exit
1433 CFI_ENDPROC
1434END(do_hypervisor_callback)
1435
1436/*
1437# Hypervisor uses this for application faults while it executes.
1438# We get here for two reasons:
1439# 1. Fault while reloading DS, ES, FS or GS
1440# 2. Fault while executing IRET
1441# Category 1 we do not need to fix up as Xen has already reloaded all segment
1442# registers that could be reloaded and zeroed the others.
1443# Category 2 we fix up by killing the current process. We cannot use the
1444# normal Linux return path in this case because if we use the IRET hypercall
1445# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
1446# We distinguish between categories by comparing each saved segment register
1447# with its current contents: any discrepancy means we are in category 1.
1448*/
1449ENTRY(xen_failsafe_callback)
4a5c3e77
JF
1450 framesz = (RIP-0x30) /* workaround buggy gas */
1451 _frame framesz
3d75e1b8
JF
1452 CFI_REL_OFFSET rcx, 0
1453 CFI_REL_OFFSET r11, 8
1454 movw %ds,%cx
1455 cmpw %cx,0x10(%rsp)
1456 CFI_REMEMBER_STATE
1457 jne 1f
1458 movw %es,%cx
1459 cmpw %cx,0x18(%rsp)
1460 jne 1f
1461 movw %fs,%cx
1462 cmpw %cx,0x20(%rsp)
1463 jne 1f
1464 movw %gs,%cx
1465 cmpw %cx,0x28(%rsp)
1466 jne 1f
1467 /* All segments match their saved values => Category 2 (Bad IRET). */
1468 movq (%rsp),%rcx
1469 CFI_RESTORE rcx
1470 movq 8(%rsp),%r11
1471 CFI_RESTORE r11
1472 addq $0x30,%rsp
1473 CFI_ADJUST_CFA_OFFSET -0x30
4a5c3e77
JF
1474 pushq $0
1475 CFI_ADJUST_CFA_OFFSET 8
1476 pushq %r11
1477 CFI_ADJUST_CFA_OFFSET 8
1478 pushq %rcx
1479 CFI_ADJUST_CFA_OFFSET 8
1480 jmp general_protection
3d75e1b8
JF
1481 CFI_RESTORE_STATE
14821: /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
1483 movq (%rsp),%rcx
1484 CFI_RESTORE rcx
1485 movq 8(%rsp),%r11
1486 CFI_RESTORE r11
1487 addq $0x30,%rsp
1488 CFI_ADJUST_CFA_OFFSET -0x30
1489 pushq $0
1490 CFI_ADJUST_CFA_OFFSET 8
1491 SAVE_ALL
1492 jmp error_exit
1493 CFI_ENDPROC
3d75e1b8
JF
1494END(xen_failsafe_callback)
1495
1496#endif /* CONFIG_XEN */