/*
 * linux/arch/x86_64/entry.S
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 * Copyright (C) 2000, 2001, 2002  Andi Kleen SuSE Labs
 * Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * NOTE: This code handles signal recognition, which happens every time
 * after an interrupt and after each system call.
 *
 * Normal syscalls and interrupts don't save a full stack frame; this is
 * only done for syscall tracing, signals or fork/exec et al.
 *
 * A note on terminology:
 * - top of stack: architecture-defined interrupt frame from SS to RIP
 * at the top of the kernel process stack.
 * - partial stack frame: partially saved registers up to R11.
 * - full stack frame: like a partial stack frame, but all registers saved.
 *
 * Some macro usage:
 * - CFI macros are used to generate dwarf2 unwind information for better
 * backtraces. They don't change any code.
 * - SAVE_ALL/RESTORE_ALL - Save/restore all registers
 * - SAVE_ARGS/RESTORE_ARGS - Save/restore registers that C functions modify.
 * There are unfortunately lots of special cases where some registers
 * are not touched. The macro is a big mess that should be cleaned up.
 * - SAVE_REST/RESTORE_REST - Handle the registers not saved by SAVE_ARGS.
 * Gives a full stack frame.
 * - ENTRY/END - Define functions in the symbol table.
 * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - Fix up the hardware stack
 * frame that is otherwise undefined after a SYSCALL.
 * - TRACE_IRQ_* - Trace hard interrupt state for lock debugging.
 * - errorentry/paranoidentry/zeroentry - Define exception entry points.
 */

#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/dwarf2.h>
#include <asm/calling.h>
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page.h>
#include <asm/irqflags.h>
#include <asm/paravirt.h>
#include <asm/ftrace.h>

/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
#include <linux/elf-em.h>
#define AUDIT_ARCH_X86_64	(EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#define __AUDIT_ARCH_64BIT	0x80000000
#define __AUDIT_ARCH_LE		0x40000000

	.code64
#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(mcount)
	retq
END(mcount)

ENTRY(ftrace_caller)
	cmpl $0, function_trace_stop
	jne  ftrace_stub

	/* taken from glibc */
	subq $0x38, %rsp
	movq %rax, (%rsp)
	movq %rcx, 8(%rsp)
	movq %rdx, 16(%rsp)
	movq %rsi, 24(%rsp)
	movq %rdi, 32(%rsp)
	movq %r8, 40(%rsp)
	movq %r9, 48(%rsp)

	movq 0x38(%rsp), %rdi
	movq 8(%rbp), %rsi
	subq $MCOUNT_INSN_SIZE, %rdi

.globl ftrace_call
ftrace_call:
	call ftrace_stub

	movq 48(%rsp), %r9
	movq 40(%rsp), %r8
	movq 32(%rsp), %rdi
	movq 24(%rsp), %rsi
	movq 16(%rsp), %rdx
	movq 8(%rsp), %rcx
	movq (%rsp), %rax
	addq $0x38, %rsp

.globl ftrace_stub
ftrace_stub:
	retq
END(ftrace_caller)

#else /* ! CONFIG_DYNAMIC_FTRACE */
ENTRY(mcount)
	cmpl $0, function_trace_stop
	jne  ftrace_stub

	cmpq $ftrace_stub, ftrace_trace_function
	jnz trace
.globl ftrace_stub
ftrace_stub:
	retq

trace:
	/* taken from glibc */
	subq $0x38, %rsp
	movq %rax, (%rsp)
	movq %rcx, 8(%rsp)
	movq %rdx, 16(%rsp)
	movq %rsi, 24(%rsp)
	movq %rdi, 32(%rsp)
	movq %r8, 40(%rsp)
	movq %r9, 48(%rsp)

	movq 0x38(%rsp), %rdi
	movq 8(%rbp), %rsi
	subq $MCOUNT_INSN_SIZE, %rdi

	call *ftrace_trace_function

	movq 48(%rsp), %r9
	movq 40(%rsp), %r8
	movq 32(%rsp), %rdi
	movq 24(%rsp), %rsi
	movq 16(%rsp), %rdx
	movq 8(%rsp), %rcx
	movq (%rsp), %rax
	addq $0x38, %rsp

	jmp ftrace_stub
END(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */
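
/*
 * For reference: with the register layout above, the tracer reached via
 * ftrace_call/ftrace_trace_function is invoked roughly like the C call
 *
 *	tracer(ip, parent_ip);
 *
 * where %rdi (ip) is the address of the mcount call site in the
 * instrumented function (the return address at 0x38(%rsp) minus
 * MCOUNT_INSN_SIZE) and %rsi (parent_ip) is the caller's return address,
 * taken from 8(%rbp). The names "tracer", "ip" and "parent_ip" are
 * illustrative only.
 */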

#ifndef CONFIG_PREEMPT
#define retint_kernel retint_restore_args
#endif

#ifdef CONFIG_PARAVIRT
ENTRY(native_usergs_sysret64)
	swapgs
	sysretq
#endif /* CONFIG_PARAVIRT */

.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
#ifdef CONFIG_TRACE_IRQFLAGS
	bt   $9,EFLAGS-\offset(%rsp)	/* interrupts off? */
	jnc  1f
	TRACE_IRQS_ON
1:
#endif
.endm

/*
 * C code is not supposed to know about undefined top of stack. Every time
 * a C function with a pt_regs argument is called from the SYSCALL based
 * fast path FIXUP_TOP_OF_STACK is needed.
 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
 * manipulation.
 */

	/* %rsp: at FRAMEEND */
	.macro FIXUP_TOP_OF_STACK tmp
	movq	%gs:pda_oldrsp,\tmp
	movq	\tmp,RSP(%rsp)
	movq	$__USER_DS,SS(%rsp)
	movq	$__USER_CS,CS(%rsp)
	movq	$-1,RCX(%rsp)
	movq	R11(%rsp),\tmp	/* get eflags */
	movq	\tmp,EFLAGS(%rsp)
	.endm

	.macro RESTORE_TOP_OF_STACK tmp,offset=0
	movq	RSP-\offset(%rsp),\tmp
	movq	\tmp,%gs:pda_oldrsp
	movq	EFLAGS-\offset(%rsp),\tmp
	movq	\tmp,R11-\offset(%rsp)
	.endm
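
/*
 * Illustrative sketch (offsets are the pt_regs slots from asm-offsets)
 * of the frame top the two macros above maintain, as left by
 * FIXUP_TOP_OF_STACK:
 *
 *	SS(%rsp)     = __USER_DS
 *	RSP(%rsp)    = user rsp, taken from %gs:pda_oldrsp
 *	EFLAGS(%rsp) = user eflags, copied from the R11 slot
 *	CS(%rsp)     = __USER_CS
 *	RIP(%rsp)    = user rip, saved earlier by the syscall entry path
 *	RCX(%rsp)    = -1 (SYSCALL clobbered %rcx, so the slot is poisoned)
 */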

	.macro FAKE_STACK_FRAME child_rip
	/* push in order ss, rsp, eflags, cs, rip */
	xorl	%eax, %eax
	pushq	$__KERNEL_DS	/* ss */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	ss,0*/
	pushq	%rax		/* rsp */
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET	rsp,0
	pushq	$(1<<9)		/* eflags - interrupts on */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	rflags,0*/
	pushq	$__KERNEL_CS	/* cs */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	cs,0*/
	pushq	\child_rip	/* rip */
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET	rip,0
	pushq	%rax		/* orig rax */
	CFI_ADJUST_CFA_OFFSET	8
	.endm

	.macro UNFAKE_STACK_FRAME
	addq	$8*6, %rsp
	CFI_ADJUST_CFA_OFFSET	-(6*8)
	.endm

	.macro	CFI_DEFAULT_STACK start=1
	.if \start
	CFI_STARTPROC	simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA	rsp,SS+8
	.else
	CFI_DEF_CFA_OFFSET SS+8
	.endif
	CFI_REL_OFFSET	r15,R15
	CFI_REL_OFFSET	r14,R14
	CFI_REL_OFFSET	r13,R13
	CFI_REL_OFFSET	r12,R12
	CFI_REL_OFFSET	rbp,RBP
	CFI_REL_OFFSET	rbx,RBX
	CFI_REL_OFFSET	r11,R11
	CFI_REL_OFFSET	r10,R10
	CFI_REL_OFFSET	r9,R9
	CFI_REL_OFFSET	r8,R8
	CFI_REL_OFFSET	rax,RAX
	CFI_REL_OFFSET	rcx,RCX
	CFI_REL_OFFSET	rdx,RDX
	CFI_REL_OFFSET	rsi,RSI
	CFI_REL_OFFSET	rdi,RDI
	CFI_REL_OFFSET	rip,RIP
	/*CFI_REL_OFFSET	cs,CS*/
	/*CFI_REL_OFFSET	rflags,EFLAGS*/
	CFI_REL_OFFSET	rsp,RSP
	/*CFI_REL_OFFSET	ss,SS*/
	.endm
/*
 * A newly forked process directly context switches into this.
 */
/* rdi:	prev */
ENTRY(ret_from_fork)
	CFI_DEFAULT_STACK
	push kernel_eflags(%rip)
	CFI_ADJUST_CFA_OFFSET 8
	popf				# reset kernel eflags
	CFI_ADJUST_CFA_OFFSET -8
	call schedule_tail
	GET_THREAD_INFO(%rcx)
	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
	jnz rff_trace
rff_action:
	RESTORE_REST
	testl $3,CS-ARGOFFSET(%rsp)	# from kernel_thread?
	je   int_ret_from_sys_call
	testl $_TIF_IA32,TI_flags(%rcx)
	jnz  int_ret_from_sys_call
	RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
	jmp ret_from_sys_call
rff_trace:
	movq %rsp,%rdi
	call syscall_trace_leave
	GET_THREAD_INFO(%rcx)
	jmp rff_action
	CFI_ENDPROC
END(ret_from_fork)

/*
 * System call entry. Up to 6 arguments in registers are supported.
 *
 * SYSCALL does not save anything on the stack and does not change the
 * stack pointer.
 */

/*
 * Register setup:
 * rax  system call number
 * rdi  arg0
 * rcx  return address for syscall/sysret, C arg3
 * rsi  arg1
 * rdx  arg2
 * r10  arg3	(--> moved to rcx for C)
 * r8   arg4
 * r9   arg5
 * r11  eflags for syscall/sysret, temporary for C
 * r12-r15,rbp,rbx saved by C code, not touched.
 *
 * Interrupts are off on entry.
 * Only called from user space.
 *
 * XXX	if we had a free scratch register we could save the RSP into the
 *	stack frame and report it properly in ps. Unfortunately we haven't.
 *
 * When the user can change the frames, always force IRET. That is because
 * it deals with uncanonical addresses better. SYSRET has trouble
 * with them due to bugs in both AMD and Intel CPUs.
 */

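/*
 * A minimal user-space sketch of the convention documented above (not
 * part of the kernel; write(1, buf, 14) is only an illustration):
 *
 *	movq $1, %rax		# __NR_write on x86-64
 *	movq $1, %rdi		# arg0: fd
 *	leaq buf(%rip), %rsi	# arg1: buffer
 *	movq $14, %rdx		# arg2: count
 *	syscall			# %rcx/%r11 are clobbered by the CPU
 */
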
ENTRY(system_call)
	CFI_STARTPROC	simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA	rsp,PDA_STACKOFFSET
	CFI_REGISTER	rip,rcx
	/*CFI_REGISTER	rflags,r11*/
	SWAPGS_UNSAFE_STACK
	/*
	 * A hypervisor implementation might want to use a label
	 * after the swapgs, so that it can do the swapgs
	 * for the guest and jump here on syscall.
	 */
ENTRY(system_call_after_swapgs)

	movq	%rsp,%gs:pda_oldrsp
	movq	%gs:pda_kernelstack,%rsp
	/*
	 * No need to follow this irqs off/on section - it's straight
	 * and short:
	 */
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_ARGS 8,1
	movq  %rax,ORIG_RAX-ARGOFFSET(%rsp)
	movq  %rcx,RIP-ARGOFFSET(%rsp)
	CFI_REL_OFFSET rip,RIP-ARGOFFSET
	GET_THREAD_INFO(%rcx)
	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
	jnz tracesys
system_call_fastpath:
	cmpq $__NR_syscall_max,%rax
	ja badsys
	movq %r10,%rcx
	call *sys_call_table(,%rax,8)	# XXX: rip relative
	movq %rax,RAX-ARGOFFSET(%rsp)
/*
 * Syscall return path ending with SYSRET (fast path)
 * Has incomplete stack frame and undefined top of stack.
 */
ret_from_sys_call:
	movl $_TIF_ALLWORK_MASK,%edi
	/* edi:	flagmask */
sysret_check:
	LOCKDEP_SYS_EXIT
	GET_THREAD_INFO(%rcx)
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	movl TI_flags(%rcx),%edx
	andl %edi,%edx
	jnz  sysret_careful
	CFI_REMEMBER_STATE
	/*
	 * sysretq will re-enable interrupts:
	 */
	TRACE_IRQS_ON
	movq RIP-ARGOFFSET(%rsp),%rcx
	CFI_REGISTER	rip,rcx
	RESTORE_ARGS 0,-ARG_SKIP,1
	/*CFI_REGISTER	rflags,r11*/
	movq	%gs:pda_oldrsp, %rsp
	USERGS_SYSRET64

	CFI_RESTORE_STATE
	/* Handle reschedules */
	/* edx:	work, edi: workmask */
sysret_careful:
	bt $TIF_NEED_RESCHED,%edx
	jnc sysret_signal
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	call schedule
	popq  %rdi
	CFI_ADJUST_CFA_OFFSET -8
	jmp sysret_check

	/* Handle a signal */
sysret_signal:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
#ifdef CONFIG_AUDITSYSCALL
	bt $TIF_SYSCALL_AUDIT,%edx
	jc sysret_audit
#endif
	/* edx:	work flags (arg3) */
	leaq do_notify_resume(%rip),%rax
	leaq -ARGOFFSET(%rsp),%rdi	# &pt_regs -> arg1
	xorl %esi,%esi			# oldset -> arg2
	call ptregscall_common
	movl $_TIF_WORK_MASK,%edi
	/* Use IRET because user could have changed frame. This
	   works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp int_with_check

badsys:
	movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
	jmp ret_from_sys_call

#ifdef CONFIG_AUDITSYSCALL
	/*
	 * Fast path for syscall audit without full syscall trace.
	 * We just call audit_syscall_entry() directly, and then
	 * jump back to the normal fast path.
	 */
auditsys:
	movq %r10,%r9			/* 6th arg: 4th syscall arg */
	movq %rdx,%r8			/* 5th arg: 3rd syscall arg */
	movq %rsi,%rcx			/* 4th arg: 2nd syscall arg */
	movq %rdi,%rdx			/* 3rd arg: 1st syscall arg */
	movq %rax,%rsi			/* 2nd arg: syscall number */
	movl $AUDIT_ARCH_X86_64,%edi	/* 1st arg: audit arch */
	call audit_syscall_entry
	LOAD_ARGS 0			/* reload call-clobbered registers */
	jmp system_call_fastpath

	/*
	 * Return fast path for syscall audit. Call audit_syscall_exit()
	 * directly and then jump back to the fast path with TIF_SYSCALL_AUDIT
	 * masked off.
	 */
sysret_audit:
	movq %rax,%rsi		/* second arg, syscall return value */
	cmpq $0,%rax		/* is it < 0? */
	setl %al		/* 1 if so, 0 if not */
	movzbl %al,%edi		/* zero-extend that into %edi */
	inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
	call audit_syscall_exit
	movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
	jmp sysret_check
#endif /* CONFIG_AUDITSYSCALL */
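
/*
 * The register shuffling in auditsys/sysret_audit assumes C helpers
 * shaped roughly like the following sketch; the parameter names are
 * illustrative, the audit code has the authoritative prototypes:
 *
 *	void audit_syscall_entry(int arch, long major,
 *				 unsigned long a0, unsigned long a1,
 *				 unsigned long a2, unsigned long a3);
 *	void audit_syscall_exit(int status, long return_code);
 */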

	/* Do syscall tracing */
tracesys:
#ifdef CONFIG_AUDITSYSCALL
	testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
	jz auditsys
#endif
	SAVE_REST
	movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
	FIXUP_TOP_OF_STACK %rdi
	movq %rsp,%rdi
	call syscall_trace_enter
	/*
	 * Reload arg registers from stack in case ptrace changed them.
	 * We don't reload %rax because syscall_trace_enter() returned
	 * the value it wants us to use in the table lookup.
	 */
	LOAD_ARGS ARGOFFSET, 1
	RESTORE_REST
	cmpq $__NR_syscall_max,%rax
	ja   int_ret_from_sys_call	/* RAX(%rsp) set to -ENOSYS above */
	movq %r10,%rcx			/* fixup for C */
	call *sys_call_table(,%rax,8)
	movq %rax,RAX-ARGOFFSET(%rsp)
	/* Use IRET because user could have changed frame */

/*
 * Syscall return path ending with IRET.
 * Has correct top of stack, but partial stack frame.
 */
	.globl int_ret_from_sys_call
	.globl int_with_check
int_ret_from_sys_call:
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	testl $3,CS-ARGOFFSET(%rsp)
	je retint_restore_args
	movl $_TIF_ALLWORK_MASK,%edi
	/* edi:	mask to check */
int_with_check:
	LOCKDEP_SYS_EXIT_IRQ
	GET_THREAD_INFO(%rcx)
	movl TI_flags(%rcx),%edx
	andl %edi,%edx
	jnz  int_careful
	andl $~TS_COMPAT,TI_status(%rcx)
	jmp  retint_swapgs

	/* Either reschedule or signal or syscall exit tracking needed. */
	/* First do a reschedule test. */
	/* edx:	work, edi: workmask */
int_careful:
	bt $TIF_NEED_RESCHED,%edx
	jnc int_very_careful
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	call schedule
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp int_with_check

	/* handle signals and tracing -- both require a full stack frame */
int_very_careful:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_REST
	/* Check for syscall exit trace */
	testl $_TIF_WORK_SYSCALL_EXIT,%edx
	jz int_signal
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	leaq 8(%rsp),%rdi	# &ptregs -> arg1
	call syscall_trace_leave
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi
	jmp int_restore_rest

int_signal:
	testl $_TIF_DO_NOTIFY_MASK,%edx
	jz 1f
	movq %rsp,%rdi		# &ptregs -> arg1
	xorl %esi,%esi		# oldset -> arg2
	call do_notify_resume
1:	movl $_TIF_WORK_MASK,%edi
int_restore_rest:
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp int_with_check
	CFI_ENDPROC
END(system_call)

/*
 * Certain special system calls that need to save a complete full stack frame.
 */

	.macro PTREGSCALL label,func,arg
	.globl \label
\label:
	leaq	\func(%rip),%rax
	leaq	-ARGOFFSET+8(%rsp),\arg	/* 8 for return address */
	jmp	ptregscall_common
END(\label)
	.endm
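
	/*
	 * As an illustration, "PTREGSCALL stub_clone, sys_clone, %r8"
	 * expands to roughly:
	 *
	 *	.globl stub_clone
	 *	stub_clone:
	 *		leaq	sys_clone(%rip),%rax
	 *		leaq	-ARGOFFSET+8(%rsp),%r8
	 *		jmp	ptregscall_common
	 *
	 * i.e. \arg receives a pointer to the frame so the C handler can
	 * see the full register state.
	 */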

	CFI_STARTPROC

	PTREGSCALL stub_clone, sys_clone, %r8
	PTREGSCALL stub_fork, sys_fork, %rdi
	PTREGSCALL stub_vfork, sys_vfork, %rdi
	PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
	PTREGSCALL stub_iopl, sys_iopl, %rsi

ENTRY(ptregscall_common)
	popq %r11
	CFI_ADJUST_CFA_OFFSET -8
	CFI_REGISTER rip, r11
	SAVE_REST
	movq %r11, %r15
	CFI_REGISTER rip, r15
	FIXUP_TOP_OF_STACK %r11
	call *%rax
	RESTORE_TOP_OF_STACK %r11
	movq %r15, %r11
	CFI_REGISTER rip, r11
	RESTORE_REST
	pushq %r11
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rip, 0
	ret
	CFI_ENDPROC
END(ptregscall_common)

ENTRY(stub_execve)
	CFI_STARTPROC
	popq %r11
	CFI_ADJUST_CFA_OFFSET -8
	CFI_REGISTER rip, r11
	SAVE_REST
	FIXUP_TOP_OF_STACK %r11
	movq %rsp, %rcx
	call sys_execve
	RESTORE_TOP_OF_STACK %r11
	movq %rax,RAX(%rsp)
	RESTORE_REST
	jmp int_ret_from_sys_call
	CFI_ENDPROC
END(stub_execve)

/*
 * sigreturn is special because it needs to restore all registers on return.
 * This cannot be done with SYSRET, so use the IRET return path instead.
 */
ENTRY(stub_rt_sigreturn)
	CFI_STARTPROC
	addq $8, %rsp
	CFI_ADJUST_CFA_OFFSET -8
	SAVE_REST
	movq %rsp,%rdi
	FIXUP_TOP_OF_STACK %r11
	call sys_rt_sigreturn
	movq %rax,RAX(%rsp)	# fixme, this could be done at the higher layer
	RESTORE_REST
	jmp int_ret_from_sys_call
	CFI_ENDPROC
END(stub_rt_sigreturn)

/*
 * initial frame state for interrupts and exceptions
 */
	.macro _frame ref
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA rsp,SS+8-\ref
	/*CFI_REL_OFFSET ss,SS-\ref*/
	CFI_REL_OFFSET rsp,RSP-\ref
	/*CFI_REL_OFFSET rflags,EFLAGS-\ref*/
	/*CFI_REL_OFFSET cs,CS-\ref*/
	CFI_REL_OFFSET rip,RIP-\ref
	.endm

/* initial frame state for interrupts (and exceptions without error code) */
#define INTR_FRAME _frame RIP
/* initial frame state for exceptions with error code (and interrupts with
   vector already pushed) */
#define XCPT_FRAME _frame ORIG_RAX
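
/*
 * For orientation: on an interrupt or exception the CPU pushes, from
 * higher to lower addresses, SS, RSP, RFLAGS, CS, RIP and, for some
 * exceptions, an error code. "_frame RIP" therefore describes a frame
 * whose top word is the saved RIP, while "_frame ORIG_RAX" accounts for
 * the extra error-code/vector word below it.
 */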

/*
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only callee clobbered registers in fast path.
 *
 * Entry runs with interrupts off.
 */

/* 0(%rsp): interrupt number */
	.macro interrupt func
	cld
	SAVE_ARGS
	leaq -ARGOFFSET(%rsp),%rdi	# arg1 for handler
	pushq %rbp
	/*
	 * Save rbp twice: One is for marking the stack frame, as usual, and the
	 * other, to fill pt_regs properly. This is because bx comes right
	 * before the last saved register in that structure, and not bp. If the
	 * base pointer were in the place bx is today, this would not be needed.
	 */
	movq %rbp, -8(%rsp)
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET		rbp, 0
	movq %rsp,%rbp
	CFI_DEF_CFA_REGISTER	rbp
	testl $3,CS(%rdi)
	je 1f
	SWAPGS
	/* irqcount is used to check if a CPU is already on an interrupt
	   stack or not. While this is essentially redundant with preempt_count
	   it is a little cheaper to use a separate counter in the PDA
	   (short of moving irq_enter into assembly, which would be too
	   much work) */
1:	incl %gs:pda_irqcount
	cmoveq %gs:pda_irqstackptr,%rsp
	push %rbp			# backlink for old unwinder
	/*
	 * We entered an interrupt context - irqs are off:
	 */
	TRACE_IRQS_OFF
	call \func
	.endm

ENTRY(common_interrupt)
	XCPT_FRAME
	interrupt do_IRQ
	/* 0(%rsp): oldrsp-ARGOFFSET */
ret_from_intr:
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	decl %gs:pda_irqcount
	leaveq
	CFI_DEF_CFA_REGISTER	rsp
	CFI_ADJUST_CFA_OFFSET	-8
exit_intr:
	GET_THREAD_INFO(%rcx)
	testl $3,CS-ARGOFFSET(%rsp)
	je retint_kernel

	/* Interrupt came from user space */
	/*
	 * Has a correct top of stack, but a partial stack frame
	 * %rcx: thread info. Interrupts off.
	 */
retint_with_reschedule:
	movl $_TIF_WORK_MASK,%edi
retint_check:
	LOCKDEP_SYS_EXIT_IRQ
	movl TI_flags(%rcx),%edx
	andl %edi,%edx
	CFI_REMEMBER_STATE
	jnz  retint_careful

retint_swapgs:		/* return to user-space */
	/*
	 * The iretq could re-enable interrupts:
	 */
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_IRETQ
	SWAPGS
	jmp restore_args

retint_restore_args:	/* return to kernel space */
	DISABLE_INTERRUPTS(CLBR_ANY)
	/*
	 * The iretq could re-enable interrupts:
	 */
	TRACE_IRQS_IRETQ
restore_args:
	RESTORE_ARGS 0,8,0

irq_return:
	INTERRUPT_RETURN

	.section __ex_table, "a"
	.quad irq_return, bad_iret
	.previous

#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
	iretq

	.section __ex_table,"a"
	.quad native_iret, bad_iret
	.previous
#endif

	.section .fixup,"ax"
bad_iret:
	/*
	 * The iret traps when the %cs or %ss being restored is bogus.
	 * We've lost the original trap vector and error code.
	 * #GPF is the most likely one to get for an invalid selector.
	 * So pretend we completed the iret and took the #GPF in user mode.
	 *
	 * We are now running with the kernel GS after exception recovery.
	 * But error_entry expects us to have user GS to match the user %cs,
	 * so swap back.
	 */
	pushq $0

	SWAPGS
	jmp general_protection

	.previous

	/* edi: workmask, edx: work */
retint_careful:
	CFI_RESTORE_STATE
	bt   $TIF_NEED_RESCHED,%edx
	jnc  retint_signal
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET	8
	call schedule
	popq %rdi
	CFI_ADJUST_CFA_OFFSET	-8
	GET_THREAD_INFO(%rcx)
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp retint_check

retint_signal:
	testl $_TIF_DO_NOTIFY_MASK,%edx
	jz    retint_swapgs
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_REST
	movq $-1,ORIG_RAX(%rsp)
	xorl %esi,%esi		# oldset
	movq %rsp,%rdi		# &pt_regs
	call do_notify_resume
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	GET_THREAD_INFO(%rcx)
	jmp retint_with_reschedule

#ifdef CONFIG_PREEMPT
	/* Returning to kernel space. Check if we need preemption */
	/* rcx:	threadinfo. interrupts off. */
ENTRY(retint_kernel)
	cmpl $0,TI_preempt_count(%rcx)
	jnz  retint_restore_args
	bt   $TIF_NEED_RESCHED,TI_flags(%rcx)
	jnc  retint_restore_args
	bt   $9,EFLAGS-ARGOFFSET(%rsp)	/* interrupts off? */
	jnc  retint_restore_args
	call preempt_schedule_irq
	jmp exit_intr
#endif

	CFI_ENDPROC
END(common_interrupt)

/*
 * APIC interrupts.
 */
	.macro apicinterrupt num,func
	INTR_FRAME
	pushq $~(\num)
	CFI_ADJUST_CFA_OFFSET 8
	interrupt \func
	jmp ret_from_intr
	CFI_ENDPROC
	.endm

ENTRY(thermal_interrupt)
	apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
END(thermal_interrupt)

ENTRY(threshold_interrupt)
	apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
END(threshold_interrupt)

#ifdef CONFIG_SMP
ENTRY(reschedule_interrupt)
	apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
END(reschedule_interrupt)

	.macro INVALIDATE_ENTRY num
ENTRY(invalidate_interrupt\num)
	apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
END(invalidate_interrupt\num)
	.endm

	INVALIDATE_ENTRY 0
	INVALIDATE_ENTRY 1
	INVALIDATE_ENTRY 2
	INVALIDATE_ENTRY 3
	INVALIDATE_ENTRY 4
	INVALIDATE_ENTRY 5
	INVALIDATE_ENTRY 6
	INVALIDATE_ENTRY 7

ENTRY(call_function_interrupt)
	apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
END(call_function_interrupt)
ENTRY(call_function_single_interrupt)
	apicinterrupt CALL_FUNCTION_SINGLE_VECTOR,smp_call_function_single_interrupt
END(call_function_single_interrupt)
ENTRY(irq_move_cleanup_interrupt)
	apicinterrupt IRQ_MOVE_CLEANUP_VECTOR,smp_irq_move_cleanup_interrupt
END(irq_move_cleanup_interrupt)
#endif

ENTRY(apic_timer_interrupt)
	apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
END(apic_timer_interrupt)

ENTRY(uv_bau_message_intr1)
	apicinterrupt 220,uv_bau_message_interrupt
END(uv_bau_message_intr1)

ENTRY(error_interrupt)
	apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
END(error_interrupt)

ENTRY(spurious_interrupt)
	apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
END(spurious_interrupt)

/*
 * Exception entry points.
 */
	.macro zeroentry sym
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $0	/* push error code/oldrax */
	CFI_ADJUST_CFA_OFFSET 8
	pushq %rax	/* push real oldrax to the rdi slot */
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rax,0
	leaq  \sym(%rip),%rax
	jmp error_entry
	CFI_ENDPROC
	.endm

	.macro errorentry sym
	XCPT_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq %rax
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rax,0
	leaq  \sym(%rip),%rax
	jmp error_entry
	CFI_ENDPROC
	.endm
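
/*
 * Sketch of the stack seen by error_entry in both cases (top down):
 * the hardware frame (SS..RIP), then an error code slot - pushed as $0
 * by zeroentry, supplied by the CPU for errorentry - and finally the
 * saved %rax, whose slot is later reused for RDI.
 */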

	/* error code is on the stack already */
	/* handle NMI like exceptions that can happen everywhere */
	.macro paranoidentry sym, ist=0, irqtrace=1
	SAVE_ALL
	cld
	movl $1,%ebx
	movl $MSR_GS_BASE,%ecx
	rdmsr
	testl %edx,%edx
	js    1f
	SWAPGS
	xorl  %ebx,%ebx
1:
	.if \ist
	movq	%gs:pda_data_offset, %rbp
	.endif
	.if \irqtrace
	TRACE_IRQS_OFF
	.endif
	movq %rsp,%rdi
	movq ORIG_RAX(%rsp),%rsi
	movq $-1,ORIG_RAX(%rsp)
	.if \ist
	subq	$EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
	.endif
	call \sym
	.if \ist
	addq	$EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
	.endif
	DISABLE_INTERRUPTS(CLBR_NONE)
	.if \irqtrace
	TRACE_IRQS_OFF
	.endif
	.endm

	/*
	 * "Paranoid" exit path from exception stack.
	 * Paranoid because this is used by NMIs and cannot take
	 * any kernel state for granted.
	 * We don't do kernel preemption checks here, because only
	 * NMI should be common and it does not enable IRQs and
	 * cannot get reschedule ticks.
	 *
	 * "trace" is 0 for the NMI handler only, because irq-tracing
	 * is fundamentally NMI-unsafe. (we cannot change the soft and
	 * hard flags at once, atomically)
	 */
	.macro paranoidexit trace=1
	/* ebx:	no swapgs flag */
paranoid_exit\trace:
	testl %ebx,%ebx			/* swapgs needed? */
	jnz paranoid_restore\trace
	testl $3,CS(%rsp)
	jnz   paranoid_userspace\trace
paranoid_swapgs\trace:
	.if \trace
	TRACE_IRQS_IRETQ 0
	.endif
	SWAPGS_UNSAFE_STACK
paranoid_restore\trace:
	RESTORE_ALL 8
	jmp irq_return
paranoid_userspace\trace:
	GET_THREAD_INFO(%rcx)
	movl TI_flags(%rcx),%ebx
	andl $_TIF_WORK_MASK,%ebx
	jz paranoid_swapgs\trace
	movq %rsp,%rdi			/* &pt_regs */
	call sync_regs
	movq %rax,%rsp			/* switch stack for scheduling */
	testl $_TIF_NEED_RESCHED,%ebx
	jnz paranoid_schedule\trace
	movl %ebx,%edx			/* arg3: thread flags */
	.if \trace
	TRACE_IRQS_ON
	.endif
	ENABLE_INTERRUPTS(CLBR_NONE)
	xorl %esi,%esi			/* arg2: oldset */
	movq %rsp,%rdi			/* arg1: &pt_regs */
	call do_notify_resume
	DISABLE_INTERRUPTS(CLBR_NONE)
	.if \trace
	TRACE_IRQS_OFF
	.endif
	jmp paranoid_userspace\trace
paranoid_schedule\trace:
	.if \trace
	TRACE_IRQS_ON
	.endif
	ENABLE_INTERRUPTS(CLBR_ANY)
	call schedule
	DISABLE_INTERRUPTS(CLBR_ANY)
	.if \trace
	TRACE_IRQS_OFF
	.endif
	jmp paranoid_userspace\trace
	CFI_ENDPROC
	.endm

/*
 * Exception entry point. This expects an error code/orig_rax on the stack
 * and the exception handler in %rax.
 */
KPROBE_ENTRY(error_entry)
	_frame RDI
	CFI_REL_OFFSET rax,0
	/* rdi slot contains rax, oldrax contains error code */
	cld
	subq $14*8,%rsp
	CFI_ADJUST_CFA_OFFSET	(14*8)
	movq %rsi,13*8(%rsp)
	CFI_REL_OFFSET	rsi,RSI
	movq 14*8(%rsp),%rsi	/* load rax from rdi slot */
	CFI_REGISTER	rax,rsi
	movq %rdx,12*8(%rsp)
	CFI_REL_OFFSET	rdx,RDX
	movq %rcx,11*8(%rsp)
	CFI_REL_OFFSET	rcx,RCX
	movq %rsi,10*8(%rsp)	/* store rax */
	CFI_REL_OFFSET	rax,RAX
	movq %r8, 9*8(%rsp)
	CFI_REL_OFFSET	r8,R8
	movq %r9, 8*8(%rsp)
	CFI_REL_OFFSET	r9,R9
	movq %r10,7*8(%rsp)
	CFI_REL_OFFSET	r10,R10
	movq %r11,6*8(%rsp)
	CFI_REL_OFFSET	r11,R11
	movq %rbx,5*8(%rsp)
	CFI_REL_OFFSET	rbx,RBX
	movq %rbp,4*8(%rsp)
	CFI_REL_OFFSET	rbp,RBP
	movq %r12,3*8(%rsp)
	CFI_REL_OFFSET	r12,R12
	movq %r13,2*8(%rsp)
	CFI_REL_OFFSET	r13,R13
	movq %r14,1*8(%rsp)
	CFI_REL_OFFSET	r14,R14
	movq %r15,(%rsp)
	CFI_REL_OFFSET	r15,R15
	xorl %ebx,%ebx
	testl $3,CS(%rsp)
	je error_kernelspace
error_swapgs:
	SWAPGS
error_sti:
	TRACE_IRQS_OFF
	movq %rdi,RDI(%rsp)
	CFI_REL_OFFSET	rdi,RDI
	movq %rsp,%rdi
	movq ORIG_RAX(%rsp),%rsi	/* get error code */
	movq $-1,ORIG_RAX(%rsp)
	call *%rax
	/* ebx:	no swapgs flag (1: don't need swapgs, 0: need it) */
error_exit:
	movl %ebx,%eax
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	GET_THREAD_INFO(%rcx)
	testl %eax,%eax
	jne retint_kernel
	LOCKDEP_SYS_EXIT_IRQ
	movl TI_flags(%rcx),%edx
	movl $_TIF_WORK_MASK,%edi
	andl %edi,%edx
	jnz  retint_careful
	jmp retint_swapgs
	CFI_ENDPROC

error_kernelspace:
	incl %ebx
	/* There are two places in the kernel that can potentially fault with
	   usergs. Handle them here. The exception handlers after
	   iret run with kernel gs again, so don't set the user space flag.
	   B stepping K8s sometimes report a truncated RIP for IRET
	   exceptions returning to compat mode. Check for these here too. */
	leaq irq_return(%rip),%rcx
	cmpq %rcx,RIP(%rsp)
	je error_swapgs
	movl %ecx,%ecx		/* zero extend */
	cmpq %rcx,RIP(%rsp)
	je error_swapgs
	cmpq $gs_change,RIP(%rsp)
	je error_swapgs
	jmp error_sti
KPROBE_END(error_entry)

	/* Reload gs selector with exception handling */
	/* edi:	new selector */
ENTRY(native_load_gs_index)
	CFI_STARTPROC
	pushf
	CFI_ADJUST_CFA_OFFSET 8
	DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI))
	SWAPGS
gs_change:
	movl %edi,%gs
2:	mfence		/* workaround */
	SWAPGS
	popf
	CFI_ADJUST_CFA_OFFSET -8
	ret
	CFI_ENDPROC
ENDPROC(native_load_gs_index)

	.section __ex_table,"a"
	.align 8
	.quad gs_change,bad_gs
	.previous
	.section .fixup,"ax"
	/* running with kernelgs */
bad_gs:
	SWAPGS			/* switch back to user gs */
	xorl %eax,%eax
	movl %eax,%gs
	jmp 2b
	.previous
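
/*
 * C callers reach this through load_gs_index(); a minimal sketch of the
 * assumed interface:
 *
 *	void native_load_gs_index(unsigned selector);
 *
 * The pushf/popf pair restores the caller's IF around the two SWAPGS
 * operations, and the fixup above recovers from a faulting selector
 * load by loading a null %gs instead.
 */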

/*
 * Create a kernel thread.
 *
 * C extern interface:
 *	extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 *
 * asm input arguments:
 *	rdi: fn, rsi: arg, rdx: flags
 */
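/*
 * An illustrative call (worker_fn and the flag mix are examples only;
 * kernel_thread_flags, CLONE_VM|CLONE_UNTRACED by default, is OR'ed in
 * below):
 *
 *	pid = kernel_thread(worker_fn, NULL, CLONE_FS | CLONE_FILES | SIGCHLD);
 */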
ENTRY(kernel_thread)
	CFI_STARTPROC
	FAKE_STACK_FRAME $child_rip
	SAVE_ALL

	# rdi: flags, rsi: usp, rdx: will be &pt_regs
	movq %rdx,%rdi
	orq  kernel_thread_flags(%rip),%rdi
	movq $-1, %rsi
	movq %rsp, %rdx

	xorl %r8d,%r8d
	xorl %r9d,%r9d

	# clone now
	call do_fork
	movq %rax,RAX(%rsp)
	xorl %edi,%edi

	/*
	 * It isn't worth checking for reschedule here,
	 * so internally to the x86_64 port you can rely on kernel_thread()
	 * not to reschedule the child before returning; this avoids the need
	 * of hacks, for example to fork off the per-CPU idle tasks.
	 * [Hopefully no generic code relies on the reschedule -AK]
	 */
	RESTORE_ALL
	UNFAKE_STACK_FRAME
	ret
	CFI_ENDPROC
ENDPROC(kernel_thread)

child_rip:
	pushq $0		# fake return address
	CFI_STARTPROC
	/*
	 * Here we are in the child and the registers are set as they were
	 * at kernel_thread() invocation in the parent.
	 */
	movq %rdi, %rax
	movq %rsi, %rdi
	call *%rax
	# exit
	mov %eax, %edi
	call do_exit
	CFI_ENDPROC
ENDPROC(child_rip)

/*
 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
 *
 * C extern interface:
 *	extern long execve(char *name, char **argv, char **envp)
 *
 * asm input arguments:
 *	rdi: name, rsi: argv, rdx: envp
 *
 * We want to fall back into:
 *	extern long sys_execve(char *name, char **argv, char **envp, struct pt_regs *regs)
 *
 * do_sys_execve asm fallback arguments:
 *	rdi: name, rsi: argv, rdx: envp, rcx: fake frame on the stack
 */
ENTRY(kernel_execve)
	CFI_STARTPROC
	FAKE_STACK_FRAME $0
	SAVE_ALL
	movq %rsp,%rcx
	call sys_execve
	movq %rax, RAX(%rsp)
	RESTORE_REST
	testq %rax,%rax
	je int_ret_from_sys_call
	RESTORE_ARGS
	UNFAKE_STACK_FRAME
	ret
	CFI_ENDPROC
ENDPROC(kernel_execve)

KPROBE_ENTRY(page_fault)
	errorentry do_page_fault
KPROBE_END(page_fault)

ENTRY(coprocessor_error)
	zeroentry do_coprocessor_error
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
	zeroentry do_simd_coprocessor_error
END(simd_coprocessor_error)

ENTRY(device_not_available)
	zeroentry do_device_not_available
END(device_not_available)

	/* runs on exception stack */
KPROBE_ENTRY(debug)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_debug, DEBUG_STACK
	paranoidexit
KPROBE_END(debug)

	/* runs on exception stack */
KPROBE_ENTRY(nmi)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $-1
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_nmi, 0, 0
#ifdef CONFIG_TRACE_IRQFLAGS
	paranoidexit 0
#else
	jmp paranoid_exit1
	CFI_ENDPROC
#endif
KPROBE_END(nmi)

KPROBE_ENTRY(int3)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_int3, DEBUG_STACK
	jmp paranoid_exit1
	CFI_ENDPROC
KPROBE_END(int3)

ENTRY(overflow)
	zeroentry do_overflow
END(overflow)

ENTRY(bounds)
	zeroentry do_bounds
END(bounds)

ENTRY(invalid_op)
	zeroentry do_invalid_op
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
	zeroentry do_coprocessor_segment_overrun
END(coprocessor_segment_overrun)

	/* runs on exception stack */
ENTRY(double_fault)
	XCPT_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	paranoidentry do_double_fault
	jmp paranoid_exit1
	CFI_ENDPROC
END(double_fault)

ENTRY(invalid_TSS)
	errorentry do_invalid_TSS
END(invalid_TSS)

ENTRY(segment_not_present)
	errorentry do_segment_not_present
END(segment_not_present)

	/* runs on exception stack */
ENTRY(stack_segment)
	XCPT_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	paranoidentry do_stack_segment
	jmp paranoid_exit1
	CFI_ENDPROC
END(stack_segment)

KPROBE_ENTRY(general_protection)
	errorentry do_general_protection
KPROBE_END(general_protection)

ENTRY(alignment_check)
	errorentry do_alignment_check
END(alignment_check)

ENTRY(divide_error)
	zeroentry do_divide_error
END(divide_error)

ENTRY(spurious_interrupt_bug)
	zeroentry do_spurious_interrupt_bug
END(spurious_interrupt_bug)

#ifdef CONFIG_X86_MCE
	/* runs on exception stack */
ENTRY(machine_check)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_machine_check
	jmp paranoid_exit1
	CFI_ENDPROC
END(machine_check)
#endif

/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(call_softirq)
	CFI_STARTPROC
	push %rbp
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET rbp,0
	mov  %rsp,%rbp
	CFI_DEF_CFA_REGISTER rbp
	incl %gs:pda_irqcount
	cmove %gs:pda_irqstackptr,%rsp
	push %rbp			# backlink for old unwinder
	call __do_softirq
	leaveq
	CFI_DEF_CFA_REGISTER	rsp
	CFI_ADJUST_CFA_OFFSET	-8
	decl %gs:pda_irqcount
	ret
	CFI_ENDPROC
ENDPROC(call_softirq)
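
/*
 * For reference: the C irq code's do_softirq() calls call_softirq() to
 * run __do_softirq() on the per-CPU irq stack; the asm above only
 * handles the stack switch and the pda_irqcount bookkeeping.
 */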

KPROBE_ENTRY(ignore_sysret)
	CFI_STARTPROC
	mov $-ENOSYS,%eax
	sysret
	CFI_ENDPROC
ENDPROC(ignore_sysret)

#ifdef CONFIG_XEN
ENTRY(xen_hypervisor_callback)
	zeroentry xen_do_hypervisor_callback
END(xen_hypervisor_callback)

/*
# A note on the "critical region" in our callback handler.
# We want to avoid stacking callback handlers due to events occurring
# during handling of the last event. To do this, we keep events disabled
# until we've done all processing. HOWEVER, we must enable events before
# popping the stack frame (can't be done atomically) and so it would still
# be possible to get enough handler activations to overflow the stack.
# Although unlikely, bugs of that kind are hard to track down, so we'd
# like to avoid the possibility.
# So, on entry to the handler we detect whether we interrupted an
# existing activation in its critical region -- if so, we pop the current
# activation and restart the handler using the previous one.
*/
ENTRY(xen_do_hypervisor_callback)   # do_hypervisor_callback(struct pt_regs *)
	CFI_STARTPROC
/* Since we don't modify %rdi, evtchn_do_upcall(struct pt_regs *) will
   see the correct pointer to the pt_regs */
	movq %rdi, %rsp		# we don't return, adjust the stack frame
	CFI_ENDPROC
	CFI_DEFAULT_STACK
11:	incl %gs:pda_irqcount
	movq %rsp,%rbp
	CFI_DEF_CFA_REGISTER rbp
	cmovzq %gs:pda_irqstackptr,%rsp
	pushq %rbp			# backlink for old unwinder
	call xen_evtchn_do_upcall
	popq %rsp
	CFI_DEF_CFA_REGISTER rsp
	decl %gs:pda_irqcount
	jmp  error_exit
	CFI_ENDPROC
END(xen_do_hypervisor_callback)

/*
# Hypervisor uses this for application faults while it executes.
# We get here for two reasons:
#  1. Fault while reloading DS, ES, FS or GS
#  2. Fault while executing IRET
# Category 1 we do not need to fix up as Xen has already reloaded all segment
# registers that could be reloaded and zeroed the others.
# Category 2 we fix up by killing the current process. We cannot use the
# normal Linux return path in this case because if we use the IRET hypercall
# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
# We distinguish between categories by comparing each saved segment register
# with its current contents: any discrepancy means we are in category 1.
*/
ENTRY(xen_failsafe_callback)
	framesz = (RIP-0x30)	/* workaround buggy gas */
	_frame framesz
	CFI_REL_OFFSET rcx, 0
	CFI_REL_OFFSET r11, 8
	movw %ds,%cx
	cmpw %cx,0x10(%rsp)
	CFI_REMEMBER_STATE
	jne 1f
	movw %es,%cx
	cmpw %cx,0x18(%rsp)
	jne 1f
	movw %fs,%cx
	cmpw %cx,0x20(%rsp)
	jne 1f
	movw %gs,%cx
	cmpw %cx,0x28(%rsp)
	jne 1f
	/* All segments match their saved values => Category 2 (Bad IRET). */
	movq (%rsp),%rcx
	CFI_RESTORE rcx
	movq 8(%rsp),%r11
	CFI_RESTORE r11
	addq $0x30,%rsp
	CFI_ADJUST_CFA_OFFSET -0x30
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	pushq %r11
	CFI_ADJUST_CFA_OFFSET 8
	pushq %rcx
	CFI_ADJUST_CFA_OFFSET 8
	jmp general_protection
	CFI_RESTORE_STATE
1:	/* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
	movq (%rsp),%rcx
	CFI_RESTORE rcx
	movq 8(%rsp),%r11
	CFI_RESTORE r11
	addq $0x30,%rsp
	CFI_ADJUST_CFA_OFFSET -0x30
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	SAVE_ALL
	jmp error_exit
	CFI_ENDPROC
END(xen_failsafe_callback)

#endif /* CONFIG_XEN */