Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * linux/arch/x86_64/entry.S | |
3 | * | |
4 | * Copyright (C) 1991, 1992 Linus Torvalds | |
5 | * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs | |
6 | * Copyright (C) 2000 Pavel Machek <pavel@suse.cz> | |
1da177e4 LT |
7 | */ |
8 | ||
9 | /* | |
10 | * entry.S contains the system-call and fault low-level handling routines. | |
11 | * | |
12 | * NOTE: This code handles signal recognition, which happens after every | |
13 | * interrupt and after each system call. | |
14 | * | |
15 | * Normal syscalls and interrupts don't save a full stack frame; this is | |
16 | * only done for syscall tracing, signals, or fork/exec et al. | |
17 | * | |
18 | * A note on terminology: | |
19 | * - top of stack: Architecture defined interrupt frame from SS to RIP | |
20 | * at the top of the kernel process stack. | |
21 | * - partial stack frame: partially saved registers, up to R11. | |
22 | * - full stack frame: like the partial stack frame, but with all registers saved. | |
2e91a17b AK |
23 | * |
24 | * Some macro usage: | |
25 | * - CFI macros are used to generate dwarf2 unwind information for better | |
26 | * backtraces. They don't change any code. | |
27 | * - SAVE_ALL/RESTORE_ALL - Save/restore all registers | |
28 | * - SAVE_ARGS/RESTORE_ARGS - Save/restore registers that C functions modify. | |
29 | * There are unfortunately lots of special cases where some registers are | |
30 | * not touched. The macro is a big mess that should be cleaned up. | |
31 | * - SAVE_REST/RESTORE_REST - Handle the registers not saved by SAVE_ARGS. | |
32 | * Gives a full stack frame. | |
33 | * - ENTRY/END - Define functions in the symbol table. | |
34 | * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - Fix up the hardware stack | |
35 | * frame that is otherwise undefined after a SYSCALL | |
36 | * - TRACE_IRQS_* - Trace hard interrupt state for lock debugging. | |
37 | * - errorentry/paranoidentry/zeroentry - Define exception entry points. | |
1da177e4 LT |
38 | */ |
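/*
 * A rough sketch for orientation (this mirrors struct pt_regs as laid
 * out by SAVE_ARGS/SAVE_REST; higher addresses first):
 *
 *	SS, RSP, EFLAGS, CS, RIP	<- "top of stack" (hardware frame)
 *	ORIG_RAX			<- error code / syscall number slot
 *	RDI, RSI, RDX, RCX, RAX,
 *	R8, R9, R10, R11		<- partial frame (SAVE_ARGS)
 *	RBX, RBP, R12, R13, R14, R15	<- completed by SAVE_REST
 */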
39 | ||
1da177e4 LT |
40 | #include <linux/linkage.h> |
41 | #include <asm/segment.h> | |
1da177e4 LT |
42 | #include <asm/cache.h> |
43 | #include <asm/errno.h> | |
44 | #include <asm/dwarf2.h> | |
45 | #include <asm/calling.h> | |
e2d5df93 | 46 | #include <asm/asm-offsets.h> |
1da177e4 LT |
47 | #include <asm/msr.h> |
48 | #include <asm/unistd.h> | |
49 | #include <asm/thread_info.h> | |
50 | #include <asm/hw_irq.h> | |
5f8efbb9 | 51 | #include <asm/page.h> |
2601e64d | 52 | #include <asm/irqflags.h> |
72fe4858 | 53 | #include <asm/paravirt.h> |
1da177e4 LT |
54 | |
55 | .code64 | |
56 | ||
dc37db4d | 57 | #ifndef CONFIG_PREEMPT |
1da177e4 LT |
58 | #define retint_kernel retint_restore_args |
59 | #endif | |
2601e64d | 60 | |
72fe4858 GOC |
61 | #ifdef CONFIG_PARAVIRT |
62 | ENTRY(native_irq_enable_syscall_ret) | |
63 | movq %gs:pda_oldrsp,%rsp | |
64 | swapgs | |
65 | sysretq | |
66 | #endif /* CONFIG_PARAVIRT */ | |
67 | ||
2601e64d IM |
68 | |
69 | .macro TRACE_IRQS_IRETQ offset=ARGOFFSET | |
70 | #ifdef CONFIG_TRACE_IRQFLAGS | |
71 | bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */ | |
72 | jnc 1f | |
73 | TRACE_IRQS_ON | |
74 | 1: | |
75 | #endif | |
76 | .endm | |
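/*
 * The bt above tests bit 9 (IF) of the saved EFLAGS image: lockdep is
 * told via TRACE_IRQS_ON only when the pending iretq will re-enable
 * interrupts; if IF is clear in the frame, the irq state won't change.
 */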
77 | ||
1da177e4 LT |
78 | /* |
79 | * C code is not supposed to know about the undefined top of stack. Every time | |
80 | * a C function with a pt_regs argument is called from the SYSCALL-based | |
81 | * fast path, FIXUP_TOP_OF_STACK is needed. | |
82 | * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs | |
83 | * manipulation. | |
84 | */ | |
85 | ||
86 | /* %rsp: at FRAMEEND */ | |
87 | .macro FIXUP_TOP_OF_STACK tmp | |
88 | movq %gs:pda_oldrsp,\tmp | |
89 | movq \tmp,RSP(%rsp) | |
90 | movq $__USER_DS,SS(%rsp) | |
91 | movq $__USER_CS,CS(%rsp) | |
92 | movq $-1,RCX(%rsp) /* SYSCALL stored the return RIP in %rcx; mark the slot invalid */ | |
93 | movq R11(%rsp),\tmp /* get eflags */ | |
94 | movq \tmp,EFLAGS(%rsp) | |
95 | .endm | |
96 | ||
97 | .macro RESTORE_TOP_OF_STACK tmp,offset=0 | |
98 | movq RSP-\offset(%rsp),\tmp | |
99 | movq \tmp,%gs:pda_oldrsp | |
100 | movq EFLAGS-\offset(%rsp),\tmp | |
101 | movq \tmp,R11-\offset(%rsp) | |
102 | .endm | |
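/*
 * Note the EFLAGS -> R11 copy above: sysretq restores RFLAGS from %r11
 * (and RIP from %rcx), so any flags change made through pt_regs must be
 * propagated into the saved R11 slot before RESTORE_ARGS runs.
 */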
103 | ||
104 | .macro FAKE_STACK_FRAME child_rip | |
105 | /* push in order ss, rsp, eflags, cs, rip */ | |
3829ee6b | 106 | xorl %eax, %eax |
1da177e4 LT |
107 | pushq %rax /* ss */ |
108 | CFI_ADJUST_CFA_OFFSET 8 | |
7effaa88 | 109 | /*CFI_REL_OFFSET ss,0*/ |
1da177e4 LT |
110 | pushq %rax /* rsp */ |
111 | CFI_ADJUST_CFA_OFFSET 8 | |
7effaa88 | 112 | CFI_REL_OFFSET rsp,0 |
1da177e4 LT |
113 | pushq $(1<<9) /* eflags - interrupts on */ |
114 | CFI_ADJUST_CFA_OFFSET 8 | |
7effaa88 | 115 | /*CFI_REL_OFFSET rflags,0*/ |
1da177e4 LT |
116 | pushq $__KERNEL_CS /* cs */ |
117 | CFI_ADJUST_CFA_OFFSET 8 | |
7effaa88 | 118 | /*CFI_REL_OFFSET cs,0*/ |
1da177e4 LT |
119 | pushq \child_rip /* rip */ |
120 | CFI_ADJUST_CFA_OFFSET 8 | |
7effaa88 | 121 | CFI_REL_OFFSET rip,0 |
1da177e4 LT |
122 | pushq %rax /* orig rax */ |
123 | CFI_ADJUST_CFA_OFFSET 8 | |
124 | .endm | |
125 | ||
126 | .macro UNFAKE_STACK_FRAME | |
127 | addq $8*6, %rsp | |
128 | CFI_ADJUST_CFA_OFFSET -(6*8) | |
129 | .endm | |
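/*
 * The fake frame built above is six quadwords (ss, rsp, eflags, cs,
 * rip, orig rax), which is why UNFAKE_STACK_FRAME pops exactly 6*8
 * bytes to undo it.
 */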
130 | ||
7effaa88 JB |
131 | .macro CFI_DEFAULT_STACK start=1 |
132 | .if \start | |
133 | CFI_STARTPROC simple | |
adf14236 | 134 | CFI_SIGNAL_FRAME |
7effaa88 JB |
135 | CFI_DEF_CFA rsp,SS+8 |
136 | .else | |
137 | CFI_DEF_CFA_OFFSET SS+8 | |
138 | .endif | |
139 | CFI_REL_OFFSET r15,R15 | |
140 | CFI_REL_OFFSET r14,R14 | |
141 | CFI_REL_OFFSET r13,R13 | |
142 | CFI_REL_OFFSET r12,R12 | |
143 | CFI_REL_OFFSET rbp,RBP | |
144 | CFI_REL_OFFSET rbx,RBX | |
145 | CFI_REL_OFFSET r11,R11 | |
146 | CFI_REL_OFFSET r10,R10 | |
147 | CFI_REL_OFFSET r9,R9 | |
148 | CFI_REL_OFFSET r8,R8 | |
149 | CFI_REL_OFFSET rax,RAX | |
150 | CFI_REL_OFFSET rcx,RCX | |
151 | CFI_REL_OFFSET rdx,RDX | |
152 | CFI_REL_OFFSET rsi,RSI | |
153 | CFI_REL_OFFSET rdi,RDI | |
154 | CFI_REL_OFFSET rip,RIP | |
155 | /*CFI_REL_OFFSET cs,CS*/ | |
156 | /*CFI_REL_OFFSET rflags,EFLAGS*/ | |
157 | CFI_REL_OFFSET rsp,RSP | |
158 | /*CFI_REL_OFFSET ss,SS*/ | |
1da177e4 LT |
159 | .endm |
160 | /* | |
161 | * A newly forked process directly context switches into this. | |
162 | */ | |
163 | /* rdi: prev */ | |
164 | ENTRY(ret_from_fork) | |
1da177e4 | 165 | CFI_DEFAULT_STACK |
658fdbef AK |
166 | push kernel_eflags(%rip) |
167 | CFI_ADJUST_CFA_OFFSET 8 /* push and popf each move %rsp by 8 bytes */ | |
168 | popf # reset kernel eflags | |
169 | CFI_ADJUST_CFA_OFFSET -8 | |
1da177e4 LT |
170 | call schedule_tail |
171 | GET_THREAD_INFO(%rcx) | |
172 | testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),threadinfo_flags(%rcx) | |
173 | jnz rff_trace | |
174 | rff_action: | |
175 | RESTORE_REST | |
176 | testl $3,CS-ARGOFFSET(%rsp) # from kernel_thread? | |
177 | je int_ret_from_sys_call | |
178 | testl $_TIF_IA32,threadinfo_flags(%rcx) | |
179 | jnz int_ret_from_sys_call | |
180 | RESTORE_TOP_OF_STACK %rdi,ARGOFFSET | |
181 | jmp ret_from_sys_call | |
182 | rff_trace: | |
183 | movq %rsp,%rdi | |
184 | call syscall_trace_leave | |
185 | GET_THREAD_INFO(%rcx) | |
186 | jmp rff_action | |
187 | CFI_ENDPROC | |
4b787e0b | 188 | END(ret_from_fork) |
1da177e4 LT |
189 | |
190 | /* | |
191 | * System call entry. Up to 6 arguments in registers are supported. | |
192 | * | |
193 | * SYSCALL does not save anything on the stack and does not change the | |
194 | * stack pointer. | |
195 | */ | |
196 | ||
197 | /* | |
198 | * Register setup: | |
199 | * rax system call number | |
200 | * rdi arg0 | |
201 | * rcx return address for syscall/sysret, C arg3 | |
202 | * rsi arg1 | |
203 | * rdx arg2 | |
204 | * r10 arg3 (--> moved to rcx for C) | |
205 | * r8 arg4 | |
206 | * r9 arg5 | |
207 | * r11 eflags for syscall/sysret, temporary for C | |
208 | * r12-r15,rbp,rbx saved by C code, not touched. | |
209 | * | |
210 | * Interrupts are off on entry. | |
211 | * Only called from user space. | |
212 | * | |
213 | * XXX if we had a free scratch register we could save the RSP into the stack frame | |
214 | * and report it properly in ps. Unfortunately we don't have one. | |
7bf36bbc AK |
215 | * |
216 | * When the user can change the frame, always force IRET. That is because | |
217 | * IRET deals with non-canonical addresses better. SYSRET has trouble | |
218 | * with them due to bugs in both AMD and Intel CPUs. | |
1da177e4 LT |
219 | */ |
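/*
 * For illustration, a user-space write(1, buf, len) following the
 * convention above would look roughly like this (not kernel code;
 * "buf" and "len" are placeholders):
 *
 *	movq	$__NR_write,%rax	# system call number
 *	movl	$1,%edi			# arg0: fd
 *	leaq	buf(%rip),%rsi		# arg1: buffer
 *	movq	$len,%rdx		# arg2: count
 *	syscall				# clobbers %rcx (RIP) and %r11 (EFLAGS)
 */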
220 | ||
221 | ENTRY(system_call) | |
7effaa88 | 222 | CFI_STARTPROC simple |
adf14236 | 223 | CFI_SIGNAL_FRAME |
dffead4e | 224 | CFI_DEF_CFA rsp,PDA_STACKOFFSET |
7effaa88 JB |
225 | CFI_REGISTER rip,rcx |
226 | /*CFI_REGISTER rflags,r11*/ | |
72fe4858 GOC |
227 | SWAPGS_UNSAFE_STACK |
228 | /* | |
229 | * A hypervisor implementation might want to use a label | |
230 | * after the swapgs, so that it can do the swapgs | |
231 | * for the guest and jump here on syscall. | |
232 | */ | |
233 | ENTRY(system_call_after_swapgs) | |
234 | ||
1da177e4 LT |
235 | movq %rsp,%gs:pda_oldrsp |
236 | movq %gs:pda_kernelstack,%rsp | |
2601e64d IM |
237 | /* |
238 | * No need to follow this irqs off/on section - it's straight | |
239 | * and short: | |
240 | */ | |
72fe4858 | 241 | ENABLE_INTERRUPTS(CLBR_NONE) |
1da177e4 LT |
242 | SAVE_ARGS 8,1 |
243 | movq %rax,ORIG_RAX-ARGOFFSET(%rsp) | |
7effaa88 JB |
244 | movq %rcx,RIP-ARGOFFSET(%rsp) |
245 | CFI_REL_OFFSET rip,RIP-ARGOFFSET | |
1da177e4 LT |
246 | GET_THREAD_INFO(%rcx) |
247 | testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%rcx) | |
248 | jnz tracesys | |
249 | cmpq $__NR_syscall_max,%rax | |
250 | ja badsys | |
251 | movq %r10,%rcx | |
252 | call *sys_call_table(,%rax,8) # XXX: rip relative | |
253 | movq %rax,RAX-ARGOFFSET(%rsp) | |
254 | /* | |
255 | * Syscall return path ending with SYSRET (fast path) | |
256 | * Has incomplete stack frame and undefined top of stack. | |
257 | */ | |
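/*
 * (Why the partial frame suffices here: sysretq reloads RIP from %rcx
 * and RFLAGS from %r11, so neither has to be popped off the stack.)
 */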
1da177e4 | 258 | ret_from_sys_call: |
11b854b2 | 259 | movl $_TIF_ALLWORK_MASK,%edi |
1da177e4 LT |
260 | /* edi: flagmask */ |
261 | sysret_check: | |
10cd706d | 262 | LOCKDEP_SYS_EXIT |
1da177e4 | 263 | GET_THREAD_INFO(%rcx) |
72fe4858 | 264 | DISABLE_INTERRUPTS(CLBR_NONE) |
2601e64d | 265 | TRACE_IRQS_OFF |
1da177e4 LT |
266 | movl threadinfo_flags(%rcx),%edx |
267 | andl %edi,%edx | |
268 | jnz sysret_careful | |
bcddc015 | 269 | CFI_REMEMBER_STATE |
2601e64d IM |
270 | /* |
271 | * sysretq will re-enable interrupts: | |
272 | */ | |
273 | TRACE_IRQS_ON | |
1da177e4 | 274 | movq RIP-ARGOFFSET(%rsp),%rcx |
7effaa88 | 275 | CFI_REGISTER rip,rcx |
1da177e4 | 276 | RESTORE_ARGS 0,-ARG_SKIP,1 |
7effaa88 | 277 | /*CFI_REGISTER rflags,r11*/ |
72fe4858 | 278 | ENABLE_INTERRUPTS_SYSCALL_RET |
1da177e4 | 279 | |
bcddc015 | 280 | CFI_RESTORE_STATE |
1da177e4 LT |
281 | /* Handle reschedules */ |
282 | /* edx: work, edi: workmask */ | |
283 | sysret_careful: | |
284 | bt $TIF_NEED_RESCHED,%edx | |
285 | jnc sysret_signal | |
2601e64d | 286 | TRACE_IRQS_ON |
72fe4858 | 287 | ENABLE_INTERRUPTS(CLBR_NONE) |
1da177e4 | 288 | pushq %rdi |
7effaa88 | 289 | CFI_ADJUST_CFA_OFFSET 8 |
1da177e4 LT |
290 | call schedule |
291 | popq %rdi | |
7effaa88 | 292 | CFI_ADJUST_CFA_OFFSET -8 |
1da177e4 LT |
293 | jmp sysret_check |
294 | ||
295 | /* Handle a signal */ | |
296 | sysret_signal: | |
2601e64d | 297 | TRACE_IRQS_ON |
72fe4858 | 298 | ENABLE_INTERRUPTS(CLBR_NONE) |
8f4d37ec | 299 | testl $_TIF_DO_NOTIFY_MASK,%edx |
10ffdbb8 AK |
300 | jz 1f |
301 | ||
302 | /* Really a signal */ | |
303 | /* edx: work flags (arg3) */ | |
1da177e4 LT |
304 | leaq do_notify_resume(%rip),%rax |
305 | leaq -ARGOFFSET(%rsp),%rdi # &pt_regs -> arg1 | |
306 | xorl %esi,%esi # oldset -> arg2 | |
307 | call ptregscall_common | |
10ffdbb8 | 308 | 1: movl $_TIF_NEED_RESCHED,%edi |
7bf36bbc AK |
309 | /* Use IRET because the user could have changed the frame. This |
310 | works because ptregscall_common has called FIXUP_TOP_OF_STACK. */ | |
72fe4858 | 311 | DISABLE_INTERRUPTS(CLBR_NONE) |
2601e64d | 312 | TRACE_IRQS_OFF |
7bf36bbc | 313 | jmp int_with_check |
1da177e4 | 314 | |
7effaa88 JB |
315 | badsys: |
316 | movq $-ENOSYS,RAX-ARGOFFSET(%rsp) | |
317 | jmp ret_from_sys_call | |
318 | ||
1da177e4 LT |
319 | /* Do syscall tracing */ |
320 | tracesys: | |
321 | SAVE_REST | |
322 | movq $-ENOSYS,RAX(%rsp) | |
323 | FIXUP_TOP_OF_STACK %rdi | |
324 | movq %rsp,%rdi | |
325 | call syscall_trace_enter | |
326 | LOAD_ARGS ARGOFFSET /* reload args from stack in case ptrace changed it */ | |
327 | RESTORE_REST | |
328 | cmpq $__NR_syscall_max,%rax | |
cc7d479f JB |
329 | movq $-ENOSYS,%rcx |
330 | cmova %rcx,%rax | |
1da177e4 LT |
331 | ja 1f |
332 | movq %r10,%rcx /* fixup for C */ | |
333 | call *sys_call_table(,%rax,8) | |
822ff019 | 334 | 1: movq %rax,RAX-ARGOFFSET(%rsp) |
7bf36bbc | 335 | /* Use IRET because the user could have changed the frame */
1da177e4 | 336 | |
1da177e4 LT |
337 | /* |
338 | * Syscall return path ending with IRET. | |
339 | * Has correct top of stack, but partial stack frame. | |
bcddc015 JB |
340 | */ |
341 | .globl int_ret_from_sys_call | |
342 | int_ret_from_sys_call: | |
72fe4858 | 343 | DISABLE_INTERRUPTS(CLBR_NONE) |
2601e64d | 344 | TRACE_IRQS_OFF |
1da177e4 LT |
345 | testl $3,CS-ARGOFFSET(%rsp) |
346 | je retint_restore_args | |
347 | movl $_TIF_ALLWORK_MASK,%edi | |
348 | /* edi: mask to check */ | |
349 | int_with_check: | |
10cd706d | 350 | LOCKDEP_SYS_EXIT_IRQ |
1da177e4 LT |
351 | GET_THREAD_INFO(%rcx) |
352 | movl threadinfo_flags(%rcx),%edx | |
353 | andl %edi,%edx | |
354 | jnz int_careful | |
bf2fcc6f | 355 | andl $~TS_COMPAT,threadinfo_status(%rcx) |
1da177e4 LT |
356 | jmp retint_swapgs |
357 | ||
358 | /* Either reschedule or signal or syscall exit tracking needed. */ | |
359 | /* First do a reschedule test. */ | |
360 | /* edx: work, edi: workmask */ | |
361 | int_careful: | |
362 | bt $TIF_NEED_RESCHED,%edx | |
363 | jnc int_very_careful | |
2601e64d | 364 | TRACE_IRQS_ON |
72fe4858 | 365 | ENABLE_INTERRUPTS(CLBR_NONE) |
1da177e4 | 366 | pushq %rdi |
7effaa88 | 367 | CFI_ADJUST_CFA_OFFSET 8 |
1da177e4 LT |
368 | call schedule |
369 | popq %rdi | |
7effaa88 | 370 | CFI_ADJUST_CFA_OFFSET -8 |
72fe4858 | 371 | DISABLE_INTERRUPTS(CLBR_NONE) |
2601e64d | 372 | TRACE_IRQS_OFF |
1da177e4 LT |
373 | jmp int_with_check |
374 | ||
375 | /* handle signals and tracing -- both require a full stack frame */ | |
376 | int_very_careful: | |
2601e64d | 377 | TRACE_IRQS_ON |
72fe4858 | 378 | ENABLE_INTERRUPTS(CLBR_NONE) |
1da177e4 LT |
379 | SAVE_REST |
380 | /* Check for syscall exit trace */ | |
381 | testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edx | |
382 | jz int_signal | |
383 | pushq %rdi | |
7effaa88 | 384 | CFI_ADJUST_CFA_OFFSET 8 |
1da177e4 LT |
385 | leaq 8(%rsp),%rdi # &ptregs -> arg1 |
386 | call syscall_trace_leave | |
387 | popq %rdi | |
7effaa88 | 388 | CFI_ADJUST_CFA_OFFSET -8 |
36c1104e | 389 | andl $~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edi |
1da177e4 LT |
390 | jmp int_restore_rest |
391 | ||
392 | int_signal: | |
8f4d37ec | 393 | testl $_TIF_DO_NOTIFY_MASK,%edx |
1da177e4 LT |
394 | jz 1f |
395 | movq %rsp,%rdi # &ptregs -> arg1 | |
396 | xorl %esi,%esi # oldset -> arg2 | |
397 | call do_notify_resume | |
398 | 1: movl $_TIF_NEED_RESCHED,%edi | |
399 | int_restore_rest: | |
400 | RESTORE_REST | |
72fe4858 | 401 | DISABLE_INTERRUPTS(CLBR_NONE) |
2601e64d | 402 | TRACE_IRQS_OFF |
1da177e4 LT |
403 | jmp int_with_check |
404 | CFI_ENDPROC | |
bcddc015 | 405 | END(system_call) |
1da177e4 LT |
406 | |
407 | /* | |
408 | * Certain special system calls need to save a full stack frame. | |
409 | */ | |
410 | ||
411 | .macro PTREGSCALL label,func,arg | |
412 | .globl \label | |
413 | \label: | |
414 | leaq \func(%rip),%rax | |
415 | leaq -ARGOFFSET+8(%rsp),\arg /* 8 for return address */ | |
416 | jmp ptregscall_common | |
4b787e0b | 417 | END(\label) |
1da177e4 LT |
418 | .endm |
419 | ||
7effaa88 JB |
420 | CFI_STARTPROC |
421 | ||
1da177e4 LT |
422 | PTREGSCALL stub_clone, sys_clone, %r8 |
423 | PTREGSCALL stub_fork, sys_fork, %rdi | |
424 | PTREGSCALL stub_vfork, sys_vfork, %rdi | |
425 | PTREGSCALL stub_rt_sigsuspend, sys_rt_sigsuspend, %rdx | |
426 | PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx | |
427 | PTREGSCALL stub_iopl, sys_iopl, %rsi | |
428 | ||
429 | ENTRY(ptregscall_common) | |
1da177e4 | 430 | popq %r11 |
7effaa88 JB |
431 | CFI_ADJUST_CFA_OFFSET -8 |
432 | CFI_REGISTER rip, r11 | |
1da177e4 LT |
433 | SAVE_REST |
434 | movq %r11, %r15 | |
7effaa88 | 435 | CFI_REGISTER rip, r15 |
1da177e4 LT |
436 | FIXUP_TOP_OF_STACK %r11 |
437 | call *%rax | |
438 | RESTORE_TOP_OF_STACK %r11 | |
439 | movq %r15, %r11 | |
7effaa88 | 440 | CFI_REGISTER rip, r11 |
1da177e4 LT |
441 | RESTORE_REST |
442 | pushq %r11 | |
7effaa88 JB |
443 | CFI_ADJUST_CFA_OFFSET 8 |
444 | CFI_REL_OFFSET rip, 0 | |
1da177e4 LT |
445 | ret |
446 | CFI_ENDPROC | |
4b787e0b | 447 | END(ptregscall_common) |
1da177e4 LT |
448 | |
449 | ENTRY(stub_execve) | |
450 | CFI_STARTPROC | |
451 | popq %r11 | |
7effaa88 JB |
452 | CFI_ADJUST_CFA_OFFSET -8 |
453 | CFI_REGISTER rip, r11 | |
1da177e4 | 454 | SAVE_REST |
1da177e4 LT |
455 | FIXUP_TOP_OF_STACK %r11 |
456 | call sys_execve | |
1da177e4 | 457 | RESTORE_TOP_OF_STACK %r11 |
1da177e4 LT |
458 | movq %rax,RAX(%rsp) |
459 | RESTORE_REST | |
460 | jmp int_ret_from_sys_call | |
461 | CFI_ENDPROC | |
4b787e0b | 462 | END(stub_execve) |
1da177e4 LT |
463 | |
464 | /* | |
465 | * sigreturn is special because it needs to restore all registers on return. | |
466 | * This cannot be done with SYSRET, so use the IRET return path instead. | |
467 | */ | |
468 | ENTRY(stub_rt_sigreturn) | |
469 | CFI_STARTPROC | |
7effaa88 JB |
470 | addq $8, %rsp |
471 | CFI_ADJUST_CFA_OFFSET -8 | |
1da177e4 LT |
472 | SAVE_REST |
473 | movq %rsp,%rdi | |
474 | FIXUP_TOP_OF_STACK %r11 | |
475 | call sys_rt_sigreturn | |
476 | movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer | |
477 | RESTORE_REST | |
478 | jmp int_ret_from_sys_call | |
479 | CFI_ENDPROC | |
4b787e0b | 480 | END(stub_rt_sigreturn) |
1da177e4 | 481 | |
7effaa88 JB |
482 | /* |
483 | * initial frame state for interrupts and exceptions | |
484 | */ | |
485 | .macro _frame ref | |
486 | CFI_STARTPROC simple | |
adf14236 | 487 | CFI_SIGNAL_FRAME |
7effaa88 JB |
488 | CFI_DEF_CFA rsp,SS+8-\ref |
489 | /*CFI_REL_OFFSET ss,SS-\ref*/ | |
490 | CFI_REL_OFFSET rsp,RSP-\ref | |
491 | /*CFI_REL_OFFSET rflags,EFLAGS-\ref*/ | |
492 | /*CFI_REL_OFFSET cs,CS-\ref*/ | |
493 | CFI_REL_OFFSET rip,RIP-\ref | |
494 | .endm | |
495 | ||
496 | /* initial frame state for interrupts (and exceptions without error code) */ | |
497 | #define INTR_FRAME _frame RIP | |
498 | /* initial frame state for exceptions with error code (and interrupts with | |
499 | vector already pushed) */ | |
500 | #define XCPT_FRAME _frame ORIG_RAX | |
501 | ||
1da177e4 LT |
502 | /* |
503 | * Interrupt entry/exit. | |
504 | * | |
505 | * Interrupt entry points save only callee-clobbered registers on the fast path. | |
506 | * | |
507 | * Entry runs with interrupts off. | |
508 | */ | |
509 | ||
510 | /* 0(%rsp): interrupt number */ | |
511 | .macro interrupt func | |
1da177e4 | 512 | cld |
1da177e4 LT |
513 | SAVE_ARGS |
514 | leaq -ARGOFFSET(%rsp),%rdi # arg1 for handler | |
1de9c3f6 JB |
515 | pushq %rbp |
516 | CFI_ADJUST_CFA_OFFSET 8 | |
517 | CFI_REL_OFFSET rbp, 0 | |
518 | movq %rsp,%rbp | |
519 | CFI_DEF_CFA_REGISTER rbp | |
1da177e4 LT |
520 | testl $3,CS(%rdi) |
521 | je 1f | |
72fe4858 | 522 | SWAPGS |
96e54049 AK |
523 | /* irqcount is used to check whether a CPU is already on an interrupt |
524 | stack. While this is essentially redundant with preempt_count, |
525 | it is a little cheaper to use a separate counter in the PDA |
526 | (short of moving irq_enter into assembly, which would be too | |
527 | much work) */ | |
528 | 1: incl %gs:pda_irqcount | |
1de9c3f6 | 529 | cmoveq %gs:pda_irqstackptr,%rsp |
2699500b | 530 | push %rbp # backlink for old unwinder |
2601e64d IM |
531 | /* |
532 | * We entered an interrupt context - irqs are off: | |
533 | */ | |
534 | TRACE_IRQS_OFF | |
1da177e4 LT |
535 | call \func |
536 | .endm | |
537 | ||
538 | ENTRY(common_interrupt) | |
7effaa88 | 539 | XCPT_FRAME |
1da177e4 LT |
540 | interrupt do_IRQ |
541 | /* 0(%rsp): oldrsp-ARGOFFSET */ | |
7effaa88 | 542 | ret_from_intr: |
72fe4858 | 543 | DISABLE_INTERRUPTS(CLBR_NONE) |
2601e64d | 544 | TRACE_IRQS_OFF |
3829ee6b | 545 | decl %gs:pda_irqcount |
1de9c3f6 | 546 | leaveq |
7effaa88 | 547 | CFI_DEF_CFA_REGISTER rsp |
1de9c3f6 | 548 | CFI_ADJUST_CFA_OFFSET -8 |
7effaa88 | 549 | exit_intr: |
1da177e4 LT |
550 | GET_THREAD_INFO(%rcx) |
551 | testl $3,CS-ARGOFFSET(%rsp) | |
552 | je retint_kernel | |
553 | ||
554 | /* Interrupt came from user space */ | |
555 | /* | |
556 | * Has a correct top of stack, but a partial stack frame | |
557 | * %rcx: thread info. Interrupts off. | |
558 | */ | |
559 | retint_with_reschedule: | |
560 | movl $_TIF_WORK_MASK,%edi | |
7effaa88 | 561 | retint_check: |
10cd706d | 562 | LOCKDEP_SYS_EXIT_IRQ |
1da177e4 LT |
563 | movl threadinfo_flags(%rcx),%edx |
564 | andl %edi,%edx | |
7effaa88 | 565 | CFI_REMEMBER_STATE |
1da177e4 | 566 | jnz retint_careful |
10cd706d PZ |
567 | |
568 | retint_swapgs: /* return to user-space */ | |
2601e64d IM |
569 | /* |
570 | * The iretq could re-enable interrupts: | |
571 | */ | |
72fe4858 | 572 | DISABLE_INTERRUPTS(CLBR_ANY) |
2601e64d | 573 | TRACE_IRQS_IRETQ |
72fe4858 | 574 | SWAPGS |
2601e64d IM |
575 | jmp restore_args |
576 | ||
10cd706d | 577 | retint_restore_args: /* return to kernel space */ |
72fe4858 | 578 | DISABLE_INTERRUPTS(CLBR_ANY) |
2601e64d IM |
579 | /* |
580 | * The iretq could re-enable interrupts: | |
581 | */ | |
582 | TRACE_IRQS_IRETQ | |
583 | restore_args: | |
1da177e4 LT |
584 | RESTORE_ARGS 0,8,0 |
585 | iret_label: | |
72fe4858 GOC |
586 | #ifdef CONFIG_PARAVIRT |
587 | INTERRUPT_RETURN | |
588 | #endif | |
589 | ENTRY(native_iret) | |
1da177e4 LT |
590 | iretq |
591 | ||
592 | .section __ex_table,"a" | |
72fe4858 | 593 | .quad native_iret, bad_iret |
1da177e4 LT |
594 | .previous |
595 | .section .fixup,"ax" | |
596 | /* force a signal here? this matches i386 behaviour */ | |
597 | /* running with kernel gs */ | |
598 | bad_iret: | |
3076a492 | 599 | movq $11,%rdi /* SIGSEGV */ |
2601e64d | 600 | TRACE_IRQS_ON |
72fe4858 GOC |
601 | ENABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI)) |
602 | jmp do_exit | |
603 | .previous | |
604 | ||
7effaa88 | 605 | /* edi: workmask, edx: work */ |
1da177e4 | 606 | retint_careful: |
7effaa88 | 607 | CFI_RESTORE_STATE |
1da177e4 LT |
608 | bt $TIF_NEED_RESCHED,%edx |
609 | jnc retint_signal | |
2601e64d | 610 | TRACE_IRQS_ON |
72fe4858 | 611 | ENABLE_INTERRUPTS(CLBR_NONE) |
1da177e4 | 612 | pushq %rdi |
7effaa88 | 613 | CFI_ADJUST_CFA_OFFSET 8 |
1da177e4 LT |
614 | call schedule |
615 | popq %rdi | |
7effaa88 | 616 | CFI_ADJUST_CFA_OFFSET -8 |
1da177e4 | 617 | GET_THREAD_INFO(%rcx) |
72fe4858 | 618 | DISABLE_INTERRUPTS(CLBR_NONE) |
2601e64d | 619 | TRACE_IRQS_OFF |
1da177e4 LT |
620 | jmp retint_check |
621 | ||
622 | retint_signal: | |
8f4d37ec | 623 | testl $_TIF_DO_NOTIFY_MASK,%edx |
10ffdbb8 | 624 | jz retint_swapgs |
2601e64d | 625 | TRACE_IRQS_ON |
72fe4858 | 626 | ENABLE_INTERRUPTS(CLBR_NONE) |
1da177e4 LT |
627 | SAVE_REST |
628 | movq $-1,ORIG_RAX(%rsp) | |
3829ee6b | 629 | xorl %esi,%esi # oldset |
1da177e4 LT |
630 | movq %rsp,%rdi # &pt_regs |
631 | call do_notify_resume | |
632 | RESTORE_REST | |
72fe4858 | 633 | DISABLE_INTERRUPTS(CLBR_NONE) |
2601e64d | 634 | TRACE_IRQS_OFF |
10ffdbb8 | 635 | movl $_TIF_NEED_RESCHED,%edi |
be9e6870 | 636 | GET_THREAD_INFO(%rcx) |
1da177e4 LT |
637 | jmp retint_check |
638 | ||
639 | #ifdef CONFIG_PREEMPT | |
640 | /* Returning to kernel space. Check if we need preemption */ | |
641 | /* rcx: threadinfo. interrupts off. */ | |
b06babac | 642 | ENTRY(retint_kernel) |
1da177e4 LT |
643 | cmpl $0,threadinfo_preempt_count(%rcx) |
644 | jnz retint_restore_args | |
645 | bt $TIF_NEED_RESCHED,threadinfo_flags(%rcx) | |
646 | jnc retint_restore_args | |
647 | bt $9,EFLAGS-ARGOFFSET(%rsp) /* interrupts off? */ | |
648 | jnc retint_restore_args | |
649 | call preempt_schedule_irq | |
650 | jmp exit_intr | |
651 | #endif | |
4b787e0b | 652 | |
1da177e4 | 653 | CFI_ENDPROC |
4b787e0b | 654 | END(common_interrupt) |
1da177e4 LT |
655 | |
656 | /* | |
657 | * APIC interrupts. | |
658 | */ | |
659 | .macro apicinterrupt num,func | |
7effaa88 | 660 | INTR_FRAME |
19eadf98 | 661 | pushq $~(\num) |
7effaa88 | 662 | CFI_ADJUST_CFA_OFFSET 8 |
1da177e4 LT |
663 | interrupt \func |
664 | jmp ret_from_intr | |
665 | CFI_ENDPROC | |
666 | .endm | |
667 | ||
668 | ENTRY(thermal_interrupt) | |
669 | apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt | |
4b787e0b | 670 | END(thermal_interrupt) |
1da177e4 | 671 | |
89b831ef JS |
672 | ENTRY(threshold_interrupt) |
673 | apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt | |
4b787e0b | 674 | END(threshold_interrupt) |
89b831ef | 675 | |
1da177e4 LT |
676 | #ifdef CONFIG_SMP |
677 | ENTRY(reschedule_interrupt) | |
678 | apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt | |
4b787e0b | 679 | END(reschedule_interrupt) |
1da177e4 | 680 | |
e5bc8b6b AK |
681 | .macro INVALIDATE_ENTRY num |
682 | ENTRY(invalidate_interrupt\num) | |
683 | apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt | |
4b787e0b | 684 | END(invalidate_interrupt\num) |
e5bc8b6b AK |
685 | .endm |
686 | ||
687 | INVALIDATE_ENTRY 0 | |
688 | INVALIDATE_ENTRY 1 | |
689 | INVALIDATE_ENTRY 2 | |
690 | INVALIDATE_ENTRY 3 | |
691 | INVALIDATE_ENTRY 4 | |
692 | INVALIDATE_ENTRY 5 | |
693 | INVALIDATE_ENTRY 6 | |
694 | INVALIDATE_ENTRY 7 | |
1da177e4 LT |
695 | |
696 | ENTRY(call_function_interrupt) | |
697 | apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt | |
4b787e0b | 698 | END(call_function_interrupt) |
61014292 EB |
699 | ENTRY(irq_move_cleanup_interrupt) |
700 | apicinterrupt IRQ_MOVE_CLEANUP_VECTOR,smp_irq_move_cleanup_interrupt | |
701 | END(irq_move_cleanup_interrupt) | |
1da177e4 LT |
702 | #endif |
703 | ||
1da177e4 LT |
704 | ENTRY(apic_timer_interrupt) |
705 | apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt | |
4b787e0b | 706 | END(apic_timer_interrupt) |
1da177e4 LT |
707 | |
708 | ENTRY(error_interrupt) | |
709 | apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt | |
4b787e0b | 710 | END(error_interrupt) |
1da177e4 LT |
711 | |
712 | ENTRY(spurious_interrupt) | |
713 | apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt | |
4b787e0b | 714 | END(spurious_interrupt) |
1da177e4 LT |
715 | |
716 | /* | |
717 | * Exception entry points. | |
718 | */ | |
719 | .macro zeroentry sym | |
7effaa88 | 720 | INTR_FRAME |
1da177e4 | 721 | pushq $0 /* push error code/oldrax */ |
7effaa88 | 722 | CFI_ADJUST_CFA_OFFSET 8 |
1da177e4 | 723 | pushq %rax /* push real oldrax to the rdi slot */ |
7effaa88 | 724 | CFI_ADJUST_CFA_OFFSET 8 |
37550907 | 725 | CFI_REL_OFFSET rax,0 |
1da177e4 LT |
726 | leaq \sym(%rip),%rax |
727 | jmp error_entry | |
7effaa88 | 728 | CFI_ENDPROC |
1da177e4 LT |
729 | .endm |
730 | ||
731 | .macro errorentry sym | |
7effaa88 | 732 | XCPT_FRAME |
1da177e4 | 733 | pushq %rax |
7effaa88 | 734 | CFI_ADJUST_CFA_OFFSET 8 |
37550907 | 735 | CFI_REL_OFFSET rax,0 |
1da177e4 LT |
736 | leaq \sym(%rip),%rax |
737 | jmp error_entry | |
7effaa88 | 738 | CFI_ENDPROC |
1da177e4 LT |
739 | .endm |
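/*
 * zeroentry is for exceptions where the CPU pushes no error code (a
 * dummy 0 is pushed to keep the frame layout uniform); errorentry is
 * for exceptions where the CPU has already pushed one. Both stash the
 * handler address in %rax and share error_entry below.
 */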
740 | ||
741 | /* error code is on the stack already */ | |
742 | /* handle NMI like exceptions that can happen everywhere */ | |
2601e64d | 743 | .macro paranoidentry sym, ist=0, irqtrace=1 |
1da177e4 LT |
744 | SAVE_ALL |
745 | cld | |
746 | movl $1,%ebx | |
747 | movl $MSR_GS_BASE,%ecx | |
748 | rdmsr | |
749 | testl %edx,%edx | |
750 | js 1f | |
72fe4858 | 751 | SWAPGS |
1da177e4 | 752 | xorl %ebx,%ebx |
b556b35e JB |
753 | 1: |
754 | .if \ist | |
755 | movq %gs:pda_data_offset, %rbp | |
756 | .endif | |
757 | movq %rsp,%rdi | |
1da177e4 LT |
758 | movq ORIG_RAX(%rsp),%rsi |
759 | movq $-1,ORIG_RAX(%rsp) | |
b556b35e | 760 | .if \ist |
5f8efbb9 | 761 | subq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp) |
b556b35e | 762 | .endif |
1da177e4 | 763 | call \sym |
b556b35e | 764 | .if \ist |
5f8efbb9 | 765 | addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp) |
b556b35e | 766 | .endif |
72fe4858 | 767 | DISABLE_INTERRUPTS(CLBR_NONE) |
2601e64d IM |
768 | .if \irqtrace |
769 | TRACE_IRQS_OFF | |
770 | .endif | |
1da177e4 | 771 | .endm |
2601e64d IM |
772 | |
773 | /* | |
774 | * "Paranoid" exit path from exception stack. | |
775 | * Paranoid because this is used by NMIs and cannot take | |
776 | * any kernel state for granted. | |
777 | * We don't do kernel preemption checks here, because only the |
778 | * NMI case should be common, and it does not enable IRQs and |
779 | * cannot get reschedule ticks. |
780 | * | |
781 | * "trace" is 0 for the NMI handler only, because irq-tracing | |
782 | * is fundamentally NMI-unsafe (we cannot change the soft and |
783 | * hard interrupt flags at once, atomically). |
784 | */ | |
785 | .macro paranoidexit trace=1 | |
786 | /* ebx: no swapgs flag */ | |
787 | paranoid_exit\trace: | |
788 | testl %ebx,%ebx /* swapgs needed? */ | |
789 | jnz paranoid_restore\trace | |
790 | testl $3,CS(%rsp) | |
791 | jnz paranoid_userspace\trace | |
792 | paranoid_swapgs\trace: | |
7a0a2dff | 793 | .if \trace |
2601e64d | 794 | TRACE_IRQS_IRETQ 0 |
7a0a2dff | 795 | .endif |
72fe4858 | 796 | SWAPGS_UNSAFE_STACK |
2601e64d IM |
797 | paranoid_restore\trace: |
798 | RESTORE_ALL 8 | |
72fe4858 | 799 | INTERRUPT_RETURN |
2601e64d IM |
800 | paranoid_userspace\trace: |
801 | GET_THREAD_INFO(%rcx) | |
802 | movl threadinfo_flags(%rcx),%ebx | |
803 | andl $_TIF_WORK_MASK,%ebx | |
804 | jz paranoid_swapgs\trace | |
805 | movq %rsp,%rdi /* &pt_regs */ | |
806 | call sync_regs | |
807 | movq %rax,%rsp /* switch stack for scheduling */ | |
808 | testl $_TIF_NEED_RESCHED,%ebx | |
809 | jnz paranoid_schedule\trace | |
810 | movl %ebx,%edx /* arg3: thread flags */ | |
811 | .if \trace | |
812 | TRACE_IRQS_ON | |
813 | .endif | |
72fe4858 | 814 | ENABLE_INTERRUPTS(CLBR_NONE) |
2601e64d IM |
815 | xorl %esi,%esi /* arg2: oldset */ |
816 | movq %rsp,%rdi /* arg1: &pt_regs */ | |
817 | call do_notify_resume | |
72fe4858 | 818 | DISABLE_INTERRUPTS(CLBR_NONE) |
2601e64d IM |
819 | .if \trace |
820 | TRACE_IRQS_OFF | |
821 | .endif | |
822 | jmp paranoid_userspace\trace | |
823 | paranoid_schedule\trace: | |
824 | .if \trace | |
825 | TRACE_IRQS_ON | |
826 | .endif | |
72fe4858 | 827 | ENABLE_INTERRUPTS(CLBR_ANY) |
2601e64d | 828 | call schedule |
72fe4858 | 829 | DISABLE_INTERRUPTS(CLBR_ANY) |
2601e64d IM |
830 | .if \trace |
831 | TRACE_IRQS_OFF | |
832 | .endif | |
833 | jmp paranoid_userspace\trace | |
834 | CFI_ENDPROC | |
835 | .endm | |
836 | ||
1da177e4 LT |
837 | /* |
838 | * Exception entry point. This expects an error code/orig_rax on the stack | |
839 | * and the exception handler in %rax. | |
840 | */ | |
d28c4393 | 841 | KPROBE_ENTRY(error_entry) |
7effaa88 | 842 | _frame RDI |
37550907 | 843 | CFI_REL_OFFSET rax,0 |
1da177e4 LT |
844 | /* rdi slot contains rax, oldrax contains error code */ |
845 | cld | |
846 | subq $14*8,%rsp | |
847 | CFI_ADJUST_CFA_OFFSET (14*8) | |
848 | movq %rsi,13*8(%rsp) | |
849 | CFI_REL_OFFSET rsi,RSI | |
850 | movq 14*8(%rsp),%rsi /* load rax from rdi slot */ | |
37550907 | 851 | CFI_REGISTER rax,rsi |
1da177e4 LT |
852 | movq %rdx,12*8(%rsp) |
853 | CFI_REL_OFFSET rdx,RDX | |
854 | movq %rcx,11*8(%rsp) | |
855 | CFI_REL_OFFSET rcx,RCX | |
856 | movq %rsi,10*8(%rsp) /* store rax */ | |
857 | CFI_REL_OFFSET rax,RAX | |
858 | movq %r8, 9*8(%rsp) | |
859 | CFI_REL_OFFSET r8,R8 | |
860 | movq %r9, 8*8(%rsp) | |
861 | CFI_REL_OFFSET r9,R9 | |
862 | movq %r10,7*8(%rsp) | |
863 | CFI_REL_OFFSET r10,R10 | |
864 | movq %r11,6*8(%rsp) | |
865 | CFI_REL_OFFSET r11,R11 | |
866 | movq %rbx,5*8(%rsp) | |
867 | CFI_REL_OFFSET rbx,RBX | |
868 | movq %rbp,4*8(%rsp) | |
869 | CFI_REL_OFFSET rbp,RBP | |
870 | movq %r12,3*8(%rsp) | |
871 | CFI_REL_OFFSET r12,R12 | |
872 | movq %r13,2*8(%rsp) | |
873 | CFI_REL_OFFSET r13,R13 | |
874 | movq %r14,1*8(%rsp) | |
875 | CFI_REL_OFFSET r14,R14 | |
876 | movq %r15,(%rsp) | |
877 | CFI_REL_OFFSET r15,R15 | |
878 | xorl %ebx,%ebx | |
879 | testl $3,CS(%rsp) | |
880 | je error_kernelspace | |
881 | error_swapgs: | |
72fe4858 | 882 | SWAPGS |
1da177e4 LT |
883 | error_sti: |
884 | movq %rdi,RDI(%rsp) | |
37550907 | 885 | CFI_REL_OFFSET rdi,RDI |
1da177e4 LT |
886 | movq %rsp,%rdi |
887 | movq ORIG_RAX(%rsp),%rsi /* get error code */ | |
888 | movq $-1,ORIG_RAX(%rsp) | |
889 | call *%rax | |
10cd706d PZ |
890 | /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */ |
891 | error_exit: | |
892 | movl %ebx,%eax | |
1da177e4 | 893 | RESTORE_REST |
72fe4858 | 894 | DISABLE_INTERRUPTS(CLBR_NONE) |
2601e64d | 895 | TRACE_IRQS_OFF |
1da177e4 LT |
896 | GET_THREAD_INFO(%rcx) |
897 | testl %eax,%eax | |
898 | jne retint_kernel | |
10cd706d | 899 | LOCKDEP_SYS_EXIT_IRQ |
1da177e4 LT |
900 | movl threadinfo_flags(%rcx),%edx |
901 | movl $_TIF_WORK_MASK,%edi | |
902 | andl %edi,%edx | |
903 | jnz retint_careful | |
10cd706d | 904 | jmp retint_swapgs |
1da177e4 LT |
905 | CFI_ENDPROC |
906 | ||
907 | error_kernelspace: | |
908 | incl %ebx | |
909 | /* There are two places in the kernel that can potentially fault with | |
910 | usergs. Handle them here. The exception handlers after | |
911 | iret run with kernel gs again, so don't set the user space flag. | |
912 | B stepping K8s sometimes report a truncated RIP for IRET |
913 | exceptions returning to compat mode. Check for these here too. */ | |
914 | leaq iret_label(%rip),%rbp | |
915 | cmpq %rbp,RIP(%rsp) | |
916 | je error_swapgs | |
917 | movl %ebp,%ebp /* zero extend */ | |
918 | cmpq %rbp,RIP(%rsp) | |
919 | je error_swapgs | |
920 | cmpq $gs_change,RIP(%rsp) | |
921 | je error_swapgs | |
922 | jmp error_sti | |
d28c4393 | 923 | KPROBE_END(error_entry) |
1da177e4 LT |
924 | |
925 | /* Reload gs selector with exception handling */ | |
926 | /* edi: new selector */ | |
927 | ENTRY(load_gs_index) | |
7effaa88 | 928 | CFI_STARTPROC |
1da177e4 | 929 | pushf |
7effaa88 | 930 | CFI_ADJUST_CFA_OFFSET 8 |
72fe4858 GOC |
931 | DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI)) |
932 | SWAPGS | |
1da177e4 LT |
933 | gs_change: |
934 | movl %edi,%gs | |
935 | 2: mfence /* workaround */ | |
72fe4858 | 936 | SWAPGS |
1da177e4 | 937 | popf |
7effaa88 | 938 | CFI_ADJUST_CFA_OFFSET -8 |
1da177e4 | 939 | ret |
7effaa88 | 940 | CFI_ENDPROC |
4b787e0b | 941 | ENDPROC(load_gs_index) |
1da177e4 LT |
942 | |
943 | .section __ex_table,"a" | |
944 | .align 8 | |
945 | .quad gs_change,bad_gs | |
946 | .previous | |
947 | .section .fixup,"ax" | |
948 | /* running with kernelgs */ | |
949 | bad_gs: | |
72fe4858 | 950 | SWAPGS /* switch back to user gs */ |
1da177e4 LT |
951 | xorl %eax,%eax |
952 | movl %eax,%gs | |
953 | jmp 2b | |
954 | .previous | |
955 | ||
956 | /* | |
957 | * Create a kernel thread. | |
958 | * | |
959 | * C extern interface: | |
960 | * extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) | |
961 | * | |
962 | * asm input arguments: | |
963 | * rdi: fn, rsi: arg, rdx: flags | |
964 | */ | |
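/*
 * A minimal usage sketch from C (illustrative only; my_thread_fn,
 * do_work and arg are hypothetical names):
 *
 *	static int my_thread_fn(void *arg)
 *	{
 *		do_work(arg);
 *		return 0;
 *	}
 *
 *	pid = kernel_thread(my_thread_fn, arg, CLONE_FS | CLONE_FILES);
 */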
965 | ENTRY(kernel_thread) | |
966 | CFI_STARTPROC | |
967 | FAKE_STACK_FRAME $child_rip | |
968 | SAVE_ALL | |
969 | ||
970 | # rdi: flags, rsi: usp, rdx: will be &pt_regs | |
971 | movq %rdx,%rdi | |
972 | orq kernel_thread_flags(%rip),%rdi | |
973 | movq $-1, %rsi | |
974 | movq %rsp, %rdx | |
975 | ||
976 | xorl %r8d,%r8d | |
977 | xorl %r9d,%r9d | |
978 | ||
979 | # clone now | |
980 | call do_fork | |
981 | movq %rax,RAX(%rsp) | |
982 | xorl %edi,%edi | |
983 | ||
984 | /* | |
985 | * It isn't worth checking for a reschedule here, |
986 | * so internally to the x86_64 port you can rely on kernel_thread() |
987 | * not rescheduling the child before returning; this avoids the need |
988 | * for hacks, for example to fork off the per-CPU idle tasks. |
989 | * [Hopefully no generic code relies on the reschedule -AK] | |
990 | */ | |
991 | RESTORE_ALL | |
992 | UNFAKE_STACK_FRAME | |
993 | ret | |
994 | CFI_ENDPROC | |
4b787e0b | 995 | ENDPROC(kernel_thread) |
1da177e4 LT |
996 | |
997 | child_rip: | |
c05991ed AK |
998 | pushq $0 # fake return address |
999 | CFI_STARTPROC | |
1da177e4 LT |
1000 | /* |
1001 | * Here we are in the child and the registers are set as they were | |
1002 | * at kernel_thread() invocation in the parent. | |
1003 | */ | |
1004 | movq %rdi, %rax | |
1005 | movq %rsi, %rdi | |
1006 | call *%rax | |
1007 | # exit | |
1c5b5cfd | 1008 | mov %eax, %edi |
1da177e4 | 1009 | call do_exit |
c05991ed | 1010 | CFI_ENDPROC |
4b787e0b | 1011 | ENDPROC(child_rip) |
1da177e4 LT |
1012 | |
1013 | /* | |
1014 | * execve(). This function needs to use IRET, not SYSRET, to set up all state properly. | |
1015 | * | |
1016 | * C extern interface: | |
1017 | * extern long execve(char *name, char **argv, char **envp) | |
1018 | * | |
1019 | * asm input arguments: | |
1020 | * rdi: name, rsi: argv, rdx: envp | |
1021 | * | |
1022 | * We want to fall back into: | |
1023 | * extern long sys_execve(char *name, char **argv, char **envp, struct pt_regs regs) | |
1024 | * | |
1025 | * do_sys_execve asm fallback arguments: | |
1026 | * rdi: name, rsi: argv, rdx: envp, fake frame on the stack | |
1027 | */ | |
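/*
 * The canonical in-kernel user is launching init; roughly (a sketch of
 * the call made from init/main.c):
 *
 *	kernel_execve("/sbin/init", argv_init, envp_init);
 */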
3db03b4a | 1028 | ENTRY(kernel_execve) |
1da177e4 LT |
1029 | CFI_STARTPROC |
1030 | FAKE_STACK_FRAME $0 | |
1031 | SAVE_ALL | |
1032 | call sys_execve | |
1033 | movq %rax, RAX(%rsp) | |
1034 | RESTORE_REST | |
1035 | testq %rax,%rax | |
1036 | je int_ret_from_sys_call | |
1037 | RESTORE_ARGS | |
1038 | UNFAKE_STACK_FRAME | |
1039 | ret | |
1040 | CFI_ENDPROC | |
3db03b4a | 1041 | ENDPROC(kernel_execve) |
1da177e4 | 1042 | |
0f2fbdcb | 1043 | KPROBE_ENTRY(page_fault) |
1da177e4 | 1044 | errorentry do_page_fault |
d28c4393 | 1045 | KPROBE_END(page_fault) |
1da177e4 LT |
1046 | |
1047 | ENTRY(coprocessor_error) | |
1048 | zeroentry do_coprocessor_error | |
4b787e0b | 1049 | END(coprocessor_error) |
1da177e4 LT |
1050 | |
1051 | ENTRY(simd_coprocessor_error) | |
1052 | zeroentry do_simd_coprocessor_error | |
4b787e0b | 1053 | END(simd_coprocessor_error) |
1da177e4 LT |
1054 | |
1055 | ENTRY(device_not_available) | |
1056 | zeroentry math_state_restore | |
4b787e0b | 1057 | END(device_not_available) |
1da177e4 LT |
1058 | |
1059 | /* runs on exception stack */ | |
0f2fbdcb | 1060 | KPROBE_ENTRY(debug) |
7effaa88 | 1061 | INTR_FRAME |
1da177e4 LT |
1062 | pushq $0 |
1063 | CFI_ADJUST_CFA_OFFSET 8 | |
5f8efbb9 | 1064 | paranoidentry do_debug, DEBUG_STACK |
2601e64d | 1065 | paranoidexit |
d28c4393 | 1066 | KPROBE_END(debug) |
1da177e4 LT |
1067 | |
1068 | /* runs on exception stack */ | |
eddb6fb9 | 1069 | KPROBE_ENTRY(nmi) |
7effaa88 | 1070 | INTR_FRAME |
1da177e4 | 1071 | pushq $-1 |
7effaa88 | 1072 | CFI_ADJUST_CFA_OFFSET 8 |
2601e64d IM |
1073 | paranoidentry do_nmi, 0, 0 |
1074 | #ifdef CONFIG_TRACE_IRQFLAGS | |
1075 | paranoidexit 0 | |
1076 | #else | |
1077 | jmp paranoid_exit1 | |
1078 | CFI_ENDPROC | |
1079 | #endif | |
d28c4393 | 1080 | KPROBE_END(nmi) |
6fefb0d1 | 1081 | |
0f2fbdcb | 1082 | KPROBE_ENTRY(int3) |
b556b35e JB |
1083 | INTR_FRAME |
1084 | pushq $0 | |
1085 | CFI_ADJUST_CFA_OFFSET 8 | |
5f8efbb9 | 1086 | paranoidentry do_int3, DEBUG_STACK |
2601e64d | 1087 | jmp paranoid_exit1 |
b556b35e | 1088 | CFI_ENDPROC |
d28c4393 | 1089 | KPROBE_END(int3) |
1da177e4 LT |
1090 | |
1091 | ENTRY(overflow) | |
1092 | zeroentry do_overflow | |
4b787e0b | 1093 | END(overflow) |
1da177e4 LT |
1094 | |
1095 | ENTRY(bounds) | |
1096 | zeroentry do_bounds | |
4b787e0b | 1097 | END(bounds) |
1da177e4 LT |
1098 | |
1099 | ENTRY(invalid_op) | |
1100 | zeroentry do_invalid_op | |
4b787e0b | 1101 | END(invalid_op) |
1da177e4 LT |
1102 | |
1103 | ENTRY(coprocessor_segment_overrun) | |
1104 | zeroentry do_coprocessor_segment_overrun | |
4b787e0b | 1105 | END(coprocessor_segment_overrun) |
1da177e4 LT |
1106 | |
1107 | ENTRY(reserved) | |
1108 | zeroentry do_reserved | |
4b787e0b | 1109 | END(reserved) |
1da177e4 LT |
1110 | |
1111 | /* runs on exception stack */ | |
1112 | ENTRY(double_fault) | |
7effaa88 | 1113 | XCPT_FRAME |
1da177e4 | 1114 | paranoidentry do_double_fault |
2601e64d | 1115 | jmp paranoid_exit1 |
1da177e4 | 1116 | CFI_ENDPROC |
4b787e0b | 1117 | END(double_fault) |
1da177e4 LT |
1118 | |
1119 | ENTRY(invalid_TSS) | |
1120 | errorentry do_invalid_TSS | |
4b787e0b | 1121 | END(invalid_TSS) |
1da177e4 LT |
1122 | |
1123 | ENTRY(segment_not_present) | |
1124 | errorentry do_segment_not_present | |
4b787e0b | 1125 | END(segment_not_present) |
1da177e4 LT |
1126 | |
1127 | /* runs on exception stack */ | |
1128 | ENTRY(stack_segment) | |
7effaa88 | 1129 | XCPT_FRAME |
1da177e4 | 1130 | paranoidentry do_stack_segment |
2601e64d | 1131 | jmp paranoid_exit1 |
1da177e4 | 1132 | CFI_ENDPROC |
4b787e0b | 1133 | END(stack_segment) |
1da177e4 | 1134 | |
0f2fbdcb | 1135 | KPROBE_ENTRY(general_protection) |
1da177e4 | 1136 | errorentry do_general_protection |
d28c4393 | 1137 | KPROBE_END(general_protection) |
1da177e4 LT |
1138 | |
1139 | ENTRY(alignment_check) | |
1140 | errorentry do_alignment_check | |
4b787e0b | 1141 | END(alignment_check) |
1da177e4 LT |
1142 | |
1143 | ENTRY(divide_error) | |
1144 | zeroentry do_divide_error | |
4b787e0b | 1145 | END(divide_error) |
1da177e4 LT |
1146 | |
1147 | ENTRY(spurious_interrupt_bug) | |
1148 | zeroentry do_spurious_interrupt_bug | |
4b787e0b | 1149 | END(spurious_interrupt_bug) |
1da177e4 LT |
1150 | |
1151 | #ifdef CONFIG_X86_MCE | |
1152 | /* runs on exception stack */ | |
1153 | ENTRY(machine_check) | |
7effaa88 | 1154 | INTR_FRAME |
1da177e4 LT |
1155 | pushq $0 |
1156 | CFI_ADJUST_CFA_OFFSET 8 | |
1157 | paranoidentry do_machine_check | |
2601e64d | 1158 | jmp paranoid_exit1 |
1da177e4 | 1159 | CFI_ENDPROC |
4b787e0b | 1160 | END(machine_check) |
1da177e4 LT |
1161 | #endif |
1162 | ||
2699500b | 1163 | /* Call softirq on interrupt stack. Interrupts are off. */ |
ed6b676c | 1164 | ENTRY(call_softirq) |
7effaa88 | 1165 | CFI_STARTPROC |
2699500b AK |
1166 | push %rbp |
1167 | CFI_ADJUST_CFA_OFFSET 8 | |
1168 | CFI_REL_OFFSET rbp,0 | |
1169 | mov %rsp,%rbp | |
1170 | CFI_DEF_CFA_REGISTER rbp | |
ed6b676c | 1171 | incl %gs:pda_irqcount |
2699500b AK |
1172 | cmove %gs:pda_irqstackptr,%rsp |
1173 | push %rbp # backlink for old unwinder | |
ed6b676c | 1174 | call __do_softirq |
2699500b | 1175 | leaveq |
7effaa88 | 1176 | CFI_DEF_CFA_REGISTER rsp |
2699500b | 1177 | CFI_ADJUST_CFA_OFFSET -8 |
ed6b676c | 1178 | decl %gs:pda_irqcount |
ed6b676c | 1179 | ret |
7effaa88 | 1180 | CFI_ENDPROC |
4b787e0b | 1181 | ENDPROC(call_softirq) |
75154f40 AK |
1182 | |
1183 | KPROBE_ENTRY(ignore_sysret) | |
1184 | CFI_STARTPROC | |
1185 | mov $-ENOSYS,%eax | |
1186 | sysret | |
1187 | CFI_ENDPROC | |
1188 | ENDPROC(ignore_sysret) |