/*
 *  Copyright (C) 1991,1992  Linus Torvalds
 *
 * entry_32.S contains the system-call and low-level fault and trap handling routines.
 *
 * Stack layout while running C code:
 *	ptrace needs to have all registers on the stack.
 *	If the order here is changed, it needs to be
 *	updated in fork.c:copy_process(), signal.c:do_signal(),
 *	ptrace.c and ptrace.h
 *
 *	 0(%esp) - %ebx
 *	 4(%esp) - %ecx
 *	 8(%esp) - %edx
 *	 C(%esp) - %esi
 *	10(%esp) - %edi
 *	14(%esp) - %ebp
 *	18(%esp) - %eax
 *	1C(%esp) - %ds
 *	20(%esp) - %es
 *	24(%esp) - %fs
 *	28(%esp) - %gs		saved iff !CONFIG_X86_32_LAZY_GS
 *	2C(%esp) - orig_eax
 *	30(%esp) - %eip
 *	34(%esp) - %cs
 *	38(%esp) - %eflags
 *	3C(%esp) - %oldesp
 *	40(%esp) - %oldss
 */

#include <linux/linkage.h>
#include <linux/err.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/smp.h>
#include <asm/page_types.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/ftrace.h>
#include <asm/irq_vectors.h>
#include <asm/cpufeature.h>
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/smap.h>

	.section .entry.text, "ax"

/*
 * We use macros for low-level operations which need to be overridden
 * for paravirtualization. The following will never clobber any registers:
 *   INTERRUPT_RETURN (aka. "iret")
 *   GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
 *   ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
 *
 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
 * Allowing a register to be clobbered can shrink the paravirt replacement
 * enough to patch inline, increasing performance.
 */

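/*
 * Illustration (hypothetical, not a real patch site): a spot written as
 * DISABLE_INTERRUPTS(CLBR_EAX) promises that %eax may be trashed there.
 * The native patch is just "cli", but a hypervisor replacement that
 * needs a scratch register, e.g.
 *
 *	movl	$1, %eax
 *	movl	%eax, hypervisor_irq_mask	# made-up variable
 *
 * can then be patched inline; with CLBR_NONE the patcher would also
 * have to emit a push/pop of %eax around the replacement, often making
 * it too big to inline.
 */
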
#ifdef CONFIG_PREEMPT
# define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
#else
# define preempt_stop(clobbers)
# define resume_kernel		restore_all
#endif

.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)		# interrupts off?
	jz	1f
	TRACE_IRQS_ON
1:
#endif
.endm

/*
 * User gs save/restore
 *
 * %gs is used for userland TLS and kernel only uses it for stack
 * canary which is required to be at %gs:20 by gcc. Read the comment
 * at the top of stackprotector.h for more info.
 *
 * Local labels 98 and 99 are used.
 */
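/*
 * For context (a sketch of compiler output; details vary by gcc
 * version and flags): with stackprotector enabled, a protected
 * function is compiled roughly as
 *
 *	movl	%gs:20, %eax		# load the canary
 *	movl	%eax, -4(%ebp)		# stash it in the frame
 *	...
 *	movl	-4(%ebp), %eax
 *	xorl	%gs:20, %eax		# recheck on exit
 *	jne	<call __stack_chk_fail>
 *
 * which is why %gs must always point at a segment whose offset 20
 * holds the canary while kernel C code can run.
 */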
#ifdef CONFIG_X86_32_LAZY_GS

 /* unfortunately push/pop can't be no-op */
.macro PUSH_GS
	pushl	$0
.endm
.macro POP_GS pop=0
	addl	$(4 + \pop), %esp
.endm
.macro POP_GS_EX
.endm

 /* all the rest are no-op */
.macro PTGS_TO_GS
.endm
.macro PTGS_TO_GS_EX
.endm
.macro GS_TO_REG reg
.endm
.macro REG_TO_PTGS reg
.endm
.macro SET_KERNEL_GS reg
.endm

#else	/* CONFIG_X86_32_LAZY_GS */

.macro PUSH_GS
	pushl	%gs
.endm

.macro POP_GS pop=0
98:	popl	%gs
  .if \pop <> 0
	add	$\pop, %esp
  .endif
.endm
.macro POP_GS_EX
.pushsection .fixup, "ax"
99:	movl	$0, (%esp)
	jmp	98b
.popsection
	_ASM_EXTABLE(98b, 99b)
.endm

.macro PTGS_TO_GS
98:	mov	PT_GS(%esp), %gs
.endm
.macro PTGS_TO_GS_EX
.pushsection .fixup, "ax"
99:	movl	$0, PT_GS(%esp)
	jmp	98b
.popsection
	_ASM_EXTABLE(98b, 99b)
.endm

.macro GS_TO_REG reg
	movl	%gs, \reg
.endm
.macro REG_TO_PTGS reg
	movl	\reg, PT_GS(%esp)
.endm
.macro SET_KERNEL_GS reg
	movl	$(__KERNEL_STACK_CANARY), \reg
	movl	\reg, %gs
.endm

#endif /* CONFIG_X86_32_LAZY_GS */

.macro SAVE_ALL pt_regs_ax=%eax
	cld
	PUSH_GS
	pushl	%fs
	pushl	%es
	pushl	%ds
	pushl	\pt_regs_ax
	pushl	%ebp
	pushl	%edi
	pushl	%esi
	pushl	%edx
	pushl	%ecx
	pushl	%ebx
	movl	$(__USER_DS), %edx
	movl	%edx, %ds
	movl	%edx, %es
	movl	$(__KERNEL_PERCPU), %edx
	movl	%edx, %fs
	SET_KERNEL_GS %edx
.endm
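
/*
 * The pushes in SAVE_ALL fill a struct pt_regs from its top field
 * downward, which is exactly the offset table in the header comment:
 * the final "pushl %ebx" lands at 0(%esp), \pt_regs_ax fills the eax
 * slot at 0x18(%esp), and the hardware-pushed iret frame (eip, cs,
 * eflags, oldesp, oldss) stays above at 0x30(%esp) onward.
 */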

.macro RESTORE_INT_REGS
	popl	%ebx
	popl	%ecx
	popl	%edx
	popl	%esi
	popl	%edi
	popl	%ebp
	popl	%eax
.endm

.macro RESTORE_REGS pop=0
	RESTORE_INT_REGS
1:	popl	%ds
2:	popl	%es
3:	popl	%fs
	POP_GS \pop
.pushsection .fixup, "ax"
4:	movl	$0, (%esp)
	jmp	1b
5:	movl	$0, (%esp)
	jmp	2b
6:	movl	$0, (%esp)
	jmp	3b
.popsection
	_ASM_EXTABLE(1b, 4b)
	_ASM_EXTABLE(2b, 5b)
	_ASM_EXTABLE(3b, 6b)
	POP_GS_EX
.endm
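
/*
 * The fixup entries above implement "pop or zero": if popping a saved
 * segment register faults (e.g. userspace left a stale selector in its
 * frame), the exception table redirects to the .fixup stub, which
 * overwrites the saved value on the stack with 0 and retries the pop.
 * Popping the null selector cannot fault.
 */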

ENTRY(ret_from_fork)
	pushl	%eax
	call	schedule_tail
	GET_THREAD_INFO(%ebp)
	popl	%eax
	pushl	$0x0202				# Reset kernel eflags
	popfl

	/* When we fork, we trace the syscall return in the child, too. */
	movl	%esp, %eax
	call	syscall_return_slowpath
	jmp	restore_all
END(ret_from_fork)

ENTRY(ret_from_kernel_thread)
	pushl	%eax
	call	schedule_tail
	GET_THREAD_INFO(%ebp)
	popl	%eax
	pushl	$0x0202				# Reset kernel eflags
	popfl
	movl	PT_EBP(%esp), %eax
	call	*PT_EBX(%esp)
	movl	$0, PT_EAX(%esp)

	/*
	 * Kernel threads return to userspace as if returning from a syscall.
	 * We should check whether anything actually uses this path and, if so,
	 * consider switching it over to ret_from_fork.
	 */
	movl	%esp, %eax
	call	syscall_return_slowpath
	jmp	restore_all
ENDPROC(ret_from_kernel_thread)
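
/*
 * (The indirect call above works because copy_thread() in process_32.c
 * stashes the kernel thread's function pointer in the child's saved
 * EBX slot and its argument in the saved EBP slot, so PT_EBX/PT_EBP
 * hold fn and arg here rather than real register state.)
 */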

/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible which is why some of this is
 * less clear than it otherwise should be.
 */

	# userspace resumption stub bypassing syscall exit tracing
	ALIGN
ret_from_exception:
	preempt_stop(CLBR_ANY)
ret_from_intr:
	GET_THREAD_INFO(%ebp)
#ifdef CONFIG_VM86
	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS and CS
	movb	PT_CS(%esp), %al
	andl	$(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
#else
	/*
	 * We can be coming here from child spawned by kernel_thread().
	 */
	movl	PT_CS(%esp), %eax
	andl	$SEGMENT_RPL_MASK, %eax
#endif
	cmpl	$USER_RPL, %eax
	jb	resume_kernel			# not returning to v8086 or userspace
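
/*
 * (Worked example of the check above: when returning to vm86 mode the
 * saved EFLAGS has X86_EFLAGS_VM (bit 17) set, so after the mask
 * %eax >= 0x20000, never below USER_RPL (3), and we take the user
 * path. A return to the kernel gives CS.RPL = 0 in %al, which is
 * below 3, so we branch to resume_kernel.)
 */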

ENTRY(resume_userspace)
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	movl	%esp, %eax
	call	prepare_exit_to_usermode
	jmp	restore_all
END(ret_from_exception)

#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
	DISABLE_INTERRUPTS(CLBR_ANY)
need_resched:
	cmpl	$0, PER_CPU_VAR(__preempt_count)
	jnz	restore_all
	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)	# interrupts off (exception path) ?
	jz	restore_all
	call	preempt_schedule_irq
	jmp	need_resched
END(resume_kernel)
#endif

	# SYSENTER call handler stub
ENTRY(entry_SYSENTER_32)
	movl	TSS_sysenter_sp0(%esp), %esp
sysenter_past_esp:
	pushl	$__USER_DS		/* pt_regs->ss */
	pushl	%ebp			/* pt_regs->sp (stashed in bp) */
	pushfl				/* pt_regs->flags (except IF = 0) */
	ASM_CLAC			/* Clear AC after saving FLAGS */
	orl	$X86_EFLAGS_IF, (%esp)	/* Fix IF */
	pushl	$__USER_CS		/* pt_regs->cs */
	pushl	$0			/* pt_regs->ip = 0 (placeholder) */
	pushl	%eax			/* pt_regs->orig_ax */
	SAVE_ALL pt_regs_ax=$-ENOSYS	/* save rest */

	/*
	 * User mode is traced as though IRQs are on, and SYSENTER
	 * turned them off.
	 */
	TRACE_IRQS_OFF

	movl	%esp, %eax
	call	do_fast_syscall_32
	/* XEN PV guests always use IRET path */
	ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \
		    "jmp .Lsyscall_32_done", X86_FEATURE_XENPV

/* Opportunistic SYSEXIT */
	TRACE_IRQS_ON			/* User mode traces as IRQs on. */
	movl	PT_EIP(%esp), %edx	/* pt_regs->ip */
	movl	PT_OLDESP(%esp), %ecx	/* pt_regs->sp */
1:	mov	PT_FS(%esp), %fs
	PTGS_TO_GS
	popl	%ebx			/* pt_regs->bx */
	addl	$2*4, %esp		/* skip pt_regs->cx and pt_regs->dx */
	popl	%esi			/* pt_regs->si */
	popl	%edi			/* pt_regs->di */
	popl	%ebp			/* pt_regs->bp */
	popl	%eax			/* pt_regs->ax */

	/*
	 * Return back to the vDSO, which will pop ecx and edx.
	 * Don't bother with DS and ES (they already contain __USER_DS).
	 */
	sti
	sysexit

.pushsection .fixup, "ax"
2:	movl	$0, PT_FS(%esp)
	jmp	1b
.popsection
	_ASM_EXTABLE(1b, 2b)
	PTGS_TO_GS_EX
ENDPROC(entry_SYSENTER_32)
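
/*
 * For reference, the vDSO side of this handshake (vdso32/system_call.S;
 * sketched from memory, details vary by config) looks roughly like:
 *
 *	__kernel_vsyscall:
 *		pushl	%ecx
 *		pushl	%edx
 *		pushl	%ebp
 *		movl	%esp, %ebp
 *		sysenter
 *	landing_pad:
 *		popl	%ebp
 *		popl	%edx
 *		popl	%ecx
 *		ret
 *
 * SYSEXIT resumes at the landing pad with %edx/%ecx clobbered (they
 * carry EIP/ESP for SYSEXIT), which is why the vDSO saves them on the
 * user stack and pops them there rather than having us restore them.
 */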

	# system call handler stub
ENTRY(entry_INT80_32)
	ASM_CLAC
	pushl	%eax				/* pt_regs->orig_ax */
	SAVE_ALL pt_regs_ax=$-ENOSYS		/* save rest */

	/*
	 * User mode is traced as though IRQs are on. Unlike the 64-bit
	 * case, INT80 is a trap gate on 32-bit kernels, so interrupts
	 * are already on (unless user code is messing around with iopl).
	 */

	movl	%esp, %eax
	call	do_syscall_32_irqs_on
.Lsyscall_32_done:

restore_all:
	TRACE_IRQS_IRET
restore_all_notrace:
#ifdef CONFIG_X86_ESPFIX32
	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS, SS and CS
	/*
	 * Warning: PT_OLDSS(%esp) contains the wrong/random values if we
	 * are returning to the kernel.
	 * See comments in process.c:copy_thread() for details.
	 */
	movb	PT_OLDSS(%esp), %ah
	movb	PT_CS(%esp), %al
	andl	$(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
	cmpl	$((SEGMENT_LDT << 8) | USER_RPL), %eax
	je	ldt_ss				# returning to user-space with LDT SS
#endif
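
/*
 * (Concretely: after the two movb's, %al holds the low byte of the
 * saved CS, %ah the low byte of the saved SS, and bit 17 still carries
 * EFLAGS.VM. The mask keeps CS.RPL (bits 0-1), SS.TI (bit 2 of %ah,
 * i.e. bit 10 of %eax) and VM, so the compare matches exactly
 * "protected mode, RPL-3 return, stack selector in the LDT" - the only
 * case that needs the ESPFIX path below.)
 */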
restore_nocheck:
	RESTORE_REGS 4				# skip orig_eax/error_code
irq_return:
	INTERRUPT_RETURN
.section .fixup, "ax"
ENTRY(iret_exc)
	pushl	$0				# no error code
	pushl	$do_iret_error
	jmp	error_code
.previous
	_ASM_EXTABLE(irq_return, iret_exc)

#ifdef CONFIG_X86_ESPFIX32
ldt_ss:
#ifdef CONFIG_PARAVIRT
	/*
	 * The kernel can't run on a non-flat stack if paravirt mode
	 * is active. Rather than try to fixup the high bits of
	 * ESP, bypass this code entirely. This may break DOSemu
	 * and/or Wine support in a paravirt VM, although the option
	 * is still available to implement the setting of the high
	 * 16-bits in the INTERRUPT_RETURN paravirt-op.
	 */
	cmpl	$0, pv_info+PARAVIRT_enabled
	jne	restore_nocheck
#endif

	/*
	 * Setup and switch to ESPFIX stack
	 *
	 * We're returning to userspace with a 16 bit stack. The CPU will not
	 * restore the high word of ESP for us on executing iret... This is an
	 * "official" bug of all the x86-compatible CPUs, which we can work
	 * around to make dosemu and wine happy. We do this by preloading the
	 * high word of ESP with the high word of the userspace ESP while
	 * compensating for the offset by changing to the ESPFIX segment with
	 * a base address that matches for the difference.
	 */
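/*
 * Worked example with made-up addresses: say the kernel stack pointer
 * here is 0xf6123f50 and the saved userspace ESP is 0x0000abcd (a
 * 16-bit stack, so its high word is 0). Then:
 *
 *	%eax = 0x0000abcd -> 0x00003f50   (low word replaced by kernel SP)
 *	%edx = 0xf6123f50 - 0x00003f50 = 0xf6120000   (new segment base)
 *
 * After "lss (%esp), %esp" the linear stack address is still
 * 0xf6120000 + 0x00003f50 = 0xf6123f50, but the architectural ESP now
 * carries the *user's* high word, so when iret restores only the low
 * 16 bits of ESP the leftover high word is already the right one.
 */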
#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
	mov	%esp, %edx			/* load kernel esp */
	mov	PT_OLDESP(%esp), %eax		/* load userspace esp */
	mov	%dx, %ax			/* eax: new kernel esp */
	sub	%eax, %edx			/* offset (low word is 0) */
	shr	$16, %edx
	mov	%dl, GDT_ESPFIX_SS + 4		/* bits 16..23 */
	mov	%dh, GDT_ESPFIX_SS + 7		/* bits 24..31 */
	pushl	$__ESPFIX_SS
	pushl	%eax				/* new kernel esp */
	/*
	 * Disable interrupts, but do not irqtrace this section: we
	 * will soon execute iret and the tracer was already set to
	 * the irqstate after the IRET:
	 */
	DISABLE_INTERRUPTS(CLBR_EAX)
	lss	(%esp), %esp			/* switch to espfix segment */
	jmp	restore_nocheck
#endif
ENDPROC(entry_INT80_32)

.macro FIXUP_ESPFIX_STACK
/*
 * Switch back from the ESPFIX stack to the normal zero-based stack
 *
 * We can't call C functions using the ESPFIX stack. This code reads
 * the high word of the segment base from the GDT and switches to the
 * normal stack, adjusting ESP with the matching offset.
 */
#ifdef CONFIG_X86_ESPFIX32
	/* fixup the stack */
	mov	GDT_ESPFIX_SS + 4, %al		/* bits 16..23 */
	mov	GDT_ESPFIX_SS + 7, %ah		/* bits 24..31 */
	shl	$16, %eax
	addl	%esp, %eax			/* the adjusted stack pointer */
	pushl	$__KERNEL_DS
	pushl	%eax
	lss	(%esp), %esp			/* switch to the normal stack segment */
#endif
.endm
.macro UNWIND_ESPFIX_STACK
#ifdef CONFIG_X86_ESPFIX32
	movl	%ss, %eax
	/* see if on espfix stack */
	cmpw	$__ESPFIX_SS, %ax
	jne	27f
	movl	$__KERNEL_DS, %eax
	movl	%eax, %ds
	movl	%eax, %es
	/* switch to normal stack */
	FIXUP_ESPFIX_STACK
27:
#endif
.endm

/*
 * Build the entry stubs with some assembler magic.
 * We pack 1 stub into every 8-byte block.
 */
	.align 8
ENTRY(irq_entries_start)
    vector=FIRST_EXTERNAL_VECTOR
    .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
	pushl	$(~vector+0x80)			/* Note: always in signed byte range */
    vector=vector+1
	jmp	common_interrupt
	.align	8
    .endr
END(irq_entries_start)
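
/*
 * (Example of the encoding: for vector 0x20, ~0x20 + 0x80 = 0x5f;
 * common_interrupt then adds -0x80 back, leaving 0xffffffdf = ~0x20 in
 * the orig_eax slot, and do_IRQ recovers the vector by complementing
 * it again. All values from ~0x20+0x80 down to ~0xff+0x80 fit in a
 * signed byte, so each "pushl" assembles to two bytes and the
 * push+jmp stub fits its 8-byte slot.)
 */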

/*
 * the CPU automatically disables interrupts when executing an IRQ vector,
 * so IRQ-flags tracing has to follow that:
 */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
	ASM_CLAC
	addl	$-0x80, (%esp)			/* Adjust vector into the [-256, -1] range */
	SAVE_ALL
	TRACE_IRQS_OFF
	movl	%esp, %eax
	call	do_IRQ
	jmp	ret_from_intr
ENDPROC(common_interrupt)

#define BUILD_INTERRUPT3(name, nr, fn)	\
ENTRY(name)				\
	ASM_CLAC;			\
	pushl	$~(nr);			\
	SAVE_ALL;			\
	TRACE_IRQS_OFF			\
	movl	%esp, %eax;		\
	call	fn;			\
	jmp	ret_from_intr;		\
ENDPROC(name)


#ifdef CONFIG_TRACING
# define TRACE_BUILD_INTERRUPT(name, nr)	BUILD_INTERRUPT3(trace_##name, nr, smp_trace_##name)
#else
# define TRACE_BUILD_INTERRUPT(name, nr)
#endif

#define BUILD_INTERRUPT(name, nr)		\
	BUILD_INTERRUPT3(name, nr, smp_##name);	\
	TRACE_BUILD_INTERRUPT(name, nr)

/* The include is where all of the SMP etc. interrupts come from */
#include <asm/entry_arch.h>
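
/*
 * (For instance, entry_arch.h contains lines like
 * BUILD_INTERRUPT(reschedule_interrupt, RESCHEDULE_VECTOR), which
 * expands to an ENTRY(reschedule_interrupt) stub that pushes the
 * complemented vector and calls smp_reschedule_interrupt, plus a
 * trace_reschedule_interrupt variant when CONFIG_TRACING is on.)
 */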

ENTRY(coprocessor_error)
	ASM_CLAC
	pushl	$0
	pushl	$do_coprocessor_error
	jmp	error_code
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
	ASM_CLAC
	pushl	$0
#ifdef CONFIG_X86_INVD_BUG
	/* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
	ALTERNATIVE "pushl	$do_general_protection",	\
		    "pushl	$do_simd_coprocessor_error",	\
		    X86_FEATURE_XMM
#else
	pushl	$do_simd_coprocessor_error
#endif
	jmp	error_code
END(simd_coprocessor_error)

ENTRY(device_not_available)
	ASM_CLAC
	pushl	$-1				# mark this as an int
	pushl	$do_device_not_available
	jmp	error_code
END(device_not_available)

#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
	iret
	_ASM_EXTABLE(native_iret, iret_exc)
END(native_iret)
#endif

ENTRY(overflow)
	ASM_CLAC
	pushl	$0
	pushl	$do_overflow
	jmp	error_code
END(overflow)

ENTRY(bounds)
	ASM_CLAC
	pushl	$0
	pushl	$do_bounds
	jmp	error_code
END(bounds)

ENTRY(invalid_op)
	ASM_CLAC
	pushl	$0
	pushl	$do_invalid_op
	jmp	error_code
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
	ASM_CLAC
	pushl	$0
	pushl	$do_coprocessor_segment_overrun
	jmp	error_code
END(coprocessor_segment_overrun)

ENTRY(invalid_TSS)
	ASM_CLAC
	pushl	$do_invalid_TSS
	jmp	error_code
END(invalid_TSS)

ENTRY(segment_not_present)
	ASM_CLAC
	pushl	$do_segment_not_present
	jmp	error_code
END(segment_not_present)

ENTRY(stack_segment)
	ASM_CLAC
	pushl	$do_stack_segment
	jmp	error_code
END(stack_segment)

ENTRY(alignment_check)
	ASM_CLAC
	pushl	$do_alignment_check
	jmp	error_code
END(alignment_check)

ENTRY(divide_error)
	ASM_CLAC
	pushl	$0				# no error code
	pushl	$do_divide_error
	jmp	error_code
END(divide_error)

#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
	ASM_CLAC
	pushl	$0
	pushl	machine_check_vector
	jmp	error_code
END(machine_check)
#endif

ENTRY(spurious_interrupt_bug)
	ASM_CLAC
	pushl	$0
	pushl	$do_spurious_interrupt_bug
	jmp	error_code
END(spurious_interrupt_bug)
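
/*
 * (Note the pattern in the stubs above: for exceptions where the CPU
 * itself pushes an error code (#TS, #NP, #SS, #AC) only the handler
 * address is pushed, while the others push a dummy $0 - or $-1 to
 * mark a non-syscall entry - so that every path reaches error_code
 * with the same layout: error code in the orig_eax slot, handler
 * address on top of it.)
 */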

#ifdef CONFIG_XEN
/*
 * Xen doesn't set %esp to be precisely what the normal SYSENTER
 * entry point expects, so fix it up before using the normal path.
 */
ENTRY(xen_sysenter_target)
	addl	$5*4, %esp			/* remove xen-provided frame */
	jmp	sysenter_past_esp

ENTRY(xen_hypervisor_callback)
	pushl	$-1				/* orig_ax = -1 => not a system call */
	SAVE_ALL
	TRACE_IRQS_OFF

	/*
	 * Check to see if we got the event in the critical
	 * region in xen_iret_direct, after we've reenabled
	 * events and checked for pending events. This simulates
	 * iret instruction's behaviour where it delivers a
	 * pending interrupt when enabling interrupts:
	 */
	movl	PT_EIP(%esp), %eax
	cmpl	$xen_iret_start_crit, %eax
	jb	1f
	cmpl	$xen_iret_end_crit, %eax
	jae	1f

	jmp	xen_iret_crit_fixup

ENTRY(xen_do_upcall)
1:	mov	%esp, %eax
	call	xen_evtchn_do_upcall
#ifndef CONFIG_PREEMPT
	call	xen_maybe_preempt_hcall
#endif
	jmp	ret_from_intr
ENDPROC(xen_hypervisor_callback)

/*
 * Hypervisor uses this for application faults while it executes.
 * We get here for two reasons:
 *  1. Fault while reloading DS, ES, FS or GS
 *  2. Fault while executing IRET
 * Category 1 we fix up by reattempting the load, and zeroing the segment
 * register if the load fails.
 * Category 2 we fix up by jumping to do_iret_error. We cannot use the
 * normal Linux return path in this case because if we use the IRET hypercall
 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 * We distinguish between categories by maintaining a status value in EAX.
 */
ENTRY(xen_failsafe_callback)
	pushl	%eax
	movl	$1, %eax
1:	mov	4(%esp), %ds
2:	mov	8(%esp), %es
3:	mov	12(%esp), %fs
4:	mov	16(%esp), %gs
	/* EAX == 0 => Category 1 (Bad segment)
	   EAX != 0 => Category 2 (Bad IRET) */
	testl	%eax, %eax
	popl	%eax
	lea	16(%esp), %esp
	jz	5f
	jmp	iret_exc
5:	pushl	$-1				/* orig_ax = -1 => not a system call */
	SAVE_ALL
	jmp	ret_from_exception

.section .fixup, "ax"
6:	xorl	%eax, %eax
	movl	%eax, 4(%esp)
	jmp	1b
7:	xorl	%eax, %eax
	movl	%eax, 8(%esp)
	jmp	2b
8:	xorl	%eax, %eax
	movl	%eax, 12(%esp)
	jmp	3b
9:	xorl	%eax, %eax
	movl	%eax, 16(%esp)
	jmp	4b
.previous
	_ASM_EXTABLE(1b, 6b)
	_ASM_EXTABLE(2b, 7b)
	_ASM_EXTABLE(3b, 8b)
	_ASM_EXTABLE(4b, 9b)
ENDPROC(xen_failsafe_callback)
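
/*
 * (Flow for a Category 1 fault, e.g. a stale %ds: the "mov 4(%esp),
 * %ds" at 1: faults, the extable entry sends us to 6:, which zeroes
 * both %eax and the saved slot and retries; reloading the null
 * selector succeeds, %eax is left 0, and we rebuild a pt_regs frame
 * and leave through the normal ret_from_exception path. If none of
 * the segment loads fault, %eax stays 1 and the fault must have been
 * the IRET itself, so we go to iret_exc instead.)
 */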

BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
		 xen_evtchn_do_upcall)

#endif /* CONFIG_XEN */

#if IS_ENABLED(CONFIG_HYPERV)

BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
		 hyperv_vector_handler)

#endif /* CONFIG_HYPERV */

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE

ENTRY(mcount)
	ret
END(mcount)

ENTRY(ftrace_caller)
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	pushl	$0				/* Pass NULL as regs pointer */
	movl	4*4(%esp), %eax
	movl	0x4(%ebp), %edx
	movl	function_trace_op, %ecx
	subl	$MCOUNT_INSN_SIZE, %eax

.globl ftrace_call
ftrace_call:
	call	ftrace_stub

	addl	$4, %esp			/* skip NULL pointer */
	popl	%edx
	popl	%ecx
	popl	%eax
ftrace_ret:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
	jmp	ftrace_stub
#endif

.globl ftrace_stub
ftrace_stub:
	ret
END(ftrace_caller)

ENTRY(ftrace_regs_caller)
	pushf					/* push flags before compare (in cs location) */

	/*
	 * i386 does not save SS and ESP when coming from kernel.
	 * Instead, to get sp, &regs->sp is used (see ptrace.h).
	 * Unfortunately, that means eflags must be at the same location
	 * as the current return ip is. We move the return ip into the
	 * ip location, and move flags into the return ip location.
	 */
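/*
 * Frame sketch right after the "pushl 4(%esp)" below, before the
 * general-purpose pushes (offsets from the new %esp):
 *
 *	0(%esp): copy of return ip	-> becomes regs->ip
 *	4(%esp): saved flags		-> sits in the regs->cs slot; moved
 *					   into regs->flags, then the slot is
 *					   overwritten with $__KERNEL_CS
 *	8(%esp): original return ip	-> occupies the regs->flags slot and
 *					   is clobbered by the flags move (the
 *					   copy at regs->ip is used from then on)
 *
 * After the shuffle the top of the frame reads ip/cs/flags like a real
 * pt_regs, and &regs->sp points just past it as ptrace.h expects.
 */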
	pushl	4(%esp)				/* save return ip into ip slot */

	pushl	$0				/* Load 0 into orig_ax */
	pushl	%gs
	pushl	%fs
	pushl	%es
	pushl	%ds
	pushl	%eax
	pushl	%ebp
	pushl	%edi
	pushl	%esi
	pushl	%edx
	pushl	%ecx
	pushl	%ebx

	movl	13*4(%esp), %eax		/* Get the saved flags */
	movl	%eax, 14*4(%esp)		/* Move saved flags into regs->flags location */
						/* clobbering return ip */
	movl	$__KERNEL_CS, 13*4(%esp)

	movl	12*4(%esp), %eax		/* Load ip (1st parameter) */
	subl	$MCOUNT_INSN_SIZE, %eax		/* Adjust ip */
	movl	0x4(%ebp), %edx			/* Load parent ip (2nd parameter) */
	movl	function_trace_op, %ecx		/* Save ftrace_pos in 3rd parameter */
	pushl	%esp				/* Save pt_regs as 4th parameter */

GLOBAL(ftrace_regs_call)
	call	ftrace_stub

	addl	$4, %esp			/* Skip pt_regs */
	movl	14*4(%esp), %eax		/* Move flags back into cs */
	movl	%eax, 13*4(%esp)		/* Needed to keep addl from modifying flags */
	movl	12*4(%esp), %eax		/* Get return ip from regs->ip */
	movl	%eax, 14*4(%esp)		/* Put return ip back for ret */

	popl	%ebx
	popl	%ecx
	popl	%edx
	popl	%esi
	popl	%edi
	popl	%ebp
	popl	%eax
	popl	%ds
	popl	%es
	popl	%fs
	popl	%gs
	addl	$8, %esp			/* Skip orig_ax and ip */
	popf					/* Pop flags at end (no addl to corrupt flags) */
	jmp	ftrace_ret

	popf
	jmp	ftrace_stub
#else /* ! CONFIG_DYNAMIC_FTRACE */

ENTRY(mcount)
	cmpl	$__PAGE_OFFSET, %esp
	jb	ftrace_stub			/* Paging not enabled yet? */

	cmpl	$ftrace_stub, ftrace_trace_function
	jnz	trace
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	cmpl	$ftrace_stub, ftrace_graph_return
	jnz	ftrace_graph_caller

	cmpl	$ftrace_graph_entry_stub, ftrace_graph_entry
	jnz	ftrace_graph_caller
#endif
.globl ftrace_stub
ftrace_stub:
	ret

	/* taken from glibc */
trace:
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	movl	0xc(%esp), %eax
	movl	0x4(%ebp), %edx
	subl	$MCOUNT_INSN_SIZE, %eax

	call	*ftrace_trace_function

	popl	%edx
	popl	%ecx
	popl	%eax
	jmp	ftrace_stub
END(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	movl	0xc(%esp), %eax
	lea	0x4(%ebp), %edx
	movl	(%ebp), %ecx
	subl	$MCOUNT_INSN_SIZE, %eax
	call	prepare_ftrace_return
	popl	%edx
	popl	%ecx
	popl	%eax
	ret
END(ftrace_graph_caller)

.globl return_to_handler
return_to_handler:
	pushl	%eax
	pushl	%edx
	movl	%ebp, %eax
	call	ftrace_return_to_handler
	movl	%eax, %ecx
	popl	%edx
	popl	%eax
	jmp	*%ecx
#endif
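
/*
 * (How the trampoline above gets used: prepare_ftrace_return, called
 * from ftrace_graph_caller, replaces the traced function's saved
 * return address with return_to_handler; when the function returns
 * here, ftrace_return_to_handler records the exit and hands back the
 * original return address, which we then jump to. %eax/%edx are
 * preserved around the call because they may hold the traced
 * function's return value.)
 */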

#ifdef CONFIG_TRACING
ENTRY(trace_page_fault)
	ASM_CLAC
	pushl	$trace_do_page_fault
	jmp	error_code
END(trace_page_fault)
#endif

ENTRY(page_fault)
	ASM_CLAC
	pushl	$do_page_fault
	ALIGN
error_code:
	/* the function address is in %gs's slot on the stack */
	pushl	%fs
	pushl	%es
	pushl	%ds
	pushl	%eax
	pushl	%ebp
	pushl	%edi
	pushl	%esi
	pushl	%edx
	pushl	%ecx
	pushl	%ebx
	cld
	movl	$(__KERNEL_PERCPU), %ecx
	movl	%ecx, %fs
	UNWIND_ESPFIX_STACK
	GS_TO_REG %ecx
	movl	PT_GS(%esp), %edi		# get the function address
	movl	PT_ORIG_EAX(%esp), %edx		# get the error code
	movl	$-1, PT_ORIG_EAX(%esp)		# no syscall to restart
	REG_TO_PTGS %ecx
	SET_KERNEL_GS %ecx
	movl	$(__USER_DS), %ecx
	movl	%ecx, %ds
	movl	%ecx, %es
	TRACE_IRQS_OFF
	movl	%esp, %eax			# pt_regs pointer
	call	*%edi
	jmp	ret_from_exception
END(page_fault)
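
/*
 * (Stack accounting for the error_code path: for #PF the CPU pushes a
 * hardware error code, which lands in the orig_eax slot, and the
 * stub's "pushl $do_page_fault" lands where SAVE_ALL would have put
 * %gs; error_code then fetches the handler from PT_GS and the error
 * code from PT_ORIG_EAX, and repairs both slots before calling the
 * handler with the pt_regs pointer in %eax.)
 */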

/*
 * Debug traps and NMI can happen at the one SYSENTER instruction
 * that sets up the real kernel stack. Check here, since we can't
 * allow the wrong stack to be used.
 *
 * "TSS_sysenter_sp0+12" is because the NMI/debug handler will have
 * already pushed 3 words if it hits on the sysenter instruction:
 * eflags, cs and eip.
 *
 * We just load the right stack, and push the three (known) values
 * by hand onto the new stack - while updating the return eip past
 * the instruction that would have done it for sysenter.
 */
.macro FIX_STACK offset ok label
	cmpw	$__KERNEL_CS, 4(%esp)
	jne	\ok
\label:
	movl	TSS_sysenter_sp0 + \offset(%esp), %esp
	pushfl
	pushl	$__KERNEL_CS
	pushl	$sysenter_past_esp
.endm
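
/*
 * (Usage of the offsets: "FIX_STACK 12, ..." handles a trap that hit
 * the SYSENTER instruction itself, where the handler's own 3-word
 * iret frame sits between %esp and the TSS field. The NMI path below
 * also uses "FIX_STACK 24, ..." for the case where the NMI landed on
 * the debug handler's fixup, i.e. two 3-word frames deep.)
 */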

ENTRY(debug)
	ASM_CLAC
	cmpl	$entry_SYSENTER_32, (%esp)
	jne	debug_stack_correct
	FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
debug_stack_correct:
	pushl	$-1				# mark this as an int
	SAVE_ALL
	TRACE_IRQS_OFF
	xorl	%edx, %edx			# error code 0
	movl	%esp, %eax			# pt_regs pointer
	call	do_debug
	jmp	ret_from_exception
END(debug)

/*
 * NMI is doubly nasty. It can happen _while_ we're handling
 * a debug fault, and the debug fault hasn't yet been able to
 * clear up the stack. So we first check whether we got an
 * NMI on the sysenter entry path, but after that we need to
 * check whether we got an NMI on the debug path where the debug
 * fault happened on the sysenter path.
 */
ENTRY(nmi)
	ASM_CLAC
#ifdef CONFIG_X86_ESPFIX32
	pushl	%eax
	movl	%ss, %eax
	cmpw	$__ESPFIX_SS, %ax
	popl	%eax
	je	nmi_espfix_stack
#endif
	cmpl	$entry_SYSENTER_32, (%esp)
	je	nmi_stack_fixup
	pushl	%eax
	movl	%esp, %eax
	/*
	 * Do not access memory above the end of our stack page,
	 * it might not exist.
	 */
	andl	$(THREAD_SIZE-1), %eax
	cmpl	$(THREAD_SIZE-20), %eax
	popl	%eax
	jae	nmi_stack_correct
	cmpl	$entry_SYSENTER_32, 12(%esp)
	je	nmi_debug_stack_check
nmi_stack_correct:
	pushl	%eax
	SAVE_ALL
	xorl	%edx, %edx			# zero error code
	movl	%esp, %eax			# pt_regs pointer
	call	do_nmi
	jmp	restore_all_notrace

nmi_stack_fixup:
	FIX_STACK 12, nmi_stack_correct, 1
	jmp	nmi_stack_correct

nmi_debug_stack_check:
	cmpw	$__KERNEL_CS, 16(%esp)
	jne	nmi_stack_correct
	cmpl	$debug, (%esp)
	jb	nmi_stack_correct
	cmpl	$debug_esp_fix_insn, (%esp)
	ja	nmi_stack_correct
	FIX_STACK 24, nmi_stack_correct, 1
	jmp	nmi_stack_correct

#ifdef CONFIG_X86_ESPFIX32
nmi_espfix_stack:
	/*
	 * create the pointer to lss back
	 */
	pushl	%ss
	pushl	%esp
	addl	$4, (%esp)
	/* copy the iret frame of 12 bytes */
	.rept 3
	pushl	16(%esp)
	.endr
	pushl	%eax
	SAVE_ALL
	FIXUP_ESPFIX_STACK			# %eax == %esp
	xorl	%edx, %edx			# zero error code
	call	do_nmi
	RESTORE_REGS
	lss	12+4(%esp), %esp		# back to espfix stack
	jmp	irq_return
#endif
END(nmi)

ENTRY(int3)
	ASM_CLAC
	pushl	$-1				# mark this as an int
	SAVE_ALL
	TRACE_IRQS_OFF
	xorl	%edx, %edx			# zero error code
	movl	%esp, %eax			# pt_regs pointer
	call	do_int3
	jmp	ret_from_exception
END(int3)

ENTRY(general_protection)
	pushl	$do_general_protection
	jmp	error_code
END(general_protection)

#ifdef CONFIG_KVM_GUEST
ENTRY(async_page_fault)
	ASM_CLAC
	pushl	$do_async_page_fault
	jmp	error_code
END(async_page_fault)
#endif