/*
 * linux/arch/x86_64/entry.S
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * Some of this is documented in Documentation/x86/entry_64.txt
 *
 * NOTE: This code handles signal recognition, which happens every time
 * after an interrupt and after each system call.
 *
 * Normal syscalls and interrupts don't save a full stack frame; this is
 * only done for syscall tracing, signals or fork/exec et al.
 *
 * A note on terminology:
 * - top of stack: Architecture-defined interrupt frame from SS to RIP
 *   at the top of the kernel process stack.
 * - partial stack frame: partially saved registers up to R11.
 * - full stack frame: Like partial stack frame, but all registers saved.
 *
 * Some macro usage:
 * - CFI macros are used to generate dwarf2 unwind information for better
 *   backtraces. They don't change any code.
 * - SAVE_ALL/RESTORE_ALL - Save/restore all registers
 * - SAVE_ARGS/RESTORE_ARGS - Save/restore registers that C functions modify.
 *   There are unfortunately lots of special cases where some registers are
 *   not touched. The macro is a big mess that should be cleaned up.
 * - SAVE_REST/RESTORE_REST - Handle the registers not saved by SAVE_ARGS.
 *   Gives a full stack frame.
 * - ENTRY/END - Define functions in the symbol table.
 * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - Fix up the hardware stack
 *   frame that is otherwise undefined after a SYSCALL.
 * - TRACE_IRQ_* - Trace hard interrupt state for lock debugging.
 * - errorentry/paranoidentry/zeroentry - Define exception entry points.
 */

#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/dwarf2.h>
#include <asm/calling.h>
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page_types.h>
#include <asm/irqflags.h>
#include <asm/paravirt.h>
#include <asm/ftrace.h>
#include <asm/percpu.h>
#include <asm/asm.h>
#include <asm/context_tracking.h>
#include <asm/smap.h>
#include <linux/err.h>

/* Avoid __ASSEMBLY__'ifying <linux/audit.h> just for this. */
#include <linux/elf-em.h>
#define __AUDIT_ARCH_64BIT 0x80000000
#define __AUDIT_ARCH_LE 0x40000000
#define AUDIT_ARCH_X86_64 (EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
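/*
 * __AUDIT_ARCH_64BIT flags a 64-bit ABI and __AUDIT_ARCH_LE a little-endian
 * one; OR-ed with the ELF machine value they form the arch token that the
 * audit fast path below passes to __audit_syscall_entry().
 */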

.code64
.section .entry.text, "ax"

#ifdef CONFIG_FUNCTION_TRACER

#ifdef CC_USING_FENTRY
# define function_hook __fentry__
#else
# define function_hook mcount
#endif

#ifdef CONFIG_DYNAMIC_FTRACE

ENTRY(function_hook)
        retq
END(function_hook)

/* skip is set if stack has been adjusted */
.macro ftrace_caller_setup skip=0
        MCOUNT_SAVE_FRAME \skip

        /* Load the ftrace_ops into the 3rd parameter */
        leaq function_trace_op, %rdx

        /* Load ip into the first parameter */
        movq RIP(%rsp), %rdi
        subq $MCOUNT_INSN_SIZE, %rdi
        /* Load the parent_ip into the second parameter */
#ifdef CC_USING_FENTRY
        movq SS+16(%rsp), %rsi
#else
        movq 8(%rbp), %rsi
#endif
.endm
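/*
 * In the macro above, RIP(%rsp) holds the return address into the traced
 * function, i.e. the instruction right after the mcount/fentry call;
 * backing it up by MCOUNT_INSN_SIZE yields the address of the call site
 * itself, which is the ip value the ftrace callbacks expect.
 */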

ENTRY(ftrace_caller)
        /* Check if tracing was disabled (quick check) */
        cmpl $0, function_trace_stop
        jne ftrace_stub

        ftrace_caller_setup
        /* regs go into 4th parameter (but make it NULL) */
        movq $0, %rcx

GLOBAL(ftrace_call)
        call ftrace_stub

        MCOUNT_RESTORE_FRAME
ftrace_return:

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
GLOBAL(ftrace_graph_call)
        jmp ftrace_stub
#endif

GLOBAL(ftrace_stub)
        retq
END(ftrace_caller)

ENTRY(ftrace_regs_caller)
        /* Save the current flags before compare (in SS location) */
        pushfq

        /* Check if tracing was disabled (quick check) */
        cmpl $0, function_trace_stop
        jne ftrace_restore_flags

        /* skip=8 to skip flags saved in SS */
        ftrace_caller_setup 8

        /* Save the rest of pt_regs */
        movq %r15, R15(%rsp)
        movq %r14, R14(%rsp)
        movq %r13, R13(%rsp)
        movq %r12, R12(%rsp)
        movq %r11, R11(%rsp)
        movq %r10, R10(%rsp)
        movq %rbp, RBP(%rsp)
        movq %rbx, RBX(%rsp)
        /* Copy saved flags */
        movq SS(%rsp), %rcx
        movq %rcx, EFLAGS(%rsp)
        /* Kernel segments */
        movq $__KERNEL_DS, %rcx
        movq %rcx, SS(%rsp)
        movq $__KERNEL_CS, %rcx
        movq %rcx, CS(%rsp)
        /* Stack - skipping return address */
        leaq SS+16(%rsp), %rcx
        movq %rcx, RSP(%rsp)

        /* regs go into 4th parameter */
        leaq (%rsp), %rcx

GLOBAL(ftrace_regs_call)
        call ftrace_stub

        /* Copy flags back to SS, to restore them */
        movq EFLAGS(%rsp), %rax
        movq %rax, SS(%rsp)

        /* Handlers can change the RIP */
        movq RIP(%rsp), %rax
        movq %rax, SS+8(%rsp)

        /* restore the rest of pt_regs */
        movq R15(%rsp), %r15
        movq R14(%rsp), %r14
        movq R13(%rsp), %r13
        movq R12(%rsp), %r12
        movq R10(%rsp), %r10
        movq RBP(%rsp), %rbp
        movq RBX(%rsp), %rbx

        /* skip=8 to skip flags saved in SS */
        MCOUNT_RESTORE_FRAME 8

        /* Restore flags */
        popfq

        jmp ftrace_return
ftrace_restore_flags:
        popfq
        jmp ftrace_stub

END(ftrace_regs_caller)


#else /* ! CONFIG_DYNAMIC_FTRACE */

ENTRY(function_hook)
        cmpl $0, function_trace_stop
        jne ftrace_stub

        cmpq $ftrace_stub, ftrace_trace_function
        jnz trace

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        cmpq $ftrace_stub, ftrace_graph_return
        jnz ftrace_graph_caller

        cmpq $ftrace_graph_entry_stub, ftrace_graph_entry
        jnz ftrace_graph_caller
#endif

GLOBAL(ftrace_stub)
        retq

trace:
        MCOUNT_SAVE_FRAME

        movq RIP(%rsp), %rdi
#ifdef CC_USING_FENTRY
        movq SS+16(%rsp), %rsi
#else
        movq 8(%rbp), %rsi
#endif
        subq $MCOUNT_INSN_SIZE, %rdi

        call *ftrace_trace_function

        MCOUNT_RESTORE_FRAME

        jmp ftrace_stub
END(function_hook)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
        MCOUNT_SAVE_FRAME

#ifdef CC_USING_FENTRY
        leaq SS+16(%rsp), %rdi
        movq $0, %rdx /* No framepointers needed */
#else
        leaq 8(%rbp), %rdi
        movq (%rbp), %rdx
#endif
        movq RIP(%rsp), %rsi
        subq $MCOUNT_INSN_SIZE, %rsi

        call prepare_ftrace_return

        MCOUNT_RESTORE_FRAME

        retq
END(ftrace_graph_caller)

GLOBAL(return_to_handler)
        subq $24, %rsp

        /* Save the return values */
        movq %rax, (%rsp)
        movq %rdx, 8(%rsp)
        movq %rbp, %rdi

        call ftrace_return_to_handler

        movq %rax, %rdi
        movq 8(%rsp), %rdx
        movq (%rsp), %rax
        addq $24, %rsp
        jmp *%rdi
#endif


#ifndef CONFIG_PREEMPT
#define retint_kernel retint_restore_args
#endif
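/*
 * Without CONFIG_PREEMPT a return to kernel space never reschedules, so
 * retint_kernel simply aliases the plain restore path retint_restore_args.
 */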

#ifdef CONFIG_PARAVIRT
ENTRY(native_usergs_sysret64)
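        /*
         * SYSRET returns to user mode with the RIP taken from %rcx and the
         * RFLAGS image from %r11; the swapgs restores the user GS base first.
         */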
        swapgs
        sysretq
ENDPROC(native_usergs_sysret64)
#endif /* CONFIG_PARAVIRT */


.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
#ifdef CONFIG_TRACE_IRQFLAGS
        bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */
        jnc 1f
        TRACE_IRQS_ON
1:
#endif
.endm

/*
 * When the dynamic function tracer is enabled it will add a breakpoint
 * to all locations that it is about to modify, sync CPUs, update
 * all the code, sync CPUs, then remove the breakpoints. During this time,
 * if lockdep is enabled, it might jump back into the debug handler
 * outside the updating of the IST protection (TRACE_IRQS_ON/OFF).
 *
 * We need to change the IDT table before calling TRACE_IRQS_ON/OFF to
 * make sure the stack pointer does not get reset back to the top
 * of the debug stack, and instead just reuses the current stack.
 */
#if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_TRACE_IRQFLAGS)

.macro TRACE_IRQS_OFF_DEBUG
        call debug_stack_set_zero
        TRACE_IRQS_OFF
        call debug_stack_reset
.endm

.macro TRACE_IRQS_ON_DEBUG
        call debug_stack_set_zero
        TRACE_IRQS_ON
        call debug_stack_reset
.endm

.macro TRACE_IRQS_IRETQ_DEBUG offset=ARGOFFSET
        bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */
        jnc 1f
        TRACE_IRQS_ON_DEBUG
1:
.endm

#else
# define TRACE_IRQS_OFF_DEBUG TRACE_IRQS_OFF
# define TRACE_IRQS_ON_DEBUG TRACE_IRQS_ON
# define TRACE_IRQS_IRETQ_DEBUG TRACE_IRQS_IRETQ
#endif

/*
 * C code is not supposed to know about the undefined top of stack. Every time
 * a C function with a pt_regs argument is called from the SYSCALL based
 * fast path FIXUP_TOP_OF_STACK is needed.
 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
 * manipulation.
 */

/* %rsp: at FRAMEEND */
.macro FIXUP_TOP_OF_STACK tmp offset=0
        movq PER_CPU_VAR(old_rsp),\tmp
        movq \tmp,RSP+\offset(%rsp)
        movq $__USER_DS,SS+\offset(%rsp)
        movq $__USER_CS,CS+\offset(%rsp)
        movq $-1,RCX+\offset(%rsp)
        movq R11+\offset(%rsp),\tmp /* get eflags */
        movq \tmp,EFLAGS+\offset(%rsp)
.endm
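
/*
 * Rationale: SYSCALL stashes the user RIP in %rcx and RFLAGS in %r11
 * rather than building an iret frame, so FIXUP_TOP_OF_STACK above
 * reconstructs EFLAGS from the R11 slot, and RESTORE_TOP_OF_STACK below
 * writes it back there before a SYSRET return.
 */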

.macro RESTORE_TOP_OF_STACK tmp offset=0
        movq RSP+\offset(%rsp),\tmp
        movq \tmp,PER_CPU_VAR(old_rsp)
        movq EFLAGS+\offset(%rsp),\tmp
        movq \tmp,R11+\offset(%rsp)
.endm

.macro FAKE_STACK_FRAME child_rip
        /* push in order ss, rsp, eflags, cs, rip */
        xorl %eax, %eax
        pushq_cfi $__KERNEL_DS /* ss */
        /*CFI_REL_OFFSET ss,0*/
        pushq_cfi %rax /* rsp */
        CFI_REL_OFFSET rsp,0
        pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_BIT1) /* eflags - interrupts on */
        /*CFI_REL_OFFSET rflags,0*/
        pushq_cfi $__KERNEL_CS /* cs */
        /*CFI_REL_OFFSET cs,0*/
        pushq_cfi \child_rip /* rip */
        CFI_REL_OFFSET rip,0
        pushq_cfi %rax /* orig rax */
.endm

.macro UNFAKE_STACK_FRAME
        addq $8*6, %rsp
        CFI_ADJUST_CFA_OFFSET -(6*8)
.endm

/*
 * initial frame state for interrupts (and exceptions without error code)
 */
.macro EMPTY_FRAME start=1 offset=0
        .if \start
        CFI_STARTPROC simple
        CFI_SIGNAL_FRAME
        CFI_DEF_CFA rsp,8+\offset
        .else
        CFI_DEF_CFA_OFFSET 8+\offset
        .endif
.endm

/*
 * initial frame state for interrupts (and exceptions without error code)
 */
.macro INTR_FRAME start=1 offset=0
        EMPTY_FRAME \start, SS+8+\offset-RIP
        /*CFI_REL_OFFSET ss, SS+\offset-RIP*/
        CFI_REL_OFFSET rsp, RSP+\offset-RIP
        /*CFI_REL_OFFSET rflags, EFLAGS+\offset-RIP*/
        /*CFI_REL_OFFSET cs, CS+\offset-RIP*/
        CFI_REL_OFFSET rip, RIP+\offset-RIP
.endm

/*
 * initial frame state for exceptions with error code (and interrupts
 * with vector already pushed)
 */
.macro XCPT_FRAME start=1 offset=0
        INTR_FRAME \start, RIP+\offset-ORIG_RAX
        /*CFI_REL_OFFSET orig_rax, ORIG_RAX-ORIG_RAX*/
.endm

/*
 * frame that enables calling into C.
 */
.macro PARTIAL_FRAME start=1 offset=0
        XCPT_FRAME \start, ORIG_RAX+\offset-ARGOFFSET
        CFI_REL_OFFSET rdi, RDI+\offset-ARGOFFSET
        CFI_REL_OFFSET rsi, RSI+\offset-ARGOFFSET
        CFI_REL_OFFSET rdx, RDX+\offset-ARGOFFSET
        CFI_REL_OFFSET rcx, RCX+\offset-ARGOFFSET
        CFI_REL_OFFSET rax, RAX+\offset-ARGOFFSET
        CFI_REL_OFFSET r8, R8+\offset-ARGOFFSET
        CFI_REL_OFFSET r9, R9+\offset-ARGOFFSET
        CFI_REL_OFFSET r10, R10+\offset-ARGOFFSET
        CFI_REL_OFFSET r11, R11+\offset-ARGOFFSET
.endm

/*
 * frame that enables passing a complete pt_regs to a C function.
 */
.macro DEFAULT_FRAME start=1 offset=0
        PARTIAL_FRAME \start, R11+\offset-R15
        CFI_REL_OFFSET rbx, RBX+\offset
        CFI_REL_OFFSET rbp, RBP+\offset
        CFI_REL_OFFSET r12, R12+\offset
        CFI_REL_OFFSET r13, R13+\offset
        CFI_REL_OFFSET r14, R14+\offset
        CFI_REL_OFFSET r15, R15+\offset
.endm

/* save partial stack frame */
.macro SAVE_ARGS_IRQ
        cld
        /* start from rbp in pt_regs and jump over */
        movq_cfi rdi, (RDI-RBP)
        movq_cfi rsi, (RSI-RBP)
        movq_cfi rdx, (RDX-RBP)
        movq_cfi rcx, (RCX-RBP)
        movq_cfi rax, (RAX-RBP)
        movq_cfi r8, (R8-RBP)
        movq_cfi r9, (R9-RBP)
        movq_cfi r10, (R10-RBP)
        movq_cfi r11, (R11-RBP)

        /* Save rbp so that we can unwind from get_irq_regs() */
        movq_cfi rbp, 0

        /* Save previous stack value */
        movq %rsp, %rsi

        leaq -RBP(%rsp),%rdi /* arg1 for handler */
        testl $3, CS-RBP(%rsi)
        je 1f
        SWAPGS
        /*
         * irq_count is used to check if a CPU is already on an interrupt stack
         * or not. While this is essentially redundant with preempt_count it is
         * a little cheaper to use a separate counter in the PDA (short of
         * moving irq_enter into assembly, which would be too much work)
         */
1:      incl PER_CPU_VAR(irq_count)
        cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp
        CFI_DEF_CFA_REGISTER rsi

        /* Store previous stack value */
        pushq %rsi
        CFI_ESCAPE 0x0f /* DW_CFA_def_cfa_expression */, 6, \
                   0x77 /* DW_OP_breg7 */, 0, \
                   0x06 /* DW_OP_deref */, \
                   0x08 /* DW_OP_const1u */, SS+8-RBP, \
                   0x22 /* DW_OP_plus */
        /* We entered an interrupt context - irqs are off: */
        TRACE_IRQS_OFF
.endm

ENTRY(save_rest)
        PARTIAL_FRAME 1 (REST_SKIP+8)
        movq 5*8+16(%rsp), %r11 /* save return address */
        movq_cfi rbx, RBX+16
        movq_cfi rbp, RBP+16
        movq_cfi r12, R12+16
        movq_cfi r13, R13+16
        movq_cfi r14, R14+16
        movq_cfi r15, R15+16
        movq %r11, 8(%rsp) /* return address */
        FIXUP_TOP_OF_STACK %r11, 16
        ret
        CFI_ENDPROC
END(save_rest)

/* save complete stack frame */
.pushsection .kprobes.text, "ax"
ENTRY(save_paranoid)
        XCPT_FRAME 1 RDI+8
        cld
        movq_cfi rdi, RDI+8
        movq_cfi rsi, RSI+8
        movq_cfi rdx, RDX+8
        movq_cfi rcx, RCX+8
        movq_cfi rax, RAX+8
        movq_cfi r8, R8+8
        movq_cfi r9, R9+8
        movq_cfi r10, R10+8
        movq_cfi r11, R11+8
        movq_cfi rbx, RBX+8
        movq_cfi rbp, RBP+8
        movq_cfi r12, R12+8
        movq_cfi r13, R13+8
        movq_cfi r14, R14+8
        movq_cfi r15, R15+8
        movl $1,%ebx
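        /*
         * %ebx=1 means "no swapgs needed on exit". rdmsr of MSR_GS_BASE
         * returns the current base in %edx:%eax; a negative %edx means a
         * kernel-half address, i.e. the kernel GS is already loaded.
         * Otherwise we came in with the user GS: swap it and clear %ebx.
         */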
        movl $MSR_GS_BASE,%ecx
        rdmsr
        testl %edx,%edx
        js 1f /* negative -> in kernel */
        SWAPGS
        xorl %ebx,%ebx
1:      ret
        CFI_ENDPROC
END(save_paranoid)
.popsection

/*
 * A newly forked process directly context switches into this address.
 *
 * rdi: prev task we switched from
 */
ENTRY(ret_from_fork)
        DEFAULT_FRAME

        LOCK ; btr $TIF_FORK,TI_flags(%r8)

        pushq_cfi $0x0002
        popfq_cfi # reset kernel eflags

        call schedule_tail # rdi: 'prev' task parameter

        GET_THREAD_INFO(%rcx)

        RESTORE_REST

        testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
        jz 1f

        testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
        jnz int_ret_from_sys_call

        RESTORE_TOP_OF_STACK %rdi, -ARGOFFSET
        jmp ret_from_sys_call # go to the SYSRET fastpath

1:
        subq $REST_SKIP, %rsp # leave space for volatiles
        CFI_ADJUST_CFA_OFFSET REST_SKIP
        movq %rbp, %rdi
        call *%rbx
        movl $0, RAX(%rsp)
        RESTORE_REST
        jmp int_ret_from_sys_call
        CFI_ENDPROC
END(ret_from_fork)

/*
 * System call entry. Up to 6 arguments in registers are supported.
 *
 * SYSCALL does not save anything on the stack and does not change the
 * stack pointer. However, it does mask the flags register for us, so
 * CLD and CLAC are not needed.
 */

/*
 * Register setup:
 * rax system call number
 * rdi arg0
 * rcx return address for syscall/sysret, C arg3
 * rsi arg1
 * rdx arg2
 * r10 arg3 (--> moved to rcx for C)
 * r8 arg4
 * r9 arg5
 * r11 eflags for syscall/sysret, temporary for C
 * r12-r15,rbp,rbx saved by C code, not touched.
 *
 * Interrupts are off on entry.
 * Only called from user space.
 *
 * XXX if we had a free scratch register we could save the RSP into the stack
 * frame and report it properly in ps. Unfortunately we don't have one.
 *
 * When the user can change the frames, always force IRET: it deals with
 * non-canonical addresses better. SYSRET has trouble with them due to bugs
 * in both AMD and Intel CPUs.
 */

ENTRY(system_call)
        CFI_STARTPROC simple
        CFI_SIGNAL_FRAME
        CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
        CFI_REGISTER rip,rcx
        /*CFI_REGISTER rflags,r11*/
        SWAPGS_UNSAFE_STACK
        /*
         * A hypervisor implementation might want to use a label
         * after the swapgs, so that it can do the swapgs
         * for the guest and jump here on syscall.
         */
GLOBAL(system_call_after_swapgs)

        movq %rsp,PER_CPU_VAR(old_rsp)
        movq PER_CPU_VAR(kernel_stack),%rsp
        /*
         * No need to follow this irqs off/on section - it's straight
         * and short:
         */
        ENABLE_INTERRUPTS(CLBR_NONE)
        SAVE_ARGS 8,0
        movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
        movq %rcx,RIP-ARGOFFSET(%rsp)
        CFI_REL_OFFSET rip,RIP-ARGOFFSET
        testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
        jnz tracesys
system_call_fastpath:
#if __SYSCALL_MASK == ~0
        cmpq $__NR_syscall_max,%rax
#else
        andl $__SYSCALL_MASK,%eax
        cmpl $__NR_syscall_max,%eax
#endif
        ja badsys
        movq %r10,%rcx
        call *sys_call_table(,%rax,8) # XXX: rip relative
        movq %rax,RAX-ARGOFFSET(%rsp)
/*
 * Syscall return path ending with SYSRET (fast path)
 * Has incomplete stack frame and undefined top of stack.
 */
ret_from_sys_call:
        movl $_TIF_ALLWORK_MASK,%edi
        /* edi: flagmask */
sysret_check:
        LOCKDEP_SYS_EXIT
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
        andl %edi,%edx
        jnz sysret_careful
        CFI_REMEMBER_STATE
        /*
         * sysretq will re-enable interrupts:
         */
        TRACE_IRQS_ON
        movq RIP-ARGOFFSET(%rsp),%rcx
        CFI_REGISTER rip,rcx
        RESTORE_ARGS 1,-ARG_SKIP,0
        /*CFI_REGISTER rflags,r11*/
        movq PER_CPU_VAR(old_rsp), %rsp
        USERGS_SYSRET64

        CFI_RESTORE_STATE
        /* Handle reschedules */
        /* edx: work, edi: workmask */
sysret_careful:
        bt $TIF_NEED_RESCHED,%edx
        jnc sysret_signal
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_NONE)
        pushq_cfi %rdi
        SCHEDULE_USER
        popq_cfi %rdi
        jmp sysret_check

        /* Handle a signal */
sysret_signal:
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_NONE)
#ifdef CONFIG_AUDITSYSCALL
        bt $TIF_SYSCALL_AUDIT,%edx
        jc sysret_audit
#endif
        /*
         * We have a signal, or exit tracing or single-step.
         * These all wind up with the iret return path anyway,
         * so just join that path right now.
         */
        FIXUP_TOP_OF_STACK %r11, -ARGOFFSET
        jmp int_check_syscall_exit_work

badsys:
        movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
        jmp ret_from_sys_call

#ifdef CONFIG_AUDITSYSCALL
/*
 * Fast path for syscall audit without full syscall trace.
 * We just call __audit_syscall_entry() directly, and then
 * jump back to the normal fast path.
 */
auditsys:
        movq %r10,%r9 /* 6th arg: 4th syscall arg */
        movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
        movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
        movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
        movq %rax,%rsi /* 2nd arg: syscall number */
        movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
        call __audit_syscall_entry
        LOAD_ARGS 0 /* reload call-clobbered registers */
        jmp system_call_fastpath

/*
 * Return fast path for syscall audit. Call __audit_syscall_exit()
 * directly and then jump back to the fast path with TIF_SYSCALL_AUDIT
 * masked off.
 */
sysret_audit:
        movq RAX-ARGOFFSET(%rsp),%rsi /* second arg, syscall return value */
        cmpq $-MAX_ERRNO,%rsi /* is it < -MAX_ERRNO? */
        setbe %al /* 1 if so, 0 if not */
        movzbl %al,%edi /* zero-extend that into %edi */
        call __audit_syscall_exit
        movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
        jmp sysret_check
#endif /* CONFIG_AUDITSYSCALL */

        /* Do syscall tracing */
tracesys:
#ifdef CONFIG_AUDITSYSCALL
        testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
        jz auditsys
#endif
        SAVE_REST
        movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
        FIXUP_TOP_OF_STACK %rdi
        movq %rsp,%rdi
        call syscall_trace_enter
        /*
         * Reload arg registers from stack in case ptrace changed them.
         * We don't reload %rax because syscall_trace_enter() returned
         * the value it wants us to use in the table lookup.
         */
        LOAD_ARGS ARGOFFSET, 1
        RESTORE_REST
#if __SYSCALL_MASK == ~0
        cmpq $__NR_syscall_max,%rax
#else
        andl $__SYSCALL_MASK,%eax
        cmpl $__NR_syscall_max,%eax
#endif
        ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
        movq %r10,%rcx /* fixup for C */
        call *sys_call_table(,%rax,8)
        movq %rax,RAX-ARGOFFSET(%rsp)
        /* Use IRET because user could have changed frame */

/*
 * Syscall return path ending with IRET.
 * Has correct top of stack, but partial stack frame.
 */
GLOBAL(int_ret_from_sys_call)
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        movl $_TIF_ALLWORK_MASK,%edi
        /* edi: mask to check */
GLOBAL(int_with_check)
        LOCKDEP_SYS_EXIT_IRQ
        GET_THREAD_INFO(%rcx)
        movl TI_flags(%rcx),%edx
        andl %edi,%edx
        jnz int_careful
        andl $~TS_COMPAT,TI_status(%rcx)
        jmp retint_swapgs

        /* Either reschedule or signal or syscall exit tracking needed. */
        /* First do a reschedule test. */
        /* edx: work, edi: workmask */
int_careful:
        bt $TIF_NEED_RESCHED,%edx
        jnc int_very_careful
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_NONE)
        pushq_cfi %rdi
        SCHEDULE_USER
        popq_cfi %rdi
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        jmp int_with_check

        /* handle signals and tracing -- both require a full stack frame */
int_very_careful:
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_NONE)
int_check_syscall_exit_work:
        SAVE_REST
        /* Check for syscall exit trace */
        testl $_TIF_WORK_SYSCALL_EXIT,%edx
        jz int_signal
        pushq_cfi %rdi
        leaq 8(%rsp),%rdi # &ptregs -> arg1
        call syscall_trace_leave
        popq_cfi %rdi
        andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi
        jmp int_restore_rest

int_signal:
        testl $_TIF_DO_NOTIFY_MASK,%edx
        jz 1f
        movq %rsp,%rdi # &ptregs -> arg1
        xorl %esi,%esi # oldset -> arg2
        call do_notify_resume
1:      movl $_TIF_WORK_MASK,%edi
int_restore_rest:
        RESTORE_REST
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        jmp int_with_check
        CFI_ENDPROC
END(system_call)

/*
 * Certain special system calls that need to save a complete stack frame.
 */
.macro PTREGSCALL label,func,arg
ENTRY(\label)
        PARTIAL_FRAME 1 8 /* offset 8: return address */
        subq $REST_SKIP, %rsp
        CFI_ADJUST_CFA_OFFSET REST_SKIP
        call save_rest
        DEFAULT_FRAME 0 8 /* offset 8: return address */
        leaq 8(%rsp), \arg /* pt_regs pointer */
        call \func
        jmp ptregscall_common
        CFI_ENDPROC
END(\label)
.endm

.macro FORK_LIKE func
ENTRY(stub_\func)
        CFI_STARTPROC
        popq %r11 /* save return address */
        PARTIAL_FRAME 0
        SAVE_REST
        pushq %r11 /* put it back on stack */
        FIXUP_TOP_OF_STACK %r11, 8
        DEFAULT_FRAME 0 8 /* offset 8: return address */
        call sys_\func
        RESTORE_TOP_OF_STACK %r11, 8
        ret $REST_SKIP /* pop extended registers */
        CFI_ENDPROC
END(stub_\func)
.endm

FORK_LIKE clone
FORK_LIKE fork
FORK_LIKE vfork
PTREGSCALL stub_iopl, sys_iopl, %rsi

ENTRY(ptregscall_common)
        DEFAULT_FRAME 1 8 /* offset 8: return address */
        RESTORE_TOP_OF_STACK %r11, 8
        movq_cfi_restore R15+8, r15
        movq_cfi_restore R14+8, r14
        movq_cfi_restore R13+8, r13
        movq_cfi_restore R12+8, r12
        movq_cfi_restore RBP+8, rbp
        movq_cfi_restore RBX+8, rbx
        ret $REST_SKIP /* pop extended registers */
        CFI_ENDPROC
END(ptregscall_common)

ENTRY(stub_execve)
        CFI_STARTPROC
        addq $8, %rsp
        PARTIAL_FRAME 0
        SAVE_REST
        FIXUP_TOP_OF_STACK %r11
        call sys_execve
        RESTORE_TOP_OF_STACK %r11
        movq %rax,RAX(%rsp)
        RESTORE_REST
        jmp int_ret_from_sys_call
        CFI_ENDPROC
END(stub_execve)

/*
 * sigreturn is special because it needs to restore all registers on return.
 * This cannot be done with SYSRET, so use the IRET return path instead.
 */
ENTRY(stub_rt_sigreturn)
        CFI_STARTPROC
        addq $8, %rsp
        PARTIAL_FRAME 0
        SAVE_REST
        movq %rsp,%rdi
        FIXUP_TOP_OF_STACK %r11
        call sys_rt_sigreturn
        movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer
        RESTORE_REST
        jmp int_ret_from_sys_call
        CFI_ENDPROC
END(stub_rt_sigreturn)

#ifdef CONFIG_X86_X32_ABI
ENTRY(stub_x32_rt_sigreturn)
        CFI_STARTPROC
        addq $8, %rsp
        PARTIAL_FRAME 0
        SAVE_REST
        movq %rsp,%rdi
        FIXUP_TOP_OF_STACK %r11
        call sys32_x32_rt_sigreturn
        movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer
        RESTORE_REST
        jmp int_ret_from_sys_call
        CFI_ENDPROC
END(stub_x32_rt_sigreturn)

ENTRY(stub_x32_execve)
        CFI_STARTPROC
        addq $8, %rsp
        PARTIAL_FRAME 0
        SAVE_REST
        FIXUP_TOP_OF_STACK %r11
        call compat_sys_execve
        RESTORE_TOP_OF_STACK %r11
        movq %rax,RAX(%rsp)
        RESTORE_REST
        jmp int_ret_from_sys_call
        CFI_ENDPROC
END(stub_x32_execve)

#endif

/*
 * Build the entry stubs and pointer table with some assembler magic.
 * We pack 7 stubs into a single 32-byte chunk, which will fit in a
 * single cache line on all modern x86 implementations.
 */
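/*
 * Layout note: each stub is a 2-byte push of (~vector+0x80) -- the +0x80
 * bias keeps the constant in signed-byte range, so the push gets the short
 * imm8 encoding -- followed by a 2-byte short jmp to the shared
 * "jmp common_interrupt"; that is what lets 7 stubs fit in 32 bytes.
 */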
        .section .init.rodata,"a"
ENTRY(interrupt)
        .section .entry.text
        .p2align 5
        .p2align CONFIG_X86_L1_CACHE_SHIFT
ENTRY(irq_entries_start)
        INTR_FRAME
vector=FIRST_EXTERNAL_VECTOR
.rept (NR_VECTORS-FIRST_EXTERNAL_VECTOR+6)/7
        .balign 32
  .rept 7
    .if vector < NR_VECTORS
      .if vector <> FIRST_EXTERNAL_VECTOR
        CFI_ADJUST_CFA_OFFSET -8
      .endif
1:      pushq_cfi $(~vector+0x80) /* Note: always in signed byte range */
      .if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6
        jmp 2f
      .endif
      .previous
        .quad 1b
      .section .entry.text
vector=vector+1
    .endif
  .endr
2:      jmp common_interrupt
.endr
        CFI_ENDPROC
END(irq_entries_start)

.previous
END(interrupt)
.previous

/*
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only callee-clobbered registers in fast path.
 *
 * Entry runs with interrupts off.
 */

/* 0(%rsp): ~(interrupt number) */
.macro interrupt func
        /* reserve pt_regs for scratch regs and rbp */
        subq $ORIG_RAX-RBP, %rsp
        CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
        SAVE_ARGS_IRQ
        call \func
.endm

/*
 * Interrupt entry/exit should be protected against kprobes
 */
.pushsection .kprobes.text, "ax"
/*
 * The interrupt stubs push (~vector+0x80) onto the stack and
 * then jump to common_interrupt.
 */
        .p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
        XCPT_FRAME
        ASM_CLAC
        addq $-0x80,(%rsp) /* Adjust vector to [-256,-1] range */
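        /*
         * The stub pushed ~vector+0x80; subtracting 0x80 leaves ~vector,
         * i.e. -(vector+1), so orig_ax now carries the vector encoded in
         * the [-256,-1] range and do_IRQ recovers it by complementing
         * orig_ax again.
         */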
        interrupt do_IRQ
        /* 0(%rsp): old_rsp-ARGOFFSET */
ret_from_intr:
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        decl PER_CPU_VAR(irq_count)

        /* Restore saved previous stack */
        popq %rsi
        CFI_DEF_CFA rsi,SS+8-RBP /* reg/off reset after def_cfa_expr */
        leaq ARGOFFSET-RBP(%rsi), %rsp
        CFI_DEF_CFA_REGISTER rsp
        CFI_ADJUST_CFA_OFFSET RBP-ARGOFFSET

exit_intr:
        GET_THREAD_INFO(%rcx)
        testl $3,CS-ARGOFFSET(%rsp)
        je retint_kernel

        /* Interrupt came from user space */
        /*
         * Has a correct top of stack, but a partial stack frame
         * %rcx: thread info. Interrupts off.
         */
retint_with_reschedule:
        movl $_TIF_WORK_MASK,%edi
retint_check:
        LOCKDEP_SYS_EXIT_IRQ
        movl TI_flags(%rcx),%edx
        andl %edi,%edx
        CFI_REMEMBER_STATE
        jnz retint_careful

retint_swapgs: /* return to user-space */
        /*
         * The iretq could re-enable interrupts:
         */
        DISABLE_INTERRUPTS(CLBR_ANY)
        TRACE_IRQS_IRETQ
        SWAPGS
        jmp restore_args

retint_restore_args: /* return to kernel space */
        DISABLE_INTERRUPTS(CLBR_ANY)
        /*
         * The iretq could re-enable interrupts:
         */
        TRACE_IRQS_IRETQ
restore_args:
        RESTORE_ARGS 1,8,1

irq_return:
        INTERRUPT_RETURN
        _ASM_EXTABLE(irq_return, bad_iret)

#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
        iretq
        _ASM_EXTABLE(native_iret, bad_iret)
#endif

        .section .fixup,"ax"
bad_iret:
        /*
         * The iret traps when the %cs or %ss being restored is bogus.
         * We've lost the original trap vector and error code.
         * #GPF is the most likely one to get for an invalid selector.
         * So pretend we completed the iret and took the #GPF in user mode.
         *
         * We are now running with the kernel GS after exception recovery.
         * But error_entry expects us to have user GS to match the user %cs,
         * so swap back.
         */
        pushq $0

        SWAPGS
        jmp general_protection

        .previous

        /* edi: workmask, edx: work */
retint_careful:
        CFI_RESTORE_STATE
        bt $TIF_NEED_RESCHED,%edx
        jnc retint_signal
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_NONE)
        pushq_cfi %rdi
        SCHEDULE_USER
        popq_cfi %rdi
        GET_THREAD_INFO(%rcx)
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        jmp retint_check

retint_signal:
        testl $_TIF_DO_NOTIFY_MASK,%edx
        jz retint_swapgs
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_NONE)
        SAVE_REST
        movq $-1,ORIG_RAX(%rsp)
        xorl %esi,%esi # oldset
        movq %rsp,%rdi # &pt_regs
        call do_notify_resume
        RESTORE_REST
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        GET_THREAD_INFO(%rcx)
        jmp retint_with_reschedule

#ifdef CONFIG_PREEMPT
        /* Returning to kernel space. Check if we need preemption */
        /* rcx: threadinfo. interrupts off. */
ENTRY(retint_kernel)
        cmpl $0,TI_preempt_count(%rcx)
        jnz retint_restore_args
        bt $TIF_NEED_RESCHED,TI_flags(%rcx)
        jnc retint_restore_args
        bt $9,EFLAGS-ARGOFFSET(%rsp) /* interrupts off? */
        jnc retint_restore_args
        call preempt_schedule_irq
        jmp exit_intr
#endif

        CFI_ENDPROC
END(common_interrupt)
/*
 * End of kprobes section
 */
.popsection

/*
 * APIC interrupts.
 */
.macro apicinterrupt num sym do_sym
ENTRY(\sym)
        INTR_FRAME
        ASM_CLAC
        pushq_cfi $~(\num)
.Lcommon_\sym:
        interrupt \do_sym
        jmp ret_from_intr
        CFI_ENDPROC
END(\sym)
.endm

#ifdef CONFIG_SMP
apicinterrupt IRQ_MOVE_CLEANUP_VECTOR \
        irq_move_cleanup_interrupt smp_irq_move_cleanup_interrupt
apicinterrupt REBOOT_VECTOR \
        reboot_interrupt smp_reboot_interrupt
#endif

#ifdef CONFIG_X86_UV
apicinterrupt UV_BAU_MESSAGE \
        uv_bau_message_intr1 uv_bau_message_interrupt
#endif
apicinterrupt LOCAL_TIMER_VECTOR \
        apic_timer_interrupt smp_apic_timer_interrupt
apicinterrupt X86_PLATFORM_IPI_VECTOR \
        x86_platform_ipi smp_x86_platform_ipi

apicinterrupt THRESHOLD_APIC_VECTOR \
        threshold_interrupt smp_threshold_interrupt
apicinterrupt THERMAL_APIC_VECTOR \
        thermal_interrupt smp_thermal_interrupt

#ifdef CONFIG_SMP
apicinterrupt CALL_FUNCTION_SINGLE_VECTOR \
        call_function_single_interrupt smp_call_function_single_interrupt
apicinterrupt CALL_FUNCTION_VECTOR \
        call_function_interrupt smp_call_function_interrupt
apicinterrupt RESCHEDULE_VECTOR \
        reschedule_interrupt smp_reschedule_interrupt
#endif

apicinterrupt ERROR_APIC_VECTOR \
        error_interrupt smp_error_interrupt
apicinterrupt SPURIOUS_APIC_VECTOR \
        spurious_interrupt smp_spurious_interrupt

#ifdef CONFIG_IRQ_WORK
apicinterrupt IRQ_WORK_VECTOR \
        irq_work_interrupt smp_irq_work_interrupt
#endif

/*
 * Exception entry points.
 */
.macro zeroentry sym do_sym
ENTRY(\sym)
        INTR_FRAME
        ASM_CLAC
        PARAVIRT_ADJUST_EXCEPTION_FRAME
        pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */
        subq $ORIG_RAX-R15, %rsp
        CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
        call error_entry
        DEFAULT_FRAME 0
        movq %rsp,%rdi /* pt_regs pointer */
        xorl %esi,%esi /* no error code */
        call \do_sym
        jmp error_exit /* %ebx: no swapgs flag */
        CFI_ENDPROC
END(\sym)
.endm

.macro paranoidzeroentry sym do_sym
ENTRY(\sym)
        INTR_FRAME
        ASM_CLAC
        PARAVIRT_ADJUST_EXCEPTION_FRAME
        pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */
        subq $ORIG_RAX-R15, %rsp
        CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
        call save_paranoid
        TRACE_IRQS_OFF
        movq %rsp,%rdi /* pt_regs pointer */
        xorl %esi,%esi /* no error code */
        call \do_sym
        jmp paranoid_exit /* %ebx: no swapgs flag */
        CFI_ENDPROC
END(\sym)
.endm

#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
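/*
 * INIT_TSS_IST(x) addresses IST slot x in the per-cpu TSS. The _ist macro
 * below moves that slot down by EXCEPTION_STKSZ around the handler call;
 * the idea is that a nested exception arriving on the same IST then lands
 * below the frame we are still using instead of clobbering it.
 */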
.macro paranoidzeroentry_ist sym do_sym ist
ENTRY(\sym)
        INTR_FRAME
        ASM_CLAC
        PARAVIRT_ADJUST_EXCEPTION_FRAME
        pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */
        subq $ORIG_RAX-R15, %rsp
        CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
        call save_paranoid
        TRACE_IRQS_OFF_DEBUG
        movq %rsp,%rdi /* pt_regs pointer */
        xorl %esi,%esi /* no error code */
        subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
        call \do_sym
        addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
        jmp paranoid_exit /* %ebx: no swapgs flag */
        CFI_ENDPROC
END(\sym)
.endm

.macro errorentry sym do_sym
ENTRY(\sym)
        XCPT_FRAME
        ASM_CLAC
        PARAVIRT_ADJUST_EXCEPTION_FRAME
        subq $ORIG_RAX-R15, %rsp
        CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
        call error_entry
        DEFAULT_FRAME 0
        movq %rsp,%rdi /* pt_regs pointer */
        movq ORIG_RAX(%rsp),%rsi /* get error code */
        movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
        call \do_sym
        jmp error_exit /* %ebx: no swapgs flag */
        CFI_ENDPROC
END(\sym)
.endm

        /* error code is on the stack already */
.macro paranoiderrorentry sym do_sym
ENTRY(\sym)
        XCPT_FRAME
        ASM_CLAC
        PARAVIRT_ADJUST_EXCEPTION_FRAME
        subq $ORIG_RAX-R15, %rsp
        CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
        call save_paranoid
        DEFAULT_FRAME 0
        TRACE_IRQS_OFF
        movq %rsp,%rdi /* pt_regs pointer */
        movq ORIG_RAX(%rsp),%rsi /* get error code */
        movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
        call \do_sym
        jmp paranoid_exit /* %ebx: no swapgs flag */
        CFI_ENDPROC
END(\sym)
.endm

zeroentry divide_error do_divide_error
zeroentry overflow do_overflow
zeroentry bounds do_bounds
zeroentry invalid_op do_invalid_op
zeroentry device_not_available do_device_not_available
paranoiderrorentry double_fault do_double_fault
zeroentry coprocessor_segment_overrun do_coprocessor_segment_overrun
errorentry invalid_TSS do_invalid_TSS
errorentry segment_not_present do_segment_not_present
zeroentry spurious_interrupt_bug do_spurious_interrupt_bug
zeroentry coprocessor_error do_coprocessor_error
errorentry alignment_check do_alignment_check
zeroentry simd_coprocessor_error do_simd_coprocessor_error


        /* Reload gs selector with exception handling */
        /* edi: new selector */
ENTRY(native_load_gs_index)
        CFI_STARTPROC
        pushfq_cfi
        DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
        SWAPGS
gs_change:
        movl %edi,%gs
2:      mfence /* workaround */
        SWAPGS
        popfq_cfi
        ret
        CFI_ENDPROC
END(native_load_gs_index)

        _ASM_EXTABLE(gs_change,bad_gs)
        .section .fixup,"ax"
        /* running with kernelgs */
bad_gs:
        SWAPGS /* switch back to user gs */
        xorl %eax,%eax
        movl %eax,%gs
        jmp 2b
        .previous

/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(call_softirq)
        CFI_STARTPROC
        pushq_cfi %rbp
        CFI_REL_OFFSET rbp,0
        mov %rsp,%rbp
        CFI_DEF_CFA_REGISTER rbp
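        /*
         * Per-cpu irq_count starts out at -1 (set up in the per-cpu init
         * code, not visible here) when we are off the irq stack; the incl
         * takes it to zero, so the cmove switches %rsp to the irq stack
         * only on a first, non-nested entry.
         */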
        incl PER_CPU_VAR(irq_count)
        cmove PER_CPU_VAR(irq_stack_ptr),%rsp
        push %rbp # backlink for old unwinder
        call __do_softirq
        leaveq
        CFI_RESTORE rbp
        CFI_DEF_CFA_REGISTER rsp
        CFI_ADJUST_CFA_OFFSET -8
        decl PER_CPU_VAR(irq_count)
        ret
        CFI_ENDPROC
END(call_softirq)

#ifdef CONFIG_XEN
zeroentry xen_hypervisor_callback xen_do_hypervisor_callback

/*
 * A note on the "critical region" in our callback handler.
 * We want to avoid stacking callback handlers due to events occurring
 * during handling of the last event. To do this, we keep events disabled
 * until we've done all processing. HOWEVER, we must enable events before
 * popping the stack frame (can't be done atomically) and so it would still
 * be possible to get enough handler activations to overflow the stack.
 * Although unlikely, bugs of that kind are hard to track down, so we'd
 * like to avoid the possibility.
 * So, on entry to the handler we detect whether we interrupted an
 * existing activation in its critical region -- if so, we pop the current
 * activation and restart the handler using the previous one.
 */
ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
        CFI_STARTPROC
        /*
         * Since we don't modify %rdi, xen_evtchn_do_upcall(struct *pt_regs)
         * will see the correct pointer to the pt_regs
         */
        movq %rdi, %rsp # we don't return, adjust the stack frame
        CFI_ENDPROC
        DEFAULT_FRAME
11:     incl PER_CPU_VAR(irq_count)
        movq %rsp,%rbp
        CFI_DEF_CFA_REGISTER rbp
        cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp
        pushq %rbp # backlink for old unwinder
        call xen_evtchn_do_upcall
        popq %rsp
        CFI_DEF_CFA_REGISTER rsp
        decl PER_CPU_VAR(irq_count)
        jmp error_exit
        CFI_ENDPROC
END(xen_do_hypervisor_callback)

/*
 * Hypervisor uses this for application faults while it executes.
 * We get here for two reasons:
 * 1. Fault while reloading DS, ES, FS or GS
 * 2. Fault while executing IRET
 * Category 1 we do not need to fix up as Xen has already reloaded all segment
 * registers that could be reloaded and zeroed the others.
 * Category 2 we fix up by killing the current process. We cannot use the
 * normal Linux return path in this case because if we use the IRET hypercall
 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 * We distinguish between categories by comparing each saved segment register
 * with its current contents: any discrepancy means we are in category 1.
 */
ENTRY(xen_failsafe_callback)
        INTR_FRAME 1 (6*8)
        /*CFI_REL_OFFSET gs,GS*/
        /*CFI_REL_OFFSET fs,FS*/
        /*CFI_REL_OFFSET es,ES*/
        /*CFI_REL_OFFSET ds,DS*/
        CFI_REL_OFFSET r11,8
        CFI_REL_OFFSET rcx,0
        movw %ds,%cx
        cmpw %cx,0x10(%rsp)
        CFI_REMEMBER_STATE
        jne 1f
        movw %es,%cx
        cmpw %cx,0x18(%rsp)
        jne 1f
        movw %fs,%cx
        cmpw %cx,0x20(%rsp)
        jne 1f
        movw %gs,%cx
        cmpw %cx,0x28(%rsp)
        jne 1f
        /* All segments match their saved values => Category 2 (Bad IRET). */
        movq (%rsp),%rcx
        CFI_RESTORE rcx
        movq 8(%rsp),%r11
        CFI_RESTORE r11
        addq $0x30,%rsp
        CFI_ADJUST_CFA_OFFSET -0x30
        pushq_cfi $0 /* RIP */
        pushq_cfi %r11
        pushq_cfi %rcx
        jmp general_protection
        CFI_RESTORE_STATE
1:      /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
        movq (%rsp),%rcx
        CFI_RESTORE rcx
        movq 8(%rsp),%r11
        CFI_RESTORE r11
        addq $0x30,%rsp
        CFI_ADJUST_CFA_OFFSET -0x30
        pushq_cfi $-1 /* orig_ax = -1 => not a system call */
        SAVE_ALL
        jmp error_exit
        CFI_ENDPROC
END(xen_failsafe_callback)

apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
        xen_hvm_callback_vector xen_evtchn_do_upcall

#endif /* CONFIG_XEN */

/*
 * Some functions should be protected against kprobes
 */
.pushsection .kprobes.text, "ax"

paranoidzeroentry_ist debug do_debug DEBUG_STACK
paranoidzeroentry_ist int3 do_int3 DEBUG_STACK
paranoiderrorentry stack_segment do_stack_segment
#ifdef CONFIG_XEN
zeroentry xen_debug do_debug
zeroentry xen_int3 do_int3
errorentry xen_stack_segment do_stack_segment
#endif
errorentry general_protection do_general_protection
errorentry page_fault do_page_fault
#ifdef CONFIG_KVM_GUEST
errorentry async_page_fault do_async_page_fault
#endif
#ifdef CONFIG_X86_MCE
paranoidzeroentry machine_check *machine_check_vector(%rip)
#endif

/*
 * "Paranoid" exit path from exception stack.
 * Paranoid because this is used by NMIs and cannot take
 * any kernel state for granted.
 * We don't do kernel preemption checks here, because only
 * NMI should be common and it does not enable IRQs and
 * cannot get reschedule ticks.
 *
 * "trace" is 0 for the NMI handler only, because irq-tracing
 * is fundamentally NMI-unsafe (we cannot change the soft and
 * hard flags at once, atomically).
 */

/* ebx: no swapgs flag */
ENTRY(paranoid_exit)
        DEFAULT_FRAME
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF_DEBUG
        testl %ebx,%ebx /* swapgs needed? */
        jnz paranoid_restore
        testl $3,CS(%rsp)
        jnz paranoid_userspace
paranoid_swapgs:
        TRACE_IRQS_IRETQ 0
        SWAPGS_UNSAFE_STACK
        RESTORE_ALL 8
        jmp irq_return
paranoid_restore:
        TRACE_IRQS_IRETQ_DEBUG 0
        RESTORE_ALL 8
        jmp irq_return
paranoid_userspace:
        GET_THREAD_INFO(%rcx)
        movl TI_flags(%rcx),%ebx
        andl $_TIF_WORK_MASK,%ebx
        jz paranoid_swapgs
        movq %rsp,%rdi /* &pt_regs */
        call sync_regs
        movq %rax,%rsp /* switch stack for scheduling */
        testl $_TIF_NEED_RESCHED,%ebx
        jnz paranoid_schedule
        movl %ebx,%edx /* arg3: thread flags */
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_NONE)
        xorl %esi,%esi /* arg2: oldset */
        movq %rsp,%rdi /* arg1: &pt_regs */
        call do_notify_resume
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        jmp paranoid_userspace
paranoid_schedule:
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_ANY)
        SCHEDULE_USER
        DISABLE_INTERRUPTS(CLBR_ANY)
        TRACE_IRQS_OFF
        jmp paranoid_userspace
        CFI_ENDPROC
END(paranoid_exit)

/*
 * Exception entry point. This expects an error code/orig_rax on the stack.
 * Returns the "no swapgs flag" in %ebx.
 */
ENTRY(error_entry)
        XCPT_FRAME
        CFI_ADJUST_CFA_OFFSET 15*8
        /* oldrax contains error code */
        cld
        movq_cfi rdi, RDI+8
        movq_cfi rsi, RSI+8
        movq_cfi rdx, RDX+8
        movq_cfi rcx, RCX+8
        movq_cfi rax, RAX+8
        movq_cfi r8, R8+8
        movq_cfi r9, R9+8
        movq_cfi r10, R10+8
        movq_cfi r11, R11+8
        movq_cfi rbx, RBX+8
        movq_cfi rbp, RBP+8
        movq_cfi r12, R12+8
        movq_cfi r13, R13+8
        movq_cfi r14, R14+8
        movq_cfi r15, R15+8
        xorl %ebx,%ebx
        testl $3,CS+8(%rsp)
        je error_kernelspace
error_swapgs:
        SWAPGS
error_sti:
        TRACE_IRQS_OFF
        ret

/*
 * There are two places in the kernel that can potentially fault with
 * usergs. Handle them here. The exception handlers after iret run with
 * kernel gs again, so don't set the user space flag. B-stepping K8s
 * sometimes report a truncated RIP for IRET exceptions returning to
 * compat mode. Check for these here too.
 */
error_kernelspace:
        incl %ebx
        leaq irq_return(%rip),%rcx
        cmpq %rcx,RIP+8(%rsp)
        je error_swapgs
        movl %ecx,%eax /* zero extend */
        cmpq %rax,RIP+8(%rsp)
        je bstep_iret
        cmpq $gs_change,RIP+8(%rsp)
        je error_swapgs
        jmp error_sti

bstep_iret:
        /* Fix truncated RIP */
        movq %rcx,RIP+8(%rsp)
        jmp error_swapgs
        CFI_ENDPROC
END(error_entry)


/* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
ENTRY(error_exit)
        DEFAULT_FRAME
        movl %ebx,%eax
        RESTORE_REST
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        GET_THREAD_INFO(%rcx)
        testl %eax,%eax
        jne retint_kernel
        LOCKDEP_SYS_EXIT_IRQ
        movl TI_flags(%rcx),%edx
        movl $_TIF_WORK_MASK,%edi
        andl %edi,%edx
        jnz retint_careful
        jmp retint_swapgs
        CFI_ENDPROC
END(error_exit)

/*
 * Test if a given stack is an NMI stack or not.
 */
.macro test_in_nmi reg stack nmi_ret normal_ret
        cmpq %\reg, \stack
        ja \normal_ret
        subq $EXCEPTION_STKSZ, %\reg
        cmpq %\reg, \stack
        jb \normal_ret
        jmp \nmi_ret
.endm
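/*
 * I.e. take \nmi_ret only when \stack lies within the EXCEPTION_STKSZ
 * bytes directly below the top-of-NMI-stack value passed in \reg.
 */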

/* runs on exception stack */
ENTRY(nmi)
        INTR_FRAME
        PARAVIRT_ADJUST_EXCEPTION_FRAME
        /*
         * We allow breakpoints in NMIs. If a breakpoint occurs, then
         * the iretq it performs will take us out of NMI context.
         * This means that we can have nested NMIs where the next
         * NMI is using the top of the stack of the previous NMI. We
         * can't let it execute because the nested NMI will corrupt the
         * stack of the previous NMI. NMI handlers are not re-entrant
         * anyway.
         *
         * To handle this case we do the following:
         * Check a special location on the stack that contains
         * a variable that is set when NMIs are executing.
         * The interrupted task's stack is also checked to see if it
         * is an NMI stack.
         * If the variable is not set and the stack is not the NMI
         * stack then:
         * o Set the special variable on the stack
         * o Copy the interrupt frame into a "saved" location on the stack
         * o Copy the interrupt frame into a "copy" location on the stack
         * o Continue processing the NMI
         * If the variable is set or the previous stack is the NMI stack:
         * o Modify the "copy" location to jump to repeat_nmi
         * o return back to the first NMI
         *
         * Now on exit of the first NMI, we first clear the stack variable.
         * The NMI stack will tell any nested NMIs at that point that it is
         * nested. Then we pop the stack normally with iret, and if there was
         * a nested NMI that updated the copy interrupt stack frame, a
         * jump will be made to the repeat_nmi code that will handle the second
         * NMI.
         */

        /* Use %rdx as our temp variable throughout */
        pushq_cfi %rdx
        CFI_REL_OFFSET rdx, 0

        /*
         * If %cs was not the kernel segment, then the NMI triggered in user
         * space, which means it is definitely not nested.
         */
        cmpl $__KERNEL_CS, 16(%rsp)
        jne first_nmi

        /*
         * Check the special variable on the stack to see if NMIs are
         * executing.
         */
        cmpl $1, -8(%rsp)
        je nested_nmi

        /*
         * Now test if the previous stack was an NMI stack.
         * We need the double check. We check the NMI stack to satisfy the
         * race when the first NMI clears the variable before returning.
         * We check the variable because the first NMI could be in a
         * breakpoint routine using a breakpoint stack.
         */
        lea 6*8(%rsp), %rdx
        test_in_nmi rdx, 4*8(%rsp), nested_nmi, first_nmi
        CFI_REMEMBER_STATE

nested_nmi:
        /*
         * Do nothing if we interrupted the fixup in repeat_nmi.
         * It's about to repeat the NMI handler, so we are fine
         * with ignoring this one.
         */
        movq $repeat_nmi, %rdx
        cmpq 8(%rsp), %rdx
        ja 1f
        movq $end_repeat_nmi, %rdx
        cmpq 8(%rsp), %rdx
        ja nested_nmi_out

1:
        /* Set up the interrupted NMI's stack to jump to repeat_nmi */
        leaq -1*8(%rsp), %rdx
        movq %rdx, %rsp
        CFI_ADJUST_CFA_OFFSET 1*8
        leaq -10*8(%rsp), %rdx
        pushq_cfi $__KERNEL_DS
        pushq_cfi %rdx
        pushfq_cfi
        pushq_cfi $__KERNEL_CS
        pushq_cfi $repeat_nmi

        /* Put stack back */
        addq $(6*8), %rsp
        CFI_ADJUST_CFA_OFFSET -6*8

nested_nmi_out:
        popq_cfi %rdx
        CFI_RESTORE rdx

        /* No need to check faults here */
        INTERRUPT_RETURN

        CFI_RESTORE_STATE
first_nmi:
        /*
         * Because nested NMIs will use the pushed location that we
         * stored in rdx, we must keep that space available.
         * Here's what our stack frame will look like:
         * +-------------------------+
         * | original SS             |
         * | original Return RSP     |
         * | original RFLAGS         |
         * | original CS             |
         * | original RIP            |
         * +-------------------------+
         * | temp storage for rdx    |
         * +-------------------------+
         * | NMI executing variable  |
         * +-------------------------+
         * | copied SS               |
         * | copied Return RSP       |
         * | copied RFLAGS           |
         * | copied CS               |
         * | copied RIP              |
         * +-------------------------+
         * | Saved SS                |
         * | Saved Return RSP        |
         * | Saved RFLAGS            |
         * | Saved CS                |
         * | Saved RIP               |
         * +-------------------------+
         * | pt_regs                 |
         * +-------------------------+
         *
         * The saved stack frame is used to fix up the copied stack frame
         * that a nested NMI may change to make the interrupted NMI iret jump
         * to repeat_nmi. The original stack frame and the temp storage
         * are also used by nested NMIs and can not be trusted on exit.
         */
        /* Do not pop rdx, nested NMIs will corrupt that part of the stack */
        movq (%rsp), %rdx
        CFI_RESTORE rdx

        /* Set the NMI executing variable on the stack. */
        pushq_cfi $1

        /*
         * Leave room for the "copied" frame
         */
        subq $(5*8), %rsp

        /* Copy the stack frame to the Saved frame */
        .rept 5
        pushq_cfi 11*8(%rsp)
        .endr
        CFI_DEF_CFA_OFFSET SS+8-RIP

        /* Everything up to here is safe from nested NMIs */

        /*
         * If there was a nested NMI, the first NMI's iret will return
         * here. But NMIs are still enabled and we can take another
         * nested NMI. The nested NMI checks the interrupted RIP to see
         * if it is between repeat_nmi and end_repeat_nmi, and if so
         * it will just return, as we are about to repeat an NMI anyway.
         * This makes it safe to copy to the stack frame that a nested
         * NMI will update.
         */
repeat_nmi:
        /*
         * Update the stack variable to say we are still in NMI (the update
         * is benign for the non-repeat case, where 1 was pushed just above
         * to this very stack slot).
         */
        movq $1, 10*8(%rsp)

        /* Make another copy, this one may be modified by nested NMIs */
        addq $(10*8), %rsp
        CFI_ADJUST_CFA_OFFSET -10*8
        .rept 5
        pushq_cfi -6*8(%rsp)
        .endr
        subq $(5*8), %rsp
        CFI_DEF_CFA_OFFSET SS+8-RIP
end_repeat_nmi:

        /*
         * Everything below this point can be preempted by a nested
         * NMI if the first NMI took an exception and reset our iret stack
         * so that we repeat another NMI.
         */
        pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */
        subq $ORIG_RAX-R15, %rsp
        CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
        /*
         * Use save_paranoid to handle SWAPGS, but no need to use paranoid_exit
         * as we should not be calling schedule in NMI context, even with
         * normal interrupts enabled. An NMI should not be setting
         * NEED_RESCHED or anything that normal interrupts and exceptions
         * might do.
         */
        call save_paranoid
        DEFAULT_FRAME 0

        /*
         * Save off the CR2 register. If we take a page fault in the NMI then
         * it could corrupt the CR2 value. If the NMI preempts a page fault
         * handler before it was able to read the CR2 register, and then the
         * NMI itself takes a page fault, the page fault that was preempted
         * will read the information from the NMI page fault and not the
         * original fault. Save it off and restore it if it changes.
         * Use the r12 callee-saved register.
         */
        movq %cr2, %r12

        /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
        movq %rsp,%rdi
        movq $-1,%rsi
        call do_nmi

        /* Did the NMI take a page fault? Restore cr2 if it did */
        movq %cr2, %rcx
        cmpq %rcx, %r12
        je 1f
        movq %r12, %cr2
1:

        testl %ebx,%ebx /* swapgs needed? */
        jnz nmi_restore
nmi_swapgs:
        SWAPGS_UNSAFE_STACK
nmi_restore:
        RESTORE_ALL 8

        /* Pop the extra iret frame */
        addq $(5*8), %rsp

        /* Clear the NMI executing stack variable */
        movq $0, 5*8(%rsp)
        jmp irq_return
        CFI_ENDPROC
END(nmi)

ENTRY(ignore_sysret)
        CFI_STARTPROC
        mov $-ENOSYS,%eax
        sysret
        CFI_ENDPROC
END(ignore_sysret)

/*
 * End of kprobes section
 */
.popsection