/* arch/x86/xen/xen-asm_64.S */
1 /*
2 * Asm versions of Xen pv-ops, suitable for either direct use or
3 * inlining. The inline versions are the same as the direct-use
4 * versions, with the pre- and post-amble chopped off.
5 *
6 * This code is encoded for size rather than absolute efficiency, with
7 * a view to being able to inline as much as possible.
8 *
9 * We only bother with direct forms (ie, vcpu in pda) of the
10 * operations here; the indirect forms are better handled in C, since
11 * they're generally too large to inline anyway.
12 */
13
14 #include <asm/errno.h>
15 #include <asm/percpu.h>
16 #include <asm/processor-flags.h>
17 #include <asm/segment.h>
18 #include <asm/asm-offsets.h>
19 #include <asm/thread_info.h>
20
21 #include <xen/interface/xen.h>
22
23 #include "xen-asm.h"
24
/*
 * xen_adjust_exception_frame: convert Xen's exception frame to the
 * native layout.  Xen pushes an extra %rcx and %r11 above the iret
 * frame (see the callback-frame comment below), so reload them and
 * drop those 16 bytes when returning.
 */
25 ENTRY(xen_adjust_exception_frame)
26 mov 8+0(%rsp), %rcx	/* skip return address; reload saved %rcx */
27 mov 8+8(%rsp), %r11	/* reload saved %r11 */
28 ret $16		/* return, popping the 16 extra frame bytes */
29 ENDPROC(xen_adjust_exception_frame)
30
/*
 * Address of the HYPERVISOR_iret entry in the hypercall page: each
 * hypercall gets a 32-byte slot, indexed by hypercall number.
 */
31 hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
32 /*
33 * Xen64 iret frame:
34 *
35 * ss
36 * rsp
37 * rflags
38 * cs
39 * rip <-- standard iret frame
40 *
41 * flags
42 *
43 * rcx }
44 * r11 }<-- pushed by hypercall page
45 * rsp->rax }
46 */
/*
 * xen_iret: return to guest context via the HYPERVISOR_iret hypercall.
 * The iret frame proper is already on the stack; we only supply the
 * hypercall's "flags" word (zero here, cf. VGCF_in_syscall below).
 */
47 ENTRY(xen_iret)
48 pushq $0		/* flags argument for HYPERVISOR_iret */
49 1: jmp hypercall_iret	/* tail-call into the hypercall page */
50 ENDPATCH(xen_iret)
51 RELOC(xen_iret, 1b+1)	/* paravirt patch reloc: the jmp's target operand */
52
/*
 * xen_sysret64: paravirt replacement for SYSRET64.  On entry, per the
 * sysret convention, %rcx holds the user rip and %r11 the user rflags.
 * Build a full iret frame and return through HYPERVISOR_iret, telling
 * Xen we are leaving a syscall (VGCF_in_syscall).
 */
53 ENTRY(xen_sysret64)
54 /*
55 * We're already on the usermode stack at this point, but
56 * still with the kernel gs, so we can easily switch back
57 */
58 movq %rsp, PER_CPU_VAR(rsp_scratch)	/* stash user %rsp */
59 movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp	/* back onto kernel stack */
60
	/* Build the iret frame (ss/rsp/rflags/cs/rip, top to bottom): */
61 pushq $__USER_DS		/* ss */
62 pushq PER_CPU_VAR(rsp_scratch)	/* user rsp */
63 pushq %r11			/* rflags (sysret convention) */
64 pushq $__USER_CS		/* cs */
65 pushq %rcx			/* rip (sysret convention) */
66
67 pushq $VGCF_in_syscall	/* flags argument for HYPERVISOR_iret */
68 1: jmp hypercall_iret	/* tail-call into the hypercall page */
69 ENDPATCH(xen_sysret64)
70 RELOC(xen_sysret64, 1b+1)	/* paravirt patch reloc: the jmp's target operand */
71
72 /*
73 * Xen handles syscall callbacks much like ordinary exceptions, which
74 * means we have:
75 * - kernel gs
76 * - kernel rsp
77 * - an iret-like stack frame on the stack (including rcx and r11):
78 * ss
79 * rsp
80 * rflags
81 * cs
82 * rip
83 * r11
84 * rsp->rcx
85 *
86 * In all the entrypoints, we undo all that to make it look like a
87 * CPU-generated syscall/sysenter and jump to the normal entrypoint.
88 */
89
/*
 * undo_xen_syscall: strip Xen's syscall-callback frame (layout in the
 * comment above) — restore user %rcx and %r11 and switch back to the
 * user stack pointer, so the state matches a CPU-generated syscall.
 */
90 .macro undo_xen_syscall
91 mov 0*8(%rsp), %rcx	/* user %rcx (bottom of callback frame) */
92 mov 1*8(%rsp), %r11	/* user %r11 */
93 mov 5*8(%rsp), %rsp	/* user rsp slot of the iret frame */
94 .endm
95
96 /* Normal 64-bit system call target */
97 ENTRY(xen_syscall_target)
98 undo_xen_syscall	/* make the frame look like a native SYSCALL */
99 jmp entry_SYSCALL_64_after_swapgs	/* gs is already the kernel's */
100 ENDPROC(xen_syscall_target)
101
102 #ifdef CONFIG_IA32_EMULATION
103
104 /* 32-bit compat syscall target */
105 ENTRY(xen_syscall32_target)
106 undo_xen_syscall	/* make the frame look like a native SYSCALL */
107 jmp entry_SYSCALL_compat	/* gs is already the kernel's */
108 ENDPROC(xen_syscall32_target)
109
110 /* 32-bit compat sysenter target */
111 ENTRY(xen_sysenter_target)
112 undo_xen_syscall	/* make the frame look like a native SYSENTER */
113 jmp entry_SYSENTER_compat	/* gs is already the kernel's */
114 ENDPROC(xen_sysenter_target)
115
115
116 #else /* !CONFIG_IA32_EMULATION */
117
/*
 * Without IA32 emulation, 32-bit syscall/sysenter are unsupported:
 * fail the call with -ENOSYS and return straight to the guest via
 * the iret hypercall.
 */
118 ENTRY(xen_syscall32_target)
119 ENTRY(xen_sysenter_target)
120 lea 16(%rsp), %rsp /* strip %rcx, %r11 */
121 mov $-ENOSYS, %rax	/* fail the syscall */
122 pushq $0		/* flags argument for HYPERVISOR_iret */
123 jmp hypercall_iret
124 ENDPROC(xen_syscall32_target)
125 ENDPROC(xen_sysenter_target)
126
127 #endif /* CONFIG_IA32_EMULATION */