/* arch/x86/xen/xen-asm_64.S */
/*
 * Asm versions of Xen pv-ops, suitable for either direct use or
 * inlining.  The inline versions are the same as the direct-use
 * versions, with the pre- and post-amble chopped off.
 *
 * This code is encoded for size rather than absolute efficiency, with
 * a view to being able to inline as much as possible.
 *
 * We only bother with direct forms (ie, vcpu in pda) of the
 * operations here; the indirect forms are better handled in C, since
 * they're generally too large to inline anyway.
 */
13
14 #include <asm/errno.h>
15 #include <asm/percpu.h>
16 #include <asm/processor-flags.h>
17 #include <asm/segment.h>
18 #include <asm/asm-offsets.h>
19 #include <asm/thread_info.h>
20
21 #include <xen/interface/xen.h>
22
23 #include "xen-asm.h"
24
/*
 * Xen pushes two extra words (%rcx and %r11 — see the syscall-callback
 * frame comment below) above the standard exception frame.  Reload
 * those registers from the stack and drop the two slots so the frame
 * looks like a native CPU-generated one.
 */
ENTRY(xen_adjust_exception_frame)
	mov 8+0(%rsp), %rcx	/* first extra word (above return addr) -> %rcx */
	mov 8+8(%rsp), %r11	/* second extra word -> %r11 */
	ret $16			/* return, popping the 16 bytes of extra frame */
29
/* Entry for the iret hypercall stub: each hypercall slot is 32 bytes. */
hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
/*
 * Xen64 iret frame:
 *
 *	ss
 *	rsp
 *	rflags
 *	cs
 *	rip		<-- standard iret frame
 *
 *	flags
 *
 *	rcx		}
 *	r11		}<-- pushed by hypercall page
 * rsp->rax		}
 */
ENTRY(xen_iret)
	pushq $0		/* "flags" word of the Xen iret frame (zero here;
				 * the sysret path pushes VGCF_in_syscall instead) */
1:	jmp hypercall_iret	/* let Xen perform the actual iret */
ENDPATCH(xen_iret)
/*
 * NOTE(review): 1b+1 presumably points at the jmp's displacement field so
 * it can be patched — RELOC is defined in xen-asm.h; confirm there.
 */
RELOC(xen_iret, 1b+1)
51
/*
 * Return to 64-bit usermode from a syscall, via the Xen iret
 * hypercall.  Builds the Xen iret frame (see layout above) on the
 * kernel stack and jumps to the hypercall page.
 */
ENTRY(xen_sysret64)
	/*
	 * We're already on the usermode stack at this point, but
	 * still with the kernel gs, so we can easily switch back.
	 */
	movq %rsp, PER_CPU_VAR(rsp_scratch)	/* stash user %rsp */
	movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp /* back onto kernel stack */

	/* Build the five-word iret frame, matching the layout above. */
	pushq $__USER_DS			/* ss */
	pushq PER_CPU_VAR(rsp_scratch)		/* rsp = saved user stack */
	pushq %r11				/* rflags (syscall saved them in %r11) */
	pushq $__USER_CS			/* cs */
	pushq %rcx				/* rip (syscall saved it in %rcx) */

	pushq $VGCF_in_syscall			/* "flags" word: tell Xen this is a
						 * syscall-style return */
1:	jmp hypercall_iret
ENDPATCH(xen_sysret64)
RELOC(xen_sysret64, 1b+1)
70
71 /*
72 * Xen handles syscall callbacks much like ordinary exceptions, which
73 * means we have:
74 * - kernel gs
75 * - kernel rsp
76 * - an iret-like stack frame on the stack (including rcx and r11):
77 * ss
78 * rsp
79 * rflags
80 * cs
81 * rip
82 * r11
83 * rsp->rcx
84 *
85 * In all the entrypoints, we undo all that to make it look like a
86 * CPU-generated syscall/sysenter and jump to the normal entrypoint.
87 */
88
/*
 * Undo the Xen syscall-callback frame (layout in the comment above):
 * reload %rcx/%r11 from slots 0/1 and switch to the user %rsp from
 * slot 5, so the state matches a CPU-generated syscall/sysenter.
 */
.macro undo_xen_syscall
	mov 0*8(%rsp), %rcx	/* user rip, as syscall left it in %rcx */
	mov 1*8(%rsp), %r11	/* user rflags, as syscall left it in %r11 */
	mov 5*8(%rsp), %rsp	/* switch to the saved user stack pointer */
.endm
94
/*
 * Normal 64-bit system call target: strip the Xen frame, then enter
 * the native syscall path past its swapgs (gs is already kernel's).
 */
ENTRY(xen_syscall_target)
	undo_xen_syscall
	jmp entry_SYSCALL_64_after_swapgs
ENDPROC(xen_syscall_target)
100
#ifdef CONFIG_IA32_EMULATION

/*
 * 32-bit compat syscall target: same treatment as the 64-bit path,
 * jumping to the compat syscall entry instead.
 */
ENTRY(xen_syscall32_target)
	undo_xen_syscall
	jmp entry_SYSCALL_compat
ENDPROC(xen_syscall32_target)

/* 32-bit compat sysenter target */
ENTRY(xen_sysenter_target)
	undo_xen_syscall
	jmp entry_SYSENTER_compat
ENDPROC(xen_sysenter_target)

#else /* !CONFIG_IA32_EMULATION */

/*
 * Compat entry points with IA32 emulation disabled: fail the call
 * with -ENOSYS and return to the guest via the iret hypercall.
 */
ENTRY(xen_syscall32_target)
ENTRY(xen_sysenter_target)
	lea 16(%rsp), %rsp	/* strip %rcx, %r11 */
	mov $-ENOSYS, %rax	/* return value: syscall not implemented */
	pushq $0		/* zero "flags" word for the Xen iret frame */
	jmp hypercall_iret
ENDPROC(xen_syscall32_target)
ENDPROC(xen_sysenter_target)

#endif	/* CONFIG_IA32_EMULATION */