/*
	Asm versions of Xen pv-ops, suitable for either direct use or
	inlining.  The inline versions are the same as the direct-use
	versions, with the pre- and post-amble chopped off.

	This code is optimized for size rather than absolute efficiency,
	with a view to being able to inline as much as possible.

	We only bother with direct forms (i.e., vcpu in pda) of the
	operations here; the indirect forms are better handled in C,
	since they're generally too large to inline anyway.
 */

#include <linux/linkage.h>

#include <asm/asm-offsets.h>
#include <asm/processor-flags.h>

#include <xen/interface/xen.h>

#define RELOC(x, v)	.globl x##_reloc; x##_reloc=v
#define ENDPATCH(x)	.globl x##_end; x##_end=.
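
/*
	ENDPATCH(x) marks the end of the patchable part of x with an
	x##_end symbol, and RELOC(x, v) records in x##_reloc the
	location (if any) of a call displacement that must be fixed up
	when the sequence is copied inline; 0 means no relocation.
	These symbols are presumably consumed by the paravirt patching
	code on the C side.
 */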

/* Pseudo-flag used for virtual NMI, which we don't implement yet */
#define XEN_EFLAGS_NMI	0x80000000

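/* The direct implementations below are compiled out for now; the
   matching #endif sits just before xen_iret. */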
#if 0
#include <asm/percpu.h>

/*
	Enable events.  This clears the event mask, then tests the
	pending event status with a single and operation (testb).  If
	there are pending events, then enter the hypervisor to get them
	handled.
 */
ENTRY(xen_irq_enable_direct)
	/* Unmask events */
	movb $0, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)

	/* Being preempted here doesn't matter, because preemption
	   itself will deal with any pending interrupts.  The pending
	   check may end up being run on the wrong CPU, but that
	   doesn't hurt. */

	/* Test for pending */
	testb $0xff, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_pending)
	jz 1f

2:	call check_events
1:
ENDPATCH(xen_irq_enable_direct)
	ret
	ENDPROC(xen_irq_enable_direct)
	RELOC(xen_irq_enable_direct, 2b+1)

/*
	Disabling events is simply a matter of making the event mask
	non-zero.
 */
ENTRY(xen_irq_disable_direct)
	movb $1, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
ENDPATCH(xen_irq_disable_direct)
	ret
	ENDPROC(xen_irq_disable_direct)
	RELOC(xen_irq_disable_direct, 0)

/*
	(xen_)save_fl is used to get the current interrupt enable status.
	Callers expect the status to be in X86_EFLAGS_IF, and other bits
	may be set in the return value.  We take advantage of this by
	making sure that X86_EFLAGS_IF has the right value (and other
	bits in that byte are 0), but other bits in the return value are
	undefined.  We need to toggle the state of the bit, because
	Xen and x86 use opposite senses (mask vs enable).
 */
ENTRY(xen_save_fl_direct)
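	/* Roughly, in C (a sketch, assuming the usual vcpu_info
	   layout):
		return upcall_mask ? 0 : X86_EFLAGS_IF;
	   setz leaves %ah = 1 when the mask byte is zero (events
	   enabled), and addb %ah,%ah doubles that to 2, which is
	   X86_EFLAGS_IF (0x200) seen through %ah, the second byte
	   of %eax. */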
	testb $0xff, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
	setz %ah
	addb %ah,%ah
ENDPATCH(xen_save_fl_direct)
	ret
	ENDPROC(xen_save_fl_direct)
	RELOC(xen_save_fl_direct, 0)

/*
	In principle the caller should be passing us a value returned
	from xen_save_fl_direct, but for robustness' sake we test only
	the X86_EFLAGS_IF flag rather than the whole byte.  After
	setting the interrupt mask state, this code checks for unmasked
	pending events and enters the hypervisor to get them delivered
	if so.
 */
ENTRY(xen_restore_fl_direct)
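	/* The flags argument arrives in %eax, matching what
	   xen_save_fl_direct returns, so X86_EFLAGS_IF (0x200) is
	   bit 1 of %ah; hence the byte-shifted immediate below. */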
	testb $X86_EFLAGS_IF>>8, %ah
	setz PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
	/* Being preempted here doesn't matter, because preemption
	   itself will deal with any pending interrupts.  The pending
	   check may end up being run on the wrong CPU, but that
	   doesn't hurt. */

	/* check for unmasked and pending */
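	/* In vcpu_info, evtchn_upcall_pending is the byte at offset 0
	   and evtchn_upcall_mask the byte at offset 1, so one 16-bit
	   compare against 0x0001 checks pending == 1 and mask == 0 at
	   the same time. */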
	cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_pending)
	jnz 1f
2:	call check_events
1:
ENDPATCH(xen_restore_fl_direct)
	ret
	ENDPROC(xen_restore_fl_direct)
	RELOC(xen_restore_fl_direct, 2b+1)


/*
	Force an event check by making a hypercall,
	but preserve regs before making the call.
 */
check_events:
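	/* Save every register a C call may clobber under the SysV
	   AMD64 ABI (%rax, %rcx, %rdx, %rsi, %rdi, %r8-%r11); callers
	   of the direct ops above don't expect any of them to
	   change. */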
	push %rax
	push %rcx
	push %rdx
	push %rsi
	push %rdi
	push %r8
	push %r9
	push %r10
	push %r11
	call force_evtchn_callback
	pop %r11
	pop %r10
	pop %r9
	pop %r8
	pop %rdi
	pop %rsi
	pop %rdx
	pop %rcx
	pop %rax
	ret
#endif

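/*
	Each hypercall stub in hypercall_page is 32 bytes, so this
	jumps to the stub for the iret hypercall.  HYPERVISOR_iret
	takes its arguments on the stack; the pushq $0 supplies a zero
	flags word (e.g. no VGCF_in_syscall) ahead of the iret frame.
 */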
ENTRY(xen_iret)
	pushq $0
	jmp hypercall_page + __HYPERVISOR_iret * 32

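/*
	sysexit is not used for 64-bit pv guests, so trap with an
	invalid opcode if this stub is ever reached.
 */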
ENTRY(xen_sysexit)
	ud2a