#include <linux/linkage.h>
#include <linux/lguest.h>
#include <asm/lguest_hcall.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/processor-flags.h>

/*G:020 Our story starts with the kernel booting into startup_32 in
 * arch/x86/kernel/head_32.S. It expects a boot header, which is created by
 * the bootloader (the Launcher in our case).
 *
 * The startup_32 function does very little: it clears the uninitialized global
 * C variables which we expect to be zero (ie. BSS) and then copies the boot
 * header and kernel command line somewhere safe. Finally it checks the
 * 'hardware_subarch' field. This was introduced in 2.6.24 for lguest and Xen:
 * if it's set to '1' (lguest's assigned number), then it calls us here.
 *
 * WARNING: be very careful here! We're running at addresses equal to physical
 * addresses (around 0), not above PAGE_OFFSET as most code expects
 * (eg. 0xC0000000). Jumps are relative, so they're OK, but we can't touch any
 * data without remembering to subtract __PAGE_OFFSET!
 *
 * The .section line puts this code in .init.text so it will be discarded after
 * boot. */
.section .init.text, "ax", @progbits
ENTRY(lguest_entry)
	/* We make the "initialization" hypercall now to tell the Host about
	 * us, and also find out where it put our page tables. */
	movl $LHCALL_LGUEST_INIT, %eax
	movl $lguest_data - __PAGE_OFFSET, %ebx
	.byte 0x0f,0x01,0xc1 /* KVM_HYPERCALL */

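	/* That .byte sequence is the "vmcall" instruction, spelled out for
	 * the benefit of old assemblers.  The C side wraps the same bytes in
	 * an inline helper; a rough sketch (not the authoritative version,
	 * which lives in asm/lguest_hcall.h) looks like this:
	 *
	 *	static inline unsigned long
	 *	hcall(unsigned long call,
	 *	      unsigned long arg1, unsigned long arg2, unsigned long arg3)
	 *	{
	 *		// Call number in %eax, args in %ebx, %ecx, %edx;
	 *		// the return value (if any) comes back in %eax.
	 *		asm volatile(".byte 0x0f,0x01,0xc1"
	 *			     : "=a"(call)
	 *			     : "a"(call), "b"(arg1), "c"(arg2), "d"(arg3)
	 *			     : "memory");
	 *		return call;
	 *	}
	 *
	 * That matches what we just did by hand: LHCALL_LGUEST_INIT in %eax
	 * and the (physical) address of lguest_data in %ebx. */
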
	/* Set up the initial stack so we can run C code. */
	movl $(init_thread_union+THREAD_SIZE),%esp

	/* Jumps are relative, and we're running __PAGE_OFFSET too low at the
	 * moment. */
	jmp lguest_init+__PAGE_OFFSET

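/* How did startup_32 know to call lguest_entry in the first place?  The
 * Launcher tells it: it sets the 'hardware_subarch' field in the boot header
 * it builds for the Guest, and head_32.S uses that value to pick an entry
 * point.  A hedged sketch of the Launcher side (the pointer name is
 * hypothetical; the exact access depends on how the boot params page is
 * mapped):
 *
 *	#include <asm/bootparam.h>
 *
 *	struct boot_params *boot = guest_boot_params;	// hypothetical pointer
 *	boot->hdr.hardware_subarch = 1;			// 1 == lguest
 */
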
/*G:055 We create a macro which puts the assembler code between lgstart_ and
 * lgend_ markers. These templates are put in the .text section: they can't be
 * discarded after boot as we may need to patch modules, too. */
.text
#define LGUEST_PATCH(name, insns...) \
	lgstart_##name: insns; lgend_##name:; \
	.globl lgstart_##name; .globl lgend_##name

LGUEST_PATCH(cli, movl $0, lguest_data+LGUEST_DATA_irq_enabled)
LGUEST_PATCH(pushf, movl lguest_data+LGUEST_DATA_irq_enabled, %eax)
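
/* What uses those lgstart_/lgend_ markers?  The paravirt patching code over
 * in arch/x86/lguest/boot.c.  Roughly (a sketch from memory, so treat the
 * exact names and signature as approximate): it keeps a table mapping a
 * paravirt operation to its template, and at patch time copies the template's
 * bytes over the indirect call site if they fit:
 *
 *	static const struct lguest_insns {
 *		const char *start, *end;
 *	} lguest_insns[] = {
 *		[PARAVIRT_PATCH(pv_irq_ops.irq_disable)] = { lgstart_cli, lgend_cli },
 *		[PARAVIRT_PATCH(pv_irq_ops.save_fl)] = { lgstart_pushf, lgend_pushf },
 *	};
 *
 *	static unsigned lguest_patch(u8 type, u16 clobber, void *ibuf,
 *				     unsigned long addr, unsigned len)
 *	{
 *		unsigned int insn_len;
 *
 *		// No template, or it doesn't fit?  Fall back to the default.
 *		if (type >= ARRAY_SIZE(lguest_insns) || !lguest_insns[type].start)
 *			return paravirt_patch_default(type, clobber, ibuf, addr, len);
 *		insn_len = lguest_insns[type].end - lguest_insns[type].start;
 *		if (insn_len > len)
 *			return paravirt_patch_default(type, clobber, ibuf, addr, len);
 *
 *		// Copy our replacement instructions straight into the callsite.
 *		memcpy(ibuf, lguest_insns[type].start, insn_len);
 *		return insn_len;
 *	}
 */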

/*G:033 But using those wrappers is inefficient (we'll see why that doesn't
 * matter for save_fl and irq_disable later). If we write our routines
 * carefully in assembler, we can avoid clobbering any registers and avoid
 * jumping through the wrapper functions.
 *
 * I skipped over our first piece of assembler, but this one is worth studying
 * in a bit more detail so I'll describe it in easy stages. First, the routine
 * to enable interrupts: */
ENTRY(lg_irq_enable)
	/* The reverse of irq_disable, this sets lguest_data.irq_enabled to
	 * X86_EFLAGS_IF (ie. "Interrupts enabled"). */
	movl $X86_EFLAGS_IF, lguest_data+LGUEST_DATA_irq_enabled
	/* But now we need to check if the Host wants to know: there might have
	 * been interrupts waiting to be delivered, in which case it will have
	 * set lguest_data.irq_pending to X86_EFLAGS_IF. If it's not zero, we
	 * jump to send_interrupts, otherwise we're done. */
	cmpl $0, lguest_data+LGUEST_DATA_irq_pending
	jnz send_interrupts
	/* One cool thing about x86 is that you can do many things without using
	 * a register. In this case, the normal path hasn't needed to save or
	 * restore any registers at all! */
	ret
send_interrupts:
	/* OK, now we need a register: eax is used for the hypercall number,
	 * which is LHCALL_SEND_INTERRUPTS.
	 *
	 * We used not to bother with this pending detection at all, which was
	 * much simpler. Sooner or later the Host would realize it had to
	 * send us an interrupt. But that turns out to make performance 7
	 * times worse on a simple tcp benchmark. So now we do this the hard
	 * way. */
	pushl %eax
	movl $LHCALL_SEND_INTERRUPTS, %eax
	/* This is a vmcall instruction (same thing that KVM uses). Older
	 * assembler versions might not know the "vmcall" instruction, so we
	 * create one manually here. */
	.byte 0x0f,0x01,0xc1 /* KVM_HYPERCALL */
	popl %eax
	ret

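/* Both routines above poke fields in "struct lguest_data", the structure we
 * handed to the Host in the LHCALL_LGUEST_INIT hypercall at the top of this
 * file.  As a reminder (this is only a sketch; the real definition lives in
 * include/linux/lguest.h and the LGUEST_DATA_* offsets are generated by
 * asm-offsets), the two fields we care about here look roughly like:
 *
 *	struct lguest_data {
 *		// 512 (X86_EFLAGS_IF) == interrupts enabled, 0 == disabled.
 *		unsigned int irq_enabled;
 *		// Set by the Host when it has interrupts queued for us.
 *		unsigned int irq_pending;
 *		// ... many more fields ...
 *	};
 *
 * The Guest toggles irq_enabled instead of touching the real interrupt flag,
 * which is why these routines can be so cheap. */
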
/* Finally, the "popf" or "restore flags" routine. The %eax register holds the
 * flags (in practice, either X86_EFLAGS_IF or 0): if it's X86_EFLAGS_IF we're
 * enabling interrupts again, if it's 0 we're leaving them off. */
ENTRY(lg_restore_fl)
	/* This is just "lguest_data.irq_enabled = flags;" */
	movl %eax, lguest_data+LGUEST_DATA_irq_enabled
	/* Now, if the %eax value has enabled interrupts and
	 * lguest_data.irq_pending is set, we want to tell the Host so it can
	 * deliver any outstanding interrupts. Fortunately, both values will
	 * be X86_EFLAGS_IF (ie. 512) in that case, and the "testl"
	 * instruction will AND them together for us. If both are set, we
	 * jump to send_interrupts. */
	testl lguest_data+LGUEST_DATA_irq_pending, %eax
	jnz send_interrupts
	/* Again, the normal path has used no extra registers. Clever, huh? */
	ret
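
/* The G:033 comment above promised to explain why the slower wrapper path is
 * fine for save_fl and irq_disable: those two stay as ordinary C over in
 * arch/x86/lguest/boot.c, because the LGUEST_PATCH templates above get copied
 * straight over their callsites anyway.  A sketch (names as I remember them,
 * so treat this as illustrative rather than authoritative):
 *
 *	// save_fl: reading the flags is just reading our own variable.
 *	static unsigned long save_fl(void)
 *	{
 *		return lguest_data.irq_enabled;
 *	}
 *
 *	// irq_disable: turning interrupts off never needs the Host's help.
 *	static void irq_disable(void)
 *	{
 *		lguest_data.irq_enabled = 0;
 *	}
 *
 * Only enabling interrupts (or restoring a flags word that enables them) might
 * need a hypercall, which is why lg_irq_enable and lg_restore_fl above get the
 * hand-written treatment. */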

/* These mark the EIP range where the Host should never deliver interrupts. */
.global lguest_noirq_start
.global lguest_noirq_end

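/* How does the Host learn about this range?  The Guest's C setup code passes
 * the two label addresses through lguest_data during boot.  A sketch, with
 * the field names from memory (check include/linux/lguest.h for the real
 * ones):
 *
 *	// In lguest_init(), roughly:
 *	lguest_data.noirq_start = (u32)lguest_noirq_start;
 *	lguest_data.noirq_end   = (u32)lguest_noirq_end;
 *
 * From then on the Host refuses to inject interrupts while the Guest's EIP is
 * inside [lguest_noirq_start, lguest_noirq_end). */
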
/*M:004 When the Host reflects a trap or injects an interrupt into the Guest,
 * it sets the eflags interrupt bit on the stack based on
 * lguest_data.irq_enabled, so the Guest iret logic does the right thing when
 * restoring it. However, when the Host sets the Guest up for direct traps,
 * such as system calls, the processor is the one to push eflags onto the
 * stack, and the interrupt bit will be 1 (in reality, interrupts are always
 * enabled in the Guest).
 *
 * This turns out to be harmless: the only trap which should happen under Linux
 * with interrupts disabled is Page Fault (due to our lazy mapping of vmalloc
 * regions), which has to be reflected through the Host anyway. If another
 * trap *does* go off when interrupts are disabled, the Guest will panic, and
 * we'll never get to this iret! :*/

/*G:045 There is one final paravirt_op that the Guest implements, and glancing
 * at it you can see why I left it to last. It's *cool*! It's in *assembler*!
 *
 * The "iret" instruction is used to return from an interrupt or trap. The
 * stack looks like this:
 *   old address
 *   old code segment & privilege level
 *   old processor flags ("eflags")
 *
 * The "iret" instruction pops those values off the stack and restores them all
 * at once. The only problem is that eflags includes the Interrupt Flag which
 * the Guest can't change: the CPU will simply ignore it when we do an "iret".
 * So we have to copy eflags from the stack to lguest_data.irq_enabled before
 * we do the "iret".
 *
 * There are two problems with this: firstly, we need to use a register to do
 * the copy and secondly, the whole thing needs to be atomic. The first
 * problem is easy to solve: push %eax on the stack so we can use it, and then
 * restore it at the end just before the real "iret".
 *
 * The second is harder: copying eflags to lguest_data.irq_enabled will turn
 * interrupts on before we're finished, so we could be interrupted before we
 * return to userspace or wherever. Our solution to this is to surround the
 * code with lguest_noirq_start: and lguest_noirq_end: labels. We tell the
 * Host that it is *never* to interrupt us there, even if interrupts seem to be
 * enabled. */
ENTRY(lguest_iret)
	pushl %eax
	movl 12(%esp), %eax
lguest_noirq_start:
	/* Note the %ss: segment prefix here. Normal data accesses use the
	 * "ds" segment, but that will have already been restored for whatever
	 * we're returning to (such as userspace): we can't trust it. The %ss:
	 * prefix makes sure we use the stack segment, which is still valid. */
	movl %eax,%ss:lguest_data+LGUEST_DATA_irq_enabled
	popl %eax
	iret
lguest_noirq_end:
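
/* The irq and iret routines in this file aren't called by name from generic
 * kernel code: boot.c plugs them into the paravirt_ops tables when the Guest
 * initializes.  A sketch of that wiring (the CALLEE_SAVE wrappers are how the
 * register-clobber-free routines above are advertised; exact spelling may
 * differ):
 *
 *	pv_cpu_ops.iret = lguest_iret;
 *	pv_irq_ops.irq_enable = __PV_IS_CALLEE_SAVE(lg_irq_enable);
 *	pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(lg_restore_fl);
 *
 * After that, the kernel's "iret", "sti" and "popf" paravirt operations end
 * up here instead of executing the native instructions. */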