#include <linux/linkage.h>
#include <linux/lguest.h>
#include <asm/lguest_hcall.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/processor-flags.h>

/*G:020
 * Our story starts with the bzImage: booting starts at startup_32 in
 * arch/x86/boot/compressed/head_32.S.  This merely uncompresses the real
 * kernel in place and then jumps into it: startup_32 in
 * arch/x86/kernel/head_32.S.  Both routines expect a boot header in the %esi
 * register, which is created by the bootloader (the Launcher in our case).
 *
 * The second startup_32 does very little: it clears the uninitialized global
 * C variables which we expect to be zero (ie. BSS), copies the boot header
 * and kernel command line somewhere safe, and populates some initial page
 * tables.  Finally it checks the 'hardware_subarch' field.  This was
 * introduced in 2.6.24 for lguest and Xen: if it's set to '1' (lguest's
 * assigned number), then it calls us here.
 *
 * WARNING: be very careful here!  We're running at addresses equal to physical
 * addresses (around 0), not above PAGE_OFFSET as most code expects
 * (eg. 0xC0000000).  Jumps are relative, so they're OK, but we can't touch any
 * data without remembering to subtract __PAGE_OFFSET!
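 *
 * For example, lguest_data below is linked at an address above __PAGE_OFFSET,
 * so to hand the Host its physical address we write
 * "$lguest_data - __PAGE_OFFSET": the assembler-time version of what __pa()
 * does in C.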
 *
 * The .section line puts this code in .init.text so it will be discarded after
 * boot.
 */
.section .init.text, "ax", @progbits
ENTRY(lguest_entry)
	/*
	 * We make the "initialization" hypercall now to tell the Host where
	 * our lguest_data struct is.
	 */
	movl	$LHCALL_LGUEST_INIT, %eax
	movl	$lguest_data - __PAGE_OFFSET, %ebx
	int	$LGUEST_TRAP_ENTRY
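	/*
	 * (This is what the hcall() wrapper in asm/lguest_hcall.h boils down
	 * to: hypercall number in %eax, first argument in %ebx, then a trap
	 * into the Host -- roughly hcall(LHCALL_LGUEST_INIT,
	 * __pa(&lguest_data), 0, 0, 0).  We do it by hand here because we're
	 * running at physical addresses with no stack set up yet.)
	 */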

	/* Now turn our pagetables on; set up by arch/x86/kernel/head_32.S. */
	movl	$LHCALL_NEW_PGTABLE, %eax
	movl	$(initial_page_table - __PAGE_OFFSET), %ebx
	int	$LGUEST_TRAP_ENTRY

	/* Set up the initial stack so we can run C code. */
	movl	$(init_thread_union+THREAD_SIZE),%esp
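	/* (Stacks grow downwards, so that points %esp at the top of the union.) */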

	/* Jumps are relative: we're running __PAGE_OFFSET too low. */
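	/*
	 * The assembler computed this jump's displacement assuming we'd be
	 * running at our linked (virtual) address, so jumping to
	 * "lguest_init+__PAGE_OFFSET" from __PAGE_OFFSET below that lands us
	 * exactly on lguest_init -- valid now that our pagetables are loaded.
	 */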
	jmp lguest_init+__PAGE_OFFSET

/*G:055
 * We create a macro which puts the assembler code between lgstart_ and lgend_
 * markers.  These templates are put in the .text section: they can't be
 * discarded after boot as we may need to patch modules, too.
 */
.text
#define LGUEST_PATCH(name, insns...)			\
	lgstart_##name:	insns; lgend_##name:;		\
	.globl lgstart_##name; .globl lgend_##name

LGUEST_PATCH(cli, movl $0, lguest_data+LGUEST_DATA_irq_enabled)
LGUEST_PATCH(pushf, movl lguest_data+LGUEST_DATA_irq_enabled, %eax)
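
/*
 * For example, LGUEST_PATCH(cli, ...) above expands to:
 *
 *	lgstart_cli: movl $0, lguest_data+LGUEST_DATA_irq_enabled; lgend_cli:;
 *	.globl lgstart_cli; .globl lgend_cli
 *
 * The C side (lguest_patch() in arch/x86/lguest/boot.c) then uses the markers
 * to splice those instructions directly over the paravirt call site -- in
 * rough outline (a sketch, not the exact code):
 *
 *	insn_len = lgend_cli - lgstart_cli;
 *	memcpy(insn_buf, lgstart_cli, insn_len);
 */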

/*G:033
 * But using those wrappers is inefficient (we'll see why that doesn't matter
 * for save_fl and irq_disable later).  If we write our routines carefully in
 * assembler, we can avoid clobbering any registers and avoid jumping through
 * the wrapper functions.
 *
 * I skipped over our first piece of assembler, but this one is worth studying
 * in a bit more detail so I'll describe it in easy stages.  First, the routine
 * to enable interrupts:
 */
ENTRY(lg_irq_enable)
	/*
	 * The reverse of irq_disable, this sets lguest_data.irq_enabled to
	 * X86_EFLAGS_IF (ie. "Interrupts enabled").
	 */
	movl	$X86_EFLAGS_IF, lguest_data+LGUEST_DATA_irq_enabled
	/*
	 * But now we need to check if the Host wants to know: there might have
	 * been interrupts waiting to be delivered, in which case it will have
	 * set lguest_data.irq_pending to X86_EFLAGS_IF.  If it's not zero, we
	 * jump to send_interrupts, otherwise we're done.
	 */
	cmpl	$0, lguest_data+LGUEST_DATA_irq_pending
	jnz	send_interrupts
	/*
	 * One cool thing about x86 is that you can do many things without using
	 * a register.  In this case, the normal path hasn't needed to save or
	 * restore any registers at all!
	 */
	ret
send_interrupts:
	/*
	 * OK, now we need a register: eax is used for the hypercall number,
	 * which is LHCALL_SEND_INTERRUPTS.
	 *
	 * We used not to bother with this pending detection at all, which was
	 * much simpler.  Sooner or later the Host would realize it had to
	 * send us an interrupt.  But that turns out to make performance 7
	 * times worse on a simple TCP benchmark.  So now we do this the hard
	 * way.
	 */
	pushl	%eax
	movl	$LHCALL_SEND_INTERRUPTS, %eax
	/* This is the actual hypercall trap. */
	int	$LGUEST_TRAP_ENTRY
	/* Put eax back the way we found it. */
	popl	%eax
	ret
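
/*
 * In rough C, the routine above is (just a sketch: the point of doing it in
 * assembler is that the fast path obeys no C calling convention and clobbers
 * no registers at all):
 *
 *	void lg_irq_enable(void)
 *	{
 *		lguest_data.irq_enabled = X86_EFLAGS_IF;
 *		if (lguest_data.irq_pending)
 *			hcall(LHCALL_SEND_INTERRUPTS, 0, 0, 0, 0);
 *	}
 */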

/*
 * Finally, the "popf" or "restore flags" routine.  The %eax register holds the
 * flags (in practice, either X86_EFLAGS_IF or 0): if it's X86_EFLAGS_IF we're
 * enabling interrupts again, if it's 0 we're leaving them off.
 */
ENTRY(lg_restore_fl)
	/* This is just "lguest_data.irq_enabled = flags;" */
	movl	%eax, lguest_data+LGUEST_DATA_irq_enabled
	/*
	 * Now, if the %eax value has enabled interrupts and
	 * lguest_data.irq_pending is set, we want to tell the Host so it can
	 * deliver any outstanding interrupts.  Fortunately, both values will
	 * be X86_EFLAGS_IF (ie. 512) in that case, and the "testl"
	 * instruction will AND them together for us.  If both are set, we
	 * jump to send_interrupts.
	 */
	testl	lguest_data+LGUEST_DATA_irq_pending, %eax
	jnz	send_interrupts
	/* Again, the normal path has used no extra registers.  Clever, huh? */
	ret
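
/*
 * And again in rough C (a sketch; the flags argument really arrives in %eax):
 *
 *	void lg_restore_fl(unsigned long flags)
 *	{
 *		lguest_data.irq_enabled = flags;
 *		if (flags & lguest_data.irq_pending)
 *			hcall(LHCALL_SEND_INTERRUPTS, 0, 0, 0, 0);
 *	}
 */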
/*:*/

/* This marks the EIP where the Host should never deliver interrupts. */
.global lguest_noirq_iret

/*M:004
 * When the Host reflects a trap or injects an interrupt into the Guest, it
 * sets the eflags interrupt bit on the stack based on lguest_data.irq_enabled,
 * so the Guest iret logic does the right thing when restoring it.  However,
 * when the Host sets the Guest up for direct traps, such as system calls, the
 * processor is the one to push eflags onto the stack, and the interrupt bit
 * will be 1 (in reality, interrupts are always enabled in the Guest).
 *
 * This turns out to be harmless: the only trap which should happen under Linux
 * with interrupts disabled is Page Fault (due to our lazy mapping of vmalloc
 * regions), which has to be reflected through the Host anyway.  If another
 * trap *does* go off when interrupts are disabled, the Guest will panic, and
 * we'll never get to this iret!
:*/

/*G:045
 * There is one final paravirt_op that the Guest implements, and glancing at it
 * you can see why I left it to last.  It's *cool*!  It's in *assembler*!
 *
 * The "iret" instruction is used to return from an interrupt or trap.  The
 * stack looks like this:
 *   old address
 *   old code segment & privilege level
 *   old processor flags ("eflags")
 *
 * The "iret" instruction pops those values off the stack and restores them all
 * at once.  The only problem is that eflags includes the Interrupt Flag which
 * the Guest can't change: the CPU will simply ignore it when we do an "iret".
 * So we have to copy eflags from the stack to lguest_data.irq_enabled before
 * we do the "iret".
 *
 * There are two problems with this: firstly, we can't clobber any registers
 * and secondly, the whole thing needs to be atomic.  The first problem
 * is solved by using a "push memory"/"pop memory" instruction pair for the
 * copy.
 *
 * The second is harder: copying eflags to lguest_data.irq_enabled will turn
 * interrupts on before we're finished, so we could be interrupted before we
 * return to userspace or wherever.  Our solution is to tell the Host that it
 * is *never* to interrupt us there, even if interrupts seem to be enabled.
 * (It's not necessary to protect the pop instruction itself:
 * lguest_data.irq_enabled only changes once the pop completes, and by then
 * the EIP is already at the iret.  So we only need to protect that single
 * instruction, the iret.)
 */
ENTRY(lguest_iret)
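	/*
	 * From %esp upwards the stack holds: old EIP, old CS, old eflags.
	 * So 2*4(%esp) is the saved eflags word: push a copy of it on top.
	 */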
	pushl	2*4(%esp)
	/*
	 * Note the %ss: segment prefix here.  Normal data accesses use the
	 * "ds" segment, but that will have already been restored for whatever
	 * we're returning to (such as userspace): we can't trust it.  The %ss:
	 * prefix makes sure we use the stack segment, which is still valid.
	 */
	popl	%ss:lguest_data+LGUEST_DATA_irq_enabled
lguest_noirq_iret:
	iret
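
/*
 * (On the Host side the check is simple: before delivering an interrupt, the
 * Host compares the Guest's EIP against lguest_noirq_iret and holds the
 * interrupt back if they match -- a sketch of the idea; see the interrupt
 * delivery code in drivers/lguest/interrupts_and_traps.c for the real thing.)
 */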