/*P:800 Interrupts (traps) are complicated enough to earn their own file.
 * There are three classes of interrupts:
 *
 * 1) Real hardware interrupts which occur while we're running the Guest,
 * 2) Interrupts for virtual devices attached to the Guest, and
 * 3) Traps and faults from the Guest.
 *
 * Real hardware interrupts must be delivered to the Host, not the Guest.
 * Virtual interrupts must be delivered to the Guest, but we make them look
 * just like real hardware would deliver them.  Traps from the Guest can be set
 * up to go directly back into the Guest, but sometimes the Host wants to see
 * them first, so we also have a way of "reflecting" them into the Guest as if
 * they had been delivered to it directly. :*/
#include <linux/uaccess.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include "lg.h"

/* Allow Guests to use a non-128 (ie. non-Linux) syscall trap.  Read-only
 * module parameter (mode 0444): can only be set at module load time. */
static unsigned int syscall_vector = SYSCALL_VECTOR;
module_param(syscall_vector, uint, 0444);
bff672e6 | 23 | /* The address of the interrupt handler is split into two bits: */ |
d7e28ffe RR |
24 | static unsigned long idt_address(u32 lo, u32 hi) |
25 | { | |
26 | return (lo & 0x0000FFFF) | (hi & 0xFFFF0000); | |
27 | } | |
28 | ||
bff672e6 RR |
29 | /* The "type" of the interrupt handler is a 4 bit field: we only support a |
30 | * couple of types. */ | |
d7e28ffe RR |
31 | static int idt_type(u32 lo, u32 hi) |
32 | { | |
33 | return (hi >> 8) & 0xF; | |
34 | } | |
35 | ||
bff672e6 | 36 | /* An IDT entry can't be used unless the "present" bit is set. */ |
df1693ab | 37 | static bool idt_present(u32 lo, u32 hi) |
d7e28ffe RR |
38 | { |
39 | return (hi & 0x8000); | |
40 | } | |
41 | ||
/* We need a helper to "push" a value onto the Guest's stack, since that's a
 * big part of what delivering an interrupt does.
 *
 * Like any x86 stack, pushing moves the pointer to a lower address: we
 * decrement first, then write the 32-bit value at the new top. */
static void push_guest_stack(struct lg_cpu *cpu, unsigned long *gstack, u32 val)
{
	/* Move stack down one word, then write value at the new top. */
	*gstack -= 4;
	lgwrite(cpu, *gstack, u32, val);
}
50 | ||
/*H:210 The set_guest_interrupt() routine actually delivers the interrupt or
 * trap.  The mechanics of delivering traps and interrupts to the Guest are the
 * same, except some traps have an "error code" which gets pushed onto the
 * stack as well: the caller tells us if this is one.
 *
 * "lo" and "hi" are the two parts of the Interrupt Descriptor Table for this
 * interrupt or trap.  It's split into two parts for traditional reasons: gcc
 * on i386 used to be frightened by 64 bit numbers.
 *
 * We set up the stack just like the CPU does for a real interrupt, so it's
 * identical for the Guest (and the standard "iret" instruction will undo
 * it). */
static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi,
				bool has_err)
{
	unsigned long gstack, origstack;
	u32 eflags, ss, irq_enable;
	unsigned long virtstack;

	/* There are two cases for interrupts: one where the Guest is already
	 * in the kernel, and a more complex one where the Guest is in
	 * userspace.  We check the privilege level to find out. */
	if ((cpu->regs->ss&0x3) != GUEST_PL) {
		/* The Guest told us their kernel stack with the SET_STACK
		 * hypercall: both the virtual address and the segment. */
		virtstack = cpu->esp1;
		ss = cpu->ss1;

		/* We work on the physical address of the stack page;
		 * "origstack" remembers where we started so we can compute
		 * how much we pushed. */
		origstack = gstack = guest_pa(cpu, virtstack);
		/* We push the old stack segment and pointer onto the new
		 * stack: when the Guest does an "iret" back from the interrupt
		 * handler the CPU will notice they're dropping privilege
		 * levels and expect these here. */
		push_guest_stack(cpu, &gstack, cpu->regs->ss);
		push_guest_stack(cpu, &gstack, cpu->regs->esp);
	} else {
		/* We're staying on the same Guest (kernel) stack. */
		virtstack = cpu->regs->esp;
		ss = cpu->regs->ss;

		origstack = gstack = guest_pa(cpu, virtstack);
	}

	/* Remember that we never let the Guest actually disable interrupts, so
	 * the "Interrupt Flag" bit is always set.  We copy that bit from the
	 * Guest's "irq_enabled" field into the eflags word: we saw the Guest
	 * copy it back in "lguest_iret". */
	eflags = cpu->regs->eflags;
	if (get_user(irq_enable, &cpu->lg->lguest_data->irq_enabled) == 0
	    && !(irq_enable & X86_EFLAGS_IF))
		eflags &= ~X86_EFLAGS_IF;

	/* An interrupt is expected to push three things on the stack: the old
	 * "eflags" word, the old code segment, and the old instruction
	 * pointer. */
	push_guest_stack(cpu, &gstack, eflags);
	push_guest_stack(cpu, &gstack, cpu->regs->cs);
	push_guest_stack(cpu, &gstack, cpu->regs->eip);

	/* For the six traps which supply an error code, we push that, too. */
	if (has_err)
		push_guest_stack(cpu, &gstack, cpu->regs->errcode);

	/* Now we've pushed all the old state, we change the stack, the code
	 * segment and the address to execute.  The new esp is the virtual
	 * stack top moved down by however much we pushed onto the physical
	 * copy (gstack - origstack is negative or zero). */
	cpu->regs->ss = ss;
	cpu->regs->esp = virtstack + (gstack - origstack);
	cpu->regs->cs = (__KERNEL_CS|GUEST_PL);
	cpu->regs->eip = idt_address(lo, hi);

	/* There are two kinds of interrupt handlers: 0xE is an "interrupt
	 * gate" which expects interrupts to be disabled on entry. */
	if (idt_type(lo, hi) == 0xE)
		if (put_user(0, &cpu->lg->lguest_data->irq_enabled))
			kill_guest(cpu, "Disabling interrupts");
}
127 | ||
e1e72965 | 128 | /*H:205 |
bff672e6 RR |
129 | * Virtual Interrupts. |
130 | * | |
abd41f03 RR |
131 | * interrupt_pending() returns the first pending interrupt which isn't blocked |
132 | * by the Guest. It is called before every entry to the Guest, and just before | |
133 | * we go to sleep when the Guest has halted itself. */ | |
a32a8813 | 134 | unsigned int interrupt_pending(struct lg_cpu *cpu, bool *more) |
d7e28ffe RR |
135 | { |
136 | unsigned int irq; | |
137 | DECLARE_BITMAP(blk, LGUEST_IRQS); | |
d7e28ffe | 138 | |
bff672e6 | 139 | /* If the Guest hasn't even initialized yet, we can do nothing. */ |
382ac6b3 | 140 | if (!cpu->lg->lguest_data) |
abd41f03 | 141 | return LGUEST_IRQS; |
d7e28ffe | 142 | |
bff672e6 RR |
143 | /* Take our "irqs_pending" array and remove any interrupts the Guest |
144 | * wants blocked: the result ends up in "blk". */ | |
382ac6b3 | 145 | if (copy_from_user(&blk, cpu->lg->lguest_data->blocked_interrupts, |
d7e28ffe | 146 | sizeof(blk))) |
abd41f03 | 147 | return LGUEST_IRQS; |
177e449d | 148 | bitmap_andnot(blk, cpu->irqs_pending, blk, LGUEST_IRQS); |
d7e28ffe | 149 | |
bff672e6 | 150 | /* Find the first interrupt. */ |
d7e28ffe | 151 | irq = find_first_bit(blk, LGUEST_IRQS); |
a32a8813 | 152 | *more = find_next_bit(blk, LGUEST_IRQS, irq+1); |
abd41f03 RR |
153 | |
154 | return irq; | |
155 | } | |
156 | ||
/* This actually diverts the Guest to running an interrupt handler, once an
 * interrupt has been identified by interrupt_pending(). */
void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq, bool more)
{
	struct desc_struct *idt;

	BUG_ON(irq >= LGUEST_IRQS);

	/* They may be in the middle of an iret, where they asked us never to
	 * deliver interrupts. */
	if (cpu->regs->eip >= cpu->lg->noirq_start &&
	    (cpu->regs->eip < cpu->lg->noirq_end))
		return;

	/* If they're halted, interrupts restart them. */
	if (cpu->halted) {
		/* Re-enable interrupts. */
		if (put_user(X86_EFLAGS_IF, &cpu->lg->lguest_data->irq_enabled))
			kill_guest(cpu, "Re-enabling interrupts");
		cpu->halted = 0;
	} else {
		/* Otherwise we check if they have interrupts disabled. */
		u32 irq_enabled;
		if (get_user(irq_enabled, &cpu->lg->lguest_data->irq_enabled))
			irq_enabled = 0;
		if (!irq_enabled) {
			/* Make sure they know an IRQ is pending (best
			 * effort: a put_user failure just leaves the flag
			 * stale). */
			put_user(X86_EFLAGS_IF,
				 &cpu->lg->lguest_data->irq_pending);
			return;
		}
	}

	/* Look at the IDT entry the Guest gave us for this interrupt.  The
	 * first 32 (FIRST_EXTERNAL_VECTOR) entries are for traps, so we skip
	 * over them. */
	idt = &cpu->arch.idt[FIRST_EXTERNAL_VECTOR+irq];
	/* If they don't have a handler (yet?), we just ignore it. */
	if (idt_present(idt->a, idt->b)) {
		/* OK, mark it no longer pending and deliver it. */
		clear_bit(irq, cpu->irqs_pending);
		/* set_guest_interrupt() takes the interrupt descriptor and a
		 * flag to say whether this interrupt pushes an error code onto
		 * the stack as well: virtual interrupts never do. */
		set_guest_interrupt(cpu, idt->a, idt->b, false);
	}

	/* Every time we deliver an interrupt, we update the timestamp in the
	 * Guest's lguest_data struct.  It would be better for the Guest if we
	 * did this more often, but it can actually be quite slow: doing it
	 * here is a compromise which means at least it gets updated every
	 * timer interrupt. */
	write_timestamp(cpu);

	/* If there are no other interrupts we want to deliver, clear
	 * the pending flag. */
	if (!more)
		put_user(0, &cpu->lg->lguest_data->irq_pending);
}
c18acd73 RR |
216 | /*:*/ |
217 | ||
218 | /* Linux uses trap 128 for system calls. Plan9 uses 64, and Ron Minnich sent | |
219 | * me a patch, so we support that too. It'd be a big step for lguest if half | |
220 | * the Plan 9 user base were to start using it. | |
221 | * | |
222 | * Actually now I think of it, it's possible that Ron *is* half the Plan 9 | |
223 | * userbase. Oh well. */ | |
224 | static bool could_be_syscall(unsigned int num) | |
225 | { | |
226 | /* Normal Linux SYSCALL_VECTOR or reserved vector? */ | |
227 | return num == SYSCALL_VECTOR || num == syscall_vector; | |
228 | } | |
229 | ||
230 | /* The syscall vector it wants must be unused by Host. */ | |
231 | bool check_syscall_vector(struct lguest *lg) | |
232 | { | |
233 | u32 vector; | |
234 | ||
235 | if (get_user(vector, &lg->lguest_data->syscall_vec)) | |
236 | return false; | |
237 | ||
238 | return could_be_syscall(vector); | |
239 | } | |
240 | ||
/* Module setup: reserve a non-standard syscall vector in the Host's IDT
 * bookkeeping, if one was requested.  Returns 0 on success, -EBUSY if the
 * vector is already in use by the Host. */
int init_interrupts(void)
{
	/* If they want some strange system call vector, reserve it now */
	if (syscall_vector != SYSCALL_VECTOR) {
		/* Refuse vectors already claimed, either globally or by a
		 * per-cpu IRQ. */
		if (test_bit(syscall_vector, used_vectors) ||
		    vector_used_by_percpu_irq(syscall_vector)) {
			printk(KERN_ERR "lg: couldn't reserve syscall %u\n",
			       syscall_vector);
			return -EBUSY;
		}
		set_bit(syscall_vector, used_vectors);
	}

	return 0;
}
256 | ||
257 | void free_interrupts(void) | |
258 | { | |
259 | if (syscall_vector != SYSCALL_VECTOR) | |
260 | clear_bit(syscall_vector, used_vectors); | |
261 | } | |
d7e28ffe | 262 | |
/*H:220 Now we've got the routines to deliver interrupts, delivering traps like
 * page fault is easy.  The only trick is that Intel decided that some traps
 * should push error codes: trap 8 (double fault), traps 10 through 14
 * (invalid TSS through page fault) and trap 17 (alignment check). */
static bool has_err(unsigned int trap)
{
	switch (trap) {
	case 8:
	case 10:
	case 11:
	case 12:
	case 13:
	case 14:
	case 17:
		return true;
	default:
		return false;
	}
}
270 | ||
/* deliver_trap() returns true if it could deliver the trap. */
bool deliver_trap(struct lg_cpu *cpu, unsigned int num)
{
	/* Trap numbers are always 8 bit, but we set an impossible trap number
	 * for traps inside the Switcher, so check that here. */
	if (num >= ARRAY_SIZE(cpu->arch.idt))
		return false;

	/* Early on the Guest hasn't set the IDT entries (or maybe it put a
	 * bogus one in): if we fail here, the Guest will be killed. */
	if (!idt_present(cpu->arch.idt[num].a, cpu->arch.idt[num].b))
		return false;
	/* Reflect the trap into the Guest, pushing an error code for the
	 * traps that have one. */
	set_guest_interrupt(cpu, cpu->arch.idt[num].a,
			    cpu->arch.idt[num].b, has_err(num));
	return true;
}
287 | ||
bff672e6 RR |
288 | /*H:250 Here's the hard part: returning to the Host every time a trap happens |
289 | * and then calling deliver_trap() and re-entering the Guest is slow. | |
e1e72965 RR |
290 | * Particularly because Guest userspace system calls are traps (usually trap |
291 | * 128). | |
bff672e6 RR |
292 | * |
293 | * So we'd like to set up the IDT to tell the CPU to deliver traps directly | |
294 | * into the Guest. This is possible, but the complexities cause the size of | |
295 | * this file to double! However, 150 lines of code is worth writing for taking | |
296 | * system calls down from 1750ns to 270ns. Plus, if lguest didn't do it, all | |
e1e72965 | 297 | * the other hypervisors would beat it up at lunchtime. |
bff672e6 | 298 | * |
56adbe9d RR |
299 | * This routine indicates if a particular trap number could be delivered |
300 | * directly. */ | |
df1693ab | 301 | static bool direct_trap(unsigned int num) |
d7e28ffe | 302 | { |
bff672e6 RR |
303 | /* Hardware interrupts don't go to the Guest at all (except system |
304 | * call). */ | |
c18acd73 | 305 | if (num >= FIRST_EXTERNAL_VECTOR && !could_be_syscall(num)) |
df1693ab | 306 | return false; |
d7e28ffe | 307 | |
bff672e6 RR |
308 | /* The Host needs to see page faults (for shadow paging and to save the |
309 | * fault address), general protection faults (in/out emulation) and | |
4cd8b5e2 MZ |
310 | * device not available (TS handling), invalid opcode fault (kvm hcall), |
311 | * and of course, the hypercall trap. */ | |
312 | return num != 14 && num != 13 && num != 7 && | |
313 | num != 6 && num != LGUEST_TRAP_ENTRY; | |
d7e28ffe | 314 | } |
f56a384e RR |
315 | /*:*/ |
/*M:005 The Guest has the ability to turn its interrupt gates into trap gates,
 * if it is careful.  The Host will let trap gates go directly to the
 * Guest, but the Guest needs the interrupts atomically disabled for an
 * interrupt gate.  It can do this by pointing the trap gate at instructions
 * within noirq_start and noirq_end, where it can safely disable interrupts. */

/*M:006 The Guests do not use the sysenter (fast system call) instruction,
 * because it's hardcoded to enter privilege level 0 and so can't go direct.
 * It's about twice as fast as the older "int 0x80" system call, so it might
 * still be worthwhile to handle it in the Switcher and lcall down to the
 * Guest.  The sysenter semantics are hairy tho: search for that keyword in
 * entry.S :*/
d7e28ffe | 329 | |
/*H:260 When we make traps go directly into the Guest, we need to make sure
 * the kernel stack is valid (ie. mapped in the page tables).  Otherwise, the
 * CPU trying to deliver the trap will fault while trying to push the interrupt
 * words on the stack: this is called a double fault, and it forces us to kill
 * the Guest.
 *
 * Which is deeply unfair, because (literally!) it wasn't the Guests' fault. */
void pin_stack_pages(struct lg_cpu *cpu)
{
	unsigned int i;

	/* Depending on the CONFIG_4KSTACKS option, the Guest can have one or
	 * two pages of stack space. */
	for (i = 0; i < cpu->lg->stack_pages; i++)
		/* The stack grows *upwards*, so the address we're given is the
		 * start of the page after the kernel stack.  Subtract one to
		 * get back onto the first stack page, and keep subtracting to
		 * get to the rest of the stack pages. */
		pin_page(cpu, cpu->esp1 - 1 - i * PAGE_SIZE);
}
350 | ||
/* Direct traps also mean that we need to know whenever the Guest wants to use
 * a different kernel stack, so we can change the IDT entries to use that
 * stack.  The IDT entries expect a virtual address, so unlike most addresses
 * the Guest gives us, the "esp" (stack pointer) value here is virtual, not
 * physical.
 *
 * In Linux each process has its own kernel stack, so this happens a lot: we
 * change stacks on each context switch. */
void guest_set_stack(struct lg_cpu *cpu, u32 seg, u32 esp, unsigned int pages)
{
	/* You are not allowed to have a stack segment with privilege level 0:
	 * bad Guest! */
	if ((seg & 0x3) != GUEST_PL)
		kill_guest(cpu, "bad stack segment %i", seg);
	/* We only expect one or two stack pages. */
	if (pages > 2)
		kill_guest(cpu, "bad stack pages %u", pages);
	/* Save where the stack is, and how many pages. */
	cpu->ss1 = seg;
	cpu->esp1 = esp;
	cpu->lg->stack_pages = pages;
	/* Make sure the new stack pages are mapped. */
	pin_stack_pages(cpu);
}
375 | ||
/* All this reference to mapping stacks leads us neatly into the other complex
 * part of the Host: page table handling. */

/*H:235 This is the routine which actually checks the Guest's IDT entry and
 * transfers it into the entry in "struct lguest": */
static void set_trap(struct lg_cpu *cpu, struct desc_struct *trap,
		     unsigned int num, u32 lo, u32 hi)
{
	u8 type = idt_type(lo, hi);

	/* We zero-out a not-present entry. */
	if (!idt_present(lo, hi)) {
		trap->a = trap->b = 0;
		return;
	}

	/* We only support interrupt and trap gates. */
	if (type != 0xE && type != 0xF)
		kill_guest(cpu, "bad IDT type %i", type);

	/* We only copy the handler address, present bit, privilege level and
	 * type.  The privilege level controls where the trap can be triggered
	 * manually with an "int" instruction.  This is usually GUEST_PL,
	 * except for system calls which userspace can use.  Note we force the
	 * code segment to the Guest's kernel segment. */
	trap->a = ((__KERNEL_CS|GUEST_PL)<<16) | (lo&0x0000FFFF);
	trap->b = (hi&0xFFFFEF00);
}
403 | ||
bff672e6 RR |
404 | /*H:230 While we're here, dealing with delivering traps and interrupts to the |
405 | * Guest, we might as well complete the picture: how the Guest tells us where | |
406 | * it wants them to go. This would be simple, except making traps fast | |
407 | * requires some tricks. | |
408 | * | |
409 | * We saw the Guest setting Interrupt Descriptor Table (IDT) entries with the | |
410 | * LHCALL_LOAD_IDT_ENTRY hypercall before: that comes here. */ | |
fc708b3e | 411 | void load_guest_idt_entry(struct lg_cpu *cpu, unsigned int num, u32 lo, u32 hi) |
d7e28ffe | 412 | { |
bff672e6 RR |
413 | /* Guest never handles: NMI, doublefault, spurious interrupt or |
414 | * hypercall. We ignore when it tries to set them. */ | |
d7e28ffe RR |
415 | if (num == 2 || num == 8 || num == 15 || num == LGUEST_TRAP_ENTRY) |
416 | return; | |
417 | ||
bff672e6 RR |
418 | /* Mark the IDT as changed: next time the Guest runs we'll know we have |
419 | * to copy this again. */ | |
ae3749dc | 420 | cpu->changed |= CHANGED_IDT; |
bff672e6 | 421 | |
56adbe9d | 422 | /* Check that the Guest doesn't try to step outside the bounds. */ |
fc708b3e | 423 | if (num >= ARRAY_SIZE(cpu->arch.idt)) |
382ac6b3 | 424 | kill_guest(cpu, "Setting idt entry %u", num); |
56adbe9d | 425 | else |
382ac6b3 | 426 | set_trap(cpu, &cpu->arch.idt[num], num, lo, hi); |
d7e28ffe RR |
427 | } |
428 | ||
/* The default entry for each interrupt points into the Switcher routines which
 * simply return to the Host.  The run_guest() loop will then call
 * deliver_trap() to bounce it back into the Guest. */
static void default_idt_entry(struct desc_struct *idt,
			      int trap,
			      const unsigned long handler,
			      const struct desc_struct *base)
{
	/* A present interrupt gate. */
	u32 flags = 0x8e00;

	/* Set the privilege level on the entry for the hypercall: this allows
	 * the Guest to use the "int" instruction to trigger it. */
	if (trap == LGUEST_TRAP_ENTRY)
		flags |= (GUEST_PL << 13);
	else if (base)
		/* Copy priv. level from what Guest asked for.  This allows
		 * debug (int 3) traps from Guest userspace, for example. */
		flags |= (base->b & 0x6000);

	/* Now pack it into the IDT entry in its weird format: the Switcher's
	 * code segment plus the split handler address. */
	idt->a = (LGUEST_CS<<16) | (handler&0x0000FFFF);
	idt->b = (handler&0xFFFF0000) | flags;
}
453 | ||
bff672e6 | 454 | /* When the Guest first starts, we put default entries into the IDT. */ |
d7e28ffe RR |
455 | void setup_default_idt_entries(struct lguest_ro_state *state, |
456 | const unsigned long *def) | |
457 | { | |
458 | unsigned int i; | |
459 | ||
460 | for (i = 0; i < ARRAY_SIZE(state->guest_idt); i++) | |
0c12091d | 461 | default_idt_entry(&state->guest_idt[i], i, def[i], NULL); |
d7e28ffe RR |
462 | } |
463 | ||
/*H:240 We don't use the IDT entries in the "struct lguest" directly, instead
 * we copy them into the IDT which we've set up for Guests on this CPU, just
 * before we run the Guest.  This routine does that copy. */
void copy_traps(const struct lg_cpu *cpu, struct desc_struct *idt,
		const unsigned long *def)
{
	unsigned int i;

	/* We can simply copy the direct traps, otherwise we use the default
	 * ones in the Switcher: they will return to the Host. */
	for (i = 0; i < ARRAY_SIZE(cpu->arch.idt); i++) {
		const struct desc_struct *gidt = &cpu->arch.idt[i];

		/* If no Guest can ever override this trap, leave it alone. */
		if (!direct_trap(i))
			continue;

		/* Only trap gates (type 15) can go direct to the Guest.
		 * Interrupt gates (type 14) disable interrupts as they are
		 * entered, which we never let the Guest do.  Not present
		 * entries (type 0x0) also can't go direct, of course.
		 *
		 * If it can't go direct, we still need to copy the priv. level:
		 * they might want to give userspace access to a software
		 * interrupt. */
		if (idt_type(gidt->a, gidt->b) == 0xF)
			idt[i] = *gidt;
		else
			default_idt_entry(&idt[i], i, def[i], gidt);
	}
}
495 | ||
/*H:200
 * The Guest Clock.
 *
 * There are two sources of virtual interrupts.  We saw one in lguest_user.c:
 * the Launcher sending interrupts for virtual devices.  The other is the Guest
 * timer interrupt.
 *
 * The Guest uses the LHCALL_SET_CLOCKEVENT hypercall to tell us how long to
 * the next timer interrupt (in nanoseconds).  We use the high-resolution timer
 * infrastructure to set a callback at that time.
 *
 * 0 means "turn off the clock". */
void guest_set_clockevent(struct lg_cpu *cpu, unsigned long delta)
{
	ktime_t expires;

	if (unlikely(delta == 0)) {
		/* Clock event device is shutting down. */
		hrtimer_cancel(&cpu->hrt);
		return;
	}

	/* We use wallclock time here, so the Guest might not be running for
	 * all the time between now and the timer interrupt it asked for.  This
	 * is almost always the right thing to do.  Arm the timer with an
	 * absolute expiry "delta" nanoseconds from now. */
	expires = ktime_add_ns(ktime_get_real(), delta);
	hrtimer_start(&cpu->hrt, expires, HRTIMER_MODE_ABS);
}
524 | ||
/* This is the function called when the Guest's timer expires: it marks the
 * timer interrupt pending and pokes the Guest's task so it gets delivered. */
static enum hrtimer_restart clockdev_fn(struct hrtimer *timer)
{
	struct lg_cpu *cpu = container_of(timer, struct lg_cpu, hrt);

	/* Remember the first interrupt is the timer interrupt. */
	set_bit(0, cpu->irqs_pending);
	/* Guest may be stopped or running on another CPU. */
	if (!wake_up_process(cpu->tsk))
		kick_process(cpu->tsk);
	/* One-shot: the Guest re-arms it with another hypercall. */
	return HRTIMER_NORESTART;
}
537 | ||
/* This sets up the timer for this Guest: an absolute-time high-resolution
 * timer on the realtime clock, whose expiry handler is clockdev_fn(). */
void init_clockdev(struct lg_cpu *cpu)
{
	hrtimer_init(&cpu->hrt, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	cpu->hrt.function = clockdev_fn;
}