drivers/lguest/interrupts_and_traps.c
/*P:800
 * Interrupts (traps) are complicated enough to earn their own file.
 * There are three classes of interrupts:
 *
 * 1) Real hardware interrupts which occur while we're running the Guest,
 * 2) Interrupts for virtual devices attached to the Guest, and
 * 3) Traps and faults from the Guest.
 *
 * Real hardware interrupts must be delivered to the Host, not the Guest.
 * Virtual interrupts must be delivered to the Guest, but we make them look
 * just like real hardware would deliver them. Traps from the Guest can be set
 * up to go directly back into the Guest, but sometimes the Host wants to see
 * them first, so we also have a way of "reflecting" them into the Guest as if
 * they had been delivered to it directly.
:*/
#include <linux/uaccess.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include "lg.h"

/* Allow Guests to use a non-128 (ie. non-Linux) syscall trap. */
static unsigned int syscall_vector = SYSCALL_VECTOR;
module_param(syscall_vector, uint, 0444);
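
/*
 * A usage sketch (not part of the original file): with lguest built as the
 * usual "lg" module, a Plan 9 style Guest could be accommodated by loading
 * the module with something like "modprobe lg syscall_vector=64" (or
 * lg.syscall_vector=64 on the kernel command line if built in).
 * init_interrupts() below then reserves that vector on the Host, and
 * could_be_syscall() accepts it from the Guest.
 */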

/* The address of the interrupt handler is split into two parts: */
static unsigned long idt_address(u32 lo, u32 hi)
{
        return (lo & 0x0000FFFF) | (hi & 0xFFFF0000);
}

/*
 * The "type" of the interrupt handler is a 4 bit field: we only support a
 * couple of types.
 */
static int idt_type(u32 lo, u32 hi)
{
        return (hi >> 8) & 0xF;
}

/* An IDT entry can't be used unless the "present" bit is set. */
static bool idt_present(u32 lo, u32 hi)
{
        return (hi & 0x8000);
}
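
/*
 * A worked example (illustrative values, not from the source): a present
 * trap gate with handler address 0xC0123456, type 0xF and DPL 1 arrives as
 *
 *     lo = (selector << 16) | 0x3456                        (handler bits 15:0)
 *     hi = 0xC0120000 | 0x8000 | (1 << 13) | (0xF << 8) == 0xC012AF00
 *
 * so idt_address(lo, hi) == 0xC0123456, idt_type(lo, hi) == 0xF and
 * idt_present(lo, hi) is true.
 */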

/*
 * We need a helper to "push" a value onto the Guest's stack, since that's a
 * big part of what delivering an interrupt does.
 */
static void push_guest_stack(struct lg_cpu *cpu, unsigned long *gstack, u32 val)
{
        /* The stack grows downwards: move the pointer, then write the value. */
        *gstack -= 4;
        lgwrite(cpu, *gstack, u32, val);
}

/*H:210
 * The set_guest_interrupt() routine actually delivers the interrupt or
 * trap. The mechanics of delivering traps and interrupts to the Guest are the
 * same, except some traps have an "error code" which gets pushed onto the
 * stack as well: the caller tells us if this is one.
 *
 * "lo" and "hi" are the two parts of the Interrupt Descriptor Table for this
 * interrupt or trap. It's split into two parts for traditional reasons: gcc
 * on i386 used to be frightened by 64 bit numbers.
 *
 * We set up the stack just like the CPU does for a real interrupt, so it's
 * identical for the Guest (and the standard "iret" instruction will undo
 * it).
 */
static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi,
                                bool has_err)
{
        unsigned long gstack, origstack;
        u32 eflags, ss, irq_enable;
        unsigned long virtstack;

        /*
         * There are two cases for interrupts: one where the Guest is already
         * in the kernel, and a more complex one where the Guest is in
         * userspace. We check the privilege level to find out.
         */
        if ((cpu->regs->ss&0x3) != GUEST_PL) {
                /*
                 * The Guest told us their kernel stack with the SET_STACK
                 * hypercall: both the virtual address and the segment.
                 */
                virtstack = cpu->esp1;
                ss = cpu->ss1;

                origstack = gstack = guest_pa(cpu, virtstack);
                /*
                 * We push the old stack segment and pointer onto the new
                 * stack: when the Guest does an "iret" back from the interrupt
                 * handler the CPU will notice they're dropping privilege
                 * levels and expect these here.
                 */
                push_guest_stack(cpu, &gstack, cpu->regs->ss);
                push_guest_stack(cpu, &gstack, cpu->regs->esp);
        } else {
                /* We're staying on the same Guest (kernel) stack. */
                virtstack = cpu->regs->esp;
                ss = cpu->regs->ss;

                origstack = gstack = guest_pa(cpu, virtstack);
        }

        /*
         * Remember that we never let the Guest actually disable interrupts, so
         * the "Interrupt Flag" bit is always set. We copy that bit from the
         * Guest's "irq_enabled" field into the eflags word: we saw the Guest
         * copy it back in "lguest_iret".
         */
        eflags = cpu->regs->eflags;
        if (get_user(irq_enable, &cpu->lg->lguest_data->irq_enabled) == 0
            && !(irq_enable & X86_EFLAGS_IF))
                eflags &= ~X86_EFLAGS_IF;

        /*
         * An interrupt is expected to push three things on the stack: the old
         * "eflags" word, the old code segment, and the old instruction
         * pointer.
         */
        push_guest_stack(cpu, &gstack, eflags);
        push_guest_stack(cpu, &gstack, cpu->regs->cs);
        push_guest_stack(cpu, &gstack, cpu->regs->eip);

        /* For the traps which supply an error code, we push that, too. */
        if (has_err)
                push_guest_stack(cpu, &gstack, cpu->regs->errcode);

        /*
         * Now we've pushed all the old state, we change the stack, the code
         * segment and the address to execute.
         */
        cpu->regs->ss = ss;
        cpu->regs->esp = virtstack + (gstack - origstack);
        cpu->regs->cs = (__KERNEL_CS|GUEST_PL);
        cpu->regs->eip = idt_address(lo, hi);

        /*
         * There are two kinds of interrupt handlers: 0xE is an "interrupt
         * gate" which expects interrupts to be disabled on entry, and 0xF is
         * a "trap gate" which leaves the interrupt state alone.
         */
        if (idt_type(lo, hi) == 0xE)
                if (put_user(0, &cpu->lg->lguest_data->irq_enabled))
                        kill_guest(cpu, "Disabling interrupts");
}
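
/*
 * For reference (a sketch, not in the original source): after
 * set_guest_interrupt() the Guest kernel stack looks just like a real x86
 * interrupt frame.  For the userspace-to-kernel case with an error code,
 * from higher to lower addresses:
 *
 *     old ss, old esp, eflags, cs, eip, error code   <- new esp points here
 *
 * The same-privilege case omits the old ss/esp pair, and traps without an
 * error code omit the final word.  A handler pops the error code (if any)
 * and a standard "iret" unwinds the rest.
 */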

/*H:205
 * Virtual Interrupts.
 *
 * interrupt_pending() returns the first pending interrupt which isn't blocked
 * by the Guest. It is called before every entry to the Guest, and just before
 * we go to sleep when the Guest has halted itself.
 */
unsigned int interrupt_pending(struct lg_cpu *cpu, bool *more)
{
        unsigned int irq;
        DECLARE_BITMAP(blk, LGUEST_IRQS);

        /* If the Guest hasn't even initialized yet, we can do nothing. */
        if (!cpu->lg->lguest_data)
                return LGUEST_IRQS;

        /*
         * Take our "irqs_pending" array and remove any interrupts the Guest
         * wants blocked: the result ends up in "blk".
         */
        if (copy_from_user(&blk, cpu->lg->lguest_data->blocked_interrupts,
                           sizeof(blk)))
                return LGUEST_IRQS;
        bitmap_andnot(blk, cpu->irqs_pending, blk, LGUEST_IRQS);

        /* Find the first interrupt. */
        irq = find_first_bit(blk, LGUEST_IRQS);
        /* True if another interrupt is still pending after this one. */
        *more = (find_next_bit(blk, LGUEST_IRQS, irq+1) != LGUEST_IRQS);

        return irq;
}
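
/*
 * A small worked example (illustrative numbers): if irqs_pending has bits 1
 * and 4 set and the Guest has blocked interrupt 1, bitmap_andnot() leaves
 * only bit 4 in "blk", so interrupt_pending() returns 4 with *more false.
 * When nothing deliverable is pending it returns LGUEST_IRQS as a "none"
 * marker, which callers must check before using the value as an index.
 */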

/*
 * This actually diverts the Guest to running an interrupt handler, once an
 * interrupt has been identified by interrupt_pending().
 */
void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq, bool more)
{
        struct desc_struct *idt;

        BUG_ON(irq >= LGUEST_IRQS);

        /*
         * They may be in the middle of an iret, where they asked us never to
         * deliver interrupts.
         */
        if (cpu->regs->eip >= cpu->lg->noirq_start &&
            (cpu->regs->eip < cpu->lg->noirq_end))
                return;

        /* If they're halted, interrupts restart them. */
        if (cpu->halted) {
                /* Re-enable interrupts. */
                if (put_user(X86_EFLAGS_IF, &cpu->lg->lguest_data->irq_enabled))
                        kill_guest(cpu, "Re-enabling interrupts");
                cpu->halted = 0;
        } else {
                /* Otherwise we check if they have interrupts disabled. */
                u32 irq_enabled;
                if (get_user(irq_enabled, &cpu->lg->lguest_data->irq_enabled))
                        irq_enabled = 0;
                if (!irq_enabled) {
                        /* Make sure they know an IRQ is pending. */
                        put_user(X86_EFLAGS_IF,
                                 &cpu->lg->lguest_data->irq_pending);
                        return;
                }
        }

        /*
         * Look at the IDT entry the Guest gave us for this interrupt. The
         * first 32 (FIRST_EXTERNAL_VECTOR) entries are for traps, so we skip
         * over them.
         */
        idt = &cpu->arch.idt[FIRST_EXTERNAL_VECTOR+irq];
        /* If they don't have a handler (yet?), we just ignore it */
        if (idt_present(idt->a, idt->b)) {
                /* OK, mark it no longer pending and deliver it. */
                clear_bit(irq, cpu->irqs_pending);
                /*
                 * set_guest_interrupt() takes the interrupt descriptor and a
                 * flag to say whether this interrupt pushes an error code onto
                 * the stack as well: virtual interrupts never do.
                 */
                set_guest_interrupt(cpu, idt->a, idt->b, false);
        }

        /*
         * Every time we deliver an interrupt, we update the timestamp in the
         * Guest's lguest_data struct. It would be better for the Guest if we
         * did this more often, but it can actually be quite slow: doing it
         * here is a compromise which means at least it gets updated every
         * timer interrupt.
         */
        write_timestamp(cpu);

        /*
         * If there are no other interrupts we want to deliver, clear
         * the pending flag.
         */
        if (!more)
                put_user(0, &cpu->lg->lguest_data->irq_pending);
}

/* And this is the routine when we want to set an interrupt for the Guest. */
void set_interrupt(struct lg_cpu *cpu, unsigned int irq)
{
        /*
         * Next time the Guest runs, the core code will see if it can deliver
         * this interrupt.
         */
        set_bit(irq, cpu->irqs_pending);

        /*
         * Make sure it sees it; it might be asleep (eg. halted), or running
         * the Guest right now, in which case kick_process() will knock it out.
         */
        if (!wake_up_process(cpu->tsk))
                kick_process(cpu->tsk);
}
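
/*
 * Both interrupt sources end up here: the timer callback clockdev_fn() at
 * the bottom of this file calls set_interrupt(cpu, 0), and the Launcher's
 * virtual device interrupts (the LHREQ_IRQ write handled in lguest_user.c)
 * presumably arrive the same way.  Only the pending bit is set here; the
 * actual delivery waits for the next trip through try_deliver_interrupt().
 */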
/*:*/

/*
 * Linux uses trap 128 for system calls. Plan9 uses 64, and Ron Minnich sent
 * me a patch, so we support that too. It'd be a big step for lguest if half
 * the Plan 9 user base were to start using it.
 *
 * Actually now I think of it, it's possible that Ron *is* half the Plan 9
 * userbase. Oh well.
 */
static bool could_be_syscall(unsigned int num)
{
        /* Normal Linux SYSCALL_VECTOR or reserved vector? */
        return num == SYSCALL_VECTOR || num == syscall_vector;
}

/* The syscall vector it wants must be unused by the Host. */
bool check_syscall_vector(struct lguest *lg)
{
        u32 vector;

        if (get_user(vector, &lg->lguest_data->syscall_vec))
                return false;

        return could_be_syscall(vector);
}

int init_interrupts(void)
{
        /* If they want some strange system call vector, reserve it now */
        if (syscall_vector != SYSCALL_VECTOR) {
                if (test_bit(syscall_vector, used_vectors) ||
                    vector_used_by_percpu_irq(syscall_vector)) {
                        printk(KERN_ERR "lg: couldn't reserve syscall %u\n",
                               syscall_vector);
                        return -EBUSY;
                }
                set_bit(syscall_vector, used_vectors);
        }

        return 0;
}

void free_interrupts(void)
{
        if (syscall_vector != SYSCALL_VECTOR)
                clear_bit(syscall_vector, used_vectors);
}

/*H:220
 * Now we've got the routines to deliver interrupts, delivering traps like
 * page fault is easy. The only trick is that Intel decided that some traps
 * should have error codes:
 */
static bool has_err(unsigned int trap)
{
        return (trap == 8 || (trap >= 10 && trap <= 14) || trap == 17);
}
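
/*
 * For reference, those are exactly the x86 exceptions which push an error
 * code: 8 (#DF double fault), 10 (#TS invalid TSS), 11 (#NP segment not
 * present), 12 (#SS stack fault), 13 (#GP general protection), 14 (#PF page
 * fault) and 17 (#AC alignment check).
 */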

/* deliver_trap() returns true if it could deliver the trap. */
bool deliver_trap(struct lg_cpu *cpu, unsigned int num)
{
        /*
         * Trap numbers are always 8 bit, but we set an impossible trap number
         * for traps inside the Switcher, so check that here.
         */
        if (num >= ARRAY_SIZE(cpu->arch.idt))
                return false;

        /*
         * Early on the Guest hasn't set the IDT entries (or maybe it put a
         * bogus one in): if we fail here, the Guest will be killed.
         */
        if (!idt_present(cpu->arch.idt[num].a, cpu->arch.idt[num].b))
                return false;
        set_guest_interrupt(cpu, cpu->arch.idt[num].a,
                            cpu->arch.idt[num].b, has_err(num));
        return true;
}

/*H:250
 * Here's the hard part: returning to the Host every time a trap happens
 * and then calling deliver_trap() and re-entering the Guest is slow.
 * Particularly because Guest userspace system calls are traps (usually trap
 * 128).
 *
 * So we'd like to set up the IDT to tell the CPU to deliver traps directly
 * into the Guest. This is possible, but the complexities cause the size of
 * this file to double! However, 150 lines of code is worth writing for taking
 * system calls down from 1750ns to 270ns. Plus, if lguest didn't do it, all
 * the other hypervisors would beat it up at lunchtime.
 *
 * This routine indicates if a particular trap number could be delivered
 * directly.
 */
static bool direct_trap(unsigned int num)
{
        /*
         * Hardware interrupts don't go to the Guest at all (except system
         * call).
         */
        if (num >= FIRST_EXTERNAL_VECTOR && !could_be_syscall(num))
                return false;

        /*
         * The Host needs to see page faults (for shadow paging and to save the
         * fault address), general protection faults (in/out emulation) and
         * device not available (TS handling), invalid opcode fault (kvm hcall),
         * and of course, the hypercall trap.
         */
        return num != 14 && num != 13 && num != 7 &&
               num != 6 && num != LGUEST_TRAP_ENTRY;
}
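
/*
 * A quick worked example: direct_trap(14) is false (page faults must reach
 * the Host's shadow page table code), direct_trap(3) is true (the int3
 * debug trap can bounce straight into the Guest), and the Guest's system
 * call vector is allowed through by the could_be_syscall() exception above
 * even though it lies at or above FIRST_EXTERNAL_VECTOR.
 */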
/*:*/

/*M:005
 * The Guest has the ability to turn its interrupt gates into trap gates,
 * if it is careful. The Host will let trap gates go directly to the Guest,
 * but the Guest needs the interrupts atomically disabled for an interrupt
 * gate. It can do this by pointing the trap gate at instructions within
 * noirq_start and noirq_end, where it can safely disable interrupts.
 */

/*M:006
 * The Guests do not use the sysenter (fast system call) instruction,
 * because it's hardcoded to enter privilege level 0 and so can't go direct.
 * It's about twice as fast as the older "int 0x80" system call, so it might
 * still be worthwhile to handle it in the Switcher and lcall down to the
 * Guest. The sysenter semantics are hairy tho: search for that keyword in
 * entry.S
:*/

/*H:260
 * When we make traps go directly into the Guest, we need to make sure
 * the kernel stack is valid (ie. mapped in the page tables). Otherwise, the
 * CPU trying to deliver the trap will fault while trying to push the interrupt
 * words on the stack: this is called a double fault, and it forces us to kill
 * the Guest.
 *
 * Which is deeply unfair, because (literally!) it wasn't the Guest's fault.
 */
void pin_stack_pages(struct lg_cpu *cpu)
{
        unsigned int i;

        /*
         * Depending on the CONFIG_4KSTACKS option, the Guest can have one or
         * two pages of stack space.
         */
        for (i = 0; i < cpu->lg->stack_pages; i++)
                /*
                 * The stack grows downwards, so the address we're given is
                 * the start of the page just above the kernel stack.
                 * Subtract one to get back onto the highest stack page, and
                 * keep subtracting to get to the rest of the stack pages.
                 */
                pin_page(cpu, cpu->esp1 - 1 - i * PAGE_SIZE);
}
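
/*
 * Worked example (made-up numbers): with esp1 == 0xC0400000 and two stack
 * pages, the loop pins the pages containing 0xC03FFFFF (i == 0) and
 * 0xC03FEFFF (i == 1), i.e. the 8KB immediately below esp1.
 */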

/*
 * Direct traps also mean that we need to know whenever the Guest wants to use
 * a different kernel stack, so we can change the IDT entries to use that
 * stack. The IDT entries expect a virtual address, so unlike most addresses
 * the Guest gives us, the "esp" (stack pointer) value here is virtual, not
 * physical.
 *
 * In Linux each process has its own kernel stack, so this happens a lot: we
 * change stacks on each context switch.
 */
void guest_set_stack(struct lg_cpu *cpu, u32 seg, u32 esp, unsigned int pages)
{
        /*
         * You're not allowed a stack segment with privilege level 0: bad Guest!
         */
        if ((seg & 0x3) != GUEST_PL)
                kill_guest(cpu, "bad stack segment %i", seg);
        /* We only expect one or two stack pages. */
        if (pages > 2)
                kill_guest(cpu, "bad stack pages %u", pages);
        /* Save where the stack is, and how many pages */
        cpu->ss1 = seg;
        cpu->esp1 = esp;
        cpu->lg->stack_pages = pages;
        /* Make sure the new stack pages are mapped */
        pin_stack_pages(cpu);
}

/*
 * All this reference to mapping stacks leads us neatly into the other complex
 * part of the Host: page table handling.
 */

/*H:235
 * This is the routine which actually checks the Guest's IDT entry and
 * transfers it into the entry in "struct lguest":
 */
static void set_trap(struct lg_cpu *cpu, struct desc_struct *trap,
                     unsigned int num, u32 lo, u32 hi)
{
        u8 type = idt_type(lo, hi);

        /* We zero-out a not-present entry */
        if (!idt_present(lo, hi)) {
                trap->a = trap->b = 0;
                return;
        }

        /* We only support interrupt and trap gates. */
        if (type != 0xE && type != 0xF)
                kill_guest(cpu, "bad IDT type %i", type);

        /*
         * We only copy the handler address, present bit, privilege level and
         * type. The privilege level controls where the trap can be triggered
         * manually with an "int" instruction. This is usually GUEST_PL,
         * except for system calls which userspace can use.
         */
        trap->a = ((__KERNEL_CS|GUEST_PL)<<16) | (lo&0x0000FFFF);
        trap->b = (hi&0xFFFFEF00);
}
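
/*
 * Continuing the worked example from idt_address() above: for a present
 * DPL-1 trap gate aimed at 0xC0123456 the Guest's "hi" word is 0xC012AF00,
 * so the copy we keep becomes
 *
 *     trap->a = ((__KERNEL_CS|GUEST_PL) << 16) | 0x3456
 *     trap->b = 0xC012AF00 & 0xFFFFEF00
 *
 * i.e. the selector is forced to the Host-chosen __KERNEL_CS|GUEST_PL and
 * the low flag byte (plus descriptor bit 12, which must be zero for a gate
 * anyway) is scrubbed, while the handler address, type, DPL and present bit
 * survive.
 */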

/*H:230
 * While we're here, dealing with delivering traps and interrupts to the
 * Guest, we might as well complete the picture: how the Guest tells us where
 * it wants them to go. This would be simple, except making traps fast
 * requires some tricks.
 *
 * We saw the Guest setting Interrupt Descriptor Table (IDT) entries with the
 * LHCALL_LOAD_IDT_ENTRY hypercall before: that comes here.
 */
void load_guest_idt_entry(struct lg_cpu *cpu, unsigned int num, u32 lo, u32 hi)
{
        /*
         * Guest never handles: NMI, doublefault, spurious interrupt or
         * hypercall. We ignore any attempt to set those entries.
         */
        if (num == 2 || num == 8 || num == 15 || num == LGUEST_TRAP_ENTRY)
                return;

        /*
         * Mark the IDT as changed: next time the Guest runs we'll know we have
         * to copy this again.
         */
        cpu->changed |= CHANGED_IDT;

        /* Check that the Guest doesn't try to step outside the bounds. */
        if (num >= ARRAY_SIZE(cpu->arch.idt))
                kill_guest(cpu, "Setting idt entry %u", num);
        else
                set_trap(cpu, &cpu->arch.idt[num], num, lo, hi);
}

/*
 * The default entry for each interrupt points into the Switcher routines which
 * simply return to the Host. The run_guest() loop will then call
 * deliver_trap() to bounce it back into the Guest.
 */
static void default_idt_entry(struct desc_struct *idt,
                              int trap,
                              const unsigned long handler,
                              const struct desc_struct *base)
{
        /* A present interrupt gate. */
        u32 flags = 0x8e00;

        /*
         * Set the privilege level on the entry for the hypercall: this allows
         * the Guest to use the "int" instruction to trigger it.
         */
        if (trap == LGUEST_TRAP_ENTRY)
                flags |= (GUEST_PL << 13);
        else if (base)
                /*
                 * Copy privilege level from what Guest asked for. This allows
                 * debug (int 3) traps from Guest userspace, for example.
                 */
                flags |= (base->b & 0x6000);

        /* Now pack it into the IDT entry in its weird format. */
        idt->a = (LGUEST_CS<<16) | (handler&0x0000FFFF);
        idt->b = (handler&0xFFFF0000) | flags;
}
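
/*
 * For example, assuming GUEST_PL is 1 as in the lguest headers, the default
 * hypercall entry gets flags = 0x8e00 | (1 << 13) = 0xae00: a present
 * (0x8000) interrupt gate (type 0xE << 8) with DPL 1, so the Guest kernel
 * can trigger it with "int", but Guest userspace cannot.  Every other
 * default entry keeps DPL 0 unless the Guest's own entry ("base") asked for
 * more.
 */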

/* When the Guest first starts, we put default entries into the IDT. */
void setup_default_idt_entries(struct lguest_ro_state *state,
                               const unsigned long *def)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(state->guest_idt); i++)
                default_idt_entry(&state->guest_idt[i], i, def[i], NULL);
}

/*H:240
 * We don't use the IDT entries in the "struct lguest" directly; instead we
 * copy them into the IDT which we've set up for Guests on this CPU, just
 * before we run the Guest. This routine does that copy.
 */
void copy_traps(const struct lg_cpu *cpu, struct desc_struct *idt,
                const unsigned long *def)
{
        unsigned int i;

        /*
         * We can simply copy the direct traps, otherwise we use the default
         * ones in the Switcher: they will return to the Host.
         */
        for (i = 0; i < ARRAY_SIZE(cpu->arch.idt); i++) {
                const struct desc_struct *gidt = &cpu->arch.idt[i];

                /* If no Guest can ever override this trap, leave it alone. */
                if (!direct_trap(i))
                        continue;

                /*
                 * Only trap gates (type 15) can go direct to the Guest.
                 * Interrupt gates (type 14) disable interrupts as they are
                 * entered, which we never let the Guest do. Not present
                 * entries (type 0x0) also can't go direct, of course.
                 *
                 * If it can't go direct, we still need to copy the priv. level:
                 * they might want to give userspace access to a software
                 * interrupt.
                 */
                if (idt_type(gidt->a, gidt->b) == 0xF)
                        idt[i] = *gidt;
                else
                        default_idt_entry(&idt[i], i, def[i], gidt);
        }
}

/*H:200
 * The Guest Clock.
 *
 * There are two sources of virtual interrupts. We saw one in lguest_user.c:
 * the Launcher sending interrupts for virtual devices. The other is the Guest
 * timer interrupt.
 *
 * The Guest uses the LHCALL_SET_CLOCKEVENT hypercall to tell us how long until
 * the next timer interrupt (in nanoseconds). We use the high-resolution timer
 * infrastructure to set a callback at that time.
 *
 * 0 means "turn off the clock".
 */
void guest_set_clockevent(struct lg_cpu *cpu, unsigned long delta)
{
        ktime_t expires;

        if (unlikely(delta == 0)) {
                /* Clock event device is shutting down. */
                hrtimer_cancel(&cpu->hrt);
                return;
        }

        /*
         * We use wallclock time here, so the Guest might not be running for
         * all the time between now and the timer interrupt it asked for. This
         * is almost always the right thing to do.
         */
        expires = ktime_add_ns(ktime_get_real(), delta);
        hrtimer_start(&cpu->hrt, expires, HRTIMER_MODE_ABS);
}
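
/*
 * A concrete example (illustrative numbers): if the Guest's clockevent code
 * wants a tick in 1ms, it issues LHCALL_SET_CLOCKEVENT with delta set to
 * 1000000.  Roughly a millisecond of wall time later the hrtimer fires,
 * clockdev_fn() below calls set_interrupt(cpu, 0), and the Guest sees its
 * timer interrupt on its next entry.  A delta of 0 cancels the timer.
 */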

/* This is the function called when the Guest's timer expires. */
static enum hrtimer_restart clockdev_fn(struct hrtimer *timer)
{
        struct lg_cpu *cpu = container_of(timer, struct lg_cpu, hrt);

        /* Remember the first interrupt is the timer interrupt. */
        set_interrupt(cpu, 0);
        return HRTIMER_NORESTART;
}

/* This sets up the timer for this Guest. */
void init_clockdev(struct lg_cpu *cpu)
{
        hrtimer_init(&cpu->hrt, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        cpu->hrt.function = clockdev_fn;
}