KVM: Portability: Move some includes to x86.c
drivers/kvm/x86.c

/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * derived from drivers/kvm/kvm_main.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "kvm.h"
#include "x86.h"
#include "x86_emulate.h"
#include "segment_descriptor.h"
#include "irq.h"

#include <linux/kvm.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/module.h>

#include <asm/uaccess.h>
#include <asm/msr.h>

#define MAX_IO_MSRS 256
#define CR0_RESERVED_BITS						\
	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
#define CR4_RESERVED_BITS						\
	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE	\
			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR	\
			  | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))

#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
#define EFER_RESERVED_BITS 0xfffffffffffff2fe

#define STAT_OFFSET(x) offsetof(struct kvm_vcpu, stat.x)

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "pf_fixed", STAT_OFFSET(pf_fixed) },
	{ "pf_guest", STAT_OFFSET(pf_guest) },
	{ "tlb_flush", STAT_OFFSET(tlb_flush) },
	{ "invlpg", STAT_OFFSET(invlpg) },
	{ "exits", STAT_OFFSET(exits) },
	{ "io_exits", STAT_OFFSET(io_exits) },
	{ "mmio_exits", STAT_OFFSET(mmio_exits) },
	{ "signal_exits", STAT_OFFSET(signal_exits) },
	{ "irq_window", STAT_OFFSET(irq_window_exits) },
	{ "halt_exits", STAT_OFFSET(halt_exits) },
	{ "halt_wakeup", STAT_OFFSET(halt_wakeup) },
	{ "request_irq", STAT_OFFSET(request_irq_exits) },
	{ "irq_exits", STAT_OFFSET(irq_exits) },
	{ "light_exits", STAT_OFFSET(light_exits) },
	{ "efer_reload", STAT_OFFSET(efer_reload) },
	{ NULL }
};

unsigned long segment_base(u16 selector)
{
	struct descriptor_table gdt;
	struct segment_descriptor *d;
	unsigned long table_base;
	unsigned long v;

	if (selector == 0)
		return 0;

	asm("sgdt %0" : "=m"(gdt));
	table_base = gdt.base;

	if (selector & 4) {		/* from ldt */
		u16 ldt_selector;

		asm("sldt %0" : "=g"(ldt_selector));
		table_base = segment_base(ldt_selector);
	}
	d = (struct segment_descriptor *)(table_base + (selector & ~7));
	v = d->base_low | ((unsigned long)d->base_mid << 16) |
		((unsigned long)d->base_high << 24);
#ifdef CONFIG_X86_64
	if (d->system == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
		v |= ((unsigned long) \
			((struct segment_descriptor_64 *)d)->base_higher) << 32;
#endif
	return v;
}
EXPORT_SYMBOL_GPL(segment_base);
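
/*
 * Worked example (illustrative only): selector 0x2b decodes as
 * RPL = 3 (bits 0-1) and TI = 1 (bit 2), so the lookup above goes
 * through the LDT, and "selector & ~7" = 0x28 is the byte offset of
 * descriptor index 5, each descriptor being 8 bytes.
 */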

u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
{
	/*
	 * Whether the irqchip is in the kernel or in userspace, the
	 * cached value lives in vcpu->apic_base either way.
	 */
	return vcpu->apic_base;
}
EXPORT_SYMBOL_GPL(kvm_get_apic_base);

void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
{
	/* TODO: reserve bits check */
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_lapic_set_base(vcpu, data);
	else
		vcpu->apic_base = data;
}
EXPORT_SYMBOL_GPL(kvm_set_apic_base);

static void inject_gp(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->inject_gp(vcpu, 0);
}

/*
 * Load the pae pdptrs.  Return true if they are all valid.
 */
int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
	unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
	int i;
	int ret;
	u64 pdpte[ARRAY_SIZE(vcpu->pdptrs)];

	mutex_lock(&vcpu->kvm->lock);
	ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
				  offset * sizeof(u64), sizeof(pdpte));
	if (ret < 0) {
		ret = 0;
		goto out;
	}
	for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
		if ((pdpte[i] & 1) && (pdpte[i] & 0xfffffff0000001e6ull)) {
			ret = 0;
			goto out;
		}
	}
	ret = 1;

	memcpy(vcpu->pdptrs, pdpte, sizeof(vcpu->pdptrs));
out:
	mutex_unlock(&vcpu->kvm->lock);

	return ret;
}
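
/*
 * Worked example (illustrative only): in PAE mode cr3 is 32-byte
 * aligned and points at a 4-entry pdpt.  For cr3 = 0x12345020:
 *
 *	pdpt_gfn = 0x12345020 >> 12            = 0x12345
 *	offset   = ((0x020 & 0xfff) >> 5) << 2 = 4
 *
 * so the four pdptes are read starting at byte offset
 * 4 * sizeof(u64) = 32 within the page, matching cr3's low bits.
 */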

void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	if (cr0 & CR0_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
		       cr0, vcpu->cr0);
		inject_gp(vcpu);
		return;
	}

	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
		printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
		inject_gp(vcpu);
		return;
	}

	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
		printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
		       "and a clear PE flag\n");
		inject_gp(vcpu);
		return;
	}

	if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
#ifdef CONFIG_X86_64
		if ((vcpu->shadow_efer & EFER_LME)) {
			int cs_db, cs_l;

			if (!is_pae(vcpu)) {
				printk(KERN_DEBUG "set_cr0: #GP, start paging "
				       "in long mode while PAE is disabled\n");
				inject_gp(vcpu);
				return;
			}
			kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
			if (cs_l) {
				printk(KERN_DEBUG "set_cr0: #GP, start paging "
				       "in long mode while CS.L == 1\n");
				inject_gp(vcpu);
				return;
			}
		} else
#endif
		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->cr3)) {
			printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
			       "reserved bits\n");
			inject_gp(vcpu);
			return;
		}

	}

	kvm_x86_ops->set_cr0(vcpu, cr0);
	vcpu->cr0 = cr0;

	mutex_lock(&vcpu->kvm->lock);
	kvm_mmu_reset_context(vcpu);
	mutex_unlock(&vcpu->kvm->lock);
}
EXPORT_SYMBOL_GPL(set_cr0);

void lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
	set_cr0(vcpu, (vcpu->cr0 & ~0x0ful) | (msw & 0x0f));
}
EXPORT_SYMBOL_GPL(lmsw);

void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	if (cr4 & CR4_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
		inject_gp(vcpu);
		return;
	}

	if (is_long_mode(vcpu)) {
		if (!(cr4 & X86_CR4_PAE)) {
			printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
			       "in long mode\n");
			inject_gp(vcpu);
			return;
		}
	} else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & X86_CR4_PAE)
		   && !load_pdptrs(vcpu, vcpu->cr3)) {
		printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
		inject_gp(vcpu);
		return;
	}

	if (cr4 & X86_CR4_VMXE) {
		printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
		inject_gp(vcpu);
		return;
	}
	kvm_x86_ops->set_cr4(vcpu, cr4);
	vcpu->cr4 = cr4;
	mutex_lock(&vcpu->kvm->lock);
	kvm_mmu_reset_context(vcpu);
	mutex_unlock(&vcpu->kvm->lock);
}
EXPORT_SYMBOL_GPL(set_cr4);

void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	if (is_long_mode(vcpu)) {
		if (cr3 & CR3_L_MODE_RESERVED_BITS) {
			printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
			inject_gp(vcpu);
			return;
		}
	} else {
		if (is_pae(vcpu)) {
			if (cr3 & CR3_PAE_RESERVED_BITS) {
				printk(KERN_DEBUG
				       "set_cr3: #GP, reserved bits\n");
				inject_gp(vcpu);
				return;
			}
			if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
				printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
				       "reserved bits\n");
				inject_gp(vcpu);
				return;
			}
		}
		/*
		 * We don't check reserved bits in nonpae mode, because
		 * this isn't enforced, and VMware depends on this.
		 */
	}

	mutex_lock(&vcpu->kvm->lock);
	/*
	 * Does the new cr3 value map to physical memory? (Note, we
	 * catch an invalid cr3 even in real-mode, because it would
	 * cause trouble later on when we turn on paging anyway.)
	 *
	 * A real CPU would silently accept an invalid cr3 and would
	 * attempt to use it - with largely undefined (and often hard
	 * to debug) behavior on the guest side.
	 */
	if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
		inject_gp(vcpu);
	else {
		vcpu->cr3 = cr3;
		vcpu->mmu.new_cr3(vcpu);
	}
	mutex_unlock(&vcpu->kvm->lock);
}
EXPORT_SYMBOL_GPL(set_cr3);

void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
	if (cr8 & CR8_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
		inject_gp(vcpu);
		return;
	}
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_lapic_set_tpr(vcpu, cr8);
	else
		vcpu->cr8 = cr8;
}
EXPORT_SYMBOL_GPL(set_cr8);

unsigned long get_cr8(struct kvm_vcpu *vcpu)
{
	if (irqchip_in_kernel(vcpu->kvm))
		return kvm_lapic_get_cr8(vcpu);
	else
		return vcpu->cr8;
}
EXPORT_SYMBOL_GPL(get_cr8);

/*
 * List of msr numbers which we expose to userspace through KVM_GET_MSRS,
 * KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
 *
 * This list is modified at module load time to reflect the
 * capabilities of the host cpu.
 */
static u32 msrs_to_save[] = {
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
	MSR_K6_STAR,
#ifdef CONFIG_X86_64
	MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
	MSR_IA32_TIME_STAMP_COUNTER,
};

static unsigned num_msrs_to_save;

static u32 emulated_msrs[] = {
	MSR_IA32_MISC_ENABLE,
};

#ifdef CONFIG_X86_64

static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	if (efer & EFER_RESERVED_BITS) {
		printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
		       efer);
		inject_gp(vcpu);
		return;
	}

	if (is_paging(vcpu)
	    && (vcpu->shadow_efer & EFER_LME) != (efer & EFER_LME)) {
		printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
		inject_gp(vcpu);
		return;
	}

	kvm_x86_ops->set_efer(vcpu, efer);

	efer &= ~EFER_LMA;
	efer |= vcpu->shadow_efer & EFER_LMA;

	vcpu->shadow_efer = efer;
}

#endif

/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
	return kvm_x86_ops->set_msr(vcpu, msr_index, data);
}

/*
 * Adapt set_msr() to msr_io()'s calling convention
 */
static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
	return kvm_set_msr(vcpu, index, *data);
}

int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	switch (msr) {
#ifdef CONFIG_X86_64
	case MSR_EFER:
		set_efer(vcpu, data);
		break;
#endif
	case MSR_IA32_MC0_STATUS:
		pr_unimpl(vcpu, "%s: MSR_IA32_MC0_STATUS 0x%llx, nop\n",
			  __FUNCTION__, data);
		break;
	case MSR_IA32_MCG_STATUS:
		pr_unimpl(vcpu, "%s: MSR_IA32_MCG_STATUS 0x%llx, nop\n",
			  __FUNCTION__, data);
		break;
	case MSR_IA32_UCODE_REV:
	case MSR_IA32_UCODE_WRITE:
	case 0x200 ... 0x2ff: /* MTRRs */
		break;
	case MSR_IA32_APICBASE:
		kvm_set_apic_base(vcpu, data);
		break;
	case MSR_IA32_MISC_ENABLE:
		vcpu->ia32_misc_enable_msr = data;
		break;
	default:
		pr_unimpl(vcpu, "unhandled wrmsr: 0x%x\n", msr);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_msr_common);

/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
	return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
}

int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 data;

	switch (msr) {
	case 0xc0010010: /* SYSCFG */
	case 0xc0010015: /* HWCR */
	case MSR_IA32_PLATFORM_ID:
	case MSR_IA32_P5_MC_ADDR:
	case MSR_IA32_P5_MC_TYPE:
	case MSR_IA32_MC0_CTL:
	case MSR_IA32_MCG_STATUS:
	case MSR_IA32_MCG_CAP:
	case MSR_IA32_MC0_MISC:
	case MSR_IA32_MC0_MISC+4:
	case MSR_IA32_MC0_MISC+8:
	case MSR_IA32_MC0_MISC+12:
	case MSR_IA32_MC0_MISC+16:
	case MSR_IA32_UCODE_REV:
	case MSR_IA32_PERF_STATUS:
	case MSR_IA32_EBL_CR_POWERON:
	/* MTRR registers */
	case 0xfe:
	case 0x200 ... 0x2ff:
		data = 0;
		break;
	case 0xcd: /* fsb frequency */
		data = 3;
		break;
	case MSR_IA32_APICBASE:
		data = kvm_get_apic_base(vcpu);
		break;
	case MSR_IA32_MISC_ENABLE:
		data = vcpu->ia32_misc_enable_msr;
		break;
#ifdef CONFIG_X86_64
	case MSR_EFER:
		data = vcpu->shadow_efer;
		break;
#endif
	default:
		pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
		return 1;
	}
	*pdata = data;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_msr_common);

/*
 * Read or write a bunch of msrs.  All parameters are kernel addresses.
 *
 * @return number of msrs set successfully.
 */
static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
		    struct kvm_msr_entry *entries,
		    int (*do_msr)(struct kvm_vcpu *vcpu,
				  unsigned index, u64 *data))
{
	int i;

	vcpu_load(vcpu);

	for (i = 0; i < msrs->nmsrs; ++i)
		if (do_msr(vcpu, entries[i].index, &entries[i].data))
			break;

	vcpu_put(vcpu);

	return i;
}

/*
 * Read or write a bunch of msrs.  Parameters are user addresses.
 *
 * @return number of msrs set successfully.
 */
static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
		  int (*do_msr)(struct kvm_vcpu *vcpu,
				unsigned index, u64 *data),
		  int writeback)
{
	struct kvm_msrs msrs;
	struct kvm_msr_entry *entries;
	int r, n;
	unsigned size;

	r = -EFAULT;
	if (copy_from_user(&msrs, user_msrs, sizeof msrs))
		goto out;

	r = -E2BIG;
	if (msrs.nmsrs >= MAX_IO_MSRS)
		goto out;

	r = -ENOMEM;
	size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
	entries = vmalloc(size);
	if (!entries)
		goto out;

	r = -EFAULT;
	if (copy_from_user(entries, user_msrs->entries, size))
		goto out_free;

	r = n = __msr_io(vcpu, &msrs, entries, do_msr);
	if (r < 0)
		goto out_free;

	r = -EFAULT;
	if (writeback && copy_to_user(user_msrs->entries, entries, size))
		goto out_free;

	r = n;

out_free:
	vfree(entries);
out:
	return r;
}
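
/*
 * Illustrative userspace sketch (not part of this file; vcpu_fd is an
 * assumed open vcpu file descriptor):
 *
 *	struct {
 *		struct kvm_msrs hdr;
 *		struct kvm_msr_entry entries[1];
 *	} req = { .hdr = { .nmsrs = 1 } };
 *
 *	req.entries[0].index = MSR_IA32_MISC_ENABLE;
 *	if (ioctl(vcpu_fd, KVM_GET_MSRS, &req) == 1)
 *		printf("misc_enable = 0x%llx\n",
 *		       (unsigned long long)req.entries[0].data);
 *
 * The ioctl return value is the number of msrs actually processed, as
 * computed by __msr_io() above.
 */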

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_GET_MSR_INDEX_LIST: {
		struct kvm_msr_list __user *user_msr_list = argp;
		struct kvm_msr_list msr_list;
		unsigned n;

		r = -EFAULT;
		if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
			goto out;
		n = msr_list.nmsrs;
		msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
		if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
			goto out;
		r = -E2BIG;
		if (n < num_msrs_to_save)
			goto out;
		r = -EFAULT;
		if (copy_to_user(user_msr_list->indices, &msrs_to_save,
				 num_msrs_to_save * sizeof(u32)))
			goto out;
		if (copy_to_user(user_msr_list->indices
				 + num_msrs_to_save * sizeof(u32),
				 &emulated_msrs,
				 ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
			goto out;
		r = 0;
		break;
	}
	default:
		r = -EINVAL;
	}
out:
	return r;
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	kvm_x86_ops->vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->vcpu_put(vcpu);
}

static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
{
	u64 efer;
	int i;
	struct kvm_cpuid_entry *e, *entry;

	rdmsrl(MSR_EFER, efer);
	entry = NULL;
	for (i = 0; i < vcpu->cpuid_nent; ++i) {
		e = &vcpu->cpuid_entries[i];
		if (e->function == 0x80000001) {
			entry = e;
			break;
		}
	}
	if (entry && (entry->edx & (1 << 20)) && !(efer & EFER_NX)) {
		entry->edx &= ~(1 << 20);
		printk(KERN_INFO "kvm: guest NX capability removed\n");
	}
}

static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
				    struct kvm_cpuid *cpuid,
				    struct kvm_cpuid_entry __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	r = -EFAULT;
	if (copy_from_user(&vcpu->cpuid_entries, entries,
			   cpuid->nent * sizeof(struct kvm_cpuid_entry)))
		goto out;
	vcpu->cpuid_nent = cpuid->nent;
	cpuid_fix_nx_cap(vcpu);
	return 0;

out:
	return r;
}

static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
				    struct kvm_lapic_state *s)
{
	vcpu_load(vcpu);
	memcpy(s->regs, vcpu->apic->regs, sizeof *s);
	vcpu_put(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
				    struct kvm_lapic_state *s)
{
	vcpu_load(vcpu);
	memcpy(vcpu->apic->regs, s->regs, sizeof *s);
	kvm_apic_post_state_restore(vcpu);
	vcpu_put(vcpu);

	return 0;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_GET_LAPIC: {
		struct kvm_lapic_state lapic;

		memset(&lapic, 0, sizeof lapic);
		r = kvm_vcpu_ioctl_get_lapic(vcpu, &lapic);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &lapic, sizeof lapic))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_LAPIC: {
		struct kvm_lapic_state lapic;

		r = -EFAULT;
		if (copy_from_user(&lapic, argp, sizeof lapic))
			goto out;
		r = kvm_vcpu_ioctl_set_lapic(vcpu, &lapic);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_CPUID: {
		struct kvm_cpuid __user *cpuid_arg = argp;
		struct kvm_cpuid cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_MSRS:
		r = msr_io(vcpu, argp, kvm_get_msr, 1);
		break;
	case KVM_SET_MSRS:
		r = msr_io(vcpu, argp, do_set_msr, 0);
		break;
	default:
		r = -EINVAL;
	}
out:
	return r;
}

static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
{
	int ret;

	if (addr > (unsigned int)(-3 * PAGE_SIZE))
		return -1;
	ret = kvm_x86_ops->set_tss_addr(kvm, addr);
	return ret;
}

static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
					 u32 kvm_nr_mmu_pages)
{
	if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
		return -EINVAL;

	mutex_lock(&kvm->lock);

	kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
	kvm->n_requested_mmu_pages = kvm_nr_mmu_pages;

	mutex_unlock(&kvm->lock);
	return 0;
}

static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
{
	return kvm->n_alloc_mmu_pages;
}

/*
 * Set a new alias region.  Aliases map a portion of physical memory into
 * another portion.  This is useful for memory windows, for example the PC
 * VGA region.
 */
static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
					 struct kvm_memory_alias *alias)
{
	int r, n;
	struct kvm_mem_alias *p;

	r = -EINVAL;
	/* General sanity checks */
	if (alias->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (alias->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (alias->slot >= KVM_ALIAS_SLOTS)
		goto out;
	if (alias->guest_phys_addr + alias->memory_size
	    < alias->guest_phys_addr)
		goto out;
	if (alias->target_phys_addr + alias->memory_size
	    < alias->target_phys_addr)
		goto out;

	mutex_lock(&kvm->lock);

	p = &kvm->aliases[alias->slot];
	p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
	p->npages = alias->memory_size >> PAGE_SHIFT;
	p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;

	for (n = KVM_ALIAS_SLOTS; n > 0; --n)
		if (kvm->aliases[n - 1].npages)
			break;
	kvm->naliases = n;

	kvm_mmu_zap_all(kvm);

	mutex_unlock(&kvm->lock);

	return 0;

out:
	return r;
}
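
/*
 * Illustrative example: to alias the legacy VGA window at 0xa0000 onto
 * backing memory at an assumed 0x81000000, userspace would pass:
 *
 *	struct kvm_memory_alias alias = {
 *		.slot             = 0,
 *		.guest_phys_addr  = 0xa0000,
 *		.memory_size      = 0x20000,
 *		.target_phys_addr = 0x81000000,
 *	};
 *
 * Both addresses and the size must be page aligned, per the sanity
 * checks above.
 */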

static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{
	int r;

	r = 0;
	switch (chip->chip_id) {
	case KVM_IRQCHIP_PIC_MASTER:
		memcpy(&chip->chip.pic,
		       &pic_irqchip(kvm)->pics[0],
		       sizeof(struct kvm_pic_state));
		break;
	case KVM_IRQCHIP_PIC_SLAVE:
		memcpy(&chip->chip.pic,
		       &pic_irqchip(kvm)->pics[1],
		       sizeof(struct kvm_pic_state));
		break;
	case KVM_IRQCHIP_IOAPIC:
		memcpy(&chip->chip.ioapic,
		       ioapic_irqchip(kvm),
		       sizeof(struct kvm_ioapic_state));
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{
	int r;

	r = 0;
	switch (chip->chip_id) {
	case KVM_IRQCHIP_PIC_MASTER:
		memcpy(&pic_irqchip(kvm)->pics[0],
		       &chip->chip.pic,
		       sizeof(struct kvm_pic_state));
		break;
	case KVM_IRQCHIP_PIC_SLAVE:
		memcpy(&pic_irqchip(kvm)->pics[1],
		       &chip->chip.pic,
		       sizeof(struct kvm_pic_state));
		break;
	case KVM_IRQCHIP_IOAPIC:
		memcpy(ioapic_irqchip(kvm),
		       &chip->chip.ioapic,
		       sizeof(struct kvm_ioapic_state));
		break;
	default:
		r = -EINVAL;
		break;
	}
	kvm_pic_update_irq(pic_irqchip(kvm));
	return r;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r = -EINVAL;

	switch (ioctl) {
	case KVM_SET_TSS_ADDR:
		r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
		if (r < 0)
			goto out;
		break;
	case KVM_SET_MEMORY_REGION: {
		struct kvm_memory_region kvm_mem;
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
			goto out;
		kvm_userspace_mem.slot = kvm_mem.slot;
		kvm_userspace_mem.flags = kvm_mem.flags;
		kvm_userspace_mem.guest_phys_addr = kvm_mem.guest_phys_addr;
		kvm_userspace_mem.memory_size = kvm_mem.memory_size;
		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 0);
		if (r)
			goto out;
		break;
	}
	case KVM_SET_NR_MMU_PAGES:
		r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
		if (r)
			goto out;
		break;
	case KVM_GET_NR_MMU_PAGES:
		r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
		break;
	case KVM_SET_MEMORY_ALIAS: {
		struct kvm_memory_alias alias;

		r = -EFAULT;
		if (copy_from_user(&alias, argp, sizeof alias))
			goto out;
		r = kvm_vm_ioctl_set_memory_alias(kvm, &alias);
		if (r)
			goto out;
		break;
	}
	case KVM_CREATE_IRQCHIP:
		r = -ENOMEM;
		kvm->vpic = kvm_create_pic(kvm);
		if (kvm->vpic) {
			r = kvm_ioapic_init(kvm);
			if (r) {
				kfree(kvm->vpic);
				kvm->vpic = NULL;
				goto out;
			}
		} else
			goto out;
		break;
	case KVM_IRQ_LINE: {
		struct kvm_irq_level irq_event;

		r = -EFAULT;
		if (copy_from_user(&irq_event, argp, sizeof irq_event))
			goto out;
		if (irqchip_in_kernel(kvm)) {
			mutex_lock(&kvm->lock);
			if (irq_event.irq < 16)
				kvm_pic_set_irq(pic_irqchip(kvm),
						irq_event.irq,
						irq_event.level);
			kvm_ioapic_set_irq(kvm->vioapic,
					   irq_event.irq,
					   irq_event.level);
			mutex_unlock(&kvm->lock);
			r = 0;
		}
		break;
	}
	case KVM_GET_IRQCHIP: {
		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
		struct kvm_irqchip chip;

		r = -EFAULT;
		if (copy_from_user(&chip, argp, sizeof chip))
			goto out;
		r = -ENXIO;
		if (!irqchip_in_kernel(kvm))
			goto out;
		r = kvm_vm_ioctl_get_irqchip(kvm, &chip);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &chip, sizeof chip))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_IRQCHIP: {
		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
		struct kvm_irqchip chip;

		r = -EFAULT;
		if (copy_from_user(&chip, argp, sizeof chip))
			goto out;
		r = -ENXIO;
		if (!irqchip_in_kernel(kvm))
			goto out;
		r = kvm_vm_ioctl_set_irqchip(kvm, &chip);
		if (r)
			goto out;
		r = 0;
		break;
	}
	default:
		;
	}
out:
	return r;
}

static __init void kvm_init_msr_list(void)
{
	u32 dummy[2];
	unsigned i, j;

	for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
		if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
			continue;
		if (j < i)
			msrs_to_save[j] = msrs_to_save[i];
		j++;
	}
	num_msrs_to_save = j;
}
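
/*
 * Illustrative example: if msrs_to_save[] starts as { A, B, C } and
 * rdmsr_safe() faults on B, the loop above compacts the array in place
 * to { A, C, C } and sets num_msrs_to_save = 2, so the stale trailing
 * entry is never reported.
 */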

/*
 * Only the apic needs an MMIO device hook, so shortcut now.
 */
static struct kvm_io_device *vcpu_find_pervcpu_dev(struct kvm_vcpu *vcpu,
						   gpa_t addr)
{
	struct kvm_io_device *dev;

	if (vcpu->apic) {
		dev = &vcpu->apic->dev;
		if (dev->in_range(dev, addr))
			return dev;
	}
	return NULL;
}

static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
						gpa_t addr)
{
	struct kvm_io_device *dev;

	dev = vcpu_find_pervcpu_dev(vcpu, addr);
	if (dev == NULL)
		dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr);
	return dev;
}

int emulator_read_std(unsigned long addr,
		      void *val,
		      unsigned int bytes,
		      struct kvm_vcpu *vcpu)
{
	void *data = val;

	while (bytes) {
		gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
		unsigned offset = addr & (PAGE_SIZE-1);
		unsigned tocopy = min(bytes, (unsigned)PAGE_SIZE - offset);
		int ret;

		if (gpa == UNMAPPED_GVA)
			return X86EMUL_PROPAGATE_FAULT;
		ret = kvm_read_guest(vcpu->kvm, gpa, data, tocopy);
		if (ret < 0)
			return X86EMUL_UNHANDLEABLE;

		bytes -= tocopy;
		data += tocopy;
		addr += tocopy;
	}

	return X86EMUL_CONTINUE;
}
EXPORT_SYMBOL_GPL(emulator_read_std);

static int emulator_write_std(unsigned long addr,
			      const void *val,
			      unsigned int bytes,
			      struct kvm_vcpu *vcpu)
{
	pr_unimpl(vcpu, "emulator_write_std: addr %lx n %d\n", addr, bytes);
	return X86EMUL_UNHANDLEABLE;
}

static int emulator_read_emulated(unsigned long addr,
				  void *val,
				  unsigned int bytes,
				  struct kvm_vcpu *vcpu)
{
	struct kvm_io_device *mmio_dev;
	gpa_t gpa;

	if (vcpu->mmio_read_completed) {
		memcpy(val, vcpu->mmio_data, bytes);
		vcpu->mmio_read_completed = 0;
		return X86EMUL_CONTINUE;
	}

	gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);

	/* For APIC access vmexit */
	if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
		goto mmio;

	if (emulator_read_std(addr, val, bytes, vcpu)
	    == X86EMUL_CONTINUE)
		return X86EMUL_CONTINUE;
	if (gpa == UNMAPPED_GVA)
		return X86EMUL_PROPAGATE_FAULT;

mmio:
	/*
	 * Is this MMIO handled locally?
	 */
	mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
	if (mmio_dev) {
		kvm_iodevice_read(mmio_dev, gpa, bytes, val);
		return X86EMUL_CONTINUE;
	}

	vcpu->mmio_needed = 1;
	vcpu->mmio_phys_addr = gpa;
	vcpu->mmio_size = bytes;
	vcpu->mmio_is_write = 0;

	return X86EMUL_UNHANDLEABLE;
}

static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
			       const void *val, int bytes)
{
	int ret;

	ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
	if (ret < 0)
		return 0;
	kvm_mmu_pte_write(vcpu, gpa, val, bytes);
	return 1;
}

static int emulator_write_emulated_onepage(unsigned long addr,
					   const void *val,
					   unsigned int bytes,
					   struct kvm_vcpu *vcpu)
{
	struct kvm_io_device *mmio_dev;
	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);

	if (gpa == UNMAPPED_GVA) {
		kvm_x86_ops->inject_page_fault(vcpu, addr, 2);
		return X86EMUL_PROPAGATE_FAULT;
	}

	/* For APIC access vmexit */
	if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
		goto mmio;

	if (emulator_write_phys(vcpu, gpa, val, bytes))
		return X86EMUL_CONTINUE;

mmio:
	/*
	 * Is this MMIO handled locally?
	 */
	mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
	if (mmio_dev) {
		kvm_iodevice_write(mmio_dev, gpa, bytes, val);
		return X86EMUL_CONTINUE;
	}

	vcpu->mmio_needed = 1;
	vcpu->mmio_phys_addr = gpa;
	vcpu->mmio_size = bytes;
	vcpu->mmio_is_write = 1;
	memcpy(vcpu->mmio_data, val, bytes);

	return X86EMUL_CONTINUE;
}

int emulator_write_emulated(unsigned long addr,
			    const void *val,
			    unsigned int bytes,
			    struct kvm_vcpu *vcpu)
{
	/* Crossing a page boundary? */
	if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
		int rc, now;

		now = -addr & ~PAGE_MASK;
		rc = emulator_write_emulated_onepage(addr, val, now, vcpu);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		addr += now;
		val += now;
		bytes -= now;
	}
	return emulator_write_emulated_onepage(addr, val, bytes, vcpu);
}
EXPORT_SYMBOL_GPL(emulator_write_emulated);
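
/*
 * Worked example (illustrative only): with 4K pages, addr = 0xffa and
 * bytes = 16 cross a boundary, since ((0xffa + 15) ^ 0xffa) has bits
 * set in PAGE_MASK.  Then now = -0xffa & ~PAGE_MASK = 6, so the first
 * 6 bytes go to the first page and the remaining 10 to the next.
 */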

static int emulator_cmpxchg_emulated(unsigned long addr,
				     const void *old,
				     const void *new,
				     unsigned int bytes,
				     struct kvm_vcpu *vcpu)
{
	static int reported;

	if (!reported) {
		reported = 1;
		printk(KERN_WARNING "kvm: emulating exchange as write\n");
	}
	return emulator_write_emulated(addr, new, bytes, vcpu);
}

static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	return kvm_x86_ops->get_segment_base(vcpu, seg);
}

int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
{
	return X86EMUL_CONTINUE;
}

int emulate_clts(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->set_cr0(vcpu, vcpu->cr0 & ~X86_CR0_TS);
	return X86EMUL_CONTINUE;
}

int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch (dr) {
	case 0 ... 3:
		*dest = kvm_x86_ops->get_dr(vcpu, dr);
		return X86EMUL_CONTINUE;
	default:
		pr_unimpl(vcpu, "%s: unexpected dr %u\n", __FUNCTION__, dr);
		return X86EMUL_UNHANDLEABLE;
	}
}

int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
{
	unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
	int exception;

	kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
	if (exception) {
		/* FIXME: better handling */
		return X86EMUL_UNHANDLEABLE;
	}
	return X86EMUL_CONTINUE;
}

void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
{
	static int reported;
	u8 opcodes[4];
	unsigned long rip = vcpu->rip;
	unsigned long rip_linear;

	rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);

	if (reported)
		return;

	emulator_read_std(rip_linear, (void *)opcodes, 4, vcpu);

	printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n",
	       context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
	reported = 1;
}
EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);

struct x86_emulate_ops emulate_ops = {
	.read_std = emulator_read_std,
	.write_std = emulator_write_std,
	.read_emulated = emulator_read_emulated,
	.write_emulated = emulator_write_emulated,
	.cmpxchg_emulated = emulator_cmpxchg_emulated,
};

int emulate_instruction(struct kvm_vcpu *vcpu,
			struct kvm_run *run,
			unsigned long cr2,
			u16 error_code,
			int no_decode)
{
	int r;

	vcpu->mmio_fault_cr2 = cr2;
	kvm_x86_ops->cache_regs(vcpu);

	vcpu->mmio_is_write = 0;
	vcpu->pio.string = 0;

	if (!no_decode) {
		int cs_db, cs_l;
		kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);

		vcpu->emulate_ctxt.vcpu = vcpu;
		vcpu->emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
		vcpu->emulate_ctxt.cr2 = cr2;
		vcpu->emulate_ctxt.mode =
			(vcpu->emulate_ctxt.eflags & X86_EFLAGS_VM)
			? X86EMUL_MODE_REAL : cs_l
			? X86EMUL_MODE_PROT64 : cs_db
			? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;

		if (vcpu->emulate_ctxt.mode == X86EMUL_MODE_PROT64) {
			vcpu->emulate_ctxt.cs_base = 0;
			vcpu->emulate_ctxt.ds_base = 0;
			vcpu->emulate_ctxt.es_base = 0;
			vcpu->emulate_ctxt.ss_base = 0;
		} else {
			vcpu->emulate_ctxt.cs_base =
					get_segment_base(vcpu, VCPU_SREG_CS);
			vcpu->emulate_ctxt.ds_base =
					get_segment_base(vcpu, VCPU_SREG_DS);
			vcpu->emulate_ctxt.es_base =
					get_segment_base(vcpu, VCPU_SREG_ES);
			vcpu->emulate_ctxt.ss_base =
					get_segment_base(vcpu, VCPU_SREG_SS);
		}

		vcpu->emulate_ctxt.gs_base =
				get_segment_base(vcpu, VCPU_SREG_GS);
		vcpu->emulate_ctxt.fs_base =
				get_segment_base(vcpu, VCPU_SREG_FS);

		r = x86_decode_insn(&vcpu->emulate_ctxt, &emulate_ops);
		if (r) {
			if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
				return EMULATE_DONE;
			return EMULATE_FAIL;
		}
	}

	r = x86_emulate_insn(&vcpu->emulate_ctxt, &emulate_ops);

	if (vcpu->pio.string)
		return EMULATE_DO_MMIO;

	if ((r || vcpu->mmio_is_write) && run) {
		run->exit_reason = KVM_EXIT_MMIO;
		run->mmio.phys_addr = vcpu->mmio_phys_addr;
		memcpy(run->mmio.data, vcpu->mmio_data, 8);
		run->mmio.len = vcpu->mmio_size;
		run->mmio.is_write = vcpu->mmio_is_write;
	}

	if (r) {
		if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
			return EMULATE_DONE;
		if (!vcpu->mmio_needed) {
			kvm_report_emulation_failure(vcpu, "mmio");
			return EMULATE_FAIL;
		}
		return EMULATE_DO_MMIO;
	}

	kvm_x86_ops->decache_regs(vcpu);
	kvm_x86_ops->set_rflags(vcpu, vcpu->emulate_ctxt.eflags);

	if (vcpu->mmio_is_write) {
		vcpu->mmio_needed = 0;
		return EMULATE_DO_MMIO;
	}

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(emulate_instruction);

static void free_pio_guest_pages(struct kvm_vcpu *vcpu)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vcpu->pio.guest_pages); ++i)
		if (vcpu->pio.guest_pages[i]) {
			kvm_release_page(vcpu->pio.guest_pages[i]);
			vcpu->pio.guest_pages[i] = NULL;
		}
}

static int pio_copy_data(struct kvm_vcpu *vcpu)
{
	void *p = vcpu->pio_data;
	void *q;
	unsigned bytes;
	int nr_pages = vcpu->pio.guest_pages[1] ? 2 : 1;

	q = vmap(vcpu->pio.guest_pages, nr_pages, VM_READ|VM_WRITE,
		 PAGE_KERNEL);
	if (!q) {
		free_pio_guest_pages(vcpu);
		return -ENOMEM;
	}
	q += vcpu->pio.guest_page_offset;
	bytes = vcpu->pio.size * vcpu->pio.cur_count;
	if (vcpu->pio.in)
		memcpy(q, p, bytes);
	else
		memcpy(p, q, bytes);
	q -= vcpu->pio.guest_page_offset;
	vunmap(q);
	free_pio_guest_pages(vcpu);
	return 0;
}

int complete_pio(struct kvm_vcpu *vcpu)
{
	struct kvm_pio_request *io = &vcpu->pio;
	long delta;
	int r;

	kvm_x86_ops->cache_regs(vcpu);

	if (!io->string) {
		if (io->in)
			memcpy(&vcpu->regs[VCPU_REGS_RAX], vcpu->pio_data,
			       io->size);
	} else {
		if (io->in) {
			r = pio_copy_data(vcpu);
			if (r) {
				kvm_x86_ops->cache_regs(vcpu);
				return r;
			}
		}

		delta = 1;
		if (io->rep) {
			delta *= io->cur_count;
			/*
			 * The size of the register should really depend on
			 * current address size.
			 */
			vcpu->regs[VCPU_REGS_RCX] -= delta;
		}
		if (io->down)
			delta = -delta;
		delta *= io->size;
		if (io->in)
			vcpu->regs[VCPU_REGS_RDI] += delta;
		else
			vcpu->regs[VCPU_REGS_RSI] += delta;
	}

	kvm_x86_ops->decache_regs(vcpu);

	io->count -= io->cur_count;
	io->cur_count = 0;

	return 0;
}
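
/*
 * Worked example (illustrative only): after a "rep insw" with
 * cur_count = 4 and size = 2, the code above computes delta = 4,
 * subtracts it from RCX, then scales it to 4 * 2 = 8 bytes and
 * advances RDI by 8 (or by -8 had io->down been set).
 */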

static void kernel_pio(struct kvm_io_device *pio_dev,
		       struct kvm_vcpu *vcpu,
		       void *pd)
{
	/* TODO: String I/O for in kernel device */

	mutex_lock(&vcpu->kvm->lock);
	if (vcpu->pio.in)
		kvm_iodevice_read(pio_dev, vcpu->pio.port,
				  vcpu->pio.size,
				  pd);
	else
		kvm_iodevice_write(pio_dev, vcpu->pio.port,
				   vcpu->pio.size,
				   pd);
	mutex_unlock(&vcpu->kvm->lock);
}

static void pio_string_write(struct kvm_io_device *pio_dev,
			     struct kvm_vcpu *vcpu)
{
	struct kvm_pio_request *io = &vcpu->pio;
	void *pd = vcpu->pio_data;
	int i;

	mutex_lock(&vcpu->kvm->lock);
	for (i = 0; i < io->cur_count; i++) {
		kvm_iodevice_write(pio_dev, io->port,
				   io->size,
				   pd);
		pd += io->size;
	}
	mutex_unlock(&vcpu->kvm->lock);
}

static struct kvm_io_device *vcpu_find_pio_dev(struct kvm_vcpu *vcpu,
					       gpa_t addr)
{
	return kvm_io_bus_find_dev(&vcpu->kvm->pio_bus, addr);
}

int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
		    int size, unsigned port)
{
	struct kvm_io_device *pio_dev;

	vcpu->run->exit_reason = KVM_EXIT_IO;
	vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
	vcpu->run->io.size = vcpu->pio.size = size;
	vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
	vcpu->run->io.count = vcpu->pio.count = vcpu->pio.cur_count = 1;
	vcpu->run->io.port = vcpu->pio.port = port;
	vcpu->pio.in = in;
	vcpu->pio.string = 0;
	vcpu->pio.down = 0;
	vcpu->pio.guest_page_offset = 0;
	vcpu->pio.rep = 0;

	kvm_x86_ops->cache_regs(vcpu);
	memcpy(vcpu->pio_data, &vcpu->regs[VCPU_REGS_RAX], 4);
	kvm_x86_ops->decache_regs(vcpu);

	kvm_x86_ops->skip_emulated_instruction(vcpu);

	pio_dev = vcpu_find_pio_dev(vcpu, port);
	if (pio_dev) {
		kernel_pio(pio_dev, vcpu, vcpu->pio_data);
		complete_pio(vcpu);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_emulate_pio);
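
/*
 * Illustrative userspace sketch (assumed vcpu fd and mmap()ed run
 * structure): when this function returns 0, KVM_RUN exits with
 * KVM_EXIT_IO and userspace completes the access, e.g.:
 *
 *	struct kvm_run *run;	(obtained by mmap()ing vcpu_fd)
 *	char *data = (char *)run + run->io.data_offset;
 *
 *	if (run->exit_reason == KVM_EXIT_IO &&
 *	    run->io.direction == KVM_EXIT_IO_OUT)
 *		handle_port_write(run->io.port, data, run->io.size);
 *
 * handle_port_write() is a hypothetical device-model helper.
 */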

int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
			   int size, unsigned long count, int down,
			   gva_t address, int rep, unsigned port)
{
	unsigned now, in_page;
	int i, ret = 0;
	int nr_pages = 1;
	struct page *page;
	struct kvm_io_device *pio_dev;

	vcpu->run->exit_reason = KVM_EXIT_IO;
	vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
	vcpu->run->io.size = vcpu->pio.size = size;
	vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
	vcpu->run->io.count = vcpu->pio.count = vcpu->pio.cur_count = count;
	vcpu->run->io.port = vcpu->pio.port = port;
	vcpu->pio.in = in;
	vcpu->pio.string = 1;
	vcpu->pio.down = down;
	vcpu->pio.guest_page_offset = offset_in_page(address);
	vcpu->pio.rep = rep;

	if (!count) {
		kvm_x86_ops->skip_emulated_instruction(vcpu);
		return 1;
	}

	if (!down)
		in_page = PAGE_SIZE - offset_in_page(address);
	else
		in_page = offset_in_page(address) + size;
	now = min(count, (unsigned long)in_page / size);
	if (!now) {
		/*
		 * String I/O straddles page boundary.  Pin two guest pages
		 * so that we satisfy atomicity constraints.  Do just one
		 * transaction to avoid complexity.
		 */
		nr_pages = 2;
		now = 1;
	}
	if (down) {
		/*
		 * String I/O in reverse.  Yuck.  Kill the guest, fix later.
		 */
		pr_unimpl(vcpu, "guest string pio down\n");
		inject_gp(vcpu);
		return 1;
	}
	vcpu->run->io.count = now;
	vcpu->pio.cur_count = now;

	if (vcpu->pio.cur_count == vcpu->pio.count)
		kvm_x86_ops->skip_emulated_instruction(vcpu);

	for (i = 0; i < nr_pages; ++i) {
		mutex_lock(&vcpu->kvm->lock);
		page = gva_to_page(vcpu, address + i * PAGE_SIZE);
		vcpu->pio.guest_pages[i] = page;
		mutex_unlock(&vcpu->kvm->lock);
		if (!page) {
			inject_gp(vcpu);
			free_pio_guest_pages(vcpu);
			return 1;
		}
	}

	pio_dev = vcpu_find_pio_dev(vcpu, port);
	if (!vcpu->pio.in) {
		/* string PIO write */
		ret = pio_copy_data(vcpu);
		if (ret >= 0 && pio_dev) {
			pio_string_write(pio_dev, vcpu);
			complete_pio(vcpu);
			if (vcpu->pio.count == 0)
				ret = 1;
		}
	} else if (pio_dev)
		pr_unimpl(vcpu, "no string pio read support yet, "
			  "port %x size %d count %ld\n",
			  port, size, count);

	return ret;
}
EXPORT_SYMBOL_GPL(kvm_emulate_pio_string);

__init void kvm_arch_init(void)
{
	kvm_init_msr_list();
}

int kvm_emulate_halt(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.halt_exits;
	if (irqchip_in_kernel(vcpu->kvm)) {
		vcpu->mp_state = VCPU_MP_STATE_HALTED;
		kvm_vcpu_block(vcpu);
		if (vcpu->mp_state != VCPU_MP_STATE_RUNNABLE)
			return -EINTR;
		return 1;
	} else {
		vcpu->run->exit_reason = KVM_EXIT_HLT;
		return 0;
	}
}
EXPORT_SYMBOL_GPL(kvm_emulate_halt);

int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
{
	unsigned long nr, a0, a1, a2, a3, ret;

	kvm_x86_ops->cache_regs(vcpu);

	nr = vcpu->regs[VCPU_REGS_RAX];
	a0 = vcpu->regs[VCPU_REGS_RBX];
	a1 = vcpu->regs[VCPU_REGS_RCX];
	a2 = vcpu->regs[VCPU_REGS_RDX];
	a3 = vcpu->regs[VCPU_REGS_RSI];

	if (!is_long_mode(vcpu)) {
		nr &= 0xFFFFFFFF;
		a0 &= 0xFFFFFFFF;
		a1 &= 0xFFFFFFFF;
		a2 &= 0xFFFFFFFF;
		a3 &= 0xFFFFFFFF;
	}

	switch (nr) {
	default:
		ret = -KVM_ENOSYS;
		break;
	}
	vcpu->regs[VCPU_REGS_RAX] = ret;
	kvm_x86_ops->decache_regs(vcpu);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
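
/*
 * Illustrative guest-side sketch: a hypercall puts the number in RAX
 * and up to four arguments in RBX, RCX, RDX and RSI before executing
 * the (vendor-specific, later patched) hypercall instruction:
 *
 *	static inline long kvm_hypercall1(unsigned long nr,
 *					  unsigned long a0)
 *	{
 *		long ret;
 *		asm volatile("vmcall"
 *			     : "=a"(ret)
 *			     : "a"(nr), "b"(a0)
 *			     : "memory");
 *		return ret;
 *	}
 *
 * With no hypercall numbers defined yet, the handler above simply
 * returns -KVM_ENOSYS in RAX.
 */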

int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
{
	char instruction[3];
	int ret = 0;

	mutex_lock(&vcpu->kvm->lock);

	/*
	 * Blow out the MMU to ensure that no other VCPU has an active mapping
	 * to ensure that the updated hypercall appears atomically across all
	 * VCPUs.
	 */
	kvm_mmu_zap_all(vcpu->kvm);

	kvm_x86_ops->cache_regs(vcpu);
	kvm_x86_ops->patch_hypercall(vcpu, instruction);
	if (emulator_write_emulated(vcpu->rip, instruction, 3, vcpu)
	    != X86EMUL_CONTINUE)
		ret = -EFAULT;

	mutex_unlock(&vcpu->kvm->lock);

	return ret;
}

static u64 mk_cr_64(u64 curr_cr, u32 new_val)
{
	return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
}
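
/*
 * Worked example (illustrative only):
 * mk_cr_64(0xffffffff00000010, 0x11) keeps the upper 32 bits of the
 * current value and substitutes the new low word, yielding
 * 0xffffffff00000011.
 */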

void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
{
	struct descriptor_table dt = { limit, base };

	kvm_x86_ops->set_gdt(vcpu, &dt);
}

void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
{
	struct descriptor_table dt = { limit, base };

	kvm_x86_ops->set_idt(vcpu, &dt);
}

void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
		   unsigned long *rflags)
{
	lmsw(vcpu, msw);
	*rflags = kvm_x86_ops->get_rflags(vcpu);
}

unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
{
	kvm_x86_ops->decache_cr4_guest_bits(vcpu);
	switch (cr) {
	case 0:
		return vcpu->cr0;
	case 2:
		return vcpu->cr2;
	case 3:
		return vcpu->cr3;
	case 4:
		return vcpu->cr4;
	default:
		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
		return 0;
	}
}

void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
		     unsigned long *rflags)
{
	switch (cr) {
	case 0:
		set_cr0(vcpu, mk_cr_64(vcpu->cr0, val));
		*rflags = kvm_x86_ops->get_rflags(vcpu);
		break;
	case 2:
		vcpu->cr2 = val;
		break;
	case 3:
		set_cr3(vcpu, val);
		break;
	case 4:
		set_cr4(vcpu, mk_cr_64(vcpu->cr4, val));
		break;
	default:
		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
	}
}

void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
{
	int i;
	u32 function;
	struct kvm_cpuid_entry *e, *best;

	kvm_x86_ops->cache_regs(vcpu);
	function = vcpu->regs[VCPU_REGS_RAX];
	vcpu->regs[VCPU_REGS_RAX] = 0;
	vcpu->regs[VCPU_REGS_RBX] = 0;
	vcpu->regs[VCPU_REGS_RCX] = 0;
	vcpu->regs[VCPU_REGS_RDX] = 0;
	best = NULL;
	for (i = 0; i < vcpu->cpuid_nent; ++i) {
		e = &vcpu->cpuid_entries[i];
		if (e->function == function) {
			best = e;
			break;
		}
		/*
		 * Both basic or both extended?
		 */
		if (((e->function ^ function) & 0x80000000) == 0)
			if (!best || e->function > best->function)
				best = e;
	}
	if (best) {
		vcpu->regs[VCPU_REGS_RAX] = best->eax;
		vcpu->regs[VCPU_REGS_RBX] = best->ebx;
		vcpu->regs[VCPU_REGS_RCX] = best->ecx;
		vcpu->regs[VCPU_REGS_RDX] = best->edx;
	}
	kvm_x86_ops->decache_regs(vcpu);
	kvm_x86_ops->skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
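
/*
 * Illustrative example: the 0x80000000 bit splits basic from extended
 * leaves.  A query for an assumed function 0x80000005, with entries
 * defined only up to 0x80000001, matches no exact function, so the
 * fallback above keeps the highest entry on the same side of the
 * split (0x80000001 here) rather than falling back to a basic leaf.
 */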

/*
 * Check if userspace requested an interrupt window, and that the
 * interrupt window is open.
 *
 * No need to exit to userspace if we already have an interrupt queued.
 */
static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
					struct kvm_run *kvm_run)
{
	return (!vcpu->irq_summary &&
		kvm_run->request_interrupt_window &&
		vcpu->interrupt_window_open &&
		(kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF));
}

static void post_kvm_run_save(struct kvm_vcpu *vcpu,
			      struct kvm_run *kvm_run)
{
	kvm_run->if_flag = (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
	kvm_run->cr8 = get_cr8(vcpu);
	kvm_run->apic_base = kvm_get_apic_base(vcpu);
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_run->ready_for_interrupt_injection = 1;
	else
		kvm_run->ready_for_interrupt_injection =
					(vcpu->interrupt_window_open &&
					 vcpu->irq_summary == 0);
}

static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int r;

	if (unlikely(vcpu->mp_state == VCPU_MP_STATE_SIPI_RECEIVED)) {
		pr_debug("vcpu %d received sipi with vector # %x\n",
			 vcpu->vcpu_id, vcpu->sipi_vector);
		kvm_lapic_reset(vcpu);
		r = kvm_x86_ops->vcpu_reset(vcpu);
		if (r)
			return r;
		vcpu->mp_state = VCPU_MP_STATE_RUNNABLE;
	}

preempted:
	if (vcpu->guest_debug.enabled)
		kvm_x86_ops->guest_debug_pre(vcpu);

again:
	r = kvm_mmu_reload(vcpu);
	if (unlikely(r))
		goto out;

	kvm_inject_pending_timer_irqs(vcpu);

	preempt_disable();

	kvm_x86_ops->prepare_guest_switch(vcpu);
	kvm_load_guest_fpu(vcpu);

	local_irq_disable();

	if (signal_pending(current)) {
		local_irq_enable();
		preempt_enable();
		r = -EINTR;
		kvm_run->exit_reason = KVM_EXIT_INTR;
		++vcpu->stat.signal_exits;
		goto out;
	}

	if (irqchip_in_kernel(vcpu->kvm))
		kvm_x86_ops->inject_pending_irq(vcpu);
	else if (!vcpu->mmio_read_completed)
		kvm_x86_ops->inject_pending_vectors(vcpu, kvm_run);

	vcpu->guest_mode = 1;
	kvm_guest_enter();

	if (vcpu->requests)
		if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
			kvm_x86_ops->tlb_flush(vcpu);

	kvm_x86_ops->run(vcpu, kvm_run);

	vcpu->guest_mode = 0;
	local_irq_enable();

	++vcpu->stat.exits;

	/*
	 * We must have an instruction between local_irq_enable() and
	 * kvm_guest_exit(), so the timer interrupt isn't delayed by
	 * the interrupt shadow.  The stat.exits increment will do nicely.
	 * But we need to prevent reordering, hence this barrier():
	 */
	barrier();

	kvm_guest_exit();

	preempt_enable();

	/*
	 * Profile KVM exit RIPs:
	 */
	if (unlikely(prof_on == KVM_PROFILING)) {
		kvm_x86_ops->cache_regs(vcpu);
		profile_hit(KVM_PROFILING, (void *)vcpu->rip);
	}

	r = kvm_x86_ops->handle_exit(kvm_run, vcpu);

	if (r > 0) {
		if (dm_request_for_irq_injection(vcpu, kvm_run)) {
			r = -EINTR;
			kvm_run->exit_reason = KVM_EXIT_INTR;
			++vcpu->stat.request_irq_exits;
			goto out;
		}
		if (!need_resched()) {
			++vcpu->stat.light_exits;
			goto again;
		}
	}

out:
	if (r > 0) {
		kvm_resched(vcpu);
		goto preempted;
	}

	post_kvm_run_save(vcpu, kvm_run);

	return r;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int r;
	sigset_t sigsaved;

	vcpu_load(vcpu);

	if (unlikely(vcpu->mp_state == VCPU_MP_STATE_UNINITIALIZED)) {
		kvm_vcpu_block(vcpu);
		vcpu_put(vcpu);
		return -EAGAIN;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	/* re-sync apic's tpr */
	if (!irqchip_in_kernel(vcpu->kvm))
		set_cr8(vcpu, kvm_run->cr8);

	if (vcpu->pio.cur_count) {
		r = complete_pio(vcpu);
		if (r)
			goto out;
	}
#ifdef CONFIG_HAS_IOMEM
	if (vcpu->mmio_needed) {
		memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
		vcpu->mmio_read_completed = 1;
		vcpu->mmio_needed = 0;
		r = emulate_instruction(vcpu, kvm_run,
					vcpu->mmio_fault_cr2, 0, 1);
		if (r == EMULATE_DO_MMIO) {
			/*
			 * Read-modify-write.  Back to userspace.
			 */
			r = 0;
			goto out;
		}
	}
#endif
	if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL) {
		kvm_x86_ops->cache_regs(vcpu);
		vcpu->regs[VCPU_REGS_RAX] = kvm_run->hypercall.ret;
		kvm_x86_ops->decache_regs(vcpu);
	}

	r = __vcpu_run(vcpu, kvm_run);

out:
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu_put(vcpu);
	return r;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);

	kvm_x86_ops->cache_regs(vcpu);

	regs->rax = vcpu->regs[VCPU_REGS_RAX];
	regs->rbx = vcpu->regs[VCPU_REGS_RBX];
	regs->rcx = vcpu->regs[VCPU_REGS_RCX];
	regs->rdx = vcpu->regs[VCPU_REGS_RDX];
	regs->rsi = vcpu->regs[VCPU_REGS_RSI];
	regs->rdi = vcpu->regs[VCPU_REGS_RDI];
	regs->rsp = vcpu->regs[VCPU_REGS_RSP];
	regs->rbp = vcpu->regs[VCPU_REGS_RBP];
#ifdef CONFIG_X86_64
	regs->r8 = vcpu->regs[VCPU_REGS_R8];
	regs->r9 = vcpu->regs[VCPU_REGS_R9];
	regs->r10 = vcpu->regs[VCPU_REGS_R10];
	regs->r11 = vcpu->regs[VCPU_REGS_R11];
	regs->r12 = vcpu->regs[VCPU_REGS_R12];
	regs->r13 = vcpu->regs[VCPU_REGS_R13];
	regs->r14 = vcpu->regs[VCPU_REGS_R14];
	regs->r15 = vcpu->regs[VCPU_REGS_R15];
#endif

	regs->rip = vcpu->rip;
	regs->rflags = kvm_x86_ops->get_rflags(vcpu);

	/*
	 * Don't leak debug flags in case they were set for guest debugging
	 */
	if (vcpu->guest_debug.enabled && vcpu->guest_debug.singlestep)
		regs->rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);

	vcpu_put(vcpu);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);

	vcpu->regs[VCPU_REGS_RAX] = regs->rax;
	vcpu->regs[VCPU_REGS_RBX] = regs->rbx;
	vcpu->regs[VCPU_REGS_RCX] = regs->rcx;
	vcpu->regs[VCPU_REGS_RDX] = regs->rdx;
	vcpu->regs[VCPU_REGS_RSI] = regs->rsi;
	vcpu->regs[VCPU_REGS_RDI] = regs->rdi;
	vcpu->regs[VCPU_REGS_RSP] = regs->rsp;
	vcpu->regs[VCPU_REGS_RBP] = regs->rbp;
#ifdef CONFIG_X86_64
	vcpu->regs[VCPU_REGS_R8] = regs->r8;
	vcpu->regs[VCPU_REGS_R9] = regs->r9;
	vcpu->regs[VCPU_REGS_R10] = regs->r10;
	vcpu->regs[VCPU_REGS_R11] = regs->r11;
	vcpu->regs[VCPU_REGS_R12] = regs->r12;
	vcpu->regs[VCPU_REGS_R13] = regs->r13;
	vcpu->regs[VCPU_REGS_R14] = regs->r14;
	vcpu->regs[VCPU_REGS_R15] = regs->r15;
#endif

	vcpu->rip = regs->rip;
	kvm_x86_ops->set_rflags(vcpu, regs->rflags);

	kvm_x86_ops->decache_regs(vcpu);

	vcpu_put(vcpu);

	return 0;
}

static void get_segment(struct kvm_vcpu *vcpu,
			struct kvm_segment *var, int seg)
{
	return kvm_x86_ops->get_segment(vcpu, var, seg);
}

void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
{
	struct kvm_segment cs;

	get_segment(vcpu, &cs, VCPU_SREG_CS);
	*db = cs.db;
	*l = cs.l;
}
EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	struct descriptor_table dt;
	int pending_vec;

	vcpu_load(vcpu);

	get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
	get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
	get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
	get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
	get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
	get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);

	get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
	get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);

	kvm_x86_ops->get_idt(vcpu, &dt);
	sregs->idt.limit = dt.limit;
	sregs->idt.base = dt.base;
	kvm_x86_ops->get_gdt(vcpu, &dt);
	sregs->gdt.limit = dt.limit;
	sregs->gdt.base = dt.base;

	kvm_x86_ops->decache_cr4_guest_bits(vcpu);
	sregs->cr0 = vcpu->cr0;
	sregs->cr2 = vcpu->cr2;
	sregs->cr3 = vcpu->cr3;
	sregs->cr4 = vcpu->cr4;
	sregs->cr8 = get_cr8(vcpu);
	sregs->efer = vcpu->shadow_efer;
	sregs->apic_base = kvm_get_apic_base(vcpu);

	if (irqchip_in_kernel(vcpu->kvm)) {
		memset(sregs->interrupt_bitmap, 0,
		       sizeof sregs->interrupt_bitmap);
		pending_vec = kvm_x86_ops->get_irq(vcpu);
		if (pending_vec >= 0)
			set_bit(pending_vec,
				(unsigned long *)sregs->interrupt_bitmap);
	} else
		memcpy(sregs->interrupt_bitmap, vcpu->irq_pending,
		       sizeof sregs->interrupt_bitmap);

	vcpu_put(vcpu);

	return 0;
}

static void set_segment(struct kvm_vcpu *vcpu,
			struct kvm_segment *var, int seg)
{
	return kvm_x86_ops->set_segment(vcpu, var, seg);
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int mmu_reset_needed = 0;
	int i, pending_vec, max_bits;
	struct descriptor_table dt;

	vcpu_load(vcpu);

	dt.limit = sregs->idt.limit;
	dt.base = sregs->idt.base;
	kvm_x86_ops->set_idt(vcpu, &dt);
	dt.limit = sregs->gdt.limit;
	dt.base = sregs->gdt.base;
	kvm_x86_ops->set_gdt(vcpu, &dt);

	vcpu->cr2 = sregs->cr2;
	mmu_reset_needed |= vcpu->cr3 != sregs->cr3;
	vcpu->cr3 = sregs->cr3;

	set_cr8(vcpu, sregs->cr8);

	mmu_reset_needed |= vcpu->shadow_efer != sregs->efer;
#ifdef CONFIG_X86_64
	kvm_x86_ops->set_efer(vcpu, sregs->efer);
#endif
	kvm_set_apic_base(vcpu, sregs->apic_base);

	kvm_x86_ops->decache_cr4_guest_bits(vcpu);

	mmu_reset_needed |= vcpu->cr0 != sregs->cr0;
	vcpu->cr0 = sregs->cr0;
	kvm_x86_ops->set_cr0(vcpu, sregs->cr0);

	mmu_reset_needed |= vcpu->cr4 != sregs->cr4;
	kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
	if (!is_long_mode(vcpu) && is_pae(vcpu))
		load_pdptrs(vcpu, vcpu->cr3);

	if (mmu_reset_needed)
		kvm_mmu_reset_context(vcpu);

	if (!irqchip_in_kernel(vcpu->kvm)) {
		memcpy(vcpu->irq_pending, sregs->interrupt_bitmap,
		       sizeof vcpu->irq_pending);
		vcpu->irq_summary = 0;
		for (i = 0; i < ARRAY_SIZE(vcpu->irq_pending); ++i)
			if (vcpu->irq_pending[i])
				__set_bit(i, &vcpu->irq_summary);
	} else {
		max_bits = (sizeof sregs->interrupt_bitmap) << 3;
		pending_vec = find_first_bit(
			(const unsigned long *)sregs->interrupt_bitmap,
			max_bits);
		/* Only pending external irq is handled here */
		if (pending_vec < max_bits) {
			kvm_x86_ops->set_irq(vcpu, pending_vec);
			pr_debug("Set back pending irq %d\n",
				 pending_vec);
		}
	}

	set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
	set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
	set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
	set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
	set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
	set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);

	set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
	set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);

	vcpu_put(vcpu);

	return 0;
}

int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
				    struct kvm_debug_guest *dbg)
{
	int r;

	vcpu_load(vcpu);

	r = kvm_x86_ops->set_guest_debug(vcpu, dbg);

	vcpu_put(vcpu);

	return r;
}

/*
 * fxsave fpu state.  Taken from x86_64/processor.h.  To be killed when
 * we have asm/x86/processor.h
 */
struct fxsave {
	u16	cwd;
	u16	swd;
	u16	twd;
	u16	fop;
	u64	rip;
	u64	rdp;
	u32	mxcsr;
	u32	mxcsr_mask;
	u32	st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
#ifdef CONFIG_X86_64
	u32	xmm_space[64];	/* 16*16 bytes for each XMM-reg = 256 bytes */
#else
	u32	xmm_space[32];	/* 8*16 bytes for each XMM-reg = 128 bytes */
#endif
};

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	struct fxsave *fxsave = (struct fxsave *)&vcpu->guest_fx_image;

	vcpu_load(vcpu);

	memcpy(fpu->fpr, fxsave->st_space, 128);
	fpu->fcw = fxsave->cwd;
	fpu->fsw = fxsave->swd;
	fpu->ftwx = fxsave->twd;
	fpu->last_opcode = fxsave->fop;
	fpu->last_ip = fxsave->rip;
	fpu->last_dp = fxsave->rdp;
	memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);

	vcpu_put(vcpu);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	struct fxsave *fxsave = (struct fxsave *)&vcpu->guest_fx_image;

	vcpu_load(vcpu);

	memcpy(fxsave->st_space, fpu->fpr, 128);
	fxsave->cwd = fpu->fcw;
	fxsave->swd = fpu->fsw;
	fxsave->twd = fpu->ftwx;
	fxsave->fop = fpu->last_opcode;
	fxsave->rip = fpu->last_ip;
	fxsave->rdp = fpu->last_dp;
	memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);

	vcpu_put(vcpu);

	return 0;
}

void fx_init(struct kvm_vcpu *vcpu)
{
	unsigned after_mxcsr_mask;

	/* Initialize guest FPU by resetting ours and saving into guest's */
	preempt_disable();
	fx_save(&vcpu->host_fx_image);
	fpu_init();
	fx_save(&vcpu->guest_fx_image);
	fx_restore(&vcpu->host_fx_image);
	preempt_enable();

	vcpu->cr0 |= X86_CR0_ET;
	after_mxcsr_mask = offsetof(struct i387_fxsave_struct, st_space);
	vcpu->guest_fx_image.mxcsr = 0x1f80;
	memset((void *)&vcpu->guest_fx_image + after_mxcsr_mask,
	       0, sizeof(struct i387_fxsave_struct) - after_mxcsr_mask);
}
EXPORT_SYMBOL_GPL(fx_init);

void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
{
	if (!vcpu->fpu_active || vcpu->guest_fpu_loaded)
		return;

	vcpu->guest_fpu_loaded = 1;
	fx_save(&vcpu->host_fx_image);
	fx_restore(&vcpu->guest_fx_image);
}
EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);

void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
{
	if (!vcpu->guest_fpu_loaded)
		return;

	vcpu->guest_fpu_loaded = 0;
	fx_save(&vcpu->guest_fx_image);
	fx_restore(&vcpu->host_fx_image);
}
EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);