/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * derived from drivers/kvm/kvm_main.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/kvm_host.h>
#include "segment_descriptor.h"
#include "irq.h"
#include "mmu.h"

#include <linux/kvm.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/highmem.h>

#include <asm/uaccess.h>
#include <asm/msr.h>

#define MAX_IO_MSRS 256
#define CR0_RESERVED_BITS						\
	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
#define CR4_RESERVED_BITS						\
	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE \
			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE	\
			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR	\
			  | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))

#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
/* EFER defaults:
 * - enable SYSCALL by default, because it is emulated by KVM
 * - enable LME and LMA by default on 64-bit KVM
 */
#ifdef CONFIG_X86_64
static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffafeULL;
#else
static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffffeULL;
#endif

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
				    struct kvm_cpuid_entry2 __user *entries);

struct kvm_x86_ops *kvm_x86_ops;

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "pf_fixed", VCPU_STAT(pf_fixed) },
	{ "pf_guest", VCPU_STAT(pf_guest) },
	{ "tlb_flush", VCPU_STAT(tlb_flush) },
	{ "invlpg", VCPU_STAT(invlpg) },
	{ "exits", VCPU_STAT(exits) },
	{ "io_exits", VCPU_STAT(io_exits) },
	{ "mmio_exits", VCPU_STAT(mmio_exits) },
	{ "signal_exits", VCPU_STAT(signal_exits) },
	{ "irq_window", VCPU_STAT(irq_window_exits) },
	{ "halt_exits", VCPU_STAT(halt_exits) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "request_irq", VCPU_STAT(request_irq_exits) },
	{ "irq_exits", VCPU_STAT(irq_exits) },
	{ "host_state_reload", VCPU_STAT(host_state_reload) },
	{ "efer_reload", VCPU_STAT(efer_reload) },
	{ "fpu_reload", VCPU_STAT(fpu_reload) },
	{ "insn_emulation", VCPU_STAT(insn_emulation) },
	{ "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
	{ "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
	{ "mmu_pte_write", VM_STAT(mmu_pte_write) },
	{ "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
	{ "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
	{ "mmu_flooded", VM_STAT(mmu_flooded) },
	{ "mmu_recycled", VM_STAT(mmu_recycled) },
	{ "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
	{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
	{ NULL }
};

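/*
 * Walk the host descriptor tables to compute the linear base of the
 * segment named by @selector: read the GDT base with sgdt, chase the
 * LDT instead when the selector's TI bit is set, then reassemble the
 * base from the low/mid/high descriptor fields (plus base_higher for
 * 64-bit system descriptors such as the TSS).
 */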
unsigned long segment_base(u16 selector)
{
	struct descriptor_table gdt;
	struct segment_descriptor *d;
	unsigned long table_base;
	unsigned long v;

	if (selector == 0)
		return 0;

	asm("sgdt %0" : "=m"(gdt));
	table_base = gdt.base;

	if (selector & 4) {		/* from ldt */
		u16 ldt_selector;

		asm("sldt %0" : "=g"(ldt_selector));
		table_base = segment_base(ldt_selector);
	}
	d = (struct segment_descriptor *)(table_base + (selector & ~7));
	v = d->base_low | ((unsigned long)d->base_mid << 16) |
		((unsigned long)d->base_high << 24);
#ifdef CONFIG_X86_64
	if (d->system == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
		v |= ((unsigned long) \
			((struct segment_descriptor_64 *)d)->base_higher) << 32;
#endif
	return v;
}
EXPORT_SYMBOL_GPL(segment_base);

u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
{
	/*
	 * The cached value is kept current whether the local APIC is
	 * emulated in the kernel or in userspace, so both cases read
	 * the same field.
	 */
	return vcpu->arch.apic_base;
}
EXPORT_SYMBOL_GPL(kvm_get_apic_base);

void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
{
	/* TODO: reserve bits check */
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_lapic_set_base(vcpu, data);
	else
		vcpu->arch.apic_base = data;
}
EXPORT_SYMBOL_GPL(kvm_set_apic_base);

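/*
 * Record an exception for injection on the next guest entry.  Only one
 * exception may be pending at a time; queueing on top of a pending one
 * trips the WARN_ON (the page-fault path below instead upgrades an
 * already-pending #PF to #DF explicitly).
 */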
void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
	WARN_ON(vcpu->arch.exception.pending);
	vcpu->arch.exception.pending = true;
	vcpu->arch.exception.has_error_code = false;
	vcpu->arch.exception.nr = nr;
}
EXPORT_SYMBOL_GPL(kvm_queue_exception);

void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			   u32 error_code)
{
	++vcpu->stat.pf_guest;
	if (vcpu->arch.exception.pending && vcpu->arch.exception.nr == PF_VECTOR) {
		printk(KERN_DEBUG "kvm: inject_page_fault:"
		       " double fault 0x%lx\n", addr);
		vcpu->arch.exception.nr = DF_VECTOR;
		vcpu->arch.exception.error_code = 0;
		return;
	}
	vcpu->arch.cr2 = addr;
	kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
}

void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
	WARN_ON(vcpu->arch.exception.pending);
	vcpu->arch.exception.pending = true;
	vcpu->arch.exception.has_error_code = true;
	vcpu->arch.exception.nr = nr;
	vcpu->arch.exception.error_code = error_code;
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_e);

static void __queue_exception(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
				     vcpu->arch.exception.has_error_code,
				     vcpu->arch.exception.error_code);
}

/*
 * Load the PAE pdptrs.  Return true if they are all valid.
 */
int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
	unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
	int i;
	int ret;
	u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];

	down_read(&vcpu->kvm->slots_lock);
	ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
				  offset * sizeof(u64), sizeof(pdpte));
	if (ret < 0) {
		ret = 0;
		goto out;
	}
	for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
		if ((pdpte[i] & 1) && (pdpte[i] & 0xfffffff0000001e6ull)) {
			ret = 0;
			goto out;
		}
	}
	ret = 1;

	memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs));
out:
	up_read(&vcpu->kvm->slots_lock);

	return ret;
}

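/*
 * Re-read the four PDPTEs from guest memory and compare them against
 * the cached copy, so a CR3 write that leaves them unchanged can take
 * the cheap flush-only path in set_cr3() below.
 */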
static bool pdptrs_changed(struct kvm_vcpu *vcpu)
{
	u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
	bool changed = true;
	int r;

	if (is_long_mode(vcpu) || !is_pae(vcpu))
		return false;

	down_read(&vcpu->kvm->slots_lock);
	r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte));
	if (r < 0)
		goto out;
	changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0;
out:
	up_read(&vcpu->kvm->slots_lock);

	return changed;
}

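/*
 * Emulate a guest write to CR0: reject, with #GP, the combinations a
 * real CPU faults on (reserved bits, NW without CD, PG without PE,
 * enabling paging in long mode without PAE or with CS.L set), then
 * hand the value to the vendor module and rebuild the MMU context.
 */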
void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	if (cr0 & CR0_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
		       cr0, vcpu->arch.cr0);
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
		printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
		printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
		       "and a clear PE flag\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
#ifdef CONFIG_X86_64
		if ((vcpu->arch.shadow_efer & EFER_LME)) {
			int cs_db, cs_l;

			if (!is_pae(vcpu)) {
				printk(KERN_DEBUG "set_cr0: #GP, start paging "
				       "in long mode while PAE is disabled\n");
				kvm_inject_gp(vcpu, 0);
				return;
			}
			kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
			if (cs_l) {
				printk(KERN_DEBUG "set_cr0: #GP, start paging "
				       "in long mode while CS.L == 1\n");
				kvm_inject_gp(vcpu, 0);
				return;
			}
		} else
#endif
		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
			printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
			       "reserved bits\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}

	}

	kvm_x86_ops->set_cr0(vcpu, cr0);
	vcpu->arch.cr0 = cr0;

	kvm_mmu_reset_context(vcpu);
	return;
}
EXPORT_SYMBOL_GPL(set_cr0);

void lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
	set_cr0(vcpu, (vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f));
}
EXPORT_SYMBOL_GPL(lmsw);

void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	if (cr4 & CR4_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (is_long_mode(vcpu)) {
		if (!(cr4 & X86_CR4_PAE)) {
			printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
			       "in long mode\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}
	} else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & X86_CR4_PAE)
		   && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
		printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (cr4 & X86_CR4_VMXE) {
		printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}
	kvm_x86_ops->set_cr4(vcpu, cr4);
	vcpu->arch.cr4 = cr4;
	kvm_mmu_reset_context(vcpu);
}
EXPORT_SYMBOL_GPL(set_cr4);

void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	if (cr3 == vcpu->arch.cr3 && !pdptrs_changed(vcpu)) {
		kvm_mmu_flush_tlb(vcpu);
		return;
	}

	if (is_long_mode(vcpu)) {
		if (cr3 & CR3_L_MODE_RESERVED_BITS) {
			printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}
	} else {
		if (is_pae(vcpu)) {
			if (cr3 & CR3_PAE_RESERVED_BITS) {
				printk(KERN_DEBUG
				       "set_cr3: #GP, reserved bits\n");
				kvm_inject_gp(vcpu, 0);
				return;
			}
			if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
				printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
				       "reserved bits\n");
				kvm_inject_gp(vcpu, 0);
				return;
			}
		}
		/*
		 * We don't check reserved bits in nonpae mode, because
		 * this isn't enforced, and VMware depends on this.
		 */
	}

	down_read(&vcpu->kvm->slots_lock);
	/*
	 * Does the new cr3 value map to physical memory? (Note, we
	 * catch an invalid cr3 even in real-mode, because it would
	 * cause trouble later on when we turn on paging anyway.)
	 *
	 * A real CPU would silently accept an invalid cr3 and would
	 * attempt to use it - with largely undefined (and often hard
	 * to debug) behavior on the guest side.
	 */
	if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
		kvm_inject_gp(vcpu, 0);
	else {
		vcpu->arch.cr3 = cr3;
		vcpu->arch.mmu.new_cr3(vcpu);
	}
	up_read(&vcpu->kvm->slots_lock);
}
EXPORT_SYMBOL_GPL(set_cr3);

void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
	if (cr8 & CR8_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
		kvm_inject_gp(vcpu, 0);
		return;
	}
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_lapic_set_tpr(vcpu, cr8);
	else
		vcpu->arch.cr8 = cr8;
}
EXPORT_SYMBOL_GPL(set_cr8);

unsigned long get_cr8(struct kvm_vcpu *vcpu)
{
	if (irqchip_in_kernel(vcpu->kvm))
		return kvm_lapic_get_cr8(vcpu);
	else
		return vcpu->arch.cr8;
}
EXPORT_SYMBOL_GPL(get_cr8);

/*
 * List of msr numbers which we expose to userspace through KVM_GET_MSRS
 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
 *
 * This list is modified at module load time to reflect the
 * capabilities of the host cpu.
 */
static u32 msrs_to_save[] = {
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
	MSR_K6_STAR,
#ifdef CONFIG_X86_64
	MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
	MSR_IA32_TIME_STAMP_COUNTER,
};

static unsigned num_msrs_to_save;

static u32 emulated_msrs[] = {
	MSR_IA32_MISC_ENABLE,
};

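/*
 * EFER writes: any bit still set in efer_reserved_bits (i.e. not
 * whitelisted via kvm_enable_efer_bits() below) draws #GP, as does
 * toggling LME while paging is enabled.  LMA is always carried over
 * from the shadow copy, since the guest cannot set it directly.
 */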
#ifdef CONFIG_X86_64

static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	if (efer & efer_reserved_bits) {
		printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
		       efer);
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (is_paging(vcpu)
	    && (vcpu->arch.shadow_efer & EFER_LME) != (efer & EFER_LME)) {
		printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	kvm_x86_ops->set_efer(vcpu, efer);

	efer &= ~EFER_LMA;
	efer |= vcpu->arch.shadow_efer & EFER_LMA;

	vcpu->arch.shadow_efer = efer;
}

#endif

void kvm_enable_efer_bits(u64 mask)
{
	efer_reserved_bits &= ~mask;
}
EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);

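/*
 * Illustrative use (assumed, not taken from this file): a vendor
 * module that detects NX support on the host at init time would do
 *
 *	if (boot_cpu_has(X86_FEATURE_NX))
 *		kvm_enable_efer_bits(EFER_NX);
 *
 * after which set_efer() no longer treats EFER.NX as reserved.
 */
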
/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
	return kvm_x86_ops->set_msr(vcpu, msr_index, data);
}

/*
 * Adapt set_msr() to msr_io()'s calling convention
 */
static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
	return kvm_set_msr(vcpu, index, *data);
}

int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	switch (msr) {
#ifdef CONFIG_X86_64
	case MSR_EFER:
		set_efer(vcpu, data);
		break;
#endif
	case MSR_IA32_MC0_STATUS:
		pr_unimpl(vcpu, "%s: MSR_IA32_MC0_STATUS 0x%llx, nop\n",
			  __FUNCTION__, data);
		break;
	case MSR_IA32_MCG_STATUS:
		pr_unimpl(vcpu, "%s: MSR_IA32_MCG_STATUS 0x%llx, nop\n",
			  __FUNCTION__, data);
		break;
	case MSR_IA32_MCG_CTL:
		pr_unimpl(vcpu, "%s: MSR_IA32_MCG_CTL 0x%llx, nop\n",
			  __FUNCTION__, data);
		break;
	case MSR_IA32_UCODE_REV:
	case MSR_IA32_UCODE_WRITE:
	case 0x200 ... 0x2ff: /* MTRRs */
		break;
	case MSR_IA32_APICBASE:
		kvm_set_apic_base(vcpu, data);
		break;
	case MSR_IA32_MISC_ENABLE:
		vcpu->arch.ia32_misc_enable_msr = data;
		break;
	default:
		pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n", msr, data);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_msr_common);

/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
	return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
}

int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 data;

	switch (msr) {
	case 0xc0010010: /* SYSCFG */
	case 0xc0010015: /* HWCR */
	case MSR_IA32_PLATFORM_ID:
	case MSR_IA32_P5_MC_ADDR:
	case MSR_IA32_P5_MC_TYPE:
	case MSR_IA32_MC0_CTL:
	case MSR_IA32_MCG_STATUS:
	case MSR_IA32_MCG_CAP:
	case MSR_IA32_MCG_CTL:
	case MSR_IA32_MC0_MISC:
	case MSR_IA32_MC0_MISC+4:
	case MSR_IA32_MC0_MISC+8:
	case MSR_IA32_MC0_MISC+12:
	case MSR_IA32_MC0_MISC+16:
	case MSR_IA32_UCODE_REV:
	case MSR_IA32_PERF_STATUS:
	case MSR_IA32_EBL_CR_POWERON:
	/* MTRR registers */
	case 0xfe:
	case 0x200 ... 0x2ff:
		data = 0;
		break;
	case 0xcd: /* fsb frequency */
		data = 3;
		break;
	case MSR_IA32_APICBASE:
		data = kvm_get_apic_base(vcpu);
		break;
	case MSR_IA32_MISC_ENABLE:
		data = vcpu->arch.ia32_misc_enable_msr;
		break;
#ifdef CONFIG_X86_64
	case MSR_EFER:
		data = vcpu->arch.shadow_efer;
		break;
#endif
	default:
		pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
		return 1;
	}
	*pdata = data;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_msr_common);

/*
 * Read or write a bunch of msrs. All parameters are kernel addresses.
 *
 * @return number of msrs set successfully.
 */
static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
		    struct kvm_msr_entry *entries,
		    int (*do_msr)(struct kvm_vcpu *vcpu,
				  unsigned index, u64 *data))
{
	int i;

	vcpu_load(vcpu);

	for (i = 0; i < msrs->nmsrs; ++i)
		if (do_msr(vcpu, entries[i].index, &entries[i].data))
			break;

	vcpu_put(vcpu);

	return i;
}

/*
 * Read or write a bunch of msrs. Parameters are user addresses.
 *
 * @return number of msrs set successfully.
 */
static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
		  int (*do_msr)(struct kvm_vcpu *vcpu,
				unsigned index, u64 *data),
		  int writeback)
{
	struct kvm_msrs msrs;
	struct kvm_msr_entry *entries;
	int r, n;
	unsigned size;

	r = -EFAULT;
	if (copy_from_user(&msrs, user_msrs, sizeof msrs))
		goto out;

	r = -E2BIG;
	if (msrs.nmsrs >= MAX_IO_MSRS)
		goto out;

	r = -ENOMEM;
	size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
	entries = vmalloc(size);
	if (!entries)
		goto out;

	r = -EFAULT;
	if (copy_from_user(entries, user_msrs->entries, size))
		goto out_free;

	r = n = __msr_io(vcpu, &msrs, entries, do_msr);
	if (r < 0)
		goto out_free;

	r = -EFAULT;
	if (writeback && copy_to_user(user_msrs->entries, entries, size))
		goto out_free;

	r = n;

out_free:
	vfree(entries);
out:
	return r;
}

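/*
 * Userspace view of this interface (sketch, not part of this file):
 * the ioctl argument is a struct kvm_msrs header immediately followed
 * by nmsrs kvm_msr_entry records, and the return value is the number
 * of MSRs processed:
 *
 *	struct {
 *		struct kvm_msrs header;
 *		struct kvm_msr_entry entry[1];
 *	} m = { .header = { .nmsrs = 1 } };
 *	m.entry[0].index = MSR_IA32_MISC_ENABLE;
 *	ioctl(vcpu_fd, KVM_GET_MSRS, &m);
 */
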
/*
 * Make sure that a cpu that is being hot-unplugged does not have any vcpus
 * cached on it.
 */
void decache_vcpus_on_cpu(int cpu)
{
	struct kvm *vm;
	struct kvm_vcpu *vcpu;
	int i;

	spin_lock(&kvm_lock);
	list_for_each_entry(vm, &vm_list, vm_list)
		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
			vcpu = vm->vcpus[i];
			if (!vcpu)
				continue;
			/*
			 * If the vcpu is locked, then it is running on some
			 * other cpu and therefore it is not cached on the
			 * cpu in question.
			 *
			 * If it's not locked, check the last cpu it executed
			 * on.
			 */
			if (mutex_trylock(&vcpu->mutex)) {
				if (vcpu->cpu == cpu) {
					kvm_x86_ops->vcpu_decache(vcpu);
					vcpu->cpu = -1;
				}
				mutex_unlock(&vcpu->mutex);
			}
		}
	spin_unlock(&kvm_lock);
}

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_IRQCHIP:
	case KVM_CAP_HLT:
	case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
	case KVM_CAP_USER_MEMORY:
	case KVM_CAP_SET_TSS_ADDR:
	case KVM_CAP_EXT_CPUID:
		r = 1;
		break;
	case KVM_CAP_VAPIC:
		r = !kvm_x86_ops->cpu_has_accelerated_tpr();
		break;
	default:
		r = 0;
		break;
	}
	return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_GET_MSR_INDEX_LIST: {
		struct kvm_msr_list __user *user_msr_list = argp;
		struct kvm_msr_list msr_list;
		unsigned n;

		r = -EFAULT;
		if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
			goto out;
		n = msr_list.nmsrs;
		msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
		if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
			goto out;
		r = -E2BIG;
		if (n < num_msrs_to_save)
			goto out;
		r = -EFAULT;
		if (copy_to_user(user_msr_list->indices, &msrs_to_save,
				 num_msrs_to_save * sizeof(u32)))
			goto out;
		if (copy_to_user(user_msr_list->indices
				 + num_msrs_to_save * sizeof(u32),
				 &emulated_msrs,
				 ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_SUPPORTED_CPUID: {
		struct kvm_cpuid2 __user *cpuid_arg = argp;
		struct kvm_cpuid2 cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_dev_ioctl_get_supported_cpuid(&cpuid,
						      cpuid_arg->entries);
		if (r)
			goto out;

		r = -EFAULT;
		if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
			goto out;
		r = 0;
		break;
	}
	default:
		r = -EINVAL;
	}
out:
	return r;
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	kvm_x86_ops->vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->vcpu_put(vcpu);
	kvm_put_guest_fpu(vcpu);
}

static int is_efer_nx(void)
{
	u64 efer;

	rdmsrl(MSR_EFER, efer);
	return efer & EFER_NX;
}

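/*
 * If the host runs with EFER.NX clear, the guest must not be told it
 * has NX: find the 0x80000001 leaf in the guest's cpuid table and
 * clear bit 20 (NX) of EDX.
 */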
static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_cpuid_entry2 *e, *entry;

	entry = NULL;
	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
		e = &vcpu->arch.cpuid_entries[i];
		if (e->function == 0x80000001) {
			entry = e;
			break;
		}
	}
	if (entry && (entry->edx & (1 << 20)) && !is_efer_nx()) {
		entry->edx &= ~(1 << 20);
		printk(KERN_INFO "kvm: guest NX capability removed\n");
	}
}

/* Used when an old userspace process fills a new kernel module. */
static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
				    struct kvm_cpuid *cpuid,
				    struct kvm_cpuid_entry __user *entries)
{
	int r, i;
	struct kvm_cpuid_entry *cpuid_entries;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	r = -ENOMEM;
	cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
	if (!cpuid_entries)
		goto out;
	r = -EFAULT;
	if (copy_from_user(cpuid_entries, entries,
			   cpuid->nent * sizeof(struct kvm_cpuid_entry)))
		goto out_free;
	for (i = 0; i < cpuid->nent; i++) {
		vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
		vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
		vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
		vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
		vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
		vcpu->arch.cpuid_entries[i].index = 0;
		vcpu->arch.cpuid_entries[i].flags = 0;
		vcpu->arch.cpuid_entries[i].padding[0] = 0;
		vcpu->arch.cpuid_entries[i].padding[1] = 0;
		vcpu->arch.cpuid_entries[i].padding[2] = 0;
	}
	vcpu->arch.cpuid_nent = cpuid->nent;
	cpuid_fix_nx_cap(vcpu);
	r = 0;

out_free:
	vfree(cpuid_entries);
out:
	return r;
}

static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
				     struct kvm_cpuid2 *cpuid,
				     struct kvm_cpuid_entry2 __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	r = -EFAULT;
	if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
			   cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
		goto out;
	vcpu->arch.cpuid_nent = cpuid->nent;
	return 0;

out:
	return r;
}

static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
				     struct kvm_cpuid2 *cpuid,
				     struct kvm_cpuid_entry2 __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent < vcpu->arch.cpuid_nent)
		goto out;
	r = -EFAULT;
	if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
			 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
		goto out;
	return 0;

out:
	cpuid->nent = vcpu->arch.cpuid_nent;
	return r;
}

static inline u32 bit(int bitno)
{
	return 1 << (bitno & 31);
}

static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
			   u32 index)
{
	entry->function = function;
	entry->index = index;
	cpuid_count(entry->function, entry->index,
		    &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
	entry->flags = 0;
}

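/*
 * Build the entry (or entries, for stateful and indexed leaves) for
 * @function.  The kvm_supported_word* masks whitelist the feature bits
 * KVM can virtualize; host features outside these masks are hidden
 * from the guest.
 */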
static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
			 u32 index, int *nent, int maxnent)
{
	const u32 kvm_supported_word0_x86_features = bit(X86_FEATURE_FPU) |
		bit(X86_FEATURE_VME) | bit(X86_FEATURE_DE) |
		bit(X86_FEATURE_PSE) | bit(X86_FEATURE_TSC) |
		bit(X86_FEATURE_MSR) | bit(X86_FEATURE_PAE) |
		bit(X86_FEATURE_CX8) | bit(X86_FEATURE_APIC) |
		bit(X86_FEATURE_SEP) | bit(X86_FEATURE_PGE) |
		bit(X86_FEATURE_CMOV) | bit(X86_FEATURE_PSE36) |
		bit(X86_FEATURE_CLFLSH) | bit(X86_FEATURE_MMX) |
		bit(X86_FEATURE_FXSR) | bit(X86_FEATURE_XMM) |
		bit(X86_FEATURE_XMM2) | bit(X86_FEATURE_SELFSNOOP);
	const u32 kvm_supported_word1_x86_features = bit(X86_FEATURE_FPU) |
		bit(X86_FEATURE_VME) | bit(X86_FEATURE_DE) |
		bit(X86_FEATURE_PSE) | bit(X86_FEATURE_TSC) |
		bit(X86_FEATURE_MSR) | bit(X86_FEATURE_PAE) |
		bit(X86_FEATURE_CX8) | bit(X86_FEATURE_APIC) |
		bit(X86_FEATURE_PGE) |
		bit(X86_FEATURE_CMOV) | bit(X86_FEATURE_PSE36) |
		bit(X86_FEATURE_MMX) | bit(X86_FEATURE_FXSR) |
		bit(X86_FEATURE_SYSCALL) |
		(bit(X86_FEATURE_NX) && is_efer_nx()) |
#ifdef CONFIG_X86_64
		bit(X86_FEATURE_LM) |
#endif
		bit(X86_FEATURE_MMXEXT) |
		bit(X86_FEATURE_3DNOWEXT) |
		bit(X86_FEATURE_3DNOW);
	const u32 kvm_supported_word3_x86_features =
		bit(X86_FEATURE_XMM3) | bit(X86_FEATURE_CX16);
	const u32 kvm_supported_word6_x86_features =
		bit(X86_FEATURE_LAHF_LM) | bit(X86_FEATURE_CMP_LEGACY);

	/* all func 2 cpuid_count() should be called on the same cpu */
	get_cpu();
	do_cpuid_1_ent(entry, function, index);
	++*nent;

	switch (function) {
	case 0:
		entry->eax = min(entry->eax, (u32)0xb);
		break;
	case 1:
		entry->edx &= kvm_supported_word0_x86_features;
		entry->ecx &= kvm_supported_word3_x86_features;
		break;
	/* function 2 entries are STATEFUL. That is, repeated cpuid commands
	 * may return different values. This forces us to get_cpu() before
	 * issuing the first command, and also to emulate this annoying behavior
	 * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
	case 2: {
		int t, times = entry->eax & 0xff;

		entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
		for (t = 1; t < times && *nent < maxnent; ++t) {
			do_cpuid_1_ent(&entry[t], function, 0);
			entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
			++*nent;
		}
		break;
	}
	/* function 4 and 0xb have additional index. */
	case 4: {
		int index, cache_type;

		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		/* read more entries until cache_type is zero */
		for (index = 1; *nent < maxnent; ++index) {
			cache_type = entry[index - 1].eax & 0x1f;
			if (!cache_type)
				break;
			do_cpuid_1_ent(&entry[index], function, index);
			entry[index].flags |=
				KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
			++*nent;
		}
		break;
	}
	case 0xb: {
		int index, level_type;

		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		/* read more entries until level_type is zero */
		for (index = 1; *nent < maxnent; ++index) {
			level_type = entry[index - 1].ecx & 0xff;
			if (!level_type)
				break;
			do_cpuid_1_ent(&entry[index], function, index);
			entry[index].flags |=
				KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
			++*nent;
		}
		break;
	}
	case 0x80000000:
		entry->eax = min(entry->eax, 0x8000001a);
		break;
	case 0x80000001:
		entry->edx &= kvm_supported_word1_x86_features;
		entry->ecx &= kvm_supported_word6_x86_features;
		break;
	}
	put_cpu();
}

static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
				    struct kvm_cpuid_entry2 __user *entries)
{
	struct kvm_cpuid_entry2 *cpuid_entries;
	int limit, nent = 0, r = -E2BIG;
	u32 func;

	if (cpuid->nent < 1)
		goto out;
	r = -ENOMEM;
	cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
	if (!cpuid_entries)
		goto out;

	do_cpuid_ent(&cpuid_entries[0], 0, 0, &nent, cpuid->nent);
	limit = cpuid_entries[0].eax;
	for (func = 1; func <= limit && nent < cpuid->nent; ++func)
		do_cpuid_ent(&cpuid_entries[nent], func, 0,
			     &nent, cpuid->nent);
	r = -E2BIG;
	if (nent >= cpuid->nent)
		goto out_free;

	do_cpuid_ent(&cpuid_entries[nent], 0x80000000, 0, &nent, cpuid->nent);
	limit = cpuid_entries[nent - 1].eax;
	for (func = 0x80000001; func <= limit && nent < cpuid->nent; ++func)
		do_cpuid_ent(&cpuid_entries[nent], func, 0,
			     &nent, cpuid->nent);
	r = -EFAULT;
	if (copy_to_user(entries, cpuid_entries,
			 nent * sizeof(struct kvm_cpuid_entry2)))
		goto out_free;
	cpuid->nent = nent;
	r = 0;

out_free:
	vfree(cpuid_entries);
out:
	return r;
}

static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
				    struct kvm_lapic_state *s)
{
	vcpu_load(vcpu);
	memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s);
	vcpu_put(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
				    struct kvm_lapic_state *s)
{
	vcpu_load(vcpu);
	memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s);
	kvm_apic_post_state_restore(vcpu);
	vcpu_put(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
				    struct kvm_interrupt *irq)
{
	if (irq->irq < 0 || irq->irq >= 256)
		return -EINVAL;
	if (irqchip_in_kernel(vcpu->kvm))
		return -ENXIO;
	vcpu_load(vcpu);

	set_bit(irq->irq, vcpu->arch.irq_pending);
	set_bit(irq->irq / BITS_PER_LONG, &vcpu->arch.irq_summary);

	vcpu_put(vcpu);

	return 0;
}

static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
					   struct kvm_tpr_access_ctl *tac)
{
	if (tac->flags)
		return -EINVAL;
	vcpu->arch.tpr_access_reporting = !!tac->enabled;
	return 0;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_GET_LAPIC: {
		struct kvm_lapic_state lapic;

		memset(&lapic, 0, sizeof lapic);
		r = kvm_vcpu_ioctl_get_lapic(vcpu, &lapic);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &lapic, sizeof lapic))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_LAPIC: {
		struct kvm_lapic_state lapic;

		r = -EFAULT;
		if (copy_from_user(&lapic, argp, sizeof lapic))
			goto out;
		r = kvm_vcpu_ioctl_set_lapic(vcpu, &lapic);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;

		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof irq))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_CPUID: {
		struct kvm_cpuid __user *cpuid_arg = argp;
		struct kvm_cpuid cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
		if (r)
			goto out;
		break;
	}
	case KVM_SET_CPUID2: {
		struct kvm_cpuid2 __user *cpuid_arg = argp;
		struct kvm_cpuid2 cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
					      cpuid_arg->entries);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_CPUID2: {
		struct kvm_cpuid2 __user *cpuid_arg = argp;
		struct kvm_cpuid2 cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
					      cpuid_arg->entries);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_MSRS:
		r = msr_io(vcpu, argp, kvm_get_msr, 1);
		break;
	case KVM_SET_MSRS:
		r = msr_io(vcpu, argp, do_set_msr, 0);
		break;
	case KVM_TPR_ACCESS_REPORTING: {
		struct kvm_tpr_access_ctl tac;

		r = -EFAULT;
		if (copy_from_user(&tac, argp, sizeof tac))
			goto out;
		r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tac, sizeof tac))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_VAPIC_ADDR: {
		struct kvm_vapic_addr va;

		r = -EINVAL;
		if (!irqchip_in_kernel(vcpu->kvm))
			goto out;
		r = -EFAULT;
		if (copy_from_user(&va, argp, sizeof va))
			goto out;
		r = 0;
		kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
		break;
	}
	default:
		r = -EINVAL;
	}
out:
	return r;
}

static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
{
	int ret;

	if (addr > (unsigned int)(-3 * PAGE_SIZE))
		return -1;
	ret = kvm_x86_ops->set_tss_addr(kvm, addr);
	return ret;
}

static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
					 u32 kvm_nr_mmu_pages)
{
	if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
		return -EINVAL;

	down_write(&kvm->slots_lock);

	kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
	kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;

	up_write(&kvm->slots_lock);
	return 0;
}

static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
{
	return kvm->arch.n_alloc_mmu_pages;
}

gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;
	struct kvm_mem_alias *alias;

	for (i = 0; i < kvm->arch.naliases; ++i) {
		alias = &kvm->arch.aliases[i];
		if (gfn >= alias->base_gfn
		    && gfn < alias->base_gfn + alias->npages)
			return alias->target_gfn + gfn - alias->base_gfn;
	}
	return gfn;
}

/*
 * Set a new alias region.  Aliases map a portion of physical memory into
 * another portion.  This is useful for memory windows, for example the PC
 * VGA region.
 */
static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
					 struct kvm_memory_alias *alias)
{
	int r, n;
	struct kvm_mem_alias *p;

	r = -EINVAL;
	/* General sanity checks */
	if (alias->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (alias->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (alias->slot >= KVM_ALIAS_SLOTS)
		goto out;
	if (alias->guest_phys_addr + alias->memory_size
	    < alias->guest_phys_addr)
		goto out;
	if (alias->target_phys_addr + alias->memory_size
	    < alias->target_phys_addr)
		goto out;

	down_write(&kvm->slots_lock);

	p = &kvm->arch.aliases[alias->slot];
	p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
	p->npages = alias->memory_size >> PAGE_SHIFT;
	p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;

	for (n = KVM_ALIAS_SLOTS; n > 0; --n)
		if (kvm->arch.aliases[n - 1].npages)
			break;
	kvm->arch.naliases = n;

	kvm_mmu_zap_all(kvm);

	up_write(&kvm->slots_lock);

	return 0;

out:
	return r;
}

static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{
	int r;

	r = 0;
	switch (chip->chip_id) {
	case KVM_IRQCHIP_PIC_MASTER:
		memcpy(&chip->chip.pic,
		       &pic_irqchip(kvm)->pics[0],
		       sizeof(struct kvm_pic_state));
		break;
	case KVM_IRQCHIP_PIC_SLAVE:
		memcpy(&chip->chip.pic,
		       &pic_irqchip(kvm)->pics[1],
		       sizeof(struct kvm_pic_state));
		break;
	case KVM_IRQCHIP_IOAPIC:
		memcpy(&chip->chip.ioapic,
		       ioapic_irqchip(kvm),
		       sizeof(struct kvm_ioapic_state));
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{
	int r;

	r = 0;
	switch (chip->chip_id) {
	case KVM_IRQCHIP_PIC_MASTER:
		memcpy(&pic_irqchip(kvm)->pics[0],
		       &chip->chip.pic,
		       sizeof(struct kvm_pic_state));
		break;
	case KVM_IRQCHIP_PIC_SLAVE:
		memcpy(&pic_irqchip(kvm)->pics[1],
		       &chip->chip.pic,
		       sizeof(struct kvm_pic_state));
		break;
	case KVM_IRQCHIP_IOAPIC:
		memcpy(ioapic_irqchip(kvm),
		       &chip->chip.ioapic,
		       sizeof(struct kvm_ioapic_state));
		break;
	default:
		r = -EINVAL;
		break;
	}
	kvm_pic_update_irq(pic_irqchip(kvm));
	return r;
}

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	int n;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	down_write(&kvm->slots_lock);

	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (is_dirty) {
		kvm_mmu_slot_remove_write_access(kvm, log->slot);
		kvm_flush_remote_tlbs(kvm);
		memslot = &kvm->memslots[log->slot];
		n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	up_write(&kvm->slots_lock);
	return r;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r = -EINVAL;

	switch (ioctl) {
	case KVM_SET_TSS_ADDR:
		r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
		if (r < 0)
			goto out;
		break;
	case KVM_SET_MEMORY_REGION: {
		struct kvm_memory_region kvm_mem;
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
			goto out;
		kvm_userspace_mem.slot = kvm_mem.slot;
		kvm_userspace_mem.flags = kvm_mem.flags;
		kvm_userspace_mem.guest_phys_addr = kvm_mem.guest_phys_addr;
		kvm_userspace_mem.memory_size = kvm_mem.memory_size;
		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 0);
		if (r)
			goto out;
		break;
	}
	case KVM_SET_NR_MMU_PAGES:
		r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
		if (r)
			goto out;
		break;
	case KVM_GET_NR_MMU_PAGES:
		r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
		break;
	case KVM_SET_MEMORY_ALIAS: {
		struct kvm_memory_alias alias;

		r = -EFAULT;
		if (copy_from_user(&alias, argp, sizeof alias))
			goto out;
		r = kvm_vm_ioctl_set_memory_alias(kvm, &alias);
		if (r)
			goto out;
		break;
	}
	case KVM_CREATE_IRQCHIP:
		r = -ENOMEM;
		kvm->arch.vpic = kvm_create_pic(kvm);
		if (kvm->arch.vpic) {
			r = kvm_ioapic_init(kvm);
			if (r) {
				kfree(kvm->arch.vpic);
				kvm->arch.vpic = NULL;
				goto out;
			}
		} else
			goto out;
		break;
	case KVM_IRQ_LINE: {
		struct kvm_irq_level irq_event;

		r = -EFAULT;
		if (copy_from_user(&irq_event, argp, sizeof irq_event))
			goto out;
		if (irqchip_in_kernel(kvm)) {
			mutex_lock(&kvm->lock);
			if (irq_event.irq < 16)
				kvm_pic_set_irq(pic_irqchip(kvm),
					irq_event.irq,
					irq_event.level);
			kvm_ioapic_set_irq(kvm->arch.vioapic,
					irq_event.irq,
					irq_event.level);
			mutex_unlock(&kvm->lock);
			r = 0;
		}
		break;
	}
	case KVM_GET_IRQCHIP: {
		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
		struct kvm_irqchip chip;

		r = -EFAULT;
		if (copy_from_user(&chip, argp, sizeof chip))
			goto out;
		r = -ENXIO;
		if (!irqchip_in_kernel(kvm))
			goto out;
		r = kvm_vm_ioctl_get_irqchip(kvm, &chip);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &chip, sizeof chip))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_IRQCHIP: {
		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
		struct kvm_irqchip chip;

		r = -EFAULT;
		if (copy_from_user(&chip, argp, sizeof chip))
			goto out;
		r = -ENXIO;
		if (!irqchip_in_kernel(kvm))
			goto out;
		r = kvm_vm_ioctl_set_irqchip(kvm, &chip);
		if (r)
			goto out;
		r = 0;
		break;
	}
	default:
		;
	}
out:
	return r;
}

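/*
 * Compact msrs_to_save in place, keeping only MSRs the host actually
 * implements: rdmsr_safe() absorbs the #GP an unimplemented MSR would
 * raise, and such entries are dropped from the list.
 */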
static void kvm_init_msr_list(void)
{
	u32 dummy[2];
	unsigned i, j;

	for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
		if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
			continue;
		if (j < i)
			msrs_to_save[j] = msrs_to_save[i];
		j++;
	}
	num_msrs_to_save = j;
}

/*
 * Only the apic needs an MMIO device hook, so shortcut now.
 */
static struct kvm_io_device *vcpu_find_pervcpu_dev(struct kvm_vcpu *vcpu,
						   gpa_t addr)
{
	struct kvm_io_device *dev;

	if (vcpu->arch.apic) {
		dev = &vcpu->arch.apic->dev;
		if (dev->in_range(dev, addr))
			return dev;
	}
	return NULL;
}


static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
						gpa_t addr)
{
	struct kvm_io_device *dev;

	dev = vcpu_find_pervcpu_dev(vcpu, addr);
	if (dev == NULL)
		dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr);
	return dev;
}

int emulator_read_std(unsigned long addr,
		      void *val,
		      unsigned int bytes,
		      struct kvm_vcpu *vcpu)
{
	void *data = val;
	int r = X86EMUL_CONTINUE;

	down_read(&vcpu->kvm->slots_lock);
	while (bytes) {
		gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
		unsigned offset = addr & (PAGE_SIZE-1);
		unsigned tocopy = min(bytes, (unsigned)PAGE_SIZE - offset);
		int ret;

		if (gpa == UNMAPPED_GVA) {
			r = X86EMUL_PROPAGATE_FAULT;
			goto out;
		}
		ret = kvm_read_guest(vcpu->kvm, gpa, data, tocopy);
		if (ret < 0) {
			r = X86EMUL_UNHANDLEABLE;
			goto out;
		}

		bytes -= tocopy;
		data += tocopy;
		addr += tocopy;
	}
out:
	up_read(&vcpu->kvm->slots_lock);
	return r;
}
EXPORT_SYMBOL_GPL(emulator_read_std);

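/*
 * Emulator read callback.  Order of attempts: data from a previously
 * completed KVM_EXIT_MMIO round trip, then ordinary guest memory, then
 * an in-kernel MMIO device; whatever remains is punted to userspace as
 * an mmio exit (X86EMUL_UNHANDLEABLE with vcpu->mmio_needed set).
 */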
static int emulator_read_emulated(unsigned long addr,
				  void *val,
				  unsigned int bytes,
				  struct kvm_vcpu *vcpu)
{
	struct kvm_io_device *mmio_dev;
	gpa_t gpa;

	if (vcpu->mmio_read_completed) {
		memcpy(val, vcpu->mmio_data, bytes);
		vcpu->mmio_read_completed = 0;
		return X86EMUL_CONTINUE;
	}

	down_read(&vcpu->kvm->slots_lock);
	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
	up_read(&vcpu->kvm->slots_lock);

	/* For APIC access vmexit */
	if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
		goto mmio;

	if (emulator_read_std(addr, val, bytes, vcpu)
			== X86EMUL_CONTINUE)
		return X86EMUL_CONTINUE;
	if (gpa == UNMAPPED_GVA)
		return X86EMUL_PROPAGATE_FAULT;

mmio:
	/*
	 * Is this MMIO handled locally?
	 */
	mutex_lock(&vcpu->kvm->lock);
	mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
	if (mmio_dev) {
		kvm_iodevice_read(mmio_dev, gpa, bytes, val);
		mutex_unlock(&vcpu->kvm->lock);
		return X86EMUL_CONTINUE;
	}
	mutex_unlock(&vcpu->kvm->lock);

	vcpu->mmio_needed = 1;
	vcpu->mmio_phys_addr = gpa;
	vcpu->mmio_size = bytes;
	vcpu->mmio_is_write = 0;

	return X86EMUL_UNHANDLEABLE;
}

1663
1664static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
1665 const void *val, int bytes)
1666{
1667 int ret;
1668
72dc67a6 1669 down_read(&vcpu->kvm->slots_lock);
bbd9b64e 1670 ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
10589a46 1671 if (ret < 0) {
72dc67a6 1672 up_read(&vcpu->kvm->slots_lock);
bbd9b64e 1673 return 0;
10589a46 1674 }
bbd9b64e 1675 kvm_mmu_pte_write(vcpu, gpa, val, bytes);
72dc67a6 1676 up_read(&vcpu->kvm->slots_lock);
bbd9b64e
CO
1677 return 1;
1678}
1679
static int emulator_write_emulated_onepage(unsigned long addr,
					   const void *val,
					   unsigned int bytes,
					   struct kvm_vcpu *vcpu)
{
	struct kvm_io_device *mmio_dev;
	gpa_t gpa;

	down_read(&vcpu->kvm->slots_lock);
	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
	up_read(&vcpu->kvm->slots_lock);

	if (gpa == UNMAPPED_GVA) {
		kvm_inject_page_fault(vcpu, addr, 2);
		return X86EMUL_PROPAGATE_FAULT;
	}

	/* For APIC access vmexit */
	if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
		goto mmio;

	if (emulator_write_phys(vcpu, gpa, val, bytes))
		return X86EMUL_CONTINUE;

mmio:
	/*
	 * Is this MMIO handled locally?
	 */
	mutex_lock(&vcpu->kvm->lock);
	mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
	if (mmio_dev) {
		kvm_iodevice_write(mmio_dev, gpa, bytes, val);
		mutex_unlock(&vcpu->kvm->lock);
		return X86EMUL_CONTINUE;
	}
	mutex_unlock(&vcpu->kvm->lock);

	vcpu->mmio_needed = 1;
	vcpu->mmio_phys_addr = gpa;
	vcpu->mmio_size = bytes;
	vcpu->mmio_is_write = 1;
	memcpy(vcpu->mmio_data, val, bytes);

	return X86EMUL_CONTINUE;
}

1725
1726int emulator_write_emulated(unsigned long addr,
1727 const void *val,
1728 unsigned int bytes,
1729 struct kvm_vcpu *vcpu)
1730{
1731 /* Crossing a page boundary? */
1732 if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
1733 int rc, now;
1734
1735 now = -addr & ~PAGE_MASK;
1736 rc = emulator_write_emulated_onepage(addr, val, now, vcpu);
1737 if (rc != X86EMUL_CONTINUE)
1738 return rc;
1739 addr += now;
1740 val += now;
1741 bytes -= now;
1742 }
1743 return emulator_write_emulated_onepage(addr, val, bytes, vcpu);
1744}
1745EXPORT_SYMBOL_GPL(emulator_write_emulated);
1746
static int emulator_cmpxchg_emulated(unsigned long addr,
				     const void *old,
				     const void *new,
				     unsigned int bytes,
				     struct kvm_vcpu *vcpu)
{
	static int reported;

	if (!reported) {
		reported = 1;
		printk(KERN_WARNING "kvm: emulating exchange as write\n");
	}
#ifndef CONFIG_X86_64
	/* guest cmpxchg8b has to be emulated atomically */
	if (bytes == 8) {
		gpa_t gpa;
		struct page *page;
		char *kaddr;
		u64 val;

		down_read(&vcpu->kvm->slots_lock);
		gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);

		if (gpa == UNMAPPED_GVA ||
		    (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
			goto emul_write;

		if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
			goto emul_write;

		val = *(u64 *)new;

		down_read(&current->mm->mmap_sem);
		page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
		up_read(&current->mm->mmap_sem);

		kaddr = kmap_atomic(page, KM_USER0);
		set_64bit((u64 *)(kaddr + offset_in_page(gpa)), val);
		kunmap_atomic(kaddr, KM_USER0);
		kvm_release_page_dirty(page);
	emul_write:
		up_read(&vcpu->kvm->slots_lock);
	}
#endif

	return emulator_write_emulated(addr, new, bytes, vcpu);
}

static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	return kvm_x86_ops->get_segment_base(vcpu, seg);
}

int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
{
	return X86EMUL_CONTINUE;
}

int emulate_clts(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 & ~X86_CR0_TS);
	return X86EMUL_CONTINUE;
}

int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch (dr) {
	case 0 ... 3:
		*dest = kvm_x86_ops->get_dr(vcpu, dr);
		return X86EMUL_CONTINUE;
	default:
		pr_unimpl(vcpu, "%s: unexpected dr %u\n", __FUNCTION__, dr);
		return X86EMUL_UNHANDLEABLE;
	}
}

int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
{
	unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
	int exception;

	kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
	if (exception) {
		/* FIXME: better handling */
		return X86EMUL_UNHANDLEABLE;
	}
	return X86EMUL_CONTINUE;
}

void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
{
	static int reported;
	u8 opcodes[4];
	unsigned long rip = vcpu->arch.rip;
	unsigned long rip_linear;

	rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);

	if (reported)
		return;

	emulator_read_std(rip_linear, (void *)opcodes, 4, vcpu);

	printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n",
	       context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
	reported = 1;
}
EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);

struct x86_emulate_ops emulate_ops = {
	.read_std            = emulator_read_std,
	.read_emulated       = emulator_read_emulated,
	.write_emulated      = emulator_write_emulated,
	.cmpxchg_emulated    = emulator_cmpxchg_emulated,
};

int emulate_instruction(struct kvm_vcpu *vcpu,
			struct kvm_run *run,
			unsigned long cr2,
			u16 error_code,
			int emulation_type)
{
	int r;
	struct decode_cache *c;

	vcpu->arch.mmio_fault_cr2 = cr2;
	kvm_x86_ops->cache_regs(vcpu);

	vcpu->mmio_is_write = 0;
	vcpu->arch.pio.string = 0;

	if (!(emulation_type & EMULTYPE_NO_DECODE)) {
		int cs_db, cs_l;
		kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);

		vcpu->arch.emulate_ctxt.vcpu = vcpu;
		vcpu->arch.emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
		vcpu->arch.emulate_ctxt.mode =
			(vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
			? X86EMUL_MODE_REAL : cs_l
			? X86EMUL_MODE_PROT64 :	cs_db
			? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;

		if (vcpu->arch.emulate_ctxt.mode == X86EMUL_MODE_PROT64) {
			vcpu->arch.emulate_ctxt.cs_base = 0;
			vcpu->arch.emulate_ctxt.ds_base = 0;
			vcpu->arch.emulate_ctxt.es_base = 0;
			vcpu->arch.emulate_ctxt.ss_base = 0;
		} else {
			vcpu->arch.emulate_ctxt.cs_base =
					get_segment_base(vcpu, VCPU_SREG_CS);
			vcpu->arch.emulate_ctxt.ds_base =
					get_segment_base(vcpu, VCPU_SREG_DS);
			vcpu->arch.emulate_ctxt.es_base =
					get_segment_base(vcpu, VCPU_SREG_ES);
			vcpu->arch.emulate_ctxt.ss_base =
					get_segment_base(vcpu, VCPU_SREG_SS);
		}

		vcpu->arch.emulate_ctxt.gs_base =
				get_segment_base(vcpu, VCPU_SREG_GS);
		vcpu->arch.emulate_ctxt.fs_base =
				get_segment_base(vcpu, VCPU_SREG_FS);

		r = x86_decode_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);

		/* Reject instructions other than VMCALL/VMMCALL when
		 * trying to emulate an invalid opcode. */
		c = &vcpu->arch.emulate_ctxt.decode;
		if ((emulation_type & EMULTYPE_TRAP_UD) &&
		    (!(c->twobyte && c->b == 0x01 &&
		      (c->modrm_reg == 0 || c->modrm_reg == 3) &&
		       c->modrm_mod == 3 && c->modrm_rm == 1)))
			return EMULATE_FAIL;

		++vcpu->stat.insn_emulation;
		if (r) {
			++vcpu->stat.insn_emulation_fail;
			if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
				return EMULATE_DONE;
			return EMULATE_FAIL;
		}
	}

	r = x86_emulate_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);

	if (vcpu->arch.pio.string)
		return EMULATE_DO_MMIO;

	if ((r || vcpu->mmio_is_write) && run) {
		run->exit_reason = KVM_EXIT_MMIO;
		run->mmio.phys_addr = vcpu->mmio_phys_addr;
		memcpy(run->mmio.data, vcpu->mmio_data, 8);
		run->mmio.len = vcpu->mmio_size;
		run->mmio.is_write = vcpu->mmio_is_write;
	}

	if (r) {
		if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
			return EMULATE_DONE;
		if (!vcpu->mmio_needed) {
			kvm_report_emulation_failure(vcpu, "mmio");
			return EMULATE_FAIL;
		}
		return EMULATE_DO_MMIO;
	}

	kvm_x86_ops->decache_regs(vcpu);
	kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);

	if (vcpu->mmio_is_write) {
		vcpu->mmio_needed = 0;
		return EMULATE_DO_MMIO;
	}

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(emulate_instruction);

static void free_pio_guest_pages(struct kvm_vcpu *vcpu)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vcpu->arch.pio.guest_pages); ++i)
		if (vcpu->arch.pio.guest_pages[i]) {
			kvm_release_page_dirty(vcpu->arch.pio.guest_pages[i]);
			vcpu->arch.pio.guest_pages[i] = NULL;
		}
}

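/*
 * Copy string-I/O data between the per-vcpu pio_data page and the
 * guest pages backing the string.  Up to two guest pages are vmap()ed
 * because the run may straddle a page boundary.
 */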
1979static int pio_copy_data(struct kvm_vcpu *vcpu)
1980{
ad312c7c 1981 void *p = vcpu->arch.pio_data;
de7d789a
CO
1982 void *q;
1983 unsigned bytes;
ad312c7c 1984 int nr_pages = vcpu->arch.pio.guest_pages[1] ? 2 : 1;
de7d789a 1985
ad312c7c 1986 q = vmap(vcpu->arch.pio.guest_pages, nr_pages, VM_READ|VM_WRITE,
de7d789a
CO
1987 PAGE_KERNEL);
1988 if (!q) {
1989 free_pio_guest_pages(vcpu);
1990 return -ENOMEM;
1991 }
ad312c7c
ZX
1992 q += vcpu->arch.pio.guest_page_offset;
1993 bytes = vcpu->arch.pio.size * vcpu->arch.pio.cur_count;
1994 if (vcpu->arch.pio.in)
de7d789a
CO
1995 memcpy(q, p, bytes);
1996 else
1997 memcpy(p, q, bytes);
ad312c7c 1998 q -= vcpu->arch.pio.guest_page_offset;
de7d789a
CO
1999 vunmap(q);
2000 free_pio_guest_pages(vcpu);
2001 return 0;
2002}
2003
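/*
 * Finish a PIO request once the data is available: for a single "in",
 * load the result into RAX; for string I/O, copy the data and advance
 * RSI/RDI (and RCX for rep) the way the ins/outs instruction would,
 * honoring the direction flag.
 */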
2004int complete_pio(struct kvm_vcpu *vcpu)
2005{
ad312c7c 2006 struct kvm_pio_request *io = &vcpu->arch.pio;
de7d789a
CO
2007 long delta;
2008 int r;
2009
2010 kvm_x86_ops->cache_regs(vcpu);
2011
2012 if (!io->string) {
2013 if (io->in)
ad312c7c 2014 memcpy(&vcpu->arch.regs[VCPU_REGS_RAX], vcpu->arch.pio_data,
de7d789a
CO
2015 io->size);
2016 } else {
2017 if (io->in) {
2018 r = pio_copy_data(vcpu);
2019 if (r) {
2020 kvm_x86_ops->cache_regs(vcpu);
2021 return r;
2022 }
2023 }
2024
2025 delta = 1;
2026 if (io->rep) {
2027 delta *= io->cur_count;
2028 /*
2029 * The size of the register should really depend on
 2030	 * the current address size.
2031 */
ad312c7c 2032 vcpu->arch.regs[VCPU_REGS_RCX] -= delta;
de7d789a
CO
2033 }
2034 if (io->down)
2035 delta = -delta;
2036 delta *= io->size;
2037 if (io->in)
ad312c7c 2038 vcpu->arch.regs[VCPU_REGS_RDI] += delta;
de7d789a 2039 else
ad312c7c 2040 vcpu->arch.regs[VCPU_REGS_RSI] += delta;
de7d789a
CO
2041 }
2042
2043 kvm_x86_ops->decache_regs(vcpu);
2044
2045 io->count -= io->cur_count;
2046 io->cur_count = 0;
2047
2048 return 0;
2049}
2050
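/*
 * Hand a single (non-string) PIO transaction to an in-kernel device,
 * under the kvm lock.
 */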
2051static void kernel_pio(struct kvm_io_device *pio_dev,
2052 struct kvm_vcpu *vcpu,
2053 void *pd)
2054{
 2055	/* TODO: string I/O for in-kernel devices */
2056
2057 mutex_lock(&vcpu->kvm->lock);
ad312c7c
ZX
2058 if (vcpu->arch.pio.in)
2059 kvm_iodevice_read(pio_dev, vcpu->arch.pio.port,
2060 vcpu->arch.pio.size,
de7d789a
CO
2061 pd);
2062 else
ad312c7c
ZX
2063 kvm_iodevice_write(pio_dev, vcpu->arch.pio.port,
2064 vcpu->arch.pio.size,
de7d789a
CO
2065 pd);
2066 mutex_unlock(&vcpu->kvm->lock);
2067}
2068
2069static void pio_string_write(struct kvm_io_device *pio_dev,
2070 struct kvm_vcpu *vcpu)
2071{
ad312c7c
ZX
2072 struct kvm_pio_request *io = &vcpu->arch.pio;
2073 void *pd = vcpu->arch.pio_data;
de7d789a
CO
2074 int i;
2075
2076 mutex_lock(&vcpu->kvm->lock);
2077 for (i = 0; i < io->cur_count; i++) {
2078 kvm_iodevice_write(pio_dev, io->port,
2079 io->size,
2080 pd);
2081 pd += io->size;
2082 }
2083 mutex_unlock(&vcpu->kvm->lock);
2084}
2085
2086static struct kvm_io_device *vcpu_find_pio_dev(struct kvm_vcpu *vcpu,
2087 gpa_t addr)
2088{
2089 return kvm_io_bus_find_dev(&vcpu->kvm->pio_bus, addr);
2090}
2091
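/*
 * Emulate a single in/out instruction.  A KVM_EXIT_IO is always set up
 * in the run structure; returns 1 if an in-kernel device handled the
 * port (no userspace exit needed), 0 otherwise.
 */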
2092int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
2093 int size, unsigned port)
2094{
2095 struct kvm_io_device *pio_dev;
2096
2097 vcpu->run->exit_reason = KVM_EXIT_IO;
2098 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
ad312c7c 2099 vcpu->run->io.size = vcpu->arch.pio.size = size;
de7d789a 2100 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
ad312c7c
ZX
2101 vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = 1;
2102 vcpu->run->io.port = vcpu->arch.pio.port = port;
2103 vcpu->arch.pio.in = in;
2104 vcpu->arch.pio.string = 0;
2105 vcpu->arch.pio.down = 0;
2106 vcpu->arch.pio.guest_page_offset = 0;
2107 vcpu->arch.pio.rep = 0;
de7d789a
CO
2108
2109 kvm_x86_ops->cache_regs(vcpu);
ad312c7c 2110 memcpy(vcpu->arch.pio_data, &vcpu->arch.regs[VCPU_REGS_RAX], 4);
de7d789a
CO
2111 kvm_x86_ops->decache_regs(vcpu);
2112
2113 kvm_x86_ops->skip_emulated_instruction(vcpu);
2114
2115 pio_dev = vcpu_find_pio_dev(vcpu, port);
2116 if (pio_dev) {
ad312c7c 2117 kernel_pio(pio_dev, vcpu, vcpu->arch.pio_data);
de7d789a
CO
2118 complete_pio(vcpu);
2119 return 1;
2120 }
2121 return 0;
2122}
2123EXPORT_SYMBOL_GPL(kvm_emulate_pio);
2124
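/*
 * Emulate ins/outs.  At most one transaction is performed per call; a
 * page-crossing transaction pins two guest pages to stay atomic, and
 * reverse (down) string I/O is unimplemented and injects #GP.
 */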
2125int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
2126 int size, unsigned long count, int down,
2127 gva_t address, int rep, unsigned port)
2128{
2129 unsigned now, in_page;
2130 int i, ret = 0;
2131 int nr_pages = 1;
2132 struct page *page;
2133 struct kvm_io_device *pio_dev;
2134
2135 vcpu->run->exit_reason = KVM_EXIT_IO;
2136 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
ad312c7c 2137 vcpu->run->io.size = vcpu->arch.pio.size = size;
de7d789a 2138 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
ad312c7c
ZX
2139 vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = count;
2140 vcpu->run->io.port = vcpu->arch.pio.port = port;
2141 vcpu->arch.pio.in = in;
2142 vcpu->arch.pio.string = 1;
2143 vcpu->arch.pio.down = down;
2144 vcpu->arch.pio.guest_page_offset = offset_in_page(address);
2145 vcpu->arch.pio.rep = rep;
de7d789a
CO
2146
2147 if (!count) {
2148 kvm_x86_ops->skip_emulated_instruction(vcpu);
2149 return 1;
2150 }
2151
2152 if (!down)
2153 in_page = PAGE_SIZE - offset_in_page(address);
2154 else
2155 in_page = offset_in_page(address) + size;
2156 now = min(count, (unsigned long)in_page / size);
2157 if (!now) {
2158 /*
2159 * String I/O straddles page boundary. Pin two guest pages
2160 * so that we satisfy atomicity constraints. Do just one
2161 * transaction to avoid complexity.
2162 */
2163 nr_pages = 2;
2164 now = 1;
2165 }
2166 if (down) {
2167 /*
2168 * String I/O in reverse. Yuck. Kill the guest, fix later.
2169 */
2170 pr_unimpl(vcpu, "guest string pio down\n");
c1a5d4f9 2171 kvm_inject_gp(vcpu, 0);
de7d789a
CO
2172 return 1;
2173 }
2174 vcpu->run->io.count = now;
ad312c7c 2175 vcpu->arch.pio.cur_count = now;
de7d789a 2176
ad312c7c 2177 if (vcpu->arch.pio.cur_count == vcpu->arch.pio.count)
de7d789a
CO
2178 kvm_x86_ops->skip_emulated_instruction(vcpu);
2179
2180 for (i = 0; i < nr_pages; ++i) {
72dc67a6 2181 down_read(&vcpu->kvm->slots_lock);
de7d789a 2182 page = gva_to_page(vcpu, address + i * PAGE_SIZE);
ad312c7c 2183 vcpu->arch.pio.guest_pages[i] = page;
72dc67a6 2184 up_read(&vcpu->kvm->slots_lock);
de7d789a 2185 if (!page) {
c1a5d4f9 2186 kvm_inject_gp(vcpu, 0);
de7d789a
CO
2187 free_pio_guest_pages(vcpu);
2188 return 1;
2189 }
2190 }
2191
2192 pio_dev = vcpu_find_pio_dev(vcpu, port);
ad312c7c 2193 if (!vcpu->arch.pio.in) {
de7d789a
CO
2194 /* string PIO write */
2195 ret = pio_copy_data(vcpu);
2196 if (ret >= 0 && pio_dev) {
2197 pio_string_write(pio_dev, vcpu);
2198 complete_pio(vcpu);
ad312c7c 2199 if (vcpu->arch.pio.count == 0)
de7d789a
CO
2200 ret = 1;
2201 }
2202 } else if (pio_dev)
2203 pr_unimpl(vcpu, "no string pio read support yet, "
2204 "port %x size %d count %ld\n",
2205 port, size, count);
2206
2207 return ret;
2208}
2209EXPORT_SYMBOL_GPL(kvm_emulate_pio_string);
2210
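/*
 * Called when a vendor module (kvm-intel/kvm-amd) registers: verify
 * hardware support, initialize the MMU module and the MSR list, then
 * publish the vendor's ops table in kvm_x86_ops.
 */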
f8c16bba 2211int kvm_arch_init(void *opaque)
043405e1 2212{
56c6d28a 2213 int r;
f8c16bba
ZX
2214 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
2215
f8c16bba
ZX
2216 if (kvm_x86_ops) {
2217 printk(KERN_ERR "kvm: already loaded the other module\n");
56c6d28a
ZX
2218 r = -EEXIST;
2219 goto out;
f8c16bba
ZX
2220 }
2221
2222 if (!ops->cpu_has_kvm_support()) {
2223 printk(KERN_ERR "kvm: no hardware support\n");
56c6d28a
ZX
2224 r = -EOPNOTSUPP;
2225 goto out;
f8c16bba
ZX
2226 }
2227 if (ops->disabled_by_bios()) {
2228 printk(KERN_ERR "kvm: disabled by bios\n");
56c6d28a
ZX
2229 r = -EOPNOTSUPP;
2230 goto out;
f8c16bba
ZX
2231 }
2232
97db56ce
AK
2233 r = kvm_mmu_module_init();
2234 if (r)
2235 goto out;
2236
2237 kvm_init_msr_list();
2238
f8c16bba 2239 kvm_x86_ops = ops;
56c6d28a 2240 kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
f8c16bba 2241 return 0;
56c6d28a
ZX
2242
2243out:
56c6d28a 2244 return r;
043405e1 2245}
8776e519 2246
f8c16bba
ZX
2247void kvm_arch_exit(void)
2248{
2249 kvm_x86_ops = NULL;
56c6d28a
ZX
2250 kvm_mmu_module_exit();
2251}
f8c16bba 2252
8776e519
HB
2253int kvm_emulate_halt(struct kvm_vcpu *vcpu)
2254{
2255 ++vcpu->stat.halt_exits;
2256 if (irqchip_in_kernel(vcpu->kvm)) {
ad312c7c 2257 vcpu->arch.mp_state = VCPU_MP_STATE_HALTED;
8776e519 2258 kvm_vcpu_block(vcpu);
ad312c7c 2259 if (vcpu->arch.mp_state != VCPU_MP_STATE_RUNNABLE)
8776e519
HB
2260 return -EINTR;
2261 return 1;
2262 } else {
2263 vcpu->run->exit_reason = KVM_EXIT_HLT;
2264 return 0;
2265 }
2266}
2267EXPORT_SYMBOL_GPL(kvm_emulate_halt);
2268
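/*
 * Dispatch a guest hypercall: the number arrives in RAX and up to four
 * arguments in RBX/RCX/RDX/RSI; the result goes back in RAX.  Outside
 * long mode all values are truncated to 32 bits.
 */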
2269int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
2270{
2271 unsigned long nr, a0, a1, a2, a3, ret;
2272
2273 kvm_x86_ops->cache_regs(vcpu);
2274
ad312c7c
ZX
2275 nr = vcpu->arch.regs[VCPU_REGS_RAX];
2276 a0 = vcpu->arch.regs[VCPU_REGS_RBX];
2277 a1 = vcpu->arch.regs[VCPU_REGS_RCX];
2278 a2 = vcpu->arch.regs[VCPU_REGS_RDX];
2279 a3 = vcpu->arch.regs[VCPU_REGS_RSI];
8776e519
HB
2280
2281 if (!is_long_mode(vcpu)) {
2282 nr &= 0xFFFFFFFF;
2283 a0 &= 0xFFFFFFFF;
2284 a1 &= 0xFFFFFFFF;
2285 a2 &= 0xFFFFFFFF;
2286 a3 &= 0xFFFFFFFF;
2287 }
2288
2289 switch (nr) {
b93463aa
AK
2290 case KVM_HC_VAPIC_POLL_IRQ:
2291 ret = 0;
2292 break;
8776e519
HB
2293 default:
2294 ret = -KVM_ENOSYS;
2295 break;
2296 }
ad312c7c 2297 vcpu->arch.regs[VCPU_REGS_RAX] = ret;
8776e519
HB
2298 kvm_x86_ops->decache_regs(vcpu);
2299 return 0;
2300}
2301EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
2302
2303int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
2304{
2305 char instruction[3];
2306 int ret = 0;
2307
8776e519
HB
2308
2309 /*
 2310	 * Blow out the MMU so that no other VCPU has an active mapping,
 2311	 * ensuring that the updated hypercall appears atomically across
 2312	 * all VCPUs.
2313 */
2314 kvm_mmu_zap_all(vcpu->kvm);
2315
2316 kvm_x86_ops->cache_regs(vcpu);
2317 kvm_x86_ops->patch_hypercall(vcpu, instruction);
ad312c7c 2318 if (emulator_write_emulated(vcpu->arch.rip, instruction, 3, vcpu)
8776e519
HB
2319 != X86EMUL_CONTINUE)
2320 ret = -EFAULT;
2321
8776e519
HB
2322 return ret;
2323}
2324
2325static u64 mk_cr_64(u64 curr_cr, u32 new_val)
2326{
2327 return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
2328}
2329
2330void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
2331{
2332 struct descriptor_table dt = { limit, base };
2333
2334 kvm_x86_ops->set_gdt(vcpu, &dt);
2335}
2336
2337void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
2338{
2339 struct descriptor_table dt = { limit, base };
2340
2341 kvm_x86_ops->set_idt(vcpu, &dt);
2342}
2343
2344void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
2345 unsigned long *rflags)
2346{
2347 lmsw(vcpu, msw);
2348 *rflags = kvm_x86_ops->get_rflags(vcpu);
2349}
2350
2351unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
2352{
2353 kvm_x86_ops->decache_cr4_guest_bits(vcpu);
2354 switch (cr) {
2355 case 0:
ad312c7c 2356 return vcpu->arch.cr0;
8776e519 2357 case 2:
ad312c7c 2358 return vcpu->arch.cr2;
8776e519 2359 case 3:
ad312c7c 2360 return vcpu->arch.cr3;
8776e519 2361 case 4:
ad312c7c 2362 return vcpu->arch.cr4;
152ff9be
JR
2363 case 8:
2364 return get_cr8(vcpu);
8776e519
HB
2365 default:
2366 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
2367 return 0;
2368 }
2369}
2370
2371void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
2372 unsigned long *rflags)
2373{
2374 switch (cr) {
2375 case 0:
ad312c7c 2376 set_cr0(vcpu, mk_cr_64(vcpu->arch.cr0, val));
8776e519
HB
2377 *rflags = kvm_x86_ops->get_rflags(vcpu);
2378 break;
2379 case 2:
ad312c7c 2380 vcpu->arch.cr2 = val;
8776e519
HB
2381 break;
2382 case 3:
2383 set_cr3(vcpu, val);
2384 break;
2385 case 4:
ad312c7c 2386 set_cr4(vcpu, mk_cr_64(vcpu->arch.cr4, val));
8776e519 2387 break;
152ff9be
JR
2388 case 8:
2389 set_cr8(vcpu, val & 0xfUL);
2390 break;
8776e519
HB
2391 default:
2392 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
2393 }
2394}
2395
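/*
 * Some CPUID leaves (classically leaf 2) return different data on each
 * invocation.  Such entries are flagged STATEFUL_FUNC, and the entry
 * to be returned next carries STATE_READ_NEXT; this helper moves that
 * mark to the next entry with the same function number.
 */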
07716717
DK
2396static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
2397{
ad312c7c
ZX
2398 struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
2399 int j, nent = vcpu->arch.cpuid_nent;
07716717
DK
2400
2401 e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
2402 /* when no next entry is found, the current entry[i] is reselected */
 2403	for (j = (i + 1) % nent; ; j = (j + 1) % nent) {
ad312c7c 2404 struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
07716717
DK
2405 if (ej->function == e->function) {
2406 ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
2407 return j;
2408 }
2409 }
2410 return 0; /* silence gcc, even though control never reaches here */
2411}
2412
2413/* find an entry with matching function, matching index (if needed), and that
2414 * should be read next (if it's stateful) */
2415static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
2416 u32 function, u32 index)
2417{
2418 if (e->function != function)
2419 return 0;
2420 if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
2421 return 0;
2422 if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
2423 !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
2424 return 0;
2425 return 1;
2426}
2427
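/*
 * Answer a guest CPUID from the userspace-supplied table: an exact
 * function (and, where significant, index) match wins; otherwise the
 * entry with the highest function number in the same basic/extended
 * class is used.
 */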
8776e519
HB
2428void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
2429{
2430 int i;
07716717
DK
2431 u32 function, index;
2432 struct kvm_cpuid_entry2 *e, *best;
8776e519
HB
2433
2434 kvm_x86_ops->cache_regs(vcpu);
ad312c7c
ZX
2435 function = vcpu->arch.regs[VCPU_REGS_RAX];
2436 index = vcpu->arch.regs[VCPU_REGS_RCX];
2437 vcpu->arch.regs[VCPU_REGS_RAX] = 0;
2438 vcpu->arch.regs[VCPU_REGS_RBX] = 0;
2439 vcpu->arch.regs[VCPU_REGS_RCX] = 0;
2440 vcpu->arch.regs[VCPU_REGS_RDX] = 0;
8776e519 2441 best = NULL;
ad312c7c
ZX
2442 for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
2443 e = &vcpu->arch.cpuid_entries[i];
07716717
DK
2444 if (is_matching_cpuid_entry(e, function, index)) {
2445 if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
2446 move_to_next_stateful_cpuid_entry(vcpu, i);
8776e519
HB
2447 best = e;
2448 break;
2449 }
2450 /*
2451 * Both basic or both extended?
2452 */
2453 if (((e->function ^ function) & 0x80000000) == 0)
2454 if (!best || e->function > best->function)
2455 best = e;
2456 }
2457 if (best) {
ad312c7c
ZX
2458 vcpu->arch.regs[VCPU_REGS_RAX] = best->eax;
2459 vcpu->arch.regs[VCPU_REGS_RBX] = best->ebx;
2460 vcpu->arch.regs[VCPU_REGS_RCX] = best->ecx;
2461 vcpu->arch.regs[VCPU_REGS_RDX] = best->edx;
8776e519
HB
2462 }
2463 kvm_x86_ops->decache_regs(vcpu);
2464 kvm_x86_ops->skip_emulated_instruction(vcpu);
2465}
2466EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
d0752060 2467
b6c7a5dc
HB
2468/*
2469 * Check if userspace requested an interrupt window, and that the
2470 * interrupt window is open.
2471 *
2472 * No need to exit to userspace if we already have an interrupt queued.
2473 */
2474static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
2475 struct kvm_run *kvm_run)
2476{
ad312c7c 2477 return (!vcpu->arch.irq_summary &&
b6c7a5dc 2478 kvm_run->request_interrupt_window &&
ad312c7c 2479 vcpu->arch.interrupt_window_open &&
b6c7a5dc
HB
2480 (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF));
2481}
2482
2483static void post_kvm_run_save(struct kvm_vcpu *vcpu,
2484 struct kvm_run *kvm_run)
2485{
2486 kvm_run->if_flag = (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
2487 kvm_run->cr8 = get_cr8(vcpu);
2488 kvm_run->apic_base = kvm_get_apic_base(vcpu);
2489 if (irqchip_in_kernel(vcpu->kvm))
2490 kvm_run->ready_for_interrupt_injection = 1;
2491 else
2492 kvm_run->ready_for_interrupt_injection =
ad312c7c
ZX
2493 (vcpu->arch.interrupt_window_open &&
2494 vcpu->arch.irq_summary == 0);
b6c7a5dc
HB
2495}
2496
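/*
 * Pin the page backing the guest's virtual-APIC area so the entry/exit
 * paths can touch it without faulting.
 */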
b93463aa
AK
2497static void vapic_enter(struct kvm_vcpu *vcpu)
2498{
2499 struct kvm_lapic *apic = vcpu->arch.apic;
2500 struct page *page;
2501
2502 if (!apic || !apic->vapic_addr)
2503 return;
2504
10589a46 2505 down_read(&current->mm->mmap_sem);
b93463aa 2506 page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
10589a46 2507 up_read(&current->mm->mmap_sem);
72dc67a6
IE
2508
2509 vcpu->arch.apic->vapic_page = page;
b93463aa
AK
2510}
2511
2512static void vapic_exit(struct kvm_vcpu *vcpu)
2513{
2514 struct kvm_lapic *apic = vcpu->arch.apic;
2515
2516 if (!apic || !apic->vapic_addr)
2517 return;
2518
2519 kvm_release_page_dirty(apic->vapic_page);
2520 mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
2521}
2522
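/*
 * The vcpu's inner run loop: reload the MMU, inject pending events,
 * enter the guest with interrupts disabled, and loop until an exit
 * needs userspace attention, a signal is pending, or a reschedule is
 * due.
 */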
b6c7a5dc
HB
2523static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2524{
2525 int r;
2526
ad312c7c 2527 if (unlikely(vcpu->arch.mp_state == VCPU_MP_STATE_SIPI_RECEIVED)) {
b6c7a5dc 2528 pr_debug("vcpu %d received sipi with vector # %x\n",
ad312c7c 2529 vcpu->vcpu_id, vcpu->arch.sipi_vector);
b6c7a5dc
HB
2530 kvm_lapic_reset(vcpu);
2531 r = kvm_x86_ops->vcpu_reset(vcpu);
2532 if (r)
2533 return r;
ad312c7c 2534 vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE;
b6c7a5dc
HB
2535 }
2536
b93463aa
AK
2537 vapic_enter(vcpu);
2538
b6c7a5dc
HB
2539preempted:
2540 if (vcpu->guest_debug.enabled)
2541 kvm_x86_ops->guest_debug_pre(vcpu);
2542
2543again:
2544 r = kvm_mmu_reload(vcpu);
2545 if (unlikely(r))
2546 goto out;
2547
2f52d58c
AK
2548 if (vcpu->requests) {
2549 if (test_and_clear_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests))
2550 __kvm_migrate_apic_timer(vcpu);
b93463aa
AK
2551 if (test_and_clear_bit(KVM_REQ_REPORT_TPR_ACCESS,
2552 &vcpu->requests)) {
2553 kvm_run->exit_reason = KVM_EXIT_TPR_ACCESS;
2554 r = 0;
2555 goto out;
2556 }
2f52d58c 2557 }
b93463aa 2558
b6c7a5dc
HB
2559 kvm_inject_pending_timer_irqs(vcpu);
2560
2561 preempt_disable();
2562
2563 kvm_x86_ops->prepare_guest_switch(vcpu);
2564 kvm_load_guest_fpu(vcpu);
2565
2566 local_irq_disable();
2567
6c142801
AK
2568 if (need_resched()) {
2569 local_irq_enable();
2570 preempt_enable();
2571 r = 1;
2572 goto out;
2573 }
2574
b6c7a5dc
HB
2575 if (signal_pending(current)) {
2576 local_irq_enable();
2577 preempt_enable();
2578 r = -EINTR;
2579 kvm_run->exit_reason = KVM_EXIT_INTR;
2580 ++vcpu->stat.signal_exits;
2581 goto out;
2582 }
2583
ad312c7c 2584 if (vcpu->arch.exception.pending)
298101da
AK
2585 __queue_exception(vcpu);
2586 else if (irqchip_in_kernel(vcpu->kvm))
b6c7a5dc 2587 kvm_x86_ops->inject_pending_irq(vcpu);
eb9774f0 2588 else
b6c7a5dc
HB
2589 kvm_x86_ops->inject_pending_vectors(vcpu, kvm_run);
2590
b93463aa
AK
2591 kvm_lapic_sync_to_vapic(vcpu);
2592
b6c7a5dc
HB
2593 vcpu->guest_mode = 1;
2594 kvm_guest_enter();
2595
2596 if (vcpu->requests)
2597 if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
2598 kvm_x86_ops->tlb_flush(vcpu);
2599
2600 kvm_x86_ops->run(vcpu, kvm_run);
2601
2602 vcpu->guest_mode = 0;
2603 local_irq_enable();
2604
2605 ++vcpu->stat.exits;
2606
2607 /*
2608 * We must have an instruction between local_irq_enable() and
2609 * kvm_guest_exit(), so the timer interrupt isn't delayed by
2610 * the interrupt shadow. The stat.exits increment will do nicely.
2611 * But we need to prevent reordering, hence this barrier():
2612 */
2613 barrier();
2614
2615 kvm_guest_exit();
2616
2617 preempt_enable();
2618
2619 /*
2620 * Profile KVM exit RIPs:
2621 */
2622 if (unlikely(prof_on == KVM_PROFILING)) {
2623 kvm_x86_ops->cache_regs(vcpu);
ad312c7c 2624 profile_hit(KVM_PROFILING, (void *)vcpu->arch.rip);
b6c7a5dc
HB
2625 }
2626
ad312c7c
ZX
2627 if (vcpu->arch.exception.pending && kvm_x86_ops->exception_injected(vcpu))
2628 vcpu->arch.exception.pending = false;
298101da 2629
b93463aa
AK
2630 kvm_lapic_sync_from_vapic(vcpu);
2631
b6c7a5dc
HB
2632 r = kvm_x86_ops->handle_exit(kvm_run, vcpu);
2633
2634 if (r > 0) {
2635 if (dm_request_for_irq_injection(vcpu, kvm_run)) {
2636 r = -EINTR;
2637 kvm_run->exit_reason = KVM_EXIT_INTR;
2638 ++vcpu->stat.request_irq_exits;
2639 goto out;
2640 }
e1beb1d3 2641 if (!need_resched())
b6c7a5dc 2642 goto again;
b6c7a5dc
HB
2643 }
2644
2645out:
2646 if (r > 0) {
2647 kvm_resched(vcpu);
2648 goto preempted;
2649 }
2650
2651 post_kvm_run_save(vcpu, kvm_run);
2652
b93463aa
AK
2653 vapic_exit(vcpu);
2654
b6c7a5dc
HB
2655 return r;
2656}
2657
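/*
 * Entry point for the KVM_RUN ioctl: complete any PIO/MMIO left over
 * from the previous exit, then run the vcpu with the caller's signal
 * mask installed.
 */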
2658int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2659{
2660 int r;
2661 sigset_t sigsaved;
2662
2663 vcpu_load(vcpu);
2664
ad312c7c 2665 if (unlikely(vcpu->arch.mp_state == VCPU_MP_STATE_UNINITIALIZED)) {
b6c7a5dc
HB
2666 kvm_vcpu_block(vcpu);
2667 vcpu_put(vcpu);
2668 return -EAGAIN;
2669 }
2670
2671 if (vcpu->sigset_active)
2672 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
2673
2674 /* re-sync apic's tpr */
2675 if (!irqchip_in_kernel(vcpu->kvm))
2676 set_cr8(vcpu, kvm_run->cr8);
2677
ad312c7c 2678 if (vcpu->arch.pio.cur_count) {
b6c7a5dc
HB
2679 r = complete_pio(vcpu);
2680 if (r)
2681 goto out;
2682 }
 2683#ifdef CONFIG_HAS_IOMEM
2684 if (vcpu->mmio_needed) {
2685 memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
2686 vcpu->mmio_read_completed = 1;
2687 vcpu->mmio_needed = 0;
2688 r = emulate_instruction(vcpu, kvm_run,
571008da
SY
2689 vcpu->arch.mmio_fault_cr2, 0,
2690 EMULTYPE_NO_DECODE);
b6c7a5dc
HB
2691 if (r == EMULATE_DO_MMIO) {
2692 /*
2693 * Read-modify-write. Back to userspace.
2694 */
2695 r = 0;
2696 goto out;
2697 }
2698 }
2699#endif
2700 if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL) {
2701 kvm_x86_ops->cache_regs(vcpu);
ad312c7c 2702 vcpu->arch.regs[VCPU_REGS_RAX] = kvm_run->hypercall.ret;
b6c7a5dc
HB
2703 kvm_x86_ops->decache_regs(vcpu);
2704 }
2705
2706 r = __vcpu_run(vcpu, kvm_run);
2707
2708out:
2709 if (vcpu->sigset_active)
2710 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
2711
2712 vcpu_put(vcpu);
2713 return r;
2714}
2715
2716int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
2717{
2718 vcpu_load(vcpu);
2719
2720 kvm_x86_ops->cache_regs(vcpu);
2721
ad312c7c
ZX
2722 regs->rax = vcpu->arch.regs[VCPU_REGS_RAX];
2723 regs->rbx = vcpu->arch.regs[VCPU_REGS_RBX];
2724 regs->rcx = vcpu->arch.regs[VCPU_REGS_RCX];
2725 regs->rdx = vcpu->arch.regs[VCPU_REGS_RDX];
2726 regs->rsi = vcpu->arch.regs[VCPU_REGS_RSI];
2727 regs->rdi = vcpu->arch.regs[VCPU_REGS_RDI];
2728 regs->rsp = vcpu->arch.regs[VCPU_REGS_RSP];
2729 regs->rbp = vcpu->arch.regs[VCPU_REGS_RBP];
b6c7a5dc 2730#ifdef CONFIG_X86_64
ad312c7c
ZX
2731 regs->r8 = vcpu->arch.regs[VCPU_REGS_R8];
2732 regs->r9 = vcpu->arch.regs[VCPU_REGS_R9];
2733 regs->r10 = vcpu->arch.regs[VCPU_REGS_R10];
2734 regs->r11 = vcpu->arch.regs[VCPU_REGS_R11];
2735 regs->r12 = vcpu->arch.regs[VCPU_REGS_R12];
2736 regs->r13 = vcpu->arch.regs[VCPU_REGS_R13];
2737 regs->r14 = vcpu->arch.regs[VCPU_REGS_R14];
2738 regs->r15 = vcpu->arch.regs[VCPU_REGS_R15];
b6c7a5dc
HB
2739#endif
2740
ad312c7c 2741 regs->rip = vcpu->arch.rip;
b6c7a5dc
HB
2742 regs->rflags = kvm_x86_ops->get_rflags(vcpu);
2743
2744 /*
2745 * Don't leak debug flags in case they were set for guest debugging
2746 */
2747 if (vcpu->guest_debug.enabled && vcpu->guest_debug.singlestep)
2748 regs->rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
2749
2750 vcpu_put(vcpu);
2751
2752 return 0;
2753}
2754
2755int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
2756{
2757 vcpu_load(vcpu);
2758
ad312c7c
ZX
2759 vcpu->arch.regs[VCPU_REGS_RAX] = regs->rax;
2760 vcpu->arch.regs[VCPU_REGS_RBX] = regs->rbx;
2761 vcpu->arch.regs[VCPU_REGS_RCX] = regs->rcx;
2762 vcpu->arch.regs[VCPU_REGS_RDX] = regs->rdx;
2763 vcpu->arch.regs[VCPU_REGS_RSI] = regs->rsi;
2764 vcpu->arch.regs[VCPU_REGS_RDI] = regs->rdi;
2765 vcpu->arch.regs[VCPU_REGS_RSP] = regs->rsp;
2766 vcpu->arch.regs[VCPU_REGS_RBP] = regs->rbp;
b6c7a5dc 2767#ifdef CONFIG_X86_64
ad312c7c
ZX
2768 vcpu->arch.regs[VCPU_REGS_R8] = regs->r8;
2769 vcpu->arch.regs[VCPU_REGS_R9] = regs->r9;
2770 vcpu->arch.regs[VCPU_REGS_R10] = regs->r10;
2771 vcpu->arch.regs[VCPU_REGS_R11] = regs->r11;
2772 vcpu->arch.regs[VCPU_REGS_R12] = regs->r12;
2773 vcpu->arch.regs[VCPU_REGS_R13] = regs->r13;
2774 vcpu->arch.regs[VCPU_REGS_R14] = regs->r14;
2775 vcpu->arch.regs[VCPU_REGS_R15] = regs->r15;
b6c7a5dc
HB
2776#endif
2777
ad312c7c 2778 vcpu->arch.rip = regs->rip;
b6c7a5dc
HB
2779 kvm_x86_ops->set_rflags(vcpu, regs->rflags);
2780
2781 kvm_x86_ops->decache_regs(vcpu);
2782
2783 vcpu_put(vcpu);
2784
2785 return 0;
2786}
2787
2788static void get_segment(struct kvm_vcpu *vcpu,
2789 struct kvm_segment *var, int seg)
2790{
2791 return kvm_x86_ops->get_segment(vcpu, var, seg);
2792}
2793
2794void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
2795{
2796 struct kvm_segment cs;
2797
2798 get_segment(vcpu, &cs, VCPU_SREG_CS);
2799 *db = cs.db;
2800 *l = cs.l;
2801}
2802EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
2803
2804int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
2805 struct kvm_sregs *sregs)
2806{
2807 struct descriptor_table dt;
2808 int pending_vec;
2809
2810 vcpu_load(vcpu);
2811
2812 get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
2813 get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
2814 get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
2815 get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
2816 get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
2817 get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
2818
2819 get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
2820 get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
2821
2822 kvm_x86_ops->get_idt(vcpu, &dt);
2823 sregs->idt.limit = dt.limit;
2824 sregs->idt.base = dt.base;
2825 kvm_x86_ops->get_gdt(vcpu, &dt);
2826 sregs->gdt.limit = dt.limit;
2827 sregs->gdt.base = dt.base;
2828
2829 kvm_x86_ops->decache_cr4_guest_bits(vcpu);
ad312c7c
ZX
2830 sregs->cr0 = vcpu->arch.cr0;
2831 sregs->cr2 = vcpu->arch.cr2;
2832 sregs->cr3 = vcpu->arch.cr3;
2833 sregs->cr4 = vcpu->arch.cr4;
b6c7a5dc 2834 sregs->cr8 = get_cr8(vcpu);
ad312c7c 2835 sregs->efer = vcpu->arch.shadow_efer;
b6c7a5dc
HB
2836 sregs->apic_base = kvm_get_apic_base(vcpu);
2837
2838 if (irqchip_in_kernel(vcpu->kvm)) {
2839 memset(sregs->interrupt_bitmap, 0,
2840 sizeof sregs->interrupt_bitmap);
2841 pending_vec = kvm_x86_ops->get_irq(vcpu);
2842 if (pending_vec >= 0)
2843 set_bit(pending_vec,
2844 (unsigned long *)sregs->interrupt_bitmap);
2845 } else
ad312c7c 2846 memcpy(sregs->interrupt_bitmap, vcpu->arch.irq_pending,
b6c7a5dc
HB
2847 sizeof sregs->interrupt_bitmap);
2848
2849 vcpu_put(vcpu);
2850
2851 return 0;
2852}
2853
2854static void set_segment(struct kvm_vcpu *vcpu,
2855 struct kvm_segment *var, int seg)
2856{
2857 return kvm_x86_ops->set_segment(vcpu, var, seg);
2858}
2859
2860int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
2861 struct kvm_sregs *sregs)
2862{
2863 int mmu_reset_needed = 0;
2864 int i, pending_vec, max_bits;
2865 struct descriptor_table dt;
2866
2867 vcpu_load(vcpu);
2868
2869 dt.limit = sregs->idt.limit;
2870 dt.base = sregs->idt.base;
2871 kvm_x86_ops->set_idt(vcpu, &dt);
2872 dt.limit = sregs->gdt.limit;
2873 dt.base = sregs->gdt.base;
2874 kvm_x86_ops->set_gdt(vcpu, &dt);
2875
ad312c7c
ZX
2876 vcpu->arch.cr2 = sregs->cr2;
2877 mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3;
2878 vcpu->arch.cr3 = sregs->cr3;
b6c7a5dc
HB
2879
2880 set_cr8(vcpu, sregs->cr8);
2881
ad312c7c 2882 mmu_reset_needed |= vcpu->arch.shadow_efer != sregs->efer;
b6c7a5dc
HB
2883#ifdef CONFIG_X86_64
2884 kvm_x86_ops->set_efer(vcpu, sregs->efer);
2885#endif
2886 kvm_set_apic_base(vcpu, sregs->apic_base);
2887
2888 kvm_x86_ops->decache_cr4_guest_bits(vcpu);
2889
ad312c7c 2890 mmu_reset_needed |= vcpu->arch.cr0 != sregs->cr0;
b6c7a5dc 2891 kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
d7306163 2892 vcpu->arch.cr0 = sregs->cr0;
b6c7a5dc 2893
ad312c7c 2894 mmu_reset_needed |= vcpu->arch.cr4 != sregs->cr4;
b6c7a5dc
HB
2895 kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
2896 if (!is_long_mode(vcpu) && is_pae(vcpu))
ad312c7c 2897 load_pdptrs(vcpu, vcpu->arch.cr3);
b6c7a5dc
HB
2898
2899 if (mmu_reset_needed)
2900 kvm_mmu_reset_context(vcpu);
2901
2902 if (!irqchip_in_kernel(vcpu->kvm)) {
ad312c7c
ZX
2903 memcpy(vcpu->arch.irq_pending, sregs->interrupt_bitmap,
2904 sizeof vcpu->arch.irq_pending);
2905 vcpu->arch.irq_summary = 0;
2906 for (i = 0; i < ARRAY_SIZE(vcpu->arch.irq_pending); ++i)
2907 if (vcpu->arch.irq_pending[i])
2908 __set_bit(i, &vcpu->arch.irq_summary);
b6c7a5dc
HB
2909 } else {
2910 max_bits = (sizeof sregs->interrupt_bitmap) << 3;
2911 pending_vec = find_first_bit(
2912 (const unsigned long *)sregs->interrupt_bitmap,
2913 max_bits);
2914 /* Only pending external irq is handled here */
2915 if (pending_vec < max_bits) {
2916 kvm_x86_ops->set_irq(vcpu, pending_vec);
2917 pr_debug("Set back pending irq %d\n",
2918 pending_vec);
2919 }
2920 }
2921
2922 set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
2923 set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
2924 set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
2925 set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
2926 set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
2927 set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
2928
2929 set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
2930 set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
2931
2932 vcpu_put(vcpu);
2933
2934 return 0;
2935}
2936
2937int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
2938 struct kvm_debug_guest *dbg)
2939{
2940 int r;
2941
2942 vcpu_load(vcpu);
2943
2944 r = kvm_x86_ops->set_guest_debug(vcpu, dbg);
2945
2946 vcpu_put(vcpu);
2947
2948 return r;
2949}
2950
d0752060
HB
2951/*
2952 * fxsave fpu state. Taken from x86_64/processor.h. To be killed when
2953 * we have asm/x86/processor.h
2954 */
2955struct fxsave {
2956 u16 cwd;
2957 u16 swd;
2958 u16 twd;
2959 u16 fop;
2960 u64 rip;
2961 u64 rdp;
2962 u32 mxcsr;
2963 u32 mxcsr_mask;
2964 u32 st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
2965#ifdef CONFIG_X86_64
2966 u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg = 256 bytes */
2967#else
2968 u32 xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */
2969#endif
2970};
2971
8b006791
ZX
2972/*
2973 * Translate a guest virtual address to a guest physical address.
2974 */
2975int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
2976 struct kvm_translation *tr)
2977{
2978 unsigned long vaddr = tr->linear_address;
2979 gpa_t gpa;
2980
2981 vcpu_load(vcpu);
72dc67a6 2982 down_read(&vcpu->kvm->slots_lock);
ad312c7c 2983 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, vaddr);
72dc67a6 2984 up_read(&vcpu->kvm->slots_lock);
8b006791
ZX
2985 tr->physical_address = gpa;
2986 tr->valid = gpa != UNMAPPED_GVA;
2987 tr->writeable = 1;
2988 tr->usermode = 0;
8b006791
ZX
2989 vcpu_put(vcpu);
2990
2991 return 0;
2992}
2993
d0752060
HB
2994int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2995{
ad312c7c 2996 struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;
d0752060
HB
2997
2998 vcpu_load(vcpu);
2999
3000 memcpy(fpu->fpr, fxsave->st_space, 128);
3001 fpu->fcw = fxsave->cwd;
3002 fpu->fsw = fxsave->swd;
3003 fpu->ftwx = fxsave->twd;
3004 fpu->last_opcode = fxsave->fop;
3005 fpu->last_ip = fxsave->rip;
3006 fpu->last_dp = fxsave->rdp;
3007 memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);
3008
3009 vcpu_put(vcpu);
3010
3011 return 0;
3012}
3013
3014int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
3015{
ad312c7c 3016 struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;
d0752060
HB
3017
3018 vcpu_load(vcpu);
3019
3020 memcpy(fxsave->st_space, fpu->fpr, 128);
3021 fxsave->cwd = fpu->fcw;
3022 fxsave->swd = fpu->fsw;
3023 fxsave->twd = fpu->ftwx;
3024 fxsave->fop = fpu->last_opcode;
3025 fxsave->rip = fpu->last_ip;
3026 fxsave->rdp = fpu->last_dp;
3027 memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);
3028
3029 vcpu_put(vcpu);
3030
3031 return 0;
3032}
3033
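/*
 * Give the guest a freshly reset FPU image: save the host FPU state,
 * reinitialize, capture the clean state as the guest image, and then
 * restore the host's own state.
 */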
3034void fx_init(struct kvm_vcpu *vcpu)
3035{
3036 unsigned after_mxcsr_mask;
3037
3038 /* Initialize guest FPU by resetting ours and saving into guest's */
3039 preempt_disable();
ad312c7c 3040 fx_save(&vcpu->arch.host_fx_image);
d0752060 3041 fpu_init();
ad312c7c
ZX
3042 fx_save(&vcpu->arch.guest_fx_image);
3043 fx_restore(&vcpu->arch.host_fx_image);
d0752060
HB
3044 preempt_enable();
3045
ad312c7c 3046 vcpu->arch.cr0 |= X86_CR0_ET;
d0752060 3047 after_mxcsr_mask = offsetof(struct i387_fxsave_struct, st_space);
ad312c7c
ZX
3048 vcpu->arch.guest_fx_image.mxcsr = 0x1f80;
3049 memset((void *)&vcpu->arch.guest_fx_image + after_mxcsr_mask,
d0752060
HB
3050 0, sizeof(struct i387_fxsave_struct) - after_mxcsr_mask);
3051}
3052EXPORT_SYMBOL_GPL(fx_init);
3053
3054void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
3055{
3056 if (!vcpu->fpu_active || vcpu->guest_fpu_loaded)
3057 return;
3058
3059 vcpu->guest_fpu_loaded = 1;
ad312c7c
ZX
3060 fx_save(&vcpu->arch.host_fx_image);
3061 fx_restore(&vcpu->arch.guest_fx_image);
d0752060
HB
3062}
3063EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);
3064
3065void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
3066{
3067 if (!vcpu->guest_fpu_loaded)
3068 return;
3069
3070 vcpu->guest_fpu_loaded = 0;
ad312c7c
ZX
3071 fx_save(&vcpu->arch.guest_fx_image);
3072 fx_restore(&vcpu->arch.host_fx_image);
f096ed85 3073 ++vcpu->stat.fpu_reload;
d0752060
HB
3074}
3075EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
e9b11c17
ZX
3076
3077void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
3078{
3079 kvm_x86_ops->vcpu_free(vcpu);
3080}
3081
3082struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
3083 unsigned int id)
3084{
26e5215f
AK
3085 return kvm_x86_ops->vcpu_create(kvm, id);
3086}
e9b11c17 3087
26e5215f
AK
3088int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
3089{
3090 int r;
e9b11c17
ZX
3091
3092 /* We do fxsave: this must be aligned. */
ad312c7c 3093 BUG_ON((unsigned long)&vcpu->arch.host_fx_image & 0xF);
e9b11c17
ZX
3094
3095 vcpu_load(vcpu);
3096 r = kvm_arch_vcpu_reset(vcpu);
3097 if (r == 0)
3098 r = kvm_mmu_setup(vcpu);
3099 vcpu_put(vcpu);
3100 if (r < 0)
3101 goto free_vcpu;
3102
26e5215f 3103 return 0;
e9b11c17
ZX
3104free_vcpu:
3105 kvm_x86_ops->vcpu_free(vcpu);
26e5215f 3106 return r;
e9b11c17
ZX
3107}
3108
d40ccc62 3109void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
e9b11c17
ZX
3110{
3111 vcpu_load(vcpu);
3112 kvm_mmu_unload(vcpu);
3113 vcpu_put(vcpu);
3114
3115 kvm_x86_ops->vcpu_free(vcpu);
3116}
3117
3118int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
3119{
3120 return kvm_x86_ops->vcpu_reset(vcpu);
3121}
3122
3123void kvm_arch_hardware_enable(void *garbage)
3124{
3125 kvm_x86_ops->hardware_enable(garbage);
3126}
3127
3128void kvm_arch_hardware_disable(void *garbage)
3129{
3130 kvm_x86_ops->hardware_disable(garbage);
3131}
3132
3133int kvm_arch_hardware_setup(void)
3134{
3135 return kvm_x86_ops->hardware_setup();
3136}
3137
3138void kvm_arch_hardware_unsetup(void)
3139{
3140 kvm_x86_ops->hardware_unsetup();
3141}
3142
3143void kvm_arch_check_processor_compat(void *rtn)
3144{
3145 kvm_x86_ops->check_processor_compatibility(rtn);
3146}
3147
3148int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
3149{
3150 struct page *page;
3151 struct kvm *kvm;
3152 int r;
3153
3154 BUG_ON(vcpu->kvm == NULL);
3155 kvm = vcpu->kvm;
3156
ad312c7c 3157 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
e9b11c17 3158 if (!irqchip_in_kernel(kvm) || vcpu->vcpu_id == 0)
ad312c7c 3159 vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE;
e9b11c17 3160 else
ad312c7c 3161 vcpu->arch.mp_state = VCPU_MP_STATE_UNINITIALIZED;
e9b11c17
ZX
3162
3163 page = alloc_page(GFP_KERNEL | __GFP_ZERO);
3164 if (!page) {
3165 r = -ENOMEM;
3166 goto fail;
3167 }
ad312c7c 3168 vcpu->arch.pio_data = page_address(page);
e9b11c17
ZX
3169
3170 r = kvm_mmu_create(vcpu);
3171 if (r < 0)
3172 goto fail_free_pio_data;
3173
3174 if (irqchip_in_kernel(kvm)) {
3175 r = kvm_create_lapic(vcpu);
3176 if (r < 0)
3177 goto fail_mmu_destroy;
3178 }
3179
3180 return 0;
3181
3182fail_mmu_destroy:
3183 kvm_mmu_destroy(vcpu);
3184fail_free_pio_data:
ad312c7c 3185 free_page((unsigned long)vcpu->arch.pio_data);
e9b11c17
ZX
3186fail:
3187 return r;
3188}
3189
3190void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
3191{
3192 kvm_free_lapic(vcpu);
3193 kvm_mmu_destroy(vcpu);
ad312c7c 3194 free_page((unsigned long)vcpu->arch.pio_data);
e9b11c17 3195}
d19a9cd2
ZX
3196
3197struct kvm *kvm_arch_create_vm(void)
3198{
3199 struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
3200
3201 if (!kvm)
3202 return ERR_PTR(-ENOMEM);
3203
f05e70ac 3204 INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
d19a9cd2
ZX
3205
3206 return kvm;
3207}
3208
3209static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
3210{
3211 vcpu_load(vcpu);
3212 kvm_mmu_unload(vcpu);
3213 vcpu_put(vcpu);
3214}
3215
3216static void kvm_free_vcpus(struct kvm *kvm)
3217{
3218 unsigned int i;
3219
3220 /*
3221 * Unpin any mmu pages first.
3222 */
3223 for (i = 0; i < KVM_MAX_VCPUS; ++i)
3224 if (kvm->vcpus[i])
3225 kvm_unload_vcpu_mmu(kvm->vcpus[i]);
3226 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
3227 if (kvm->vcpus[i]) {
3228 kvm_arch_vcpu_free(kvm->vcpus[i]);
3229 kvm->vcpus[i] = NULL;
3230 }
3231 }
3232
3233}
3234
3235void kvm_arch_destroy_vm(struct kvm *kvm)
3236{
d7deeeb0
ZX
3237 kfree(kvm->arch.vpic);
3238 kfree(kvm->arch.vioapic);
d19a9cd2
ZX
3239 kvm_free_vcpus(kvm);
3240 kvm_free_physmem(kvm);
3241 kfree(kvm);
3242}
0de10343
ZX
3243
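/*
 * Commit a memory slot update: mmap/munmap anonymous backing for
 * kernel-allocated slots, rebalance the shadow page budget, and strip
 * write access from the slot's sptes, flushing remote TLBs.
 */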
3244int kvm_arch_set_memory_region(struct kvm *kvm,
3245 struct kvm_userspace_memory_region *mem,
3246 struct kvm_memory_slot old,
3247 int user_alloc)
3248{
3249 int npages = mem->memory_size >> PAGE_SHIFT;
3250 struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
3251
 3252	/* To keep backward compatibility with older userspace,
 3253	 * x86 needs to handle the !user_alloc case.
 3254	 */
3255 if (!user_alloc) {
3256 if (npages && !old.rmap) {
72dc67a6 3257 down_write(&current->mm->mmap_sem);
0de10343
ZX
3258 memslot->userspace_addr = do_mmap(NULL, 0,
3259 npages * PAGE_SIZE,
3260 PROT_READ | PROT_WRITE,
3261 MAP_SHARED | MAP_ANONYMOUS,
3262 0);
72dc67a6 3263 up_write(&current->mm->mmap_sem);
0de10343
ZX
3264
3265 if (IS_ERR((void *)memslot->userspace_addr))
3266 return PTR_ERR((void *)memslot->userspace_addr);
3267 } else {
3268 if (!old.user_alloc && old.rmap) {
3269 int ret;
3270
72dc67a6 3271 down_write(&current->mm->mmap_sem);
0de10343
ZX
3272 ret = do_munmap(current->mm, old.userspace_addr,
3273 old.npages * PAGE_SIZE);
72dc67a6 3274 up_write(&current->mm->mmap_sem);
0de10343
ZX
3275 if (ret < 0)
3276 printk(KERN_WARNING
3277 "kvm_vm_ioctl_set_memory_region: "
3278 "failed to munmap memory\n");
3279 }
3280 }
3281 }
3282
f05e70ac 3283 if (!kvm->arch.n_requested_mmu_pages) {
0de10343
ZX
3284 unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
3285 kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
3286 }
3287
3288 kvm_mmu_slot_remove_write_access(kvm, mem->slot);
3289 kvm_flush_remote_tlbs(kvm);
3290
3291 return 0;
3292}
1d737c8a
ZX
3293
3294int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
3295{
3296 return vcpu->arch.mp_state == VCPU_MP_STATE_RUNNABLE
3297 || vcpu->arch.mp_state == VCPU_MP_STATE_SIPI_RECEIVED;
3298}
5736199a
ZX
3299
3300static void vcpu_kick_intr(void *info)
3301{
3302#ifdef DEBUG
3303 struct kvm_vcpu *vcpu = (struct kvm_vcpu *)info;
 3304	printk(KERN_DEBUG "vcpu_kick_intr %p\n", vcpu);
3305#endif
3306}
3307
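/*
 * Wake a vcpu blocked in kvm_vcpu_block(), and if it is currently in
 * guest mode on another cpu, send an empty IPI to force it out of the
 * guest so it notices the pending work.
 */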
3308void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
3309{
3310 int ipi_pcpu = vcpu->cpu;
3311
3312 if (waitqueue_active(&vcpu->wq)) {
3313 wake_up_interruptible(&vcpu->wq);
3314 ++vcpu->stat.halt_wakeup;
3315 }
3316 if (vcpu->guest_mode)
3317 smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0, 0);
3318}