/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This header defines architecture-specific interfaces, x86 version.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */
#ifndef ASM_KVM_HOST_H
#define ASM_KVM_HOST_H
#include <linux/types.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>
#include <linux/kvm_types.h>
#define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1)
#define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD))
#define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS | \
				  0xFFFFFF0000000000ULL)
#define KVM_GUEST_CR0_MASK \
	(X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE \
	 | X86_CR0_NW | X86_CR0_CD)
#define KVM_VM_CR0_ALWAYS_ON \
	(X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE | X86_CR0_TS \
	 | X86_CR0_MP)
#define KVM_GUEST_CR4_MASK \
	(X86_CR4_VME | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_VMXE)
#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
#define INVALID_PAGE (~(hpa_t)0)
#define UNMAPPED_GVA (~(gpa_t)0)
/* shadow tables are PAE even on non-PAE hosts */
#define KVM_HPAGE_SHIFT 21
#define KVM_HPAGE_SIZE (1UL << KVM_HPAGE_SHIFT)
#define KVM_HPAGE_MASK (~(KVM_HPAGE_SIZE - 1))
#define KVM_PAGES_PER_HPAGE (KVM_HPAGE_SIZE / PAGE_SIZE)
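/*
 * With the usual 4 KiB base page (PAGE_SIZE == 1UL << 12), a 2 MiB huge
 * page therefore spans KVM_PAGES_PER_HPAGE == 512 small pages.
 */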
#define SELECTOR_TI_MASK (1 << 2)
#define SELECTOR_RPL_MASK 0x03
#define KVM_ALIAS_SLOTS 4

#define KVM_PERMILLE_MMU_PAGES 20
#define KVM_MIN_ALLOC_MMU_PAGES 64
#define KVM_MMU_HASH_SHIFT 10
#define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25
#define KVM_MAX_CPUID_ENTRIES 40
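/*
 * Sizing note: kvm_mmu_calculate_mmu_pages() (declared below) uses
 * KVM_PERMILLE_MMU_PAGES to size the shadow-page pool at 20/1000 of the
 * guest's memslot pages, clamped below by KVM_MIN_ALLOC_MMU_PAGES; e.g.
 * a 1 GiB guest (262144 pages) gets about 5242 shadow pages.
 */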
extern spinlock_t kvm_lock;
extern struct list_head vm_list;
#include <asm/kvm_x86_emulate.h>

#define KVM_NR_MEM_OBJS 40
/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
	int nobjs;
	void *objects[KVM_NR_MEM_OBJS];
};
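/*
 * Illustrative sketch (not part of this header): a consumer such as
 * mmu.c tops the cache up in a context that may sleep, then pops
 * objects from it on the page-fault path, where allocation must not
 * fail:
 *
 *	void *obj;
 *	BUG_ON(!cache->nobjs);
 *	obj = cache->objects[--cache->nobjs];
 */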
#define NR_PTE_CHAIN_ENTRIES 5

struct kvm_pte_chain {
	u64 *parent_ptes[NR_PTE_CHAIN_ENTRIES];
	struct hlist_node link;
};
/*
 * kvm_mmu_page_role, below, is defined as:
 *
 *   bits 0:3 - total guest paging levels (2-4, or zero for real mode)
 *   bits 4:7 - page table level for this shadow (1-4)
 *   bits 8:9 - page table quadrant for 2-level guests
 *   bit   16 - "metaphysical" - gfn is not a real page (huge page/real mode)
 *   bits 17:19 - common access permissions for all ptes in this shadow page
 */
union kvm_mmu_page_role {
	unsigned word;
	struct {
		unsigned glevels:4;
		unsigned level:4;
		unsigned quadrant:2;
		unsigned pad_for_nice_hex_output:6;
		unsigned metaphysical:1;
		unsigned access:3;
	};
};
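/*
 * Overlaying the bitfields with 'word' lets the MMU hash and compare a
 * whole role as a single integer (role.word) rather than field by field.
 */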
struct kvm_mmu_page {
	struct list_head link;
	struct hlist_node hash_link;

	/*
	 * The following two entries are used to key the shadow page in the
	 * hash table.
	 */
	gfn_t gfn;
	union kvm_mmu_page_role role;

	u64 *spt;
	/* hold the gfn of each spte inside spt */
	u64 *gfns;
	unsigned long slot_bitmap; /* One bit set per slot which has memory
				    * in this shadow page.
				    */
	int multimapped;	/* More than one parent_pte? */
	int root_count;		/* Currently serving as active root */
	union {
		u64 *parent_pte;		/* !multimapped */
		struct hlist_head parent_ptes;	/* multimapped, kvm_pte_chain */
	};
};
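/*
 * The union above is a space optimization: most shadow pages have a
 * single parent pte, kept inline in parent_pte.  Only once a page is
 * multimapped does the MMU switch to an hlist of kvm_pte_chain blocks,
 * each holding up to NR_PTE_CHAIN_ENTRIES parent pointers.
 */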
/*
 * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level
 * 32-bit). The kvm_mmu structure abstracts the details of the current mmu
 * mode.
 */
struct kvm_mmu {
	void (*new_cr3)(struct kvm_vcpu *vcpu);
	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
	void (*free)(struct kvm_vcpu *vcpu);
	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva);
	void (*prefetch_page)(struct kvm_vcpu *vcpu,
			      struct kvm_mmu_page *page);
	hpa_t root_hpa;
	int root_level;
	int shadow_root_level;
};
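/*
 * Illustrative sketch (not part of this header): because each paging
 * mode installs its own ops, a hypothetical caller never tests the mode
 * directly:
 *
 *	static int handle_guest_fault(struct kvm_vcpu *vcpu, gva_t gva,
 *				      u32 err)
 *	{
 *		return vcpu->arch.mmu.page_fault(vcpu, gva, err);
 *	}
 */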
struct kvm_vcpu_arch {
	int interrupt_window_open;
	unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */
	DECLARE_BITMAP(irq_pending, KVM_NR_INTERRUPTS);
	unsigned long regs[NR_VCPU_REGS]; /* for rsp: vcpu_load_rsp_rip() */
	unsigned long rip;	/* needs vcpu_load_rsp_rip() */

	u64 pdptrs[4]; /* pae */
	struct kvm_lapic *apic;	/* kernel irqchip context */
#define VCPU_MP_STATE_RUNNABLE		0
#define VCPU_MP_STATE_UNINITIALIZED	1
#define VCPU_MP_STATE_INIT_RECEIVED	2
#define VCPU_MP_STATE_SIPI_RECEIVED	3
#define VCPU_MP_STATE_HALTED		4
	int mp_state;
	u64 ia32_misc_enable_msr;
	bool tpr_access_reporting;
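	/*
	 * mp_state tracks the x86 AP bring-up protocol: a secondary vcpu
	 * moves UNINITIALIZED -> INIT_RECEIVED -> SIPI_RECEIVED -> RUNNABLE
	 * as the INIT and startup IPIs arrive, mirroring the INIT-SIPI-SIPI
	 * sequence firmware uses on real hardware.
	 */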
	struct kvm_mmu mmu;

	struct kvm_mmu_memory_cache mmu_pte_chain_cache;
	struct kvm_mmu_memory_cache mmu_rmap_desc_cache;
	struct kvm_mmu_memory_cache mmu_page_cache;
	struct kvm_mmu_memory_cache mmu_page_header_cache;

	gfn_t last_pt_write_gfn;
	int last_pt_write_count;
	u64 *last_pte_updated;
	struct {
		gfn_t gfn;	   /* presumed gfn during guest pte update */
		struct page *page; /* page corresponding to that gfn */
	} update_pte;
	struct i387_fxsave_struct host_fx_image;
	struct i387_fxsave_struct guest_fx_image;

	gva_t mmio_fault_cr2;
	struct kvm_pio_request pio;
	struct kvm_queued_exception {
		bool pending;
		bool has_error_code;
		u8 nr;
		u32 error_code;
	} exception;
	struct kvm_save_segment {
		u16 selector;
		unsigned long base;
		u32 limit;
		u32 ar;
	} tr, es, ds, fs, gs;
	int halt_request; /* real mode on Intel only */
	struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES];

	/* emulate context */
	struct x86_emulate_ctxt emulate_ctxt;

	/* paravirtual clock (kvmclock): hv_clock is published into guest
	 * memory at time_page + time_offset */
	struct kvm_vcpu_time_info hv_clock;
	unsigned int time_offset;
	struct page *time_page;
};
struct kvm_mem_alias {
	gfn_t base_gfn;
	unsigned long npages;
	gfn_t target_gfn;
};

struct kvm_arch {
	int naliases;
	struct kvm_mem_alias aliases[KVM_ALIAS_SLOTS];
	unsigned int n_free_mmu_pages;
	unsigned int n_requested_mmu_pages;
	unsigned int n_alloc_mmu_pages;
	/*
	 * Hash table of struct kvm_mmu_page.
	 */
	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
	struct list_head active_mmu_pages;
	struct kvm_pic *vpic;
	struct kvm_ioapic *vioapic;
	struct kvm_pit *vpit;

	int round_robin_prev_vcpu;
	unsigned int tss_addr;
	struct page *apic_access_page;
};
struct kvm_vm_stat {
	u32 mmu_shadow_zapped;
	u32 remote_tlb_flush;
};
struct kvm_vcpu_stat {
	u32 irq_window_exits;
	u32 request_irq_exits;
	u32 host_state_reload;
	u32 insn_emulation_fail;
};
struct descriptor_table {
	u16 limit;
	unsigned long base;
} __attribute__((packed));
struct kvm_x86_ops {
	int (*cpu_has_kvm_support)(void);	/* __init */
	int (*disabled_by_bios)(void);		/* __init */
	void (*hardware_enable)(void *dummy);	/* __init */
	void (*hardware_disable)(void *dummy);
	void (*check_processor_compatibility)(void *rtn);
	int (*hardware_setup)(void);		/* __init */
	void (*hardware_unsetup)(void);		/* __exit */
	bool (*cpu_has_accelerated_tpr)(void);

	/* Create, but do not attach this VCPU */
	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	int (*vcpu_reset)(struct kvm_vcpu *vcpu);

	void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);
	void (*vcpu_decache)(struct kvm_vcpu *vcpu);

	int (*set_guest_debug)(struct kvm_vcpu *vcpu,
			       struct kvm_debug_guest *dbg);
	void (*guest_debug_pre)(struct kvm_vcpu *vcpu);
	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
	int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
	u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
	void (*get_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	void (*set_segment)(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
	void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
	void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
	void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
	void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
	void (*get_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	void (*set_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	void (*get_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	void (*set_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
	unsigned long (*get_dr)(struct kvm_vcpu *vcpu, int dr);
	void (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value,
		       int *exception);
	void (*cache_regs)(struct kvm_vcpu *vcpu);
	void (*decache_regs)(struct kvm_vcpu *vcpu);
	unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
	void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);

	void (*tlb_flush)(struct kvm_vcpu *vcpu);

	void (*run)(struct kvm_vcpu *vcpu, struct kvm_run *run);
	int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu);
	void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
	void (*patch_hypercall)(struct kvm_vcpu *vcpu,
				unsigned char *hypercall_addr);
	int (*get_irq)(struct kvm_vcpu *vcpu);
	void (*set_irq)(struct kvm_vcpu *vcpu, int vec);
	void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr,
				bool has_error_code, u32 error_code);
	bool (*exception_injected)(struct kvm_vcpu *vcpu);
	void (*inject_pending_irq)(struct kvm_vcpu *vcpu);
	void (*inject_pending_vectors)(struct kvm_vcpu *vcpu,
				       struct kvm_run *run);

	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
};
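/*
 * Illustrative sketch (not part of this header): vmx.c and svm.c each
 * fill in one of these tables and pass it to the generic code at module
 * init (the names below follow the VMX module of this era and are
 * assumptions):
 *
 *	static struct kvm_x86_ops vmx_x86_ops = {
 *		.cpu_has_kvm_support	= cpu_has_kvm_support,
 *		.hardware_setup		= hardware_setup,
 *		.vcpu_create		= vmx_create_vcpu,
 *		.run			= vmx_vcpu_run,
 *		...
 *	};
 *	kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx), THIS_MODULE);
 */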
extern struct kvm_x86_ops *kvm_x86_ops;
int kvm_mmu_module_init(void);
void kvm_mmu_module_exit(void);

void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
int kvm_mmu_setup(struct kvm_vcpu *vcpu);
void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte);

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
void kvm_mmu_zap_all(struct kvm *kvm);
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);

int __emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
			  const void *val, int bytes);
int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
		  gpa_t addr, unsigned long *ret);
extern bool tdp_enabled;
enum emulation_result {
	EMULATE_DONE,		/* no further processing */
	EMULATE_DO_MMIO,	/* kvm_run filled with mmio request */
	EMULATE_FAIL,		/* can't emulate this instruction */
};
#define EMULTYPE_NO_DECODE	(1 << 0)
#define EMULTYPE_TRAP_UD	(1 << 1)
int emulate_instruction(struct kvm_vcpu *vcpu, struct kvm_run *run,
			unsigned long cr2, u16 error_code, int emulation_type);
void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context);
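/*
 * Illustrative sketch (not part of this header): an exit handler
 * typically dispatches on the tri-state result, returning 1 to resume
 * the guest and 0 to punt to userspace (for EMULATE_DO_MMIO, kvm_run
 * describes the pending mmio access):
 *
 *	switch (emulate_instruction(vcpu, run, cr2, error_code, 0)) {
 *	case EMULATE_DONE:
 *		return 1;
 *	case EMULATE_DO_MMIO:
 *		return 0;
 *	default:
 *		kvm_report_emulation_failure(vcpu, "mmio");
 *		return 0;
 *	}
 */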
void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
void realmode_lidt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
		   unsigned long *rflags);

unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr);
void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long value,
		     unsigned long *rflags);
void kvm_enable_efer_bits(u64);
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
struct x86_emulate_ctxt;
int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
		    int size, unsigned port);
int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
			   int size, unsigned long count, int down,
			   gva_t address, int rep, unsigned port);
void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
int kvm_emulate_halt(struct kvm_vcpu *vcpu);
int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address);
int emulate_clts(struct kvm_vcpu *vcpu);
int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
		    unsigned long *dest);
int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
		    unsigned long value);
void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);
void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2,
			   u32 error_code);
void fx_init(struct kvm_vcpu *vcpu);
int emulator_read_std(unsigned long addr,
		      void *val,
		      unsigned int bytes,
		      struct kvm_vcpu *vcpu);
int emulator_write_emulated(unsigned long addr,
			    const void *val,
			    unsigned int bytes,
			    struct kvm_vcpu *vcpu);
unsigned long segment_base(u16 selector);
void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu);
void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
		       const u8 *new, int bytes);
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);
int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);

int kvm_fix_hypercall(struct kvm_vcpu *vcpu);

int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code);

void kvm_enable_tdp(void);
int complete_pio(struct kvm_vcpu *vcpu);
static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
{
	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

	return (struct kvm_mmu_page *)page_private(page);
}
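/*
 * This works because the MMU stashes its kvm_mmu_page pointer in the
 * struct page's private field when it allocates a shadow page, so the
 * host physical address of any shadow table maps back to its metadata
 * in O(1).
 */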
static inline u16 read_fs(void)
{
	u16 seg;
	asm("mov %%fs, %0" : "=g"(seg));
	return seg;
}

static inline u16 read_gs(void)
{
	u16 seg;
	asm("mov %%gs, %0" : "=g"(seg));
	return seg;
}

static inline u16 read_ldt(void)
{
	u16 ldt;
	asm("sldt %0" : "=g"(ldt));
	return ldt;
}
static inline void load_fs(u16 sel)
{
	asm("mov %0, %%fs" : : "rm"(sel));
}

static inline void load_gs(u16 sel)
{
	asm("mov %0, %%gs" : : "rm"(sel));
}

static inline void load_ldt(u16 sel)
{
	asm("lldt %0" : : "rm"(sel));
}
static inline void get_idt(struct descriptor_table *table)
{
	asm("sidt %0" : "=m"(*table));
}

static inline void get_gdt(struct descriptor_table *table)
{
	asm("sgdt %0" : "=m"(*table));
}
static inline unsigned long read_tr_base(void)
{
	u16 tr;
	asm("str %0" : "=g"(tr));
	return segment_base(tr);
}
static inline unsigned long read_msr(unsigned long msr)
{
	u64 value;

	rdmsrl(msr, value);
	return value;
}
static inline void fx_save(struct i387_fxsave_struct *image)
{
	asm("fxsave (%0)" : : "r" (image));
}

static inline void fx_restore(struct i387_fxsave_struct *image)
{
	asm("fxrstor (%0)" : : "r" (image));
}
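/*
 * Note: fxsave/fxrstor fault unless their memory operand is 16-byte
 * aligned, so the host_fx_image/guest_fx_image buffers above must
 * honor that alignment.
 */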
static inline void fpu_init(void)
{
	asm("finit");
}
static inline u32 get_rdx_init_val(void)
{
	return 0x600; /* P6 family */
}
static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
{
	kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
}
#define ASM_VMX_VMCLEAR_RAX		".byte 0x66, 0x0f, 0xc7, 0x30"
#define ASM_VMX_VMLAUNCH		".byte 0x0f, 0x01, 0xc2"
#define ASM_VMX_VMRESUME		".byte 0x0f, 0x01, 0xc3"
#define ASM_VMX_VMPTRLD_RAX		".byte 0x0f, 0xc7, 0x30"
#define ASM_VMX_VMREAD_RDX_RAX		".byte 0x0f, 0x78, 0xd0"
#define ASM_VMX_VMWRITE_RAX_RDX		".byte 0x0f, 0x79, 0xd0"
#define ASM_VMX_VMWRITE_RSP_RDX		".byte 0x0f, 0x79, 0xd4"
#define ASM_VMX_VMXOFF			".byte 0x0f, 0x01, 0xc4"
#define ASM_VMX_VMXON_RAX		".byte 0xf3, 0x0f, 0xc7, 0x30"
#define ASM_VMX_INVVPID			".byte 0x66, 0x0f, 0x38, 0x81, 0x08"
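/*
 * These spell out the raw VMX instruction encodings so the file still
 * builds with assemblers that predate the VMX mnemonics; the _RAX/_RDX
 * suffixes name the registers each encoding implicitly consumes.
 * Illustrative (hypothetical) use:
 *
 *	asm volatile(ASM_VMX_VMXON_RAX
 *		     : : "a"(&vmxon_region_pa), "m"(vmxon_region_pa)
 *		     : "memory", "cc");
 */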
#define MSR_IA32_TIME_STAMP_COUNTER	0x010
#define TSS_IOPB_BASE_OFFSET	0x66
#define TSS_BASE_SIZE		0x68
#define TSS_IOPB_SIZE		(65536 / 8)
#define TSS_REDIRECTION_SIZE	(256 / 8)
#define RMODE_TSS_SIZE \
	(TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)
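/*
 * Arithmetic check: 0x68 (104-byte base TSS) + 32 (interrupt redirection
 * bitmap) + 8192 (one bit per I/O port, 65536/8) + 1 trailing 0xff byte
 * that the architecture requires after the I/O bitmap = 8329 bytes.
 */

#endif /* ASM_KVM_HOST_H */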