#ifndef ARCH_X86_KVM_X86_H
#define ARCH_X86_KVM_X86_H

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"

static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.exception.pending = false;
}

static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector,
				       bool soft)
{
	vcpu->arch.interrupt.pending = true;
	vcpu->arch.interrupt.soft = soft;
	vcpu->arch.interrupt.nr = vector;
}

static inline void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.interrupt.pending = false;
}

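/*
 * Usage sketch (illustrative, not part of the original header): the
 * emulator queues a software interrupt for a decoded INT n with
 * soft == true, e.g. kvm_queue_interrupt(vcpu, vector, true), while an
 * external hardware interrupt is queued with soft == false. The
 * distinction matters because soft interrupts are reinjected with the
 * instruction length so the guest RIP advances past INT n.
 */
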
static inline bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.exception.pending || vcpu->arch.interrupt.pending ||
		vcpu->arch.nmi_injected;
}

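/*
 * Note: kvm_event_needs_reinjection() returning true normally means a
 * previously injected event was cut short by a vmexit and has to be
 * reinjected before any new exception, interrupt or NMI is delivered.
 */
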
static inline bool kvm_exception_is_soft(unsigned int nr)
{
	return (nr == BP_VECTOR) || (nr == OF_VECTOR);
}

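/*
 * #BP (INT3) and #OF (INTO) are the two exceptions raised by explicit
 * software instructions, hence "soft": they are injected as software
 * exceptions so that the guest RIP is advanced past the triggering
 * instruction on delivery.
 */
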
static inline bool is_protmode(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_PE);
}

static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return vcpu->arch.efer & EFER_LMA;
#else
	return 0;
#endif
}

static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu)
{
	int cs_db, cs_l;

	if (!is_long_mode(vcpu))
		return false;
	kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
	return cs_l;
}

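/*
 * Within long mode, CS.L selects the sub-mode: cs_l == 1 is 64-bit mode
 * proper, cs_l == 0 is compatibility mode, which is why checking
 * EFER.LMA alone is not sufficient here.
 */
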
static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
}

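/*
 * walk_mmu points at nested_mmu while an L2 guest runs with nested
 * paging, so guest virtual addresses are translated through two levels
 * (L2 gva -> L2 gpa -> L1 gpa).
 */
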
static inline int is_pae(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
}

static inline int is_pse(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PSE);
}

static inline int is_paging(struct kvm_vcpu *vcpu)
{
	return likely(kvm_read_cr0_bits(vcpu, X86_CR0_PG));
}

static inline u32 bit(int bitno)
{
	return 1 << (bitno & 31);
}

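/*
 * Worked example: bit(5) == 0x20, and because of the "& 31" wrap,
 * bit(37) == 0x20 as well. Callers rely on this when passing CPUID
 * feature numbers, which are defined as word * 32 + bit; only the bit
 * within the 32-bit leaf word is wanted here.
 */
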
static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
					gva_t gva, gfn_t gfn, unsigned access)
{
	vcpu->arch.mmio_gva = gva & PAGE_MASK;
	vcpu->arch.access = access;
	vcpu->arch.mmio_gfn = gfn;
}

/*
 * Clear the MMIO cache info for the given gva. In particular, if gva is
 * ~0ul, all MMIO cache info is cleared unconditionally.
 */
static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
{
	if (gva != (~0ul) && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
		return;

	vcpu->arch.mmio_gva = 0;
}

static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
{
	if (vcpu->arch.mmio_gva && vcpu->arch.mmio_gva == (gva & PAGE_MASK))
		return true;

	return false;
}

static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	if (vcpu->arch.mmio_gfn && vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
		return true;

	return false;
}

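/*
 * Taken together, these helpers form a one-entry MMIO cache: once an
 * access has been identified as MMIO, vcpu_cache_mmio_info() records
 * the gva/gfn pair, and later accesses that hit vcpu_match_mmio_gva()
 * or vcpu_match_mmio_gpa() can skip the page-table walk and go straight
 * to MMIO emulation.
 */
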
static inline unsigned long kvm_register_readl(struct kvm_vcpu *vcpu,
					       enum kvm_reg reg)
{
	unsigned long val = kvm_register_read(vcpu, reg);

	return is_64_bit_mode(vcpu) ? val : (u32)val;
}

static inline void kvm_register_writel(struct kvm_vcpu *vcpu,
				       enum kvm_reg reg,
				       unsigned long val)
{
	if (!is_64_bit_mode(vcpu))
		val = (u32)val;
	return kvm_register_write(vcpu, reg, val);
}

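/*
 * Usage sketch (illustrative): outside 64-bit mode the upper 32 bits of
 * a GPR are not architecturally visible to the guest, so emulation code
 * goes through these truncating wrappers, e.g.:
 *
 *	val = kvm_register_readl(vcpu, VCPU_REGS_RAX);
 *	kvm_register_writel(vcpu, VCPU_REGS_RAX, val + 1);
 */
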
void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);

void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);

int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

#define KVM_SUPPORTED_XCR0	(XSTATE_FP | XSTATE_SSE | XSTATE_YMM \
				| XSTATE_BNDREGS | XSTATE_BNDCSR)
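/*
 * KVM_SUPPORTED_XCR0 is the mask of XCR0 state components KVM knows how
 * to expose: x87/SSE/AVX state plus the two MPX components;
 * kvm_supported_xcr0() below further restricts it to what the host
 * actually supports.
 */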
extern u64 host_xcr0;

extern u64 kvm_supported_xcr0(void);

extern unsigned int min_timer_period_us;

extern struct static_key kvm_no_apic_vcpu;
#endif