Commit | Line | Data |
---|---|---|
26eef70c AK |
1 | #ifndef ARCH_X86_KVM_X86_H |
2 | #define ARCH_X86_KVM_X86_H | |
3 | ||
4 | #include <linux/kvm_host.h> | |
3eeb3288 | 5 | #include "kvm_cache_regs.h" |
26eef70c | 6 | |
74545705 RK |
7 | #define MSR_IA32_CR_PAT_DEFAULT 0x0007040600070406ULL |
8 | ||
26eef70c AK |
/* Drop any exception that is queued for injection into the guest. */
static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.exception.pending = false;
}
13 | ||
66fd3f7f GN |
14 | static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector, |
15 | bool soft) | |
937a7eae AK |
16 | { |
17 | vcpu->arch.interrupt.pending = true; | |
66fd3f7f | 18 | vcpu->arch.interrupt.soft = soft; |
937a7eae AK |
19 | vcpu->arch.interrupt.nr = vector; |
20 | } | |
21 | ||
/* Drop any interrupt that is queued for injection into the guest. */
static inline void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.interrupt.pending = false;
}
26 | ||
3298b75c GN |
27 | static inline bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu) |
28 | { | |
29 | return vcpu->arch.exception.pending || vcpu->arch.interrupt.pending || | |
30 | vcpu->arch.nmi_injected; | |
31 | } | |
66fd3f7f GN |
32 | |
33 | static inline bool kvm_exception_is_soft(unsigned int nr) | |
34 | { | |
35 | return (nr == BP_VECTOR) || (nr == OF_VECTOR); | |
36 | } | |
fc61b800 | 37 | |
3eeb3288 AK |
/* True when the guest is in protected mode (CR0.PE set). */
static inline bool is_protmode(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_PE);
}
42 | ||
836a1b3c AK |
/*
 * Nonzero when the guest is in long mode (EFER.LMA set).  On 32-bit
 * hosts long-mode guests are impossible, so this is constant zero.
 */
static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return vcpu->arch.efer & EFER_LMA;
#else
	return 0;
#endif
}
51 | ||
5777392e NA |
52 | static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu) |
53 | { | |
54 | int cs_db, cs_l; | |
55 | ||
56 | if (!is_long_mode(vcpu)) | |
57 | return false; | |
58 | kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); | |
59 | return cs_l; | |
60 | } | |
61 | ||
6539e738 JR |
/* True when the active walk_mmu is the vcpu's nested MMU. */
static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
}
66 | ||
836a1b3c AK |
/* Nonzero when the guest has PAE enabled (CR4.PAE set). */
static inline int is_pae(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
}
71 | ||
/* Nonzero when the guest has page-size extensions enabled (CR4.PSE set). */
static inline int is_pse(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PSE);
}
76 | ||
/*
 * Nonzero when the guest has paging enabled (CR0.PG set).  Marked
 * likely() since guests run with paging on almost all of the time.
 */
static inline int is_paging(struct kvm_vcpu *vcpu)
{
	return likely(kvm_read_cr0_bits(vcpu, X86_CR0_PG));
}
81 | ||
24d1b15f JR |
/*
 * Return a u32 with only the given bit set; the bit number is taken
 * modulo 32.  The shifted constant must be unsigned: "1 << 31" would
 * left-shift into the sign bit of a signed int, which is undefined
 * behavior in C.
 */
static inline u32 bit(int bitno)
{
	return 1u << (bitno & 31);
}
86 | ||
bebb106a XG |
/*
 * Cache the translation of the last emulated MMIO access so the
 * matchers below (vcpu_match_mmio_gva/gpa) can recognize a repeat
 * access without redoing the lookup.
 */
static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
					gva_t gva, gfn_t gfn, unsigned access)
{
	vcpu->arch.mmio_gva = gva & PAGE_MASK;
	vcpu->arch.access = access;
	vcpu->arch.mmio_gfn = gfn;
	/*
	 * Tag the entry with the current memslots generation so it is
	 * implicitly invalidated when the memslots change.
	 */
	vcpu->arch.mmio_gen = kvm_memslots(vcpu->kvm)->generation;
}
95 | ||
/* True when the cached MMIO info matches the current memslots generation. */
static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation;
}
100 | ||
/*
 * Clear the mmio cache info for the given gva. If gva is MMIO_GVA_ANY, we
 * clear all mmio cache info.
 */
#define MMIO_GVA_ANY (~(gva_t)0)

static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
{
	/*
	 * Unless the caller asked for a wildcard flush, only drop the
	 * cached entry when it covers the same page as @gva.
	 */
	if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
		return;

	/* A zero mmio_gva marks the cache as empty. */
	vcpu->arch.mmio_gva = 0;
}
114 | ||
115 | static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva) | |
116 | { | |
56f17dd3 DM |
117 | if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva && |
118 | vcpu->arch.mmio_gva == (gva & PAGE_MASK)) | |
bebb106a XG |
119 | return true; |
120 | ||
121 | return false; | |
122 | } | |
123 | ||
124 | static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa) | |
125 | { | |
56f17dd3 DM |
126 | if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn && |
127 | vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT) | |
bebb106a XG |
128 | return true; |
129 | ||
130 | return false; | |
131 | } | |
132 | ||
5777392e NA |
133 | static inline unsigned long kvm_register_readl(struct kvm_vcpu *vcpu, |
134 | enum kvm_reg reg) | |
135 | { | |
136 | unsigned long val = kvm_register_read(vcpu, reg); | |
137 | ||
138 | return is_64_bit_mode(vcpu) ? val : (u32)val; | |
139 | } | |
140 | ||
27e6fb5d NA |
141 | static inline void kvm_register_writel(struct kvm_vcpu *vcpu, |
142 | enum kvm_reg reg, | |
143 | unsigned long val) | |
144 | { | |
145 | if (!is_64_bit_mode(vcpu)) | |
146 | val = (u32)val; | |
147 | return kvm_register_write(vcpu, reg, val); | |
148 | } | |
149 | ||
e83d5887 AS |
/*
 * Current boot-based kernel time in nanoseconds, as a u64.  Thin
 * wrapper around ktime_get_boot_ns() for KVM's timekeeping code.
 */
static inline u64 get_kernel_ns(void)
{
	return ktime_get_boot_ns();
}
154 | ||
41dbc6bc PB |
/*
 * True when @quirk is still active for this VM, i.e. it has not been
 * set in kvm->arch.disabled_quirks.
 */
static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
{
	return !(kvm->arch.disabled_quirks & quirk);
}
159 | ||
ff9d07a0 ZY |
160 | void kvm_before_handle_nmi(struct kvm_vcpu *vcpu); |
161 | void kvm_after_handle_nmi(struct kvm_vcpu *vcpu); | |
bab5bb39 | 162 | void kvm_set_pending_timer(struct kvm_vcpu *vcpu); |
71f9833b | 163 | int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip); |
ff9d07a0 | 164 | |
8fe8ab46 | 165 | void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr); |
99e3e30a | 166 | |
064aea77 NHE |
167 | int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt, |
168 | gva_t addr, void *val, unsigned int bytes, | |
169 | struct x86_exception *exception); | |
170 | ||
6a4d7550 NHE |
171 | int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt, |
172 | gva_t addr, void *val, unsigned int bytes, | |
173 | struct x86_exception *exception); | |
174 | ||
19efffa2 | 175 | void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu); |
ff53604b | 176 | u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn); |
4566654b | 177 | bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data); |
ff53604b XG |
178 | int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data); |
179 | int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata); | |
6a39bbc5 XG |
180 | bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn, |
181 | int page_num); | |
4566654b | 182 | |
d91cab78 DH |
183 | #define KVM_SUPPORTED_XCR0 (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \ |
184 | | XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \ | |
185 | | XFEATURE_MASK_BNDCSR | XFEATURE_MASK_AVX512) | |
00b27a3e AK |
186 | extern u64 host_xcr0; |
187 | ||
4ff41732 PB |
188 | extern u64 kvm_supported_xcr0(void); |
189 | ||
9ed96e87 MT |
190 | extern unsigned int min_timer_period_us; |
191 | ||
d0659d94 MT |
192 | extern unsigned int lapic_timer_advance_ns; |
193 | ||
54e9818f | 194 | extern struct static_key kvm_no_apic_vcpu; |
b51012de PB |
195 | |
/* Same "calling convention" as do_div:
 * - divide (n << 32) by base
 * - put result in n
 * - return remainder
 *
 * Implemented with the x86 "divl" instruction, which divides the
 * 64-bit value in edx:eax by a 32-bit operand.  Here edx = n and
 * eax = 0, so the dividend is exactly n << 32.  NOTE(review): divl
 * raises a divide error if the quotient overflows 32 bits, so callers
 * presumably guarantee n < base — confirm at call sites.
 */
#define do_shl32_div32(n, base)					\
	({							\
	u32 __quot, __rem;					\
	asm("divl %2" : "=a" (__quot), "=d" (__rem)		\
		: "rm" (base), "0" (0), "1" ((u32) n));		\
	n = __quot;						\
	__rem;							\
	})
209 | ||
26eef70c | 210 | #endif |