KVM: x86: move MTRR related code to a separate file
arch/x86/kvm/mtrr.c
/*
 * vMTRR implementation
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright(C) 2015 Intel Corporation.
 *
 * Authors:
 *   Yaniv Kamay <yaniv@qumranet.com>
 *   Avi Kivity <avi@qumranet.com>
 *   Marcelo Tosatti <mtosatti@redhat.com>
 *   Paolo Bonzini <pbonzini@redhat.com>
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <linux/kvm_host.h>
#include <asm/mtrr.h>

#include "cpuid.h"
#include "mmu.h"
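
/*
 * Return true if @msr is one of the MTRR or PAT MSRs emulated here: the
 * variable-range base/mask pairs, the fixed-range MSRs, MTRRdefType and
 * IA32_CR_PAT (plus MSR 0x2f8, which has historically been accepted).
 */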
static bool msr_mtrr_valid(unsigned msr)
{
        switch (msr) {
        case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
        case MSR_MTRRfix64K_00000:
        case MSR_MTRRfix16K_80000:
        case MSR_MTRRfix16K_A0000:
        case MSR_MTRRfix4K_C0000:
        case MSR_MTRRfix4K_C8000:
        case MSR_MTRRfix4K_D0000:
        case MSR_MTRRfix4K_D8000:
        case MSR_MTRRfix4K_E0000:
        case MSR_MTRRfix4K_E8000:
        case MSR_MTRRfix4K_F0000:
        case MSR_MTRRfix4K_F8000:
        case MSR_MTRRdefType:
        case MSR_IA32_CR_PAT:
                return true;
        case 0x2f8:
                return true;
        }
        return false;
}
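
/*
 * Memory-type encodings 2 and 3 are reserved; PAT additionally allows
 * type 7 (UC-), which is not a valid MTRR type.
 */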
static bool valid_pat_type(unsigned t)
{
        return t < 8 && (1 << t) & 0xf3;        /* 0, 1, 4, 5, 6, 7 */
}

static bool valid_mtrr_type(unsigned t)
{
        return t < 8 && (1 << t) & 0x73;        /* 0, 1, 4, 5, 6 */
}
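
/*
 * Check that @data is a legal value for the MTRR or PAT MSR @msr.  A
 * variable-range base or mask with reserved bits set additionally
 * injects #GP.
 */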
bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
        int i;
        u64 mask;

        if (!msr_mtrr_valid(msr))
                return false;

        if (msr == MSR_IA32_CR_PAT) {
                for (i = 0; i < 8; i++)
                        if (!valid_pat_type((data >> (i * 8)) & 0xff))
                                return false;
                return true;
        } else if (msr == MSR_MTRRdefType) {
                if (data & ~0xcff)
                        return false;
                return valid_mtrr_type(data & 0xff);
        } else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
                for (i = 0; i < 8; i++)
                        if (!valid_mtrr_type((data >> (i * 8)) & 0xff))
                                return false;
                return true;
        }

        /* variable MTRRs */
        WARN_ON(!(msr >= 0x200 && msr < 0x200 + 2 * KVM_NR_VAR_MTRR));

        mask = (~0ULL) << cpuid_maxphyaddr(vcpu);
        if ((msr & 1) == 0) {
                /* MTRR base */
                if (!valid_mtrr_type(data & 0xff))
                        return false;
                mask |= 0xf00;
        } else
                /* MTRR mask */
                mask |= 0x7ff;
        if (data & mask) {
                kvm_inject_gp(vcpu, 0);
                return false;
        }

        return true;
}
EXPORT_SYMBOL_GPL(kvm_mtrr_valid);
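
/*
 * A change to an MTRR can change the memory type of an already-mapped
 * range.  Zap the affected GFN range so that the SPTEs are rebuilt with
 * the new type.  This is only needed when TDP is in use and the VM has
 * noncoherent DMA; otherwise the guest MTRR settings are not honoured
 * for the host mapping.
 */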
static void update_mtrr(struct kvm_vcpu *vcpu, u32 msr)
{
        struct mtrr_state_type *mtrr_state = &vcpu->arch.mtrr_state;
        unsigned char mtrr_enabled = mtrr_state->enabled;
        gfn_t start, end, mask;
        int index;
        bool is_fixed = true;

        if (msr == MSR_IA32_CR_PAT || !tdp_enabled ||
              !kvm_arch_has_noncoherent_dma(vcpu->kvm))
                return;

        if (!(mtrr_enabled & 0x2) && msr != MSR_MTRRdefType)
                return;

        switch (msr) {
        case MSR_MTRRfix64K_00000:
                start = 0x0;
                end = 0x80000;
                break;
        case MSR_MTRRfix16K_80000:
                start = 0x80000;
                end = 0xa0000;
                break;
        case MSR_MTRRfix16K_A0000:
                start = 0xa0000;
                end = 0xc0000;
                break;
        case MSR_MTRRfix4K_C0000 ... MSR_MTRRfix4K_F8000:
                index = msr - MSR_MTRRfix4K_C0000;
                start = 0xc0000 + index * (32 << 10);
                end = start + (32 << 10);
                break;
        case MSR_MTRRdefType:
                is_fixed = false;
                start = 0x0;
                end = ~0ULL;
                break;
        default:
                /* variable range MTRRs. */
                is_fixed = false;
                index = (msr - 0x200) / 2;
                start = (((u64)mtrr_state->var_ranges[index].base_hi) << 32) +
                        (mtrr_state->var_ranges[index].base_lo & PAGE_MASK);
                mask = (((u64)mtrr_state->var_ranges[index].mask_hi) << 32) +
                       (mtrr_state->var_ranges[index].mask_lo & PAGE_MASK);
                mask |= ~0ULL << cpuid_maxphyaddr(vcpu);

                end = ((start & mask) | ~mask) + 1;
        }

        if (is_fixed && !(mtrr_enabled & 0x1))
                return;

        kvm_zap_gfn_range(vcpu->kvm, gpa_to_gfn(start), gpa_to_gfn(end));
}
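
/*
 * Handle a guest WRMSR to an MTRR or PAT MSR.  Returns 0 on success and
 * 1 if the MSR or value is rejected.
 */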
int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
        u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;

        if (!kvm_mtrr_valid(vcpu, msr, data))
                return 1;

        if (msr == MSR_MTRRdefType) {
                vcpu->arch.mtrr_state.def_type = data;
                vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10;
        } else if (msr == MSR_MTRRfix64K_00000)
                p[0] = data;
        else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
                p[1 + msr - MSR_MTRRfix16K_80000] = data;
        else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
                p[3 + msr - MSR_MTRRfix4K_C0000] = data;
        else if (msr == MSR_IA32_CR_PAT)
                vcpu->arch.pat = data;
        else {  /* Variable MTRRs */
                int idx, is_mtrr_mask;
                u64 *pt;

                idx = (msr - 0x200) / 2;
                is_mtrr_mask = msr - 0x200 - 2 * idx;
                if (!is_mtrr_mask)
                        pt =
                          (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
                else
                        pt =
                          (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
                *pt = data;
        }

        update_mtrr(vcpu, msr);
        return 0;
}
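
/*
 * Handle a guest RDMSR of an MTRR or PAT MSR, filling *pdata from the
 * saved vMTRR state.  Returns 0 on success and 1 for an MSR that is not
 * emulated here.
 */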
int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
        u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;

        if (!msr_mtrr_valid(msr))
                return 1;

        if (msr == MSR_MTRRdefType)
                *pdata = vcpu->arch.mtrr_state.def_type +
                         (vcpu->arch.mtrr_state.enabled << 10);
        else if (msr == MSR_MTRRfix64K_00000)
                *pdata = p[0];
        else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
                *pdata = p[1 + msr - MSR_MTRRfix16K_80000];
        else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
                *pdata = p[3 + msr - MSR_MTRRfix4K_C0000];
        else if (msr == MSR_IA32_CR_PAT)
                *pdata = vcpu->arch.pat;
        else {  /* Variable MTRRs */
                int idx, is_mtrr_mask;
                u64 *pt;

                idx = (msr - 0x200) / 2;
                is_mtrr_mask = msr - 0x200 - 2 * idx;
                if (!is_mtrr_mask)
                        pt =
                          (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
                else
                        pt =
                          (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
                *pdata = *pt;
        }

        return 0;
}
/*
 * This function is based on mtrr_type_lookup() in
 * arch/x86/kernel/cpu/mtrr/generic.c
 */
static int get_mtrr_type(struct mtrr_state_type *mtrr_state,
                         u64 start, u64 end)
{
        u64 base, mask;
        u8 prev_match, curr_match;
        int i, num_var_ranges = KVM_NR_VAR_MTRR;

        /* MTRRs are completely disabled, use UC for all of physical memory. */
        if (!(mtrr_state->enabled & 0x2))
                return MTRR_TYPE_UNCACHABLE;

        /* Make end inclusive instead of exclusive */
        end--;

        /* Look in fixed ranges.  Just return the type as per start */
        if (mtrr_state->have_fixed && (mtrr_state->enabled & 0x1) &&
            (start < 0x100000)) {
                int idx;

                if (start < 0x80000) {
                        idx = 0;
                        idx += (start >> 16);
                        return mtrr_state->fixed_ranges[idx];
                } else if (start < 0xC0000) {
                        idx = 1 * 8;
                        idx += ((start - 0x80000) >> 14);
                        return mtrr_state->fixed_ranges[idx];
                } else if (start < 0x1000000) {
                        idx = 3 * 8;
                        idx += ((start - 0xC0000) >> 12);
                        return mtrr_state->fixed_ranges[idx];
                }
        }

        /*
         * Look in variable ranges.
         * Look for multiple ranges matching this address and pick the type
         * as per MTRR precedence.
         */
        prev_match = 0xFF;
        for (i = 0; i < num_var_ranges; ++i) {
                unsigned short start_state, end_state;

                if (!(mtrr_state->var_ranges[i].mask_lo & (1 << 11)))
                        continue;

                base = (((u64)mtrr_state->var_ranges[i].base_hi) << 32) +
                       (mtrr_state->var_ranges[i].base_lo & PAGE_MASK);
                mask = (((u64)mtrr_state->var_ranges[i].mask_hi) << 32) +
                       (mtrr_state->var_ranges[i].mask_lo & PAGE_MASK);

                start_state = ((start & mask) == (base & mask));
                end_state = ((end & mask) == (base & mask));
                if (start_state != end_state)
                        return 0xFE;

                if ((start & mask) != (base & mask))
                        continue;

                curr_match = mtrr_state->var_ranges[i].base_lo & 0xff;
                if (prev_match == 0xFF) {
                        prev_match = curr_match;
                        continue;
                }

                if (prev_match == MTRR_TYPE_UNCACHABLE ||
                    curr_match == MTRR_TYPE_UNCACHABLE)
                        return MTRR_TYPE_UNCACHABLE;

                if ((prev_match == MTRR_TYPE_WRBACK &&
                     curr_match == MTRR_TYPE_WRTHROUGH) ||
                    (prev_match == MTRR_TYPE_WRTHROUGH &&
                     curr_match == MTRR_TYPE_WRBACK)) {
                        prev_match = MTRR_TYPE_WRTHROUGH;
                        curr_match = MTRR_TYPE_WRTHROUGH;
                }

                if (prev_match != curr_match)
                        return MTRR_TYPE_UNCACHABLE;
        }

        if (prev_match != 0xFF)
                return prev_match;

        return mtrr_state->def_type;
}
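
/*
 * Return the guest MTRR memory type for the page at @gfn.  A lookup that
 * matches no range (0xff) or straddles ranges of different types (0xfe)
 * falls back to write-back.
 */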
u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
{
        u8 mtrr;

        mtrr = get_mtrr_type(&vcpu->arch.mtrr_state, gfn << PAGE_SHIFT,
                             (gfn << PAGE_SHIFT) + PAGE_SIZE);
        if (mtrr == 0xfe || mtrr == 0xff)
                mtrr = MTRR_TYPE_WRBACK;
        return mtrr;
}
EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type);