/*
 * vMTRR implementation
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright(C) 2015 Intel Corporation.
 *
 * Authors:
 *   Yaniv Kamay <yaniv@qumranet.com>
 *   Avi Kivity <avi@qumranet.com>
 *   Marcelo Tosatti <mtosatti@redhat.com>
 *   Paolo Bonzini <pbonzini@redhat.com>
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <linux/kvm_host.h>
#include <asm/mtrr.h>

#include "cpuid.h"
#include "mmu.h"

static bool msr_mtrr_valid(unsigned msr)
{
	switch (msr) {
	case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
	case MSR_MTRRfix64K_00000:
	case MSR_MTRRfix16K_80000:
	case MSR_MTRRfix16K_A0000:
	case MSR_MTRRfix4K_C0000:
	case MSR_MTRRfix4K_C8000:
	case MSR_MTRRfix4K_D0000:
	case MSR_MTRRfix4K_D8000:
	case MSR_MTRRfix4K_E0000:
	case MSR_MTRRfix4K_E8000:
	case MSR_MTRRfix4K_F0000:
	case MSR_MTRRfix4K_F8000:
	case MSR_MTRRdefType:
	case MSR_IA32_CR_PAT:
		return true;
	}
	return false;
}

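/*
 * valid_pat_type() and valid_mtrr_type() below encode the set of legal
 * memory-type values as a bitmask and test membership with (1 << t);
 * PAT additionally allows type 7 (UC-), hence 0xf3 versus 0x73.
 */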
static bool valid_pat_type(unsigned t)
{
	return t < 8 && (1 << t) & 0xf3;	/* 0, 1, 4, 5, 6, 7 */
}

static bool valid_mtrr_type(unsigned t)
{
	return t < 8 && (1 << t) & 0x73;	/* 0, 1, 4, 5, 6 */
}

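/*
 * Variable-range MTRRs live at MSRs 0x200 + 2n (IA32_MTRR_PHYSBASEn) and
 * 0x201 + 2n (IA32_MTRR_PHYSMASKn), which is why the code below indexes
 * them as (msr - 0x200) / 2 and uses the low bit to tell base from mask.
 */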
bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	int i;
	u64 mask;

	if (!msr_mtrr_valid(msr))
		return false;

	if (msr == MSR_IA32_CR_PAT) {
		for (i = 0; i < 8; i++)
			if (!valid_pat_type((data >> (i * 8)) & 0xff))
				return false;
		return true;
	} else if (msr == MSR_MTRRdefType) {
		if (data & ~0xcff)
			return false;
		return valid_mtrr_type(data & 0xff);
	} else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
		for (i = 0; i < 8 ; i++)
			if (!valid_mtrr_type((data >> (i * 8)) & 0xff))
				return false;
		return true;
	}

	/* variable MTRRs */
	WARN_ON(!(msr >= 0x200 && msr < 0x200 + 2 * KVM_NR_VAR_MTRR));

	mask = (~0ULL) << cpuid_maxphyaddr(vcpu);
	if ((msr & 1) == 0) {
		/* MTRR base */
		if (!valid_mtrr_type(data & 0xff))
			return false;
		mask |= 0xf00;
	} else
		/* MTRR mask */
		mask |= 0x7ff;
	if (data & mask) {
		kvm_inject_gp(vcpu, 0);
		return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(kvm_mtrr_valid);

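/*
 * update_mtrr() only acts when TDP is enabled and the VM has noncoherent
 * DMA - the cases where the guest MTRRs feed into the effective SPTE
 * memory type - and zaps the gpa range the written MSR covers so stale
 * mappings are rebuilt with the new type on the next fault.
 */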
static void update_mtrr(struct kvm_vcpu *vcpu, u32 msr)
{
	struct mtrr_state_type *mtrr_state = &vcpu->arch.mtrr_state;
	unsigned char mtrr_enabled = mtrr_state->enabled;
	gfn_t start, end, mask;
	int index;
	bool is_fixed = true;

	if (msr == MSR_IA32_CR_PAT || !tdp_enabled ||
	      !kvm_arch_has_noncoherent_dma(vcpu->kvm))
		return;

	if (!(mtrr_enabled & 0x2) && msr != MSR_MTRRdefType)
		return;

	switch (msr) {
	case MSR_MTRRfix64K_00000:
		start = 0x0;
		end = 0x80000;
		break;
	case MSR_MTRRfix16K_80000:
		start = 0x80000;
		end = 0xa0000;
		break;
	case MSR_MTRRfix16K_A0000:
		start = 0xa0000;
		end = 0xc0000;
		break;
	case MSR_MTRRfix4K_C0000 ... MSR_MTRRfix4K_F8000:
		index = msr - MSR_MTRRfix4K_C0000;
		start = 0xc0000 + index * (32 << 10);
		end = start + (32 << 10);
		break;
	case MSR_MTRRdefType:
		is_fixed = false;
		start = 0x0;
		end = ~0ULL;
		break;
	default:
		/* variable range MTRRs. */
		is_fixed = false;
		index = (msr - 0x200) / 2;
		start = (((u64)mtrr_state->var_ranges[index].base_hi) << 32) +
		       (mtrr_state->var_ranges[index].base_lo & PAGE_MASK);
		mask = (((u64)mtrr_state->var_ranges[index].mask_hi) << 32) +
		       (mtrr_state->var_ranges[index].mask_lo & PAGE_MASK);
		mask |= ~0ULL << cpuid_maxphyaddr(vcpu);

		end = ((start & mask) | ~mask) + 1;
	}

	if (is_fixed && !(mtrr_enabled & 0x1))
		return;

	kvm_zap_gfn_range(vcpu->kvm, gpa_to_gfn(start), gpa_to_gfn(end));
}

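/*
 * mtrr_state.fixed_ranges[] stores the fixed-range type bytes; the
 * accessors below cast it to u64 * so each of the eleven fixed-range
 * registers maps to one slot: index 0 is the 64K register, 1-2 the two
 * 16K registers and 3-10 the eight 4K registers, hence the 0/1/3 offsets.
 */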
int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;

	if (!kvm_mtrr_valid(vcpu, msr, data))
		return 1;

	if (msr == MSR_MTRRdefType) {
		vcpu->arch.mtrr_state.def_type = data;
		vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10;
	} else if (msr == MSR_MTRRfix64K_00000)
		p[0] = data;
	else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
		p[1 + msr - MSR_MTRRfix16K_80000] = data;
	else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
		p[3 + msr - MSR_MTRRfix4K_C0000] = data;
	else if (msr == MSR_IA32_CR_PAT)
		vcpu->arch.pat = data;
	else {	/* Variable MTRRs */
		int idx, is_mtrr_mask;
		u64 *pt;

		idx = (msr - 0x200) / 2;
		is_mtrr_mask = msr - 0x200 - 2 * idx;
		if (!is_mtrr_mask)
			pt =
			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
		else
			pt =
			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
		*pt = data;
	}

	update_mtrr(vcpu, msr);
	return 0;
}

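/*
 * On reads, MTRRdefType is reassembled from the cached fields: the type
 * lives in bits 7:0 and the two-bit enabled field (FE in bit 10, E in
 * bit 11) is shifted back up by 10, mirroring the (data & 0xc00) >> 10
 * split done on writes.
 */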
int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;

	if (!msr_mtrr_valid(msr))
		return 1;

	if (msr == MSR_MTRRdefType)
		*pdata = vcpu->arch.mtrr_state.def_type +
			 (vcpu->arch.mtrr_state.enabled << 10);
	else if (msr == MSR_MTRRfix64K_00000)
		*pdata = p[0];
	else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
		*pdata = p[1 + msr - MSR_MTRRfix16K_80000];
	else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
		*pdata = p[3 + msr - MSR_MTRRfix4K_C0000];
	else if (msr == MSR_IA32_CR_PAT)
		*pdata = vcpu->arch.pat;
	else {	/* Variable MTRRs */
		int idx, is_mtrr_mask;
		u64 *pt;

		idx = (msr - 0x200) / 2;
		is_mtrr_mask = msr - 0x200 - 2 * idx;
		if (!is_mtrr_mask)
			pt =
			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
		else
			pt =
			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
		*pdata = *pt;
	}

	return 0;
}

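/*
 * MTRR precedence, as implemented below: enabled fixed ranges win for
 * addresses under 1MB; among overlapping variable ranges UC beats
 * everything and WT beats WB, while any other conflicting pair is
 * treated as UC; if nothing matches, the default type applies.
 */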
/*
 * The function is based on mtrr_type_lookup() in
 * arch/x86/kernel/cpu/mtrr/generic.c
 */
static int get_mtrr_type(struct mtrr_state_type *mtrr_state,
			 u64 start, u64 end)
{
	u64 base, mask;
	u8 prev_match, curr_match;
	int i, num_var_ranges = KVM_NR_VAR_MTRR;

	/* MTRR is completely disabled, use UC for all of physical memory. */
	if (!(mtrr_state->enabled & 0x2))
		return MTRR_TYPE_UNCACHABLE;

	/* Make end inclusive instead of exclusive */
	end--;

	/* Look in fixed ranges. Just return the type as per start */
	if (mtrr_state->have_fixed && (mtrr_state->enabled & 0x1) &&
	      (start < 0x100000)) {
		int idx;

		if (start < 0x80000) {
			idx = 0;
			idx += (start >> 16);
			return mtrr_state->fixed_ranges[idx];
		} else if (start < 0xC0000) {
			idx = 1 * 8;
			idx += ((start - 0x80000) >> 14);
			return mtrr_state->fixed_ranges[idx];
		} else if (start < 0x1000000) {
			idx = 3 * 8;
			idx += ((start - 0xC0000) >> 12);
			return mtrr_state->fixed_ranges[idx];
		}
	}

	/*
	 * Look in variable ranges
	 * Look for multiple ranges matching this address and pick the type
	 * as per MTRR precedence
	 */
	prev_match = 0xFF;
	for (i = 0; i < num_var_ranges; ++i) {
		unsigned short start_state, end_state;

		if (!(mtrr_state->var_ranges[i].mask_lo & (1 << 11)))
			continue;

		base = (((u64)mtrr_state->var_ranges[i].base_hi) << 32) +
		       (mtrr_state->var_ranges[i].base_lo & PAGE_MASK);
		mask = (((u64)mtrr_state->var_ranges[i].mask_hi) << 32) +
		       (mtrr_state->var_ranges[i].mask_lo & PAGE_MASK);

		start_state = ((start & mask) == (base & mask));
		end_state = ((end & mask) == (base & mask));
		if (start_state != end_state)
			return 0xFE;

		if ((start & mask) != (base & mask))
			continue;

		curr_match = mtrr_state->var_ranges[i].base_lo & 0xff;
		if (prev_match == 0xFF) {
			prev_match = curr_match;
			continue;
		}

		if (prev_match == MTRR_TYPE_UNCACHABLE ||
		    curr_match == MTRR_TYPE_UNCACHABLE)
			return MTRR_TYPE_UNCACHABLE;

		if ((prev_match == MTRR_TYPE_WRBACK &&
		     curr_match == MTRR_TYPE_WRTHROUGH) ||
		    (prev_match == MTRR_TYPE_WRTHROUGH &&
		     curr_match == MTRR_TYPE_WRBACK)) {
			prev_match = MTRR_TYPE_WRTHROUGH;
			curr_match = MTRR_TYPE_WRTHROUGH;
		}

		if (prev_match != curr_match)
			return MTRR_TYPE_UNCACHABLE;
	}

	if (prev_match != 0xFF)
		return prev_match;

	return mtrr_state->def_type;
}

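/*
 * get_mtrr_type() returns 0xFE when start and end fall on different
 * sides of a variable-range boundary; 0xFE and 0xFF are not valid
 * memory-type encodings, so the caller below falls back to WB for them.
 */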
u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	u8 mtrr;

	mtrr = get_mtrr_type(&vcpu->arch.mtrr_state, gfn << PAGE_SHIFT,
			     (gfn << PAGE_SHIFT) + PAGE_SIZE);
	if (mtrr == 0xfe || mtrr == 0xff)
		mtrr = MTRR_TYPE_WRBACK;
	return mtrr;
}
EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type);