/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *	Yaniv Kamay  <yaniv@qumranet.com>
 *	Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

/*
 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
 * so the code in this file is compiled twice, once per pte size.
 */
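
/*
 * Sketch of how the including file (mmu.c in this tree) is expected to
 * instantiate both variants; illustrative, not a verbatim quote:
 *
 *	#define PTTYPE 64
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 *	#define PTTYPE 32
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 */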

#if PTTYPE == 64
#define pt_element_t u64
#define guest_walker guest_walker64
#define FNAME(name) paging##64_##name
#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
#define PT_DIR_BASE_ADDR_MASK PT64_DIR_BASE_ADDR_MASK
#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
#define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
#define PT_PTE_COPY_MASK PT64_PTE_COPY_MASK
#elif PTTYPE == 32
#define pt_element_t u32
#define guest_walker guest_walker32
#define FNAME(name) paging##32_##name
#define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
#define PT_DIR_BASE_ADDR_MASK PT32_DIR_BASE_ADDR_MASK
#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
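/*
 * Note that shadow page tables are always in the 64-bit pte format, so
 * SHADOW_PT_INDEX above uses PT64_INDEX even when guest ptes are 32-bit.
 */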
#define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
#define PT_PTE_COPY_MASK PT32_PTE_COPY_MASK
#else
#error Invalid PTTYPE value
#endif
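
/*
 * FNAME() pastes the pte size into each function name; for example,
 * FNAME(page_fault) expands to paging64_page_fault or paging32_page_fault,
 * depending on PTTYPE.
 */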

/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
struct guest_walker {
	int level;			/* current walk level; starts at mmu.root_level */
	pt_element_t *table;		/* kmap'ed guest page table at the current level */
	pt_element_t inherited_ar;	/* user/writable bits anded down from parent entries */
};

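/*
 * Map the guest's root page table and prime the walker.  For 32-bit pae
 * guests, cr3 points at a 32-byte-aligned pdpt that need not be page
 * aligned, so the sub-page bits of cr3 (excluding the flag bits) are
 * folded back into the kmap'ed pointer; the ASSERT verifies that only
 * the pae case has such bits set.
 */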
static void FNAME(init_walker)(struct guest_walker *walker,
			       struct kvm_vcpu *vcpu)
{
	hpa_t hpa;
	struct kvm_memory_slot *slot;

	walker->level = vcpu->mmu.root_level;
	slot = gfn_to_memslot(vcpu->kvm,
			      (vcpu->cr3 & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
	hpa = safe_gpa_to_hpa(vcpu, vcpu->cr3 & PT64_BASE_ADDR_MASK);
	walker->table = kmap_atomic(pfn_to_page(hpa >> PAGE_SHIFT), KM_USER0);

	ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
	       (vcpu->cr3 & ~(PAGE_MASK | CR3_FLAGS_MASK)) == 0);

	walker->table = (pt_element_t *)((unsigned long)walker->table |
		(unsigned long)(vcpu->cr3 & ~(PAGE_MASK | CR3_FLAGS_MASK)));
	walker->inherited_ar = PT_USER_MASK | PT_WRITABLE_MASK;
}

static void FNAME(release_walker)(struct guest_walker *walker)
{
	kunmap_atomic(walker->table, KM_USER0);
}

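/*
 * Instantiate a leaf shadow pte from a guest pte: copy the bits selected
 * by PT_PTE_COPY_MASK, then let set_pte_common() fill in the host
 * physical address and the protection bits, restricted to the access
 * rights present in both access_bits and the guest pte.
 */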
static void FNAME(set_pte)(struct kvm_vcpu *vcpu, u64 guest_pte,
			   u64 *shadow_pte, u64 access_bits)
{
	ASSERT(*shadow_pte == 0);
	access_bits &= guest_pte;
	*shadow_pte = (guest_pte & PT_PTE_COPY_MASK);
	set_pte_common(vcpu, shadow_pte, guest_pte & PT_BASE_ADDR_MASK,
		       guest_pte & PT_DIRTY_MASK, access_bits);
}

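/*
 * Instantiate a leaf shadow pte from a guest large-page pde.  Shadow
 * ptes always map 4K pages, so 'index' selects which 4K slice of the
 * guest's large page this shadow pte covers.  With PSE36, pde bits at
 * PT32_DIR_PSE36_SHIFT and above supply physical address bits 32 and up.
 */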
static void FNAME(set_pde)(struct kvm_vcpu *vcpu, u64 guest_pde,
			   u64 *shadow_pte, u64 access_bits,
			   int index)
{
	gpa_t gaddr;

	ASSERT(*shadow_pte == 0);
	access_bits &= guest_pde;
	gaddr = (guest_pde & PT_DIR_BASE_ADDR_MASK) + PAGE_SIZE * index;
	if (PTTYPE == 32 && is_cpuid_PSE36())
		gaddr |= (guest_pde & PT32_DIR_PSE36_MASK) <<
			(32 - PT32_DIR_PSE36_SHIFT);
	*shadow_pte = guest_pde & PT_PTE_COPY_MASK;
	set_pte_common(vcpu, shadow_pte, gaddr,
		       guest_pde & PT_DIRTY_MASK, access_bits);
}

/*
 * Fetch a guest pte from a specific level in the paging hierarchy.
 */
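/*
 * Descends from the walker's current position, remapping walker->table
 * at each step, and returns a pointer into the kmap'ed guest table, so
 * the pointer is only valid until the next fetch_guest() or
 * release_walker() call.  User/writable bits are accumulated into
 * walker->inherited_ar on the way down, except for pae pdptes, which
 * carry no such bits.
 */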
static pt_element_t *FNAME(fetch_guest)(struct kvm_vcpu *vcpu,
					struct guest_walker *walker,
					int level,
					gva_t addr)
{
	ASSERT(level > 0 && level <= walker->level);

	for (;;) {
		int index = PT_INDEX(addr, walker->level);
		hpa_t paddr;

		ASSERT(((unsigned long)walker->table & PAGE_MASK) ==
		       ((unsigned long)&walker->table[index] & PAGE_MASK));
		if (level == walker->level ||
		    !is_present_pte(walker->table[index]) ||
		    (walker->level == PT_DIRECTORY_LEVEL &&
		     (walker->table[index] & PT_PAGE_SIZE_MASK) &&
		     (PTTYPE == 64 || is_pse(vcpu))))
			return &walker->table[index];
		if (walker->level != 3 || is_long_mode(vcpu))
			walker->inherited_ar &= walker->table[index];
		paddr = safe_gpa_to_hpa(vcpu, walker->table[index] & PT_BASE_ADDR_MASK);
		kunmap_atomic(walker->table, KM_USER0);
		walker->table = kmap_atomic(pfn_to_page(paddr >> PAGE_SHIFT),
					    KM_USER0);
		--walker->level;
	}
}

/*
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 */
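/*
 * Walks the shadow page table for addr, filling in missing levels as it
 * goes: present shadow entries are simply followed, while for a missing
 * entry the guest pte at the corresponding level is consulted and either
 * a leaf is instantiated via set_pte()/set_pde() or a fresh shadow page
 * is allocated.  Returns NULL if the guest pte is not present, and
 * ERR_PTR(-ENOMEM) if a shadow page could not be allocated.
 */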
static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
			 struct guest_walker *walker)
{
	hpa_t shadow_addr;
	int level;
	u64 *prev_shadow_ent = NULL;

	shadow_addr = vcpu->mmu.root_hpa;
	level = vcpu->mmu.shadow_root_level;

	for (; ; level--) {
		u32 index = SHADOW_PT_INDEX(addr, level);
		u64 *shadow_ent = ((u64 *)__va(shadow_addr)) + index;
		pt_element_t *guest_ent;
		u64 shadow_pte;

		if (is_present_pte(*shadow_ent) || is_io_pte(*shadow_ent)) {
			if (level == PT_PAGE_TABLE_LEVEL)
				return shadow_ent;
			shadow_addr = *shadow_ent & PT64_BASE_ADDR_MASK;
			prev_shadow_ent = shadow_ent;
			continue;
		}

		if (PTTYPE == 32 && level > PT32_ROOT_LEVEL) {
			ASSERT(level == PT32E_ROOT_LEVEL);
			guest_ent = FNAME(fetch_guest)(vcpu, walker,
						       PT32_ROOT_LEVEL, addr);
		} else
			guest_ent = FNAME(fetch_guest)(vcpu, walker,
						       level, addr);

		if (!is_present_pte(*guest_ent))
			return NULL;

		/* Don't set accessed bit on PAE PDPTRs */
		if (vcpu->mmu.root_level != 3 || walker->level != 3)
			*guest_ent |= PT_ACCESSED_MASK;

		if (level == PT_PAGE_TABLE_LEVEL) {
			if (walker->level == PT_DIRECTORY_LEVEL) {
				if (prev_shadow_ent)
					*prev_shadow_ent |= PT_SHADOW_PS_MARK;
				FNAME(set_pde)(vcpu, *guest_ent, shadow_ent,
					       walker->inherited_ar,
					       PT_INDEX(addr, PT_PAGE_TABLE_LEVEL));
			} else {
				ASSERT(walker->level == PT_PAGE_TABLE_LEVEL);
				FNAME(set_pte)(vcpu, *guest_ent, shadow_ent,
					       walker->inherited_ar);
			}
			return shadow_ent;
		}

		shadow_addr = kvm_mmu_alloc_page(vcpu, shadow_ent);
		if (!VALID_PAGE(shadow_addr))
			return ERR_PTR(-ENOMEM);
		shadow_pte = shadow_addr | PT_PRESENT_MASK;
		if (vcpu->mmu.root_level > 3 || level != 3)
			shadow_pte |= PT_ACCESSED_MASK
				| PT_WRITABLE_MASK | PT_USER_MASK;
		*shadow_ent = shadow_pte;
		prev_shadow_ent = shadow_ent;
	}
}

/*
 * The guest faulted for write.  We need to
 *
 * - check write permissions
 * - update the guest pte dirty bit
 * - update our own dirty page tracking structures
 */
static int FNAME(fix_write_pf)(struct kvm_vcpu *vcpu,
			       u64 *shadow_ent,
			       struct guest_walker *walker,
			       gva_t addr,
			       int user)
{
	pt_element_t *guest_ent;
	int writable_shadow;
	gfn_t gfn;

	if (is_writeble_pte(*shadow_ent))
		return 0;

	writable_shadow = *shadow_ent & PT_SHADOW_WRITABLE_MASK;
	if (user) {
		/*
		 * User mode access.  Fail if it's a kernel page or a
		 * read-only page.
		 */
		if (!(*shadow_ent & PT_SHADOW_USER_MASK) || !writable_shadow)
			return 0;
		ASSERT(*shadow_ent & PT_USER_MASK);
	} else
		/*
		 * Kernel mode access.  Fail if it's a read-only page and
		 * supervisor write protection is enabled.  Otherwise grant
		 * the write, but drop the user bit from the shadow pte so
		 * that user-mode accesses still fault and get their
		 * permissions checked separately.
		 */
		if (!writable_shadow) {
			if (is_write_protection(vcpu))
				return 0;
			*shadow_ent &= ~PT_USER_MASK;
		}

	guest_ent = FNAME(fetch_guest)(vcpu, walker, PT_PAGE_TABLE_LEVEL, addr);

	if (!is_present_pte(*guest_ent)) {
		*shadow_ent = 0;
		return 0;
	}

	gfn = (*guest_ent & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
	mark_page_dirty(vcpu->kvm, gfn);
	*shadow_ent |= PT_WRITABLE_MASK;
	*guest_ent |= PT_DIRTY_MASK;

	return 1;
}

/*
 * Page fault handler.  There are several causes for a page fault:
 *
 * - there is no shadow pte for the guest pte
 * - write access through a shadow pte marked read only so that we can set
 *   the dirty bit
 * - write access to a shadow pte marked read only so we can update the page
 *   dirty bitmap, when userspace requests it
 * - mmio access; in this case we will never install a present shadow pte
 * - normal guest page fault due to the guest pte marked not present, not
 *   writable, or not executable
 *
 * Returns: 1 if we need to emulate the instruction, 0 otherwise
 */
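
/*
 * error_code carries the architectural page fault error code bits:
 * PFERR_PRESENT_MASK is set when the fault was a protection violation on
 * a present pte rather than a non-present one, PFERR_WRITE_MASK when the
 * access was a write, and PFERR_USER_MASK when it came from user mode.
 */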
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
			     u32 error_code)
{
	int write_fault = error_code & PFERR_WRITE_MASK;
	int pte_present = error_code & PFERR_PRESENT_MASK;
	int user_fault = error_code & PFERR_USER_MASK;
	struct guest_walker walker;
	u64 *shadow_pte;
	int fixed;

	/*
	 * Look up the shadow pte for the faulting address.  If the fetch
	 * fails because we are out of shadow pages (-ENOMEM), flush the
	 * shadow page tables and retry.
	 */
	for (;;) {
		FNAME(init_walker)(&walker, vcpu);
		shadow_pte = FNAME(fetch)(vcpu, addr, &walker);
		if (IS_ERR(shadow_pte)) {  /* must be -ENOMEM */
			nonpaging_flush(vcpu);
			FNAME(release_walker)(&walker);
			continue;
		}
		break;
	}

	/*
	 * The page is not mapped by the guest.  Let the guest handle it.
	 */
	if (!shadow_pte) {
		inject_page_fault(vcpu, addr, error_code);
		FNAME(release_walker)(&walker);
		return 0;
	}

	/*
	 * Update the shadow pte.
	 */
	if (write_fault)
		fixed = FNAME(fix_write_pf)(vcpu, shadow_pte, &walker, addr,
					    user_fault);
	else
		fixed = fix_read_pf(shadow_pte);

	FNAME(release_walker)(&walker);

	/*
	 * mmio: emulate if accessible, otherwise it's a guest fault.
	 */
	if (is_io_pte(*shadow_pte)) {
		if (may_access(*shadow_pte, write_fault, user_fault))
			return 1;
		pgprintk("%s: io work, no access\n", __FUNCTION__);
		inject_page_fault(vcpu, addr,
				  error_code | PFERR_PRESENT_MASK);
		return 0;
	}

	/*
	 * Protection fault on a present pte that we could not fix:
	 * forward the fault to the guest.
	 */
	if (pte_present && !fixed) {
		inject_page_fault(vcpu, addr, error_code);
		return 0;
	}

	++kvm_stat.pf_fixed;

	return 0;
}

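/*
 * Translate a guest virtual address to a guest physical address by
 * walking the guest page tables only; no shadow entries are created or
 * modified.  Large (2M/4M) pages and the PSE36 extension are handled
 * explicitly.
 */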
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
{
	struct guest_walker walker;
	pt_element_t guest_pte;
	gpa_t gpa;

	FNAME(init_walker)(&walker, vcpu);
	guest_pte = *FNAME(fetch_guest)(vcpu, &walker, PT_PAGE_TABLE_LEVEL,
					vaddr);
	FNAME(release_walker)(&walker);

	if (!is_present_pte(guest_pte))
		return UNMAPPED_GVA;

	if (walker.level == PT_DIRECTORY_LEVEL) {
		ASSERT(guest_pte & PT_PAGE_SIZE_MASK);
		ASSERT(PTTYPE == 64 || is_pse(vcpu));

		gpa = (guest_pte & PT_DIR_BASE_ADDR_MASK) | (vaddr &
			(PT_LEVEL_MASK(PT_PAGE_TABLE_LEVEL) | ~PAGE_MASK));

		if (PTTYPE == 32 && is_cpuid_PSE36())
			gpa |= (guest_pte & PT32_DIR_PSE36_MASK) <<
				(32 - PT32_DIR_PSE36_SHIFT);
	} else {
		gpa = (guest_pte & PT_BASE_ADDR_MASK);
		gpa |= (vaddr & ~PAGE_MASK);
	}

	return gpa;
}

#undef pt_element_t
#undef guest_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef SHADOW_PT_INDEX
#undef PT_LEVEL_MASK
#undef PT_PTE_COPY_MASK
#undef PT_NON_PTE_COPY_MASK
#undef PT_DIR_BASE_ADDR_MASK