KVM: MMU: Make gfn_to_page() always safe
drivers/kvm/paging_tmpl.h
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *	Yaniv Kamay  <yaniv@qumranet.com>
 *	Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

/*
 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
 * so the code in this file is compiled twice, once per pte size.
 */
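
/*
 * For reference, the including file (mmu.c in this tree) instantiates
 * both variants with a pattern along these lines (a sketch, not quoted
 * verbatim from mmu.c):
 *
 *	#define PTTYPE 64
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 *	#define PTTYPE 32
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 */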

#if PTTYPE == 64
	#define pt_element_t u64
	#define guest_walker guest_walker64
	#define FNAME(name) paging##64_##name
	#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
	#define PT_DIR_BASE_ADDR_MASK PT64_DIR_BASE_ADDR_MASK
	#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
	#define PT_LEVEL_BITS PT64_LEVEL_BITS
	#ifdef CONFIG_X86_64
	#define PT_MAX_FULL_LEVELS 4
	#else
	#define PT_MAX_FULL_LEVELS 2
	#endif
#elif PTTYPE == 32
	#define pt_element_t u32
	#define guest_walker guest_walker32
	#define FNAME(name) paging##32_##name
	#define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
	#define PT_DIR_BASE_ADDR_MASK PT32_DIR_BASE_ADDR_MASK
	#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
	#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
	#define PT_LEVEL_BITS PT32_LEVEL_BITS
	#define PT_MAX_FULL_LEVELS 2
#else
	#error Invalid PTTYPE value
#endif
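
/*
 * For example, with PTTYPE == 64 the FNAME() macro expands
 * FNAME(walk_addr) to paging64_walk_addr; with PTTYPE == 32 it yields
 * paging32_walk_addr.  Note that SHADOW_PT_INDEX maps to PT64_INDEX in
 * both cases: shadow page tables always use 64-bit ptes, whatever the
 * guest pte size.
 */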

/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
struct guest_walker {
	int level;				/* current/final level of the walk */
	gfn_t table_gfn[PT_MAX_FULL_LEVELS];	/* guest page table gfn, per level */
	pt_element_t pte;			/* the guest pte the walk ended on */
	pt_element_t inherited_ar;		/* access rights ANDed down the levels */
	gfn_t gfn;				/* guest frame the address maps to */
	u32 error_code;				/* fault error code if the walk failed */
};
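
/*
 * Indexing convention, as established by walk_addr() below:
 * table_gfn[level - 1] holds the gfn of the guest page table consulted
 * at 'level', so in a 4-level walk table_gfn[3] is the root table and
 * table_gfn[0] the last-level page table.
 */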

/*
 * Fetch a guest pte for a guest virtual address
 */
static int FNAME(walk_addr)(struct guest_walker *walker,
			    struct kvm_vcpu *vcpu, gva_t addr,
			    int write_fault, int user_fault, int fetch_fault)
{
	struct page *page;
	pt_element_t *table;
	pt_element_t pte;
	gfn_t table_gfn;
	unsigned index;
	gpa_t pte_gpa;

	pgprintk("%s: addr %lx\n", __FUNCTION__, addr);
	walker->level = vcpu->mmu.root_level;
	pte = vcpu->cr3;
#if PTTYPE == 64
	if (!is_long_mode(vcpu)) {
		pte = vcpu->pdptrs[(addr >> 30) & 3];
		if (!is_present_pte(pte))
			goto not_present;
		--walker->level;
	}
#endif
	ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
	       (vcpu->cr3 & CR3_NONPAE_RESERVED_BITS) == 0);

	walker->inherited_ar = PT_USER_MASK | PT_WRITABLE_MASK;

	for (;;) {
		index = PT_INDEX(addr, walker->level);

		table_gfn = (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
		walker->table_gfn[walker->level - 1] = table_gfn;
		pgprintk("%s: table_gfn[%d] %lx\n", __FUNCTION__,
			 walker->level - 1, table_gfn);

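		/*
		 * gfn_to_page() is always safe to call here: per this
		 * commit's subject it never returns NULL, mapping gfns
		 * outside any memslot to a dedicated bad page instead,
		 * so the result can be kmapped without a NULL check.
		 */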
		page = gfn_to_page(vcpu->kvm, (pte & PT64_BASE_ADDR_MASK)
				   >> PAGE_SHIFT);

		table = kmap_atomic(page, KM_USER0);
		pte = table[index];
		kunmap_atomic(table, KM_USER0);

		if (!is_present_pte(pte))
			goto not_present;

		if (write_fault && !is_writeble_pte(pte))
			if (user_fault || is_write_protection(vcpu))
				goto access_error;

		if (user_fault && !(pte & PT_USER_MASK))
			goto access_error;

#if PTTYPE == 64
		if (fetch_fault && is_nx(vcpu) && (pte & PT64_NX_MASK))
			goto access_error;
#endif

		if (!(pte & PT_ACCESSED_MASK)) {
			mark_page_dirty(vcpu->kvm, table_gfn);
			pte |= PT_ACCESSED_MASK;
			table = kmap_atomic(page, KM_USER0);
			table[index] = pte;
			kunmap_atomic(table, KM_USER0);
		}

		if (walker->level == PT_PAGE_TABLE_LEVEL) {
			walker->gfn = (pte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
			break;
		}

		if (walker->level == PT_DIRECTORY_LEVEL
		    && (pte & PT_PAGE_SIZE_MASK)
		    && (PTTYPE == 64 || is_pse(vcpu))) {
			walker->gfn = (pte & PT_DIR_BASE_ADDR_MASK)
				>> PAGE_SHIFT;
			walker->gfn += PT_INDEX(addr, PT_PAGE_TABLE_LEVEL);
			break;
		}

		walker->inherited_ar &= pte;
		--walker->level;
	}

	if (write_fault && !is_dirty_pte(pte)) {
		mark_page_dirty(vcpu->kvm, table_gfn);
		pte |= PT_DIRTY_MASK;
		table = kmap_atomic(page, KM_USER0);
		table[index] = pte;
		kunmap_atomic(table, KM_USER0);
		pte_gpa = table_gfn << PAGE_SHIFT;
		pte_gpa += index * sizeof(pt_element_t);
		kvm_mmu_pte_write(vcpu, pte_gpa, (u8 *)&pte, sizeof(pte));
	}

	walker->pte = pte;
	pgprintk("%s: pte %llx\n", __FUNCTION__, (u64)pte);
	return 1;

not_present:
	walker->error_code = 0;
	goto err;

access_error:
	walker->error_code = PFERR_PRESENT_MASK;

err:
	if (write_fault)
		walker->error_code |= PFERR_WRITE_MASK;
	if (user_fault)
		walker->error_code |= PFERR_USER_MASK;
	if (fetch_fault)
		walker->error_code |= PFERR_FETCH_MASK;
	return 0;
}

static void FNAME(set_pte_common)(struct kvm_vcpu *vcpu,
				  u64 *shadow_pte,
				  gpa_t gaddr,
				  pt_element_t gpte,
				  u64 access_bits,
				  int user_fault,
				  int write_fault,
				  int *ptwrite,
				  struct guest_walker *walker,
				  gfn_t gfn)
{
	hpa_t paddr;
	int dirty = gpte & PT_DIRTY_MASK;
	u64 spte;
	int was_rmapped = is_rmap_pte(*shadow_pte);

	pgprintk("%s: spte %llx gpte %llx access %llx write_fault %d"
		 " user_fault %d gfn %lx\n",
		 __FUNCTION__, *shadow_pte, (u64)gpte, access_bits,
		 write_fault, user_fault, gfn);

	/*
	 * We don't set the accessed bit, since we sometimes want to see
	 * whether the guest actually used the pte (in order to detect
	 * demand paging).
	 */
	spte = PT_PRESENT_MASK | PT_DIRTY_MASK;
	spte |= gpte & PT64_NX_MASK;
	if (!dirty)
		access_bits &= ~PT_WRITABLE_MASK;

	paddr = gpa_to_hpa(vcpu->kvm, gaddr & PT64_BASE_ADDR_MASK);

	spte |= PT_PRESENT_MASK;
	if (access_bits & PT_USER_MASK)
		spte |= PT_USER_MASK;

	if (is_error_hpa(paddr)) {
		set_shadow_pte(shadow_pte,
			       shadow_trap_nonpresent_pte | PT_SHADOW_IO_MARK);
		return;
	}

	spte |= paddr;

	if ((access_bits & PT_WRITABLE_MASK)
	    || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
		struct kvm_mmu_page *shadow;

		spte |= PT_WRITABLE_MASK;
		if (user_fault) {
			mmu_unshadow(vcpu->kvm, gfn);
			goto unshadowed;
		}

		shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
		if (shadow) {
			pgprintk("%s: found shadow page for %lx, marking ro\n",
				 __FUNCTION__, gfn);
			access_bits &= ~PT_WRITABLE_MASK;
			if (is_writeble_pte(spte)) {
				spte &= ~PT_WRITABLE_MASK;
				kvm_x86_ops->tlb_flush(vcpu);
			}
			if (write_fault)
				*ptwrite = 1;
		}
	}

unshadowed:

	if (access_bits & PT_WRITABLE_MASK)
		mark_page_dirty(vcpu->kvm, gaddr >> PAGE_SHIFT);

	pgprintk("%s: setting spte %llx\n", __FUNCTION__, spte);
	set_shadow_pte(shadow_pte, spte);
	page_header_update_slot(vcpu->kvm, shadow_pte, gaddr);
	if (!was_rmapped)
		rmap_add(vcpu, shadow_pte, (gaddr & PT64_BASE_ADDR_MASK)
			 >> PAGE_SHIFT);
	if (!ptwrite || !*ptwrite)
		vcpu->last_pte_updated = shadow_pte;
}

static void FNAME(set_pte)(struct kvm_vcpu *vcpu, pt_element_t gpte,
			   u64 *shadow_pte, u64 access_bits,
			   int user_fault, int write_fault, int *ptwrite,
			   struct guest_walker *walker, gfn_t gfn)
{
	access_bits &= gpte;
	FNAME(set_pte_common)(vcpu, shadow_pte, gpte & PT_BASE_ADDR_MASK,
			      gpte, access_bits, user_fault, write_fault,
			      ptwrite, walker, gfn);
}

static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
			      u64 *spte, const void *pte, int bytes,
			      int offset_in_pte)
{
	pt_element_t gpte;

	gpte = *(const pt_element_t *)pte;
	if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) {
		if (!offset_in_pte && !is_present_pte(gpte))
			set_shadow_pte(spte, shadow_notrap_nonpresent_pte);
		return;
	}
	if (bytes < sizeof(pt_element_t))
		return;
	pgprintk("%s: gpte %llx spte %p\n", __FUNCTION__, (u64)gpte, spte);
	FNAME(set_pte)(vcpu, gpte, spte, PT_USER_MASK | PT_WRITABLE_MASK, 0,
		       0, NULL, NULL,
		       (gpte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT);
}

static void FNAME(set_pde)(struct kvm_vcpu *vcpu, pt_element_t gpde,
			   u64 *shadow_pte, u64 access_bits,
			   int user_fault, int write_fault, int *ptwrite,
			   struct guest_walker *walker, gfn_t gfn)
{
	gpa_t gaddr;

	access_bits &= gpde;
	gaddr = (gpa_t)gfn << PAGE_SHIFT;
	if (PTTYPE == 32 && is_cpuid_PSE36())
		gaddr |= (gpde & PT32_DIR_PSE36_MASK) <<
			(32 - PT32_DIR_PSE36_SHIFT);
	FNAME(set_pte_common)(vcpu, shadow_pte, gaddr,
			      gpde, access_bits, user_fault, write_fault,
			      ptwrite, walker, gfn);
}

/*
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 */
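/*
 * In outline: walk the shadow hierarchy top-down in step with the
 * completed guest walk, allocating missing interior shadow pages with
 * kvm_mmu_get_page(), then install the leaf through set_pde() (guest
 * huge page) or set_pte() (4K page).  A "metaphysical" shadow page
 * covers the interior of a guest huge page, which has no guest page
 * table of its own.
 */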
static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
			 struct guest_walker *walker,
			 int user_fault, int write_fault, int *ptwrite)
{
	hpa_t shadow_addr;
	int level;
	u64 *shadow_ent;
	u64 *prev_shadow_ent = NULL;

	if (!is_present_pte(walker->pte))
		return NULL;

	shadow_addr = vcpu->mmu.root_hpa;
	level = vcpu->mmu.shadow_root_level;
	if (level == PT32E_ROOT_LEVEL) {
		shadow_addr = vcpu->mmu.pae_root[(addr >> 30) & 3];
		shadow_addr &= PT64_BASE_ADDR_MASK;
		--level;
	}

	for (; ; level--) {
		u32 index = SHADOW_PT_INDEX(addr, level);
		struct kvm_mmu_page *shadow_page;
		u64 shadow_pte;
		int metaphysical;
		gfn_t table_gfn;
		unsigned hugepage_access = 0;

		shadow_ent = ((u64 *)__va(shadow_addr)) + index;
		if (is_shadow_present_pte(*shadow_ent)) {
			if (level == PT_PAGE_TABLE_LEVEL)
				break;
			shadow_addr = *shadow_ent & PT64_BASE_ADDR_MASK;
			prev_shadow_ent = shadow_ent;
			continue;
		}

		if (level == PT_PAGE_TABLE_LEVEL)
			break;

		if (level - 1 == PT_PAGE_TABLE_LEVEL
		    && walker->level == PT_DIRECTORY_LEVEL) {
			metaphysical = 1;
			hugepage_access = walker->pte;
			hugepage_access &= PT_USER_MASK | PT_WRITABLE_MASK;
			if (!is_dirty_pte(walker->pte))
				hugepage_access &= ~PT_WRITABLE_MASK;
			hugepage_access >>= PT_WRITABLE_SHIFT;
			if (walker->pte & PT64_NX_MASK)
				hugepage_access |= (1 << 2);
			table_gfn = (walker->pte & PT_BASE_ADDR_MASK)
				>> PAGE_SHIFT;
		} else {
			metaphysical = 0;
			table_gfn = walker->table_gfn[level - 2];
		}
		shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
					       metaphysical, hugepage_access,
					       shadow_ent);
		shadow_addr = __pa(shadow_page->spt);
		shadow_pte = shadow_addr | PT_PRESENT_MASK | PT_ACCESSED_MASK
			| PT_WRITABLE_MASK | PT_USER_MASK;
		*shadow_ent = shadow_pte;
		prev_shadow_ent = shadow_ent;
	}

	if (walker->level == PT_DIRECTORY_LEVEL) {
		FNAME(set_pde)(vcpu, walker->pte, shadow_ent,
			       walker->inherited_ar, user_fault, write_fault,
			       ptwrite, walker, walker->gfn);
	} else {
		ASSERT(walker->level == PT_PAGE_TABLE_LEVEL);
		FNAME(set_pte)(vcpu, walker->pte, shadow_ent,
			       walker->inherited_ar, user_fault, write_fault,
			       ptwrite, walker, walker->gfn);
	}
	return shadow_ent;
}

/*
 * Page fault handler.  There are several causes for a page fault:
 * - there is no shadow pte for the guest pte
 * - write access through a shadow pte marked read only so that we can set
 *   the dirty bit
 * - write access to a shadow pte marked read only so we can update the page
 *   dirty bitmap, when userspace requests it
 * - mmio access; in this case we will never install a present shadow pte
 * - normal guest page fault due to the guest pte marked not present, not
 *   writable, or not executable
 *
 * Returns: 1 if we need to emulate the instruction, 0 otherwise, or
 *          a negative value on error.
 */
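/*
 * For reference, the x86 page fault error code bits consumed here
 * (they match the PFERR_*_MASK definitions):
 *   bit 0 (PFERR_PRESENT_MASK) - protection violation on a present page,
 *                                as opposed to a not-present page
 *   bit 1 (PFERR_WRITE_MASK)   - the faulting access was a write
 *   bit 2 (PFERR_USER_MASK)    - the fault occurred in user mode
 *   bit 4 (PFERR_FETCH_MASK)   - the access was an instruction fetch
 */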
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
			     u32 error_code)
{
	int write_fault = error_code & PFERR_WRITE_MASK;
	int user_fault = error_code & PFERR_USER_MASK;
	int fetch_fault = error_code & PFERR_FETCH_MASK;
	struct guest_walker walker;
	u64 *shadow_pte;
	int write_pt = 0;
	int r;

	pgprintk("%s: addr %lx err %x\n", __FUNCTION__, addr, error_code);
	kvm_mmu_audit(vcpu, "pre page fault");

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	/*
	 * Look up the guest pte for the faulting address.
	 */
	r = FNAME(walk_addr)(&walker, vcpu, addr, write_fault, user_fault,
			     fetch_fault);

	/*
	 * The page is not mapped by the guest.  Let the guest handle it.
	 */
	if (!r) {
		pgprintk("%s: guest page fault\n", __FUNCTION__);
		inject_page_fault(vcpu, addr, walker.error_code);
		vcpu->last_pt_write_count = 0; /* reset fork detector */
		return 0;
	}

	shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
				  &write_pt);
	pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __FUNCTION__,
		 shadow_pte, *shadow_pte, write_pt);

	if (!write_pt)
		vcpu->last_pt_write_count = 0; /* reset fork detector */

	/*
	 * mmio: emulate if accessible, otherwise it's a guest fault.
	 */
	if (is_io_pte(*shadow_pte))
		return 1;

	++vcpu->stat.pf_fixed;
	kvm_mmu_audit(vcpu, "post page fault (fixed)");

	return write_pt;
}

static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
{
	struct guest_walker walker;
	gpa_t gpa = UNMAPPED_GVA;
	int r;

	r = FNAME(walk_addr)(&walker, vcpu, vaddr, 0, 0, 0);

	if (r) {
		gpa = (gpa_t)walker.gfn << PAGE_SHIFT;
		gpa |= vaddr & ~PAGE_MASK;
	}

	return gpa;
}

static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
				 struct kvm_mmu_page *sp)
{
	int i;
	pt_element_t *gpt;

	if (sp->role.metaphysical || PTTYPE == 32) {
		nonpaging_prefetch_page(vcpu, sp);
		return;
	}

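	/*
	 * Roughly: shadow_trap_nonpresent_pte forces accesses through the
	 * slot to exit to KVM (the guest pte is present, so a shadow pte
	 * may need to be built), while shadow_notrap_nonpresent_pte marks
	 * entries the guest itself maps not-present, letting the fault be
	 * reflected to the guest without KVM intervention.
	 */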
	gpt = kmap_atomic(gfn_to_page(vcpu->kvm, sp->gfn), KM_USER0);
	for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
		if (is_present_pte(gpt[i]))
			sp->spt[i] = shadow_trap_nonpresent_pte;
		else
			sp->spt[i] = shadow_notrap_nonpresent_pte;
	kunmap_atomic(gpt, KM_USER0);
}

#undef pt_element_t
#undef guest_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef SHADOW_PT_INDEX
#undef PT_LEVEL_MASK
#undef PT_DIR_BASE_ADDR_MASK
#undef PT_LEVEL_BITS
#undef PT_MAX_FULL_LEVELS