KVM: MMU: Fold fix_write_pf() into set_pte_common()
drivers/kvm/paging_tmpl.h
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *	Yaniv Kamay <yaniv@qumranet.com>
 *	Avi Kivity <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

/*
 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
 * so the code in this file is compiled twice, once per pte size.
 */
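
/*
 * A minimal sketch of how the includer instantiates this template
 * (the actual include site lives in mmu.c; shown here for illustration):
 *
 *	#define PTTYPE 64
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 *	#define PTTYPE 32
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 */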

#if PTTYPE == 64
	#define pt_element_t u64
	#define guest_walker guest_walker64
	#define FNAME(name) paging##64_##name
	#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
	#define PT_DIR_BASE_ADDR_MASK PT64_DIR_BASE_ADDR_MASK
	#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
	#define PT_PTE_COPY_MASK PT64_PTE_COPY_MASK
	#ifdef CONFIG_X86_64
	#define PT_MAX_FULL_LEVELS 4
	#else
	#define PT_MAX_FULL_LEVELS 2
	#endif
#elif PTTYPE == 32
	#define pt_element_t u32
	#define guest_walker guest_walker32
	#define FNAME(name) paging##32_##name
	#define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
	#define PT_DIR_BASE_ADDR_MASK PT32_DIR_BASE_ADDR_MASK
	#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
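	/* shadow page tables are always 64-bit, whatever the guest pte size */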
	#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
	#define PT_PTE_COPY_MASK PT32_PTE_COPY_MASK
	#define PT_MAX_FULL_LEVELS 2
#else
	#error Invalid PTTYPE value
#endif

/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
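/*
 * A brief field guide (descriptive, inferred from the usage below):
 *	level        - current level while walking, 1 == pte level
 *	table_gfn[]  - guest frame number of the page table at each level;
 *	               identifies the shadowed tables and lets them be
 *	               marked accessed/dirty
 *	table        - kmap of the guest page table currently being walked
 *	ptep         - pointer to the final guest pte found by the walk
 *	inherited_ar - access rights ANDed across all parent levels
 *	gfn          - guest frame number the pte finally maps
 *	error_code   - page fault error code to inject on failure
 */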
struct guest_walker {
	int level;
	gfn_t table_gfn[PT_MAX_FULL_LEVELS];
	pt_element_t *table;
	pt_element_t *ptep;
	pt_element_t inherited_ar;
	gfn_t gfn;
	u32 error_code;
};

/*
 * Fetch a guest pte for a guest virtual address
 */
static int FNAME(walk_addr)(struct guest_walker *walker,
			    struct kvm_vcpu *vcpu, gva_t addr,
			    int write_fault, int user_fault, int fetch_fault)
{
	hpa_t hpa;
	struct kvm_memory_slot *slot;
	pt_element_t *ptep;
	pt_element_t root;
	gfn_t table_gfn;

	pgprintk("%s: addr %lx\n", __FUNCTION__, addr);
	walker->level = vcpu->mmu.root_level;
	walker->table = NULL;
	root = vcpu->cr3;
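	/*
	 * A PAE guest (64-bit ptes, !long mode) keeps its four top-level
	 * entries in the cached vcpu->pdptrs rather than behind cr3, so
	 * start the walk from there, one level down.
	 */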
#if PTTYPE == 64
	if (!is_long_mode(vcpu)) {
		walker->ptep = &vcpu->pdptrs[(addr >> 30) & 3];
		root = *walker->ptep;
		if (!(root & PT_PRESENT_MASK))
			goto not_present;
		--walker->level;
	}
#endif
	table_gfn = (root & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
	walker->table_gfn[walker->level - 1] = table_gfn;
	pgprintk("%s: table_gfn[%d] %lx\n", __FUNCTION__,
		 walker->level - 1, table_gfn);
	slot = gfn_to_memslot(vcpu->kvm, table_gfn);
	hpa = safe_gpa_to_hpa(vcpu, root & PT64_BASE_ADDR_MASK);
	walker->table = kmap_atomic(pfn_to_page(hpa >> PAGE_SHIFT), KM_USER0);

	ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
	       (vcpu->cr3 & ~(PAGE_MASK | CR3_FLAGS_MASK)) == 0);

	walker->inherited_ar = PT_USER_MASK | PT_WRITABLE_MASK;

	for (;;) {
		int index = PT_INDEX(addr, walker->level);
		hpa_t paddr;

		ptep = &walker->table[index];
		ASSERT(((unsigned long)walker->table & PAGE_MASK) ==
		       ((unsigned long)ptep & PAGE_MASK));

		if (!is_present_pte(*ptep))
			goto not_present;

		if (write_fault && !is_writeble_pte(*ptep))
			if (user_fault || is_write_protection(vcpu))
				goto access_error;

		if (user_fault && !(*ptep & PT_USER_MASK))
			goto access_error;

#if PTTYPE == 64
		if (fetch_fault && is_nx(vcpu) && (*ptep & PT64_NX_MASK))
			goto access_error;
#endif

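		/*
		 * The hardware walker sets the accessed bit as it walks;
		 * emulate that, and mark the guest frame dirty since this
		 * modifies guest memory.
		 */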
		if (!(*ptep & PT_ACCESSED_MASK)) {
			mark_page_dirty(vcpu->kvm, table_gfn);
			*ptep |= PT_ACCESSED_MASK;
		}

		if (walker->level == PT_PAGE_TABLE_LEVEL) {
			walker->gfn = (*ptep & PT_BASE_ADDR_MASK)
				>> PAGE_SHIFT;
			break;
		}

		if (walker->level == PT_DIRECTORY_LEVEL
		    && (*ptep & PT_PAGE_SIZE_MASK)
		    && (PTTYPE == 64 || is_pse(vcpu))) {
			walker->gfn = (*ptep & PT_DIR_BASE_ADDR_MASK)
				>> PAGE_SHIFT;
			walker->gfn += PT_INDEX(addr, PT_PAGE_TABLE_LEVEL);
			break;
		}

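		/*
		 * Effective access rights are the AND of the rights at
		 * every level; accumulate them before descending.
		 */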
		walker->inherited_ar &= walker->table[index];
		table_gfn = (*ptep & PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
		paddr = safe_gpa_to_hpa(vcpu, *ptep & PT_BASE_ADDR_MASK);
		kunmap_atomic(walker->table, KM_USER0);
		walker->table = kmap_atomic(pfn_to_page(paddr >> PAGE_SHIFT),
					    KM_USER0);
		--walker->level;
		walker->table_gfn[walker->level - 1] = table_gfn;
		pgprintk("%s: table_gfn[%d] %lx\n", __FUNCTION__,
			 walker->level - 1, table_gfn);
	}
	walker->ptep = ptep;
	pgprintk("%s: pte %llx\n", __FUNCTION__, (u64)*ptep);
	return 1;

not_present:
	walker->error_code = 0;
	goto err;

access_error:
	walker->error_code = PFERR_PRESENT_MASK;

err:
	if (write_fault)
		walker->error_code |= PFERR_WRITE_MASK;
	if (user_fault)
		walker->error_code |= PFERR_USER_MASK;
	if (fetch_fault)
		walker->error_code |= PFERR_FETCH_MASK;
	return 0;
}

static void FNAME(release_walker)(struct guest_walker *walker)
{
	if (walker->table)
		kunmap_atomic(walker->table, KM_USER0);
}

static void FNAME(mark_pagetable_dirty)(struct kvm *kvm,
					struct guest_walker *walker)
{
	mark_page_dirty(kvm, walker->table_gfn[walker->level - 1]);
}

static void FNAME(set_pte_common)(struct kvm_vcpu *vcpu,
				  u64 *shadow_pte,
				  gpa_t gaddr,
				  pt_element_t *gpte,
				  u64 access_bits,
				  int user_fault,
				  int write_fault,
				  int *ptwrite,
				  struct guest_walker *walker,
				  gfn_t gfn)
{
	hpa_t paddr;
	int dirty = *gpte & PT_DIRTY_MASK;
	int was_rmapped = is_rmap_pte(*shadow_pte);

	pgprintk("%s: spte %llx gpte %llx access %llx write_fault %d"
		 " user_fault %d gfn %lx\n",
		 __FUNCTION__, *shadow_pte, (u64)*gpte, access_bits,
		 write_fault, user_fault, gfn);

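	/*
	 * A write fault against a clean guest pte lets us set the dirty
	 * bit ourselves, just as the hardware walker would, so the
	 * shadow pte can be made writable below.
	 */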
	if (write_fault && !dirty) {
		*gpte |= PT_DIRTY_MASK;
		dirty = 1;
		FNAME(mark_pagetable_dirty)(vcpu->kvm, walker);
	}

	*shadow_pte |= access_bits << PT_SHADOW_BITS_OFFSET;
	if (!dirty)
		access_bits &= ~PT_WRITABLE_MASK;

	paddr = gpa_to_hpa(vcpu, gaddr & PT64_BASE_ADDR_MASK);

	*shadow_pte |= PT_PRESENT_MASK;
	if (access_bits & PT_USER_MASK)
		*shadow_pte |= PT_USER_MASK;

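	/*
	 * No memslot backs this guest address, so treat it as mmio:
	 * leave the shadow pte non-present but tagged with the gpa and
	 * the io mark, so the fault handler emulates the access.
	 */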
	if (is_error_hpa(paddr)) {
		*shadow_pte |= gaddr;
		*shadow_pte |= PT_SHADOW_IO_MARK;
		*shadow_pte &= ~PT_PRESENT_MASK;
		return;
	}

	*shadow_pte |= paddr;

	if (!write_fault && (*shadow_pte & PT_SHADOW_USER_MASK) &&
	    !(*shadow_pte & PT_USER_MASK)) {
		/*
		 * If supervisor write protect is disabled, we shadow kernel
		 * pages as user pages so we can trap the write access.
		 */
		*shadow_pte |= PT_USER_MASK;
		*shadow_pte &= ~PT_WRITABLE_MASK;
		access_bits &= ~PT_WRITABLE_MASK;
	}

	if ((access_bits & PT_WRITABLE_MASK)
	    || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
		struct kvm_mmu_page *shadow;

		*shadow_pte |= PT_WRITABLE_MASK;
		if (user_fault) {
			mmu_unshadow(vcpu, gfn);
			goto unshadowed;
		}

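		/*
		 * If the frame is itself shadowed as a guest page table,
		 * keep the mapping read-only so writes trap and the
		 * shadow copy stays coherent; ptwrite tells the fault
		 * handler to emulate the faulting write.
		 */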
		shadow = kvm_mmu_lookup_page(vcpu, gfn);
		if (shadow) {
			pgprintk("%s: found shadow page for %lx, marking ro\n",
				 __FUNCTION__, gfn);
			access_bits &= ~PT_WRITABLE_MASK;
			if (is_writeble_pte(*shadow_pte)) {
				*shadow_pte &= ~PT_WRITABLE_MASK;
				kvm_arch_ops->tlb_flush(vcpu);
			}
			if (write_fault)
				*ptwrite = 1;
		}
	}

unshadowed:

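	/* keep the memslot dirty bitmap current for writable mappings */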
	if (access_bits & PT_WRITABLE_MASK)
		mark_page_dirty(vcpu->kvm, gaddr >> PAGE_SHIFT);

	page_header_update_slot(vcpu->kvm, shadow_pte, gaddr);
	if (!was_rmapped)
		rmap_add(vcpu, shadow_pte);
}

static void FNAME(set_pte)(struct kvm_vcpu *vcpu, pt_element_t *gpte,
			   u64 *shadow_pte, u64 access_bits,
			   int user_fault, int write_fault, int *ptwrite,
			   struct guest_walker *walker, gfn_t gfn)
{
	access_bits &= *gpte;
	*shadow_pte |= (*gpte & PT_PTE_COPY_MASK);
	FNAME(set_pte_common)(vcpu, shadow_pte, *gpte & PT_BASE_ADDR_MASK,
			      gpte, access_bits, user_fault, write_fault,
			      ptwrite, walker, gfn);
}

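/*
 * Speculatively install a shadow pte when the guest writes a present,
 * accessed pte into one of its shadowed page tables, rather than wait
 * for the next fault on that address.  Passing a NULL walker and
 * ptwrite is safe here: with write_fault == 0 the paths that would
 * dereference them are never taken.
 */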
static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
			      u64 *spte, const void *pte, int bytes)
{
	pt_element_t gpte;

	if (bytes < sizeof(pt_element_t))
		return;
	gpte = *(const pt_element_t *)pte;
	if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK))
		return;
	pgprintk("%s: gpte %llx spte %p\n", __FUNCTION__, (u64)gpte, spte);
	FNAME(set_pte)(vcpu, &gpte, spte, PT_USER_MASK | PT_WRITABLE_MASK, 0,
		       0, NULL, NULL,
		       (gpte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT);
}

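/*
 * set_pde maps one 4K piece of a guest large page; gfn already selects
 * the piece computed by walk_addr.  With PSE36, high bits of a 32-bit
 * pde supply physical address bits above bit 31.
 */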
static void FNAME(set_pde)(struct kvm_vcpu *vcpu, pt_element_t *gpde,
			   u64 *shadow_pte, u64 access_bits,
			   int user_fault, int write_fault, int *ptwrite,
			   struct guest_walker *walker, gfn_t gfn)
{
	gpa_t gaddr;

	access_bits &= *gpde;
	gaddr = (gpa_t)gfn << PAGE_SHIFT;
	if (PTTYPE == 32 && is_cpuid_PSE36())
		gaddr |= (*gpde & PT32_DIR_PSE36_MASK) <<
			(32 - PT32_DIR_PSE36_SHIFT);
	*shadow_pte |= *gpde & PT_PTE_COPY_MASK;
	FNAME(set_pte_common)(vcpu, shadow_pte, gaddr,
			      gpde, access_bits, user_fault, write_fault,
			      ptwrite, walker, gfn);
}

/*
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 */
static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
			 struct guest_walker *walker,
			 int user_fault, int write_fault, int *ptwrite)
{
	hpa_t shadow_addr;
	int level;
	u64 *shadow_ent;
	u64 *prev_shadow_ent = NULL;
	pt_element_t *guest_ent = walker->ptep;

	if (!is_present_pte(*guest_ent))
		return NULL;

	shadow_addr = vcpu->mmu.root_hpa;
	level = vcpu->mmu.shadow_root_level;
	if (level == PT32E_ROOT_LEVEL) {
		shadow_addr = vcpu->mmu.pae_root[(addr >> 30) & 3];
		shadow_addr &= PT64_BASE_ADDR_MASK;
		--level;
	}

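	/*
	 * Descend the shadow hierarchy, allocating missing intermediate
	 * shadow pages, until reaching the slot for the leaf shadow pte.
	 */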
	for (; ; level--) {
		u32 index = SHADOW_PT_INDEX(addr, level);
		struct kvm_mmu_page *shadow_page;
		u64 shadow_pte;
		int metaphysical;
		gfn_t table_gfn;
		unsigned hugepage_access = 0;

		shadow_ent = ((u64 *)__va(shadow_addr)) + index;
		if (is_present_pte(*shadow_ent) || is_io_pte(*shadow_ent)) {
			if (level == PT_PAGE_TABLE_LEVEL)
				break;
			shadow_addr = *shadow_ent & PT64_BASE_ADDR_MASK;
			prev_shadow_ent = shadow_ent;
			continue;
		}

		if (level == PT_PAGE_TABLE_LEVEL)
			break;

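		/*
		 * A guest large (4M/2M) page is shadowed by a page of 4K
		 * shadow ptes with no backing guest table; such a shadow
		 * page is termed "metaphysical".  hugepage_access keeps
		 * the guest pde's user/write bits so shadows of the same
		 * frame with different rights stay distinct.
		 */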
		if (level - 1 == PT_PAGE_TABLE_LEVEL
		    && walker->level == PT_DIRECTORY_LEVEL) {
			metaphysical = 1;
			hugepage_access = *guest_ent;
			hugepage_access &= PT_USER_MASK | PT_WRITABLE_MASK;
			hugepage_access >>= PT_WRITABLE_SHIFT;
			table_gfn = (*guest_ent & PT_BASE_ADDR_MASK)
				>> PAGE_SHIFT;
		} else {
			metaphysical = 0;
			table_gfn = walker->table_gfn[level - 2];
		}
		shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
					       metaphysical, hugepage_access,
					       shadow_ent);
		shadow_addr = __pa(shadow_page->spt);
		shadow_pte = shadow_addr | PT_PRESENT_MASK | PT_ACCESSED_MASK
			| PT_WRITABLE_MASK | PT_USER_MASK;
		*shadow_ent = shadow_pte;
		prev_shadow_ent = shadow_ent;
	}

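	/*
	 * Install the leaf shadow pte, derived either from a guest pde
	 * (large page; the parent shadow pde gets PT_SHADOW_PS_MARK) or
	 * from a guest pte.
	 */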
	if (walker->level == PT_DIRECTORY_LEVEL) {
		if (prev_shadow_ent)
			*prev_shadow_ent |= PT_SHADOW_PS_MARK;
		FNAME(set_pde)(vcpu, guest_ent, shadow_ent,
			       walker->inherited_ar, user_fault, write_fault,
			       ptwrite, walker, walker->gfn);
	} else {
		ASSERT(walker->level == PT_PAGE_TABLE_LEVEL);
		FNAME(set_pte)(vcpu, guest_ent, shadow_ent,
			       walker->inherited_ar, user_fault, write_fault,
			       ptwrite, walker, walker->gfn);
	}
	return shadow_ent;
}

/*
 * Page fault handler.  There are several causes for a page fault:
 *   - there is no shadow pte for the guest pte
 *   - write access through a shadow pte marked read only so that we can set
 *     the dirty bit
 *   - write access to a shadow pte marked read only so that we can update
 *     the page dirty bitmap, when userspace requests it
 *   - mmio access; in this case we will never install a present shadow pte
 *   - normal guest page fault due to the guest pte marked not present, not
 *     writable, or not executable
 *
 * Returns: 1 if we need to emulate the instruction, 0 otherwise, or
 *          a negative value on error.
 */
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
			     u32 error_code)
{
	int write_fault = error_code & PFERR_WRITE_MASK;
	int user_fault = error_code & PFERR_USER_MASK;
	int fetch_fault = error_code & PFERR_FETCH_MASK;
	struct guest_walker walker;
	u64 *shadow_pte;
	int write_pt = 0;
	int r;

	pgprintk("%s: addr %lx err %x\n", __FUNCTION__, addr, error_code);
	kvm_mmu_audit(vcpu, "pre page fault");

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	/*
	 * Look up the guest pte for the faulting address.
	 */
	r = FNAME(walk_addr)(&walker, vcpu, addr, write_fault, user_fault,
			     fetch_fault);

	/*
	 * The page is not mapped by the guest.  Let the guest handle it.
	 */
	if (!r) {
		pgprintk("%s: guest page fault\n", __FUNCTION__);
		inject_page_fault(vcpu, addr, walker.error_code);
		FNAME(release_walker)(&walker);
		vcpu->last_pt_write_count = 0; /* reset fork detector */
		return 0;
	}

	shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
				  &write_pt);
	pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __FUNCTION__,
		 shadow_pte, *shadow_pte, write_pt);

	FNAME(release_walker)(&walker);

	if (!write_pt)
		vcpu->last_pt_write_count = 0; /* reset fork detector */

	/*
	 * mmio: emulate if accessible, otherwise it's a guest fault.
	 */
	if (is_io_pte(*shadow_pte))
		return 1;

	++vcpu->stat.pf_fixed;
	kvm_mmu_audit(vcpu, "post page fault (fixed)");

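	/*
	 * write_pt means the fault was a write to a shadowed guest page
	 * table; returning 1 asks the caller to emulate the instruction
	 * rather than resume the guest.
	 */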
	return write_pt;
}

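/*
 * Translate a guest virtual address to a guest physical address by
 * walking the guest page tables; returns UNMAPPED_GVA on failure.
 */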
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
{
	struct guest_walker walker;
	gpa_t gpa = UNMAPPED_GVA;
	int r;

	r = FNAME(walk_addr)(&walker, vcpu, vaddr, 0, 0, 0);

	if (r) {
		gpa = (gpa_t)walker.gfn << PAGE_SHIFT;
		gpa |= vaddr & ~PAGE_MASK;
	}

	FNAME(release_walker)(&walker);
	return gpa;
}

#undef pt_element_t
#undef guest_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef SHADOW_PT_INDEX
#undef PT_LEVEL_MASK
#undef PT_PTE_COPY_MASK
#undef PT_DIR_BASE_ADDR_MASK
#undef PT_MAX_FULL_LEVELS