arch/x86/kvm/paging_tmpl.h
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

/*
 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
 * so the code in this file is compiled twice, once per pte size.
 */
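/*
 * For reference: mmu.c instantiates this template twice, roughly as
 *
 *	#define PTTYPE 64
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 *	#define PTTYPE 32
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 * so each FNAME(x) below expands to a paging64_x and a paging32_x variant.
 */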

#if PTTYPE == 64
	#define pt_element_t u64
	#define guest_walker guest_walker64
	#define FNAME(name) paging##64_##name
	#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
	#define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl)
	#define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
	#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_BITS PT64_LEVEL_BITS
	#ifdef CONFIG_X86_64
	#define PT_MAX_FULL_LEVELS 4
	#define CMPXCHG cmpxchg
	#else
	#define CMPXCHG cmpxchg64
	#define PT_MAX_FULL_LEVELS 2
	#endif
#elif PTTYPE == 32
	#define pt_element_t u32
	#define guest_walker guest_walker32
	#define FNAME(name) paging##32_##name
	#define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
	#define PT_LVL_ADDR_MASK(lvl) PT32_LVL_ADDR_MASK(lvl)
	#define PT_LVL_OFFSET_MASK(lvl) PT32_LVL_OFFSET_MASK(lvl)
	#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
	#define PT_LEVEL_BITS PT32_LEVEL_BITS
	#define PT_MAX_FULL_LEVELS 2
	#define CMPXCHG cmpxchg
#else
	#error Invalid PTTYPE value
#endif

#define gpte_to_gfn_lvl FNAME(gpte_to_gfn_lvl)
#define gpte_to_gfn(pte) gpte_to_gfn_lvl((pte), PT_PAGE_TABLE_LEVEL)

/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
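/*
 * Each level of the walk records the table gfn, the pte value that was read,
 * the pte's guest-physical address and its host userspace address, so that
 * code such as FNAME(update_accessed_dirty_bits) can revisit the same
 * entries later.
 */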
struct guest_walker {
	int level;
	unsigned max_level;
	gfn_t table_gfn[PT_MAX_FULL_LEVELS];
	pt_element_t ptes[PT_MAX_FULL_LEVELS];
	pt_element_t prefetch_ptes[PTE_PREFETCH_NUM];
	gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
	pt_element_t __user *ptep_user[PT_MAX_FULL_LEVELS];
	unsigned pt_access;
	unsigned pte_access;
	gfn_t gfn;
	struct x86_exception fault;
};

static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl)
{
	return (gpte & PT_LVL_ADDR_MASK(lvl)) >> PAGE_SHIFT;
}

static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			       pt_element_t __user *ptep_user, unsigned index,
			       pt_element_t orig_pte, pt_element_t new_pte)
{
	int npages;
	pt_element_t ret;
	pt_element_t *table;
	struct page *page;

	npages = get_user_pages_fast((unsigned long)ptep_user, 1, 1, &page);
	/* Check if the user is doing something meaningless. */
	if (unlikely(npages != 1))
		return -EFAULT;

	table = kmap_atomic(page);
	ret = CMPXCHG(&table[index], orig_pte, new_pte);
	kunmap_atomic(table);

	kvm_release_page_dirty(page);

	return (ret != orig_pte);
}

static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
					     struct kvm_mmu *mmu,
					     struct guest_walker *walker,
					     int write_fault)
{
	unsigned level, index;
	pt_element_t pte, orig_pte;
	pt_element_t __user *ptep_user;
	gfn_t table_gfn;
	int ret;

	for (level = walker->max_level; level >= walker->level; --level) {
		pte = orig_pte = walker->ptes[level - 1];
		table_gfn = walker->table_gfn[level - 1];
		ptep_user = walker->ptep_user[level - 1];
		index = offset_in_page(ptep_user) / sizeof(pt_element_t);
		if (!(pte & PT_ACCESSED_MASK)) {
			trace_kvm_mmu_set_accessed_bit(table_gfn, index, sizeof(pte));
			pte |= PT_ACCESSED_MASK;
		}
		if (level == walker->level && write_fault && !is_dirty_gpte(pte)) {
			trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
			pte |= PT_DIRTY_MASK;
		}
		if (pte == orig_pte)
			continue;

		ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index, orig_pte, pte);
		if (ret)
			return ret;

		mark_page_dirty(vcpu->kvm, table_gfn);
		walker->ptes[level - 1] = pte;
	}
	return 0;
}

/*
 * Fetch a guest pte for a guest virtual address
 */
static int FNAME(walk_addr_generic)(struct guest_walker *walker,
				    struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				    gva_t addr, u32 access)
{
	int ret;
	pt_element_t pte;
	pt_element_t __user *uninitialized_var(ptep_user);
	gfn_t table_gfn;
	unsigned index, pt_access, pte_access, accessed_dirty, shift;
	gpa_t pte_gpa;
	int offset;
	const int write_fault = access & PFERR_WRITE_MASK;
	const int user_fault = access & PFERR_USER_MASK;
	const int fetch_fault = access & PFERR_FETCH_MASK;
	u16 errcode = 0;
	gpa_t real_gpa;
	gfn_t gfn;

	trace_kvm_mmu_pagetable_walk(addr, access);
retry_walk:
	walker->level = mmu->root_level;
	pte = mmu->get_cr3(vcpu);

#if PTTYPE == 64
	if (walker->level == PT32E_ROOT_LEVEL) {
		pte = mmu->get_pdptr(vcpu, (addr >> 30) & 3);
		trace_kvm_mmu_paging_element(pte, walker->level);
		if (!is_present_gpte(pte))
			goto error;
		--walker->level;
	}
#endif
	walker->max_level = walker->level;
	ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
	       (mmu->get_cr3(vcpu) & CR3_NONPAE_RESERVED_BITS) == 0);

	accessed_dirty = PT_ACCESSED_MASK;
	pt_access = pte_access = ACC_ALL;
	++walker->level;

	do {
		gfn_t real_gfn;
		unsigned long host_addr;

		pt_access &= pte_access;
		--walker->level;

		index = PT_INDEX(addr, walker->level);

		table_gfn = gpte_to_gfn(pte);
		offset = index * sizeof(pt_element_t);
		pte_gpa = gfn_to_gpa(table_gfn) + offset;
		walker->table_gfn[walker->level - 1] = table_gfn;
		walker->pte_gpa[walker->level - 1] = pte_gpa;

		real_gfn = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn),
					      PFERR_USER_MASK|PFERR_WRITE_MASK);
		if (unlikely(real_gfn == UNMAPPED_GVA))
			goto error;
		real_gfn = gpa_to_gfn(real_gfn);

		host_addr = gfn_to_hva(vcpu->kvm, real_gfn);
		if (unlikely(kvm_is_error_hva(host_addr)))
			goto error;

		ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
		if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
			goto error;
		walker->ptep_user[walker->level - 1] = ptep_user;

		trace_kvm_mmu_paging_element(pte, walker->level);

		if (unlikely(!is_present_gpte(pte)))
			goto error;

		if (unlikely(is_rsvd_bits_set(&vcpu->arch.mmu, pte,
					      walker->level))) {
			errcode |= PFERR_RSVD_MASK | PFERR_PRESENT_MASK;
			goto error;
		}

		accessed_dirty &= pte;
		pte_access = pt_access & gpte_access(vcpu, pte);

		walker->ptes[walker->level - 1] = pte;
	} while (!is_last_gpte(mmu, walker->level, pte));

	if (unlikely(permission_fault(mmu, pte_access, access))) {
		errcode |= PFERR_PRESENT_MASK;
		goto error;
	}

	gfn = gpte_to_gfn_lvl(pte, walker->level);
	gfn += (addr & PT_LVL_OFFSET_MASK(walker->level)) >> PAGE_SHIFT;

	if (PTTYPE == 32 && walker->level == PT_DIRECTORY_LEVEL && is_cpuid_PSE36())
		gfn += pse36_gfn_delta(pte);

	real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn), access);
	if (real_gpa == UNMAPPED_GVA)
		return 0;

	walker->gfn = real_gpa >> PAGE_SHIFT;

	if (!write_fault)
		protect_clean_gpte(&pte_access, pte);

	/*
	 * On a write fault, fold the dirty bit into accessed_dirty by shifting it one
	 * place right.
	 *
	 * On a read fault, do nothing.
	 */
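	/*
	 * Worked example of the bit manipulation below: PFERR_WRITE_MASK is
	 * bit 1, so "write_fault >> ilog2(PFERR_WRITE_MASK)" is 0 or 1, and
	 * PT_DIRTY_SHIFT - PT_ACCESSED_SHIFT is 6 - 5 = 1; on a write fault
	 * the pte is therefore shifted right by one so that its dirty bit
	 * lines up with PT_ACCESSED_MASK before being folded into
	 * accessed_dirty.
	 */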
	shift = write_fault >> ilog2(PFERR_WRITE_MASK);
	shift *= PT_DIRTY_SHIFT - PT_ACCESSED_SHIFT;
	accessed_dirty &= pte >> shift;

	if (unlikely(!accessed_dirty)) {
		ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker, write_fault);
		if (unlikely(ret < 0))
			goto error;
		else if (ret)
			goto retry_walk;
	}

	walker->pt_access = pt_access;
	walker->pte_access = pte_access;
	pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
		 __func__, (u64)pte, pte_access, pt_access);
	return 1;

error:
	errcode |= write_fault | user_fault;
	if (fetch_fault && (mmu->nx ||
			    kvm_read_cr4_bits(vcpu, X86_CR4_SMEP)))
		errcode |= PFERR_FETCH_MASK;

	walker->fault.vector = PF_VECTOR;
	walker->fault.error_code_valid = true;
	walker->fault.error_code = errcode;
	walker->fault.address = addr;
	walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu;

	trace_kvm_mmu_walker_error(walker->fault.error_code);
	return 0;
}

static int FNAME(walk_addr)(struct guest_walker *walker,
			    struct kvm_vcpu *vcpu, gva_t addr, u32 access)
{
	return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.mmu, addr,
					access);
}

static int FNAME(walk_addr_nested)(struct guest_walker *walker,
				   struct kvm_vcpu *vcpu, gva_t addr,
				   u32 access)
{
	return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.nested_mmu,
					addr, access);
}

static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
					 struct kvm_mmu_page *sp, u64 *spte,
					 pt_element_t gpte)
{
	if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
		goto no_present;

	if (!is_present_gpte(gpte))
		goto no_present;

	if (!(gpte & PT_ACCESSED_MASK))
		goto no_present;

	return false;

no_present:
	drop_spte(vcpu->kvm, spte);
	return true;
}

static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			      u64 *spte, const void *pte)
{
	pt_element_t gpte;
	unsigned pte_access;
	pfn_t pfn;

	gpte = *(const pt_element_t *)pte;
	if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
		return;

	pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
	pte_access = sp->role.access & gpte_access(vcpu, gpte);
	protect_clean_gpte(&pte_access, gpte);
	pfn = gfn_to_pfn_atomic(vcpu->kvm, gpte_to_gfn(gpte));
	if (mmu_invalid_pfn(pfn))
		return;

	/*
	 * We call mmu_set_spte() with host_writable = true because
	 * vcpu->arch.update_pte.pfn was fetched from get_user_pages(write = 1).
	 */
	mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0,
		     NULL, PT_PAGE_TABLE_LEVEL,
		     gpte_to_gfn(gpte), pfn, true, true);
}

static bool FNAME(gpte_changed)(struct kvm_vcpu *vcpu,
				struct guest_walker *gw, int level)
{
	pt_element_t curr_pte;
	gpa_t base_gpa, pte_gpa = gw->pte_gpa[level - 1];
	u64 mask;
	int r, index;

	if (level == PT_PAGE_TABLE_LEVEL) {
		mask = PTE_PREFETCH_NUM * sizeof(pt_element_t) - 1;
		base_gpa = pte_gpa & ~mask;
		index = (pte_gpa - base_gpa) / sizeof(pt_element_t);

		r = kvm_read_guest_atomic(vcpu->kvm, base_gpa,
				gw->prefetch_ptes, sizeof(gw->prefetch_ptes));
		curr_pte = gw->prefetch_ptes[index];
	} else
		r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa,
				  &curr_pte, sizeof(curr_pte));

	return r || curr_pte != gw->ptes[level - 1];
}

static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
				u64 *sptep)
{
	struct kvm_mmu_page *sp;
	pt_element_t *gptep = gw->prefetch_ptes;
	u64 *spte;
	int i;

	sp = page_header(__pa(sptep));

	if (sp->role.level > PT_PAGE_TABLE_LEVEL)
		return;

	if (sp->role.direct)
		return __direct_pte_prefetch(vcpu, sp, sptep);

	i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
	spte = sp->spt + i;

	for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
		pt_element_t gpte;
		unsigned pte_access;
		gfn_t gfn;
		pfn_t pfn;

		if (spte == sptep)
			continue;

		if (is_shadow_present_pte(*spte))
			continue;

		gpte = gptep[i];

		if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
			continue;

		pte_access = sp->role.access & gpte_access(vcpu, gpte);
		protect_clean_gpte(&pte_access, gpte);
		gfn = gpte_to_gfn(gpte);
		pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
				      pte_access & ACC_WRITE_MASK);
		if (mmu_invalid_pfn(pfn))
			break;

		mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0,
			     NULL, PT_PAGE_TABLE_LEVEL, gfn,
			     pfn, true, true);
	}
}

/*
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 */
static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
			 struct guest_walker *gw,
			 int user_fault, int write_fault, int hlevel,
			 int *emulate, pfn_t pfn, bool map_writable,
			 bool prefault)
{
	unsigned access = gw->pt_access;
	struct kvm_mmu_page *sp = NULL;
	int top_level;
	unsigned direct_access;
	struct kvm_shadow_walk_iterator it;

	if (!is_present_gpte(gw->ptes[gw->level - 1]))
		return NULL;

	direct_access = gw->pte_access;

	top_level = vcpu->arch.mmu.root_level;
	if (top_level == PT32E_ROOT_LEVEL)
		top_level = PT32_ROOT_LEVEL;
	/*
	 * Verify that the top-level gpte is still there.  Since the page
	 * is a root page, it is either write protected (and cannot be
	 * changed from now on) or it is invalid (in which case, we don't
	 * really care if it changes underneath us after this point).
	 */
	if (FNAME(gpte_changed)(vcpu, gw, top_level))
		goto out_gpte_changed;

	for (shadow_walk_init(&it, vcpu, addr);
	     shadow_walk_okay(&it) && it.level > gw->level;
	     shadow_walk_next(&it)) {
		gfn_t table_gfn;

		clear_sp_write_flooding_count(it.sptep);
		drop_large_spte(vcpu, it.sptep);

		sp = NULL;
		if (!is_shadow_present_pte(*it.sptep)) {
			table_gfn = gw->table_gfn[it.level - 2];
			sp = kvm_mmu_get_page(vcpu, table_gfn, addr, it.level-1,
					      false, access, it.sptep);
		}

		/*
		 * Verify that the gpte in the page we've just write
		 * protected is still there.
		 */
		if (FNAME(gpte_changed)(vcpu, gw, it.level - 1))
			goto out_gpte_changed;

		if (sp)
			link_shadow_page(it.sptep, sp);
	}

	for (;
	     shadow_walk_okay(&it) && it.level > hlevel;
	     shadow_walk_next(&it)) {
		gfn_t direct_gfn;

		clear_sp_write_flooding_count(it.sptep);
		validate_direct_spte(vcpu, it.sptep, direct_access);

		drop_large_spte(vcpu, it.sptep);

		if (is_shadow_present_pte(*it.sptep))
			continue;

		direct_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);

		sp = kvm_mmu_get_page(vcpu, direct_gfn, addr, it.level-1,
				      true, direct_access, it.sptep);
		link_shadow_page(it.sptep, sp);
	}

	clear_sp_write_flooding_count(it.sptep);
	mmu_set_spte(vcpu, it.sptep, access, gw->pte_access,
		     user_fault, write_fault, emulate, it.level,
		     gw->gfn, pfn, prefault, map_writable);
	FNAME(pte_prefetch)(vcpu, gw, it.sptep);

	return it.sptep;

out_gpte_changed:
	if (sp)
		kvm_mmu_put_page(sp, it.sptep);
	kvm_release_pfn_clean(pfn);
	return NULL;
}

/*
 * Page fault handler.  There are several causes for a page fault:
 *           - there is no shadow pte for the guest pte
 *           - write access through a shadow pte marked read only so that we can set
 *             the dirty bit
 *           - write access to a shadow pte marked read only so we can update the page
 *             dirty bitmap, when userspace requests it
 *           - mmio access; in this case we will never install a present shadow pte
 *           - normal guest page fault due to the guest pte marked not present, not
 *             writable, or not executable
 *
 *  Returns: 1 if we need to emulate the instruction, 0 otherwise, or
 *           a negative value on error.
 */
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
			     bool prefault)
{
	int write_fault = error_code & PFERR_WRITE_MASK;
	int user_fault = error_code & PFERR_USER_MASK;
	struct guest_walker walker;
	u64 *sptep;
	int emulate = 0;
	int r;
	pfn_t pfn;
	int level = PT_PAGE_TABLE_LEVEL;
	int force_pt_level;
	unsigned long mmu_seq;
	bool map_writable;

	pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);

	if (unlikely(error_code & PFERR_RSVD_MASK))
		return handle_mmio_page_fault(vcpu, addr, error_code,
					      mmu_is_nested(vcpu));

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	/*
	 * Look up the guest pte for the faulting address.
	 */
	r = FNAME(walk_addr)(&walker, vcpu, addr, error_code);

	/*
	 * The page is not mapped by the guest.  Let the guest handle it.
	 */
	if (!r) {
		pgprintk("%s: guest page fault\n", __func__);
		if (!prefault)
			inject_page_fault(vcpu, &walker.fault);

		return 0;
	}

	if (walker.level >= PT_DIRECTORY_LEVEL)
		force_pt_level = mapping_level_dirty_bitmap(vcpu, walker.gfn);
	else
		force_pt_level = 1;
	if (!force_pt_level) {
		level = min(walker.level, mapping_level(vcpu, walker.gfn));
		walker.gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);
	}

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();

	if (try_async_pf(vcpu, prefault, walker.gfn, addr, &pfn, write_fault,
			 &map_writable))
		return 0;

	if (handle_abnormal_pfn(vcpu, mmu_is_nested(vcpu) ? 0 : addr,
				walker.gfn, pfn, walker.pte_access, &r))
		return r;

	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu, mmu_seq))
		goto out_unlock;

	kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
	kvm_mmu_free_some_pages(vcpu);
	if (!force_pt_level)
		transparent_hugepage_adjust(vcpu, &walker.gfn, &pfn, &level);
	sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
			     level, &emulate, pfn, map_writable, prefault);
	(void)sptep;
	pgprintk("%s: shadow pte %p %llx emulate %d\n", __func__,
		 sptep, *sptep, emulate);

	++vcpu->stat.pf_fixed;
	kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
	spin_unlock(&vcpu->kvm->mmu_lock);

	return emulate;

out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return 0;
}

static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp)
{
	int offset = 0;

	WARN_ON(sp->role.level != PT_PAGE_TABLE_LEVEL);

	if (PTTYPE == 32)
		offset = sp->role.quadrant << PT64_LEVEL_BITS;

	return gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
}

static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
{
	struct kvm_shadow_walk_iterator iterator;
	struct kvm_mmu_page *sp;
	int level;
	u64 *sptep;

	vcpu_clear_mmio_info(vcpu, gva);

	/*
	 * No need to check the return value here: if the cache top-up
	 * fails, rmap_can_add() will make us skip the pte prefetch later.
	 */
	mmu_topup_memory_caches(vcpu);

	spin_lock(&vcpu->kvm->mmu_lock);
	for_each_shadow_entry(vcpu, gva, iterator) {
		level = iterator.level;
		sptep = iterator.sptep;

		sp = page_header(__pa(sptep));
		if (is_last_spte(*sptep, level)) {
			pt_element_t gpte;
			gpa_t pte_gpa;

			if (!sp->unsync)
				break;

			pte_gpa = FNAME(get_level1_sp_gpa)(sp);
			pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);

			if (mmu_page_zap_pte(vcpu->kvm, sp, sptep))
				kvm_flush_remote_tlbs(vcpu->kvm);

			if (!rmap_can_add(vcpu))
				break;

			if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
						  sizeof(pt_element_t)))
				break;

			FNAME(update_pte)(vcpu, sp, sptep, &gpte);
		}

		if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
			break;
	}
	spin_unlock(&vcpu->kvm->mmu_lock);
}

static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
			       struct x86_exception *exception)
{
	struct guest_walker walker;
	gpa_t gpa = UNMAPPED_GVA;
	int r;

	r = FNAME(walk_addr)(&walker, vcpu, vaddr, access);

	if (r) {
		gpa = gfn_to_gpa(walker.gfn);
		gpa |= vaddr & ~PAGE_MASK;
	} else if (exception)
		*exception = walker.fault;

	return gpa;
}

static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
				      u32 access,
				      struct x86_exception *exception)
{
	struct guest_walker walker;
	gpa_t gpa = UNMAPPED_GVA;
	int r;

	r = FNAME(walk_addr_nested)(&walker, vcpu, vaddr, access);

	if (r) {
		gpa = gfn_to_gpa(walker.gfn);
		gpa |= vaddr & ~PAGE_MASK;
	} else if (exception)
		*exception = walker.fault;

	return gpa;
}

/*
 * Using the cached information from sp->gfns is safe because:
 * - The spte has a reference to the struct page, so the pfn for a given gfn
 *   can't change unless all sptes pointing to it are nuked first.
 *
 * Note:
 *   We should flush all tlbs if an spte is dropped even though the guest is
 *   responsible for it.  If we don't, kvm_mmu_notifier_invalidate_page and
 *   kvm_mmu_notifier_invalidate_range_start may see that the page is no
 *   longer mapped by the guest and skip the tlb flush, allowing the guest to
 *   access the freed pages.
 *   We increase kvm->tlbs_dirty to delay the tlb flush in this case.
 */
static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
	int i, nr_present = 0;
	bool host_writable;
	gpa_t first_pte_gpa;

	/* direct kvm_mmu_page can not be unsync. */
	BUG_ON(sp->role.direct);

	first_pte_gpa = FNAME(get_level1_sp_gpa)(sp);

	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
		unsigned pte_access;
		pt_element_t gpte;
		gpa_t pte_gpa;
		gfn_t gfn;

		if (!sp->spt[i])
			continue;

		pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);

		if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
					  sizeof(pt_element_t)))
			return -EINVAL;

		if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
			vcpu->kvm->tlbs_dirty++;
			continue;
		}

		gfn = gpte_to_gfn(gpte);
		pte_access = sp->role.access;
		pte_access &= gpte_access(vcpu, gpte);
		protect_clean_gpte(&pte_access, gpte);

		if (sync_mmio_spte(&sp->spt[i], gfn, pte_access, &nr_present))
			continue;

		if (gfn != sp->gfns[i]) {
			drop_spte(vcpu->kvm, &sp->spt[i]);
			vcpu->kvm->tlbs_dirty++;
			continue;
		}

		nr_present++;

		host_writable = sp->spt[i] & SPTE_HOST_WRITEABLE;

		set_spte(vcpu, &sp->spt[i], pte_access, 0, 0,
			 PT_PAGE_TABLE_LEVEL, gfn,
			 spte_to_pfn(sp->spt[i]), true, false,
			 host_writable);
	}

	return !nr_present;
}

#undef pt_element_t
#undef guest_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef PT_LVL_ADDR_MASK
#undef PT_LVL_OFFSET_MASK
#undef PT_LEVEL_BITS
#undef PT_MAX_FULL_LEVELS
#undef gpte_to_gfn
#undef gpte_to_gfn_lvl
#undef CMPXCHG