/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

/*
 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
 * so the code in this file is compiled twice, once per pte size.
 */

#if PTTYPE == 64
	#define pt_element_t u64
	#define guest_walker guest_walker64
	#define FNAME(name) paging##64_##name
	#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
	#define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl)
	#define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
	#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
	#define PT_LEVEL_BITS PT64_LEVEL_BITS
	#ifdef CONFIG_X86_64
	#define PT_MAX_FULL_LEVELS 4
	#define CMPXCHG cmpxchg
	#else
	#define CMPXCHG cmpxchg64
	#define PT_MAX_FULL_LEVELS 2
	#endif
#elif PTTYPE == 32
	#define pt_element_t u32
	#define guest_walker guest_walker32
	#define FNAME(name) paging##32_##name
	#define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
	#define PT_LVL_ADDR_MASK(lvl) PT32_LVL_ADDR_MASK(lvl)
	#define PT_LVL_OFFSET_MASK(lvl) PT32_LVL_OFFSET_MASK(lvl)
	#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
	#define PT_LEVEL_BITS PT32_LEVEL_BITS
	#define PT_MAX_FULL_LEVELS 2
	#define CMPXCHG cmpxchg
#else
	#error Invalid PTTYPE value
#endif

#define gpte_to_gfn_lvl FNAME(gpte_to_gfn_lvl)
#define gpte_to_gfn(pte) gpte_to_gfn_lvl((pte), PT_PAGE_TABLE_LEVEL)

/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
struct guest_walker {
	int level;
	gfn_t table_gfn[PT_MAX_FULL_LEVELS];
	pt_element_t ptes[PT_MAX_FULL_LEVELS];
	gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
	unsigned pt_access;
	unsigned pte_access;
	gfn_t gfn;
	u32 error_code;
};

static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl)
{
	return (gpte & PT_LVL_ADDR_MASK(lvl)) >> PAGE_SHIFT;
}

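/*
 * Atomically replace a guest pte in the guest page table; used below to set
 * the accessed and dirty bits.  Returns true if the pte changed under us
 * (the cmpxchg did not take effect), in which case the caller restarts the
 * page table walk.
 */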
static bool FNAME(cmpxchg_gpte)(struct kvm *kvm,
				gfn_t table_gfn, unsigned index,
				pt_element_t orig_pte, pt_element_t new_pte)
{
	pt_element_t ret;
	pt_element_t *table;
	struct page *page;

	page = gfn_to_page(kvm, table_gfn);

	table = kmap_atomic(page, KM_USER0);
	ret = CMPXCHG(&table[index], orig_pte, new_pte);
	kunmap_atomic(table, KM_USER0);

	kvm_release_page_dirty(page);

	return (ret != orig_pte);
}

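/*
 * Derive the ACC_* permission mask for a guest pte: the writable and user
 * bits are taken from the pte directly, and execute permission is removed
 * when the guest has NX enabled and the pte's NX bit is set.
 */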
static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte)
{
	unsigned access;

	access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
#if PTTYPE == 64
	if (is_nx(vcpu))
		access &= ~(gpte >> PT64_NX_SHIFT);
#endif
	return access;
}

/*
 * Fetch a guest pte for a guest virtual address
 */
static int FNAME(walk_addr)(struct guest_walker *walker,
			    struct kvm_vcpu *vcpu, gva_t addr,
			    int write_fault, int user_fault, int fetch_fault)
{
	pt_element_t pte;
	gfn_t table_gfn;
	unsigned index, pt_access, uninitialized_var(pte_access);
	gpa_t pte_gpa;
	bool eperm, present, rsvd_fault;

	trace_kvm_mmu_pagetable_walk(addr, write_fault, user_fault,
				     fetch_fault);
walk:
	present = true;
	eperm = rsvd_fault = false;
	walker->level = vcpu->arch.mmu.root_level;
	pte = vcpu->arch.cr3;
#if PTTYPE == 64
	if (!is_long_mode(vcpu)) {
		pte = kvm_pdptr_read(vcpu, (addr >> 30) & 3);
		trace_kvm_mmu_paging_element(pte, walker->level);
		if (!is_present_gpte(pte)) {
			present = false;
			goto error;
		}
		--walker->level;
	}
#endif
	ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
	       (vcpu->arch.cr3 & CR3_NONPAE_RESERVED_BITS) == 0);

	pt_access = ACC_ALL;

	for (;;) {
		index = PT_INDEX(addr, walker->level);

		table_gfn = gpte_to_gfn(pte);
		pte_gpa = gfn_to_gpa(table_gfn);
		pte_gpa += index * sizeof(pt_element_t);
		walker->table_gfn[walker->level - 1] = table_gfn;
		walker->pte_gpa[walker->level - 1] = pte_gpa;

		if (kvm_read_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte))) {
			present = false;
			break;
		}

		trace_kvm_mmu_paging_element(pte, walker->level);

		if (!is_present_gpte(pte)) {
			present = false;
			break;
		}

		if (is_rsvd_bits_set(vcpu, pte, walker->level)) {
			rsvd_fault = true;
			break;
		}

		if (write_fault && !is_writable_pte(pte))
			if (user_fault || is_write_protection(vcpu))
				eperm = true;

		if (user_fault && !(pte & PT_USER_MASK))
			eperm = true;

#if PTTYPE == 64
		if (fetch_fault && (pte & PT64_NX_MASK))
			eperm = true;
#endif

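		/*
		 * Set the accessed bit in the guest pte if the walk has
		 * succeeded so far.  The update is done with a cmpxchg; if
		 * the guest modified the pte concurrently, restart the walk.
		 */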
		if (!eperm && !rsvd_fault && !(pte & PT_ACCESSED_MASK)) {
			trace_kvm_mmu_set_accessed_bit(table_gfn, index,
						       sizeof(pte));
			if (FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn,
			    index, pte, pte|PT_ACCESSED_MASK))
				goto walk;
			mark_page_dirty(vcpu->kvm, table_gfn);
			pte |= PT_ACCESSED_MASK;
		}

		pte_access = pt_access & FNAME(gpte_access)(vcpu, pte);

		walker->ptes[walker->level - 1] = pte;

		if ((walker->level == PT_PAGE_TABLE_LEVEL) ||
		    ((walker->level == PT_DIRECTORY_LEVEL) &&
		     is_large_pte(pte) &&
		     (PTTYPE == 64 || is_pse(vcpu))) ||
		    ((walker->level == PT_PDPE_LEVEL) &&
		     is_large_pte(pte) &&
		     is_long_mode(vcpu))) {
			int lvl = walker->level;

			walker->gfn = gpte_to_gfn_lvl(pte, lvl);
			walker->gfn += (addr & PT_LVL_OFFSET_MASK(lvl))
				       >> PAGE_SHIFT;

			if (PTTYPE == 32 &&
			    walker->level == PT_DIRECTORY_LEVEL &&
			    is_cpuid_PSE36())
				walker->gfn += pse36_gfn_delta(pte);

			break;
		}

		pt_access = pte_access;
		--walker->level;
	}

	if (!present || eperm || rsvd_fault)
		goto error;

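	/*
	 * For a write fault, also set the dirty bit in the final guest pte,
	 * again restarting the walk if the guest raced with us.
	 */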
	if (write_fault && !is_dirty_gpte(pte)) {
		bool ret;

		trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
		ret = FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn, index, pte,
			    pte|PT_DIRTY_MASK);
		if (ret)
			goto walk;
		mark_page_dirty(vcpu->kvm, table_gfn);
		pte |= PT_DIRTY_MASK;
		walker->ptes[walker->level - 1] = pte;
	}

	walker->pt_access = pt_access;
	walker->pte_access = pte_access;
	pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
		 __func__, (u64)pte, pte_access, pt_access);
	return 1;

error:
	walker->error_code = 0;
	if (present)
		walker->error_code |= PFERR_PRESENT_MASK;
	if (write_fault)
		walker->error_code |= PFERR_WRITE_MASK;
	if (user_fault)
		walker->error_code |= PFERR_USER_MASK;
	if (fetch_fault && is_nx(vcpu))
		walker->error_code |= PFERR_FETCH_MASK;
	if (rsvd_fault)
		walker->error_code |= PFERR_RSVD_MASK;
	trace_kvm_mmu_walker_error(walker->error_code);
	return 0;
}

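/*
 * Propagate a guest pte write into the corresponding spte.  A gpte that is
 * not present (or not yet accessed) is not shadowed; otherwise a new spte is
 * installed, but only if the pfn for the new gfn was prefetched into
 * vcpu->arch.update_pte by the pte-write path.
 */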
static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			      u64 *spte, const void *pte)
{
	pt_element_t gpte;
	unsigned pte_access;
	pfn_t pfn;
	u64 new_spte;

	gpte = *(const pt_element_t *)pte;
	if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) {
		if (!is_present_gpte(gpte)) {
			if (sp->unsync)
				new_spte = shadow_trap_nonpresent_pte;
			else
				new_spte = shadow_notrap_nonpresent_pte;
			__set_spte(spte, new_spte);
		}
		return;
	}
	pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
	pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
	if (gpte_to_gfn(gpte) != vcpu->arch.update_pte.gfn)
		return;
	pfn = vcpu->arch.update_pte.pfn;
	if (is_error_pfn(pfn))
		return;
	if (mmu_notifier_retry(vcpu, vcpu->arch.update_pte.mmu_seq))
		return;
	kvm_get_pfn(pfn);
	/*
	 * We call mmu_set_spte() with reset_host_protection = true because
	 * vcpu->arch.update_pte.pfn was fetched from get_user_pages(write = 1).
	 */
	mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0,
		     is_dirty_gpte(gpte), NULL, PT_PAGE_TABLE_LEVEL,
		     gpte_to_gfn(gpte), pfn, true, true);
}

/*
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 */
static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
			 struct guest_walker *gw,
			 int user_fault, int write_fault, int hlevel,
			 int *ptwrite, pfn_t pfn)
{
	unsigned access = gw->pt_access;
	struct kvm_mmu_page *sp;
	u64 *sptep = NULL;
	int direct;
	gfn_t table_gfn;
	int r;
	int level;
	bool dirty = is_dirty_gpte(gw->ptes[gw->level - 1]);
	unsigned direct_access;
	pt_element_t curr_pte;
	struct kvm_shadow_walk_iterator iterator;

	if (!is_present_gpte(gw->ptes[gw->level - 1]))
		return NULL;

	direct_access = gw->pt_access & gw->pte_access;
	if (!dirty)
		direct_access &= ~ACC_WRITE_MASK;

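	/*
	 * Walk the shadow page table from the root down to the target level
	 * (hlevel), creating any missing intermediate shadow pages.  At the
	 * target level the leaf spte is installed with mmu_set_spte().
	 */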
	for_each_shadow_entry(vcpu, addr, iterator) {
		level = iterator.level;
		sptep = iterator.sptep;
		if (iterator.level == hlevel) {
			mmu_set_spte(vcpu, sptep, access,
				     gw->pte_access & access,
				     user_fault, write_fault,
				     dirty, ptwrite, level,
				     gw->gfn, pfn, false, true);
			break;
		}

		if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)
		    && level == gw->level)
			validate_direct_spte(vcpu, sptep, direct_access);

		drop_large_spte(vcpu, sptep);

		if (is_shadow_present_pte(*sptep))
			continue;

		if (level <= gw->level) {
			direct = 1;
			access = direct_access;

			/*
			 * This is a large guest page backed by small host
			 * pages, so we set @direct (@sp->role.direct) = 1 and
			 * set @table_gfn (@sp->gfn) to the base page frame
			 * for linear translations.
			 */
			table_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);
			access &= gw->pte_access;
		} else {
			direct = 0;
			table_gfn = gw->table_gfn[level - 2];
		}
		sp = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
				      direct, access, sptep);
		if (!direct) {
			r = kvm_read_guest_atomic(vcpu->kvm,
						  gw->pte_gpa[level - 2],
						  &curr_pte, sizeof(curr_pte));
			if (r || curr_pte != gw->ptes[level - 2]) {
				kvm_mmu_put_page(sp, sptep);
				kvm_release_pfn_clean(pfn);
				sptep = NULL;
				break;
			}
		}

		link_shadow_page(sptep, sp);
	}

	return sptep;
}

/*
 * Page fault handler.  There are several causes for a page fault:
 *           - there is no shadow pte for the guest pte
 *           - write access through a shadow pte marked read only so that we can set
 *             the dirty bit
 *           - write access to a shadow pte marked read only so we can update the page
 *             dirty bitmap, when userspace requests it
 *           - mmio access; in this case we will never install a present shadow pte
 *           - normal guest page fault due to the guest pte marked not present, not
 *             writable, or not executable
 *
 *  Returns: 1 if we need to emulate the instruction, 0 otherwise, or
 *           a negative value on error.
 */
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
			     u32 error_code)
{
	int write_fault = error_code & PFERR_WRITE_MASK;
	int user_fault = error_code & PFERR_USER_MASK;
	int fetch_fault = error_code & PFERR_FETCH_MASK;
	struct guest_walker walker;
	u64 *sptep;
	int write_pt = 0;
	int r;
	pfn_t pfn;
	int level = PT_PAGE_TABLE_LEVEL;
	unsigned long mmu_seq;

	pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
	kvm_mmu_audit(vcpu, "pre page fault");

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	/*
	 * Look up the guest pte for the faulting address.
	 */
	r = FNAME(walk_addr)(&walker, vcpu, addr, write_fault, user_fault,
			     fetch_fault);

	/*
	 * The page is not mapped by the guest.  Let the guest handle it.
	 */
	if (!r) {
		pgprintk("%s: guest page fault\n", __func__);
		inject_page_fault(vcpu, addr, walker.error_code);
		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
		return 0;
	}

	if (walker.level >= PT_DIRECTORY_LEVEL) {
		level = min(walker.level, mapping_level(vcpu, walker.gfn));
		walker.gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);
	}

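	/*
	 * Snapshot mmu_notifier_seq before translating the gfn to a pfn; it
	 * is re-checked under mmu_lock by mmu_notifier_retry() below so we
	 * do not install an spte for a page that an mmu notifier may have
	 * invalidated in the meantime.
	 */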
	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();
	pfn = gfn_to_pfn(vcpu->kvm, walker.gfn);

	/* mmio */
	if (is_error_pfn(pfn))
		return kvm_handle_bad_page(vcpu->kvm, walker.gfn, pfn);

	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu, mmu_seq))
		goto out_unlock;
	kvm_mmu_free_some_pages(vcpu);
	sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
			     level, &write_pt, pfn);
	(void)sptep;
	pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__,
		 sptep, *sptep, write_pt);

	if (!write_pt)
		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */

	++vcpu->stat.pf_fixed;
	kvm_mmu_audit(vcpu, "post page fault (fixed)");
	spin_unlock(&vcpu->kvm->mmu_lock);

	return write_pt;

out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return 0;
}

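/*
 * Handle a guest INVLPG.  If the last-level spte for @gva belongs to an
 * unsync shadow page, drop it and re-read the guest pte with
 * kvm_mmu_pte_write(); sptes in synced shadow pages already match the guest
 * page table and are left alone.
 */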
static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
{
	struct kvm_shadow_walk_iterator iterator;
	struct kvm_mmu_page *sp;
	gpa_t pte_gpa = -1;
	int level;
	u64 *sptep;
	int need_flush = 0;

	spin_lock(&vcpu->kvm->mmu_lock);

	for_each_shadow_entry(vcpu, gva, iterator) {
		level = iterator.level;
		sptep = iterator.sptep;

		sp = page_header(__pa(sptep));
		if (is_last_spte(*sptep, level)) {
			int offset, shift;

			if (!sp->unsync)
				break;

			shift = PAGE_SHIFT -
				(PT_LEVEL_BITS - PT64_LEVEL_BITS) * level;
			offset = sp->role.quadrant << shift;

			pte_gpa = (sp->gfn << PAGE_SHIFT) + offset;
			pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);

			if (is_shadow_present_pte(*sptep)) {
				if (is_large_pte(*sptep))
					--vcpu->kvm->stat.lpages;
				drop_spte(vcpu->kvm, sptep,
					  shadow_trap_nonpresent_pte);
				need_flush = 1;
			} else
				__set_spte(sptep, shadow_trap_nonpresent_pte);
			break;
		}

		if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
			break;
	}

	if (need_flush)
		kvm_flush_remote_tlbs(vcpu->kvm);

	atomic_inc(&vcpu->kvm->arch.invlpg_counter);

	spin_unlock(&vcpu->kvm->mmu_lock);

	if (pte_gpa == -1)
		return;

	if (mmu_topup_memory_caches(vcpu))
		return;
	kvm_mmu_pte_write(vcpu, pte_gpa, NULL, sizeof(pt_element_t), 0);
}

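/*
 * Translate a guest virtual address to a guest physical address by walking
 * the guest page tables in software; on failure the walker's error code is
 * returned through @error.
 */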
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
			       u32 *error)
{
	struct guest_walker walker;
	gpa_t gpa = UNMAPPED_GVA;
	int r;

	r = FNAME(walk_addr)(&walker, vcpu, vaddr,
			     !!(access & PFERR_WRITE_MASK),
			     !!(access & PFERR_USER_MASK),
			     !!(access & PFERR_FETCH_MASK));

	if (r) {
		gpa = gfn_to_gpa(walker.gfn);
		gpa |= vaddr & ~PAGE_MASK;
	} else if (error)
		*error = walker.error_code;

	return gpa;
}

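/*
 * Pre-fill a shadow page's sptes with nonpresent markers: slots whose guest
 * ptes are present (or could not be read) are marked "trap" so that a fault
 * goes to KVM to build the mapping, the rest are marked "notrap".
 */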
static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
				 struct kvm_mmu_page *sp)
{
	int i, j, offset, r;
	pt_element_t pt[256 / sizeof(pt_element_t)];
	gpa_t pte_gpa;

	if (sp->role.direct
	    || (PTTYPE == 32 && sp->role.level > PT_PAGE_TABLE_LEVEL)) {
		nonpaging_prefetch_page(vcpu, sp);
		return;
	}

	pte_gpa = gfn_to_gpa(sp->gfn);
	if (PTTYPE == 32) {
		offset = sp->role.quadrant << PT64_LEVEL_BITS;
		pte_gpa += offset * sizeof(pt_element_t);
	}

	for (i = 0; i < PT64_ENT_PER_PAGE; i += ARRAY_SIZE(pt)) {
		r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa, pt, sizeof pt);
		pte_gpa += ARRAY_SIZE(pt) * sizeof(pt_element_t);
		for (j = 0; j < ARRAY_SIZE(pt); ++j)
			if (r || is_present_gpte(pt[j]))
				sp->spt[i+j] = shadow_trap_nonpresent_pte;
			else
				sp->spt[i+j] = shadow_notrap_nonpresent_pte;
	}
}

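/*
 * Resync an unsync shadow page against its guest page table: sptes whose
 * guest ptes changed gfn, became not present, or lost the accessed bit are
 * dropped, and the remaining present sptes are refreshed with set_spte().
 */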
/*
 * Using the cached information from sp->gfns is safe because:
 * - The spte has a reference to the struct page, so the pfn for a given gfn
 *   can't change unless all sptes pointing to it are nuked first.
 */
static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			    bool clear_unsync)
{
	int i, offset, nr_present;
	bool reset_host_protection;
	gpa_t first_pte_gpa;

	offset = nr_present = 0;

	/* A direct kvm_mmu_page cannot be unsync. */
	BUG_ON(sp->role.direct);

	if (PTTYPE == 32)
		offset = sp->role.quadrant << PT64_LEVEL_BITS;

	first_pte_gpa = gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);

	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
		unsigned pte_access;
		pt_element_t gpte;
		gpa_t pte_gpa;
		gfn_t gfn;

		if (!is_shadow_present_pte(sp->spt[i]))
			continue;

		pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);

		if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
					  sizeof(pt_element_t)))
			return -EINVAL;

		gfn = gpte_to_gfn(gpte);
		if (gfn != sp->gfns[i] ||
		    !is_present_gpte(gpte) || !(gpte & PT_ACCESSED_MASK)) {
			u64 nonpresent;

			if (is_present_gpte(gpte) || !clear_unsync)
				nonpresent = shadow_trap_nonpresent_pte;
			else
				nonpresent = shadow_notrap_nonpresent_pte;
			drop_spte(vcpu->kvm, &sp->spt[i], nonpresent);
			continue;
		}

		nr_present++;
		pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
		if (!(sp->spt[i] & SPTE_HOST_WRITEABLE)) {
			pte_access &= ~ACC_WRITE_MASK;
			reset_host_protection = 0;
		} else {
			reset_host_protection = 1;
		}
		set_spte(vcpu, &sp->spt[i], pte_access, 0, 0,
			 is_dirty_gpte(gpte), PT_PAGE_TABLE_LEVEL, gfn,
			 spte_to_pfn(sp->spt[i]), true, false,
			 reset_host_protection);
	}

	return !nr_present;
}

#undef pt_element_t
#undef guest_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef PT_LEVEL_MASK
#undef PT_LVL_ADDR_MASK
#undef PT_LVL_OFFSET_MASK
#undef PT_LEVEL_BITS
#undef PT_MAX_FULL_LEVELS
#undef gpte_to_gfn
#undef gpte_to_gfn_lvl
#undef CMPXCHG