KVM: MMU: skip global pgtables on sync due to cr3 switch
arch/x86/kvm/paging_tmpl.h
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *	Yaniv Kamay  <yaniv@qumranet.com>
 *	Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

/*
 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
 * so the code in this file is compiled twice, once per pte size.
 */

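/*
 * For illustration (not part of this file): mmu.c instantiates both
 * variants by defining PTTYPE and including this header twice, roughly:
 *
 *	#define PTTYPE 64
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 *	#define PTTYPE 32
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 */
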
#if PTTYPE == 64
	#define pt_element_t u64
	#define guest_walker guest_walker64
	#define shadow_walker shadow_walker64
	#define FNAME(name) paging##64_##name
	#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
	#define PT_DIR_BASE_ADDR_MASK PT64_DIR_BASE_ADDR_MASK
	#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
	#define PT_LEVEL_BITS PT64_LEVEL_BITS
	#ifdef CONFIG_X86_64
	#define PT_MAX_FULL_LEVELS 4
	#define CMPXCHG cmpxchg
	#else
	#define CMPXCHG cmpxchg64
	#define PT_MAX_FULL_LEVELS 2
	#endif
#elif PTTYPE == 32
	#define pt_element_t u32
	#define guest_walker guest_walker32
	#define shadow_walker shadow_walker32
	#define FNAME(name) paging##32_##name
	#define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
	#define PT_DIR_BASE_ADDR_MASK PT32_DIR_BASE_ADDR_MASK
	#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
	#define PT_LEVEL_BITS PT32_LEVEL_BITS
	#define PT_MAX_FULL_LEVELS 2
	#define CMPXCHG cmpxchg
#else
	#error Invalid PTTYPE value
#endif

#define gpte_to_gfn FNAME(gpte_to_gfn)
#define gpte_to_gfn_pde FNAME(gpte_to_gfn_pde)

/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
struct guest_walker {
	int level;
	gfn_t table_gfn[PT_MAX_FULL_LEVELS];
	pt_element_t ptes[PT_MAX_FULL_LEVELS];
	gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
	unsigned pt_access;
	unsigned pte_access;
	gfn_t gfn;
	u32 error_code;
};

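/*
 * Per-walk state for the generic shadow walker callbacks below: it
 * bundles the guest walk results with the fault parameters so that
 * FNAME(shadow_walk_entry) can install missing shadow levels as
 * walk_shadow() descends the hierarchy.
 */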
struct shadow_walker {
	struct kvm_shadow_walk walker;
	struct guest_walker *guest_walker;
	int user_fault;
	int write_fault;
	int largepage;
	int *ptwrite;
	pfn_t pfn;
	u64 *sptep;
};

static gfn_t gpte_to_gfn(pt_element_t gpte)
{
	return (gpte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
}

static gfn_t gpte_to_gfn_pde(pt_element_t gpte)
{
	return (gpte & PT_DIR_BASE_ADDR_MASK) >> PAGE_SHIFT;
}

static bool FNAME(cmpxchg_gpte)(struct kvm *kvm,
				gfn_t table_gfn, unsigned index,
				pt_element_t orig_pte, pt_element_t new_pte)
{
	pt_element_t ret;
	pt_element_t *table;
	struct page *page;

	page = gfn_to_page(kvm, table_gfn);

	table = kmap_atomic(page, KM_USER0);
	ret = CMPXCHG(&table[index], orig_pte, new_pte);
	kunmap_atomic(table, KM_USER0);

	kvm_release_page_dirty(page);

	return (ret != orig_pte);
}

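/*
 * The cmpxchg above mirrors how hardware sets accessed/dirty bits: if
 * another vcpu (or the guest itself) modified the gpte concurrently, the
 * update is abandoned and the caller must restart its walk, as
 * FNAME(walk_addr) below does with its "goto walk" retry.
 */
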
static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte)
{
	unsigned access;

	access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
#if PTTYPE == 64
	if (is_nx(vcpu))
		access &= ~(gpte >> PT64_NX_SHIFT);
#endif
	return access;
}

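/*
 * Note the NX trick above: shifting the gpte right by PT64_NX_SHIFT
 * moves the NX bit (bit 63) into bit 0, which is ACC_EXEC_MASK, so a set
 * NX bit clears execute permission in a single operation.
 */
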
/*
 * Fetch a guest pte for a guest virtual address
 */
static int FNAME(walk_addr)(struct guest_walker *walker,
			    struct kvm_vcpu *vcpu, gva_t addr,
			    int write_fault, int user_fault, int fetch_fault)
{
	pt_element_t pte;
	gfn_t table_gfn;
	unsigned index, pt_access, pte_access;
	gpa_t pte_gpa;

	pgprintk("%s: addr %lx\n", __func__, addr);
walk:
	walker->level = vcpu->arch.mmu.root_level;
	pte = vcpu->arch.cr3;
#if PTTYPE == 64
	if (!is_long_mode(vcpu)) {
		pte = vcpu->arch.pdptrs[(addr >> 30) & 3];
		if (!is_present_pte(pte))
			goto not_present;
		--walker->level;
	}
#endif
	ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
	       (vcpu->arch.cr3 & CR3_NONPAE_RESERVED_BITS) == 0);

	pt_access = ACC_ALL;

	for (;;) {
		index = PT_INDEX(addr, walker->level);

		table_gfn = gpte_to_gfn(pte);
		pte_gpa = gfn_to_gpa(table_gfn);
		pte_gpa += index * sizeof(pt_element_t);
		walker->table_gfn[walker->level - 1] = table_gfn;
		walker->pte_gpa[walker->level - 1] = pte_gpa;
		pgprintk("%s: table_gfn[%d] %lx\n", __func__,
			 walker->level - 1, table_gfn);

		kvm_read_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte));

		if (!is_present_pte(pte))
			goto not_present;

		if (write_fault && !is_writeble_pte(pte))
			if (user_fault || is_write_protection(vcpu))
				goto access_error;

		if (user_fault && !(pte & PT_USER_MASK))
			goto access_error;

#if PTTYPE == 64
		if (fetch_fault && is_nx(vcpu) && (pte & PT64_NX_MASK))
			goto access_error;
#endif

		if (!(pte & PT_ACCESSED_MASK)) {
			mark_page_dirty(vcpu->kvm, table_gfn);
			if (FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn,
			    index, pte, pte|PT_ACCESSED_MASK))
				goto walk;
			pte |= PT_ACCESSED_MASK;
		}

		pte_access = pt_access & FNAME(gpte_access)(vcpu, pte);

		walker->ptes[walker->level - 1] = pte;

		if (walker->level == PT_PAGE_TABLE_LEVEL) {
			walker->gfn = gpte_to_gfn(pte);
			break;
		}

		if (walker->level == PT_DIRECTORY_LEVEL
		    && (pte & PT_PAGE_SIZE_MASK)
		    && (PTTYPE == 64 || is_pse(vcpu))) {
			walker->gfn = gpte_to_gfn_pde(pte);
			walker->gfn += PT_INDEX(addr, PT_PAGE_TABLE_LEVEL);
			if (PTTYPE == 32 && is_cpuid_PSE36())
				walker->gfn += pse36_gfn_delta(pte);
			break;
		}

		pt_access = pte_access;
		--walker->level;
	}

	if (write_fault && !is_dirty_pte(pte)) {
		bool ret;

		mark_page_dirty(vcpu->kvm, table_gfn);
		ret = FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn, index, pte,
			    pte|PT_DIRTY_MASK);
		if (ret)
			goto walk;
		pte |= PT_DIRTY_MASK;
		kvm_mmu_pte_write(vcpu, pte_gpa, (u8 *)&pte, sizeof(pte));
		walker->ptes[walker->level - 1] = pte;
	}

	walker->pt_access = pt_access;
	walker->pte_access = pte_access;
	pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
		 __func__, (u64)pte, pt_access, pte_access);
	return 1;

not_present:
	walker->error_code = 0;
	goto err;

access_error:
	walker->error_code = PFERR_PRESENT_MASK;

err:
	if (write_fault)
		walker->error_code |= PFERR_WRITE_MASK;
	if (user_fault)
		walker->error_code |= PFERR_USER_MASK;
	if (fetch_fault)
		walker->error_code |= PFERR_FETCH_MASK;
	return 0;
}

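/*
 * Example of the resulting error code: a guest user-mode write to a
 * present but read-only pte fails the walk with PFERR_PRESENT_MASK |
 * PFERR_WRITE_MASK | PFERR_USER_MASK, the same error code hardware
 * would push for the equivalent native fault.
 */
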
static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
			      u64 *spte, const void *pte)
{
	pt_element_t gpte;
	unsigned pte_access;
	pfn_t pfn;
	int largepage = vcpu->arch.update_pte.largepage;

	gpte = *(const pt_element_t *)pte;
	if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) {
		if (!is_present_pte(gpte))
			set_shadow_pte(spte, shadow_notrap_nonpresent_pte);
		return;
	}
	pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
	pte_access = page->role.access & FNAME(gpte_access)(vcpu, gpte);
	if (gpte_to_gfn(gpte) != vcpu->arch.update_pte.gfn)
		return;
	pfn = vcpu->arch.update_pte.pfn;
	if (is_error_pfn(pfn))
		return;
	if (mmu_notifier_retry(vcpu, vcpu->arch.update_pte.mmu_seq))
		return;
	kvm_get_pfn(pfn);
	mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0,
		     gpte & PT_DIRTY_MASK, NULL, largepage,
		     gpte & PT_GLOBAL_MASK, gpte_to_gfn(gpte),
		     pfn, true);
}

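/*
 * The "~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)" test in update_pte
 * bails out unless the gpte is both present and accessed; the idea being
 * that a never-accessed gpte will fault when the guest first touches it
 * anyway, so there is nothing to gain from shadowing it early.
 */
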
/*
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 */
static int FNAME(shadow_walk_entry)(struct kvm_shadow_walk *_sw,
				    struct kvm_vcpu *vcpu, u64 addr,
				    u64 *sptep, int level)
{
	struct shadow_walker *sw =
		container_of(_sw, struct shadow_walker, walker);
	struct guest_walker *gw = sw->guest_walker;
	unsigned access = gw->pt_access;
	struct kvm_mmu_page *shadow_page;
	u64 spte;
	int metaphysical;
	gfn_t table_gfn;
	int r;
	pt_element_t curr_pte;

	if (level == PT_PAGE_TABLE_LEVEL
	    || (sw->largepage && level == PT_DIRECTORY_LEVEL)) {
		mmu_set_spte(vcpu, sptep, access, gw->pte_access & access,
			     sw->user_fault, sw->write_fault,
			     gw->ptes[gw->level-1] & PT_DIRTY_MASK,
			     sw->ptwrite, sw->largepage,
			     gw->ptes[gw->level-1] & PT_GLOBAL_MASK,
			     gw->gfn, sw->pfn, false);
		sw->sptep = sptep;
		return 1;
	}

	if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep))
		return 0;

	if (is_large_pte(*sptep)) {
		set_shadow_pte(sptep, shadow_trap_nonpresent_pte);
		kvm_flush_remote_tlbs(vcpu->kvm);
		rmap_remove(vcpu->kvm, sptep);
	}

	if (level == PT_DIRECTORY_LEVEL && gw->level == PT_DIRECTORY_LEVEL) {
		metaphysical = 1;
		if (!is_dirty_pte(gw->ptes[level - 1]))
			access &= ~ACC_WRITE_MASK;
		table_gfn = gpte_to_gfn(gw->ptes[level - 1]);
	} else {
		metaphysical = 0;
		table_gfn = gw->table_gfn[level - 2];
	}
	shadow_page = kvm_mmu_get_page(vcpu, table_gfn, (gva_t)addr, level-1,
				       metaphysical, access, sptep);
	if (!metaphysical) {
		r = kvm_read_guest_atomic(vcpu->kvm, gw->pte_gpa[level - 2],
					  &curr_pte, sizeof(curr_pte));
		if (r || curr_pte != gw->ptes[level - 2]) {
			kvm_mmu_put_page(shadow_page, sptep);
			kvm_release_pfn_clean(sw->pfn);
			sw->sptep = NULL;
			return 1;
		}
	}

	spte = __pa(shadow_page->spt) | PT_PRESENT_MASK | PT_ACCESSED_MASK
		| PT_WRITABLE_MASK | PT_USER_MASK;
	*sptep = spte;
	return 0;
}

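/*
 * Return value convention for the walker callback above: 1 terminates
 * the shadow walk (a leaf spte was installed, or the guest tables
 * changed mid-walk), while 0 tells walk_shadow() to descend into the
 * shadow page that was just linked in.
 */
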
static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
			 struct guest_walker *guest_walker,
			 int user_fault, int write_fault, int largepage,
			 int *ptwrite, pfn_t pfn)
{
	struct shadow_walker walker = {
		.walker = { .entry = FNAME(shadow_walk_entry), },
		.guest_walker = guest_walker,
		.user_fault = user_fault,
		.write_fault = write_fault,
		.largepage = largepage,
		.ptwrite = ptwrite,
		.pfn = pfn,
	};

	if (!is_present_pte(guest_walker->ptes[guest_walker->level - 1]))
		return NULL;

	walk_shadow(&walker.walker, vcpu, addr);

	return walker.sptep;
}

/*
 * Page fault handler.  There are several causes for a page fault:
 * - there is no shadow pte for the guest pte
 * - write access through a shadow pte marked read only so that we can set
 *   the dirty bit
 * - write access to a shadow pte marked read only so we can update the page
 *   dirty bitmap, when userspace requests it
 * - mmio access; in this case we will never install a present shadow pte
 * - normal guest page fault due to the guest pte marked not present, not
 *   writable, or not executable
 *
 * Returns: 1 if we need to emulate the instruction, 0 otherwise, or
 *          a negative value on error.
 */
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
			     u32 error_code)
{
	int write_fault = error_code & PFERR_WRITE_MASK;
	int user_fault = error_code & PFERR_USER_MASK;
	int fetch_fault = error_code & PFERR_FETCH_MASK;
	struct guest_walker walker;
	u64 *shadow_pte;
	int write_pt = 0;
	int r;
	pfn_t pfn;
	int largepage = 0;
	unsigned long mmu_seq;

	pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
	kvm_mmu_audit(vcpu, "pre page fault");

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	/*
	 * Look up the shadow pte for the faulting address.
	 */
	r = FNAME(walk_addr)(&walker, vcpu, addr, write_fault, user_fault,
			     fetch_fault);

	/*
	 * The page is not mapped by the guest.  Let the guest handle it.
	 */
	if (!r) {
		pgprintk("%s: guest page fault\n", __func__);
		inject_page_fault(vcpu, addr, walker.error_code);
		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
		return 0;
	}

	if (walker.level == PT_DIRECTORY_LEVEL) {
		gfn_t large_gfn;
		large_gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE-1);
		if (is_largepage_backed(vcpu, large_gfn)) {
			walker.gfn = large_gfn;
			largepage = 1;
		}
	}
	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();
	pfn = gfn_to_pfn(vcpu->kvm, walker.gfn);

	/* mmio */
	if (is_error_pfn(pfn)) {
		pgprintk("gfn %lx is mmio\n", walker.gfn);
		kvm_release_pfn_clean(pfn);
		return 1;
	}

	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu, mmu_seq))
		goto out_unlock;
	kvm_mmu_free_some_pages(vcpu);
	shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
				  largepage, &write_pt, pfn);

	pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__,
		 shadow_pte, *shadow_pte, write_pt);

	if (!write_pt)
		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */

	++vcpu->stat.pf_fixed;
	kvm_mmu_audit(vcpu, "post page fault (fixed)");
	spin_unlock(&vcpu->kvm->mmu_lock);

	return write_pt;

out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return 0;
}

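/*
 * The mmu_seq snapshot above, taken before gfn_to_pfn() and re-checked
 * under mmu_lock via mmu_notifier_retry(), closes the race with mmu
 * notifier invalidations: if the host unmapped the page between the pfn
 * lookup and the spte install, the fault is dropped and simply retried.
 */
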
static int FNAME(shadow_invlpg_entry)(struct kvm_shadow_walk *_sw,
				      struct kvm_vcpu *vcpu, u64 addr,
				      u64 *sptep, int level)
{
	if (level == PT_PAGE_TABLE_LEVEL) {
		if (is_shadow_present_pte(*sptep))
			rmap_remove(vcpu->kvm, sptep);
		set_shadow_pte(sptep, shadow_trap_nonpresent_pte);
		return 1;
	}
	if (!is_shadow_present_pte(*sptep))
		return 1;
	return 0;
}

static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
{
	struct shadow_walker walker = {
		.walker = { .entry = FNAME(shadow_invlpg_entry), },
	};

	walk_shadow(&walker.walker, vcpu, gva);
}

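/*
 * Only the leaf spte for gva is zapped; intermediate shadow pages stay
 * intact, matching the guest's invlpg semantics of invalidating a single
 * translation rather than a whole table hierarchy.
 */
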
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
{
	struct guest_walker walker;
	gpa_t gpa = UNMAPPED_GVA;
	int r;

	r = FNAME(walk_addr)(&walker, vcpu, vaddr, 0, 0, 0);

	if (r) {
		gpa = gfn_to_gpa(walker.gfn);
		gpa |= vaddr & ~PAGE_MASK;
	}

	return gpa;
}

static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
				 struct kvm_mmu_page *sp)
{
	int i, j, offset, r;
	pt_element_t pt[256 / sizeof(pt_element_t)];
	gpa_t pte_gpa;

	if (sp->role.metaphysical
	    || (PTTYPE == 32 && sp->role.level > PT_PAGE_TABLE_LEVEL)) {
		nonpaging_prefetch_page(vcpu, sp);
		return;
	}

	pte_gpa = gfn_to_gpa(sp->gfn);
	if (PTTYPE == 32) {
		offset = sp->role.quadrant << PT64_LEVEL_BITS;
		pte_gpa += offset * sizeof(pt_element_t);
	}

	for (i = 0; i < PT64_ENT_PER_PAGE; i += ARRAY_SIZE(pt)) {
		r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa, pt, sizeof pt);
		pte_gpa += ARRAY_SIZE(pt) * sizeof(pt_element_t);
		for (j = 0; j < ARRAY_SIZE(pt); ++j)
			if (r || is_present_pte(pt[j]))
				sp->spt[i+j] = shadow_trap_nonpresent_pte;
			else
				sp->spt[i+j] = shadow_notrap_nonpresent_pte;
	}
}

/*
 * Using the cached information from sp->gfns is safe because:
 * - The spte has a reference to the struct page, so the pfn for a given gfn
 *   can't change unless all sptes pointing to it are nuked first.
 * - Alias changes zap the entire shadow cache.
 */
static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
	int i, offset, nr_present;

	offset = nr_present = 0;

	if (PTTYPE == 32)
		offset = sp->role.quadrant << PT64_LEVEL_BITS;

	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
		unsigned pte_access;
		pt_element_t gpte;
		gpa_t pte_gpa;
		gfn_t gfn = sp->gfns[i];

		if (!is_shadow_present_pte(sp->spt[i]))
			continue;

		pte_gpa = gfn_to_gpa(sp->gfn);
		pte_gpa += (i+offset) * sizeof(pt_element_t);

		if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
					  sizeof(pt_element_t)))
			return -EINVAL;

		if (gpte_to_gfn(gpte) != gfn || !is_present_pte(gpte) ||
		    !(gpte & PT_ACCESSED_MASK)) {
			u64 nonpresent;

			rmap_remove(vcpu->kvm, &sp->spt[i]);
			if (is_present_pte(gpte))
				nonpresent = shadow_trap_nonpresent_pte;
			else
				nonpresent = shadow_notrap_nonpresent_pte;
			set_shadow_pte(&sp->spt[i], nonpresent);
			continue;
		}

		nr_present++;
		pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
		set_spte(vcpu, &sp->spt[i], pte_access, 0, 0,
			 is_dirty_pte(gpte), 0, gpte & PT_GLOBAL_MASK, gfn,
			 spte_to_pfn(sp->spt[i]), true, false);
	}

	return !nr_present;
}

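/*
 * The "gpte & PT_GLOBAL_MASK" argument threaded into set_spte() here
 * (and into mmu_set_spte() above) marks sptes that shadow global guest
 * ptes; per the commit subject, sync due to a cr3 switch can then skip
 * them, since the guest expects global mappings to survive cr3 writes.
 */
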
#undef pt_element_t
#undef guest_walker
#undef shadow_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef PT_LEVEL_MASK
#undef PT_DIR_BASE_ADDR_MASK
#undef PT_LEVEL_BITS
#undef PT_MAX_FULL_LEVELS
#undef gpte_to_gfn
#undef gpte_to_gfn_pde
#undef CMPXCHG