Revert "KVM: x86: check for cr3 validity in ioctl_set_sregs"
[deliverable/linux.git] / arch / x86 / kvm / paging_tmpl.h
CommitLineData
6aa8b732
AK
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

/*
 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
 * so the code in this file is compiled twice, once per pte size.
 */
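/*
 * For orientation only: the two instantiations are produced by including
 * this template with PTTYPE defined, roughly as arch/x86/kvm/mmu.c does:
 *
 *	#define PTTYPE 64
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 *	#define PTTYPE 32
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 */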

#if PTTYPE == 64
	#define pt_element_t u64
	#define guest_walker guest_walker64
	#define FNAME(name) paging##64_##name
	#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
	#define PT_DIR_BASE_ADDR_MASK PT64_DIR_BASE_ADDR_MASK
	#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
	#define PT_LEVEL_BITS PT64_LEVEL_BITS
	#ifdef CONFIG_X86_64
	#define PT_MAX_FULL_LEVELS 4
	#define CMPXCHG cmpxchg
	#else
	#define CMPXCHG cmpxchg64
	#define PT_MAX_FULL_LEVELS 2
	#endif
#elif PTTYPE == 32
	#define pt_element_t u32
	#define guest_walker guest_walker32
	#define FNAME(name) paging##32_##name
	#define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
	#define PT_DIR_BASE_ADDR_MASK PT32_DIR_BASE_ADDR_MASK
	#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
	#define PT_LEVEL_BITS PT32_LEVEL_BITS
	#define PT_MAX_FULL_LEVELS 2
	#define CMPXCHG cmpxchg
#else
	#error Invalid PTTYPE value
#endif

#define gpte_to_gfn FNAME(gpte_to_gfn)
#define gpte_to_gfn_pde FNAME(gpte_to_gfn_pde)

/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
struct guest_walker {
	int level;
	gfn_t table_gfn[PT_MAX_FULL_LEVELS];
	pt_element_t ptes[PT_MAX_FULL_LEVELS];
	gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
	unsigned pt_access;
	unsigned pte_access;
	gfn_t gfn;
	u32 error_code;
};

static gfn_t gpte_to_gfn(pt_element_t gpte)
{
	return (gpte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
}

static gfn_t gpte_to_gfn_pde(pt_element_t gpte)
{
	return (gpte & PT_DIR_BASE_ADDR_MASK) >> PAGE_SHIFT;
}

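/*
 * Atomically update a guest pte in place (used below to set the accessed
 * and dirty bits without losing a concurrent update made by the guest).
 * Returns true if the pte changed under us and the update was not applied.
 */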
static bool FNAME(cmpxchg_gpte)(struct kvm *kvm,
			 gfn_t table_gfn, unsigned index,
			 pt_element_t orig_pte, pt_element_t new_pte)
{
	pt_element_t ret;
	pt_element_t *table;
	struct page *page;

	page = gfn_to_page(kvm, table_gfn);

	table = kmap_atomic(page, KM_USER0);
	ret = CMPXCHG(&table[index], orig_pte, new_pte);
	kunmap_atomic(table, KM_USER0);

	kvm_release_page_dirty(page);

	return (ret != orig_pte);
}

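/*
 * Derive the access permissions (ACC_* flags) that a guest pte grants:
 * write and user come straight from the pte, and execute is masked off
 * by the NX bit when the guest has NX enabled (64-bit ptes only).
 */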
static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte)
{
	unsigned access;

	access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
#if PTTYPE == 64
	if (is_nx(vcpu))
		access &= ~(gpte >> PT64_NX_SHIFT);
#endif
	return access;
}

/*
 * Fetch a guest pte for a guest virtual address
 */
static int FNAME(walk_addr)(struct guest_walker *walker,
			    struct kvm_vcpu *vcpu, gva_t addr,
			    int write_fault, int user_fault, int fetch_fault)
{
	pt_element_t pte;
	gfn_t table_gfn;
	unsigned index, pt_access, pte_access;
	gpa_t pte_gpa;
	int rsvd_fault = 0;

	pgprintk("%s: addr %lx\n", __func__, addr);
walk:
	walker->level = vcpu->arch.mmu.root_level;
	pte = vcpu->arch.cr3;
#if PTTYPE == 64
	if (!is_long_mode(vcpu)) {
		pte = kvm_pdptr_read(vcpu, (addr >> 30) & 3);
		if (!is_present_gpte(pte))
			goto not_present;
		--walker->level;
	}
#endif
	ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
	       (vcpu->arch.cr3 & CR3_NONPAE_RESERVED_BITS) == 0);

	pt_access = ACC_ALL;

	for (;;) {
		index = PT_INDEX(addr, walker->level);

		table_gfn = gpte_to_gfn(pte);
		pte_gpa = gfn_to_gpa(table_gfn);
		pte_gpa += index * sizeof(pt_element_t);
		walker->table_gfn[walker->level - 1] = table_gfn;
		walker->pte_gpa[walker->level - 1] = pte_gpa;
		pgprintk("%s: table_gfn[%d] %lx\n", __func__,
			 walker->level - 1, table_gfn);

		kvm_read_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte));

		if (!is_present_gpte(pte))
			goto not_present;

		rsvd_fault = is_rsvd_bits_set(vcpu, pte, walker->level);
		if (rsvd_fault)
			goto access_error;

		if (write_fault && !is_writeble_pte(pte))
			if (user_fault || is_write_protection(vcpu))
				goto access_error;

		if (user_fault && !(pte & PT_USER_MASK))
			goto access_error;

#if PTTYPE == 64
		if (fetch_fault && is_nx(vcpu) && (pte & PT64_NX_MASK))
			goto access_error;
#endif

		if (!(pte & PT_ACCESSED_MASK)) {
			mark_page_dirty(vcpu->kvm, table_gfn);
			if (FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn,
			    index, pte, pte|PT_ACCESSED_MASK))
				goto walk;
			pte |= PT_ACCESSED_MASK;
		}

		pte_access = pt_access & FNAME(gpte_access)(vcpu, pte);

		walker->ptes[walker->level - 1] = pte;

		if (walker->level == PT_PAGE_TABLE_LEVEL) {
			walker->gfn = gpte_to_gfn(pte);
			break;
		}

		if (walker->level == PT_DIRECTORY_LEVEL
		    && (pte & PT_PAGE_SIZE_MASK)
		    && (PTTYPE == 64 || is_pse(vcpu))) {
			walker->gfn = gpte_to_gfn_pde(pte);
			walker->gfn += PT_INDEX(addr, PT_PAGE_TABLE_LEVEL);
			if (PTTYPE == 32 && is_cpuid_PSE36())
				walker->gfn += pse36_gfn_delta(pte);
			break;
		}

		pt_access = pte_access;
		--walker->level;
	}

	if (write_fault && !is_dirty_gpte(pte)) {
		bool ret;

		mark_page_dirty(vcpu->kvm, table_gfn);
		ret = FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn, index, pte,
			    pte|PT_DIRTY_MASK);
		if (ret)
			goto walk;
		pte |= PT_DIRTY_MASK;
		walker->ptes[walker->level - 1] = pte;
	}

	walker->pt_access = pt_access;
	walker->pte_access = pte_access;
	pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
		 __func__, (u64)pte, pt_access, pte_access);
	return 1;

not_present:
	walker->error_code = 0;
	goto err;

access_error:
	walker->error_code = PFERR_PRESENT_MASK;

err:
	if (write_fault)
		walker->error_code |= PFERR_WRITE_MASK;
	if (user_fault)
		walker->error_code |= PFERR_USER_MASK;
	if (fetch_fault)
		walker->error_code |= PFERR_FETCH_MASK;
	if (rsvd_fault)
		walker->error_code |= PFERR_RSVD_MASK;
	return 0;
}

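/*
 * Update a single shadow pte in response to a guest write to its guest pte,
 * using the gfn, pfn and largepage information cached beforehand in
 * vcpu->arch.update_pte by the pte-write path.
 */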
static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
			      u64 *spte, const void *pte)
{
	pt_element_t gpte;
	unsigned pte_access;
	pfn_t pfn;
	int largepage = vcpu->arch.update_pte.largepage;

	gpte = *(const pt_element_t *)pte;
	if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) {
		if (!is_present_gpte(gpte))
			__set_spte(spte, shadow_notrap_nonpresent_pte);
		return;
	}
	pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
	pte_access = page->role.access & FNAME(gpte_access)(vcpu, gpte);
	if (gpte_to_gfn(gpte) != vcpu->arch.update_pte.gfn)
		return;
	pfn = vcpu->arch.update_pte.pfn;
	if (is_error_pfn(pfn))
		return;
	if (mmu_notifier_retry(vcpu, vcpu->arch.update_pte.mmu_seq))
		return;
	kvm_get_pfn(pfn);
	mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0,
		     gpte & PT_DIRTY_MASK, NULL, largepage,
		     gpte_to_gfn(gpte), pfn, true);
}

/*
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 */
static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
			 struct guest_walker *gw,
			 int user_fault, int write_fault, int largepage,
			 int *ptwrite, pfn_t pfn)
{
	unsigned access = gw->pt_access;
	struct kvm_mmu_page *shadow_page;
	u64 spte, *sptep = NULL;
	int direct;
	gfn_t table_gfn;
	int r;
	int level;
	pt_element_t curr_pte;
	struct kvm_shadow_walk_iterator iterator;

	if (!is_present_gpte(gw->ptes[gw->level - 1]))
		return NULL;

	for_each_shadow_entry(vcpu, addr, iterator) {
		level = iterator.level;
		sptep = iterator.sptep;
		if (level == PT_PAGE_TABLE_LEVEL
		    || (largepage && level == PT_DIRECTORY_LEVEL)) {
			mmu_set_spte(vcpu, sptep, access,
				     gw->pte_access & access,
				     user_fault, write_fault,
				     gw->ptes[gw->level-1] & PT_DIRTY_MASK,
				     ptwrite, largepage,
				     gw->gfn, pfn, false);
			break;
		}

		if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep))
			continue;

		if (is_large_pte(*sptep)) {
			rmap_remove(vcpu->kvm, sptep);
			__set_spte(sptep, shadow_trap_nonpresent_pte);
			kvm_flush_remote_tlbs(vcpu->kvm);
		}

		if (level == PT_DIRECTORY_LEVEL
		    && gw->level == PT_DIRECTORY_LEVEL) {
			direct = 1;
			if (!is_dirty_gpte(gw->ptes[level - 1]))
				access &= ~ACC_WRITE_MASK;
			table_gfn = gpte_to_gfn(gw->ptes[level - 1]);
		} else {
			direct = 0;
			table_gfn = gw->table_gfn[level - 2];
		}
		shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
					       direct, access, sptep);
		if (!direct) {
			r = kvm_read_guest_atomic(vcpu->kvm,
						  gw->pte_gpa[level - 2],
						  &curr_pte, sizeof(curr_pte));
			if (r || curr_pte != gw->ptes[level - 2]) {
				kvm_mmu_put_page(shadow_page, sptep);
				kvm_release_pfn_clean(pfn);
				sptep = NULL;
				break;
			}
		}

		spte = __pa(shadow_page->spt)
			| PT_PRESENT_MASK | PT_ACCESSED_MASK
			| PT_WRITABLE_MASK | PT_USER_MASK;
		*sptep = spte;
	}

	return sptep;
}

/*
 * Page fault handler.  There are several causes for a page fault:
 *   - there is no shadow pte for the guest pte
 *   - write access through a shadow pte marked read only so that we can set
 *     the dirty bit
 *   - write access to a shadow pte marked read only so we can update the page
 *     dirty bitmap, when userspace requests it
 *   - mmio access; in this case we will never install a present shadow pte
 *   - normal guest page fault due to the guest pte marked not present, not
 *     writable, or not executable
 *
 *  Returns: 1 if we need to emulate the instruction, 0 otherwise, or
 *           a negative value on error.
 */
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
			       u32 error_code)
{
	int write_fault = error_code & PFERR_WRITE_MASK;
	int user_fault = error_code & PFERR_USER_MASK;
	int fetch_fault = error_code & PFERR_FETCH_MASK;
	struct guest_walker walker;
	u64 *sptep;
	int write_pt = 0;
	int r;
	pfn_t pfn;
	int largepage = 0;
	unsigned long mmu_seq;

	pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
	kvm_mmu_audit(vcpu, "pre page fault");

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	/*
	 * Look up the guest pte for the faulting address.
	 */
	r = FNAME(walk_addr)(&walker, vcpu, addr, write_fault, user_fault,
			     fetch_fault);

	/*
	 * The page is not mapped by the guest.  Let the guest handle it.
	 */
	if (!r) {
		pgprintk("%s: guest page fault\n", __func__);
		inject_page_fault(vcpu, addr, walker.error_code);
		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
		return 0;
	}

	if (walker.level == PT_DIRECTORY_LEVEL) {
		gfn_t large_gfn;
		large_gfn = walker.gfn &
			~(KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL) - 1);
		if (is_largepage_backed(vcpu, large_gfn)) {
			walker.gfn = large_gfn;
			largepage = 1;
		}
	}
	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();
	pfn = gfn_to_pfn(vcpu->kvm, walker.gfn);

	/* mmio */
	if (is_error_pfn(pfn)) {
		pgprintk("gfn %lx is mmio\n", walker.gfn);
		kvm_release_pfn_clean(pfn);
		return 1;
	}

	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu, mmu_seq))
		goto out_unlock;
	kvm_mmu_free_some_pages(vcpu);
	sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
			     largepage, &write_pt, pfn);

	pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__,
		 sptep, *sptep, write_pt);

	if (!write_pt)
		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */

	++vcpu->stat.pf_fixed;
	kvm_mmu_audit(vcpu, "post page fault (fixed)");
	spin_unlock(&vcpu->kvm->mmu_lock);

	return write_pt;

out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return 0;
}

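/*
 * Handle a guest invlpg: drop the shadow pte that maps gva, flush the TLB
 * if anything was actually removed, and then re-read the guest pte so a
 * still-valid mapping can be re-established via kvm_mmu_pte_write().
 */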
static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
{
	struct kvm_shadow_walk_iterator iterator;
	pt_element_t gpte;
	gpa_t pte_gpa = -1;
	int level;
	u64 *sptep;
	int need_flush = 0;

	spin_lock(&vcpu->kvm->mmu_lock);

	for_each_shadow_entry(vcpu, gva, iterator) {
		level = iterator.level;
		sptep = iterator.sptep;

		/* FIXME: properly handle invlpg on large guest pages */
		if (level == PT_PAGE_TABLE_LEVEL ||
		    ((level == PT_DIRECTORY_LEVEL) && is_large_pte(*sptep))) {
			struct kvm_mmu_page *sp = page_header(__pa(sptep));

			pte_gpa = (sp->gfn << PAGE_SHIFT);
			pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);

			if (is_shadow_present_pte(*sptep)) {
				rmap_remove(vcpu->kvm, sptep);
				if (is_large_pte(*sptep))
					--vcpu->kvm->stat.lpages;
				need_flush = 1;
			}
			__set_spte(sptep, shadow_trap_nonpresent_pte);
			break;
		}

		if (!is_shadow_present_pte(*sptep))
			break;
	}

	if (need_flush)
		kvm_flush_remote_tlbs(vcpu->kvm);
	spin_unlock(&vcpu->kvm->mmu_lock);

	if (pte_gpa == -1)
		return;
	if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
				  sizeof(pt_element_t)))
		return;
	if (is_present_gpte(gpte) && (gpte & PT_ACCESSED_MASK)) {
		if (mmu_topup_memory_caches(vcpu))
			return;
		kvm_mmu_pte_write(vcpu, pte_gpa, (const u8 *)&gpte,
				  sizeof(pt_element_t), 0);
	}
}

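/*
 * Translate a guest virtual address to a guest physical address by walking
 * the guest page tables; returns UNMAPPED_GVA if no translation exists.
 */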
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
{
	struct guest_walker walker;
	gpa_t gpa = UNMAPPED_GVA;
	int r;

	r = FNAME(walk_addr)(&walker, vcpu, vaddr, 0, 0, 0);

	if (r) {
		gpa = gfn_to_gpa(walker.gfn);
		gpa |= vaddr & ~PAGE_MASK;
	}

	return gpa;
}

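/*
 * Pre-fill a shadow page's sptes with not-present markers that record
 * whether the corresponding guest pte is present ("trap") or not
 * ("notrap"), so a later fault on a guest-not-present pte can be handled
 * without re-walking the guest page table.
 */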
static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
				 struct kvm_mmu_page *sp)
{
	int i, j, offset, r;
	pt_element_t pt[256 / sizeof(pt_element_t)];
	gpa_t pte_gpa;

	if (sp->role.direct
	    || (PTTYPE == 32 && sp->role.level > PT_PAGE_TABLE_LEVEL)) {
		nonpaging_prefetch_page(vcpu, sp);
		return;
	}

	pte_gpa = gfn_to_gpa(sp->gfn);
	if (PTTYPE == 32) {
		offset = sp->role.quadrant << PT64_LEVEL_BITS;
		pte_gpa += offset * sizeof(pt_element_t);
	}

	for (i = 0; i < PT64_ENT_PER_PAGE; i += ARRAY_SIZE(pt)) {
		r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa, pt, sizeof pt);
		pte_gpa += ARRAY_SIZE(pt) * sizeof(pt_element_t);
		for (j = 0; j < ARRAY_SIZE(pt); ++j)
			if (r || is_present_gpte(pt[j]))
				sp->spt[i+j] = shadow_trap_nonpresent_pte;
			else
				sp->spt[i+j] = shadow_notrap_nonpresent_pte;
	}
}

/*
 * Using the cached information from sp->gfns is safe because:
 * - The spte has a reference to the struct page, so the pfn for a given gfn
 *   can't change unless all sptes pointing to it are nuked first.
 * - Alias changes zap the entire shadow cache.
 */
static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
	int i, offset, nr_present;

	offset = nr_present = 0;

	if (PTTYPE == 32)
		offset = sp->role.quadrant << PT64_LEVEL_BITS;

	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
		unsigned pte_access;
		pt_element_t gpte;
		gpa_t pte_gpa;
		gfn_t gfn = sp->gfns[i];

		if (!is_shadow_present_pte(sp->spt[i]))
			continue;

		pte_gpa = gfn_to_gpa(sp->gfn);
		pte_gpa += (i+offset) * sizeof(pt_element_t);

		if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
					  sizeof(pt_element_t)))
			return -EINVAL;

		if (gpte_to_gfn(gpte) != gfn || !is_present_gpte(gpte) ||
		    !(gpte & PT_ACCESSED_MASK)) {
			u64 nonpresent;

			rmap_remove(vcpu->kvm, &sp->spt[i]);
			if (is_present_gpte(gpte))
				nonpresent = shadow_trap_nonpresent_pte;
			else
				nonpresent = shadow_notrap_nonpresent_pte;
			__set_spte(&sp->spt[i], nonpresent);
			continue;
		}

		nr_present++;
		pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
		set_spte(vcpu, &sp->spt[i], pte_access, 0, 0,
			 is_dirty_gpte(gpte), 0, gfn,
			 spte_to_pfn(sp->spt[i]), true, false);
	}

	return !nr_present;
}

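/*
 * Undo the PTTYPE-specific definitions so this file can be included again
 * with a different PTTYPE.
 */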
#undef pt_element_t
#undef guest_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef PT_LEVEL_MASK
#undef PT_DIR_BASE_ADDR_MASK
#undef PT_LEVEL_BITS
#undef PT_MAX_FULL_LEVELS
#undef gpte_to_gfn
#undef gpte_to_gfn_pde
#undef CMPXCHG