/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

/*
 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
 * so the code in this file is compiled twice, once per pte size.
 */

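/*
 * Context note (not part of the original file): mmu.c instantiates this
 * template once per guest pte width, roughly as follows:
 *
 *	#define PTTYPE 64
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 *	#define PTTYPE 32
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 * FNAME() then expands each function name to paging64_xxx or paging32_xxx.
 */
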
#if PTTYPE == 64
	#define pt_element_t u64
	#define guest_walker guest_walker64
	#define FNAME(name) paging##64_##name
	#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
	#define PT_DIR_BASE_ADDR_MASK PT64_DIR_BASE_ADDR_MASK
	#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
	#define PT_LEVEL_BITS PT64_LEVEL_BITS
	#ifdef CONFIG_X86_64
	#define PT_MAX_FULL_LEVELS 4
	#define CMPXCHG cmpxchg
	#else
	#define CMPXCHG cmpxchg64
	#define PT_MAX_FULL_LEVELS 2
	#endif
#elif PTTYPE == 32
	#define pt_element_t u32
	#define guest_walker guest_walker32
	#define FNAME(name) paging##32_##name
	#define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
	#define PT_DIR_BASE_ADDR_MASK PT32_DIR_BASE_ADDR_MASK
	#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
	#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
	#define PT_LEVEL_BITS PT32_LEVEL_BITS
	#define PT_MAX_FULL_LEVELS 2
	#define CMPXCHG cmpxchg
#else
	#error Invalid PTTYPE value
#endif

#define gpte_to_gfn FNAME(gpte_to_gfn)
#define gpte_to_gfn_pde FNAME(gpte_to_gfn_pde)

/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
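/*
 * Added note: table_gfn[], ptes[] and pte_gpa[] record, for each level
 * walked, the frame of the guest page table, the pte value read, and that
 * pte's guest-physical address; pt_access/pte_access hold the access
 * rights accumulated over the walk and those of the final pte.
 */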
struct guest_walker {
	int level;
	gfn_t table_gfn[PT_MAX_FULL_LEVELS];
	pt_element_t ptes[PT_MAX_FULL_LEVELS];
	gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
	unsigned pt_access;
	unsigned pte_access;
	gfn_t gfn;
	u32 error_code;
};

static gfn_t gpte_to_gfn(pt_element_t gpte)
{
	return (gpte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
}

static gfn_t gpte_to_gfn_pde(pt_element_t gpte)
{
	return (gpte & PT_DIR_BASE_ADDR_MASK) >> PAGE_SHIFT;
}
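
/*
 * Illustration (not in the original): with 4KB pages (PAGE_SHIFT == 12),
 * a present 64-bit gpte of 0x0000000012345067 masks to 0x0000000012345000
 * and so yields gfn 0x12345; the low flag bits (P/RW/US/A/D) are discarded.
 */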
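
/*
 * Added note: accessed/dirty updates to a guest pte must be atomic with
 * respect to the guest's own writes, just as they would be if hardware
 * walked the table; hence the cmpxchg.  The helper returns true if the
 * pte changed under us, in which case the caller restarts its walk.
 */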
static bool FNAME(cmpxchg_gpte)(struct kvm *kvm,
			 gfn_t table_gfn, unsigned index,
			 pt_element_t orig_pte, pt_element_t new_pte)
{
	pt_element_t ret;
	pt_element_t *table;
	struct page *page;

	down_read(&current->mm->mmap_sem);
	page = gfn_to_page(kvm, table_gfn);
	up_read(&current->mm->mmap_sem);

	table = kmap_atomic(page, KM_USER0);

	ret = CMPXCHG(&table[index], orig_pte, new_pte);

	kunmap_atomic(table, KM_USER0);

	kvm_release_page_dirty(page);

	return (ret != orig_pte);
}

static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte)
{
	unsigned access;

	access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
#if PTTYPE == 64
	if (is_nx(vcpu))
		access &= ~(gpte >> PT64_NX_SHIFT);
#endif
	return access;
}
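
/*
 * Added note: PT64_NX_SHIFT is 63, so (gpte >> PT64_NX_SHIFT) moves the
 * NX bit into bit 0; since ACC_EXEC_MASK is bit 0, the mask-and-clear
 * revokes execute permission exactly when the guest pte has NX set.
 */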

/*
 * Fetch a guest pte for a guest virtual address
 */
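/*
 * Added note: returns 1 and fills in @walker on success; returns 0 with
 * walker->error_code set in page-fault error-code format otherwise.  If a
 * racing guest write changes a pte while we update its accessed/dirty
 * bits, the walk restarts from the top ("goto walk").
 */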
static int FNAME(walk_addr)(struct guest_walker *walker,
			    struct kvm_vcpu *vcpu, gva_t addr,
			    int write_fault, int user_fault, int fetch_fault)
{
	pt_element_t pte;
	gfn_t table_gfn;
	unsigned index, pt_access, pte_access;
	gpa_t pte_gpa;

	pgprintk("%s: addr %lx\n", __FUNCTION__, addr);
walk:
	walker->level = vcpu->arch.mmu.root_level;
	pte = vcpu->arch.cr3;
#if PTTYPE == 64
	if (!is_long_mode(vcpu)) {
		pte = vcpu->arch.pdptrs[(addr >> 30) & 3];
		if (!is_present_pte(pte))
			goto not_present;
		--walker->level;
	}
#endif
	ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
	       (vcpu->arch.cr3 & CR3_NONPAE_RESERVED_BITS) == 0);

	pt_access = ACC_ALL;

	for (;;) {
		index = PT_INDEX(addr, walker->level);

		table_gfn = gpte_to_gfn(pte);
		pte_gpa = gfn_to_gpa(table_gfn);
		pte_gpa += index * sizeof(pt_element_t);
		walker->table_gfn[walker->level - 1] = table_gfn;
		walker->pte_gpa[walker->level - 1] = pte_gpa;
		pgprintk("%s: table_gfn[%d] %lx\n", __FUNCTION__,
			 walker->level - 1, table_gfn);

		kvm_read_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte));

		if (!is_present_pte(pte))
			goto not_present;

		if (write_fault && !is_writeble_pte(pte))
			if (user_fault || is_write_protection(vcpu))
				goto access_error;

		if (user_fault && !(pte & PT_USER_MASK))
			goto access_error;

#if PTTYPE == 64
		if (fetch_fault && is_nx(vcpu) && (pte & PT64_NX_MASK))
			goto access_error;
#endif

		if (!(pte & PT_ACCESSED_MASK)) {
			mark_page_dirty(vcpu->kvm, table_gfn);
			if (FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn,
			    index, pte, pte|PT_ACCESSED_MASK))
				goto walk;
			pte |= PT_ACCESSED_MASK;
		}

		pte_access = pt_access & FNAME(gpte_access)(vcpu, pte);

		walker->ptes[walker->level - 1] = pte;

		if (walker->level == PT_PAGE_TABLE_LEVEL) {
			walker->gfn = gpte_to_gfn(pte);
			break;
		}

		if (walker->level == PT_DIRECTORY_LEVEL
		    && (pte & PT_PAGE_SIZE_MASK)
		    && (PTTYPE == 64 || is_pse(vcpu))) {
			walker->gfn = gpte_to_gfn_pde(pte);
			walker->gfn += PT_INDEX(addr, PT_PAGE_TABLE_LEVEL);
			if (PTTYPE == 32 && is_cpuid_PSE36())
				walker->gfn += pse36_gfn_delta(pte);
			break;
		}
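
		/*
		 * Added note: not a large page -- descend one level.  The
		 * rights gathered so far become the table access rights
		 * for the next iteration.
		 */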
		pt_access = pte_access;
		--walker->level;
	}

	if (write_fault && !is_dirty_pte(pte)) {
		bool ret;

		mark_page_dirty(vcpu->kvm, table_gfn);
		ret = FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn, index, pte,
			    pte|PT_DIRTY_MASK);
		if (ret)
			goto walk;
		pte |= PT_DIRTY_MASK;
		kvm_mmu_pte_write(vcpu, pte_gpa, (u8 *)&pte, sizeof(pte));
		walker->ptes[walker->level - 1] = pte;
	}

	walker->pt_access = pt_access;
	walker->pte_access = pte_access;
	pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
		 __FUNCTION__, (u64)pte, pt_access, pte_access);
	return 1;

not_present:
	walker->error_code = 0;
	goto err;

access_error:
	walker->error_code = PFERR_PRESENT_MASK;

err:
	if (write_fault)
		walker->error_code |= PFERR_WRITE_MASK;
	if (user_fault)
		walker->error_code |= PFERR_USER_MASK;
	if (fetch_fault)
		walker->error_code |= PFERR_FETCH_MASK;
	return 0;
}
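
/*
 * Added note (not in the original): update_pte is invoked from
 * kvm_mmu_pte_write() when the guest writes a shadowed guest pte;
 * vcpu->arch.update_pte caches the gfn/page/largepage looked up before
 * mmu_lock was taken, and a new spte is installed only if the written
 * gpte still points at that cached gfn.
 */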
static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
			      u64 *spte, const void *pte)
{
	pt_element_t gpte;
	unsigned pte_access;
	struct page *npage;
	int largepage = vcpu->arch.update_pte.largepage;

	gpte = *(const pt_element_t *)pte;
	if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) {
		if (!is_present_pte(gpte))
			set_shadow_pte(spte, shadow_notrap_nonpresent_pte);
		return;
	}
	pgprintk("%s: gpte %llx spte %p\n", __FUNCTION__, (u64)gpte, spte);
	pte_access = page->role.access & FNAME(gpte_access)(vcpu, gpte);
	if (gpte_to_gfn(gpte) != vcpu->arch.update_pte.gfn)
		return;
	npage = vcpu->arch.update_pte.page;
	if (!npage)
		return;
	get_page(npage);
	mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0,
		     gpte & PT_DIRTY_MASK, NULL, largepage, gpte_to_gfn(gpte),
		     npage);
}

/*
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 */
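/*
 * Added note: walks down from the shadow root, allocating shadow pages as
 * needed, and returns a pointer to the leaf shadow pte -- a shadow PDE
 * when @largepage is set.  Returns NULL if the guest pte is no longer
 * present or changed while it was being shadowed.
 */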
static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
			 struct guest_walker *walker,
			 int user_fault, int write_fault, int largepage,
			 int *ptwrite, struct page *page)
{
	hpa_t shadow_addr;
	int level;
	u64 *shadow_ent;
	unsigned access = walker->pt_access;

	if (!is_present_pte(walker->ptes[walker->level - 1]))
		return NULL;

	shadow_addr = vcpu->arch.mmu.root_hpa;
	level = vcpu->arch.mmu.shadow_root_level;
	if (level == PT32E_ROOT_LEVEL) {
		shadow_addr = vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
		shadow_addr &= PT64_BASE_ADDR_MASK;
		--level;
	}

	for (; ; level--) {
		u32 index = SHADOW_PT_INDEX(addr, level);
		struct kvm_mmu_page *shadow_page;
		u64 shadow_pte;
		int metaphysical;
		gfn_t table_gfn;

		shadow_ent = ((u64 *)__va(shadow_addr)) + index;
		if (level == PT_PAGE_TABLE_LEVEL)
			break;

		if (largepage && level == PT_DIRECTORY_LEVEL)
			break;

		if (is_shadow_present_pte(*shadow_ent)
		    && !is_large_pte(*shadow_ent)) {
			shadow_addr = *shadow_ent & PT64_BASE_ADDR_MASK;
			continue;
		}

		if (is_large_pte(*shadow_ent))
			rmap_remove(vcpu->kvm, shadow_ent);

		if (level - 1 == PT_PAGE_TABLE_LEVEL
		    && walker->level == PT_DIRECTORY_LEVEL) {
			metaphysical = 1;
			if (!is_dirty_pte(walker->ptes[level - 1]))
				access &= ~ACC_WRITE_MASK;
			table_gfn = gpte_to_gfn(walker->ptes[level - 1]);
		} else {
			metaphysical = 0;
			table_gfn = walker->table_gfn[level - 2];
		}
		shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
					       metaphysical, access,
					       shadow_ent);
		if (!metaphysical) {
			int r;
			pt_element_t curr_pte;
			r = kvm_read_guest_atomic(vcpu->kvm,
						  walker->pte_gpa[level - 2],
						  &curr_pte, sizeof(curr_pte));
			if (r || curr_pte != walker->ptes[level - 2]) {
				kvm_release_page_clean(page);
				return NULL;
			}
		}
		shadow_addr = __pa(shadow_page->spt);
		shadow_pte = shadow_addr | PT_PRESENT_MASK | PT_ACCESSED_MASK
			| PT_WRITABLE_MASK | PT_USER_MASK;
		*shadow_ent = shadow_pte;
	}

	mmu_set_spte(vcpu, shadow_ent, access, walker->pte_access & access,
		     user_fault, write_fault,
		     walker->ptes[walker->level-1] & PT_DIRTY_MASK,
		     ptwrite, largepage, walker->gfn, page);

	return shadow_ent;
}

/*
 * Page fault handler.  There are several causes for a page fault:
 *   - there is no shadow pte for the guest pte
 *   - write access through a shadow pte marked read only so that we can set
 *     the dirty bit
 *   - write access to a shadow pte marked read only so we can update the page
 *     dirty bitmap, when userspace requests it
 *   - mmio access; in this case we will never install a present shadow pte
 *   - normal guest page fault due to the guest pte marked not present, not
 *     writable, or not executable
 *
 *  Returns: 1 if we need to emulate the instruction, 0 otherwise, or
 *           a negative value on error.
 */
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
			     u32 error_code)
{
	int write_fault = error_code & PFERR_WRITE_MASK;
	int user_fault = error_code & PFERR_USER_MASK;
	int fetch_fault = error_code & PFERR_FETCH_MASK;
	struct guest_walker walker;
	u64 *shadow_pte;
	int write_pt = 0;
	int r;
	struct page *page;
	int largepage = 0;

	pgprintk("%s: addr %lx err %x\n", __FUNCTION__, addr, error_code);
	kvm_mmu_audit(vcpu, "pre page fault");

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	down_read(&vcpu->kvm->slots_lock);
	/*
	 * Look up the shadow pte for the faulting address.
	 */
	r = FNAME(walk_addr)(&walker, vcpu, addr, write_fault, user_fault,
			     fetch_fault);

	/*
	 * The page is not mapped by the guest.  Let the guest handle it.
	 */
	if (!r) {
		pgprintk("%s: guest page fault\n", __FUNCTION__);
		inject_page_fault(vcpu, addr, walker.error_code);
		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
		up_read(&vcpu->kvm->slots_lock);
		return 0;
	}

	down_read(&current->mm->mmap_sem);
	if (walker.level == PT_DIRECTORY_LEVEL) {
		gfn_t large_gfn;
		large_gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE-1);
		if (is_largepage_backed(vcpu, large_gfn)) {
			walker.gfn = large_gfn;
			largepage = 1;
		}
	}
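
	/*
	 * Added note: large_gfn above rounds the faulting gfn down to a
	 * KVM_PAGES_PER_HPAGE boundary; the fault is mapped with a single
	 * large shadow pte only if is_largepage_backed() reports that the
	 * whole range is backed by a host huge page.
	 */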
	page = gfn_to_page(vcpu->kvm, walker.gfn);
	up_read(&current->mm->mmap_sem);

	/* mmio */
	if (is_error_page(page)) {
		pgprintk("gfn %x is mmio\n", walker.gfn);
		kvm_release_page_clean(page);
		up_read(&vcpu->kvm->slots_lock);
		return 1;
	}

	spin_lock(&vcpu->kvm->mmu_lock);
	kvm_mmu_free_some_pages(vcpu);
	shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
				  largepage, &write_pt, page);

	pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __FUNCTION__,
		 shadow_pte, *shadow_pte, write_pt);

	if (!write_pt)
		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */

	++vcpu->stat.pf_fixed;
	kvm_mmu_audit(vcpu, "post page fault (fixed)");
	spin_unlock(&vcpu->kvm->mmu_lock);
	up_read(&vcpu->kvm->slots_lock);

	return write_pt;
}

static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
{
	struct guest_walker walker;
	gpa_t gpa = UNMAPPED_GVA;
	int r;

	r = FNAME(walk_addr)(&walker, vcpu, vaddr, 0, 0, 0);

	if (r) {
		gpa = gfn_to_gpa(walker.gfn);
		gpa |= vaddr & ~PAGE_MASK;
	}

	return gpa;
}
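
/*
 * Added note: gva_to_gpa walks with all fault flags clear, so the
 * write/user/fetch permission checks are skipped; it returns UNMAPPED_GVA
 * when the guest does not map the address, which callers (e.g. the
 * emulator) must check for.
 */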

static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
				 struct kvm_mmu_page *sp)
{
	int i, offset = 0, r = 0;
	pt_element_t pt;

	if (sp->role.metaphysical
	    || (PTTYPE == 32 && sp->role.level > PT_PAGE_TABLE_LEVEL)) {
		nonpaging_prefetch_page(vcpu, sp);
		return;
	}

	if (PTTYPE == 32)
		offset = sp->role.quadrant << PT64_LEVEL_BITS;

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
		gpa_t pte_gpa = gfn_to_gpa(sp->gfn);
		pte_gpa += (i+offset) * sizeof(pt_element_t);

		r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &pt,
					  sizeof(pt_element_t));
		if (r || is_present_pte(pt))
			sp->spt[i] = shadow_trap_nonpresent_pte;
		else
			sp->spt[i] = shadow_notrap_nonpresent_pte;
	}
}
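
/*
 * Added note: entries whose guest ptes are present (or unreadable) get
 * shadow_trap_nonpresent_pte, forcing a vmexit so kvm can sync the shadow
 * pte; entries the guest itself maps not-present get
 * shadow_notrap_nonpresent_pte, which (where supported) lets the resulting
 * fault be reflected directly into the guest without a vmexit.
 */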
491
6aa8b732
AK
492#undef pt_element_t
493#undef guest_walker
494#undef FNAME
495#undef PT_BASE_ADDR_MASK
496#undef PT_INDEX
497#undef SHADOW_PT_INDEX
498#undef PT_LEVEL_MASK
6aa8b732 499#undef PT_DIR_BASE_ADDR_MASK
c7addb90 500#undef PT_LEVEL_BITS
cea0f0e7 501#undef PT_MAX_FULL_LEVELS
5fb07ddb
AK
502#undef gpte_to_gfn
503#undef gpte_to_gfn_pde
b3e4e63f 504#undef CMPXCHG