/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *	Yaniv Kamay  <yaniv@qumranet.com>
 *	Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
/*
 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
 * so the code in this file is compiled twice, once per pte size.
 */
#if PTTYPE == 64
	#define pt_element_t u64
	#define guest_walker guest_walker64
	#define FNAME(name) paging##64_##name
	#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
	#define PT_DIR_BASE_ADDR_MASK PT64_DIR_BASE_ADDR_MASK
	#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
	#define PT_LEVEL_BITS PT64_LEVEL_BITS
	#ifdef CONFIG_X86_64
	#define PT_MAX_FULL_LEVELS 4
	#else
	#define PT_MAX_FULL_LEVELS 2
	#endif
#elif PTTYPE == 32
	#define pt_element_t u32
	#define guest_walker guest_walker32
	#define FNAME(name) paging##32_##name
	#define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
	#define PT_DIR_BASE_ADDR_MASK PT32_DIR_BASE_ADDR_MASK
	#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
	#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
	#define PT_LEVEL_BITS PT32_LEVEL_BITS
	#define PT_MAX_FULL_LEVELS 2
#else
	#error Invalid PTTYPE value
#endif
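/*
 * For reference: this template is instantiated by defining PTTYPE and
 * then including this file, once per guest pte size.  A sketch of the
 * include site (as done from the mmu code; the exact location is
 * assumed here):
 *
 *	#define PTTYPE 64
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 *	#define PTTYPE 32
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 * Note that SHADOW_PT_INDEX maps to PT64_INDEX in both branches above:
 * the shadow page tables built here always use the 64-bit pte format,
 * regardless of the guest pte size.
 */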
/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
struct guest_walker {
	int level;
	gfn_t table_gfn[PT_MAX_FULL_LEVELS];
	pt_element_t pte;
	pt_element_t inherited_ar;
	gfn_t gfn;
	u32 error_code;
};
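/*
 * Typical use of the walker (a sketch only; it mirrors what
 * FNAME(page_fault) and FNAME(gva_to_gpa) below actually do): walk the
 * guest page tables, then consume either the resulting gfn or the
 * accumulated error code.
 *
 *	struct guest_walker w;
 *
 *	if (FNAME(walk_addr)(&w, vcpu, addr, write_fault, user_fault,
 *			     fetch_fault))
 *		gpa = (gpa_t)w.gfn << PAGE_SHIFT;
 *	else
 *		inject_page_fault(vcpu, addr, w.error_code);
 */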
/*
 * Fetch a guest pte for a guest virtual address
 */
static int FNAME(walk_addr)(struct guest_walker *walker,
			    struct kvm_vcpu *vcpu, gva_t addr,
			    int write_fault, int user_fault, int fetch_fault)
{
	struct page *page;
	pt_element_t *table;
	pt_element_t pte;
	gfn_t table_gfn;
	unsigned index;
	gpa_t pte_gpa;

	pgprintk("%s: addr %lx\n", __FUNCTION__, addr);
	walker->level = vcpu->mmu.root_level;
	pte = vcpu->cr3;
#if PTTYPE == 64
	if (!is_long_mode(vcpu)) {
		pte = vcpu->pdptrs[(addr >> 30) & 3];
		if (!is_present_pte(pte))
			goto not_present;
		--walker->level;
	}
#endif
	ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
	       (vcpu->cr3 & CR3_NONPAE_RESERVED_BITS) == 0);

	walker->inherited_ar = PT_USER_MASK | PT_WRITABLE_MASK;

	for (;;) {
		index = PT_INDEX(addr, walker->level);

		table_gfn = (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
		walker->table_gfn[walker->level - 1] = table_gfn;
		pgprintk("%s: table_gfn[%d] %lx\n", __FUNCTION__,
			 walker->level - 1, table_gfn);

		page = gfn_to_page(vcpu->kvm, (pte & PT64_BASE_ADDR_MASK)
				   >> PAGE_SHIFT);

		table = kmap_atomic(page, KM_USER0);
		pte = table[index];
		kunmap_atomic(table, KM_USER0);

		if (!is_present_pte(pte))
			goto not_present;

		if (write_fault && !is_writeble_pte(pte))
			if (user_fault || is_write_protection(vcpu))
				goto access_error;

		if (user_fault && !(pte & PT_USER_MASK))
			goto access_error;

#if PTTYPE == 64
		if (fetch_fault && is_nx(vcpu) && (pte & PT64_NX_MASK))
			goto access_error;
#endif

		if (!(pte & PT_ACCESSED_MASK)) {
			mark_page_dirty(vcpu->kvm, table_gfn);
			pte |= PT_ACCESSED_MASK;
			table = kmap_atomic(page, KM_USER0);
			table[index] = pte;
			kunmap_atomic(table, KM_USER0);
		}

		if (walker->level == PT_PAGE_TABLE_LEVEL) {
			walker->gfn = (pte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
			break;
		}

		if (walker->level == PT_DIRECTORY_LEVEL
		    && (pte & PT_PAGE_SIZE_MASK)
		    && (PTTYPE == 64 || is_pse(vcpu))) {
			walker->gfn = (pte & PT_DIR_BASE_ADDR_MASK)
				>> PAGE_SHIFT;
			walker->gfn += PT_INDEX(addr, PT_PAGE_TABLE_LEVEL);
			break;
		}

		walker->inherited_ar &= pte;
		--walker->level;
	}

	if (write_fault && !is_dirty_pte(pte)) {
		mark_page_dirty(vcpu->kvm, table_gfn);
		pte |= PT_DIRTY_MASK;
		table = kmap_atomic(page, KM_USER0);
		table[index] = pte;
		kunmap_atomic(table, KM_USER0);
		pte_gpa = table_gfn << PAGE_SHIFT;
		pte_gpa += index * sizeof(pt_element_t);
		kvm_mmu_pte_write(vcpu, pte_gpa, (u8 *)&pte, sizeof(pte));
	}

	walker->pte = pte;
	pgprintk("%s: pte %llx\n", __FUNCTION__, (u64)pte);
	return 1;

not_present:
	walker->error_code = 0;
	goto err;

access_error:
	walker->error_code = PFERR_PRESENT_MASK;

err:
	if (write_fault)
		walker->error_code |= PFERR_WRITE_MASK;
	if (user_fault)
		walker->error_code |= PFERR_USER_MASK;
	if (fetch_fault)
		walker->error_code |= PFERR_FETCH_MASK;
	return 0;
}
static void FNAME(set_pte_common)(struct kvm_vcpu *vcpu,
				  u64 *shadow_pte,
				  gpa_t gaddr,
				  pt_element_t gpte,
				  u64 access_bits,
				  int user_fault,
				  int write_fault,
				  int *ptwrite,
				  struct guest_walker *walker,
				  gfn_t gfn)
{
	hpa_t paddr;
	int dirty = gpte & PT_DIRTY_MASK;
	u64 spte;
	int was_rmapped = is_rmap_pte(*shadow_pte);

	pgprintk("%s: spte %llx gpte %llx access %llx write_fault %d"
		 " user_fault %d gfn %lx\n",
		 __FUNCTION__, *shadow_pte, (u64)gpte, access_bits,
		 write_fault, user_fault, gfn);

	/*
	 * We don't set the accessed bit, since we sometimes want to see
	 * whether the guest actually used the pte (in order to detect
	 * demand paging).
	 */
	spte = PT_PRESENT_MASK | PT_DIRTY_MASK;
	spte |= gpte & PT64_NX_MASK;
	if (!dirty)
		access_bits &= ~PT_WRITABLE_MASK;

	paddr = gpa_to_hpa(vcpu->kvm, gaddr & PT64_BASE_ADDR_MASK);

	spte |= PT_PRESENT_MASK;
	if (access_bits & PT_USER_MASK)
		spte |= PT_USER_MASK;

	if (is_error_hpa(paddr)) {
		set_shadow_pte(shadow_pte,
			       shadow_trap_nonpresent_pte | PT_SHADOW_IO_MARK);
		return;
	}

	spte |= paddr;

	if ((access_bits & PT_WRITABLE_MASK)
	    || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
		struct kvm_mmu_page *shadow;

		spte |= PT_WRITABLE_MASK;
		if (user_fault) {
			mmu_unshadow(vcpu->kvm, gfn);
			goto unshadowed;
		}

		shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
		if (shadow) {
			pgprintk("%s: found shadow page for %lx, marking ro\n",
				 __FUNCTION__, gfn);
			access_bits &= ~PT_WRITABLE_MASK;
			if (is_writeble_pte(spte)) {
				spte &= ~PT_WRITABLE_MASK;
				kvm_x86_ops->tlb_flush(vcpu);
			}
			if (write_fault)
				*ptwrite = 1;
		}
	}

unshadowed:
	if (access_bits & PT_WRITABLE_MASK)
		mark_page_dirty(vcpu->kvm, gaddr >> PAGE_SHIFT);

	pgprintk("%s: setting spte %llx\n", __FUNCTION__, spte);
	set_shadow_pte(shadow_pte, spte);
	page_header_update_slot(vcpu->kvm, shadow_pte, gaddr);
	if (!was_rmapped)
		rmap_add(vcpu, shadow_pte, (gaddr & PT64_BASE_ADDR_MASK)
			 >> PAGE_SHIFT);
	if (!ptwrite || !*ptwrite)
		vcpu->last_pte_updated = shadow_pte;
}
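/*
 * Sketch of the writable decision above: the shadow pte becomes
 * writable if the guest pte allows writes (access_bits), or if a
 * kernel-mode write must be allowed through a read-only pte because
 * CR0.WP is clear in the guest.  Write access is then stripped again
 * when gfn is itself a shadowed guest page table, so that guest writes
 * to its own page tables keep trapping and can be emulated.
 */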
static void FNAME(set_pte)(struct kvm_vcpu *vcpu, pt_element_t gpte,
			   u64 *shadow_pte, u64 access_bits,
			   int user_fault, int write_fault, int *ptwrite,
			   struct guest_walker *walker, gfn_t gfn)
{
	access_bits &= gpte;
	FNAME(set_pte_common)(vcpu, shadow_pte, gpte & PT_BASE_ADDR_MASK,
			      gpte, access_bits, user_fault, write_fault,
			      ptwrite, walker, gfn);
}
static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
			      u64 *spte, const void *pte, int bytes,
			      int offset_in_pte)
{
	pt_element_t gpte;

	gpte = *(const pt_element_t *)pte;
	if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) {
		if (!offset_in_pte && !is_present_pte(gpte))
			set_shadow_pte(spte, shadow_notrap_nonpresent_pte);
		return;
	}
	if (bytes < sizeof(pt_element_t))
		return;
	pgprintk("%s: gpte %llx spte %p\n", __FUNCTION__, (u64)gpte, spte);
	FNAME(set_pte)(vcpu, gpte, spte, PT_USER_MASK | PT_WRITABLE_MASK, 0,
		       0, NULL, NULL,
		       (gpte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT);
}
static void FNAME(set_pde)(struct kvm_vcpu *vcpu, pt_element_t gpde,
			   u64 *shadow_pte, u64 access_bits,
			   int user_fault, int write_fault, int *ptwrite,
			   struct guest_walker *walker, gfn_t gfn)
{
	gpa_t gaddr;

	access_bits &= gpde;
	gaddr = (gpa_t)gfn << PAGE_SHIFT;
	if (PTTYPE == 32 && is_cpuid_PSE36())
		gaddr |= (gpde & PT32_DIR_PSE36_MASK) <<
			(32 - PT32_DIR_PSE36_SHIFT);
	FNAME(set_pte_common)(vcpu, shadow_pte, gaddr,
			      gpde, access_bits, user_fault, write_fault,
			      ptwrite, walker, gfn);
}
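/*
 * Worked example for the PSE36 adjustment above, assuming the usual
 * encoding (PT32_DIR_PSE36_SHIFT == 13, i.e. a 4MB pde carries the
 * high physical address bits in pde bits 16:13): a pde with only
 * bit 13 set contributes
 *
 *	(gpde & PT32_DIR_PSE36_MASK) << (32 - 13)
 *
 * which relocates pde bit 13 to physical address bit 32, adding 4GB
 * to the large-page base address.
 */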
/*
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 */
static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
			 struct guest_walker *walker,
			 int user_fault, int write_fault, int *ptwrite)
{
	hpa_t shadow_addr;
	int level;
	u64 *shadow_ent;
	u64 *prev_shadow_ent = NULL;

	if (!is_present_pte(walker->pte))
		return NULL;

	shadow_addr = vcpu->mmu.root_hpa;
	level = vcpu->mmu.shadow_root_level;
	if (level == PT32E_ROOT_LEVEL) {
		shadow_addr = vcpu->mmu.pae_root[(addr >> 30) & 3];
		shadow_addr &= PT64_BASE_ADDR_MASK;
		--level;
	}

	for (; ; level--) {
		u32 index = SHADOW_PT_INDEX(addr, level);
		struct kvm_mmu_page *shadow_page;
		u64 shadow_pte;
		int metaphysical;
		gfn_t table_gfn;
		unsigned hugepage_access = 0;

		shadow_ent = ((u64 *)__va(shadow_addr)) + index;
		if (is_shadow_present_pte(*shadow_ent)) {
			if (level == PT_PAGE_TABLE_LEVEL)
				break;
			shadow_addr = *shadow_ent & PT64_BASE_ADDR_MASK;
			prev_shadow_ent = shadow_ent;
			continue;
		}

		if (level == PT_PAGE_TABLE_LEVEL)
			break;

		if (level - 1 == PT_PAGE_TABLE_LEVEL
		    && walker->level == PT_DIRECTORY_LEVEL) {
			metaphysical = 1;
			hugepage_access = walker->pte;
			hugepage_access &= PT_USER_MASK | PT_WRITABLE_MASK;
			if (!is_dirty_pte(walker->pte))
				hugepage_access &= ~PT_WRITABLE_MASK;
			hugepage_access >>= PT_WRITABLE_SHIFT;
			if (walker->pte & PT64_NX_MASK)
				hugepage_access |= (1 << 2);
			table_gfn = (walker->pte & PT_BASE_ADDR_MASK)
				>> PAGE_SHIFT;
		} else {
			metaphysical = 0;
			table_gfn = walker->table_gfn[level - 2];
		}
		shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
					       metaphysical, hugepage_access,
					       shadow_ent);
		shadow_addr = __pa(shadow_page->spt);
		shadow_pte = shadow_addr | PT_PRESENT_MASK | PT_ACCESSED_MASK
			| PT_WRITABLE_MASK | PT_USER_MASK;
		*shadow_ent = shadow_pte;
		prev_shadow_ent = shadow_ent;
	}

	if (walker->level == PT_DIRECTORY_LEVEL) {
		FNAME(set_pde)(vcpu, walker->pte, shadow_ent,
			       walker->inherited_ar, user_fault, write_fault,
			       ptwrite, walker, walker->gfn);
	} else {
		ASSERT(walker->level == PT_PAGE_TABLE_LEVEL);
		FNAME(set_pte)(vcpu, walker->pte, shadow_ent,
			       walker->inherited_ar, user_fault, write_fault,
			       ptwrite, walker, walker->gfn);
	}

	return shadow_ent;
}
/*
 * Page fault handler.  There are several causes for a page fault:
 * - there is no shadow pte for the guest pte
 * - write access through a shadow pte marked read only so that we can set
 *   the dirty bit
 * - write access to a shadow pte marked read only so we can update the page
 *   dirty bitmap, when userspace requests it
 * - mmio access; in this case we will never install a present shadow pte
 * - normal guest page fault due to the guest pte marked not present, not
 *   writable, or not executable
 *
 * Returns: 1 if we need to emulate the instruction, 0 otherwise, or
 *          a negative value on error.
 */
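/*
 * For reference, the error_code bits decoded at the top of the handler
 * follow the architectural x86 page-fault error code layout:
 *
 *	PFERR_PRESENT_MASK (bit 0) - fault was a protection violation
 *	                             rather than a non-present pte
 *	PFERR_WRITE_MASK   (bit 1) - faulting access was a write
 *	PFERR_USER_MASK    (bit 2) - faulting access came from user mode
 *	PFERR_FETCH_MASK   (bit 4) - faulting access was an instruction
 *	                             fetch (relevant when NX is enabled)
 */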
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
			     u32 error_code)
{
	int write_fault = error_code & PFERR_WRITE_MASK;
	int user_fault = error_code & PFERR_USER_MASK;
	int fetch_fault = error_code & PFERR_FETCH_MASK;
	struct guest_walker walker;
	u64 *shadow_pte;
	int write_pt = 0;
	int r;

	pgprintk("%s: addr %lx err %x\n", __FUNCTION__, addr, error_code);
	kvm_mmu_audit(vcpu, "pre page fault");

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	/*
	 * Look up the shadow pte for the faulting address.
	 */
	r = FNAME(walk_addr)(&walker, vcpu, addr, write_fault, user_fault,
			     fetch_fault);

	/*
	 * The page is not mapped by the guest.  Let the guest handle it.
	 */
	if (!r) {
		pgprintk("%s: guest page fault\n", __FUNCTION__);
		inject_page_fault(vcpu, addr, walker.error_code);
		vcpu->last_pt_write_count = 0; /* reset fork detector */
		return 0;
	}

	shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
				  &write_pt);
	pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __FUNCTION__,
		 shadow_pte, *shadow_pte, write_pt);

	if (!write_pt)
		vcpu->last_pt_write_count = 0; /* reset fork detector */

	/*
	 * mmio: emulate if accessible, otherwise it's a guest fault.
	 */
	if (is_io_pte(*shadow_pte))
		return 1;

	++vcpu->stat.pf_fixed;
	kvm_mmu_audit(vcpu, "post page fault (fixed)");

	return write_pt;
}
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
{
	struct guest_walker walker;
	gpa_t gpa = UNMAPPED_GVA;
	int r;

	r = FNAME(walk_addr)(&walker, vcpu, vaddr, 0, 0, 0);

	if (r) {
		gpa = (gpa_t)walker.gfn << PAGE_SHIFT;
		gpa |= vaddr & ~PAGE_MASK;
	}

	return gpa;
}
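/*
 * Caller sketch (hypothetical; callers normally reach this through the
 * mmu context's gva_to_gpa hook rather than by calling the FNAME
 * variant directly):
 *
 *	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, vaddr);
 *
 *	if (gpa == UNMAPPED_GVA)
 *		treat the access as faulting in the guest;
 */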
static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
				 struct kvm_mmu_page *sp)
{
	int i;
	pt_element_t *gpt;

	if (sp->role.metaphysical || PTTYPE == 32) {
		nonpaging_prefetch_page(vcpu, sp);
		return;
	}

	gpt = kmap_atomic(gfn_to_page(vcpu->kvm, sp->gfn), KM_USER0);
	for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
		if (is_present_pte(gpt[i]))
			sp->spt[i] = shadow_trap_nonpresent_pte;
		else
			sp->spt[i] = shadow_notrap_nonpresent_pte;
	kunmap_atomic(gpt, KM_USER0);
}
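/*
 * Design note (sketch): both prefetched values are non-present sptes.
 * shadow_trap_nonpresent_pte is used where the guest pte is present,
 * so the first access traps to kvm and the entry can be shadowed;
 * shadow_notrap_nonpresent_pte marks slots whose guest pte is known to
 * be not present, allowing the resulting fault to be injected into the
 * guest without re-walking the guest page tables.
 */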
#undef pt_element_t
#undef guest_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef SHADOW_PT_INDEX
#undef PT_LEVEL_MASK
#undef PT_DIR_BASE_ADDR_MASK
#undef PT_LEVEL_BITS
#undef PT_MAX_FULL_LEVELS