/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *	Yaniv Kamay  <yaniv@qumranet.com>
 *	Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
/*
 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
 * so the code in this file is compiled twice, once per pte size.
 */
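/*
 * For illustration (this lives in mmu.c, not here): the template is
 * instantiated twice, roughly like so:
 *
 *	#define PTTYPE 64
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 *	#define PTTYPE 32
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 */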
#if PTTYPE == 64
	#define pt_element_t u64
	#define guest_walker guest_walker64
	#define FNAME(name) paging##64_##name
	#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
	#define PT_DIR_BASE_ADDR_MASK PT64_DIR_BASE_ADDR_MASK
	#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
	#define PT_LEVEL_BITS PT64_LEVEL_BITS
	#ifdef CONFIG_X86_64
	#define PT_MAX_FULL_LEVELS 4
	#else
	#define PT_MAX_FULL_LEVELS 2
	#endif
#elif PTTYPE == 32
	#define pt_element_t u32
	#define guest_walker guest_walker32
	#define FNAME(name) paging##32_##name
	#define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
	#define PT_DIR_BASE_ADDR_MASK PT32_DIR_BASE_ADDR_MASK
	#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
	/* shadow page tables are always 64-bit, hence the PT64 index here */
	#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
	#define PT_LEVEL_BITS PT32_LEVEL_BITS
	#define PT_MAX_FULL_LEVELS 2
#else
	#error Invalid PTTYPE value
#endif
#define gpte_to_gfn FNAME(gpte_to_gfn)
#define gpte_to_gfn_pde FNAME(gpte_to_gfn_pde)
/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
struct guest_walker {
	int level;
	gfn_t table_gfn[PT_MAX_FULL_LEVELS];
	pt_element_t pte;
	pt_element_t inherited_ar;
	gfn_t gfn;
	u32 error_code;
};
static gfn_t gpte_to_gfn(pt_element_t gpte)
{
	return (gpte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
}
static gfn_t gpte_to_gfn_pde(pt_element_t gpte)
{
	return (gpte & PT_DIR_BASE_ADDR_MASK) >> PAGE_SHIFT;
}
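/*
 * Worked example (illustrative), assuming PTTYPE == 32 and 4K pages
 * (PAGE_SHIFT == 12, base address mask 0xfffff000): a gpte of 0x00345067
 * keeps its frame base in bits 31:12 and its flags in the low bits, so
 * gpte_to_gfn() returns (0x00345067 & 0xfffff000) >> 12 == 0x345.
 */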
/*
 * Fetch a guest pte for a guest virtual address
 */
static int FNAME(walk_addr)(struct guest_walker *walker,
			    struct kvm_vcpu *vcpu, gva_t addr,
			    int write_fault, int user_fault, int fetch_fault)
{
	pt_element_t pte;
	gfn_t table_gfn;
	unsigned index;
	gpa_t pte_gpa;
	pgprintk("%s: addr %lx\n", __FUNCTION__, addr);
	walker->level = vcpu->mmu.root_level;
	pte = vcpu->cr3;
#if PTTYPE == 64
	if (!is_long_mode(vcpu)) {
		/* PAE: the walk starts at one of the four cached pdptrs */
		pte = vcpu->pdptrs[(addr >> 30) & 3];
		if (!is_present_pte(pte))
			goto not_present;
		--walker->level;
	}
#endif
	ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
	       (vcpu->cr3 & CR3_NONPAE_RESERVED_BITS) == 0);

	walker->inherited_ar = PT_USER_MASK | PT_WRITABLE_MASK;
	for (;;) {
		index = PT_INDEX(addr, walker->level);

		table_gfn = gpte_to_gfn(pte);
		pte_gpa = table_gfn << PAGE_SHIFT;
		pte_gpa += index * sizeof(pt_element_t);
		walker->table_gfn[walker->level - 1] = table_gfn;
		pgprintk("%s: table_gfn[%d] %lx\n", __FUNCTION__,
			 walker->level - 1, table_gfn);

		kvm_read_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte));

		if (!is_present_pte(pte))
			goto not_present;

		if (write_fault && !is_writeble_pte(pte))
			if (user_fault || is_write_protection(vcpu))
				goto access_error;

		if (user_fault && !(pte & PT_USER_MASK))
			goto access_error;

#if PTTYPE == 64
		if (fetch_fault && is_nx(vcpu) && (pte & PT64_NX_MASK))
			goto access_error;
#endif

		if (!(pte & PT_ACCESSED_MASK)) {
			mark_page_dirty(vcpu->kvm, table_gfn);
			pte |= PT_ACCESSED_MASK;
			kvm_write_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte));
		}

		if (walker->level == PT_PAGE_TABLE_LEVEL) {
			walker->gfn = gpte_to_gfn(pte);
			break;
		}

		if (walker->level == PT_DIRECTORY_LEVEL
		    && (pte & PT_PAGE_SIZE_MASK)
		    && (PTTYPE == 64 || is_pse(vcpu))) {
			walker->gfn = gpte_to_gfn_pde(pte);
			walker->gfn += PT_INDEX(addr, PT_PAGE_TABLE_LEVEL);
			if (PTTYPE == 32 && is_cpuid_PSE36())
				walker->gfn += pse36_gfn_delta(pte);
			break;
		}

		walker->inherited_ar &= pte;
		--walker->level;
	}
	if (write_fault && !is_dirty_pte(pte)) {
		mark_page_dirty(vcpu->kvm, table_gfn);
		pte |= PT_DIRTY_MASK;
		kvm_write_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte));
		kvm_mmu_pte_write(vcpu, pte_gpa, (u8 *)&pte, sizeof(pte));
	}

	walker->pte = pte;
	pgprintk("%s: pte %llx\n", __FUNCTION__, (u64)pte);
	return 1;

not_present:
	walker->error_code = 0;
	goto err;

access_error:
	walker->error_code = PFERR_PRESENT_MASK;

err:
	if (write_fault)
		walker->error_code |= PFERR_WRITE_MASK;
	if (user_fault)
		walker->error_code |= PFERR_USER_MASK;
	if (fetch_fault)
		walker->error_code |= PFERR_FETCH_MASK;
	return 0;
}
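/*
 * Worked example (illustrative): a user-mode write to a present but
 * read-only pte takes the access_error path above, leaving
 * walker->error_code == PFERR_PRESENT_MASK | PFERR_WRITE_MASK |
 * PFERR_USER_MASK -- the same bit layout as the hardware #PF error code
 * later reflected into the guest via inject_page_fault().
 */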
static void FNAME(set_pte)(struct kvm_vcpu *vcpu, pt_element_t gpte,
			   u64 *shadow_pte, u64 access_bits,
			   int user_fault, int write_fault,
			   int *ptwrite, struct guest_walker *walker,
			   gfn_t gfn)
{
	int dirty = gpte & PT_DIRTY_MASK;
	u64 spte;
	int was_rmapped = is_rmap_pte(*shadow_pte);
	struct page *page;

	pgprintk("%s: spte %llx gpte %llx access %llx write_fault %d"
		 " user_fault %d gfn %lx\n",
		 __FUNCTION__, *shadow_pte, (u64)gpte, access_bits,
		 write_fault, user_fault, gfn);
	/*
	 * We don't set the accessed bit, since we sometimes want to see
	 * whether the guest actually used the pte (in order to detect
	 * demand paging).
	 */
	spte = PT_PRESENT_MASK | PT_DIRTY_MASK;
	spte |= gpte & PT64_NX_MASK;
	if (!dirty)
		access_bits &= ~PT_WRITABLE_MASK;

	page = gfn_to_page(vcpu->kvm, gfn);

	spte |= PT_PRESENT_MASK;
	if (access_bits & PT_USER_MASK)
		spte |= PT_USER_MASK;
	if (is_error_page(page)) {
		set_shadow_pte(shadow_pte,
			       shadow_trap_nonpresent_pte | PT_SHADOW_IO_MARK);
		kvm_release_page_clean(page);
		return;
	}

	spte |= page_to_phys(page);
	if ((access_bits & PT_WRITABLE_MASK)
	    || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
		struct kvm_mmu_page *shadow;

		spte |= PT_WRITABLE_MASK;
		if (user_fault) {
			mmu_unshadow(vcpu->kvm, gfn);
			goto unshadowed;
		}

		shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
		if (shadow) {
			/*
			 * The gfn is itself a shadowed guest page table:
			 * keep the mapping read only so guest pte writes trap.
			 */
			pgprintk("%s: found shadow page for %lx, marking ro\n",
				 __FUNCTION__, gfn);
			access_bits &= ~PT_WRITABLE_MASK;
			if (is_writeble_pte(spte)) {
				spte &= ~PT_WRITABLE_MASK;
				kvm_x86_ops->tlb_flush(vcpu);
			}
			if (write_fault)
				*ptwrite = 1;
		}
	}
unshadowed:
	if (access_bits & PT_WRITABLE_MASK)
		mark_page_dirty(vcpu->kvm, gfn);

	pgprintk("%s: setting spte %llx\n", __FUNCTION__, spte);
	set_shadow_pte(shadow_pte, spte);
	page_header_update_slot(vcpu->kvm, shadow_pte,
				(gpa_t)gfn << PAGE_SHIFT);
	if (!was_rmapped) {
		rmap_add(vcpu, shadow_pte, gfn);
		if (!is_rmap_pte(*shadow_pte))
			kvm_release_page_clean(page);
	} else
		kvm_release_page_clean(page);
	if (!ptwrite || !*ptwrite)
		vcpu->last_pte_updated = shadow_pte;
}
/* called when the guest writes a shadowed guest pte */
static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
			      u64 *spte, const void *pte, int bytes,
			      int offset_in_pte)
{
	pt_element_t gpte;

	gpte = *(const pt_element_t *)pte;
	if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) {
		if (!offset_in_pte && !is_present_pte(gpte))
			set_shadow_pte(spte, shadow_notrap_nonpresent_pte);
		return;
	}
	if (bytes < sizeof(pt_element_t))
		return;
	pgprintk("%s: gpte %llx spte %p\n", __FUNCTION__, (u64)gpte, spte);
	FNAME(set_pte)(vcpu, gpte, spte, PT_USER_MASK | PT_WRITABLE_MASK, 0,
		       0, NULL, NULL, gpte_to_gfn(gpte));
}
/*
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 */
static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
			 struct guest_walker *walker,
			 int user_fault, int write_fault, int *ptwrite)
{
	hpa_t shadow_addr;
	int level;
	u64 *shadow_ent;
	u64 *prev_shadow_ent = NULL;
	if (!is_present_pte(walker->pte))
		return NULL;

	shadow_addr = vcpu->mmu.root_hpa;
	level = vcpu->mmu.shadow_root_level;
	if (level == PT32E_ROOT_LEVEL) {
		shadow_addr = vcpu->mmu.pae_root[(addr >> 30) & 3];
		shadow_addr &= PT64_BASE_ADDR_MASK;
		--level;
	}
	for (; ; level--) {
		u32 index = SHADOW_PT_INDEX(addr, level);
		struct kvm_mmu_page *shadow_page;
		u64 shadow_pte;
		int metaphysical;
		gfn_t table_gfn;
		unsigned hugepage_access = 0;

		shadow_ent = ((u64 *)__va(shadow_addr)) + index;
		if (is_shadow_present_pte(*shadow_ent)) {
			if (level == PT_PAGE_TABLE_LEVEL)
				break;
			shadow_addr = *shadow_ent & PT64_BASE_ADDR_MASK;
			prev_shadow_ent = shadow_ent;
			continue;
		}
		if (level == PT_PAGE_TABLE_LEVEL)
			break;

		if (level - 1 == PT_PAGE_TABLE_LEVEL
		    && walker->level == PT_DIRECTORY_LEVEL) {
			metaphysical = 1;
			hugepage_access = walker->pte;
			hugepage_access &= PT_USER_MASK | PT_WRITABLE_MASK;
			if (!is_dirty_pte(walker->pte))
				hugepage_access &= ~PT_WRITABLE_MASK;
			hugepage_access >>= PT_WRITABLE_SHIFT;
			if (walker->pte & PT64_NX_MASK)
				hugepage_access |= (1 << 2);
			table_gfn = gpte_to_gfn(walker->pte);
		} else {
			metaphysical = 0;
			table_gfn = walker->table_gfn[level - 2];
		}
		shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
					       metaphysical, hugepage_access,
					       shadow_ent);
		shadow_addr = __pa(shadow_page->spt);
		shadow_pte = shadow_addr | PT_PRESENT_MASK | PT_ACCESSED_MASK
			| PT_WRITABLE_MASK | PT_USER_MASK;
		*shadow_ent = shadow_pte;
		prev_shadow_ent = shadow_ent;
	}
	FNAME(set_pte)(vcpu, walker->pte, shadow_ent,
		       walker->inherited_ar, user_fault, write_fault,
		       ptwrite, walker, walker->gfn);

	return shadow_ent;
}
/*
 * Page fault handler.  There are several causes for a page fault:
 *           - there is no shadow pte for the guest pte
 *           - write access through a shadow pte marked read only so that we can set
 *             the dirty bit
 *           - write access to a shadow pte marked read only so we can update the page
 *             dirty bitmap, when userspace requests it
 *           - mmio access; in this case we will never install a present shadow pte
 *           - normal guest page fault due to the guest pte marked not present, not
 *             writable, or not executable
 *
 *  Returns: 1 if we need to emulate the instruction, 0 otherwise, or
 *           a negative value on error (an illustrative caller sketch
 *           follows the function).
 */
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
			     u32 error_code)
{
	int write_fault = error_code & PFERR_WRITE_MASK;
	int user_fault = error_code & PFERR_USER_MASK;
	int fetch_fault = error_code & PFERR_FETCH_MASK;
	struct guest_walker walker;
	u64 *shadow_pte;
	int write_pt = 0;
	int r;

	pgprintk("%s: addr %lx err %x\n", __FUNCTION__, addr, error_code);
	kvm_mmu_audit(vcpu, "pre page fault");
	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;
	/*
	 * Look up the shadow pte for the faulting address.
	 */
	r = FNAME(walk_addr)(&walker, vcpu, addr, write_fault, user_fault,
			     fetch_fault);

	/*
	 * The page is not mapped by the guest.  Let the guest handle it.
	 */
	if (!r) {
		pgprintk("%s: guest page fault\n", __FUNCTION__);
		inject_page_fault(vcpu, addr, walker.error_code);
		vcpu->last_pt_write_count = 0; /* reset fork detector */
		return 0;
	}
	shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
				  &write_pt);
	pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __FUNCTION__,
		 shadow_pte, *shadow_pte, write_pt);

	if (!write_pt)
		vcpu->last_pt_write_count = 0; /* reset fork detector */

	/*
	 * mmio: emulate if accessible, otherwise it's a guest fault.
	 */
	if (is_io_pte(*shadow_pte))
		return 1;

	++vcpu->stat.pf_fixed;
	kvm_mmu_audit(vcpu, "post page fault (fixed)");

	return write_pt;
}
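/*
 * Illustrative sketch (simplified; names per the surrounding KVM code) of
 * how a caller in mmu.c typically consumes the return value:
 *
 *	r = vcpu->mmu.page_fault(vcpu, cr2, error_code);
 *	if (r < 0)
 *		return r;		// propagate the error
 *	if (!r)
 *		return 1;		// fault fixed, resume the guest
 *	er = emulate_instruction(vcpu, vcpu->run, cr2, error_code, 0);
 */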
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
{
	struct guest_walker walker;
	gpa_t gpa = UNMAPPED_GVA;
	int r;

	r = FNAME(walk_addr)(&walker, vcpu, vaddr, 0, 0, 0);

	if (r) {
		gpa = (gpa_t)walker.gfn << PAGE_SHIFT;
		gpa |= vaddr & ~PAGE_MASK;
	}

	return gpa;
}
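/*
 * Worked example (illustrative): if walk_addr() resolves vaddr 0x00401234
 * to walker.gfn == 0x345, the result is (0x345 << PAGE_SHIFT) | 0x234
 * == 0x345234 (assuming 4K pages).
 */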
/* pre-populate a shadow page's sptes with trap/notrap markers */
static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
				 struct kvm_mmu_page *sp)
{
	int i, offset = 0;
	pt_element_t *gpt;
	struct page *page;

	if (sp->role.metaphysical
	    || (PTTYPE == 32 && sp->role.level > PT_PAGE_TABLE_LEVEL)) {
		nonpaging_prefetch_page(vcpu, sp);
		return;
	}

	if (PTTYPE == 32)
		offset = sp->role.quadrant << PT64_LEVEL_BITS;
	page = gfn_to_page(vcpu->kvm, sp->gfn);
	gpt = kmap_atomic(page, KM_USER0);
	for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
		if (is_present_pte(gpt[offset + i]))
			sp->spt[i] = shadow_trap_nonpresent_pte;
		else
			sp->spt[i] = shadow_notrap_nonpresent_pte;
	kunmap_atomic(gpt, KM_USER0);
	kvm_release_page_clean(page);
}
#undef pt_element_t
#undef guest_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef SHADOW_PT_INDEX
#undef PT_LEVEL_MASK
#undef PT_DIR_BASE_ADDR_MASK
#undef PT_LEVEL_BITS
#undef PT_MAX_FULL_LEVELS
#undef gpte_to_gfn
#undef gpte_to_gfn_pde