/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
/*
 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
 * so the code in this file is compiled twice, once per pte size.
 */
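/*
 * For illustration: the includer is expected to instantiate both pte
 * sizes by defining PTTYPE before each inclusion, roughly:
 *
 *         #define PTTYPE 64
 *         #include "paging_tmpl.h"
 *         #undef PTTYPE
 *
 *         #define PTTYPE 32
 *         #include "paging_tmpl.h"
 *         #undef PTTYPE
 *
 * which is why every macro defined here is #undef'ed again at the end
 * of the file.
 */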
#if PTTYPE == 64
        #define pt_element_t u64
        #define guest_walker guest_walker64
        #define FNAME(name) paging##64_##name
        #define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
        #define PT_DIR_BASE_ADDR_MASK PT64_DIR_BASE_ADDR_MASK
        #define PT_INDEX(addr, level) PT64_INDEX(addr, level)
        #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
        #define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
        #define PT_LEVEL_BITS PT64_LEVEL_BITS
        #ifdef CONFIG_X86_64
        #define PT_MAX_FULL_LEVELS 4
        #define CMPXCHG cmpxchg
        #else
        #define CMPXCHG cmpxchg64
        #define PT_MAX_FULL_LEVELS 2
        #endif
#elif PTTYPE == 32
        #define pt_element_t u32
        #define guest_walker guest_walker32
        #define FNAME(name) paging##32_##name
        #define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
        #define PT_DIR_BASE_ADDR_MASK PT32_DIR_BASE_ADDR_MASK
        #define PT_INDEX(addr, level) PT32_INDEX(addr, level)
        #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
        #define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
        #define PT_LEVEL_BITS PT32_LEVEL_BITS
        #define PT_MAX_FULL_LEVELS 2
        #define CMPXCHG cmpxchg
#else
        #error Invalid PTTYPE value
#endif
#define gpte_to_gfn FNAME(gpte_to_gfn)
#define gpte_to_gfn_pde FNAME(gpte_to_gfn_pde)
/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
struct guest_walker {
        int level;
        gfn_t table_gfn[PT_MAX_FULL_LEVELS];
        pt_element_t pte;
        gfn_t gfn;
        unsigned pt_access;
        unsigned pte_access;
        u32 error_code;
};
static gfn_t gpte_to_gfn(pt_element_t gpte)
{
        return (gpte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
}
static gfn_t gpte_to_gfn_pde(pt_element_t gpte)
{
        return (gpte & PT_DIR_BASE_ADDR_MASK) >> PAGE_SHIFT;
}
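/*
 * Atomically exchange a guest pte in its guest page table; used by the
 * walker to set the accessed and dirty bits.  Returns true if the pte
 * changed under us, in which case the caller restarts the walk.
 */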
static bool FNAME(cmpxchg_gpte)(struct kvm *kvm,
                                gfn_t table_gfn, unsigned index,
                                pt_element_t orig_pte, pt_element_t new_pte)
{
        pt_element_t ret;
        pt_element_t *table;
        struct page *page;

        page = gfn_to_page(kvm, table_gfn);
        table = kmap_atomic(page, KM_USER0);

        ret = CMPXCHG(&table[index], orig_pte, new_pte);

        kunmap_atomic(table, KM_USER0);

        kvm_release_page_dirty(page);

        return (ret != orig_pte);
}
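/*
 * Collapse a guest pte's protection bits into an ACC_* access mask:
 * write and user come straight from the pte, exec is granted unless
 * the NX bit is set (64-bit ptes only).
 */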
static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte)
{
        unsigned access;

        access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
#if PTTYPE == 64
        if (is_nx(vcpu))
                access &= ~(gpte >> PT64_NX_SHIFT);
#endif
        return access;
}
/*
 * Fetch a guest pte for a guest virtual address
 */
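/*
 * On success the walker holds the final gfn, the leaf pte and the
 * access rights accumulated along the walk (pt_access/pte_access); on
 * failure walker->error_code holds the page-fault error code to be
 * injected into the guest.
 */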
static int FNAME(walk_addr)(struct guest_walker *walker,
                            struct kvm_vcpu *vcpu, gva_t addr,
                            int write_fault, int user_fault, int fetch_fault)
{
        pt_element_t pte;
        gfn_t table_gfn;
        unsigned index, pt_access, pte_access;
        gpa_t pte_gpa;

        pgprintk("%s: addr %lx\n", __FUNCTION__, addr);
walk:
        walker->level = vcpu->mmu.root_level;
        pte = vcpu->cr3;
#if PTTYPE == 64
        if (!is_long_mode(vcpu)) {
                pte = vcpu->pdptrs[(addr >> 30) & 3];
                if (!is_present_pte(pte))
                        goto not_present;
                --walker->level;
        }
#endif
        ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
               (vcpu->cr3 & CR3_NONPAE_RESERVED_BITS) == 0);

        pt_access = ACC_ALL;

        for (;;) {
                index = PT_INDEX(addr, walker->level);

                table_gfn = gpte_to_gfn(pte);
                pte_gpa = gfn_to_gpa(table_gfn);
                pte_gpa += index * sizeof(pt_element_t);
                walker->table_gfn[walker->level - 1] = table_gfn;
                pgprintk("%s: table_gfn[%d] %lx\n", __FUNCTION__,
                         walker->level - 1, table_gfn);

                kvm_read_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte));

                if (!is_present_pte(pte))
                        goto not_present;

                if (write_fault && !is_writeble_pte(pte))
                        if (user_fault || is_write_protection(vcpu))
                                goto access_error;

                if (user_fault && !(pte & PT_USER_MASK))
                        goto access_error;

#if PTTYPE == 64
                if (fetch_fault && is_nx(vcpu) && (pte & PT64_NX_MASK))
                        goto access_error;
#endif

                if (!(pte & PT_ACCESSED_MASK)) {
                        mark_page_dirty(vcpu->kvm, table_gfn);
                        if (FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn,
                            index, pte, pte|PT_ACCESSED_MASK))
                                goto walk;
                        pte |= PT_ACCESSED_MASK;
                }

                pte_access = pt_access & FNAME(gpte_access)(vcpu, pte);

                if (walker->level == PT_PAGE_TABLE_LEVEL) {
                        walker->gfn = gpte_to_gfn(pte);
                        break;
                }

                if (walker->level == PT_DIRECTORY_LEVEL
                    && (pte & PT_PAGE_SIZE_MASK)
                    && (PTTYPE == 64 || is_pse(vcpu))) {
                        walker->gfn = gpte_to_gfn_pde(pte);
                        walker->gfn += PT_INDEX(addr, PT_PAGE_TABLE_LEVEL);
                        if (PTTYPE == 32 && is_cpuid_PSE36())
                                walker->gfn += pse36_gfn_delta(pte);
                        break;
                }

                pt_access = pte_access;
                --walker->level;
        }

        if (write_fault && !is_dirty_pte(pte)) {
                bool ret;

                mark_page_dirty(vcpu->kvm, table_gfn);
                ret = FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn, index, pte,
                                          pte|PT_DIRTY_MASK);
                if (ret)
                        goto walk;
                pte |= PT_DIRTY_MASK;
                kvm_mmu_pte_write(vcpu, pte_gpa, (u8 *)&pte, sizeof(pte));
        }

        walker->pte = pte;
        walker->pt_access = pt_access;
        walker->pte_access = pte_access;
        pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
                 __FUNCTION__, (u64)pte, pt_access, pte_access);
        return 1;

not_present:
        walker->error_code = 0;
        goto err;

access_error:
        walker->error_code = PFERR_PRESENT_MASK;

err:
        if (write_fault)
                walker->error_code |= PFERR_WRITE_MASK;
        if (user_fault)
                walker->error_code |= PFERR_USER_MASK;
        if (fetch_fault)
                walker->error_code |= PFERR_FETCH_MASK;
        return 0;
}
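/*
 * Install a leaf shadow pte for @gfn, deriving its protection from the
 * guest pte and the accumulated access rights.  Writable mappings of
 * pages that are themselves shadowed guest page tables are downgraded
 * to read-only so that guest pte writes can be intercepted.
 */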
static void FNAME(set_pte)(struct kvm_vcpu *vcpu, pt_element_t gpte,
                           u64 *shadow_pte, unsigned pt_access,
                           unsigned pte_access,
                           int user_fault, int write_fault,
                           int *ptwrite, struct guest_walker *walker,
                           gfn_t gfn)
{
        int dirty = gpte & PT_DIRTY_MASK;
        u64 spte;
        int was_rmapped = is_rmap_pte(*shadow_pte);
        struct page *page;

        pgprintk("%s: spte %llx gpte %llx access %x write_fault %d"
                 " user_fault %d gfn %lx\n",
                 __FUNCTION__, *shadow_pte, (u64)gpte, pt_access,
                 write_fault, user_fault, gfn);

        /*
         * We don't set the accessed bit, since we sometimes want to see
         * whether the guest actually used the pte (in order to detect
         * demand paging).
         */
        spte = PT_PRESENT_MASK | PT_DIRTY_MASK;
        spte |= gpte & PT64_NX_MASK;
        if (!dirty)
                pte_access &= ~ACC_WRITE_MASK;
        if (!(pte_access & ACC_EXEC_MASK))
                spte |= PT64_NX_MASK;

        page = gfn_to_page(vcpu->kvm, gfn);

        spte |= PT_PRESENT_MASK;
        if (pte_access & ACC_USER_MASK)
                spte |= PT_USER_MASK;

        if (is_error_page(page)) {
                set_shadow_pte(shadow_pte,
                               shadow_trap_nonpresent_pte | PT_SHADOW_IO_MARK);
                kvm_release_page_clean(page);
                return;
        }

        spte |= page_to_phys(page);

        if ((pte_access & ACC_WRITE_MASK)
            || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
                struct kvm_mmu_page *shadow;

                spte |= PT_WRITABLE_MASK;
                if (user_fault) {
                        mmu_unshadow(vcpu->kvm, gfn);
                        goto unshadowed;
                }

                shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
                if (shadow) {
                        pgprintk("%s: found shadow page for %lx, marking ro\n",
                                 __FUNCTION__, gfn);
                        pte_access &= ~ACC_WRITE_MASK;
                        if (is_writeble_pte(spte)) {
                                spte &= ~PT_WRITABLE_MASK;
                                kvm_x86_ops->tlb_flush(vcpu);
                        }
                        if (write_fault)
                                *ptwrite = 1;
                }
        }

unshadowed:

        if (pte_access & ACC_WRITE_MASK)
                mark_page_dirty(vcpu->kvm, gfn);

        pgprintk("%s: setting spte %llx\n", __FUNCTION__, spte);
        set_shadow_pte(shadow_pte, spte);
        page_header_update_slot(vcpu->kvm, shadow_pte, gfn);
        if (!was_rmapped) {
                rmap_add(vcpu, shadow_pte, gfn);
                if (!is_rmap_pte(*shadow_pte))
                        kvm_release_page_clean(page);
        } else
                kvm_release_page_clean(page);
        if (!ptwrite || !*ptwrite)
                vcpu->last_pte_updated = shadow_pte;
}
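/*
 * Refresh a shadow pte after the guest has written the corresponding
 * guest pte (called from the pte write-emulation path).
 */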
static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
                              u64 *spte, const void *pte, int bytes,
                              int offset_in_pte)
{
        pt_element_t gpte;

        gpte = *(const pt_element_t *)pte;
        if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) {
                if (!offset_in_pte && !is_present_pte(gpte))
                        set_shadow_pte(spte, shadow_notrap_nonpresent_pte);
                return;
        }
        if (bytes < sizeof(pt_element_t))
                return;
        pgprintk("%s: gpte %llx spte %p\n", __FUNCTION__, (u64)gpte, spte);
        FNAME(set_pte)(vcpu, gpte, spte, ACC_ALL, ACC_ALL,
                       0, 0, NULL, NULL, gpte_to_gfn(gpte));
}
/*
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 */
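/*
 * Walks the shadow page tables from the root, allocating missing
 * intermediate shadow pages with kvm_mmu_get_page(), and installs the
 * leaf via FNAME(set_pte)().  Returns a pointer to the shadow pte, or
 * NULL if the guest pte is not present.
 */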
static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
                         struct guest_walker *walker,
                         int user_fault, int write_fault, int *ptwrite)
{
        hpa_t shadow_addr;
        int level;
        u64 *shadow_ent;
        unsigned access = walker->pt_access;

        if (!is_present_pte(walker->pte))
                return NULL;

        shadow_addr = vcpu->mmu.root_hpa;
        level = vcpu->mmu.shadow_root_level;
        if (level == PT32E_ROOT_LEVEL) {
                shadow_addr = vcpu->mmu.pae_root[(addr >> 30) & 3];
                shadow_addr &= PT64_BASE_ADDR_MASK;
                --level;
        }

        for (; ; level--) {
                u32 index = SHADOW_PT_INDEX(addr, level);
                struct kvm_mmu_page *shadow_page;
                u64 shadow_pte;
                int metaphysical;
                gfn_t table_gfn;

                shadow_ent = ((u64 *)__va(shadow_addr)) + index;
                if (is_shadow_present_pte(*shadow_ent)) {
                        if (level == PT_PAGE_TABLE_LEVEL)
                                break;
                        shadow_addr = *shadow_ent & PT64_BASE_ADDR_MASK;
                        continue;
                }

                if (level == PT_PAGE_TABLE_LEVEL)
                        break;

                if (level - 1 == PT_PAGE_TABLE_LEVEL
                    && walker->level == PT_DIRECTORY_LEVEL) {
                        metaphysical = 1;
                        if (!is_dirty_pte(walker->pte))
                                access &= ~ACC_WRITE_MASK;
                        table_gfn = gpte_to_gfn(walker->pte);
                } else {
                        metaphysical = 0;
                        table_gfn = walker->table_gfn[level - 2];
                }
                shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
                                               metaphysical, access,
                                               shadow_ent);
                shadow_addr = __pa(shadow_page->spt);
                shadow_pte = shadow_addr | PT_PRESENT_MASK | PT_ACCESSED_MASK
                        | PT_WRITABLE_MASK | PT_USER_MASK;
                *shadow_ent = shadow_pte;
        }

        FNAME(set_pte)(vcpu, walker->pte, shadow_ent,
                       access, walker->pte_access & access,
                       user_fault, write_fault,
                       ptwrite, walker, walker->gfn);

        return shadow_ent;
}
/*
 * Page fault handler.  There are several causes for a page fault:
 *   - there is no shadow pte for the guest pte
 *   - write access through a shadow pte marked read only so that we can set
 *     the dirty bit
 *   - write access to a shadow pte marked read only so we can update the page
 *     dirty bitmap, when userspace requests it
 *   - mmio access; in this case we will never install a present shadow pte
 *   - normal guest page fault due to the guest pte marked not present, not
 *     writable, or not executable
 *
 * Returns: 1 if we need to emulate the instruction, 0 otherwise, or
 *          a negative value on error.
 */
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
                             u32 error_code)
{
        int write_fault = error_code & PFERR_WRITE_MASK;
        int user_fault = error_code & PFERR_USER_MASK;
        int fetch_fault = error_code & PFERR_FETCH_MASK;
        struct guest_walker walker;
        u64 *shadow_pte;
        int write_pt = 0;
        int r;

        pgprintk("%s: addr %lx err %x\n", __FUNCTION__, addr, error_code);
        kvm_mmu_audit(vcpu, "pre page fault");

        r = mmu_topup_memory_caches(vcpu);
        if (r)
                return r;

        /*
         * Look up the shadow pte for the faulting address.
         */
        r = FNAME(walk_addr)(&walker, vcpu, addr, write_fault, user_fault,
                             fetch_fault);

        /*
         * The page is not mapped by the guest.  Let the guest handle it.
         */
        if (!r) {
                pgprintk("%s: guest page fault\n", __FUNCTION__);
                inject_page_fault(vcpu, addr, walker.error_code);
                vcpu->last_pt_write_count = 0; /* reset fork detector */
                return 0;
        }

        shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
                                  &write_pt);
        pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __FUNCTION__,
                 shadow_pte, *shadow_pte, write_pt);

        if (!write_pt)
                vcpu->last_pt_write_count = 0; /* reset fork detector */

        /*
         * mmio: emulate if accessible, otherwise it's a guest fault.
         */
        if (is_io_pte(*shadow_pte))
                return 1;

        ++vcpu->stat.pf_fixed;
        kvm_mmu_audit(vcpu, "post page fault (fixed)");

        return write_pt;
}
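/*
 * Translate a guest virtual address to a guest physical address by
 * software-walking the guest page tables; returns UNMAPPED_GVA if the
 * address is not mapped.
 */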
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
{
        struct guest_walker walker;
        gpa_t gpa = UNMAPPED_GVA;
        int r;

        r = FNAME(walk_addr)(&walker, vcpu, vaddr, 0, 0, 0);

        if (r) {
                gpa = gfn_to_gpa(walker.gfn);
                gpa |= vaddr & ~PAGE_MASK;
        }

        return gpa;
}
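/*
 * Pre-fill a newly shadowed page table: slots whose guest pte is
 * present become shadow_trap_nonpresent_pte (fault into the shadow
 * code), slots whose guest pte is not present become
 * shadow_notrap_nonpresent_pte so the fault can be reflected to the
 * guest directly.
 */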
static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
                                 struct kvm_mmu_page *sp)
{
        int i, offset = 0;
        pt_element_t *gpt;
        struct page *page;

        if (sp->role.metaphysical
            || (PTTYPE == 32 && sp->role.level > PT_PAGE_TABLE_LEVEL)) {
                nonpaging_prefetch_page(vcpu, sp);
                return;
        }

        if (PTTYPE == 32)
                offset = sp->role.quadrant << PT64_LEVEL_BITS;
        page = gfn_to_page(vcpu->kvm, sp->gfn);
        gpt = kmap_atomic(page, KM_USER0);
        for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
                if (is_present_pte(gpt[offset + i]))
                        sp->spt[i] = shadow_trap_nonpresent_pte;
                else
                        sp->spt[i] = shadow_notrap_nonpresent_pte;
        kunmap_atomic(gpt, KM_USER0);
        kvm_release_page_clean(page);
}
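/*
 * Undo the template macros so this file can be included again with a
 * different PTTYPE.
 */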
#undef pt_element_t
#undef guest_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef SHADOW_PT_INDEX
#undef PT_LEVEL_MASK
#undef PT_DIR_BASE_ADDR_MASK
#undef PT_LEVEL_BITS
#undef PT_MAX_FULL_LEVELS
#undef gpte_to_gfn
#undef gpte_to_gfn_pde
#undef CMPXCHG