Commit | Line | Data |
---|---|---|
6aa8b732 AK |
1 | /* |
2 | * Kernel-based Virtual Machine driver for Linux | |
3 | * | |
4 | * This module enables machines with Intel VT-x extensions to run virtual | |
5 | * machines without emulation or binary translation. | |
6 | * | |
7 | * MMU support | |
8 | * | |
9 | * Copyright (C) 2006 Qumranet, Inc. | |
221d059d | 10 | * Copyright 2010 Red Hat, Inc. and/or its affiliates. |
6aa8b732 AK |
11 | * |
12 | * Authors: | |
13 | * Yaniv Kamay <yaniv@qumranet.com> | |
14 | * Avi Kivity <avi@qumranet.com> | |
15 | * | |
16 | * This work is licensed under the terms of the GNU GPL, version 2. See | |
17 | * the COPYING file in the top-level directory. | |
18 | * | |
19 | */ | |
20 | ||
21 | /* | |
22 | * We need the mmu code to access both 32-bit and 64-bit guest ptes, | |
23 | * so the code in this file is compiled twice, once per pte size. | |
24 | */ | |
25 | ||
26 | #if PTTYPE == 64 | |
27 | #define pt_element_t u64 | |
28 | #define guest_walker guest_walker64 | |
29 | #define FNAME(name) paging##64_##name | |
30 | #define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK | |
e04da980 JR |
31 | #define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl) |
32 | #define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl) | |
6aa8b732 | 33 | #define PT_INDEX(addr, level) PT64_INDEX(addr, level) |
6aa8b732 | 34 | #define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level) |
c7addb90 | 35 | #define PT_LEVEL_BITS PT64_LEVEL_BITS |
cea0f0e7 AK |
36 | #ifdef CONFIG_X86_64 |
37 | #define PT_MAX_FULL_LEVELS 4 | |
b3e4e63f | 38 | #define CMPXCHG cmpxchg |
cea0f0e7 | 39 | #else |
b3e4e63f | 40 | #define CMPXCHG cmpxchg64 |
cea0f0e7 AK |
41 | #define PT_MAX_FULL_LEVELS 2 |
42 | #endif | |
6aa8b732 AK |
43 | #elif PTTYPE == 32 |
44 | #define pt_element_t u32 | |
45 | #define guest_walker guest_walker32 | |
46 | #define FNAME(name) paging##32_##name | |
47 | #define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK | |
e04da980 JR |
48 | #define PT_LVL_ADDR_MASK(lvl) PT32_LVL_ADDR_MASK(lvl) |
49 | #define PT_LVL_OFFSET_MASK(lvl) PT32_LVL_OFFSET_MASK(lvl) | |
6aa8b732 | 50 | #define PT_INDEX(addr, level) PT32_INDEX(addr, level) |
6aa8b732 | 51 | #define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level) |
c7addb90 | 52 | #define PT_LEVEL_BITS PT32_LEVEL_BITS |
cea0f0e7 | 53 | #define PT_MAX_FULL_LEVELS 2 |
b3e4e63f | 54 | #define CMPXCHG cmpxchg |
6aa8b732 AK |
55 | #else |
56 | #error Invalid PTTYPE value | |
57 | #endif | |
58 | ||
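For context, this template is compiled twice by the core MMU code, once per PTTYPE, so every FNAME() function is emitted in both a paging64_* and a paging32_* flavor. A minimal sketch of the include pattern used by arch/x86/kvm/mmu.c (shown roughly; only the PTTYPE handling is reproduced here):

```c
/* In mmu.c: instantiate the walker for 64-bit and then 32-bit guest ptes. */
#define PTTYPE 64
#include "paging_tmpl.h"   /* emits paging64_walk_addr(), paging64_page_fault(), ... */
#undef PTTYPE

#define PTTYPE 32
#include "paging_tmpl.h"   /* emits paging32_walk_addr(), paging32_page_fault(), ... */
#undef PTTYPE
```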
e04da980 JR |
59 | #define gpte_to_gfn_lvl FNAME(gpte_to_gfn_lvl) |
60 | #define gpte_to_gfn(pte) gpte_to_gfn_lvl((pte), PT_PAGE_TABLE_LEVEL) | |
5fb07ddb | 61 | |
6aa8b732 AK |
62 | /* |
63 | * The guest_walker structure emulates the behavior of the hardware page | |
64 | * table walker. | |
65 | */ | |
66 | struct guest_walker { | |
67 | int level; | |
cea0f0e7 | 68 | gfn_t table_gfn[PT_MAX_FULL_LEVELS]; |
7819026e MT |
69 | pt_element_t ptes[PT_MAX_FULL_LEVELS]; |
70 | gpa_t pte_gpa[PT_MAX_FULL_LEVELS]; | |
fe135d2c AK |
71 | unsigned pt_access; |
72 | unsigned pte_access; | |
815af8d4 | 73 | gfn_t gfn; |
7993ba43 | 74 | u32 error_code; |
6aa8b732 AK |
75 | }; |
76 | ||
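As an illustration of what a successful walk leaves behind, here is a sketch of the walker state for a 64-bit guest with 4-level paging whose walk ends in a 4 KiB page (an assumed scenario, not taken from this file):

```c
/*
 * Sketch: guest_walker after FNAME(walk_addr) succeeds for a
 * PT_MAX_FULL_LEVELS == 4 walk that terminates in a 4 KiB page:
 *
 *   walker.level        == 4 at the start, decremented to 1 by the loop
 *   walker.table_gfn[3] == frame of the guest PML4
 *   walker.table_gfn[2] == frame of the guest PDPT
 *   walker.table_gfn[1] == frame of the guest page directory
 *   walker.table_gfn[0] == frame of the guest page table
 *   walker.ptes[n]      == the gpte that was read at each of those levels
 *   walker.gfn          == frame the final pte points at (plus the in-page
 *                          offset when the walk stops at a large pte)
 *   walker.pt_access    == access bits accumulated over the upper levels
 *   walker.pte_access   == pt_access further restricted by the final pte
 */
```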
e04da980 | 77 | static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl) |
5fb07ddb | 78 | { |
e04da980 | 79 | return (gpte & PT_LVL_ADDR_MASK(lvl)) >> PAGE_SHIFT; |
5fb07ddb AK |
80 | } |
81 | ||
b3e4e63f MT |
82 | static bool FNAME(cmpxchg_gpte)(struct kvm *kvm, |
83 | gfn_t table_gfn, unsigned index, | |
84 | pt_element_t orig_pte, pt_element_t new_pte) | |
85 | { | |
86 | pt_element_t ret; | |
87 | pt_element_t *table; | |
88 | struct page *page; | |
89 | ||
90 | page = gfn_to_page(kvm, table_gfn); | |
72dc67a6 | 91 | |
b3e4e63f | 92 | table = kmap_atomic(page, KM_USER0); |
b3e4e63f | 93 | ret = CMPXCHG(&table[index], orig_pte, new_pte); |
b3e4e63f MT |
94 | kunmap_atomic(table, KM_USER0); |
95 | ||
96 | kvm_release_page_dirty(page); | |
97 | ||
98 | return (ret != orig_pte); | |
99 | } | |
100 | ||
bedbe4ee AK |
101 | static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte) |
102 | { | |
103 | unsigned access; | |
104 | ||
105 | access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK; | |
106 | #if PTTYPE == 64 | |
107 | if (is_nx(vcpu)) | |
108 | access &= ~(gpte >> PT64_NX_SHIFT); | |
109 | #endif | |
110 | return access; | |
111 | } | |
112 | ||
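The NX handling above works because the guest NX bit sits in bit 63 while the exec permission is bit 0, so shifting the gpte right by PT64_NX_SHIFT lines the NX bit up with ACC_EXEC_MASK. A hedged worked example (the mask values below are the usual KVM definitions, assumed rather than taken from this file):

```c
/*
 * Assumed values: ACC_EXEC_MASK = 1, PT_WRITABLE_MASK = 1 << 1,
 * PT_USER_MASK = 1 << 2, PT64_NX_SHIFT = 63.
 *
 * gpte = present | writable | user | NX (bit 63):
 *   access  = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK
 *           = 0x2 | 0x4 | 0x1 = 0x7
 *   access &= ~(gpte >> 63)  ->  access &= ~1  ->  access = 0x6
 *
 * Execute permission is dropped exactly when the guest set NX; with NX
 * clear, (gpte >> 63) == 0 and the exec bit survives.
 */
```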
ac79c978 AK |
113 | /* |
114 | * Fetch a guest pte for a guest virtual address | |
115 | */ | |
7993ba43 AK |
116 | static int FNAME(walk_addr)(struct guest_walker *walker, |
117 | struct kvm_vcpu *vcpu, gva_t addr, | |
73b1087e | 118 | int write_fault, int user_fault, int fetch_fault) |
6aa8b732 | 119 | { |
42bf3f0a | 120 | pt_element_t pte; |
cea0f0e7 | 121 | gfn_t table_gfn; |
fe135d2c | 122 | unsigned index, pt_access, pte_access; |
42bf3f0a | 123 | gpa_t pte_gpa; |
82725b20 | 124 | int rsvd_fault = 0; |
6aa8b732 | 125 | |
07420171 AK |
126 | trace_kvm_mmu_pagetable_walk(addr, write_fault, user_fault, |
127 | fetch_fault); | |
b3e4e63f | 128 | walk: |
ad312c7c ZX |
129 | walker->level = vcpu->arch.mmu.root_level; |
130 | pte = vcpu->arch.cr3; | |
1b0973bd AK |
131 | #if PTTYPE == 64 |
132 | if (!is_long_mode(vcpu)) { | |
6de4f3ad | 133 | pte = kvm_pdptr_read(vcpu, (addr >> 30) & 3); |
07420171 | 134 | trace_kvm_mmu_paging_element(pte, walker->level); |
43a3795a | 135 | if (!is_present_gpte(pte)) |
7993ba43 | 136 | goto not_present; |
1b0973bd AK |
137 | --walker->level; |
138 | } | |
139 | #endif | |
a9058ecd | 140 | ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) || |
24993d53 | 141 | (vcpu->arch.cr3 & CR3_NONPAE_RESERVED_BITS) == 0); |
6aa8b732 | 142 | |
fe135d2c | 143 | pt_access = ACC_ALL; |
ac79c978 AK |
144 | |
145 | for (;;) { | |
42bf3f0a | 146 | index = PT_INDEX(addr, walker->level); |
ac79c978 | 147 | |
5fb07ddb | 148 | table_gfn = gpte_to_gfn(pte); |
1755fbcc | 149 | pte_gpa = gfn_to_gpa(table_gfn); |
ec8d4eae | 150 | pte_gpa += index * sizeof(pt_element_t); |
42bf3f0a | 151 | walker->table_gfn[walker->level - 1] = table_gfn; |
7819026e | 152 | walker->pte_gpa[walker->level - 1] = pte_gpa; |
42bf3f0a | 153 | |
a6085fba MT |
154 | if (kvm_read_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte))) |
155 | goto not_present; | |
156 | ||
07420171 | 157 | trace_kvm_mmu_paging_element(pte, walker->level); |
42bf3f0a | 158 | |
43a3795a | 159 | if (!is_present_gpte(pte)) |
7993ba43 AK |
160 | goto not_present; |
161 | ||
82725b20 DE |
162 | rsvd_fault = is_rsvd_bits_set(vcpu, pte, walker->level); |
163 | if (rsvd_fault) | |
164 | goto access_error; | |
165 | ||
8dae4445 | 166 | if (write_fault && !is_writable_pte(pte)) |
7993ba43 AK |
167 | if (user_fault || is_write_protection(vcpu)) |
168 | goto access_error; | |
169 | ||
42bf3f0a | 170 | if (user_fault && !(pte & PT_USER_MASK)) |
7993ba43 AK |
171 | goto access_error; |
172 | ||
73b1087e | 173 | #if PTTYPE == 64 |
24222c2f | 174 | if (fetch_fault && (pte & PT64_NX_MASK)) |
73b1087e AK |
175 | goto access_error; |
176 | #endif | |
177 | ||
42bf3f0a | 178 | if (!(pte & PT_ACCESSED_MASK)) { |
07420171 AK |
179 | trace_kvm_mmu_set_accessed_bit(table_gfn, index, |
180 | sizeof(pte)); | |
b3e4e63f MT |
181 | if (FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn, |
182 | index, pte, pte|PT_ACCESSED_MASK)) | |
183 | goto walk; | |
f3b8c964 | 184 | mark_page_dirty(vcpu->kvm, table_gfn); |
42bf3f0a | 185 | pte |= PT_ACCESSED_MASK; |
bf3f8e86 | 186 | } |
815af8d4 | 187 | |
bedbe4ee | 188 | pte_access = pt_access & FNAME(gpte_access)(vcpu, pte); |
fe135d2c | 189 | |
7819026e MT |
190 | walker->ptes[walker->level - 1] = pte; |
191 | ||
e04da980 JR |
192 | if ((walker->level == PT_PAGE_TABLE_LEVEL) || |
193 | ((walker->level == PT_DIRECTORY_LEVEL) && | |
814a59d2 | 194 | is_large_pte(pte) && |
e04da980 JR |
195 | (PTTYPE == 64 || is_pse(vcpu))) || |
196 | ((walker->level == PT_PDPE_LEVEL) && | |
814a59d2 | 197 | is_large_pte(pte) && |
e04da980 JR |
198 | is_long_mode(vcpu))) { |
199 | int lvl = walker->level; | |
200 | ||
201 | walker->gfn = gpte_to_gfn_lvl(pte, lvl); | |
202 | walker->gfn += (addr & PT_LVL_OFFSET_MASK(lvl)) | |
203 | >> PAGE_SHIFT; | |
204 | ||
205 | if (PTTYPE == 32 && | |
206 | walker->level == PT_DIRECTORY_LEVEL && | |
207 | is_cpuid_PSE36()) | |
da928521 | 208 | walker->gfn += pse36_gfn_delta(pte); |
e04da980 | 209 | |
ac79c978 | 210 | break; |
815af8d4 | 211 | } |
ac79c978 | 212 | |
fe135d2c | 213 | pt_access = pte_access; |
ac79c978 AK |
214 | --walker->level; |
215 | } | |
42bf3f0a | 216 | |
43a3795a | 217 | if (write_fault && !is_dirty_gpte(pte)) { |
b3e4e63f MT |
218 | bool ret; |
219 | ||
07420171 | 220 | trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte)); |
b3e4e63f MT |
221 | ret = FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn, index, pte, |
222 | pte|PT_DIRTY_MASK); | |
223 | if (ret) | |
224 | goto walk; | |
f3b8c964 | 225 | mark_page_dirty(vcpu->kvm, table_gfn); |
42bf3f0a | 226 | pte |= PT_DIRTY_MASK; |
7819026e | 227 | walker->ptes[walker->level - 1] = pte; |
42bf3f0a AK |
228 | } |
229 | ||
fe135d2c AK |
230 | walker->pt_access = pt_access; |
231 | walker->pte_access = pte_access; | |
232 | pgprintk("%s: pte %llx pte_access %x pt_access %x\n", | |
518c5a05 | 233 | __func__, (u64)pte, pte_access, pt_access); |
7993ba43 AK |
234 | return 1; |
235 | ||
236 | not_present: | |
237 | walker->error_code = 0; | |
238 | goto err; | |
239 | ||
240 | access_error: | |
241 | walker->error_code = PFERR_PRESENT_MASK; | |
242 | ||
243 | err: | |
244 | if (write_fault) | |
245 | walker->error_code |= PFERR_WRITE_MASK; | |
246 | if (user_fault) | |
247 | walker->error_code |= PFERR_USER_MASK; | |
73b1087e AK |
248 | if (fetch_fault) |
249 | walker->error_code |= PFERR_FETCH_MASK; | |
82725b20 DE |
250 | if (rsvd_fault) |
251 | walker->error_code |= PFERR_RSVD_MASK; | |
07420171 | 252 | trace_kvm_mmu_walker_error(walker->error_code); |
fe551881 | 253 | return 0; |
6aa8b732 AK |
254 | } |
255 | ||
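The error_code assembled on the failure paths follows the x86 page-fault error code layout, so it can be injected into the guest unchanged. A worked example, assuming the standard bit assignments (PFERR_PRESENT_MASK = 1, PFERR_WRITE_MASK = 2, PFERR_USER_MASK = 4, PFERR_RSVD_MASK = 8, PFERR_FETCH_MASK = 16):

```c
/*
 * Example: a user-mode write faults on a present but read-only gpte.
 * walk_addr() takes the access_error path, so:
 *   walker->error_code  = PFERR_PRESENT_MASK;               // 0x1
 *   walker->error_code |= PFERR_WRITE_MASK;   // write_fault -> 0x3
 *   walker->error_code |= PFERR_USER_MASK;    // user_fault  -> 0x7
 * which matches what the hardware walker would have reported.
 */
```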
ac3cd03c | 256 | static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, |
489f1d65 | 257 | u64 *spte, const void *pte) |
0028425f AK |
258 | { |
259 | pt_element_t gpte; | |
41074d07 | 260 | unsigned pte_access; |
35149e21 | 261 | pfn_t pfn; |
fbc5d139 | 262 | u64 new_spte; |
0028425f | 263 | |
0028425f | 264 | gpte = *(const pt_element_t *)pte; |
c7addb90 | 265 | if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) { |
fbc5d139 | 266 | if (!is_present_gpte(gpte)) { |
ac3cd03c | 267 | if (sp->unsync) |
fbc5d139 AK |
268 | new_spte = shadow_trap_nonpresent_pte; |
269 | else | |
270 | new_spte = shadow_notrap_nonpresent_pte; | |
271 | __set_spte(spte, new_spte); | |
272 | } | |
c7addb90 AK |
273 | return; |
274 | } | |
b8688d51 | 275 | pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte); |
ac3cd03c | 276 | pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte); |
d7824fff AK |
277 | if (gpte_to_gfn(gpte) != vcpu->arch.update_pte.gfn) |
278 | return; | |
35149e21 AL |
279 | pfn = vcpu->arch.update_pte.pfn; |
280 | if (is_error_pfn(pfn)) | |
d7824fff | 281 | return; |
e930bffe AA |
282 | if (mmu_notifier_retry(vcpu, vcpu->arch.update_pte.mmu_seq)) |
283 | return; | |
35149e21 | 284 | kvm_get_pfn(pfn); |
1403283a IE |
285 | /* |
286 | * we call mmu_set_spte() with reset_host_protection = true because |
287 | * vcpu->arch.update_pte.pfn was fetched from get_user_pages(write = 1). | |
288 | */ | |
ac3cd03c | 289 | mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0, |
cb83cad2 | 290 | is_dirty_gpte(gpte), NULL, PT_PAGE_TABLE_LEVEL, |
1403283a | 291 | gpte_to_gfn(gpte), pfn, true, true); |
0028425f AK |
292 | } |
293 | ||
6aa8b732 AK |
294 | /* |
295 | * Fetch a shadow pte for a specific level in the paging hierarchy. | |
296 | */ | |
e7a04c99 AK |
297 | static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, |
298 | struct guest_walker *gw, | |
7e4e4056 | 299 | int user_fault, int write_fault, int hlevel, |
e7a04c99 | 300 | int *ptwrite, pfn_t pfn) |
6aa8b732 | 301 | { |
abb9e0b8 | 302 | unsigned access = gw->pt_access; |
ac3cd03c | 303 | struct kvm_mmu_page *sp; |
bde89223 | 304 | u64 spte, *sptep = NULL; |
f6e2c02b | 305 | int direct; |
abb9e0b8 AK |
306 | gfn_t table_gfn; |
307 | int r; | |
e7a04c99 | 308 | int level; |
abb9e0b8 | 309 | pt_element_t curr_pte; |
e7a04c99 | 310 | struct kvm_shadow_walk_iterator iterator; |
abb9e0b8 | 311 | |
43a3795a | 312 | if (!is_present_gpte(gw->ptes[gw->level - 1])) |
e7a04c99 | 313 | return NULL; |
6aa8b732 | 314 | |
e7a04c99 AK |
315 | for_each_shadow_entry(vcpu, addr, iterator) { |
316 | level = iterator.level; | |
317 | sptep = iterator.sptep; | |
7e4e4056 | 318 | if (iterator.level == hlevel) { |
e7a04c99 AK |
319 | mmu_set_spte(vcpu, sptep, access, |
320 | gw->pte_access & access, | |
321 | user_fault, write_fault, | |
cb83cad2 | 322 | is_dirty_gpte(gw->ptes[gw->level-1]), |
852e3c19 | 323 | ptwrite, level, |
1403283a | 324 | gw->gfn, pfn, false, true); |
e7a04c99 AK |
325 | break; |
326 | } | |
6aa8b732 | 327 | |
e7a04c99 AK |
328 | if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) |
329 | continue; | |
abb9e0b8 | 330 | |
e7a04c99 | 331 | if (is_large_pte(*sptep)) { |
c5bc2242 | 332 | rmap_remove(vcpu->kvm, sptep); |
d555c333 | 333 | __set_spte(sptep, shadow_trap_nonpresent_pte); |
e7a04c99 | 334 | kvm_flush_remote_tlbs(vcpu->kvm); |
7819026e | 335 | } |
ef0197e8 | 336 | |
7e4e4056 JR |
337 | if (level <= gw->level) { |
338 | int delta = level - gw->level + 1; | |
f6e2c02b | 339 | direct = 1; |
7e4e4056 | 340 | if (!is_dirty_gpte(gw->ptes[level - delta])) |
e7a04c99 | 341 | access &= ~ACC_WRITE_MASK; |
3af1817a LJ |
342 | /* |
343 | * This is a large guest page backed by small host pages,
ac3cd03c XG |
344 | * so we set @direct (@sp->role.direct) = 1, and set
345 | * @table_gfn (@sp->gfn) to the base page frame for the linear
346 | * translation.
3af1817a LJ |
347 | */ |
348 | table_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1); | |
6aa0b9de | 349 | access &= gw->pte_access; |
e7a04c99 | 350 | } else { |
f6e2c02b | 351 | direct = 0; |
e7a04c99 AK |
352 | table_gfn = gw->table_gfn[level - 2]; |
353 | } | |
ac3cd03c | 354 | sp = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1, |
f6e2c02b AK |
355 | direct, access, sptep); |
356 | if (!direct) { | |
e7a04c99 AK |
357 | r = kvm_read_guest_atomic(vcpu->kvm, |
358 | gw->pte_gpa[level - 2], | |
359 | &curr_pte, sizeof(curr_pte)); | |
360 | if (r || curr_pte != gw->ptes[level - 2]) { | |
ac3cd03c | 361 | kvm_mmu_put_page(sp, sptep); |
e7a04c99 AK |
362 | kvm_release_pfn_clean(pfn); |
363 | sptep = NULL; | |
364 | break; | |
365 | } | |
366 | } | |
abb9e0b8 | 367 | |
ac3cd03c | 368 | spte = __pa(sp->spt) |
e7a04c99 AK |
369 | | PT_PRESENT_MASK | PT_ACCESSED_MASK |
370 | | PT_WRITABLE_MASK | PT_USER_MASK; | |
371 | *sptep = spte; | |
372 | } | |
050e6499 | 373 | |
e7a04c99 | 374 | return sptep; |
6aa8b732 AK |
375 | } |
376 | ||
6aa8b732 AK |
377 | /* |
378 | * Page fault handler. There are several causes for a page fault: | |
379 | * - there is no shadow pte for the guest pte | |
380 | * - write access through a shadow pte marked read only so that we can set | |
381 | * the dirty bit | |
382 | * - write access to a shadow pte marked read only so we can update the page | |
383 | * dirty bitmap, when userspace requests it | |
384 | * - mmio access; in this case we will never install a present shadow pte | |
385 | * - normal guest page fault due to the guest pte marked not present, not | |
386 | * writable, or not executable | |
387 | * | |
e2dec939 AK |
388 | * Returns: 1 if we need to emulate the instruction, 0 otherwise, or |
389 | * a negative value on error. | |
6aa8b732 AK |
390 | */ |
391 | static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, | |
392 | u32 error_code) | |
393 | { | |
394 | int write_fault = error_code & PFERR_WRITE_MASK; | |
6aa8b732 | 395 | int user_fault = error_code & PFERR_USER_MASK; |
73b1087e | 396 | int fetch_fault = error_code & PFERR_FETCH_MASK; |
6aa8b732 | 397 | struct guest_walker walker; |
d555c333 | 398 | u64 *sptep; |
cea0f0e7 | 399 | int write_pt = 0; |
e2dec939 | 400 | int r; |
35149e21 | 401 | pfn_t pfn; |
7e4e4056 | 402 | int level = PT_PAGE_TABLE_LEVEL; |
e930bffe | 403 | unsigned long mmu_seq; |
6aa8b732 | 404 | |
b8688d51 | 405 | pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code); |
37a7d8b0 | 406 | kvm_mmu_audit(vcpu, "pre page fault"); |
714b93da | 407 | |
e2dec939 AK |
408 | r = mmu_topup_memory_caches(vcpu); |
409 | if (r) | |
410 | return r; | |
714b93da | 411 | |
6aa8b732 | 412 | /* |
a8b876b1 | 413 | * Look up the guest pte for the faulting address. |
6aa8b732 | 414 | */ |
73b1087e AK |
415 | r = FNAME(walk_addr)(&walker, vcpu, addr, write_fault, user_fault, |
416 | fetch_fault); | |
6aa8b732 AK |
417 | |
418 | /* | |
419 | * The page is not mapped by the guest. Let the guest handle it. | |
420 | */ | |
7993ba43 | 421 | if (!r) { |
b8688d51 | 422 | pgprintk("%s: guest page fault\n", __func__); |
7993ba43 | 423 | inject_page_fault(vcpu, addr, walker.error_code); |
ad312c7c | 424 | vcpu->arch.last_pt_write_count = 0; /* reset fork detector */ |
6aa8b732 AK |
425 | return 0; |
426 | } | |
427 | ||
7e4e4056 JR |
428 | if (walker.level >= PT_DIRECTORY_LEVEL) { |
429 | level = min(walker.level, mapping_level(vcpu, walker.gfn)); | |
430 | walker.gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1); | |
05da4558 | 431 | } |
7e4e4056 | 432 | |
e930bffe | 433 | mmu_seq = vcpu->kvm->mmu_notifier_seq; |
4c2155ce | 434 | smp_rmb(); |
35149e21 | 435 | pfn = gfn_to_pfn(vcpu->kvm, walker.gfn); |
d7824fff | 436 | |
d196e343 | 437 | /* mmio */ |
bf998156 HY |
438 | if (is_error_pfn(pfn)) |
439 | return kvm_handle_bad_page(vcpu->kvm, walker.gfn, pfn); | |
d196e343 | 440 | |
aaee2c94 | 441 | spin_lock(&vcpu->kvm->mmu_lock); |
e930bffe AA |
442 | if (mmu_notifier_retry(vcpu, mmu_seq)) |
443 | goto out_unlock; | |
eb787d10 | 444 | kvm_mmu_free_some_pages(vcpu); |
d555c333 | 445 | sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault, |
7e4e4056 | 446 | level, &write_pt, pfn); |
a24e8099 | 447 | (void)sptep; |
b8688d51 | 448 | pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__, |
d555c333 | 449 | sptep, *sptep, write_pt); |
cea0f0e7 | 450 | |
a25f7e1f | 451 | if (!write_pt) |
ad312c7c | 452 | vcpu->arch.last_pt_write_count = 0; /* reset fork detector */ |
a25f7e1f | 453 | |
1165f5fe | 454 | ++vcpu->stat.pf_fixed; |
37a7d8b0 | 455 | kvm_mmu_audit(vcpu, "post page fault (fixed)"); |
aaee2c94 | 456 | spin_unlock(&vcpu->kvm->mmu_lock); |
6aa8b732 | 457 | |
cea0f0e7 | 458 | return write_pt; |
e930bffe AA |
459 | |
460 | out_unlock: | |
461 | spin_unlock(&vcpu->kvm->mmu_lock); | |
462 | kvm_release_pfn_clean(pfn); | |
463 | return 0; | |
6aa8b732 AK |
464 | } |
465 | ||
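For the return-value contract documented above (1 = emulate the instruction, 0 = fault fixed, negative = error), a hedged caller-side sketch; the real dispatch lives in kvm_mmu_page_fault() in mmu.c, and handle_shadow_fault()/emulate_faulting_insn() below are hypothetical names used only for illustration:

```c
/* Hypothetical caller acting on FNAME(page_fault)'s return contract. */
static int handle_shadow_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
{
	int r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code);

	if (r < 0)
		return r;        /* internal error, propagate */
	if (r == 0)
		return 1;        /* shadow fault fixed, resume the guest */

	/* r == 1: the write hit a shadowed page table, emulate the access */
	return emulate_faulting_insn(vcpu, cr2, error_code);
}
```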
a461930b | 466 | static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva) |
a7052897 | 467 | { |
a461930b | 468 | struct kvm_shadow_walk_iterator iterator; |
f78978aa | 469 | struct kvm_mmu_page *sp; |
08e850c6 | 470 | gpa_t pte_gpa = -1; |
a461930b AK |
471 | int level; |
472 | u64 *sptep; | |
4539b358 | 473 | int need_flush = 0; |
a461930b AK |
474 | |
475 | spin_lock(&vcpu->kvm->mmu_lock); | |
a7052897 | 476 | |
a461930b AK |
477 | for_each_shadow_entry(vcpu, gva, iterator) { |
478 | level = iterator.level; | |
479 | sptep = iterator.sptep; | |
ad218f85 | 480 | |
f78978aa | 481 | sp = page_header(__pa(sptep)); |
884a0ff0 | 482 | if (is_last_spte(*sptep, level)) { |
22c9b2d1 | 483 | int offset, shift; |
08e850c6 | 484 | |
f78978aa XG |
485 | if (!sp->unsync) |
486 | break; | |
487 | ||
22c9b2d1 XG |
488 | shift = PAGE_SHIFT - |
489 | (PT_LEVEL_BITS - PT64_LEVEL_BITS) * level; | |
490 | offset = sp->role.quadrant << shift; | |
491 | ||
492 | pte_gpa = (sp->gfn << PAGE_SHIFT) + offset; | |
08e850c6 | 493 | pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t); |
a461930b AK |
494 | |
495 | if (is_shadow_present_pte(*sptep)) { | |
496 | rmap_remove(vcpu->kvm, sptep); | |
497 | if (is_large_pte(*sptep)) | |
498 | --vcpu->kvm->stat.lpages; | |
4539b358 | 499 | need_flush = 1; |
a461930b | 500 | } |
d555c333 | 501 | __set_spte(sptep, shadow_trap_nonpresent_pte); |
a461930b | 502 | break; |
87917239 | 503 | } |
a7052897 | 504 | |
f78978aa | 505 | if (!is_shadow_present_pte(*sptep) || !sp->unsync_children) |
a461930b AK |
506 | break; |
507 | } | |
a7052897 | 508 | |
4539b358 AA |
509 | if (need_flush) |
510 | kvm_flush_remote_tlbs(vcpu->kvm); | |
08e850c6 AK |
511 | |
512 | atomic_inc(&vcpu->kvm->arch.invlpg_counter); | |
513 | ||
ad218f85 | 514 | spin_unlock(&vcpu->kvm->mmu_lock); |
08e850c6 AK |
515 | |
516 | if (pte_gpa == -1) | |
517 | return; | |
518 | ||
519 | if (mmu_topup_memory_caches(vcpu)) | |
520 | return; | |
521 | kvm_mmu_pte_write(vcpu, pte_gpa, NULL, sizeof(pt_element_t), 0); | |
a7052897 MT |
522 | } |
523 | ||
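The shift/offset computation in the invlpg path above recovers where inside the guest page-table page this shadow page's ptes begin. A hedged worked example for a 32-bit guest (assuming PAGE_SHIFT = 12, PT_LEVEL_BITS = 10 for PTTYPE == 32, and PT64_LEVEL_BITS = 9):

```c
/*
 * PTTYPE == 32, last-level spte (level == 1):
 *   shift  = PAGE_SHIFT - (PT_LEVEL_BITS - PT64_LEVEL_BITS) * level
 *          = 12 - (10 - 9) * 1 = 11
 *   offset = sp->role.quadrant << 11
 *
 * A 32-bit guest page table holds 1024 4-byte ptes, but each shadow page
 * covers only 512 of them, so quadrant 0 starts at byte 0 and quadrant 1
 * at byte 0x800 of the guest page.  The gpte address then becomes:
 *   pte_gpa = (sp->gfn << PAGE_SHIFT) + offset
 *           + (sptep - sp->spt) * sizeof(pt_element_t);   // 4 bytes here
 *
 * For PTTYPE == 64 the two LEVEL_BITS values are equal, shift is 12 and
 * role.quadrant is 0, so offset is always 0.
 */
```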
1871c602 GN |
524 | static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access, |
525 | u32 *error) | |
6aa8b732 AK |
526 | { |
527 | struct guest_walker walker; | |
e119d117 AK |
528 | gpa_t gpa = UNMAPPED_GVA; |
529 | int r; | |
6aa8b732 | 530 | |
1871c602 GN |
531 | r = FNAME(walk_addr)(&walker, vcpu, vaddr, |
532 | !!(access & PFERR_WRITE_MASK), | |
533 | !!(access & PFERR_USER_MASK), | |
534 | !!(access & PFERR_FETCH_MASK)); | |
6aa8b732 | 535 | |
e119d117 | 536 | if (r) { |
1755fbcc | 537 | gpa = gfn_to_gpa(walker.gfn); |
e119d117 | 538 | gpa |= vaddr & ~PAGE_MASK; |
1871c602 GN |
539 | } else if (error) |
540 | *error = walker.error_code; | |
6aa8b732 AK |
541 | |
542 | return gpa; | |
543 | } | |
544 | ||
c7addb90 AK |
545 | static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu, |
546 | struct kvm_mmu_page *sp) | |
547 | { | |
eab9f71f AK |
548 | int i, j, offset, r; |
549 | pt_element_t pt[256 / sizeof(pt_element_t)]; | |
550 | gpa_t pte_gpa; | |
c7addb90 | 551 | |
f6e2c02b | 552 | if (sp->role.direct |
e5a4c8ca | 553 | || (PTTYPE == 32 && sp->role.level > PT_PAGE_TABLE_LEVEL)) { |
c7addb90 AK |
554 | nonpaging_prefetch_page(vcpu, sp); |
555 | return; | |
556 | } | |
557 | ||
eab9f71f AK |
558 | pte_gpa = gfn_to_gpa(sp->gfn); |
559 | if (PTTYPE == 32) { | |
e5a4c8ca | 560 | offset = sp->role.quadrant << PT64_LEVEL_BITS; |
eab9f71f AK |
561 | pte_gpa += offset * sizeof(pt_element_t); |
562 | } | |
7ec54588 | 563 | |
eab9f71f AK |
564 | for (i = 0; i < PT64_ENT_PER_PAGE; i += ARRAY_SIZE(pt)) { |
565 | r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa, pt, sizeof pt); | |
566 | pte_gpa += ARRAY_SIZE(pt) * sizeof(pt_element_t); | |
567 | for (j = 0; j < ARRAY_SIZE(pt); ++j) | |
43a3795a | 568 | if (r || is_present_gpte(pt[j])) |
eab9f71f AK |
569 | sp->spt[i+j] = shadow_trap_nonpresent_pte; |
570 | else | |
571 | sp->spt[i+j] = shadow_notrap_nonpresent_pte; | |
7ec54588 | 572 | } |
c7addb90 AK |
573 | } |
574 | ||
e8bc217a MT |
575 | /* |
576 | * Using the cached information from sp->gfns is safe because: | |
577 | * - The spte has a reference to the struct page, so the pfn for a given gfn | |
578 | * can't change unless all sptes pointing to it are nuked first. | |
579 | * - Alias changes zap the entire shadow cache. | |
580 | */ | |
be71e061 XG |
581 | static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, |
582 | bool clear_unsync) | |
e8bc217a MT |
583 | { |
584 | int i, offset, nr_present; | |
1403283a | 585 | bool reset_host_protection; |
51fb60d8 | 586 | gpa_t first_pte_gpa; |
e8bc217a MT |
587 | |
588 | offset = nr_present = 0; | |
589 | ||
2032a93d LJ |
590 | /* direct kvm_mmu_page can not be unsync. */ |
591 | BUG_ON(sp->role.direct); | |
592 | ||
e8bc217a MT |
593 | if (PTTYPE == 32) |
594 | offset = sp->role.quadrant << PT64_LEVEL_BITS; | |
595 | ||
51fb60d8 GJ |
596 | first_pte_gpa = gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t); |
597 | ||
e8bc217a MT |
598 | for (i = 0; i < PT64_ENT_PER_PAGE; i++) { |
599 | unsigned pte_access; | |
600 | pt_element_t gpte; | |
601 | gpa_t pte_gpa; | |
f55c3f41 | 602 | gfn_t gfn; |
e8bc217a MT |
603 | |
604 | if (!is_shadow_present_pte(sp->spt[i])) | |
605 | continue; | |
606 | ||
51fb60d8 | 607 | pte_gpa = first_pte_gpa + i * sizeof(pt_element_t); |
e8bc217a MT |
608 | |
609 | if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte, | |
610 | sizeof(pt_element_t))) | |
611 | return -EINVAL; | |
612 | ||
f55c3f41 XG |
613 | gfn = gpte_to_gfn(gpte); |
614 | if (unalias_gfn(vcpu->kvm, gfn) != sp->gfns[i] || | |
615 | !is_present_gpte(gpte) || !(gpte & PT_ACCESSED_MASK)) { | |
e8bc217a MT |
616 | u64 nonpresent; |
617 | ||
618 | rmap_remove(vcpu->kvm, &sp->spt[i]); | |
be71e061 | 619 | if (is_present_gpte(gpte) || !clear_unsync) |
e8bc217a MT |
620 | nonpresent = shadow_trap_nonpresent_pte; |
621 | else | |
622 | nonpresent = shadow_notrap_nonpresent_pte; | |
d555c333 | 623 | __set_spte(&sp->spt[i], nonpresent); |
e8bc217a MT |
624 | continue; |
625 | } | |
626 | ||
627 | nr_present++; | |
628 | pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte); | |
1403283a IE |
629 | if (!(sp->spt[i] & SPTE_HOST_WRITEABLE)) { |
630 | pte_access &= ~ACC_WRITE_MASK; | |
631 | reset_host_protection = 0; | |
632 | } else { | |
633 | reset_host_protection = 1; | |
634 | } | |
e8bc217a | 635 | set_spte(vcpu, &sp->spt[i], pte_access, 0, 0, |
7e4e4056 | 636 | is_dirty_gpte(gpte), PT_PAGE_TABLE_LEVEL, gfn, |
1403283a IE |
637 | spte_to_pfn(sp->spt[i]), true, false, |
638 | reset_host_protection); | |
e8bc217a MT |
639 | } |
640 | ||
641 | return !nr_present; | |
642 | } | |
643 | ||
6aa8b732 AK |
644 | #undef pt_element_t |
645 | #undef guest_walker | |
646 | #undef FNAME | |
647 | #undef PT_BASE_ADDR_MASK | |
648 | #undef PT_INDEX | |
6aa8b732 | 649 | #undef PT_LEVEL_MASK |
e04da980 JR |
650 | #undef PT_LVL_ADDR_MASK |
651 | #undef PT_LVL_OFFSET_MASK | |
c7addb90 | 652 | #undef PT_LEVEL_BITS |
cea0f0e7 | 653 | #undef PT_MAX_FULL_LEVELS |
5fb07ddb | 654 | #undef gpte_to_gfn |
e04da980 | 655 | #undef gpte_to_gfn_lvl |
b3e4e63f | 656 | #undef CMPXCHG |