/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

/*
 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
 * so the code in this file is compiled twice, once per pte size.
 */
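
/*
 * For reference, mmu.c instantiates both variants along these lines
 * (a sketch; the exact include site lives in mmu.c):
 *
 *	#define PTTYPE 64
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 *	#define PTTYPE 32
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 */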

#if PTTYPE == 64
	#define pt_element_t u64
	#define guest_walker guest_walker64
	#define FNAME(name) paging##64_##name
	#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
	#define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl)
	#define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
	#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_BITS PT64_LEVEL_BITS
	#ifdef CONFIG_X86_64
	#define PT_MAX_FULL_LEVELS 4
	#define CMPXCHG cmpxchg
	#else
	#define CMPXCHG cmpxchg64
	#define PT_MAX_FULL_LEVELS 2
	#endif
#elif PTTYPE == 32
	#define pt_element_t u32
	#define guest_walker guest_walker32
	#define FNAME(name) paging##32_##name
	#define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
	#define PT_LVL_ADDR_MASK(lvl) PT32_LVL_ADDR_MASK(lvl)
	#define PT_LVL_OFFSET_MASK(lvl) PT32_LVL_OFFSET_MASK(lvl)
	#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
	#define PT_LEVEL_BITS PT32_LEVEL_BITS
	#define PT_MAX_FULL_LEVELS 2
	#define CMPXCHG cmpxchg
#else
	#error Invalid PTTYPE value
#endif

#define gpte_to_gfn_lvl FNAME(gpte_to_gfn_lvl)
#define gpte_to_gfn(pte) gpte_to_gfn_lvl((pte), PT_PAGE_TABLE_LEVEL)
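
/*
 * For example, FNAME(walk_addr) expands to paging64_walk_addr in the
 * PTTYPE == 64 instantiation and to paging32_walk_addr in the
 * PTTYPE == 32 one, so each pte size gets its own copy of every
 * function below.
 */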

/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
struct guest_walker {
	int level;				/* level the walk ended at */
	gfn_t table_gfn[PT_MAX_FULL_LEVELS];	/* gfn of each table visited */
	pt_element_t ptes[PT_MAX_FULL_LEVELS];	/* gpte read at each level */
	pt_element_t prefetch_ptes[PTE_PREFETCH_NUM];
	gpa_t pte_gpa[PT_MAX_FULL_LEVELS];	/* gpa of each gpte visited */
	unsigned pt_access;		/* access allowed by the upper levels */
	unsigned pte_access;		/* access allowed by the final pte */
	gfn_t gfn;			/* gfn the address translates to */
	struct x86_exception fault;	/* fault to inject if the walk failed */
};

static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl)
{
	return (gpte & PT_LVL_ADDR_MASK(lvl)) >> PAGE_SHIFT;
}

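/*
 * Atomically update a guest pte from orig_pte to new_pte.  Returns true
 * if the pte changed under us (the cmpxchg lost the race, so the caller
 * must restart the walk), false if our update was applied.
 */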
static bool FNAME(cmpxchg_gpte)(struct kvm *kvm,
				gfn_t table_gfn, unsigned index,
				pt_element_t orig_pte, pt_element_t new_pte)
{
	pt_element_t ret;
	pt_element_t *table;
	struct page *page;

	page = gfn_to_page(kvm, table_gfn);

	table = kmap_atomic(page, KM_USER0);
	ret = CMPXCHG(&table[index], orig_pte, new_pte);
	kunmap_atomic(table, KM_USER0);

	kvm_release_page_dirty(page);

	return (ret != orig_pte);
}

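/*
 * Derive the kvm access permission bits (ACC_*_MASK) for a guest pte:
 * write and user permission come straight from the pte, and exec is
 * granted unless the vcpu has NX enabled and the pte's NX bit is set
 * (64-bit ptes only).
 */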
static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte)
{
	unsigned access;

	access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
#if PTTYPE == 64
	if (vcpu->arch.mmu.nx)
		access &= ~(gpte >> PT64_NX_SHIFT);
#endif
	return access;
}

/*
 * Fetch a guest pte for a guest virtual address, filling in @walker with
 * the result.  Returns 1 if the walk succeeded; on failure returns 0 and
 * records the fault to be injected in walker->fault.
 */
static int FNAME(walk_addr_generic)(struct guest_walker *walker,
				    struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				    gva_t addr, u32 access)
{
	pt_element_t pte;
	gfn_t table_gfn;
	unsigned index, pt_access, uninitialized_var(pte_access);
	gpa_t pte_gpa;
	bool eperm, present, rsvd_fault;
	int offset, write_fault, user_fault, fetch_fault;

	write_fault = access & PFERR_WRITE_MASK;
	user_fault = access & PFERR_USER_MASK;
	fetch_fault = access & PFERR_FETCH_MASK;

	trace_kvm_mmu_pagetable_walk(addr, write_fault, user_fault,
				     fetch_fault);
walk:
	present = true;
	eperm = rsvd_fault = false;
	walker->level = mmu->root_level;
	pte = mmu->get_cr3(vcpu);

#if PTTYPE == 64
	if (walker->level == PT32E_ROOT_LEVEL) {
		pte = kvm_pdptr_read_mmu(vcpu, mmu, (addr >> 30) & 3);
		trace_kvm_mmu_paging_element(pte, walker->level);
		if (!is_present_gpte(pte)) {
			present = false;
			goto error;
		}
		--walker->level;
	}
#endif
	ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
	       (mmu->get_cr3(vcpu) & CR3_NONPAE_RESERVED_BITS) == 0);

	pt_access = ACC_ALL;

	for (;;) {
		index = PT_INDEX(addr, walker->level);

		table_gfn = gpte_to_gfn(pte);
		offset = index * sizeof(pt_element_t);
		pte_gpa = gfn_to_gpa(table_gfn) + offset;
		walker->table_gfn[walker->level - 1] = table_gfn;
		walker->pte_gpa[walker->level - 1] = pte_gpa;

		if (kvm_read_guest_page_mmu(vcpu, mmu, table_gfn, &pte,
					    offset, sizeof(pte),
					    PFERR_USER_MASK|PFERR_WRITE_MASK)) {
			present = false;
			break;
		}

		trace_kvm_mmu_paging_element(pte, walker->level);

		if (!is_present_gpte(pte)) {
			present = false;
			break;
		}

		/*
		 * Check reserved bits against the mmu we are walking,
		 * which for a nested walk is not vcpu->arch.mmu.
		 */
		if (is_rsvd_bits_set(mmu, pte, walker->level)) {
			rsvd_fault = true;
			break;
		}

		if (write_fault && !is_writable_pte(pte))
			if (user_fault || is_write_protection(vcpu))
				eperm = true;

		if (user_fault && !(pte & PT_USER_MASK))
			eperm = true;

#if PTTYPE == 64
		if (fetch_fault && (pte & PT64_NX_MASK))
			eperm = true;
#endif

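		/*
		 * Set the accessed bit on the way down, as hardware would.
		 * If the cmpxchg observes that the pte changed under us,
		 * another cpu (or the guest) raced with this walk, so
		 * restart from the top rather than use stale data.
		 */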
		if (!eperm && !rsvd_fault && !(pte & PT_ACCESSED_MASK)) {
			trace_kvm_mmu_set_accessed_bit(table_gfn, index,
						       sizeof(pte));
			if (FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn,
			    index, pte, pte|PT_ACCESSED_MASK))
				goto walk;
			mark_page_dirty(vcpu->kvm, table_gfn);
			pte |= PT_ACCESSED_MASK;
		}

		pte_access = pt_access & FNAME(gpte_access)(vcpu, pte);

		walker->ptes[walker->level - 1] = pte;

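		/*
		 * Stop at the last level, or earlier at a large page:
		 * a 4M pde (32-bit paging with PSE), a 2M pde (PAE or
		 * long mode), or a 1G pdpte (long mode only).
		 */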
		if ((walker->level == PT_PAGE_TABLE_LEVEL) ||
		    ((walker->level == PT_DIRECTORY_LEVEL) &&
		     is_large_pte(pte) &&
		     (PTTYPE == 64 || is_pse(vcpu))) ||
		    ((walker->level == PT_PDPE_LEVEL) &&
		     is_large_pte(pte) &&
		     mmu->root_level == PT64_ROOT_LEVEL)) {
			int lvl = walker->level;
			gpa_t real_gpa;
			gfn_t gfn;
			u32 ac;

			gfn = gpte_to_gfn_lvl(pte, lvl);
			gfn += (addr & PT_LVL_OFFSET_MASK(lvl)) >> PAGE_SHIFT;

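			/*
			 * 32-bit guests using PSE-36 keep physical address
			 * bits above bit 31 inside the 4M pde itself; fold
			 * them into the gfn.
			 */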
			if (PTTYPE == 32 &&
			    walker->level == PT_DIRECTORY_LEVEL &&
			    is_cpuid_PSE36())
				gfn += pse36_gfn_delta(pte);

			ac = write_fault | fetch_fault | user_fault;

			real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn),
						      ac);
			if (real_gpa == UNMAPPED_GVA)
				return 0;

			walker->gfn = real_gpa >> PAGE_SHIFT;

			break;
		}

		pt_access = pte_access;
		--walker->level;
	}

	if (!present || eperm || rsvd_fault)
		goto error;

	if (write_fault && !is_dirty_gpte(pte)) {
		bool ret;

		trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
		ret = FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn, index, pte,
			    pte|PT_DIRTY_MASK);
		if (ret)
			goto walk;
		mark_page_dirty(vcpu->kvm, table_gfn);
		pte |= PT_DIRTY_MASK;
		walker->ptes[walker->level - 1] = pte;
	}

	walker->pt_access = pt_access;
	walker->pte_access = pte_access;
	pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
		 __func__, (u64)pte, pte_access, pt_access);
	return 1;

error:
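	/*
	 * Build the architectural #PF error code from the walk results.
	 * write_fault and user_fault already sit at the PFERR_WRITE_MASK
	 * and PFERR_USER_MASK bit positions, so they can be or'ed in
	 * directly.
	 */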
	walker->fault.vector = PF_VECTOR;
	walker->fault.error_code_valid = true;
	walker->fault.error_code = 0;
	if (present)
		walker->fault.error_code |= PFERR_PRESENT_MASK;

	walker->fault.error_code |= write_fault | user_fault;

	if (fetch_fault && mmu->nx)
		walker->fault.error_code |= PFERR_FETCH_MASK;
	if (rsvd_fault)
		walker->fault.error_code |= PFERR_RSVD_MASK;

	walker->fault.address = addr;
	walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu;

	trace_kvm_mmu_walker_error(walker->fault.error_code);
	return 0;
}

static int FNAME(walk_addr)(struct guest_walker *walker,
			    struct kvm_vcpu *vcpu, gva_t addr, u32 access)
{
	return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.mmu, addr,
					access);
}

static int FNAME(walk_addr_nested)(struct guest_walker *walker,
				   struct kvm_vcpu *vcpu, gva_t addr,
				   u32 access)
{
	return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.nested_mmu,
					addr, access);
}

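/*
 * A gpte is not worth shadowing if it is non-present, has reserved bits
 * set, or has never been accessed.  In that case drop the spte and tell
 * the caller to skip it.  A not-present gpte in a page that is not
 * unsync can be marked notrap, so the guest takes faults on it without
 * exiting to kvm.
 */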
static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
					 struct kvm_mmu_page *sp, u64 *spte,
					 pt_element_t gpte)
{
	u64 nonpresent = shadow_trap_nonpresent_pte;

	if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
		goto no_present;

	if (!is_present_gpte(gpte)) {
		if (!sp->unsync)
			nonpresent = shadow_notrap_nonpresent_pte;
		goto no_present;
	}

	if (!(gpte & PT_ACCESSED_MASK))
		goto no_present;

	return false;

no_present:
	drop_spte(vcpu->kvm, spte, nonpresent);
	return true;
}

static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			      u64 *spte, const void *pte)
{
	pt_element_t gpte;
	unsigned pte_access;
	pfn_t pfn;

	gpte = *(const pt_element_t *)pte;
	if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
		return;

	pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
	pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
	pfn = vcpu->arch.update_pte.pfn;
	if (is_error_pfn(pfn))
		return;
	if (mmu_notifier_retry(vcpu, vcpu->arch.update_pte.mmu_seq))
		return;
	kvm_get_pfn(pfn);
	/*
	 * We call mmu_set_spte() with host_writable = true because
	 * vcpu->arch.update_pte.pfn was fetched from get_user_pages(write = 1).
	 */
	mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0,
		     is_dirty_gpte(gpte), NULL, PT_PAGE_TABLE_LEVEL,
		     gpte_to_gfn(gpte), pfn, true, true);
}

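/*
 * Re-read a guest pte after the shadow page that maps it has been write
 * protected, to detect modifications that raced with the walk.  At the
 * last level a whole aligned window of PTE_PREFETCH_NUM gptes is read in
 * one go, so FNAME(pte_prefetch) can reuse it.
 */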
static bool FNAME(gpte_changed)(struct kvm_vcpu *vcpu,
				struct guest_walker *gw, int level)
{
	pt_element_t curr_pte;
	gpa_t base_gpa, pte_gpa = gw->pte_gpa[level - 1];
	u64 mask;
	int r, index;

	if (level == PT_PAGE_TABLE_LEVEL) {
		mask = PTE_PREFETCH_NUM * sizeof(pt_element_t) - 1;
		base_gpa = pte_gpa & ~mask;
		index = (pte_gpa - base_gpa) / sizeof(pt_element_t);

		r = kvm_read_guest_atomic(vcpu->kvm, base_gpa,
				gw->prefetch_ptes, sizeof(gw->prefetch_ptes));
		curr_pte = gw->prefetch_ptes[index];
	} else
		r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa,
				  &curr_pte, sizeof(curr_pte));

	return r || curr_pte != gw->ptes[level - 1];
}

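/*
 * Speculatively map the gptes adjacent to the faulting one, so nearby
 * accesses do not each take a page fault.  Only last-level indirect
 * shadow pages are handled here; direct pages are prefetched by
 * __direct_pte_prefetch().
 */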
static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
				u64 *sptep)
{
	struct kvm_mmu_page *sp;
	pt_element_t *gptep = gw->prefetch_ptes;
	u64 *spte;
	int i;

	sp = page_header(__pa(sptep));

	if (sp->role.level > PT_PAGE_TABLE_LEVEL)
		return;

	if (sp->role.direct)
		return __direct_pte_prefetch(vcpu, sp, sptep);

	i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
	spte = sp->spt + i;

	for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
		pt_element_t gpte;
		unsigned pte_access;
		gfn_t gfn;
		pfn_t pfn;
		bool dirty;

		if (spte == sptep)
			continue;

		if (*spte != shadow_trap_nonpresent_pte)
			continue;

		gpte = gptep[i];

		if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
			continue;

		pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
		gfn = gpte_to_gfn(gpte);
		dirty = is_dirty_gpte(gpte);
		pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
				      (pte_access & ACC_WRITE_MASK) && dirty);
		if (is_error_pfn(pfn)) {
			kvm_release_pfn_clean(pfn);
			break;
		}

		mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0,
			     dirty, NULL, PT_PAGE_TABLE_LEVEL, gfn,
			     pfn, true, true);
	}
}

/*
 * Fetch a shadow pte for a specific level in the paging hierarchy,
 * allocating any missing intermediate shadow pages on the way down.
 * Returns a pointer to the final spte, or NULL if a guest pte changed
 * while the mapping was being installed.
 */
static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
			 struct guest_walker *gw,
			 int user_fault, int write_fault, int hlevel,
			 int *ptwrite, pfn_t pfn, bool map_writable,
			 bool prefault)
{
	unsigned access = gw->pt_access;
	struct kvm_mmu_page *sp = NULL;
	bool dirty = is_dirty_gpte(gw->ptes[gw->level - 1]);
	int top_level;
	unsigned direct_access;
	struct kvm_shadow_walk_iterator it;

	if (!is_present_gpte(gw->ptes[gw->level - 1]))
		return NULL;

	direct_access = gw->pt_access & gw->pte_access;
	if (!dirty)
		direct_access &= ~ACC_WRITE_MASK;

	top_level = vcpu->arch.mmu.root_level;
	if (top_level == PT32E_ROOT_LEVEL)
		top_level = PT32_ROOT_LEVEL;
	/*
	 * Verify that the top-level gpte is still there.  Since the page
	 * is a root page, it is either write protected (and cannot be
	 * changed from now on) or it is invalid (in which case, we don't
	 * really care if it changes underneath us after this point).
	 */
	if (FNAME(gpte_changed)(vcpu, gw, top_level))
		goto out_gpte_changed;

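	/*
	 * Walk down the levels that are covered by guest page tables,
	 * allocating indirect shadow pages as needed and re-checking
	 * each gpte once the table containing it has been write
	 * protected.
	 */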
	for (shadow_walk_init(&it, vcpu, addr);
	     shadow_walk_okay(&it) && it.level > gw->level;
	     shadow_walk_next(&it)) {
		gfn_t table_gfn;

		drop_large_spte(vcpu, it.sptep);

		sp = NULL;
		if (!is_shadow_present_pte(*it.sptep)) {
			table_gfn = gw->table_gfn[it.level - 2];
			sp = kvm_mmu_get_page(vcpu, table_gfn, addr, it.level-1,
					      false, access, it.sptep);
		}

		/*
		 * Verify that the gpte in the page we've just write
		 * protected is still there.
		 */
		if (FNAME(gpte_changed)(vcpu, gw, it.level - 1))
			goto out_gpte_changed;

		if (sp)
			link_shadow_page(it.sptep, sp);
	}

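	/*
	 * The guest mapping is larger than the page size the host will
	 * use (hlevel): finish the walk with direct shadow pages that
	 * split the large guest page into host-sized pieces.
	 */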
	for (;
	     shadow_walk_okay(&it) && it.level > hlevel;
	     shadow_walk_next(&it)) {
		gfn_t direct_gfn;

		validate_direct_spte(vcpu, it.sptep, direct_access);

		drop_large_spte(vcpu, it.sptep);

		if (is_shadow_present_pte(*it.sptep))
			continue;

		direct_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);

		sp = kvm_mmu_get_page(vcpu, direct_gfn, addr, it.level-1,
				      true, direct_access, it.sptep);
		link_shadow_page(it.sptep, sp);
	}

	mmu_set_spte(vcpu, it.sptep, access, gw->pte_access & access,
		     user_fault, write_fault, dirty, ptwrite, it.level,
		     gw->gfn, pfn, prefault, map_writable);
	FNAME(pte_prefetch)(vcpu, gw, it.sptep);

	return it.sptep;

out_gpte_changed:
	if (sp)
		kvm_mmu_put_page(sp, it.sptep);
	kvm_release_pfn_clean(pfn);
	return NULL;
}

/*
 * Page fault handler.  There are several causes for a page fault:
 * - there is no shadow pte for the guest pte
 * - write access through a shadow pte marked read only so that we can set
 *   the dirty bit
 * - write access to a shadow pte marked read only so we can update the page
 *   dirty bitmap, when userspace requests it
 * - mmio access; in this case we will never install a present shadow pte
 * - normal guest page fault due to the guest pte marked not present, not
 *   writable, or not executable
 *
 * Returns: 1 if we need to emulate the instruction, 0 otherwise, or a
 * negative value on error.
 */
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
			     bool prefault)
{
	int write_fault = error_code & PFERR_WRITE_MASK;
	int user_fault = error_code & PFERR_USER_MASK;
	struct guest_walker walker;
	u64 *sptep;
	int write_pt = 0;
	int r;
	pfn_t pfn;
	int level = PT_PAGE_TABLE_LEVEL;
	int force_pt_level;
	unsigned long mmu_seq;
	bool map_writable;

	pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	/*
	 * Look up the guest pte for the faulting address.
	 */
	r = FNAME(walk_addr)(&walker, vcpu, addr, error_code);

	/*
	 * The page is not mapped by the guest.  Let the guest handle it.
	 */
	if (!r) {
		pgprintk("%s: guest page fault\n", __func__);
		if (!prefault) {
			inject_page_fault(vcpu, &walker.fault);
			/* reset fork detector */
			vcpu->arch.last_pt_write_count = 0;
		}
		return 0;
	}

	if (walker.level >= PT_DIRECTORY_LEVEL)
		force_pt_level = mapping_level_dirty_bitmap(vcpu, walker.gfn);
	else
		force_pt_level = 1;
	if (!force_pt_level) {
		level = min(walker.level, mapping_level(vcpu, walker.gfn));
		walker.gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);
	}

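	/*
	 * Snapshot the mmu notifier sequence count before looking up the
	 * pfn; mmu_notifier_retry() under mmu_lock below detects
	 * invalidations that ran in between, in which case the spte must
	 * not be installed.
	 */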
	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();

	if (try_async_pf(vcpu, prefault, walker.gfn, addr, &pfn, write_fault,
			 &map_writable))
		return 0;

	/* mmio */
	if (is_error_pfn(pfn))
		return kvm_handle_bad_page(vcpu->kvm, walker.gfn, pfn);

	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu, mmu_seq))
		goto out_unlock;

	trace_kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
	kvm_mmu_free_some_pages(vcpu);
	if (!force_pt_level)
		transparent_hugepage_adjust(vcpu, &walker.gfn, &pfn, &level);
	sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
			     level, &write_pt, pfn, map_writable, prefault);
	(void)sptep;	/* silence set-but-unused warnings when pgprintk() compiles out */
	pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__,
		 sptep, *sptep, write_pt);

	if (!write_pt)
		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */

	++vcpu->stat.pf_fixed;
	trace_kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
	spin_unlock(&vcpu->kvm->mmu_lock);

	return write_pt;

out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return 0;
}

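/*
 * Emulate guest INVLPG: zap the last-level spte covering @gva in an
 * unsync shadow page and, after dropping mmu_lock, refresh it from the
 * guest pte so the next access does not have to fault.
 */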
static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
{
	struct kvm_shadow_walk_iterator iterator;
	struct kvm_mmu_page *sp;
	gpa_t pte_gpa = -1;
	int level;
	u64 *sptep;
	int need_flush = 0;

	spin_lock(&vcpu->kvm->mmu_lock);

	for_each_shadow_entry(vcpu, gva, iterator) {
		level = iterator.level;
		sptep = iterator.sptep;

		sp = page_header(__pa(sptep));
		if (is_last_spte(*sptep, level)) {
			int offset, shift;

			if (!sp->unsync)
				break;

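			/*
			 * Compute the gpa of the guest pte that this spte
			 * shadows.  For 32-bit guests a shadow page covers
			 * only part of the guest page table (half at the
			 * pte level, a quarter at the pde level), and
			 * role.quadrant selects which part; for 64-bit
			 * ptes the quadrant is always 0 and the offset
			 * vanishes.
			 */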
			shift = PAGE_SHIFT -
				  (PT_LEVEL_BITS - PT64_LEVEL_BITS) * level;
			offset = sp->role.quadrant << shift;

			pte_gpa = (sp->gfn << PAGE_SHIFT) + offset;
			pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);

			if (is_shadow_present_pte(*sptep)) {
				if (is_large_pte(*sptep))
					--vcpu->kvm->stat.lpages;
				drop_spte(vcpu->kvm, sptep,
					  shadow_trap_nonpresent_pte);
				need_flush = 1;
			} else
				__set_spte(sptep, shadow_trap_nonpresent_pte);
			break;
		}

		if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
			break;
	}

	if (need_flush)
		kvm_flush_remote_tlbs(vcpu->kvm);

	atomic_inc(&vcpu->kvm->arch.invlpg_counter);

	spin_unlock(&vcpu->kvm->mmu_lock);

	if (pte_gpa == -1)
		return;

	if (mmu_topup_memory_caches(vcpu))
		return;
	kvm_mmu_pte_write(vcpu, pte_gpa, NULL, sizeof(pt_element_t), 0);
}

static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
			       struct x86_exception *exception)
{
	struct guest_walker walker;
	gpa_t gpa = UNMAPPED_GVA;
	int r;

	r = FNAME(walk_addr)(&walker, vcpu, vaddr, access);

	if (r) {
		gpa = gfn_to_gpa(walker.gfn);
		gpa |= vaddr & ~PAGE_MASK;
	} else if (exception)
		*exception = walker.fault;

	return gpa;
}

static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
				      u32 access,
				      struct x86_exception *exception)
{
	struct guest_walker walker;
	gpa_t gpa = UNMAPPED_GVA;
	int r;

	r = FNAME(walk_addr_nested)(&walker, vcpu, vaddr, access);

	if (r) {
		gpa = gfn_to_gpa(walker.gfn);
		gpa |= vaddr & ~PAGE_MASK;
	} else if (exception)
		*exception = walker.fault;

	return gpa;
}

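/*
 * Pre-populate a newly shadowed page table: guest ptes that are not
 * present are marked notrap so the guest takes the fault directly,
 * while present ones are left trapping so the first access exits to
 * kvm and gets mapped.
 */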
static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
				 struct kvm_mmu_page *sp)
{
	int i, j, offset, r;
	pt_element_t pt[256 / sizeof(pt_element_t)];
	gpa_t pte_gpa;

	if (sp->role.direct
	    || (PTTYPE == 32 && sp->role.level > PT_PAGE_TABLE_LEVEL)) {
		nonpaging_prefetch_page(vcpu, sp);
		return;
	}

	pte_gpa = gfn_to_gpa(sp->gfn);
	if (PTTYPE == 32) {
		offset = sp->role.quadrant << PT64_LEVEL_BITS;
		pte_gpa += offset * sizeof(pt_element_t);
	}

	for (i = 0; i < PT64_ENT_PER_PAGE; i += ARRAY_SIZE(pt)) {
		r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa, pt, sizeof(pt));
		pte_gpa += ARRAY_SIZE(pt) * sizeof(pt_element_t);
		for (j = 0; j < ARRAY_SIZE(pt); ++j)
			if (r || is_present_gpte(pt[j]))
				sp->spt[i+j] = shadow_trap_nonpresent_pte;
			else
				sp->spt[i+j] = shadow_notrap_nonpresent_pte;
	}
}

/*
 * Using the cached information from sp->gfns is safe because:
 * - The spte has a reference to the struct page, so the pfn for a given gfn
 *   can't change unless all sptes pointing to it are nuked first.
 *
 * Note:
 *   We should flush all tlbs if a spte is dropped even though the guest is
 *   responsible for it.  If we don't, kvm_mmu_notifier_invalidate_page and
 *   kvm_mmu_notifier_invalidate_range_start may see that the mapped page is
 *   no longer used by the guest and skip the tlb flush, letting the guest
 *   keep accessing the freed page through a stale tlb entry.  We increase
 *   kvm->tlbs_dirty to delay the tlb flush in this case.
 */
static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
	int i, offset, nr_present;
	bool host_writable;
	gpa_t first_pte_gpa;

	offset = nr_present = 0;

	/* direct kvm_mmu_page can not be unsync. */
	BUG_ON(sp->role.direct);

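	/* See FNAME(invlpg) for the role.quadrant offset calculation. */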
	if (PTTYPE == 32)
		offset = sp->role.quadrant << PT64_LEVEL_BITS;

	first_pte_gpa = gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);

	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
		unsigned pte_access;
		pt_element_t gpte;
		gpa_t pte_gpa;
		gfn_t gfn;

		if (!is_shadow_present_pte(sp->spt[i]))
			continue;

		pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);

		if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
					  sizeof(pt_element_t)))
			return -EINVAL;

		gfn = gpte_to_gfn(gpte);

		if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
			vcpu->kvm->tlbs_dirty++;
			continue;
		}

		if (gfn != sp->gfns[i]) {
			drop_spte(vcpu->kvm, &sp->spt[i],
				  shadow_trap_nonpresent_pte);
			vcpu->kvm->tlbs_dirty++;
			continue;
		}

		nr_present++;
		pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
		host_writable = sp->spt[i] & SPTE_HOST_WRITEABLE;

		set_spte(vcpu, &sp->spt[i], pte_access, 0, 0,
			 is_dirty_gpte(gpte), PT_PAGE_TABLE_LEVEL, gfn,
			 spte_to_pfn(sp->spt[i]), true, false,
			 host_writable);
	}

	return !nr_present;
}

#undef pt_element_t
#undef guest_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef PT_LVL_ADDR_MASK
#undef PT_LVL_OFFSET_MASK
#undef PT_LEVEL_BITS
#undef PT_MAX_FULL_LEVELS
#undef gpte_to_gfn
#undef gpte_to_gfn_lvl
#undef CMPXCHG