| 1 | /* |
| 2 | * Kernel-based Virtual Machine driver for Linux |
| 3 | * |
| 4 | * This module enables machines with Intel VT-x extensions to run virtual |
| 5 | * machines without emulation or binary translation. |
| 6 | * |
| 7 | * MMU support |
| 8 | * |
| 9 | * Copyright (C) 2006 Qumranet, Inc. |
| 10 | * Copyright 2010 Red Hat, Inc. and/or its affiliates. |
| 11 | * |
| 12 | * Authors: |
| 13 | * Yaniv Kamay <yaniv@qumranet.com> |
| 14 | * Avi Kivity <avi@qumranet.com> |
| 15 | * |
| 16 | * This work is licensed under the terms of the GNU GPL, version 2. See |
| 17 | * the COPYING file in the top-level directory. |
| 18 | * |
| 19 | */ |
| 20 | |
| 21 | #include "irq.h" |
| 22 | #include "mmu.h" |
| 23 | #include "x86.h" |
| 24 | #include "kvm_cache_regs.h" |
| 25 | |
| 26 | #include <linux/kvm_host.h> |
| 27 | #include <linux/types.h> |
| 28 | #include <linux/string.h> |
| 29 | #include <linux/mm.h> |
| 30 | #include <linux/highmem.h> |
| 31 | #include <linux/module.h> |
| 32 | #include <linux/swap.h> |
| 33 | #include <linux/hugetlb.h> |
| 34 | #include <linux/compiler.h> |
| 35 | #include <linux/srcu.h> |
| 36 | #include <linux/slab.h> |
| 37 | #include <linux/uaccess.h> |
| 38 | |
| 39 | #include <asm/page.h> |
| 40 | #include <asm/cmpxchg.h> |
| 41 | #include <asm/io.h> |
| 42 | #include <asm/vmx.h> |
| 43 | |
/*
 * When this variable is set to true, Two-Dimensional Paging (TDP) is
 * enabled and the hardware walks two page tables:
 * 1. the guest-virtual to guest-physical table
 * 2. while doing 1., the guest-physical to host-physical table
 * If the hardware supports TDP, we do not need shadow paging.
 */
| 51 | bool tdp_enabled = false; |
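
/*
 * Illustrative sketch only (not compiled logic): with TDP enabled a
 * guest access is translated as
 *	GVA --(guest page tables)--> GPA --(TDP tables)--> HPA
 * whereas with shadow paging the hardware walks only the shadow tables
 * maintained by this file:
 *	GVA --(shadow page tables)--> HPA
 */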
| 52 | |
| 53 | enum { |
| 54 | AUDIT_PRE_PAGE_FAULT, |
| 55 | AUDIT_POST_PAGE_FAULT, |
| 56 | AUDIT_PRE_PTE_WRITE, |
| 57 | AUDIT_POST_PTE_WRITE, |
| 58 | AUDIT_PRE_SYNC, |
| 59 | AUDIT_POST_SYNC |
| 60 | }; |
| 61 | |
| 62 | #undef MMU_DEBUG |
| 63 | |
| 64 | #ifdef MMU_DEBUG |
| 65 | |
| 66 | #define pgprintk(x...) do { if (dbg) printk(x); } while (0) |
| 67 | #define rmap_printk(x...) do { if (dbg) printk(x); } while (0) |
| 68 | |
| 69 | #else |
| 70 | |
| 71 | #define pgprintk(x...) do { } while (0) |
| 72 | #define rmap_printk(x...) do { } while (0) |
| 73 | |
| 74 | #endif |
| 75 | |
| 76 | #ifdef MMU_DEBUG |
| 77 | static bool dbg = 0; |
| 78 | module_param(dbg, bool, 0644); |
| 79 | #endif |
| 80 | |
| 81 | #ifndef MMU_DEBUG |
| 82 | #define ASSERT(x) do { } while (0) |
| 83 | #else |
| 84 | #define ASSERT(x) \ |
| 85 | if (!(x)) { \ |
| 86 | printk(KERN_WARNING "assertion failed %s:%d: %s\n", \ |
| 87 | __FILE__, __LINE__, #x); \ |
| 88 | } |
| 89 | #endif |
| 90 | |
| 91 | #define PTE_PREFETCH_NUM 8 |
| 92 | |
| 93 | #define PT_FIRST_AVAIL_BITS_SHIFT 9 |
| 94 | #define PT64_SECOND_AVAIL_BITS_SHIFT 52 |
| 95 | |
| 96 | #define PT64_LEVEL_BITS 9 |
| 97 | |
| 98 | #define PT64_LEVEL_SHIFT(level) \ |
| 99 | (PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS) |
| 100 | |
| 101 | #define PT64_INDEX(address, level)\ |
| 102 | (((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1)) |
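
/*
 * Worked example (assuming the usual PAGE_SHIFT of 12): with
 * PT64_LEVEL_BITS == 9, PT64_LEVEL_SHIFT(1..4) is 12, 21, 30, 39, so
 * PT64_INDEX(address, level) extracts the 9-bit table index for that
 * level, e.g. PT64_INDEX(0x00007f1234567000, 1) == 0x167.
 */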
| 103 | |
| 104 | |
| 105 | #define PT32_LEVEL_BITS 10 |
| 106 | |
| 107 | #define PT32_LEVEL_SHIFT(level) \ |
| 108 | (PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS) |
| 109 | |
| 110 | #define PT32_LVL_OFFSET_MASK(level) \ |
| 111 | (PT32_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \ |
| 112 | * PT32_LEVEL_BITS))) - 1)) |
| 113 | |
| 114 | #define PT32_INDEX(address, level)\ |
| 115 | (((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1)) |
| 116 | |
| 117 | |
| 118 | #define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1)) |
| 119 | #define PT64_DIR_BASE_ADDR_MASK \ |
| 120 | (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1)) |
| 121 | #define PT64_LVL_ADDR_MASK(level) \ |
| 122 | (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \ |
| 123 | * PT64_LEVEL_BITS))) - 1)) |
| 124 | #define PT64_LVL_OFFSET_MASK(level) \ |
| 125 | (PT64_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \ |
| 126 | * PT64_LEVEL_BITS))) - 1)) |
| 127 | |
| 128 | #define PT32_BASE_ADDR_MASK PAGE_MASK |
| 129 | #define PT32_DIR_BASE_ADDR_MASK \ |
| 130 | (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1)) |
| 131 | #define PT32_LVL_ADDR_MASK(level) \ |
| 132 | (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \ |
| 133 | * PT32_LEVEL_BITS))) - 1)) |
| 134 | |
| 135 | #define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \ |
| 136 | | PT64_NX_MASK) |
| 137 | |
| 138 | #define PTE_LIST_EXT 4 |
| 139 | |
| 140 | #define ACC_EXEC_MASK 1 |
| 141 | #define ACC_WRITE_MASK PT_WRITABLE_MASK |
| 142 | #define ACC_USER_MASK PT_USER_MASK |
| 143 | #define ACC_ALL (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK) |
| 144 | |
| 145 | #include <trace/events/kvm.h> |
| 146 | |
| 147 | #define CREATE_TRACE_POINTS |
| 148 | #include "mmutrace.h" |
| 149 | |
| 150 | #define SPTE_HOST_WRITEABLE (1ULL << PT_FIRST_AVAIL_BITS_SHIFT) |
| 151 | |
| 152 | #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level) |
| 153 | |
| 154 | struct pte_list_desc { |
| 155 | u64 *sptes[PTE_LIST_EXT]; |
| 156 | struct pte_list_desc *more; |
| 157 | }; |
| 158 | |
| 159 | struct kvm_shadow_walk_iterator { |
| 160 | u64 addr; |
| 161 | hpa_t shadow_addr; |
| 162 | u64 *sptep; |
| 163 | int level; |
| 164 | unsigned index; |
| 165 | }; |
| 166 | |
| 167 | #define for_each_shadow_entry(_vcpu, _addr, _walker) \ |
| 168 | for (shadow_walk_init(&(_walker), _vcpu, _addr); \ |
| 169 | shadow_walk_okay(&(_walker)); \ |
| 170 | shadow_walk_next(&(_walker))) |
| 171 | |
| 172 | #define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte) \ |
| 173 | for (shadow_walk_init(&(_walker), _vcpu, _addr); \ |
| 174 | shadow_walk_okay(&(_walker)) && \ |
| 175 | ({ spte = mmu_spte_get_lockless(_walker.sptep); 1; }); \ |
| 176 | __shadow_walk_next(&(_walker), spte)) |
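
/*
 * Typical usage sketch (illustrative only; "iterator", "vcpu" and
 * "addr" are caller-chosen names):
 *
 *	struct kvm_shadow_walk_iterator iterator;
 *
 *	for_each_shadow_entry(vcpu, addr, iterator) {
 *		u64 spte = *iterator.sptep;
 *		if (!is_shadow_present_pte(spte))
 *			break;
 *	}
 */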
| 177 | |
| 178 | static struct kmem_cache *pte_list_desc_cache; |
| 179 | static struct kmem_cache *mmu_page_header_cache; |
| 180 | static struct percpu_counter kvm_total_used_mmu_pages; |
| 181 | |
| 182 | static u64 __read_mostly shadow_nx_mask; |
static u64 __read_mostly shadow_x_mask;	/* mutually exclusive with nx_mask */
| 184 | static u64 __read_mostly shadow_user_mask; |
| 185 | static u64 __read_mostly shadow_accessed_mask; |
| 186 | static u64 __read_mostly shadow_dirty_mask; |
| 187 | static u64 __read_mostly shadow_mmio_mask; |
| 188 | |
| 189 | static void mmu_spte_set(u64 *sptep, u64 spte); |
| 190 | |
| 191 | void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask) |
| 192 | { |
| 193 | shadow_mmio_mask = mmio_mask; |
| 194 | } |
| 195 | EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask); |
| 196 | |
| 197 | static void mark_mmio_spte(u64 *sptep, u64 gfn, unsigned access) |
| 198 | { |
| 199 | access &= ACC_WRITE_MASK | ACC_USER_MASK; |
| 200 | |
| 201 | trace_mark_mmio_spte(sptep, gfn, access); |
| 202 | mmu_spte_set(sptep, shadow_mmio_mask | access | gfn << PAGE_SHIFT); |
| 203 | } |
| 204 | |
| 205 | static bool is_mmio_spte(u64 spte) |
| 206 | { |
| 207 | return (spte & shadow_mmio_mask) == shadow_mmio_mask; |
| 208 | } |
| 209 | |
| 210 | static gfn_t get_mmio_spte_gfn(u64 spte) |
| 211 | { |
| 212 | return (spte & ~shadow_mmio_mask) >> PAGE_SHIFT; |
| 213 | } |
| 214 | |
| 215 | static unsigned get_mmio_spte_access(u64 spte) |
| 216 | { |
| 217 | return (spte & ~shadow_mmio_mask) & ~PAGE_MASK; |
| 218 | } |
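
/*
 * Summary of the MMIO spte encoding used by the helpers above (assuming
 * the vendor module has installed shadow_mmio_mask):
 *
 *	spte = shadow_mmio_mask | (gfn << PAGE_SHIFT) | access
 *
 * get_mmio_spte_gfn() and get_mmio_spte_access() simply mask
 * shadow_mmio_mask back out and recover the two fields.
 */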
| 219 | |
| 220 | static bool set_mmio_spte(u64 *sptep, gfn_t gfn, pfn_t pfn, unsigned access) |
| 221 | { |
| 222 | if (unlikely(is_noslot_pfn(pfn))) { |
| 223 | mark_mmio_spte(sptep, gfn, access); |
| 224 | return true; |
| 225 | } |
| 226 | |
| 227 | return false; |
| 228 | } |
| 229 | |
| 230 | static inline u64 rsvd_bits(int s, int e) |
| 231 | { |
| 232 | return ((1ULL << (e - s + 1)) - 1) << s; |
| 233 | } |
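
/*
 * Example: rsvd_bits(51, 62) builds a mask with bits 51..62 set,
 * i.e. ((1ULL << 12) - 1) << 51 == 0x7ff8000000000000ULL.
 */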
| 234 | |
| 235 | void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask, |
| 236 | u64 dirty_mask, u64 nx_mask, u64 x_mask) |
| 237 | { |
| 238 | shadow_user_mask = user_mask; |
| 239 | shadow_accessed_mask = accessed_mask; |
| 240 | shadow_dirty_mask = dirty_mask; |
| 241 | shadow_nx_mask = nx_mask; |
| 242 | shadow_x_mask = x_mask; |
| 243 | } |
| 244 | EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes); |
| 245 | |
| 246 | static int is_cpuid_PSE36(void) |
| 247 | { |
| 248 | return 1; |
| 249 | } |
| 250 | |
| 251 | static int is_nx(struct kvm_vcpu *vcpu) |
| 252 | { |
| 253 | return vcpu->arch.efer & EFER_NX; |
| 254 | } |
| 255 | |
| 256 | static int is_shadow_present_pte(u64 pte) |
| 257 | { |
| 258 | return pte & PT_PRESENT_MASK && !is_mmio_spte(pte); |
| 259 | } |
| 260 | |
| 261 | static int is_large_pte(u64 pte) |
| 262 | { |
| 263 | return pte & PT_PAGE_SIZE_MASK; |
| 264 | } |
| 265 | |
| 266 | static int is_dirty_gpte(unsigned long pte) |
| 267 | { |
| 268 | return pte & PT_DIRTY_MASK; |
| 269 | } |
| 270 | |
| 271 | static int is_rmap_spte(u64 pte) |
| 272 | { |
| 273 | return is_shadow_present_pte(pte); |
| 274 | } |
| 275 | |
| 276 | static int is_last_spte(u64 pte, int level) |
| 277 | { |
| 278 | if (level == PT_PAGE_TABLE_LEVEL) |
| 279 | return 1; |
| 280 | if (is_large_pte(pte)) |
| 281 | return 1; |
| 282 | return 0; |
| 283 | } |
| 284 | |
| 285 | static pfn_t spte_to_pfn(u64 pte) |
| 286 | { |
| 287 | return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT; |
| 288 | } |
| 289 | |
| 290 | static gfn_t pse36_gfn_delta(u32 gpte) |
| 291 | { |
| 292 | int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT; |
| 293 | |
| 294 | return (gpte & PT32_DIR_PSE36_MASK) << shift; |
| 295 | } |
| 296 | |
| 297 | #ifdef CONFIG_X86_64 |
| 298 | static void __set_spte(u64 *sptep, u64 spte) |
| 299 | { |
| 300 | *sptep = spte; |
| 301 | } |
| 302 | |
| 303 | static void __update_clear_spte_fast(u64 *sptep, u64 spte) |
| 304 | { |
| 305 | *sptep = spte; |
| 306 | } |
| 307 | |
| 308 | static u64 __update_clear_spte_slow(u64 *sptep, u64 spte) |
| 309 | { |
| 310 | return xchg(sptep, spte); |
| 311 | } |
| 312 | |
| 313 | static u64 __get_spte_lockless(u64 *sptep) |
| 314 | { |
| 315 | return ACCESS_ONCE(*sptep); |
| 316 | } |
| 317 | |
| 318 | static bool __check_direct_spte_mmio_pf(u64 spte) |
| 319 | { |
| 320 | /* It is valid if the spte is zapped. */ |
| 321 | return spte == 0ull; |
| 322 | } |
| 323 | #else |
| 324 | union split_spte { |
| 325 | struct { |
| 326 | u32 spte_low; |
| 327 | u32 spte_high; |
| 328 | }; |
| 329 | u64 spte; |
| 330 | }; |
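
/*
 * On 32-bit hosts a 64-bit spte cannot be read or written atomically,
 * so it is handled as two 32-bit halves.  The helpers below order the
 * half-word accesses (present bit written last when setting, cleared
 * first when clearing) so the hardware never sees a half-updated spte
 * that is marked present.
 */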
| 331 | |
| 332 | static void count_spte_clear(u64 *sptep, u64 spte) |
| 333 | { |
| 334 | struct kvm_mmu_page *sp = page_header(__pa(sptep)); |
| 335 | |
| 336 | if (is_shadow_present_pte(spte)) |
| 337 | return; |
| 338 | |
| 339 | /* Ensure the spte is completely set before we increase the count */ |
| 340 | smp_wmb(); |
| 341 | sp->clear_spte_count++; |
| 342 | } |
| 343 | |
| 344 | static void __set_spte(u64 *sptep, u64 spte) |
| 345 | { |
| 346 | union split_spte *ssptep, sspte; |
| 347 | |
| 348 | ssptep = (union split_spte *)sptep; |
| 349 | sspte = (union split_spte)spte; |
| 350 | |
| 351 | ssptep->spte_high = sspte.spte_high; |
| 352 | |
	/*
	 * If we are mapping the spte from nonpresent to present, we must
	 * store the high bits first and only then set the present bit, so
	 * that the CPU cannot fetch the spte while we are still writing it.
	 */
| 358 | smp_wmb(); |
| 359 | |
| 360 | ssptep->spte_low = sspte.spte_low; |
| 361 | } |
| 362 | |
| 363 | static void __update_clear_spte_fast(u64 *sptep, u64 spte) |
| 364 | { |
| 365 | union split_spte *ssptep, sspte; |
| 366 | |
| 367 | ssptep = (union split_spte *)sptep; |
| 368 | sspte = (union split_spte)spte; |
| 369 | |
| 370 | ssptep->spte_low = sspte.spte_low; |
| 371 | |
	/*
	 * If we are mapping the spte from present to nonpresent, we must
	 * clear the present bit first so that a vcpu cannot fetch the
	 * stale high bits.
	 */
| 376 | smp_wmb(); |
| 377 | |
| 378 | ssptep->spte_high = sspte.spte_high; |
| 379 | count_spte_clear(sptep, spte); |
| 380 | } |
| 381 | |
| 382 | static u64 __update_clear_spte_slow(u64 *sptep, u64 spte) |
| 383 | { |
| 384 | union split_spte *ssptep, sspte, orig; |
| 385 | |
| 386 | ssptep = (union split_spte *)sptep; |
| 387 | sspte = (union split_spte)spte; |
| 388 | |
| 389 | /* xchg acts as a barrier before the setting of the high bits */ |
| 390 | orig.spte_low = xchg(&ssptep->spte_low, sspte.spte_low); |
| 391 | orig.spte_high = ssptep->spte_high; |
| 392 | ssptep->spte_high = sspte.spte_high; |
| 393 | count_spte_clear(sptep, spte); |
| 394 | |
| 395 | return orig.spte; |
| 396 | } |
| 397 | |
/*
 * The idea of reading the spte in this lightweight way on 32-bit hosts
 * comes from gup_get_pte() (arch/x86/mm/gup.c).
 * The difference is that we cannot rely on catching the spte TLB flush
 * when we leave guest mode, so we emulate it by incrementing
 * clear_spte_count whenever a spte is cleared.
 */
| 405 | static u64 __get_spte_lockless(u64 *sptep) |
| 406 | { |
| 407 | struct kvm_mmu_page *sp = page_header(__pa(sptep)); |
| 408 | union split_spte spte, *orig = (union split_spte *)sptep; |
| 409 | int count; |
| 410 | |
| 411 | retry: |
| 412 | count = sp->clear_spte_count; |
| 413 | smp_rmb(); |
| 414 | |
| 415 | spte.spte_low = orig->spte_low; |
| 416 | smp_rmb(); |
| 417 | |
| 418 | spte.spte_high = orig->spte_high; |
| 419 | smp_rmb(); |
| 420 | |
| 421 | if (unlikely(spte.spte_low != orig->spte_low || |
| 422 | count != sp->clear_spte_count)) |
| 423 | goto retry; |
| 424 | |
| 425 | return spte.spte; |
| 426 | } |
| 427 | |
| 428 | static bool __check_direct_spte_mmio_pf(u64 spte) |
| 429 | { |
| 430 | union split_spte sspte = (union split_spte)spte; |
| 431 | u32 high_mmio_mask = shadow_mmio_mask >> 32; |
| 432 | |
| 433 | /* It is valid if the spte is zapped. */ |
| 434 | if (spte == 0ull) |
| 435 | return true; |
| 436 | |
| 437 | /* It is valid if the spte is being zapped. */ |
| 438 | if (sspte.spte_low == 0ull && |
| 439 | (sspte.spte_high & high_mmio_mask) == high_mmio_mask) |
| 440 | return true; |
| 441 | |
| 442 | return false; |
| 443 | } |
| 444 | #endif |
| 445 | |
| 446 | static bool spte_has_volatile_bits(u64 spte) |
| 447 | { |
| 448 | if (!shadow_accessed_mask) |
| 449 | return false; |
| 450 | |
| 451 | if (!is_shadow_present_pte(spte)) |
| 452 | return false; |
| 453 | |
| 454 | if ((spte & shadow_accessed_mask) && |
| 455 | (!is_writable_pte(spte) || (spte & shadow_dirty_mask))) |
| 456 | return false; |
| 457 | |
| 458 | return true; |
| 459 | } |
| 460 | |
| 461 | static bool spte_is_bit_cleared(u64 old_spte, u64 new_spte, u64 bit_mask) |
| 462 | { |
| 463 | return (old_spte & bit_mask) && !(new_spte & bit_mask); |
| 464 | } |
| 465 | |
| 466 | /* Rules for using mmu_spte_set: |
| 467 | * Set the sptep from nonpresent to present. |
| 468 | * Note: the sptep being assigned *must* be either not present |
| 469 | * or in a state where the hardware will not attempt to update |
| 470 | * the spte. |
| 471 | */ |
| 472 | static void mmu_spte_set(u64 *sptep, u64 new_spte) |
| 473 | { |
| 474 | WARN_ON(is_shadow_present_pte(*sptep)); |
| 475 | __set_spte(sptep, new_spte); |
| 476 | } |
| 477 | |
/* Rules for using mmu_spte_update:
 * Update the state bits only; the mapped pfn is not changed.
 */
| 481 | static void mmu_spte_update(u64 *sptep, u64 new_spte) |
| 482 | { |
| 483 | u64 mask, old_spte = *sptep; |
| 484 | |
| 485 | WARN_ON(!is_rmap_spte(new_spte)); |
| 486 | |
| 487 | if (!is_shadow_present_pte(old_spte)) |
| 488 | return mmu_spte_set(sptep, new_spte); |
| 489 | |
| 490 | new_spte |= old_spte & shadow_dirty_mask; |
| 491 | |
| 492 | mask = shadow_accessed_mask; |
| 493 | if (is_writable_pte(old_spte)) |
| 494 | mask |= shadow_dirty_mask; |
| 495 | |
| 496 | if (!spte_has_volatile_bits(old_spte) || (new_spte & mask) == mask) |
| 497 | __update_clear_spte_fast(sptep, new_spte); |
| 498 | else |
| 499 | old_spte = __update_clear_spte_slow(sptep, new_spte); |
| 500 | |
| 501 | if (!shadow_accessed_mask) |
| 502 | return; |
| 503 | |
| 504 | if (spte_is_bit_cleared(old_spte, new_spte, shadow_accessed_mask)) |
| 505 | kvm_set_pfn_accessed(spte_to_pfn(old_spte)); |
| 506 | if (spte_is_bit_cleared(old_spte, new_spte, shadow_dirty_mask)) |
| 507 | kvm_set_pfn_dirty(spte_to_pfn(old_spte)); |
| 508 | } |
| 509 | |
/*
 * Rules for using mmu_spte_clear_track_bits:
 * It sets the sptep from present to nonpresent and tracks the
 * state bits; it is used to clear a last-level sptep.
 */
| 515 | static int mmu_spte_clear_track_bits(u64 *sptep) |
| 516 | { |
| 517 | pfn_t pfn; |
| 518 | u64 old_spte = *sptep; |
| 519 | |
| 520 | if (!spte_has_volatile_bits(old_spte)) |
| 521 | __update_clear_spte_fast(sptep, 0ull); |
| 522 | else |
| 523 | old_spte = __update_clear_spte_slow(sptep, 0ull); |
| 524 | |
| 525 | if (!is_rmap_spte(old_spte)) |
| 526 | return 0; |
| 527 | |
| 528 | pfn = spte_to_pfn(old_spte); |
| 529 | if (!shadow_accessed_mask || old_spte & shadow_accessed_mask) |
| 530 | kvm_set_pfn_accessed(pfn); |
| 531 | if (!shadow_dirty_mask || (old_spte & shadow_dirty_mask)) |
| 532 | kvm_set_pfn_dirty(pfn); |
| 533 | return 1; |
| 534 | } |
| 535 | |
/*
 * Rules for using mmu_spte_clear_no_track:
 * Directly clear the spte without tracking its state bits;
 * it is used when clearing an upper-level spte.
 */
| 541 | static void mmu_spte_clear_no_track(u64 *sptep) |
| 542 | { |
| 543 | __update_clear_spte_fast(sptep, 0ull); |
| 544 | } |
| 545 | |
| 546 | static u64 mmu_spte_get_lockless(u64 *sptep) |
| 547 | { |
| 548 | return __get_spte_lockless(sptep); |
| 549 | } |
| 550 | |
| 551 | static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu) |
| 552 | { |
| 553 | rcu_read_lock(); |
| 554 | atomic_inc(&vcpu->kvm->arch.reader_counter); |
| 555 | |
| 556 | /* Increase the counter before walking shadow page table */ |
| 557 | smp_mb__after_atomic_inc(); |
| 558 | } |
| 559 | |
| 560 | static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu) |
| 561 | { |
	/* Decrease the counter after the shadow page table walk has finished */
| 563 | smp_mb__before_atomic_dec(); |
| 564 | atomic_dec(&vcpu->kvm->arch.reader_counter); |
| 565 | rcu_read_unlock(); |
| 566 | } |
| 567 | |
| 568 | static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, |
| 569 | struct kmem_cache *base_cache, int min) |
| 570 | { |
| 571 | void *obj; |
| 572 | |
| 573 | if (cache->nobjs >= min) |
| 574 | return 0; |
| 575 | while (cache->nobjs < ARRAY_SIZE(cache->objects)) { |
| 576 | obj = kmem_cache_zalloc(base_cache, GFP_KERNEL); |
| 577 | if (!obj) |
| 578 | return -ENOMEM; |
| 579 | cache->objects[cache->nobjs++] = obj; |
| 580 | } |
| 581 | return 0; |
| 582 | } |
| 583 | |
| 584 | static int mmu_memory_cache_free_objects(struct kvm_mmu_memory_cache *cache) |
| 585 | { |
| 586 | return cache->nobjs; |
| 587 | } |
| 588 | |
| 589 | static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc, |
| 590 | struct kmem_cache *cache) |
| 591 | { |
| 592 | while (mc->nobjs) |
| 593 | kmem_cache_free(cache, mc->objects[--mc->nobjs]); |
| 594 | } |
| 595 | |
| 596 | static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache, |
| 597 | int min) |
| 598 | { |
| 599 | void *page; |
| 600 | |
| 601 | if (cache->nobjs >= min) |
| 602 | return 0; |
| 603 | while (cache->nobjs < ARRAY_SIZE(cache->objects)) { |
| 604 | page = (void *)__get_free_page(GFP_KERNEL); |
| 605 | if (!page) |
| 606 | return -ENOMEM; |
| 607 | cache->objects[cache->nobjs++] = page; |
| 608 | } |
| 609 | return 0; |
| 610 | } |
| 611 | |
| 612 | static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc) |
| 613 | { |
| 614 | while (mc->nobjs) |
| 615 | free_page((unsigned long)mc->objects[--mc->nobjs]); |
| 616 | } |
| 617 | |
| 618 | static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu) |
| 619 | { |
| 620 | int r; |
| 621 | |
| 622 | r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache, |
| 623 | pte_list_desc_cache, 8 + PTE_PREFETCH_NUM); |
| 624 | if (r) |
| 625 | goto out; |
| 626 | r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8); |
| 627 | if (r) |
| 628 | goto out; |
| 629 | r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache, |
| 630 | mmu_page_header_cache, 4); |
| 631 | out: |
| 632 | return r; |
| 633 | } |
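
/*
 * The caches above are filled with sleeping GFP_KERNEL allocations and
 * are intended to be topped up before mmu_lock is taken, so that
 * mmu_memory_cache_alloc() below can hand out objects without sleeping
 * (note its BUG_ON(!mc->nobjs)).
 */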
| 634 | |
| 635 | static void mmu_free_memory_caches(struct kvm_vcpu *vcpu) |
| 636 | { |
| 637 | mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache, |
| 638 | pte_list_desc_cache); |
| 639 | mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache); |
| 640 | mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache, |
| 641 | mmu_page_header_cache); |
| 642 | } |
| 643 | |
| 644 | static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc, |
| 645 | size_t size) |
| 646 | { |
| 647 | void *p; |
| 648 | |
| 649 | BUG_ON(!mc->nobjs); |
| 650 | p = mc->objects[--mc->nobjs]; |
| 651 | return p; |
| 652 | } |
| 653 | |
| 654 | static struct pte_list_desc *mmu_alloc_pte_list_desc(struct kvm_vcpu *vcpu) |
| 655 | { |
| 656 | return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache, |
| 657 | sizeof(struct pte_list_desc)); |
| 658 | } |
| 659 | |
| 660 | static void mmu_free_pte_list_desc(struct pte_list_desc *pte_list_desc) |
| 661 | { |
| 662 | kmem_cache_free(pte_list_desc_cache, pte_list_desc); |
| 663 | } |
| 664 | |
| 665 | static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index) |
| 666 | { |
| 667 | if (!sp->role.direct) |
| 668 | return sp->gfns[index]; |
| 669 | |
| 670 | return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS)); |
| 671 | } |
| 672 | |
| 673 | static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn) |
| 674 | { |
| 675 | if (sp->role.direct) |
| 676 | BUG_ON(gfn != kvm_mmu_page_get_gfn(sp, index)); |
| 677 | else |
| 678 | sp->gfns[index] = gfn; |
| 679 | } |
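
/*
 * Example for a direct sp (illustrative values): with role.level == 2
 * and sp->gfn == 0x400, index 3 corresponds to gfn
 * 0x400 + (3 << 9) == 0xa00, so no per-index gfns[] array is needed.
 */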
| 680 | |
| 681 | /* |
| 682 | * Return the pointer to the large page information for a given gfn, |
| 683 | * handling slots that are not large page aligned. |
| 684 | */ |
| 685 | static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn, |
| 686 | struct kvm_memory_slot *slot, |
| 687 | int level) |
| 688 | { |
| 689 | unsigned long idx; |
| 690 | |
| 691 | idx = gfn_to_index(gfn, slot->base_gfn, level); |
| 692 | return &slot->arch.lpage_info[level - 2][idx]; |
| 693 | } |
| 694 | |
| 695 | static void account_shadowed(struct kvm *kvm, gfn_t gfn) |
| 696 | { |
| 697 | struct kvm_memory_slot *slot; |
| 698 | struct kvm_lpage_info *linfo; |
| 699 | int i; |
| 700 | |
| 701 | slot = gfn_to_memslot(kvm, gfn); |
| 702 | for (i = PT_DIRECTORY_LEVEL; |
| 703 | i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) { |
| 704 | linfo = lpage_info_slot(gfn, slot, i); |
| 705 | linfo->write_count += 1; |
| 706 | } |
| 707 | kvm->arch.indirect_shadow_pages++; |
| 708 | } |
| 709 | |
| 710 | static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn) |
| 711 | { |
| 712 | struct kvm_memory_slot *slot; |
| 713 | struct kvm_lpage_info *linfo; |
| 714 | int i; |
| 715 | |
| 716 | slot = gfn_to_memslot(kvm, gfn); |
| 717 | for (i = PT_DIRECTORY_LEVEL; |
| 718 | i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) { |
| 719 | linfo = lpage_info_slot(gfn, slot, i); |
| 720 | linfo->write_count -= 1; |
| 721 | WARN_ON(linfo->write_count < 0); |
| 722 | } |
| 723 | kvm->arch.indirect_shadow_pages--; |
| 724 | } |
| 725 | |
| 726 | static int has_wrprotected_page(struct kvm *kvm, |
| 727 | gfn_t gfn, |
| 728 | int level) |
| 729 | { |
| 730 | struct kvm_memory_slot *slot; |
| 731 | struct kvm_lpage_info *linfo; |
| 732 | |
| 733 | slot = gfn_to_memslot(kvm, gfn); |
| 734 | if (slot) { |
| 735 | linfo = lpage_info_slot(gfn, slot, level); |
| 736 | return linfo->write_count; |
| 737 | } |
| 738 | |
| 739 | return 1; |
| 740 | } |
| 741 | |
| 742 | static int host_mapping_level(struct kvm *kvm, gfn_t gfn) |
| 743 | { |
| 744 | unsigned long page_size; |
| 745 | int i, ret = 0; |
| 746 | |
| 747 | page_size = kvm_host_page_size(kvm, gfn); |
| 748 | |
| 749 | for (i = PT_PAGE_TABLE_LEVEL; |
| 750 | i < (PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES); ++i) { |
| 751 | if (page_size >= KVM_HPAGE_SIZE(i)) |
| 752 | ret = i; |
| 753 | else |
| 754 | break; |
| 755 | } |
| 756 | |
| 757 | return ret; |
| 758 | } |
| 759 | |
| 760 | static struct kvm_memory_slot * |
| 761 | gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn, |
| 762 | bool no_dirty_log) |
| 763 | { |
| 764 | struct kvm_memory_slot *slot; |
| 765 | |
| 766 | slot = gfn_to_memslot(vcpu->kvm, gfn); |
| 767 | if (!slot || slot->flags & KVM_MEMSLOT_INVALID || |
| 768 | (no_dirty_log && slot->dirty_bitmap)) |
| 769 | slot = NULL; |
| 770 | |
| 771 | return slot; |
| 772 | } |
| 773 | |
| 774 | static bool mapping_level_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t large_gfn) |
| 775 | { |
| 776 | return !gfn_to_memslot_dirty_bitmap(vcpu, large_gfn, true); |
| 777 | } |
| 778 | |
| 779 | static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn) |
| 780 | { |
| 781 | int host_level, level, max_level; |
| 782 | |
| 783 | host_level = host_mapping_level(vcpu->kvm, large_gfn); |
| 784 | |
| 785 | if (host_level == PT_PAGE_TABLE_LEVEL) |
| 786 | return host_level; |
| 787 | |
| 788 | max_level = kvm_x86_ops->get_lpage_level() < host_level ? |
| 789 | kvm_x86_ops->get_lpage_level() : host_level; |
| 790 | |
| 791 | for (level = PT_DIRECTORY_LEVEL; level <= max_level; ++level) |
| 792 | if (has_wrprotected_page(vcpu->kvm, large_gfn, level)) |
| 793 | break; |
| 794 | |
| 795 | return level - 1; |
| 796 | } |
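
/*
 * Worked example (illustrative): if the host backs large_gfn with a 2MB
 * page, host_mapping_level() returns PT_DIRECTORY_LEVEL.  If nothing in
 * that 2MB range is write-protected (and get_lpage_level() permits it),
 * the loop runs past max_level and PT_DIRECTORY_LEVEL is returned;
 * otherwise the loop breaks at once and the mapping is forced down to
 * PT_PAGE_TABLE_LEVEL.
 */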
| 797 | |
/*
 * Pte mapping structures:
 *
 * If bit zero of pte_list is zero, then pte_list points directly to
 * the single spte.
 *
 * If bit zero of pte_list is one, then (pte_list & ~1) points to a
 * struct pte_list_desc containing more mappings.
 *
 * pte_list_add() returns the number of pte entries that existed before
 * the spte was added, or zero if the spte was not added.
 */
| 810 | static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte, |
| 811 | unsigned long *pte_list) |
| 812 | { |
| 813 | struct pte_list_desc *desc; |
| 814 | int i, count = 0; |
| 815 | |
| 816 | if (!*pte_list) { |
| 817 | rmap_printk("pte_list_add: %p %llx 0->1\n", spte, *spte); |
| 818 | *pte_list = (unsigned long)spte; |
| 819 | } else if (!(*pte_list & 1)) { |
| 820 | rmap_printk("pte_list_add: %p %llx 1->many\n", spte, *spte); |
| 821 | desc = mmu_alloc_pte_list_desc(vcpu); |
| 822 | desc->sptes[0] = (u64 *)*pte_list; |
| 823 | desc->sptes[1] = spte; |
| 824 | *pte_list = (unsigned long)desc | 1; |
| 825 | ++count; |
| 826 | } else { |
| 827 | rmap_printk("pte_list_add: %p %llx many->many\n", spte, *spte); |
| 828 | desc = (struct pte_list_desc *)(*pte_list & ~1ul); |
| 829 | while (desc->sptes[PTE_LIST_EXT-1] && desc->more) { |
| 830 | desc = desc->more; |
| 831 | count += PTE_LIST_EXT; |
| 832 | } |
| 833 | if (desc->sptes[PTE_LIST_EXT-1]) { |
| 834 | desc->more = mmu_alloc_pte_list_desc(vcpu); |
| 835 | desc = desc->more; |
| 836 | } |
| 837 | for (i = 0; desc->sptes[i]; ++i) |
| 838 | ++count; |
| 839 | desc->sptes[i] = spte; |
| 840 | } |
| 841 | return count; |
| 842 | } |
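
/*
 * Illustrative only (hypothetical addresses): a pte_list holding a
 * single spte at 0xffff880012345000 stores that address directly, with
 * bit zero clear.  Adding a second spte replaces it with the address of
 * a pte_list_desc, with bit zero set, and the two sptes then live in
 * desc->sptes[0] and desc->sptes[1].
 */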
| 843 | |
| 844 | static u64 *pte_list_next(unsigned long *pte_list, u64 *spte) |
| 845 | { |
| 846 | struct pte_list_desc *desc; |
| 847 | u64 *prev_spte; |
| 848 | int i; |
| 849 | |
| 850 | if (!*pte_list) |
| 851 | return NULL; |
| 852 | else if (!(*pte_list & 1)) { |
| 853 | if (!spte) |
| 854 | return (u64 *)*pte_list; |
| 855 | return NULL; |
| 856 | } |
| 857 | desc = (struct pte_list_desc *)(*pte_list & ~1ul); |
| 858 | prev_spte = NULL; |
| 859 | while (desc) { |
| 860 | for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i) { |
| 861 | if (prev_spte == spte) |
| 862 | return desc->sptes[i]; |
| 863 | prev_spte = desc->sptes[i]; |
| 864 | } |
| 865 | desc = desc->more; |
| 866 | } |
| 867 | return NULL; |
| 868 | } |
| 869 | |
| 870 | static void |
| 871 | pte_list_desc_remove_entry(unsigned long *pte_list, struct pte_list_desc *desc, |
| 872 | int i, struct pte_list_desc *prev_desc) |
| 873 | { |
| 874 | int j; |
| 875 | |
| 876 | for (j = PTE_LIST_EXT - 1; !desc->sptes[j] && j > i; --j) |
| 877 | ; |
| 878 | desc->sptes[i] = desc->sptes[j]; |
| 879 | desc->sptes[j] = NULL; |
| 880 | if (j != 0) |
| 881 | return; |
| 882 | if (!prev_desc && !desc->more) |
| 883 | *pte_list = (unsigned long)desc->sptes[0]; |
| 884 | else |
| 885 | if (prev_desc) |
| 886 | prev_desc->more = desc->more; |
| 887 | else |
| 888 | *pte_list = (unsigned long)desc->more | 1; |
| 889 | mmu_free_pte_list_desc(desc); |
| 890 | } |
| 891 | |
| 892 | static void pte_list_remove(u64 *spte, unsigned long *pte_list) |
| 893 | { |
| 894 | struct pte_list_desc *desc; |
| 895 | struct pte_list_desc *prev_desc; |
| 896 | int i; |
| 897 | |
| 898 | if (!*pte_list) { |
| 899 | printk(KERN_ERR "pte_list_remove: %p 0->BUG\n", spte); |
| 900 | BUG(); |
| 901 | } else if (!(*pte_list & 1)) { |
| 902 | rmap_printk("pte_list_remove: %p 1->0\n", spte); |
| 903 | if ((u64 *)*pte_list != spte) { |
| 904 | printk(KERN_ERR "pte_list_remove: %p 1->BUG\n", spte); |
| 905 | BUG(); |
| 906 | } |
| 907 | *pte_list = 0; |
| 908 | } else { |
| 909 | rmap_printk("pte_list_remove: %p many->many\n", spte); |
| 910 | desc = (struct pte_list_desc *)(*pte_list & ~1ul); |
| 911 | prev_desc = NULL; |
| 912 | while (desc) { |
| 913 | for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i) |
| 914 | if (desc->sptes[i] == spte) { |
| 915 | pte_list_desc_remove_entry(pte_list, |
| 916 | desc, i, |
| 917 | prev_desc); |
| 918 | return; |
| 919 | } |
| 920 | prev_desc = desc; |
| 921 | desc = desc->more; |
| 922 | } |
| 923 | pr_err("pte_list_remove: %p many->many\n", spte); |
| 924 | BUG(); |
| 925 | } |
| 926 | } |
| 927 | |
| 928 | typedef void (*pte_list_walk_fn) (u64 *spte); |
| 929 | static void pte_list_walk(unsigned long *pte_list, pte_list_walk_fn fn) |
| 930 | { |
| 931 | struct pte_list_desc *desc; |
| 932 | int i; |
| 933 | |
| 934 | if (!*pte_list) |
| 935 | return; |
| 936 | |
| 937 | if (!(*pte_list & 1)) |
| 938 | return fn((u64 *)*pte_list); |
| 939 | |
| 940 | desc = (struct pte_list_desc *)(*pte_list & ~1ul); |
| 941 | while (desc) { |
| 942 | for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i) |
| 943 | fn(desc->sptes[i]); |
| 944 | desc = desc->more; |
| 945 | } |
| 946 | } |
| 947 | |
| 948 | static unsigned long *__gfn_to_rmap(gfn_t gfn, int level, |
| 949 | struct kvm_memory_slot *slot) |
| 950 | { |
| 951 | struct kvm_lpage_info *linfo; |
| 952 | |
| 953 | if (likely(level == PT_PAGE_TABLE_LEVEL)) |
| 954 | return &slot->rmap[gfn - slot->base_gfn]; |
| 955 | |
| 956 | linfo = lpage_info_slot(gfn, slot, level); |
| 957 | return &linfo->rmap_pde; |
| 958 | } |
| 959 | |
| 960 | /* |
| 961 | * Take gfn and return the reverse mapping to it. |
| 962 | */ |
| 963 | static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level) |
| 964 | { |
| 965 | struct kvm_memory_slot *slot; |
| 966 | |
| 967 | slot = gfn_to_memslot(kvm, gfn); |
| 968 | return __gfn_to_rmap(gfn, level, slot); |
| 969 | } |
| 970 | |
| 971 | static bool rmap_can_add(struct kvm_vcpu *vcpu) |
| 972 | { |
| 973 | struct kvm_mmu_memory_cache *cache; |
| 974 | |
| 975 | cache = &vcpu->arch.mmu_pte_list_desc_cache; |
| 976 | return mmu_memory_cache_free_objects(cache); |
| 977 | } |
| 978 | |
| 979 | static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) |
| 980 | { |
| 981 | struct kvm_mmu_page *sp; |
| 982 | unsigned long *rmapp; |
| 983 | |
| 984 | sp = page_header(__pa(spte)); |
| 985 | kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn); |
| 986 | rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level); |
| 987 | return pte_list_add(vcpu, spte, rmapp); |
| 988 | } |
| 989 | |
| 990 | static u64 *rmap_next(unsigned long *rmapp, u64 *spte) |
| 991 | { |
| 992 | return pte_list_next(rmapp, spte); |
| 993 | } |
| 994 | |
| 995 | static void rmap_remove(struct kvm *kvm, u64 *spte) |
| 996 | { |
| 997 | struct kvm_mmu_page *sp; |
| 998 | gfn_t gfn; |
| 999 | unsigned long *rmapp; |
| 1000 | |
| 1001 | sp = page_header(__pa(spte)); |
| 1002 | gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt); |
| 1003 | rmapp = gfn_to_rmap(kvm, gfn, sp->role.level); |
| 1004 | pte_list_remove(spte, rmapp); |
| 1005 | } |
| 1006 | |
| 1007 | static void drop_spte(struct kvm *kvm, u64 *sptep) |
| 1008 | { |
| 1009 | if (mmu_spte_clear_track_bits(sptep)) |
| 1010 | rmap_remove(kvm, sptep); |
| 1011 | } |
| 1012 | |
| 1013 | static int __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp, int level) |
| 1014 | { |
| 1015 | u64 *spte = NULL; |
| 1016 | int write_protected = 0; |
| 1017 | |
| 1018 | while ((spte = rmap_next(rmapp, spte))) { |
| 1019 | BUG_ON(!(*spte & PT_PRESENT_MASK)); |
| 1020 | rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte); |
| 1021 | |
| 1022 | if (!is_writable_pte(*spte)) |
| 1023 | continue; |
| 1024 | |
| 1025 | if (level == PT_PAGE_TABLE_LEVEL) { |
| 1026 | mmu_spte_update(spte, *spte & ~PT_WRITABLE_MASK); |
| 1027 | } else { |
| 1028 | BUG_ON(!is_large_pte(*spte)); |
| 1029 | drop_spte(kvm, spte); |
| 1030 | --kvm->stat.lpages; |
| 1031 | spte = NULL; |
| 1032 | } |
| 1033 | |
| 1034 | write_protected = 1; |
| 1035 | } |
| 1036 | |
| 1037 | return write_protected; |
| 1038 | } |
| 1039 | |
| 1040 | int kvm_mmu_rmap_write_protect(struct kvm *kvm, u64 gfn, |
| 1041 | struct kvm_memory_slot *slot) |
| 1042 | { |
| 1043 | unsigned long *rmapp; |
| 1044 | int i, write_protected = 0; |
| 1045 | |
| 1046 | for (i = PT_PAGE_TABLE_LEVEL; |
| 1047 | i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) { |
| 1048 | rmapp = __gfn_to_rmap(gfn, i, slot); |
| 1049 | write_protected |= __rmap_write_protect(kvm, rmapp, i); |
| 1050 | } |
| 1051 | |
| 1052 | return write_protected; |
| 1053 | } |
| 1054 | |
| 1055 | static int rmap_write_protect(struct kvm *kvm, u64 gfn) |
| 1056 | { |
| 1057 | struct kvm_memory_slot *slot; |
| 1058 | |
| 1059 | slot = gfn_to_memslot(kvm, gfn); |
| 1060 | return kvm_mmu_rmap_write_protect(kvm, gfn, slot); |
| 1061 | } |
| 1062 | |
| 1063 | static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp, |
| 1064 | unsigned long data) |
| 1065 | { |
| 1066 | u64 *spte; |
| 1067 | int need_tlb_flush = 0; |
| 1068 | |
| 1069 | while ((spte = rmap_next(rmapp, NULL))) { |
| 1070 | BUG_ON(!(*spte & PT_PRESENT_MASK)); |
| 1071 | rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte, *spte); |
| 1072 | drop_spte(kvm, spte); |
| 1073 | need_tlb_flush = 1; |
| 1074 | } |
| 1075 | return need_tlb_flush; |
| 1076 | } |
| 1077 | |
| 1078 | static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp, |
| 1079 | unsigned long data) |
| 1080 | { |
| 1081 | int need_flush = 0; |
| 1082 | u64 *spte, new_spte; |
| 1083 | pte_t *ptep = (pte_t *)data; |
| 1084 | pfn_t new_pfn; |
| 1085 | |
| 1086 | WARN_ON(pte_huge(*ptep)); |
| 1087 | new_pfn = pte_pfn(*ptep); |
| 1088 | spte = rmap_next(rmapp, NULL); |
| 1089 | while (spte) { |
| 1090 | BUG_ON(!is_shadow_present_pte(*spte)); |
| 1091 | rmap_printk("kvm_set_pte_rmapp: spte %p %llx\n", spte, *spte); |
| 1092 | need_flush = 1; |
| 1093 | if (pte_write(*ptep)) { |
| 1094 | drop_spte(kvm, spte); |
| 1095 | spte = rmap_next(rmapp, NULL); |
| 1096 | } else { |
			new_spte = *spte & ~PT64_BASE_ADDR_MASK;
| 1098 | new_spte |= (u64)new_pfn << PAGE_SHIFT; |
| 1099 | |
| 1100 | new_spte &= ~PT_WRITABLE_MASK; |
| 1101 | new_spte &= ~SPTE_HOST_WRITEABLE; |
| 1102 | new_spte &= ~shadow_accessed_mask; |
| 1103 | mmu_spte_clear_track_bits(spte); |
| 1104 | mmu_spte_set(spte, new_spte); |
| 1105 | spte = rmap_next(rmapp, spte); |
| 1106 | } |
| 1107 | } |
| 1108 | if (need_flush) |
| 1109 | kvm_flush_remote_tlbs(kvm); |
| 1110 | |
| 1111 | return 0; |
| 1112 | } |
| 1113 | |
| 1114 | static int kvm_handle_hva(struct kvm *kvm, unsigned long hva, |
| 1115 | unsigned long data, |
| 1116 | int (*handler)(struct kvm *kvm, unsigned long *rmapp, |
| 1117 | unsigned long data)) |
| 1118 | { |
| 1119 | int j; |
| 1120 | int ret; |
| 1121 | int retval = 0; |
| 1122 | struct kvm_memslots *slots; |
| 1123 | struct kvm_memory_slot *memslot; |
| 1124 | |
| 1125 | slots = kvm_memslots(kvm); |
| 1126 | |
| 1127 | kvm_for_each_memslot(memslot, slots) { |
| 1128 | unsigned long start = memslot->userspace_addr; |
| 1129 | unsigned long end; |
| 1130 | |
| 1131 | end = start + (memslot->npages << PAGE_SHIFT); |
| 1132 | if (hva >= start && hva < end) { |
| 1133 | gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT; |
| 1134 | gfn_t gfn = memslot->base_gfn + gfn_offset; |
| 1135 | |
| 1136 | ret = handler(kvm, &memslot->rmap[gfn_offset], data); |
| 1137 | |
| 1138 | for (j = 0; j < KVM_NR_PAGE_SIZES - 1; ++j) { |
| 1139 | struct kvm_lpage_info *linfo; |
| 1140 | |
| 1141 | linfo = lpage_info_slot(gfn, memslot, |
| 1142 | PT_DIRECTORY_LEVEL + j); |
| 1143 | ret |= handler(kvm, &linfo->rmap_pde, data); |
| 1144 | } |
| 1145 | trace_kvm_age_page(hva, memslot, ret); |
| 1146 | retval |= ret; |
| 1147 | } |
| 1148 | } |
| 1149 | |
| 1150 | return retval; |
| 1151 | } |
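
/*
 * Example (hypothetical values): for a memslot with userspace_addr
 * 0x7f0000000000 and base_gfn 0x100, an hva of 0x7f0000003000 gives
 * gfn_offset == 3 and gfn == 0x103, so the handler is run on that gfn's
 * rmap chain for every supported page size.
 */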
| 1152 | |
| 1153 | int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) |
| 1154 | { |
| 1155 | return kvm_handle_hva(kvm, hva, 0, kvm_unmap_rmapp); |
| 1156 | } |
| 1157 | |
| 1158 | void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) |
| 1159 | { |
| 1160 | kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp); |
| 1161 | } |
| 1162 | |
| 1163 | static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp, |
| 1164 | unsigned long data) |
| 1165 | { |
| 1166 | u64 *spte; |
| 1167 | int young = 0; |
| 1168 | |
| 1169 | /* |
| 1170 | * Emulate the accessed bit for EPT, by checking if this page has |
| 1171 | * an EPT mapping, and clearing it if it does. On the next access, |
| 1172 | * a new EPT mapping will be established. |
| 1173 | * This has some overhead, but not as much as the cost of swapping |
| 1174 | * out actively used pages or breaking up actively used hugepages. |
| 1175 | */ |
| 1176 | if (!shadow_accessed_mask) |
| 1177 | return kvm_unmap_rmapp(kvm, rmapp, data); |
| 1178 | |
| 1179 | spte = rmap_next(rmapp, NULL); |
| 1180 | while (spte) { |
| 1181 | int _young; |
| 1182 | u64 _spte = *spte; |
| 1183 | BUG_ON(!(_spte & PT_PRESENT_MASK)); |
| 1184 | _young = _spte & PT_ACCESSED_MASK; |
| 1185 | if (_young) { |
| 1186 | young = 1; |
| 1187 | clear_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte); |
| 1188 | } |
| 1189 | spte = rmap_next(rmapp, spte); |
| 1190 | } |
| 1191 | return young; |
| 1192 | } |
| 1193 | |
| 1194 | static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp, |
| 1195 | unsigned long data) |
| 1196 | { |
| 1197 | u64 *spte; |
| 1198 | int young = 0; |
| 1199 | |
	/*
	 * If there's no access bit in the secondary pte set by the
	 * hardware, it's up to gup-fast/gup to set the access bit in
	 * the primary pte or in the page structure.
	 */
| 1205 | if (!shadow_accessed_mask) |
| 1206 | goto out; |
| 1207 | |
| 1208 | spte = rmap_next(rmapp, NULL); |
| 1209 | while (spte) { |
| 1210 | u64 _spte = *spte; |
| 1211 | BUG_ON(!(_spte & PT_PRESENT_MASK)); |
| 1212 | young = _spte & PT_ACCESSED_MASK; |
| 1213 | if (young) { |
| 1214 | young = 1; |
| 1215 | break; |
| 1216 | } |
| 1217 | spte = rmap_next(rmapp, spte); |
| 1218 | } |
| 1219 | out: |
| 1220 | return young; |
| 1221 | } |
| 1222 | |
| 1223 | #define RMAP_RECYCLE_THRESHOLD 1000 |
| 1224 | |
| 1225 | static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) |
| 1226 | { |
| 1227 | unsigned long *rmapp; |
| 1228 | struct kvm_mmu_page *sp; |
| 1229 | |
| 1230 | sp = page_header(__pa(spte)); |
| 1231 | |
| 1232 | rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level); |
| 1233 | |
| 1234 | kvm_unmap_rmapp(vcpu->kvm, rmapp, 0); |
| 1235 | kvm_flush_remote_tlbs(vcpu->kvm); |
| 1236 | } |
| 1237 | |
| 1238 | int kvm_age_hva(struct kvm *kvm, unsigned long hva) |
| 1239 | { |
| 1240 | return kvm_handle_hva(kvm, hva, 0, kvm_age_rmapp); |
| 1241 | } |
| 1242 | |
| 1243 | int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) |
| 1244 | { |
| 1245 | return kvm_handle_hva(kvm, hva, 0, kvm_test_age_rmapp); |
| 1246 | } |
| 1247 | |
| 1248 | #ifdef MMU_DEBUG |
| 1249 | static int is_empty_shadow_page(u64 *spt) |
| 1250 | { |
| 1251 | u64 *pos; |
| 1252 | u64 *end; |
| 1253 | |
| 1254 | for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++) |
| 1255 | if (is_shadow_present_pte(*pos)) { |
| 1256 | printk(KERN_ERR "%s: %p %llx\n", __func__, |
| 1257 | pos, *pos); |
| 1258 | return 0; |
| 1259 | } |
| 1260 | return 1; |
| 1261 | } |
| 1262 | #endif |
| 1263 | |
/*
 * This value is the sum of all of the kvm instances'
 * kvm->arch.n_used_mmu_pages values.  We need a global,
 * aggregate version in order to make the slab shrinker
 * faster.
 */
| 1270 | static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, int nr) |
| 1271 | { |
| 1272 | kvm->arch.n_used_mmu_pages += nr; |
| 1273 | percpu_counter_add(&kvm_total_used_mmu_pages, nr); |
| 1274 | } |
| 1275 | |
/*
 * Remove the sp from the shadow page cache.  After this call the sp
 * can no longer be found in the cache, but its shadow page table is
 * still valid.
 * Must be called under the protection of the mmu lock.
 */
| 1282 | static void kvm_mmu_isolate_page(struct kvm_mmu_page *sp) |
| 1283 | { |
| 1284 | ASSERT(is_empty_shadow_page(sp->spt)); |
| 1285 | hlist_del(&sp->hash_link); |
| 1286 | if (!sp->role.direct) |
| 1287 | free_page((unsigned long)sp->gfns); |
| 1288 | } |
| 1289 | |
/*
 * Free the shadow page table and the sp; this can be done
 * outside the protection of the mmu lock.
 */
| 1294 | static void kvm_mmu_free_page(struct kvm_mmu_page *sp) |
| 1295 | { |
| 1296 | list_del(&sp->link); |
| 1297 | free_page((unsigned long)sp->spt); |
| 1298 | kmem_cache_free(mmu_page_header_cache, sp); |
| 1299 | } |
| 1300 | |
| 1301 | static unsigned kvm_page_table_hashfn(gfn_t gfn) |
| 1302 | { |
| 1303 | return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1); |
| 1304 | } |
| 1305 | |
| 1306 | static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu, |
| 1307 | struct kvm_mmu_page *sp, u64 *parent_pte) |
| 1308 | { |
| 1309 | if (!parent_pte) |
| 1310 | return; |
| 1311 | |
| 1312 | pte_list_add(vcpu, parent_pte, &sp->parent_ptes); |
| 1313 | } |
| 1314 | |
| 1315 | static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp, |
| 1316 | u64 *parent_pte) |
| 1317 | { |
| 1318 | pte_list_remove(parent_pte, &sp->parent_ptes); |
| 1319 | } |
| 1320 | |
| 1321 | static void drop_parent_pte(struct kvm_mmu_page *sp, |
| 1322 | u64 *parent_pte) |
| 1323 | { |
| 1324 | mmu_page_remove_parent_pte(sp, parent_pte); |
| 1325 | mmu_spte_clear_no_track(parent_pte); |
| 1326 | } |
| 1327 | |
| 1328 | static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, |
| 1329 | u64 *parent_pte, int direct) |
| 1330 | { |
| 1331 | struct kvm_mmu_page *sp; |
| 1332 | sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, |
| 1333 | sizeof *sp); |
| 1334 | sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE); |
| 1335 | if (!direct) |
| 1336 | sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, |
| 1337 | PAGE_SIZE); |
| 1338 | set_page_private(virt_to_page(sp->spt), (unsigned long)sp); |
| 1339 | list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages); |
| 1340 | bitmap_zero(sp->slot_bitmap, KVM_MEM_SLOTS_NUM); |
| 1341 | sp->parent_ptes = 0; |
| 1342 | mmu_page_add_parent_pte(vcpu, sp, parent_pte); |
| 1343 | kvm_mod_used_mmu_pages(vcpu->kvm, +1); |
| 1344 | return sp; |
| 1345 | } |
| 1346 | |
| 1347 | static void mark_unsync(u64 *spte); |
| 1348 | static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp) |
| 1349 | { |
| 1350 | pte_list_walk(&sp->parent_ptes, mark_unsync); |
| 1351 | } |
| 1352 | |
| 1353 | static void mark_unsync(u64 *spte) |
| 1354 | { |
| 1355 | struct kvm_mmu_page *sp; |
| 1356 | unsigned int index; |
| 1357 | |
| 1358 | sp = page_header(__pa(spte)); |
| 1359 | index = spte - sp->spt; |
| 1360 | if (__test_and_set_bit(index, sp->unsync_child_bitmap)) |
| 1361 | return; |
| 1362 | if (sp->unsync_children++) |
| 1363 | return; |
| 1364 | kvm_mmu_mark_parents_unsync(sp); |
| 1365 | } |
| 1366 | |
| 1367 | static int nonpaging_sync_page(struct kvm_vcpu *vcpu, |
| 1368 | struct kvm_mmu_page *sp) |
| 1369 | { |
| 1370 | return 1; |
| 1371 | } |
| 1372 | |
| 1373 | static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva) |
| 1374 | { |
| 1375 | } |
| 1376 | |
| 1377 | static void nonpaging_update_pte(struct kvm_vcpu *vcpu, |
| 1378 | struct kvm_mmu_page *sp, u64 *spte, |
| 1379 | const void *pte) |
| 1380 | { |
| 1381 | WARN_ON(1); |
| 1382 | } |
| 1383 | |
| 1384 | #define KVM_PAGE_ARRAY_NR 16 |
| 1385 | |
| 1386 | struct kvm_mmu_pages { |
| 1387 | struct mmu_page_and_offset { |
| 1388 | struct kvm_mmu_page *sp; |
| 1389 | unsigned int idx; |
| 1390 | } page[KVM_PAGE_ARRAY_NR]; |
| 1391 | unsigned int nr; |
| 1392 | }; |
| 1393 | |
| 1394 | static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp, |
| 1395 | int idx) |
| 1396 | { |
| 1397 | int i; |
| 1398 | |
| 1399 | if (sp->unsync) |
		for (i = 0; i < pvec->nr; i++)
| 1401 | if (pvec->page[i].sp == sp) |
| 1402 | return 0; |
| 1403 | |
| 1404 | pvec->page[pvec->nr].sp = sp; |
| 1405 | pvec->page[pvec->nr].idx = idx; |
| 1406 | pvec->nr++; |
| 1407 | return (pvec->nr == KVM_PAGE_ARRAY_NR); |
| 1408 | } |
| 1409 | |
| 1410 | static int __mmu_unsync_walk(struct kvm_mmu_page *sp, |
| 1411 | struct kvm_mmu_pages *pvec) |
| 1412 | { |
| 1413 | int i, ret, nr_unsync_leaf = 0; |
| 1414 | |
| 1415 | for_each_set_bit(i, sp->unsync_child_bitmap, 512) { |
| 1416 | struct kvm_mmu_page *child; |
| 1417 | u64 ent = sp->spt[i]; |
| 1418 | |
| 1419 | if (!is_shadow_present_pte(ent) || is_large_pte(ent)) |
| 1420 | goto clear_child_bitmap; |
| 1421 | |
| 1422 | child = page_header(ent & PT64_BASE_ADDR_MASK); |
| 1423 | |
| 1424 | if (child->unsync_children) { |
| 1425 | if (mmu_pages_add(pvec, child, i)) |
| 1426 | return -ENOSPC; |
| 1427 | |
| 1428 | ret = __mmu_unsync_walk(child, pvec); |
| 1429 | if (!ret) |
| 1430 | goto clear_child_bitmap; |
| 1431 | else if (ret > 0) |
| 1432 | nr_unsync_leaf += ret; |
| 1433 | else |
| 1434 | return ret; |
| 1435 | } else if (child->unsync) { |
| 1436 | nr_unsync_leaf++; |
| 1437 | if (mmu_pages_add(pvec, child, i)) |
| 1438 | return -ENOSPC; |
| 1439 | } else |
| 1440 | goto clear_child_bitmap; |
| 1441 | |
| 1442 | continue; |
| 1443 | |
| 1444 | clear_child_bitmap: |
| 1445 | __clear_bit(i, sp->unsync_child_bitmap); |
| 1446 | sp->unsync_children--; |
| 1447 | WARN_ON((int)sp->unsync_children < 0); |
| 1448 | } |
| 1449 | |
| 1450 | |
| 1451 | return nr_unsync_leaf; |
| 1452 | } |
| 1453 | |
| 1454 | static int mmu_unsync_walk(struct kvm_mmu_page *sp, |
| 1455 | struct kvm_mmu_pages *pvec) |
| 1456 | { |
| 1457 | if (!sp->unsync_children) |
| 1458 | return 0; |
| 1459 | |
| 1460 | mmu_pages_add(pvec, sp, 0); |
| 1461 | return __mmu_unsync_walk(sp, pvec); |
| 1462 | } |
| 1463 | |
| 1464 | static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp) |
| 1465 | { |
| 1466 | WARN_ON(!sp->unsync); |
| 1467 | trace_kvm_mmu_sync_page(sp); |
| 1468 | sp->unsync = 0; |
| 1469 | --kvm->stat.mmu_unsync; |
| 1470 | } |
| 1471 | |
| 1472 | static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp, |
| 1473 | struct list_head *invalid_list); |
| 1474 | static void kvm_mmu_commit_zap_page(struct kvm *kvm, |
| 1475 | struct list_head *invalid_list); |
| 1476 | |
| 1477 | #define for_each_gfn_sp(kvm, sp, gfn, pos) \ |
| 1478 | hlist_for_each_entry(sp, pos, \ |
| 1479 | &(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link) \ |
| 1480 | if ((sp)->gfn != (gfn)) {} else |
| 1481 | |
| 1482 | #define for_each_gfn_indirect_valid_sp(kvm, sp, gfn, pos) \ |
| 1483 | hlist_for_each_entry(sp, pos, \ |
| 1484 | &(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link) \ |
| 1485 | if ((sp)->gfn != (gfn) || (sp)->role.direct || \ |
| 1486 | (sp)->role.invalid) {} else |
| 1487 | |
| 1488 | /* @sp->gfn should be write-protected at the call site */ |
| 1489 | static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, |
| 1490 | struct list_head *invalid_list, bool clear_unsync) |
| 1491 | { |
| 1492 | if (sp->role.cr4_pae != !!is_pae(vcpu)) { |
| 1493 | kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list); |
| 1494 | return 1; |
| 1495 | } |
| 1496 | |
| 1497 | if (clear_unsync) |
| 1498 | kvm_unlink_unsync_page(vcpu->kvm, sp); |
| 1499 | |
| 1500 | if (vcpu->arch.mmu.sync_page(vcpu, sp)) { |
| 1501 | kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list); |
| 1502 | return 1; |
| 1503 | } |
| 1504 | |
| 1505 | kvm_mmu_flush_tlb(vcpu); |
| 1506 | return 0; |
| 1507 | } |
| 1508 | |
| 1509 | static int kvm_sync_page_transient(struct kvm_vcpu *vcpu, |
| 1510 | struct kvm_mmu_page *sp) |
| 1511 | { |
| 1512 | LIST_HEAD(invalid_list); |
| 1513 | int ret; |
| 1514 | |
| 1515 | ret = __kvm_sync_page(vcpu, sp, &invalid_list, false); |
| 1516 | if (ret) |
| 1517 | kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); |
| 1518 | |
| 1519 | return ret; |
| 1520 | } |
| 1521 | |
| 1522 | #ifdef CONFIG_KVM_MMU_AUDIT |
| 1523 | #include "mmu_audit.c" |
| 1524 | #else |
| 1525 | static void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) { } |
| 1526 | static void mmu_audit_disable(void) { } |
| 1527 | #endif |
| 1528 | |
| 1529 | static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, |
| 1530 | struct list_head *invalid_list) |
| 1531 | { |
| 1532 | return __kvm_sync_page(vcpu, sp, invalid_list, true); |
| 1533 | } |
| 1534 | |
| 1535 | /* @gfn should be write-protected at the call site */ |
| 1536 | static void kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn) |
| 1537 | { |
| 1538 | struct kvm_mmu_page *s; |
| 1539 | struct hlist_node *node; |
| 1540 | LIST_HEAD(invalid_list); |
| 1541 | bool flush = false; |
| 1542 | |
| 1543 | for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) { |
| 1544 | if (!s->unsync) |
| 1545 | continue; |
| 1546 | |
| 1547 | WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL); |
| 1548 | kvm_unlink_unsync_page(vcpu->kvm, s); |
| 1549 | if ((s->role.cr4_pae != !!is_pae(vcpu)) || |
| 1550 | (vcpu->arch.mmu.sync_page(vcpu, s))) { |
| 1551 | kvm_mmu_prepare_zap_page(vcpu->kvm, s, &invalid_list); |
| 1552 | continue; |
| 1553 | } |
| 1554 | flush = true; |
| 1555 | } |
| 1556 | |
| 1557 | kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); |
| 1558 | if (flush) |
| 1559 | kvm_mmu_flush_tlb(vcpu); |
| 1560 | } |
| 1561 | |
| 1562 | struct mmu_page_path { |
| 1563 | struct kvm_mmu_page *parent[PT64_ROOT_LEVEL-1]; |
| 1564 | unsigned int idx[PT64_ROOT_LEVEL-1]; |
| 1565 | }; |
| 1566 | |
| 1567 | #define for_each_sp(pvec, sp, parents, i) \ |
| 1568 | for (i = mmu_pages_next(&pvec, &parents, -1), \ |
| 1569 | sp = pvec.page[i].sp; \ |
| 1570 | i < pvec.nr && ({ sp = pvec.page[i].sp; 1;}); \ |
| 1571 | i = mmu_pages_next(&pvec, &parents, i)) |
| 1572 | |
| 1573 | static int mmu_pages_next(struct kvm_mmu_pages *pvec, |
| 1574 | struct mmu_page_path *parents, |
| 1575 | int i) |
| 1576 | { |
| 1577 | int n; |
| 1578 | |
| 1579 | for (n = i+1; n < pvec->nr; n++) { |
| 1580 | struct kvm_mmu_page *sp = pvec->page[n].sp; |
| 1581 | |
| 1582 | if (sp->role.level == PT_PAGE_TABLE_LEVEL) { |
| 1583 | parents->idx[0] = pvec->page[n].idx; |
| 1584 | return n; |
| 1585 | } |
| 1586 | |
| 1587 | parents->parent[sp->role.level-2] = sp; |
| 1588 | parents->idx[sp->role.level-1] = pvec->page[n].idx; |
| 1589 | } |
| 1590 | |
| 1591 | return n; |
| 1592 | } |
| 1593 | |
| 1594 | static void mmu_pages_clear_parents(struct mmu_page_path *parents) |
| 1595 | { |
| 1596 | struct kvm_mmu_page *sp; |
| 1597 | unsigned int level = 0; |
| 1598 | |
| 1599 | do { |
| 1600 | unsigned int idx = parents->idx[level]; |
| 1601 | |
| 1602 | sp = parents->parent[level]; |
| 1603 | if (!sp) |
| 1604 | return; |
| 1605 | |
| 1606 | --sp->unsync_children; |
| 1607 | WARN_ON((int)sp->unsync_children < 0); |
| 1608 | __clear_bit(idx, sp->unsync_child_bitmap); |
| 1609 | level++; |
| 1610 | } while (level < PT64_ROOT_LEVEL-1 && !sp->unsync_children); |
| 1611 | } |
| 1612 | |
| 1613 | static void kvm_mmu_pages_init(struct kvm_mmu_page *parent, |
| 1614 | struct mmu_page_path *parents, |
| 1615 | struct kvm_mmu_pages *pvec) |
| 1616 | { |
| 1617 | parents->parent[parent->role.level-1] = NULL; |
| 1618 | pvec->nr = 0; |
| 1619 | } |
| 1620 | |
| 1621 | static void mmu_sync_children(struct kvm_vcpu *vcpu, |
| 1622 | struct kvm_mmu_page *parent) |
| 1623 | { |
| 1624 | int i; |
| 1625 | struct kvm_mmu_page *sp; |
| 1626 | struct mmu_page_path parents; |
| 1627 | struct kvm_mmu_pages pages; |
| 1628 | LIST_HEAD(invalid_list); |
| 1629 | |
| 1630 | kvm_mmu_pages_init(parent, &parents, &pages); |
| 1631 | while (mmu_unsync_walk(parent, &pages)) { |
| 1632 | int protected = 0; |
| 1633 | |
| 1634 | for_each_sp(pages, sp, parents, i) |
| 1635 | protected |= rmap_write_protect(vcpu->kvm, sp->gfn); |
| 1636 | |
| 1637 | if (protected) |
| 1638 | kvm_flush_remote_tlbs(vcpu->kvm); |
| 1639 | |
| 1640 | for_each_sp(pages, sp, parents, i) { |
| 1641 | kvm_sync_page(vcpu, sp, &invalid_list); |
| 1642 | mmu_pages_clear_parents(&parents); |
| 1643 | } |
| 1644 | kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); |
| 1645 | cond_resched_lock(&vcpu->kvm->mmu_lock); |
| 1646 | kvm_mmu_pages_init(parent, &parents, &pages); |
| 1647 | } |
| 1648 | } |
| 1649 | |
| 1650 | static void init_shadow_page_table(struct kvm_mmu_page *sp) |
| 1651 | { |
| 1652 | int i; |
| 1653 | |
| 1654 | for (i = 0; i < PT64_ENT_PER_PAGE; ++i) |
| 1655 | sp->spt[i] = 0ull; |
| 1656 | } |
| 1657 | |
| 1658 | static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp) |
| 1659 | { |
| 1660 | sp->write_flooding_count = 0; |
| 1661 | } |
| 1662 | |
| 1663 | static void clear_sp_write_flooding_count(u64 *spte) |
| 1664 | { |
| 1665 | struct kvm_mmu_page *sp = page_header(__pa(spte)); |
| 1666 | |
| 1667 | __clear_sp_write_flooding_count(sp); |
| 1668 | } |
| 1669 | |
| 1670 | static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, |
| 1671 | gfn_t gfn, |
| 1672 | gva_t gaddr, |
| 1673 | unsigned level, |
| 1674 | int direct, |
| 1675 | unsigned access, |
| 1676 | u64 *parent_pte) |
| 1677 | { |
| 1678 | union kvm_mmu_page_role role; |
| 1679 | unsigned quadrant; |
| 1680 | struct kvm_mmu_page *sp; |
| 1681 | struct hlist_node *node; |
| 1682 | bool need_sync = false; |
| 1683 | |
| 1684 | role = vcpu->arch.mmu.base_role; |
| 1685 | role.level = level; |
| 1686 | role.direct = direct; |
| 1687 | if (role.direct) |
| 1688 | role.cr4_pae = 0; |
| 1689 | role.access = access; |
| 1690 | if (!vcpu->arch.mmu.direct_map |
| 1691 | && vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) { |
| 1692 | quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level)); |
| 1693 | quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1; |
| 1694 | role.quadrant = quadrant; |
| 1695 | } |
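	/*
	 * Quadrant example (illustrative): a 32-bit non-PAE guest page
	 * table has 1024 entries (PT32_PT_BITS == 10) while a shadow
	 * page holds 512 (PT64_PT_BITS == 9), so one guest table is
	 * shadowed by several sps and role.quadrant records which part
	 * of the guest table this sp covers.
	 */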
| 1696 | for_each_gfn_sp(vcpu->kvm, sp, gfn, node) { |
| 1697 | if (!need_sync && sp->unsync) |
| 1698 | need_sync = true; |
| 1699 | |
| 1700 | if (sp->role.word != role.word) |
| 1701 | continue; |
| 1702 | |
| 1703 | if (sp->unsync && kvm_sync_page_transient(vcpu, sp)) |
| 1704 | break; |
| 1705 | |
| 1706 | mmu_page_add_parent_pte(vcpu, sp, parent_pte); |
| 1707 | if (sp->unsync_children) { |
| 1708 | kvm_make_request(KVM_REQ_MMU_SYNC, vcpu); |
| 1709 | kvm_mmu_mark_parents_unsync(sp); |
| 1710 | } else if (sp->unsync) |
| 1711 | kvm_mmu_mark_parents_unsync(sp); |
| 1712 | |
| 1713 | __clear_sp_write_flooding_count(sp); |
| 1714 | trace_kvm_mmu_get_page(sp, false); |
| 1715 | return sp; |
| 1716 | } |
| 1717 | ++vcpu->kvm->stat.mmu_cache_miss; |
| 1718 | sp = kvm_mmu_alloc_page(vcpu, parent_pte, direct); |
| 1719 | if (!sp) |
| 1720 | return sp; |
| 1721 | sp->gfn = gfn; |
| 1722 | sp->role = role; |
| 1723 | hlist_add_head(&sp->hash_link, |
| 1724 | &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)]); |
| 1725 | if (!direct) { |
| 1726 | if (rmap_write_protect(vcpu->kvm, gfn)) |
| 1727 | kvm_flush_remote_tlbs(vcpu->kvm); |
| 1728 | if (level > PT_PAGE_TABLE_LEVEL && need_sync) |
| 1729 | kvm_sync_pages(vcpu, gfn); |
| 1730 | |
| 1731 | account_shadowed(vcpu->kvm, gfn); |
| 1732 | } |
| 1733 | init_shadow_page_table(sp); |
| 1734 | trace_kvm_mmu_get_page(sp, true); |
| 1735 | return sp; |
| 1736 | } |
| 1737 | |
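|      | /*
|      |  * Iterator over the shadow page table for a given address, walking from
|      |  * the current root down towards the leaf (used via for_each_shadow_entry
|      |  * below).  When the root is the PAE pae_root table, the walk starts one
|      |  * level lower, at the PDPTE selected by bits 31:30 of the address.
|      |  */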
| 1738 | static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator, |
| 1739 | struct kvm_vcpu *vcpu, u64 addr) |
| 1740 | { |
| 1741 | iterator->addr = addr; |
| 1742 | iterator->shadow_addr = vcpu->arch.mmu.root_hpa; |
| 1743 | iterator->level = vcpu->arch.mmu.shadow_root_level; |
| 1744 | |
| 1745 | if (iterator->level == PT64_ROOT_LEVEL && |
| 1746 | vcpu->arch.mmu.root_level < PT64_ROOT_LEVEL && |
| 1747 | !vcpu->arch.mmu.direct_map) |
| 1748 | --iterator->level; |
| 1749 | |
| 1750 | if (iterator->level == PT32E_ROOT_LEVEL) { |
| 1751 | iterator->shadow_addr |
| 1752 | = vcpu->arch.mmu.pae_root[(addr >> 30) & 3]; |
| 1753 | iterator->shadow_addr &= PT64_BASE_ADDR_MASK; |
| 1754 | --iterator->level; |
| 1755 | if (!iterator->shadow_addr) |
| 1756 | iterator->level = 0; |
| 1757 | } |
| 1758 | } |
| 1759 | |
| 1760 | static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator) |
| 1761 | { |
| 1762 | if (iterator->level < PT_PAGE_TABLE_LEVEL) |
| 1763 | return false; |
| 1764 | |
| 1765 | iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level); |
| 1766 | iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index; |
| 1767 | return true; |
| 1768 | } |
| 1769 | |
| 1770 | static void __shadow_walk_next(struct kvm_shadow_walk_iterator *iterator, |
| 1771 | u64 spte) |
| 1772 | { |
| 1773 | if (is_last_spte(spte, iterator->level)) { |
| 1774 | iterator->level = 0; |
| 1775 | return; |
| 1776 | } |
| 1777 | |
| 1778 | iterator->shadow_addr = spte & PT64_BASE_ADDR_MASK; |
| 1779 | --iterator->level; |
| 1780 | } |
| 1781 | |
| 1782 | static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator) |
| 1783 | { |
| 1784 | return __shadow_walk_next(iterator, *iterator->sptep); |
| 1785 | } |
| 1786 | |
| 1787 | static void link_shadow_page(u64 *sptep, struct kvm_mmu_page *sp) |
| 1788 | { |
| 1789 | u64 spte; |
| 1790 | |
| 1791 | spte = __pa(sp->spt) |
| 1792 | | PT_PRESENT_MASK | PT_ACCESSED_MASK |
| 1793 | | PT_WRITABLE_MASK | PT_USER_MASK; |
| 1794 | mmu_spte_set(sptep, spte); |
| 1795 | } |
| 1796 | |
| 1797 | static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep) |
| 1798 | { |
| 1799 | if (is_large_pte(*sptep)) { |
| 1800 | drop_spte(vcpu->kvm, sptep); |
| 1801 | --vcpu->kvm->stat.lpages; |
| 1802 | kvm_flush_remote_tlbs(vcpu->kvm); |
| 1803 | } |
| 1804 | } |
| 1805 | |
| 1806 | static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep, |
| 1807 | unsigned direct_access) |
| 1808 | { |
| 1809 | if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) { |
| 1810 | struct kvm_mmu_page *child; |
| 1811 | |
| 1812 | /* |
| 1813 | * For the direct sp, if the guest pte's dirty bit changed
| 1814 | * from clean to dirty, it would corrupt the sp's access by
| 1815 | * allowing writes through a read-only sp, so we should
| 1816 | * update the spte at this point to get a new sp with the
| 1817 | * correct access.
| 1818 | */ |
| 1819 | child = page_header(*sptep & PT64_BASE_ADDR_MASK); |
| 1820 | if (child->role.access == direct_access) |
| 1821 | return; |
| 1822 | |
| 1823 | drop_parent_pte(child, sptep); |
| 1824 | kvm_flush_remote_tlbs(vcpu->kvm); |
| 1825 | } |
| 1826 | } |
| 1827 | |
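|      | /*
|      |  * Clear a single spte of @sp.  Returns true if the spte was present:
|      |  * a last-level spte is dropped, while a non-last spte has its child
|      |  * shadow page unlinked from this parent.  An mmio spte is cleared
|      |  * without rmap bookkeeping and false is returned.
|      |  */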
| 1828 | static bool mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp, |
| 1829 | u64 *spte) |
| 1830 | { |
| 1831 | u64 pte; |
| 1832 | struct kvm_mmu_page *child; |
| 1833 | |
| 1834 | pte = *spte; |
| 1835 | if (is_shadow_present_pte(pte)) { |
| 1836 | if (is_last_spte(pte, sp->role.level)) { |
| 1837 | drop_spte(kvm, spte); |
| 1838 | if (is_large_pte(pte)) |
| 1839 | --kvm->stat.lpages; |
| 1840 | } else { |
| 1841 | child = page_header(pte & PT64_BASE_ADDR_MASK); |
| 1842 | drop_parent_pte(child, spte); |
| 1843 | } |
| 1844 | return true; |
| 1845 | } |
| 1846 | |
| 1847 | if (is_mmio_spte(pte)) |
| 1848 | mmu_spte_clear_no_track(spte); |
| 1849 | |
| 1850 | return false; |
| 1851 | } |
| 1852 | |
| 1853 | static void kvm_mmu_page_unlink_children(struct kvm *kvm, |
| 1854 | struct kvm_mmu_page *sp) |
| 1855 | { |
| 1856 | unsigned i; |
| 1857 | |
| 1858 | for (i = 0; i < PT64_ENT_PER_PAGE; ++i) |
| 1859 | mmu_page_zap_pte(kvm, sp, sp->spt + i); |
| 1860 | } |
| 1861 | |
| 1862 | static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte) |
| 1863 | { |
| 1864 | mmu_page_remove_parent_pte(sp, parent_pte); |
| 1865 | } |
| 1866 | |
| 1867 | static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp) |
| 1868 | { |
| 1869 | u64 *parent_pte; |
| 1870 | |
| 1871 | while ((parent_pte = pte_list_next(&sp->parent_ptes, NULL))) |
| 1872 | drop_parent_pte(sp, parent_pte); |
| 1873 | } |
| 1874 | |
| 1875 | static int mmu_zap_unsync_children(struct kvm *kvm, |
| 1876 | struct kvm_mmu_page *parent, |
| 1877 | struct list_head *invalid_list) |
| 1878 | { |
| 1879 | int i, zapped = 0; |
| 1880 | struct mmu_page_path parents; |
| 1881 | struct kvm_mmu_pages pages; |
| 1882 | |
| 1883 | if (parent->role.level == PT_PAGE_TABLE_LEVEL) |
| 1884 | return 0; |
| 1885 | |
| 1886 | kvm_mmu_pages_init(parent, &parents, &pages); |
| 1887 | while (mmu_unsync_walk(parent, &pages)) { |
| 1888 | struct kvm_mmu_page *sp; |
| 1889 | |
| 1890 | for_each_sp(pages, sp, parents, i) { |
| 1891 | kvm_mmu_prepare_zap_page(kvm, sp, invalid_list); |
| 1892 | mmu_pages_clear_parents(&parents); |
| 1893 | zapped++; |
| 1894 | } |
| 1895 | kvm_mmu_pages_init(parent, &parents, &pages); |
| 1896 | } |
| 1897 | |
| 1898 | return zapped; |
| 1899 | } |
| 1900 | |
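|      | /*
|      |  * First half of the two-phase zap: unlink @sp (and any unsync
|      |  * children) from the shadow page tables and move it to @invalid_list.
|      |  * The pages are only flushed from remote TLBs and freed later, in
|      |  * kvm_mmu_commit_zap_page(), possibly via RCU when lockless walkers
|      |  * are active.  Returns the number of pages zapped (including @sp
|      |  * itself when it has no root references).
|      |  */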
| 1901 | static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp, |
| 1902 | struct list_head *invalid_list) |
| 1903 | { |
| 1904 | int ret; |
| 1905 | |
| 1906 | trace_kvm_mmu_prepare_zap_page(sp); |
| 1907 | ++kvm->stat.mmu_shadow_zapped; |
| 1908 | ret = mmu_zap_unsync_children(kvm, sp, invalid_list); |
| 1909 | kvm_mmu_page_unlink_children(kvm, sp); |
| 1910 | kvm_mmu_unlink_parents(kvm, sp); |
| 1911 | if (!sp->role.invalid && !sp->role.direct) |
| 1912 | unaccount_shadowed(kvm, sp->gfn); |
| 1913 | if (sp->unsync) |
| 1914 | kvm_unlink_unsync_page(kvm, sp); |
| 1915 | if (!sp->root_count) { |
| 1916 | /* Count self */ |
| 1917 | ret++; |
| 1918 | list_move(&sp->link, invalid_list); |
| 1919 | kvm_mod_used_mmu_pages(kvm, -1); |
| 1920 | } else { |
| 1921 | list_move(&sp->link, &kvm->arch.active_mmu_pages); |
| 1922 | kvm_reload_remote_mmus(kvm); |
| 1923 | } |
| 1924 | |
| 1925 | sp->role.invalid = 1; |
| 1926 | return ret; |
| 1927 | } |
| 1928 | |
| 1929 | static void kvm_mmu_isolate_pages(struct list_head *invalid_list) |
| 1930 | { |
| 1931 | struct kvm_mmu_page *sp; |
| 1932 | |
| 1933 | list_for_each_entry(sp, invalid_list, link) |
| 1934 | kvm_mmu_isolate_page(sp); |
| 1935 | } |
| 1936 | |
| 1937 | static void free_pages_rcu(struct rcu_head *head) |
| 1938 | { |
| 1939 | struct kvm_mmu_page *next, *sp; |
| 1940 | |
| 1941 | sp = container_of(head, struct kvm_mmu_page, rcu); |
| 1942 | while (sp) { |
| 1943 | if (!list_empty(&sp->link)) |
| 1944 | next = list_first_entry(&sp->link, |
| 1945 | struct kvm_mmu_page, link); |
| 1946 | else |
| 1947 | next = NULL; |
| 1948 | kvm_mmu_free_page(sp); |
| 1949 | sp = next; |
| 1950 | } |
| 1951 | } |
| 1952 | |
| 1953 | static void kvm_mmu_commit_zap_page(struct kvm *kvm, |
| 1954 | struct list_head *invalid_list) |
| 1955 | { |
| 1956 | struct kvm_mmu_page *sp; |
| 1957 | |
| 1958 | if (list_empty(invalid_list)) |
| 1959 | return; |
| 1960 | |
| 1961 | kvm_flush_remote_tlbs(kvm); |
| 1962 | |
| 1963 | if (atomic_read(&kvm->arch.reader_counter)) { |
| 1964 | kvm_mmu_isolate_pages(invalid_list); |
| 1965 | sp = list_first_entry(invalid_list, struct kvm_mmu_page, link); |
| 1966 | list_del_init(invalid_list); |
| 1967 | |
| 1968 | trace_kvm_mmu_delay_free_pages(sp); |
| 1969 | call_rcu(&sp->rcu, free_pages_rcu); |
| 1970 | return; |
| 1971 | } |
| 1972 | |
| 1973 | do { |
| 1974 | sp = list_first_entry(invalid_list, struct kvm_mmu_page, link); |
| 1975 | WARN_ON(!sp->role.invalid || sp->root_count); |
| 1976 | kvm_mmu_isolate_page(sp); |
| 1977 | kvm_mmu_free_page(sp); |
| 1978 | } while (!list_empty(invalid_list)); |
| 1979 | |
| 1980 | } |
| 1981 | |
| 1982 | /* |
| 1983 | * Change the number of mmu pages allocated to the vm.
| 1984 | * Note: if goal_nr_mmu_pages is too small, you will get a deadlock.
| 1985 | */ |
| 1986 | void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages) |
| 1987 | { |
| 1988 | LIST_HEAD(invalid_list); |
| 1989 | /* |
| 1990 | * If we set the number of mmu pages to be smaller than the
| 1991 | * number of active pages, we must free some mmu pages before we
| 1992 | * change the value.
| 1993 | */ |
| 1994 | |
| 1995 | if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) { |
| 1996 | while (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages && |
| 1997 | !list_empty(&kvm->arch.active_mmu_pages)) { |
| 1998 | struct kvm_mmu_page *page; |
| 1999 | |
| 2000 | page = container_of(kvm->arch.active_mmu_pages.prev, |
| 2001 | struct kvm_mmu_page, link); |
| 2002 | kvm_mmu_prepare_zap_page(kvm, page, &invalid_list); |
| 2003 | } |
| 2004 | kvm_mmu_commit_zap_page(kvm, &invalid_list); |
| 2005 | goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages; |
| 2006 | } |
| 2007 | |
| 2008 | kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages; |
| 2009 | } |
| 2010 | |
| 2011 | int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn) |
| 2012 | { |
| 2013 | struct kvm_mmu_page *sp; |
| 2014 | struct hlist_node *node; |
| 2015 | LIST_HEAD(invalid_list); |
| 2016 | int r; |
| 2017 | |
| 2018 | pgprintk("%s: looking for gfn %llx\n", __func__, gfn); |
| 2019 | r = 0; |
| 2020 | spin_lock(&kvm->mmu_lock); |
| 2021 | for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node) { |
| 2022 | pgprintk("%s: gfn %llx role %x\n", __func__, gfn, |
| 2023 | sp->role.word); |
| 2024 | r = 1; |
| 2025 | kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list); |
| 2026 | } |
| 2027 | kvm_mmu_commit_zap_page(kvm, &invalid_list); |
| 2028 | spin_unlock(&kvm->mmu_lock); |
| 2029 | |
| 2030 | return r; |
| 2031 | } |
| 2032 | EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page); |
| 2033 | |
| 2034 | static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn) |
| 2035 | { |
| 2036 | int slot = memslot_id(kvm, gfn); |
| 2037 | struct kvm_mmu_page *sp = page_header(__pa(pte)); |
| 2038 | |
| 2039 | __set_bit(slot, sp->slot_bitmap); |
| 2040 | } |
| 2041 | |
| 2042 | /* |
| 2043 | * The function is based on mtrr_type_lookup() in |
| 2044 | * arch/x86/kernel/cpu/mtrr/generic.c |
| 2045 | */ |
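|      | /*
|      |  * The fixed ranges cover: 8 x 64K entries for 0-0x7FFFF, 16 x 16K
|      |  * entries for 0x80000-0xBFFFF and 64 x 4K entries for 0xC0000-0xFFFFF.
|      |  * For example, start = 0xA0000 hits the second branch below:
|      |  * idx = 8 + ((0xA0000 - 0x80000) >> 14) = 16, so the type comes from
|      |  * fixed_ranges[16].
|      |  */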
| 2046 | static int get_mtrr_type(struct mtrr_state_type *mtrr_state, |
| 2047 | u64 start, u64 end) |
| 2048 | { |
| 2049 | int i; |
| 2050 | u64 base, mask; |
| 2051 | u8 prev_match, curr_match; |
| 2052 | int num_var_ranges = KVM_NR_VAR_MTRR; |
| 2053 | |
| 2054 | if (!mtrr_state->enabled) |
| 2055 | return 0xFF; |
| 2056 | |
| 2057 | /* Make end inclusive instead of exclusive */
| 2058 | end--; |
| 2059 | |
| 2060 | /* Look in fixed ranges. Just return the type as per start */ |
| 2061 | if (mtrr_state->have_fixed && (start < 0x100000)) { |
| 2062 | int idx; |
| 2063 | |
| 2064 | if (start < 0x80000) { |
| 2065 | idx = 0; |
| 2066 | idx += (start >> 16); |
| 2067 | return mtrr_state->fixed_ranges[idx]; |
| 2068 | } else if (start < 0xC0000) { |
| 2069 | idx = 1 * 8; |
| 2070 | idx += ((start - 0x80000) >> 14); |
| 2071 | return mtrr_state->fixed_ranges[idx]; |
| 2072 | } else if (start < 0x1000000) { |
| 2073 | idx = 3 * 8; |
| 2074 | idx += ((start - 0xC0000) >> 12); |
| 2075 | return mtrr_state->fixed_ranges[idx]; |
| 2076 | } |
| 2077 | } |
| 2078 | |
| 2079 | /* |
| 2080 | * Look in the variable ranges.
| 2081 | * Look for multiple ranges matching this address and pick the type
| 2082 | * as per MTRR precedence.
| 2083 | */ |
| 2084 | if (!(mtrr_state->enabled & 2)) |
| 2085 | return mtrr_state->def_type; |
| 2086 | |
| 2087 | prev_match = 0xFF; |
| 2088 | for (i = 0; i < num_var_ranges; ++i) { |
| 2089 | unsigned short start_state, end_state; |
| 2090 | |
| 2091 | if (!(mtrr_state->var_ranges[i].mask_lo & (1 << 11))) |
| 2092 | continue; |
| 2093 | |
| 2094 | base = (((u64)mtrr_state->var_ranges[i].base_hi) << 32) + |
| 2095 | (mtrr_state->var_ranges[i].base_lo & PAGE_MASK); |
| 2096 | mask = (((u64)mtrr_state->var_ranges[i].mask_hi) << 32) + |
| 2097 | (mtrr_state->var_ranges[i].mask_lo & PAGE_MASK); |
| 2098 | |
| 2099 | start_state = ((start & mask) == (base & mask)); |
| 2100 | end_state = ((end & mask) == (base & mask)); |
| 2101 | if (start_state != end_state) |
| 2102 | return 0xFE; |
| 2103 | |
| 2104 | if ((start & mask) != (base & mask)) |
| 2105 | continue; |
| 2106 | |
| 2107 | curr_match = mtrr_state->var_ranges[i].base_lo & 0xff; |
| 2108 | if (prev_match == 0xFF) { |
| 2109 | prev_match = curr_match; |
| 2110 | continue; |
| 2111 | } |
| 2112 | |
| 2113 | if (prev_match == MTRR_TYPE_UNCACHABLE || |
| 2114 | curr_match == MTRR_TYPE_UNCACHABLE) |
| 2115 | return MTRR_TYPE_UNCACHABLE; |
| 2116 | |
| 2117 | if ((prev_match == MTRR_TYPE_WRBACK && |
| 2118 | curr_match == MTRR_TYPE_WRTHROUGH) || |
| 2119 | (prev_match == MTRR_TYPE_WRTHROUGH && |
| 2120 | curr_match == MTRR_TYPE_WRBACK)) { |
| 2121 | prev_match = MTRR_TYPE_WRTHROUGH; |
| 2122 | curr_match = MTRR_TYPE_WRTHROUGH; |
| 2123 | } |
| 2124 | |
| 2125 | if (prev_match != curr_match) |
| 2126 | return MTRR_TYPE_UNCACHABLE; |
| 2127 | } |
| 2128 | |
| 2129 | if (prev_match != 0xFF) |
| 2130 | return prev_match; |
| 2131 | |
| 2132 | return mtrr_state->def_type; |
| 2133 | } |
| 2134 | |
| 2135 | u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn) |
| 2136 | { |
| 2137 | u8 mtrr; |
| 2138 | |
| 2139 | mtrr = get_mtrr_type(&vcpu->arch.mtrr_state, gfn << PAGE_SHIFT, |
| 2140 | (gfn << PAGE_SHIFT) + PAGE_SIZE); |
| 2141 | if (mtrr == 0xfe || mtrr == 0xff) |
| 2142 | mtrr = MTRR_TYPE_WRBACK; |
| 2143 | return mtrr; |
| 2144 | } |
| 2145 | EXPORT_SYMBOL_GPL(kvm_get_guest_memory_type); |
| 2146 | |
| 2147 | static void __kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) |
| 2148 | { |
| 2149 | trace_kvm_mmu_unsync_page(sp); |
| 2150 | ++vcpu->kvm->stat.mmu_unsync; |
| 2151 | sp->unsync = 1; |
| 2152 | |
| 2153 | kvm_mmu_mark_parents_unsync(sp); |
| 2154 | } |
| 2155 | |
| 2156 | static void kvm_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn) |
| 2157 | { |
| 2158 | struct kvm_mmu_page *s; |
| 2159 | struct hlist_node *node; |
| 2160 | |
| 2161 | for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) { |
| 2162 | if (s->unsync) |
| 2163 | continue; |
| 2164 | WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL); |
| 2165 | __kvm_unsync_page(vcpu, s); |
| 2166 | } |
| 2167 | } |
| 2168 | |
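|      | /*
|      |  * Returns 1 if writes to @gfn must stay write-protected: an indirect
|      |  * shadow page for the gfn exists and either can_unsync is false or the
|      |  * page is above PT_PAGE_TABLE_LEVEL.  Returns 0 otherwise, marking any
|      |  * matching last-level shadow pages unsync so the write can go through.
|      |  */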
| 2169 | static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn, |
| 2170 | bool can_unsync) |
| 2171 | { |
| 2172 | struct kvm_mmu_page *s; |
| 2173 | struct hlist_node *node; |
| 2174 | bool need_unsync = false; |
| 2175 | |
| 2176 | for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) { |
| 2177 | if (!can_unsync) |
| 2178 | return 1; |
| 2179 | |
| 2180 | if (s->role.level != PT_PAGE_TABLE_LEVEL) |
| 2181 | return 1; |
| 2182 | |
| 2183 | if (!need_unsync && !s->unsync) { |
| 2184 | need_unsync = true; |
| 2185 | } |
| 2186 | } |
| 2187 | if (need_unsync) |
| 2188 | kvm_unsync_pages(vcpu, gfn); |
| 2189 | return 0; |
| 2190 | } |
| 2191 | |
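|      | /*
|      |  * Build and install a shadow pte.  Returns non-zero when the mapping
|      |  * could not be made writable (the gfn is write-protected at this level
|      |  * or a conflicting huge mapping had to be dropped); the caller then
|      |  * flushes the TLB and, for a write fault, falls back to emulation.
|      |  */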
| 2192 | static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, |
| 2193 | unsigned pte_access, int user_fault, |
| 2194 | int write_fault, int level, |
| 2195 | gfn_t gfn, pfn_t pfn, bool speculative, |
| 2196 | bool can_unsync, bool host_writable) |
| 2197 | { |
| 2198 | u64 spte, entry = *sptep; |
| 2199 | int ret = 0; |
| 2200 | |
| 2201 | if (set_mmio_spte(sptep, gfn, pfn, pte_access)) |
| 2202 | return 0; |
| 2203 | |
| 2204 | spte = PT_PRESENT_MASK; |
| 2205 | if (!speculative) |
| 2206 | spte |= shadow_accessed_mask; |
| 2207 | |
| 2208 | if (pte_access & ACC_EXEC_MASK) |
| 2209 | spte |= shadow_x_mask; |
| 2210 | else |
| 2211 | spte |= shadow_nx_mask; |
| 2212 | if (pte_access & ACC_USER_MASK) |
| 2213 | spte |= shadow_user_mask; |
| 2214 | if (level > PT_PAGE_TABLE_LEVEL) |
| 2215 | spte |= PT_PAGE_SIZE_MASK; |
| 2216 | if (tdp_enabled) |
| 2217 | spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn, |
| 2218 | kvm_is_mmio_pfn(pfn)); |
| 2219 | |
| 2220 | if (host_writable) |
| 2221 | spte |= SPTE_HOST_WRITEABLE; |
| 2222 | else |
| 2223 | pte_access &= ~ACC_WRITE_MASK; |
| 2224 | |
| 2225 | spte |= (u64)pfn << PAGE_SHIFT; |
| 2226 | |
| 2227 | if ((pte_access & ACC_WRITE_MASK) |
| 2228 | || (!vcpu->arch.mmu.direct_map && write_fault |
| 2229 | && !is_write_protection(vcpu) && !user_fault)) { |
| 2230 | |
| 2231 | if (level > PT_PAGE_TABLE_LEVEL && |
| 2232 | has_wrprotected_page(vcpu->kvm, gfn, level)) { |
| 2233 | ret = 1; |
| 2234 | drop_spte(vcpu->kvm, sptep); |
| 2235 | goto done; |
| 2236 | } |
| 2237 | |
| 2238 | spte |= PT_WRITABLE_MASK; |
| 2239 | |
| 2240 | if (!vcpu->arch.mmu.direct_map |
| 2241 | && !(pte_access & ACC_WRITE_MASK)) { |
| 2242 | spte &= ~PT_USER_MASK; |
| 2243 | /* |
| 2244 | * If we converted a user page to a kernel page so
| 2245 | * that the kernel can write to it when cr0.wp=0, then
| 2246 | * we should prevent the kernel from executing it if
| 2247 | * SMEP is enabled.
| 2248 | */ |
| 2249 | if (kvm_read_cr4_bits(vcpu, X86_CR4_SMEP)) |
| 2250 | spte |= PT64_NX_MASK; |
| 2251 | } |
| 2252 | |
| 2253 | /* |
| 2254 | * Optimization: for pte sync, if spte was writable the hash |
| 2255 | * lookup is unnecessary (and expensive). Write protection |
| 2256 | * is the responsibility of mmu_get_page / kvm_sync_page.
| 2257 | * Same reasoning can be applied to dirty page accounting. |
| 2258 | */ |
| 2259 | if (!can_unsync && is_writable_pte(*sptep)) |
| 2260 | goto set_pte; |
| 2261 | |
| 2262 | if (mmu_need_write_protect(vcpu, gfn, can_unsync)) { |
| 2263 | pgprintk("%s: found shadow page for %llx, marking ro\n", |
| 2264 | __func__, gfn); |
| 2265 | ret = 1; |
| 2266 | pte_access &= ~ACC_WRITE_MASK; |
| 2267 | if (is_writable_pte(spte)) |
| 2268 | spte &= ~PT_WRITABLE_MASK; |
| 2269 | } |
| 2270 | } |
| 2271 | |
| 2272 | if (pte_access & ACC_WRITE_MASK) |
| 2273 | mark_page_dirty(vcpu->kvm, gfn); |
| 2274 | |
| 2275 | set_pte: |
| 2276 | mmu_spte_update(sptep, spte); |
| 2277 | /* |
| 2278 | * If we overwrite a writable spte with a read-only one we |
| 2279 | * should flush remote TLBs. Otherwise rmap_write_protect |
| 2280 | * will find a read-only spte, even though the writable spte |
| 2281 | * might be cached on a CPU's TLB. |
| 2282 | */ |
| 2283 | if (is_writable_pte(entry) && !is_writable_pte(*sptep)) |
| 2284 | kvm_flush_remote_tlbs(vcpu->kvm); |
| 2285 | done: |
| 2286 | return ret; |
| 2287 | } |
| 2288 | |
| 2289 | static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, |
| 2290 | unsigned pt_access, unsigned pte_access, |
| 2291 | int user_fault, int write_fault, |
| 2292 | int *emulate, int level, gfn_t gfn, |
| 2293 | pfn_t pfn, bool speculative, |
| 2294 | bool host_writable) |
| 2295 | { |
| 2296 | int was_rmapped = 0; |
| 2297 | int rmap_count; |
| 2298 | |
| 2299 | pgprintk("%s: spte %llx access %x write_fault %d" |
| 2300 | " user_fault %d gfn %llx\n", |
| 2301 | __func__, *sptep, pt_access, |
| 2302 | write_fault, user_fault, gfn); |
| 2303 | |
| 2304 | if (is_rmap_spte(*sptep)) { |
| 2305 | /* |
| 2306 | * If we overwrite a PTE page pointer with a 2MB PMD, unlink |
| 2307 | * the parent of the now unreachable PTE. |
| 2308 | */ |
| 2309 | if (level > PT_PAGE_TABLE_LEVEL && |
| 2310 | !is_large_pte(*sptep)) { |
| 2311 | struct kvm_mmu_page *child; |
| 2312 | u64 pte = *sptep; |
| 2313 | |
| 2314 | child = page_header(pte & PT64_BASE_ADDR_MASK); |
| 2315 | drop_parent_pte(child, sptep); |
| 2316 | kvm_flush_remote_tlbs(vcpu->kvm); |
| 2317 | } else if (pfn != spte_to_pfn(*sptep)) { |
| 2318 | pgprintk("hfn old %llx new %llx\n", |
| 2319 | spte_to_pfn(*sptep), pfn); |
| 2320 | drop_spte(vcpu->kvm, sptep); |
| 2321 | kvm_flush_remote_tlbs(vcpu->kvm); |
| 2322 | } else |
| 2323 | was_rmapped = 1; |
| 2324 | } |
| 2325 | |
| 2326 | if (set_spte(vcpu, sptep, pte_access, user_fault, write_fault, |
| 2327 | level, gfn, pfn, speculative, true, |
| 2328 | host_writable)) { |
| 2329 | if (write_fault) |
| 2330 | *emulate = 1; |
| 2331 | kvm_mmu_flush_tlb(vcpu); |
| 2332 | } |
| 2333 | |
| 2334 | if (unlikely(is_mmio_spte(*sptep) && emulate)) |
| 2335 | *emulate = 1; |
| 2336 | |
| 2337 | pgprintk("%s: setting spte %llx\n", __func__, *sptep); |
| 2338 | pgprintk("instantiating %s PTE (%s) at %llx (%llx) addr %p\n", |
| 2339 | is_large_pte(*sptep) ? "2MB" : "4kB",
| 2340 | *sptep & PT_PRESENT_MASK ? "RW" : "R", gfn,
| 2341 | *sptep, sptep); |
| 2342 | if (!was_rmapped && is_large_pte(*sptep)) |
| 2343 | ++vcpu->kvm->stat.lpages; |
| 2344 | |
| 2345 | if (is_shadow_present_pte(*sptep)) { |
| 2346 | page_header_update_slot(vcpu->kvm, sptep, gfn); |
| 2347 | if (!was_rmapped) { |
| 2348 | rmap_count = rmap_add(vcpu, sptep, gfn); |
| 2349 | if (rmap_count > RMAP_RECYCLE_THRESHOLD) |
| 2350 | rmap_recycle(vcpu, sptep, gfn); |
| 2351 | } |
| 2352 | } |
| 2353 | kvm_release_pfn_clean(pfn); |
| 2354 | } |
| 2355 | |
| 2356 | static void nonpaging_new_cr3(struct kvm_vcpu *vcpu) |
| 2357 | { |
| 2358 | } |
| 2359 | |
| 2360 | static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, |
| 2361 | bool no_dirty_log) |
| 2362 | { |
| 2363 | struct kvm_memory_slot *slot; |
| 2364 | unsigned long hva; |
| 2365 | |
| 2366 | slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, no_dirty_log); |
| 2367 | if (!slot) { |
| 2368 | get_page(fault_page); |
| 2369 | return page_to_pfn(fault_page); |
| 2370 | } |
| 2371 | |
| 2372 | hva = gfn_to_hva_memslot(slot, gfn); |
| 2373 | |
| 2374 | return hva_to_pfn_atomic(vcpu->kvm, hva); |
| 2375 | } |
| 2376 | |
| 2377 | static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu, |
| 2378 | struct kvm_mmu_page *sp, |
| 2379 | u64 *start, u64 *end) |
| 2380 | { |
| 2381 | struct page *pages[PTE_PREFETCH_NUM]; |
| 2382 | unsigned access = sp->role.access; |
| 2383 | int i, ret; |
| 2384 | gfn_t gfn; |
| 2385 | |
| 2386 | gfn = kvm_mmu_page_get_gfn(sp, start - sp->spt); |
| 2387 | if (!gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK)) |
| 2388 | return -1; |
| 2389 | |
| 2390 | ret = gfn_to_page_many_atomic(vcpu->kvm, gfn, pages, end - start); |
| 2391 | if (ret <= 0) |
| 2392 | return -1; |
| 2393 | |
| 2394 | for (i = 0; i < ret; i++, gfn++, start++) |
| 2395 | mmu_set_spte(vcpu, start, ACC_ALL, |
| 2396 | access, 0, 0, NULL, |
| 2397 | sp->role.level, gfn, |
| 2398 | page_to_pfn(pages[i]), true, true); |
| 2399 | |
| 2400 | return 0; |
| 2401 | } |
| 2402 | |
| 2403 | static void __direct_pte_prefetch(struct kvm_vcpu *vcpu, |
| 2404 | struct kvm_mmu_page *sp, u64 *sptep) |
| 2405 | { |
| 2406 | u64 *spte, *start = NULL; |
| 2407 | int i; |
| 2408 | |
| 2409 | WARN_ON(!sp->role.direct); |
| 2410 | |
| 2411 | i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1); |
| 2412 | spte = sp->spt + i; |
| 2413 | |
| 2414 | for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) { |
| 2415 | if (is_shadow_present_pte(*spte) || spte == sptep) { |
| 2416 | if (!start) |
| 2417 | continue; |
| 2418 | if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0) |
| 2419 | break; |
| 2420 | start = NULL; |
| 2421 | } else if (!start) |
| 2422 | start = spte; |
| 2423 | } |
| 2424 | } |
| 2425 | |
| 2426 | static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep) |
| 2427 | { |
| 2428 | struct kvm_mmu_page *sp; |
| 2429 | |
| 2430 | /* |
| 2431 | * Since there is no accessed bit on EPT, there is no way to
| 2432 | * distinguish between actually accessed translations and
| 2433 | * prefetched ones, so disable pte prefetch if EPT is
| 2434 | * enabled.
| 2435 | */ |
| 2436 | if (!shadow_accessed_mask) |
| 2437 | return; |
| 2438 | |
| 2439 | sp = page_header(__pa(sptep)); |
| 2440 | if (sp->role.level > PT_PAGE_TABLE_LEVEL) |
| 2441 | return; |
| 2442 | |
| 2443 | __direct_pte_prefetch(vcpu, sp, sptep); |
| 2444 | } |
| 2445 | |
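|      | /*
|      |  * Map gfn -> pfn in the shadow/TDP page table for a direct fault:
|      |  * walk down to @level, allocating intermediate shadow pages as needed,
|      |  * and install the final spte there.  Returns whether the access has to
|      |  * be emulated, as reported by mmu_set_spte().
|      |  */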
| 2446 | static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write, |
| 2447 | int map_writable, int level, gfn_t gfn, pfn_t pfn, |
| 2448 | bool prefault) |
| 2449 | { |
| 2450 | struct kvm_shadow_walk_iterator iterator; |
| 2451 | struct kvm_mmu_page *sp; |
| 2452 | int emulate = 0; |
| 2453 | gfn_t pseudo_gfn; |
| 2454 | |
| 2455 | for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) { |
| 2456 | if (iterator.level == level) { |
| 2457 | unsigned pte_access = ACC_ALL; |
| 2458 | |
| 2459 | mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, pte_access, |
| 2460 | 0, write, &emulate, |
| 2461 | level, gfn, pfn, prefault, map_writable); |
| 2462 | direct_pte_prefetch(vcpu, iterator.sptep); |
| 2463 | ++vcpu->stat.pf_fixed; |
| 2464 | break; |
| 2465 | } |
| 2466 | |
| 2467 | if (!is_shadow_present_pte(*iterator.sptep)) { |
| 2468 | u64 base_addr = iterator.addr; |
| 2469 | |
| 2470 | base_addr &= PT64_LVL_ADDR_MASK(iterator.level); |
| 2471 | pseudo_gfn = base_addr >> PAGE_SHIFT; |
| 2472 | sp = kvm_mmu_get_page(vcpu, pseudo_gfn, iterator.addr, |
| 2473 | iterator.level - 1, |
| 2474 | 1, ACC_ALL, iterator.sptep); |
| 2475 | if (!sp) { |
| 2476 | pgprintk("nonpaging_map: ENOMEM\n"); |
| 2477 | kvm_release_pfn_clean(pfn); |
| 2478 | return -ENOMEM; |
| 2479 | } |
| 2480 | |
| 2481 | mmu_spte_set(iterator.sptep, |
| 2482 | __pa(sp->spt) |
| 2483 | | PT_PRESENT_MASK | PT_WRITABLE_MASK |
| 2484 | | shadow_user_mask | shadow_x_mask |
| 2485 | | shadow_accessed_mask); |
| 2486 | } |
| 2487 | } |
| 2488 | return emulate; |
| 2489 | } |
| 2490 | |
| 2491 | static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *tsk) |
| 2492 | { |
| 2493 | siginfo_t info; |
| 2494 | |
| 2495 | info.si_signo = SIGBUS; |
| 2496 | info.si_errno = 0; |
| 2497 | info.si_code = BUS_MCEERR_AR; |
| 2498 | info.si_addr = (void __user *)address; |
| 2499 | info.si_addr_lsb = PAGE_SHIFT; |
| 2500 | |
| 2501 | send_sig_info(SIGBUS, &info, tsk); |
| 2502 | } |
| 2503 | |
| 2504 | static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, pfn_t pfn) |
| 2505 | { |
| 2506 | kvm_release_pfn_clean(pfn); |
| 2507 | if (is_hwpoison_pfn(pfn)) { |
| 2508 | kvm_send_hwpoison_signal(gfn_to_hva(vcpu->kvm, gfn), current); |
| 2509 | return 0; |
| 2510 | } |
| 2511 | |
| 2512 | return -EFAULT; |
| 2513 | } |
| 2514 | |
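|      | /*
|      |  * If the host page backing @gfn is part of a transparent huge page and
|      |  * nothing in the 2MB region is write-protected, map it with a 2MB spte:
|      |  * bump *levelp to PT_DIRECTORY_LEVEL and align *gfnp/*pfnp down to the
|      |  * huge page boundary (mask = KVM_PAGES_PER_HPAGE(level) - 1, i.e. the
|      |  * low 9 bits for 2MB pages), moving the page reference to the head page.
|      |  */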
| 2515 | static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu, |
| 2516 | gfn_t *gfnp, pfn_t *pfnp, int *levelp) |
| 2517 | { |
| 2518 | pfn_t pfn = *pfnp; |
| 2519 | gfn_t gfn = *gfnp; |
| 2520 | int level = *levelp; |
| 2521 | |
| 2522 | /* |
| 2523 | * Check if it's a transparent hugepage. If this were a
| 2524 | * hugetlbfs page, level wouldn't be set to
| 2525 | * PT_PAGE_TABLE_LEVEL and there would be no adjustment done
| 2526 | * here.
| 2527 | */ |
| 2528 | if (!is_error_pfn(pfn) && !kvm_is_mmio_pfn(pfn) && |
| 2529 | level == PT_PAGE_TABLE_LEVEL && |
| 2530 | PageTransCompound(pfn_to_page(pfn)) && |
| 2531 | !has_wrprotected_page(vcpu->kvm, gfn, PT_DIRECTORY_LEVEL)) { |
| 2532 | unsigned long mask; |
| 2533 | /* |
| 2534 | * mmu_notifier_retry() was successful and we hold the
| 2535 | * mmu_lock here, so the pmd can't start splitting
| 2536 | * under us, and in turn
| 2537 | * __split_huge_page_refcount() can't run under us
| 2538 | * either, so we can safely transfer the refcount from
| 2539 | * PG_tail to PG_head as we switch the pfn from the
| 2540 | * tail page to the head page.
| 2541 | */ |
| 2542 | *levelp = level = PT_DIRECTORY_LEVEL; |
| 2543 | mask = KVM_PAGES_PER_HPAGE(level) - 1; |
| 2544 | VM_BUG_ON((gfn & mask) != (pfn & mask)); |
| 2545 | if (pfn & mask) { |
| 2546 | gfn &= ~mask; |
| 2547 | *gfnp = gfn; |
| 2548 | kvm_release_pfn_clean(pfn); |
| 2549 | pfn &= ~mask; |
| 2550 | if (!get_page_unless_zero(pfn_to_page(pfn))) |
| 2551 | BUG(); |
| 2552 | *pfnp = pfn; |
| 2553 | } |
| 2554 | } |
| 2555 | } |
| 2556 | |
| 2557 | static bool mmu_invalid_pfn(pfn_t pfn) |
| 2558 | { |
| 2559 | return unlikely(is_invalid_pfn(pfn)); |
| 2560 | } |
| 2561 | |
| 2562 | static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn, |
| 2563 | pfn_t pfn, unsigned access, int *ret_val) |
| 2564 | { |
| 2565 | bool ret = true; |
| 2566 | |
| 2567 | /* The pfn is invalid, report the error! */ |
| 2568 | if (unlikely(is_invalid_pfn(pfn))) { |
| 2569 | *ret_val = kvm_handle_bad_page(vcpu, gfn, pfn); |
| 2570 | goto exit; |
| 2571 | } |
| 2572 | |
| 2573 | if (unlikely(is_noslot_pfn(pfn))) |
| 2574 | vcpu_cache_mmio_info(vcpu, gva, gfn, access); |
| 2575 | |
| 2576 | ret = false; |
| 2577 | exit: |
| 2578 | return ret; |
| 2579 | } |
| 2580 | |
| 2581 | static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn, |
| 2582 | gva_t gva, pfn_t *pfn, bool write, bool *writable); |
| 2583 | |
| 2584 | static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn, |
| 2585 | bool prefault) |
| 2586 | { |
| 2587 | int r; |
| 2588 | int level; |
| 2589 | int force_pt_level; |
| 2590 | pfn_t pfn; |
| 2591 | unsigned long mmu_seq; |
| 2592 | bool map_writable; |
| 2593 | |
| 2594 | force_pt_level = mapping_level_dirty_bitmap(vcpu, gfn); |
| 2595 | if (likely(!force_pt_level)) { |
| 2596 | level = mapping_level(vcpu, gfn); |
| 2597 | /* |
| 2598 | * This path builds a PAE pagetable - so we can map |
| 2599 | * 2mb pages at maximum. Therefore check if the level |
| 2600 | * is larger than that. |
| 2601 | */ |
| 2602 | if (level > PT_DIRECTORY_LEVEL) |
| 2603 | level = PT_DIRECTORY_LEVEL; |
| 2604 | |
| 2605 | gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1); |
| 2606 | } else |
| 2607 | level = PT_PAGE_TABLE_LEVEL; |
| 2608 | |
| 2609 | mmu_seq = vcpu->kvm->mmu_notifier_seq; |
| 2610 | smp_rmb(); |
| 2611 | |
| 2612 | if (try_async_pf(vcpu, prefault, gfn, v, &pfn, write, &map_writable)) |
| 2613 | return 0; |
| 2614 | |
| 2615 | if (handle_abnormal_pfn(vcpu, v, gfn, pfn, ACC_ALL, &r)) |
| 2616 | return r; |
| 2617 | |
| 2618 | spin_lock(&vcpu->kvm->mmu_lock); |
| 2619 | if (mmu_notifier_retry(vcpu, mmu_seq)) |
| 2620 | goto out_unlock; |
| 2621 | kvm_mmu_free_some_pages(vcpu); |
| 2622 | if (likely(!force_pt_level)) |
| 2623 | transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level); |
| 2624 | r = __direct_map(vcpu, v, write, map_writable, level, gfn, pfn, |
| 2625 | prefault); |
| 2626 | spin_unlock(&vcpu->kvm->mmu_lock); |
| 2627 | |
| 2628 | |
| 2629 | return r; |
| 2630 | |
| 2631 | out_unlock: |
| 2632 | spin_unlock(&vcpu->kvm->mmu_lock); |
| 2633 | kvm_release_pfn_clean(pfn); |
| 2634 | return 0; |
| 2635 | } |
| 2636 | |
| 2637 | |
| 2638 | static void mmu_free_roots(struct kvm_vcpu *vcpu) |
| 2639 | { |
| 2640 | int i; |
| 2641 | struct kvm_mmu_page *sp; |
| 2642 | LIST_HEAD(invalid_list); |
| 2643 | |
| 2644 | if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) |
| 2645 | return; |
| 2646 | spin_lock(&vcpu->kvm->mmu_lock); |
| 2647 | if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL && |
| 2648 | (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL || |
| 2649 | vcpu->arch.mmu.direct_map)) { |
| 2650 | hpa_t root = vcpu->arch.mmu.root_hpa; |
| 2651 | |
| 2652 | sp = page_header(root); |
| 2653 | --sp->root_count; |
| 2654 | if (!sp->root_count && sp->role.invalid) { |
| 2655 | kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list); |
| 2656 | kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); |
| 2657 | } |
| 2658 | vcpu->arch.mmu.root_hpa = INVALID_PAGE; |
| 2659 | spin_unlock(&vcpu->kvm->mmu_lock); |
| 2660 | return; |
| 2661 | } |
| 2662 | for (i = 0; i < 4; ++i) { |
| 2663 | hpa_t root = vcpu->arch.mmu.pae_root[i]; |
| 2664 | |
| 2665 | if (root) { |
| 2666 | root &= PT64_BASE_ADDR_MASK; |
| 2667 | sp = page_header(root); |
| 2668 | --sp->root_count; |
| 2669 | if (!sp->root_count && sp->role.invalid) |
| 2670 | kvm_mmu_prepare_zap_page(vcpu->kvm, sp, |
| 2671 | &invalid_list); |
| 2672 | } |
| 2673 | vcpu->arch.mmu.pae_root[i] = INVALID_PAGE; |
| 2674 | } |
| 2675 | kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); |
| 2676 | spin_unlock(&vcpu->kvm->mmu_lock); |
| 2677 | vcpu->arch.mmu.root_hpa = INVALID_PAGE; |
| 2678 | } |
| 2679 | |
| 2680 | static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn) |
| 2681 | { |
| 2682 | int ret = 0; |
| 2683 | |
| 2684 | if (!kvm_is_visible_gfn(vcpu->kvm, root_gfn)) { |
| 2685 | kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); |
| 2686 | ret = 1; |
| 2687 | } |
| 2688 | |
| 2689 | return ret; |
| 2690 | } |
| 2691 | |
| 2692 | static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu) |
| 2693 | { |
| 2694 | struct kvm_mmu_page *sp; |
| 2695 | unsigned i; |
| 2696 | |
| 2697 | if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) { |
| 2698 | spin_lock(&vcpu->kvm->mmu_lock); |
| 2699 | kvm_mmu_free_some_pages(vcpu); |
| 2700 | sp = kvm_mmu_get_page(vcpu, 0, 0, PT64_ROOT_LEVEL, |
| 2701 | 1, ACC_ALL, NULL); |
| 2702 | ++sp->root_count; |
| 2703 | spin_unlock(&vcpu->kvm->mmu_lock); |
| 2704 | vcpu->arch.mmu.root_hpa = __pa(sp->spt); |
| 2705 | } else if (vcpu->arch.mmu.shadow_root_level == PT32E_ROOT_LEVEL) { |
| 2706 | for (i = 0; i < 4; ++i) { |
| 2707 | hpa_t root = vcpu->arch.mmu.pae_root[i]; |
| 2708 | |
| 2709 | ASSERT(!VALID_PAGE(root)); |
| 2710 | spin_lock(&vcpu->kvm->mmu_lock); |
| 2711 | kvm_mmu_free_some_pages(vcpu); |
| 2712 | sp = kvm_mmu_get_page(vcpu, i << (30 - PAGE_SHIFT), |
| 2713 | i << 30, |
| 2714 | PT32_ROOT_LEVEL, 1, ACC_ALL, |
| 2715 | NULL); |
| 2716 | root = __pa(sp->spt); |
| 2717 | ++sp->root_count; |
| 2718 | spin_unlock(&vcpu->kvm->mmu_lock); |
| 2719 | vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK; |
| 2720 | } |
| 2721 | vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root); |
| 2722 | } else |
| 2723 | BUG(); |
| 2724 | |
| 2725 | return 0; |
| 2726 | } |
| 2727 | |
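|      | /*
|      |  * Allocate the shadow roots for a guest that uses paging.  Three cases:
|      |  * a long mode guest gets a single write-protected 4-level root; a PAE
|      |  * or legacy 2-level guest gets four PAE roots in pae_root[]; and when a
|      |  * 32-bit guest is shadowed by a 4-level table, an extra lm_root page is
|      |  * allocated on demand to point at pae_root.
|      |  */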
| 2728 | static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) |
| 2729 | { |
| 2730 | struct kvm_mmu_page *sp; |
| 2731 | u64 pdptr, pm_mask; |
| 2732 | gfn_t root_gfn; |
| 2733 | int i; |
| 2734 | |
| 2735 | root_gfn = vcpu->arch.mmu.get_cr3(vcpu) >> PAGE_SHIFT; |
| 2736 | |
| 2737 | if (mmu_check_root(vcpu, root_gfn)) |
| 2738 | return 1; |
| 2739 | |
| 2740 | /* |
| 2741 | * Do we shadow a long mode page table? If so we need to |
| 2742 | * write-protect the guest's page table root.
| 2743 | */ |
| 2744 | if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) { |
| 2745 | hpa_t root = vcpu->arch.mmu.root_hpa; |
| 2746 | |
| 2747 | ASSERT(!VALID_PAGE(root)); |
| 2748 | |
| 2749 | spin_lock(&vcpu->kvm->mmu_lock); |
| 2750 | kvm_mmu_free_some_pages(vcpu); |
| 2751 | sp = kvm_mmu_get_page(vcpu, root_gfn, 0, PT64_ROOT_LEVEL, |
| 2752 | 0, ACC_ALL, NULL); |
| 2753 | root = __pa(sp->spt); |
| 2754 | ++sp->root_count; |
| 2755 | spin_unlock(&vcpu->kvm->mmu_lock); |
| 2756 | vcpu->arch.mmu.root_hpa = root; |
| 2757 | return 0; |
| 2758 | } |
| 2759 | |
| 2760 | /* |
| 2761 | * We shadow a 32 bit page table. This may be a legacy 2-level |
| 2762 | * or a PAE 3-level page table. In either case we need to be aware that |
| 2763 | * the shadow page table may be a PAE or a long mode page table. |
| 2764 | */ |
| 2765 | pm_mask = PT_PRESENT_MASK; |
| 2766 | if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) |
| 2767 | pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK; |
| 2768 | |
| 2769 | for (i = 0; i < 4; ++i) { |
| 2770 | hpa_t root = vcpu->arch.mmu.pae_root[i]; |
| 2771 | |
| 2772 | ASSERT(!VALID_PAGE(root)); |
| 2773 | if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) { |
| 2774 | pdptr = vcpu->arch.mmu.get_pdptr(vcpu, i); |
| 2775 | if (!is_present_gpte(pdptr)) { |
| 2776 | vcpu->arch.mmu.pae_root[i] = 0; |
| 2777 | continue; |
| 2778 | } |
| 2779 | root_gfn = pdptr >> PAGE_SHIFT; |
| 2780 | if (mmu_check_root(vcpu, root_gfn)) |
| 2781 | return 1; |
| 2782 | } |
| 2783 | spin_lock(&vcpu->kvm->mmu_lock); |
| 2784 | kvm_mmu_free_some_pages(vcpu); |
| 2785 | sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30, |
| 2786 | PT32_ROOT_LEVEL, 0, |
| 2787 | ACC_ALL, NULL); |
| 2788 | root = __pa(sp->spt); |
| 2789 | ++sp->root_count; |
| 2790 | spin_unlock(&vcpu->kvm->mmu_lock); |
| 2791 | |
| 2792 | vcpu->arch.mmu.pae_root[i] = root | pm_mask; |
| 2793 | } |
| 2794 | vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root); |
| 2795 | |
| 2796 | /* |
| 2797 | * If we shadow a 32 bit page table with a long mode page |
| 2798 | * table we enter this path. |
| 2799 | */ |
| 2800 | if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) { |
| 2801 | if (vcpu->arch.mmu.lm_root == NULL) { |
| 2802 | /* |
| 2803 | * The additional page necessary for this is only |
| 2804 | * allocated on demand. |
| 2805 | */ |
| 2806 | |
| 2807 | u64 *lm_root; |
| 2808 | |
| 2809 | lm_root = (void*)get_zeroed_page(GFP_KERNEL); |
| 2810 | if (lm_root == NULL) |
| 2811 | return 1; |
| 2812 | |
| 2813 | lm_root[0] = __pa(vcpu->arch.mmu.pae_root) | pm_mask; |
| 2814 | |
| 2815 | vcpu->arch.mmu.lm_root = lm_root; |
| 2816 | } |
| 2817 | |
| 2818 | vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.lm_root); |
| 2819 | } |
| 2820 | |
| 2821 | return 0; |
| 2822 | } |
| 2823 | |
| 2824 | static int mmu_alloc_roots(struct kvm_vcpu *vcpu) |
| 2825 | { |
| 2826 | if (vcpu->arch.mmu.direct_map) |
| 2827 | return mmu_alloc_direct_roots(vcpu); |
| 2828 | else |
| 2829 | return mmu_alloc_shadow_roots(vcpu); |
| 2830 | } |
| 2831 | |
| 2832 | static void mmu_sync_roots(struct kvm_vcpu *vcpu) |
| 2833 | { |
| 2834 | int i; |
| 2835 | struct kvm_mmu_page *sp; |
| 2836 | |
| 2837 | if (vcpu->arch.mmu.direct_map) |
| 2838 | return; |
| 2839 | |
| 2840 | if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) |
| 2841 | return; |
| 2842 | |
| 2843 | vcpu_clear_mmio_info(vcpu, ~0ul); |
| 2844 | kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC); |
| 2845 | if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) { |
| 2846 | hpa_t root = vcpu->arch.mmu.root_hpa; |
| 2847 | sp = page_header(root); |
| 2848 | mmu_sync_children(vcpu, sp); |
| 2849 | kvm_mmu_audit(vcpu, AUDIT_POST_SYNC); |
| 2850 | return; |
| 2851 | } |
| 2852 | for (i = 0; i < 4; ++i) { |
| 2853 | hpa_t root = vcpu->arch.mmu.pae_root[i]; |
| 2854 | |
| 2855 | if (root && VALID_PAGE(root)) { |
| 2856 | root &= PT64_BASE_ADDR_MASK; |
| 2857 | sp = page_header(root); |
| 2858 | mmu_sync_children(vcpu, sp); |
| 2859 | } |
| 2860 | } |
| 2861 | kvm_mmu_audit(vcpu, AUDIT_POST_SYNC); |
| 2862 | } |
| 2863 | |
| 2864 | void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu) |
| 2865 | { |
| 2866 | spin_lock(&vcpu->kvm->mmu_lock); |
| 2867 | mmu_sync_roots(vcpu); |
| 2868 | spin_unlock(&vcpu->kvm->mmu_lock); |
| 2869 | } |
| 2870 | |
| 2871 | static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr, |
| 2872 | u32 access, struct x86_exception *exception) |
| 2873 | { |
| 2874 | if (exception) |
| 2875 | exception->error_code = 0; |
| 2876 | return vaddr; |
| 2877 | } |
| 2878 | |
| 2879 | static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gva_t vaddr, |
| 2880 | u32 access, |
| 2881 | struct x86_exception *exception) |
| 2882 | { |
| 2883 | if (exception) |
| 2884 | exception->error_code = 0; |
| 2885 | return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access); |
| 2886 | } |
| 2887 | |
| 2888 | static bool quickly_check_mmio_pf(struct kvm_vcpu *vcpu, u64 addr, bool direct) |
| 2889 | { |
| 2890 | if (direct) |
| 2891 | return vcpu_match_mmio_gpa(vcpu, addr); |
| 2892 | |
| 2893 | return vcpu_match_mmio_gva(vcpu, addr); |
| 2894 | } |
| 2895 | |
| 2896 | |
| 2897 | /* |
| 2898 | * On direct hosts, the last spte only allows two states
| 2899 | * for mmio page fault: |
| 2900 | * - It is the mmio spte |
| 2901 | * - It is zapped or it is being zapped. |
| 2902 | * |
| 2903 | * This function completely checks the spte when the last spte |
| 2904 | * is not the mmio spte. |
| 2905 | */ |
| 2906 | static bool check_direct_spte_mmio_pf(u64 spte) |
| 2907 | { |
| 2908 | return __check_direct_spte_mmio_pf(spte); |
| 2909 | } |
| 2910 | |
| 2911 | static u64 walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr) |
| 2912 | { |
| 2913 | struct kvm_shadow_walk_iterator iterator; |
| 2914 | u64 spte = 0ull; |
| 2915 | |
| 2916 | walk_shadow_page_lockless_begin(vcpu); |
| 2917 | for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) |
| 2918 | if (!is_shadow_present_pte(spte)) |
| 2919 | break; |
| 2920 | walk_shadow_page_lockless_end(vcpu); |
| 2921 | |
| 2922 | return spte; |
| 2923 | } |
| 2924 | |
| 2925 | /* |
| 2926 | * If it is a real mmio page fault, return 1 and emulate the instruction
| 2927 | * directly; return 0 to let the CPU fault again on the address; -1 is
| 2928 | * returned if a bug is detected.
| 2929 | */ |
| 2930 | int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct) |
| 2931 | { |
| 2932 | u64 spte; |
| 2933 | |
| 2934 | if (quickly_check_mmio_pf(vcpu, addr, direct)) |
| 2935 | return 1; |
| 2936 | |
| 2937 | spte = walk_shadow_page_get_mmio_spte(vcpu, addr); |
| 2938 | |
| 2939 | if (is_mmio_spte(spte)) { |
| 2940 | gfn_t gfn = get_mmio_spte_gfn(spte); |
| 2941 | unsigned access = get_mmio_spte_access(spte); |
| 2942 | |
| 2943 | if (direct) |
| 2944 | addr = 0; |
| 2945 | |
| 2946 | trace_handle_mmio_page_fault(addr, gfn, access); |
| 2947 | vcpu_cache_mmio_info(vcpu, addr, gfn, access); |
| 2948 | return 1; |
| 2949 | } |
| 2950 | |
| 2951 | /* |
| 2952 | * It's ok if the gva is remapped by other cpus on a shadowed guest,
| 2953 | * but it's a BUG if the gfn is not an mmio page.
| 2954 | */ |
| 2955 | if (direct && !check_direct_spte_mmio_pf(spte)) |
| 2956 | return -1; |
| 2957 | |
| 2958 | /* |
| 2959 | * If the page table is zapped by other cpus, let the CPU fault again on
| 2960 | * the address. |
| 2961 | */ |
| 2962 | return 0; |
| 2963 | } |
| 2964 | EXPORT_SYMBOL_GPL(handle_mmio_page_fault_common); |
| 2965 | |
| 2966 | static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, |
| 2967 | u32 error_code, bool direct) |
| 2968 | { |
| 2969 | int ret; |
| 2970 | |
| 2971 | ret = handle_mmio_page_fault_common(vcpu, addr, direct); |
| 2972 | WARN_ON(ret < 0); |
| 2973 | return ret; |
| 2974 | } |
| 2975 | |
| 2976 | static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva, |
| 2977 | u32 error_code, bool prefault) |
| 2978 | { |
| 2979 | gfn_t gfn; |
| 2980 | int r; |
| 2981 | |
| 2982 | pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code); |
| 2983 | |
| 2984 | if (unlikely(error_code & PFERR_RSVD_MASK)) |
| 2985 | return handle_mmio_page_fault(vcpu, gva, error_code, true); |
| 2986 | |
| 2987 | r = mmu_topup_memory_caches(vcpu); |
| 2988 | if (r) |
| 2989 | return r; |
| 2990 | |
| 2991 | ASSERT(vcpu); |
| 2992 | ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa)); |
| 2993 | |
| 2994 | gfn = gva >> PAGE_SHIFT; |
| 2995 | |
| 2996 | return nonpaging_map(vcpu, gva & PAGE_MASK, |
| 2997 | error_code & PFERR_WRITE_MASK, gfn, prefault); |
| 2998 | } |
| 2999 | |
| 3000 | static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn) |
| 3001 | { |
| 3002 | struct kvm_arch_async_pf arch; |
| 3003 | |
| 3004 | arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id; |
| 3005 | arch.gfn = gfn; |
| 3006 | arch.direct_map = vcpu->arch.mmu.direct_map; |
| 3007 | arch.cr3 = vcpu->arch.mmu.get_cr3(vcpu); |
| 3008 | |
| 3009 | return kvm_setup_async_pf(vcpu, gva, gfn, &arch); |
| 3010 | } |
| 3011 | |
| 3012 | static bool can_do_async_pf(struct kvm_vcpu *vcpu) |
| 3013 | { |
| 3014 | if (unlikely(!irqchip_in_kernel(vcpu->kvm) || |
| 3015 | kvm_event_needs_reinjection(vcpu))) |
| 3016 | return false; |
| 3017 | |
| 3018 | return kvm_x86_ops->interrupt_allowed(vcpu); |
| 3019 | } |
| 3020 | |
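|      | /*
|      |  * Resolve gfn to pfn, preferring not to block the vcpu: if the page is
|      |  * not immediately available and async page faults are usable, queue an
|      |  * async pf (or halt on a repeated fault for the same gfn) and return
|      |  * true so the fault handler bails out early; otherwise fall back to
|      |  * the blocking gfn_to_pfn_prot() and return false with *pfn filled in.
|      |  */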
| 3021 | static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn, |
| 3022 | gva_t gva, pfn_t *pfn, bool write, bool *writable) |
| 3023 | { |
| 3024 | bool async; |
| 3025 | |
| 3026 | *pfn = gfn_to_pfn_async(vcpu->kvm, gfn, &async, write, writable); |
| 3027 | |
| 3028 | if (!async) |
| 3029 | return false; /* *pfn has correct page already */ |
| 3030 | |
| 3031 | put_page(pfn_to_page(*pfn)); |
| 3032 | |
| 3033 | if (!prefault && can_do_async_pf(vcpu)) { |
| 3034 | trace_kvm_try_async_get_page(gva, gfn); |
| 3035 | if (kvm_find_async_pf_gfn(vcpu, gfn)) { |
| 3036 | trace_kvm_async_pf_doublefault(gva, gfn); |
| 3037 | kvm_make_request(KVM_REQ_APF_HALT, vcpu); |
| 3038 | return true; |
| 3039 | } else if (kvm_arch_setup_async_pf(vcpu, gva, gfn)) |
| 3040 | return true; |
| 3041 | } |
| 3042 | |
| 3043 | *pfn = gfn_to_pfn_prot(vcpu->kvm, gfn, write, writable); |
| 3044 | |
| 3045 | return false; |
| 3046 | } |
| 3047 | |
| 3048 | static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code, |
| 3049 | bool prefault) |
| 3050 | { |
| 3051 | pfn_t pfn; |
| 3052 | int r; |
| 3053 | int level; |
| 3054 | int force_pt_level; |
| 3055 | gfn_t gfn = gpa >> PAGE_SHIFT; |
| 3056 | unsigned long mmu_seq; |
| 3057 | int write = error_code & PFERR_WRITE_MASK; |
| 3058 | bool map_writable; |
| 3059 | |
| 3060 | ASSERT(vcpu); |
| 3061 | ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa)); |
| 3062 | |
| 3063 | if (unlikely(error_code & PFERR_RSVD_MASK)) |
| 3064 | return handle_mmio_page_fault(vcpu, gpa, error_code, true); |
| 3065 | |
| 3066 | r = mmu_topup_memory_caches(vcpu); |
| 3067 | if (r) |
| 3068 | return r; |
| 3069 | |
| 3070 | force_pt_level = mapping_level_dirty_bitmap(vcpu, gfn); |
| 3071 | if (likely(!force_pt_level)) { |
| 3072 | level = mapping_level(vcpu, gfn); |
| 3073 | gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1); |
| 3074 | } else |
| 3075 | level = PT_PAGE_TABLE_LEVEL; |
| 3076 | |
| 3077 | mmu_seq = vcpu->kvm->mmu_notifier_seq; |
| 3078 | smp_rmb(); |
| 3079 | |
| 3080 | if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable)) |
| 3081 | return 0; |
| 3082 | |
| 3083 | if (handle_abnormal_pfn(vcpu, 0, gfn, pfn, ACC_ALL, &r)) |
| 3084 | return r; |
| 3085 | |
| 3086 | spin_lock(&vcpu->kvm->mmu_lock); |
| 3087 | if (mmu_notifier_retry(vcpu, mmu_seq)) |
| 3088 | goto out_unlock; |
| 3089 | kvm_mmu_free_some_pages(vcpu); |
| 3090 | if (likely(!force_pt_level)) |
| 3091 | transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level); |
| 3092 | r = __direct_map(vcpu, gpa, write, map_writable, |
| 3093 | level, gfn, pfn, prefault); |
| 3094 | spin_unlock(&vcpu->kvm->mmu_lock); |
| 3095 | |
| 3096 | return r; |
| 3097 | |
| 3098 | out_unlock: |
| 3099 | spin_unlock(&vcpu->kvm->mmu_lock); |
| 3100 | kvm_release_pfn_clean(pfn); |
| 3101 | return 0; |
| 3102 | } |
| 3103 | |
| 3104 | static void nonpaging_free(struct kvm_vcpu *vcpu) |
| 3105 | { |
| 3106 | mmu_free_roots(vcpu); |
| 3107 | } |
| 3108 | |
| 3109 | static int nonpaging_init_context(struct kvm_vcpu *vcpu, |
| 3110 | struct kvm_mmu *context) |
| 3111 | { |
| 3112 | context->new_cr3 = nonpaging_new_cr3; |
| 3113 | context->page_fault = nonpaging_page_fault; |
| 3114 | context->gva_to_gpa = nonpaging_gva_to_gpa; |
| 3115 | context->free = nonpaging_free; |
| 3116 | context->sync_page = nonpaging_sync_page; |
| 3117 | context->invlpg = nonpaging_invlpg; |
| 3118 | context->update_pte = nonpaging_update_pte; |
| 3119 | context->root_level = 0; |
| 3120 | context->shadow_root_level = PT32E_ROOT_LEVEL; |
| 3121 | context->root_hpa = INVALID_PAGE; |
| 3122 | context->direct_map = true; |
| 3123 | context->nx = false; |
| 3124 | return 0; |
| 3125 | } |
| 3126 | |
| 3127 | void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu) |
| 3128 | { |
| 3129 | ++vcpu->stat.tlb_flush; |
| 3130 | kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); |
| 3131 | } |
| 3132 | |
| 3133 | static void paging_new_cr3(struct kvm_vcpu *vcpu) |
| 3134 | { |
| 3135 | pgprintk("%s: cr3 %lx\n", __func__, kvm_read_cr3(vcpu)); |
| 3136 | mmu_free_roots(vcpu); |
| 3137 | } |
| 3138 | |
| 3139 | static unsigned long get_cr3(struct kvm_vcpu *vcpu) |
| 3140 | { |
| 3141 | return kvm_read_cr3(vcpu); |
| 3142 | } |
| 3143 | |
| 3144 | static void inject_page_fault(struct kvm_vcpu *vcpu, |
| 3145 | struct x86_exception *fault) |
| 3146 | { |
| 3147 | vcpu->arch.mmu.inject_page_fault(vcpu, fault); |
| 3148 | } |
| 3149 | |
| 3150 | static void paging_free(struct kvm_vcpu *vcpu) |
| 3151 | { |
| 3152 | nonpaging_free(vcpu); |
| 3153 | } |
| 3154 | |
| 3155 | static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level) |
| 3156 | { |
| 3157 | int bit7; |
| 3158 | |
| 3159 | bit7 = (gpte >> 7) & 1; |
| 3160 | return (gpte & mmu->rsvd_bits_mask[bit7][level-1]) != 0; |
| 3161 | } |
| 3162 | |
| 3163 | static bool sync_mmio_spte(u64 *sptep, gfn_t gfn, unsigned access, |
| 3164 | int *nr_present) |
| 3165 | { |
| 3166 | if (unlikely(is_mmio_spte(*sptep))) { |
| 3167 | if (gfn != get_mmio_spte_gfn(*sptep)) { |
| 3168 | mmu_spte_clear_no_track(sptep); |
| 3169 | return true; |
| 3170 | } |
| 3171 | |
| 3172 | (*nr_present)++; |
| 3173 | mark_mmio_spte(sptep, gfn, access); |
| 3174 | return true; |
| 3175 | } |
| 3176 | |
| 3177 | return false; |
| 3178 | } |
| 3179 | |
| 3180 | #define PTTYPE 64 |
| 3181 | #include "paging_tmpl.h" |
| 3182 | #undef PTTYPE |
| 3183 | |
| 3184 | #define PTTYPE 32 |
| 3185 | #include "paging_tmpl.h" |
| 3186 | #undef PTTYPE |
| 3187 | |
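|      | /*
|      |  * Fill context->rsvd_bits_mask[bit7][level-1] with the reserved bits of
|      |  * a guest pte for the current paging mode; bit 7 (the PS bit in
|      |  * directory entries) distinguishes large-page entries from ordinary
|      |  * ones, matching is_rsvd_bits_set().  For example, with maxphyaddr = 36,
|      |  * rsvd_bits(36, 51) marks physical address bits 51:36 as reserved.
|      |  */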
| 3188 | static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, |
| 3189 | struct kvm_mmu *context) |
| 3190 | { |
| 3191 | int maxphyaddr = cpuid_maxphyaddr(vcpu); |
| 3192 | u64 exb_bit_rsvd = 0; |
| 3193 | |
| 3194 | if (!context->nx) |
| 3195 | exb_bit_rsvd = rsvd_bits(63, 63); |
| 3196 | switch (context->root_level) { |
| 3197 | case PT32_ROOT_LEVEL: |
| 3198 | /* no rsvd bits for 2 level 4K page table entries */ |
| 3199 | context->rsvd_bits_mask[0][1] = 0; |
| 3200 | context->rsvd_bits_mask[0][0] = 0; |
| 3201 | context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0]; |
| 3202 | |
| 3203 | if (!is_pse(vcpu)) { |
| 3204 | context->rsvd_bits_mask[1][1] = 0; |
| 3205 | break; |
| 3206 | } |
| 3207 | |
| 3208 | if (is_cpuid_PSE36()) |
| 3209 | /* 36-bit PSE 4MB page */
| 3210 | context->rsvd_bits_mask[1][1] = rsvd_bits(17, 21); |
| 3211 | else |
| 3212 | /* 32-bit PSE 4MB page */
| 3213 | context->rsvd_bits_mask[1][1] = rsvd_bits(13, 21); |
| 3214 | break; |
| 3215 | case PT32E_ROOT_LEVEL: |
| 3216 | context->rsvd_bits_mask[0][2] = |
| 3217 | rsvd_bits(maxphyaddr, 63) | |
| 3218 | rsvd_bits(7, 8) | rsvd_bits(1, 2); /* PDPTE */ |
| 3219 | context->rsvd_bits_mask[0][1] = exb_bit_rsvd | |
| 3220 | rsvd_bits(maxphyaddr, 62); /* PDE */ |
| 3221 | context->rsvd_bits_mask[0][0] = exb_bit_rsvd | |
| 3222 | rsvd_bits(maxphyaddr, 62); /* PTE */ |
| 3223 | context->rsvd_bits_mask[1][1] = exb_bit_rsvd | |
| 3224 | rsvd_bits(maxphyaddr, 62) | |
| 3225 | rsvd_bits(13, 20); /* large page */ |
| 3226 | context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0]; |
| 3227 | break; |
| 3228 | case PT64_ROOT_LEVEL: |
| 3229 | context->rsvd_bits_mask[0][3] = exb_bit_rsvd | |
| 3230 | rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8); |
| 3231 | context->rsvd_bits_mask[0][2] = exb_bit_rsvd | |
| 3232 | rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8); |
| 3233 | context->rsvd_bits_mask[0][1] = exb_bit_rsvd | |
| 3234 | rsvd_bits(maxphyaddr, 51); |
| 3235 | context->rsvd_bits_mask[0][0] = exb_bit_rsvd | |
| 3236 | rsvd_bits(maxphyaddr, 51); |
| 3237 | context->rsvd_bits_mask[1][3] = context->rsvd_bits_mask[0][3]; |
| 3238 | context->rsvd_bits_mask[1][2] = exb_bit_rsvd | |
| 3239 | rsvd_bits(maxphyaddr, 51) | |
| 3240 | rsvd_bits(13, 29); |
| 3241 | context->rsvd_bits_mask[1][1] = exb_bit_rsvd | |
| 3242 | rsvd_bits(maxphyaddr, 51) | |
| 3243 | rsvd_bits(13, 20); /* large page */ |
| 3244 | context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0]; |
| 3245 | break; |
| 3246 | } |
| 3247 | } |
| 3248 | |
| 3249 | static int paging64_init_context_common(struct kvm_vcpu *vcpu, |
| 3250 | struct kvm_mmu *context, |
| 3251 | int level) |
| 3252 | { |
| 3253 | context->nx = is_nx(vcpu); |
| 3254 | context->root_level = level; |
| 3255 | |
| 3256 | reset_rsvds_bits_mask(vcpu, context); |
| 3257 | |
| 3258 | ASSERT(is_pae(vcpu)); |
| 3259 | context->new_cr3 = paging_new_cr3; |
| 3260 | context->page_fault = paging64_page_fault; |
| 3261 | context->gva_to_gpa = paging64_gva_to_gpa; |
| 3262 | context->sync_page = paging64_sync_page; |
| 3263 | context->invlpg = paging64_invlpg; |
| 3264 | context->update_pte = paging64_update_pte; |
| 3265 | context->free = paging_free; |
| 3266 | context->shadow_root_level = level; |
| 3267 | context->root_hpa = INVALID_PAGE; |
| 3268 | context->direct_map = false; |
| 3269 | return 0; |
| 3270 | } |
| 3271 | |
| 3272 | static int paging64_init_context(struct kvm_vcpu *vcpu, |
| 3273 | struct kvm_mmu *context) |
| 3274 | { |
| 3275 | return paging64_init_context_common(vcpu, context, PT64_ROOT_LEVEL); |
| 3276 | } |
| 3277 | |
| 3278 | static int paging32_init_context(struct kvm_vcpu *vcpu, |
| 3279 | struct kvm_mmu *context) |
| 3280 | { |
| 3281 | context->nx = false; |
| 3282 | context->root_level = PT32_ROOT_LEVEL; |
| 3283 | |
| 3284 | reset_rsvds_bits_mask(vcpu, context); |
| 3285 | |
| 3286 | context->new_cr3 = paging_new_cr3; |
| 3287 | context->page_fault = paging32_page_fault; |
| 3288 | context->gva_to_gpa = paging32_gva_to_gpa; |
| 3289 | context->free = paging_free; |
| 3290 | context->sync_page = paging32_sync_page; |
| 3291 | context->invlpg = paging32_invlpg; |
| 3292 | context->update_pte = paging32_update_pte; |
| 3293 | context->shadow_root_level = PT32E_ROOT_LEVEL; |
| 3294 | context->root_hpa = INVALID_PAGE; |
| 3295 | context->direct_map = false; |
| 3296 | return 0; |
| 3297 | } |
| 3298 | |
| 3299 | static int paging32E_init_context(struct kvm_vcpu *vcpu, |
| 3300 | struct kvm_mmu *context) |
| 3301 | { |
| 3302 | return paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL); |
| 3303 | } |
| 3304 | |
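|      | /*
|      |  * Set up the MMU context for two-dimensional paging (EPT/NPT).  Page
|      |  * faults go through tdp_page_fault() and the shadow root level comes
|      |  * from the hardware; the guest paging mode only affects gva_to_gpa and
|      |  * the reserved-bit masks, since the hardware walks the guest tables.
|      |  */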
| 3305 | static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu) |
| 3306 | { |
| 3307 | struct kvm_mmu *context = vcpu->arch.walk_mmu; |
| 3308 | |
| 3309 | context->base_role.word = 0; |
| 3310 | context->new_cr3 = nonpaging_new_cr3; |
| 3311 | context->page_fault = tdp_page_fault; |
| 3312 | context->free = nonpaging_free; |
| 3313 | context->sync_page = nonpaging_sync_page; |
| 3314 | context->invlpg = nonpaging_invlpg; |
| 3315 | context->update_pte = nonpaging_update_pte; |
| 3316 | context->shadow_root_level = kvm_x86_ops->get_tdp_level(); |
| 3317 | context->root_hpa = INVALID_PAGE; |
| 3318 | context->direct_map = true; |
| 3319 | context->set_cr3 = kvm_x86_ops->set_tdp_cr3; |
| 3320 | context->get_cr3 = get_cr3; |
| 3321 | context->get_pdptr = kvm_pdptr_read; |
| 3322 | context->inject_page_fault = kvm_inject_page_fault; |
| 3323 | |
| 3324 | if (!is_paging(vcpu)) { |
| 3325 | context->nx = false; |
| 3326 | context->gva_to_gpa = nonpaging_gva_to_gpa; |
| 3327 | context->root_level = 0; |
| 3328 | } else if (is_long_mode(vcpu)) { |
| 3329 | context->nx = is_nx(vcpu); |
| 3330 | context->root_level = PT64_ROOT_LEVEL; |
| 3331 | reset_rsvds_bits_mask(vcpu, context); |
| 3332 | context->gva_to_gpa = paging64_gva_to_gpa; |
| 3333 | } else if (is_pae(vcpu)) { |
| 3334 | context->nx = is_nx(vcpu); |
| 3335 | context->root_level = PT32E_ROOT_LEVEL; |
| 3336 | reset_rsvds_bits_mask(vcpu, context); |
| 3337 | context->gva_to_gpa = paging64_gva_to_gpa; |
| 3338 | } else { |
| 3339 | context->nx = false; |
| 3340 | context->root_level = PT32_ROOT_LEVEL; |
| 3341 | reset_rsvds_bits_mask(vcpu, context); |
| 3342 | context->gva_to_gpa = paging32_gva_to_gpa; |
| 3343 | } |
| 3344 | |
| 3345 | return 0; |
| 3346 | } |
| 3347 | |
| 3348 | int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context) |
| 3349 | { |
| 3350 | int r; |
| 3351 | bool smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP); |
| 3352 | ASSERT(vcpu); |
| 3353 | ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa)); |
| 3354 | |
| 3355 | if (!is_paging(vcpu)) |
| 3356 | r = nonpaging_init_context(vcpu, context); |
| 3357 | else if (is_long_mode(vcpu)) |
| 3358 | r = paging64_init_context(vcpu, context); |
| 3359 | else if (is_pae(vcpu)) |
| 3360 | r = paging32E_init_context(vcpu, context); |
| 3361 | else |
| 3362 | r = paging32_init_context(vcpu, context); |
| 3363 | |
| 3364 | vcpu->arch.mmu.base_role.cr4_pae = !!is_pae(vcpu); |
| 3365 | vcpu->arch.mmu.base_role.cr0_wp = is_write_protection(vcpu); |
| 3366 | vcpu->arch.mmu.base_role.smep_andnot_wp |
| 3367 | = smep && !is_write_protection(vcpu); |
| 3368 | |
| 3369 | return r; |
| 3370 | } |
| 3371 | EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu); |
| 3372 | |
| 3373 | static int init_kvm_softmmu(struct kvm_vcpu *vcpu) |
| 3374 | { |
| 3375 | int r = kvm_init_shadow_mmu(vcpu, vcpu->arch.walk_mmu); |
| 3376 | |
| 3377 | vcpu->arch.walk_mmu->set_cr3 = kvm_x86_ops->set_cr3; |
| 3378 | vcpu->arch.walk_mmu->get_cr3 = get_cr3; |
| 3379 | vcpu->arch.walk_mmu->get_pdptr = kvm_pdptr_read; |
| 3380 | vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault; |
| 3381 | |
| 3382 | return r; |
| 3383 | } |
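| | |
| | /* |
| | * For a non-nested guest, walk_mmu points at arch.mmu (see |
| | * kvm_mmu_create() below), so the callbacks installed here land on the |
| | * same context that kvm_init_shadow_mmu() just filled in. |
| | */ |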
| 3384 | |
| 3385 | static int init_kvm_nested_mmu(struct kvm_vcpu *vcpu) |
| 3386 | { |
| 3387 | struct kvm_mmu *g_context = &vcpu->arch.nested_mmu; |
| 3388 | |
| 3389 | g_context->get_cr3 = get_cr3; |
| 3390 | g_context->get_pdptr = kvm_pdptr_read; |
| 3391 | g_context->inject_page_fault = kvm_inject_page_fault; |
| 3392 | |
| 3393 | /* |
| 3394 | * Note that arch.mmu.gva_to_gpa translates l2_gpa to l1_gpa. The |
| 3395 | * translation of l2_gva to l1_gpa addresses is done using the |
| 3396 | * arch.nested_mmu.gva_to_gpa function. Basically the gva_to_gpa |
| 3397 | * functions between mmu and nested_mmu are swapped. |
| 3398 | */ |
| 3399 | if (!is_paging(vcpu)) { |
| 3400 | g_context->nx = false; |
| 3401 | g_context->root_level = 0; |
| 3402 | g_context->gva_to_gpa = nonpaging_gva_to_gpa_nested; |
| 3403 | } else if (is_long_mode(vcpu)) { |
| 3404 | g_context->nx = is_nx(vcpu); |
| 3405 | g_context->root_level = PT64_ROOT_LEVEL; |
| 3406 | reset_rsvds_bits_mask(vcpu, g_context); |
| 3407 | g_context->gva_to_gpa = paging64_gva_to_gpa_nested; |
| 3408 | } else if (is_pae(vcpu)) { |
| 3409 | g_context->nx = is_nx(vcpu); |
| 3410 | g_context->root_level = PT32E_ROOT_LEVEL; |
| 3411 | reset_rsvds_bits_mask(vcpu, g_context); |
| 3412 | g_context->gva_to_gpa = paging64_gva_to_gpa_nested; |
| 3413 | } else { |
| 3414 | g_context->nx = false; |
| 3415 | g_context->root_level = PT32_ROOT_LEVEL; |
| 3416 | reset_rsvds_bits_mask(vcpu, g_context); |
| 3417 | g_context->gva_to_gpa = paging32_gva_to_gpa_nested; |
| 3418 | } |
| 3419 | |
| 3420 | return 0; |
| 3421 | } |
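| | |
| | /* |
| | * Sketch of the nested path configured above: a guest access to an |
| | * l2_gva goes through nested_mmu.gva_to_gpa, which walks L2's own page |
| | * tables. Every table entry touched during that walk, as well as the |
| | * final frame, is an l2_gpa that still needs the l2_gpa -> l1_gpa step |
| | * provided by translate_nested_gpa (installed as nested_mmu.translate_gpa |
| | * in kvm_mmu_create() below) on top of arch.mmu. Hence the "swapped" |
| | * roles described in the comment above. |
| | */ |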
| 3422 | |
| 3423 | static int init_kvm_mmu(struct kvm_vcpu *vcpu) |
| 3424 | { |
| 3425 | if (mmu_is_nested(vcpu)) |
| 3426 | return init_kvm_nested_mmu(vcpu); |
| 3427 | else if (tdp_enabled) |
| 3428 | return init_kvm_tdp_mmu(vcpu); |
| 3429 | else |
| 3430 | return init_kvm_softmmu(vcpu); |
| 3431 | } |
| 3432 | |
| 3433 | static void destroy_kvm_mmu(struct kvm_vcpu *vcpu) |
| 3434 | { |
| 3435 | ASSERT(vcpu); |
| 3436 | if (VALID_PAGE(vcpu->arch.mmu.root_hpa)) |
| 3437 | /* mmu.free() should set root_hpa = INVALID_PAGE */ |
| 3438 | vcpu->arch.mmu.free(vcpu); |
| 3439 | } |
| 3440 | |
| 3441 | int kvm_mmu_reset_context(struct kvm_vcpu *vcpu) |
| 3442 | { |
| 3443 | destroy_kvm_mmu(vcpu); |
| 3444 | return init_kvm_mmu(vcpu); |
| 3445 | } |
| 3446 | EXPORT_SYMBOL_GPL(kvm_mmu_reset_context); |
| 3447 | |
| 3448 | int kvm_mmu_load(struct kvm_vcpu *vcpu) |
| 3449 | { |
| 3450 | int r; |
| 3451 | |
| 3452 | r = mmu_topup_memory_caches(vcpu); |
| 3453 | if (r) |
| 3454 | goto out; |
| 3455 | r = mmu_alloc_roots(vcpu); |
| 3456 | spin_lock(&vcpu->kvm->mmu_lock); |
| 3457 | mmu_sync_roots(vcpu); |
| 3458 | spin_unlock(&vcpu->kvm->mmu_lock); |
| 3459 | if (r) |
| 3460 | goto out; |
| 3461 | /* set_cr3() should ensure TLB has been flushed */ |
| 3462 | vcpu->arch.mmu.set_cr3(vcpu, vcpu->arch.mmu.root_hpa); |
| 3463 | out: |
| 3464 | return r; |
| 3465 | } |
| 3466 | EXPORT_SYMBOL_GPL(kvm_mmu_load); |
| 3467 | |
| 3468 | void kvm_mmu_unload(struct kvm_vcpu *vcpu) |
| 3469 | { |
| 3470 | mmu_free_roots(vcpu); |
| 3471 | } |
| 3472 | EXPORT_SYMBOL_GPL(kvm_mmu_unload); |
| 3473 | |
| 3474 | static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu, |
| 3475 | struct kvm_mmu_page *sp, u64 *spte, |
| 3476 | const void *new) |
| 3477 | { |
| 3478 | if (sp->role.level != PT_PAGE_TABLE_LEVEL) { |
| 3479 | ++vcpu->kvm->stat.mmu_pde_zapped; |
| 3480 | return; |
| 3481 | } |
| 3482 | |
| 3483 | ++vcpu->kvm->stat.mmu_pte_updated; |
| 3484 | vcpu->arch.mmu.update_pte(vcpu, sp, spte, new); |
| 3485 | } |
| 3486 | |
| 3487 | static bool need_remote_flush(u64 old, u64 new) |
| 3488 | { |
| 3489 | if (!is_shadow_present_pte(old)) |
| 3490 | return false; |
| 3491 | if (!is_shadow_present_pte(new)) |
| 3492 | return true; |
| 3493 | if ((old ^ new) & PT64_BASE_ADDR_MASK) |
| 3494 | return true; |
| 3495 | old ^= PT64_NX_MASK; |
| 3496 | new ^= PT64_NX_MASK; |
| 3497 | return (old & ~new & PT64_PERM_MASK) != 0; |
| 3498 | } |
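| | |
| | /* |
| | * Worked example for need_remote_flush(): if the old spte was not |
| | * present, no other CPU can be caching it, so no remote flush is needed. |
| | * If the new spte keeps the same frame but drops the writable bit, |
| | * "old & ~new" has PT_WRITABLE_MASK set and remote TLBs must be flushed. |
| | * The NX bit is inverted before the comparison because setting NX removes |
| | * a permission, whereas for the other bits in PT64_PERM_MASK clearing |
| | * does. |
| | */ |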
| 3499 | |
| 3500 | static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, bool zap_page, |
| 3501 | bool remote_flush, bool local_flush) |
| 3502 | { |
| 3503 | if (zap_page) |
| 3504 | return; |
| 3505 | |
| 3506 | if (remote_flush) |
| 3507 | kvm_flush_remote_tlbs(vcpu->kvm); |
| 3508 | else if (local_flush) |
| 3509 | kvm_mmu_flush_tlb(vcpu); |
| 3510 | } |
| 3511 | |
| 3512 | static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa, |
| 3513 | const u8 *new, int *bytes) |
| 3514 | { |
| 3515 | u64 gentry; |
| 3516 | int r; |
| 3517 | |
| 3518 | /* |
| 3519 | * Assume that the pte write is on a page table of the same type |
| 3520 | * as the current vcpu paging mode, since we update the sptes only |
| 3521 | * when they have the same mode. |
| 3522 | */ |
| 3523 | if (is_pae(vcpu) && *bytes == 4) { |
| 3524 | /* Handle a 32-bit guest writing two halves of a 64-bit gpte */ |
| 3525 | *gpa &= ~(gpa_t)7; |
| 3526 | *bytes = 8; |
| 3527 | r = kvm_read_guest(vcpu->kvm, *gpa, &gentry, min(*bytes, 8)); |
| 3528 | if (r) |
| 3529 | gentry = 0; |
| 3530 | new = (const u8 *)&gentry; |
| 3531 | } |
| 3532 | |
| 3533 | switch (*bytes) { |
| 3534 | case 4: |
| 3535 | gentry = *(const u32 *)new; |
| 3536 | break; |
| 3537 | case 8: |
| 3538 | gentry = *(const u64 *)new; |
| 3539 | break; |
| 3540 | default: |
| 3541 | gentry = 0; |
| 3542 | break; |
| 3543 | } |
| 3544 | |
| 3545 | return gentry; |
| 3546 | } |
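| | |
| | /* |
| | * Example of the widening above: a 32-bit PAE guest updating a 64-bit |
| | * gpte with two 4-byte stores, say the second at gpa 0x1004, is turned |
| | * into a single 8-byte read at gpa 0x1000, so gentry always holds a |
| | * complete gpte rather than a torn half. |
| | */ |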
| 3547 | |
| 3548 | /* |
| 3549 | * If we're seeing too many writes to a page, it may no longer be a page table, |
| 3550 | * or we may be forking, in which case it is better to unmap the page. |
| 3551 | */ |
| 3552 | static bool detect_write_flooding(struct kvm_mmu_page *sp) |
| 3553 | { |
| 3554 | /* |
| 3555 | * Skip write-flooding detection for an sp whose level is 1, because it can |
| 3556 | * become unsync, and then the guest page is no longer write-protected. |
| 3557 | */ |
| 3558 | if (sp->role.level == PT_PAGE_TABLE_LEVEL) |
| 3559 | return false; |
| 3560 | |
| 3561 | return ++sp->write_flooding_count >= 3; |
| 3562 | } |
| 3563 | |
| 3564 | /* |
| 3565 | * Misaligned accesses are too much trouble to fix up; also, they usually |
| 3566 | * indicate a page is not used as a page table. |
| 3567 | */ |
| 3568 | static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa, |
| 3569 | int bytes) |
| 3570 | { |
| 3571 | unsigned offset, pte_size, misaligned; |
| 3572 | |
| 3573 | pgprintk("misaligned: gpa %llx bytes %d role %x\n", |
| 3574 | gpa, bytes, sp->role.word); |
| 3575 | |
| 3576 | offset = offset_in_page(gpa); |
| 3577 | pte_size = sp->role.cr4_pae ? 8 : 4; |
| 3578 | |
| 3579 | /* |
| 3580 | * Sometimes the OS writes only a single byte to update status bits; for |
| 3581 | * example, Linux's clear_bit() uses an andb instruction. |
| 3582 | */ |
| 3583 | if (!(offset & (pte_size - 1)) && bytes == 1) |
| 3584 | return false; |
| 3585 | |
| 3586 | misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1); |
| 3587 | misaligned |= bytes < 4; |
| 3588 | |
| 3589 | return misaligned; |
| 3590 | } |
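| | |
| | /* |
| | * Example of the misalignment test above: with pte_size == 8, an 8-byte |
| | * write at offset 4 gives (4 ^ 11) & ~7 == 8, i.e. the access straddles |
| | * two gptes and is reported as misaligned. Any write shorter than 4 |
| | * bytes is also flagged via "bytes < 4", except for the aligned |
| | * single-byte status-bit update handled just above. |
| | */ |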
| 3591 | |
| 3592 | static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte) |
| 3593 | { |
| 3594 | unsigned page_offset, quadrant; |
| 3595 | u64 *spte; |
| 3596 | int level; |
| 3597 | |
| 3598 | page_offset = offset_in_page(gpa); |
| 3599 | level = sp->role.level; |
| 3600 | *nspte = 1; |
| 3601 | if (!sp->role.cr4_pae) { |
| 3602 | page_offset <<= 1; /* 32->64 */ |
| 3603 | /* |
| 3604 | * A 32-bit pde maps 4MB while the shadow pdes map |
| 3605 | * only 2MB. So we need to double the offset again |
| 3606 | * and zap two pdes instead of one. |
| 3607 | */ |
| 3608 | if (level == PT32_ROOT_LEVEL) { |
| 3609 | page_offset &= ~7; /* kill rounding error */ |
| 3610 | page_offset <<= 1; |
| 3611 | *nspte = 2; |
| 3612 | } |
| 3613 | quadrant = page_offset >> PAGE_SHIFT; |
| 3614 | page_offset &= ~PAGE_MASK; |
| 3615 | if (quadrant != sp->role.quadrant) |
| 3616 | return NULL; |
| 3617 | } |
| 3618 | |
| 3619 | spte = &sp->spt[page_offset / sizeof(*spte)]; |
| 3620 | return spte; |
| 3621 | } |
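| | |
| | /* |
| | * Example for the non-PAE case above: a 4-byte gpte write at page offset |
| | * 0x14 (gpte index 5) is doubled to 0x28, which is spte index 5 in the |
| | * shadow page - the same slot, just 8 bytes wide. If the doubled offset |
| | * crosses PAGE_SIZE, the overflow bit selects the quadrant, and only the |
| | * shadow page whose role.quadrant covers that half of the guest page is |
| | * used; for the others this function returns NULL. |
| | */ |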
| 3622 | |
| 3623 | void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, |
| 3624 | const u8 *new, int bytes) |
| 3625 | { |
| 3626 | gfn_t gfn = gpa >> PAGE_SHIFT; |
| 3627 | union kvm_mmu_page_role mask = { .word = 0 }; |
| 3628 | struct kvm_mmu_page *sp; |
| 3629 | struct hlist_node *node; |
| 3630 | LIST_HEAD(invalid_list); |
| 3631 | u64 entry, gentry, *spte; |
| 3632 | int npte; |
| 3633 | bool remote_flush, local_flush, zap_page; |
| 3634 | |
| 3635 | /* |
| 3636 | * If we don't have indirect shadow pages, it means no page is |
| 3637 | * write-protected, so we can simply return. |
| 3638 | */ |
| 3639 | if (!ACCESS_ONCE(vcpu->kvm->arch.indirect_shadow_pages)) |
| 3640 | return; |
| 3641 | |
| 3642 | zap_page = remote_flush = local_flush = false; |
| 3643 | |
| 3644 | pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes); |
| 3645 | |
| 3646 | gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, new, &bytes); |
| 3647 | |
| 3648 | /* |
| 3649 | * No need to care whether the memory allocation is successful |
| 3650 | * or not, since pte prefetch is skipped if there are not |
| 3651 | * enough objects in the cache. |
| 3652 | */ |
| 3653 | mmu_topup_memory_caches(vcpu); |
| 3654 | |
| 3655 | spin_lock(&vcpu->kvm->mmu_lock); |
| 3656 | ++vcpu->kvm->stat.mmu_pte_write; |
| 3657 | kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE); |
| 3658 | |
| 3659 | mask.cr0_wp = mask.cr4_pae = mask.nxe = 1; |
| 3660 | for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn, node) { |
| 3661 | if (detect_write_misaligned(sp, gpa, bytes) || |
| 3662 | detect_write_flooding(sp)) { |
| 3663 | zap_page |= !!kvm_mmu_prepare_zap_page(vcpu->kvm, sp, |
| 3664 | &invalid_list); |
| 3665 | ++vcpu->kvm->stat.mmu_flooded; |
| 3666 | continue; |
| 3667 | } |
| 3668 | |
| 3669 | spte = get_written_sptes(sp, gpa, &npte); |
| 3670 | if (!spte) |
| 3671 | continue; |
| 3672 | |
| 3673 | local_flush = true; |
| 3674 | while (npte--) { |
| 3675 | entry = *spte; |
| 3676 | mmu_page_zap_pte(vcpu->kvm, sp, spte); |
| 3677 | if (gentry && |
| 3678 | !((sp->role.word ^ vcpu->arch.mmu.base_role.word) |
| 3679 | & mask.word) && rmap_can_add(vcpu)) |
| 3680 | mmu_pte_write_new_pte(vcpu, sp, spte, &gentry); |
| 3681 | if (!remote_flush && need_remote_flush(entry, *spte)) |
| 3682 | remote_flush = true; |
| 3683 | ++spte; |
| 3684 | } |
| 3685 | } |
| 3686 | mmu_pte_write_flush_tlb(vcpu, zap_page, remote_flush, local_flush); |
| 3687 | kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); |
| 3688 | kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE); |
| 3689 | spin_unlock(&vcpu->kvm->mmu_lock); |
| 3690 | } |
| 3691 | |
| 3692 | int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva) |
| 3693 | { |
| 3694 | gpa_t gpa; |
| 3695 | int r; |
| 3696 | |
| 3697 | if (vcpu->arch.mmu.direct_map) |
| 3698 | return 0; |
| 3699 | |
| 3700 | gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL); |
| 3701 | |
| 3702 | r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT); |
| 3703 | |
| 3704 | return r; |
| 3705 | } |
| 3706 | EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt); |
| 3707 | |
| 3708 | void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu) |
| 3709 | { |
| 3710 | LIST_HEAD(invalid_list); |
| 3711 | |
| 3712 | while (kvm_mmu_available_pages(vcpu->kvm) < KVM_REFILL_PAGES && |
| 3713 | !list_empty(&vcpu->kvm->arch.active_mmu_pages)) { |
| 3714 | struct kvm_mmu_page *sp; |
| 3715 | |
| 3716 | sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev, |
| 3717 | struct kvm_mmu_page, link); |
| 3718 | kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list); |
| 3719 | ++vcpu->kvm->stat.mmu_recycled; |
| 3720 | } |
| 3721 | kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); |
| 3722 | } |
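| | |
| | /* |
| | * Assuming new shadow pages are linked at the head of active_mmu_pages |
| | * when they are created, taking ".prev" in the loop above zaps the |
| | * oldest pages first - a rough heuristic for reclaiming the pages least |
| | * likely to still be needed. |
| | */ |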
| 3723 | |
| 3724 | static bool is_mmio_page_fault(struct kvm_vcpu *vcpu, gva_t addr) |
| 3725 | { |
| 3726 | if (vcpu->arch.mmu.direct_map || mmu_is_nested(vcpu)) |
| 3727 | return vcpu_match_mmio_gpa(vcpu, addr); |
| 3728 | |
| 3729 | return vcpu_match_mmio_gva(vcpu, addr); |
| 3730 | } |
| 3731 | |
| 3732 | int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code, |
| 3733 | void *insn, int insn_len) |
| 3734 | { |
| 3735 | int r, emulation_type = EMULTYPE_RETRY; |
| 3736 | enum emulation_result er; |
| 3737 | |
| 3738 | r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false); |
| 3739 | if (r < 0) |
| 3740 | goto out; |
| 3741 | |
| 3742 | if (!r) { |
| 3743 | r = 1; |
| 3744 | goto out; |
| 3745 | } |
| 3746 | |
| 3747 | if (is_mmio_page_fault(vcpu, cr2)) |
| 3748 | emulation_type = 0; |
| 3749 | |
| 3750 | er = x86_emulate_instruction(vcpu, cr2, emulation_type, insn, insn_len); |
| 3751 | |
| 3752 | switch (er) { |
| 3753 | case EMULATE_DONE: |
| 3754 | return 1; |
| 3755 | case EMULATE_DO_MMIO: |
| 3756 | ++vcpu->stat.mmio_exits; |
| 3757 | /* fall through */ |
| 3758 | case EMULATE_FAIL: |
| 3759 | return 0; |
| 3760 | default: |
| 3761 | BUG(); |
| 3762 | } |
| 3763 | out: |
| 3764 | return r; |
| 3765 | } |
| 3766 | EXPORT_SYMBOL_GPL(kvm_mmu_page_fault); |
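| | |
| | /* |
| | * Return-value convention in kvm_mmu_page_fault() above: the per-mode |
| | * page_fault callback returns 0 when the fault was fixed in the |
| | * shadow/TDP tables, a positive value when the access has to be |
| | * emulated, and a negative value on error. The result is then mapped |
| | * onto the usual KVM exit-handler convention: 1 resumes the guest, 0 |
| | * exits to userspace, negative reports an error. |
| | */ |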
| 3767 | |
| 3768 | void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva) |
| 3769 | { |
| 3770 | vcpu->arch.mmu.invlpg(vcpu, gva); |
| 3771 | kvm_mmu_flush_tlb(vcpu); |
| 3772 | ++vcpu->stat.invlpg; |
| 3773 | } |
| 3774 | EXPORT_SYMBOL_GPL(kvm_mmu_invlpg); |
| 3775 | |
| 3776 | void kvm_enable_tdp(void) |
| 3777 | { |
| 3778 | tdp_enabled = true; |
| 3779 | } |
| 3780 | EXPORT_SYMBOL_GPL(kvm_enable_tdp); |
| 3781 | |
| 3782 | void kvm_disable_tdp(void) |
| 3783 | { |
| 3784 | tdp_enabled = false; |
| 3785 | } |
| 3786 | EXPORT_SYMBOL_GPL(kvm_disable_tdp); |
| 3787 | |
| 3788 | static void free_mmu_pages(struct kvm_vcpu *vcpu) |
| 3789 | { |
| 3790 | free_page((unsigned long)vcpu->arch.mmu.pae_root); |
| 3791 | if (vcpu->arch.mmu.lm_root != NULL) |
| 3792 | free_page((unsigned long)vcpu->arch.mmu.lm_root); |
| 3793 | } |
| 3794 | |
| 3795 | static int alloc_mmu_pages(struct kvm_vcpu *vcpu) |
| 3796 | { |
| 3797 | struct page *page; |
| 3798 | int i; |
| 3799 | |
| 3800 | ASSERT(vcpu); |
| 3801 | |
| 3802 | /* |
| 3803 | * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64. |
| 3804 | * Therefore we need to allocate shadow page tables in the first |
| 3805 | * 4GB of memory, which happens to fit the DMA32 zone. |
| 3806 | */ |
| 3807 | page = alloc_page(GFP_KERNEL | __GFP_DMA32); |
| 3808 | if (!page) |
| 3809 | return -ENOMEM; |
| 3810 | |
| 3811 | vcpu->arch.mmu.pae_root = page_address(page); |
| 3812 | for (i = 0; i < 4; ++i) |
| 3813 | vcpu->arch.mmu.pae_root[i] = INVALID_PAGE; |
| 3814 | |
| 3815 | return 0; |
| 3816 | } |
| 3817 | |
| 3818 | int kvm_mmu_create(struct kvm_vcpu *vcpu) |
| 3819 | { |
| 3820 | ASSERT(vcpu); |
| 3821 | |
| 3822 | vcpu->arch.walk_mmu = &vcpu->arch.mmu; |
| 3823 | vcpu->arch.mmu.root_hpa = INVALID_PAGE; |
| 3824 | vcpu->arch.mmu.translate_gpa = translate_gpa; |
| 3825 | vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa; |
| 3826 | |
| 3827 | return alloc_mmu_pages(vcpu); |
| 3828 | } |
| 3829 | |
| 3830 | int kvm_mmu_setup(struct kvm_vcpu *vcpu) |
| 3831 | { |
| 3832 | ASSERT(vcpu); |
| 3833 | ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa)); |
| 3834 | |
| 3835 | return init_kvm_mmu(vcpu); |
| 3836 | } |
| 3837 | |
| 3838 | void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot) |
| 3839 | { |
| 3840 | struct kvm_mmu_page *sp; |
| 3841 | |
| 3842 | list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) { |
| 3843 | int i; |
| 3844 | u64 *pt; |
| 3845 | |
| 3846 | if (!test_bit(slot, sp->slot_bitmap)) |
| 3847 | continue; |
| 3848 | |
| 3849 | pt = sp->spt; |
| 3850 | for (i = 0; i < PT64_ENT_PER_PAGE; ++i) { |
| 3851 | if (!is_shadow_present_pte(pt[i]) || |
| 3852 | !is_last_spte(pt[i], sp->role.level)) |
| 3853 | continue; |
| 3854 | |
| 3855 | if (is_large_pte(pt[i])) { |
| 3856 | drop_spte(kvm, &pt[i]); |
| 3857 | --kvm->stat.lpages; |
| 3858 | continue; |
| 3859 | } |
| 3860 | |
| 3861 | /* avoid RMW */ |
| 3862 | if (is_writable_pte(pt[i])) |
| 3863 | mmu_spte_update(&pt[i], |
| 3864 | pt[i] & ~PT_WRITABLE_MASK); |
| 3865 | } |
| 3866 | } |
| 3867 | kvm_flush_remote_tlbs(kvm); |
| 3868 | } |
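| | |
| | /* |
| | * Note on the loop above: large (2MB/4MB) sptes are dropped outright |
| | * rather than just write-protected, typically because write protection |
| | * here serves dirty logging, which tracks pages at 4K granularity; the |
| | * next fault is expected to rebuild the range with small pages. Ordinary |
| | * last-level sptes simply lose PT_WRITABLE_MASK. |
| | */ |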
| 3869 | |
| 3870 | void kvm_mmu_zap_all(struct kvm *kvm) |
| 3871 | { |
| 3872 | struct kvm_mmu_page *sp, *node; |
| 3873 | LIST_HEAD(invalid_list); |
| 3874 | |
| 3875 | spin_lock(&kvm->mmu_lock); |
| 3876 | restart: |
| 3877 | list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) |
| 3878 | if (kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list)) |
| 3879 | goto restart; |
| 3880 | |
| 3881 | kvm_mmu_commit_zap_page(kvm, &invalid_list); |
| 3882 | spin_unlock(&kvm->mmu_lock); |
| 3883 | } |
| 3884 | |
| 3885 | static void kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm, |
| 3886 | struct list_head *invalid_list) |
| 3887 | { |
| 3888 | struct kvm_mmu_page *page; |
| 3889 | |
| 3890 | page = container_of(kvm->arch.active_mmu_pages.prev, |
| 3891 | struct kvm_mmu_page, link); |
| 3892 | kvm_mmu_prepare_zap_page(kvm, page, invalid_list); |
| 3893 | } |
| 3894 | |
| 3895 | static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc) |
| 3896 | { |
| 3897 | struct kvm *kvm; |
| 3898 | struct kvm *kvm_freed = NULL; |
| 3899 | int nr_to_scan = sc->nr_to_scan; |
| 3900 | |
| 3901 | if (nr_to_scan == 0) |
| 3902 | goto out; |
| 3903 | |
| 3904 | raw_spin_lock(&kvm_lock); |
| 3905 | |
| 3906 | list_for_each_entry(kvm, &vm_list, vm_list) { |
| 3907 | int idx; |
| 3908 | LIST_HEAD(invalid_list); |
| 3909 | |
| 3910 | idx = srcu_read_lock(&kvm->srcu); |
| 3911 | spin_lock(&kvm->mmu_lock); |
| 3912 | if (!kvm_freed && nr_to_scan > 0 && |
| 3913 | kvm->arch.n_used_mmu_pages > 0) { |
| 3914 | kvm_mmu_remove_some_alloc_mmu_pages(kvm, |
| 3915 | &invalid_list); |
| 3916 | kvm_freed = kvm; |
| 3917 | } |
| 3918 | nr_to_scan--; |
| 3919 | |
| 3920 | kvm_mmu_commit_zap_page(kvm, &invalid_list); |
| 3921 | spin_unlock(&kvm->mmu_lock); |
| 3922 | srcu_read_unlock(&kvm->srcu, idx); |
| 3923 | } |
| 3924 | if (kvm_freed) |
| 3925 | list_move_tail(&kvm_freed->vm_list, &vm_list); |
| 3926 | |
| 3927 | raw_spin_unlock(&kvm_lock); |
| 3928 | |
| 3929 | out: |
| 3930 | return percpu_counter_read_positive(&kvm_total_used_mmu_pages); |
| 3931 | } |
| 3932 | |
| 3933 | static struct shrinker mmu_shrinker = { |
| 3934 | .shrink = mmu_shrink, |
| 3935 | .seeks = DEFAULT_SEEKS * 10, |
| 3936 | }; |
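| | |
| | /* |
| | * DEFAULT_SEEKS * 10 makes this shrinker comparatively reluctant: the |
| | * higher the seeks value, the fewer objects the VM asks it to scan, |
| | * reflecting that shadow pages are expensive to recreate (each one costs |
| | * guest page faults to repopulate). |
| | */ |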
| 3937 | |
| 3938 | static void mmu_destroy_caches(void) |
| 3939 | { |
| 3940 | if (pte_list_desc_cache) |
| 3941 | kmem_cache_destroy(pte_list_desc_cache); |
| 3942 | if (mmu_page_header_cache) |
| 3943 | kmem_cache_destroy(mmu_page_header_cache); |
| 3944 | } |
| 3945 | |
| 3946 | int kvm_mmu_module_init(void) |
| 3947 | { |
| 3948 | pte_list_desc_cache = kmem_cache_create("pte_list_desc", |
| 3949 | sizeof(struct pte_list_desc), |
| 3950 | 0, 0, NULL); |
| 3951 | if (!pte_list_desc_cache) |
| 3952 | goto nomem; |
| 3953 | |
| 3954 | mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header", |
| 3955 | sizeof(struct kvm_mmu_page), |
| 3956 | 0, 0, NULL); |
| 3957 | if (!mmu_page_header_cache) |
| 3958 | goto nomem; |
| 3959 | |
| 3960 | if (percpu_counter_init(&kvm_total_used_mmu_pages, 0)) |
| 3961 | goto nomem; |
| 3962 | |
| 3963 | register_shrinker(&mmu_shrinker); |
| 3964 | |
| 3965 | return 0; |
| 3966 | |
| 3967 | nomem: |
| 3968 | mmu_destroy_caches(); |
| 3969 | return -ENOMEM; |
| 3970 | } |
| 3971 | |
| 3972 | /* |
| 3973 | * Calculate mmu pages needed for kvm. |
| 3974 | */ |
| 3975 | unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm) |
| 3976 | { |
| 3977 | unsigned int nr_mmu_pages; |
| 3978 | unsigned int nr_pages = 0; |
| 3979 | struct kvm_memslots *slots; |
| 3980 | struct kvm_memory_slot *memslot; |
| 3981 | |
| 3982 | slots = kvm_memslots(kvm); |
| 3983 | |
| 3984 | kvm_for_each_memslot(memslot, slots) |
| 3985 | nr_pages += memslot->npages; |
| 3986 | |
| 3987 | nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000; |
| 3988 | nr_mmu_pages = max(nr_mmu_pages, |
| 3989 | (unsigned int) KVM_MIN_ALLOC_MMU_PAGES); |
| 3990 | |
| 3991 | return nr_mmu_pages; |
| 3992 | } |
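| | |
| | /* |
| | * Rough arithmetic for the sizing above, assuming the usual definitions |
| | * of KVM_PERMILLE_MMU_PAGES (20) and KVM_MIN_ALLOC_MMU_PAGES (64): a |
| | * guest with 4GB of memory spans about 1,048,576 4K pages, so it is |
| | * allowed roughly 20,971 shadow pages (about 80MB of shadow page |
| | * tables), while a very small guest is still granted at least 64. |
| | */ |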
| 3993 | |
| 3994 | int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]) |
| 3995 | { |
| 3996 | struct kvm_shadow_walk_iterator iterator; |
| 3997 | u64 spte; |
| 3998 | int nr_sptes = 0; |
| 3999 | |
| 4000 | walk_shadow_page_lockless_begin(vcpu); |
| 4001 | for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) { |
| 4002 | sptes[iterator.level-1] = spte; |
| 4003 | nr_sptes++; |
| 4004 | if (!is_shadow_present_pte(spte)) |
| 4005 | break; |
| 4006 | } |
| 4007 | walk_shadow_page_lockless_end(vcpu); |
| 4008 | |
| 4009 | return nr_sptes; |
| 4010 | } |
| 4011 | EXPORT_SYMBOL_GPL(kvm_mmu_get_spte_hierarchy); |
| 4012 | |
| 4013 | void kvm_mmu_destroy(struct kvm_vcpu *vcpu) |
| 4014 | { |
| 4015 | ASSERT(vcpu); |
| 4016 | |
| 4017 | destroy_kvm_mmu(vcpu); |
| 4018 | free_mmu_pages(vcpu); |
| 4019 | mmu_free_memory_caches(vcpu); |
| 4020 | } |
| 4021 | |
| 4022 | void kvm_mmu_module_exit(void) |
| 4023 | { |
| 4024 | mmu_destroy_caches(); |
| 4025 | percpu_counter_destroy(&kvm_total_used_mmu_pages); |
| 4026 | unregister_shrinker(&mmu_shrinker); |
| 4027 | mmu_audit_disable(); |
| 4028 | } |