#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#include <asm/page.h>
#include <asm/e820.h>

#include <asm/pgtable_types.h>

/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot)					\
	((boot_cpu_data.x86 > 3)				\
	 ? (__pgprot(pgprot_val(prot) | _PAGE_CACHE_UC_MINUS))	\
	 : (prot))
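
/*
 * Illustrative use (not part of this header): a driver mapping a
 * device BAR would typically disable caching on the vma like so:
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	io_remap_pfn_range(vma, vma->vm_start, pfn, size,
 *			   vma->vm_page_prot);
 *
 * On pre-486 CPUs (boot_cpu_data.x86 <= 3) there is no cache to
 * control, so the protection value is returned unchanged.
 */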

#ifndef __ASSEMBLY__

#include <asm/x86_init.h>

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

extern struct mm_struct *pgd_page_get_mm(struct page *page);

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte)		native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)
#define set_pmd_at(mm, addr, pmdp, pmd)	native_set_pmd_at(mm, addr, pmdp, pmd)

#define set_pte_atomic(ptep, pte)					\
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_PUD_FOLDED
#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)			native_pgd_clear(pgd)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pud_clear(pud)			native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)			native_pmd_clear(pmd)

#define pte_update(mm, addr, ptep)		do { } while (0)
#define pte_update_defer(mm, addr, ptep)	do { } while (0)
#define pmd_update(mm, addr, ptep)		do { } while (0)
#define pmd_update_defer(mm, addr, ptep)	do { } while (0)

#define pgd_val(x)	native_pgd_val(x)
#define __pgd(x)	native_make_pgd(x)

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x)	native_pud_val(x)
#define __pud(x)	native_make_pud(x)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x)	native_pmd_val(x)
#define __pmd(x)	native_make_pmd(x)
#endif

#define pte_val(x)	native_pte_val(x)
#define __pte(x)	native_make_pte(x)

#define arch_end_context_switch(prev)	do { } while (0)

#endif	/* CONFIG_PARAVIRT */

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
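/*
 * E.g. (illustrative, not an API of this header) a sweep that trusts
 * the dirty bit only for present ptes:
 *
 *	if (pte_present(pte) && pte_dirty(pte))
 *		set_page_dirty(pte_page(pte));
 */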
static inline int pte_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pmd_young(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
	return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_file(pte_t pte)
{
	return pte_flags(pte) & _PAGE_FILE;
}

static inline int pte_huge(pte_t pte)
{
	return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
	return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
	return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SPECIAL;
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

static inline int pmd_large(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_PSE;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_splitting(pmd_t pmd)
{
	return pmd_val(pmd) & _PAGE_SPLITTING;
}

static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _PAGE_PSE;
}

static inline int has_transparent_hugepage(void)
{
	return cpu_has_pse;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v | set);
}

static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v & ~clear);
}
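
/*
 * The pte_mk*()/pte_clr*() helpers below are all thin wrappers around
 * the two primitives above.  They compose freely; e.g. preparing a
 * clean, write-protected copy of a pte (as copy-on-write setup wants)
 * is just (illustrative):
 *
 *	pte = pte_wrprotect(pte_mkclean(pte));
 */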

static inline pte_t pte_mkclean(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL);
}

static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
{
	pmdval_t v = native_pmd_val(pmd);

	return __pmd(v | set);
}

static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
{
	pmdval_t v = native_pmd_val(pmd);

	return __pmd(v & ~clear);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DIRTY);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_PSE);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_PRESENT);
}

/*
 * Mask out unsupported bits in a present pgprot.  Non-present pgprots
 * can use those bits for other purposes, so leave them be.
 */
static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
{
	pgprotval_t protval = pgprot_val(pgprot);

	if (protval & _PAGE_PRESENT)
		protval &= __supported_pte_mask;

	return protval;
}
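
/*
 * E.g. on a CPU without NX support, _PAGE_NX is clear in
 * __supported_pte_mask, so a present protection such as PAGE_KERNEL
 * has its NX bit stripped here, while a non-present value (say a swap
 * entry encoding) passes through untouched.
 */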

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte);

	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	val &= _PAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK;

	return __pte(val);
}
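
/*
 * pte_modify() is essentially what mprotect() applies per pte
 * (illustrative):
 *
 *	pte = pte_modify(pte, vma->vm_page_prot);
 *
 * The pfn and the other bits in _PAGE_CHG_MASK survive; the remaining
 * protection bits are taken from the new pgprot.
 */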

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmdval_t val = pmd_val(pmd);

	val &= _HPAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK;

	return __pmd(val);
}

/* mprotect needs to preserve PAT bits when updating vm_page_prot */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
	pgprotval_t addbits = pgprot_val(newprot);
	return __pgprot(preservebits | addbits);
}

#define pte_pgprot(x) __pgprot(pte_flags(x) & PTE_FLAGS_MASK)

#define canon_pgprot(p) __pgprot(massage_pgprot(p))

static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
					 unsigned long flags,
					 unsigned long new_flags)
{
	/*
	 * PAT type is always WB for untracked ranges, so no need to check.
	 */
	if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
		return 1;

	/*
	 * Certain new memtypes are not allowed with certain
	 * requested memtype:
	 * - request is uncached, return cannot be write-back
	 * - request is write-combine, return cannot be write-back
	 */
	if ((flags == _PAGE_CACHE_UC_MINUS &&
	     new_flags == _PAGE_CACHE_WB) ||
	    (flags == _PAGE_CACHE_WC &&
	     new_flags == _PAGE_CACHE_WB)) {
		return 0;
	}

	return 1;
}
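
/*
 * Example: a range first mapped UC- (flags) cannot later be mapped WB
 * (new_flags); mixing an uncached and a write-back alias of the same
 * physical range would be incoherent, so the function returns 0.
 */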

pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);
#endif	/* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include <asm/pgtable_32.h>
#else
# include <asm/pgtable_64.h>
#endif

#ifndef __ASSEMBLY__
#include <linux/mm_types.h>

static inline int pte_none(pte_t pte)
{
	return !pte.pte;
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return a.pte == b.pte;
}

static inline int pte_present(pte_t a)
{
	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE |
			       _PAGE_NUMA);
}

#define pte_accessible pte_accessible
static inline int pte_accessible(pte_t a)
{
	return pte_flags(a) & _PAGE_PRESENT;
}

static inline int pte_hidden(pte_t pte)
{
	return pte_flags(pte) & _PAGE_HIDDEN;
}

static inline int pmd_present(pmd_t pmd)
{
	/*
	 * Checking for _PAGE_PSE is needed too because
	 * split_huge_page will temporarily clear the present bit (but
	 * the _PAGE_PSE flag will remain set at all times while the
	 * _PAGE_PRESENT bit is clear).
	 */
	return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE |
				 _PAGE_NUMA);
}

static inline int pmd_none(pmd_t pmd)
{
	/* Only check low word on 32-bit platforms, since it might be
	   out of sync with upper half. */
	return (unsigned long)native_pmd_val(pmd) == 0;
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_val(pmd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pmd_page(pmd)	pfn_to_page((pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT)

/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this function returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
static inline unsigned long pmd_index(unsigned long address)
{
	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}
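
/*
 * E.g. on x86_64 (PMD_SHIFT == 21, PTRS_PER_PMD == 512), each pmd
 * entry covers 2 MiB, so pmd_index(0x200000) == (0x200000 >> 21) & 511
 * == 1, the second entry in the pmd page.
 */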

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * (Currently stuck as a macro because of indirect forward reference
 * to linux/mm.h:page_to_nid())
 */
#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this function returns the index of the entry in the pte page which would
 * control the given virtual address
 */
static inline unsigned long pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}
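
/*
 * Putting the levels together, a full software walk of a kernel
 * address looks roughly like this (illustrative, no error checking):
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 *
 * Real callers check pud_none()/pud_bad(), pmd_none()/pmd_bad(), and
 * pmd_large() for huge mappings before descending; see lookup_address().
 */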

static inline int pmd_bad(pmd_t pmd)
{
#ifdef CONFIG_NUMA_BALANCING
	/* pmd_numa check */
	if ((pmd_flags(pmd) & (_PAGE_NUMA|_PAGE_PRESENT)) == _PAGE_NUMA)
		return 0;
#endif
	return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
	return npg >> (20 - PAGE_SHIFT);
}
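
/*
 * E.g. with 4 KiB pages (PAGE_SHIFT == 12) this is npg >> 8:
 * 262144 pages * 4 KiB = 1 GiB, and pages_to_mb(262144) == 1024.
 */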

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	\
	remap_pfn_range(vma, vaddr, pfn, size, prot)

#if PAGETABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
	return native_pud_val(pud) == 0;
}

static inline int pud_present(pud_t pud)
{
	return pud_flags(pud) & _PAGE_PRESENT;
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
	return (unsigned long)__va((unsigned long)pud_val(pud) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pud_page(pud)		pfn_to_page(pud_val(pud) >> PAGE_SHIFT)

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}

static inline int pud_large(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline int pud_bad(pud_t pud)
{
	return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#else
static inline int pud_large(pud_t pud)
{
	return 0;
}
#endif	/* PAGETABLE_LEVELS > 2 */

#if PAGETABLE_LEVELS > 3
static inline int pgd_present(pgd_t pgd)
{
	return pgd_flags(pgd) & _PAGE_PRESENT;
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pgd_page(pgd)		pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)

/* to find an entry in a page-table-directory. */
static inline unsigned long pud_index(unsigned long address)
{
	return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
}

static inline int pgd_bad(pgd_t pgd)
{
	return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline int pgd_none(pgd_t pgd)
{
	return !native_pgd_val(pgd);
}
#endif	/* PAGETABLE_LEVELS > 3 */

#endif	/* __ASSEMBLY__ */

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
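
/*
 * E.g. on x86_64 (PGDIR_SHIFT == 39, PTRS_PER_PGD == 512) each pgd
 * entry covers 512 GiB, and pgd_index(0xffff880000000000), the
 * direct-mapping base on this vintage of kernel, comes out to 272,
 * an entry in the kernel half of the pgd.
 */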

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))
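
/*
 * E.g. a fault handler begins its walk with one of (illustrative):
 *
 *	pgd = pgd_offset(current->mm, address);    (user address space)
 *	pgd = pgd_offset_k(address);               (kernel address space)
 */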


#define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

#ifndef __ASSEMBLY__

extern int direct_gbpages;

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
{
	pmd_t res = *pmdp;

	native_pmd_clear(pmdp);
	return res;
}

static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

static inline void native_set_pmd_at(struct mm_struct *mm, unsigned long addr,
				     pmd_t *pmdp, pmd_t pmd)
{
	native_set_pmd(pmdp, pmd);
}

#ifndef CONFIG_PARAVIRT
/*
 * Rules for using pte_update - it must be called after any PTE update which
 * has not been done using the set_pte / clear_pte interfaces.  It is used by
 * shadow mode hypervisors to resynchronize the shadow page tables.  Kernel PTE
 * updates should either be sets, clears, or set_pte_atomic for P->P
 * transitions, which means this hook should only be called for user PTEs.
 * This hook implies a P->P protection or access change has taken place, which
 * requires a subsequent TLB flush.  The notification can optionally be delayed
 * until the TLB flush event by using the pte_update_defer form of the
 * interface, but care must be taken to assure that the flush happens while
 * still holding the same page table lock so that the shadow and primary pages
 * do not become out of sync on SMP.
 */
#define pte_update(mm, addr, ptep)		do { } while (0)
#define pte_update_defer(mm, addr, ptep)	do { } while (0)
#endif

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPUs that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	pte_update(mm, addr, ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
	pte_update(mm, addr, ptep);
}
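
/*
 * E.g. fork() write-protects every writable private pte through this
 * helper, so that parent and child take copy-on-write faults after the
 * address space is duplicated (see copy_one_pte() in mm/memory.c).
 */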

#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)

#define mk_pmd(page, pgprot)   pfn_pmd(page_to_pfn(page), (pgprot))

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);


#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
				 unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_RW;
}

#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pmd_t *pmdp)
{
	pmd_t pmd = native_pmdp_get_and_clear(pmdp);
	pmd_update(mm, addr, pmdp);
	return pmd;
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
	pmd_update(mm, addr, pmdp);
}

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
}
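
/*
 * E.g. pgd construction copies the kernel mappings from the reference
 * swapper_pg_dir into each new pgd (illustrative; pgd_ctor() in
 * arch/x86/mm/pgtable.c does essentially this):
 *
 *	clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
 *			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
 *			KERNEL_PGD_PTRS);
 */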


#include <asm-generic/pgtable.h>
#endif	/* __ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_H */