mm: save soft-dirty bits on swapped pages
arch/x86/include/asm/pgtable.h

#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#include <asm/page.h>
#include <asm/e820.h>

#include <asm/pgtable_types.h>

/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot)					\
	((boot_cpu_data.x86 > 3)				\
	 ? (__pgprot(pgprot_val(prot) | _PAGE_CACHE_UC_MINUS))	\
	 : (prot))

#ifndef __ASSEMBLY__

#include <asm/x86_init.h>

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

extern struct mm_struct *pgd_page_get_mm(struct page *page);

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte)		native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)
#define set_pmd_at(mm, addr, pmdp, pmd)	native_set_pmd_at(mm, addr, pmdp, pmd)

#define set_pte_atomic(ptep, pte)					\
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_PUD_FOLDED
#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)			native_pgd_clear(pgd)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pud_clear(pud)			native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)			native_pmd_clear(pmd)

#define pte_update(mm, addr, ptep)		do { } while (0)
#define pte_update_defer(mm, addr, ptep)	do { } while (0)
#define pmd_update(mm, addr, ptep)		do { } while (0)
#define pmd_update_defer(mm, addr, ptep)	do { } while (0)

#define pgd_val(x)	native_pgd_val(x)
#define __pgd(x)	native_make_pgd(x)

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x)	native_pud_val(x)
#define __pud(x)	native_make_pud(x)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x)	native_pmd_val(x)
#define __pmd(x)	native_make_pmd(x)
#endif

#define pte_val(x)	native_pte_val(x)
#define __pte(x)	native_make_pte(x)

#define arch_end_context_switch(prev)	do { } while (0)

#endif	/* CONFIG_PARAVIRT */
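
/*
 * Note: when CONFIG_PARAVIRT is enabled the operations above are
 * instead provided by <asm/paravirt.h>, which routes them through the
 * hypervisor's mmu hooks; the native_* variants remain the bare-metal
 * implementations in either configuration.
 */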

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pmd_young(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
	return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_file(pte_t pte)
{
	return pte_flags(pte) & _PAGE_FILE;
}

static inline int pte_huge(pte_t pte)
{
	return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
	return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
	return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SPECIAL;
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pud_pfn(pud_t pud)
{
	return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

static inline int pmd_large(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_PSE;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_splitting(pmd_t pmd)
{
	return pmd_val(pmd) & _PAGE_SPLITTING;
}

static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _PAGE_PSE;
}

static inline int has_transparent_hugepage(void)
{
	return cpu_has_pse;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
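
/*
 * The THP helpers above are only meaningful under the appropriate
 * locking (mmap_sem or the page table lock): _PAGE_SPLITTING marks a
 * huge pmd that is currently being split back into regular ptes, and
 * pmd_trans_huge() keys off _PAGE_PSE just like pmd_large().
 */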

static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v | set);
}

static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v & ~clear);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}
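
/*
 * Note that pte_mkdirty() sets _PAGE_SOFT_DIRTY along with the
 * hardware dirty bit.  Soft-dirty is a purely software-tracked bit
 * used for memory-change tracking (e.g. by checkpoint/restore via
 * /proc/<pid>/pagemap and /proc/<pid>/clear_refs), so any operation
 * that dirties a page must set it too.
 */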

static inline pte_t pte_mkyoung(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL);
}

static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
{
	pmdval_t v = native_pmd_val(pmd);

	return __pmd(v | set);
}

static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
{
	pmdval_t v = native_pmd_val(pmd);

	return __pmd(v & ~clear);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_PSE);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_PRESENT);
}

static inline int pte_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SOFT_DIRTY;
}

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SOFT_DIRTY;
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pte_swp_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}
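
/*
 * The _PAGE_SWP_SOFT_DIRTY helpers above exist because swapping out a
 * page replaces its pte with a swap entry, which would otherwise lose
 * the soft-dirty state; the bit is saved in a spare bit of the
 * non-present pte and restored on swap-in.  An illustrative sketch of
 * the swap-out side (the real call sites live in mm/):
 *
 *	swp_pte = swp_entry_to_pte(entry);
 *	if (pte_soft_dirty(old_pte))
 *		swp_pte = pte_swp_mksoft_dirty(swp_pte);
 *	set_pte_at(mm, addr, ptep, swp_pte);
 */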

/*
 * Mask out unsupported bits in a present pgprot.  Non-present pgprots
 * can use those bits for other purposes, so leave them be.
 */
static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
{
	pgprotval_t protval = pgprot_val(pgprot);

	if (protval & _PAGE_PRESENT)
		protval &= __supported_pte_mask;

	return protval;
}
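
/*
 * For example, on a CPU without NX support __supported_pte_mask has
 * _PAGE_NX cleared, so massage_pgprot() keeps pfn_pte()/pfn_pmd()
 * below from installing a bit the hardware would treat as reserved.
 */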

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte);

	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	val &= _PAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK;

	return __pte(val);
}
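
/*
 * _PAGE_CHG_MASK covers the pfn plus the state bits that must survive
 * a protection change (such as accessed and dirty), so pte_modify()
 * swaps in the new protection bits without losing that state.
 */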

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmdval_t val = pmd_val(pmd);

	val &= _HPAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK;

	return __pmd(val);
}

/* mprotect needs to preserve PAT bits when updating vm_page_prot */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
	pgprotval_t addbits = pgprot_val(newprot);
	return __pgprot(preservebits | addbits);
}

#define pte_pgprot(x) __pgprot(pte_flags(x) & PTE_FLAGS_MASK)

#define canon_pgprot(p) __pgprot(massage_pgprot(p))

static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
					 unsigned long flags,
					 unsigned long new_flags)
{
	/*
	 * PAT type is always WB for untracked ranges, so no need to check.
	 */
	if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
		return 1;

	/*
	 * Certain new memtypes are not allowed with certain
	 * requested memtype:
	 * - request is uncached, return cannot be write-back
	 * - request is write-combine, return cannot be write-back
	 */
	if ((flags == _PAGE_CACHE_UC_MINUS &&
	     new_flags == _PAGE_CACHE_WB) ||
	    (flags == _PAGE_CACHE_WC &&
	     new_flags == _PAGE_CACHE_WB)) {
		return 0;
	}

	return 1;
}

pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);
#endif	/* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include <asm/pgtable_32.h>
#else
# include <asm/pgtable_64.h>
#endif

#ifndef __ASSEMBLY__
#include <linux/mm_types.h>
#include <linux/log2.h>

static inline int pte_none(pte_t pte)
{
	return !pte.pte;
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return a.pte == b.pte;
}

static inline int pte_present(pte_t a)
{
	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE |
			       _PAGE_NUMA);
}
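
/*
 * Note that pte_present() also returns true for _PAGE_PROTNONE and
 * _PAGE_NUMA ptes: both clear the hardware present bit while the page
 * itself stays resident (PROT_NONE mappings and NUMA-balancing
 * hinting faults respectively).
 */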

#define pte_accessible pte_accessible
static inline int pte_accessible(pte_t a)
{
	return pte_flags(a) & _PAGE_PRESENT;
}

static inline int pte_hidden(pte_t pte)
{
	return pte_flags(pte) & _PAGE_HIDDEN;
}

static inline int pmd_present(pmd_t pmd)
{
	/*
	 * Checking for _PAGE_PSE is needed too because
	 * split_huge_page will temporarily clear the present bit (but
	 * the _PAGE_PSE flag will remain set at all times while the
	 * _PAGE_PRESENT bit is clear).
	 */
	return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE |
				 _PAGE_NUMA);
}

static inline int pmd_none(pmd_t pmd)
{
	/* Only check low word on 32-bit platforms, since it might be
	   out of sync with upper half. */
	return (unsigned long)native_pmd_val(pmd) == 0;
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_val(pmd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pmd_page(pmd)	pfn_to_page((pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT)

/*
 * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this macro returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
static inline unsigned long pmd_index(unsigned long address)
{
	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * (Currently stuck as a macro because of indirect forward reference
 * to linux/mm.h:page_to_nid())
 */
#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))

/*
 * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
 *
 * this function returns the index of the entry in the pte page which would
 * control the given virtual address
 */
static inline unsigned long pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}
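
/*
 * On x86-64 with 4K pages PTRS_PER_PTE is 512, so pte_index() simply
 * extracts bits 12..20 of the virtual address.  A minimal
 * (illustrative) walk down to a kernel pte using these helpers,
 * assuming every level is present:
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 */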

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}

static inline int pmd_bad(pmd_t pmd)
{
#ifdef CONFIG_NUMA_BALANCING
	/* pmd_numa check */
	if ((pmd_flags(pmd) & (_PAGE_NUMA|_PAGE_PRESENT)) == _PAGE_NUMA)
		return 0;
#endif
	return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
	return npg >> (20 - PAGE_SHIFT);
}
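
/*
 * E.g. with 4K pages (PAGE_SHIFT == 12) this is npg >> 8,
 * i.e. 256 pages * 4 KiB = 1 MiB.
 */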

#if PAGETABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
	return native_pud_val(pud) == 0;
}

static inline int pud_present(pud_t pud)
{
	return pud_flags(pud) & _PAGE_PRESENT;
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
	return (unsigned long)__va((unsigned long)pud_val(pud) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pud_page(pud)	pfn_to_page(pud_val(pud) >> PAGE_SHIFT)

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}

static inline int pud_large(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline int pud_bad(pud_t pud)
{
	return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#else
static inline int pud_large(pud_t pud)
{
	return 0;
}
#endif	/* PAGETABLE_LEVELS > 2 */

#if PAGETABLE_LEVELS > 3
static inline int pgd_present(pgd_t pgd)
{
	return pgd_flags(pgd) & _PAGE_PRESENT;
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pgd_page(pgd)	pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)

/* to find an entry in a page-table-directory. */
static inline unsigned long pud_index(unsigned long address)
{
	return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
}

static inline int pgd_bad(pgd_t pgd)
{
	return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline int pgd_none(pgd_t pgd)
{
	return !native_pgd_val(pgd);
}
#endif	/* PAGETABLE_LEVELS > 3 */

#endif	/* __ASSEMBLY__ */

/*
 * the pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))


#define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

#ifndef __ASSEMBLY__

extern int direct_gbpages;
void init_mem_mapping(void);
void early_alloc_pgt_buf(void);

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
{
	pmd_t res = *pmdp;

	native_pmd_clear(pmdp);
	return res;
}

static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

static inline void native_set_pmd_at(struct mm_struct *mm, unsigned long addr,
				     pmd_t *pmdp, pmd_t pmd)
{
	native_set_pmd(pmdp, pmd);
}

#ifndef CONFIG_PARAVIRT
/*
 * Rules for using pte_update - it must be called after any PTE update which
 * has not been done using the set_pte / clear_pte interfaces. It is used by
 * shadow mode hypervisors to resynchronize the shadow page tables. Kernel PTE
 * updates should either be sets, clears, or set_pte_atomic for P->P
 * transitions, which means this hook should only be called for user PTEs.
 * This hook implies a P->P protection or access change has taken place, which
 * requires a subsequent TLB flush. The notification can optionally be delayed
 * until the TLB flush event by using the pte_update_defer form of the
 * interface, but care must be taken to assure that the flush happens while
 * still holding the same page table lock so that the shadow and primary pages
 * do not become out of sync on SMP.
 */
#define pte_update(mm, addr, ptep)		do { } while (0)
#define pte_update_defer(mm, addr, ptep)	do { } while (0)
#endif

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPU's that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	pte_update(mm, addr, ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
	pte_update(mm, addr, ptep);
}
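
/*
 * clear_bit() above is atomic, so write-protecting a pte cannot race
 * with a CPU that is concurrently setting the accessed/dirty bits in
 * the same pte word during a hardware page-table walk.
 */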

#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)

#define mk_pmd(page, pgprot)   pfn_pmd(page_to_pfn(page), (pgprot))

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);


#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
				 unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_RW;
}

#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pmd_t *pmdp)
{
	pmd_t pmd = native_pmdp_get_and_clear(pmdp);
	pmd_update(mm, addr, pmdp);
	return pmd;
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
	pmd_update(mm, addr, pmdp);
}

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
}

#define PTE_SHIFT ilog2(PTRS_PER_PTE)
static inline int page_level_shift(enum pg_level level)
{
	return (PAGE_SHIFT - PTE_SHIFT) + level * PTE_SHIFT;
}
static inline unsigned long page_level_size(enum pg_level level)
{
	return 1UL << page_level_shift(level);
}
static inline unsigned long page_level_mask(enum pg_level level)
{
	return ~(page_level_size(level) - 1);
}
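
/*
 * With 4K pages PTE_SHIFT is ilog2(512) == 9, so (assuming the
 * pg_level enum starts at PG_LEVEL_NONE == 0) page_level_shift()
 * yields 12 for PG_LEVEL_4K, 21 for PG_LEVEL_2M and 30 for
 * PG_LEVEL_1G, i.e. page_level_size() of 4 KiB, 2 MiB and 1 GiB.
 */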

/*
 * The x86 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep)
{
}
static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
					unsigned long addr, pmd_t *pmd)
{
}

#include <asm-generic/pgtable.h>
#endif	/* __ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_H */