/*
 * IBM System z Huge TLB Page Support for Kernel.
 *
 * Copyright IBM Corp. 2008
 * Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */

#ifndef _ASM_S390_HUGETLB_H
#define _ASM_S390_HUGETLB_H

#include <asm/page.h>
#include <asm/pgtable.h>

#define is_hugepage_only_range(mm, addr, len)	0
#define hugetlb_free_pgd_range			free_pgd_range
#define hugepages_supported()			(MACHINE_HAS_HPAGE)

void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte);
pte_t huge_ptep_get(pte_t *ptep);
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
			      unsigned long addr, pte_t *ptep);

/*
 * If the arch doesn't supply something else, assume that hugepage
 * size aligned regions are ok without further preparation.
 */
29 | static inline int prepare_hugepage_range(struct file *file, |
30 | unsigned long addr, unsigned long len) | |
53492b1d GS |
31 | { |
32 | if (len & ~HPAGE_MASK) | |
33 | return -EINVAL; | |
34 | if (addr & ~HPAGE_MASK) | |
35 | return -EINVAL; | |
36 | return 0; | |
37 | } | |
38 | ||
5d3a551c | 39 | #define arch_clear_hugepage_flags(page) do { } while (0) |
53492b1d | 40 | |
/*
 * Clear a huge PTE.  On s390 a huge page is mapped by a segment table
 * entry, so the slot is emptied with _SEGMENT_ENTRY_EMPTY rather than
 * a page-table invalid value.  @mm and @addr are unused.
 */
static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
				  pte_t *ptep)
{
	pte_val(*ptep) = _SEGMENT_ENTRY_EMPTY;
}
46 | ||
e5098611 MS |
47 | static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, |
48 | unsigned long address, pte_t *ptep) | |
53492b1d | 49 | { |
e5098611 | 50 | huge_ptep_get_and_clear(vma->vm_mm, address, ptep); |
53492b1d GS |
51 | } |
52 | ||
e5098611 MS |
53 | static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, |
54 | unsigned long addr, pte_t *ptep, | |
55 | pte_t pte, int dirty) | |
53492b1d | 56 | { |
e5098611 MS |
57 | int changed = !pte_same(huge_ptep_get(ptep), pte); |
58 | if (changed) { | |
59 | huge_ptep_get_and_clear(vma->vm_mm, addr, ptep); | |
60 | set_huge_pte_at(vma->vm_mm, addr, ptep, pte); | |
53492b1d | 61 | } |
e5098611 | 62 | return changed; |
53492b1d GS |
63 | } |
64 | ||
e5098611 MS |
65 | static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, |
66 | unsigned long addr, pte_t *ptep) | |
53492b1d | 67 | { |
e5098611 MS |
68 | pte_t pte = huge_ptep_get_and_clear(mm, addr, ptep); |
69 | set_huge_pte_at(mm, addr, ptep, pte_wrprotect(pte)); | |
53492b1d GS |
70 | } |
71 | ||
e5098611 | 72 | static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot) |
53492b1d | 73 | { |
e5098611 | 74 | return mk_pte(page, pgprot); |
53492b1d GS |
75 | } |
76 | ||
e5098611 | 77 | static inline int huge_pte_none(pte_t pte) |
106c992a | 78 | { |
e5098611 | 79 | return pte_none(pte); |
106c992a GS |
80 | } |
81 | ||
82 | static inline int huge_pte_write(pte_t pte) | |
83 | { | |
e5098611 | 84 | return pte_write(pte); |
106c992a GS |
85 | } |
86 | ||
87 | static inline int huge_pte_dirty(pte_t pte) | |
88 | { | |
e5098611 | 89 | return pte_dirty(pte); |
106c992a GS |
90 | } |
91 | ||
92 | static inline pte_t huge_pte_mkwrite(pte_t pte) | |
93 | { | |
e5098611 | 94 | return pte_mkwrite(pte); |
106c992a GS |
95 | } |
96 | ||
97 | static inline pte_t huge_pte_mkdirty(pte_t pte) | |
98 | { | |
e5098611 | 99 | return pte_mkdirty(pte); |
106c992a GS |
100 | } |
101 | ||
e5098611 | 102 | static inline pte_t huge_pte_wrprotect(pte_t pte) |
106c992a | 103 | { |
e5098611 | 104 | return pte_wrprotect(pte); |
106c992a GS |
105 | } |
106 | ||
e5098611 | 107 | static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot) |
106c992a | 108 | { |
e5098611 | 109 | return pte_modify(pte, newprot); |
106c992a GS |
110 | } |
111 | ||
53492b1d | 112 | #endif /* _ASM_S390_HUGETLB_H */ |