/* arch/powerpc/include/asm/hugetlb.h */
#ifndef _ASM_POWERPC_HUGETLB_H
#define _ASM_POWERPC_HUGETLB_H

#ifdef CONFIG_HUGETLB_PAGE
#include <asm/page.h>
#include <asm-generic/hugetlb.h>

extern struct kmem_cache *hugepte_cache;

#ifdef CONFIG_PPC_BOOK3S_64

#include <asm/book3s/64/hugetlb-radix.h>
/*
 * This should work for other subarchs too, but right now we use the
 * new format only for 64-bit Book3S.
 */
static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!hugepd_ok(hpd));
	/*
	 * We have only four bits to encode the MMU page size.
	 */
	BUILD_BUG_ON((MMU_PAGE_COUNT - 1) > 0xf);
	return __va(hpd.pd & HUGEPD_ADDR_MASK);
}

static inline unsigned int hugepd_mmu_psize(hugepd_t hpd)
{
	return (hpd.pd & HUGEPD_SHIFT_MASK) >> 2;
}

static inline unsigned int hugepd_shift(hugepd_t hpd)
{
	return mmu_psize_to_shift(hugepd_mmu_psize(hpd));
}
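
/*
 * Illustrative decode (example values, not a kernel invariant): per the
 * accessors above, bits 2-5 of hpd.pd hold the MMU page size index, so
 * recovering the hugepage size looks like:
 *
 *	unsigned int psize = hugepd_mmu_psize(hpd); // (pd & HUGEPD_SHIFT_MASK) >> 2
 *	unsigned int shift = hugepd_shift(hpd);     // e.g. 24 for a 16M page
 *	unsigned long sz   = 1UL << shift;
 *
 * The 16M figure is only an example; actual sizes come from the
 * mmu_psize_defs[] table for the running MMU.
 */
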
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__flush_hugetlb_page(vma, vmaddr);
}

static inline void __local_flush_hugetlb_page(struct vm_area_struct *vma,
					      unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__local_flush_hugetlb_page(vma, vmaddr);
}
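
/*
 * Only radix takes an explicit flush here; when radix is not enabled
 * these helpers intentionally do nothing (on hash, flushing is expected
 * to be driven from the PTE update paths instead).
 */
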
#else

static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!hugepd_ok(hpd));
	return (pte_t *)((hpd.pd & ~HUGEPD_SHIFT_MASK) | PD_HUGE);
}

static inline unsigned int hugepd_shift(hugepd_t hpd)
{
	return hpd.pd & HUGEPD_SHIFT_MASK;
}

#endif /* CONFIG_PPC_BOOK3S_64 */

static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
				    unsigned pdshift)
{
	/*
	 * On FSL BookE, we have multiple higher-level table entries that
	 * point to the same hugepte. Just use the first one, since they
	 * are all identical; for that case, idx = 0.
	 */
	unsigned long idx = 0;

	pte_t *dir = hugepd_page(hpd);
#ifndef CONFIG_PPC_FSL_BOOK3E
	idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(hpd);
#endif

	return dir + idx;
}
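
/*
 * Worked example (illustrative numbers only): with pdshift = 28 (a
 * directory entry spanning 256M) and hugepd_shift(hpd) = 24 (16M
 * hugepages), the index computed above is
 *
 *	idx = (addr & 0xfffffff) >> 24;	// selects one of 16 hugeptes
 *
 * i.e. the address bits between the hugepage size and the directory
 * span pick the hugepte within the directory.
 */
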

pte_t *huge_pte_offset_and_shift(struct mm_struct *mm,
				 unsigned long addr, unsigned *shift);

void flush_dcache_icache_hugepage(struct page *page);

#if defined(CONFIG_PPC_MM_SLICES)
int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
			   unsigned long len);
#else
static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr,
					 unsigned long len)
{
	return 0;
}
#endif

void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
			    pte_t pte);
void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);

void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
			    unsigned long end, unsigned long floor,
			    unsigned long ceiling);

/*
 * The version of vma_mmu_pagesize() in arch/powerpc/mm/hugetlbpage.c needs
 * to override the version in mm/hugetlb.c
 */
#define vma_mmu_pagesize vma_mmu_pagesize

/*
 * If the arch doesn't supply something else, assume that
 * hugepage-size-aligned regions are OK without further preparation.
 */
static inline int prepare_hugepage_range(struct file *file,
					 unsigned long addr, unsigned long len)
{
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (addr & ~huge_page_mask(h))
		return -EINVAL;
	return 0;
}
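
/*
 * Illustrative check (example values only): for a 16M hstate,
 * huge_page_mask(h) == ~0xffffffUL, so any addr or len with the low
 * 24 bits set fails:
 *
 *	prepare_hugepage_range(file, 0x11000000, 0x1000000);	// 0: aligned
 *	prepare_hugepage_range(file, 0x11000000, 0x0800000);	// -EINVAL: len
 */
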

static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
{
	set_pte_at(mm, addr, ptep, pte);
}

static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_PPC64
	return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1));
#else
	return __pte(pte_update(ptep, ~0UL, 0));
#endif
}
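
/*
 * Note on the pte_update() calls above: clearing with mask ~0UL and set
 * bits 0 zeroes the entry while returning the old PTE value. On PPC64
 * the trailing argument (1 here) tells pte_update() it is operating on
 * a hugepage mapping; the 32-bit variant takes no such argument.
 */
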

static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep)
{
	pte_t pte;

	pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
	flush_tlb_page(vma, addr);
}

static inline int huge_pte_none(pte_t pte)
{
	return pte_none(pte);
}

static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	return pte_wrprotect(pte);
}

static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
					     unsigned long addr, pte_t *ptep,
					     pte_t pte, int dirty)
{
#ifdef HUGETLB_NEED_PRELOAD
	/*
	 * The "return 1" forces a call to update_mmu_cache(), which will
	 * write a TLB entry. Without this, platforms that don't write the
	 * TLB entry in the TLB miss handler assembly will fault ad
	 * infinitum.
	 */
	ptep_set_access_flags(vma, addr, ptep, pte, dirty);
	return 1;
#else
	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
#endif
}

static inline pte_t huge_ptep_get(pte_t *ptep)
{
	return *ptep;
}

static inline void arch_clear_hugepage_flags(struct page *page)
{
}

#else /* ! CONFIG_HUGETLB_PAGE */
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
}

#define hugepd_shift(x) 0
static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
				    unsigned pdshift)
{
	return NULL;
}
#endif /* CONFIG_HUGETLB_PAGE */

/*
 * FSL Book3E platforms require special gpage handling - the gpages
 * are reserved early in the boot process by memblock instead of via
 * the .dts as on IBM platforms.
 */
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_FSL_BOOK3E)
extern void __init reserve_hugetlb_gpages(void);
#else
static inline void reserve_hugetlb_gpages(void)
{
}
#endif

#endif /* _ASM_POWERPC_HUGETLB_H */