mm/hugetlb: reduce arch dependent code around follow_huge_*
arch/tile/mm/hugetlbpage.c
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * TILE Huge TLB Page Support for Kernel.
 * Taken from i386 hugetlb implementation:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <linux/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/setup.h>

#ifdef CONFIG_HUGETLB_SUPER_PAGES

/*
 * Provide an additional huge page size (in addition to the regular default
 * huge page size) if no "hugepagesz" arguments are specified.
 * Note that it must be smaller than the default huge page size so
 * that it's possible to allocate them on demand from the buddy allocator.
 * You can change this to 64K (on a 16K build), 256K, 1M, or 4M,
 * or not define it at all.
 */
#define ADDITIONAL_HUGE_SIZE (1024 * 1024UL)

/* "Extra" page-size multipliers, one per level of the page table. */
int huge_shift[HUGE_SHIFT_ENTRIES] = {
#ifdef ADDITIONAL_HUGE_SIZE
#define ADDITIONAL_HUGE_SHIFT __builtin_ctzl(ADDITIONAL_HUGE_SIZE / PAGE_SIZE)
        [HUGE_SHIFT_PAGE] = ADDITIONAL_HUGE_SHIFT
#endif
};
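
/*
 * Each huge_shift[] entry is the log2 of the multiplier applied on top of
 * the natural page size at that page-table level.  For example, with 64 KB
 * base pages and the 1 MB ADDITIONAL_HUGE_SIZE above, 1 MB / 64 KB = 16,
 * so huge_shift[HUGE_SHIFT_PAGE] = __builtin_ctzl(16) = 4.
 */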

#endif

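/*
 * Return a pointer to the page-table entry that will map a huge page of
 * size "sz" at "addr", allocating intermediate levels as needed.  The
 * entry may live at PGD, PMD, or PTE level; with CONFIG_HUGETLB_SUPER_PAGES
 * each level also admits one extra "super" size, huge_shift[] steps larger.
 */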
pte_t *huge_pte_alloc(struct mm_struct *mm,
                      unsigned long addr, unsigned long sz)
{
        pgd_t *pgd;
        pud_t *pud;

        addr &= -sz;   /* Mask off any low bits in the address. */

        pgd = pgd_offset(mm, addr);
        pud = pud_alloc(mm, pgd, addr);

#ifdef CONFIG_HUGETLB_SUPER_PAGES
        if (sz >= PGDIR_SIZE) {
                BUG_ON(sz != PGDIR_SIZE &&
                       sz != PGDIR_SIZE << huge_shift[HUGE_SHIFT_PGDIR]);
                return (pte_t *)pud;
        } else {
                pmd_t *pmd = pmd_alloc(mm, pud, addr);
                if (sz >= PMD_SIZE) {
                        BUG_ON(sz != PMD_SIZE &&
                               sz != (PMD_SIZE << huge_shift[HUGE_SHIFT_PMD]));
                        return (pte_t *)pmd;
                } else {
                        if (sz != PAGE_SIZE << huge_shift[HUGE_SHIFT_PAGE])
                                panic("Unexpected page size %#lx\n", sz);
                        return pte_alloc_map(mm, NULL, pmd, addr);
                }
        }
#else
        BUG_ON(sz != PMD_SIZE);
        return (pte_t *) pmd_alloc(mm, pud, addr);
#endif
}

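/*
 * Look up the PTE for "index" at the given page-table level.  If that
 * entry is not present but the level supports super pages, probe the
 * entry at the index rounded down to the super-page boundary; if that
 * entry is present and marked super, it is the one mapping this address.
 */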
static pte_t *get_pte(pte_t *base, int index, int level)
{
        pte_t *ptep = base + index;
#ifdef CONFIG_HUGETLB_SUPER_PAGES
        if (!pte_present(*ptep) && huge_shift[level] != 0) {
                unsigned long mask = -1UL << huge_shift[level];
                pte_t *super_ptep = base + (index & mask);
                pte_t pte = *super_ptep;
                if (pte_present(pte) && pte_super(pte))
                        ptep = super_ptep;
        }
#endif
        return ptep;
}

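/*
 * Walk the page table for "addr" and return the huge PTE if one exists
 * at any level, or NULL otherwise.  This mirrors the walk in
 * huge_pte_alloc(), probing for a super PTE at each level via get_pte().
 */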
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
#ifdef CONFIG_HUGETLB_SUPER_PAGES
        pte_t *pte;
#endif

        /* Get the top-level page table entry. */
        pgd = (pgd_t *)get_pte((pte_t *)mm->pgd, pgd_index(addr), 0);

        /* We don't have four levels. */
        pud = pud_offset(pgd, addr);
#ifndef __PAGETABLE_PUD_FOLDED
# error support fourth page table level
#endif
        if (!pud_present(*pud))
                return NULL;

        /* Check for an L0 huge PTE, if we have three levels. */
#ifndef __PAGETABLE_PMD_FOLDED
        if (pud_huge(*pud))
                return (pte_t *)pud;

        pmd = (pmd_t *)get_pte((pte_t *)pud_page_vaddr(*pud),
                               pmd_index(addr), 1);
        if (!pmd_present(*pmd))
                return NULL;
#else
        pmd = pmd_offset(pud, addr);
#endif

        /* Check for an L1 huge PTE. */
        if (pmd_huge(*pmd))
                return (pte_t *)pmd;

#ifdef CONFIG_HUGETLB_SUPER_PAGES
        /* Check for an L2 huge PTE. */
        pte = get_pte((pte_t *)pmd_page_vaddr(*pmd), pte_index(addr), 2);
        if (!pte_present(*pte))
                return NULL;
        if (pte_super(*pte))
                return pte;
#endif

        return NULL;
}

int pmd_huge(pmd_t pmd)
{
        return !!(pmd_val(pmd) & _PAGE_HUGE_PAGE);
}

int pud_huge(pud_t pud)
{
        return !!(pud_val(pud) & _PAGE_HUGE_PAGE);
}

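/*
 * Huge-page PMD sharing is not implemented on tile, so there is never
 * anything to unshare; the generic code treats a zero return as
 * "nothing was unshared".
 */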
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
        return 0;
}

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
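/*
 * Search bottom-up from TASK_UNMAPPED_BASE for a free, huge-page-aligned
 * range of "len" bytes.
 */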
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
                unsigned long addr, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct vm_unmapped_area_info info;

        info.flags = 0;
        info.length = len;
        info.low_limit = TASK_UNMAPPED_BASE;
        info.high_limit = TASK_SIZE;
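        /*
         * PAGE_MASK & ~huge_page_mask(h) sets exactly the offset bits
         * between PAGE_SHIFT and the huge-page shift, so vm_unmapped_area()
         * returns an address with those bits clear, i.e. huge-page aligned.
         */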
        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
        info.align_offset = 0;
        return vm_unmapped_area(&info);
}

static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
                unsigned long addr0, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct vm_unmapped_area_info info;
        unsigned long addr;

        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
        info.low_limit = PAGE_SIZE;
        info.high_limit = current->mm->mmap_base;
        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
        info.align_offset = 0;
        addr = vm_unmapped_area(&info);

        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here.  This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        if (addr & ~PAGE_MASK) {
                VM_BUG_ON(addr != -ENOMEM);
                info.flags = 0;
                info.low_limit = TASK_UNMAPPED_BASE;
                info.high_limit = TASK_SIZE;
                addr = vm_unmapped_area(&info);
        }

        return addr;
}

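/*
 * Arch hook for hugetlbfs mmap(): validate MAP_FIXED requests, honor an
 * aligned address hint when the range is free, and otherwise dispatch to
 * the bottom-up or top-down search to match the mm's normal mmap layout.
 */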
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;

        if (len & ~huge_page_mask(h))
                return -EINVAL;
        if (len > TASK_SIZE)
                return -ENOMEM;

        if (flags & MAP_FIXED) {
                if (prepare_hugepage_range(file, addr, len))
                        return -EINVAL;
                return addr;
        }

        if (addr) {
                addr = ALIGN(addr, huge_page_size(h));
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
        if (current->mm->get_unmapped_area == arch_get_unmapped_area)
                return hugetlb_get_unmapped_area_bottomup(file, addr, len,
                                pgoff, flags);
        else
                return hugetlb_get_unmapped_area_topdown(file, addr, len,
                                pgoff, flags);
}
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

#ifdef CONFIG_HUGETLB_SUPER_PAGES
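/*
 * Validate and enable one huge page size.  Tile huge page sizes must be
 * powers of four (an even log2): for example 4 MB = 2^22 is acceptable,
 * while 2 MB = 2^21 would be rejected.  Sizes at or above PUD_SIZE
 * additionally require matching "jumbo" page support from the hypervisor.
 */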
static __init int __setup_hugepagesz(unsigned long ps)
{
        int log_ps = __builtin_ctzl(ps);
        int level, base_shift;

        if ((1UL << log_ps) != ps || (log_ps & 1) != 0) {
                pr_warn("Not enabling %ld byte huge pages; must be a power of four\n",
                        ps);
                return -EINVAL;
        }

        if (ps > 64*1024*1024*1024UL) {
                pr_warn("Not enabling %ld MB huge pages; largest legal value is 64 GB\n",
                        ps >> 20);
                return -EINVAL;
        } else if (ps >= PUD_SIZE) {
                static long hv_jpage_size;
                if (hv_jpage_size == 0)
                        hv_jpage_size = hv_sysconf(HV_SYSCONF_PAGE_SIZE_JUMBO);
                if (hv_jpage_size != PUD_SIZE) {
                        pr_warn("Not enabling >= %ld MB huge pages: hypervisor reports size %ld\n",
                                PUD_SIZE >> 20, hv_jpage_size);
                        return -EINVAL;
                }
                level = 0;
                base_shift = PUD_SHIFT;
        } else if (ps >= PMD_SIZE) {
                level = 1;
                base_shift = PMD_SHIFT;
        } else if (ps > PAGE_SIZE) {
                level = 2;
                base_shift = PAGE_SHIFT;
        } else {
                pr_err("hugepagesz: huge page size %ld too small\n", ps);
                return -EINVAL;
        }

        if (log_ps != base_shift) {
                int shift_val = log_ps - base_shift;
                if (huge_shift[level] != 0) {
                        int old_shift = base_shift + huge_shift[level];
                        pr_warn("Not enabling %ld MB huge pages; already have size %ld MB\n",
                                ps >> 20, (1UL << old_shift) >> 20);
                        return -EINVAL;
                }
                if (hv_set_pte_super_shift(level, shift_val) != 0) {
                        pr_warn("Not enabling %ld MB huge pages; no hypervisor support\n",
                                ps >> 20);
                        return -EINVAL;
                }
                printk(KERN_DEBUG "Enabled %ld MB huge pages\n", ps >> 20);
                huge_shift[level] = shift_val;
        }

        hugetlb_add_hstate(log_ps - PAGE_SHIFT);

        return 0;
}

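/*
 * The first explicit "hugepagesz=" boot argument (e.g. "hugepagesz=16M")
 * discards the compiled-in ADDITIONAL_HUGE_SIZE default, so only sizes
 * named on the command line are enabled from then on.
 */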
static bool saw_hugepagesz;

static __init int setup_hugepagesz(char *opt)
{
        if (!saw_hugepagesz) {
                saw_hugepagesz = true;
                memset(huge_shift, 0, sizeof(huge_shift));
        }
        return __setup_hugepagesz(memparse(opt, NULL));
}
__setup("hugepagesz=", setup_hugepagesz);

#ifdef ADDITIONAL_HUGE_SIZE
/*
 * Provide an additional huge page size if no "hugepagesz" args are given.
 * In that case, all the cores have properly set up their hv super_shift
 * already, but we need to notify the hugetlb code to enable the
 * new huge page size from the Linux point of view.
 */
static __init int add_default_hugepagesz(void)
{
        if (!saw_hugepagesz) {
                BUILD_BUG_ON(ADDITIONAL_HUGE_SIZE >= PMD_SIZE ||
                             ADDITIONAL_HUGE_SIZE <= PAGE_SIZE);
                BUILD_BUG_ON((PAGE_SIZE << ADDITIONAL_HUGE_SHIFT) !=
                             ADDITIONAL_HUGE_SIZE);
                BUILD_BUG_ON(ADDITIONAL_HUGE_SHIFT & 1);
                hugetlb_add_hstate(ADDITIONAL_HUGE_SHIFT);
        }
        return 0;
}
arch_initcall(add_default_hugepagesz);
#endif

#endif /* CONFIG_HUGETLB_SUPER_PAGES */