arch/tile/mm/hugetlbpage.c
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * TILE Huge TLB Page Support for Kernel.
 * Taken from i386 hugetlb implementation:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <linux/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/setup.h>

#ifdef CONFIG_HUGETLB_SUPER_PAGES

/*
 * Provide an additional huge page size (in addition to the regular default
 * huge page size) if no "hugepagesz" arguments are specified.
 * Note that it must be smaller than the default huge page size so
 * that it's possible to allocate them on demand from the buddy allocator.
 * You can change this to 64K (on a 16K build), 256K, 1M, or 4M,
 * or not define it at all.
 */
#define ADDITIONAL_HUGE_SIZE (1024 * 1024UL)

/* "Extra" page-size multipliers, one per level of the page table. */
int huge_shift[HUGE_SHIFT_ENTRIES] = {
#ifdef ADDITIONAL_HUGE_SIZE
#define ADDITIONAL_HUGE_SHIFT __builtin_ctzl(ADDITIONAL_HUGE_SIZE / PAGE_SIZE)
        [HUGE_SHIFT_PAGE] = ADDITIONAL_HUGE_SHIFT
#endif
};
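
/*
 * Worked example (illustrative, not part of the original source): on a
 * build with a 64 KB base PAGE_SIZE, the 1 MB ADDITIONAL_HUGE_SIZE above
 * gives ADDITIONAL_HUGE_SHIFT = __builtin_ctzl(1 MB / 64 KB) = ctz(16) = 4,
 * i.e. each additional huge page spans 2^4 = 16 consecutive base pages.
 */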

#endif

pte_t *huge_pte_alloc(struct mm_struct *mm,
                      unsigned long addr, unsigned long sz)
{
        pgd_t *pgd;
        pud_t *pud;

        addr &= -sz;   /* Mask off any low bits in the address. */

        pgd = pgd_offset(mm, addr);
        pud = pud_alloc(mm, pgd, addr);

#ifdef CONFIG_HUGETLB_SUPER_PAGES
        if (sz >= PGDIR_SIZE) {
                BUG_ON(sz != PGDIR_SIZE &&
                       sz != PGDIR_SIZE << huge_shift[HUGE_SHIFT_PGDIR]);
                return (pte_t *)pud;
        } else {
                pmd_t *pmd = pmd_alloc(mm, pud, addr);
                if (sz >= PMD_SIZE) {
                        BUG_ON(sz != PMD_SIZE &&
                               sz != (PMD_SIZE << huge_shift[HUGE_SHIFT_PMD]));
                        return (pte_t *)pmd;
                } else {
                        if (sz != PAGE_SIZE << huge_shift[HUGE_SHIFT_PAGE])
                                panic("Unexpected page size %#lx\n", sz);
                        return pte_alloc_map(mm, pmd, addr);
                }
        }
#else
        BUG_ON(sz != PMD_SIZE);
        return (pte_t *) pmd_alloc(mm, pud, addr);
#endif
}

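/*
 * Return the PTE at "index" in the given page-table page, but if that
 * entry is not present and this level has a super-page shift configured,
 * fall back to the enclosing "super" entry: round the index down to the
 * super-page boundary and use that PTE if it is present and marked super.
 */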
static pte_t *get_pte(pte_t *base, int index, int level)
{
        pte_t *ptep = base + index;
#ifdef CONFIG_HUGETLB_SUPER_PAGES
        if (!pte_present(*ptep) && huge_shift[level] != 0) {
                unsigned long mask = -1UL << huge_shift[level];
                pte_t *super_ptep = base + (index & mask);
                pte_t pte = *super_ptep;
                if (pte_present(pte) && pte_super(pte))
                        ptep = super_ptep;
        }
#endif
        return ptep;
}

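/*
 * Walk the page table for "addr", checking for a huge PTE at each level
 * (via get_pte(), so super pages are honored as well), and return a
 * pointer to the huge PTE if one is present, or NULL otherwise.
 */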
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
#ifdef CONFIG_HUGETLB_SUPER_PAGES
        pte_t *pte;
#endif

        /* Get the top-level page table entry. */
        pgd = (pgd_t *)get_pte((pte_t *)mm->pgd, pgd_index(addr), 0);

        /* We don't have four levels. */
        pud = pud_offset(pgd, addr);
#ifndef __PAGETABLE_PUD_FOLDED
# error support fourth page table level
#endif
        if (!pud_present(*pud))
                return NULL;

        /* Check for an L0 huge PTE, if we have three levels. */
#ifndef __PAGETABLE_PMD_FOLDED
        if (pud_huge(*pud))
                return (pte_t *)pud;

        pmd = (pmd_t *)get_pte((pte_t *)pud_page_vaddr(*pud),
                               pmd_index(addr), 1);
        if (!pmd_present(*pmd))
                return NULL;
#else
        pmd = pmd_offset(pud, addr);
#endif

        /* Check for an L1 huge PTE. */
        if (pmd_huge(*pmd))
                return (pte_t *)pmd;

#ifdef CONFIG_HUGETLB_SUPER_PAGES
        /* Check for an L2 huge PTE. */
        pte = get_pte((pte_t *)pmd_page_vaddr(*pmd), pte_index(addr), 2);
        if (!pte_present(*pte))
                return NULL;
        if (pte_super(*pte))
                return pte;
#endif

        return NULL;
}

int pmd_huge(pmd_t pmd)
{
        return !!(pmd_val(pmd) & _PAGE_HUGE_PAGE);
}

int pud_huge(pud_t pud)
{
        return !!(pud_val(pud) & _PAGE_HUGE_PAGE);
}

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
                unsigned long addr, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct vm_unmapped_area_info info;

        info.flags = 0;
        info.length = len;
        info.low_limit = TASK_UNMAPPED_BASE;
        info.high_limit = TASK_SIZE;
        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
        info.align_offset = 0;
        return vm_unmapped_area(&info);
}

static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
                unsigned long addr0, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct vm_unmapped_area_info info;
        unsigned long addr;

        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
        info.low_limit = PAGE_SIZE;
        info.high_limit = current->mm->mmap_base;
        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
        info.align_offset = 0;
        addr = vm_unmapped_area(&info);

        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here.  This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        if (addr & ~PAGE_MASK) {
                VM_BUG_ON(addr != -ENOMEM);
                info.flags = 0;
                info.low_limit = TASK_UNMAPPED_BASE;
                info.high_limit = TASK_SIZE;
                addr = vm_unmapped_area(&info);
        }

        return addr;
}

unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;

        if (len & ~huge_page_mask(h))
                return -EINVAL;
        if (len > TASK_SIZE)
                return -ENOMEM;

        if (flags & MAP_FIXED) {
                if (prepare_hugepage_range(file, addr, len))
                        return -EINVAL;
                return addr;
        }

        if (addr) {
                addr = ALIGN(addr, huge_page_size(h));
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
        if (current->mm->get_unmapped_area == arch_get_unmapped_area)
                return hugetlb_get_unmapped_area_bottomup(file, addr, len,
                                pgoff, flags);
        else
                return hugetlb_get_unmapped_area_topdown(file, addr, len,
                                pgoff, flags);
}
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

#ifdef CONFIG_HUGETLB_SUPER_PAGES
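/*
 * Validate and enable one huge page size: it must be a power of four,
 * no larger than 64 GB, and supported by the hypervisor at the
 * page-table level it maps to.
 */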
static __init int __setup_hugepagesz(unsigned long ps)
{
        int log_ps = __builtin_ctzl(ps);
        int level, base_shift;

        if ((1UL << log_ps) != ps || (log_ps & 1) != 0) {
                pr_warn("Not enabling %ld byte huge pages; must be a power of four\n",
                        ps);
                return -EINVAL;
        }

        if (ps > 64*1024*1024*1024UL) {
                pr_warn("Not enabling %ld MB huge pages; largest legal value is 64 GB\n",
                        ps >> 20);
                return -EINVAL;
        } else if (ps >= PUD_SIZE) {
                static long hv_jpage_size;
                if (hv_jpage_size == 0)
                        hv_jpage_size = hv_sysconf(HV_SYSCONF_PAGE_SIZE_JUMBO);
                if (hv_jpage_size != PUD_SIZE) {
                        pr_warn("Not enabling >= %ld MB huge pages: hypervisor reports size %ld\n",
                                PUD_SIZE >> 20, hv_jpage_size);
                        return -EINVAL;
                }
                level = 0;
                base_shift = PUD_SHIFT;
        } else if (ps >= PMD_SIZE) {
                level = 1;
                base_shift = PMD_SHIFT;
        } else if (ps > PAGE_SIZE) {
                level = 2;
                base_shift = PAGE_SHIFT;
        } else {
                pr_err("hugepagesz: huge page size %ld too small\n", ps);
                return -EINVAL;
        }

        if (log_ps != base_shift) {
                int shift_val = log_ps - base_shift;
                if (huge_shift[level] != 0) {
                        int old_shift = base_shift + huge_shift[level];
                        pr_warn("Not enabling %ld MB huge pages; already have size %ld MB\n",
                                ps >> 20, (1UL << old_shift) >> 20);
                        return -EINVAL;
                }
                if (hv_set_pte_super_shift(level, shift_val) != 0) {
                        pr_warn("Not enabling %ld MB huge pages; no hypervisor support\n",
                                ps >> 20);
                        return -EINVAL;
                }
                printk(KERN_DEBUG "Enabled %ld MB huge pages\n", ps >> 20);
                huge_shift[level] = shift_val;
        }

        hugetlb_add_hstate(log_ps - PAGE_SHIFT);

        return 0;
}

static bool saw_hugepagesz;

static __init int setup_hugepagesz(char *opt)
{
        int rc;

        if (!saw_hugepagesz) {
                saw_hugepagesz = true;
                memset(huge_shift, 0, sizeof(huge_shift));
        }
        rc = __setup_hugepagesz(memparse(opt, NULL));
        if (rc)
                hugetlb_bad_size();
        return rc;
}
__setup("hugepagesz=", setup_hugepagesz);
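
/*
 * Illustrative usage (the sizes must still be legal for this build's page
 * geometry): booting with "hugepagesz=16M" or "hugepagesz=64K" on the
 * kernel command line invokes setup_hugepagesz() once per argument;
 * memparse() accepts the usual K/M/G suffixes.
 */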

#ifdef ADDITIONAL_HUGE_SIZE
/*
 * Provide an additional huge page size if no "hugepagesz" args are given.
 * In that case, all the cores have properly set up their hv super_shift
 * already, but we need to notify the hugetlb code to enable the
 * new huge page size from the Linux point of view.
 */
static __init int add_default_hugepagesz(void)
{
        if (!saw_hugepagesz) {
                BUILD_BUG_ON(ADDITIONAL_HUGE_SIZE >= PMD_SIZE ||
                             ADDITIONAL_HUGE_SIZE <= PAGE_SIZE);
                BUILD_BUG_ON((PAGE_SIZE << ADDITIONAL_HUGE_SHIFT) !=
                             ADDITIONAL_HUGE_SIZE);
                BUILD_BUG_ON(ADDITIONAL_HUGE_SHIFT & 1);
                hugetlb_add_hstate(ADDITIONAL_HUGE_SHIFT);
        }
        return 0;
}
arch_initcall(add_default_hugepagesz);
#endif

#endif /* CONFIG_HUGETLB_SUPER_PAGES */