mm/hugetlb: reduce arch dependent code around follow_huge_*
arch/x86/mm/hugetlbpage.c
/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

#if 0	/* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
        unsigned long vpfn = address / PAGE_SIZE;
        struct page *page;
        struct vm_area_struct *vma;
        pte_t *pte;

        vma = find_vma(mm, address);
        if (!vma || !is_vm_hugetlb_page(vma))
                return ERR_PTR(-EINVAL);

        pte = huge_pte_offset(mm, address);

        /* hugetlb should be locked, and hence, prefaulted */
        WARN_ON(!pte || pte_none(*pte));

        /* select the 4KB subpage of the huge page covering address */
        page = &pte_page(*pte)[vpfn % (HPAGE_SIZE / PAGE_SIZE)];

        WARN_ON(!PageHead(page));

        return page;
}

int pmd_huge(pmd_t pmd)
{
        return 0;
}

int pud_huge(pud_t pud)
{
        return 0;
}

#else

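/*
 * On x86, a PMD/PUD entry with _PAGE_PSE set maps a large page
 * directly (2MB or 4MB at the PMD level, 1GB at the PUD level) rather
 * than pointing to a lower-level page table, so testing that single
 * bit is enough to recognize a huge mapping.
 */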
int pmd_huge(pmd_t pmd)
{
        return !!(pmd_val(pmd) & _PAGE_PSE);
}

int pud_huge(pud_t pud)
{
        return !!(pud_val(pud) & _PAGE_PSE);
}
#endif

#ifdef CONFIG_HUGETLB_PAGE
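/*
 * In both helpers below, align_mask = PAGE_MASK & ~huge_page_mask(h)
 * sets exactly the address bits between PAGE_SHIFT and the huge page
 * shift; vm_unmapped_area() requires those bits to equal align_offset
 * (zero here), which forces a huge-page-aligned placement.
 */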
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
                unsigned long addr, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct vm_unmapped_area_info info;

        info.flags = 0;
        info.length = len;
        info.low_limit = current->mm->mmap_legacy_base;
        info.high_limit = TASK_SIZE;
        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
        info.align_offset = 0;
        return vm_unmapped_area(&info);
}

static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
                unsigned long addr0, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct vm_unmapped_area_info info;
        unsigned long addr;

        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
        info.low_limit = PAGE_SIZE;
        info.high_limit = current->mm->mmap_base;
        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
        info.align_offset = 0;
        addr = vm_unmapped_area(&info);

        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        if (addr & ~PAGE_MASK) {
                VM_BUG_ON(addr != -ENOMEM);
                info.flags = 0;
                info.low_limit = TASK_UNMAPPED_BASE;
                info.high_limit = TASK_SIZE;
                addr = vm_unmapped_area(&info);
        }

        return addr;
}

unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;

        if (len & ~huge_page_mask(h))
                return -EINVAL;
        if (len > TASK_SIZE)
                return -ENOMEM;

        if (flags & MAP_FIXED) {
                if (prepare_hugepage_range(file, addr, len))
                        return -EINVAL;
                return addr;
        }

        if (addr) {
                addr = ALIGN(addr, huge_page_size(h));
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
        if (mm->get_unmapped_area == arch_get_unmapped_area)
                return hugetlb_get_unmapped_area_bottomup(file, addr, len,
                                pgoff, flags);
        else
                return hugetlb_get_unmapped_area_topdown(file, addr, len,
                                pgoff, flags);
}
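
/*
 * A minimal userspace sketch of how this path is reached (assumes a
 * system with 2MB huge pages available):
 *
 *	ptr = mmap(NULL, 4 * (2UL << 20), PROT_READ | PROT_WRITE,
 *		   MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
 *
 * With a NULL hint and no MAP_FIXED, placement falls through to one
 * of the two helpers above, chosen by the mm's legacy vs. top-down
 * mmap layout.
 */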
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_X86_64
static __init int setup_hugepagesz(char *opt)
{
        unsigned long ps = memparse(opt, &opt);
        if (ps == PMD_SIZE) {
                hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
        } else if (ps == PUD_SIZE && cpu_has_gbpages) {
                hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
        } else {
                printk(KERN_ERR "hugepagesz: Unsupported page size %lu M\n",
                        ps >> 20);
                return 0;
        }
        return 1;
}
__setup("hugepagesz=", setup_hugepagesz);
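
/*
 * Example kernel command line (1GB pages assume a CPU with the
 * "pdpe1gb" feature):
 *
 *	hugepagesz=1G hugepages=2 hugepagesz=2M hugepages=512
 *
 * Each hugepagesz= registers an hstate; a following hugepages=
 * pre-allocates that many pages of that size at boot.
 */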

#ifdef CONFIG_CMA
static __init int gigantic_pages_init(void)
{
        /* With CMA we can allocate gigantic pages at runtime */
        if (cpu_has_gbpages && !size_to_hstate(1UL << PUD_SHIFT))
                hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
        return 0;
}
arch_initcall(gigantic_pages_init);
#endif /* CONFIG_CMA */
#endif /* CONFIG_X86_64 */