arch/x86/mm/hugetlbpage.c
/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

#if 0	/* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	struct page *page;
	struct vm_area_struct *vma;
	pte_t *pte;

	vma = find_vma(mm, address);
	if (!vma || !is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	pte = huge_pte_offset(mm, address);

	/* hugetlb should be locked, and hence, prefaulted */
	WARN_ON(!pte || pte_none(*pte));

	/* base-page index within the huge page */
	page = &pte_page(*pte)[(address >> PAGE_SHIFT) % (HPAGE_SIZE / PAGE_SIZE)];

	WARN_ON(!PageHead(page));

	return page;
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	return NULL;
}

int pmd_huge_support(void)
{
	return 0;
}
#else

struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

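/*
 * A PMD or PUD entry with _PAGE_PSE set maps a huge page directly
 * (2MB at the PMD level, 1GB at the PUD level) instead of pointing
 * to a lower-level page table.
 */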
int pmd_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_PSE);
}

int pud_huge(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_PSE);
}

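/*
 * Reports whether this configuration supports pmd-level huge pages;
 * the core VM consults this, e.g. when deciding about hugepage
 * migration.
 */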
int pmd_huge_support(void)
{
	return 1;
}
#endif

/* x86_64 also uses this file */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
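/*
 * The two helpers below hand the search off to the generic
 * vm_unmapped_area() gap finder.  With align_offset = 0, an
 * align_mask of PAGE_MASK & ~huge_page_mask(h) forces the
 * page-granular bits below the huge page size to zero, so the
 * address that comes back is huge-page aligned.
 */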
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = TASK_SIZE;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}

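/*
 * Top-down variant: search downward from mm->mmap_base, falling back
 * to a bottom-up search on failure (see the comment in the body).
 */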
static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
		unsigned long addr0, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;
	unsigned long addr;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = current->mm->mmap_base;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here.  This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

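/*
 * Arch replacement for the generic get_unmapped_area() used for
 * hugetlbfs mappings: validate the length, honour MAP_FIXED and a
 * suitably aligned hint address, then search bottom-up or top-down
 * depending on the mm's mmap layout.
 */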
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}

#endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/

#ifdef CONFIG_X86_64
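/*
 * Parse the "hugepagesz=" boot parameter, e.g. hugepagesz=2M or
 * hugepagesz=1G.  1GB huge pages additionally require CPU support
 * for GB pages (the pdpe1gb feature bit).
 */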
static __init int setup_hugepagesz(char *opt)
{
	unsigned long ps = memparse(opt, &opt);
	if (ps == PMD_SIZE) {
		hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
	} else if (ps == PUD_SIZE && cpu_has_gbpages) {
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	} else {
		printk(KERN_ERR "hugepagesz: Unsupported page size %lu M\n",
			ps >> 20);
		return 0;
	}
	return 1;
}
__setup("hugepagesz=", setup_hugepagesz);
#endif