/*
 * SPARC64 Huge TLB page support.
 *
 * Copyright (C) 2002, 2003, 2006 David S. Miller (davem@davemloft.net)
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

/* Slightly simplified from the non-hugepage variant because by
 * definition we don't have to worry about any page coloring stuff
 */
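/* The non-hugepage arch_get_unmapped_area() has to color-align shared
 * mappings to avoid D-cache aliasing.  A huge mapping only has to be
 * HPAGE_SIZE aligned, which subsumes any coloring requirement, so the
 * searches below simply ask vm_unmapped_area() for huge page alignment:
 * with align_offset of 0, align_mask = PAGE_MASK & ~HPAGE_MASK keeps
 * exactly the address bits between the base page and huge page
 * boundaries clear in the returned address.
 */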
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
                                                        unsigned long addr,
                                                        unsigned long len,
                                                        unsigned long pgoff,
                                                        unsigned long flags)
{
        unsigned long task_size = TASK_SIZE;
        struct vm_unmapped_area_info info;

        if (test_thread_flag(TIF_32BIT))
                task_size = STACK_TOP32;

        info.flags = 0;
        info.length = len;
        info.low_limit = TASK_UNMAPPED_BASE;
        info.high_limit = min(task_size, VA_EXCLUDE_START);
        info.align_mask = PAGE_MASK & ~HPAGE_MASK;
        info.align_offset = 0;
        addr = vm_unmapped_area(&info);

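        /* sparc64 has an unmappable hole in the middle of the virtual
         * address space, delimited by VA_EXCLUDE_START and VA_EXCLUDE_END.
         * The search above stops below the hole; if it fails (an error
         * return has bits set under ~PAGE_MASK, and the only error here
         * is -ENOMEM) and the task's address limit reaches beyond the
         * hole, retry in the region above VA_EXCLUDE_END.
         */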
        if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
                VM_BUG_ON(addr != -ENOMEM);
                info.low_limit = VA_EXCLUDE_END;
                info.high_limit = task_size;
                addr = vm_unmapped_area(&info);
        }

        return addr;
}

static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                                  const unsigned long len,
                                  const unsigned long pgoff,
                                  const unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        unsigned long addr = addr0;
        struct vm_unmapped_area_info info;

        /* This should only ever run for 32-bit processes. */
        BUG_ON(!test_thread_flag(TIF_32BIT));

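        /* arch_pick_mmap_layout() on sparc64 always gives 64-bit tasks
         * the legacy bottom-up layout, so mm->get_unmapped_area can only
         * point here for a 32-bit task; the limits below (PAGE_SIZE up
         * to mm->mmap_base, with STACK_TOP32 as the fallback ceiling)
         * are all within the 32-bit address space.
         */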
        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
        info.low_limit = PAGE_SIZE;
        info.high_limit = mm->mmap_base;
        info.align_mask = PAGE_MASK & ~HPAGE_MASK;
        info.align_offset = 0;
        addr = vm_unmapped_area(&info);

        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        if (addr & ~PAGE_MASK) {
                VM_BUG_ON(addr != -ENOMEM);
                info.flags = 0;
                info.low_limit = TASK_UNMAPPED_BASE;
                info.high_limit = STACK_TOP32;
                addr = vm_unmapped_area(&info);
        }

        return addr;
}

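/* This is the hugetlbfs ->get_unmapped_area hook (sparc64 defines
 * HAVE_ARCH_HUGETLB_UNMAPPED_AREA), reached when user space maps a
 * hugetlbfs file or passes MAP_HUGETLB, roughly along the lines of:
 *
 *      void *p = mmap(NULL, 2 * HPAGE_SIZE, PROT_READ | PROT_WRITE,
 *                     MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
 *
 * It validates the length and any address hint, then delegates to the
 * bottom-up or top-down helper above to pick an HPAGE_SIZE aligned
 * address.
 */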
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long task_size = TASK_SIZE;

        if (test_thread_flag(TIF_32BIT))
                task_size = STACK_TOP32;

        if (len & ~HPAGE_MASK)
                return -EINVAL;
        if (len > task_size)
                return -ENOMEM;

        if (flags & MAP_FIXED) {
                if (prepare_hugepage_range(file, addr, len))
                        return -EINVAL;
                return addr;
        }

        if (addr) {
                addr = ALIGN(addr, HPAGE_SIZE);
                vma = find_vma(mm, addr);
                if (task_size - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
        if (mm->get_unmapped_area == arch_get_unmapped_area)
                return hugetlb_get_unmapped_area_bottomup(file, addr, len,
                                pgoff, flags);
        else
                return hugetlb_get_unmapped_area_topdown(file, addr, len,
                                pgoff, flags);
}

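/* A huge page is not represented by a single PMD or PUD level entry
 * here; it is backed by a full run of ordinary PTEs at the bottom of
 * the page table.  So huge_pte_alloc() just walks (allocating as
 * needed) down to the PTE level and returns the first sub-PTE of the
 * HPAGE_MASK aligned range.
 */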
pte_t *huge_pte_alloc(struct mm_struct *mm,
                        unsigned long addr, unsigned long sz)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte = NULL;

        /* We must align the address, because our caller will run
         * set_huge_pte_at() on whatever we return, which writes out
         * all of the sub-ptes for the hugepage range. So we have
         * to give it the first such sub-pte.
         */
        addr &= HPAGE_MASK;

        pgd = pgd_offset(mm, addr);
        pud = pud_alloc(mm, pgd, addr);
        if (pud) {
                pmd = pmd_alloc(mm, pud, addr);
                if (pmd)
                        pte = pte_alloc_map(mm, NULL, pmd, addr);
        }
        return pte;
}

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte = NULL;

        addr &= HPAGE_MASK;

        pgd = pgd_offset(mm, addr);
        if (!pgd_none(*pgd)) {
                pud = pud_offset(pgd, addr);
                if (!pud_none(*pud)) {
                        pmd = pmd_offset(pud, addr);
                        if (!pmd_none(*pmd))
                                pte = pte_offset_map(pmd, addr);
                }
        }
        return pte;
}

int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
        return 0;
}

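/* set_huge_pte_at() fans one huge mapping out into its
 * HPAGE_SIZE / PAGE_SIZE (i.e. 1 << HUGETLB_PAGE_ORDER) sub-PTEs:
 * each iteration installs one PTE, then advances the PTE pointer, the
 * virtual address and the physical address encoded in the PTE value by
 * PAGE_SIZE.  mm->context.huge_pte_count tracks how many huge mappings
 * the address space has, for the TSB sizing code to consult.
 */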
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, pte_t entry)
{
        int i;

        if (!pte_present(*ptep) && pte_present(entry))
                mm->context.huge_pte_count++;

        addr &= HPAGE_MASK;
        for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
                set_pte_at(mm, addr, ptep, entry);
                ptep++;
                addr += PAGE_SIZE;
                pte_val(entry) += PAGE_SIZE;
        }
}

pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep)
{
        pte_t entry;
        int i;

        entry = *ptep;
        if (pte_present(entry))
                mm->context.huge_pte_count--;

        addr &= HPAGE_MASK;

        for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
                pte_clear(mm, addr, ptep);
                addr += PAGE_SIZE;
                ptep++;
        }

        return entry;
}

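/* Since huge pages are built from ordinary PTEs, there are no huge
 * PMD or PUD entries for generic mm code to follow; the stubs below
 * report exactly that.
 */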
struct page *follow_huge_addr(struct mm_struct *mm,
                              unsigned long address, int write)
{
        return ERR_PTR(-EINVAL);
}

int pmd_huge(pmd_t pmd)
{
        return 0;
}

int pud_huge(pud_t pud)
{
        return 0;
}

struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                             pmd_t *pmd, int write)
{
        return NULL;
}