/*
 * arch/metag/mm/hugetlbpage.c
 *
 * METAG HugeTLB page support.
 *
 * Cloned from sparc64 by Paul Mundt.
 *
 * Copyright (C) 2002, 2003 David S. Miller (davem@redhat.com)
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
27 * If the arch doesn't supply something else, assume that hugepage
28 * size aligned regions are ok without further preparation.
30 int prepare_hugepage_range(struct file
*file
, unsigned long addr
,
33 struct mm_struct
*mm
= current
->mm
;
34 struct hstate
*h
= hstate_file(file
);
35 struct vm_area_struct
*vma
;
37 if (len
& ~huge_page_mask(h
))
39 if (addr
& ~huge_page_mask(h
))
41 if (TASK_SIZE
- len
< addr
)
44 vma
= find_vma(mm
, ALIGN_HUGEPT(addr
));
45 if (vma
&& !(vma
->vm_flags
& MAP_HUGETLB
))
48 vma
= find_vma(mm
, addr
);
50 if (addr
+ len
> vma
->vm_start
)
52 if (!(vma
->vm_flags
& MAP_HUGETLB
) &&
53 (ALIGN_HUGEPT(addr
+ len
) > vma
->vm_start
))
59 pte_t
*huge_pte_alloc(struct mm_struct
*mm
,
60 unsigned long addr
, unsigned long sz
)
67 pgd
= pgd_offset(mm
, addr
);
68 pud
= pud_offset(pgd
, addr
);
69 pmd
= pmd_offset(pud
, addr
);
70 pte
= pte_alloc_map(mm
, NULL
, pmd
, addr
);
71 pgd
->pgd
&= ~_PAGE_SZ_MASK
;
72 pgd
->pgd
|= _PAGE_SZHUGE
;
77 pte_t
*huge_pte_offset(struct mm_struct
*mm
, unsigned long addr
)
84 pgd
= pgd_offset(mm
, addr
);
85 pud
= pud_offset(pgd
, addr
);
86 pmd
= pmd_offset(pud
, addr
);
87 pte
= pte_offset_kernel(pmd
, addr
);
92 int huge_pmd_unshare(struct mm_struct
*mm
, unsigned long *addr
, pte_t
*ptep
)
97 struct page
*follow_huge_addr(struct mm_struct
*mm
,
98 unsigned long address
, int write
)
100 return ERR_PTR(-EINVAL
);
103 int pmd_huge(pmd_t pmd
)
105 return pmd_page_shift(pmd
) > PAGE_SHIFT
;
108 int pud_huge(pud_t pud
)
113 struct page
*follow_huge_pmd(struct mm_struct
*mm
, unsigned long address
,
114 pmd_t
*pmd
, int write
)
119 #ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
122 * Look for an unmapped area starting after another hugetlb vma.
123 * There are guaranteed to be no huge pte's spare if all the huge pages are
124 * full size (4MB), so in that case compile out this search.
126 #if HPAGE_SHIFT == HUGEPT_SHIFT
127 static inline unsigned long
128 hugetlb_get_unmapped_area_existing(unsigned long len
)
134 hugetlb_get_unmapped_area_existing(unsigned long len
)
136 struct mm_struct
*mm
= current
->mm
;
137 struct vm_area_struct
*vma
;
138 unsigned long start_addr
, addr
;
141 if (mm
->context
.part_huge
) {
142 start_addr
= mm
->context
.part_huge
;
145 start_addr
= TASK_UNMAPPED_BASE
;
151 for (vma
= find_vma(mm
, addr
); ; vma
= vma
->vm_next
) {
152 if ((!vma
&& !after_huge
) || TASK_SIZE
- len
< addr
) {
154 * Start a new search - just in case we missed
157 if (start_addr
!= TASK_UNMAPPED_BASE
) {
158 start_addr
= TASK_UNMAPPED_BASE
;
163 /* skip ahead if we've aligned right over some vmas */
164 if (vma
&& vma
->vm_end
<= addr
)
166 /* space before the next vma? */
167 if (after_huge
&& (!vma
|| ALIGN_HUGEPT(addr
+ len
)
169 unsigned long end
= addr
+ len
;
170 if (end
& HUGEPT_MASK
)
171 mm
->context
.part_huge
= end
;
172 else if (addr
== mm
->context
.part_huge
)
173 mm
->context
.part_huge
= 0;
176 if (vma
&& (vma
->vm_flags
& MAP_HUGETLB
)) {
177 /* space after a huge vma in 2nd level page table? */
178 if (vma
->vm_end
& HUGEPT_MASK
) {
180 /* no need to align to the next PT block */
186 addr
= ALIGN_HUGEPT(vma
->vm_end
);
191 /* Do a full search to find an area without any nearby normal pages. */
193 hugetlb_get_unmapped_area_new_pmd(unsigned long len
)
195 struct mm_struct
*mm
= current
->mm
;
196 struct vm_area_struct
*vma
;
197 unsigned long start_addr
, addr
;
199 if (ALIGN_HUGEPT(len
) > mm
->cached_hole_size
)
200 start_addr
= mm
->free_area_cache
;
202 start_addr
= TASK_UNMAPPED_BASE
;
205 addr
= ALIGN_HUGEPT(start_addr
);
207 for (vma
= find_vma(mm
, addr
); ; vma
= vma
->vm_next
) {
208 if (TASK_SIZE
- len
< addr
) {
210 * Start a new search - just in case we missed
213 if (start_addr
!= TASK_UNMAPPED_BASE
) {
214 start_addr
= TASK_UNMAPPED_BASE
;
215 mm
->cached_hole_size
= 0;
220 /* skip ahead if we've aligned right over some vmas */
221 if (vma
&& vma
->vm_end
<= addr
)
223 if (!vma
|| ALIGN_HUGEPT(addr
+ len
) <= vma
->vm_start
) {
224 #if HPAGE_SHIFT < HUGEPT_SHIFT
225 if (len
& HUGEPT_MASK
)
226 mm
->context
.part_huge
= addr
+ len
;
230 addr
= ALIGN_HUGEPT(vma
->vm_end
);
235 hugetlb_get_unmapped_area(struct file
*file
, unsigned long addr
,
236 unsigned long len
, unsigned long pgoff
, unsigned long flags
)
238 struct hstate
*h
= hstate_file(file
);
240 if (len
& ~huge_page_mask(h
))
245 if (flags
& MAP_FIXED
) {
246 if (prepare_hugepage_range(file
, addr
, len
))
252 addr
= ALIGN(addr
, huge_page_size(h
));
253 if (!prepare_hugepage_range(file
, addr
, len
))
258 * Look for an existing hugetlb vma with space after it (this is to to
259 * minimise fragmentation caused by huge pages.
261 addr
= hugetlb_get_unmapped_area_existing(len
);
266 * Find an unmapped naturally aligned set of 4MB blocks that we can use
269 addr
= hugetlb_get_unmapped_area_new_pmd(len
);
276 #endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/
278 /* necessary for boot time 4MB huge page allocation */
279 static __init
int setup_hugepagesz(char *opt
)
281 unsigned long ps
= memparse(opt
, &opt
);
282 if (ps
== (1 << HPAGE_SHIFT
)) {
283 hugetlb_add_hstate(HPAGE_SHIFT
- PAGE_SHIFT
);
285 pr_err("hugepagesz: Unsupported page size %lu M\n",
291 __setup("hugepagesz=", setup_hugepagesz
);