/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

#if 0	/* This is just for testing */
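/*
 * Debug helper (compiled out): look up the struct page backing the
 * huge page that maps @address in @mm.
 */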
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	unsigned long start = address;
	unsigned long vpfn = address / PAGE_SIZE;	/* virtual pfn of @address (assumed definition) */
	struct page *page;
	pte_t *pte;
	struct vm_area_struct *vma;

	vma = find_vma(mm, address);
	if (!vma || !is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	pte = huge_pte_offset(mm, address);

	/* hugetlb should be locked, and hence, prefaulted */
	WARN_ON(!pte || pte_none(*pte));

	page = &pte_page(*pte)[vpfn % (HPAGE_SIZE / PAGE_SIZE)];

	WARN_ON(!PageHead(page));

	return page;
}
int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

#else
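/*
 * Live versions: a pmd/pud maps a huge page when its PSE (page size
 * extension) bit is set.
 */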
int pmd_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_PSE);
}

int pud_huge(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_PSE);
}
#endif

#ifdef CONFIG_HUGETLB_PAGE
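/*
 * Bottom-up search: scan upward from the legacy mmap base for a free
 * range of @len bytes.  align_mask constrains candidates to huge page
 * alignment for this file's hstate.
 */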
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.low_limit = current->mm->mmap_legacy_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}
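/*
 * Top-down search: scan downward from the process's mmap base toward
 * PAGE_SIZE for a hugepage-aligned free range.
 */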
static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
		unsigned long addr0, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;
	unsigned long addr;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = current->mm->mmap_base;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}
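/*
 * arch hook for hugetlbfs mmap(): validate the length and any
 * MAP_FIXED request, honor an aligned address hint when the range is
 * free, and otherwise delegate to the bottom-up or top-down search to
 * match the mm's regular allocation policy.
 */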
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}
#endif /* CONFIG_HUGETLB_PAGE */

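/*
 * Parse the "hugepagesz=" boot parameter, e.g. "hugepagesz=2M", or
 * "hugepagesz=1G" on CPUs that support GB pages.
 */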
static __init int setup_hugepagesz(char *opt)
{
	unsigned long ps = memparse(opt, &opt);
	if (ps == PMD_SIZE) {
		hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
	} else if (ps == PUD_SIZE && cpu_has_gbpages) {
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	} else {
		printk(KERN_ERR "hugepagesz: Unsupported page size %lu M\n",
			ps >> 20);
		return 0;
	}
	return 1;
}
__setup("hugepagesz=", setup_hugepagesz);

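/*
 * Register the 1GB hstate at boot even without a "hugepagesz=1G"
 * parameter, since CMA still allows gigantic pages to be allocated at
 * runtime.
 */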
#ifdef CONFIG_CMA
static __init int gigantic_pages_init(void)
{
	/* With CMA we can allocate gigantic pages at runtime */
	if (cpu_has_gbpages && !size_to_hstate(1UL << PUD_SHIFT))
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	return 0;
}
arch_initcall(gigantic_pages_init);
#endif