/*
 * IA-64 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002-2004 Rohit Seth <rohit.seth@intel.com>
 * Copyright (C) 2003-2004 Ken Chen <kenneth.w.chen@intel.com>
 *
 * Sep, 2003: add numa support
 * Feb, 2004: dynamic hugetlb page size via boot parameter
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/log2.h>
#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

unsigned int hpage_shift = HPAGE_SHIFT_DEFAULT;

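/*
 * Huge pages live in the dedicated RGN_HPAGE region and are mapped with
 * the normal page-table format; huge_pte_alloc() and huge_pte_offset()
 * first scale the address down with htlbpage_to_page() so the standard
 * page-table walkers and allocators apply.
 */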
pte_t *
huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
	unsigned long taddr = htlbpage_to_page(addr);
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, taddr);
	pud = pud_alloc(mm, pgd, taddr);
	if (pud) {
		pmd = pmd_alloc(mm, pud, taddr);
		if (pmd)
			pte = pte_alloc_map(mm, pmd, taddr);
	}
	return pte;
}

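/*
 * Like huge_pte_alloc(), but only walks the existing tables; returns
 * NULL when an intermediate level is not present.
 */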
pte_t *
huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	unsigned long taddr = htlbpage_to_page(addr);
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, taddr);
	if (pgd_present(*pgd)) {
		pud = pud_offset(pgd, taddr);
		if (pud_present(*pud)) {
			pmd = pmd_offset(pud, taddr);
			if (pmd_present(*pmd))
				pte = pte_offset_map(pmd, taddr);
		}
	}

	return pte;
}

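/*
 * hugetlb PMD sharing is not implemented on ia64, so there is never a
 * shared PMD to unshare; report that nothing was unshared.
 */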
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}

#define mk_pte_huge(entry) { pte_val(entry) |= _PAGE_P; }

/*
 * Don't actually need to do any preparation, but need to make sure
 * the address is in the right region.
 */
int prepare_hugepage_range(struct file *file,
			unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	if (REGION_NUMBER(addr) != RGN_HPAGE)
		return -EINVAL;

	return 0;
}

struct page *follow_huge_addr(struct mm_struct *mm, unsigned long addr, int write)
{
	struct page *page;
	pte_t *ptep;

	if (REGION_NUMBER(addr) != RGN_HPAGE)
		return ERR_PTR(-EINVAL);

	ptep = huge_pte_offset(mm, addr);
	if (!ptep || pte_none(*ptep))
		return NULL;
	page = pte_page(*ptep);
	page += ((addr & ~HPAGE_MASK) >> PAGE_SHIFT);
	return page;
}
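
/*
 * Huge pages never appear at the PMD level on ia64; lookups go through
 * follow_huge_addr() in the RGN_HPAGE region instead.
 */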
int pmd_huge(pmd_t pmd)
{
	return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write)
{
	return NULL;
}

void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			unsigned long addr, unsigned long end,
			unsigned long floor, unsigned long ceiling)
{
	/*
	 * This is called to free hugetlb page tables.
	 *
	 * The offset of these addresses from the base of the hugetlb
	 * region must be scaled down by HPAGE_SIZE/PAGE_SIZE so that
	 * the standard free_pgd_range will free the right page tables.
	 *
	 * If floor and ceiling are also in the hugetlb region, they
	 * must likewise be scaled down; but if outside, left unchanged.
	 */

	addr = htlbpage_to_page(addr);
	end = htlbpage_to_page(end);
	if (REGION_NUMBER(floor) == RGN_HPAGE)
		floor = htlbpage_to_page(floor);
	if (REGION_NUMBER(ceiling) == RGN_HPAGE)
		ceiling = htlbpage_to_page(ceiling);

	free_pgd_range(tlb, addr, end, floor, ceiling);
}

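/*
 * Pick an unmapped, HPAGE_SIZE-aligned range inside the huge-page
 * region by a linear walk of the VMA list.
 */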
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct vm_area_struct *vmm;

	if (len > RGN_MAP_LIMIT)
		return -ENOMEM;
	if (len & ~HPAGE_MASK)
		return -EINVAL;

	/* Handle MAP_FIXED */
	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	/* This code assumes that RGN_HPAGE != 0. */
	if ((REGION_NUMBER(addr) != RGN_HPAGE) || (addr & (HPAGE_SIZE - 1)))
		addr = HPAGE_REGION_BASE;
	else
		addr = ALIGN(addr, HPAGE_SIZE);
	for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
		/* At this point: (!vmm || addr < vmm->vm_end). */
		if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
			return -ENOMEM;
		if (!vmm || (addr + len) <= vmm->vm_start)
			return addr;
		addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
	}
}

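/*
 * Parse the "hugepagesz=" boot parameter.  The requested size must be
 * a power of two that the TLB supports (per PAL), larger than the base
 * page size, and small enough for the page allocator to manage.
 */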
static int __init hugetlb_setup_sz(char *str)
{
	u64 tr_pages;
	unsigned long long size;

	if (ia64_pal_vm_page_size(&tr_pages, NULL) != 0)
		/*
		 * shouldn't happen, but just in case.
		 */
		tr_pages = 0x15557000UL;

	size = memparse(str, &str);
	if (*str || !is_power_of_2(size) || !(tr_pages & size) ||
		size <= PAGE_SIZE ||
		size >= (1UL << PAGE_SHIFT << MAX_ORDER)) {
		printk(KERN_WARNING "Invalid huge page size specified\n");
		return 1;
	}

	hpage_shift = __ffs(size);
	/*
	 * The boot CPU already executed ia64_mmu_init() with
	 * HPAGE_SHIFT_DEFAULT; override that here with the new page shift.
	 */
	ia64_set_rr(HPAGE_REGION_BASE, hpage_shift << 2);
	return 0;
}
early_param("hugepagesz", hugetlb_setup_sz);