/*
 * Copyright IBM Corp. 2006
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

static DEFINE_MUTEX(vmem_mutex);

struct memory_segment {
	struct list_head list;
	unsigned long start;
	unsigned long size;
};

static LIST_HEAD(mem_segs);

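/*
 * Allocate pages from the page allocator once the slab allocator is
 * available, otherwise fall back to the bootmem allocator during early
 * boot. The __ref annotation tells modpost that the reference to the
 * __init bootmem code is intentional.
 */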
static void __ref *vmem_alloc_pages(unsigned int order)
{
	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
}

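/*
 * Region-third and segment tables have 2048 entries of 8 bytes each,
 * i.e. 16KB, hence the order-2 page allocations and the PAGE_SIZE * 4
 * clear_table() length in the two helpers below.
 */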
static inline pud_t *vmem_pud_alloc(void)
{
	pud_t *pud = NULL;

	pud = vmem_alloc_pages(2);
	if (!pud)
		return NULL;
	clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
	return pud;
}

static inline pmd_t *vmem_pmd_alloc(void)
{
	pmd_t *pmd = NULL;

	pmd = vmem_alloc_pages(2);
	if (!pmd)
		return NULL;
	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
	return pmd;
}

static pte_t __ref *vmem_pte_alloc(unsigned long address)
{
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm);
	else
		pte = alloc_bootmem_align(PTRS_PER_PTE * sizeof(pte_t),
					  PTRS_PER_PTE * sizeof(pte_t));
	if (!pte)
		return NULL;
	clear_table((unsigned long *) pte, _PAGE_INVALID,
		    PTRS_PER_PTE * sizeof(pte_t));
	return pte;
}

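/*
 * A page table holds PTRS_PER_PTE (256) entries of 8 bytes, i.e. 2KB;
 * the bootmem path above therefore asks for a naturally aligned 2KB
 * allocation instead of a full page.
 */
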
/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
{
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	int ret = -ENOMEM;

	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pu_dir);
		}
		pu_dir = pud_offset(pg_dir, address);
#ifndef CONFIG_DEBUG_PAGEALLOC
		if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
		    !(address & ~PUD_MASK) && (address + PUD_SIZE <= end)) {
			pud_val(*pu_dir) = __pa(address) |
				_REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE |
				(ro ? _REGION_ENTRY_PROTECT : 0);
			address += PUD_SIZE;
			continue;
		}
#endif
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}
		pm_dir = pmd_offset(pu_dir, address);
#ifndef CONFIG_DEBUG_PAGEALLOC
		if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
		    !(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) {
			pmd_val(*pm_dir) = __pa(address) |
				_SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE |
				_SEGMENT_ENTRY_YOUNG |
				(ro ? _SEGMENT_ENTRY_PROTECT : 0);
			address += PMD_SIZE;
			continue;
		}
#endif
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc(address);
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		pte_val(*pt_dir) = __pa(address) |
			pgprot_val(ro ? PAGE_KERNEL_RO : PAGE_KERNEL);
		address += PAGE_SIZE;
	}
	ret = 0;
out:
	return ret;
}

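/*
 * vmem_add_mem() above uses the largest frame the machine supports:
 * 2GB region-third entries with EDAT2, 1MB segment entries with EDAT1,
 * and 4KB pages otherwise. A large frame is only used if the current
 * address is aligned to the frame size (e.g. !(address & ~PUD_MASK))
 * and the remaining range still covers a whole frame. With
 * CONFIG_DEBUG_PAGEALLOC large frames are disabled so that mappings
 * can be invalidated with 4KB granularity.
 */
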
/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates page table entries.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t pte;

	pte_val(pte) = _PAGE_INVALID;
	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			address += PGDIR_SIZE;
			continue;
		}
		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			address += PUD_SIZE;
			continue;
		}
		if (pud_large(*pu_dir)) {
			pud_clear(pu_dir);
			address += PUD_SIZE;
			continue;
		}
		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			address += PMD_SIZE;
			continue;
		}
		if (pmd_large(*pm_dir)) {
			pmd_clear(pm_dir);
			address += PMD_SIZE;
			continue;
		}
		pt_dir = pte_offset_kernel(pm_dir, address);
		*pt_dir = pte;
		address += PAGE_SIZE;
	}
	flush_tlb_kernel_range(start, end);
}

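/*
 * Note that vmem_remove_range() only clears or invalidates entries and
 * flushes the TLB once for the whole range; the page tables themselves
 * are not freed.
 */
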
/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	int ret = -ENOMEM;

	for (address = start; address < end;) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			/* Use 1MB frames for vmemmap if available. We
			 * always use large frames, even if they are only
			 * partially used, since vmemmap_populate() is
			 * called for each section separately and we would
			 * otherwise end up with page tables as well. */
			if (MACHINE_HAS_EDAT1) {
				void *new_page;

				new_page = vmemmap_alloc_block(PMD_SIZE, node);
				if (!new_page)
					goto out;
				pmd_val(*pm_dir) = __pa(new_page) |
					_SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE;
				address = (address + PMD_SIZE) & PMD_MASK;
				continue;
			}
			pt_dir = vmem_pte_alloc(address);
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		} else if (pmd_large(*pm_dir)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			void *new_page;

			new_page = vmemmap_alloc_block(PAGE_SIZE, node);
			if (!new_page)
				goto out;
			pte_val(*pt_dir) =
				__pa(new_page) | pgprot_val(PAGE_KERNEL);
		}
		address += PAGE_SIZE;
	}
	ret = 0;
out:
	return ret;
}

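/*
 * Freeing the vmemmap backing is not implemented; the mapping stays in
 * place, so this is an empty stub.
 */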
void vmemmap_free(unsigned long start, unsigned long end)
{
}

/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
static int insert_memory_segment(struct memory_segment *seg)
{
	struct memory_segment *tmp;

	if (seg->start + seg->size > VMEM_MAX_PHYS ||
	    seg->start + seg->size < seg->start)
		return -ERANGE;

	list_for_each_entry(tmp, &mem_segs, list) {
		if (seg->start >= tmp->start + tmp->size)
			continue;
		if (seg->start + seg->size <= tmp->start)
			continue;
		return -ENOSPC;
	}
	list_add(&seg->list, &mem_segs);
	return 0;
}

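/*
 * The overlap check above uses the fact that two ranges are disjoint
 * iff one ends at or before the start of the other; the second
 * -ERANGE condition catches unsigned wrap-around of start + size.
 */
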
/*
 * Remove memory segment from the segment list.
 */
static void remove_memory_segment(struct memory_segment *seg)
{
	list_del(&seg->list);
}

static void __remove_shared_memory(struct memory_segment *seg)
{
	remove_memory_segment(seg);
	vmem_remove_range(seg->start, seg->size);
}

int vmem_remove_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);

	ret = -ENOENT;
	list_for_each_entry(seg, &mem_segs, list) {
		if (seg->start == start && seg->size == size)
			break;
	}

	if (seg->start != start || seg->size != size)
		goto out;

	ret = 0;
	__remove_shared_memory(seg);
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

int vmem_add_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);
	ret = -ENOMEM;
	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		goto out;
	seg->start = start;
	seg->size = size;

	ret = insert_memory_segment(seg);
	if (ret)
		goto out_free;

	ret = vmem_add_mem(start, size, 0);
	if (ret)
		goto out_remove;
	goto out;

out_remove:
	__remove_shared_memory(seg);
out_free:
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

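/*
 * vmem_add_mapping() and vmem_remove_mapping() form the external
 * interface for mapping extra ranges, used for instance by the DCSS
 * segment driver. A hypothetical caller would pair them like this:
 *
 *	if (vmem_add_mapping(seg_start, seg_size) == 0) {
 *		... access the identity-mapped range ...
 *		vmem_remove_mapping(seg_start, seg_size);
 *	}
 */
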
/*
 * Map the whole physical memory into virtual memory (identity
 * mapping). Enough space is reserved in the vmalloc area for the
 * vmemmap so that additional memory segments can be hotplugged.
 */
void __init vmem_map_init(void)
{
	unsigned long ro_start, ro_end;
	struct memblock_region *reg;
	phys_addr_t start, end;

	ro_start = PFN_ALIGN((unsigned long)&_stext);
	ro_end = (unsigned long)&_eshared & PAGE_MASK;
	for_each_memblock(memory, reg) {
		start = reg->base;
		end = reg->base + reg->size - 1;
		if (start >= ro_end || end <= ro_start)
			vmem_add_mem(start, end - start, 0);
		else if (start >= ro_start && end <= ro_end)
			vmem_add_mem(start, end - start, 1);
		else if (start >= ro_start) {
			vmem_add_mem(start, ro_end - start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		} else if (end < ro_end) {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, end - ro_start, 1);
		} else {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, ro_end - ro_start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		}
	}
}

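/*
 * The case analysis in vmem_map_init() splits every memblock region
 * around the read-only range [ro_start, ro_end), which covers the
 * kernel image from _stext up to _eshared: pieces inside the range are
 * mapped read-only, pieces outside read-write.
 */
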
/*
 * Convert memblock.memory to a memory segment list so there is a single
 * list that contains all memory segments.
 */
static int __init vmem_convert_memory_chunk(void)
{
	struct memblock_region *reg;
	struct memory_segment *seg;

	mutex_lock(&vmem_mutex);
	for_each_memblock(memory, reg) {
		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
		if (!seg)
			panic("Out of memory...\n");
		seg->start = reg->base;
		seg->size = reg->size;
		insert_memory_segment(seg);
	}
	mutex_unlock(&vmem_mutex);
	return 0;
}

core_initcall(vmem_convert_memory_chunk);
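
/*
 * core_initcall runs after the slab allocator is up, so the GFP_KERNEL
 * allocations and the mutex in the conversion above are safe at this
 * point.
 */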