/*
 *  Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>

#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/machdep.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/iommu.h>

#ifdef CONFIG_PPC_STD_MMU_64
#if PGTABLE_RANGE > USER_VSID_RANGE
#warning Limited user VSID range means pagetable space is wasted
#endif

#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
#warning TASK_SIZE is smaller than it needs to be.
#endif
#endif /* CONFIG_PPC_STD_MMU_64 */

phys_addr_t memstart_addr = ~0;
EXPORT_SYMBOL_GPL(memstart_addr);
phys_addr_t kernstart_addr;
EXPORT_SYMBOL_GPL(kernstart_addr);

static void pgd_ctor(void *addr)
{
	memset(addr, 0, PGD_TABLE_SIZE);
}

static void pmd_ctor(void *addr)
{
	memset(addr, 0, PMD_TABLE_SIZE);
}

struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE];

/*
 * Create a kmem_cache() for pagetables.  This is not used for PTE
 * pages - they're linked to struct page, come from the normal free
 * pages pool and have a different entry size (see real_pte_t) to
 * everything else.  Caches created by this function are used for all
 * the higher level pagetables, and for hugepage pagetables.
 */
void pgtable_cache_add(unsigned shift, void (*ctor)(void *))
{
	char *name;
	unsigned long table_size = sizeof(void *) << shift;
	unsigned long align = table_size;

	/* When batching pgtable pointers for RCU freeing, we store
	 * the index size in the low bits.  Table alignment must be
	 * big enough to fit it.
	 *
	 * Likewise, hugepage pagetable pointers contain a (different)
	 * shift value in the low bits.  All tables must be aligned so
	 * as to leave enough 0 bits in the address to contain it. */
	unsigned long minalign = max(MAX_PGTABLE_INDEX_SIZE + 1,
				     HUGEPD_SHIFT_MASK + 1);
	struct kmem_cache *new;

	/* It would be nice if this was a BUILD_BUG_ON(), but at the
	 * moment, gcc doesn't seem to recognize is_power_of_2 as a
	 * constant expression, so so much for that. */
	BUG_ON(!is_power_of_2(minalign));
	BUG_ON((shift < 1) || (shift > MAX_PGTABLE_INDEX_SIZE));

	if (PGT_CACHE(shift))
		return; /* Already have a cache of this size */

	align = max_t(unsigned long, align, minalign);
	name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
	new = kmem_cache_create(name, table_size, align, 0, ctor);
	pgtable_cache[shift - 1] = new;
	pr_debug("Allocated pgtable cache for order %d\n", shift);
}
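
/*
 * A sketch of the low-bit tagging that the alignment above makes room
 * for (helper names here are hypothetical; the in-tree consumer is the
 * pgtable_free_tlb()/RCU free path): because every table is aligned to
 * at least MAX_PGTABLE_INDEX_SIZE + 1, the index size can ride in the
 * pointer's low bits and be masked back out later.
 */
#if 0	/* illustration only */
static inline unsigned long pgf_pack(void *table, unsigned long shift)
{
	/* alignment guarantees these low bits of 'table' are zero */
	return (unsigned long)table | shift;
}

static inline void *pgf_table(unsigned long pgf)
{
	return (void *)(pgf & ~(unsigned long)MAX_PGTABLE_INDEX_SIZE);
}

static inline unsigned long pgf_shift(unsigned long pgf)
{
	return pgf & MAX_PGTABLE_INDEX_SIZE;
}
#endif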

void pgtable_cache_init(void)
{
	pgtable_cache_add(PGD_INDEX_SIZE, pgd_ctor);
	pgtable_cache_add(PMD_CACHE_INDEX, pmd_ctor);
	if (!PGT_CACHE(PGD_INDEX_SIZE) || !PGT_CACHE(PMD_CACHE_INDEX))
		panic("Couldn't allocate pgtable caches");
	/* In all current configs, when the PUD index exists it's the
	 * same size as either the pgd or pmd index.  Verify that the
	 * initialization above has also created a PUD cache.  This
	 * will need re-examination if we add new possibilities for
	 * the pagetable layout. */
	BUG_ON(PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE));
}
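
/*
 * For context, a sketch of how these caches get used (this mirrors the
 * shape of pgd_alloc()/pgd_free() in asm/pgalloc-64.h; details may
 * differ by kernel version, and the example_* names are hypothetical):
 */
#if 0	/* illustration only */
static inline pgd_t *example_pgd_alloc(struct mm_struct *mm)
{
	return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), GFP_KERNEL);
}

static inline void example_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
}
#endif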

#ifdef CONFIG_SPARSEMEM_VMEMMAP

/*
 * Given an address within the vmemmap, determine the pfn of the page that
 * represents the start of the section it is within.  Note that we have to
 * do this by hand as the proffered address may not be correctly aligned.
 * Subtraction of non-aligned pointers produces undefined results.
 */
static unsigned long __meminit vmemmap_section_start(unsigned long page)
{
	unsigned long offset = page - ((unsigned long)(vmemmap));

	/* Return the pfn of the start of the section. */
	return (offset / sizeof(struct page)) & PAGE_SECTION_MASK;
}
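
/*
 * A sketch of the identity implemented above: vmemmap is a virtual
 * array of struct page indexed by pfn, so &vmemmap[pfn] addresses
 * pfn's struct page; the division recovers the pfn and the mask
 * rounds it down to its section's first pfn. The helper name below
 * is hypothetical.
 */
#if 0	/* illustration only */
static void vmemmap_section_start_check(unsigned long pfn)
{
	unsigned long va = (unsigned long)&vmemmap[pfn];

	BUG_ON(vmemmap_section_start(va) != (pfn & PAGE_SECTION_MASK));
}
#endif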

/*
 * Check if this vmemmap page is already initialised.  If any section
 * which overlaps this vmemmap page is initialised then this page is
 * initialised already.
 */
static int __meminit vmemmap_populated(unsigned long start, int page_size)
{
	unsigned long end = start + page_size;
	start = (unsigned long)(pfn_to_page(vmemmap_section_start(start)));

	for (; start < end; start += (PAGES_PER_SECTION * sizeof(struct page)))
		if (pfn_valid(page_to_pfn((struct page *)start)))
			return 1;

	return 0;
}

/* On hash-based CPUs, the vmemmap is bolted in the hash table.
 *
 * On Book3E CPUs, the vmemmap is currently mapped in the top half of
 * the vmalloc space using normal page tables, though the size of
 * pages encoded in the PTEs can be different.
 */

#ifdef CONFIG_PPC_BOOK3E
static void __meminit vmemmap_create_mapping(unsigned long start,
					     unsigned long page_size,
					     unsigned long phys)
{
	/* Create a PTE encoding without page size */
	unsigned long i, flags = _PAGE_PRESENT | _PAGE_ACCESSED |
		_PAGE_KERNEL_RW;

	/* PTEs only contain page size encodings up to 32M */
	BUG_ON(mmu_psize_defs[mmu_vmemmap_psize].enc > 0xf);

	/* Encode the size in the PTE */
	flags |= mmu_psize_defs[mmu_vmemmap_psize].enc << 8;

	/* For each PTE for that area, map things. Note that we don't
	 * increment phys because all PTEs are of the large size and
	 * thus must have the low bits clear
	 */
	for (i = 0; i < page_size; i += PAGE_SIZE)
		BUG_ON(map_kernel_page(start + i, phys, flags));
}

#ifdef CONFIG_MEMORY_HOTPLUG
static void vmemmap_remove_mapping(unsigned long start,
				   unsigned long page_size)
{
}
#endif
#else /* CONFIG_PPC_BOOK3E */
static void __meminit vmemmap_create_mapping(unsigned long start,
					     unsigned long page_size,
					     unsigned long phys)
{
	int mapped = htab_bolt_mapping(start, start + page_size, phys,
				       pgprot_val(PAGE_KERNEL),
				       mmu_vmemmap_psize, mmu_kernel_ssize);
	BUG_ON(mapped < 0);
}

#ifdef CONFIG_MEMORY_HOTPLUG
static void vmemmap_remove_mapping(unsigned long start,
				   unsigned long page_size)
{
	int mapped = htab_remove_mapping(start, start + page_size,
					 mmu_vmemmap_psize, mmu_kernel_ssize);
	BUG_ON(mapped < 0);
}
#endif

#endif /* CONFIG_PPC_BOOK3E */

struct vmemmap_backing *vmemmap_list;
static struct vmemmap_backing *next;
static int num_left;
static int num_freed;
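
/*
 * For reference, the backing record tracked here (shown as a sketch of
 * the definition in asm/pgalloc-64.h for this vintage) pairs each
 * vmemmap block's virtual address with its backing physical address:
 */
#if 0	/* illustration only */
struct vmemmap_backing {
	struct vmemmap_backing *list;
	unsigned long phys;
	unsigned long virt_addr;
};
#endif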

static __meminit struct vmemmap_backing * vmemmap_list_alloc(int node)
{
	struct vmemmap_backing *vmem_back;
	/* get from freed entries first */
	if (num_freed) {
		num_freed--;
		vmem_back = next;
		next = next->list;

		return vmem_back;
	}

	/* allocate a page when required and hand out chunks */
	if (!num_left) {
		next = vmemmap_alloc_block(PAGE_SIZE, node);
		if (unlikely(!next)) {
			WARN_ON(1);
			return NULL;
		}
		num_left = PAGE_SIZE / sizeof(struct vmemmap_backing);
	}

	num_left--;

	return next++;
}

static __meminit void vmemmap_list_populate(unsigned long phys,
					    unsigned long start,
					    int node)
{
	struct vmemmap_backing *vmem_back;

	vmem_back = vmemmap_list_alloc(node);
	if (unlikely(!vmem_back)) {
		WARN_ON(1);
		return;
	}

	vmem_back->phys = phys;
	vmem_back->virt_addr = start;
	vmem_back->list = vmemmap_list;

	vmemmap_list = vmem_back;
}

int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

	/* Align to the page size of the linear mapping. */
	start = _ALIGN_DOWN(start, page_size);

	pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);

	for (; start < end; start += page_size) {
		void *p;

		if (vmemmap_populated(start, page_size))
			continue;

		p = vmemmap_alloc_block(page_size, node);
		if (!p)
			return -ENOMEM;

		vmemmap_list_populate(__pa(p), start, node);

		pr_debug(" * %016lx..%016lx allocated at %p\n",
			 start, start + page_size, p);

		vmemmap_create_mapping(start, page_size, __pa(p));
	}

	return 0;
}
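
/*
 * For context, a sketch of the generic sparsemem caller (the shape of
 * sparse_mem_map_populate() in mm/sparse-vmemmap.c, which may differ
 * by kernel version): each section's struct page array is mapped
 * through the vmemmap_populate() hook above.
 */
#if 0	/* illustration only */
struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid)
{
	struct page *map = pfn_to_page(pnum * PAGES_PER_SECTION);
	unsigned long start = (unsigned long)map;
	unsigned long end = (unsigned long)(map + PAGES_PER_SECTION);

	if (vmemmap_populate(start, end, nid))
		return NULL;

	return map;
}
#endif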

#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long vmemmap_list_free(unsigned long start)
{
	struct vmemmap_backing *vmem_back, *vmem_back_prev;

	vmem_back_prev = vmem_back = vmemmap_list;

	/* look for it with prev pointer recorded */
	for (; vmem_back; vmem_back = vmem_back->list) {
		if (vmem_back->virt_addr == start)
			break;
		vmem_back_prev = vmem_back;
	}

	if (unlikely(!vmem_back)) {
		WARN_ON(1);
		return 0;
	}

	/* remove it from vmemmap_list */
	if (vmem_back == vmemmap_list) /* remove head */
		vmemmap_list = vmem_back->list;
	else
		vmem_back_prev->list = vmem_back->list;

	/* next point to this freed entry */
	vmem_back->list = next;
	next = vmem_back;
	num_freed++;

	return vmem_back->phys;
}

void __ref vmemmap_free(unsigned long start, unsigned long end)
{
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

	start = _ALIGN_DOWN(start, page_size);

	pr_debug("vmemmap_free %lx...%lx\n", start, end);

	for (; start < end; start += page_size) {
		unsigned long addr;

		/*
		 * The section has already been marked as invalid, so a
		 * true return from vmemmap_populated() means some other
		 * section still lives in this page; skip it.
		 */
		if (vmemmap_populated(start, page_size))
			continue;

		addr = vmemmap_list_free(start);
		if (addr) {
			struct page *page = pfn_to_page(addr >> PAGE_SHIFT);

			if (PageReserved(page)) {
				/* allocated from bootmem */
				if (page_size < PAGE_SIZE) {
					/*
					 * this shouldn't happen, but if it is
					 * the case, leave the memory there
					 */
					WARN_ON_ONCE(1);
				} else {
					unsigned int nr_pages =
						1 << get_order(page_size);
					while (nr_pages--)
						free_reserved_page(page++);
				}
			} else
				free_pages((unsigned long)(__va(addr)),
					   get_order(page_size));

			vmemmap_remove_mapping(start, page_size);
		}
	}
}
#endif

void register_page_bootmem_memmap(unsigned long section_nr,
				  struct page *start_page, unsigned long size)
{
}

/*
 * We do not have access to the sparsemem vmemmap, so we fallback to
 * walking the list of sparsemem blocks which we already maintain for
 * the sake of crashdump. In the long run, we might want to maintain
 * a tree if performance of that linear walk becomes a problem.
 *
 * realmode_pfn_to_page functions can fail due to:
 * 1) As real sparsemem blocks do not lie in RAM contiguously (they
 * are in virtual address space which is not available in the real mode),
 * the requested page struct can be split between blocks so get_page/put_page
 * may fail.
 * 2) When huge pages are used, the get_page/put_page API will fail
 * in real mode as the linked addresses in the page struct are virtual
 * too.
 */
struct page *realmode_pfn_to_page(unsigned long pfn)
{
	struct vmemmap_backing *vmem_back;
	struct page *page;
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
	unsigned long pg_va = (unsigned long) pfn_to_page(pfn);

	for (vmem_back = vmemmap_list; vmem_back; vmem_back = vmem_back->list) {
		if (pg_va < vmem_back->virt_addr)
			continue;

		/* vmemmap_list entries can have been freed, so the list
		 * may be unsorted; check every block */
		if ((pg_va + sizeof(struct page)) <=
				(vmem_back->virt_addr + page_size)) {
			page = (struct page *) (vmem_back->phys + pg_va -
				vmem_back->virt_addr);
			return page;
		}
	}

	/* Probably that page struct is split between real pages */
	return NULL;
}
EXPORT_SYMBOL_GPL(realmode_pfn_to_page);
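
/*
 * A sketch of the intended calling pattern (hypothetical caller; the
 * in-tree users are KVM's real-mode handlers): a NULL return means the
 * struct page cannot be reached in real mode and the caller must fall
 * back to the virtual-mode path.
 */
#if 0	/* illustration only */
static long example_realmode_use(unsigned long pfn)
{
	struct page *page = realmode_pfn_to_page(pfn);

	if (!page)
		return -EAGAIN;	/* retry in virtual mode */
	/* 'page' is now usable via its real address */
	return 0;
}
#endif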

#elif defined(CONFIG_FLATMEM)

struct page *realmode_pfn_to_page(unsigned long pfn)
{
	struct page *page = pfn_to_page(pfn);
	return page;
}
EXPORT_SYMBOL_GPL(realmode_pfn_to_page);

#endif /* CONFIG_SPARSEMEM_VMEMMAP/CONFIG_FLATMEM */