powerpc/mm: Runtime allocation of mmu context maps for nohash CPUs
[deliverable/linux.git] / arch / powerpc / mm / init_32.c
CommitLineData
14cf11af
PM
1/*
2 * PowerPC version
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 *
5 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
6 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
7 * Copyright (C) 1996 Paul Mackerras
14cf11af
PM
8 * PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
9 *
10 * Derived from "arch/i386/mm/init.c"
11 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License
15 * as published by the Free Software Foundation; either version
16 * 2 of the License, or (at your option) any later version.
17 *
18 */
19
14cf11af
PM
20#include <linux/module.h>
21#include <linux/sched.h>
22#include <linux/kernel.h>
23#include <linux/errno.h>
24#include <linux/string.h>
25#include <linux/types.h>
26#include <linux/mm.h>
27#include <linux/stddef.h>
28#include <linux/init.h>
29#include <linux/bootmem.h>
30#include <linux/highmem.h>
31#include <linux/initrd.h>
32#include <linux/pagemap.h>
d9b2b2a2 33#include <linux/lmb.h>
14cf11af
PM
34
35#include <asm/pgalloc.h>
36#include <asm/prom.h>
37#include <asm/io.h>
14cf11af
PM
38#include <asm/pgtable.h>
39#include <asm/mmu.h>
40#include <asm/smp.h>
41#include <asm/machdep.h>
42#include <asm/btext.h>
43#include <asm/tlb.h>
7c8c6b97 44#include <asm/sections.h>
5f25f065 45#include <asm/system.h>
14cf11af 46
14cf11af
PM
47#include "mmu_decl.h"
48
49#if defined(CONFIG_KERNEL_START_BOOL) || defined(CONFIG_LOWMEM_SIZE_BOOL)
/* The amount of lowmem must be within 0xF0000000 - KERNELBASE. */
51#if (CONFIG_LOWMEM_SIZE > (0xF0000000 - KERNELBASE))
52#error "You must adjust CONFIG_LOWMEM_SIZE or CONFIG_START_KERNEL"
53#endif
54#endif
55#define MAX_LOW_MEM CONFIG_LOWMEM_SIZE
56
57DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
58
2bf3016f
SR
59phys_addr_t total_memory;
60phys_addr_t total_lowmem;
14cf11af 61
37dd2bad
KG
62phys_addr_t memstart_addr = (phys_addr_t)~0ull;
63EXPORT_SYMBOL(memstart_addr);
64phys_addr_t kernstart_addr;
65EXPORT_SYMBOL(kernstart_addr);
99c62dd7 66phys_addr_t lowmem_end_addr;
14cf11af 67
14cf11af
PM
68int boot_mapsize;
69#ifdef CONFIG_PPC_PMAC
70unsigned long agp_special_page;
5c8c56eb 71EXPORT_SYMBOL(agp_special_page);
14cf11af
PM
72#endif
73
14cf11af 74void MMU_init(void);
14cf11af
PM
75
76/* XXX should be in current.h -- paulus */
77extern struct task_struct *current_set[NR_CPUS];
78
14cf11af
PM
79/*
80 * this tells the system to map all of ram with the segregs
81 * (i.e. page tables) instead of the bats.
82 * -- Cort
83 */
84int __map_without_bats;
85int __map_without_ltlbs;
86
14cf11af
PM
87/* max amount of low RAM to map in */
88unsigned long __max_low_memory = MAX_LOW_MEM;
89
90/*
09b5e63f 91 * address of the limit of what is accessible with initial MMU setup -
7c8c6b97 92 * 256MB usually, but only 16MB on 601.
14cf11af 93 */
09b5e63f 94phys_addr_t __initial_memory_limit_addr = (phys_addr_t)0x10000000;
14cf11af
PM
95
96/*
97 * Check for command-line options that affect what MMU_init will do.
98 */
99void MMU_setup(void)
100{
101 /* Check for nobats option (used in mapin_ram). */
102 if (strstr(cmd_line, "nobats")) {
103 __map_without_bats = 1;
104 }
105
106 if (strstr(cmd_line, "noltlbs")) {
107 __map_without_ltlbs = 1;
108 }
88df6e90
BH
109#ifdef CONFIG_DEBUG_PAGEALLOC
110 __map_without_bats = 1;
111 __map_without_ltlbs = 1;
112#endif
14cf11af
PM
113}
114
115/*
116 * MMU_init sets up the basic memory mappings for the kernel,
117 * including both RAM and possibly some I/O regions,
118 * and sets up the page tables and the MMU hardware ready to go.
119 */
/*
 * MMU_init sets up the basic memory mappings for the kernel,
 * including both RAM and possibly some I/O regions,
 * and sets up the page tables and the MMU hardware ready to go.
 *
 * The ordering here matters: CPU-specific access limits must be set
 * before any lmb allocations, lowmem must be sized before MMU_init_hw()
 * and mapin_ram() run, and btext is only unmapped at the very end once
 * progress reporting is no longer needed.
 */
void __init MMU_init(void)
{
	if (ppc_md.progress)
		ppc_md.progress("MMU:enter", 0x111);

	/* 601 can only access 16MB at the moment */
	if (PVR_VER(mfspr(SPRN_PVR)) == 1)
		__initial_memory_limit_addr = 0x01000000;
	/* 8xx can only access 8MB at the moment */
	if (PVR_VER(mfspr(SPRN_PVR)) == 0x50)
		__initial_memory_limit_addr = 0x00800000;

	/* parse args from command line */
	MMU_setup();

	/* 32-bit init code assumes a single contiguous memory region;
	 * anything past the first lmb region is dropped with a warning. */
	if (lmb.memory.cnt > 1) {
		lmb.memory.cnt = 1;
		lmb_analyze();
		printk(KERN_WARNING "Only using first contiguous memory region");
	}

	total_lowmem = total_memory = lmb_end_of_DRAM() - memstart_addr;
	lowmem_end_addr = memstart_addr + total_lowmem;

#ifdef CONFIG_FSL_BOOKE
	/* Freescale Book-E parts expect lowmem to be mapped by fixed TLB
	 * entries, so we need to adjust lowmem to match the amount we can map
	 * in the fixed entries */
	adjust_total_lowmem();
#endif /* CONFIG_FSL_BOOKE */

	/* Clamp lowmem to the configured maximum; without HIGHMEM any
	 * memory beyond that limit is unusable, so trim lmb to match. */
	if (total_lowmem > __max_low_memory) {
		total_lowmem = __max_low_memory;
		lowmem_end_addr = memstart_addr + total_lowmem;
#ifndef CONFIG_HIGHMEM
		total_memory = total_lowmem;
		lmb_enforce_memory_limit(lowmem_end_addr);
		lmb_analyze();
#endif /* CONFIG_HIGHMEM */
	}

	/* Initialize the MMU hardware */
	if (ppc_md.progress)
		ppc_md.progress("MMU:hw init", 0x300);
	MMU_init_hw();

	/* Map in all of RAM starting at KERNELBASE */
	if (ppc_md.progress)
		ppc_md.progress("MMU:mapin", 0x301);
	mapin_ram();

	/* With HIGHMEM the ioremap area sits below the kmap window;
	 * otherwise start it at a fixed high address. */
#ifdef CONFIG_HIGHMEM
	ioremap_base = PKMAP_BASE;
#else
	ioremap_base = 0xfe000000UL;	/* for now, could be 0xfffff000 */
#endif /* CONFIG_HIGHMEM */
	ioremap_bot = ioremap_base;

	/* Map in I/O resources */
	if (ppc_md.progress)
		ppc_md.progress("MMU:setio", 0x302);

	if (ppc_md.progress)
		ppc_md.progress("MMU:exit", 0x211);

	/* From now on, btext is no longer BAT mapped if it was at all */
#ifdef CONFIG_BOOTX_TEXT
	btext_unmap();
#endif
}
190
191/* This is only called until mem_init is done. */
192void __init *early_get_page(void)
193{
194 void *p;
195
196 if (init_bootmem_done) {
197 p = alloc_bootmem_pages(PAGE_SIZE);
198 } else {
7c8c6b97 199 p = __va(lmb_alloc_base(PAGE_SIZE, PAGE_SIZE,
09b5e63f 200 __initial_memory_limit_addr));
14cf11af
PM
201 }
202 return p;
203}
204
205/* Free up now-unused memory */
206static void free_sec(unsigned long start, unsigned long end, const char *name)
207{
208 unsigned long cnt = 0;
209
210 while (start < end) {
211 ClearPageReserved(virt_to_page(start));
7835e98b 212 init_page_count(virt_to_page(start));
14cf11af
PM
213 free_page(start);
214 cnt++;
215 start += PAGE_SIZE;
216 }
217 if (cnt) {
218 printk(" %ldk %s", cnt << (PAGE_SHIFT - 10), name);
219 totalram_pages += cnt;
220 }
221}
222
223void free_initmem(void)
224{
225#define FREESEC(TYPE) \
226 free_sec((unsigned long)(&__ ## TYPE ## _begin), \
227 (unsigned long)(&__ ## TYPE ## _end), \
228 #TYPE);
229
230 printk ("Freeing unused kernel memory:");
231 FREESEC(init);
232 printk("\n");
233 ppc_md.progress = NULL;
234#undef FREESEC
235}
236
#ifdef CONFIG_BLK_DEV_INITRD
/*
 * Return the pages holding the initial ramdisk image to the page
 * allocator.  start/end are kernel virtual addresses; they are assumed
 * to be page-aligned (set up from the bootloader-provided initrd
 * bounds) — TODO confirm against the caller.
 */
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start < end)
		/* KERN_INFO added: the original printk had no log level and
		 * fell through to the default loglevel. */
		printk(KERN_INFO "Freeing initrd memory: %ldk freed\n",
		       (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		/* Reserved pages must have the flag cleared and the
		 * refcount reset before free_page() will accept them. */
		ClearPageReserved(virt_to_page(start));
		init_page_count(virt_to_page(start));
		free_page(start);
		totalram_pages++;
	}
}
#endif
df174e3b
ES
250
#ifdef CONFIG_PROC_KCORE
static struct kcore_list kcore_vmem;

/*
 * Register each physical memory region and the vmalloc area with
 * /proc/kcore so the live kernel image can be inspected with a
 * debugger.  Runs once at boot via module_init.
 */
static int __init setup_kcore(void)
{
	int i;

	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long base;
		unsigned long size;
		struct kcore_list *kcore_mem;

		base = lmb.memory.region[i].base;
		size = lmb.memory.region[i].size;

		/* GFP_KERNEL (was GFP_ATOMIC): module_init runs in process
		 * context and may sleep, so there is no need to dip into
		 * atomic reserves and risk a spurious failure. */
		kcore_mem = kmalloc(sizeof(struct kcore_list), GFP_KERNEL);
		if (!kcore_mem)
			panic("%s: kmalloc failed\n", __func__);

		/* must stay under 32 bits: clamp the region so that
		 * __va(base) + size does not wrap the address space */
		if (0xfffffffful - (unsigned long)__va(base) < size) {
			size = 0xfffffffful - (unsigned long)(__va(base));
			printk(KERN_DEBUG "setup_kcore: restrict size=%lx\n",
			       size);
		}

		kclist_add(kcore_mem, __va(base), size);
	}

	kclist_add(&kcore_vmem, (void *)VMALLOC_START,
		   VMALLOC_END - VMALLOC_START);

	return 0;
}
module_init(setup_kcore);
#endif
This page took 0.288305 seconds and 5 git commands to generate.