Merge http://ftp.arm.linux.org.uk/pub/linux/arm/kernel/git-cur/linux-2.6-arm into...
[deliverable/linux.git] / arch / powerpc / mm / init_32.c
1 /*
2 * PowerPC version
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 *
5 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
6 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
7 * Copyright (C) 1996 Paul Mackerras
8 * PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
9 *
10 * Derived from "arch/i386/mm/init.c"
11 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License
15 * as published by the Free Software Foundation; either version
16 * 2 of the License, or (at your option) any later version.
17 *
18 */
19
20 #include <linux/module.h>
21 #include <linux/sched.h>
22 #include <linux/kernel.h>
23 #include <linux/errno.h>
24 #include <linux/string.h>
25 #include <linux/types.h>
26 #include <linux/mm.h>
27 #include <linux/stddef.h>
28 #include <linux/init.h>
29 #include <linux/bootmem.h>
30 #include <linux/highmem.h>
31 #include <linux/initrd.h>
32 #include <linux/pagemap.h>
33 #include <linux/lmb.h>
34
35 #include <asm/pgalloc.h>
36 #include <asm/prom.h>
37 #include <asm/io.h>
38 #include <asm/mmu_context.h>
39 #include <asm/pgtable.h>
40 #include <asm/mmu.h>
41 #include <asm/smp.h>
42 #include <asm/machdep.h>
43 #include <asm/btext.h>
44 #include <asm/tlb.h>
45 #include <asm/sections.h>
46 #include <asm/system.h>
47
48 #include "mmu_decl.h"
49
50 #if defined(CONFIG_KERNEL_START_BOOL) || defined(CONFIG_LOWMEM_SIZE_BOOL)
/* The amount of lowmem must be within 0xF0000000 - KERNELBASE. */
52 #if (CONFIG_LOWMEM_SIZE > (0xF0000000 - KERNELBASE))
#error "You must adjust CONFIG_LOWMEM_SIZE or CONFIG_KERNEL_START"
54 #endif
55 #endif
56 #define MAX_LOW_MEM CONFIG_LOWMEM_SIZE
57
/* Per-CPU mmu_gather state (see <asm/tlb.h>) used for batched TLB teardown. */
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/* Total RAM discovered, and the portion of it treated as lowmem;
 * both are derived in MMU_init() from the LMB map. */
phys_addr_t total_memory;
phys_addr_t total_lowmem;

/* Physical address where RAM starts; initialised to an all-ones
 * sentinel until the platform/boot code fills it in. */
phys_addr_t memstart_addr = (phys_addr_t)~0ull;
EXPORT_SYMBOL(memstart_addr);
/* Physical load address of the kernel image — presumably set by early
 * boot code elsewhere; not written in this file. */
phys_addr_t kernstart_addr;
EXPORT_SYMBOL(kernstart_addr);
/* First physical address beyond lowmem (memstart_addr + total_lowmem). */
phys_addr_t lowmem_end_addr;

int boot_mapsize;
#ifdef CONFIG_PPC_PMAC
unsigned long agp_special_page;
EXPORT_SYMBOL(agp_special_page);
#endif

void MMU_init(void);

/* XXX should be in current.h -- paulus */
extern struct task_struct *current_set[NR_CPUS];

/*
 * this tells the system to map all of ram with the segregs
 * (i.e. page tables) instead of the bats.
 * -- Cort
 */
int __map_without_bats;
int __map_without_ltlbs;

/* max amount of low RAM to map in */
unsigned long __max_low_memory = MAX_LOW_MEM;

/*
 * address of the limit of what is accessible with initial MMU setup -
 * 256MB usually, but only 16MB on 601.
 */
phys_addr_t __initial_memory_limit_addr = (phys_addr_t)0x10000000;
96
97 /*
98 * Check for command-line options that affect what MMU_init will do.
99 */
100 void MMU_setup(void)
101 {
102 /* Check for nobats option (used in mapin_ram). */
103 if (strstr(cmd_line, "nobats")) {
104 __map_without_bats = 1;
105 }
106
107 if (strstr(cmd_line, "noltlbs")) {
108 __map_without_ltlbs = 1;
109 }
110 #ifdef CONFIG_DEBUG_PAGEALLOC
111 __map_without_bats = 1;
112 __map_without_ltlbs = 1;
113 #endif
114 }
115
116 /*
117 * MMU_init sets up the basic memory mappings for the kernel,
118 * including both RAM and possibly some I/O regions,
119 * and sets up the page tables and the MMU hardware ready to go.
120 */
121 void __init MMU_init(void)
122 {
123 if (ppc_md.progress)
124 ppc_md.progress("MMU:enter", 0x111);
125
126 /* 601 can only access 16MB at the moment */
127 if (PVR_VER(mfspr(SPRN_PVR)) == 1)
128 __initial_memory_limit_addr = 0x01000000;
129 /* 8xx can only access 8MB at the moment */
130 if (PVR_VER(mfspr(SPRN_PVR)) == 0x50)
131 __initial_memory_limit_addr = 0x00800000;
132
133 /* parse args from command line */
134 MMU_setup();
135
136 if (lmb.memory.cnt > 1) {
137 lmb.memory.cnt = 1;
138 lmb_analyze();
139 printk(KERN_WARNING "Only using first contiguous memory region");
140 }
141
142 total_lowmem = total_memory = lmb_end_of_DRAM() - memstart_addr;
143 lowmem_end_addr = memstart_addr + total_lowmem;
144
145 #ifdef CONFIG_FSL_BOOKE
146 /* Freescale Book-E parts expect lowmem to be mapped by fixed TLB
147 * entries, so we need to adjust lowmem to match the amount we can map
148 * in the fixed entries */
149 adjust_total_lowmem();
150 #endif /* CONFIG_FSL_BOOKE */
151
152 if (total_lowmem > __max_low_memory) {
153 total_lowmem = __max_low_memory;
154 lowmem_end_addr = memstart_addr + total_lowmem;
155 #ifndef CONFIG_HIGHMEM
156 total_memory = total_lowmem;
157 lmb_enforce_memory_limit(lowmem_end_addr);
158 lmb_analyze();
159 #endif /* CONFIG_HIGHMEM */
160 }
161
162 /* Initialize the MMU hardware */
163 if (ppc_md.progress)
164 ppc_md.progress("MMU:hw init", 0x300);
165 MMU_init_hw();
166
167 /* Map in all of RAM starting at KERNELBASE */
168 if (ppc_md.progress)
169 ppc_md.progress("MMU:mapin", 0x301);
170 mapin_ram();
171
172 #ifdef CONFIG_HIGHMEM
173 ioremap_base = PKMAP_BASE;
174 #else
175 ioremap_base = 0xfe000000UL; /* for now, could be 0xfffff000 */
176 #endif /* CONFIG_HIGHMEM */
177 ioremap_bot = ioremap_base;
178
179 /* Map in I/O resources */
180 if (ppc_md.progress)
181 ppc_md.progress("MMU:setio", 0x302);
182
183 /* Initialize the context management stuff */
184 mmu_context_init();
185
186 if (ppc_md.progress)
187 ppc_md.progress("MMU:exit", 0x211);
188
189 /* From now on, btext is no longer BAT mapped if it was at all */
190 #ifdef CONFIG_BOOTX_TEXT
191 btext_unmap();
192 #endif
193 }
194
195 /* This is only called until mem_init is done. */
196 void __init *early_get_page(void)
197 {
198 void *p;
199
200 if (init_bootmem_done) {
201 p = alloc_bootmem_pages(PAGE_SIZE);
202 } else {
203 p = __va(lmb_alloc_base(PAGE_SIZE, PAGE_SIZE,
204 __initial_memory_limit_addr));
205 }
206 return p;
207 }
208
209 /* Free up now-unused memory */
210 static void free_sec(unsigned long start, unsigned long end, const char *name)
211 {
212 unsigned long cnt = 0;
213
214 while (start < end) {
215 ClearPageReserved(virt_to_page(start));
216 init_page_count(virt_to_page(start));
217 free_page(start);
218 cnt++;
219 start += PAGE_SIZE;
220 }
221 if (cnt) {
222 printk(" %ldk %s", cnt << (PAGE_SHIFT - 10), name);
223 totalram_pages += cnt;
224 }
225 }
226
227 void free_initmem(void)
228 {
229 #define FREESEC(TYPE) \
230 free_sec((unsigned long)(&__ ## TYPE ## _begin), \
231 (unsigned long)(&__ ## TYPE ## _end), \
232 #TYPE);
233
234 printk ("Freeing unused kernel memory:");
235 FREESEC(init);
236 printk("\n");
237 ppc_md.progress = NULL;
238 #undef FREESEC
239 }
240
241 #ifdef CONFIG_BLK_DEV_INITRD
242 void free_initrd_mem(unsigned long start, unsigned long end)
243 {
244 if (start < end)
245 printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
246 for (; start < end; start += PAGE_SIZE) {
247 ClearPageReserved(virt_to_page(start));
248 init_page_count(virt_to_page(start));
249 free_page(start);
250 totalram_pages++;
251 }
252 }
253 #endif
254
255 #ifdef CONFIG_PROC_KCORE
256 static struct kcore_list kcore_vmem;
257
258 static int __init setup_kcore(void)
259 {
260 int i;
261
262 for (i = 0; i < lmb.memory.cnt; i++) {
263 unsigned long base;
264 unsigned long size;
265 struct kcore_list *kcore_mem;
266
267 base = lmb.memory.region[i].base;
268 size = lmb.memory.region[i].size;
269
270 kcore_mem = kmalloc(sizeof(struct kcore_list), GFP_ATOMIC);
271 if (!kcore_mem)
272 panic("%s: kmalloc failed\n", __func__);
273
274 /* must stay under 32 bits */
275 if ( 0xfffffffful - (unsigned long)__va(base) < size) {
276 size = 0xfffffffful - (unsigned long)(__va(base));
277 printk(KERN_DEBUG "setup_kcore: restrict size=%lx\n",
278 size);
279 }
280
281 kclist_add(kcore_mem, __va(base), size);
282 }
283
284 kclist_add(&kcore_vmem, (void *)VMALLOC_START,
285 VMALLOC_END-VMALLOC_START);
286
287 return 0;
288 }
289 module_init(setup_kcore);
290 #endif
This page took 0.041457 seconds and 6 git commands to generate.