/*
 *  linux/arch/arm/mm/init.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/highmem.h>

#include <asm/mach-types.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

static unsigned long phys_initrd_start __initdata = 0;
static unsigned long phys_initrd_size __initdata = 0;

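/*
 * Parse the "initrd=start,size" command-line option, recording the
 * initrd's physical start address and size for use later in boot.
 */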
static void __init early_initrd(char **p)
{
        unsigned long start, size;

        start = memparse(*p, p);
        if (**p == ',') {
                size = memparse((*p) + 1, p);

                phys_initrd_start = start;
                phys_initrd_size = size;
        }
}
__early_param("initrd=", early_initrd);

static int __init parse_tag_initrd(const struct tag *tag)
{
        printk(KERN_WARNING "ATAG_INITRD is deprecated; "
               "please update your bootloader.\n");
        phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
        phys_initrd_size = tag->u.initrd.size;
        return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

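/* Unlike ATAG_INITRD, ATAG_INITRD2 passes the start address as a physical address. */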
static int __init parse_tag_initrd2(const struct tag *tag)
{
        phys_initrd_start = tag->u.initrd.start;
        phys_initrd_size = tag->u.initrd.size;
        return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);

/*
 * This keeps memory configuration data used by a couple memory
 * initialization functions, as well as show_mem() for the skipping
 * of holes in the memory map.  It is populated by arm_add_memory().
 */
struct meminfo meminfo;

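/*
 * Walk each online node's memory banks and print a summary of page
 * usage (free, reserved, slab, shared and swap-cached pages).
 */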
void show_mem(void)
{
        int free = 0, total = 0, reserved = 0;
        int shared = 0, cached = 0, slab = 0, node, i;
        struct meminfo * mi = &meminfo;

        printk("Mem-info:\n");
        show_free_areas();
        for_each_online_node(node) {
                pg_data_t *n = NODE_DATA(node);
                struct page *map = pgdat_page_nr(n, 0) - n->node_start_pfn;

                for_each_nodebank (i,mi,node) {
                        struct membank *bank = &mi->bank[i];
                        unsigned int pfn1, pfn2;
                        struct page *page, *end;

                        pfn1 = bank_pfn_start(bank);
                        pfn2 = bank_pfn_end(bank);

                        page = map + pfn1;
                        end = map + pfn2;

                        do {
                                total++;
                                if (PageReserved(page))
                                        reserved++;
                                else if (PageSwapCache(page))
                                        cached++;
                                else if (PageSlab(page))
                                        slab++;
                                else if (!page_count(page))
                                        free++;
                                else
                                        shared += page_count(page) - 1;
                                page++;
                        } while (page < end);
                }
        }

        printk("%d pages of RAM\n", total);
        printk("%d free pages\n", free);
        printk("%d reserved pages\n", reserved);
        printk("%d slab pages\n", slab);
        printk("%d pages shared\n", shared);
        printk("%d pages swap cached\n", cached);
}

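/*
 * Determine the page frame number limits spanned by this node's banks:
 * *min is the first pfn, *max_low the end of lowmem, and *max_high the
 * end of all memory including highmem banks.
 */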
static void __init find_node_limits(int node, struct meminfo *mi,
        unsigned long *min, unsigned long *max_low, unsigned long *max_high)
{
        int i;

        *min = -1UL;
        *max_low = *max_high = 0;

        for_each_nodebank(i, mi, node) {
                struct membank *bank = &mi->bank[i];
                unsigned long start, end;

                start = bank_pfn_start(bank);
                end = bank_pfn_end(bank);

                if (*min > start)
                        *min = start;
                if (*max_high < end)
                        *max_high = end;
                if (bank->highmem)
                        continue;
                if (*max_low < end)
                        *max_low = end;
        }
}

/*
 * FIXME: We really want to avoid allocating the bootmap bitmap
 * over the top of the initrd.  Hopefully, this is located towards
 * the start of a bank, so if we allocate the bootmap bitmap at
 * the end, we won't clash.
 */
static unsigned int __init
find_bootmap_pfn(int node, struct meminfo *mi, unsigned int bootmap_pages)
{
        unsigned int start_pfn, i, bootmap_pfn;

        start_pfn = PAGE_ALIGN(__pa(_end)) >> PAGE_SHIFT;
        bootmap_pfn = 0;

        for_each_nodebank(i, mi, node) {
                struct membank *bank = &mi->bank[i];
                unsigned int start, end;

                start = bank_pfn_start(bank);
                end = bank_pfn_end(bank);

                if (end < start_pfn)
                        continue;

                if (start < start_pfn)
                        start = start_pfn;

                if (end <= start)
                        continue;

                if (end - start >= bootmap_pages) {
                        bootmap_pfn = start;
                        break;
                }
        }

        if (bootmap_pfn == 0)
                BUG();

        return bootmap_pfn;
}

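/*
 * Check that the initrd lies entirely within one memory bank.  Returns
 * the node holding the initrd, -1 if the initrd falls outside physical
 * memory (in which case it is disabled), or -2 if no initrd was given.
 */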
static int __init check_initrd(struct meminfo *mi)
{
        int initrd_node = -2;
#ifdef CONFIG_BLK_DEV_INITRD
        unsigned long end = phys_initrd_start + phys_initrd_size;

        /*
         * Make sure that the initrd is within a valid area of
         * memory.
         */
        if (phys_initrd_size) {
                unsigned int i;

                initrd_node = -1;

                for (i = 0; i < mi->nr_banks; i++) {
                        struct membank *bank = &mi->bank[i];
                        if (bank_phys_start(bank) <= phys_initrd_start &&
                            end <= bank_phys_end(bank))
                                initrd_node = bank->node;
                }
        }

        if (initrd_node == -1) {
                printk(KERN_ERR "INITRD: 0x%08lx+0x%08lx extends beyond "
                       "physical memory - disabling initrd\n",
                       phys_initrd_start, phys_initrd_size);
                phys_initrd_start = phys_initrd_size = 0;
        }
#endif

        return initrd_node;
}

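/*
 * Create the kernel's direct mapping for a single memory bank via
 * create_mapping().  A no-op on !CONFIG_MMU configurations.
 */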
static inline void map_memory_bank(struct membank *bank)
{
#ifdef CONFIG_MMU
        struct map_desc map;

        map.pfn = bank_pfn_start(bank);
        map.virtual = __phys_to_virt(bank_phys_start(bank));
        map.length = bank_phys_size(bank);
        map.type = MT_MEMORY;

        create_mapping(&map);
#endif
}

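/*
 * Map this node's lowmem banks, set up its bootmem allocator, and
 * register the banks with sparsemem via memory_present().
 */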
static void __init bootmem_init_node(int node, struct meminfo *mi,
        unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long boot_pfn;
        unsigned int boot_pages;
        pg_data_t *pgdat;
        int i;

        /*
         * Map the memory banks for this node.
         */
        for_each_nodebank(i, mi, node) {
                struct membank *bank = &mi->bank[i];

                if (!bank->highmem)
                        map_memory_bank(bank);
        }

        /*
         * Allocate the bootmem bitmap page.
         */
        boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
        boot_pfn = find_bootmap_pfn(node, mi, boot_pages);

        /*
         * Initialise the bootmem allocator for this node, handing the
         * memory banks over to bootmem.
         */
        node_set_online(node);
        pgdat = NODE_DATA(node);
        init_bootmem_node(pgdat, boot_pfn, start_pfn, end_pfn);

        for_each_nodebank(i, mi, node) {
                struct membank *bank = &mi->bank[i];
                if (!bank->highmem)
                        free_bootmem_node(pgdat, bank_phys_start(bank), bank_phys_size(bank));
                memory_present(node, bank_pfn_start(bank), bank_pfn_end(bank));
        }

        /*
         * Reserve the bootmem bitmap for this node.
         */
        reserve_bootmem_node(pgdat, boot_pfn << PAGE_SHIFT,
                             boot_pages << PAGE_SHIFT, BOOTMEM_DEFAULT);
}

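/*
 * Reserve the initrd's pages with the bootmem allocator and record its
 * virtual start/end addresses.  The initrd is disabled if its memory
 * is already in use.
 */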
static void __init bootmem_reserve_initrd(int node)
{
#ifdef CONFIG_BLK_DEV_INITRD
        pg_data_t *pgdat = NODE_DATA(node);
        int res;

        res = reserve_bootmem_node(pgdat, phys_initrd_start,
                                   phys_initrd_size, BOOTMEM_EXCLUSIVE);

        if (res == 0) {
                initrd_start = __phys_to_virt(phys_initrd_start);
                initrd_end = initrd_start + phys_initrd_size;
        } else {
                printk(KERN_ERR
                       "INITRD: 0x%08lx+0x%08lx overlaps in-use "
                       "memory region - disabling initrd\n",
                       phys_initrd_start, phys_initrd_size);
        }
#endif
}

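/*
 * Calculate the zone sizes and hole sizes for this node and hand them
 * to free_area_init_node() to set up the node's free lists.
 */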
static void __init bootmem_free_node(int node, struct meminfo *mi)
{
        unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
        unsigned long min, max_low, max_high;
        int i;

        find_node_limits(node, mi, &min, &max_low, &max_high);

        /*
         * initialise the zones within this node.
         */
        memset(zone_size, 0, sizeof(zone_size));

        /*
         * The size of this node has already been determined.  If we need
         * to do anything fancy with the allocation of this memory to the
         * zones, now is the time to do it.
         */
        zone_size[0] = max_low - min;
#ifdef CONFIG_HIGHMEM
        zone_size[ZONE_HIGHMEM] = max_high - max_low;
#endif

        /*
         * For each bank in this node, calculate the size of the holes.
         *  holes = node_size - sum(bank_sizes_in_node)
         */
        memcpy(zhole_size, zone_size, sizeof(zhole_size));
        for_each_nodebank(i, mi, node) {
                int idx = 0;
#ifdef CONFIG_HIGHMEM
                if (mi->bank[i].highmem)
                        idx = ZONE_HIGHMEM;
#endif
                zhole_size[idx] -= bank_pfn_size(&mi->bank[i]);
        }

        /*
         * Adjust the sizes according to any special requirements for
         * this machine type.
         */
        arch_adjust_zones(node, zone_size, zhole_size);

        free_area_init_node(node, zone_size, min, zhole_size);
}

void __init bootmem_init(void)
{
        struct meminfo *mi = &meminfo;
        unsigned long min, max_low, max_high;
        int node, initrd_node;

        /*
         * Locate which node contains the ramdisk image, if any.
         */
        initrd_node = check_initrd(mi);

        max_low = max_high = 0;

        /*
         * Run through each node initialising the bootmem allocator.
         */
        for_each_node(node) {
                unsigned long node_low, node_high;

                find_node_limits(node, mi, &min, &node_low, &node_high);

                if (node_low > max_low)
                        max_low = node_low;
                if (node_high > max_high)
                        max_high = node_high;

                /*
                 * If there is no memory in this node, ignore it.
                 * (We can't have nodes which have no lowmem)
                 */
                if (node_low == 0)
                        continue;

                bootmem_init_node(node, mi, min, node_low);

                /*
                 * Reserve any special node zero regions.
                 */
                if (node == 0)
                        reserve_node_zero(NODE_DATA(node));

                /*
                 * If the initrd is in this node, reserve its memory.
                 */
                if (node == initrd_node)
                        bootmem_reserve_initrd(node);
        }

        /*
         * sparse_init() needs the bootmem allocator up and running.
         */
        sparse_init();

        /*
         * Now free memory in each node - free_area_init_node needs
         * the sparse mem_map arrays initialized by sparse_init()
         * for memmap_init_zone(), otherwise all PFNs are invalid.
         */
        for_each_node(node)
                bootmem_free_node(node, mi);

        high_memory = __va((max_low << PAGE_SHIFT) - 1) + 1;

        /*
         * This doesn't seem to be used by the Linux memory manager any
         * more, but is used by ll_rw_block.  If we can get rid of it, we
         * also get rid of some of the stuff above as well.
         *
         * Note: max_low_pfn and max_pfn reflect the number of _pages_ in
         * the system, not the maximum PFN.
         */
        max_low_pfn = max_low - PHYS_PFN_OFFSET;
        max_pfn = max_high - PHYS_PFN_OFFSET;
}

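/*
 * Hand a range of pages back to the page allocator and return the
 * number of pages freed; "s" names the region for the boot log.
 */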
static inline int free_area(unsigned long pfn, unsigned long end, char *s)
{
        unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10);

        for (; pfn < end; pfn++) {
                struct page *page = pfn_to_page(pfn);
                ClearPageReserved(page);
                init_page_count(page);
                __free_page(page);
                pages++;
        }

        if (size && s)
                printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);

        return pages;
}

static inline void
free_memmap(int node, unsigned long start_pfn, unsigned long end_pfn)
{
        struct page *start_pg, *end_pg;
        unsigned long pg, pgend;

        /*
         * Convert start_pfn/end_pfn to a struct page pointer.
         */
        start_pg = pfn_to_page(start_pfn);
        end_pg = pfn_to_page(end_pfn);

        /*
         * Convert to physical addresses, and
         * round start upwards and end downwards.
         */
        pg = PAGE_ALIGN(__pa(start_pg));
        pgend = __pa(end_pg) & PAGE_MASK;

        /*
         * If there are free pages between these,
         * free the section of the memmap array.
         */
        if (pg < pgend)
                free_bootmem_node(NODE_DATA(node), pg, pgend - pg);
}

/*
 * The mem_map array can get very big.  Free the unused area of the memory map.
 */
static void __init free_unused_memmap_node(int node, struct meminfo *mi)
{
        unsigned long bank_start, prev_bank_end = 0;
        unsigned int i;

        /*
         * [FIXME] This relies on each bank being in address order.  This
         * may not be the case, especially if the user has provided the
         * information on the command line.
         */
        for_each_nodebank(i, mi, node) {
                struct membank *bank = &mi->bank[i];

                bank_start = bank_pfn_start(bank);
                if (bank_start < prev_bank_end) {
                        printk(KERN_ERR "MEM: unordered memory banks. "
                               "Not freeing memmap.\n");
                        break;
                }

                /*
                 * If we had a previous bank, and there is a space
                 * between the current bank and the previous, free it.
                 */
                if (prev_bank_end && prev_bank_end != bank_start)
                        free_memmap(node, prev_bank_end, bank_start);

                prev_bank_end = bank_pfn_end(bank);
        }
}

/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
        unsigned int codesize, datasize, initsize;
        int i, node;

#ifndef CONFIG_DISCONTIGMEM
        max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;
#endif

        /* this will put all unused low memory onto the freelists */
        for_each_online_node(node) {
                pg_data_t *pgdat = NODE_DATA(node);

                free_unused_memmap_node(node, &meminfo);

                if (pgdat->node_spanned_pages != 0)
                        totalram_pages += free_all_bootmem_node(pgdat);
        }

#ifdef CONFIG_SA1111
        /* now that our DMA memory is actually so designated, we can free it */
        totalram_pages += free_area(PHYS_PFN_OFFSET,
                                    __phys_to_pfn(__pa(swapper_pg_dir)), NULL);
#endif

#ifdef CONFIG_HIGHMEM
        /* set highmem page free */
        for_each_online_node(node) {
                for_each_nodebank (i, &meminfo, node) {
                        unsigned long start = bank_pfn_start(&meminfo.bank[i]);
                        unsigned long end = bank_pfn_end(&meminfo.bank[i]);
                        if (start >= max_low_pfn + PHYS_PFN_OFFSET)
                                totalhigh_pages += free_area(start, end, NULL);
                }
        }
        totalram_pages += totalhigh_pages;
#endif

        /*
         * Since our memory may not be contiguous, calculate the
         * real number of pages we have in this system
         */
        printk(KERN_INFO "Memory:");
        num_physpages = 0;
        for (i = 0; i < meminfo.nr_banks; i++) {
                num_physpages += bank_pfn_size(&meminfo.bank[i]);
                printk(" %ldMB", bank_phys_size(&meminfo.bank[i]) >> 20);
        }
        printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));

        codesize = _etext - _text;
        datasize = _end - _data;
        initsize = __init_end - __init_begin;

        printk(KERN_NOTICE "Memory: %luKB available (%dK code, "
               "%dK data, %dK init, %luK highmem)\n",
               (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
               codesize >> 10, datasize >> 10, initsize >> 10,
               (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)));

        if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
                extern int sysctl_overcommit_memory;
                /*
                 * On a machine this small we won't get
                 * anywhere without overcommit, so turn
                 * it on by default.
                 */
                sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
        }
}

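/*
 * Free the memory occupied by the kernel's __init sections once boot
 * has completed.
 */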
void free_initmem(void)
{
        if (!machine_is_integrator() && !machine_is_cintegrator())
                totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)),
                                            __phys_to_pfn(__pa(__init_end)),
                                            "init");
}

#ifdef CONFIG_BLK_DEV_INITRD

static int keep_initrd;

void free_initrd_mem(unsigned long start, unsigned long end)
{
        if (!keep_initrd)
                totalram_pages += free_area(__phys_to_pfn(__pa(start)),
                                            __phys_to_pfn(__pa(end)),
                                            "initrd");
}

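/* "keepinitrd" on the command line prevents the initrd memory from being freed. */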
static int __init keepinitrd_setup(char *__unused)
{
        keep_initrd = 1;
        return 1;
}

__setup("keepinitrd", keepinitrd_setup);
#endif