/*
 *  linux/arch/arm/mm/init.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/highmem.h>

#include <asm/mach-types.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/fixmap.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>
static unsigned long phys_initrd_start __initdata = 0;
static unsigned long phys_initrd_size __initdata = 0;
static int __init early_initrd(char *p)
{
	unsigned long start, size;
	char *endp;

	start = memparse(p, &endp);
	if (*endp == ',') {
		size = memparse(endp + 1, NULL);

		phys_initrd_start = start;
		phys_initrd_size = size;
	}
	return 0;
}
early_param("initrd", early_initrd);
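/*
 * Example (illustrative values): booting with "initrd=0x00800000,8M"
 * records a physical start of 0x00800000 and a size of 8 MiB here;
 * memparse() accepts the usual K/M/G suffixes.  The region itself is
 * only reserved later, in bootmem_reserve_initrd().
 */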
static int __init parse_tag_initrd(const struct tag *tag)
{
	printk(KERN_WARNING "ATAG_INITRD is deprecated; "
		"please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);
static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);
/*
 * This keeps memory configuration data used by a couple memory
 * initialization functions, as well as show_mem() for the skipping
 * of holes in the memory map.  It is populated by arm_add_memory().
 */
struct meminfo meminfo;
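/*
 * Each struct membank in meminfo.bank[] records the physical start,
 * size and owning node of one bank of RAM (plus a flag marking
 * highmem banks); the bank_pfn_*() and bank_phys_*() helpers used
 * throughout this file are accessors for those fields.
 */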
void show_mem(void)
{
	int free = 0, total = 0, reserved = 0;
	int shared = 0, cached = 0, slab = 0, node, i;
	struct meminfo * mi = &meminfo;

	printk("Mem-info:\n");

	for_each_online_node(node) {
		pg_data_t *n = NODE_DATA(node);
		struct page *map = pgdat_page_nr(n, 0) - n->node_start_pfn;

		for_each_nodebank (i, mi, node) {
			struct membank *bank = &mi->bank[i];
			unsigned int pfn1, pfn2;
			struct page *page, *end;

			pfn1 = bank_pfn_start(bank);
			pfn2 = bank_pfn_end(bank);

			page = map + pfn1;
			end  = map + pfn2;

			do {
				total++;
				if (PageReserved(page))
					reserved++;
				else if (PageSwapCache(page))
					cached++;
				else if (PageSlab(page))
					slab++;
				else if (!page_count(page))
					free++;
				else
					shared += page_count(page) - 1;
				page++;
			} while (page < end);
		}
	}

	printk("%d pages of RAM\n", total);
	printk("%d free pages\n", free);
	printk("%d reserved pages\n", reserved);
	printk("%d slab pages\n", slab);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
}
static void __init find_node_limits(int node, struct meminfo *mi,
	unsigned long *min, unsigned long *max_low, unsigned long *max_high)
{
	int i;

	*min = -1UL;
	*max_low = *max_high = 0;

	for_each_nodebank(i, mi, node) {
		struct membank *bank = &mi->bank[i];
		unsigned long start, end;

		start = bank_pfn_start(bank);
		end = bank_pfn_end(bank);

		/* track the lowest and highest page frames in this node */
		if (*min > start)
			*min = start;
		if (*max_high < end)
			*max_high = end;
		if (bank->highmem)
			continue;
		if (*max_low < end)
			*max_low = end;
	}
}
/*
 * FIXME: We really want to avoid allocating the bootmap bitmap
 * over the top of the initrd.  Hopefully, this is located towards
 * the start of a bank, so if we allocate the bootmap bitmap at
 * the end, we won't clash.
 */
static unsigned int __init
find_bootmap_pfn(int node, struct meminfo *mi, unsigned int bootmap_pages)
{
	unsigned int start_pfn, i, bootmap_pfn;

	start_pfn   = PAGE_ALIGN(__pa(_end)) >> PAGE_SHIFT;
	bootmap_pfn = 0;

	for_each_nodebank(i, mi, node) {
		struct membank *bank = &mi->bank[i];
		unsigned int start, end;

		start = bank_pfn_start(bank);
		end   = bank_pfn_end(bank);

		if (end < start_pfn)
			continue;

		if (start < start_pfn)
			start = start_pfn;

		if (end <= start)
			continue;

		if (end - start >= bootmap_pages) {
			bootmap_pfn = start;
			break;
		}
	}

	if (bootmap_pfn == 0)
		BUG();

	return bootmap_pfn;
}
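/*
 * Sizing example (illustrative numbers): the bootmem bitmap needs one
 * bit per page frame, so a 128 MB bank of 4 kB pages (32768 pfns)
 * needs a 4 kB bitmap, i.e. a single page, which the search above
 * places at or after the page-aligned end of the kernel image.
 */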
static int __init check_initrd(struct meminfo *mi)
{
	int initrd_node = -2;
#ifdef CONFIG_BLK_DEV_INITRD
	unsigned long end = phys_initrd_start + phys_initrd_size;

	/*
	 * Make sure that the initrd is within a valid area of
	 * memory.
	 */
	if (phys_initrd_size) {
		unsigned int i;

		initrd_node = -1;

		for (i = 0; i < mi->nr_banks; i++) {
			struct membank *bank = &mi->bank[i];
			if (bank_phys_start(bank) <= phys_initrd_start &&
			    end <= bank_phys_end(bank))
				initrd_node = bank->node;
		}
	}

	if (initrd_node == -1) {
		printk(KERN_ERR "INITRD: 0x%08lx+0x%08lx extends beyond "
		       "physical memory - disabling initrd\n",
		       phys_initrd_start, phys_initrd_size);
		phys_initrd_start = phys_initrd_size = 0;
	}
#endif

	return initrd_node;
}
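/*
 * check_initrd() returns -2 when no initrd was supplied, -1 when the
 * initrd does not fit inside any single memory bank (in which case it
 * has just been disabled), and otherwise the node that contains it.
 */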
static void __init bootmem_init_node(int node, struct meminfo *mi,
	unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long boot_pfn;
	unsigned int boot_pages;
	pg_data_t *pgdat;
	int i;

	/*
	 * Allocate the bootmem bitmap page.
	 */
	boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
	boot_pfn = find_bootmap_pfn(node, mi, boot_pages);

	/*
	 * Initialise the bootmem allocator for this node, handing the
	 * memory banks over to bootmem.
	 */
	node_set_online(node);
	pgdat = NODE_DATA(node);
	init_bootmem_node(pgdat, boot_pfn, start_pfn, end_pfn);

	for_each_nodebank(i, mi, node) {
		struct membank *bank = &mi->bank[i];
		if (!bank->highmem)
			free_bootmem_node(pgdat, bank_phys_start(bank),
					  bank_phys_size(bank));
	}

	/*
	 * Reserve the bootmem bitmap for this node.
	 */
	reserve_bootmem_node(pgdat, boot_pfn << PAGE_SHIFT,
			     boot_pages << PAGE_SHIFT, BOOTMEM_DEFAULT);
}
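/*
 * Note the bootmem handshake above: init_bootmem_node() starts with
 * every page in the node marked reserved, free_bootmem_node() then
 * releases each lowmem bank, and reserve_bootmem_node() re-reserves
 * the pages occupied by the bitmap itself.
 */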
static void __init bootmem_reserve_initrd(int node)
{
#ifdef CONFIG_BLK_DEV_INITRD
	pg_data_t *pgdat = NODE_DATA(node);
	int res;

	res = reserve_bootmem_node(pgdat, phys_initrd_start,
			     phys_initrd_size, BOOTMEM_EXCLUSIVE);

	if (res == 0) {
		initrd_start = __phys_to_virt(phys_initrd_start);
		initrd_end = initrd_start + phys_initrd_size;
	} else {
		printk(KERN_ERR
			"INITRD: 0x%08lx+0x%08lx overlaps in-use "
			"memory region - disabling initrd\n",
			phys_initrd_start, phys_initrd_size);
	}
#endif
}
static void __init bootmem_free_node(int node, struct meminfo *mi)
{
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	unsigned long min, max_low, max_high;
	int i;

	find_node_limits(node, mi, &min, &max_low, &max_high);

	/*
	 * initialise the zones within this node.
	 */
	memset(zone_size, 0, sizeof(zone_size));

	/*
	 * The size of this node has already been determined.  If we need
	 * to do anything fancy with the allocation of this memory to the
	 * zones, now is the time to do it.
	 */
	zone_size[0] = max_low - min;
#ifdef CONFIG_HIGHMEM
	zone_size[ZONE_HIGHMEM] = max_high - max_low;
#endif

	/*
	 * For each bank in this node, calculate the size of the holes.
	 *  holes = node_size - sum(bank_sizes_in_node)
	 */
	memcpy(zhole_size, zone_size, sizeof(zhole_size));
	for_each_nodebank(i, mi, node) {
		int idx = 0;
#ifdef CONFIG_HIGHMEM
		if (mi->bank[i].highmem)
			idx = ZONE_HIGHMEM;
#endif
		zhole_size[idx] -= bank_pfn_size(&mi->bank[i]);
	}

	/*
	 * Adjust the sizes according to any special requirements for
	 * this machine type.
	 */
	arch_adjust_zones(node, zone_size, zhole_size);

	free_area_init_node(node, zone_size, min, zhole_size);
}
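/*
 * Worked example of the hole accounting (made-up numbers): if this
 * node's lowmem spans pfns 0x20000-0x30000 (65536 pages) but its
 * banks only cover 60000 of those pages, zhole_size[0] ends up as
 * 5536, which free_area_init_node() subtracts when computing the
 * zone's present page count.
 */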
#ifndef CONFIG_SPARSEMEM
int pfn_valid(unsigned long pfn)
{
	struct meminfo *mi = &meminfo;
	unsigned int left = 0, right = mi->nr_banks;

	/* binary search the bank array for a bank containing pfn */
	do {
		unsigned int mid = (right + left) / 2;
		struct membank *bank = &mi->bank[mid];

		if (pfn < bank_pfn_start(bank))
			right = mid;
		else if (pfn >= bank_pfn_end(bank))
			left = mid + 1;
		else
			return 1;
	} while (left < right);
	return 0;
}
EXPORT_SYMBOL(pfn_valid);
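/*
 * The lookup above is a binary search, so it relies on meminfo.bank[]
 * being sorted by start address and costs O(log nr_banks) per call
 * rather than a linear scan of the banks.
 */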
static void arm_memory_present(struct meminfo *mi, int node)
{
}
#else
static void arm_memory_present(struct meminfo *mi, int node)
{
	int i;

	for_each_nodebank(i, mi, node) {
		struct membank *bank = &mi->bank[i];
		memory_present(node, bank_pfn_start(bank), bank_pfn_end(bank));
	}
}
#endif
void __init bootmem_init(void)
{
	struct meminfo *mi = &meminfo;
	unsigned long min, max_low, max_high;
	int node, initrd_node;

	/*
	 * Locate which node contains the ramdisk image, if any.
	 */
	initrd_node = check_initrd(mi);

	max_low = max_high = 0;

	/*
	 * Run through each node initialising the bootmem allocator.
	 */
	for_each_node(node) {
		unsigned long node_low, node_high;

		find_node_limits(node, mi, &min, &node_low, &node_high);

		if (node_low > max_low)
			max_low = node_low;
		if (node_high > max_high)
			max_high = node_high;

		/*
		 * If there is no memory in this node, ignore it.
		 * (We can't have nodes which have no lowmem)
		 */
		if (node_low == 0)
			continue;

		bootmem_init_node(node, mi, min, node_low);

		/*
		 * Reserve any special node zero regions.
		 */
		if (node == 0)
			reserve_node_zero(NODE_DATA(node));

		/*
		 * If the initrd is in this node, reserve its memory.
		 */
		if (node == initrd_node)
			bootmem_reserve_initrd(node);

		/*
		 * Sparsemem tries to allocate bootmem in memory_present(),
		 * so must be done after the fixed reservations
		 */
		arm_memory_present(mi, node);
	}

	/*
	 * sparse_init() needs the bootmem allocator up and running.
	 */
	sparse_init();

	/*
	 * Now free memory in each node - free_area_init_node needs
	 * the sparse mem_map arrays initialized by sparse_init()
	 * for memmap_init_zone(), otherwise all PFNs are invalid.
	 */
	for_each_node(node)
		bootmem_free_node(node, mi);

	high_memory = __va((max_low << PAGE_SHIFT) - 1) + 1;

	/*
	 * This doesn't seem to be used by the Linux memory manager any
	 * more, but is used by ll_rw_block.  If we can get rid of it, we
	 * also get rid of some of the stuff above as well.
	 *
	 * Note: max_low_pfn and max_pfn reflect the number of _pages_ in
	 * the system, not the maximum PFN.
	 */
	max_low_pfn = max_low - PHYS_PFN_OFFSET;
	max_pfn = max_high - PHYS_PFN_OFFSET;
}
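/*
 * Example (illustrative): with PHYS_OFFSET at 0x80000000
 * (PHYS_PFN_OFFSET 0x80000) and 128 MB of lowmem, max_low is pfn
 * 0x88000 and max_low_pfn becomes 0x8000 - a page count, as the
 * comment above says, not a maximum page frame number.
 */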
static inline int free_area(unsigned long pfn, unsigned long end, char *s)
{
	unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10);

	for (; pfn < end; pfn++) {
		struct page *page = pfn_to_page(pfn);
		ClearPageReserved(page);
		init_page_count(page);
		__free_page(page);
		pages++;
	}

	if (size && s)
		printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);

	return pages;
}
static inline void
free_memmap(int node, unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	unsigned long pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn);

	/*
	 * Convert to physical addresses, and
	 * round start upwards and end downwards.
	 */
	pg = PAGE_ALIGN(__pa(start_pg));
	pgend = __pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these,
	 * free the section of the memmap array.
	 */
	if (pg < pgend)
		free_bootmem_node(NODE_DATA(node), pg, pgend - pg);
}
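/*
 * The start/end handling above is deliberately conservative: taking
 * pfn_to_page(start_pfn - 1) + 1, rounding the start address up and
 * masking the end address down keeps any memmap page that is still
 * partially in use, so only whole, unused pages of the mem_map array
 * are returned to bootmem.
 */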
/*
 * The mem_map array can get very big.  Free the unused area of the memory map.
 */
static void __init free_unused_memmap_node(int node, struct meminfo *mi)
{
	unsigned long bank_start, prev_bank_end = 0;
	unsigned int i;

	/*
	 * [FIXME] This relies on each bank being in address order.  This
	 * may not be the case, especially if the user has provided the
	 * information on the command line.
	 */
	for_each_nodebank(i, mi, node) {
		struct membank *bank = &mi->bank[i];

		bank_start = bank_pfn_start(bank);
		if (bank_start < prev_bank_end) {
			printk(KERN_ERR "MEM: unordered memory banks.  "
				"Not freeing memmap.\n");
			break;
		}

		/*
		 * If we had a previous bank, and there is a space
		 * between the current bank and the previous, free it.
		 */
		if (prev_bank_end && prev_bank_end != bank_start)
			free_memmap(node, prev_bank_end, bank_start);

		prev_bank_end = bank_pfn_end(bank);
	}
}
/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
	unsigned long reserved_pages, free_pages;
	int i, node;

#ifndef CONFIG_DISCONTIGMEM
	max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;
#endif

	/* this will put all unused low memory onto the freelists */
	for_each_online_node(node) {
		pg_data_t *pgdat = NODE_DATA(node);

		free_unused_memmap_node(node, &meminfo);

		if (pgdat->node_spanned_pages != 0)
			totalram_pages += free_all_bootmem_node(pgdat);
	}
#ifdef CONFIG_SA1111
	/* now that our DMA memory is actually so designated, we can free it */
	totalram_pages += free_area(PHYS_PFN_OFFSET,
				    __phys_to_pfn(__pa(swapper_pg_dir)), NULL);
#endif
#ifdef CONFIG_HIGHMEM
	/* set highmem page free */
	for_each_online_node(node) {
		for_each_nodebank (i, &meminfo, node) {
			unsigned long start = bank_pfn_start(&meminfo.bank[i]);
			unsigned long end = bank_pfn_end(&meminfo.bank[i]);
			if (start >= max_low_pfn + PHYS_PFN_OFFSET)
				totalhigh_pages += free_area(start, end, NULL);
		}
	}
	totalram_pages += totalhigh_pages;
#endif
	reserved_pages = free_pages = 0;

	for_each_online_node(node) {
		pg_data_t *n = NODE_DATA(node);
		struct page *map = pgdat_page_nr(n, 0) - n->node_start_pfn;

		for_each_nodebank(i, &meminfo, node) {
			struct membank *bank = &meminfo.bank[i];
			unsigned int pfn1, pfn2;
			struct page *page, *end;

			pfn1 = bank_pfn_start(bank);
			pfn2 = bank_pfn_end(bank);

			page = map + pfn1;
			end  = map + pfn2;

			do {
				if (PageReserved(page))
					reserved_pages++;
				else if (!page_count(page))
					free_pages++;
				page++;
			} while (page < end);
		}
	}
	/*
	 * Since our memory may not be contiguous, calculate the
	 * real number of pages we have in this system
	 */
	printk(KERN_INFO "Memory:");
	num_physpages = 0;
	for (i = 0; i < meminfo.nr_banks; i++) {
		num_physpages += bank_pfn_size(&meminfo.bank[i]);
		printk(" %ldMB", bank_phys_size(&meminfo.bank[i]) >> 20);
	}
	printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));

	printk(KERN_NOTICE "Memory: %luk/%luk available, %luk reserved, %luK highmem\n",
		nr_free_pages() << (PAGE_SHIFT-10),
		free_pages << (PAGE_SHIFT-10),
		reserved_pages << (PAGE_SHIFT-10),
		totalhigh_pages << (PAGE_SHIFT-10));
#define MLK(b, t) b, t, ((t) - (b)) >> 10
#define MLM(b, t) b, t, ((t) - (b)) >> 20
#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)

	printk(KERN_NOTICE "Virtual kernel memory layout:\n"
			"    vector  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
			"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
			"    DMA     : 0x%08lx - 0x%08lx   (%4ld MB)\n"
			"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
			"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#ifdef CONFIG_HIGHMEM
			"    pkmap   : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
			"    modules : 0x%08lx - 0x%08lx   (%4ld MB)\n"
			"      .init : 0x%p" " - 0x%p" "   (%4d kB)\n"
			"      .text : 0x%p" " - 0x%p" "   (%4d kB)\n"
			"      .data : 0x%p" " - 0x%p" "   (%4d kB)\n",

			MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) +
				(PAGE_SIZE)),
			MLK(FIXADDR_START, FIXADDR_TOP),
			MLM(CONSISTENT_BASE, CONSISTENT_END),
			MLM(VMALLOC_START, VMALLOC_END),
			MLM(PAGE_OFFSET, (unsigned long)high_memory),
#ifdef CONFIG_HIGHMEM
			MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
				(PAGE_SIZE)),
#endif
			MLM(MODULES_VADDR, MODULES_END),

			MLK_ROUNDUP(__init_begin, __init_end),
			MLK_ROUNDUP(_text, _etext),
			MLK_ROUNDUP(_data, _edata));
	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
	BUILD_BUG_ON(VMALLOC_END > CONSISTENT_BASE);
	BUG_ON(VMALLOC_END > CONSISTENT_BASE);

	BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
	BUG_ON(TASK_SIZE > MODULES_VADDR);

#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
	BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
#endif

	if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
		extern int sysctl_overcommit_memory;
		/*
		 * On a machine this small we won't get
		 * anywhere without overcommit, so turn
		 * it on by default.
		 */
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}
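/*
 * For scale: the small-machine check at the end of mem_init() matches
 * systems with at most 128 pages of 16 kB or larger, i.e. only a few
 * megabytes of RAM, where almost nothing runs without overcommit.
 */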
void free_initmem(void)
{
#ifdef CONFIG_HAVE_TCM
	extern char *__tcm_start, *__tcm_end;

	totalram_pages += free_area(__phys_to_pfn(__pa(__tcm_start)),
				    __phys_to_pfn(__pa(__tcm_end)),
				    "TCM link");
#endif

	if (!machine_is_integrator() && !machine_is_cintegrator())
		totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)),
					    __phys_to_pfn(__pa(__init_end)),
					    "init");
}
#ifdef CONFIG_BLK_DEV_INITRD

static int keep_initrd;

void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (!keep_initrd)
		totalram_pages += free_area(__phys_to_pfn(__pa(start)),
					    __phys_to_pfn(__pa(end)),
					    "initrd");
}

static int __init keepinitrd_setup(char *__unused)
{
	keep_initrd = 1;
	return 1;
}

__setup("keepinitrd", keepinitrd_setup);
#endif