/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 1995 Waldorf Electronics
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03  Ralf Baechle
 * Copyright (C) 1996 Stoned Elipot
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2002, 2007  Maciej W. Rozycki
 */

#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/export.h>
#include <linux/screen_info.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/initrd.h>
#include <linux/root_dev.h>
#include <linux/highmem.h>
#include <linux/console.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/kexec.h>
#include <linux/sizes.h>
#include <linux/device.h>
#include <linux/dma-contiguous.h>

#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/cache.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp-ops.h>

struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;

EXPORT_SYMBOL(cpu_data);

struct screen_info screen_info;

/*
 * Despite its name, this variable is used even if we don't have PCI.
 */
unsigned int PCI_DMA_BUS_IS_PHYS;

EXPORT_SYMBOL(PCI_DMA_BUS_IS_PHYS);

/*
 * These are initialized so they are in the .data section
 */
unsigned long mips_machtype __read_mostly = MACH_UNKNOWN;

EXPORT_SYMBOL(mips_machtype);

struct boot_mem_map boot_mem_map;

static char __initdata command_line[COMMAND_LINE_SIZE];
char __initdata arcs_cmdline[COMMAND_LINE_SIZE];

#ifdef CONFIG_CMDLINE_BOOL
static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
#endif

/*
 * mips_io_port_base is the beginning of the address space to which x86 style
 * I/O ports are mapped.
 */
const unsigned long mips_io_port_base = -1;
EXPORT_SYMBOL(mips_io_port_base);

static struct resource code_resource = { .name = "Kernel code", };
static struct resource data_resource = { .name = "Kernel data", };

static void *detect_magic __initdata = detect_memory_region;

void __init add_memory_region(phys_t start, phys_t size, long type)
{
	int x = boot_mem_map.nr_map;
	int i;

	/* Sanity check */
	if (start + size < start) {
		pr_warning("Trying to add an invalid memory region, skipped\n");
		return;
	}

	/*
	 * Try to merge with existing entry, if any.
	 */
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		struct boot_mem_map_entry *entry = boot_mem_map.map + i;
		phys_t top;

		if (entry->type != type)
			continue;

		if (start + size < entry->addr)
			continue;		/* no overlap */

		if (entry->addr + entry->size < start)
			continue;		/* no overlap */

		top = max(entry->addr + entry->size, start + size);
		entry->addr = min(entry->addr, start);
		entry->size = top - entry->addr;

		return;
	}

	if (boot_mem_map.nr_map == BOOT_MEM_MAP_MAX) {
		pr_err("Ooops! Too many entries in the memory map!\n");
		return;
	}

	boot_mem_map.map[x].addr = start;
	boot_mem_map.map[x].size = size;
	boot_mem_map.map[x].type = type;
	boot_mem_map.nr_map++;
}
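
/*
 * Illustrative use only (addresses and sizes are made up): a board's
 * plat_mem_setup() would typically register its RAM with something like
 *
 *	add_memory_region(0x00000000, 128 << 20, BOOT_MEM_RAM);
 *
 * i.e. 128 MB of usable RAM starting at physical address 0.
 */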

void __init detect_memory_region(phys_t start, phys_t sz_min, phys_t sz_max)
{
	void *dm = &detect_magic;
	phys_t size;

	for (size = sz_min; size < sz_max; size <<= 1) {
		if (!memcmp(dm, dm + size, sizeof(detect_magic)))
			break;
	}

	pr_debug("Memory: %lluMB of RAM detected at 0x%llx (min: %lluMB, max: %lluMB)\n",
		((unsigned long long) size) / SZ_1M,
		(unsigned long long) start,
		((unsigned long long) sz_min) / SZ_1M,
		((unsigned long long) sz_max) / SZ_1M);

	add_memory_region(start, size, BOOT_MEM_RAM);
}
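
/*
 * Note on the probe above: detect_memory_region() assumes the kernel (and
 * hence detect_magic) sits inside the RAM window being probed. On boards that
 * only partially decode address lines the RAM contents are mirrored through
 * the window, so once 'size' reaches the real RAM size, dm + size aliases dm,
 * the memcmp() matches and the loop stops at the detected size.
 */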

static void __init print_memory_map(void)
{
	int i;
	const int field = 2 * sizeof(unsigned long);

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		printk(KERN_INFO " memory: %0*Lx @ %0*Lx ",
		       field, (unsigned long long) boot_mem_map.map[i].size,
		       field, (unsigned long long) boot_mem_map.map[i].addr);

		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
			printk(KERN_CONT "(usable)\n");
			break;
		case BOOT_MEM_INIT_RAM:
			printk(KERN_CONT "(usable after init)\n");
			break;
		case BOOT_MEM_ROM_DATA:
			printk(KERN_CONT "(ROM data)\n");
			break;
		case BOOT_MEM_RESERVED:
			printk(KERN_CONT "(reserved)\n");
			break;
		default:
			printk(KERN_CONT "type %lu\n", boot_mem_map.map[i].type);
			break;
		}
	}
}

#ifdef CONFIG_BLK_DEV_INITRD

static int __init rd_start_early(char *p)
{
	unsigned long start = memparse(p, &p);

#ifdef CONFIG_64BIT
	/* Guess if the sign extension was forgotten by bootloader */
	if (start < XKPHYS)
		start = (int)start;
#endif
	initrd_start = start;
	initrd_end += start;
	return 0;
}
early_param("rd_start", rd_start_early);

static int __init rd_size_early(char *p)
{
	initrd_end += memparse(p, &p);
	return 0;
}
early_param("rd_size", rd_size_early);
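
/*
 * Example (illustrative addresses): a bootloader that loads an external
 * initrd might pass
 *
 *	rd_start=0x84000000 rd_size=8M
 *
 * on the kernel command line; rd_start seeds initrd_start/initrd_end and
 * rd_size then extends initrd_end by the given length.
 */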

/* It returns the next free pfn after the initrd. */
static unsigned long __init init_initrd(void)
{
	unsigned long end;

	/*
	 * Board specific code or command line parser should have
	 * already set up initrd_start and initrd_end. In these cases
	 * perform sanity checks and use them if all looks good.
	 */
	if (!initrd_start || initrd_end <= initrd_start)
		goto disable;

	if (initrd_start & ~PAGE_MASK) {
		pr_err("initrd start must be page aligned\n");
		goto disable;
	}
	if (initrd_start < PAGE_OFFSET) {
		pr_err("initrd start < PAGE_OFFSET\n");
		goto disable;
	}

	/*
	 * Sanitize initrd addresses. For example firmware
	 * can't guess whether it needs to pass them as
	 * 64-bit values if the kernel has been built purely
	 * 32-bit. We also need to switch from KSEG0 to XKPHYS
	 * addresses now, so the code can safely use __pa().
	 */
	end = __pa(initrd_end);
	initrd_end = (unsigned long)__va(end);
	initrd_start = (unsigned long)__va(__pa(initrd_start));

	ROOT_DEV = Root_RAM0;
	return PFN_UP(end);

disable:
	initrd_start = 0;
	initrd_end = 0;
	return 0;
}

static void __init finalize_initrd(void)
{
	unsigned long size = initrd_end - initrd_start;

	if (size == 0) {
		printk(KERN_INFO "Initrd not found or empty");
		goto disable;
	}
	if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
		printk(KERN_ERR "Initrd extends beyond end of memory");
		goto disable;
	}

	reserve_bootmem(__pa(initrd_start), size, BOOTMEM_DEFAULT);
	initrd_below_start_ok = 1;

	pr_info("Initial ramdisk at: 0x%lx (%lu bytes)\n",
		initrd_start, size);
	return;

disable:
	printk(KERN_CONT " - disabling initrd\n");
	initrd_start = 0;
	initrd_end = 0;
}

#else /* !CONFIG_BLK_DEV_INITRD */

static unsigned long __init init_initrd(void)
{
	return 0;
}

#define finalize_initrd()	do {} while (0)

#endif

/*
 * Initialize the bootmem allocator. It also sets up initrd-related data
 * if needed.
 */
#if defined(CONFIG_SGI_IP27) || (defined(CONFIG_CPU_LOONGSON3) && defined(CONFIG_NUMA))

static void __init bootmem_init(void)
{
	init_initrd();
	finalize_initrd();
}

#else /* !CONFIG_SGI_IP27 */

static void __init bootmem_init(void)
{
	unsigned long reserved_end;
	unsigned long mapstart = ~0UL;
	unsigned long bootmap_size;
	int i;

	/*
	 * Sanity check any INITRD first. We don't take it into account
	 * for bootmem setup initially, rely on the end-of-kernel-code
	 * as our memory range starting point. Once bootmem is inited we
	 * will reserve the area used for the initrd.
	 */
	init_initrd();
	reserved_end = (unsigned long) PFN_UP(__pa_symbol(&_end));

	/*
	 * max_low_pfn is not a number of pages. The number of pages
	 * of the system is given by 'max_low_pfn - min_low_pfn'.
	 */
	min_low_pfn = ~0UL;
	max_low_pfn = 0;

	/*
	 * Find the highest page frame number we have available.
	 */
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long start, end;

		if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
			continue;

		start = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr
				+ boot_mem_map.map[i].size);

		if (end > max_low_pfn)
			max_low_pfn = end;
		if (start < min_low_pfn)
			min_low_pfn = start;
		if (end <= reserved_end)
			continue;
		if (start >= mapstart)
			continue;
		mapstart = max(reserved_end, start);
	}

	if (min_low_pfn >= max_low_pfn)
		panic("Incorrect memory mapping !!!");
	if (min_low_pfn > ARCH_PFN_OFFSET) {
		pr_info("Wasting %lu bytes for tracking %lu unused pages\n",
			(min_low_pfn - ARCH_PFN_OFFSET) * sizeof(struct page),
			min_low_pfn - ARCH_PFN_OFFSET);
	} else if (min_low_pfn < ARCH_PFN_OFFSET) {
		pr_info("%lu free pages won't be used\n",
			ARCH_PFN_OFFSET - min_low_pfn);
	}
	min_low_pfn = ARCH_PFN_OFFSET;

	/*
	 * Determine low and high memory ranges
	 */
	max_pfn = max_low_pfn;
	if (max_low_pfn > PFN_DOWN(HIGHMEM_START)) {
#ifdef CONFIG_HIGHMEM
		highstart_pfn = PFN_DOWN(HIGHMEM_START);
		highend_pfn = max_low_pfn;
#endif
		max_low_pfn = PFN_DOWN(HIGHMEM_START);
	}

#ifdef CONFIG_BLK_DEV_INITRD
	/*
	 * mapstart should be after initrd_end
	 */
	if (initrd_end)
		mapstart = max(mapstart,
			       (unsigned long)PFN_UP(__pa(initrd_end)));
#endif

	/*
	 * Initialize the boot-time allocator with low memory only.
	 */
	bootmap_size = init_bootmem_node(NODE_DATA(0), mapstart,
					 min_low_pfn, max_low_pfn);

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long start, end;

		start = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr
				+ boot_mem_map.map[i].size);

		if (start <= min_low_pfn)
			start = min_low_pfn;
		if (start >= end)
			continue;

#ifndef CONFIG_HIGHMEM
		if (end > max_low_pfn)
			end = max_low_pfn;

		/*
		 * ... finally, is the area going away?
		 */
		if (end <= start)
			continue;
#endif

		memblock_add_node(PFN_PHYS(start), PFN_PHYS(end - start), 0);
	}

	/*
	 * Register fully available low RAM pages with the bootmem allocator.
	 */
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long start, end, size;

		start = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr
				+ boot_mem_map.map[i].size);

		/*
		 * Reserve usable memory.
		 */
		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
			break;
		case BOOT_MEM_INIT_RAM:
			memory_present(0, start, end);
			continue;
		default:
			/* Not usable memory */
			continue;
		}

		/*
		 * We are rounding up the start address of usable memory
		 * and at the end of the usable range downwards.
		 */
		if (start >= max_low_pfn)
			continue;
		if (start < reserved_end)
			start = reserved_end;
		if (end > max_low_pfn)
			end = max_low_pfn;

		/*
		 * ... finally, is the area going away?
		 */
		if (end <= start)
			continue;
		size = end - start;

		/* Register lowmem ranges */
		free_bootmem(PFN_PHYS(start), size << PAGE_SHIFT);
		memory_present(0, start, end);
	}

	/*
	 * Reserve the bootmap memory.
	 */
	reserve_bootmem(PFN_PHYS(mapstart), bootmap_size, BOOTMEM_DEFAULT);

	/*
	 * Reserve initrd memory if needed.
	 */
	finalize_initrd();
}

#endif /* CONFIG_SGI_IP27 */

/*
 * arch_mem_init - initialize memory management subsystem
 *
 *  o plat_mem_setup() detects the memory configuration and will record
 *    detected memory areas using add_memory_region.
 *
 *  At this stage the memory configuration of the system is known to the
 *  kernel but generic memory management system is still entirely uninitialized.
 *
 *  o bootmem_init()
 *  o sparse_init()
 *  o paging_init()
 *  o dma_contiguous_reserve()
 *
 *  At this stage the bootmem allocator is ready to use.
 *
 *  NOTE: historically plat_mem_setup did the entire platform initialization.
 *        This was rather impractical because it meant plat_mem_setup had to
 *        get away without any kind of memory allocator. To keep old code from
 *        breaking, plat_setup was just renamed to plat_mem_setup and a second
 *        platform initialization hook for anything else was introduced.
 */

static int usermem __initdata;

static int __init early_parse_mem(char *p)
{
	unsigned long start, size;

	/*
	 * If a user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		boot_mem_map.nr_map = 0;
		usermem = 1;
	}
	start = 0;
	size = memparse(p, &p);
	if (*p == '@')
		start = memparse(p + 1, &p);

	add_memory_region(start, size, BOOT_MEM_RAM);
	return 0;
}
early_param("mem", early_parse_mem);
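
/*
 * Example (illustrative values): booting with "mem=64M@0x0" throws away the
 * firmware-reported map and registers a single 64 MB BOOT_MEM_RAM region at
 * physical address 0; the option may be given more than once to describe
 * several regions.
 */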

#ifdef CONFIG_PROC_VMCORE
unsigned long setup_elfcorehdr, setup_elfcorehdr_size;

static int __init early_parse_elfcorehdr(char *p)
{
	int i;

	setup_elfcorehdr = memparse(p, &p);

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long start = boot_mem_map.map[i].addr;
		unsigned long end = (boot_mem_map.map[i].addr +
				     boot_mem_map.map[i].size);
		if (setup_elfcorehdr >= start && setup_elfcorehdr < end) {
			/*
			 * Reserve from the elf core header to the end of
			 * the memory segment, that should all be kdump
			 * reserved memory.
			 */
			setup_elfcorehdr_size = end - setup_elfcorehdr;
			break;
		}
	}
	/*
	 * If we don't find it in the memory map, then we shouldn't
	 * have to worry about it, as the new kernel won't use it.
	 */
	return 0;
}
early_param("elfcorehdr", early_parse_elfcorehdr);
#endif
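
/*
 * "elfcorehdr=<addr>" is normally appended to the capture kernel's command
 * line by kexec-tools rather than typed by hand, e.g. (address illustrative
 * only):
 *
 *	elfcorehdr=0x0fe00000
 */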

static void __init arch_mem_addpart(phys_t mem, phys_t end, int type)
{
	phys_t size;
	int i;

	size = end - mem;
	if (!size)
		return;

	/* Make sure it is in the boot_mem_map */
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		if (mem >= boot_mem_map.map[i].addr &&
		    mem < (boot_mem_map.map[i].addr +
			   boot_mem_map.map[i].size))
			return;
	}
	add_memory_region(mem, size, type);
}

#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
	unsigned long long total;

	total = max_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

static void __init mips_parse_crashkernel(void)
{
	unsigned long long total_mem;
	unsigned long long crash_size, crash_base;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret != 0 || crash_size <= 0)
		return;

	crashk_res.start = crash_base;
	crashk_res.end	 = crash_base + crash_size - 1;
}
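
/*
 * parse_crashkernel() understands the usual "crashkernel=" syntax, e.g.
 * (illustrative values) "crashkernel=64M@32M" to reserve 64 MB for the
 * capture kernel starting at physical 32 MB.
 */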

static void __init request_crashkernel(struct resource *res)
{
	int ret;

	ret = request_resource(res, &crashk_res);
	if (!ret)
		pr_info("Reserving %ldMB of memory at %ldMB for crashkernel\n",
			(unsigned long)((crashk_res.end -
					 crashk_res.start + 1) >> 20),
			(unsigned long)(crashk_res.start >> 20));
}
#else /* !defined(CONFIG_KEXEC) */
static void __init mips_parse_crashkernel(void)
{
}

static void __init request_crashkernel(struct resource *res)
{
}
#endif /* !defined(CONFIG_KEXEC) */

static void __init arch_mem_init(char **cmdline_p)
{
	struct memblock_region *reg;
	extern void plat_mem_setup(void);

	/* call board setup routine */
	plat_mem_setup();

	/*
	 * Make sure all kernel memory is in the maps. The "UP" and
	 * "DOWN" are opposite for initdata since if it crosses over
	 * into another memory section you don't want that to be
	 * freed when the initdata is freed.
	 */
	arch_mem_addpart(PFN_DOWN(__pa_symbol(&_text)) << PAGE_SHIFT,
			 PFN_UP(__pa_symbol(&_edata)) << PAGE_SHIFT,
			 BOOT_MEM_RAM);
	arch_mem_addpart(PFN_UP(__pa_symbol(&__init_begin)) << PAGE_SHIFT,
			 PFN_DOWN(__pa_symbol(&__init_end)) << PAGE_SHIFT,
			 BOOT_MEM_INIT_RAM);

	pr_info("Determined physical RAM map:\n");
	print_memory_map();

#ifdef CONFIG_CMDLINE_BOOL
#ifdef CONFIG_CMDLINE_OVERRIDE
	strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
#else
	if (builtin_cmdline[0]) {
		strlcat(arcs_cmdline, " ", COMMAND_LINE_SIZE);
		strlcat(arcs_cmdline, builtin_cmdline, COMMAND_LINE_SIZE);
	}
	strlcpy(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
#endif
#else
	strlcpy(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
#endif

	strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);

	*cmdline_p = command_line;

	parse_early_param();

	if (usermem) {
		pr_info("User-defined physical RAM map:\n");
		print_memory_map();
	}

	bootmem_init();
#ifdef CONFIG_PROC_VMCORE
	if (setup_elfcorehdr && setup_elfcorehdr_size) {
		printk(KERN_INFO "kdump reserved memory at %lx-%lx\n",
		       setup_elfcorehdr, setup_elfcorehdr_size);
		reserve_bootmem(setup_elfcorehdr, setup_elfcorehdr_size,
				BOOTMEM_DEFAULT);
	}
#endif

	mips_parse_crashkernel();
#ifdef CONFIG_KEXEC
	if (crashk_res.start != crashk_res.end)
		reserve_bootmem(crashk_res.start,
				crashk_res.end - crashk_res.start + 1,
				BOOTMEM_DEFAULT);
#endif
	device_tree_init();
	sparse_init();
	plat_swiotlb_setup();
	paging_init();

	dma_contiguous_reserve(PFN_PHYS(max_low_pfn));
	/* Tell bootmem about cma reserved memblock section */
	for_each_memblock(reserved, reg)
		if (reg->size != 0)
			reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
}

static void __init resource_init(void)
{
	int i;

	if (UNCAC_BASE != IO_BASE)
		return;

	code_resource.start = __pa_symbol(&_text);
	code_resource.end = __pa_symbol(&_etext) - 1;
	data_resource.start = __pa_symbol(&_etext);
	data_resource.end = __pa_symbol(&_edata) - 1;

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		struct resource *res;
		unsigned long start, end;

		start = boot_mem_map.map[i].addr;
		end = boot_mem_map.map[i].addr + boot_mem_map.map[i].size - 1;
		if (start >= HIGHMEM_START)
			continue;
		if (end >= HIGHMEM_START)
			end = HIGHMEM_START - 1;

		res = alloc_bootmem(sizeof(struct resource));
		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
		case BOOT_MEM_INIT_RAM:
		case BOOT_MEM_ROM_DATA:
			res->name = "System RAM";
			break;
		case BOOT_MEM_RESERVED:
		default:
			res->name = "reserved";
		}

		res->start = start;
		res->end = end;

		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
		request_resource(&iomem_resource, res);

		/*
		 * We don't know which RAM region contains kernel data,
		 * so we try it repeatedly and let the resource manager
		 * test it.
		 */
		request_resource(res, &code_resource);
		request_resource(res, &data_resource);
		request_crashkernel(res);
	}
}
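
/*
 * The resources registered above are what end up in /proc/iomem: the
 * "System RAM" and "reserved" windows, with "Kernel code", "Kernel data"
 * and any crash kernel reservation nested inside the RAM region that
 * actually contains them.
 */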

#ifdef CONFIG_SMP
static void __init prefill_possible_map(void)
{
	int i, possible = num_possible_cpus();

	if (possible > nr_cpu_ids)
		possible = nr_cpu_ids;

	for (i = 0; i < possible; i++)
		set_cpu_possible(i, true);
	for (; i < NR_CPUS; i++)
		set_cpu_possible(i, false);

	nr_cpu_ids = possible;
}
#else
static inline void prefill_possible_map(void) {}
#endif

void __init setup_arch(char **cmdline_p)
{
	cpu_probe();
	prom_init();

#ifdef CONFIG_EARLY_PRINTK
	setup_early_printk();
#endif
	cpu_report();

#if defined(CONFIG_VT)
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	arch_mem_init(cmdline_p);

	resource_init();
	plat_smp_setup();
	prefill_possible_map();

	cpu_cache_init();
}

unsigned long kernelsp[NR_CPUS];
unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;

#ifdef CONFIG_DEBUG_FS
struct dentry *mips_debugfs_dir;

static int __init debugfs_mips(void)
{
	struct dentry *d;

	d = debugfs_create_dir("mips", NULL);
	if (!d)
		return -ENOMEM;
	mips_debugfs_dir = d;
	return 0;
}
arch_initcall(debugfs_mips);
#endif
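
/*
 * Other arch/mips code is expected to create its debugfs entries underneath
 * mips_debugfs_dir (the "mips" directory created above); this initcall only
 * creates the directory itself.
 */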