/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>

#include <asm/unified.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>

#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/traps.h>
#include <asm/unwind.h>

#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
#include "compat.h"
#endif
#include "atags.h"
#include "tcm.h"

#ifndef MEM_SIZE
#define MEM_SIZE	(16*1024*1024)
#endif

#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void paging_init(struct machine_desc *desc);
extern void reboot_setup(char *str);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);


#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif

struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
struct machine_desc *machine_desc __initdata;

static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
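/*
 * The platform strings built in setup_processor() get an endianness
 * suffix appended: casting endian_test.l to char yields the least
 * significant byte of the word, which is 'l' on a little-endian CPU
 * and 'b' on a big-endian one.
 */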
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel text",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

#define video_ram	mem_res[0]
#define kernel_code	mem_res[1]
#define kernel_data	mem_res[2]

static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"?(11)",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};

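/*
 * Determine the CPU architecture version from the main ID register:
 * ARM7-style IDs are classified via the Thumb bit, older-style IDs via
 * the architecture field in bits [19:16], and parts using the new CPUID
 * scheme (0xF in bits [19:16]) via the VMSA/PMSA fields of ID_MMFR0,
 * which distinguish ARMv6 from ARMv7.
 */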
int cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		unsigned int mmfr0;

		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		asm("mrc	p15, 0, %0, c0, c1, 4"
		    : "=r" (mmfr0));
		if ((mmfr0 & 0x0000000f) == 0x00000003 ||
		    (mmfr0 & 0x000000f0) == 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}

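/*
 * An instruction cache can alias if the size of one way (line size *
 * number of sets) exceeds PAGE_SIZE, since the cache index then uses
 * bits above the page offset.  On ARMv7 this is computed from CCSIDR
 * for the L1 I-cache; on ARMv6 the cache type register reports it
 * directly.
 */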
static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		asm("mcr	p15, 2, %0, c0, c0, 0 @ set CSSELR"
		    : /* No output operands */
		    : "r" (1));
		isb();
		asm("mrc	p15, 1, %0, c0, c0, 0 @ read CCSIDR"
		    : "=r" (id_reg));
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}

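/*
 * Classify the data and instruction caches (VIVT, VIPT aliasing or VIPT
 * non-aliasing, optionally ASID-tagged) from the cache type register,
 * handling both the ARMv7 and the older register formats, and record
 * the result in the global cacheid.
 */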
static void __init cacheid_init(void)
{
	unsigned int cachetype = read_cpuid_cachetype();
	unsigned int arch = cpu_architecture();

	if (arch >= CPU_ARCH_ARMv6) {
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			cacheid = CACHEID_VIPT_NONALIASING;
			if ((cachetype & (3 << 14)) == 1 << 14)
				cacheid |= CACHEID_ASID_TAGGED;
			else if (cpu_has_aliasing_icache(CPU_ARCH_ARMv7))
				cacheid |= CACHEID_VIPT_I_ALIASING;
		} else if (cachetype & (1 << 23)) {
			cacheid = CACHEID_VIPT_ALIASING;
		} else {
			cacheid = CACHEID_VIPT_NONALIASING;
			if (cpu_has_aliasing_icache(CPU_ARCH_ARMv6))
				cacheid |= CACHEID_VIPT_I_ALIASING;
		}
	} else {
		cacheid = CACHEID_VIVT;
	}

	printk("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);
extern struct machine_desc *lookup_machine_type(unsigned int);

static void __init feat_v6_fixup(void)
{
	int id = read_cpuid_id();

	if ((id & 0xff0f0000) != 0x41070000)
		return;

	/*
	 * HWCAP_TLS is available only on 1136 r1p0 and later,
	 * see also kuser_get_tls_init.
	 */
	if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
		elf_hwcap &= ~HWCAP_TLS;
}

static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
	       proc_arch[cpu_architecture()], cr_alignment);

	sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS);
	sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;
#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~HWCAP_THUMB;
#endif

	feat_v6_fixup();

	cacheid_init();
	cpu_proc_init();
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void cpu_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 *
	 * Switch briefly into IRQ, ABT and UND mode (with IRQs and FIQs
	 * masked) so that each mode's banked stack pointer can be pointed
	 * at the corresponding slot of this CPU's struct stack, then drop
	 * back into SVC mode.
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
}

static struct machine_desc * __init setup_machine(unsigned int nr)
{
	struct machine_desc *list;

	/*
	 * locate machine in the list of supported machines.
	 */
	list = lookup_machine_type(nr);
	if (!list) {
		printk("Machine configuration botched (nr %d), unable "
		       "to continue.\n", nr);
		while (1);
	}

	printk("Machine: %s\n", list->name);

	return list;
}

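/*
 * Record one bank of physical memory in meminfo.  The bank start is
 * rounded up to a page boundary and the size rounded down so that only
 * whole pages are registered.
 */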
static int __init arm_add_memory(unsigned long start, unsigned long size)
{
	struct membank *bank = &meminfo.bank[meminfo.nr_banks];

	if (meminfo.nr_banks >= NR_BANKS) {
		printk(KERN_CRIT "NR_BANKS too low, "
			"ignoring memory at %#lx\n", start);
		return -EINVAL;
	}

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is appropriately rounded down, start is rounded up.
	 */
	size -= start & ~PAGE_MASK;
	bank->start = PAGE_ALIGN(start);
	bank->size  = size & PAGE_MASK;

	/*
	 * Check whether this memory region has non-zero size.
	 */
	if (bank->size == 0)
		return -EINVAL;

	meminfo.nr_banks++;
	return 0;
}

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]".
 */
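/*
 * Example: "mem=64M@0xc0000000" registers a single 64MB bank starting at
 * physical address 0xc0000000; without the "@start" part the bank starts
 * at PHYS_OFFSET.  (The values here are only illustrative.)
 */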
static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	unsigned long size, start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		meminfo.nr_banks = 0;
	}

	start = PHYS_OFFSET;
	size  = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);

static void __init
setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
{
#ifdef CONFIG_BLK_DEV_RAM
	extern int rd_size, rd_image_start, rd_prompt, rd_doload;

	rd_image_start = image_start;
	rd_prompt = prompt;
	rd_doload = doload;

	if (rd_sz)
		rd_size = rd_sz;
#endif
}

static void __init
request_standard_resources(struct meminfo *mi, struct machine_desc *mdesc)
{
	struct resource *res;
	int i;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(_etext - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for (i = 0; i < mi->nr_banks; i++) {
		if (mi->bank[i].size == 0)
			continue;

		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = mi->bank[i].start;
		res->end   = mi->bank[i].start + mi->bank[i].size - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines will never possess lp0, lp1 or lp2.
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}

/*
 * Tag parsing.
 *
 * This is the new way of passing data to the kernel at boot time.  Rather
 * than passing a fixed inflexible structure to the kernel, we pass a list
 * of variable-sized tags to the kernel.  The first tag must be an ATAG_CORE
 * tag for the list to be recognised (to distinguish the tagged list from
 * a param_struct).  The list is terminated with a zero-length tag (this tag
 * is not parsed in any way).
 */
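/*
 * A minimal tag list therefore looks like the default init_tags defined
 * further down in this file: an ATAG_CORE tag, an ATAG_MEM tag covering
 * MEM_SIZE bytes at PHYS_OFFSET, and a terminating zero-length ATAG_NONE.
 */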
static int __init parse_tag_core(const struct tag *tag)
{
	if (tag->hdr.size > 2) {
		if ((tag->u.core.flags & 1) == 0)
			root_mountflags &= ~MS_RDONLY;
		ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
	}
	return 0;
}

__tagtable(ATAG_CORE, parse_tag_core);

static int __init parse_tag_mem32(const struct tag *tag)
{
	return arm_add_memory(tag->u.mem.start, tag->u.mem.size);
}

__tagtable(ATAG_MEM, parse_tag_mem32);

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
	.orig_video_lines	= 30,
	.orig_video_cols	= 80,
	.orig_video_mode	= 0,
	.orig_video_ega_bx	= 0,
	.orig_video_isVGA	= 1,
	.orig_video_points	= 8
};

static int __init parse_tag_videotext(const struct tag *tag)
{
	screen_info.orig_x            = tag->u.videotext.x;
	screen_info.orig_y            = tag->u.videotext.y;
	screen_info.orig_video_page   = tag->u.videotext.video_page;
	screen_info.orig_video_mode   = tag->u.videotext.video_mode;
	screen_info.orig_video_cols   = tag->u.videotext.video_cols;
	screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
	screen_info.orig_video_lines  = tag->u.videotext.video_lines;
	screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga;
	screen_info.orig_video_points = tag->u.videotext.video_points;
	return 0;
}

__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
#endif

static int __init parse_tag_ramdisk(const struct tag *tag)
{
	setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
		      (tag->u.ramdisk.flags & 2) == 0,
		      tag->u.ramdisk.start, tag->u.ramdisk.size);
	return 0;
}

__tagtable(ATAG_RAMDISK, parse_tag_ramdisk);

static int __init parse_tag_serialnr(const struct tag *tag)
{
	system_serial_low = tag->u.serialnr.low;
	system_serial_high = tag->u.serialnr.high;
	return 0;
}

__tagtable(ATAG_SERIAL, parse_tag_serialnr);

static int __init parse_tag_revision(const struct tag *tag)
{
	system_rev = tag->u.revision.rev;
	return 0;
}

__tagtable(ATAG_REVISION, parse_tag_revision);

#ifndef CONFIG_CMDLINE_FORCE
static int __init parse_tag_cmdline(const struct tag *tag)
{
	strlcpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE);
	return 0;
}

__tagtable(ATAG_CMDLINE, parse_tag_cmdline);
#endif /* CONFIG_CMDLINE_FORCE */

/*
 * Scan the tag table for this tag, and call its parse function.
 * The tag table is built by the linker from all the __tagtable
 * declarations.
 */
static int __init parse_tag(const struct tag *tag)
{
	extern struct tagtable __tagtable_begin, __tagtable_end;
	struct tagtable *t;

	for (t = &__tagtable_begin; t < &__tagtable_end; t++)
		if (tag->hdr.tag == t->tag) {
			t->parse(tag);
			break;
		}

	return t < &__tagtable_end;
}

/*
 * Parse all tags in the list, checking both the global and architecture
 * specific tag tables.
 */
static void __init parse_tags(const struct tag *t)
{
	for (; t->hdr.size; t = tag_next(t))
		if (!parse_tag(t))
			printk(KERN_WARNING
				"Ignoring unrecognised tag 0x%08x\n",
				t->hdr.tag);
}

/*
 * This holds our defaults.
 */
static struct init_tags {
	struct tag_header hdr1;
	struct tag_core   core;
	struct tag_header hdr2;
	struct tag_mem32  mem;
	struct tag_header hdr3;
} init_tags __initdata = {
	{ tag_size(tag_core), ATAG_CORE },
	{ 1, PAGE_SIZE, 0xff },
	{ tag_size(tag_mem32), ATAG_MEM },
	{ MEM_SIZE, PHYS_OFFSET },
	{ 0, ATAG_NONE }
};

static int __init customize_machine(void)
{
	/* customizes platform devices, or adds new ones */
	if (machine_desc->init_machine)
		machine_desc->init_machine();
	return 0;
}
arch_initcall(customize_machine);

#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves the memory area given by the "crashkernel=" kernel
 * command line parameter. The reserved memory is used by a dump capture
 * kernel when the primary kernel crashes.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret)
		return;

	ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
	if (ret < 0) {
		printk(KERN_WARNING "crashkernel reservation failed - "
		       "memory is in use (0x%lx)\n", (unsigned long)crash_base);
		return;
	}

	printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
	       "for crashkernel (System RAM: %ldMB)\n",
	       (unsigned long)(crash_size >> 20),
	       (unsigned long)(crash_base >> 20),
	       (unsigned long)(total_mem >> 20));

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */

/*
 * Note: elfcorehdr_addr is not just limited to vmcore. It is also used by
 * is_kdump_kernel() to determine if we are booting after a panic. Hence
 * ifdef it under CONFIG_CRASH_DUMP and not CONFIG_PROC_VMCORE.
 */

#ifdef CONFIG_CRASH_DUMP
/*
 * elfcorehdr= specifies the location of the ELF core header stored by the
 * crashed kernel. This option is passed by the kexec loader to the capture
 * kernel.
 */
static int __init setup_elfcorehdr(char *arg)
{
	char *end;

	if (!arg)
		return -EINVAL;

	elfcorehdr_addr = memparse(arg, &end);
	return end > arg ? 0 : -EINVAL;
}
early_param("elfcorehdr", setup_elfcorehdr);
#endif /* CONFIG_CRASH_DUMP */

static void __init squash_mem_tags(struct tag *tag)
{
	for (; tag->hdr.size; tag = tag_next(tag))
		if (tag->hdr.tag == ATAG_MEM)
			tag->hdr.tag = ATAG_NONE;
}

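/*
 * setup_arch() is the architecture-specific boot entry point called from
 * start_kernel(): it identifies the processor and machine, locates and
 * parses the ATAG list (or converts an old-style param_struct), sets up
 * the kernel command line, initialises memblock and paging, requests the
 * standard resources and performs the early per-CPU and trap setup.
 */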
void __init setup_arch(char **cmdline_p)
{
	struct tag *tags = (struct tag *)&init_tags;
	struct machine_desc *mdesc;
	char *from = default_command_line;

	unwind_init();

	setup_processor();
	mdesc = setup_machine(machine_arch_type);
	machine_desc = mdesc;
	machine_name = mdesc->name;

	if (mdesc->soft_reboot)
		reboot_setup("s");

	if (__atags_pointer)
		tags = phys_to_virt(__atags_pointer);
	else if (mdesc->boot_params)
		tags = phys_to_virt(mdesc->boot_params);

#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
	/*
	 * If we have the old style parameters, convert them to
	 * a tag list.
	 */
	if (tags->hdr.tag != ATAG_CORE)
		convert_to_tag_list(tags);
#endif
	if (tags->hdr.tag != ATAG_CORE)
		tags = (struct tag *)&init_tags;

	if (mdesc->fixup)
		mdesc->fixup(mdesc, tags, &from, &meminfo);

	if (tags->hdr.tag == ATAG_CORE) {
		if (meminfo.nr_banks != 0)
			squash_mem_tags(tags);
		save_atags(tags);
		parse_tags(tags);
	}

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* parse_early_param needs a boot_command_line */
	strlcpy(boot_command_line, from, COMMAND_LINE_SIZE);

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	parse_early_param();

	arm_memblock_init(&meminfo, mdesc);

	paging_init(mdesc);
	request_standard_resources(&meminfo, mdesc);

#ifdef CONFIG_SMP
	if (is_smp())
		smp_init_cpus();
#endif
	reserve_crashkernel();

	cpu_init();
	tcm_init();

#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
	early_trap_init();

	if (mdesc->init_early)
		mdesc->init_early();
}


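/*
 * Register each possible CPU with the CPU subsystem so that it shows up
 * under /sys/devices/system/cpu/; marking it hotpluggable exposes the
 * per-CPU "online" control where hotplug is supported.
 */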
static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = 1;
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif

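/*
 * Human-readable names for the HWCAP_* feature bits, in bit order:
 * c_show() prints entry i when bit i of elf_hwcap is set.
 */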
static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	NULL
};

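/*
 * Produce the contents of /proc/cpuinfo; cpuinfo_op at the bottom of this
 * file wires c_show() and friends into the seq_file interface.
 */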
static int c_show(struct seq_file *m, void *v)
{
	int i;

	seq_printf(m, "Processor\t: %s rev %d (%s)\n",
		   cpu_name, read_cpuid_id() & 15, elf_platform);

#if defined(CONFIG_SMP)
	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
	}
#else /* CONFIG_SMP */
	seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
		   loops_per_jiffy / (500000/HZ),
		   (loops_per_jiffy / (5000/HZ)) % 100);
#endif

	/* dump out the processor features */
	seq_puts(m, "Features\t: ");

	for (i = 0; hwcap_str[i]; i++)
		if (elf_hwcap & (1 << i))
			seq_printf(m, "%s ", hwcap_str[i]);

	seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
	seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);

	if ((read_cpuid_id() & 0x0008f000) == 0x00000000) {
		/* pre-ARM7 */
		seq_printf(m, "CPU part\t: %07x\n", read_cpuid_id() >> 4);
	} else {
		if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
			/* ARM7 */
			seq_printf(m, "CPU variant\t: 0x%02x\n",
				   (read_cpuid_id() >> 16) & 127);
		} else {
			/* post-ARM7 */
			seq_printf(m, "CPU variant\t: 0x%x\n",
				   (read_cpuid_id() >> 20) & 15);
		}
		seq_printf(m, "CPU part\t: 0x%03x\n",
			   (read_cpuid_id() >> 4) & 0xfff);
	}
	seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);

	seq_puts(m, "\n");

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};