[ARM] Allow gcc to optimise arm_add_memory a little more
/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>

#include <asm/cpu.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>

#include "compat.h"

#ifndef MEM_SIZE
#define MEM_SIZE	(16*1024*1024)
#endif

#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void paging_init(struct meminfo *, struct machine_desc *desc);
extern void reboot_setup(char *str);
extern int root_mountflags;
extern void _stext, _text, _etext, __data_start, _edata, _end;

unsigned int processor_id;
unsigned int __machine_arch_type;
EXPORT_SYMBOL(__machine_arch_type);

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap;
EXPORT_SYMBOL(elf_hwcap);


#ifdef MULTI_CPU
struct processor processor;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache;
#endif

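/*
 * Small per-CPU stacks used by the re-entrant IRQ, abort and
 * undefined-instruction exception handlers.
 */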
struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

unsigned long phys_initrd_start __initdata = 0;
unsigned long phys_initrd_size __initdata = 0;

static struct meminfo meminfo __initdata = { 0, };
static const char *cpu_name;
static const char *machine_name;
static char command_line[COMMAND_LINE_SIZE];

static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel text",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *cache_types[16] = {
	"write-through",
	"write-back",
	"write-back",
	"undefined 3",
	"undefined 4",
	"undefined 5",
	"write-back",
	"write-back",
	"undefined 8",
	"undefined 9",
	"undefined 10",
	"undefined 11",
	"undefined 12",
	"undefined 13",
	"write-back",
	"undefined 15",
};

static const char *cache_clean[16] = {
	"not required",
	"read-block",
	"cp15 c7 ops",
	"undefined 3",
	"undefined 4",
	"undefined 5",
	"cp15 c7 ops",
	"cp15 c7 ops",
	"undefined 8",
	"undefined 9",
	"undefined 10",
	"undefined 11",
	"undefined 12",
	"undefined 13",
	"cp15 c7 ops",
	"undefined 15",
};

static const char *cache_lockdown[16] = {
	"not supported",
	"not supported",
	"not supported",
	"undefined 3",
	"undefined 4",
	"undefined 5",
	"format A",
	"format B",
	"undefined 8",
	"undefined 9",
	"undefined 10",
	"undefined 11",
	"undefined 12",
	"undefined 13",
	"format C",
	"undefined 15",
};

static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"?(11)",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};

#define CACHE_TYPE(x)	(((x) >> 25) & 15)
#define CACHE_S(x)	((x) & (1 << 24))
#define CACHE_DSIZE(x)	(((x) >> 12) & 4095)	/* only if S=1 */
#define CACHE_ISIZE(x)	((x) & 4095)

#define CACHE_SIZE(y)	(((y) >> 6) & 7)
#define CACHE_ASSOC(y)	(((y) >> 3) & 7)
#define CACHE_M(y)	((y) & (1 << 2))
#define CACHE_LINE(y)	((y) & 3)

static inline void dump_cache(const char *prefix, int cpu, unsigned int cache)
{
	unsigned int mult = 2 + (CACHE_M(cache) ? 1 : 0);

	printk("CPU%u: %s: %d bytes, associativity %d, %d byte lines, %d sets\n",
		cpu, prefix,
		mult << (8 + CACHE_SIZE(cache)),
		(mult << CACHE_ASSOC(cache)) >> 1,
		8 << CACHE_LINE(cache),
		1 << (6 + CACHE_SIZE(cache) - CACHE_ASSOC(cache) -
			CACHE_LINE(cache)));
}

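/*
 * Print this CPU's cache configuration at boot, decoding the cache
 * type register via the CACHE_* macros above.
 */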
static void __init dump_cpu_info(int cpu)
{
	unsigned int info = read_cpuid(CPUID_CACHETYPE);

	if (info != processor_id) {
		printk("CPU%u: D %s %s cache\n", cpu, cache_is_vivt() ? "VIVT" : "VIPT",
		       cache_types[CACHE_TYPE(info)]);
		if (CACHE_S(info)) {
			dump_cache("I cache", cpu, CACHE_ISIZE(info));
			dump_cache("D cache", cpu, CACHE_DSIZE(info));
		} else {
			dump_cache("cache", cpu, CACHE_ISIZE(info));
		}
	}

	if (arch_is_coherent())
		printk("Cache coherency enabled\n");
}

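/*
 * Work out which CPU architecture version we are running on from the
 * layout of the CPU ID register (processor_id).
 */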
int cpu_architecture(void)
{
	int cpu_arch;

	if ((processor_id & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((processor_id & 0x0008f000) == 0x00007000) {
		cpu_arch = (processor_id & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((processor_id & 0x00080000) == 0x00000000) {
		cpu_arch = (processor_id >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else {
		/* the revised CPUID */
		cpu_arch = ((processor_id >> 12) & 0xf) - 0xb + CPU_ARCH_ARMv6;
	}

	return cpu_arch;
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);
extern struct machine_desc *lookup_machine_type(unsigned int);

static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(processor_id);
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", processor_id);
		while (1);
	}

	cpu_name = list->cpu_name;

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
	       cpu_name, processor_id, (int)processor_id & 15,
	       proc_arch[cpu_architecture()], cr_alignment);

	sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS);
	sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;
#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~HWCAP_THUMB;
#endif
#ifndef CONFIG_VFP
	elf_hwcap &= ~HWCAP_VFP;
#endif
#ifndef CONFIG_IWMMXT
	elf_hwcap &= ~HWCAP_IWMMXT;
#endif

	cpu_proc_init();
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init dumps the cache information, initialises SMP specific
 * information, and sets up the per-CPU stacks.
 */
void cpu_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	if (system_state == SYSTEM_BOOTING)
		dump_cpu_info(cpu);

	/*
	 * setup stacks for re-entrant exception handlers
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	sp, %0, %2\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	sp, %0, %4\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	sp, %0, %6\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      "I" (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      "I" (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      "I" (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      "I" (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
}

static struct machine_desc * __init setup_machine(unsigned int nr)
{
	struct machine_desc *list;

	/*
	 * locate machine in the list of supported machines.
	 */
	list = lookup_machine_type(nr);
	if (!list) {
		printk("Machine configuration botched (nr %d), unable "
		       "to continue.\n", nr);
		while (1);
	}

	printk("Machine: %s\n", list->name);

	return list;
}

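/*
 * Parse the "initrd=start,size" command line parameter and record the
 * start address and size of the initial ramdisk.
 */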
static void __init early_initrd(char **p)
{
	unsigned long start, size;

	start = memparse(*p, p);
	if (**p == ',') {
		size = memparse((*p) + 1, p);

		phys_initrd_start = start;
		phys_initrd_size = size;
	}
}
__early_param("initrd=", early_initrd);

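/*
 * Record a bank of memory in the meminfo array.
 */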
static void __init arm_add_memory(unsigned long start, unsigned long size)
{
	struct membank *bank;

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is appropriately rounded down, start is rounded up.
	 */
	size -= start & ~PAGE_MASK;

	bank = &meminfo.bank[meminfo.nr_banks++];

	bank->start = PAGE_ALIGN(start);
	bank->size  = size & PAGE_MASK;
	bank->node  = PHYS_TO_NID(start);
}

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
static void __init early_mem(char **p)
{
	static int usermem __initdata = 0;
	unsigned long size, start;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		meminfo.nr_banks = 0;
	}

	start = PHYS_OFFSET;
	size  = memparse(*p, p);
	if (**p == '@')
		start = memparse(*p + 1, p);

	arm_add_memory(start, size);
}
__early_param("mem=", early_mem);

/*
 * Initial parsing of the command line.
 */
static void __init parse_cmdline(char **cmdline_p, char *from)
{
	char c = ' ', *to = command_line;
	int len = 0;

	for (;;) {
		if (c == ' ') {
			extern struct early_params __early_begin, __early_end;
			struct early_params *p;

			for (p = &__early_begin; p < &__early_end; p++) {
				int len = strlen(p->arg);

				if (memcmp(from, p->arg, len) == 0) {
					if (to != command_line)
						to -= 1;
					from += len;
					p->fn(&from);

					while (*from != ' ' && *from != '\0')
						from++;
					break;
				}
			}
		}
		c = *from++;
		if (!c)
			break;
		if (COMMAND_LINE_SIZE <= ++len)
			break;
		*to++ = c;
	}
	*to = '\0';
	*cmdline_p = command_line;
}

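/*
 * Pass the RAM disk parameters (load flag, prompt flag, start block
 * and size) on to the block RAM disk driver, if it is configured in.
 */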
static void __init
setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
{
#ifdef CONFIG_BLK_DEV_RAM
	extern int rd_size, rd_image_start, rd_prompt, rd_doload;

	rd_image_start = image_start;
	rd_prompt = prompt;
	rd_doload = doload;

	if (rd_sz)
		rd_size = rd_sz;
#endif
}

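/*
 * Register the standard resources: one "System RAM" resource per
 * memory bank, with the kernel text and data claimed inside the bank
 * that contains them, plus the optional video RAM and legacy parallel
 * port regions described by the machine descriptor.
 */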
static void __init
request_standard_resources(struct meminfo *mi, struct machine_desc *mdesc)
{
	struct resource *res;
	int i;

	kernel_code.start   = virt_to_phys(&_text);
	kernel_code.end     = virt_to_phys(&_etext - 1);
	kernel_data.start   = virt_to_phys(&__data_start);
	kernel_data.end     = virt_to_phys(&_end - 1);

	for (i = 0; i < mi->nr_banks; i++) {
		unsigned long virt_start, virt_end;

		if (mi->bank[i].size == 0)
			continue;

		virt_start = __phys_to_virt(mi->bank[i].start);
		virt_end   = virt_start + mi->bank[i].size - 1;

		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = __virt_to_phys(virt_start);
		res->end   = __virt_to_phys(virt_end);
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}

/*
 * Tag parsing.
 *
 * This is the new way of passing data to the kernel at boot time.  Rather
 * than passing a fixed inflexible structure to the kernel, we pass a list
 * of variable-sized tags to the kernel.  The first tag must be an ATAG_CORE
 * tag for the list to be recognised (to distinguish the tagged list from
 * a param_struct).  The list is terminated with a zero-length tag (this tag
 * is not parsed in any way).
 */
static int __init parse_tag_core(const struct tag *tag)
{
	if (tag->hdr.size > 2) {
		if ((tag->u.core.flags & 1) == 0)
			root_mountflags &= ~MS_RDONLY;
		ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
	}
	return 0;
}

__tagtable(ATAG_CORE, parse_tag_core);

static int __init parse_tag_mem32(const struct tag *tag)
{
	if (meminfo.nr_banks >= NR_BANKS) {
		printk(KERN_WARNING
		       "Ignoring memory bank 0x%08x size %dKB\n",
		       tag->u.mem.start, tag->u.mem.size / 1024);
		return -EINVAL;
	}
	arm_add_memory(tag->u.mem.start, tag->u.mem.size);
	return 0;
}

__tagtable(ATAG_MEM, parse_tag_mem32);

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
	.orig_video_lines	= 30,
	.orig_video_cols	= 80,
	.orig_video_mode	= 0,
	.orig_video_ega_bx	= 0,
	.orig_video_isVGA	= 1,
	.orig_video_points	= 8
};

static int __init parse_tag_videotext(const struct tag *tag)
{
	screen_info.orig_x            = tag->u.videotext.x;
	screen_info.orig_y            = tag->u.videotext.y;
	screen_info.orig_video_page   = tag->u.videotext.video_page;
	screen_info.orig_video_mode   = tag->u.videotext.video_mode;
	screen_info.orig_video_cols   = tag->u.videotext.video_cols;
	screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
	screen_info.orig_video_lines  = tag->u.videotext.video_lines;
	screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga;
	screen_info.orig_video_points = tag->u.videotext.video_points;
	return 0;
}

__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
#endif

static int __init parse_tag_ramdisk(const struct tag *tag)
{
	setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
		      (tag->u.ramdisk.flags & 2) == 0,
		      tag->u.ramdisk.start, tag->u.ramdisk.size);
	return 0;
}

__tagtable(ATAG_RAMDISK, parse_tag_ramdisk);

static int __init parse_tag_initrd(const struct tag *tag)
{
	printk(KERN_WARNING "ATAG_INITRD is deprecated; "
	       "please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);

static int __init parse_tag_serialnr(const struct tag *tag)
{
	system_serial_low = tag->u.serialnr.low;
	system_serial_high = tag->u.serialnr.high;
	return 0;
}

__tagtable(ATAG_SERIAL, parse_tag_serialnr);

static int __init parse_tag_revision(const struct tag *tag)
{
	system_rev = tag->u.revision.rev;
	return 0;
}

__tagtable(ATAG_REVISION, parse_tag_revision);

static int __init parse_tag_cmdline(const struct tag *tag)
{
	strlcpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE);
	return 0;
}

__tagtable(ATAG_CMDLINE, parse_tag_cmdline);

/*
 * Scan the tag table for this tag, and call its parse function.
 * The tag table is built by the linker from all the __tagtable
 * declarations.
 */
static int __init parse_tag(const struct tag *tag)
{
	extern struct tagtable __tagtable_begin, __tagtable_end;
	struct tagtable *t;

	for (t = &__tagtable_begin; t < &__tagtable_end; t++)
		if (tag->hdr.tag == t->tag) {
			t->parse(tag);
			break;
		}

	return t < &__tagtable_end;
}

/*
 * Parse all tags in the list, checking both the global and architecture
 * specific tag tables.
 */
static void __init parse_tags(const struct tag *t)
{
	for (; t->hdr.size; t = tag_next(t))
		if (!parse_tag(t))
			printk(KERN_WARNING
			       "Ignoring unrecognised tag 0x%08x\n",
			       t->hdr.tag);
}

/*
 * This holds our defaults.
 */
static struct init_tags {
	struct tag_header hdr1;
	struct tag_core   core;
	struct tag_header hdr2;
	struct tag_mem32  mem;
	struct tag_header hdr3;
} init_tags __initdata = {
	{ tag_size(tag_core), ATAG_CORE },
	{ 1, PAGE_SIZE, 0xff },
	{ tag_size(tag_mem32), ATAG_MEM },
	{ MEM_SIZE, PHYS_OFFSET },
	{ 0, ATAG_NONE }
};

static void (*init_machine)(void) __initdata;

static int __init customize_machine(void)
{
	/* customizes platform devices, or adds new ones */
	if (init_machine)
		init_machine();
	return 0;
}
arch_initcall(customize_machine);

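/*
 * setup_arch - architecture-specific boot-time initialisation.
 *
 * Identify the CPU and machine, convert or parse the boot tags, set up
 * the kernel command line, memory layout and standard resources, and
 * record the machine-specific init hooks.
 */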
void __init setup_arch(char **cmdline_p)
{
	struct tag *tags = (struct tag *)&init_tags;
	struct machine_desc *mdesc;
	char *from = default_command_line;

	setup_processor();
	mdesc = setup_machine(machine_arch_type);
	machine_name = mdesc->name;

	if (mdesc->soft_reboot)
		reboot_setup("s");

	if (mdesc->boot_params)
		tags = phys_to_virt(mdesc->boot_params);

	/*
	 * If we have the old style parameters, convert them to
	 * a tag list.
	 */
	if (tags->hdr.tag != ATAG_CORE)
		convert_to_tag_list(tags);
	if (tags->hdr.tag != ATAG_CORE)
		tags = (struct tag *)&init_tags;

	if (mdesc->fixup)
		mdesc->fixup(mdesc, tags, &from, &meminfo);

	if (tags->hdr.tag == ATAG_CORE) {
		if (meminfo.nr_banks != 0)
			squash_mem_tags(tags);
		parse_tags(tags);
	}

	init_mm.start_code = (unsigned long) &_text;
	init_mm.end_code   = (unsigned long) &_etext;
	init_mm.end_data   = (unsigned long) &_edata;
	init_mm.brk	   = (unsigned long) &_end;

	memcpy(saved_command_line, from, COMMAND_LINE_SIZE);
	saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
	parse_cmdline(cmdline_p, from);
	paging_init(&meminfo, mdesc);
	request_standard_resources(&meminfo, mdesc);

#ifdef CONFIG_SMP
	smp_init_cpus();
#endif

	cpu_init();

	/*
	 * Set up various architecture-specific pointers
	 */
	init_arch_irq = mdesc->init_irq;
	system_timer = mdesc->timer;
	init_machine = mdesc->init_machine;

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
}

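/*
 * Register a CPU device for each possible CPU so that they appear
 * under sysfs.
 */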
static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		register_cpu(&per_cpu(cpu_data, cpu).cpu, cpu);

	return 0;
}

subsys_initcall(topology_init);

static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	NULL
};

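/*
 * Print one cache's geometry (size, associativity, line length and
 * number of sets) for /proc/cpuinfo.
 */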
static void
c_show_cache(struct seq_file *m, const char *type, unsigned int cache)
{
	unsigned int mult = 2 + (CACHE_M(cache) ? 1 : 0);

	seq_printf(m, "%s size\t\t: %d\n"
		      "%s assoc\t\t: %d\n"
		      "%s line length\t: %d\n"
		      "%s sets\t\t: %d\n",
		type, mult << (8 + CACHE_SIZE(cache)),
		type, (mult << CACHE_ASSOC(cache)) >> 1,
		type, 8 << CACHE_LINE(cache),
		type, 1 << (6 + CACHE_SIZE(cache) - CACHE_ASSOC(cache) -
			    CACHE_LINE(cache)));
}

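/*
 * Generate the contents of /proc/cpuinfo: processor identification,
 * BogoMIPS, hardware capability flags, cache details and the machine
 * hardware/revision/serial information.
 */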
static int c_show(struct seq_file *m, void *v)
{
	int i;

	seq_printf(m, "Processor\t: %s rev %d (%s)\n",
		   cpu_name, (int)processor_id & 15, elf_platform);

#if defined(CONFIG_SMP)
	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
	}
#else /* CONFIG_SMP */
	seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
		   loops_per_jiffy / (500000/HZ),
		   (loops_per_jiffy / (5000/HZ)) % 100);
#endif

	/* dump out the processor features */
	seq_puts(m, "Features\t: ");

	for (i = 0; hwcap_str[i]; i++)
		if (elf_hwcap & (1 << i))
			seq_printf(m, "%s ", hwcap_str[i]);

	seq_printf(m, "\nCPU implementer\t: 0x%02x\n", processor_id >> 24);
	seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);

	if ((processor_id & 0x0008f000) == 0x00000000) {
		/* pre-ARM7 */
		seq_printf(m, "CPU part\t\t: %07x\n", processor_id >> 4);
	} else {
		if ((processor_id & 0x0008f000) == 0x00007000) {
			/* ARM7 */
			seq_printf(m, "CPU variant\t: 0x%02x\n",
				   (processor_id >> 16) & 127);
		} else {
			/* post-ARM7 */
			seq_printf(m, "CPU variant\t: 0x%x\n",
				   (processor_id >> 20) & 15);
		}
		seq_printf(m, "CPU part\t: 0x%03x\n",
			   (processor_id >> 4) & 0xfff);
	}
	seq_printf(m, "CPU revision\t: %d\n", processor_id & 15);

	{
		unsigned int cache_info = read_cpuid(CPUID_CACHETYPE);
		if (cache_info != processor_id) {
			seq_printf(m, "Cache type\t: %s\n"
				      "Cache clean\t: %s\n"
				      "Cache lockdown\t: %s\n"
				      "Cache format\t: %s\n",
				   cache_types[CACHE_TYPE(cache_info)],
				   cache_clean[CACHE_TYPE(cache_info)],
				   cache_lockdown[CACHE_TYPE(cache_info)],
				   CACHE_S(cache_info) ? "Harvard" : "Unified");

			if (CACHE_S(cache_info)) {
				c_show_cache(m, "I", CACHE_ISIZE(cache_info));
				c_show_cache(m, "D", CACHE_DSIZE(cache_info));
			} else {
				c_show_cache(m, "Cache", CACHE_ISIZE(cache_info));
			}
		}
	}

	seq_puts(m, "\n");

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};