ARM: 5580/2: ARM TCM (Tightly-Coupled Memory) support v3
1 /*
2 * linux/arch/arm/kernel/setup.c
3 *
4 * Copyright (C) 1995-2001 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10 #include <linux/module.h>
11 #include <linux/kernel.h>
12 #include <linux/stddef.h>
13 #include <linux/ioport.h>
14 #include <linux/delay.h>
15 #include <linux/utsname.h>
16 #include <linux/initrd.h>
17 #include <linux/console.h>
18 #include <linux/bootmem.h>
19 #include <linux/seq_file.h>
20 #include <linux/screen_info.h>
21 #include <linux/init.h>
22 #include <linux/root_dev.h>
23 #include <linux/cpu.h>
24 #include <linux/interrupt.h>
25 #include <linux/smp.h>
26 #include <linux/fs.h>
27
28 #include <asm/unified.h>
29 #include <asm/cpu.h>
30 #include <asm/cputype.h>
31 #include <asm/elf.h>
32 #include <asm/procinfo.h>
33 #include <asm/sections.h>
34 #include <asm/setup.h>
35 #include <asm/mach-types.h>
36 #include <asm/cacheflush.h>
37 #include <asm/cachetype.h>
38 #include <asm/tlbflush.h>
39
40 #include <asm/mach/arch.h>
41 #include <asm/mach/irq.h>
42 #include <asm/mach/time.h>
43 #include <asm/traps.h>
44 #include <asm/unwind.h>
45
46 #include "compat.h"
47 #include "atags.h"
48 #include "tcm.h"
49
50 #ifndef MEM_SIZE
51 #define MEM_SIZE (16*1024*1024)
52 #endif
53
54 #if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
55 char fpe_type[8];
56
57 static int __init fpe_setup(char *line)
58 {
59 memcpy(fpe_type, line, 8);
60 return 1;
61 }
62
63 __setup("fpe=", fpe_setup);
64 #endif
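/*
 * Illustrative note (assuming one of the FPE emulators above is built
 * in): booting with "fpe=nwfpe" copies that name into fpe_type, and the
 * emulator's own init code later checks fpe_type to decide whether it
 * should register itself.
 */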
65
66 extern void paging_init(struct machine_desc *desc);
67 extern void reboot_setup(char *str);
68
69 unsigned int processor_id;
70 EXPORT_SYMBOL(processor_id);
71 unsigned int __machine_arch_type;
72 EXPORT_SYMBOL(__machine_arch_type);
73 unsigned int cacheid;
74 EXPORT_SYMBOL(cacheid);
75
76 unsigned int __atags_pointer __initdata;
77
78 unsigned int system_rev;
79 EXPORT_SYMBOL(system_rev);
80
81 unsigned int system_serial_low;
82 EXPORT_SYMBOL(system_serial_low);
83
84 unsigned int system_serial_high;
85 EXPORT_SYMBOL(system_serial_high);
86
87 unsigned int elf_hwcap;
88 EXPORT_SYMBOL(elf_hwcap);
89
90
91 #ifdef MULTI_CPU
92 struct processor processor;
93 #endif
94 #ifdef MULTI_TLB
95 struct cpu_tlb_fns cpu_tlb;
96 #endif
97 #ifdef MULTI_USER
98 struct cpu_user_fns cpu_user;
99 #endif
100 #ifdef MULTI_CACHE
101 struct cpu_cache_fns cpu_cache;
102 #endif
103 #ifdef CONFIG_OUTER_CACHE
104 struct outer_cache_fns outer_cache;
105 #endif
106
107 struct stack {
108 u32 irq[3];
109 u32 abt[3];
110 u32 und[3];
111 } ____cacheline_aligned;
112
113 static struct stack stacks[NR_CPUS];
114
115 char elf_platform[ELF_PLATFORM_SIZE];
116 EXPORT_SYMBOL(elf_platform);
117
118 static const char *cpu_name;
119 static const char *machine_name;
120 static char __initdata command_line[COMMAND_LINE_SIZE];
121
122 static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
123 static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
124 #define ENDIANNESS ((char)endian_test.l)
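/*
 * endian_test stores the bytes 'l', '?', '?', 'b'; reading them back as
 * a long and truncating to a char picks out the least significant byte,
 * so ENDIANNESS evaluates to 'l' on a little-endian CPU and 'b' on a
 * big-endian one.  That character is appended to the machine and ELF
 * platform names in setup_processor() (e.g. "armv5tejl").
 */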
125
126 DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
127
128 /*
129 * Standard memory resources
130 */
131 static struct resource mem_res[] = {
132 {
133 .name = "Video RAM",
134 .start = 0,
135 .end = 0,
136 .flags = IORESOURCE_MEM
137 },
138 {
139 .name = "Kernel text",
140 .start = 0,
141 .end = 0,
142 .flags = IORESOURCE_MEM
143 },
144 {
145 .name = "Kernel data",
146 .start = 0,
147 .end = 0,
148 .flags = IORESOURCE_MEM
149 }
150 };
151
152 #define video_ram mem_res[0]
153 #define kernel_code mem_res[1]
154 #define kernel_data mem_res[2]
155
156 static struct resource io_res[] = {
157 {
158 .name = "reserved",
159 .start = 0x3bc,
160 .end = 0x3be,
161 .flags = IORESOURCE_IO | IORESOURCE_BUSY
162 },
163 {
164 .name = "reserved",
165 .start = 0x378,
166 .end = 0x37f,
167 .flags = IORESOURCE_IO | IORESOURCE_BUSY
168 },
169 {
170 .name = "reserved",
171 .start = 0x278,
172 .end = 0x27f,
173 .flags = IORESOURCE_IO | IORESOURCE_BUSY
174 }
175 };
176
177 #define lp0 io_res[0]
178 #define lp1 io_res[1]
179 #define lp2 io_res[2]
180
181 static const char *proc_arch[] = {
182 "undefined/unknown",
183 "3",
184 "4",
185 "4T",
186 "5",
187 "5T",
188 "5TE",
189 "5TEJ",
190 "6TEJ",
191 "7",
192 "?(11)",
193 "?(12)",
194 "?(13)",
195 "?(14)",
196 "?(15)",
197 "?(16)",
198 "?(17)",
199 };
200
201 int cpu_architecture(void)
202 {
203 int cpu_arch;
204
205 if ((read_cpuid_id() & 0x0008f000) == 0) {
206 cpu_arch = CPU_ARCH_UNKNOWN;
207 } else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
208 cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
209 } else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
210 cpu_arch = (read_cpuid_id() >> 16) & 7;
211 if (cpu_arch)
212 cpu_arch += CPU_ARCH_ARMv3;
213 } else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
214 unsigned int mmfr0;
215
216 /* Revised CPUID format. Read the Memory Model Feature
217 * Register 0 and check for VMSAv7 or PMSAv7 */
218 asm("mrc p15, 0, %0, c0, c1, 4"
219 : "=r" (mmfr0));
220 if ((mmfr0 & 0x0000000f) == 0x00000003 ||
221 (mmfr0 & 0x000000f0) == 0x00000030)
222 cpu_arch = CPU_ARCH_ARMv7;
223 else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
224 (mmfr0 & 0x000000f0) == 0x00000020)
225 cpu_arch = CPU_ARCH_ARMv6;
226 else
227 cpu_arch = CPU_ARCH_UNKNOWN;
228 } else
229 cpu_arch = CPU_ARCH_UNKNOWN;
230
231 return cpu_arch;
232 }
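/*
 * Example: ARMv7 cores report 0xF in the MIDR architecture field
 * (bits [19:16]), so they take the "revised CPUID format" branch above
 * and are classified via MMFR0 as CPU_ARCH_ARMv7 when a VMSAv7 or
 * PMSAv7 memory model is advertised.
 */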
233
234 static void __init cacheid_init(void)
235 {
236 unsigned int cachetype = read_cpuid_cachetype();
237 unsigned int arch = cpu_architecture();
238
239 if (arch >= CPU_ARCH_ARMv6) {
240 if ((cachetype & (7 << 29)) == 4 << 29) {
241 /* ARMv7 register format */
242 cacheid = CACHEID_VIPT_NONALIASING;
243 if ((cachetype & (3 << 14)) == 1 << 14)
244 cacheid |= CACHEID_ASID_TAGGED;
245 } else if (cachetype & (1 << 23))
246 cacheid = CACHEID_VIPT_ALIASING;
247 else
248 cacheid = CACHEID_VIPT_NONALIASING;
249 } else {
250 cacheid = CACHEID_VIVT;
251 }
252
253 printk("CPU: %s data cache, %s instruction cache\n",
254 cache_is_vivt() ? "VIVT" :
255 cache_is_vipt_aliasing() ? "VIPT aliasing" :
256 cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown",
257 cache_is_vivt() ? "VIVT" :
258 icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
259 cache_is_vipt_aliasing() ? "VIPT aliasing" :
260 cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
261 }
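/*
 * Decoding notes for the cache type register used above: a value of 4
 * in bits [31:29] selects the ARMv7 CTR layout, where an L1Ip field
 * (bits [15:14]) of 1 means an ASID-tagged VIVT instruction cache.  In
 * the older layout, bit 23 set is taken to mean the cache can alias
 * (VIPT aliasing); otherwise it is treated as VIPT non-aliasing.
 */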
262
263 /*
264 * These functions re-use the assembly code in head.S, which
265  * already provides the required functionality.
266 */
267 extern struct proc_info_list *lookup_processor_type(unsigned int);
268 extern struct machine_desc *lookup_machine_type(unsigned int);
269
270 static void __init setup_processor(void)
271 {
272 struct proc_info_list *list;
273
274 /*
275  * locate the processor in the list of supported processor
276 * types. The linker builds this table for us from the
277 * entries in arch/arm/mm/proc-*.S
278 */
279 list = lookup_processor_type(read_cpuid_id());
280 if (!list) {
281 printk("CPU configuration botched (ID %08x), unable "
282 "to continue.\n", read_cpuid_id());
283 while (1);
284 }
285
286 cpu_name = list->cpu_name;
287
288 #ifdef MULTI_CPU
289 processor = *list->proc;
290 #endif
291 #ifdef MULTI_TLB
292 cpu_tlb = *list->tlb;
293 #endif
294 #ifdef MULTI_USER
295 cpu_user = *list->user;
296 #endif
297 #ifdef MULTI_CACHE
298 cpu_cache = *list->cache;
299 #endif
300
301 printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
302 cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
303 proc_arch[cpu_architecture()], cr_alignment);
304
305 sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS);
306 sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
307 elf_hwcap = list->elf_hwcap;
308 #ifndef CONFIG_ARM_THUMB
309 elf_hwcap &= ~HWCAP_THUMB;
310 #endif
311
312 cacheid_init();
313 cpu_proc_init();
314 }
315
316 /*
317 * cpu_init - initialise one CPU.
318 *
319 * cpu_init sets up the per-CPU stacks.
320 */
321 void cpu_init(void)
322 {
323 unsigned int cpu = smp_processor_id();
324 struct stack *stk = &stacks[cpu];
325
326 if (cpu >= NR_CPUS) {
327 printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
328 BUG();
329 }
330
331 /*
332 * Define the placement constraint for the inline asm directive below.
333 * In Thumb-2, msr with an immediate value is not allowed.
334 */
335 #ifdef CONFIG_THUMB2_KERNEL
336 #define PLC "r"
337 #else
338 #define PLC "I"
339 #endif
340
341 /*
342  * set up the stacks for re-entrant exception handlers
343 */
344 __asm__ (
345 "msr cpsr_c, %1\n\t"
346 "add r14, %0, %2\n\t"
347 "mov sp, r14\n\t"
348 "msr cpsr_c, %3\n\t"
349 "add r14, %0, %4\n\t"
350 "mov sp, r14\n\t"
351 "msr cpsr_c, %5\n\t"
352 "add r14, %0, %6\n\t"
353 "mov sp, r14\n\t"
354 "msr cpsr_c, %7"
355 :
356 : "r" (stk),
357 PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
358 "I" (offsetof(struct stack, irq[0])),
359 PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
360 "I" (offsetof(struct stack, abt[0])),
361 PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
362 "I" (offsetof(struct stack, und[0])),
363 PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
364 : "r14");
365 }
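/*
 * Each per-mode stack above is only three words deep: the vector stubs
 * in entry-armv.S use it just to stash r0, lr and the spsr before
 * switching to SVC mode, which is why sp is simply pointed at the start
 * of the corresponding array.
 */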
366
367 static struct machine_desc * __init setup_machine(unsigned int nr)
368 {
369 struct machine_desc *list;
370
371 /*
372  * locate the machine in the list of supported machines.
373 */
374 list = lookup_machine_type(nr);
375 if (!list) {
376 printk("Machine configuration botched (nr %d), unable "
377 "to continue.\n", nr);
378 while (1);
379 }
380
381 printk("Machine: %s\n", list->name);
382
383 return list;
384 }
385
386 static int __init arm_add_memory(unsigned long start, unsigned long size)
387 {
388 struct membank *bank = &meminfo.bank[meminfo.nr_banks];
389
390 if (meminfo.nr_banks >= NR_BANKS) {
391 printk(KERN_CRIT "NR_BANKS too low, "
392 "ignoring memory at %#lx\n", start);
393 return -EINVAL;
394 }
395
396 /*
397 * Ensure that start/size are aligned to a page boundary.
398 * Size is appropriately rounded down, start is rounded up.
399 */
400 size -= start & ~PAGE_MASK;
401 bank->start = PAGE_ALIGN(start);
402 bank->size = size & PAGE_MASK;
403 bank->node = PHYS_TO_NID(start);
404
405 /*
406  * Reject this memory region if it has zero size or an
407  * invalid node number.
408 */
409 if (bank->size == 0 || bank->node >= MAX_NUMNODES)
410 return -EINVAL;
411
412 meminfo.nr_banks++;
413 return 0;
414 }
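/*
 * Worked example of the rounding above (4 KiB pages): start = 0x80000100,
 * size = 0x100000 becomes bank->start = 0x80001000 (rounded up) and
 * bank->size = 0xff000 (0x100000 - 0x100, then truncated down to whole
 * pages).
 */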
415
416 /*
417 * Pick out the memory size. We look for mem=size@start,
418  * where start and size are given in the form "size[KkMm]"
419 */
420 static void __init early_mem(char **p)
421 {
422 static int usermem __initdata = 0;
423 unsigned long size, start;
424
425 /*
426 * If the user specifies memory size, we
427 * blow away any automatically generated
428 * size.
429 */
430 if (usermem == 0) {
431 usermem = 1;
432 meminfo.nr_banks = 0;
433 }
434
435 start = PHYS_OFFSET;
436 size = memparse(*p, p);
437 if (**p == '@')
438 start = memparse(*p + 1, p);
439
440 arm_add_memory(start, size);
441 }
442 __early_param("mem=", early_mem);
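/*
 * Usage example: "mem=64M@0x80000000" registers a single 64 MiB bank at
 * that physical address ("@start" may be omitted, in which case
 * PHYS_OFFSET is used).  Passing any mem= option discards the banks
 * described by the boot loader, so all banks must then be listed
 * explicitly.
 */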
443
444 /*
445 * Initial parsing of the command line.
446 */
447 static void __init parse_cmdline(char **cmdline_p, char *from)
448 {
449 char c = ' ', *to = command_line;
450 int len = 0;
451
452 for (;;) {
453 if (c == ' ') {
454 extern struct early_params __early_begin, __early_end;
455 struct early_params *p;
456
457 for (p = &__early_begin; p < &__early_end; p++) {
458 int arglen = strlen(p->arg);
459
460 if (memcmp(from, p->arg, arglen) == 0) {
461 if (to != command_line)
462 to -= 1;
463 from += arglen;
464 p->fn(&from);
465
466 while (*from != ' ' && *from != '\0')
467 from++;
468 break;
469 }
470 }
471 }
472 c = *from++;
473 if (!c)
474 break;
475 if (COMMAND_LINE_SIZE <= ++len)
476 break;
477 *to++ = c;
478 }
479 *to = '\0';
480 *cmdline_p = command_line;
481 }
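/*
 * The __early_begin..__early_end table scanned above is assembled by
 * the linker from every __early_param() declaration (such as "mem="
 * above).  When an option matches, its handler consumes the argument
 * text and the option is dropped from the command line returned through
 * cmdline_p.
 */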
482
483 static void __init
484 setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
485 {
486 #ifdef CONFIG_BLK_DEV_RAM
487 extern int rd_size, rd_image_start, rd_prompt, rd_doload;
488
489 rd_image_start = image_start;
490 rd_prompt = prompt;
491 rd_doload = doload;
492
493 if (rd_sz)
494 rd_size = rd_sz;
495 #endif
496 }
497
498 static void __init
499 request_standard_resources(struct meminfo *mi, struct machine_desc *mdesc)
500 {
501 struct resource *res;
502 int i;
503
504 kernel_code.start = virt_to_phys(_text);
505 kernel_code.end = virt_to_phys(_etext - 1);
506 kernel_data.start = virt_to_phys(_data);
507 kernel_data.end = virt_to_phys(_end - 1);
508
509 for (i = 0; i < mi->nr_banks; i++) {
510 if (mi->bank[i].size == 0)
511 continue;
512
513 res = alloc_bootmem_low(sizeof(*res));
514 res->name = "System RAM";
515 res->start = mi->bank[i].start;
516 res->end = mi->bank[i].start + mi->bank[i].size - 1;
517 res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
518
519 request_resource(&iomem_resource, res);
520
521 if (kernel_code.start >= res->start &&
522 kernel_code.end <= res->end)
523 request_resource(res, &kernel_code);
524 if (kernel_data.start >= res->start &&
525 kernel_data.end <= res->end)
526 request_resource(res, &kernel_data);
527 }
528
529 if (mdesc->video_start) {
530 video_ram.start = mdesc->video_start;
531 video_ram.end = mdesc->video_end;
532 request_resource(&iomem_resource, &video_ram);
533 }
534
535 /*
536  * Some machines will never have lp0, lp1 or lp2, so only
537  * reserve these I/O regions when the machine description asks for it
538 */
539 if (mdesc->reserve_lp0)
540 request_resource(&ioport_resource, &lp0);
541 if (mdesc->reserve_lp1)
542 request_resource(&ioport_resource, &lp1);
543 if (mdesc->reserve_lp2)
544 request_resource(&ioport_resource, &lp2);
545 }
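/*
 * The net effect is the familiar /proc/iomem layout: one "System RAM"
 * entry per memory bank, with "Kernel text" and "Kernel data" nested
 * inside the bank that holds the kernel image.
 */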
546
547 /*
548 * Tag parsing.
549 *
550 * This is the new way of passing data to the kernel at boot time. Rather
551 * than passing a fixed inflexible structure to the kernel, we pass a list
552  * of variable-sized tags to the kernel. The first tag must be an ATAG_CORE
553 * tag for the list to be recognised (to distinguish the tagged list from
554 * a param_struct). The list is terminated with a zero-length tag (this tag
555 * is not parsed in any way).
556 */
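/*
 * A minimal tag list therefore looks like this (tag values as defined
 * in <asm/setup.h>):
 *
 *   ATAG_CORE  (0x54410001): flags, pagesize, rootdev
 *   ATAG_MEM   (0x54410002): size, start        (zero or more of these)
 *   ATAG_NONE  (0x00000000): zero-length terminator
 */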
557 static int __init parse_tag_core(const struct tag *tag)
558 {
559 if (tag->hdr.size > 2) {
560 if ((tag->u.core.flags & 1) == 0)
561 root_mountflags &= ~MS_RDONLY;
562 ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
563 }
564 return 0;
565 }
566
567 __tagtable(ATAG_CORE, parse_tag_core);
568
569 static int __init parse_tag_mem32(const struct tag *tag)
570 {
571 return arm_add_memory(tag->u.mem.start, tag->u.mem.size);
572 }
573
574 __tagtable(ATAG_MEM, parse_tag_mem32);
575
576 #if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
577 struct screen_info screen_info = {
578 .orig_video_lines = 30,
579 .orig_video_cols = 80,
580 .orig_video_mode = 0,
581 .orig_video_ega_bx = 0,
582 .orig_video_isVGA = 1,
583 .orig_video_points = 8
584 };
585
586 static int __init parse_tag_videotext(const struct tag *tag)
587 {
588 screen_info.orig_x = tag->u.videotext.x;
589 screen_info.orig_y = tag->u.videotext.y;
590 screen_info.orig_video_page = tag->u.videotext.video_page;
591 screen_info.orig_video_mode = tag->u.videotext.video_mode;
592 screen_info.orig_video_cols = tag->u.videotext.video_cols;
593 screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
594 screen_info.orig_video_lines = tag->u.videotext.video_lines;
595 screen_info.orig_video_isVGA = tag->u.videotext.video_isvga;
596 screen_info.orig_video_points = tag->u.videotext.video_points;
597 return 0;
598 }
599
600 __tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
601 #endif
602
603 static int __init parse_tag_ramdisk(const struct tag *tag)
604 {
605 setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
606 (tag->u.ramdisk.flags & 2) == 0,
607 tag->u.ramdisk.start, tag->u.ramdisk.size);
608 return 0;
609 }
610
611 __tagtable(ATAG_RAMDISK, parse_tag_ramdisk);
612
613 static int __init parse_tag_serialnr(const struct tag *tag)
614 {
615 system_serial_low = tag->u.serialnr.low;
616 system_serial_high = tag->u.serialnr.high;
617 return 0;
618 }
619
620 __tagtable(ATAG_SERIAL, parse_tag_serialnr);
621
622 static int __init parse_tag_revision(const struct tag *tag)
623 {
624 system_rev = tag->u.revision.rev;
625 return 0;
626 }
627
628 __tagtable(ATAG_REVISION, parse_tag_revision);
629
630 static int __init parse_tag_cmdline(const struct tag *tag)
631 {
632 strlcpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE);
633 return 0;
634 }
635
636 __tagtable(ATAG_CMDLINE, parse_tag_cmdline);
637
638 /*
639 * Scan the tag table for this tag, and call its parse function.
640 * The tag table is built by the linker from all the __tagtable
641 * declarations.
642 */
643 static int __init parse_tag(const struct tag *tag)
644 {
645 extern struct tagtable __tagtable_begin, __tagtable_end;
646 struct tagtable *t;
647
648 for (t = &__tagtable_begin; t < &__tagtable_end; t++)
649 if (tag->hdr.tag == t->tag) {
650 t->parse(tag);
651 break;
652 }
653
654 return t < &__tagtable_end;
655 }
656
657 /*
658 * Parse all tags in the list, checking both the global and architecture
659 * specific tag tables.
660 */
661 static void __init parse_tags(const struct tag *t)
662 {
663 for (; t->hdr.size; t = tag_next(t))
664 if (!parse_tag(t))
665 printk(KERN_WARNING
666 "Ignoring unrecognised tag 0x%08x\n",
667 t->hdr.tag);
668 }
669
670 /*
671 * This holds our defaults.
672 */
673 static struct init_tags {
674 struct tag_header hdr1;
675 struct tag_core core;
676 struct tag_header hdr2;
677 struct tag_mem32 mem;
678 struct tag_header hdr3;
679 } init_tags __initdata = {
680 { tag_size(tag_core), ATAG_CORE },
681 { 1, PAGE_SIZE, 0xff },
682 { tag_size(tag_mem32), ATAG_MEM },
683 { MEM_SIZE, PHYS_OFFSET },
684 { 0, ATAG_NONE }
685 };
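/*
 * These defaults describe a single MEM_SIZE bank starting at PHYS_OFFSET
 * with a read-only default root device; they take effect when neither
 * __atags_pointer nor the machine's boot_params yields a usable tag
 * list below.
 */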
686
687 static void (*init_machine)(void) __initdata;
688
689 static int __init customize_machine(void)
690 {
691 /* customizes platform devices, or adds new ones */
692 if (init_machine)
693 init_machine();
694 return 0;
695 }
696 arch_initcall(customize_machine);
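/*
 * init_machine is filled in by setup_arch() below from the machine
 * descriptor, so a board's init_machine() hook runs at arch_initcall
 * time: after the core and postcore initcalls, but before subsystem and
 * device initcalls.
 */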
697
698 void __init setup_arch(char **cmdline_p)
699 {
700 struct tag *tags = (struct tag *)&init_tags;
701 struct machine_desc *mdesc;
702 char *from = default_command_line;
703
704 unwind_init();
705
706 setup_processor();
707 mdesc = setup_machine(machine_arch_type);
708 machine_name = mdesc->name;
709
710 if (mdesc->soft_reboot)
711 reboot_setup("s");
712
713 if (__atags_pointer)
714 tags = phys_to_virt(__atags_pointer);
715 else if (mdesc->boot_params)
716 tags = phys_to_virt(mdesc->boot_params);
717
718 /*
719 * If we have the old style parameters, convert them to
720 * a tag list.
721 */
722 if (tags->hdr.tag != ATAG_CORE)
723 convert_to_tag_list(tags);
724 if (tags->hdr.tag != ATAG_CORE)
725 tags = (struct tag *)&init_tags;
726
727 if (mdesc->fixup)
728 mdesc->fixup(mdesc, tags, &from, &meminfo);
729
730 if (tags->hdr.tag == ATAG_CORE) {
731 if (meminfo.nr_banks != 0)
732 squash_mem_tags(tags);
733 save_atags(tags);
734 parse_tags(tags);
735 }
736
737 init_mm.start_code = (unsigned long) _text;
738 init_mm.end_code = (unsigned long) _etext;
739 init_mm.end_data = (unsigned long) _edata;
740 init_mm.brk = (unsigned long) _end;
741
742 memcpy(boot_command_line, from, COMMAND_LINE_SIZE);
743 boot_command_line[COMMAND_LINE_SIZE-1] = '\0';
744 parse_cmdline(cmdline_p, from);
745 paging_init(mdesc);
746 request_standard_resources(&meminfo, mdesc);
747
748 #ifdef CONFIG_SMP
749 smp_init_cpus();
750 #endif
751
752 cpu_init();
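/*
 * tcm_init() is part of the TCM support added by this patch: it probes
 * the CP15 TCM status register for ITCM/DTCM banks, maps any that are
 * present and copies the kernel's TCM-linked code and data into them.
 */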
753 tcm_init();
754
755 /*
756 * Set up various architecture-specific pointers
757 */
758 init_arch_irq = mdesc->init_irq;
759 system_timer = mdesc->timer;
760 init_machine = mdesc->init_machine;
761
762 #ifdef CONFIG_VT
763 #if defined(CONFIG_VGA_CONSOLE)
764 conswitchp = &vga_con;
765 #elif defined(CONFIG_DUMMY_CONSOLE)
766 conswitchp = &dummy_con;
767 #endif
768 #endif
769 early_trap_init();
770 }
771
772
773 static int __init topology_init(void)
774 {
775 int cpu;
776
777 for_each_possible_cpu(cpu) {
778 struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
779 cpuinfo->cpu.hotpluggable = 1;
780 register_cpu(&cpuinfo->cpu, cpu);
781 }
782
783 return 0;
784 }
785
786 subsys_initcall(topology_init);
787
788 static const char *hwcap_str[] = {
789 "swp",
790 "half",
791 "thumb",
792 "26bit",
793 "fastmult",
794 "fpa",
795 "vfp",
796 "edsp",
797 "java",
798 "iwmmxt",
799 "crunch",
800 "thumbee",
801 "neon",
802 "vfpv3",
803 "vfpv3d16",
804 NULL
805 };
806
807 static int c_show(struct seq_file *m, void *v)
808 {
809 int i;
810
811 seq_printf(m, "Processor\t: %s rev %d (%s)\n",
812 cpu_name, read_cpuid_id() & 15, elf_platform);
813
814 #if defined(CONFIG_SMP)
815 for_each_online_cpu(i) {
816 /*
817 * glibc reads /proc/cpuinfo to determine the number of
818 * online processors, looking for lines beginning with
819 * "processor". Give glibc what it expects.
820 */
821 seq_printf(m, "processor\t: %d\n", i);
822 seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
823 per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
824 (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
825 }
826 #else /* CONFIG_SMP */
827 seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
828 loops_per_jiffy / (500000/HZ),
829 (loops_per_jiffy / (5000/HZ)) % 100);
830 #endif
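/*
 * The value printed above is loops_per_jiffy * HZ / 500000: the integer
 * part comes from lpj / (500000 / HZ) and the two decimal places from
 * (lpj / (5000 / HZ)) % 100.
 */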
831
832 /* dump out the processor features */
833 seq_puts(m, "Features\t: ");
834
835 for (i = 0; hwcap_str[i]; i++)
836 if (elf_hwcap & (1 << i))
837 seq_printf(m, "%s ", hwcap_str[i]);
838
839 seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
840 seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);
841
842 if ((read_cpuid_id() & 0x0008f000) == 0x00000000) {
843 /* pre-ARM7 */
844 seq_printf(m, "CPU part\t: %07x\n", read_cpuid_id() >> 4);
845 } else {
846 if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
847 /* ARM7 */
848 seq_printf(m, "CPU variant\t: 0x%02x\n",
849 (read_cpuid_id() >> 16) & 127);
850 } else {
851 /* post-ARM7 */
852 seq_printf(m, "CPU variant\t: 0x%x\n",
853 (read_cpuid_id() >> 20) & 15);
854 }
855 seq_printf(m, "CPU part\t: 0x%03x\n",
856 (read_cpuid_id() >> 4) & 0xfff);
857 }
858 seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);
859
860 seq_puts(m, "\n");
861
862 seq_printf(m, "Hardware\t: %s\n", machine_name);
863 seq_printf(m, "Revision\t: %04x\n", system_rev);
864 seq_printf(m, "Serial\t\t: %08x%08x\n",
865 system_serial_high, system_serial_low);
866
867 return 0;
868 }
869
870 static void *c_start(struct seq_file *m, loff_t *pos)
871 {
872 return *pos < 1 ? (void *)1 : NULL;
873 }
874
875 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
876 {
877 ++*pos;
878 return NULL;
879 }
880
881 static void c_stop(struct seq_file *m, void *v)
882 {
883 }
884
885 const struct seq_operations cpuinfo_op = {
886 .start = c_start,
887 .next = c_next,
888 .stop = c_stop,
889 .show = c_show
890 };