ARM: 8318/1: treat CPU feature register fields as signed quantities
arch/arm/kernel/setup.c
/*
 * linux/arch/arm/kernel/setup.c
 *
 * Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/sort.h>

#include <asm/unified.h>
#include <asm/cp15.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
#include <asm/psci.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>

#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/system_info.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/memblock.h>
#include <asm/virt.h>

#include "atags.h"


#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void init_default_cache_policy(unsigned long);
extern void paging_init(const struct machine_desc *desc);
extern void early_paging_init(const struct machine_desc *,
			      struct proc_info_list *);
extern void sanity_check_meminfo(void);
extern enum reboot_mode reboot_mode;
extern void setup_dma_zone(const struct machine_desc *desc);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);

unsigned int elf_hwcap2 __read_mostly;
EXPORT_SYMBOL(elf_hwcap2);


#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
	u32 fiq[3];
} ____cacheline_aligned;

#ifndef CONFIG_CPU_V7M
static struct stack stacks[NR_CPUS];
#endif

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
const struct machine_desc *machine_desc __initdata;

static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)
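/*
 * ENDIANNESS evaluates to the character appended to the machine and ELF
 * platform strings below: casting the unsigned long to char keeps only its
 * least significant byte, which is the first array element ('l') on a
 * little-endian kernel and the last one ('b') on a big-endian kernel.
 */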

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"7M",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};

#ifdef CONFIG_CPU_V7M
static int __get_cpu_architecture(void)
{
	return CPU_ARCH_ARMv7M;
}
#else
static int __get_cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		unsigned int mmfr0;

		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		asm("mrc p15, 0, %0, c0, c1, 4"
		    : "=r" (mmfr0));
		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
#endif

int __pure cpu_architecture(void)
{
	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

	return __cpu_architecture;
}

static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* PIPT caches never alias. */
	if (icache_is_pipt())
		return 0;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		asm("mcr p15, 2, %0, c0, c0, 0 @ set CSSELR"
		    : /* No output operands */
		    : "r" (1));
		isb();
		asm("mrc p15, 1, %0, c0, c0, 0 @ read CCSIDR"
		    : "=r" (id_reg));
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
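		/*
		 * CCSIDR bits [2:0] encode log2(words per line) - 2 and
		 * bits [27:13] encode the number of sets minus one, so
		 * line_size * num_sets is the size of one cache way.
		 * E.g. a 32KB 2-way VIPT I-cache with 32-byte lines has
		 * 512 sets: 32 * 512 = 16KB > 4KB pages, so the index
		 * uses virtual address bits above the page offset and
		 * the cache can alias.
		 */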
		break;
	case CPU_ARCH_ARMv6:
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}

static void __init cacheid_init(void)
{
	unsigned int arch = cpu_architecture();

	if (arch == CPU_ARCH_ARMv7M) {
		cacheid = 0;
	} else if (arch >= CPU_ARCH_ARMv6) {
		unsigned int cachetype = read_cpuid_cachetype();
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			arch = CPU_ARCH_ARMv7;
			cacheid = CACHEID_VIPT_NONALIASING;
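			/*
			 * With the ARMv7 format the data cache is
			 * treated as VIPT non-aliasing; bits [15:14]
			 * of the cache type register (L1Ip) give the
			 * instruction cache policy: 01 is ASID-tagged
			 * VIVT, 10 is VIPT and 11 is PIPT.
			 */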
			switch (cachetype & (3 << 14)) {
			case (1 << 14):
				cacheid |= CACHEID_ASID_TAGGED;
				break;
			case (3 << 14):
				cacheid |= CACHEID_PIPT;
				break;
			}
		} else {
			arch = CPU_ARCH_ARMv6;
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
		}
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
	} else {
		cacheid = CACHEID_VIVT;
	}

	pr_info("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		icache_is_pipt() ? "PIPT" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

/*
 * This function re-uses the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}

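/*
 * cpuid_feature_extract() (see asm/cputype.h) returns a CPU feature
 * register field as a signed quantity: the 4-bit field at the given bit
 * position is sign-extended, so a field reading 0xf comes back as -1
 * rather than 15 and correctly fails the ">= n" checks below.  Roughly:
 *
 *	field = (reg >> pos) & 0xf;
 *	return field > 7 ? field - 16 : field;
 */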
static void __init cpuid_init_hwcaps(void)
{
	int block;

	if (cpu_architecture() < CPU_ARCH_ARMv7)
		return;

	block = cpuid_feature_extract(CPUID_EXT_ISAR0, 24);
	if (block >= 2)
		elf_hwcap |= HWCAP_IDIVA;
	if (block >= 1)
		elf_hwcap |= HWCAP_IDIVT;

	/* LPAE implies atomic ldrd/strd instructions */
	block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
	if (block >= 5)
		elf_hwcap |= HWCAP_LPAE;
}

static void __init elf_hwcap_fixup(void)
{
	unsigned id = read_cpuid_id();

	/*
	 * HWCAP_TLS is available only on 1136 r1p0 and later,
	 * see also kuser_get_tls_init.
	 */
	if (read_cpuid_part() == ARM_CPU_PART_ARM1136 &&
	    ((id >> 20) & 3) == 0) {
		elf_hwcap &= ~HWCAP_TLS;
		return;
	}

	/* Verify if CPUID scheme is implemented */
	if ((id & 0x000f0000) != 0x000f0000)
		return;

	/*
	 * If the CPU supports LDREX/STREX and LDREXB/STREXB,
	 * avoid advertising SWP; it may not be atomic with
	 * multiprocessing cores.
	 */
	if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
	    (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
	     cpuid_feature_extract(CPUID_EXT_ISAR3, 20) >= 3))
		elf_hwcap &= ~HWCAP_SWP;
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void notrace cpu_init(void)
{
#ifndef CONFIG_CPU_V7M
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		pr_crit("CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * This only works on resume and secondary cores. For booting on the
	 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
	 */
	set_my_cpu_offset(per_cpu_offset(cpu));

	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC "r"
#else
#define PLC "I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7\n\t"
	"add	r14, %0, %8\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %9"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
	      "I" (offsetof(struct stack, fiq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
#endif
}

u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };

void __init smp_setup_processor_id(void)
{
	int i;
	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	cpu_logical_map(0) = cpu;
	for (i = 1; i < nr_cpu_ids; ++i)
		cpu_logical_map(i) = i == cpu ? 0 : i;
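	/*
	 * The boot CPU becomes logical CPU 0 and whichever logical slot
	 * matches its affinity level 0 value takes the vacated entry,
	 * e.g. booting on physical CPU 2 leaves the map as 2, 1, 0, 3, ...
	 */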

	/*
	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
	 * using percpu variable early, for example, lockdep will
	 * access percpu variable inside lock_release
	 */
	set_my_cpu_offset(0);

	pr_info("Booting Linux on physical CPU 0x%x\n", mpidr);
}

struct mpidr_hash mpidr_hash;
#ifdef CONFIG_SMP
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. Resulting algorithm is a collision
 *			  free hash carried out through shifting and ORing
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity;
	u32 fs[3], bits[3], ls, mask = 0;
	/*
	 * Pre-scan the list of MPIDRS and filter out bits that do
	 * not contribute to affinity levels, ie they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits 0x%x\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 3; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB bit and LSB bits position
		 * to determine how many bits are required
		 * to express the affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 24 bits values space to a
	 * compressed set of values. This is equivalent to hashing
	 * the MPIDR through shifting and ORing. It is a collision free
	 * hash though not minimal since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, eg MPIDR[7:0] = {0x2, 0x80}.
	 */
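	/*
	 * For example, two clusters of two CPUs (MPIDRs 0x000, 0x001,
	 * 0x100 and 0x101) give mask = 0x101, fs = {0, 0, 0} and
	 * bits = {1, 1, 0}: aff0 stays at bit 0, aff1 is shifted down
	 * by 7 to bit 1, and the resulting 2-bit index maps the four
	 * CPUs to 0, 1, 2 and 3 with no collisions.
	 */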
	mpidr_hash.shift_aff[0] = fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
				  (bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
		 mpidr_hash.shift_aff[0],
		 mpidr_hash.shift_aff[1],
		 mpidr_hash.shift_aff[2],
		 mpidr_hash.mask,
		 mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
	sync_cache_w(&mpidr_hash);
}
#endif

static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types. The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		pr_err("CPU configuration botched (ID %08x), unable to continue.\n",
		       read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;
	__cpu_architecture = __get_cpu_architecture();

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
		cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
		proc_arch[cpu_architecture()], get_cr());

	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;

	cpuid_init_hwcaps();

#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif
#ifdef CONFIG_MMU
	init_default_cache_policy(list->__cpu_mm_mmu_flags);
#endif
	erratum_a15_798181_init();

	elf_hwcap_fixup();

	cacheid_init();
	cpu_init();
}

void __init dump_machine_table(void)
{
	const struct machine_desc *p;

	early_print("Available machine support:\n\nID (hex)\tNAME\n");
	for_each_machine_desc(p)
		early_print("%08x\t%s\n", p->nr, p->name);

	early_print("\nPlease check your kernel config and/or bootloader.\n");

	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
}

int __init arm_add_memory(u64 start, u64 size)
{
	u64 aligned_start;

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is rounded down, start is rounded up.
	 */
	aligned_start = PAGE_ALIGN(start);
	if (aligned_start > start + size)
		size = 0;
	else
		size -= aligned_start - start;

#ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
	if (aligned_start > ULONG_MAX) {
		pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
			(long long)start);
		return -EINVAL;
	}

	if (aligned_start + size > ULONG_MAX) {
		pr_crit("Truncating memory at 0x%08llx to fit in 32-bit physical address space\n",
			(long long)start);
		/*
		 * To ensure bank->start + bank->size is representable in
		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
		 * This means we lose a page after masking.
		 */
		size = ULONG_MAX - aligned_start;
	}
#endif

	if (aligned_start < PHYS_OFFSET) {
		if (aligned_start + size <= PHYS_OFFSET) {
			pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
				aligned_start, aligned_start + size);
			return -EINVAL;
		}

		pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
			aligned_start, (u64)PHYS_OFFSET);

		size -= PHYS_OFFSET - aligned_start;
		aligned_start = PHYS_OFFSET;
	}

	start = aligned_start;
	size = size & ~(phys_addr_t)(PAGE_SIZE - 1);
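	/*
	 * E.g. with 4K pages a bank passed in as start 0x80001234,
	 * size 0x100000 ends up as aligned_start 0x80002000 with the
	 * size first reduced to 0xff234 and then rounded down to
	 * 0xff000, i.e. 255 whole pages.
	 */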

	/*
	 * Check that the memory region still has a non-zero size
	 * after the alignment and truncation above.
	 */
	if (size == 0)
		return -EINVAL;

	memblock_add(start, size);
	return 0;
}

/*
 * Pick out the memory size. We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */

static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	u64 size;
	u64 start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		memblock_remove(memblock_start_of_DRAM(),
			memblock_end_of_DRAM() - memblock_start_of_DRAM());
	}

	start = PHYS_OFFSET;
	size = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);
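/*
 * E.g. "mem=64M" describes a 64MB bank starting at PHYS_OFFSET, while
 * "mem=64M@0x20000000" places it at physical address 0x20000000; passing
 * "mem=" several times adds one bank per occurrence.
 */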

static void __init request_standard_resources(const struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start = virt_to_phys(_text);
	kernel_code.end = virt_to_phys(_etext - 1);
	kernel_data.start = virt_to_phys(_sdata);
	kernel_data.end = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		res = memblock_virt_alloc(sizeof(*res), 0);
		res->name = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines never have lp0, lp1 or lp2, so the legacy
	 * parallel port ranges are only claimed when the machine
	 * description asks for them.
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
	.orig_video_lines = 30,
	.orig_video_cols = 80,
	.orig_video_mode = 0,
	.orig_video_ega_bx = 0,
	.orig_video_isVGA = 1,
	.orig_video_points = 8
};
#endif

static int __init customize_machine(void)
{
	/*
	 * customizes platform devices, or adds new ones
	 * On DT based machines, we fall back to populating the
	 * machine from the device tree, if no callback is provided,
	 * otherwise we would always need an init_machine callback.
	 */
	of_iommu_init();
	if (machine_desc->init_machine)
		machine_desc->init_machine();
#ifdef CONFIG_OF
	else
		of_platform_populate(NULL, of_default_bus_match_table,
				     NULL, NULL);
#endif
	return 0;
}
arch_initcall(customize_machine);

static int __init init_machine_late(void)
{
	if (machine_desc->init_late)
		machine_desc->init_late();
	return 0;
}
late_initcall(init_machine_late);

#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for the crash kernel
 *
 * This function reserves the memory area given by the "crashkernel=" kernel
 * command line parameter. The reserved memory is used by a dump capture
 * kernel when the primary kernel crashes.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret)
		return;

	ret = memblock_reserve(crash_base, crash_size);
	if (ret < 0) {
		pr_warn("crashkernel reservation failed - memory is in use (0x%lx)\n",
			(unsigned long)crash_base);
		return;
	}

	pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
		(unsigned long)(crash_size >> 20),
		(unsigned long)(crash_base >> 20),
		(unsigned long)(total_mem >> 20));

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */

void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
	sync_boot_mode();

	if (is_hyp_mode_available()) {
		pr_info("CPU: All CPU(s) started in HYP mode.\n");
		pr_info("CPU: Virtualization extensions available.\n");
	} else if (is_hyp_mode_mismatched()) {
		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
			__boot_cpu_mode & MODE_MASK);
		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
	} else
		pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}

void __init setup_arch(char **cmdline_p)
{
	const struct machine_desc *mdesc;

	setup_processor();
	mdesc = setup_machine_fdt(__atags_pointer);
	if (!mdesc)
		mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
	machine_desc = mdesc;
	machine_name = mdesc->name;
	dump_stack_set_arch_desc("%s", mdesc->name);

	if (mdesc->reboot_mode != REBOOT_HARD)
		reboot_mode = mdesc->reboot_mode;

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	parse_early_param();

	early_paging_init(mdesc, lookup_processor_type(read_cpuid_id()));
	setup_dma_zone(mdesc);
	sanity_check_meminfo();
	arm_memblock_init(mdesc);

	paging_init(mdesc);
	request_standard_resources(mdesc);

	if (mdesc->restart)
		arm_pm_restart = mdesc->restart;

	unflatten_device_tree();

	arm_dt_init_cpu_maps();
	psci_init();
#ifdef CONFIG_SMP
	if (is_smp()) {
		if (!mdesc->smp_init || !mdesc->smp_init()) {
			if (psci_smp_available())
				smp_set_ops(&psci_smp_ops);
			else if (mdesc->smp)
				smp_set_ops(mdesc->smp);
		}
		smp_init_cpus();
		smp_build_mpidr_hash();
	}
#endif

	if (!is_smp())
		hyp_mode_check();

	reserve_crashkernel();

#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	if (mdesc->init_early)
		mdesc->init_early();
}


static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = 1;
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif

static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	"evtstrm",
	NULL
};

static const char *hwcap2_str[] = {
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};

static int c_show(struct seq_file *m, void *v)
{
	int i, j;
	u32 cpuid;

	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor". Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
		seq_printf(m, "model name\t: %s rev %d (%s)\n",
			   cpu_name, cpuid & 15, elf_platform);

#if defined(CONFIG_SMP)
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
#else
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   loops_per_jiffy / (500000/HZ),
			   (loops_per_jiffy / (5000/HZ)) % 100);
#endif
		/* dump out the processor features */
		seq_puts(m, "Features\t: ");

		for (j = 0; hwcap_str[j]; j++)
			if (elf_hwcap & (1 << j))
				seq_printf(m, "%s ", hwcap_str[j]);

		for (j = 0; hwcap2_str[j]; j++)
			if (elf_hwcap2 & (1 << j))
				seq_printf(m, "%s ", hwcap2_str[j]);

		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
		seq_printf(m, "CPU architecture: %s\n",
			   proc_arch[cpu_architecture()]);

		if ((cpuid & 0x0008f000) == 0x00000000) {
			/* pre-ARM7 */
			seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
		} else {
			if ((cpuid & 0x0008f000) == 0x00007000) {
				/* ARM7 */
				seq_printf(m, "CPU variant\t: 0x%02x\n",
					   (cpuid >> 16) & 127);
			} else {
				/* post-ARM7 */
				seq_printf(m, "CPU variant\t: 0x%x\n",
					   (cpuid >> 20) & 15);
			}
			seq_printf(m, "CPU part\t: 0x%03x\n",
				   (cpuid >> 4) & 0xfff);
		}
		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
	}

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start = c_start,
	.next = c_next,
	.stop = c_stop,
	.show = c_show
};