1 /*
2 * linux/arch/arm/kernel/setup.c
3 *
4 * Copyright (C) 1995-2001 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10 #include <linux/export.h>
11 #include <linux/kernel.h>
12 #include <linux/stddef.h>
13 #include <linux/ioport.h>
14 #include <linux/delay.h>
15 #include <linux/utsname.h>
16 #include <linux/initrd.h>
17 #include <linux/console.h>
18 #include <linux/bootmem.h>
19 #include <linux/seq_file.h>
20 #include <linux/screen_info.h>
21 #include <linux/of_iommu.h>
22 #include <linux/of_platform.h>
23 #include <linux/init.h>
24 #include <linux/kexec.h>
25 #include <linux/of_fdt.h>
26 #include <linux/cpu.h>
27 #include <linux/interrupt.h>
28 #include <linux/smp.h>
29 #include <linux/proc_fs.h>
30 #include <linux/memblock.h>
31 #include <linux/bug.h>
32 #include <linux/compiler.h>
33 #include <linux/sort.h>
34
35 #include <asm/unified.h>
36 #include <asm/cp15.h>
37 #include <asm/cpu.h>
38 #include <asm/cputype.h>
39 #include <asm/elf.h>
40 #include <asm/procinfo.h>
41 #include <asm/psci.h>
42 #include <asm/sections.h>
43 #include <asm/setup.h>
44 #include <asm/smp_plat.h>
45 #include <asm/mach-types.h>
46 #include <asm/cacheflush.h>
47 #include <asm/cachetype.h>
48 #include <asm/tlbflush.h>
49
50 #include <asm/prom.h>
51 #include <asm/mach/arch.h>
52 #include <asm/mach/irq.h>
53 #include <asm/mach/time.h>
54 #include <asm/system_info.h>
55 #include <asm/system_misc.h>
56 #include <asm/traps.h>
57 #include <asm/unwind.h>
58 #include <asm/memblock.h>
59 #include <asm/virt.h>
60
61 #include "atags.h"
62
63
64 #if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
65 char fpe_type[8];
66
67 static int __init fpe_setup(char *line)
68 {
69 memcpy(fpe_type, line, 8);
70 return 1;
71 }
72
73 __setup("fpe=", fpe_setup);
74 #endif
75
76 extern void init_default_cache_policy(unsigned long);
77 extern void paging_init(const struct machine_desc *desc);
78 extern void early_paging_init(const struct machine_desc *,
79 struct proc_info_list *);
80 extern void sanity_check_meminfo(void);
81 extern enum reboot_mode reboot_mode;
82 extern void setup_dma_zone(const struct machine_desc *desc);
83
84 unsigned int processor_id;
85 EXPORT_SYMBOL(processor_id);
86 unsigned int __machine_arch_type __read_mostly;
87 EXPORT_SYMBOL(__machine_arch_type);
88 unsigned int cacheid __read_mostly;
89 EXPORT_SYMBOL(cacheid);
90
91 unsigned int __atags_pointer __initdata;
92
93 unsigned int system_rev;
94 EXPORT_SYMBOL(system_rev);
95
96 unsigned int system_serial_low;
97 EXPORT_SYMBOL(system_serial_low);
98
99 unsigned int system_serial_high;
100 EXPORT_SYMBOL(system_serial_high);
101
102 unsigned int elf_hwcap __read_mostly;
103 EXPORT_SYMBOL(elf_hwcap);
104
105 unsigned int elf_hwcap2 __read_mostly;
106 EXPORT_SYMBOL(elf_hwcap2);
107
108
109 #ifdef MULTI_CPU
110 struct processor processor __read_mostly;
111 #endif
112 #ifdef MULTI_TLB
113 struct cpu_tlb_fns cpu_tlb __read_mostly;
114 #endif
115 #ifdef MULTI_USER
116 struct cpu_user_fns cpu_user __read_mostly;
117 #endif
118 #ifdef MULTI_CACHE
119 struct cpu_cache_fns cpu_cache __read_mostly;
120 #endif
121 #ifdef CONFIG_OUTER_CACHE
122 struct outer_cache_fns outer_cache __read_mostly;
123 EXPORT_SYMBOL(outer_cache);
124 #endif
125
126 /*
127 * Cached cpu_architecture() result for use by assembler code.
128 * C code should use the cpu_architecture() function instead of accessing this
129 * variable directly.
130 */
131 int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;
132
133 struct stack {
134 u32 irq[3];
135 u32 abt[3];
136 u32 und[3];
137 u32 fiq[3];
138 } ____cacheline_aligned;
139
140 #ifndef CONFIG_CPU_V7M
141 static struct stack stacks[NR_CPUS];
142 #endif
143
144 char elf_platform[ELF_PLATFORM_SIZE];
145 EXPORT_SYMBOL(elf_platform);
146
147 static const char *cpu_name;
148 static const char *machine_name;
149 static char __initdata cmd_line[COMMAND_LINE_SIZE];
150 const struct machine_desc *machine_desc __initdata;
151
152 static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
153 #define ENDIANNESS ((char)endian_test.l)
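/*
 * (char)endian_test.l picks the least significant byte of the word: 'l'
 * on a little-endian kernel, 'b' on a big-endian one.  The character is
 * appended to the machine and ELF platform strings in setup_processor(),
 * e.g. "armv7l" versus "armv7b".
 */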
154
155 DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
156
157 /*
158 * Standard memory resources
159 */
160 static struct resource mem_res[] = {
161 {
162 .name = "Video RAM",
163 .start = 0,
164 .end = 0,
165 .flags = IORESOURCE_MEM
166 },
167 {
168 .name = "Kernel code",
169 .start = 0,
170 .end = 0,
171 .flags = IORESOURCE_MEM
172 },
173 {
174 .name = "Kernel data",
175 .start = 0,
176 .end = 0,
177 .flags = IORESOURCE_MEM
178 }
179 };
180
181 #define video_ram mem_res[0]
182 #define kernel_code mem_res[1]
183 #define kernel_data mem_res[2]
184
185 static struct resource io_res[] = {
186 {
187 .name = "reserved",
188 .start = 0x3bc,
189 .end = 0x3be,
190 .flags = IORESOURCE_IO | IORESOURCE_BUSY
191 },
192 {
193 .name = "reserved",
194 .start = 0x378,
195 .end = 0x37f,
196 .flags = IORESOURCE_IO | IORESOURCE_BUSY
197 },
198 {
199 .name = "reserved",
200 .start = 0x278,
201 .end = 0x27f,
202 .flags = IORESOURCE_IO | IORESOURCE_BUSY
203 }
204 };
205
206 #define lp0 io_res[0]
207 #define lp1 io_res[1]
208 #define lp2 io_res[2]
209
210 static const char *proc_arch[] = {
211 "undefined/unknown",
212 "3",
213 "4",
214 "4T",
215 "5",
216 "5T",
217 "5TE",
218 "5TEJ",
219 "6TEJ",
220 "7",
221 "7M",
222 "?(12)",
223 "?(13)",
224 "?(14)",
225 "?(15)",
226 "?(16)",
227 "?(17)",
228 };
229
230 #ifdef CONFIG_CPU_V7M
231 static int __get_cpu_architecture(void)
232 {
233 return CPU_ARCH_ARMv7M;
234 }
235 #else
236 static int __get_cpu_architecture(void)
237 {
238 int cpu_arch;
239
240 if ((read_cpuid_id() & 0x0008f000) == 0) {
241 cpu_arch = CPU_ARCH_UNKNOWN;
242 } else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
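		/* ARM7-style ID: bit 23 is the architecture bit, set on ARMv4T parts */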
243 cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
244 } else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
245 cpu_arch = (read_cpuid_id() >> 16) & 7;
246 if (cpu_arch)
247 cpu_arch += CPU_ARCH_ARMv3;
248 } else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
249 /* Revised CPUID format. Read the Memory Model Feature
250 * Register 0 and check for VMSAv7 or PMSAv7 */
251 unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
252 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
253 (mmfr0 & 0x000000f0) >= 0x00000030)
254 cpu_arch = CPU_ARCH_ARMv7;
255 else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
256 (mmfr0 & 0x000000f0) == 0x00000020)
257 cpu_arch = CPU_ARCH_ARMv6;
258 else
259 cpu_arch = CPU_ARCH_UNKNOWN;
260 } else
261 cpu_arch = CPU_ARCH_UNKNOWN;
262
263 return cpu_arch;
264 }
265 #endif
266
267 int __pure cpu_architecture(void)
268 {
269 BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);
270
271 return __cpu_architecture;
272 }
273
274 static int cpu_has_aliasing_icache(unsigned int arch)
275 {
276 int aliasing_icache;
277 unsigned int id_reg, num_sets, line_size;
278
279 /* PIPT caches never alias. */
280 if (icache_is_pipt())
281 return 0;
282
283 /* arch specifies the register format */
284 switch (arch) {
285 case CPU_ARCH_ARMv7:
286 asm("mcr p15, 2, %0, c0, c0, 0 @ set CSSELR"
287 : /* No output operands */
288 : "r" (1));
289 isb();
290 asm("mrc p15, 1, %0, c0, c0, 0 @ read CCSIDR"
291 : "=r" (id_reg));
292 line_size = 4 << ((id_reg & 0x7) + 2);
293 num_sets = ((id_reg >> 13) & 0x7fff) + 1;
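		/*
		 * A single cache way spans line_size * num_sets bytes; if that
		 * exceeds PAGE_SIZE, some virtual index bits lie above the page
		 * offset and the virtually-indexed I-cache can alias.  E.g.
		 * 32-byte lines and 256 sets give an 8K way, which aliases
		 * with 4K pages.
		 */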
294 aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
295 break;
296 case CPU_ARCH_ARMv6:
297 aliasing_icache = read_cpuid_cachetype() & (1 << 11);
298 break;
299 default:
300 /* I-cache aliases will be handled by D-cache aliasing code */
301 aliasing_icache = 0;
302 }
303
304 return aliasing_icache;
305 }
306
307 static void __init cacheid_init(void)
308 {
309 unsigned int arch = cpu_architecture();
310
311 if (arch == CPU_ARCH_ARMv7M) {
312 cacheid = 0;
313 } else if (arch >= CPU_ARCH_ARMv6) {
314 unsigned int cachetype = read_cpuid_cachetype();
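		/*
		 * CTR[31:29] == 0b100 selects the ARMv7 cache type register
		 * layout handled below; any other value is treated as the
		 * ARMv6 layout.
		 */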
315 if ((cachetype & (7 << 29)) == 4 << 29) {
316 /* ARMv7 register format */
317 arch = CPU_ARCH_ARMv7;
318 cacheid = CACHEID_VIPT_NONALIASING;
319 switch (cachetype & (3 << 14)) {
320 case (1 << 14):
321 cacheid |= CACHEID_ASID_TAGGED;
322 break;
323 case (3 << 14):
324 cacheid |= CACHEID_PIPT;
325 break;
326 }
327 } else {
328 arch = CPU_ARCH_ARMv6;
329 if (cachetype & (1 << 23))
330 cacheid = CACHEID_VIPT_ALIASING;
331 else
332 cacheid = CACHEID_VIPT_NONALIASING;
333 }
334 if (cpu_has_aliasing_icache(arch))
335 cacheid |= CACHEID_VIPT_I_ALIASING;
336 } else {
337 cacheid = CACHEID_VIVT;
338 }
339
340 pr_info("CPU: %s data cache, %s instruction cache\n",
341 cache_is_vivt() ? "VIVT" :
342 cache_is_vipt_aliasing() ? "VIPT aliasing" :
343 cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
344 cache_is_vivt() ? "VIVT" :
345 icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
346 icache_is_vipt_aliasing() ? "VIPT aliasing" :
347 icache_is_pipt() ? "PIPT" :
348 cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
349 }
350
351 /*
352 * These functions re-use the assembly code in head.S, which
353  * already provides the required functionality.
354 */
355 extern struct proc_info_list *lookup_processor_type(unsigned int);
356
357 void __init early_print(const char *str, ...)
358 {
359 extern void printascii(const char *);
360 char buf[256];
361 va_list ap;
362
363 va_start(ap, str);
364 vsnprintf(buf, sizeof(buf), str, ap);
365 va_end(ap);
366
367 #ifdef CONFIG_DEBUG_LL
368 printascii(buf);
369 #endif
370 printk("%s", buf);
371 }
372
373 static void __init cpuid_init_hwcaps(void)
374 {
375 unsigned int divide_instrs, vmsa;
376
377 if (cpu_architecture() < CPU_ARCH_ARMv7)
378 return;
379
380 divide_instrs = (read_cpuid_ext(CPUID_EXT_ISAR0) & 0x0f000000) >> 24;
381
382 switch (divide_instrs) {
383 case 2:
384 elf_hwcap |= HWCAP_IDIVA;
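		/* fall through: ARM-mode divide (2) implies Thumb-mode divide too */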
385 case 1:
386 elf_hwcap |= HWCAP_IDIVT;
387 }
388
389 /* LPAE implies atomic ldrd/strd instructions */
390 vmsa = (read_cpuid_ext(CPUID_EXT_MMFR0) & 0xf) >> 0;
391 if (vmsa >= 5)
392 elf_hwcap |= HWCAP_LPAE;
393 }
394
395 static void __init elf_hwcap_fixup(void)
396 {
397 unsigned id = read_cpuid_id();
398 unsigned sync_prim;
399
400 /*
401 * HWCAP_TLS is available only on 1136 r1p0 and later,
402 * see also kuser_get_tls_init.
403 */
404 if (read_cpuid_part() == ARM_CPU_PART_ARM1136 &&
405 ((id >> 20) & 3) == 0) {
406 elf_hwcap &= ~HWCAP_TLS;
407 return;
408 }
409
410 /* Verify if CPUID scheme is implemented */
411 if ((id & 0x000f0000) != 0x000f0000)
412 return;
413
414 /*
415 * If the CPU supports LDREX/STREX and LDREXB/STREXB,
416 * avoid advertising SWP; it may not be atomic with
417 * multiprocessing cores.
418 */
419 sync_prim = ((read_cpuid_ext(CPUID_EXT_ISAR3) >> 8) & 0xf0) |
420 ((read_cpuid_ext(CPUID_EXT_ISAR4) >> 20) & 0x0f);
421 if (sync_prim >= 0x13)
422 elf_hwcap &= ~HWCAP_SWP;
423 }
424
425 /*
426 * cpu_init - initialise one CPU.
427 *
428 * cpu_init sets up the per-CPU stacks.
429 */
430 void notrace cpu_init(void)
431 {
432 #ifndef CONFIG_CPU_V7M
433 unsigned int cpu = smp_processor_id();
434 struct stack *stk = &stacks[cpu];
435
436 if (cpu >= NR_CPUS) {
437 pr_crit("CPU%u: bad primary CPU number\n", cpu);
438 BUG();
439 }
440
441 /*
442 * This only works on resume and secondary cores. For booting on the
443 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
444 */
445 set_my_cpu_offset(per_cpu_offset(cpu));
446
447 cpu_proc_init();
448
449 /*
450 * Define the placement constraint for the inline asm directive below.
451 * In Thumb-2, msr with an immediate value is not allowed.
452 */
453 #ifdef CONFIG_THUMB2_KERNEL
454 #define PLC "r"
455 #else
456 #define PLC "I"
457 #endif
458
459 /*
460 * setup stacks for re-entrant exception handlers
461 */
462 __asm__ (
463 "msr cpsr_c, %1\n\t"
464 "add r14, %0, %2\n\t"
465 "mov sp, r14\n\t"
466 "msr cpsr_c, %3\n\t"
467 "add r14, %0, %4\n\t"
468 "mov sp, r14\n\t"
469 "msr cpsr_c, %5\n\t"
470 "add r14, %0, %6\n\t"
471 "mov sp, r14\n\t"
472 "msr cpsr_c, %7\n\t"
473 "add r14, %0, %8\n\t"
474 "mov sp, r14\n\t"
475 "msr cpsr_c, %9"
476 :
477 : "r" (stk),
478 PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
479 "I" (offsetof(struct stack, irq[0])),
480 PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
481 "I" (offsetof(struct stack, abt[0])),
482 PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
483 "I" (offsetof(struct stack, und[0])),
484 PLC (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
485 "I" (offsetof(struct stack, fiq[0])),
486 PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
487 : "r14");
488 #endif
489 }
490
491 u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };
492
493 void __init smp_setup_processor_id(void)
494 {
495 int i;
496 u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
497 u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
498
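	/*
	 * Logical CPU 0 must be the booting CPU: swap map entry 0 with the
	 * entry that would otherwise hold the boot CPU's Aff0 value, so the
	 * map remains a permutation and every hardware id appears once.
	 */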
499 cpu_logical_map(0) = cpu;
500 for (i = 1; i < nr_cpu_ids; ++i)
501 cpu_logical_map(i) = i == cpu ? 0 : i;
502
503 /*
504  * Clear __my_cpu_offset on the boot CPU to avoid a hang caused by
505  * using per-cpu variables too early; for example, lockdep will
506  * access per-cpu variables inside lock_release().
507 */
508 set_my_cpu_offset(0);
509
510 pr_info("Booting Linux on physical CPU 0x%x\n", mpidr);
511 }
512
513 struct mpidr_hash mpidr_hash;
514 #ifdef CONFIG_SMP
515 /**
516 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
517 * level in order to build a linear index from an
518  * MPIDR value. The resulting algorithm is a collision-free
519  * hash carried out through shifting and ORing.
520 */
521 static void __init smp_build_mpidr_hash(void)
522 {
523 u32 i, affinity;
524 u32 fs[3], bits[3], ls, mask = 0;
525 /*
526  * Pre-scan the list of MPIDRs and filter out bits that do
527  * not contribute to affinity levels, i.e. they never toggle.
528 */
529 for_each_possible_cpu(i)
530 mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
531 pr_debug("mask of set bits 0x%x\n", mask);
532 /*
533 * Find and stash the last and first bit set at all affinity levels to
534 * check how many bits are required to represent them.
535 */
536 for (i = 0; i < 3; i++) {
537 affinity = MPIDR_AFFINITY_LEVEL(mask, i);
538 /*
539 * Find the MSB bit and LSB bits position
540 * to determine how many bits are required
541 * to express the affinity level.
542 */
543 ls = fls(affinity);
544 fs[i] = affinity ? ffs(affinity) - 1 : 0;
545 bits[i] = ls - fs[i];
546 }
547 /*
548 * An index can be created from the MPIDR by isolating the
549 * significant bits at each affinity level and by shifting
550  * them in order to compress the 24-bit value space to a
551  * compressed set of values. This is equivalent to hashing
552  * the MPIDR through shifting and ORing. It is a collision-free
553  * hash, though not minimal, since some levels might contain a
554  * number of CPUs that is not an exact power of 2 and their bit
555  * representation might contain holes, e.g. MPIDR[7:0] = {0x2, 0x80}.
556 */
557 mpidr_hash.shift_aff[0] = fs[0];
558 mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
559 mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
560 (bits[1] + bits[0]);
561 mpidr_hash.mask = mask;
562 mpidr_hash.bits = bits[2] + bits[1] + bits[0];
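	/*
	 * For example, MPIDRs {0x0, 0x1, 0x100, 0x101} toggle only bit 0 of
	 * Aff0 and bit 0 of Aff1, so mask = 0x101, one bit is kept per level,
	 * bits = 2, and the four CPUs map to distinct indices 0..3.
	 */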
563 pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
564 mpidr_hash.shift_aff[0],
565 mpidr_hash.shift_aff[1],
566 mpidr_hash.shift_aff[2],
567 mpidr_hash.mask,
568 mpidr_hash.bits);
569 /*
570 * 4x is an arbitrary value used to warn on a hash table much bigger
571 * than expected on most systems.
572 */
573 if (mpidr_hash_size() > 4 * num_possible_cpus())
574 pr_warn("Large number of MPIDR hash buckets detected\n");
575 sync_cache_w(&mpidr_hash);
576 }
577 #endif
578
579 static void __init setup_processor(void)
580 {
581 struct proc_info_list *list;
582
583 /*
584 * locate processor in the list of supported processor
585 * types. The linker builds this table for us from the
586 * entries in arch/arm/mm/proc-*.S
587 */
588 list = lookup_processor_type(read_cpuid_id());
589 if (!list) {
590 pr_err("CPU configuration botched (ID %08x), unable to continue.\n",
591 read_cpuid_id());
592 while (1);
593 }
594
595 cpu_name = list->cpu_name;
596 __cpu_architecture = __get_cpu_architecture();
597
598 #ifdef MULTI_CPU
599 processor = *list->proc;
600 #endif
601 #ifdef MULTI_TLB
602 cpu_tlb = *list->tlb;
603 #endif
604 #ifdef MULTI_USER
605 cpu_user = *list->user;
606 #endif
607 #ifdef MULTI_CACHE
608 cpu_cache = *list->cache;
609 #endif
610
611 pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
612 cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
613 proc_arch[cpu_architecture()], get_cr());
614
615 snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
616 list->arch_name, ENDIANNESS);
617 snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
618 list->elf_name, ENDIANNESS);
619 elf_hwcap = list->elf_hwcap;
620
621 cpuid_init_hwcaps();
622
623 #ifndef CONFIG_ARM_THUMB
624 elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
625 #endif
626 #ifdef CONFIG_MMU
627 init_default_cache_policy(list->__cpu_mm_mmu_flags);
628 #endif
629 erratum_a15_798181_init();
630
631 elf_hwcap_fixup();
632
633 cacheid_init();
634 cpu_init();
635 }
636
637 void __init dump_machine_table(void)
638 {
639 const struct machine_desc *p;
640
641 early_print("Available machine support:\n\nID (hex)\tNAME\n");
642 for_each_machine_desc(p)
643 early_print("%08x\t%s\n", p->nr, p->name);
644
645 early_print("\nPlease check your kernel config and/or bootloader.\n");
646
647 while (true)
648 /* can't use cpu_relax() here as it may require MMU setup */;
649 }
650
651 int __init arm_add_memory(u64 start, u64 size)
652 {
653 u64 aligned_start;
654
655 /*
656 * Ensure that start/size are aligned to a page boundary.
657 * Size is rounded down, start is rounded up.
658 */
659 aligned_start = PAGE_ALIGN(start);
660 if (aligned_start > start + size)
661 size = 0;
662 else
663 size -= aligned_start - start;
664
665 #ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
666 if (aligned_start > ULONG_MAX) {
667 pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
668 (long long)start);
669 return -EINVAL;
670 }
671
672 if (aligned_start + size > ULONG_MAX) {
673 pr_crit("Truncating memory at 0x%08llx to fit in 32-bit physical address space\n",
674 (long long)start);
675 /*
676 * To ensure bank->start + bank->size is representable in
677 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
678 * This means we lose a page after masking.
679 */
680 size = ULONG_MAX - aligned_start;
681 }
682 #endif
683
684 if (aligned_start < PHYS_OFFSET) {
685 if (aligned_start + size <= PHYS_OFFSET) {
686 pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
687 aligned_start, aligned_start + size);
688 return -EINVAL;
689 }
690
691 pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
692 aligned_start, (u64)PHYS_OFFSET);
693
694 size -= PHYS_OFFSET - aligned_start;
695 aligned_start = PHYS_OFFSET;
696 }
697
698 start = aligned_start;
699 size = size & ~(phys_addr_t)(PAGE_SIZE - 1);
700
701 /*
702  * Check whether this memory region still has a
703  * non-zero size after the above adjustments.
704 */
705 if (size == 0)
706 return -EINVAL;
707
708 memblock_add(start, size);
709 return 0;
710 }
711
712 /*
713 * Pick out the memory size. We look for mem=size@start,
714 * where start and size are "size[KkMm]"
715 */
716
717 static int __init early_mem(char *p)
718 {
719 static int usermem __initdata = 0;
720 u64 size;
721 u64 start;
722 char *endp;
723
724 /*
725 * If the user specifies memory size, we
726 * blow away any automatically generated
727 * size.
728 */
729 if (usermem == 0) {
730 usermem = 1;
731 memblock_remove(memblock_start_of_DRAM(),
732 memblock_end_of_DRAM() - memblock_start_of_DRAM());
733 }
734
735 start = PHYS_OFFSET;
736 size = memparse(p, &endp);
737 if (*endp == '@')
738 start = memparse(endp + 1, NULL);
739
740 arm_add_memory(start, size);
741
742 return 0;
743 }
744 early_param("mem", early_mem);
745
746 static void __init request_standard_resources(const struct machine_desc *mdesc)
747 {
748 struct memblock_region *region;
749 struct resource *res;
750
751 kernel_code.start = virt_to_phys(_text);
752 kernel_code.end = virt_to_phys(_etext - 1);
753 kernel_data.start = virt_to_phys(_sdata);
754 kernel_data.end = virt_to_phys(_end - 1);
755
756 for_each_memblock(memory, region) {
757 res = memblock_virt_alloc(sizeof(*res), 0);
758 res->name = "System RAM";
759 res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
760 res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
761 res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
762
763 request_resource(&iomem_resource, res);
764
765 if (kernel_code.start >= res->start &&
766 kernel_code.end <= res->end)
767 request_resource(res, &kernel_code);
768 if (kernel_data.start >= res->start &&
769 kernel_data.end <= res->end)
770 request_resource(res, &kernel_data);
771 }
772
773 if (mdesc->video_start) {
774 video_ram.start = mdesc->video_start;
775 video_ram.end = mdesc->video_end;
776 request_resource(&iomem_resource, &video_ram);
777 }
778
779 /*
780  * Some machines never have the lp0, lp1 or lp2 parallel
781  * port regions, so only claim them when the machine asks.
782 */
783 if (mdesc->reserve_lp0)
784 request_resource(&ioport_resource, &lp0);
785 if (mdesc->reserve_lp1)
786 request_resource(&ioport_resource, &lp1);
787 if (mdesc->reserve_lp2)
788 request_resource(&ioport_resource, &lp2);
789 }
790
791 #if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
792 struct screen_info screen_info = {
793 .orig_video_lines = 30,
794 .orig_video_cols = 80,
795 .orig_video_mode = 0,
796 .orig_video_ega_bx = 0,
797 .orig_video_isVGA = 1,
798 .orig_video_points = 8
799 };
800 #endif
801
802 static int __init customize_machine(void)
803 {
804 /*
805  * Customize platform devices, or add new ones.
806  * On DT-based machines, we fall back to populating the
807  * machine from the device tree if no callback is provided;
808  * otherwise we would always need an init_machine callback.
809 */
810 of_iommu_init();
811 if (machine_desc->init_machine)
812 machine_desc->init_machine();
813 #ifdef CONFIG_OF
814 else
815 of_platform_populate(NULL, of_default_bus_match_table,
816 NULL, NULL);
817 #endif
818 return 0;
819 }
820 arch_initcall(customize_machine);
821
822 static int __init init_machine_late(void)
823 {
824 if (machine_desc->init_late)
825 machine_desc->init_late();
826 return 0;
827 }
828 late_initcall(init_machine_late);
829
830 #ifdef CONFIG_KEXEC
831 static inline unsigned long long get_total_mem(void)
832 {
833 unsigned long total;
834
835 total = max_low_pfn - min_low_pfn;
836 return total << PAGE_SHIFT;
837 }
838
839 /**
840  * reserve_crashkernel() - reserves the memory area for the crash kernel
841  *
842  * This function reserves the memory area specified by the "crashkernel="
843  * kernel command line parameter. The reserved memory is used by a
844  * dump-capture kernel when the primary kernel crashes.
845 */
846 static void __init reserve_crashkernel(void)
847 {
848 unsigned long long crash_size, crash_base;
849 unsigned long long total_mem;
850 int ret;
851
852 total_mem = get_total_mem();
853 ret = parse_crashkernel(boot_command_line, total_mem,
854 &crash_size, &crash_base);
855 if (ret)
856 return;
857
858 ret = memblock_reserve(crash_base, crash_size);
859 if (ret < 0) {
860 pr_warn("crashkernel reservation failed - memory is in use (0x%lx)\n",
861 (unsigned long)crash_base);
862 return;
863 }
864
865 pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
866 (unsigned long)(crash_size >> 20),
867 (unsigned long)(crash_base >> 20),
868 (unsigned long)(total_mem >> 20));
869
870 crashk_res.start = crash_base;
871 crashk_res.end = crash_base + crash_size - 1;
872 insert_resource(&iomem_resource, &crashk_res);
873 }
874 #else
875 static inline void reserve_crashkernel(void) {}
876 #endif /* CONFIG_KEXEC */
877
878 void __init hyp_mode_check(void)
879 {
880 #ifdef CONFIG_ARM_VIRT_EXT
881 sync_boot_mode();
882
883 if (is_hyp_mode_available()) {
884 pr_info("CPU: All CPU(s) started in HYP mode.\n");
885 pr_info("CPU: Virtualization extensions available.\n");
886 } else if (is_hyp_mode_mismatched()) {
887 pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
888 __boot_cpu_mode & MODE_MASK);
889 pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
890 } else
891 pr_info("CPU: All CPU(s) started in SVC mode.\n");
892 #endif
893 }
894
895 void __init setup_arch(char **cmdline_p)
896 {
897 const struct machine_desc *mdesc;
898
899 setup_processor();
900 mdesc = setup_machine_fdt(__atags_pointer);
901 if (!mdesc)
902 mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
903 machine_desc = mdesc;
904 machine_name = mdesc->name;
905 dump_stack_set_arch_desc("%s", mdesc->name);
906
907 if (mdesc->reboot_mode != REBOOT_HARD)
908 reboot_mode = mdesc->reboot_mode;
909
910 init_mm.start_code = (unsigned long) _text;
911 init_mm.end_code = (unsigned long) _etext;
912 init_mm.end_data = (unsigned long) _edata;
913 init_mm.brk = (unsigned long) _end;
914
915 /* populate cmd_line too for later use, preserving boot_command_line */
916 strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
917 *cmdline_p = cmd_line;
918
919 parse_early_param();
920
921 early_paging_init(mdesc, lookup_processor_type(read_cpuid_id()));
922 setup_dma_zone(mdesc);
923 sanity_check_meminfo();
924 arm_memblock_init(mdesc);
925
926 paging_init(mdesc);
927 request_standard_resources(mdesc);
928
929 if (mdesc->restart)
930 arm_pm_restart = mdesc->restart;
931
932 unflatten_device_tree();
933
934 arm_dt_init_cpu_maps();
935 psci_init();
936 #ifdef CONFIG_SMP
937 if (is_smp()) {
938 if (!mdesc->smp_init || !mdesc->smp_init()) {
939 if (psci_smp_available())
940 smp_set_ops(&psci_smp_ops);
941 else if (mdesc->smp)
942 smp_set_ops(mdesc->smp);
943 }
944 smp_init_cpus();
945 smp_build_mpidr_hash();
946 }
947 #endif
948
949 if (!is_smp())
950 hyp_mode_check();
951
952 reserve_crashkernel();
953
954 #ifdef CONFIG_MULTI_IRQ_HANDLER
955 handle_arch_irq = mdesc->handle_irq;
956 #endif
957
958 #ifdef CONFIG_VT
959 #if defined(CONFIG_VGA_CONSOLE)
960 conswitchp = &vga_con;
961 #elif defined(CONFIG_DUMMY_CONSOLE)
962 conswitchp = &dummy_con;
963 #endif
964 #endif
965
966 if (mdesc->init_early)
967 mdesc->init_early();
968 }
969
970
971 static int __init topology_init(void)
972 {
973 int cpu;
974
975 for_each_possible_cpu(cpu) {
976 struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
977 cpuinfo->cpu.hotpluggable = 1;
978 register_cpu(&cpuinfo->cpu, cpu);
979 }
980
981 return 0;
982 }
983 subsys_initcall(topology_init);
984
985 #ifdef CONFIG_HAVE_PROC_CPU
986 static int __init proc_cpu_init(void)
987 {
988 struct proc_dir_entry *res;
989
990 res = proc_mkdir("cpu", NULL);
991 if (!res)
992 return -ENOMEM;
993 return 0;
994 }
995 fs_initcall(proc_cpu_init);
996 #endif
997
998 static const char *hwcap_str[] = {
999 "swp",
1000 "half",
1001 "thumb",
1002 "26bit",
1003 "fastmult",
1004 "fpa",
1005 "vfp",
1006 "edsp",
1007 "java",
1008 "iwmmxt",
1009 "crunch",
1010 "thumbee",
1011 "neon",
1012 "vfpv3",
1013 "vfpv3d16",
1014 "tls",
1015 "vfpv4",
1016 "idiva",
1017 "idivt",
1018 "vfpd32",
1019 "lpae",
1020 "evtstrm",
1021 NULL
1022 };
1023
1024 static const char *hwcap2_str[] = {
1025 "aes",
1026 "pmull",
1027 "sha1",
1028 "sha2",
1029 "crc32",
1030 NULL
1031 };
1032
1033 static int c_show(struct seq_file *m, void *v)
1034 {
1035 int i, j;
1036 u32 cpuid;
1037
1038 for_each_online_cpu(i) {
1039 /*
1040 * glibc reads /proc/cpuinfo to determine the number of
1041 * online processors, looking for lines beginning with
1042 * "processor". Give glibc what it expects.
1043 */
1044 seq_printf(m, "processor\t: %d\n", i);
1045 cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
1046 seq_printf(m, "model name\t: %s rev %d (%s)\n",
1047 cpu_name, cpuid & 15, elf_platform);
1048
1049 #if defined(CONFIG_SMP)
1050 seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
1051 per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
1052 (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
1053 #else
1054 seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
1055 loops_per_jiffy / (500000/HZ),
1056 (loops_per_jiffy / (5000/HZ)) % 100);
1057 #endif
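		/*
		 * BogoMIPS = loops_per_jiffy * HZ / 500000; dividing by
		 * (500000 / HZ) and (5000 / HZ) prints the integer part and
		 * two decimal places without using floating point.
		 */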
1058 /* dump out the processor features */
1059 seq_puts(m, "Features\t: ");
1060
1061 for (j = 0; hwcap_str[j]; j++)
1062 if (elf_hwcap & (1 << j))
1063 seq_printf(m, "%s ", hwcap_str[j]);
1064
1065 for (j = 0; hwcap2_str[j]; j++)
1066 if (elf_hwcap2 & (1 << j))
1067 seq_printf(m, "%s ", hwcap2_str[j]);
1068
1069 seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
1070 seq_printf(m, "CPU architecture: %s\n",
1071 proc_arch[cpu_architecture()]);
1072
1073 if ((cpuid & 0x0008f000) == 0x00000000) {
1074 /* pre-ARM7 */
1075 seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
1076 } else {
1077 if ((cpuid & 0x0008f000) == 0x00007000) {
1078 /* ARM7 */
1079 seq_printf(m, "CPU variant\t: 0x%02x\n",
1080 (cpuid >> 16) & 127);
1081 } else {
1082 /* post-ARM7 */
1083 seq_printf(m, "CPU variant\t: 0x%x\n",
1084 (cpuid >> 20) & 15);
1085 }
1086 seq_printf(m, "CPU part\t: 0x%03x\n",
1087 (cpuid >> 4) & 0xfff);
1088 }
1089 seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
1090 }
1091
1092 seq_printf(m, "Hardware\t: %s\n", machine_name);
1093 seq_printf(m, "Revision\t: %04x\n", system_rev);
1094 seq_printf(m, "Serial\t\t: %08x%08x\n",
1095 system_serial_high, system_serial_low);
1096
1097 return 0;
1098 }
1099
1100 static void *c_start(struct seq_file *m, loff_t *pos)
1101 {
1102 return *pos < 1 ? (void *)1 : NULL;
1103 }
1104
1105 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
1106 {
1107 ++*pos;
1108 return NULL;
1109 }
1110
1111 static void c_stop(struct seq_file *m, void *v)
1112 {
1113 }
1114
1115 const struct seq_operations cpuinfo_op = {
1116 .start = c_start,
1117 .next = c_next,
1118 .stop = c_stop,
1119 .show = c_show
1120 };