ARM: 8319/1: advertise availability of v8 Crypto instructions
[deliverable/linux.git] arch/arm/kernel/setup.c
1 /*
2 * linux/arch/arm/kernel/setup.c
3 *
4 * Copyright (C) 1995-2001 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10 #include <linux/export.h>
11 #include <linux/kernel.h>
12 #include <linux/stddef.h>
13 #include <linux/ioport.h>
14 #include <linux/delay.h>
15 #include <linux/utsname.h>
16 #include <linux/initrd.h>
17 #include <linux/console.h>
18 #include <linux/bootmem.h>
19 #include <linux/seq_file.h>
20 #include <linux/screen_info.h>
21 #include <linux/of_iommu.h>
22 #include <linux/of_platform.h>
23 #include <linux/init.h>
24 #include <linux/kexec.h>
25 #include <linux/of_fdt.h>
26 #include <linux/cpu.h>
27 #include <linux/interrupt.h>
28 #include <linux/smp.h>
29 #include <linux/proc_fs.h>
30 #include <linux/memblock.h>
31 #include <linux/bug.h>
32 #include <linux/compiler.h>
33 #include <linux/sort.h>
34
35 #include <asm/unified.h>
36 #include <asm/cp15.h>
37 #include <asm/cpu.h>
38 #include <asm/cputype.h>
39 #include <asm/elf.h>
40 #include <asm/procinfo.h>
41 #include <asm/psci.h>
42 #include <asm/sections.h>
43 #include <asm/setup.h>
44 #include <asm/smp_plat.h>
45 #include <asm/mach-types.h>
46 #include <asm/cacheflush.h>
47 #include <asm/cachetype.h>
48 #include <asm/tlbflush.h>
49
50 #include <asm/prom.h>
51 #include <asm/mach/arch.h>
52 #include <asm/mach/irq.h>
53 #include <asm/mach/time.h>
54 #include <asm/system_info.h>
55 #include <asm/system_misc.h>
56 #include <asm/traps.h>
57 #include <asm/unwind.h>
58 #include <asm/memblock.h>
59 #include <asm/virt.h>
60
61 #include "atags.h"
62
63
64 #if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
65 char fpe_type[8];
66
67 static int __init fpe_setup(char *line)
68 {
69 memcpy(fpe_type, line, 8);
70 return 1;
71 }
72
73 __setup("fpe=", fpe_setup);
74 #endif
75
76 extern void init_default_cache_policy(unsigned long);
77 extern void paging_init(const struct machine_desc *desc);
78 extern void early_paging_init(const struct machine_desc *,
79 struct proc_info_list *);
80 extern void sanity_check_meminfo(void);
81 extern enum reboot_mode reboot_mode;
82 extern void setup_dma_zone(const struct machine_desc *desc);
83
84 unsigned int processor_id;
85 EXPORT_SYMBOL(processor_id);
86 unsigned int __machine_arch_type __read_mostly;
87 EXPORT_SYMBOL(__machine_arch_type);
88 unsigned int cacheid __read_mostly;
89 EXPORT_SYMBOL(cacheid);
90
91 unsigned int __atags_pointer __initdata;
92
93 unsigned int system_rev;
94 EXPORT_SYMBOL(system_rev);
95
96 unsigned int system_serial_low;
97 EXPORT_SYMBOL(system_serial_low);
98
99 unsigned int system_serial_high;
100 EXPORT_SYMBOL(system_serial_high);
101
102 unsigned int elf_hwcap __read_mostly;
103 EXPORT_SYMBOL(elf_hwcap);
104
105 unsigned int elf_hwcap2 __read_mostly;
106 EXPORT_SYMBOL(elf_hwcap2);
107
108
109 #ifdef MULTI_CPU
110 struct processor processor __read_mostly;
111 #endif
112 #ifdef MULTI_TLB
113 struct cpu_tlb_fns cpu_tlb __read_mostly;
114 #endif
115 #ifdef MULTI_USER
116 struct cpu_user_fns cpu_user __read_mostly;
117 #endif
118 #ifdef MULTI_CACHE
119 struct cpu_cache_fns cpu_cache __read_mostly;
120 #endif
121 #ifdef CONFIG_OUTER_CACHE
122 struct outer_cache_fns outer_cache __read_mostly;
123 EXPORT_SYMBOL(outer_cache);
124 #endif
125
126 /*
127 * Cached cpu_architecture() result for use by assembler code.
128 * C code should use the cpu_architecture() function instead of accessing this
129 * variable directly.
130 */
131 int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;
132
133 struct stack {
134 u32 irq[3];
135 u32 abt[3];
136 u32 und[3];
137 u32 fiq[3];
138 } ____cacheline_aligned;
139
140 #ifndef CONFIG_CPU_V7M
141 static struct stack stacks[NR_CPUS];
142 #endif
143
144 char elf_platform[ELF_PLATFORM_SIZE];
145 EXPORT_SYMBOL(elf_platform);
146
147 static const char *cpu_name;
148 static const char *machine_name;
149 static char __initdata cmd_line[COMMAND_LINE_SIZE];
150 const struct machine_desc *machine_desc __initdata;
151
152 static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
153 #define ENDIANNESS ((char)endian_test.l)
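/*
 * endian_test overlays the bytes 'l','?','?','b' with an unsigned long;
 * casting that long to char yields its least significant byte, which is
 * the byte at the lowest address on a little-endian CPU ('l') and the
 * byte at the highest address on a big-endian CPU ('b').  setup_processor()
 * appends this character to the machine and ELF platform strings, so
 * e.g. uname -m reports "armv7l" on a little-endian ARMv7 system.
 */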
154
155 DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
156
157 /*
158 * Standard memory resources
159 */
160 static struct resource mem_res[] = {
161 {
162 .name = "Video RAM",
163 .start = 0,
164 .end = 0,
165 .flags = IORESOURCE_MEM
166 },
167 {
168 .name = "Kernel code",
169 .start = 0,
170 .end = 0,
171 .flags = IORESOURCE_MEM
172 },
173 {
174 .name = "Kernel data",
175 .start = 0,
176 .end = 0,
177 .flags = IORESOURCE_MEM
178 }
179 };
180
181 #define video_ram mem_res[0]
182 #define kernel_code mem_res[1]
183 #define kernel_data mem_res[2]
184
185 static struct resource io_res[] = {
186 {
187 .name = "reserved",
188 .start = 0x3bc,
189 .end = 0x3be,
190 .flags = IORESOURCE_IO | IORESOURCE_BUSY
191 },
192 {
193 .name = "reserved",
194 .start = 0x378,
195 .end = 0x37f,
196 .flags = IORESOURCE_IO | IORESOURCE_BUSY
197 },
198 {
199 .name = "reserved",
200 .start = 0x278,
201 .end = 0x27f,
202 .flags = IORESOURCE_IO | IORESOURCE_BUSY
203 }
204 };
205
206 #define lp0 io_res[0]
207 #define lp1 io_res[1]
208 #define lp2 io_res[2]
209
210 static const char *proc_arch[] = {
211 "undefined/unknown",
212 "3",
213 "4",
214 "4T",
215 "5",
216 "5T",
217 "5TE",
218 "5TEJ",
219 "6TEJ",
220 "7",
221 "7M",
222 "?(12)",
223 "?(13)",
224 "?(14)",
225 "?(15)",
226 "?(16)",
227 "?(17)",
228 };
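/*
 * proc_arch[] is indexed by the CPU_ARCH_* value returned by
 * cpu_architecture() and is what c_show() prints in the
 * "CPU architecture" line of /proc/cpuinfo.
 */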
229
230 #ifdef CONFIG_CPU_V7M
231 static int __get_cpu_architecture(void)
232 {
233 return CPU_ARCH_ARMv7M;
234 }
235 #else
236 static int __get_cpu_architecture(void)
237 {
238 int cpu_arch;
239
240 if ((read_cpuid_id() & 0x0008f000) == 0) {
241 cpu_arch = CPU_ARCH_UNKNOWN;
242 } else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
243 cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
244 } else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
245 cpu_arch = (read_cpuid_id() >> 16) & 7;
246 if (cpu_arch)
247 cpu_arch += CPU_ARCH_ARMv3;
248 } else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
249 unsigned int mmfr0;
250
251 /* Revised CPUID format. Read the Memory Model Feature
252 * Register 0 and check for VMSAv7 or PMSAv7 */
253 asm("mrc p15, 0, %0, c0, c1, 4"
254 : "=r" (mmfr0));
255 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
256 (mmfr0 & 0x000000f0) >= 0x00000030)
257 cpu_arch = CPU_ARCH_ARMv7;
258 else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
259 (mmfr0 & 0x000000f0) == 0x00000020)
260 cpu_arch = CPU_ARCH_ARMv6;
261 else
262 cpu_arch = CPU_ARCH_UNKNOWN;
263 } else
264 cpu_arch = CPU_ARCH_UNKNOWN;
265
266 return cpu_arch;
267 }
268 #endif
269
270 int __pure cpu_architecture(void)
271 {
272 BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);
273
274 return __cpu_architecture;
275 }
276
277 static int cpu_has_aliasing_icache(unsigned int arch)
278 {
279 int aliasing_icache;
280 unsigned int id_reg, num_sets, line_size;
281
282 /* PIPT caches never alias. */
283 if (icache_is_pipt())
284 return 0;
285
286 /* arch specifies the register format */
287 switch (arch) {
288 case CPU_ARCH_ARMv7:
289 asm("mcr p15, 2, %0, c0, c0, 0 @ set CSSELR"
290 : /* No output operands */
291 : "r" (1));
292 isb();
293 asm("mrc p15, 1, %0, c0, c0, 0 @ read CCSIDR"
294 : "=r" (id_reg));
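/*
 * CCSIDR (with CSSELR = 1, i.e. the L1 instruction cache selected):
 * bits [2:0] encode log2(words per line) - 2 and bits [27:13] encode
 * the number of sets minus one.  The I-cache can alias when one way
 * (line_size * num_sets) spans more than a page.
 */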
295 line_size = 4 << ((id_reg & 0x7) + 2);
296 num_sets = ((id_reg >> 13) & 0x7fff) + 1;
297 aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
298 break;
299 case CPU_ARCH_ARMv6:
300 aliasing_icache = read_cpuid_cachetype() & (1 << 11);
301 break;
302 default:
303 /* I-cache aliases will be handled by D-cache aliasing code */
304 aliasing_icache = 0;
305 }
306
307 return aliasing_icache;
308 }
309
310 static void __init cacheid_init(void)
311 {
312 unsigned int arch = cpu_architecture();
313
314 if (arch == CPU_ARCH_ARMv7M) {
315 cacheid = 0;
316 } else if (arch >= CPU_ARCH_ARMv6) {
317 unsigned int cachetype = read_cpuid_cachetype();
318 if ((cachetype & (7 << 29)) == 4 << 29) {
319 /* ARMv7 register format */
320 arch = CPU_ARCH_ARMv7;
321 cacheid = CACHEID_VIPT_NONALIASING;
322 switch (cachetype & (3 << 14)) {
323 case (1 << 14):
324 cacheid |= CACHEID_ASID_TAGGED;
325 break;
326 case (3 << 14):
327 cacheid |= CACHEID_PIPT;
328 break;
329 }
330 } else {
331 arch = CPU_ARCH_ARMv6;
332 if (cachetype & (1 << 23))
333 cacheid = CACHEID_VIPT_ALIASING;
334 else
335 cacheid = CACHEID_VIPT_NONALIASING;
336 }
337 if (cpu_has_aliasing_icache(arch))
338 cacheid |= CACHEID_VIPT_I_ALIASING;
339 } else {
340 cacheid = CACHEID_VIVT;
341 }
342
343 pr_info("CPU: %s data cache, %s instruction cache\n",
344 cache_is_vivt() ? "VIVT" :
345 cache_is_vipt_aliasing() ? "VIPT aliasing" :
346 cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
347 cache_is_vivt() ? "VIVT" :
348 icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
349 icache_is_vipt_aliasing() ? "VIPT aliasing" :
350 icache_is_pipt() ? "PIPT" :
351 cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
352 }
353
354 /*
355 * These functions re-use the assembly code in head.S, which
356 * already provides the required functionality.
357 */
358 extern struct proc_info_list *lookup_processor_type(unsigned int);
359
360 void __init early_print(const char *str, ...)
361 {
362 extern void printascii(const char *);
363 char buf[256];
364 va_list ap;
365
366 va_start(ap, str);
367 vsnprintf(buf, sizeof(buf), str, ap);
368 va_end(ap);
369
370 #ifdef CONFIG_DEBUG_LL
371 printascii(buf);
372 #endif
373 printk("%s", buf);
374 }
375
376 static void __init cpuid_init_hwcaps(void)
377 {
378 int block;
379 u32 isar5;
380
381 if (cpu_architecture() < CPU_ARCH_ARMv7)
382 return;
383
384 block = cpuid_feature_extract(CPUID_EXT_ISAR0, 24);
385 if (block >= 2)
386 elf_hwcap |= HWCAP_IDIVA;
387 if (block >= 1)
388 elf_hwcap |= HWCAP_IDIVT;
389
390 /* LPAE implies atomic ldrd/strd instructions */
391 block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
392 if (block >= 5)
393 elf_hwcap |= HWCAP_LPAE;
394
395 /* check for supported v8 Crypto instructions */
396 isar5 = read_cpuid_ext(CPUID_EXT_ISAR5);
397
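/*
 * ID_ISAR5 field layout per the ARM ARM: AES in bits [7:4]
 * (1 = AESE/AESD, 2 = also PMULL/PMULL2), SHA1 in bits [11:8],
 * SHA2 in bits [15:12] and CRC32 in bits [19:16].
 */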
398 block = cpuid_feature_extract_field(isar5, 4);
399 if (block >= 2)
400 elf_hwcap2 |= HWCAP2_PMULL;
401 if (block >= 1)
402 elf_hwcap2 |= HWCAP2_AES;
403
404 block = cpuid_feature_extract_field(isar5, 8);
405 if (block >= 1)
406 elf_hwcap2 |= HWCAP2_SHA1;
407
408 block = cpuid_feature_extract_field(isar5, 12);
409 if (block >= 1)
410 elf_hwcap2 |= HWCAP2_SHA2;
411
412 block = cpuid_feature_extract_field(isar5, 16);
413 if (block >= 1)
414 elf_hwcap2 |= HWCAP2_CRC32;
415 }
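/*
 * Illustrative only (not kernel code): once these bits are set, user
 * space can test for the v8 Crypto Extensions via the ELF auxiliary
 * vector, e.g.
 *
 *	#include <sys/auxv.h>
 *	#include <asm/hwcap.h>
 *
 *	if (getauxval(AT_HWCAP2) & HWCAP2_AES)
 *		do_aes_with_ce();	(hypothetical accelerated path)
 *
 * or by looking for "aes", "pmull", "sha1", "sha2" and "crc32" in the
 * Features line of /proc/cpuinfo.
 */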
416
417 static void __init elf_hwcap_fixup(void)
418 {
419 unsigned id = read_cpuid_id();
420
421 /*
422 * HWCAP_TLS is available only on 1136 r1p0 and later,
423 * see also kuser_get_tls_init.
424 */
425 if (read_cpuid_part() == ARM_CPU_PART_ARM1136 &&
426 ((id >> 20) & 3) == 0) {
427 elf_hwcap &= ~HWCAP_TLS;
428 return;
429 }
430
431 /* Verify if CPUID scheme is implemented */
432 if ((id & 0x000f0000) != 0x000f0000)
433 return;
434
435 /*
436 * If the CPU supports LDREX/STREX and LDREXB/STREXB,
437 * avoid advertising SWP; it may not be atomic with
438 * multiprocessing cores.
439 */
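/*
 * Per the ARM ARM, SynchPrim_instrs lives in ID_ISAR3[15:12] and its
 * fractional companion SynchPrim_instrs_frac in ID_ISAR4[23:20];
 * together they indicate which exclusive-access instructions beyond
 * LDREX/STREX are implemented.
 */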
440 if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
441 (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
442 cpuid_feature_extract(CPUID_EXT_ISAR4, 20) >= 3))
443 elf_hwcap &= ~HWCAP_SWP;
444 }
445
446 /*
447 * cpu_init - initialise one CPU.
448 *
449 * cpu_init sets up the per-CPU stacks.
450 */
451 void notrace cpu_init(void)
452 {
453 #ifndef CONFIG_CPU_V7M
454 unsigned int cpu = smp_processor_id();
455 struct stack *stk = &stacks[cpu];
456
457 if (cpu >= NR_CPUS) {
458 pr_crit("CPU%u: bad primary CPU number\n", cpu);
459 BUG();
460 }
461
462 /*
463 * This only works on resume and secondary cores. For booting on the
464 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
465 */
466 set_my_cpu_offset(per_cpu_offset(cpu));
467
468 cpu_proc_init();
469
470 /*
471 * Define the placement constraint for the inline asm directive below.
472 * In Thumb-2, msr with an immediate value is not allowed.
473 */
474 #ifdef CONFIG_THUMB2_KERNEL
475 #define PLC "r"
476 #else
477 #define PLC "I"
478 #endif
479
480 /*
481 * setup stacks for re-entrant exception handlers
482 */
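/*
 * Each msr cpsr_c below switches into one exception mode (IRQ, ABT,
 * UND, FIQ) with IRQs and FIQs masked, points that mode's sp at its
 * small per-CPU scratch stack within 'stk', and the final msr returns
 * to SVC mode.
 */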
483 __asm__ (
484 "msr cpsr_c, %1\n\t"
485 "add r14, %0, %2\n\t"
486 "mov sp, r14\n\t"
487 "msr cpsr_c, %3\n\t"
488 "add r14, %0, %4\n\t"
489 "mov sp, r14\n\t"
490 "msr cpsr_c, %5\n\t"
491 "add r14, %0, %6\n\t"
492 "mov sp, r14\n\t"
493 "msr cpsr_c, %7\n\t"
494 "add r14, %0, %8\n\t"
495 "mov sp, r14\n\t"
496 "msr cpsr_c, %9"
497 :
498 : "r" (stk),
499 PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
500 "I" (offsetof(struct stack, irq[0])),
501 PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
502 "I" (offsetof(struct stack, abt[0])),
503 PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
504 "I" (offsetof(struct stack, und[0])),
505 PLC (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
506 "I" (offsetof(struct stack, fiq[0])),
507 PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
508 : "r14");
509 #endif
510 }
511
512 u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };
513
514 void __init smp_setup_processor_id(void)
515 {
516 int i;
517 u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
518 u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
519
520 cpu_logical_map(0) = cpu;
521 for (i = 1; i < nr_cpu_ids; ++i)
522 cpu_logical_map(i) = i == cpu ? 0 : i;
523
524 /*
525 * clear __my_cpu_offset on boot CPU to avoid hang caused by
526 * using percpu variable early, for example, lockdep will
527 * access percpu variable inside lock_release
528 */
529 set_my_cpu_offset(0);
530
531 pr_info("Booting Linux on physical CPU 0x%x\n", mpidr);
532 }
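/*
 * Example: if the boot CPU's MPIDR has Aff0 == 2 on a four-CPU system,
 * the loop above yields cpu_logical_map = { 2, 1, 0, 3 }, so the boot
 * CPU is always logical CPU 0 regardless of its physical ID.
 */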
533
534 struct mpidr_hash mpidr_hash;
535 #ifdef CONFIG_SMP
536 /**
537 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
538 * level in order to build a linear index from an
539 * MPIDR value. Resulting algorithm is a collision
540 * free hash carried out through shifting and ORing
541 */
542 static void __init smp_build_mpidr_hash(void)
543 {
544 u32 i, affinity;
545 u32 fs[3], bits[3], ls, mask = 0;
546 /*
547 * Pre-scan the list of MPIDRs and filter out bits that do
548 * not contribute to affinity levels, i.e. they never toggle.
549 */
550 for_each_possible_cpu(i)
551 mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
552 pr_debug("mask of set bits 0x%x\n", mask);
553 /*
554 * Find and stash the last and first bit set at all affinity levels to
555 * check how many bits are required to represent them.
556 */
557 for (i = 0; i < 3; i++) {
558 affinity = MPIDR_AFFINITY_LEVEL(mask, i);
559 /*
560 * Find the positions of the most and least significant
561 * set bits to determine how many bits are required
562 * to express this affinity level.
563 */
564 ls = fls(affinity);
565 fs[i] = affinity ? ffs(affinity) - 1 : 0;
566 bits[i] = ls - fs[i];
567 }
568 /*
569 * An index can be created from the MPIDR by isolating the
570 * significant bits at each affinity level and by shifting
571 * them in order to compress the 24-bit value space to a
572 * compact set of values. This is equivalent to hashing
573 * the MPIDR through shifting and ORing. It is a collision-free
574 * hash, though not minimal, since some levels might contain a
575 * number of CPUs that is not an exact power of 2 and their bit
576 * representation might contain holes, e.g. MPIDR[7:0] = {0x2, 0x80}.
577 */
578 mpidr_hash.shift_aff[0] = fs[0];
579 mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
580 mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
581 (bits[1] + bits[0]);
582 mpidr_hash.mask = mask;
583 mpidr_hash.bits = bits[2] + bits[1] + bits[0];
584 pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
585 mpidr_hash.shift_aff[0],
586 mpidr_hash.shift_aff[1],
587 mpidr_hash.shift_aff[2],
588 mpidr_hash.mask,
589 mpidr_hash.bits);
590 /*
591 * 4x is an arbitrary value used to warn on a hash table much bigger
592 * than expected on most systems.
593 */
594 if (mpidr_hash_size() > 4 * num_possible_cpus())
595 pr_warn("Large number of MPIDR hash buckets detected\n");
596 sync_cache_w(&mpidr_hash);
597 }
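/*
 * Worked example (assuming two clusters of two CPUs, MPIDRs 0x000,
 * 0x001, 0x100 and 0x101): mask = 0x101, bits[] = {1, 1, 0}, so
 * shift_aff[] = {0, 7, 14} and mpidr_hash.bits = 2; the four MPIDRs
 * then hash to the distinct indices 0, 1, 2 and 3.
 */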
598 #endif
599
600 static void __init setup_processor(void)
601 {
602 struct proc_info_list *list;
603
604 /*
605 * locate processor in the list of supported processor
606 * types. The linker builds this table for us from the
607 * entries in arch/arm/mm/proc-*.S
608 */
609 list = lookup_processor_type(read_cpuid_id());
610 if (!list) {
611 pr_err("CPU configuration botched (ID %08x), unable to continue.\n",
612 read_cpuid_id());
613 while (1);
614 }
615
616 cpu_name = list->cpu_name;
617 __cpu_architecture = __get_cpu_architecture();
618
619 #ifdef MULTI_CPU
620 processor = *list->proc;
621 #endif
622 #ifdef MULTI_TLB
623 cpu_tlb = *list->tlb;
624 #endif
625 #ifdef MULTI_USER
626 cpu_user = *list->user;
627 #endif
628 #ifdef MULTI_CACHE
629 cpu_cache = *list->cache;
630 #endif
631
632 pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
633 cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
634 proc_arch[cpu_architecture()], get_cr());
635
636 snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
637 list->arch_name, ENDIANNESS);
638 snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
639 list->elf_name, ENDIANNESS);
640 elf_hwcap = list->elf_hwcap;
641
642 cpuid_init_hwcaps();
643
644 #ifndef CONFIG_ARM_THUMB
645 elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
646 #endif
647 #ifdef CONFIG_MMU
648 init_default_cache_policy(list->__cpu_mm_mmu_flags);
649 #endif
650 erratum_a15_798181_init();
651
652 elf_hwcap_fixup();
653
654 cacheid_init();
655 cpu_init();
656 }
657
658 void __init dump_machine_table(void)
659 {
660 const struct machine_desc *p;
661
662 early_print("Available machine support:\n\nID (hex)\tNAME\n");
663 for_each_machine_desc(p)
664 early_print("%08x\t%s\n", p->nr, p->name);
665
666 early_print("\nPlease check your kernel config and/or bootloader.\n");
667
668 while (true)
669 /* can't use cpu_relax() here as it may require MMU setup */;
670 }
671
672 int __init arm_add_memory(u64 start, u64 size)
673 {
674 u64 aligned_start;
675
676 /*
677 * Ensure that start/size are aligned to a page boundary.
678 * Size is rounded down, start is rounded up.
679 */
680 aligned_start = PAGE_ALIGN(start);
681 if (aligned_start > start + size)
682 size = 0;
683 else
684 size -= aligned_start - start;
685
686 #ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
687 if (aligned_start > ULONG_MAX) {
688 pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
689 (long long)start);
690 return -EINVAL;
691 }
692
693 if (aligned_start + size > ULONG_MAX) {
694 pr_crit("Truncating memory at 0x%08llx to fit in 32-bit physical address space\n",
695 (long long)start);
696 /*
697 * To ensure start + size is representable in
698 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
699 * This means we lose a page after masking.
700 */
701 size = ULONG_MAX - aligned_start;
702 }
703 #endif
704
705 if (aligned_start < PHYS_OFFSET) {
706 if (aligned_start + size <= PHYS_OFFSET) {
707 pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
708 aligned_start, aligned_start + size);
709 return -EINVAL;
710 }
711
712 pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
713 aligned_start, (u64)PHYS_OFFSET);
714
715 size -= PHYS_OFFSET - aligned_start;
716 aligned_start = PHYS_OFFSET;
717 }
718
719 start = aligned_start;
720 size = size & ~(phys_addr_t)(PAGE_SIZE - 1);
721
722 /*
723 * Check whether this memory region still has a non-zero
724 * size after the alignment adjustments above.
725 */
726 if (size == 0)
727 return -EINVAL;
728
729 memblock_add(start, size);
730 return 0;
731 }
732
733 /*
734 * Pick out the memory size. We look for mem=size@start,
735 * where start and size are "size[KkMm]"
736 */
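/* e.g. "mem=64M" or "mem=512M@0x20000000"; memparse() accepts K, M and G suffixes */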
737
738 static int __init early_mem(char *p)
739 {
740 static int usermem __initdata = 0;
741 u64 size;
742 u64 start;
743 char *endp;
744
745 /*
746 * If the user specifies memory size, we
747 * blow away any automatically generated
748 * size.
749 */
750 if (usermem == 0) {
751 usermem = 1;
752 memblock_remove(memblock_start_of_DRAM(),
753 memblock_end_of_DRAM() - memblock_start_of_DRAM());
754 }
755
756 start = PHYS_OFFSET;
757 size = memparse(p, &endp);
758 if (*endp == '@')
759 start = memparse(endp + 1, NULL);
760
761 arm_add_memory(start, size);
762
763 return 0;
764 }
765 early_param("mem", early_mem);
766
767 static void __init request_standard_resources(const struct machine_desc *mdesc)
768 {
769 struct memblock_region *region;
770 struct resource *res;
771
772 kernel_code.start = virt_to_phys(_text);
773 kernel_code.end = virt_to_phys(_etext - 1);
774 kernel_data.start = virt_to_phys(_sdata);
775 kernel_data.end = virt_to_phys(_end - 1);
776
777 for_each_memblock(memory, region) {
778 res = memblock_virt_alloc(sizeof(*res), 0);
779 res->name = "System RAM";
780 res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
781 res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
782 res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
783
784 request_resource(&iomem_resource, res);
785
786 if (kernel_code.start >= res->start &&
787 kernel_code.end <= res->end)
788 request_resource(res, &kernel_code);
789 if (kernel_data.start >= res->start &&
790 kernel_data.end <= res->end)
791 request_resource(res, &kernel_data);
792 }
793
794 if (mdesc->video_start) {
795 video_ram.start = mdesc->video_start;
796 video_ram.end = mdesc->video_end;
797 request_resource(&iomem_resource, &video_ram);
798 }
799
800 /*
801 * Some machines can never possess lp0, lp1 or lp2
802 * (the legacy PC parallel port regions)
803 */
804 if (mdesc->reserve_lp0)
805 request_resource(&ioport_resource, &lp0);
806 if (mdesc->reserve_lp1)
807 request_resource(&ioport_resource, &lp1);
808 if (mdesc->reserve_lp2)
809 request_resource(&ioport_resource, &lp2);
810 }
811
812 #if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
813 struct screen_info screen_info = {
814 .orig_video_lines = 30,
815 .orig_video_cols = 80,
816 .orig_video_mode = 0,
817 .orig_video_ega_bx = 0,
818 .orig_video_isVGA = 1,
819 .orig_video_points = 8
820 };
821 #endif
822
823 static int __init customize_machine(void)
824 {
825 /*
826 * Customize platform devices, or add new ones.
827 * On DT-based machines, if no init_machine callback is provided
828 * we fall back to populating the machine from the device tree;
829 * otherwise every machine would need an init_machine callback.
830 */
831 of_iommu_init();
832 if (machine_desc->init_machine)
833 machine_desc->init_machine();
834 #ifdef CONFIG_OF
835 else
836 of_platform_populate(NULL, of_default_bus_match_table,
837 NULL, NULL);
838 #endif
839 return 0;
840 }
841 arch_initcall(customize_machine);
842
843 static int __init init_machine_late(void)
844 {
845 if (machine_desc->init_late)
846 machine_desc->init_late();
847 return 0;
848 }
849 late_initcall(init_machine_late);
850
851 #ifdef CONFIG_KEXEC
852 static inline unsigned long long get_total_mem(void)
853 {
854 unsigned long total;
855
856 total = max_low_pfn - min_low_pfn;
857 return total << PAGE_SHIFT;
858 }
859
860 /**
861 * reserve_crashkernel() - reserves memory area for crash kernel
862 *
863 * This function reserves memory area given in "crashkernel=" kernel command
864 * line parameter. The memory reserved is used by a dump capture kernel when
865 * primary kernel is crashing.
866 */
867 static void __init reserve_crashkernel(void)
868 {
869 unsigned long long crash_size, crash_base;
870 unsigned long long total_mem;
871 int ret;
872
873 total_mem = get_total_mem();
874 ret = parse_crashkernel(boot_command_line, total_mem,
875 &crash_size, &crash_base);
876 if (ret)
877 return;
878
879 ret = memblock_reserve(crash_base, crash_size);
880 if (ret < 0) {
881 pr_warn("crashkernel reservation failed - memory is in use (0x%lx)\n",
882 (unsigned long)crash_base);
883 return;
884 }
885
886 pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
887 (unsigned long)(crash_size >> 20),
888 (unsigned long)(crash_base >> 20),
889 (unsigned long)(total_mem >> 20));
890
891 crashk_res.start = crash_base;
892 crashk_res.end = crash_base + crash_size - 1;
893 insert_resource(&iomem_resource, &crashk_res);
894 }
895 #else
896 static inline void reserve_crashkernel(void) {}
897 #endif /* CONFIG_KEXEC */
898
899 void __init hyp_mode_check(void)
900 {
901 #ifdef CONFIG_ARM_VIRT_EXT
902 sync_boot_mode();
903
904 if (is_hyp_mode_available()) {
905 pr_info("CPU: All CPU(s) started in HYP mode.\n");
906 pr_info("CPU: Virtualization extensions available.\n");
907 } else if (is_hyp_mode_mismatched()) {
908 pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
909 __boot_cpu_mode & MODE_MASK);
910 pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
911 } else
912 pr_info("CPU: All CPU(s) started in SVC mode.\n");
913 #endif
914 }
915
916 void __init setup_arch(char **cmdline_p)
917 {
918 const struct machine_desc *mdesc;
919
920 setup_processor();
921 mdesc = setup_machine_fdt(__atags_pointer);
922 if (!mdesc)
923 mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
924 machine_desc = mdesc;
925 machine_name = mdesc->name;
926 dump_stack_set_arch_desc("%s", mdesc->name);
927
928 if (mdesc->reboot_mode != REBOOT_HARD)
929 reboot_mode = mdesc->reboot_mode;
930
931 init_mm.start_code = (unsigned long) _text;
932 init_mm.end_code = (unsigned long) _etext;
933 init_mm.end_data = (unsigned long) _edata;
934 init_mm.brk = (unsigned long) _end;
935
936 /* populate cmd_line too for later use, preserving boot_command_line */
937 strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
938 *cmdline_p = cmd_line;
939
940 parse_early_param();
941
942 early_paging_init(mdesc, lookup_processor_type(read_cpuid_id()));
943 setup_dma_zone(mdesc);
944 sanity_check_meminfo();
945 arm_memblock_init(mdesc);
946
947 paging_init(mdesc);
948 request_standard_resources(mdesc);
949
950 if (mdesc->restart)
951 arm_pm_restart = mdesc->restart;
952
953 unflatten_device_tree();
954
955 arm_dt_init_cpu_maps();
956 psci_init();
957 #ifdef CONFIG_SMP
958 if (is_smp()) {
959 if (!mdesc->smp_init || !mdesc->smp_init()) {
960 if (psci_smp_available())
961 smp_set_ops(&psci_smp_ops);
962 else if (mdesc->smp)
963 smp_set_ops(mdesc->smp);
964 }
965 smp_init_cpus();
966 smp_build_mpidr_hash();
967 }
968 #endif
969
970 if (!is_smp())
971 hyp_mode_check();
972
973 reserve_crashkernel();
974
975 #ifdef CONFIG_MULTI_IRQ_HANDLER
976 handle_arch_irq = mdesc->handle_irq;
977 #endif
978
979 #ifdef CONFIG_VT
980 #if defined(CONFIG_VGA_CONSOLE)
981 conswitchp = &vga_con;
982 #elif defined(CONFIG_DUMMY_CONSOLE)
983 conswitchp = &dummy_con;
984 #endif
985 #endif
986
987 if (mdesc->init_early)
988 mdesc->init_early();
989 }
990
991
992 static int __init topology_init(void)
993 {
994 int cpu;
995
996 for_each_possible_cpu(cpu) {
997 struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
998 cpuinfo->cpu.hotpluggable = 1;
999 register_cpu(&cpuinfo->cpu, cpu);
1000 }
1001
1002 return 0;
1003 }
1004 subsys_initcall(topology_init);
1005
1006 #ifdef CONFIG_HAVE_PROC_CPU
1007 static int __init proc_cpu_init(void)
1008 {
1009 struct proc_dir_entry *res;
1010
1011 res = proc_mkdir("cpu", NULL);
1012 if (!res)
1013 return -ENOMEM;
1014 return 0;
1015 }
1016 fs_initcall(proc_cpu_init);
1017 #endif
1018
1019 static const char *hwcap_str[] = {
1020 "swp",
1021 "half",
1022 "thumb",
1023 "26bit",
1024 "fastmult",
1025 "fpa",
1026 "vfp",
1027 "edsp",
1028 "java",
1029 "iwmmxt",
1030 "crunch",
1031 "thumbee",
1032 "neon",
1033 "vfpv3",
1034 "vfpv3d16",
1035 "tls",
1036 "vfpv4",
1037 "idiva",
1038 "idivt",
1039 "vfpd32",
1040 "lpae",
1041 "evtstrm",
1042 NULL
1043 };
1044
1045 static const char *hwcap2_str[] = {
1046 "aes",
1047 "pmull",
1048 "sha1",
1049 "sha2",
1050 "crc32",
1051 NULL
1052 };
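/*
 * Each string's position in hwcap_str[] and hwcap2_str[] must match the
 * bit number of the corresponding HWCAP/HWCAP2 bit in <asm/hwcap.h>;
 * c_show() below indexes these tables by bit.  On a CPU with the v8
 * Crypto Extensions the Features line therefore ends with something
 * like "... aes pmull sha1 sha2 crc32".
 */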
1053
1054 static int c_show(struct seq_file *m, void *v)
1055 {
1056 int i, j;
1057 u32 cpuid;
1058
1059 for_each_online_cpu(i) {
1060 /*
1061 * glibc reads /proc/cpuinfo to determine the number of
1062 * online processors, looking for lines beginning with
1063 * "processor". Give glibc what it expects.
1064 */
1065 seq_printf(m, "processor\t: %d\n", i);
1066 cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
1067 seq_printf(m, "model name\t: %s rev %d (%s)\n",
1068 cpu_name, cpuid & 15, elf_platform);
1069
1070 #if defined(CONFIG_SMP)
1071 seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
1072 per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
1073 (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
1074 #else
1075 seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
1076 loops_per_jiffy / (500000/HZ),
1077 (loops_per_jiffy / (5000/HZ)) % 100);
1078 #endif
1079 /* dump out the processor features */
1080 seq_puts(m, "Features\t: ");
1081
1082 for (j = 0; hwcap_str[j]; j++)
1083 if (elf_hwcap & (1 << j))
1084 seq_printf(m, "%s ", hwcap_str[j]);
1085
1086 for (j = 0; hwcap2_str[j]; j++)
1087 if (elf_hwcap2 & (1 << j))
1088 seq_printf(m, "%s ", hwcap2_str[j]);
1089
1090 seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
1091 seq_printf(m, "CPU architecture: %s\n",
1092 proc_arch[cpu_architecture()]);
1093
1094 if ((cpuid & 0x0008f000) == 0x00000000) {
1095 /* pre-ARM7 */
1096 seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
1097 } else {
1098 if ((cpuid & 0x0008f000) == 0x00007000) {
1099 /* ARM7 */
1100 seq_printf(m, "CPU variant\t: 0x%02x\n",
1101 (cpuid >> 16) & 127);
1102 } else {
1103 /* post-ARM7 */
1104 seq_printf(m, "CPU variant\t: 0x%x\n",
1105 (cpuid >> 20) & 15);
1106 }
1107 seq_printf(m, "CPU part\t: 0x%03x\n",
1108 (cpuid >> 4) & 0xfff);
1109 }
1110 seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
1111 }
1112
1113 seq_printf(m, "Hardware\t: %s\n", machine_name);
1114 seq_printf(m, "Revision\t: %04x\n", system_rev);
1115 seq_printf(m, "Serial\t\t: %08x%08x\n",
1116 system_serial_high, system_serial_low);
1117
1118 return 0;
1119 }
1120
1121 static void *c_start(struct seq_file *m, loff_t *pos)
1122 {
1123 return *pos < 1 ? (void *)1 : NULL;
1124 }
1125
1126 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
1127 {
1128 ++*pos;
1129 return NULL;
1130 }
1131
1132 static void c_stop(struct seq_file *m, void *v)
1133 {
1134 }
1135
1136 const struct seq_operations cpuinfo_op = {
1137 .start = c_start,
1138 .next = c_next,
1139 .stop = c_stop,
1140 .show = c_show
1141 };