ARM: 8319/1: advertise availability of v8 Crypto instructions
arch/arm/kernel/setup.c
/*
 * linux/arch/arm/kernel/setup.c
 *
 * Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/sort.h>

#include <asm/unified.h>
#include <asm/cp15.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
#include <asm/psci.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>

#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/system_info.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/memblock.h>
#include <asm/virt.h>

#include "atags.h"

#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void init_default_cache_policy(unsigned long);
extern void paging_init(const struct machine_desc *desc);
extern void early_paging_init(const struct machine_desc *,
			      struct proc_info_list *);
extern void sanity_check_meminfo(void);
extern enum reboot_mode reboot_mode;
extern void setup_dma_zone(const struct machine_desc *desc);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);

unsigned int elf_hwcap2 __read_mostly;
EXPORT_SYMBOL(elf_hwcap2);


#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
	u32 fiq[3];
} ____cacheline_aligned;

#ifndef CONFIG_CPU_V7M
static struct stack stacks[NR_CPUS];
#endif

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
const struct machine_desc *machine_desc __initdata;

static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"7M",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};

#ifdef CONFIG_CPU_V7M
static int __get_cpu_architecture(void)
{
	return CPU_ARCH_ARMv7M;
}
#else
static int __get_cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		unsigned int mmfr0;

		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		asm("mrc	p15, 0, %0, c0, c1, 4"
		    : "=r" (mmfr0));
		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
#endif

int __pure cpu_architecture(void)
{
	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

	return __cpu_architecture;
}

static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* PIPT caches never alias. */
	if (icache_is_pipt())
		return 0;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		asm("mcr	p15, 2, %0, c0, c0, 0 @ set CSSELR"
		    : /* No output operands */
		    : "r" (1));
		isb();
		asm("mrc	p15, 1, %0, c0, c0, 0 @ read CCSIDR"
		    : "=r" (id_reg));
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}
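
/*
 * Worked example for the ARMv7 branch above (illustrative numbers, not
 * taken from a specific core): a CCSIDR LineSize field of 1 gives
 * 4 << (1 + 2) = 32-byte lines, and a NumSets field of 255 gives 256
 * sets, so one cache way spans 32 * 256 = 8192 bytes.  With 4 KiB pages
 * that exceeds PAGE_SIZE, virtual-index bits overlap the page offset,
 * and the I-cache is reported as aliasing.
 */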

static void __init cacheid_init(void)
{
	unsigned int arch = cpu_architecture();

	if (arch == CPU_ARCH_ARMv7M) {
		cacheid = 0;
	} else if (arch >= CPU_ARCH_ARMv6) {
		unsigned int cachetype = read_cpuid_cachetype();
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			arch = CPU_ARCH_ARMv7;
			cacheid = CACHEID_VIPT_NONALIASING;
			switch (cachetype & (3 << 14)) {
			case (1 << 14):
				cacheid |= CACHEID_ASID_TAGGED;
				break;
			case (3 << 14):
				cacheid |= CACHEID_PIPT;
				break;
			}
		} else {
			arch = CPU_ARCH_ARMv6;
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
		}
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
	} else {
		cacheid = CACHEID_VIVT;
	}

	pr_info("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		icache_is_pipt() ? "PIPT" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}

static void __init cpuid_init_hwcaps(void)
{
	int block;
	u32 isar5;

	if (cpu_architecture() < CPU_ARCH_ARMv7)
		return;

	block = cpuid_feature_extract(CPUID_EXT_ISAR0, 24);
	if (block >= 2)
		elf_hwcap |= HWCAP_IDIVA;
	if (block >= 1)
		elf_hwcap |= HWCAP_IDIVT;

	/* LPAE implies atomic ldrd/strd instructions */
	block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
	if (block >= 5)
		elf_hwcap |= HWCAP_LPAE;

	/* check for supported v8 Crypto instructions */
	isar5 = read_cpuid_ext(CPUID_EXT_ISAR5);

	block = cpuid_feature_extract_field(isar5, 4);
	if (block >= 2)
		elf_hwcap2 |= HWCAP2_PMULL;
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_AES;

	block = cpuid_feature_extract_field(isar5, 8);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SHA1;

	block = cpuid_feature_extract_field(isar5, 12);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SHA2;

	block = cpuid_feature_extract_field(isar5, 16);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_CRC32;
}
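
/*
 * The HWCAP2 bits set above reach userspace through the ELF auxiliary
 * vector and /proc/cpuinfo.  A minimal sketch of a userspace check
 * (standalone program, not part of this file; assumes a libc that
 * provides getauxval() and the uapi HWCAP2_* constants):
 *
 *	#include <stdio.h>
 *	#include <sys/auxv.h>
 *	#include <asm/hwcap.h>
 *
 *	int main(void)
 *	{
 *		unsigned long hwcap2 = getauxval(AT_HWCAP2);
 *
 *		printf("aes=%d pmull=%d sha1=%d sha2=%d crc32=%d\n",
 *		       !!(hwcap2 & HWCAP2_AES), !!(hwcap2 & HWCAP2_PMULL),
 *		       !!(hwcap2 & HWCAP2_SHA1), !!(hwcap2 & HWCAP2_SHA2),
 *		       !!(hwcap2 & HWCAP2_CRC32));
 *		return 0;
 *	}
 */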

static void __init elf_hwcap_fixup(void)
{
	unsigned id = read_cpuid_id();

	/*
	 * HWCAP_TLS is available only on 1136 r1p0 and later,
	 * see also kuser_get_tls_init.
	 */
	if (read_cpuid_part() == ARM_CPU_PART_ARM1136 &&
	    ((id >> 20) & 3) == 0) {
		elf_hwcap &= ~HWCAP_TLS;
		return;
	}

	/* Verify if CPUID scheme is implemented */
	if ((id & 0x000f0000) != 0x000f0000)
		return;

	/*
	 * If the CPU supports LDREX/STREX and LDREXB/STREXB,
	 * avoid advertising SWP; it may not be atomic with
	 * multiprocessing cores.
	 */
	if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
	    (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
	     cpuid_feature_extract(CPUID_EXT_ISAR3, 20) >= 3))
		elf_hwcap &= ~HWCAP_SWP;
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void notrace cpu_init(void)
{
#ifndef CONFIG_CPU_V7M
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		pr_crit("CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * This only works on resume and secondary cores. For booting on the
	 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
	 */
	set_my_cpu_offset(per_cpu_offset(cpu));

	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7\n\t"
	"add	r14, %0, %8\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %9"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
	      "I" (offsetof(struct stack, fiq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
#endif
}

u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };

void __init smp_setup_processor_id(void)
{
	int i;
	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	cpu_logical_map(0) = cpu;
	for (i = 1; i < nr_cpu_ids; ++i)
		cpu_logical_map(i) = i == cpu ? 0 : i;

	/*
	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
	 * using percpu variable early, for example, lockdep will
	 * access percpu variable inside lock_release
	 */
	set_my_cpu_offset(0);

	pr_info("Booting Linux on physical CPU 0x%x\n", mpidr);
}
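
/*
 * Example: if the boot core's MPIDR Aff0 reads 2, the loop above yields
 * cpu_logical_map = { 2, 1, 0, 3, ... }: logical CPU 0 is the booting
 * core, and physical CPU 0 takes over its former logical slot.
 */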

struct mpidr_hash mpidr_hash;
#ifdef CONFIG_SMP
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. Resulting algorithm is a collision
 *			  free hash carried out through shifting and ORing
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity;
	u32 fs[3], bits[3], ls, mask = 0;
	/*
	 * Pre-scan the list of MPIDRS and filter out bits that do
	 * not contribute to affinity levels, ie they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits 0x%x\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 3; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB bit and LSB bits position
		 * to determine how many bits are required
		 * to express the affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 24 bits values space to a
	 * compressed set of values. This is equivalent to hashing
	 * the MPIDR through shifting and ORing. It is a collision free
	 * hash though not minimal since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, eg MPIDR[7:0] = {0x2, 0x80}.
	 */
	mpidr_hash.shift_aff[0] = fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
						(bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
		 mpidr_hash.shift_aff[0],
		 mpidr_hash.shift_aff[1],
		 mpidr_hash.shift_aff[2],
		 mpidr_hash.mask,
		 mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
	sync_cache_w(&mpidr_hash);
}
#endif
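
/*
 * The suspend path consumes this hash from assembly; the index it
 * derives from an MPIDR is equivalent to the following C sketch
 * (illustrative helper, not part of this file; the 0xff/0xff00/0xff0000
 * masks pick out the three 8-bit affinity fields):
 *
 *	static u32 mpidr_hash_index(u32 mpidr)
 *	{
 *		u32 m = mpidr & mpidr_hash.mask;
 *
 *		return ((m & 0x0000ff) >> mpidr_hash.shift_aff[0]) |
 *		       ((m & 0x00ff00) >> mpidr_hash.shift_aff[1]) |
 *		       ((m & 0xff0000) >> mpidr_hash.shift_aff[2]);
 *	}
 */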

static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		pr_err("CPU configuration botched (ID %08x), unable to continue.\n",
		       read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;
	__cpu_architecture = __get_cpu_architecture();

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
		cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
		proc_arch[cpu_architecture()], get_cr());

	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;

	cpuid_init_hwcaps();

#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif
#ifdef CONFIG_MMU
	init_default_cache_policy(list->__cpu_mm_mmu_flags);
#endif
	erratum_a15_798181_init();

	elf_hwcap_fixup();

	cacheid_init();
	cpu_init();
}

void __init dump_machine_table(void)
{
	const struct machine_desc *p;

	early_print("Available machine support:\n\nID (hex)\tNAME\n");
	for_each_machine_desc(p)
		early_print("%08x\t%s\n", p->nr, p->name);

	early_print("\nPlease check your kernel config and/or bootloader.\n");

	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
}

int __init arm_add_memory(u64 start, u64 size)
{
	u64 aligned_start;

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is rounded down, start is rounded up.
	 */
	aligned_start = PAGE_ALIGN(start);
	if (aligned_start > start + size)
		size = 0;
	else
		size -= aligned_start - start;

#ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
	if (aligned_start > ULONG_MAX) {
		pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
			(long long)start);
		return -EINVAL;
	}

	if (aligned_start + size > ULONG_MAX) {
		pr_crit("Truncating memory at 0x%08llx to fit in 32-bit physical address space\n",
			(long long)start);
		/*
		 * To ensure bank->start + bank->size is representable in
		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
		 * This means we lose a page after masking.
		 */
		size = ULONG_MAX - aligned_start;
	}
#endif

	if (aligned_start < PHYS_OFFSET) {
		if (aligned_start + size <= PHYS_OFFSET) {
			pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
				aligned_start, aligned_start + size);
			return -EINVAL;
		}

		pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
			aligned_start, (u64)PHYS_OFFSET);

		size -= PHYS_OFFSET - aligned_start;
		aligned_start = PHYS_OFFSET;
	}

	start = aligned_start;
	size = size & ~(phys_addr_t)(PAGE_SIZE - 1);

	/*
	 * Check whether this memory region has non-zero size.
	 */
	if (size == 0)
		return -EINVAL;

	memblock_add(start, size);
	return 0;
}

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */

static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	u64 size;
	u64 start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		memblock_remove(memblock_start_of_DRAM(),
			memblock_end_of_DRAM() - memblock_start_of_DRAM());
	}

	start = PHYS_OFFSET;
	size = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);
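
/*
 * Example (illustrative addresses): booting with "mem=512M@0x60000000"
 * discards the bootloader-provided layout and registers a single
 * 512 MiB bank at physical 0x60000000; a bare "mem=64M" starts the bank
 * at PHYS_OFFSET.
 */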

static void __init request_standard_resources(const struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start = virt_to_phys(_text);
	kernel_code.end = virt_to_phys(_etext - 1);
	kernel_data.start = virt_to_phys(_sdata);
	kernel_data.end = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		res = memblock_virt_alloc(sizeof(*res), 0);
		res->name = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines may never possess lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
	.orig_video_lines = 30,
	.orig_video_cols = 80,
	.orig_video_mode = 0,
	.orig_video_ega_bx = 0,
	.orig_video_isVGA = 1,
	.orig_video_points = 8
};
#endif

static int __init customize_machine(void)
{
	/*
	 * customizes platform devices, or adds new ones
	 * On DT based machines, we fall back to populating the
	 * machine from the device tree, if no callback is provided,
	 * otherwise we would always need an init_machine callback.
	 */
	of_iommu_init();
	if (machine_desc->init_machine)
		machine_desc->init_machine();
#ifdef CONFIG_OF
	else
		of_platform_populate(NULL, of_default_bus_match_table,
					NULL, NULL);
#endif
	return 0;
}
arch_initcall(customize_machine);

static int __init init_machine_late(void)
{
	if (machine_desc->init_late)
		machine_desc->init_late();
	return 0;
}
late_initcall(init_machine_late);

#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for the crash kernel
 *
 * This function reserves the memory area given in the "crashkernel="
 * kernel command line parameter. The memory reserved is used by a dump
 * capture kernel when the primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret)
		return;

	ret = memblock_reserve(crash_base, crash_size);
	if (ret < 0) {
		pr_warn("crashkernel reservation failed - memory is in use (0x%lx)\n",
			(unsigned long)crash_base);
		return;
	}

	pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
		(unsigned long)(crash_size >> 20),
		(unsigned long)(crash_base >> 20),
		(unsigned long)(total_mem >> 20));

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */
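
/*
 * Example (illustrative address): "crashkernel=64M@0x40000000" on the
 * command line reserves 64 MiB at physical 0x40000000 for the dump
 * capture kernel, which kexec can then load into the reserved window.
 */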

void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
	sync_boot_mode();

	if (is_hyp_mode_available()) {
		pr_info("CPU: All CPU(s) started in HYP mode.\n");
		pr_info("CPU: Virtualization extensions available.\n");
	} else if (is_hyp_mode_mismatched()) {
		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
			__boot_cpu_mode & MODE_MASK);
		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
	} else
		pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}

void __init setup_arch(char **cmdline_p)
{
	const struct machine_desc *mdesc;

	setup_processor();
	mdesc = setup_machine_fdt(__atags_pointer);
	if (!mdesc)
		mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
	machine_desc = mdesc;
	machine_name = mdesc->name;
	dump_stack_set_arch_desc("%s", mdesc->name);

	if (mdesc->reboot_mode != REBOOT_HARD)
		reboot_mode = mdesc->reboot_mode;

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	parse_early_param();

	early_paging_init(mdesc, lookup_processor_type(read_cpuid_id()));
	setup_dma_zone(mdesc);
	sanity_check_meminfo();
	arm_memblock_init(mdesc);

	paging_init(mdesc);
	request_standard_resources(mdesc);

	if (mdesc->restart)
		arm_pm_restart = mdesc->restart;

	unflatten_device_tree();

	arm_dt_init_cpu_maps();
	psci_init();
#ifdef CONFIG_SMP
	if (is_smp()) {
		if (!mdesc->smp_init || !mdesc->smp_init()) {
			if (psci_smp_available())
				smp_set_ops(&psci_smp_ops);
			else if (mdesc->smp)
				smp_set_ops(mdesc->smp);
		}
		smp_init_cpus();
		smp_build_mpidr_hash();
	}
#endif

	if (!is_smp())
		hyp_mode_check();

	reserve_crashkernel();

#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	if (mdesc->init_early)
		mdesc->init_early();
}


static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = 1;
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif

static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	"evtstrm",
	NULL
};

static const char *hwcap2_str[] = {
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};
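
/*
 * With the ISAR5-derived bits set in cpuid_init_hwcaps(), c_show()
 * below appends these names to the Features line.  On a core with the
 * v8 Crypto Extensions the output would look roughly like this
 * (illustrative, wrapped here for readability, not captured from real
 * hardware):
 *
 *	Features	: half thumb fastmult vfp edsp neon vfpv3 tls
 *			  vfpv4 idiva idivt vfpd32 lpae evtstrm aes
 *			  pmull sha1 sha2 crc32
 */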

static int c_show(struct seq_file *m, void *v)
{
	int i, j;
	u32 cpuid;

	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
		seq_printf(m, "model name\t: %s rev %d (%s)\n",
			   cpu_name, cpuid & 15, elf_platform);

#if defined(CONFIG_SMP)
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
#else
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   loops_per_jiffy / (500000/HZ),
			   (loops_per_jiffy / (5000/HZ)) % 100);
#endif
		/* dump out the processor features */
		seq_puts(m, "Features\t: ");

		for (j = 0; hwcap_str[j]; j++)
			if (elf_hwcap & (1 << j))
				seq_printf(m, "%s ", hwcap_str[j]);

		for (j = 0; hwcap2_str[j]; j++)
			if (elf_hwcap2 & (1 << j))
				seq_printf(m, "%s ", hwcap2_str[j]);

		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
		seq_printf(m, "CPU architecture: %s\n",
			   proc_arch[cpu_architecture()]);

		if ((cpuid & 0x0008f000) == 0x00000000) {
			/* pre-ARM7 */
			seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
		} else {
			if ((cpuid & 0x0008f000) == 0x00007000) {
				/* ARM7 */
				seq_printf(m, "CPU variant\t: 0x%02x\n",
					   (cpuid >> 16) & 127);
			} else {
				/* post-ARM7 */
				seq_printf(m, "CPU variant\t: 0x%x\n",
					   (cpuid >> 20) & 15);
			}
			seq_printf(m, "CPU part\t: 0x%03x\n",
				   (cpuid >> 4) & 0xfff);
		}
		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
	}

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start = c_start,
	.next = c_next,
	.stop = c_stop,
	.show = c_show
};