/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/sort.h>

#include <asm/unified.h>
#include <asm/cp15.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
#include <asm/psci.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>

#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/system_info.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/memblock.h>
#include <asm/virt.h>

#include "atags.h"


#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
        memcpy(fpe_type, line, 8);
        return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void init_default_cache_policy(unsigned long);
extern void paging_init(const struct machine_desc *desc);
extern void early_paging_init(const struct machine_desc *,
                              struct proc_info_list *);
extern void sanity_check_meminfo(void);
extern enum reboot_mode reboot_mode;
extern void setup_dma_zone(const struct machine_desc *desc);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);

unsigned int elf_hwcap2 __read_mostly;
EXPORT_SYMBOL(elf_hwcap2);


#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

struct stack {
        u32 irq[3];
        u32 abt[3];
        u32 und[3];
        u32 fiq[3];
} ____cacheline_aligned;

#ifndef CONFIG_CPU_V7M
static struct stack stacks[NR_CPUS];
#endif

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
const struct machine_desc *machine_desc __initdata;

static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)
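/*
 * ENDIANNESS evaluates to 'l' on a little-endian kernel and 'b' on a
 * big-endian one: the low byte of endian_test.l is c[0] ('l') on
 * little-endian and c[3] ('b') on big-endian.  setup_processor() below
 * appends it to the machine and ELF platform names, e.g. "armv7l"/"v7l".
 */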
DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
        {
                .name = "Video RAM",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        },
        {
                .name = "Kernel code",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        },
        {
                .name = "Kernel data",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        }
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
        {
                .name = "reserved",
                .start = 0x3bc,
                .end = 0x3be,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        },
        {
                .name = "reserved",
                .start = 0x378,
                .end = 0x37f,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        },
        {
                .name = "reserved",
                .start = 0x278,
                .end = 0x27f,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        }
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *proc_arch[] = {
        "undefined/unknown",
        "3",
        "4",
        "4T",
        "5",
        "5T",
        "5TE",
        "5TEJ",
        "6TEJ",
        "7",
        "7M",
        "?(12)",
        "?(13)",
        "?(14)",
        "?(15)",
        "?(16)",
        "?(17)",
};

#ifdef CONFIG_CPU_V7M
static int __get_cpu_architecture(void)
{
        return CPU_ARCH_ARMv7M;
}
#else
static int __get_cpu_architecture(void)
{
        int cpu_arch;

        if ((read_cpuid_id() & 0x0008f000) == 0) {
                cpu_arch = CPU_ARCH_UNKNOWN;
        } else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
                cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
        } else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
                cpu_arch = (read_cpuid_id() >> 16) & 7;
                if (cpu_arch)
                        cpu_arch += CPU_ARCH_ARMv3;
        } else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
                /* Revised CPUID format. Read the Memory Model Feature
                 * Register 0 and check for VMSAv7 or PMSAv7 */
                unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
                if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
                    (mmfr0 & 0x000000f0) >= 0x00000030)
                        cpu_arch = CPU_ARCH_ARMv7;
                else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
                         (mmfr0 & 0x000000f0) == 0x00000020)
                        cpu_arch = CPU_ARCH_ARMv6;
                else
                        cpu_arch = CPU_ARCH_UNKNOWN;
        } else
                cpu_arch = CPU_ARCH_UNKNOWN;

        return cpu_arch;
}
#endif

int __pure cpu_architecture(void)
{
        BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

        return __cpu_architecture;
}

static int cpu_has_aliasing_icache(unsigned int arch)
{
        int aliasing_icache;
        unsigned int id_reg, num_sets, line_size;

        /* PIPT caches never alias. */
        if (icache_is_pipt())
                return 0;

        /* arch specifies the register format */
        switch (arch) {
        case CPU_ARCH_ARMv7:
                asm("mcr        p15, 2, %0, c0, c0, 0 @ set CSSELR"
                    : /* No output operands */
                    : "r" (1));
                isb();
                asm("mrc        p15, 1, %0, c0, c0, 0 @ read CCSIDR"
                    : "=r" (id_reg));
                line_size = 4 << ((id_reg & 0x7) + 2);
                num_sets = ((id_reg >> 13) & 0x7fff) + 1;
                aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
                break;
        case CPU_ARCH_ARMv6:
                aliasing_icache = read_cpuid_cachetype() & (1 << 11);
                break;
        default:
                /* I-cache aliases will be handled by D-cache aliasing code */
                aliasing_icache = 0;
        }

        return aliasing_icache;
}
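/*
 * Worked example for cpu_has_aliasing_icache() on ARMv7, assuming the
 * usual 4KB page size: CCSIDR encodes the line size and number of sets,
 * and the cache aliases when one way spans more than a page.  With
 * 64-byte lines and 128 sets, one way covers 64 * 128 = 8KB > 4KB, so
 * the I-cache aliases; with 32-byte lines and 128 sets it covers exactly
 * 4KB and does not.
 */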
static void __init cacheid_init(void)
{
        unsigned int arch = cpu_architecture();

        if (arch == CPU_ARCH_ARMv7M) {
                cacheid = 0;
        } else if (arch >= CPU_ARCH_ARMv6) {
                unsigned int cachetype = read_cpuid_cachetype();
                if ((cachetype & (7 << 29)) == 4 << 29) {
                        /* ARMv7 register format */
                        arch = CPU_ARCH_ARMv7;
                        cacheid = CACHEID_VIPT_NONALIASING;
                        switch (cachetype & (3 << 14)) {
                        case (1 << 14):
                                cacheid |= CACHEID_ASID_TAGGED;
                                break;
                        case (3 << 14):
                                cacheid |= CACHEID_PIPT;
                                break;
                        }
                } else {
                        arch = CPU_ARCH_ARMv6;
                        if (cachetype & (1 << 23))
                                cacheid = CACHEID_VIPT_ALIASING;
                        else
                                cacheid = CACHEID_VIPT_NONALIASING;
                }
                if (cpu_has_aliasing_icache(arch))
                        cacheid |= CACHEID_VIPT_I_ALIASING;
        } else {
                cacheid = CACHEID_VIVT;
        }

        pr_info("CPU: %s data cache, %s instruction cache\n",
                cache_is_vivt() ? "VIVT" :
                cache_is_vipt_aliasing() ? "VIPT aliasing" :
                cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
                cache_is_vivt() ? "VIVT" :
                icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
                icache_is_vipt_aliasing() ? "VIPT aliasing" :
                icache_is_pipt() ? "PIPT" :
                cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

void __init early_print(const char *str, ...)
{
        extern void printascii(const char *);
        char buf[256];
        va_list ap;

        va_start(ap, str);
        vsnprintf(buf, sizeof(buf), str, ap);
        va_end(ap);

#ifdef CONFIG_DEBUG_LL
        printascii(buf);
#endif
        printk("%s", buf);
}

static void __init cpuid_init_hwcaps(void)
{
        int block;
        u32 isar5;

        if (cpu_architecture() < CPU_ARCH_ARMv7)
                return;

        block = cpuid_feature_extract(CPUID_EXT_ISAR0, 24);
        if (block >= 2)
                elf_hwcap |= HWCAP_IDIVA;
        if (block >= 1)
                elf_hwcap |= HWCAP_IDIVT;

        /* LPAE implies atomic ldrd/strd instructions */
        block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
        if (block >= 5)
                elf_hwcap |= HWCAP_LPAE;

        /* check for supported v8 Crypto instructions */
        isar5 = read_cpuid_ext(CPUID_EXT_ISAR5);

        block = cpuid_feature_extract_field(isar5, 4);
        if (block >= 2)
                elf_hwcap2 |= HWCAP2_PMULL;
        if (block >= 1)
                elf_hwcap2 |= HWCAP2_AES;

        block = cpuid_feature_extract_field(isar5, 8);
        if (block >= 1)
                elf_hwcap2 |= HWCAP2_SHA1;

        block = cpuid_feature_extract_field(isar5, 12);
        if (block >= 1)
                elf_hwcap2 |= HWCAP2_SHA2;

        block = cpuid_feature_extract_field(isar5, 16);
        if (block >= 1)
                elf_hwcap2 |= HWCAP2_CRC32;
}
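/*
 * Each field read by cpuid_init_hwcaps() above is a signed 4-bit value.
 * For example, ID_ISAR0[27:24] (the divide field) is 2 when SDIV/UDIV
 * exist in both the ARM and Thumb instruction sets and 1 when they are
 * Thumb-only, hence the separate HWCAP_IDIVA and HWCAP_IDIVT bits.
 */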
static void __init elf_hwcap_fixup(void)
{
        unsigned id = read_cpuid_id();

        /*
         * HWCAP_TLS is available only on 1136 r1p0 and later,
         * see also kuser_get_tls_init.
         */
        if (read_cpuid_part() == ARM_CPU_PART_ARM1136 &&
            ((id >> 20) & 3) == 0) {
                elf_hwcap &= ~HWCAP_TLS;
                return;
        }

        /* Verify if CPUID scheme is implemented */
        if ((id & 0x000f0000) != 0x000f0000)
                return;

        /*
         * If the CPU supports LDREX/STREX and LDREXB/STREXB,
         * avoid advertising SWP; it may not be atomic with
         * multiprocessing cores.
         */
        if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
            (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
             cpuid_feature_extract(CPUID_EXT_ISAR3, 20) >= 3))
                elf_hwcap &= ~HWCAP_SWP;
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void notrace cpu_init(void)
{
#ifndef CONFIG_CPU_V7M
        unsigned int cpu = smp_processor_id();
        struct stack *stk = &stacks[cpu];

        if (cpu >= NR_CPUS) {
                pr_crit("CPU%u: bad primary CPU number\n", cpu);
                BUG();
        }

        /*
         * This only works on resume and secondary cores. For booting on the
         * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
         */
        set_my_cpu_offset(per_cpu_offset(cpu));

        cpu_proc_init();

        /*
         * Define the placement constraint for the inline asm directive below.
         * In Thumb-2, msr with an immediate value is not allowed.
         */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC     "r"
#else
#define PLC     "I"
#endif

        /*
         * setup stacks for re-entrant exception handlers
         */
        __asm__ (
        "msr    cpsr_c, %1\n\t"
        "add    r14, %0, %2\n\t"
        "mov    sp, r14\n\t"
        "msr    cpsr_c, %3\n\t"
        "add    r14, %0, %4\n\t"
        "mov    sp, r14\n\t"
        "msr    cpsr_c, %5\n\t"
        "add    r14, %0, %6\n\t"
        "mov    sp, r14\n\t"
        "msr    cpsr_c, %7\n\t"
        "add    r14, %0, %8\n\t"
        "mov    sp, r14\n\t"
        "msr    cpsr_c, %9"
            :
            : "r" (stk),
              PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
              "I" (offsetof(struct stack, irq[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
              "I" (offsetof(struct stack, abt[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
              "I" (offsetof(struct stack, und[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
              "I" (offsetof(struct stack, fiq[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
            : "r14");
#endif
}

u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };

void __init smp_setup_processor_id(void)
{
        int i;
        u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
        u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

        cpu_logical_map(0) = cpu;
        for (i = 1; i < nr_cpu_ids; ++i)
                cpu_logical_map(i) = i == cpu ? 0 : i;

        /*
         * clear __my_cpu_offset on boot CPU to avoid hang caused by
         * using percpu variable early, for example, lockdep will
         * access percpu variable inside lock_release
         */
        set_my_cpu_offset(0);

        pr_info("Booting Linux on physical CPU 0x%x\n", mpidr);
}

struct mpidr_hash mpidr_hash;
#ifdef CONFIG_SMP
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *                        level in order to build a linear index from an
 *                        MPIDR value. Resulting algorithm is a collision
 *                        free hash carried out through shifting and ORing
 */
static void __init smp_build_mpidr_hash(void)
{
        u32 i, affinity;
        u32 fs[3], bits[3], ls, mask = 0;
        /*
         * Pre-scan the list of MPIDRS and filter out bits that do
         * not contribute to affinity levels, ie they never toggle.
         */
        for_each_possible_cpu(i)
                mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
        pr_debug("mask of set bits 0x%x\n", mask);
        /*
         * Find and stash the last and first bit set at all affinity levels to
         * check how many bits are required to represent them.
         */
        for (i = 0; i < 3; i++) {
                affinity = MPIDR_AFFINITY_LEVEL(mask, i);
                /*
                 * Find the MSB bit and LSB bits position
                 * to determine how many bits are required
                 * to express the affinity level.
                 */
                ls = fls(affinity);
                fs[i] = affinity ? ffs(affinity) - 1 : 0;
                bits[i] = ls - fs[i];
        }
        /*
         * An index can be created from the MPIDR by isolating the
         * significant bits at each affinity level and by shifting
         * them in order to compress the 24 bits values space to a
         * compressed set of values. This is equivalent to hashing
         * the MPIDR through shifting and ORing. It is a collision free
         * hash though not minimal since some levels might contain a number
         * of CPUs that is not an exact power of 2 and their bit
         * representation might contain holes, eg MPIDR[7:0] = {0x2, 0x80}.
         */
        mpidr_hash.shift_aff[0] = fs[0];
        mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
        mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
                                                (bits[1] + bits[0]);
        mpidr_hash.mask = mask;
        mpidr_hash.bits = bits[2] + bits[1] + bits[0];
        pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
                mpidr_hash.shift_aff[0],
                mpidr_hash.shift_aff[1],
                mpidr_hash.shift_aff[2],
                mpidr_hash.mask,
                mpidr_hash.bits);
        /*
         * 4x is an arbitrary value used to warn on a hash table much bigger
         * than expected on most systems.
         */
        if (mpidr_hash_size() > 4 * num_possible_cpus())
                pr_warn("Large number of MPIDR hash buckets detected\n");
        sync_cache_w(&mpidr_hash);
}
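/*
 * Worked example for smp_build_mpidr_hash(): four CPUs with MPIDRs
 * 0x000, 0x001, 0x100 and 0x101 (two clusters of two).  Only bits 0 and
 * 8 ever toggle, so mask = 0x101, bits[] = {1, 1, 0}, and the computed
 * shifts pack the two live bits into a two-bit index:
 * 0x000->0, 0x001->1, 0x100->2, 0x101->3.
 */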
#endif

static void __init setup_processor(void)
{
        struct proc_info_list *list;

        /*
         * locate processor in the list of supported processor
         * types.  The linker builds this table for us from the
         * entries in arch/arm/mm/proc-*.S
         */
        list = lookup_processor_type(read_cpuid_id());
        if (!list) {
                pr_err("CPU configuration botched (ID %08x), unable to continue.\n",
                       read_cpuid_id());
                while (1);
        }

        cpu_name = list->cpu_name;
        __cpu_architecture = __get_cpu_architecture();

#ifdef MULTI_CPU
        processor = *list->proc;
#endif
#ifdef MULTI_TLB
        cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
        cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
        cpu_cache = *list->cache;
#endif

        pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
                cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
                proc_arch[cpu_architecture()], get_cr());

        snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
                 list->arch_name, ENDIANNESS);
        snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
                 list->elf_name, ENDIANNESS);
        elf_hwcap = list->elf_hwcap;

        cpuid_init_hwcaps();

#ifndef CONFIG_ARM_THUMB
        elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif
#ifdef CONFIG_MMU
        init_default_cache_policy(list->__cpu_mm_mmu_flags);
#endif
        erratum_a15_798181_init();

        elf_hwcap_fixup();

        cacheid_init();
        cpu_init();
}

void __init dump_machine_table(void)
{
        const struct machine_desc *p;

        early_print("Available machine support:\n\nID (hex)\tNAME\n");
        for_each_machine_desc(p)
                early_print("%08x\t%s\n", p->nr, p->name);

        early_print("\nPlease check your kernel config and/or bootloader.\n");

        while (true)
                /* can't use cpu_relax() here as it may require MMU setup */;
}

int __init arm_add_memory(u64 start, u64 size)
{
        u64 aligned_start;

        /*
         * Ensure that start/size are aligned to a page boundary.
         * Size is rounded down, start is rounded up.
         */
        aligned_start = PAGE_ALIGN(start);
        if (aligned_start > start + size)
                size = 0;
        else
                size -= aligned_start - start;

#ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
        if (aligned_start > ULONG_MAX) {
                pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
                        (long long)start);
                return -EINVAL;
        }

        if (aligned_start + size > ULONG_MAX) {
                pr_crit("Truncating memory at 0x%08llx to fit in 32-bit physical address space\n",
                        (long long)start);
                /*
                 * To ensure bank->start + bank->size is representable in
                 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
                 * This means we lose a page after masking.
                 */
                size = ULONG_MAX - aligned_start;
        }
#endif

        if (aligned_start < PHYS_OFFSET) {
                if (aligned_start + size <= PHYS_OFFSET) {
                        pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
                                aligned_start, aligned_start + size);
                        return -EINVAL;
                }

                pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
                        aligned_start, (u64)PHYS_OFFSET);

                size -= PHYS_OFFSET - aligned_start;
                aligned_start = PHYS_OFFSET;
        }

        start = aligned_start;
        size = size & ~(phys_addr_t)(PAGE_SIZE - 1);

        /*
         * Check whether this memory region has non-zero size or
         * invalid node number.
         */
        if (size == 0)
                return -EINVAL;

        memblock_add(start, size);
        return 0;
}

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */

static int __init early_mem(char *p)
{
        static int usermem __initdata = 0;
        u64 size;
        u64 start;
        char *endp;

        /*
         * If the user specifies memory size, we
         * blow away any automatically generated
         * size.
         */
        if (usermem == 0) {
                usermem = 1;
                memblock_remove(memblock_start_of_DRAM(),
                        memblock_end_of_DRAM() - memblock_start_of_DRAM());
        }

        start = PHYS_OFFSET;
        size  = memparse(p, &endp);
        if (*endp == '@')
                start = memparse(endp + 1, NULL);

        arm_add_memory(start, size);

        return 0;
}
early_param("mem", early_mem);
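/*
 * For example, booting with "mem=512M@0x80000000" discards the
 * bootloader-supplied memory layout and registers a single 512MB bank
 * at physical address 0x80000000 (values here are illustrative).
 */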
static void __init request_standard_resources(const struct machine_desc *mdesc)
{
        struct memblock_region *region;
        struct resource *res;

        kernel_code.start   = virt_to_phys(_text);
        kernel_code.end     = virt_to_phys(_etext - 1);
        kernel_data.start   = virt_to_phys(_sdata);
        kernel_data.end     = virt_to_phys(_end - 1);

        for_each_memblock(memory, region) {
                res = memblock_virt_alloc(sizeof(*res), 0);
                res->name  = "System RAM";
                res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
                res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
                res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

                request_resource(&iomem_resource, res);

                if (kernel_code.start >= res->start &&
                    kernel_code.end <= res->end)
                        request_resource(res, &kernel_code);
                if (kernel_data.start >= res->start &&
                    kernel_data.end <= res->end)
                        request_resource(res, &kernel_data);
        }

        if (mdesc->video_start) {
                video_ram.start = mdesc->video_start;
                video_ram.end   = mdesc->video_end;
                request_resource(&iomem_resource, &video_ram);
        }

        /*
         * Some machines don't have the possibility of ever
         * possessing lp0, lp1 or lp2
         */
        if (mdesc->reserve_lp0)
                request_resource(&ioport_resource, &lp0);
        if (mdesc->reserve_lp1)
                request_resource(&ioport_resource, &lp1);
        if (mdesc->reserve_lp2)
                request_resource(&ioport_resource, &lp2);
}

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
        .orig_video_lines       = 30,
        .orig_video_cols        = 80,
        .orig_video_mode        = 0,
        .orig_video_ega_bx      = 0,
        .orig_video_isVGA       = 1,
        .orig_video_points      = 8
};
#endif

static int __init customize_machine(void)
{
        /*
         * customizes platform devices, or adds new ones
         * On DT based machines, we fall back to populating the
         * machine from the device tree, if no callback is provided,
         * otherwise we would always need an init_machine callback.
         */
        of_iommu_init();
        if (machine_desc->init_machine)
                machine_desc->init_machine();
#ifdef CONFIG_OF
        else
                of_platform_populate(NULL, of_default_bus_match_table,
                                        NULL, NULL);
#endif
        return 0;
}
arch_initcall(customize_machine);

static int __init init_machine_late(void)
{
        if (machine_desc->init_late)
                machine_desc->init_late();
        return 0;
}
late_initcall(init_machine_late);

#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
        unsigned long total;

        total = max_low_pfn - min_low_pfn;
        return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves memory area given in "crashkernel=" kernel command
 * line parameter. The memory reserved is used by a dump capture kernel when
 * primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
        unsigned long long crash_size, crash_base;
        unsigned long long total_mem;
        int ret;

        total_mem = get_total_mem();
        ret = parse_crashkernel(boot_command_line, total_mem,
                                &crash_size, &crash_base);
        if (ret)
                return;

        ret = memblock_reserve(crash_base, crash_size);
        if (ret < 0) {
                pr_warn("crashkernel reservation failed - memory is in use (0x%lx)\n",
                        (unsigned long)crash_base);
                return;
        }

        pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
                (unsigned long)(crash_size >> 20),
                (unsigned long)(crash_base >> 20),
                (unsigned long)(total_mem >> 20));

        crashk_res.start = crash_base;
        crashk_res.end = crash_base + crash_size - 1;
        insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */
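/*
 * A typical invocation is "crashkernel=64M@0x60000000" on the kernel
 * command line, reserving 64MB at a fixed physical address for the
 * capture kernel.  The address is illustrative and must lie within
 * System RAM.
 */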
void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
        sync_boot_mode();

        if (is_hyp_mode_available()) {
                pr_info("CPU: All CPU(s) started in HYP mode.\n");
                pr_info("CPU: Virtualization extensions available.\n");
        } else if (is_hyp_mode_mismatched()) {
                pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
                        __boot_cpu_mode & MODE_MASK);
                pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
        } else
                pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}

void __init setup_arch(char **cmdline_p)
{
        const struct machine_desc *mdesc;

        setup_processor();
        mdesc = setup_machine_fdt(__atags_pointer);
        if (!mdesc)
                mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
        machine_desc = mdesc;
        machine_name = mdesc->name;
        dump_stack_set_arch_desc("%s", mdesc->name);

        if (mdesc->reboot_mode != REBOOT_HARD)
                reboot_mode = mdesc->reboot_mode;

        init_mm.start_code = (unsigned long) _text;
        init_mm.end_code   = (unsigned long) _etext;
        init_mm.end_data   = (unsigned long) _edata;
        init_mm.brk        = (unsigned long) _end;

        /* populate cmd_line too for later use, preserving boot_command_line */
        strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
        *cmdline_p = cmd_line;

        parse_early_param();

        early_paging_init(mdesc, lookup_processor_type(read_cpuid_id()));
        setup_dma_zone(mdesc);
        sanity_check_meminfo();
        arm_memblock_init(mdesc);

        paging_init(mdesc);
        request_standard_resources(mdesc);

        if (mdesc->restart)
                arm_pm_restart = mdesc->restart;

        unflatten_device_tree();

        arm_dt_init_cpu_maps();
        psci_init();
#ifdef CONFIG_SMP
        if (is_smp()) {
                if (!mdesc->smp_init || !mdesc->smp_init()) {
                        if (psci_smp_available())
                                smp_set_ops(&psci_smp_ops);
                        else if (mdesc->smp)
                                smp_set_ops(mdesc->smp);
                }
                smp_init_cpus();
                smp_build_mpidr_hash();
        }
#endif

        if (!is_smp())
                hyp_mode_check();

        reserve_crashkernel();

#ifdef CONFIG_MULTI_IRQ_HANDLER
        handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
        conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
        conswitchp = &dummy_con;
#endif
#endif

        if (mdesc->init_early)
                mdesc->init_early();
}
static int __init topology_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
                cpuinfo->cpu.hotpluggable = 1;
                register_cpu(&cpuinfo->cpu, cpu);
        }

        return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
        struct proc_dir_entry *res;

        res = proc_mkdir("cpu", NULL);
        if (!res)
                return -ENOMEM;
        return 0;
}
fs_initcall(proc_cpu_init);
#endif

static const char *hwcap_str[] = {
        "swp",
        "half",
        "thumb",
        "26bit",
        "fastmult",
        "fpa",
        "vfp",
        "edsp",
        "java",
        "iwmmxt",
        "crunch",
        "thumbee",
        "neon",
        "vfpv3",
        "vfpv3d16",
        "tls",
        "vfpv4",
        "idiva",
        "idivt",
        "vfpd32",
        "lpae",
        "evtstrm",
        NULL
};

static const char *hwcap2_str[] = {
        "aes",
        "pmull",
        "sha1",
        "sha2",
        "crc32",
        NULL
};

static int c_show(struct seq_file *m, void *v)
{
        int i, j;
        u32 cpuid;

        for_each_online_cpu(i) {
                /*
                 * glibc reads /proc/cpuinfo to determine the number of
                 * online processors, looking for lines beginning with
                 * "processor".  Give glibc what it expects.
                 */
                seq_printf(m, "processor\t: %d\n", i);
                cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
                seq_printf(m, "model name\t: %s rev %d (%s)\n",
                           cpu_name, cpuid & 15, elf_platform);

#if defined(CONFIG_SMP)
                seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
                           per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
                           (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
#else
                seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
                           loops_per_jiffy / (500000/HZ),
                           (loops_per_jiffy / (5000/HZ)) % 100);
#endif
                /* dump out the processor features */
                seq_puts(m, "Features\t: ");

                for (j = 0; hwcap_str[j]; j++)
                        if (elf_hwcap & (1 << j))
                                seq_printf(m, "%s ", hwcap_str[j]);

                for (j = 0; hwcap2_str[j]; j++)
                        if (elf_hwcap2 & (1 << j))
                                seq_printf(m, "%s ", hwcap2_str[j]);

                seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
                seq_printf(m, "CPU architecture: %s\n",
                           proc_arch[cpu_architecture()]);

                if ((cpuid & 0x0008f000) == 0x00000000) {
                        /* pre-ARM7 */
                        seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
                } else {
                        if ((cpuid & 0x0008f000) == 0x00007000) {
                                /* ARM7 */
                                seq_printf(m, "CPU variant\t: 0x%02x\n",
                                           (cpuid >> 16) & 127);
                        } else {
                                /* post-ARM7 */
                                seq_printf(m, "CPU variant\t: 0x%x\n",
                                           (cpuid >> 20) & 15);
                        }
                        seq_printf(m, "CPU part\t: 0x%03x\n",
                                   (cpuid >> 4) & 0xfff);
                }
                seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
        }

        seq_printf(m, "Hardware\t: %s\n", machine_name);
        seq_printf(m, "Revision\t: %04x\n", system_rev);
        seq_printf(m, "Serial\t\t: %08x%08x\n",
                   system_serial_high, system_serial_low);

        return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
        return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
        ++*pos;
        return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
        .start  = c_start,
        .next   = c_next,
        .stop   = c_stop,
        .show   = c_show
};