ARM: 8318/1: treat CPU feature register fields as signed quantities
arch/arm/kernel/setup.c
/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/sort.h>

#include <asm/unified.h>
#include <asm/cp15.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
#include <asm/psci.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>

#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/system_info.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/memblock.h>
#include <asm/virt.h>

#include "atags.h"

#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void init_default_cache_policy(unsigned long);
extern void paging_init(const struct machine_desc *desc);
extern void early_paging_init(const struct machine_desc *,
			      struct proc_info_list *);
extern void sanity_check_meminfo(void);
extern enum reboot_mode reboot_mode;
extern void setup_dma_zone(const struct machine_desc *desc);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);

unsigned int elf_hwcap2 __read_mostly;
EXPORT_SYMBOL(elf_hwcap2);

#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
	u32 fiq[3];
} ____cacheline_aligned;

#ifndef CONFIG_CPU_V7M
static struct stack stacks[NR_CPUS];
#endif

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
const struct machine_desc *machine_desc __initdata;

static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)
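/*
 * ENDIANNESS evaluates to 'l' on little-endian kernels and to 'b' on
 * big-endian ones: the least significant byte of endian_test.l overlays
 * c[0] ('l') or c[3] ('b') depending on the CPU's byte order.
 */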

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"7M",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};

#ifdef CONFIG_CPU_V7M
static int __get_cpu_architecture(void)
{
	return CPU_ARCH_ARMv7M;
}
#else
static int __get_cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		unsigned int mmfr0;

		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		asm("mrc	p15, 0, %0, c0, c1, 4"
		    : "=r" (mmfr0));
		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
#endif

int __pure cpu_architecture(void)
{
	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

	return __cpu_architecture;
}

static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* PIPT caches never alias. */
	if (icache_is_pipt())
		return 0;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		asm("mcr	p15, 2, %0, c0, c0, 0 @ set CSSELR"
		    : /* No output operands */
		    : "r" (1));
		isb();
		asm("mrc	p15, 1, %0, c0, c0, 0 @ read CCSIDR"
		    : "=r" (id_reg));
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
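		/*
		 * One cache way spans line_size * num_sets bytes; if that
		 * exceeds PAGE_SIZE, virtual-index bits above PAGE_SHIFT
		 * select the set, so the I-cache can alias. Example: 32-byte
		 * lines (LineSize field 1) and 256 sets give 8 KiB > 4 KiB.
		 */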
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}

static void __init cacheid_init(void)
{
	unsigned int arch = cpu_architecture();

	if (arch == CPU_ARCH_ARMv7M) {
		cacheid = 0;
	} else if (arch >= CPU_ARCH_ARMv6) {
		unsigned int cachetype = read_cpuid_cachetype();
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			arch = CPU_ARCH_ARMv7;
			cacheid = CACHEID_VIPT_NONALIASING;
			switch (cachetype & (3 << 14)) {
			case (1 << 14):
				cacheid |= CACHEID_ASID_TAGGED;
				break;
			case (3 << 14):
				cacheid |= CACHEID_PIPT;
				break;
			}
		} else {
			arch = CPU_ARCH_ARMv6;
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
		}
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
	} else {
		cacheid = CACHEID_VIVT;
	}

	pr_info("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		icache_is_pipt() ? "PIPT" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}

static void __init cpuid_init_hwcaps(void)
{
	int block;

	if (cpu_architecture() < CPU_ARCH_ARMv7)
		return;

	block = cpuid_feature_extract(CPUID_EXT_ISAR0, 24);
	if (block >= 2)
		elf_hwcap |= HWCAP_IDIVA;
	if (block >= 1)
		elf_hwcap |= HWCAP_IDIVT;

	/* LPAE implies atomic ldrd/strd instructions */
	block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
	if (block >= 5)
		elf_hwcap |= HWCAP_LPAE;
}

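/*
 * cpuid_feature_extract() (from <asm/cputype.h>) returns each 4-bit ID
 * register field as a *signed* quantity, so the ">= n" tests above reject
 * the reserved negative encodings (0xf reads back as -1, not 15). A
 * minimal sketch of the helper, assuming only the sign-extension
 * behaviour this commit's subject describes:
 *
 *	static inline int cpuid_feature_extract_field(u32 features, int field)
 *	{
 *		int feature = (features >> field) & 15;
 *
 *		// feature register fields hold signed values
 *		if (feature > 7)
 *			feature -= 16;
 *		return feature;
 *	}
 *
 *	#define cpuid_feature_extract(reg, field) \
 *		cpuid_feature_extract_field(read_cpuid_ext(reg), field)
 */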
static void __init elf_hwcap_fixup(void)
{
	unsigned id = read_cpuid_id();

	/*
	 * HWCAP_TLS is available only on 1136 r1p0 and later,
	 * see also kuser_get_tls_init.
	 */
	if (read_cpuid_part() == ARM_CPU_PART_ARM1136 &&
	    ((id >> 20) & 3) == 0) {
		elf_hwcap &= ~HWCAP_TLS;
		return;
	}

	/* Verify if CPUID scheme is implemented */
	if ((id & 0x000f0000) != 0x000f0000)
		return;

	/*
	 * If the CPU supports LDREX/STREX and LDREXB/STREXB,
	 * avoid advertising SWP; it may not be atomic with
	 * multiprocessing cores.
	 */
	if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
	    (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
	     cpuid_feature_extract(CPUID_EXT_ISAR3, 20) >= 3))
		elf_hwcap &= ~HWCAP_SWP;
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void notrace cpu_init(void)
{
#ifndef CONFIG_CPU_V7M
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		pr_crit("CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * This only works on resume and secondary cores. For booting on the
	 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
	 */
	set_my_cpu_offset(per_cpu_offset(cpu));

	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 */
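	/*
	 * Each "msr cpsr_c" below switches into one exception mode (IRQ,
	 * ABT, UND, FIQ) with IRQs and FIQs masked, the add/mov pair points
	 * that mode's banked sp at the matching 3-word array in this CPU's
	 * struct stack, and the final write drops back to SVC mode.
	 */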
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7\n\t"
	"add	r14, %0, %8\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %9"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
	      "I" (offsetof(struct stack, fiq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
#endif
}

u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };

void __init smp_setup_processor_id(void)
{
	int i;
	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	cpu_logical_map(0) = cpu;
	for (i = 1; i < nr_cpu_ids; ++i)
		cpu_logical_map(i) = i == cpu ? 0 : i;
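	/*
	 * e.g. if the booting CPU's Aff0 is 2 on a four-CPU system, the map
	 * becomes { 2, 1, 0, 3 }: logical CPU 0 is the boot CPU and physical
	 * CPU 0 takes the vacated logical slot 2.
	 */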

	/*
	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
	 * using percpu variable early, for example, lockdep will
	 * access percpu variable inside lock_release
	 */
	set_my_cpu_offset(0);

	pr_info("Booting Linux on physical CPU 0x%x\n", mpidr);
}

struct mpidr_hash mpidr_hash;
#ifdef CONFIG_SMP
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. Resulting algorithm is a collision
 *			  free hash carried out through shifting and ORing
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity;
	u32 fs[3], bits[3], ls, mask = 0;
	/*
	 * Pre-scan the list of MPIDRS and filter out bits that do
	 * not contribute to affinity levels, ie they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits 0x%x\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 3; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB bit and LSB bits position
		 * to determine how many bits are required
		 * to express the affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 24 bits values space to a
	 * compressed set of values. This is equivalent to hashing
	 * the MPIDR through shifting and ORing. It is a collision free
	 * hash though not minimal since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, eg MPIDR[7:0] = {0x2, 0x80}.
	 */
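	/*
	 * Worked example: four CPUs with MPIDRs 0x000, 0x001, 0x100, 0x101
	 * give mask = 0x101, fs[0] = 0, bits[0] = 1, fs[1] = 0, bits[1] = 1.
	 * shift_aff[0] = 0 and shift_aff[1] = 8 + 0 - 1 = 7 pack aff1's
	 * single bit next to aff0's, yielding collision-free indices 0-3.
	 */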
	mpidr_hash.shift_aff[0] = fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
						(bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
				mpidr_hash.shift_aff[0],
				mpidr_hash.shift_aff[1],
				mpidr_hash.shift_aff[2],
				mpidr_hash.mask,
				mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
	sync_cache_w(&mpidr_hash);
}
#endif

static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		pr_err("CPU configuration botched (ID %08x), unable to continue.\n",
		       read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;
	__cpu_architecture = __get_cpu_architecture();

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
		cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
		proc_arch[cpu_architecture()], get_cr());

	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;

	cpuid_init_hwcaps();

#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif
#ifdef CONFIG_MMU
	init_default_cache_policy(list->__cpu_mm_mmu_flags);
#endif
	erratum_a15_798181_init();

	elf_hwcap_fixup();

	cacheid_init();
	cpu_init();
}

void __init dump_machine_table(void)
{
	const struct machine_desc *p;

	early_print("Available machine support:\n\nID (hex)\tNAME\n");
	for_each_machine_desc(p)
		early_print("%08x\t%s\n", p->nr, p->name);

	early_print("\nPlease check your kernel config and/or bootloader.\n");

	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
}

int __init arm_add_memory(u64 start, u64 size)
{
	u64 aligned_start;

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is rounded down, start is rounded up.
	 */
	aligned_start = PAGE_ALIGN(start);
	if (aligned_start > start + size)
		size = 0;
	else
		size -= aligned_start - start;

#ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
	if (aligned_start > ULONG_MAX) {
		pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
			(long long)start);
		return -EINVAL;
	}

	if (aligned_start + size > ULONG_MAX) {
		pr_crit("Truncating memory at 0x%08llx to fit in 32-bit physical address space\n",
			(long long)start);
		/*
		 * To ensure bank->start + bank->size is representable in
		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
		 * This means we lose a page after masking.
		 */
		size = ULONG_MAX - aligned_start;
	}
#endif

	if (aligned_start < PHYS_OFFSET) {
		if (aligned_start + size <= PHYS_OFFSET) {
			pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
				aligned_start, aligned_start + size);
			return -EINVAL;
		}

		pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
			aligned_start, (u64)PHYS_OFFSET);

		size -= PHYS_OFFSET - aligned_start;
		aligned_start = PHYS_OFFSET;
	}

	start = aligned_start;
	size  = size & ~(phys_addr_t)(PAGE_SIZE - 1);

	/*
	 * Check whether this memory region has non-zero size or
	 * invalid node number.
	 */
	if (size == 0)
		return -EINVAL;

	memblock_add(start, size);
	return 0;
}

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
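/*
 * Example: booting with "mem=64M@0x80000000" discards the
 * bootloader-supplied layout and registers a single 64 MiB bank at
 * physical address 0x80000000.
 */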

static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	u64 size;
	u64 start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		memblock_remove(memblock_start_of_DRAM(),
			memblock_end_of_DRAM() - memblock_start_of_DRAM());
	}

	start = PHYS_OFFSET;
	size  = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);

static void __init request_standard_resources(const struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(_etext - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		res = memblock_virt_alloc(sizeof(*res), 0);
		res->name  = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
	.orig_video_lines	= 30,
	.orig_video_cols	= 80,
	.orig_video_mode	= 0,
	.orig_video_ega_bx	= 0,
	.orig_video_isVGA	= 1,
	.orig_video_points	= 8
};
#endif

static int __init customize_machine(void)
{
	/*
	 * customizes platform devices, or adds new ones
	 * On DT based machines, we fall back to populating the
	 * machine from the device tree, if no callback is provided,
	 * otherwise we would always need an init_machine callback.
	 */
	of_iommu_init();
	if (machine_desc->init_machine)
		machine_desc->init_machine();
#ifdef CONFIG_OF
	else
		of_platform_populate(NULL, of_default_bus_match_table,
					NULL, NULL);
#endif
	return 0;
}
arch_initcall(customize_machine);

static int __init init_machine_late(void)
{
	if (machine_desc->init_late)
		machine_desc->init_late();
	return 0;
}
late_initcall(init_machine_late);

#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves memory area given in "crashkernel=" kernel command
 * line parameter. The memory reserved is used by a dump capture kernel when
 * primary kernel is crashing.
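 *
 * Example: "crashkernel=64M@0x30000000" requests a 64 MiB reservation at
 * physical address 0x30000000 (syntax: crashkernel=size[@offset]).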
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret)
		return;

	ret = memblock_reserve(crash_base, crash_size);
	if (ret < 0) {
		pr_warn("crashkernel reservation failed - memory is in use (0x%lx)\n",
			(unsigned long)crash_base);
		return;
	}

	pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
		(unsigned long)(crash_size >> 20),
		(unsigned long)(crash_base >> 20),
		(unsigned long)(total_mem >> 20));

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */

void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
	sync_boot_mode();

	if (is_hyp_mode_available()) {
		pr_info("CPU: All CPU(s) started in HYP mode.\n");
		pr_info("CPU: Virtualization extensions available.\n");
	} else if (is_hyp_mode_mismatched()) {
		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
			__boot_cpu_mode & MODE_MASK);
		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
	} else
		pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}

void __init setup_arch(char **cmdline_p)
{
	const struct machine_desc *mdesc;

	setup_processor();
	mdesc = setup_machine_fdt(__atags_pointer);
	if (!mdesc)
		mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
	machine_desc = mdesc;
	machine_name = mdesc->name;
	dump_stack_set_arch_desc("%s", mdesc->name);

	if (mdesc->reboot_mode != REBOOT_HARD)
		reboot_mode = mdesc->reboot_mode;

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	parse_early_param();

	early_paging_init(mdesc, lookup_processor_type(read_cpuid_id()));
	setup_dma_zone(mdesc);
	sanity_check_meminfo();
	arm_memblock_init(mdesc);

	paging_init(mdesc);
	request_standard_resources(mdesc);

	if (mdesc->restart)
		arm_pm_restart = mdesc->restart;

	unflatten_device_tree();

	arm_dt_init_cpu_maps();
	psci_init();
#ifdef CONFIG_SMP
	if (is_smp()) {
		if (!mdesc->smp_init || !mdesc->smp_init()) {
			if (psci_smp_available())
				smp_set_ops(&psci_smp_ops);
			else if (mdesc->smp)
				smp_set_ops(mdesc->smp);
		}
		smp_init_cpus();
		smp_build_mpidr_hash();
	}
#endif

	if (!is_smp())
		hyp_mode_check();

	reserve_crashkernel();

#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	if (mdesc->init_early)
		mdesc->init_early();
}


static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = 1;
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif

static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	"evtstrm",
	NULL
};

static const char *hwcap2_str[] = {
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};

1031
1da177e4
LT
1032static int c_show(struct seq_file *m, void *v)
1033{
b4b8f770
LP
1034 int i, j;
1035 u32 cpuid;
1da177e4 1036
1da177e4 1037 for_each_online_cpu(i) {
15559722
RK
1038 /*
1039 * glibc reads /proc/cpuinfo to determine the number of
1040 * online processors, looking for lines beginning with
1041 * "processor". Give glibc what it expects.
1042 */
1043 seq_printf(m, "processor\t: %d\n", i);
b4b8f770
LP
1044 cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
1045 seq_printf(m, "model name\t: %s rev %d (%s)\n",
1046 cpu_name, cpuid & 15, elf_platform);
1047
4bf9636c
PM
1048#if defined(CONFIG_SMP)
1049 seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
1050 per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
1051 (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
1052#else
1053 seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
1054 loops_per_jiffy / (500000/HZ),
1055 (loops_per_jiffy / (5000/HZ)) % 100);
1056#endif
b4b8f770
LP
1057 /* dump out the processor features */
1058 seq_puts(m, "Features\t: ");
1da177e4 1059
b4b8f770
LP
1060 for (j = 0; hwcap_str[j]; j++)
1061 if (elf_hwcap & (1 << j))
1062 seq_printf(m, "%s ", hwcap_str[j]);
1da177e4 1063
b342ea4e
AB
1064 for (j = 0; hwcap2_str[j]; j++)
1065 if (elf_hwcap2 & (1 << j))
1066 seq_printf(m, "%s ", hwcap2_str[j]);
1067
b4b8f770
LP
1068 seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
1069 seq_printf(m, "CPU architecture: %s\n",
1070 proc_arch[cpu_architecture()]);
1da177e4 1071
b4b8f770
LP
1072 if ((cpuid & 0x0008f000) == 0x00000000) {
1073 /* pre-ARM7 */
1074 seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
1da177e4 1075 } else {
b4b8f770
LP
1076 if ((cpuid & 0x0008f000) == 0x00007000) {
1077 /* ARM7 */
1078 seq_printf(m, "CPU variant\t: 0x%02x\n",
1079 (cpuid >> 16) & 127);
1080 } else {
1081 /* post-ARM7 */
1082 seq_printf(m, "CPU variant\t: 0x%x\n",
1083 (cpuid >> 20) & 15);
1084 }
1085 seq_printf(m, "CPU part\t: 0x%03x\n",
1086 (cpuid >> 4) & 0xfff);
1da177e4 1087 }
b4b8f770 1088 seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
1da177e4 1089 }
1da177e4
LT
1090
1091 seq_printf(m, "Hardware\t: %s\n", machine_name);
1092 seq_printf(m, "Revision\t: %04x\n", system_rev);
1093 seq_printf(m, "Serial\t\t: %08x%08x\n",
1094 system_serial_high, system_serial_low);
1095
1096 return 0;
1097}
1098
static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};