/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/sort.h>
#include <linux/psci.h>

#include <asm/unified.h>
#include <asm/cp15.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/efi.h>
#include <asm/elf.h>
#include <asm/early_ioremap.h>
#include <asm/fixmap.h>
#include <asm/procinfo.h>
#include <asm/psci.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>
#include <asm/xen/hypervisor.h>

#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/system_info.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/memblock.h>
#include <asm/virt.h>

#include "atags.h"


#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void init_default_cache_policy(unsigned long);
extern void paging_init(const struct machine_desc *desc);
extern void early_paging_init(const struct machine_desc *);
extern void sanity_check_meminfo(void);
extern enum reboot_mode reboot_mode;
extern void setup_dma_zone(const struct machine_desc *desc);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

const char *system_serial;
EXPORT_SYMBOL(system_serial);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);

unsigned int elf_hwcap2 __read_mostly;
EXPORT_SYMBOL(elf_hwcap2);


#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
	u32 fiq[3];
} ____cacheline_aligned;

#ifndef CONFIG_CPU_V7M
static struct stack stacks[NR_CPUS];
#endif

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
const struct machine_desc *machine_desc __initdata;

static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)
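
/*
 * On a little-endian CPU the least significant byte of endian_test.l is
 * c[0], so ENDIANNESS evaluates to 'l'; on big-endian it is c[3], giving
 * 'b'.  setup_processor() appends this character to the utsname machine
 * and ELF platform strings, producing e.g. "armv7l" or "armv7b".
 */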

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"7M",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};

#ifdef CONFIG_CPU_V7M
static int __get_cpu_architecture(void)
{
	return CPU_ARCH_ARMv7M;
}
#else
static int __get_cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
#endif
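
/*
 * Example MIDR decode (illustrative): a Cortex-A9 reports MIDR 0x413fc090.
 * Bits [19:16] are 0xf, so the revised CPUID scheme above applies; MMFR0
 * is then read, and its VMSA support field (>= 3) selects CPU_ARCH_ARMv7.
 */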

int __pure cpu_architecture(void)
{
	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

	return __cpu_architecture;
}

static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* PIPT caches never alias. */
	if (icache_is_pipt())
		return 0;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		asm("mcr	p15, 2, %0, c0, c0, 0 @ set CSSELR"
		    : /* No output operands */
		    : "r" (1));
		isb();
		asm("mrc	p15, 1, %0, c0, c0, 0 @ read CCSIDR"
		    : "=r" (id_reg));
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}
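
/*
 * The ARMv7 branch above computes the size of one cache way (line size *
 * number of sets) from CCSIDR.  If a way exceeds PAGE_SIZE, some virtual
 * index bits lie outside the page offset, so two virtual mappings of the
 * same physical page can land in different cache lines: e.g. 32-byte
 * lines * 256 sets = 8K per way > 4K pages, hence aliasing.
 */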

static void __init cacheid_init(void)
{
	unsigned int arch = cpu_architecture();

	if (arch == CPU_ARCH_ARMv7M) {
		cacheid = 0;
	} else if (arch >= CPU_ARCH_ARMv6) {
		unsigned int cachetype = read_cpuid_cachetype();
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			arch = CPU_ARCH_ARMv7;
			cacheid = CACHEID_VIPT_NONALIASING;
			switch (cachetype & (3 << 14)) {
			case (1 << 14):
				cacheid |= CACHEID_ASID_TAGGED;
				break;
			case (3 << 14):
				cacheid |= CACHEID_PIPT;
				break;
			}
		} else {
			arch = CPU_ARCH_ARMv6;
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
		}
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
	} else {
		cacheid = CACHEID_VIVT;
	}

	pr_info("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		icache_is_pipt() ? "PIPT" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}

#ifdef CONFIG_ARM_PATCH_IDIV

static inline u32 __attribute_const__ sdiv_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "sdiv r0, r0, r1" */
		u32 insn = __opcode_thumb32_compose(0xfb90, 0xf0f1);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "sdiv r0, r0, r1" */
	return __opcode_to_mem_arm(0xe710f110);
}

static inline u32 __attribute_const__ udiv_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "udiv r0, r0, r1" */
		u32 insn = __opcode_thumb32_compose(0xfbb0, 0xf0f1);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "udiv r0, r0, r1" */
	return __opcode_to_mem_arm(0xe730f110);
}

static inline u32 __attribute_const__ bx_lr_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "bx lr; nop" */
		u32 insn = __opcode_thumb32_compose(0x4770, 0x46c0);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "bx lr" */
	return __opcode_to_mem_arm(0xe12fff1e);
}

static void __init patch_aeabi_idiv(void)
{
	extern void __aeabi_uidiv(void);
	extern void __aeabi_idiv(void);
	uintptr_t fn_addr;
	unsigned int mask;

	mask = IS_ENABLED(CONFIG_THUMB2_KERNEL) ? HWCAP_IDIVT : HWCAP_IDIVA;
	if (!(elf_hwcap & mask))
		return;

	pr_info("CPU: div instructions available: patching division code\n");

	fn_addr = ((uintptr_t)&__aeabi_uidiv) & ~1;
	asm ("" : "+g" (fn_addr));
	((u32 *)fn_addr)[0] = udiv_instruction();
	((u32 *)fn_addr)[1] = bx_lr_instruction();
	flush_icache_range(fn_addr, fn_addr + 8);

	fn_addr = ((uintptr_t)&__aeabi_idiv) & ~1;
	asm ("" : "+g" (fn_addr));
	((u32 *)fn_addr)[0] = sdiv_instruction();
	((u32 *)fn_addr)[1] = bx_lr_instruction();
	flush_icache_range(fn_addr, fn_addr + 8);
}

#else
static inline void patch_aeabi_idiv(void) { }
#endif
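
/*
 * The patching above rewrites the first 8 bytes of __aeabi_uidiv and
 * __aeabi_idiv with a hardware divide plus a return, so code linked
 * against the library helpers gains native udiv/sdiv at boot.  The "& ~1"
 * strips the Thumb bit from the function address; the empty asm with the
 * "+g" constraint hides the address's origin from the optimizer so the
 * stores are not elided or reordered.
 */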

static void __init cpuid_init_hwcaps(void)
{
	int block;
	u32 isar5;

	if (cpu_architecture() < CPU_ARCH_ARMv7)
		return;

	block = cpuid_feature_extract(CPUID_EXT_ISAR0, 24);
	if (block >= 2)
		elf_hwcap |= HWCAP_IDIVA;
	if (block >= 1)
		elf_hwcap |= HWCAP_IDIVT;

	/* LPAE implies atomic ldrd/strd instructions */
	block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
	if (block >= 5)
		elf_hwcap |= HWCAP_LPAE;

	/* check for supported v8 Crypto instructions */
	isar5 = read_cpuid_ext(CPUID_EXT_ISAR5);

	block = cpuid_feature_extract_field(isar5, 4);
	if (block >= 2)
		elf_hwcap2 |= HWCAP2_PMULL;
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_AES;

	block = cpuid_feature_extract_field(isar5, 8);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SHA1;

	block = cpuid_feature_extract_field(isar5, 12);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SHA2;

	block = cpuid_feature_extract_field(isar5, 16);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_CRC32;
}
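
/*
 * Note (assumption based on the cpuid helpers used elsewhere in arch/arm):
 * cpuid_feature_extract_field() sign-extends each 4-bit ID register field,
 * so the "block >= N" tests above also reject the 0xf encoding that ARM
 * reserves for removed features.  E.g. an ID_ISAR5 AES field of 2
 * advertises both HWCAP2_AES and HWCAP2_PMULL.
 */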

static void __init elf_hwcap_fixup(void)
{
	unsigned id = read_cpuid_id();

	/*
	 * HWCAP_TLS is available only on 1136 r1p0 and later,
	 * see also kuser_get_tls_init.
	 */
	if (read_cpuid_part() == ARM_CPU_PART_ARM1136 &&
	    ((id >> 20) & 3) == 0) {
		elf_hwcap &= ~HWCAP_TLS;
		return;
	}

	/* Verify if CPUID scheme is implemented */
	if ((id & 0x000f0000) != 0x000f0000)
		return;

	/*
	 * If the CPU supports LDREX/STREX and LDREXB/STREXB,
	 * avoid advertising SWP; it may not be atomic with
	 * multiprocessing cores.
	 */
	if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
	    (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
	     cpuid_feature_extract(CPUID_EXT_ISAR3, 20) >= 3))
		elf_hwcap &= ~HWCAP_SWP;
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void notrace cpu_init(void)
{
#ifndef CONFIG_CPU_V7M
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		pr_crit("CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * This only works on resume and secondary cores. For booting on the
	 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
	 */
	set_my_cpu_offset(per_cpu_offset(cpu));

	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7\n\t"
	"add	r14, %0, %8\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %9"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
	      "I" (offsetof(struct stack, fiq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
#endif
}
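
/*
 * The inline asm above switches briefly into IRQ, ABT, UND and FIQ mode in
 * turn (with IRQs and FIQs masked), points each mode's banked sp at its
 * 3-word slot in struct stack, then drops back to SVC mode.  The exception
 * entry code only needs these small stacks long enough to save state and
 * move onto the SVC stack.
 */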

u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };

void __init smp_setup_processor_id(void)
{
	int i;
	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	cpu_logical_map(0) = cpu;
	for (i = 1; i < nr_cpu_ids; ++i)
		cpu_logical_map(i) = i == cpu ? 0 : i;

	/*
	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
	 * using percpu variable early, for example, lockdep will
	 * access percpu variable inside lock_release
	 */
	set_my_cpu_offset(0);

	pr_info("Booting Linux on physical CPU 0x%x\n", mpidr);
}
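
/*
 * Example: if the boot CPU's Aff0 is 2 on a four-CPU system, the loop
 * above yields cpu_logical_map = { 2, 1, 0, 3 }: logical CPU 0 is always
 * the booting core, and the displaced slot keeps a valid physical id.
 */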

struct mpidr_hash mpidr_hash;
#ifdef CONFIG_SMP
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from
 *			  an MPIDR value. Resulting algorithm is a collision
 *			  free hash carried out through shifting and ORing
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity;
	u32 fs[3], bits[3], ls, mask = 0;
	/*
	 * Pre-scan the list of MPIDRS and filter out bits that do
	 * not contribute to affinity levels, ie they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits 0x%x\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 3; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB bit and LSB bits position
		 * to determine how many bits are required
		 * to express the affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 24 bits values space to a
	 * compressed set of values. This is equivalent to hashing
	 * the MPIDR through shifting and ORing. It is a collision free
	 * hash though not minimal since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, eg MPIDR[7:0] = {0x2, 0x80}.
	 */
	mpidr_hash.shift_aff[0] = fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
						(bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
		mpidr_hash.shift_aff[0],
		mpidr_hash.shift_aff[1],
		mpidr_hash.shift_aff[2],
		mpidr_hash.mask,
		mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
	sync_cache_w(&mpidr_hash);
}
#endif
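
/*
 * Worked example (illustrative): for MPIDRs {0x000, 0x001, 0x100, 0x101}
 * the mask is 0x101, each of the two affinity levels contributes one bit
 * (fs = {0, 0}, bits = {1, 1}), and shift_aff becomes {0, 7, 14}.  The
 * index consumed by the suspend/resume code,
 *
 *	((mpidr & 0x001) >> 0) | ((mpidr & 0x100) >> 7)
 *
 * compresses the four MPIDRs to 0..3, with mpidr_hash.bits = 2.
 */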

static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		pr_err("CPU configuration botched (ID %08x), unable to continue.\n",
		       read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;
	__cpu_architecture = __get_cpu_architecture();

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
		cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
		proc_arch[cpu_architecture()], get_cr());

	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;

	cpuid_init_hwcaps();
	patch_aeabi_idiv();

#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif
#ifdef CONFIG_MMU
	init_default_cache_policy(list->__cpu_mm_mmu_flags);
#endif
	erratum_a15_798181_init();

	elf_hwcap_fixup();

	cacheid_init();
	cpu_init();
}

void __init dump_machine_table(void)
{
	const struct machine_desc *p;

	early_print("Available machine support:\n\nID (hex)\tNAME\n");
	for_each_machine_desc(p)
		early_print("%08x\t%s\n", p->nr, p->name);

	early_print("\nPlease check your kernel config and/or bootloader.\n");

	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
}

int __init arm_add_memory(u64 start, u64 size)
{
	u64 aligned_start;

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is rounded down, start is rounded up.
	 */
	aligned_start = PAGE_ALIGN(start);
	if (aligned_start > start + size)
		size = 0;
	else
		size -= aligned_start - start;

#ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
	if (aligned_start > ULONG_MAX) {
		pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
			(long long)start);
		return -EINVAL;
	}

	if (aligned_start + size > ULONG_MAX) {
		pr_crit("Truncating memory at 0x%08llx to fit in 32-bit physical address space\n",
			(long long)start);
		/*
		 * To ensure bank->start + bank->size is representable in
		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
		 * This means we lose a page after masking.
		 */
		size = ULONG_MAX - aligned_start;
	}
#endif

	if (aligned_start < PHYS_OFFSET) {
		if (aligned_start + size <= PHYS_OFFSET) {
			pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
				aligned_start, aligned_start + size);
			return -EINVAL;
		}

		pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
			aligned_start, (u64)PHYS_OFFSET);

		size -= PHYS_OFFSET - aligned_start;
		aligned_start = PHYS_OFFSET;
	}

	start = aligned_start;
	size = size & ~(phys_addr_t)(PAGE_SIZE - 1);

	/*
	 * Check whether this memory region has non-zero size.
	 */
	if (size == 0)
		return -EINVAL;

	memblock_add(start, size);
	return 0;
}
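
/*
 * Alignment example (4K pages): arm_add_memory(0x80000400, 0x100000)
 * rounds the start up to 0x80001000, shrinks size by the 0xc00 skipped
 * bytes to 0xff400, then truncates it to whole pages (0xff000) before
 * calling memblock_add().
 */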

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */

static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	u64 size;
	u64 start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		memblock_remove(memblock_start_of_DRAM(),
			memblock_end_of_DRAM() - memblock_start_of_DRAM());
	}

	start = PHYS_OFFSET;
	size  = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);
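
/*
 * Usage: "mem=512M" describes 512MB of RAM starting at PHYS_OFFSET, while
 * "mem=512M@0x20000000" also sets the start address; memparse() accepts
 * the usual K/M/G suffixes.  The first mem= option discards the
 * firmware-provided layout, and each further one adds another region.
 */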

static void __init request_standard_resources(const struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(_etext - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		res = memblock_virt_alloc(sizeof(*res), 0);
		res->name  = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
	.orig_video_lines	= 30,
	.orig_video_cols	= 80,
	.orig_video_mode	= 0,
	.orig_video_ega_bx	= 0,
	.orig_video_isVGA	= 1,
	.orig_video_points	= 8
};
#endif

static int __init customize_machine(void)
{
	/*
	 * customizes platform devices, or adds new ones
	 * On DT based machines, we fall back to populating the
	 * machine from the device tree, if no callback is provided,
	 * otherwise we would always need an init_machine callback.
	 */
	of_iommu_init();
	if (machine_desc->init_machine)
		machine_desc->init_machine();
#ifdef CONFIG_OF
	else
		of_platform_populate(NULL, of_default_bus_match_table,
				     NULL, NULL);
#endif
	return 0;
}
arch_initcall(customize_machine);

static int __init init_machine_late(void)
{
	struct device_node *root;
	int ret;

	if (machine_desc->init_late)
		machine_desc->init_late();

	root = of_find_node_by_path("/");
	if (root) {
		ret = of_property_read_string(root, "serial-number",
					      &system_serial);
		if (ret)
			system_serial = NULL;
	}

	if (!system_serial)
		system_serial = kasprintf(GFP_KERNEL, "%08x%08x",
					  system_serial_high,
					  system_serial_low);

	return 0;
}
late_initcall(init_machine_late);

#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves the memory area given by the "crashkernel=" kernel
 * command line parameter. The memory reserved is used by a dump capture
 * kernel when the primary kernel crashes.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret)
		return;

	ret = memblock_reserve(crash_base, crash_size);
	if (ret < 0) {
		pr_warn("crashkernel reservation failed - memory is in use (0x%lx)\n",
			(unsigned long)crash_base);
		return;
	}

	pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
		(unsigned long)(crash_size >> 20),
		(unsigned long)(crash_base >> 20),
		(unsigned long)(total_mem >> 20));

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */

void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
	sync_boot_mode();

	if (is_hyp_mode_available()) {
		pr_info("CPU: All CPU(s) started in HYP mode.\n");
		pr_info("CPU: Virtualization extensions available.\n");
	} else if (is_hyp_mode_mismatched()) {
		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
			__boot_cpu_mode & MODE_MASK);
		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
	} else
		pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}

void __init setup_arch(char **cmdline_p)
{
	const struct machine_desc *mdesc;

	setup_processor();
	mdesc = setup_machine_fdt(__atags_pointer);
	if (!mdesc)
		mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
	machine_desc = mdesc;
	machine_name = mdesc->name;
	dump_stack_set_arch_desc("%s", mdesc->name);

	if (mdesc->reboot_mode != REBOOT_HARD)
		reboot_mode = mdesc->reboot_mode;

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	early_fixmap_init();
	early_ioremap_init();

	parse_early_param();

#ifdef CONFIG_MMU
	early_paging_init(mdesc);
#endif
	setup_dma_zone(mdesc);
	efi_init();
	sanity_check_meminfo();
	arm_memblock_init(mdesc);

	early_ioremap_reset();

	paging_init(mdesc);
	request_standard_resources(mdesc);

	if (mdesc->restart)
		arm_pm_restart = mdesc->restart;

	unflatten_device_tree();

	arm_dt_init_cpu_maps();
	psci_dt_init();
	xen_early_init();
#ifdef CONFIG_SMP
	if (is_smp()) {
		if (!mdesc->smp_init || !mdesc->smp_init()) {
			if (psci_smp_available())
				smp_set_ops(&psci_smp_ops);
			else if (mdesc->smp)
				smp_set_ops(mdesc->smp);
		}
		smp_init_cpus();
		smp_build_mpidr_hash();
	}
#endif

	if (!is_smp())
		hyp_mode_check();

	reserve_crashkernel();

#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	if (mdesc->init_early)
		mdesc->init_early();
}


static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = platform_can_hotplug_cpu(cpu);
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif

static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	"evtstrm",
	NULL
};

static const char *hwcap2_str[] = {
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};

static int c_show(struct seq_file *m, void *v)
{
	int i, j;
	u32 cpuid;

	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
		seq_printf(m, "model name\t: %s rev %d (%s)\n",
			   cpu_name, cpuid & 15, elf_platform);

#if defined(CONFIG_SMP)
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
#else
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   loops_per_jiffy / (500000/HZ),
			   (loops_per_jiffy / (5000/HZ)) % 100);
#endif
		/* dump out the processor features */
		seq_puts(m, "Features\t: ");

		for (j = 0; hwcap_str[j]; j++)
			if (elf_hwcap & (1 << j))
				seq_printf(m, "%s ", hwcap_str[j]);

		for (j = 0; hwcap2_str[j]; j++)
			if (elf_hwcap2 & (1 << j))
				seq_printf(m, "%s ", hwcap2_str[j]);

		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
		seq_printf(m, "CPU architecture: %s\n",
			   proc_arch[cpu_architecture()]);

		if ((cpuid & 0x0008f000) == 0x00000000) {
			/* pre-ARM7 */
			seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
		} else {
			if ((cpuid & 0x0008f000) == 0x00007000) {
				/* ARM7 */
				seq_printf(m, "CPU variant\t: 0x%02x\n",
					   (cpuid >> 16) & 127);
			} else {
				/* post-ARM7 */
				seq_printf(m, "CPU variant\t: 0x%x\n",
					   (cpuid >> 20) & 15);
			}
			seq_printf(m, "CPU part\t: 0x%03x\n",
				   (cpuid >> 4) & 0xfff);
		}
		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
	}

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %s\n", system_serial);

	return 0;
}
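
/*
 * Sample /proc/cpuinfo output from c_show() (values are machine-specific
 * and illustrative only):
 *
 *	processor	: 0
 *	model name	: ARMv7 Processor rev 10 (v7l)
 *	BogoMIPS	: 790.52
 *	Features	: half thumb fastmult vfp edsp neon vfpv3 tls
 *	CPU implementer	: 0x41
 *	CPU architecture: 7
 *	CPU variant	: 0x2
 *	CPU part	: 0xc09
 *	CPU revision	: 10
 *
 *	Hardware	: Generic DT based system
 *	Revision	: 0000
 *	Serial		: 0000000000000000
 */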

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};