/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/sort.h>
#include <linux/psci.h>

#include <asm/unified.h>
#include <asm/cp15.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/efi.h>
#include <asm/elf.h>
#include <asm/early_ioremap.h>
#include <asm/fixmap.h>
#include <asm/procinfo.h>
#include <asm/psci.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>
#include <asm/xen/hypervisor.h>

#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/system_info.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/memblock.h>
#include <asm/virt.h>

#include "atags.h"


#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void init_default_cache_policy(unsigned long);
extern void paging_init(const struct machine_desc *desc);
extern void early_paging_init(const struct machine_desc *);
extern void sanity_check_meminfo(void);
extern enum reboot_mode reboot_mode;
extern void setup_dma_zone(const struct machine_desc *desc);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

const char *system_serial;
EXPORT_SYMBOL(system_serial);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);

unsigned int elf_hwcap2 __read_mostly;
EXPORT_SYMBOL(elf_hwcap2);


#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
	u32 fiq[3];
} ____cacheline_aligned;

#ifndef CONFIG_CPU_V7M
static struct stack stacks[NR_CPUS];
#endif

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
const struct machine_desc *machine_desc __initdata;

static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)
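/*
 * Reading the low byte of endian_test.l yields c[0] ('l') on a
 * little-endian CPU and the last byte ('b') on a 32-bit big-endian one,
 * so ENDIANNESS evaluates to 'l' or 'b'; setup_processor() appends it to
 * the machine and ELF platform strings (e.g. the familiar "armv7l").
 */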

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"7M",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};

#ifdef CONFIG_CPU_V7M
static int __get_cpu_architecture(void)
{
	return CPU_ARCH_ARMv7M;
}
#else
static int __get_cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
#endif
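
/*
 * Example (illustrative, not from the original source): a Cortex-A9
 * reports MIDR 0x410fc090, so (MIDR & 0x000f0000) == 0x000f0000 and the
 * revised-CPUID branch runs; MMFR0's VMSA field then yields
 * CPU_ARCH_ARMv7. The earlier branches classify pre-CPUID cores from the
 * legacy ID register layouts.
 */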

int __pure cpu_architecture(void)
{
	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

	return __cpu_architecture;
}

static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* PIPT caches never alias. */
	if (icache_is_pipt())
		return 0;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		asm("mcr	p15, 2, %0, c0, c0, 0 @ set CSSELR"
		    : /* No output operands */
		    : "r" (1));
		isb();
		asm("mrc	p15, 1, %0, c0, c0, 0 @ read CCSIDR"
		    : "=r" (id_reg));
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}

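/*
 * Rationale: line_size * num_sets is the size of one cache way, i.e. the
 * address span covered by the set-index bits. When it exceeds PAGE_SIZE,
 * some index bits lie above the page offset and therefore come from the
 * virtual page number, so two virtual aliases of one physical line can
 * index different sets, which is the classic VIPT aliasing condition.
 */
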
static void __init cacheid_init(void)
{
	unsigned int arch = cpu_architecture();

	if (arch == CPU_ARCH_ARMv7M) {
		cacheid = 0;
	} else if (arch >= CPU_ARCH_ARMv6) {
		unsigned int cachetype = read_cpuid_cachetype();
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			arch = CPU_ARCH_ARMv7;
			cacheid = CACHEID_VIPT_NONALIASING;
			switch (cachetype & (3 << 14)) {
			case (1 << 14):
				cacheid |= CACHEID_ASID_TAGGED;
				break;
			case (3 << 14):
				cacheid |= CACHEID_PIPT;
				break;
			}
		} else {
			arch = CPU_ARCH_ARMv6;
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
		}
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
	} else {
		cacheid = CACHEID_VIVT;
	}

	pr_info("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		icache_is_pipt() ? "PIPT" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}

#ifdef CONFIG_ARM_PATCH_IDIV

static inline u32 __attribute_const__ sdiv_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "sdiv r0, r0, r1" */
		u32 insn = __opcode_thumb32_compose(0xfb90, 0xf0f1);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "sdiv r0, r0, r1" */
	return __opcode_to_mem_arm(0xe710f110);
}

static inline u32 __attribute_const__ udiv_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "udiv r0, r0, r1" */
		u32 insn = __opcode_thumb32_compose(0xfbb0, 0xf0f1);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "udiv r0, r0, r1" */
	return __opcode_to_mem_arm(0xe730f110);
}

static inline u32 __attribute_const__ bx_lr_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "bx lr; nop" */
		u32 insn = __opcode_thumb32_compose(0x4770, 0x46c0);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "bx lr" */
	return __opcode_to_mem_arm(0xe12fff1e);
}

static void __init patch_aeabi_idiv(void)
{
	extern void __aeabi_uidiv(void);
	extern void __aeabi_idiv(void);
	uintptr_t fn_addr;
	unsigned int mask;

	mask = IS_ENABLED(CONFIG_THUMB2_KERNEL) ? HWCAP_IDIVT : HWCAP_IDIVA;
	if (!(elf_hwcap & mask))
		return;

	pr_info("CPU: div instructions available: patching division code\n");

	fn_addr = ((uintptr_t)&__aeabi_uidiv) & ~1;
	((u32 *)fn_addr)[0] = udiv_instruction();
	((u32 *)fn_addr)[1] = bx_lr_instruction();
	flush_icache_range(fn_addr, fn_addr + 8);

	fn_addr = ((uintptr_t)&__aeabi_idiv) & ~1;
	((u32 *)fn_addr)[0] = sdiv_instruction();
	((u32 *)fn_addr)[1] = bx_lr_instruction();
	flush_icache_range(fn_addr, fn_addr + 8);
}

#else
static inline void patch_aeabi_idiv(void) { }
#endif

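/*
 * The patching above rewrites the first eight bytes of the software
 * division helpers __aeabi_uidiv/__aeabi_idiv with a hardware
 * "udiv/sdiv r0, r0, r1" plus "bx lr", so every call returns after two
 * instructions. The "& ~1" strips the Thumb bit that function addresses
 * carry in Thumb-2 kernels, and flush_icache_range() makes the new text
 * visible to the instruction stream.
 */
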
static void __init cpuid_init_hwcaps(void)
{
	int block;
	u32 isar5;

	if (cpu_architecture() < CPU_ARCH_ARMv7)
		return;

	block = cpuid_feature_extract(CPUID_EXT_ISAR0, 24);
	if (block >= 2)
		elf_hwcap |= HWCAP_IDIVA;
	if (block >= 1)
		elf_hwcap |= HWCAP_IDIVT;

	/* LPAE implies atomic ldrd/strd instructions */
	block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
	if (block >= 5)
		elf_hwcap |= HWCAP_LPAE;

	/* check for supported v8 Crypto instructions */
	isar5 = read_cpuid_ext(CPUID_EXT_ISAR5);

	block = cpuid_feature_extract_field(isar5, 4);
	if (block >= 2)
		elf_hwcap2 |= HWCAP2_PMULL;
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_AES;

	block = cpuid_feature_extract_field(isar5, 8);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SHA1;

	block = cpuid_feature_extract_field(isar5, 12);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SHA2;

	block = cpuid_feature_extract_field(isar5, 16);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_CRC32;
}

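/*
 * Each cpuid_feature_extract() call above pulls one signed 4-bit field
 * out of an ID register; for instance ID_ISAR0[27:24] is the divide
 * field, where 1 means SDIV/UDIV exist in Thumb state only and 2 means
 * they exist in both ARM and Thumb state, hence the >= 1 / >= 2 tests.
 */
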
static void __init elf_hwcap_fixup(void)
{
	unsigned id = read_cpuid_id();

	/*
	 * HWCAP_TLS is available only on 1136 r1p0 and later,
	 * see also kuser_get_tls_init.
	 */
	if (read_cpuid_part() == ARM_CPU_PART_ARM1136 &&
	    ((id >> 20) & 3) == 0) {
		elf_hwcap &= ~HWCAP_TLS;
		return;
	}

	/* Verify if CPUID scheme is implemented */
	if ((id & 0x000f0000) != 0x000f0000)
		return;

	/*
	 * If the CPU supports LDREX/STREX and LDREXB/STREXB,
	 * avoid advertising SWP; it may not be atomic with
	 * multiprocessing cores.
	 */
	if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
	    (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
	     cpuid_feature_extract(CPUID_EXT_ISAR3, 20) >= 3))
		elf_hwcap &= ~HWCAP_SWP;
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void notrace cpu_init(void)
{
#ifndef CONFIG_CPU_V7M
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		pr_crit("CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * This only works on resume and secondary cores. For booting on the
	 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
	 */
	set_my_cpu_offset(per_cpu_offset(cpu));

	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7\n\t"
	"add	r14, %0, %8\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %9"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
	      "I" (offsetof(struct stack, fiq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
#endif
}

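/*
 * The asm block above cycles through IRQ, ABT, UND and FIQ modes with
 * interrupts masked, points each mode's banked sp at its three-word slot
 * in this CPU's struct stack, then returns to SVC mode. Three words are
 * enough because the exception entry stubs only park r0, lr and the spsr
 * there before switching to the SVC-mode kernel stack.
 */
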
u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };

void __init smp_setup_processor_id(void)
{
	int i;
	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	cpu_logical_map(0) = cpu;
	for (i = 1; i < nr_cpu_ids; ++i)
		cpu_logical_map(i) = i == cpu ? 0 : i;

	/*
	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
	 * using percpu variable early, for example, lockdep will
	 * access percpu variable inside lock_release
	 */
	set_my_cpu_offset(0);

	pr_info("Booting Linux on physical CPU 0x%x\n", mpidr);
}

struct mpidr_hash mpidr_hash;
#ifdef CONFIG_SMP
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. Resulting algorithm is a collision
 *			  free hash carried out through shifting and ORing
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity;
	u32 fs[3], bits[3], ls, mask = 0;
	/*
	 * Pre-scan the list of MPIDRS and filter out bits that do
	 * not contribute to affinity levels, ie they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits 0x%x\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 3; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB bit and LSB bits position
		 * to determine how many bits are required
		 * to express the affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 24 bits values space to a
	 * compressed set of values. This is equivalent to hashing
	 * the MPIDR through shifting and ORing. It is a collision free
	 * hash though not minimal since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, eg MPIDR[7:0] = {0x2, 0x80}.
	 */
	mpidr_hash.shift_aff[0] = fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
						(bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
		mpidr_hash.shift_aff[0],
		mpidr_hash.shift_aff[1],
		mpidr_hash.shift_aff[2],
		mpidr_hash.mask,
		mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
	sync_cache_w(&mpidr_hash);
}
#endif

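/*
 * Worked example (illustrative, not from the original source): a
 * dual-cluster system with MPIDRs 0x000, 0x001, 0x100 and 0x101 gives
 * mask = 0x101, one significant bit at affinity levels 0 and 1
 * (fs = 0, bits = 1 for both), so shift_aff[0] = 0 and
 * shift_aff[1] = MPIDR_LEVEL_BITS + 0 - 1 = 7. Masking an MPIDR and
 * applying the per-level shifts packs the four CPUs into indices 0-3,
 * i.e. mpidr_hash.bits = 2.
 */
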
static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		pr_err("CPU configuration botched (ID %08x), unable to continue.\n",
		       read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;
	__cpu_architecture = __get_cpu_architecture();

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
		cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
		proc_arch[cpu_architecture()], get_cr());

	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;

	cpuid_init_hwcaps();
	patch_aeabi_idiv();

#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif
#ifdef CONFIG_MMU
	init_default_cache_policy(list->__cpu_mm_mmu_flags);
#endif
	erratum_a15_798181_init();

	elf_hwcap_fixup();

	cacheid_init();
	cpu_init();
}

void __init dump_machine_table(void)
{
	const struct machine_desc *p;

	early_print("Available machine support:\n\nID (hex)\tNAME\n");
	for_each_machine_desc(p)
		early_print("%08x\t%s\n", p->nr, p->name);

	early_print("\nPlease check your kernel config and/or bootloader.\n");

	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
}

int __init arm_add_memory(u64 start, u64 size)
{
	u64 aligned_start;

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is rounded down, start is rounded up.
	 */
	aligned_start = PAGE_ALIGN(start);
	if (aligned_start > start + size)
		size = 0;
	else
		size -= aligned_start - start;

#ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
	if (aligned_start > ULONG_MAX) {
		pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
			(long long)start);
		return -EINVAL;
	}

	if (aligned_start + size > ULONG_MAX) {
		pr_crit("Truncating memory at 0x%08llx to fit in 32-bit physical address space\n",
			(long long)start);
		/*
		 * To ensure bank->start + bank->size is representable in
		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
		 * This means we lose a page after masking.
		 */
		size = ULONG_MAX - aligned_start;
	}
#endif

	if (aligned_start < PHYS_OFFSET) {
		if (aligned_start + size <= PHYS_OFFSET) {
			pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
				aligned_start, aligned_start + size);
			return -EINVAL;
		}

		pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
			aligned_start, (u64)PHYS_OFFSET);

		size -= PHYS_OFFSET - aligned_start;
		aligned_start = PHYS_OFFSET;
	}

	start = aligned_start;
	size = size & ~(phys_addr_t)(PAGE_SIZE - 1);

	/*
	 * Check whether this memory region has non-zero size or
	 * invalid node number.
	 */
	if (size == 0)
		return -EINVAL;

	memblock_add(start, size);
	return 0;
}

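/*
 * Worked example (illustrative): start = 0x60000800, size = 0x100000
 * with 4 KiB pages gives aligned_start = 0x60001000, size shrinks by
 * 0x800 to 0xff800, and the final round-down masks it to 0xff000 before
 * the region is handed to memblock_add().
 */
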
/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */

static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	u64 size;
	u64 start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		memblock_remove(memblock_start_of_DRAM(),
			memblock_end_of_DRAM() - memblock_start_of_DRAM());
	}

	start = PHYS_OFFSET;
	size = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);

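/*
 * Usage example (illustrative): booting with "mem=512M@0x80000000"
 * discards the firmware-provided memory map and registers a single
 * 512 MiB bank at physical address 0x80000000; a bare "mem=512M" places
 * the bank at PHYS_OFFSET.
 */
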
static void __init request_standard_resources(const struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(_etext - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		res = memblock_virt_alloc(sizeof(*res), 0);
		res->name  = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}

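/*
 * The nested request_resource() calls above are what produce the
 * familiar /proc/iomem layout, roughly (illustrative addresses):
 *
 *	80000000-bfffffff : System RAM
 *	  80008000-805fffff : Kernel code
 *	  80600000-807fffff : Kernel data
 */
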
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
	.orig_video_lines	= 30,
	.orig_video_cols	= 80,
	.orig_video_mode	= 0,
	.orig_video_ega_bx	= 0,
	.orig_video_isVGA	= 1,
	.orig_video_points	= 8
};
#endif

static int __init customize_machine(void)
{
	/*
	 * customizes platform devices, or adds new ones
	 * On DT based machines, we fall back to populating the
	 * machine from the device tree, if no callback is provided,
	 * otherwise we would always need an init_machine callback.
	 */
	of_iommu_init();
	if (machine_desc->init_machine)
		machine_desc->init_machine();
#ifdef CONFIG_OF
	else
		of_platform_populate(NULL, of_default_bus_match_table,
					NULL, NULL);
#endif
	return 0;
}
arch_initcall(customize_machine);

static int __init init_machine_late(void)
{
	struct device_node *root;
	int ret;

	if (machine_desc->init_late)
		machine_desc->init_late();

	root = of_find_node_by_path("/");
	if (root) {
		ret = of_property_read_string(root, "serial-number",
					      &system_serial);
		if (ret)
			system_serial = NULL;
	}

	if (!system_serial)
		system_serial = kasprintf(GFP_KERNEL, "%08x%08x",
					  system_serial_high,
					  system_serial_low);

	return 0;
}
late_initcall(init_machine_late);

#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves the memory area given in the "crashkernel=" kernel
 * command line parameter. The memory reserved is used by a dump capture
 * kernel when the primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret)
		return;

	ret = memblock_reserve(crash_base, crash_size);
	if (ret < 0) {
		pr_warn("crashkernel reservation failed - memory is in use (0x%lx)\n",
			(unsigned long)crash_base);
		return;
	}

	pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
		(unsigned long)(crash_size >> 20),
		(unsigned long)(crash_base >> 20),
		(unsigned long)(total_mem >> 20));

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */

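/*
 * Usage example (illustrative): "crashkernel=64M@0x70000000" on the
 * command line asks parse_crashkernel() for a 64 MiB reservation at
 * physical 0x70000000; the region then appears as "Crash kernel" in
 * /proc/iomem and is handed to the kdump kernel via kexec.
 */
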
void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
	sync_boot_mode();

	if (is_hyp_mode_available()) {
		pr_info("CPU: All CPU(s) started in HYP mode.\n");
		pr_info("CPU: Virtualization extensions available.\n");
	} else if (is_hyp_mode_mismatched()) {
		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
			__boot_cpu_mode & MODE_MASK);
		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
	} else
		pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}

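/*
 * Ordering note (a reading aid, not from the original source):
 * setup_arch() below identifies the CPU first, selects the machine from
 * the flattened device tree (falling back to ATAGs), parses early
 * parameters before the memory layout is frozen by
 * arm_memblock_init()/paging_init(), and only then wires up SMP
 * operations and the machine's early init hook.
 */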
void __init setup_arch(char **cmdline_p)
{
	const struct machine_desc *mdesc;

	setup_processor();
	mdesc = setup_machine_fdt(__atags_pointer);
	if (!mdesc)
		mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
	machine_desc = mdesc;
	machine_name = mdesc->name;
	dump_stack_set_arch_desc("%s", mdesc->name);

	if (mdesc->reboot_mode != REBOOT_HARD)
		reboot_mode = mdesc->reboot_mode;

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	early_fixmap_init();
	early_ioremap_init();

	parse_early_param();

#ifdef CONFIG_MMU
	early_paging_init(mdesc);
#endif
	setup_dma_zone(mdesc);
	efi_init();
	sanity_check_meminfo();
	arm_memblock_init(mdesc);

	early_ioremap_reset();

	paging_init(mdesc);
	request_standard_resources(mdesc);

	if (mdesc->restart)
		arm_pm_restart = mdesc->restart;

	unflatten_device_tree();

	arm_dt_init_cpu_maps();
	psci_dt_init();
	xen_early_init();
#ifdef CONFIG_SMP
	if (is_smp()) {
		if (!mdesc->smp_init || !mdesc->smp_init()) {
			if (psci_smp_available())
				smp_set_ops(&psci_smp_ops);
			else if (mdesc->smp)
				smp_set_ops(mdesc->smp);
		}
		smp_init_cpus();
		smp_build_mpidr_hash();
	}
#endif

	if (!is_smp())
		hyp_mode_check();

	reserve_crashkernel();

#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	if (mdesc->init_early)
		mdesc->init_early();
}


static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = platform_can_hotplug_cpu(cpu);
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif

static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	"evtstrm",
	NULL
};

static const char *hwcap2_str[] = {
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};

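/*
 * Note on the tables above: index j in hwcap_str/hwcap2_str corresponds
 * to bit j of elf_hwcap/elf_hwcap2 (the HWCAP_*/HWCAP2_* definitions in
 * the uapi hwcap header), which is why the ordering must not change;
 * c_show() below walks them bit-for-bit to build the "Features" line of
 * /proc/cpuinfo.
 */
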
static int c_show(struct seq_file *m, void *v)
{
	int i, j;
	u32 cpuid;

	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
		seq_printf(m, "model name\t: %s rev %d (%s)\n",
			   cpu_name, cpuid & 15, elf_platform);

#if defined(CONFIG_SMP)
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
#else
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   loops_per_jiffy / (500000/HZ),
			   (loops_per_jiffy / (5000/HZ)) % 100);
#endif
		/* dump out the processor features */
		seq_puts(m, "Features\t: ");

		for (j = 0; hwcap_str[j]; j++)
			if (elf_hwcap & (1 << j))
				seq_printf(m, "%s ", hwcap_str[j]);

		for (j = 0; hwcap2_str[j]; j++)
			if (elf_hwcap2 & (1 << j))
				seq_printf(m, "%s ", hwcap2_str[j]);

		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
		seq_printf(m, "CPU architecture: %s\n",
			   proc_arch[cpu_architecture()]);

		if ((cpuid & 0x0008f000) == 0x00000000) {
			/* pre-ARM7 */
			seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
		} else {
			if ((cpuid & 0x0008f000) == 0x00007000) {
				/* ARM7 */
				seq_printf(m, "CPU variant\t: 0x%02x\n",
					   (cpuid >> 16) & 127);
			} else {
				/* post-ARM7 */
				seq_printf(m, "CPU variant\t: 0x%x\n",
					   (cpuid >> 20) & 15);
			}
			seq_printf(m, "CPU part\t: 0x%03x\n",
				   (cpuid >> 4) & 0xfff);
		}
		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
	}

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %s\n", system_serial);

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};