ARM64: allow late use of early_ioremap
[deliverable/linux.git] / arch / arm64 / kernel / setup.c
CommitLineData
9703d9d7
CM
1/*
2 * Based on arch/arm/kernel/setup.c
3 *
4 * Copyright (C) 1995-2001 Russell King
5 * Copyright (C) 2012 ARM Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include <linux/export.h>
21#include <linux/kernel.h>
22#include <linux/stddef.h>
23#include <linux/ioport.h>
24#include <linux/delay.h>
25#include <linux/utsname.h>
26#include <linux/initrd.h>
27#include <linux/console.h>
a41dc0e8 28#include <linux/cache.h>
9703d9d7
CM
29#include <linux/bootmem.h>
30#include <linux/seq_file.h>
31#include <linux/screen_info.h>
32#include <linux/init.h>
33#include <linux/kexec.h>
34#include <linux/crash_dump.h>
35#include <linux/root_dev.h>
de79a64d 36#include <linux/clk-provider.h>
9703d9d7
CM
37#include <linux/cpu.h>
38#include <linux/interrupt.h>
39#include <linux/smp.h>
40#include <linux/fs.h>
41#include <linux/proc_fs.h>
42#include <linux/memblock.h>
78d51e0b 43#include <linux/of_iommu.h>
9703d9d7 44#include <linux/of_fdt.h>
d6bafb9b 45#include <linux/of_platform.h>
f84d0275 46#include <linux/efi.h>
44b82b77 47#include <linux/personality.h>
9703d9d7 48
bf4b558e 49#include <asm/fixmap.h>
df857416 50#include <asm/cpu.h>
9703d9d7
CM
51#include <asm/cputype.h>
52#include <asm/elf.h>
53#include <asm/cputable.h>
930da09f 54#include <asm/cpufeature.h>
e8765b26 55#include <asm/cpu_ops.h>
9703d9d7
CM
56#include <asm/sections.h>
57#include <asm/setup.h>
4c7aa002 58#include <asm/smp_plat.h>
9703d9d7
CM
59#include <asm/cacheflush.h>
60#include <asm/tlbflush.h>
61#include <asm/traps.h>
62#include <asm/memblock.h>
e790f1de 63#include <asm/psci.h>
f84d0275 64#include <asm/efi.h>
9703d9d7
CM
/* MIDR_EL1 value of the boot CPU, cached by setup_processor(). */
unsigned int processor_id;
EXPORT_SYMBOL(processor_id);

/* Bitmask of HWCAP_* features advertised to userspace (AT_HWCAP). */
unsigned long elf_hwcap __read_mostly;
EXPORT_SYMBOL_GPL(elf_hwcap);

#ifdef CONFIG_COMPAT
/*
 * Baseline AArch32 hwcaps reported for compat (32-bit) tasks; further
 * bits are ORed in at runtime as they are detected.
 */
#define COMPAT_ELF_HWCAP_DEFAULT	\
				(COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
				 COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
				 COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
				 COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
				 COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
				 COMPAT_HWCAP_LPAE)
unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
unsigned int compat_elf_hwcap2 __read_mostly;
#endif

/* Bitmap of detected ARM64_* CPU capabilities (see <asm/cpufeature.h>). */
DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);

/* Human-readable boot CPU name, set by setup_processor(). */
static const char *cpu_name;

/* Physical address of the DTB, stashed by early boot code. */
phys_addr_t __fdt_pointer __initdata;
/*
 * Standard memory resources.
 * start/end are filled in by request_standard_resources() before the
 * entries are claimed under the matching "System RAM" resources.
 */
static struct resource mem_res[] = {
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

#define kernel_code mem_res[0]
#define kernel_data mem_res[1]
109
110void __init early_print(const char *str, ...)
111{
112 char buf[256];
113 va_list ap;
114
115 va_start(ap, str);
116 vsnprintf(buf, sizeof(buf), str, ap);
117 va_end(ap);
118
119 printk("%s", buf);
120}
121
/*
 * Record the boot CPU's hardware ID (MPIDR affinity bits) as logical
 * CPU 0 and reset the boot CPU's per-cpu offset.
 */
void __init smp_setup_processor_id(void)
{
	u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
	cpu_logical_map(0) = mpidr;

	/*
	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
	 * using percpu variable early, for example, lockdep will
	 * access percpu variable inside lock_release
	 */
	set_my_cpu_offset(0);
	pr_info("Booting Linux on physical CPU 0x%lx\n", (unsigned long)mpidr);
}
135
6e15d0e0
SK
136bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
137{
138 return phys_id == cpu_logical_map(cpu);
139}
140
/* Collision-free MPIDR -> linear index hash, built by smp_build_mpidr_hash(). */
struct mpidr_hash mpidr_hash;
#ifdef CONFIG_SMP
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. Resulting algorithm is a collision
 *			  free hash carried out through shifting and ORing
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity, fs[4], bits[4], ls;
	u64 mask = 0;
	/*
	 * Pre-scan the list of MPIDRS and filter out bits that do
	 * not contribute to affinity levels, ie they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits %#llx\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 4; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB bit and LSB bits position
		 * to determine how many bits are required
		 * to express the affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR_EL1 by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 32 bits values space to a
	 * compressed set of values. This is equivalent to hashing
	 * the MPIDR_EL1 through shifting and ORing. It is a collision free
	 * hash though not minimal since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, eg MPIDR_EL1[7:0] = {0x2, 0x80}.
	 */
	mpidr_hash.shift_aff[0] = MPIDR_LEVEL_SHIFT(0) + fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_SHIFT(1) + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = MPIDR_LEVEL_SHIFT(2) + fs[2] -
						(bits[1] + bits[0]);
	mpidr_hash.shift_aff[3] = MPIDR_LEVEL_SHIFT(3) +
				  fs[3] - (bits[2] + bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[3] + bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] aff3[%u] mask[%#llx] bits[%u]\n",
		 mpidr_hash.shift_aff[0],
		 mpidr_hash.shift_aff[1],
		 mpidr_hash.shift_aff[2],
		 mpidr_hash.shift_aff[3],
		 mpidr_hash.mask,
		 mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
	/*
	 * Flush the hash to memory; presumably so it can be read by CPUs
	 * whose caches are not yet enabled — TODO confirm against users.
	 */
	__flush_dcache_area(&mpidr_hash, sizeof(struct mpidr_hash));
}
#endif
209
/*
 * Identify the boot CPU, record its info, sanity-check the cache
 * geometry and populate elf_hwcap (and the compat hwcap2 bits) from
 * the ID_AA64ISAR0_EL1 / ID_ISAR5_EL1 feature registers.
 */
static void __init setup_processor(void)
{
	struct cpu_info *cpu_info;
	u64 features, block;
	u32 cwg;
	int cls;

	cpu_info = lookup_processor_type(read_cpuid_id());
	if (!cpu_info) {
		printk("CPU configuration botched (ID %08x), unable to continue.\n",
		       read_cpuid_id());
		while (1);	/* unknown CPU: nothing sane we can do */
	}

	cpu_name = cpu_info->cpu_name;

	printk("CPU: %s [%08x] revision %d\n",
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15);

	sprintf(init_utsname()->machine, ELF_PLATFORM);
	elf_hwcap = 0;

	cpuinfo_store_boot_cpu();

	/*
	 * Check for sane CTR_EL0.CWG value.
	 */
	cwg = cache_type_cwg();
	cls = cache_line_size();
	if (!cwg)
		pr_warn("No Cache Writeback Granule information, assuming cache line size %d\n",
			cls);
	if (L1_CACHE_BYTES < cls)
		pr_warn("L1_CACHE_BYTES smaller than the Cache Writeback Granule (%d < %d)\n",
			L1_CACHE_BYTES, cls);

	/*
	 * ID_AA64ISAR0_EL1 contains 4-bit wide signed feature blocks.
	 * The blocks we test below represent incremental functionality
	 * for non-negative values. Negative values are reserved.
	 */
	features = read_cpuid(ID_AA64ISAR0_EL1);
	block = (features >> 4) & 0xf;
	if (!(block & 0x8)) {
		/* AES field: 2 => AES+PMULL, 1 => AES only. */
		switch (block) {
		default:
		case 2:
			elf_hwcap |= HWCAP_PMULL;
			/* fallthrough */
		case 1:
			elf_hwcap |= HWCAP_AES;
			/* fallthrough */
		case 0:
			break;
		}
	}

	block = (features >> 8) & 0xf;
	if (block && !(block & 0x8))
		elf_hwcap |= HWCAP_SHA1;

	block = (features >> 12) & 0xf;
	if (block && !(block & 0x8))
		elf_hwcap |= HWCAP_SHA2;

	block = (features >> 16) & 0xf;
	if (block && !(block & 0x8))
		elf_hwcap |= HWCAP_CRC32;

#ifdef CONFIG_COMPAT
	/*
	 * ID_ISAR5_EL1 carries similar information as above, but pertaining to
	 * the Aarch32 32-bit execution state.
	 */
	features = read_cpuid(ID_ISAR5_EL1);
	block = (features >> 4) & 0xf;
	if (!(block & 0x8)) {
		switch (block) {
		default:
		case 2:
			compat_elf_hwcap2 |= COMPAT_HWCAP2_PMULL;
			/* fallthrough */
		case 1:
			compat_elf_hwcap2 |= COMPAT_HWCAP2_AES;
			/* fallthrough */
		case 0:
			break;
		}
	}

	block = (features >> 8) & 0xf;
	if (block && !(block & 0x8))
		compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA1;

	block = (features >> 12) & 0xf;
	if (block && !(block & 0x8))
		compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA2;

	block = (features >> 16) & 0xf;
	if (block && !(block & 0x8))
		compat_elf_hwcap2 |= COMPAT_HWCAP2_CRC32;
#endif
}
309
/*
 * Validate and scan the flattened device tree handed over by the
 * bootloader. Without a usable DT the kernel cannot continue, so on
 * failure print a diagnostic and spin forever.
 */
static void __init setup_machine_fdt(phys_addr_t dt_phys)
{
	if (!dt_phys || !early_init_dt_scan(phys_to_virt(dt_phys))) {
		early_print("\n"
			"Error: invalid device tree blob at physical address 0x%p (virtual address 0x%p)\n"
			"The dtb must be 8-byte aligned and passed in the first 512MB of memory\n"
			"\nPlease check your bootloader.\n",
			dt_phys, phys_to_virt(dt_phys));

		while (true)
			cpu_relax();
	}

	/* Record the machine name for dump_stack() banners. */
	dump_stack_set_arch_desc("%s (DT)", of_flat_dt_get_machine_name());
}
325
/*
 * Register a "System RAM" resource for every memblock memory region
 * and nest the kernel code/data resources inside the region that
 * contains them, so they show up in /proc/iomem.
 */
static void __init request_standard_resources(void)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start = virt_to_phys(_text);
	kernel_code.end = virt_to_phys(_etext - 1);
	kernel_data.start = virt_to_phys(_sdata);
	kernel_data.end = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		res = alloc_bootmem_low(sizeof(*res));
		res->name = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		/* Nest code/data under the region that fully contains them. */
		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}
}
353
/* Logical CPU -> MPIDR hardware ID map; all entries start invalid. */
u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };

/*
 * Top-level arm64 boot-time setup. The ordering below is significant:
 * each step depends on state established by the ones before it.
 */
void __init setup_arch(char **cmdline_p)
{
	setup_processor();

	setup_machine_fdt(__fdt_pointer);

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = (unsigned long) _end;

	*cmdline_p = boot_command_line;

	/*
	 * Fixmap and early_ioremap come up before parse_early_param() so
	 * early users (e.g. an early console) can map their devices.
	 */
	early_fixmap_init();
	early_ioremap_init();

	parse_early_param();

	/*
	 * Unmask asynchronous aborts after bringing up possible earlycon.
	 * (Report possible System Errors once we can report this occurred)
	 */
	local_async_enable();

	efi_init();
	arm64_memblock_init();

	paging_init();
	request_standard_resources();

	/* The real page tables are up: late early_ioremap uses end here. */
	early_ioremap_reset();

	unflatten_device_tree();

	psci_init();

	cpu_read_bootcpu_ops();
#ifdef CONFIG_SMP
	smp_init_cpus();
	smp_build_mpidr_hash();
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
}
406
/* Initialise IOMMUs, then create platform devices from the device tree. */
static int __init arm64_device_init(void)
{
	of_iommu_init();
	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
	return 0;
}
arch_initcall_sync(arm64_device_init);
de79a64d 414
9703d9d7
CM
415static int __init topology_init(void)
416{
417 int i;
418
419 for_each_possible_cpu(i) {
df857416 420 struct cpu *cpu = &per_cpu(cpu_data.cpu, i);
9703d9d7
CM
421 cpu->hotpluggable = 1;
422 register_cpu(cpu, i);
423 }
424
425 return 0;
426}
427subsys_initcall(topology_init);
428
/*
 * Feature names printed on the /proc/cpuinfo "Features" line.
 * c_show() walks these arrays with a `for (j = 0; str[j]; j++)` loop,
 * so every array MUST end with a NULL sentinel. Bit j of the matching
 * hwcap word selects entry j.
 */
static const char *hwcap_str[] = {
	"fp",
	"asimd",
	"evtstrm",
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};

#ifdef CONFIG_COMPAT
static const char *compat_hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	"evtstrm",
	NULL	/* was missing: c_show()'s loop read past the array (UB) */
};

static const char *compat_hwcap2_str[] = {
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};
#endif /* CONFIG_COMPAT */
476
/*
 * seq_file .show callback for /proc/cpuinfo: emit one record per
 * online CPU — a hwcap "Features" line followed by the MIDR_EL1
 * identification fields.
 *
 * NOTE(review): the hwcap loops below rely on the *_hwcap_str arrays
 * being NULL-terminated.
 */
static int c_show(struct seq_file *m, void *v)
{
	int i, j;

	for_each_online_cpu(i) {
		struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
		u32 midr = cpuinfo->reg_midr;

		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor". Give glibc what it expects.
		 */
#ifdef CONFIG_SMP
		seq_printf(m, "processor\t: %d\n", i);
#endif

		/*
		 * Dump out the common processor features in a single line.
		 * Userspace should read the hwcaps with getauxval(AT_HWCAP)
		 * rather than attempting to parse this, but there's a body of
		 * software which does already (at least for 32-bit).
		 */
		seq_puts(m, "Features\t:");
		if (personality(current->personality) == PER_LINUX32) {
#ifdef CONFIG_COMPAT
			for (j = 0; compat_hwcap_str[j]; j++)
				if (compat_elf_hwcap & (1 << j))
					seq_printf(m, " %s", compat_hwcap_str[j]);

			for (j = 0; compat_hwcap2_str[j]; j++)
				if (compat_elf_hwcap2 & (1 << j))
					seq_printf(m, " %s", compat_hwcap2_str[j]);
#endif /* CONFIG_COMPAT */
		} else {
			for (j = 0; hwcap_str[j]; j++)
				if (elf_hwcap & (1 << j))
					seq_printf(m, " %s", hwcap_str[j]);
		}
		seq_puts(m, "\n");

		seq_printf(m, "CPU implementer\t: 0x%02x\n",
			   MIDR_IMPLEMENTOR(midr));
		seq_printf(m, "CPU architecture: 8\n");
		seq_printf(m, "CPU variant\t: 0x%x\n", MIDR_VARIANT(midr));
		seq_printf(m, "CPU part\t: 0x%03x\n", MIDR_PARTNUM(midr));
		seq_printf(m, "CPU revision\t: %d\n\n", MIDR_REVISION(midr));
	}

	return 0;
}
528
529static void *c_start(struct seq_file *m, loff_t *pos)
530{
531 return *pos < 1 ? (void *)1 : NULL;
532}
533
534static void *c_next(struct seq_file *m, void *v, loff_t *pos)
535{
536 ++*pos;
537 return NULL;
538}
539
/* Nothing to tear down: c_start() allocates no state. */
static void c_stop(struct seq_file *m, void *v)
{
}
543
/* seq_file operations backing /proc/cpuinfo. */
const struct seq_operations cpuinfo_op = {
	.start = c_start,
	.next = c_next,
	.stop = c_stop,
	.show = c_show
};
This page took 0.153085 seconds and 5 git commands to generate.