arm64: Move cpu_resume into the text section
[deliverable/linux.git] / arch / arm64 / kernel / setup.c
CommitLineData
9703d9d7
CM
1/*
2 * Based on arch/arm/kernel/setup.c
3 *
4 * Copyright (C) 1995-2001 Russell King
5 * Copyright (C) 2012 ARM Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include <linux/export.h>
21#include <linux/kernel.h>
22#include <linux/stddef.h>
23#include <linux/ioport.h>
24#include <linux/delay.h>
25#include <linux/utsname.h>
26#include <linux/initrd.h>
27#include <linux/console.h>
a41dc0e8 28#include <linux/cache.h>
9703d9d7
CM
29#include <linux/bootmem.h>
30#include <linux/seq_file.h>
31#include <linux/screen_info.h>
32#include <linux/init.h>
33#include <linux/kexec.h>
34#include <linux/crash_dump.h>
35#include <linux/root_dev.h>
de79a64d 36#include <linux/clk-provider.h>
9703d9d7
CM
37#include <linux/cpu.h>
38#include <linux/interrupt.h>
39#include <linux/smp.h>
40#include <linux/fs.h>
41#include <linux/proc_fs.h>
42#include <linux/memblock.h>
43#include <linux/of_fdt.h>
d6bafb9b 44#include <linux/of_platform.h>
f84d0275 45#include <linux/efi.h>
44b82b77 46#include <linux/personality.h>
9703d9d7 47
bf4b558e 48#include <asm/fixmap.h>
df857416 49#include <asm/cpu.h>
9703d9d7
CM
50#include <asm/cputype.h>
51#include <asm/elf.h>
52#include <asm/cputable.h>
930da09f 53#include <asm/cpufeature.h>
e8765b26 54#include <asm/cpu_ops.h>
9703d9d7
CM
55#include <asm/sections.h>
56#include <asm/setup.h>
4c7aa002 57#include <asm/smp_plat.h>
9703d9d7
CM
58#include <asm/cacheflush.h>
59#include <asm/tlbflush.h>
60#include <asm/traps.h>
61#include <asm/memblock.h>
e790f1de 62#include <asm/psci.h>
f84d0275 63#include <asm/efi.h>
9703d9d7
CM
64
unsigned int processor_id;
EXPORT_SYMBOL(processor_id);

/* AArch64 ELF hwcap bits exposed to userspace via AT_HWCAP. */
unsigned long elf_hwcap __read_mostly;
EXPORT_SYMBOL_GPL(elf_hwcap);

#ifdef CONFIG_COMPAT
/*
 * Baseline AArch32 hwcaps that every arm64 CPU provides to compat
 * tasks; optional AArch32 crypto bits are detected at boot into
 * compat_elf_hwcap2 by setup_processor().
 */
#define COMPAT_ELF_HWCAP_DEFAULT	\
				(COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
				 COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
				 COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
				 COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
				 COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
				 COMPAT_HWCAP_LPAE)
unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
unsigned int compat_elf_hwcap2 __read_mostly;
#endif

/* Bitmap of detected CPU capabilities (one bit per cpufeature cap). */
DECLARE_BITMAP(cpu_hwcaps, NCAPS);

/* Human-readable CPU name, set from the cpu_info table at boot. */
static const char *cpu_name;

/* Physical address of the FDT blob, stashed by the early boot code. */
phys_addr_t __fdt_pointer __initdata;

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

/* Convenience aliases; start/end are filled in by
 * request_standard_resources(). */
#define kernel_code mem_res[0]
#define kernel_data mem_res[1]
108
109void __init early_print(const char *str, ...)
110{
111 char buf[256];
112 va_list ap;
113
114 va_start(ap, str);
115 vsnprintf(buf, sizeof(buf), str, ap);
116 va_end(ap);
117
118 printk("%s", buf);
119}
120
71586276
WD
121void __init smp_setup_processor_id(void)
122{
80708677
MR
123 u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
124 cpu_logical_map(0) = mpidr;
125
71586276
WD
126 /*
127 * clear __my_cpu_offset on boot CPU to avoid hang caused by
128 * using percpu variable early, for example, lockdep will
129 * access percpu variable inside lock_release
130 */
131 set_my_cpu_offset(0);
80708677 132 pr_info("Booting Linux on physical CPU 0x%lx\n", (unsigned long)mpidr);
71586276
WD
133}
134
6e15d0e0
SK
135bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
136{
137 return phys_id == cpu_logical_map(cpu);
138}
139
struct mpidr_hash mpidr_hash;
#ifdef CONFIG_SMP
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. Resulting algorithm is a collision
 *			  free hash carried out through shifting and ORing
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity, fs[4], bits[4], ls;
	u64 mask = 0;
	/*
	 * Pre-scan the list of MPIDRS and filter out bits that do
	 * not contribute to affinity levels, ie they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits %#llx\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 4; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB bit and LSB bits position
		 * to determine how many bits are required
		 * to express the affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR_EL1 by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 32 bits values space to a
	 * compressed set of values. This is equivalent to hashing
	 * the MPIDR_EL1 through shifting and ORing. It is a collision free
	 * hash though not minimal since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, eg MPIDR_EL1[7:0] = {0x2, 0x80}.
	 */
	mpidr_hash.shift_aff[0] = MPIDR_LEVEL_SHIFT(0) + fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_SHIFT(1) + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = MPIDR_LEVEL_SHIFT(2) + fs[2] -
				  (bits[1] + bits[0]);
	mpidr_hash.shift_aff[3] = MPIDR_LEVEL_SHIFT(3) +
				  fs[3] - (bits[2] + bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[3] + bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] aff3[%u] mask[%#llx] bits[%u]\n",
		mpidr_hash.shift_aff[0],
		mpidr_hash.shift_aff[1],
		mpidr_hash.shift_aff[2],
		mpidr_hash.shift_aff[3],
		mpidr_hash.mask,
		mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
	/*
	 * NOTE(review): the flush suggests the hash is consumed with
	 * caches disabled (e.g. during cpu resume) — confirm against
	 * the users of mpidr_hash.
	 */
	__flush_dcache_area(&mpidr_hash, sizeof(struct mpidr_hash));
}
#endif
208
/*
 * Identify the boot CPU, populate the ELF hwcap bitmasks from the
 * instruction-set attribute registers and sanity-check the cache
 * geometry against the compile-time L1_CACHE_BYTES.
 */
static void __init setup_processor(void)
{
	struct cpu_info *cpu_info;
	u64 features, block;
	u32 cwg;
	int cls;

	cpu_info = lookup_processor_type(read_cpuid_id());
	if (!cpu_info) {
		printk("CPU configuration botched (ID %08x), unable to continue.\n",
		       read_cpuid_id());
		while (1);	/* unknown CPU: cannot continue booting */
	}

	cpu_name = cpu_info->cpu_name;

	printk("CPU: %s [%08x] revision %d\n",
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15);

	sprintf(init_utsname()->machine, ELF_PLATFORM);
	elf_hwcap = 0;

	cpuinfo_store_boot_cpu();

	/*
	 * Check for sane CTR_EL0.CWG value.
	 */
	cwg = cache_type_cwg();
	cls = cache_line_size();
	if (!cwg)
		pr_warn("No Cache Writeback Granule information, assuming cache line size %d\n",
			cls);
	if (L1_CACHE_BYTES < cls)
		pr_warn("L1_CACHE_BYTES smaller than the Cache Writeback Granule (%d < %d)\n",
			L1_CACHE_BYTES, cls);

	/*
	 * ID_AA64ISAR0_EL1 contains 4-bit wide signed feature blocks.
	 * The blocks we test below represent incremental functionality
	 * for non-negative values. Negative values are reserved.
	 */
	features = read_cpuid(ID_AA64ISAR0_EL1);
	block = (features >> 4) & 0xf;
	if (!(block & 0x8)) {
		/* deliberate cascade: each level implies all lower ones */
		switch (block) {
		default:
		case 2:
			elf_hwcap |= HWCAP_PMULL;
			/* fallthrough */
		case 1:
			elf_hwcap |= HWCAP_AES;
			/* fallthrough */
		case 0:
			break;
		}
	}

	block = (features >> 8) & 0xf;
	if (block && !(block & 0x8))
		elf_hwcap |= HWCAP_SHA1;

	block = (features >> 12) & 0xf;
	if (block && !(block & 0x8))
		elf_hwcap |= HWCAP_SHA2;

	block = (features >> 16) & 0xf;
	if (block && !(block & 0x8))
		elf_hwcap |= HWCAP_CRC32;

#ifdef CONFIG_COMPAT
	/*
	 * ID_ISAR5_EL1 carries similar information as above, but pertaining to
	 * the Aarch32 32-bit execution state.
	 */
	features = read_cpuid(ID_ISAR5_EL1);
	block = (features >> 4) & 0xf;
	if (!(block & 0x8)) {
		/* deliberate cascade, as for the native hwcaps above */
		switch (block) {
		default:
		case 2:
			compat_elf_hwcap2 |= COMPAT_HWCAP2_PMULL;
			/* fallthrough */
		case 1:
			compat_elf_hwcap2 |= COMPAT_HWCAP2_AES;
			/* fallthrough */
		case 0:
			break;
		}
	}

	block = (features >> 8) & 0xf;
	if (block && !(block & 0x8))
		compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA1;

	block = (features >> 12) & 0xf;
	if (block && !(block & 0x8))
		compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA2;

	block = (features >> 16) & 0xf;
	if (block && !(block & 0x8))
		compat_elf_hwcap2 |= COMPAT_HWCAP2_CRC32;
#endif
}
308
309static void __init setup_machine_fdt(phys_addr_t dt_phys)
310{
d5189cc5 311 if (!dt_phys || !early_init_dt_scan(phys_to_virt(dt_phys))) {
9703d9d7
CM
312 early_print("\n"
313 "Error: invalid device tree blob at physical address 0x%p (virtual address 0x%p)\n"
d5189cc5 314 "The dtb must be 8-byte aligned and passed in the first 512MB of memory\n"
9703d9d7 315 "\nPlease check your bootloader.\n",
d5189cc5 316 dt_phys, phys_to_virt(dt_phys));
9703d9d7
CM
317
318 while (true)
319 cpu_relax();
320 }
5e39977e 321
44b82b77 322 dump_stack_set_arch_desc("%s (DT)", of_flat_dt_get_machine_name());
9703d9d7
CM
323}
324
9703d9d7
CM
/*
 * Limit the memory size that was specified via FDT.
 */
static int __init early_mem(char *p)
{
	phys_addr_t limit;

	/* "mem=" given with no argument: nothing to do */
	if (!p)
		return 1;

	/* parse the size suffix (K/M/G) and round down to a page */
	limit = memparse(p, &p) & PAGE_MASK;
	pr_notice("Memory limited to %lldMB\n", limit >> 20);

	memblock_enforce_memory_limit(limit);

	return 0;
}
early_param("mem", early_mem);
343
/*
 * Register every memblock "System RAM" region with the iomem resource
 * tree, nesting the kernel code/data resources inside whichever RAM
 * region contains them.
 */
static void __init request_standard_resources(void)
{
	struct memblock_region *region;
	struct resource *res;

	/* Physical extents of the kernel image, from the linker symbols. */
	kernel_code.start = virt_to_phys(_text);
	kernel_code.end = virt_to_phys(_etext - 1);
	kernel_data.start = virt_to_phys(_sdata);
	kernel_data.end = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		/* bootmem allocation cannot fail this early; no NULL check */
		res = alloc_bootmem_low(sizeof(*res));
		res->name = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		/* Nest the kernel image resources under the RAM region. */
		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}
}
371
/*
 * Logical CPU number -> MPIDR hardware ID map; every slot starts as
 * INVALID_HWID until the corresponding CPU is discovered.
 */
u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
373
9703d9d7
CM
/*
 * Top-level arm64 boot-time setup. The sequence below is strictly
 * ordered: the CPU and device tree must be known before memory is
 * initialised, memory before paging, and paging before the device
 * tree can be unflattened.
 */
void __init setup_arch(char **cmdline_p)
{
	setup_processor();

	setup_machine_fdt(__fdt_pointer);

	/* Describe the kernel image layout to the initial mm. */
	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = (unsigned long) _end;

	*cmdline_p = boot_command_line;

	early_ioremap_init();

	parse_early_param();

	/*
	 * Unmask asynchronous aborts after bringing up possible earlycon.
	 * (Report possible System Errors once we can report this occurred)
	 */
	local_async_enable();

	efi_init();
	arm64_memblock_init();

	paging_init();
	request_standard_resources();

	efi_idmap_init();

	unflatten_device_tree();

	psci_init();

	/* Boot CPU enable/boot methods must be known before SMP setup. */
	cpu_read_bootcpu_ops();
#ifdef CONFIG_SMP
	smp_init_cpus();
	smp_build_mpidr_hash();
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
}
423
c560ecfe 424static int __init arm64_device_init(void)
de79a64d 425{
c560ecfe 426 of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
de79a64d
CM
427 return 0;
428}
6ecba8eb 429arch_initcall_sync(arm64_device_init);
de79a64d 430
9703d9d7
CM
431static int __init topology_init(void)
432{
433 int i;
434
435 for_each_possible_cpu(i) {
df857416 436 struct cpu *cpu = &per_cpu(cpu_data.cpu, i);
9703d9d7
CM
437 cpu->hotpluggable = 1;
438 register_cpu(cpu, i);
439 }
440
441 return 0;
442}
443subsys_initcall(topology_init);
444
/*
 * Names for the native HWCAP_* bits, in bit order; the table is
 * NULL-terminated because c_show() iterates until a NULL entry.
 */
static const char *hwcap_str[] = {
	"fp",
	"asimd",
	"evtstrm",
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};
456
44b82b77
MR
#ifdef CONFIG_COMPAT
/*
 * Names for the AArch32 COMPAT_HWCAP_* / COMPAT_HWCAP2_* bits, in bit
 * order. Both tables are walked by c_show() until a NULL entry, so
 * they MUST be NULL-terminated — compat_hwcap_str previously lacked
 * the terminator, causing an out-of-bounds read.
 */
static const char *compat_hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	"evtstrm",
	NULL
};

static const char *compat_hwcap2_str[] = {
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};
#endif /* CONFIG_COMPAT */
492
9703d9d7
CM
/*
 * /proc/cpuinfo show routine: one record per online CPU with the
 * "processor" line glibc expects, the hwcap feature list appropriate
 * to the caller's personality, and the decoded MIDR fields.
 */
static int c_show(struct seq_file *m, void *v)
{
	int i, j;

	for_each_online_cpu(i) {
		struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
		u32 midr = cpuinfo->reg_midr;

		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor". Give glibc what it expects.
		 */
#ifdef CONFIG_SMP
		seq_printf(m, "processor\t: %d\n", i);
#endif

		/*
		 * Dump out the common processor features in a single line.
		 * Userspace should read the hwcaps with getauxval(AT_HWCAP)
		 * rather than attempting to parse this, but there's a body of
		 * software which does already (at least for 32-bit).
		 */
		seq_puts(m, "Features\t:");
		if (personality(current->personality) == PER_LINUX32) {
#ifdef CONFIG_COMPAT
			/* tables iterated until a NULL entry; bit j <-> name j */
			for (j = 0; compat_hwcap_str[j]; j++)
				if (compat_elf_hwcap & (1 << j))
					seq_printf(m, " %s", compat_hwcap_str[j]);

			for (j = 0; compat_hwcap2_str[j]; j++)
				if (compat_elf_hwcap2 & (1 << j))
					seq_printf(m, " %s", compat_hwcap2_str[j]);
#endif /* CONFIG_COMPAT */
		} else {
			for (j = 0; hwcap_str[j]; j++)
				if (elf_hwcap & (1 << j))
					seq_printf(m, " %s", hwcap_str[j]);
		}
		seq_puts(m, "\n");

		seq_printf(m, "CPU implementer\t: 0x%02x\n",
			   MIDR_IMPLEMENTOR(midr));
		seq_printf(m, "CPU architecture: 8\n");
		seq_printf(m, "CPU variant\t: 0x%x\n", MIDR_VARIANT(midr));
		seq_printf(m, "CPU part\t: 0x%03x\n", MIDR_PARTNUM(midr));
		seq_printf(m, "CPU revision\t: %d\n\n", MIDR_REVISION(midr));
	}

	return 0;
}
544
545static void *c_start(struct seq_file *m, loff_t *pos)
546{
547 return *pos < 1 ? (void *)1 : NULL;
548}
549
550static void *c_next(struct seq_file *m, void *v, loff_t *pos)
551{
552 ++*pos;
553 return NULL;
554}
555
556static void c_stop(struct seq_file *m, void *v)
557{
558}
559
560const struct seq_operations cpuinfo_op = {
561 .start = c_start,
562 .next = c_next,
563 .stop = c_stop,
564 .show = c_show
565};
This page took 0.132957 seconds and 5 git commands to generate.