/*
 * Low-level CPU initialisation
 * Based on arch/arm/kernel/head.S
 *
 * Copyright (C) 1994-2002 Russell King
 * Copyright (C) 2003-2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/irqchip/arm-gic-v3.h>

#include <asm/assembler.h>
#include <asm/boot.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/kernel-pgtable.h>
#include <asm/kvm_arm.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/smp.h>
#include <asm/sysreg.h>
#include <asm/thread_info.h>
#include <asm/virt.h>

#define __PHYS_OFFSET	(KERNEL_START - TEXT_OFFSET)

#if (TEXT_OFFSET & 0xfff) != 0
#error TEXT_OFFSET must be at least 4KB aligned
#elif (PAGE_OFFSET & 0x1fffff) != 0
#error PAGE_OFFSET must be at least 2MB aligned
#elif TEXT_OFFSET > 0x1fffff
#error TEXT_OFFSET must be less than 2MB
#endif

/*
 * Kernel startup entry point.
 * ---------------------------
 *
 * The requirements are:
 *   MMU = off, D-cache = off, I-cache = on or off,
 *   x0 = physical address to the FDT blob.
 *
 * This code is mostly position independent, so call it at
 * __pa(PAGE_OFFSET + TEXT_OFFSET).
 *
 * Note that the callee-saved registers are used for storing variables
 * that are useful before the MMU is enabled. The allocations are described
 * in the entry routines.
 */
	__HEAD
_head:
	/*
	 * DO NOT MODIFY. Image header expected by Linux boot-loaders.
	 */
#ifdef CONFIG_EFI
	/*
	 * This add instruction has no meaningful effect except that
	 * its opcode forms the magic "MZ" signature required by UEFI.
	 */
	add	x13, x18, #0x16
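						// (the instruction encodes as
						// 0x91005a4d; its first two
						// little-endian bytes are
						// 0x4d 0x5a, i.e. "MZ")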
	b	stext
#else
	b	stext				// branch to kernel start, magic
	.long	0				// reserved
#endif
	le64sym	_kernel_offset_le		// Image load offset from start of RAM, little-endian
	le64sym	_kernel_size_le			// Effective size of kernel image, little-endian
	le64sym	_kernel_flags_le		// Informative flags, little-endian
	.quad	0				// reserved
	.quad	0				// reserved
	.quad	0				// reserved
	.byte	0x41				// Magic number, "ARM\x64"
	.byte	0x52
	.byte	0x4d
	.byte	0x64
#ifdef CONFIG_EFI
	.long	pe_header - _head		// Offset to the PE header.
#else
	.word	0				// reserved
#endif

#ifdef CONFIG_EFI
	.align 3
pe_header:
	.ascii	"PE"
	.short	0
coff_header:
	.short	0xaa64				// AArch64
	.short	2				// nr_sections
	.long	0				// TimeDateStamp
	.long	0				// PointerToSymbolTable
	.long	1				// NumberOfSymbols
	.short	section_table - optional_header	// SizeOfOptionalHeader
	.short	0x206				// Characteristics.
						// IMAGE_FILE_DEBUG_STRIPPED |
						// IMAGE_FILE_EXECUTABLE_IMAGE |
						// IMAGE_FILE_LINE_NUMS_STRIPPED
optional_header:
	.short	0x20b				// PE32+ format
	.byte	0x02				// MajorLinkerVersion
	.byte	0x14				// MinorLinkerVersion
	.long	_end - efi_header_end		// SizeOfCode
	.long	0				// SizeOfInitializedData
	.long	0				// SizeOfUninitializedData
	.long	__efistub_entry - _head		// AddressOfEntryPoint
	.long	efi_header_end - _head		// BaseOfCode

extra_header_fields:
	.quad	0				// ImageBase
	.long	0x1000				// SectionAlignment
	.long	PECOFF_FILE_ALIGNMENT		// FileAlignment
	.short	0				// MajorOperatingSystemVersion
	.short	0				// MinorOperatingSystemVersion
	.short	0				// MajorImageVersion
	.short	0				// MinorImageVersion
	.short	0				// MajorSubsystemVersion
	.short	0				// MinorSubsystemVersion
	.long	0				// Win32VersionValue

	.long	_end - _head			// SizeOfImage

	// Everything before the kernel image is considered part of the header
	.long	efi_header_end - _head		// SizeOfHeaders
	.long	0				// CheckSum
	.short	0xa				// Subsystem (EFI application)
	.short	0				// DllCharacteristics
	.quad	0				// SizeOfStackReserve
	.quad	0				// SizeOfStackCommit
	.quad	0				// SizeOfHeapReserve
	.quad	0				// SizeOfHeapCommit
	.long	0				// LoaderFlags
	.long	0x6				// NumberOfRvaAndSizes

	.quad	0				// ExportTable
	.quad	0				// ImportTable
	.quad	0				// ResourceTable
	.quad	0				// ExceptionTable
	.quad	0				// CertificationTable
	.quad	0				// BaseRelocationTable

	// Section table
section_table:

	/*
	 * The EFI application loader requires a relocation section
	 * because EFI applications must be relocatable. This is a
	 * dummy section as far as we are concerned.
	 */
	.ascii	".reloc"
	.byte	0
	.byte	0			// end of 0 padding of section name
	.long	0
	.long	0
	.long	0			// SizeOfRawData
	.long	0			// PointerToRawData
	.long	0			// PointerToRelocations
	.long	0			// PointerToLineNumbers
	.short	0			// NumberOfRelocations
	.short	0			// NumberOfLineNumbers
	.long	0x42100040		// Characteristics (section flags)


	.ascii	".text"
	.byte	0
	.byte	0
	.byte	0			// end of 0 padding of section name
	.long	_end - efi_header_end	// VirtualSize
	.long	efi_header_end - _head	// VirtualAddress
	.long	_edata - efi_header_end	// SizeOfRawData
	.long	efi_header_end - _head	// PointerToRawData

	.long	0		// PointerToRelocations (0 for executables)
	.long	0		// PointerToLineNumbers (0 for executables)
	.short	0		// NumberOfRelocations  (0 for executables)
	.short	0		// NumberOfLineNumbers  (0 for executables)
	.long	0xe0500020	// Characteristics (section flags)

	/*
	 * EFI will load .text onwards at the 4k section alignment
	 * described in the PE/COFF header. To ensure that instruction
	 * sequences using an adrp and a :lo12: immediate will function
	 * correctly at this alignment, we must ensure that .text is
	 * placed at a 4k boundary in the Image to begin with.
	 */
	.align 12
efi_header_end:
#endif

	__INIT

	/*
	 * The following callee saved general purpose registers are used on the
	 * primary lowlevel boot path:
	 *
	 *  Register   Scope                      Purpose
	 *  x21        stext() .. start_kernel()  FDT pointer passed at boot in x0
	 *  x23        stext() .. start_kernel()  physical misalignment/KASLR offset
	 *  x28        __create_page_tables()     callee preserved temp register
	 *  x19/x20    __primary_switch()         callee preserved temp registers
	 */
ENTRY(stext)
	bl	preserve_boot_args
	bl	el2_setup			// Drop to EL1, w0=cpu_boot_mode
	adrp	x23, __PHYS_OFFSET
	and	x23, x23, MIN_KIMG_ALIGN - 1	// KASLR offset, defaults to 0
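	// x23 now holds the image's physical misalignment modulo
	// MIN_KIMG_ALIGN (see the register table above); kaslr_early_init's
	// random offset is OR-ed into it later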
	bl	set_cpu_boot_mode_flag
	bl	__create_page_tables
	/*
	 * The following calls CPU setup code, see arch/arm64/mm/proc.S for
	 * details.
	 * On return, the CPU will be ready for the MMU to be turned on and
	 * the TCR will have been set.
	 */
	bl	__cpu_setup			// initialise processor
	b	__primary_switch
ENDPROC(stext)

/*
 * Preserve the arguments passed by the bootloader in x0 .. x3
 */
preserve_boot_args:
	mov	x21, x0				// x21=FDT

	adr_l	x0, boot_args			// record the contents of
	stp	x21, x1, [x0]			// x0 .. x3 at kernel entry
	stp	x2, x3, [x0, #16]

	dmb	sy				// needed before dc ivac with
						// MMU off

	add	x1, x0, #0x20			// 4 x 8 bytes
	b	__inval_cache_range		// tail call
ENDPROC(preserve_boot_args)

/*
 * Macro to create a table entry to the next page.
 *
 *	tbl:	page table address
 *	virt:	virtual address
 *	shift:	#imm page table shift
 *	ptrs:	#imm pointers per table page
 *
 * Preserves:	virt
 * Corrupts:	tmp1, tmp2
 * Returns:	tbl -> next level table page address
 */
	.macro	create_table_entry, tbl, virt, shift, ptrs, tmp1, tmp2
	lsr	\tmp1, \virt, #\shift
	and	\tmp1, \tmp1, #\ptrs - 1	// table index
	add	\tmp2, \tbl, #PAGE_SIZE
	orr	\tmp2, \tmp2, #PMD_TYPE_TABLE	// address of next table and entry type
	str	\tmp2, [\tbl, \tmp1, lsl #3]
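						// (descriptors are 8 bytes,
						// hence the index is scaled
						// by lsl #3)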
	add	\tbl, \tbl, #PAGE_SIZE		// next level table page
	.endm

/*
 * Macro to populate the PGD (and possibly PUD) for the corresponding
 * block entry in the next level (tbl) for the given virtual address.
 *
 * Preserves:	tbl, virt
 * Corrupts:	tmp1, tmp2
 */
	.macro	create_pgd_entry, tbl, virt, tmp1, tmp2
	create_table_entry \tbl, \virt, PGDIR_SHIFT, PTRS_PER_PGD, \tmp1, \tmp2
#if SWAPPER_PGTABLE_LEVELS > 3
	create_table_entry \tbl, \virt, PUD_SHIFT, PTRS_PER_PUD, \tmp1, \tmp2
#endif
#if SWAPPER_PGTABLE_LEVELS > 2
	create_table_entry \tbl, \virt, SWAPPER_TABLE_SHIFT, PTRS_PER_PTE, \tmp1, \tmp2
#endif
	.endm

/*
 * Macro to populate block entries in the page table for the start..end
 * virtual range (inclusive).
 *
 * Preserves:	tbl, flags
 * Corrupts:	phys, start, end, pstate
 */
	.macro	create_block_map, tbl, flags, phys, start, end
	lsr	\phys, \phys, #SWAPPER_BLOCK_SHIFT
	lsr	\start, \start, #SWAPPER_BLOCK_SHIFT
	and	\start, \start, #PTRS_PER_PTE - 1	// table index
	orr	\phys, \flags, \phys, lsl #SWAPPER_BLOCK_SHIFT	// table entry
	lsr	\end, \end, #SWAPPER_BLOCK_SHIFT
	and	\end, \end, #PTRS_PER_PTE - 1		// table end index
9999:	str	\phys, [\tbl, \start, lsl #3]		// store the entry
	add	\start, \start, #1			// next entry
	add	\phys, \phys, #SWAPPER_BLOCK_SIZE	// next block
	cmp	\start, \end
	b.ls	9999b
	.endm

/*
 * Set up the initial page tables. We only set up the bare minimum required
 * to get the kernel running. The following sections are required:
 *   - identity mapping to enable the MMU (low address, TTBR0)
 *   - first few MB of the kernel linear mapping to jump to once the MMU has
 *     been enabled
 */
__create_page_tables:
	mov	x28, lr

	/*
	 * Invalidate the idmap and swapper page tables to avoid potential
	 * dirty cache lines being evicted.
	 */
	adrp	x0, idmap_pg_dir
	adrp	x1, swapper_pg_dir + SWAPPER_DIR_SIZE
	bl	__inval_cache_range

	/*
	 * Clear the idmap and swapper page tables.
	 */
	adrp	x0, idmap_pg_dir
	adrp	x6, swapper_pg_dir + SWAPPER_DIR_SIZE
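	// the unrolled loop below zeroes 64 bytes (four 16-byte stp stores)
	// per iteration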
1:	stp	xzr, xzr, [x0], #16
	stp	xzr, xzr, [x0], #16
	stp	xzr, xzr, [x0], #16
	stp	xzr, xzr, [x0], #16
	cmp	x0, x6
	b.lo	1b

	mov	x7, SWAPPER_MM_MMUFLAGS

	/*
	 * Create the identity mapping.
	 */
	adrp	x0, idmap_pg_dir
	adrp	x3, __idmap_text_start		// __pa(__idmap_text_start)

#ifndef CONFIG_ARM64_VA_BITS_48
#define EXTRA_SHIFT	(PGDIR_SHIFT + PAGE_SHIFT - 3)
#define EXTRA_PTRS	(1 << (48 - EXTRA_SHIFT))

	/*
	 * If VA_BITS < 48, it may be too small to allow for an ID mapping to be
	 * created that covers system RAM if that is located sufficiently high
	 * in the physical address space. So for the ID map, use an extended
	 * virtual range in that case, by configuring an additional translation
	 * level.
	 * First, we have to verify our assumption that the current value of
	 * VA_BITS was chosen such that all translation levels are fully
	 * utilised, and that lowering T0SZ will always result in an additional
	 * translation level to be configured.
	 */
#if VA_BITS != EXTRA_SHIFT
#error "Mismatch between VA_BITS and page size/number of translation levels"
#endif

	/*
	 * Calculate the maximum allowed value for TCR_EL1.T0SZ so that the
	 * entire ID map region can be mapped. As T0SZ == (64 - #bits used),
	 * this number conveniently equals the number of leading zeroes in
	 * the physical address of __idmap_text_end.
	 */
	adrp	x5, __idmap_text_end
	clz	x5, x5
	cmp	x5, TCR_T0SZ(VA_BITS)	// default T0SZ small enough?
	b.ge	1f			// .. then skip additional level

	adr_l	x6, idmap_t0sz
	str	x5, [x6]
	dmb	sy
	dc	ivac, x6		// Invalidate potentially stale cache line

	create_table_entry x0, x3, EXTRA_SHIFT, EXTRA_PTRS, x5, x6
1:
#endif

	create_pgd_entry x0, x3, x5, x6
	mov	x5, x3				// __pa(__idmap_text_start)
	adr_l	x6, __idmap_text_end		// __pa(__idmap_text_end)
	create_block_map x0, x7, x3, x5, x6

	/*
	 * Map the kernel image (starting with PHYS_OFFSET).
	 */
	adrp	x0, swapper_pg_dir
	mov_q	x5, KIMAGE_VADDR + TEXT_OFFSET	// compile time __va(_text)
	add	x5, x5, x23			// add KASLR displacement
	create_pgd_entry x0, x5, x3, x6
	adrp	x6, _end			// runtime __pa(_end)
	adrp	x3, _text			// runtime __pa(_text)
	sub	x6, x6, x3			// _end - _text
	add	x6, x6, x5			// runtime __va(_end)
	create_block_map x0, x7, x3, x5, x6

	/*
	 * Since the page tables have been populated with non-cacheable
	 * accesses (MMU disabled), invalidate the idmap and swapper page
	 * tables again to remove any speculatively loaded cache lines.
	 */
	adrp	x0, idmap_pg_dir
	adrp	x1, swapper_pg_dir + SWAPPER_DIR_SIZE
	dmb	sy
	bl	__inval_cache_range

	ret	x28
ENDPROC(__create_page_tables)
	.ltorg

/*
 * The following fragment of code is executed with the MMU enabled.
 *
 *   x0 = __PHYS_OFFSET
 */
__primary_switched:
	adrp	x4, init_thread_union
	add	sp, x4, #THREAD_SIZE
	msr	sp_el0, x4			// Save thread_info
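	// (sp_el0 holds the current thread_info pointer while running in
	// the kernel)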

	adr_l	x8, vectors			// load VBAR_EL1 with virtual
	msr	vbar_el1, x8			// vector table address
	isb

	stp	xzr, x30, [sp, #-16]!
	mov	x29, sp
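	// the frame record just pushed has a NULL saved fp, which terminates
	// backtraces here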

	str_l	x21, __fdt_pointer, x5		// Save FDT pointer

	ldr_l	x4, kimage_vaddr		// Save the offset between
	sub	x4, x4, x0			// the kernel virtual and
	str_l	x4, kimage_voffset, x5		// physical mappings

	// Clear BSS
	adr_l	x0, __bss_start
	mov	x1, xzr
	adr_l	x2, __bss_stop
	sub	x2, x2, x0
	bl	__pi_memset
	dsb	ishst				// Make zero page visible to PTW

#ifdef CONFIG_KASAN
	bl	kasan_early_init
#endif
#ifdef CONFIG_RANDOMIZE_BASE
	tst	x23, ~(MIN_KIMG_ALIGN - 1)	// already running randomized?
	b.ne	0f
	mov	x0, x21				// pass FDT address in x0
	mov	x1, x23				// pass modulo offset in x1
	bl	kaslr_early_init		// parse FDT for KASLR options
	cbz	x0, 0f				// KASLR disabled? just proceed
	orr	x23, x23, x0			// record KASLR offset
	ldp	x29, x30, [sp], #16		// we must enable KASLR, return
	ret					// to __primary_switch()
0:
#endif
	b	start_kernel
ENDPROC(__primary_switched)

/*
 * end early head section, begin head code that is also used for
 * hotplug and needs to have the same protections as the text region
 */
	.section ".idmap.text","ax"

ENTRY(kimage_vaddr)
	.quad	_text - TEXT_OFFSET

/*
 * If we're fortunate enough to boot at EL2, ensure that the world is
 * sane before dropping to EL1.
 *
 * Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in w0 if
 * booted in EL1 or EL2 respectively.
 */
ENTRY(el2_setup)
	mrs	x0, CurrentEL
	cmp	x0, #CurrentEL_EL2
	b.ne	1f
	mrs	x0, sctlr_el2
CPU_BE(	orr	x0, x0, #(1 << 25)	)	// Set the EE bit for EL2
CPU_LE(	bic	x0, x0, #(1 << 25)	)	// Clear the EE bit for EL2
	msr	sctlr_el2, x0
	b	2f
1:	mrs	x0, sctlr_el1
CPU_BE(	orr	x0, x0, #(3 << 24)	)	// Set the EE and E0E bits for EL1
CPU_LE(	bic	x0, x0, #(3 << 24)	)	// Clear the EE and E0E bits for EL1
	msr	sctlr_el1, x0
	mov	w0, #BOOT_CPU_MODE_EL1		// This cpu booted in EL1
	isb
	ret

2:
#ifdef CONFIG_ARM64_VHE
	/*
	 * Check for VHE being present. For the rest of the EL2 setup,
	 * x2 being non-zero indicates that we do have VHE, and that the
	 * kernel is intended to run at EL2.
	 */
	mrs	x2, id_aa64mmfr1_el1
	ubfx	x2, x2, #8, #4
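	// extract ID_AA64MMFR1_EL1.VH, bits [11:8]: non-zero means VHE is
	// implemented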
#else
	mov	x2, xzr
#endif

	/* Hyp configuration. */
	mov	x0, #HCR_RW			// 64-bit EL1
	cbz	x2, set_hcr
	orr	x0, x0, #HCR_TGE		// Enable Host Extensions
	orr	x0, x0, #HCR_E2H
set_hcr:
	msr	hcr_el2, x0
	isb

	/* Generic timers. */
	mrs	x0, cnthctl_el2
	orr	x0, x0, #3			// Enable EL1 physical timers
	msr	cnthctl_el2, x0
	msr	cntvoff_el2, xzr		// Clear virtual offset

#ifdef CONFIG_ARM_GIC_V3
	/* GICv3 system register access */
	mrs	x0, id_aa64pfr0_el1
	ubfx	x0, x0, #24, #4
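	// extract ID_AA64PFR0_EL1.GIC, bits [27:24]: 1 means the system
	// register interface is present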
	cmp	x0, #1
	b.ne	3f

	mrs_s	x0, ICC_SRE_EL2
	orr	x0, x0, #ICC_SRE_EL2_SRE	// Set ICC_SRE_EL2.SRE==1
	orr	x0, x0, #ICC_SRE_EL2_ENABLE	// Set ICC_SRE_EL2.Enable==1
	msr_s	ICC_SRE_EL2, x0
	isb					// Make sure SRE is now set
	mrs_s	x0, ICC_SRE_EL2			// Read SRE back,
	tbz	x0, #0, 3f			// and check that it sticks
	msr_s	ICH_HCR_EL2, xzr		// Reset ICH_HCR_EL2 to defaults

3:
#endif

	/* Populate ID registers. */
	mrs	x0, midr_el1
	mrs	x1, mpidr_el1
	msr	vpidr_el2, x0
	msr	vmpidr_el2, x1

	/*
	 * When VHE is not in use, early init of EL2 and EL1 needs to be
	 * done here.
	 * When VHE _is_ in use, EL1 will not be used in the host and
	 * requires no configuration, and all non-hyp-specific EL2 setup
	 * will be done via the _EL1 system register aliases in __cpu_setup.
	 */
	cbnz	x2, 1f

	/* sctlr_el1 */
	mov	x0, #0x0800			// Set/clear RES{1,0} bits
CPU_BE(	movk	x0, #0x33d0, lsl #16	)	// Set EE and E0E on BE systems
CPU_LE(	movk	x0, #0x30d0, lsl #16	)	// Clear EE and E0E on LE systems
	msr	sctlr_el1, x0

	/* Coprocessor traps. */
	mov	x0, #0x33ff
	msr	cptr_el2, x0			// Disable copro. traps to EL2
1:

#ifdef CONFIG_COMPAT
	msr	hstr_el2, xzr			// Disable CP15 traps to EL2
#endif

	/* EL2 debug */
	mrs	x0, id_aa64dfr0_el1		// Check ID_AA64DFR0_EL1 PMUVer
	sbfx	x0, x0, #8, #4
	cmp	x0, #1
	b.lt	4f				// Skip if no PMU present
	mrs	x0, pmcr_el0			// Disable debug access traps
	ubfx	x0, x0, #11, #5			// to EL2 and allow access to
	msr	mdcr_el2, x0			// all PMU counters from EL1
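	// the ubfx above extracted PMCR_EL0.N (bits [15:11]) into
	// MDCR_EL2.HPMN (bits [4:0]); all trap bits are left clear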
4:

	/* Stage-2 translation */
	msr	vttbr_el2, xzr

	cbz	x2, install_el2_stub

	mov	w0, #BOOT_CPU_MODE_EL2		// This CPU booted in EL2
	isb
	ret

install_el2_stub:
	/* Hypervisor stub */
	adrp	x0, __hyp_stub_vectors
	add	x0, x0, #:lo12:__hyp_stub_vectors
	msr	vbar_el2, x0

	/* spsr */
	mov	x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
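	// all DAIF exception bits masked; the eret below drops to EL1h
	// (EL1 using SP_EL1), returning to el2_setup's caller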
	msr	spsr_el2, x0
	msr	elr_el2, lr
	mov	w0, #BOOT_CPU_MODE_EL2		// This CPU booted in EL2
	eret
ENDPROC(el2_setup)

/*
 * Sets the __boot_cpu_mode flag depending on the CPU boot mode passed
 * in w0. See arch/arm64/include/asm/virt.h for more info.
 */
set_cpu_boot_mode_flag:
	adr_l	x1, __boot_cpu_mode
	cmp	w0, #BOOT_CPU_MODE_EL2
	b.ne	1f
	add	x1, x1, #4
1:	str	w0, [x1]			// Save CPU boot mode
	dmb	sy
	dc	ivac, x1			// Invalidate potentially stale cache line
	ret
ENDPROC(set_cpu_boot_mode_flag)

/*
 * These values are written with the MMU off, but read with the MMU on.
 * Writers will invalidate the corresponding address, discarding up to a
 * 'Cache Writeback Granule' (CWG) worth of data. The linker script ensures
 * sufficient alignment that the CWG doesn't overlap another section.
 */
	.pushsection ".mmuoff.data.write", "aw"
/*
 * We need to find out the CPU boot mode long after boot, so we store it
 * in a writable variable.
 *
 * This is not in .bss, because we set it sufficiently early that the boot-time
 * zeroing of .bss would clobber it.
 */
ENTRY(__boot_cpu_mode)
	.long	BOOT_CPU_MODE_EL2
	.long	BOOT_CPU_MODE_EL1
/*
 * The booting CPU updates the failed status @__early_cpu_boot_status,
 * with MMU turned off.
 */
ENTRY(__early_cpu_boot_status)
	.long	0

	.popsection

/*
 * This provides a "holding pen" in which all secondary cores are held
 * until we're ready for them to initialise.
 */
ENTRY(secondary_holding_pen)
	bl	el2_setup			// Drop to EL1, w0=cpu_boot_mode
	bl	set_cpu_boot_mode_flag
	mrs	x0, mpidr_el1
	mov_q	x1, MPIDR_HWID_BITMASK
	and	x0, x0, x1
	adr_l	x3, secondary_holding_pen_release
pen:	ldr	x4, [x3]
	cmp	x4, x0
	b.eq	secondary_startup
	wfe
	b	pen
ENDPROC(secondary_holding_pen)

/*
 * Secondary entry point that jumps straight into the kernel. Only to
 * be used where CPUs are brought online dynamically by the kernel.
 */
ENTRY(secondary_entry)
	bl	el2_setup			// Drop to EL1
	bl	set_cpu_boot_mode_flag
	b	secondary_startup
ENDPROC(secondary_entry)

secondary_startup:
	/*
	 * Common entry point for secondary CPUs.
	 */
	bl	__cpu_setup			// initialise processor
	bl	__enable_mmu
	ldr	x8, =__secondary_switched
	br	x8
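	// the literal load yields the link-time (virtual) address, so the
	// br above jumps out of the identity mapping into the kernel's
	// TTBR1 mapping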
ENDPROC(secondary_startup)

__secondary_switched:
	adr_l	x5, vectors
	msr	vbar_el1, x5
	isb

	adr_l	x0, secondary_data
	ldr	x0, [x0, #CPU_BOOT_STACK]	// get secondary_data.stack
	mov	sp, x0
	and	x0, x0, #~(THREAD_SIZE - 1)
	msr	sp_el0, x0			// save thread_info
	mov	x29, #0
	b	secondary_start_kernel
ENDPROC(__secondary_switched)

/*
 * The booting CPU updates the failed status @__early_cpu_boot_status,
 * with MMU turned off.
 *
 * update_early_cpu_boot_status status, tmp1, tmp2
 *  - Corrupts tmp1, tmp2
 *  - Writes 'status' to __early_cpu_boot_status and makes sure
 *    it is committed to memory.
 */

	.macro	update_early_cpu_boot_status status, tmp1, tmp2
	mov	\tmp2, #\status
	adr_l	\tmp1, __early_cpu_boot_status
	str	\tmp2, [\tmp1]
	dmb	sy
	dc	ivac, \tmp1			// Invalidate potentially stale cache line
	.endm

/*
 * Enable the MMU.
 *
 *  x0  = SCTLR_EL1 value for turning on the MMU.
 *
 * Returns to the caller via x30/lr. This requires the caller to be covered
 * by the .idmap.text section.
 *
 * Checks if the selected granule size is supported by the CPU.
 * If it isn't, park the CPU.
 */
ENTRY(__enable_mmu)
	mrs	x1, ID_AA64MMFR0_EL1
	ubfx	x2, x1, #ID_AA64MMFR0_TGRAN_SHIFT, 4
	cmp	x2, #ID_AA64MMFR0_TGRAN_SUPPORTED
	b.ne	__no_granule_support
	update_early_cpu_boot_status 0, x1, x2
	adrp	x1, idmap_pg_dir
	adrp	x2, swapper_pg_dir
	msr	ttbr0_el1, x1			// load TTBR0
	msr	ttbr1_el1, x2			// load TTBR1
	isb
	msr	sctlr_el1, x0
	isb
	/*
	 * Invalidate the local I-cache so that any instructions fetched
	 * speculatively from the PoC are discarded, since they may have
	 * been dynamically patched at the PoU.
	 */
	ic	iallu
	dsb	nsh
	isb
	ret
ENDPROC(__enable_mmu)

__no_granule_support:
	/* Indicate that this CPU can't boot and is stuck in the kernel */
	update_early_cpu_boot_status CPU_STUCK_IN_KERNEL, x1, x2
1:
	wfe
	wfi
	b	1b
ENDPROC(__no_granule_support)

#ifdef CONFIG_RELOCATABLE
__relocate_kernel:
	/*
	 * Iterate over each entry in the relocation table, and apply the
	 * relocations in place.
	 */
	ldr	w9, =__rela_offset		// offset to reloc table
	ldr	w10, =__rela_size		// size of reloc table

	mov_q	x11, KIMAGE_VADDR		// default virtual offset
	add	x11, x11, x23			// actual virtual offset
	add	x9, x9, x11			// __va(.rela)
	add	x10, x9, x10			// __va(.rela) + sizeof(.rela)

0:	cmp	x9, x10
	b.hs	1f
	ldp	x11, x12, [x9], #24
	ldr	x13, [x9, #-8]
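	// each Elf64_Rela entry is 24 bytes: x11 = r_offset, x12 = r_info,
	// x13 = r_addend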
	cmp	w12, #R_AARCH64_RELATIVE
	b.ne	0b
	add	x13, x13, x23			// relocate
	str	x13, [x11, x23]
	b	0b
1:	ret
ENDPROC(__relocate_kernel)
#endif

__primary_switch:
#ifdef CONFIG_RANDOMIZE_BASE
	mov	x19, x0				// preserve new SCTLR_EL1 value
	mrs	x20, sctlr_el1			// preserve old SCTLR_EL1 value
#endif

	bl	__enable_mmu
#ifdef CONFIG_RELOCATABLE
	bl	__relocate_kernel
#ifdef CONFIG_RANDOMIZE_BASE
	ldr	x8, =__primary_switched
	adrp	x0, __PHYS_OFFSET
	blr	x8

	/*
	 * If we return here, we have a KASLR displacement in x23 which we need
	 * to take into account by discarding the current kernel mapping and
	 * creating a new one.
	 */
	msr	sctlr_el1, x20			// disable the MMU
	isb
	bl	__create_page_tables		// recreate kernel mapping

	tlbi	vmalle1				// Remove any stale TLB entries
	dsb	nsh

	msr	sctlr_el1, x19			// re-enable the MMU
	isb
	ic	iallu				// flush instructions fetched
	dsb	nsh				// via old mapping
	isb

	bl	__relocate_kernel
#endif
#endif
	ldr	x8, =__primary_switched
	adrp	x0, __PHYS_OFFSET
	br	x8
ENDPROC(__primary_switch)