Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * linux/arch/x86_64/kernel/head.S -- start in 32bit and switch to 64bit | |
3 | * | |
4 | * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE | |
5 | * Copyright (C) 2000 Pavel Machek <pavel@suse.cz> | |
6 | * Copyright (C) 2000 Karsten Keil <kkeil@suse.de> | |
7 | * Copyright (C) 2001,2002 Andi Kleen <ak@suse.de> | |
1ab60e0f | 8 | * Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com> |
1da177e4 LT |
9 | */ |
10 | ||
11 | ||
12 | #include <linux/linkage.h> | |
13 | #include <linux/threads.h> | |
f6c2e333 | 14 | #include <linux/init.h> |
1da177e4 LT |
15 | #include <asm/desc.h> |
16 | #include <asm/segment.h> | |
67dcbb6b | 17 | #include <asm/pgtable.h> |
1da177e4 LT |
18 | #include <asm/page.h> |
19 | #include <asm/msr.h> | |
20 | #include <asm/cache.h> | |
369101da | 21 | #include <asm/processor-flags.h> |
1ab60e0f | 22 | |
49a69787 GOC |
23 | #ifdef CONFIG_PARAVIRT |
24 | #include <asm/asm-offsets.h> | |
25 | #include <asm/paravirt.h> | |
26 | #else | |
27 | #define GET_CR2_INTO_RCX movq %cr2, %rcx | |
28 | #endif | |
29 | ||
1da177e4 | 30 | /* We are not able to switch in one step to the final KERNEL ADDRESS SPACE
1ab60e0f VG |
31 | * because we need identity-mapped pages. |
32 | * | |
1da177e4 LT |
33 | */ |
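/*
 * Rough picture: until %cr3 is reloaded and execution jumps into the
 * __START_KERNEL_map mapping, instructions are fetched through the
 * physical load address, so the boot page tables must also map
 * virt == phys for the range we are executing from.
 */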
34 | ||
35 | .text | |
92417df0 | 36 | .section .text.head |
1ab60e0f VG |
37 | .code64 |
38 | .globl startup_64 | |
39 | startup_64: | |
40 | ||
1da177e4 | 41 | /* |
1ab60e0f VG |
42 | * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0, |
43 | * and someone has loaded an identity mapped page table | |
44 | * for us. These identity mapped page tables map all of the | |
45 | * kernel pages and possibly all of memory. | |
46 | * | |
47 | * %esi holds a physical pointer to real_mode_data. | |
48 | * | |
49 | * We come here either directly from a 64bit bootloader, or from | |
50 | * arch/x86_64/boot/compressed/head.S. | |
51 | * | |
52 | * We only come here initially at boot; nothing else comes here. |
53 | * | |
54 | * Since we may be loaded at an address different from what we were | |
55 | * compiled to run at, we first fix up the physical addresses in our page | |
56 | * tables and then reload them. | |
1da177e4 LT |
57 | */ |
58 | ||
1ab60e0f VG |
59 | /* Compute the delta between the address I am compiled to run at and the |
60 | * address I am actually running at. | |
1da177e4 | 61 | */ |
1ab60e0f VG |
62 | leaq _text(%rip), %rbp |
63 | subq $_text - __START_KERNEL_map, %rbp | |
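/*
 * A minimal sketch of the computation above, in C terms (the helper
 * name is illustrative only):
 *
 *   delta = runtime_phys_of(_text) - (_text - __START_KERNEL_map);
 *
 * The rip-relative leaq yields the address we are actually running
 * at, while the subtracted constant is the physical address the
 * kernel was linked for; %rbp carries the delta (0 if not relocated)
 * through the fixups below.
 */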
64 | ||
65 | /* Is the address not 2M aligned? */ | |
66 | movq %rbp, %rax | |
31422c51 | 67 | andl $~PMD_PAGE_MASK, %eax |
1ab60e0f VG |
68 | testl %eax, %eax |
69 | jnz bad_address | |
70 | ||
71 | /* Is the address too large? */ | |
72 | leaq _text(%rip), %rdx | |
73 | movq $PGDIR_SIZE, %rax | |
74 | cmpq %rax, %rdx | |
75 | jae bad_address | |
76 | ||
77 | /* Fixup the physical addresses in the page table | |
1da177e4 | 78 | */ |
1ab60e0f VG |
79 | addq %rbp, init_level4_pgt + 0(%rip) |
80 | addq %rbp, init_level4_pgt + (258*8)(%rip) | |
81 | addq %rbp, init_level4_pgt + (511*8)(%rip) | |
82 | ||
83 | addq %rbp, level3_ident_pgt + 0(%rip) | |
b1c931e3 | 84 | |
1ab60e0f | 85 | addq %rbp, level3_kernel_pgt + (510*8)(%rip) |
b1c931e3 EB |
86 | addq %rbp, level3_kernel_pgt + (511*8)(%rip) |
87 | ||
88 | addq %rbp, level2_fixmap_pgt + (506*8)(%rip) | |
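/*
 * Each entry patched above holds (physical address | flags) with the
 * flags in the low 12 bits; adding the 2MB-aligned delta in %rbp
 * rebases the physical part without disturbing the flag bits.
 */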
1ab60e0f VG |
89 | |
90 | /* Add an identity mapping if I am above 1G */ |
91 | leaq _text(%rip), %rdi | |
31422c51 | 92 | andq $PMD_PAGE_MASK, %rdi |
1ab60e0f VG |
93 | |
94 | movq %rdi, %rax | |
95 | shrq $PUD_SHIFT, %rax | |
96 | andq $(PTRS_PER_PUD - 1), %rax | |
97 | jz ident_complete | |
98 | ||
99 | leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx | |
100 | leaq level3_ident_pgt(%rip), %rbx | |
101 | movq %rdx, 0(%rbx, %rax, 8) | |
102 | ||
103 | movq %rdi, %rax | |
104 | shrq $PMD_SHIFT, %rax | |
105 | andq $(PTRS_PER_PMD - 1), %rax | |
106 | leaq __PAGE_KERNEL_LARGE_EXEC(%rdi), %rdx | |
107 | leaq level2_spare_pgt(%rip), %rbx | |
108 | movq %rdx, 0(%rbx, %rax, 8) | |
109 | ident_complete: | |
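/*
 * The index arithmetic above, sketched in C (variable names are
 * illustrative):
 *
 *   pud_idx = (phys_text >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
 *   pmd_idx = (phys_text >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
 *
 * When the kernel sits above 1G (pud_idx != 0), level2_spare_pgt is
 * hooked into level3_ident_pgt at pud_idx, and one 2MB large-page
 * entry covering the kernel text is written into it at pmd_idx.
 */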
110 | ||
31eedd82 TG |
111 | /* |
112 | * Fixup the kernel text+data virtual addresses. Note that | |
113 | * we might write invalid pmds, when the kernel is relocated | |
114 | * cleanup_highmap() fixes this up along with the mappings | |
115 | * beyond _end. | |
1ab60e0f | 116 | */ |
31eedd82 | 117 | |
1ab60e0f VG |
118 | leaq level2_kernel_pgt(%rip), %rdi |
119 | leaq 4096(%rdi), %r8 | |
120 | /* See if it is a valid page table entry */ | |
121 | 1: testq $1, 0(%rdi) | |
122 | jz 2f | |
123 | addq %rbp, 0(%rdi) | |
124 | /* Go to the next page */ | |
125 | 2: addq $8, %rdi | |
126 | cmp %r8, %rdi | |
127 | jne 1b | |
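/*
 * A C equivalent of the fixup loop above (bit 0 is _PAGE_PRESENT),
 * as a sketch with an illustrative type:
 *
 *   unsigned long *p;
 *   for (p = level2_kernel_pgt; p < level2_kernel_pgt + 512; p++)
 *           if (*p & 1)
 *                   *p += delta;
 */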
128 | ||
129 | /* Fixup phys_base */ | |
130 | addq %rbp, phys_base(%rip) | |
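/*
 * phys_base now records the load delta; it is consumed below in
 * secondary_startup_64 when the physical address of init_level4_pgt
 * is computed for %cr3, so it must stay consistent with the fixed-up
 * page tables.
 */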
1da177e4 | 131 | |
64e83b5a | 132 | #ifdef CONFIG_X86_TRAMPOLINE |
1ab60e0f VG |
133 | addq %rbp, trampoline_level4_pgt + 0(%rip) |
134 | addq %rbp, trampoline_level4_pgt + (511*8)(%rip) | |
135 | #endif | |
1da177e4 | 136 | |
1ab60e0f VG |
137 | /* Due to ENTRY(), sometimes the empty space gets filled with |
138 | * zeros. Better to take a jmp than to rely on the empty space | |
139 | * being filled with 0x90 (nop). | |
1da177e4 | 140 | */ |
1ab60e0f | 141 | jmp secondary_startup_64 |
90b1c208 | 142 | ENTRY(secondary_startup_64) |
1ab60e0f VG |
143 | /* |
144 | * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0, | |
145 | * and someone has loaded a mapped page table. | |
146 | * | |
147 | * %esi holds a physical pointer to real_mode_data. | |
148 | * | |
149 | * We come here either from startup_64 (using physical addresses) | |
150 | * or from trampoline.S (using virtual addresses). | |
151 | * | |
152 | * Using virtual addresses from trampoline.S removes the need | |
153 | * to have any identity mapped pages in the kernel page table | |
154 | * after the boot processor executes this code. | |
1da177e4 LT |
155 | */ |
156 | ||
157 | /* Enable PAE mode and PGE */ | |
05139d8f | 158 | movl $(X86_CR4_PAE | X86_CR4_PGE), %eax |
1da177e4 LT |
159 | movq %rax, %cr4 |
160 | ||
161 | /* Set up the early boot stage 4-level page tables. */ |
cfd243d4 | 162 | movq $(init_level4_pgt - __START_KERNEL_map), %rax |
1ab60e0f | 163 | addq phys_base(%rip), %rax |
1da177e4 LT |
164 | movq %rax, %cr3 |
165 | ||
1ab60e0f VG |
166 | /* Ensure I am executing from virtual addresses */ |
167 | movq $1f, %rax | |
168 | jmp *%rax | |
169 | 1: | |
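/*
 * $1f is an absolute link-time (virtual) address, so the indirect
 * jump above is what actually moves execution out of the identity
 * mapping; a relative jump would have kept executing at the physical
 * location we were loaded at.
 */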
170 | ||
1da177e4 LT |
171 | /* Check if NX is implemented */ |
172 | movl $0x80000001, %eax | |
173 | cpuid | |
174 | movl %edx,%edi | |
175 | ||
176 | /* Set up EFER (Extended Feature Enable Register) */ | |
177 | movl $MSR_EFER, %ecx | |
178 | rdmsr | |
1ab60e0f VG |
179 | btsl $_EFER_SCE, %eax /* Enable System Call */ |
180 | btl $20,%edi /* No Execute supported? */ | |
1da177e4 LT |
181 | jnc 1f |
182 | btsl $_EFER_NX, %eax | |
1ab60e0f | 183 | 1: wrmsr /* Make changes effective */ |
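/*
 * Bit 20 of CPUID 0x80000001:%edx advertises No-Execute support;
 * _EFER_NX is only set when it is present, since setting a reserved
 * EFER bit would raise #GP.
 */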
1da177e4 LT |
184 | |
185 | /* Set up cr0 */ |
369101da CG |
186 | #define CR0_STATE (X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \ |
187 | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \ | |
188 | X86_CR0_PG) | |
189 | movl $CR0_STATE, %eax | |
1da177e4 LT |
190 | /* Make changes effective */ |
191 | movq %rax, %cr0 | |
192 | ||
193 | /* Set up a boot-time stack */ |
9cf4f298 | 194 | movq stack_start(%rip),%rsp |
1da177e4 LT |
195 | |
196 | /* zero EFLAGS after setting rsp */ | |
197 | pushq $0 | |
198 | popfq | |
199 | ||
200 | /* | |
201 | * We must switch to a new descriptor in kernel space for the GDT | |
202 | * because soon the kernel won't have access anymore to the userspace | |
203 | * addresses where we're currently running. We have to do that here | |
204 | * because in 32bit we couldn't load a 64bit linear address. | |
205 | */ | |
a939098a | 206 | lgdt early_gdt_descr(%rip) |
1da177e4 | 207 | |
ffb60175 ZA |
208 | /* Set up data segments; actually 0 would do too */ |
209 | movl $__KERNEL_DS,%eax | |
210 | movl %eax,%ds | |
211 | movl %eax,%ss | |
212 | movl %eax,%es | |
213 | ||
214 | /* | |
215 | * We don't really need to load %fs or %gs, but load them anyway | |
216 | * to kill any stale realmode selectors. This allows execution | |
217 | * under VT hardware. | |
218 | */ | |
219 | movl %eax,%fs | |
220 | movl %eax,%gs | |
221 | ||
1da177e4 LT |
222 | /* |
223 | * Set up a dummy PDA. This is just for some early bootup code | |
224 | * that does in_interrupt() | |
225 | */ | |
226 | movl $MSR_GS_BASE,%ecx | |
227 | movq $empty_zero_page,%rax | |
228 | movq %rax,%rdx | |
229 | shrq $32,%rdx | |
230 | wrmsr | |
231 | ||
1da177e4 LT |
232 | /* %esi is a pointer to the real mode structure with interesting | |
233 | info; pass it to C */ | |
234 | movl %esi, %edi | |
235 | ||
236 | /* Finally jump to run C code and to be at the real kernel address. | |
237 | * Since we are running in identity-mapped space we have to jump | |
26374c7b EB |
238 | * to the full 64bit address; this is only possible with an indirect | |
239 | * jump. In addition we need to ensure %cs is set, so we make this | |
240 | * a far return. | |
1da177e4 LT |
241 | */ |
242 | movq initial_code(%rip),%rax | |
26374c7b EB |
243 | pushq $0 # fake return address to stop unwinder |
244 | pushq $__KERNEL_CS # set correct cs | |
245 | pushq %rax # target address in negative space | |
246 | lretq | |
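/*
 * lretq pops %rip first and %cs second, which is why the target
 * address is pushed last: the one instruction both loads the full
 * 64bit entry point and reloads %cs with __KERNEL_CS.
 */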
1da177e4 | 247 | |
e57113bc | 248 | /* SMP bootup changes these two */ |
da5968ae | 249 | __REFDATA |
e57113bc | 250 | .align 8 |
f1fbabb3 | 251 | ENTRY(initial_code) |
1da177e4 | 252 | .quad x86_64_start_kernel |
f1fbabb3 SR |
253 | __FINITDATA |
254 | ||
9cf4f298 | 255 | ENTRY(stack_start) |
1da177e4 | 256 | .quad init_thread_union+THREAD_SIZE-8 |
9cf4f298 | 257 | .word 0 |
1da177e4 | 258 | |
1ab60e0f VG |
259 | bad_address: |
260 | jmp bad_address | |
261 | ||
41bd4eac | 262 | .section ".init.text","ax" |
076f9776 | 263 | #ifdef CONFIG_EARLY_PRINTK |
8866cd9d RM |
264 | .globl early_idt_handlers |
265 | early_idt_handlers: | |
749c970a AK |
266 | i = 0 |
267 | .rept NUM_EXCEPTION_VECTORS | |
268 | movl $i, %esi | |
269 | jmp early_idt_handler | |
270 | i = i + 1 | |
271 | .endr | |
076f9776 | 272 | #endif |
8866cd9d | 273 | |
1da177e4 | 274 | ENTRY(early_idt_handler) |
076f9776 | 275 | #ifdef CONFIG_EARLY_PRINTK |
b957591f AK |
276 | cmpl $2,early_recursion_flag(%rip) |
277 | jz 1f | |
278 | incl early_recursion_flag(%rip) | |
49a69787 | 279 | GET_CR2_INTO_RCX |
8866cd9d RM |
280 | movq %rcx,%r9 |
281 | xorl %r8d,%r8d # zero for error code | |
282 | movl %esi,%ecx # get vector number | |
283 | # Test %ecx against mask of vectors that push error code. | |
284 | cmpl $31,%ecx | |
285 | ja 0f | |
286 | movl $1,%eax | |
287 | salq %cl,%rax | |
288 | testl $0x27d00,%eax | |
289 | je 0f | |
290 | popq %r8 # get error code | |
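# 0x27d00 has bits 8, 10-14 and 17 set: #DF, #TS, #NP, #SS, #GP,
# #PF and #AC, the exception vectors that push an error code.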
291 | 0: movq 0(%rsp),%rcx # get ip | |
292 | movq 8(%rsp),%rdx # get cs | |
293 | xorl %eax,%eax | |
1da177e4 LT |
294 | leaq early_idt_msg(%rip),%rdi |
295 | call early_printk | |
b957591f AK |
296 | cmpl $2,early_recursion_flag(%rip) |
297 | jz 1f | |
298 | call dump_stack | |
6574ffd7 AK |
299 | #ifdef CONFIG_KALLSYMS |
300 | leaq early_idt_ripmsg(%rip),%rdi | |
301 | movq 8(%rsp),%rsi # get rip again | |
302 | call __print_symbol | |
303 | #endif | |
076f9776 | 304 | #endif /* EARLY_PRINTK */ |
1da177e4 LT |
305 | 1: hlt |
306 | jmp 1b | |
076f9776 IM |
307 | |
308 | #ifdef CONFIG_EARLY_PRINTK | |
b957591f AK |
309 | early_recursion_flag: |
310 | .long 0 | |
1da177e4 LT |
311 | |
312 | early_idt_msg: | |
8866cd9d | 313 | .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n" |
6574ffd7 AK |
314 | early_idt_ripmsg: |
315 | .asciz "RIP %s\n" | |
076f9776 | 316 | #endif /* CONFIG_EARLY_PRINTK */ |
41bd4eac | 317 | .previous |
1da177e4 | 318 | |
1ab60e0f | 319 | .balign PAGE_SIZE |
1da177e4 | 320 | |
f0cf5d1a | 321 | #define NEXT_PAGE(name) \ |
67dcbb6b | 322 | .balign PAGE_SIZE; \ |
f0cf5d1a JB |
323 | ENTRY(name) |
324 | ||
67dcbb6b | 325 | /* Automate the creation of 1 to 1 mapping pmd entries */ |
0e192b99 CG |
326 | #define PMDS(START, PERM, COUNT) \ |
327 | i = 0 ; \ | |
328 | .rept (COUNT) ; \ | |
329 | .quad (START) + (i << PMD_SHIFT) + (PERM) ; \ | |
330 | i = i + 1 ; \ | |
67dcbb6b VG |
331 | .endr |
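/*
 * What PMDS(START, PERM, COUNT) expands to, sketched in C:
 *
 *   for (i = 0; i < COUNT; i++)
 *           pmd[i] = START + (i << PMD_SHIFT) + PERM;
 *
 * i.e. COUNT consecutive 2MB large-page entries starting at physical
 * address START.
 */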
332 | ||
cfd243d4 VG |
333 | /* |
334 | * This default setting generates an ident mapping at address 0x100000 | |
335 | * and a mapping for the kernel that precisely maps virtual address | |
336 | * 0xffffffff80000000 to physical address 0x000000. (always using | |
337 | * 2Mbyte large pages provided by PAE mode) | |
338 | */ | |
f0cf5d1a | 339 | NEXT_PAGE(init_level4_pgt) |
cfd243d4 VG |
340 | .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE |
341 | .fill 257,8,0 | |
342 | .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE | |
343 | .fill 252,8,0 | |
344 | /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */ | |
345 | .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE | |
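/*
 * Slot math: one pgd entry covers 2^39 bytes, so
 *   slot   0: virtual 0, the boot-time identity mapping
 *   slot 258: 0xffff810000000000 >> 39 & 511 = 258, the direct
 *             mapping of physical memory in this era's layout
 *   slot 511: 0xffffffff80000000 >> 39 & 511 = 511, the kernel text
 * matching the three init_level4_pgt fixups applied in startup_64.
 */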
1da177e4 | 346 | |
f0cf5d1a | 347 | NEXT_PAGE(level3_ident_pgt) |
67dcbb6b | 348 | .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE |
1da177e4 LT |
349 | .fill 511,8,0 |
350 | ||
f0cf5d1a | 351 | NEXT_PAGE(level3_kernel_pgt) |
1da177e4 LT |
352 | .fill 510,8,0 |
353 | /* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */ | |
67dcbb6b | 354 | .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE |
b1c931e3 EB |
355 | .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE |
356 | ||
357 | NEXT_PAGE(level2_fixmap_pgt) | |
358 | .fill 506,8,0 | |
359 | .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE | |
360 | /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */ | |
361 | .fill 5,8,0 | |
362 | ||
363 | NEXT_PAGE(level1_fixmap_pgt) | |
364 | .fill 512,8,0 | |
1da177e4 | 365 | |
f0cf5d1a | 366 | NEXT_PAGE(level2_ident_pgt) |
67dcbb6b VG |
367 | /* Since I easily can, map the first 1G. |
368 | * Don't set NX because code runs from these pages. | |
369 | */ | |
88f3aec7 | 370 | PMDS(0, __PAGE_KERNEL_LARGE_EXEC, PTRS_PER_PMD) |
1ab60e0f | 371 | |
f0cf5d1a | 372 | NEXT_PAGE(level2_kernel_pgt) |
88f3aec7 | 373 | /* |
85eb69a1 | 374 | * 512 MB kernel mapping. We spend a full page on this pagetable |
88f3aec7 IM |
375 | * anyway. |
376 | * | |
377 | * The kernel code+data+bss must not be bigger than that. | |
378 | * | |
85eb69a1 | 379 | * (NOTE: at +512MB starts the module area, see MODULES_VADDR. |
88f3aec7 IM |
380 | * If you want to increase this then increase MODULES_VADDR |
381 | * too.) | |
382 | */ | |
383 | PMDS(0, __PAGE_KERNEL_LARGE_EXEC|_PAGE_GLOBAL, | |
d4afe414 | 384 | KERNEL_IMAGE_SIZE/PMD_SIZE) |
1da177e4 | 385 | |
1ab60e0f | 386 | NEXT_PAGE(level2_spare_pgt) |
88f3aec7 | 387 | .fill 512, 8, 0 |
1ab60e0f | 388 | |
67dcbb6b | 389 | #undef PMDS |
f0cf5d1a | 390 | #undef NEXT_PAGE |
1da177e4 | 391 | |
f0cf5d1a | 392 | .data |
1da177e4 | 393 | .align 16 |
a939098a GC |
394 | .globl early_gdt_descr |
395 | early_gdt_descr: | |
396 | .word GDT_ENTRIES*8-1 | |
397 | .quad per_cpu__gdt_page | |
1da177e4 | 398 | |
1ab60e0f VG |
399 | ENTRY(phys_base) |
400 | /* This must match the first entry in level2_kernel_pgt */ | |
401 | .quad 0x0000000000000000 | |
402 | ||
1da177e4 | 403 | |
e57113bc JB |
404 | .section .bss, "aw", @nobits |
405 | .align L1_CACHE_BYTES | |
406 | ENTRY(idt_table) | |
407 | .skip 256 * 16 | |
1da177e4 | 408 | |
e57113bc JB |
409 | .section .bss.page_aligned, "aw", @nobits |
410 | .align PAGE_SIZE | |
411 | ENTRY(empty_zero_page) | |
412 | .skip PAGE_SIZE |