/*
 *
 *	Trampoline.S	Derived from Setup.S by Linus Torvalds
 *
 *	4 Jan 1997 Michael Chastain: changed to gnu as.
 *	15 Sept 2005 Eric Biederman: 64bit PIC support
 *
 *	Entry: CS:IP point to the start of our code.  We are in real mode
 *	with no stack, but we have the rest of the trampoline page to make
 *	our stack; everything else is a mystery.
 *
 *	On entry to trampoline_data, the processor is in real mode
 *	with 16-bit addressing and 16-bit data.  CS has some value
 *	and IP is zero.  Thus, data addresses need to be absolute
 *	(no relocation) and are taken with regard to r_base.
 *
 *	With the addition of trampoline_level4_pgt this code can
 *	now enter a 64bit kernel that lives at arbitrary 64bit
 *	physical addresses.
 *
 *	If you work on this file, check the object module with objdump
 *	--full-contents --reloc to make sure there are no relocation
 *	entries.
 */
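
/*
 * Rough flow of the code below: the CPU being brought up enters
 * trampoline_data in real mode, far-jumps through startup_32_vector
 * into 32-bit protected mode (startup_32), enables PAE and EFER.LME,
 * turns on paging to activate long mode, and finally far-jumps through
 * startup_64_vector into startup_64, which hands control to the
 * kernel's secondary_startup_64.
 */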

#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/pgtable_types.h>
#include <asm/page_types.h>
#include <asm/msr.h>
#include <asm/segment.h>
#include <asm/processor-flags.h>

	.section ".x86_trampoline","a"
	.balign PAGE_SIZE
	.code16

ENTRY(trampoline_data)
r_base = .
	cli			# We should be safe anyway
	wbinvd
	mov	%cs, %ax	# Code and data in the same place
	mov	%ax, %ds
	mov	%ax, %es
	mov	%ax, %ss

	movl	$0xA5A5A5A5, trampoline_status - r_base
				# write marker so the master knows we're running

	# Setup stack
	movw	$(trampoline_stack_end - r_base), %sp
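	# (%ss was set to %cs above, so this gives a small stack inside the
	#  trampoline image, growing down from trampoline_stack_end.)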

	call	verify_cpu	# Verify the cpu supports long mode
	testl	%eax, %eax	# Check for return code
	jnz	no_longmode

	mov	%cs, %ax
	movzx	%ax, %esi	# Find the 32bit trampoline location
	shll	$4, %esi
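	# (In real mode a segment's physical base is segment << 4, so %esi
	#  now holds the physical address the trampoline was copied to,
	#  e.g. 0x90000 if CS happened to be 0x9000; the actual value is
	#  whichever low-memory location the trampoline was placed at.)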

	# Fixup the absolute vectors
	leal	(startup_32 - r_base)(%esi), %eax
	movl	%eax, startup_32_vector - r_base
	leal	(startup_64 - r_base)(%esi), %eax
	movl	%eax, startup_64_vector - r_base
	leal	(tgdt - r_base)(%esi), %eax
	movl	%eax, (tgdt + 2 - r_base)
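	# (tgdt + 2 is the 32-bit base field of the GDT pseudo-descriptor;
	#  lgdt needs the linear (here physical) address of the table, so
	#  the r_base-relative value is patched with the trampoline base.)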

	/*
	 * With the kernel in a non-default location the GDT can end up
	 * beyond 16MB, and lgdt will not be able to load the address, as
	 * the default operand size in real mode is 16 bit.  Use lgdtl
	 * instead to force the operand size to 32 bit.
	 */

	lidtl	tidt - r_base	# load idt with 0, 0
	lgdtl	tgdt - r_base	# load gdt with whatever is appropriate
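	# (An IDT with limit 0 means any interrupt or exception taken here
	#  would triple-fault; maskable interrupts are off thanks to the
	#  cli above.)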

	mov	$X86_CR0_PE, %ax	# protected mode (PE) bit
	lmsw	%ax			# into protected mode
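	# (lmsw can only modify the low CR0 flags (PE, MP, EM, TS) and
	#  cannot clear PE; setting PE is all that is needed here.)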

	# flush prefetch and jump to startup_32
	ljmpl	*(startup_32_vector - r_base)

	.code32
	.balign 4
startup_32:
	movl	$__KERNEL_DS, %eax	# Initialize the %ds segment register
	movl	%eax, %ds

	movl	$X86_CR4_PAE, %eax
	movl	%eax, %cr4		# Enable PAE mode
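	# (CR4.PAE is an architectural prerequisite for long mode; the
	#  4-level page tables used below are 64-bit PAE-format tables.)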

	# Setup trampoline 4 level pagetables
	leal	(trampoline_level4_pgt - r_base)(%esi), %eax
	movl	%eax, %cr3
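	# (CR3 must hold the physical address of the top-level page table,
	#  hence the same %esi fixup as for the vectors above.)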

	movl	$MSR_EFER, %ecx
	movl	$(1 << _EFER_LME), %eax	# Enable Long Mode
	xorl	%edx, %edx
	wrmsr
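	# (wrmsr takes the MSR index in %ecx and the value in %edx:%eax;
	#  %edx is cleared so the upper 32 bits of EFER are written as 0.)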

	# Enable paging and in turn activate Long Mode
	# Enable protected mode
	movl	$(X86_CR0_PG | X86_CR0_PE), %eax
	movl	%eax, %cr0

	/*
	 * At this point we're in long mode but in 32bit compatibility mode
	 * with EFER.LME = 1, CS.L = 0, CS.D = 1 (and in turn
	 * EFER.LMA = 1).  Now we want to jump into 64-bit mode; to do that
	 * we use the new gdt/idt that has __KERNEL_CS with CS.L = 1.
	 */
	ljmp	*(startup_64_vector - r_base)(%esi)

	.code64
	.balign 4
startup_64:
	# Now jump into the kernel using virtual addresses
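	# (A direct relative jmp would encode a displacement computed for
	#  the address this file was linked at, not for the low-memory copy
	#  we are running from, so load the absolute kernel-virtual address
	#  and jump through a register instead.)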
	movq	$secondary_startup_64, %rax
	jmp	*%rax

	.code16
no_longmode:
	hlt
	jmp no_longmode
#include "verify_cpu.S"

	.balign 4
	# Careful: these need to be in the same 64K segment as the above
tidt:
	.word	0			# idt limit = 0
	.word	0, 0			# idt base = 0L

	# Duplicate the global descriptor table
	# so the kernel can live anywhere
	.balign	4
tgdt:
	.short	tgdt_end - tgdt		# gdt limit
	.long	tgdt - r_base
	.short	0
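	# (The three words above double as the pseudo-descriptor for lgdtl:
	#  a 16-bit limit plus a 32-bit base, overlaying the never-used
	#  null descriptor slot.  The base is patched at runtime with
	#  tgdt's physical address.)
	#
	# (The descriptors below: 0x00cf9b... is a flat 4GB ring-0 32-bit
	#  code segment (G=1, D=1), 0x00af9b... the same but with L=1 for
	#  64-bit code, and 0x00cf93... a flat ring-0 data segment.)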
	.quad	0x00cf9b000000ffff	# __KERNEL32_CS
	.quad	0x00af9b000000ffff	# __KERNEL_CS
	.quad	0x00cf93000000ffff	# __KERNEL_DS
tgdt_end:

	.balign	4
startup_32_vector:
	.long	startup_32 - r_base
	.word	__KERNEL32_CS, 0

	.balign	4
startup_64_vector:
	.long	startup_64 - r_base
	.word	__KERNEL_CS, 0

	.balign	4
fixup_base:
	.long	0
ENTRY(trampoline_status)
	.long	0

trampoline_stack:
	.org 0x1000
trampoline_stack_end:
ENTRY(trampoline_level4_pgt)
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.fill	510,8,0
	.quad	level3_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
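	# (Entry 0 points at level3_ident_pgt, the identity mapping of low
	#  memory; entry 511 points at level3_kernel_pgt, which maps the
	#  kernel at __START_KERNEL_map.  Subtracting __START_KERNEL_map
	#  turns the link-time virtual addresses into physical ones, and
	#  _KERNPG_TABLE adds the present/writable page-table flags.)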

ENTRY(trampoline_end)