/*
 * [PATCH] x86-64: Extend bzImage protocol for relocatable bzImage
 *
 * File: arch/x86_64/boot/compressed/head.S
 * (git blame view; base commit 1da177e4, Linus Torvalds)
 */
1/*
2 * linux/boot/head.S
3 *
4 * Copyright (C) 1991, 1992, 1993 Linus Torvalds
1da177e4
LT
5 */
6
7/*
8 * head.S contains the 32-bit startup code.
9 *
10 * NOTE!!! Startup happens at absolute address 0x00001000, which is also where
11 * the page directory will exist. The startup code will be overwritten by
12 * the page directory. [According to comments etc elsewhere on a compressed
13 * kernel it will end up at 0x1000 + 1Mb I hope so as I assume this. - AC]
14 *
 * Page 0 is deliberately kept safe, since System Management Mode code in
 * laptops may need to access the BIOS data stored there.  This is also
 * useful for future device drivers that access the BIOS via VM86 mode.
 */
20
21/*
f4549448 22 * High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996
1da177e4
LT
23 */
24.code32
25.text
26
27#include <linux/linkage.h>
28#include <asm/segment.h>
1ab60e0f 29#include <asm/pgtable.h>
d0537508 30#include <asm/page.h>
1ab60e0f 31#include <asm/msr.h>
1da177e4 32
1ab60e0f 33.section ".text.head"
1da177e4
LT
34 .code32
35 .globl startup_32
1ab60e0f 36
1da177e4
LT
37startup_32:
38 cld
39 cli
1ab60e0f
VG
40 movl $(__KERNEL_DS), %eax
41 movl %eax, %ds
42 movl %eax, %es
43 movl %eax, %ss
44
45/* Calculate the delta between where we were compiled to run
46 * at and where we were actually loaded at. This can only be done
47 * with a short local call on x86. Nothing else will tell us what
48 * address we are running at. The reserved chunk of the real-mode
49 * data at 0x34-0x3f are used as the stack for this calculation.
50 * Only 4 bytes are needed.
51 */
52 leal 0x40(%esi), %esp
53 call 1f
541: popl %ebp
55 subl $1b, %ebp
56
57/* Compute the delta between where we were compiled to run at
58 * and where the code will actually run at.
59 */
60/* %ebp contains the address we are loaded at by the boot loader and %ebx
61 * contains the address where we should move the kernel image temporarily
62 * for safe in-place decompression.
63 */
64
65#ifdef CONFIG_RELOCATABLE
66 movl %ebp, %ebx
67 addl $(LARGE_PAGE_SIZE -1), %ebx
68 andl $LARGE_PAGE_MASK, %ebx
69#else
70 movl $CONFIG_PHYSICAL_START, %ebx
71#endif
72
73 /* Replace the compressed data size with the uncompressed size */
74 subl input_len(%ebp), %ebx
75 movl output_len(%ebp), %eax
76 addl %eax, %ebx
77 /* Add 8 bytes for every 32K input block */
78 shrl $12, %eax
79 addl %eax, %ebx
80 /* Add 32K + 18 bytes of extra slack and align on a 4K boundary */
81 addl $(32768 + 18 + 4095), %ebx
82 andl $~4095, %ebx
1da177e4
LT
83
84/*
1ab60e0f 85 * Prepare for entering 64 bit mode
1da177e4 86 */
1ab60e0f
VG
87
88 /* Load new GDT with the 64bit segments using 32bit descriptor */
89 leal gdt(%ebp), %eax
90 movl %eax, gdt+2(%ebp)
91 lgdt gdt(%ebp)
92
93 /* Enable PAE mode */
94 xorl %eax, %eax
95 orl $(1 << 5), %eax
96 movl %eax, %cr4
97
98 /*
99 * Build early 4G boot pagetable
100 */
101 /* Initialize Page tables to 0*/
102 leal pgtable(%ebx), %edi
103 xorl %eax, %eax
104 movl $((4096*6)/4), %ecx
105 rep stosl
106
107 /* Build Level 4 */
108 leal pgtable + 0(%ebx), %edi
109 leal 0x1007 (%edi), %eax
110 movl %eax, 0(%edi)
111
112 /* Build Level 3 */
113 leal pgtable + 0x1000(%ebx), %edi
114 leal 0x1007(%edi), %eax
115 movl $4, %ecx
1161: movl %eax, 0x00(%edi)
117 addl $0x00001000, %eax
118 addl $8, %edi
119 decl %ecx
120 jnz 1b
121
122 /* Build Level 2 */
123 leal pgtable + 0x2000(%ebx), %edi
124 movl $0x00000183, %eax
125 movl $2048, %ecx
1261: movl %eax, 0(%edi)
127 addl $0x00200000, %eax
128 addl $8, %edi
129 decl %ecx
130 jnz 1b
131
132 /* Enable the boot page tables */
133 leal pgtable(%ebx), %eax
134 movl %eax, %cr3
135
136 /* Enable Long mode in EFER (Extended Feature Enable Register) */
137 movl $MSR_EFER, %ecx
138 rdmsr
139 btsl $_EFER_LME, %eax
140 wrmsr
141
142 /* Setup for the jump to 64bit mode
143 *
144 * When the jump is performend we will be in long mode but
145 * in 32bit compatibility mode with EFER.LME = 1, CS.L = 0, CS.D = 1
146 * (and in turn EFER.LMA = 1). To jump into 64bit mode we use
147 * the new gdt/idt that has __KERNEL_CS with CS.L = 1.
148 * We place all of the values on our mini stack so lret can
149 * used to perform that far jump.
150 */
151 pushl $__KERNEL_CS
152 leal startup_64(%ebp), %eax
153 pushl %eax
154
155 /* Enter paged protected Mode, activating Long Mode */
156 movl $0x80000001, %eax /* Enable Paging and Protected mode */
157 movl %eax, %cr0
158
159 /* Jump from 32bit compatibility mode into 64bit mode. */
160 lret
161
162 /* Be careful here startup_64 needs to be at a predictable
163 * address so I can export it in an ELF header. Bootloaders
164 * should look at the ELF header to find this address, as
165 * it may change in the future.
166 */
167 .code64
168 .org 0x100
169ENTRY(startup_64)
170 /* We come here either from startup_32 or directly from a
171 * 64bit bootloader. If we come here from a bootloader we depend on
172 * an identity mapped page table being provied that maps our
173 * entire text+data+bss and hopefully all of memory.
174 */
175
176 /* Setup data segments. */
177 xorl %eax, %eax
178 movl %eax, %ds
179 movl %eax, %es
180 movl %eax, %ss
181
182 /* Compute the decompressed kernel start address. It is where
183 * we were loaded at aligned to a 2M boundary. %rbp contains the
184 * decompressed kernel start address.
185 *
186 * If it is a relocatable kernel then decompress and run the kernel
187 * from load address aligned to 2MB addr, otherwise decompress and
188 * run the kernel from CONFIG_PHYSICAL_START
189 */
190
191 /* Start with the delta to where the kernel will run at. */
192#ifdef CONFIG_RELOCATABLE
193 leaq startup_32(%rip) /* - $startup_32 */, %rbp
194 addq $(LARGE_PAGE_SIZE - 1), %rbp
195 andq $LARGE_PAGE_MASK, %rbp
196 movq %rbp, %rbx
197#else
198 movq $CONFIG_PHYSICAL_START, %rbp
199 movq %rbp, %rbx
200#endif
201
202 /* Replace the compressed data size with the uncompressed size */
203 movl input_len(%rip), %eax
204 subq %rax, %rbx
205 movl output_len(%rip), %eax
206 addq %rax, %rbx
207 /* Add 8 bytes for every 32K input block */
208 shrq $12, %rax
209 addq %rax, %rbx
210 /* Add 32K + 18 bytes of extra slack and align on a 4K boundary */
211 addq $(32768 + 18 + 4095), %rbx
212 andq $~4095, %rbx
213
214/* Copy the compressed kernel to the end of our buffer
215 * where decompression in place becomes safe.
216 */
217 leaq _end(%rip), %r8
218 leaq _end(%rbx), %r9
219 movq $_end /* - $startup_32 */, %rcx
2201: subq $8, %r8
221 subq $8, %r9
222 movq 0(%r8), %rax
223 movq %rax, 0(%r9)
224 subq $8, %rcx
225 jnz 1b
226
227/*
228 * Jump to the relocated address.
229 */
230 leaq relocated(%rbx), %rax
231 jmp *%rax
232
233.section ".text"
234relocated:
235
1da177e4
LT
236/*
237 * Clear BSS
238 */
1ab60e0f
VG
239 xorq %rax, %rax
240 leaq _edata(%rbx), %rdi
241 leaq _end(%rbx), %rcx
242 subq %rdi, %rcx
1da177e4
LT
243 cld
244 rep
245 stosb
1ab60e0f
VG
246
247 /* Setup the stack */
248 leaq user_stack_end(%rip), %rsp
249
250 /* zero EFLAGS after setting rsp */
251 pushq $0
252 popfq
253
1da177e4
LT
254/*
255 * Do the decompression, and jump to the new kernel..
256 */
1ab60e0f
VG
257 pushq %rsi # Save the real mode argument
258 movq %rsi, %rdi # real mode address
259 leaq _heap(%rip), %rsi # _heap
260 leaq input_data(%rip), %rdx # input_data
261 movl input_len(%rip), %eax
262 movq %rax, %rcx # input_len
263 movq %rbp, %r8 # output
264 call decompress_kernel
265 popq %rsi
1da177e4 266
1da177e4
LT
267
268/*
1ab60e0f 269 * Jump to the decompressed kernel.
1da177e4 270 */
1ab60e0f 271 jmp *%rbp
1da177e4 272
1ab60e0f
VG
273 .data
274gdt:
275 .word gdt_end - gdt
276 .long gdt
277 .word 0
278 .quad 0x0000000000000000 /* NULL descriptor */
279 .quad 0x00af9a000000ffff /* __KERNEL_CS */
280 .quad 0x00cf92000000ffff /* __KERNEL_DS */
281gdt_end:
282 .bss
283/* Stack for uncompression */
284 .balign 4
285user_stack:
1da177e4 286 .fill 4096,4,0
1ab60e0f 287user_stack_end:
/* (web-viewer footer) This page took 0.229009 seconds and 5 git commands to generate. */