4 * Copyright (C) 1991, 1992, 1993 Linus Torvalds
8 * head.S contains the 32-bit startup code.
10 * NOTE!!! Startup happens at absolute address 0x00001000, which is also where
11 * the page directory will exist. The startup code will be overwritten by
12 * the page directory. [According to comments etc elsewhere on a compressed
13 * kernel it will end up at 0x1000 + 1Mb I hope so as I assume this. - AC]
15 * Page 0 is deliberately kept safe, since System Management Mode code in
16 * laptops may need to access the BIOS data stored there. This is also
17 * useful for future device drivers that either access the BIOS via VM86
22 * High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996
26 #include <linux/init.h>
27 #include <linux/linkage.h>
28 #include <asm/segment.h>
29 #include <asm/page_types.h>
31 #include <asm/asm-offsets.h>
35 #ifdef CONFIG_EFI_STUB
37 * We don't need the return address, so set up the stack so
38 * efi_main() can find its arguments.
# Compute how far the boot loader placed us from the kernel's
# preferred load address, then derive where the 32-bit entry code
# now lives. %esi holds the boot_params pointer (BP_* offsets come
# from asm-offsets); %eax presumably holds the actual load address
# here — the preceding setup is not visible in this excerpt.
50 subl BP_pref_address(%esi), %eax
51 add BP_code32_start(%esi), %eax
# NOTE(review): `add` carries no size suffix while the neighbouring
# ops use `subl`/`leal`; the size is inferred from %eax so this
# assembles identically, but `addl` would be more consistent.
52 leal preferred_addr(%eax), %eax
59 * Test KEEP_SEGMENTS flag to see if the bootloader is asking
60 * us to not reload segments
# Bit 6 of boot_params.hdr.loadflags is the KEEP_SEGMENTS flag
# (per the x86 boot protocol); the conditional jump consuming this
# test is not visible in this excerpt.
62 testb $(1<<6), BP_loadflags(%esi)
75 * Calculate the delta between where we were compiled to run
76 * at and where we were actually loaded at. This can only be done
77 * with a short local call on x86. Nothing else will tell us what
78 * address we are running at. The reserved chunk of the real-mode
79 * data at 0x1e4 (defined as a scratch field) is used as the stack
80 * for this calculation. Only 4 bytes are needed.
# Point %esp just PAST the 4-byte scratch field so a single push
# (the return address of a short local call) lands inside it. The
# call/pop pair that actually captures the runtime address is not
# visible in this excerpt.
82 leal (BP_scratch+4)(%esi), %esp
88 * %ebp contains the address we are loaded at by the boot loader and %ebx
89 * contains the address where we should move the kernel image temporarily
90 * for safe in-place decompression.
93 #ifdef CONFIG_RELOCATABLE
# Relocatable kernel: start from the build-time alignment so the
# load address can be rounded up to it. The rounding instructions
# (and the #else introducing the branch below) are not visible in
# this excerpt.
95 movl BP_kernel_alignment(%esi), %eax
# Non-relocatable kernel (presumably the #else arm): always
# decompress at the compile-time physical address.
101 movl $LOAD_PHYSICAL_ADDR, %ebx
104 /* Target address to relocate to for decompression */
# z_extract_offset is generated at build time; adding it yields the
# temporary home of the compressed image so in-place decompression
# cannot overwrite not-yet-consumed input.
105 addl $z_extract_offset, %ebx
107 /* Set up the stack */
# Stack lives at the relocated address (boot_stack_end is defined
# near the end of this file; see the BOOT_STACK_SIZE fill).
108 leal boot_stack_end(%ebx), %esp
115 * Copy the compressed kernel to the end of our buffer
116 * where decompression in place becomes safe.
# Source/destination both point at the LAST dword of the image
# (_bss-4), with %ecx = whole image size in bytes — consistent with
# a backward copy, though the copy loop itself (direction flag,
# rep/movs or explicit loop) is not visible in this excerpt.
119 leal (_bss-4)(%ebp), %esi
120 leal (_bss-4)(%ebx), %edi
121 movl $(_bss - startup_32), %ecx
129 * Jump to the relocated address.
# Compute the run-time address of the `relocated` label; the
# indirect jump through %eax is not visible in this excerpt.
131 leal relocated(%ebx), %eax
139 * Clear BSS (stack is currently empty)
# %edi = first byte of BSS, %ecx = one past the end, both at the
# relocated address; the zeroing loop is not visible in this
# excerpt.
142 leal _bss(%ebx), %edi
143 leal _ebss(%ebx), %ecx
# Set up bounds for adjusting the GOT: entries must be rebased by
# the load offset so absolute references resolve at the actual
# load address. The fixup loop is not visible in this excerpt.
151 leal _got(%ebx), %edx
152 leal _egot(%ebx), %ecx
162 * Do the decompression, and jump to the new kernel..
# z_extract_offset_negative is presumably the build-time negation
# of z_extract_offset (TODO confirm against mkpiggy output), making
# %ebp the final decompression destination.
164 leal z_extract_offset_negative(%ebx), %ebp
165 /* push arguments for decompress_kernel: */
# 32-bit cdecl call: five arguments pushed right-to-left.
166 pushl %ebp /* output address */
167 pushl $z_input_len /* input_len */
168 leal input_data(%ebx), %eax
169 pushl %eax /* input_data */
170 leal boot_heap(%ebx), %eax
171 pushl %eax /* heap area */
172 pushl %esi /* real mode pointer */
173 call decompress_kernel
176 #if CONFIG_RELOCATABLE
# NOTE(review): the earlier guard in this file uses
# `#ifdef CONFIG_RELOCATABLE` while this one uses `#if`. Both work
# (Kconfig defines enabled bools as 1, and an undefined macro
# evaluates to 0 under #if), but the inconsistency is worth
# unifying — confirm before changing preprocessor guards.
178 * Find the address of the relocations.
# The relocation table sits immediately after the decompressed
# image: %ebp = output address (set above), z_output_len = its
# size.
180 leal z_output_len(%ebp), %edi
183 * Calculate the delta between where vmlinux was compiled to run
184 * and where it was actually loaded.
# %ebx becomes the load delta; zero delta means no fixups needed.
187 subl $LOAD_PHYSICAL_ADDR, %ebx
188 jz 2f /* Nothing to be done if loaded at compiled addr. */
190 * Process relocations.
# %ecx presumably holds one relocation entry (a kernel virtual
# address) fetched by the loop head, which is not visible in this
# excerpt. Subtracting __PAGE_OFFSET converts it to the physical
# compile-time address; adding the %ebx base reaches the word at
# its loaded location, which is then rebased by the delta.
197 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
203 * Jump to the decompressed kernel.
209 * Stack and heap for decompression
# Zero-filled storage consumed above: the heap pointer pushed as
# decompress_kernel's "heap area" argument and the boot stack set
# into %esp. The boot_heap/boot_stack(_end) labels and section
# directives around these fills are not visible in this excerpt.
214 .fill BOOT_HEAP_SIZE, 1, 0
216 .fill BOOT_STACK_SIZE, 1, 0