/*
 * ld script for the x86 kernel
 *
 * Historic 32-bit version written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 *
 * Modernisation, unification and other changes and fixes:
 *   Copyright (C) 2007-2009 Sam Ravnborg <sam@ravnborg.org>
 *
 * Don't define absolute symbols until and unless you know that the symbol
 * value should remain constant even if the kernel image is relocated
 * at run time. Absolute symbols are not relocated. If the symbol value
 * should change when the kernel is relocated, make the symbol
 * section-relative and put it inside the section definition.
 */
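/*
 * A minimal sketch of the rule above (foo/bar are hypothetical symbols,
 * not part of this script): the first symbol keeps its value when the
 * image is relocated, the second moves with its section.
 *
 *	phys_foo = ABSOLUTE(foo - LOAD_OFFSET);	<- absolute, never relocated
 *	.data : {
 *		bar = .;			<- section-relative, relocated
 *	}
 */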
#ifdef CONFIG_X86_32
#define LOAD_OFFSET __PAGE_OFFSET
#else
#define LOAD_OFFSET __START_KERNEL_map
#endif
#include <asm-generic/vmlinux.lds.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page_types.h>
#include <asm/cache.h>
#include <asm/boot.h>
#undef i386	/* in case the preprocessor is a 32-bit one */

OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT)
#ifdef CONFIG_X86_32
OUTPUT_ARCH(i386)
ENTRY(phys_startup_32)
jiffies = jiffies_64;
#else
OUTPUT_ARCH(i386:x86-64)
ENTRY(phys_startup_64)
jiffies_64 = jiffies;
#endif
#if defined(CONFIG_X86_64)
/*
 * On 64-bit, align RODATA to 2MB so we retain large page mappings for
 * boundaries spanning kernel text, rodata and data sections.
 *
 * However, kernel identity mappings will have different RWX permissions
 * from the pages mapping the text section and from the padding pages
 * (which are freed) at its end. Hence kernel identity mappings will be
 * broken down to smaller pages. For 64-bit, kernel text and kernel
 * identity mappings are different, so we can enable protection checks
 * as well as retain 2MB large page mappings for kernel text.
 */
#define X64_ALIGN_RODATA_BEGIN	. = ALIGN(HPAGE_SIZE);

#define X64_ALIGN_RODATA_END		\
		. = ALIGN(HPAGE_SIZE);	\
		__end_rodata_hpage_align = .;

#else

#define X64_ALIGN_RODATA_BEGIN
#define X64_ALIGN_RODATA_END

#endif
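/*
 * Usage sketch: the pair is meant to bracket the read-only data (as is
 * done below) so that on 64-bit it both starts and ends on a 2MB
 * (HPAGE_SIZE) boundary:
 *
 *	X64_ALIGN_RODATA_BEGIN
 *	RO_DATA(PAGE_SIZE)
 *	X64_ALIGN_RODATA_END
 */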
PHDRS {
	text PT_LOAD FLAGS(5);		/* R_E */
	data PT_LOAD FLAGS(6);		/* RW_ */
#ifdef CONFIG_X86_64
#ifdef CONFIG_SMP
	percpu PT_LOAD FLAGS(6);	/* RW_ */
#endif
	init PT_LOAD FLAGS(7);		/* RWE */
#endif
	note PT_NOTE FLAGS(0);		/* ___ */
}
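/*
 * Decoding aid for the FLAGS() values above: they are ELF p_flags bits,
 * PF_X = 1, PF_W = 2, PF_R = 4, so 5 = R_E, 6 = RW_, 7 = RWE and
 * 0 = ___. A hypothetical read-only segment, for instance, would read:
 *
 *	rodata PT_LOAD FLAGS(4);	R__
 */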
SECTIONS
{
#ifdef CONFIG_X86_32
	. = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
	phys_startup_32 = ABSOLUTE(startup_32 - LOAD_OFFSET);
#else
	. = __START_KERNEL;
	phys_startup_64 = ABSOLUTE(startup_64 - LOAD_OFFSET);
#endif
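	/*
	 * Worked example, assuming the common 32-bit defaults of
	 * __PAGE_OFFSET = 0xC0000000 and LOAD_PHYSICAL_ADDR = 0x1000000:
	 * startup_32 links at 0xC1000000, so phys_startup_32 resolves to
	 * physical address 0x1000000 and keeps that value even if the
	 * image is relocated - the absolute-symbol rule from the header.
	 */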
	/* Text and read-only data */
	.text : AT(ADDR(.text) - LOAD_OFFSET) {
		_text = .;
		/* bootstrapping code */
		HEAD_TEXT
		. = ALIGN(8);
		TEXT_TEXT
		SCHED_TEXT
		CPUIDLE_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		ENTRY_TEXT
		IRQENTRY_TEXT
		SOFTIRQENTRY_TEXT
		*(.fixup)
		*(.gnu.warning)
		/* End of text section */
		_etext = .;
	} :text = 0x9090

	NOTES :text :note

	EXCEPTION_TABLE(16) :text = 0x9090
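	/*
	 * Note on the "= 0x9090" output-section fill above: any gaps the
	 * linker leaves in these sections are filled with 0x90 bytes, the
	 * x86 NOP opcode, so padding decodes as harmless no-ops instead of
	 * garbage.
	 */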
	/* .text should occupy a whole number of pages */
	. = ALIGN(PAGE_SIZE);
	X64_ALIGN_RODATA_BEGIN
	RO_DATA(PAGE_SIZE)
	X64_ALIGN_RODATA_END
	/* Data */
	.data : AT(ADDR(.data) - LOAD_OFFSET) {
		/* Start of data section */
		_sdata = .;

		/* init_task */
		INIT_TASK_DATA(THREAD_SIZE)

#ifdef CONFIG_X86_32
		/* 32 bit has nosave before _edata */
		NOSAVE_DATA
#endif

		PAGE_ALIGNED_DATA(PAGE_SIZE)

		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)

		DATA_DATA
		CONSTRUCTORS

		/* rarely changed data like cpu maps */
		READ_MOSTLY_DATA(INTERNODE_CACHE_BYTES)

		/* End of data section */
		_edata = .;
	} :data

	. = ALIGN(PAGE_SIZE);
	__vvar_page = .;
	.vvar : AT(ADDR(.vvar) - LOAD_OFFSET) {
		/* work around gold bug 13023 */
		__vvar_beginning_hack = .;

		/* Place all vvars at the offsets in asm/vvar.h. */
#define EMIT_VVAR(name, offset)				\
		. = __vvar_beginning_hack + offset;	\
		*(.vvar_ ## name)
#define __VVAR_KERNEL_LDS
#include <asm/vvar.h>
#undef __VVAR_KERNEL_LDS
#undef EMIT_VVAR

		/*
		 * Pad the rest of the page with zeros. Otherwise the loader
		 * can leave garbage here.
		 */
		. = __vvar_beginning_hack + PAGE_SIZE;
	} :data

	. = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE);
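	/*
	 * How the EMIT_VVAR() dance works, assuming asm/vvar.h carries an
	 * entry like (name and offset hypothetical):
	 *
	 *	EMIT_VVAR(example_data, 128)
	 *
	 * With __VVAR_KERNEL_LDS defined it expands here to
	 *
	 *	. = __vvar_beginning_hack + 128;
	 *	*(.vvar_example_data)
	 *
	 * pinning each vvar's input section at a fixed offset inside the
	 * single vvar page.
	 */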
	/* Init code and data - will be freed after init */
	. = ALIGN(PAGE_SIZE);
	.init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
		__init_begin = .; /* paired with __init_end */
	}
#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
	/*
	 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
	 * output PHDR, so the next output section - .init.text - should
	 * start another segment - init.
	 */
	PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
	ASSERT(SIZEOF(.data..percpu) < CONFIG_PHYSICAL_START,
	       "per-CPU data too large - increase CONFIG_PHYSICAL_START")
#endif
	INIT_TEXT_SECTION(PAGE_SIZE)
	/*
	 * Section for code used exclusively before alternatives are run. All
	 * references to such code must be patched out by alternatives, normally
	 * by using the X86_FEATURE_ALWAYS CPU feature bit.
	 *
	 * See static_cpu_has() for an example.
	 */
	.altinstr_aux : AT(ADDR(.altinstr_aux) - LOAD_OFFSET) {
		*(.altinstr_aux)
	}
	INIT_DATA_SECTION(16)

	.x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
		__x86_cpu_dev_start = .;
		*(.x86_cpu_dev.init)
		__x86_cpu_dev_end = .;
	}
#ifdef CONFIG_X86_INTEL_MID
	.x86_intel_mid_dev.init : AT(ADDR(.x86_intel_mid_dev.init) - \
								LOAD_OFFSET) {
		__x86_intel_mid_dev_start = .;
		*(.x86_intel_mid_dev.init)
		__x86_intel_mid_dev_end = .;
	}
#endif
	/*
	 * Start address and size of operations which during runtime
	 * can be patched with virtualization-friendly instructions or
	 * baremetal native ones. Think page table operations.
	 * Details in paravirt_types.h.
	 */
	. = ALIGN(8);
	.parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) {
		__parainstructions = .;
		*(.parainstructions)
		__parainstructions_end = .;
	}
	/*
	 * struct alt_instr entries. From the header (alternative.h):
	 * "Alternative instructions for different CPU types or capabilities"
	 * Think locking instructions on spinlocks.
	 */
	. = ALIGN(8);
	.altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
		__alt_instructions = .;
		*(.altinstructions)
		__alt_instructions_end = .;
	}
	/*
	 * And here are the replacement instructions. The linker sticks them
	 * in as binary blobs. The .altinstructions section has enough data
	 * to get their address and length, to patch the kernel safely.
	 */
	.altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) {
		*(.altinstr_replacement)
	}
	/*
	 * struct iommu_table_entry entries are injected into this section.
	 * It is an array of IOMMUs which at run time gets sorted into
	 * dependency order. After rootfs_initcall is complete
	 * this section can be safely removed.
	 */
	.iommu_table : AT(ADDR(.iommu_table) - LOAD_OFFSET) {
		__iommu_table = .;
		*(.iommu_table)
		__iommu_table_end = .;
	}
	. = ALIGN(8);
	.apicdrivers : AT(ADDR(.apicdrivers) - LOAD_OFFSET) {
		__apicdrivers = .;
		*(.apicdrivers);
		__apicdrivers_end = .;
	}
	. = ALIGN(8);
	/*
	 * .exit.text is discarded at runtime, not link time, to deal with
	 * references from .altinstructions and .eh_frame
	 */
	.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
		EXIT_TEXT
	}

	.exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
		EXIT_DATA
	}
#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
	PERCPU_SECTION(INTERNODE_CACHE_BYTES)
#endif

	. = ALIGN(PAGE_SIZE);
	/* freed after init ends here */
	.init.end : AT(ADDR(.init.end) - LOAD_OFFSET) {
		__init_end = .;
	}
	/*
	 * smp_locks might be freed after init
	 * start/end must be page aligned
	 */
	. = ALIGN(PAGE_SIZE);
	.smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
		__smp_locks = .;
		*(.smp_locks)
		. = ALIGN(PAGE_SIZE);
		__smp_locks_end = .;
	}
#ifdef CONFIG_X86_64
	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
		NOSAVE_DATA
	}
#endif
	/* BSS */
	. = ALIGN(PAGE_SIZE);
	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {
		__bss_start = .;
		*(.bss..page_aligned)
		*(.bss)
		. = ALIGN(PAGE_SIZE);
		__bss_stop = .;
	}
	. = ALIGN(PAGE_SIZE);
	.brk : AT(ADDR(.brk) - LOAD_OFFSET) {
		__brk_base = .;
		. += 64 * 1024;		/* 64k alignment slop space */
		*(.brk_reservation)	/* areas brk users have reserved */
		__brk_limit = .;
	}
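	/*
	 * Sketch of how .brk is populated: C code reserves early heap
	 * space with RESERVE_BRK(name, size) from asm/setup.h, which emits
	 * a block into .brk_reservation, so the region spans
	 *
	 *	__brk_base .. __brk_base + 64k slop + reservations = __brk_limit
	 */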
	. = ALIGN(PAGE_SIZE);		/* keep VO_INIT_SIZE page aligned */
	_end = .;

	STABS_DEBUG
	DWARF_DEBUG
	/* Sections to be discarded */
	DISCARDS
	/DISCARD/ : {
		*(.eh_frame)
		*(__func_stack_frame_non_standard)
	}
}
#ifdef CONFIG_X86_32
/*
 * The ASSERT() sink to . is intentional, for binutils 2.14 compatibility:
 */
. = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
	   "kernel image bigger than KERNEL_IMAGE_SIZE");
#else
/*
 * Per-cpu symbols which need to be offset from __per_cpu_load
 * for the boot processor.
 */
#define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
INIT_PER_CPU(gdt_page);
INIT_PER_CPU(irq_stack_union);
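/*
 * Expansion sketch: INIT_PER_CPU(gdt_page) above becomes
 *
 *	init_per_cpu__gdt_page = gdt_page + __per_cpu_load;
 *
 * turning the zero-based per-cpu symbol into an address the boot CPU can
 * use before the per-cpu areas are set up.
 */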
/*
 * Build-time check on the image size:
 */
. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
	   "kernel image bigger than KERNEL_IMAGE_SIZE");
#ifdef CONFIG_SMP
. = ASSERT((irq_stack_union == 0),
	   "irq_stack_union is not at start of per-cpu area");
#endif

#endif /* CONFIG_X86_32 */
#ifdef CONFIG_KEXEC_CORE
#include <asm/kexec.h>

. = ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE,
	   "kexec control code size is too big");
#endif