include/asm-generic/vmlinux.lds.h
/*
 * Helper macros to support writing architecture specific
 * linker scripts.
 *
 * A minimal linker script has the following content:
 * [This is a sample, architectures may have special requirements]
 *
 * OUTPUT_FORMAT(...)
 * OUTPUT_ARCH(...)
 * ENTRY(...)
 * SECTIONS
 * {
 *	. = START;
 *	__init_begin = .;
 *	HEAD_TEXT_SECTION
 *	INIT_TEXT_SECTION(PAGE_SIZE)
 *	INIT_DATA_SECTION(...)
 *	PERCPU_SECTION(CACHELINE_SIZE)
 *	__init_end = .;
 *
 *	_stext = .;
 *	TEXT_SECTION = 0
 *	_etext = .;
 *
 *	_sdata = .;
 *	RO_DATA_SECTION(PAGE_SIZE)
 *	RW_DATA_SECTION(...)
 *	_edata = .;
 *
 *	EXCEPTION_TABLE(...)
 *	NOTES
 *
 *	BSS_SECTION(0, 0, 0)
 *	_end = .;
 *
 *	STABS_DEBUG
 *	DWARF_DEBUG
 *
 *	DISCARDS		// must be the last
 * }
 *
 * [__init_begin, __init_end] is the init section that may be freed after init
 *	// __init_begin and __init_end should be page aligned, so that we can
 *	// free the whole .init memory
 * [_stext, _etext] is the text section
 * [_sdata, _edata] is the data section
 *
 * Some of the included output sections have their own set of constants.
 * Examples are: [__initramfs_start, __initramfs_end] for initramfs and
 * [__nosave_begin, __nosave_end] for the nosave data
 */

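/*
 * For illustration only, a sketch of how an architecture's
 * arch/<arch>/kernel/vmlinux.lds.S pulls in this header and fills in
 * the skeleton above (KERNEL_START and "myarch" are placeholders, not
 * names defined by this file):
 *
 *	#include <asm-generic/vmlinux.lds.h>
 *	#include <asm/page.h>
 *
 *	OUTPUT_ARCH(myarch)
 *	ENTRY(stext)
 *	SECTIONS
 *	{
 *		. = KERNEL_START;
 *		...
 *	}
 */
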
#ifndef LOAD_OFFSET
#define LOAD_OFFSET 0
#endif

#include <linux/export.h>

/* Align . to an 8 byte boundary, equal to the maximum function alignment. */
#define ALIGN_FUNCTION()  . = ALIGN(8)

/*
 * Align to a 32 byte boundary equal to the
 * alignment gcc 4.5 uses for a struct
 */
#define STRUCT_ALIGNMENT 32
#define STRUCT_ALIGN() . = ALIGN(STRUCT_ALIGNMENT)

/* The actual configuration determines whether the init/exit sections
 * are handled as text/data or whether they can be discarded (which
 * often happens at runtime)
 */
#ifdef CONFIG_HOTPLUG_CPU
#define CPU_KEEP(sec)    *(.cpu##sec)
#define CPU_DISCARD(sec)
#else
#define CPU_KEEP(sec)
#define CPU_DISCARD(sec) *(.cpu##sec)
#endif

#if defined(CONFIG_MEMORY_HOTPLUG)
#define MEM_KEEP(sec)    *(.mem##sec)
#define MEM_DISCARD(sec)
#else
#define MEM_KEEP(sec)
#define MEM_DISCARD(sec) *(.mem##sec)
#endif

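/*
 * A worked expansion of the token pasting above (a sketch, assuming
 * CONFIG_MEMORY_HOTPLUG=n): MEM_DISCARD(init.data) expands to
 * *(.meminit.data), so data annotated __meminitdata is swept into the
 * init data and freed after boot. With hotplug enabled, the same input
 * section is instead matched by MEM_KEEP(init.data) and kept resident.
 */
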
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
#define MCOUNT_REC()	. = ALIGN(8);				\
			VMLINUX_SYMBOL(__start_mcount_loc) = .; \
			*(__mcount_loc)				\
			VMLINUX_SYMBOL(__stop_mcount_loc) = .;
#else
#define MCOUNT_REC()
#endif

#ifdef CONFIG_TRACE_BRANCH_PROFILING
#define LIKELY_PROFILE()	VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \
				*(_ftrace_annotated_branch)			      \
				VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .;
#else
#define LIKELY_PROFILE()
#endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
#define BRANCH_PROFILE()	VMLINUX_SYMBOL(__start_branch_profile) = .; \
				*(_ftrace_branch)			    \
				VMLINUX_SYMBOL(__stop_branch_profile) = .;
#else
#define BRANCH_PROFILE()
#endif

#ifdef CONFIG_KPROBES
#define KPROBE_BLACKLIST()	. = ALIGN(8);				      \
				VMLINUX_SYMBOL(__start_kprobe_blacklist) = .; \
				*(_kprobe_blacklist)			      \
				VMLINUX_SYMBOL(__stop_kprobe_blacklist) = .;
#else
#define KPROBE_BLACKLIST()
#endif

#ifdef CONFIG_EVENT_TRACING
#define FTRACE_EVENTS()	. = ALIGN(8);					\
			VMLINUX_SYMBOL(__start_ftrace_events) = .;	\
			*(_ftrace_events)				\
			VMLINUX_SYMBOL(__stop_ftrace_events) = .;	\
			VMLINUX_SYMBOL(__start_ftrace_enum_maps) = .;	\
			*(_ftrace_enum_map)				\
			VMLINUX_SYMBOL(__stop_ftrace_enum_maps) = .;
#else
#define FTRACE_EVENTS()
#endif

#ifdef CONFIG_TRACING
#define TRACE_PRINTKS()	 VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .;      \
			 *(__trace_printk_fmt) /* Trace_printk fmt pointers */ \
			 VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
#define TRACEPOINT_STR() VMLINUX_SYMBOL(__start___tracepoint_str) = .;	\
			 *(__tracepoint_str) /* Tracepoint strings */	\
			 VMLINUX_SYMBOL(__stop___tracepoint_str) = .;
#else
#define TRACE_PRINTKS()
#define TRACEPOINT_STR()
#endif

#ifdef CONFIG_FTRACE_SYSCALLS
#define TRACE_SYSCALLS() . = ALIGN(8);					\
			 VMLINUX_SYMBOL(__start_syscalls_metadata) = .;	\
			 *(__syscalls_metadata)				\
			 VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
#else
#define TRACE_SYSCALLS()
#endif

#ifdef CONFIG_SERIAL_EARLYCON
#define EARLYCON_TABLE() STRUCT_ALIGN();			\
			 VMLINUX_SYMBOL(__earlycon_table) = .;	\
			 *(__earlycon_table)			\
			 VMLINUX_SYMBOL(__earlycon_table_end) = .;
#else
#define EARLYCON_TABLE()
#endif

#define ___OF_TABLE(cfg, name)	_OF_TABLE_##cfg(name)
#define __OF_TABLE(cfg, name)	___OF_TABLE(cfg, name)
#define OF_TABLE(cfg, name)	__OF_TABLE(config_enabled(cfg), name)
#define _OF_TABLE_0(name)
#define _OF_TABLE_1(name)						\
	. = ALIGN(8);							\
	VMLINUX_SYMBOL(__##name##_of_table) = .;			\
	*(__##name##_of_table)						\
	*(__##name##_of_table_end)

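/*
 * A worked expansion (a sketch, assuming the option is enabled):
 * OF_TABLE(CONFIG_CLKSRC_OF, clksrc) resolves config_enabled() to 1,
 * selects _OF_TABLE_1(clksrc), and produces:
 *
 *	. = ALIGN(8);
 *	VMLINUX_SYMBOL(__clksrc_of_table) = .;
 *	*(__clksrc_of_table)
 *	*(__clksrc_of_table_end)
 */
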
#define CLKSRC_OF_TABLES()	OF_TABLE(CONFIG_CLKSRC_OF, clksrc)
#define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip)
#define CLK_OF_TABLES()		OF_TABLE(CONFIG_COMMON_CLK, clk)
#define IOMMU_OF_TABLES()	OF_TABLE(CONFIG_OF_IOMMU, iommu)
#define RESERVEDMEM_OF_TABLES()	OF_TABLE(CONFIG_OF_RESERVED_MEM, reservedmem)
#define CPU_METHOD_OF_TABLES()	OF_TABLE(CONFIG_SMP, cpu_method)
#define CPUIDLE_METHOD_OF_TABLES() OF_TABLE(CONFIG_CPU_IDLE, cpuidle_method)

#ifdef CONFIG_ACPI
#define ACPI_PROBE_TABLE(name)						\
	. = ALIGN(8);							\
	VMLINUX_SYMBOL(__##name##_acpi_probe_table) = .;		\
	*(__##name##_acpi_probe_table)					\
	VMLINUX_SYMBOL(__##name##_acpi_probe_table_end) = .;
#else
#define ACPI_PROBE_TABLE(name)
#endif

#define KERNEL_DTB()							\
	STRUCT_ALIGN();							\
	VMLINUX_SYMBOL(__dtb_start) = .;				\
	*(.dtb.init.rodata)						\
	VMLINUX_SYMBOL(__dtb_end) = .;

/* .data section */
#define DATA_DATA							\
	*(.data)							\
	*(.ref.data)							\
	*(.data..shared_aligned) /* percpu related */			\
	MEM_KEEP(init.data)						\
	MEM_KEEP(exit.data)						\
	*(.data.unlikely)						\
	STRUCT_ALIGN();							\
	*(__tracepoints)						\
	/* implement dynamic printk debug */				\
	. = ALIGN(8);							\
	VMLINUX_SYMBOL(__start___jump_table) = .;			\
	*(__jump_table)							\
	VMLINUX_SYMBOL(__stop___jump_table) = .;			\
	. = ALIGN(8);							\
	VMLINUX_SYMBOL(__start___verbose) = .;				\
	*(__verbose)							\
	VMLINUX_SYMBOL(__stop___verbose) = .;				\
	LIKELY_PROFILE()						\
	BRANCH_PROFILE()						\
	TRACE_PRINTKS()							\
	TRACEPOINT_STR()

/*
 * Data section helpers
 */
#define NOSAVE_DATA							\
	. = ALIGN(PAGE_SIZE);						\
	VMLINUX_SYMBOL(__nosave_begin) = .;				\
	*(.data..nosave)						\
	. = ALIGN(PAGE_SIZE);						\
	VMLINUX_SYMBOL(__nosave_end) = .;

#define PAGE_ALIGNED_DATA(page_align)					\
	. = ALIGN(page_align);						\
	*(.data..page_aligned)

#define READ_MOSTLY_DATA(align)						\
	. = ALIGN(align);						\
	*(.data..read_mostly)						\
	. = ALIGN(align);

#define CACHELINE_ALIGNED_DATA(align)					\
	. = ALIGN(align);						\
	*(.data..cacheline_aligned)

#define INIT_TASK_DATA(align)						\
	. = ALIGN(align);						\
	VMLINUX_SYMBOL(__start_init_task) = .;				\
	*(.data..init_task)						\
	VMLINUX_SYMBOL(__end_init_task) = .;

/*
 * Read only Data
 */
#define RO_DATA_SECTION(align)						\
	. = ALIGN((align));						\
	.rodata           : AT(ADDR(.rodata) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start_rodata) = .;			\
		*(.rodata) *(.rodata.*)					\
		*(.data..ro_after_init)	/* Read only after init */	\
		*(__vermagic)		/* Kernel version magic */	\
		. = ALIGN(8);						\
		VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .;		\
		*(__tracepoints_ptrs)	/* Tracepoints: pointer array */\
		VMLINUX_SYMBOL(__stop___tracepoints_ptrs) = .;		\
		*(__tracepoints_strings)/* Tracepoints: strings */	\
	}								\
									\
	.rodata1          : AT(ADDR(.rodata1) - LOAD_OFFSET) {		\
		*(.rodata1)						\
	}								\
									\
	BUG_TABLE							\
									\
	/* PCI quirks */						\
	.pci_fixup        : AT(ADDR(.pci_fixup) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_pci_fixups_early) = .;		\
		*(.pci_fixup_early)					\
		VMLINUX_SYMBOL(__end_pci_fixups_early) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_header) = .;		\
		*(.pci_fixup_header)					\
		VMLINUX_SYMBOL(__end_pci_fixups_header) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_final) = .;		\
		*(.pci_fixup_final)					\
		VMLINUX_SYMBOL(__end_pci_fixups_final) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_enable) = .;		\
		*(.pci_fixup_enable)					\
		VMLINUX_SYMBOL(__end_pci_fixups_enable) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_resume) = .;		\
		*(.pci_fixup_resume)					\
		VMLINUX_SYMBOL(__end_pci_fixups_resume) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .;	\
		*(.pci_fixup_resume_early)				\
		VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .;	\
		VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .;		\
		*(.pci_fixup_suspend)					\
		VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_suspend_late) = .;	\
		*(.pci_fixup_suspend_late)				\
		VMLINUX_SYMBOL(__end_pci_fixups_suspend_late) = .;	\
	}								\
									\
	/* Built-in firmware blobs */					\
	.builtin_fw        : AT(ADDR(.builtin_fw) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_builtin_fw) = .;			\
		*(.builtin_fw)						\
		VMLINUX_SYMBOL(__end_builtin_fw) = .;			\
	}								\
									\
	TRACEDATA							\
									\
	/* Kernel symbol table: Normal symbols */			\
	__ksymtab         : AT(ADDR(__ksymtab) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___ksymtab) = .;			\
		*(SORT(___ksymtab+*))					\
		VMLINUX_SYMBOL(__stop___ksymtab) = .;			\
	}								\
									\
	/* Kernel symbol table: GPL-only symbols */			\
	__ksymtab_gpl     : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___ksymtab_gpl) = .;		\
		*(SORT(___ksymtab_gpl+*))				\
		VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .;		\
	}								\
									\
	/* Kernel symbol table: Normal unused symbols */		\
	__ksymtab_unused  : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___ksymtab_unused) = .;		\
		*(SORT(___ksymtab_unused+*))				\
		VMLINUX_SYMBOL(__stop___ksymtab_unused) = .;		\
	}								\
									\
	/* Kernel symbol table: GPL-only unused symbols */		\
	__ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .;	\
		*(SORT(___ksymtab_unused_gpl+*))			\
		VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .;	\
	}								\
									\
	/* Kernel symbol table: GPL-future-only symbols */		\
	__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .;	\
		*(SORT(___ksymtab_gpl_future+*))			\
		VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .;	\
	}								\
									\
	/* Kernel symbol table: Normal symbols */			\
	__kcrctab         : AT(ADDR(__kcrctab) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___kcrctab) = .;			\
		*(SORT(___kcrctab+*))					\
		VMLINUX_SYMBOL(__stop___kcrctab) = .;			\
	}								\
									\
	/* Kernel symbol table: GPL-only symbols */			\
	__kcrctab_gpl     : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___kcrctab_gpl) = .;		\
		*(SORT(___kcrctab_gpl+*))				\
		VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .;		\
	}								\
									\
	/* Kernel symbol table: Normal unused symbols */		\
	__kcrctab_unused  : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___kcrctab_unused) = .;		\
		*(SORT(___kcrctab_unused+*))				\
		VMLINUX_SYMBOL(__stop___kcrctab_unused) = .;		\
	}								\
									\
	/* Kernel symbol table: GPL-only unused symbols */		\
	__kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .;	\
		*(SORT(___kcrctab_unused_gpl+*))			\
		VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .;	\
	}								\
									\
	/* Kernel symbol table: GPL-future-only symbols */		\
	__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .;	\
		*(SORT(___kcrctab_gpl_future+*))			\
		VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .;	\
	}								\
									\
	/* Kernel symbol table: strings */				\
	__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) {	\
		*(__ksymtab_strings)					\
	}								\
									\
	/* __*init sections */						\
	__init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) {		\
		*(.ref.rodata)						\
		MEM_KEEP(init.rodata)					\
		MEM_KEEP(exit.rodata)					\
	}								\
									\
	/* Built-in module parameters. */				\
	__param : AT(ADDR(__param) - LOAD_OFFSET) {			\
		VMLINUX_SYMBOL(__start___param) = .;			\
		*(__param)						\
		VMLINUX_SYMBOL(__stop___param) = .;			\
	}								\
									\
	/* Built-in module versions. */					\
	__modver : AT(ADDR(__modver) - LOAD_OFFSET) {			\
		VMLINUX_SYMBOL(__start___modver) = .;			\
		*(__modver)						\
		VMLINUX_SYMBOL(__stop___modver) = .;			\
		. = ALIGN((align));					\
		VMLINUX_SYMBOL(__end_rodata) = .;			\
	}								\
	. = ALIGN((align));

/* RODATA & RO_DATA provided for backward compatibility.
 * All archs are supposed to use RO_DATA() */
#define RODATA          RO_DATA_SECTION(4096)
#define RO_DATA(align)  RO_DATA_SECTION(align)

#define SECURITY_INIT							\
	.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__security_initcall_start) = .;		\
		*(.security_initcall.init)				\
		VMLINUX_SYMBOL(__security_initcall_end) = .;		\
	}

/* .text section. Map to function alignment to avoid address changes
 * during the second ld run when generating System.map */
#define TEXT_TEXT							\
		ALIGN_FUNCTION();					\
		*(.text.hot .text .text.fixup .text.unlikely)		\
		*(.ref.text)						\
	MEM_KEEP(init.text)						\
	MEM_KEEP(exit.text)						\


/* sched.text is aligned to function alignment to ensure we have the same
 * address even at the second ld pass when generating System.map */
#define SCHED_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__sched_text_start) = .;			\
		*(.sched.text)						\
		VMLINUX_SYMBOL(__sched_text_end) = .;

/* spinlock.text is aligned to function alignment to ensure we have the same
 * address even at the second ld pass when generating System.map */
#define LOCK_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__lock_text_start) = .;			\
		*(.spinlock.text)					\
		VMLINUX_SYMBOL(__lock_text_end) = .;
448
449 #define KPROBES_TEXT \
450 ALIGN_FUNCTION(); \
451 VMLINUX_SYMBOL(__kprobes_text_start) = .; \
452 *(.kprobes.text) \
453 VMLINUX_SYMBOL(__kprobes_text_end) = .;
454
455 #define ENTRY_TEXT \
456 ALIGN_FUNCTION(); \
457 VMLINUX_SYMBOL(__entry_text_start) = .; \
458 *(.entry.text) \
459 VMLINUX_SYMBOL(__entry_text_end) = .;
460
461 #if defined(CONFIG_FUNCTION_GRAPH_TRACER) || defined(CONFIG_KASAN)
462 #define IRQENTRY_TEXT \
463 ALIGN_FUNCTION(); \
464 VMLINUX_SYMBOL(__irqentry_text_start) = .; \
465 *(.irqentry.text) \
466 VMLINUX_SYMBOL(__irqentry_text_end) = .;
467 #else
468 #define IRQENTRY_TEXT
469 #endif
470
471 #if defined(CONFIG_FUNCTION_GRAPH_TRACER) || defined(CONFIG_KASAN)
472 #define SOFTIRQENTRY_TEXT \
473 ALIGN_FUNCTION(); \
474 VMLINUX_SYMBOL(__softirqentry_text_start) = .; \
475 *(.softirqentry.text) \
476 VMLINUX_SYMBOL(__softirqentry_text_end) = .;
477 #else
478 #define SOFTIRQENTRY_TEXT
479 #endif
480
481 /* Section used for early init (in .S files) */
482 #define HEAD_TEXT *(.head.text)
483
484 #define HEAD_TEXT_SECTION \
485 .head.text : AT(ADDR(.head.text) - LOAD_OFFSET) { \
486 HEAD_TEXT \
487 }
488
489 /*
490 * Exception table
491 */
492 #define EXCEPTION_TABLE(align) \
493 . = ALIGN(align); \
494 __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { \
495 VMLINUX_SYMBOL(__start___ex_table) = .; \
496 *(__ex_table) \
497 VMLINUX_SYMBOL(__stop___ex_table) = .; \
498 }
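
/*
 * The paired start/stop symbols become array delimiters on the C side,
 * e.g. (a sketch; kernel/extable.c holds the real lookup):
 *
 *	extern struct exception_table_entry __start___ex_table[];
 *	extern struct exception_table_entry __stop___ex_table[];
 *
 *	e = search_extable(__start___ex_table,
 *			   __stop___ex_table - 1, addr);
 */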

/*
 * Init task
 */
#define INIT_TASK_DATA_SECTION(align)					\
	. = ALIGN(align);						\
	.data..init_task : AT(ADDR(.data..init_task) - LOAD_OFFSET) {	\
		INIT_TASK_DATA(align)					\
	}

#ifdef CONFIG_CONSTRUCTORS
#define KERNEL_CTORS()	. = ALIGN(8);			   \
			VMLINUX_SYMBOL(__ctors_start) = .; \
			*(.ctors)			   \
			*(SORT(.init_array.*))		   \
			*(.init_array)			   \
			VMLINUX_SYMBOL(__ctors_end) = .;
#else
#define KERNEL_CTORS()
#endif
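
/*
 * A sketch of the consumer (do_ctors() in init/main.c): the collected
 * constructors are invoked once during early boot:
 *
 *	ctor_fn_t *fn = (ctor_fn_t *) __ctors_start;
 *
 *	for (; fn < (ctor_fn_t *) __ctors_end; fn++)
 *		(*fn)();
 */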

/* init and exit section handling */
#define INIT_DATA							\
	*(.init.data)							\
	MEM_DISCARD(init.data)						\
	KERNEL_CTORS()							\
	MCOUNT_REC()							\
	*(.init.rodata)							\
	FTRACE_EVENTS()							\
	TRACE_SYSCALLS()						\
	KPROBE_BLACKLIST()						\
	MEM_DISCARD(init.rodata)					\
	CLK_OF_TABLES()							\
	RESERVEDMEM_OF_TABLES()						\
	CLKSRC_OF_TABLES()						\
	IOMMU_OF_TABLES()						\
	CPU_METHOD_OF_TABLES()						\
	CPUIDLE_METHOD_OF_TABLES()					\
	KERNEL_DTB()							\
	IRQCHIP_OF_MATCH_TABLE()					\
	ACPI_PROBE_TABLE(irqchip)					\
	ACPI_PROBE_TABLE(clksrc)					\
	EARLYCON_TABLE()

#define INIT_TEXT							\
	*(.init.text)							\
	MEM_DISCARD(init.text)

#define EXIT_DATA							\
	*(.exit.data)							\
	MEM_DISCARD(exit.data)						\
	MEM_DISCARD(exit.rodata)

#define EXIT_TEXT							\
	*(.exit.text)							\
	MEM_DISCARD(exit.text)

#define EXIT_CALL							\
	*(.exitcall.exit)

/*
 * bss (Block Started by Symbol) - uninitialized data
 * zeroed during startup
 */
#define SBSS(sbss_align)						\
	. = ALIGN(sbss_align);						\
	.sbss : AT(ADDR(.sbss) - LOAD_OFFSET) {				\
		*(.sbss)						\
		*(.scommon)						\
	}

/*
 * Allow architectures to redefine BSS_FIRST_SECTIONS to add extra
 * sections to the front of bss.
 */
#ifndef BSS_FIRST_SECTIONS
#define BSS_FIRST_SECTIONS
#endif

#define BSS(bss_align)							\
	. = ALIGN(bss_align);						\
	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {				\
		BSS_FIRST_SECTIONS					\
		*(.bss..page_aligned)					\
		*(.dynbss)						\
		*(.bss)							\
		*(COMMON)						\
	}

/*
 * DWARF debug sections.
 * Symbols in the DWARF debugging sections are relative to
 * the beginning of the section so we begin them at 0.
 */
#define DWARF_DEBUG							\
		/* DWARF 1 */						\
		.debug          0 : { *(.debug) }			\
		.line           0 : { *(.line) }			\
		/* GNU DWARF 1 extensions */				\
		.debug_srcinfo  0 : { *(.debug_srcinfo) }		\
		.debug_sfnames  0 : { *(.debug_sfnames) }		\
		/* DWARF 1.1 and DWARF 2 */				\
		.debug_aranges  0 : { *(.debug_aranges) }		\
		.debug_pubnames 0 : { *(.debug_pubnames) }		\
		/* DWARF 2 */						\
		.debug_info     0 : { *(.debug_info			\
				.gnu.linkonce.wi.*) }			\
		.debug_abbrev   0 : { *(.debug_abbrev) }		\
		.debug_line     0 : { *(.debug_line) }			\
		.debug_frame    0 : { *(.debug_frame) }			\
		.debug_str      0 : { *(.debug_str) }			\
		.debug_loc      0 : { *(.debug_loc) }			\
		.debug_macinfo  0 : { *(.debug_macinfo) }		\
		/* SGI/MIPS DWARF 2 extensions */			\
		.debug_weaknames 0 : { *(.debug_weaknames) }		\
		.debug_funcnames 0 : { *(.debug_funcnames) }		\
		.debug_typenames 0 : { *(.debug_typenames) }		\
		.debug_varnames  0 : { *(.debug_varnames) }		\

/* Stabs debugging sections. */
#define STABS_DEBUG							\
		.stab 0 : { *(.stab) }					\
		.stabstr 0 : { *(.stabstr) }				\
		.stab.excl 0 : { *(.stab.excl) }			\
		.stab.exclstr 0 : { *(.stab.exclstr) }			\
		.stab.index 0 : { *(.stab.index) }			\
		.stab.indexstr 0 : { *(.stab.indexstr) }		\
		.comment 0 : { *(.comment) }

#ifdef CONFIG_GENERIC_BUG
#define BUG_TABLE							\
	. = ALIGN(8);							\
	__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___bug_table) = .;		\
		*(__bug_table)						\
		VMLINUX_SYMBOL(__stop___bug_table) = .;			\
	}
#else
#define BUG_TABLE
#endif
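
/*
 * Sketch of the C-side consumer (see lib/bug.c): the delimiters above
 * are declared as externs and the table is searched on a BUG trap:
 *
 *	extern const struct bug_entry __start___bug_table[];
 *	extern const struct bug_entry __stop___bug_table[];
 */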

#ifdef CONFIG_PM_TRACE
#define TRACEDATA							\
	. = ALIGN(4);							\
	.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__tracedata_start) = .;			\
		*(.tracedata)						\
		VMLINUX_SYMBOL(__tracedata_end) = .;			\
	}
#else
#define TRACEDATA
#endif

#define NOTES								\
	.notes : AT(ADDR(.notes) - LOAD_OFFSET) {			\
		VMLINUX_SYMBOL(__start_notes) = .;			\
		*(.note.*)						\
		VMLINUX_SYMBOL(__stop_notes) = .;			\
	}

#define INIT_SETUP(initsetup_align)					\
		. = ALIGN(initsetup_align);				\
		VMLINUX_SYMBOL(__setup_start) = .;			\
		*(.init.setup)						\
		VMLINUX_SYMBOL(__setup_end) = .;

#define INIT_CALLS_LEVEL(level)						\
		VMLINUX_SYMBOL(__initcall##level##_start) = .;		\
		*(.initcall##level##.init)				\
		*(.initcall##level##s.init)				\

#define INIT_CALLS							\
		VMLINUX_SYMBOL(__initcall_start) = .;			\
		*(.initcallearly.init)					\
		INIT_CALLS_LEVEL(0)					\
		INIT_CALLS_LEVEL(1)					\
		INIT_CALLS_LEVEL(2)					\
		INIT_CALLS_LEVEL(3)					\
		INIT_CALLS_LEVEL(4)					\
		INIT_CALLS_LEVEL(5)					\
		INIT_CALLS_LEVEL(rootfs)				\
		INIT_CALLS_LEVEL(6)					\
		INIT_CALLS_LEVEL(7)					\
		VMLINUX_SYMBOL(__initcall_end) = .;
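
/*
 * A simplified sketch of the C-side consumer (init/main.c actually
 * walks one level at a time): initcalls registered with
 * core_initcall(), device_initcall(), etc. land in the per-level
 * .initcallN.init sections collected above and run in link order:
 *
 *	extern initcall_t __initcall_start[], __initcall_end[];
 *	initcall_t *fn;
 *
 *	for (fn = __initcall_start; fn < __initcall_end; fn++)
 *		(*fn)();
 */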

#define CON_INITCALL							\
		VMLINUX_SYMBOL(__con_initcall_start) = .;		\
		*(.con_initcall.init)					\
		VMLINUX_SYMBOL(__con_initcall_end) = .;

#define SECURITY_INITCALL						\
		VMLINUX_SYMBOL(__security_initcall_start) = .;		\
		*(.security_initcall.init)				\
		VMLINUX_SYMBOL(__security_initcall_end) = .;

#ifdef CONFIG_BLK_DEV_INITRD
#define INIT_RAM_FS							\
	. = ALIGN(4);							\
	VMLINUX_SYMBOL(__initramfs_start) = .;				\
	*(.init.ramfs)							\
	. = ALIGN(8);							\
	*(.init.ramfs.info)
#else
#define INIT_RAM_FS
#endif

/*
 * Default discarded sections.
 *
 * Some archs want to discard exit text/data at runtime rather than
 * link time due to cross-section references such as alt instructions,
 * bug table, eh_frame, etc. DISCARDS must be the last of the output
 * section definitions so that such archs can place those sections in
 * earlier output section definitions.
 */
#define DISCARDS							\
	/DISCARD/ : {							\
	EXIT_TEXT							\
	EXIT_DATA							\
	EXIT_CALL							\
	*(.discard)							\
	*(.discard.*)							\
	}
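
/*
 * For illustration: an arch that must keep exit code around until
 * runtime claims the input sections in an earlier output section, so
 * the /DISCARD/ rule above then matches nothing (a sketch):
 *
 *	.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
 *		EXIT_TEXT
 *	}
 */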

/**
 * PERCPU_INPUT - the percpu input sections
 * @cacheline: cacheline size
 *
 * The core percpu section names and core symbols which do not rely
 * directly upon load addresses.
 *
 * @cacheline is used to align subsections to avoid false cacheline
 * sharing between subsections for different purposes.
 */
#define PERCPU_INPUT(cacheline)						\
	VMLINUX_SYMBOL(__per_cpu_start) = .;				\
	*(.data..percpu..first)						\
	. = ALIGN(PAGE_SIZE);						\
	*(.data..percpu..page_aligned)					\
	. = ALIGN(cacheline);						\
	*(.data..percpu..read_mostly)					\
	. = ALIGN(cacheline);						\
	*(.data..percpu)						\
	*(.data..percpu..shared_aligned)				\
	VMLINUX_SYMBOL(__per_cpu_end) = .;

/**
 * PERCPU_VADDR - define output section for percpu area
 * @cacheline: cacheline size
 * @vaddr: explicit base address (optional)
 * @phdr: destination PHDR (optional)
 *
 * Macro which expands to output section for percpu area.
 *
 * @cacheline is used to align subsections to avoid false cacheline
 * sharing between subsections for different purposes.
 *
 * If @vaddr is not blank, it specifies explicit base address and all
 * percpu symbols will be offset from the given address. If blank,
 * @vaddr always equals @laddr + LOAD_OFFSET.
 *
 * @phdr defines the output PHDR to use if not blank. Be warned that
 * output PHDR is sticky. If @phdr is specified, the next output
 * section in the linker script will go there too. @phdr should have
 * a leading colon.
 *
 * Note that this macro defines __per_cpu_load as an absolute symbol.
 * If there is no need to put the percpu section at a predetermined
 * address, use PERCPU_SECTION.
 */
#define PERCPU_VADDR(cacheline, vaddr, phdr)				\
	VMLINUX_SYMBOL(__per_cpu_load) = .;				\
	.data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load)		\
				- LOAD_OFFSET) {			\
		PERCPU_INPUT(cacheline)					\
	} phdr								\
	. = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
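
/*
 * Example use (a sketch following the rules above; x86_64 historically
 * placed the percpu area at virtual address 0 in its own PHDR):
 *
 *	PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
 */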

/**
 * PERCPU_SECTION - define output section for percpu area, simple version
 * @cacheline: cacheline size
 *
 * Aligns to PAGE_SIZE and outputs the output section for the percpu
 * area. This macro doesn't manipulate @vaddr or @phdr, and
 * __per_cpu_load and __per_cpu_start will be identical.
 *
 * This macro is equivalent to ALIGN(PAGE_SIZE); PERCPU_VADDR(@cacheline,,)
 * except that __per_cpu_load is defined as a relative symbol against
 * .data..percpu, which is required for relocatable x86_32 configurations.
 */
#define PERCPU_SECTION(cacheline)					\
	. = ALIGN(PAGE_SIZE);						\
	.data..percpu	: AT(ADDR(.data..percpu) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__per_cpu_load) = .;			\
		PERCPU_INPUT(cacheline)					\
	}


/*
 * Definition of the high level *_SECTION macros
 * They will fit only a subset of the architectures
 */


/*
 * Writeable data.
 * All sections are combined in a single .data section.
 * The sections following CONSTRUCTORS are arranged so their
 * typical alignment matches.
 * A cacheline is typically/always smaller than a PAGE_SIZE, so
 * the sections that have this restriction (or a similar one)
 * are located before the ones requiring PAGE_SIZE alignment.
 * NOSAVE_DATA starts and ends with a PAGE_SIZE alignment, which
 * matches the requirement of PAGE_ALIGNED_DATA.
 *
 * use 0 as page_align if page_aligned data is not used */
#define RW_DATA_SECTION(cacheline, pagealigned, inittask)		\
	. = ALIGN(PAGE_SIZE);						\
	.data : AT(ADDR(.data) - LOAD_OFFSET) {				\
		INIT_TASK_DATA(inittask)				\
		NOSAVE_DATA						\
		PAGE_ALIGNED_DATA(pagealigned)				\
		CACHELINE_ALIGNED_DATA(cacheline)			\
		READ_MOSTLY_DATA(cacheline)				\
		DATA_DATA						\
		CONSTRUCTORS						\
	}
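
/*
 * Typical invocation (a sketch; the exact values are per-arch choices):
 * cacheline-aligned data at the L1 cache size, page-aligned data at
 * PAGE_SIZE, and an init task region of THREAD_SIZE:
 *
 *	RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
 */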

#define INIT_TEXT_SECTION(inittext_align)				\
	. = ALIGN(inittext_align);					\
	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(_sinittext) = .;				\
		INIT_TEXT						\
		VMLINUX_SYMBOL(_einittext) = .;				\
	}

#define INIT_DATA_SECTION(initsetup_align)				\
	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {		\
		INIT_DATA						\
		INIT_SETUP(initsetup_align)				\
		INIT_CALLS						\
		CON_INITCALL						\
		SECURITY_INITCALL					\
		INIT_RAM_FS						\
	}

#define BSS_SECTION(sbss_align, bss_align, stop_align)			\
	. = ALIGN(sbss_align);						\
	VMLINUX_SYMBOL(__bss_start) = .;				\
	SBSS(sbss_align)						\
	BSS(bss_align)							\
	. = ALIGN(stop_align);						\
	VMLINUX_SYMBOL(__bss_stop) = .;