Finally eradicate CONFIG_HOTPLUG
deliverable/linux.git: include/asm-generic/vmlinux.lds.h
1 /*
2 * Helper macros to support writing architecture specific
3 * linker scripts.
4 *
5 * A minimal linker script has the following content:
6 * [This is a sample; architectures may have special requirements]
7 *
8 * OUTPUT_FORMAT(...)
9 * OUTPUT_ARCH(...)
10 * ENTRY(...)
11 * SECTIONS
12 * {
13 * . = START;
14 * __init_begin = .;
15 * HEAD_TEXT_SECTION
16 * INIT_TEXT_SECTION(PAGE_SIZE)
17 * INIT_DATA_SECTION(...)
18 * PERCPU_SECTION(CACHELINE_SIZE)
19 * __init_end = .;
20 *
21 * _stext = .;
22 * TEXT_SECTION = 0
23 * _etext = .;
24 *
25 * _sdata = .;
26 * RO_DATA_SECTION(PAGE_SIZE)
27 * RW_DATA_SECTION(...)
28 * _edata = .;
29 *
30 * EXCEPTION_TABLE(...)
31 * NOTES
32 *
33 * BSS_SECTION(0, 0, 0)
34 * _end = .;
35 *
36 * STABS_DEBUG
37 * DWARF_DEBUG
38 *
39 * DISCARDS // must be the last
40 * }
41 *
42 * [__init_begin, __init_end] is the init section that may be freed after init
43 * [_stext, _etext] is the text section
44 * [_sdata, _edata] is the data section
45 *
46 * Some of the included output sections have their own set of constants.
47 * Examples are: [__initramfs_start, __initramfs_end] for initramfs and
48 * [__nosave_begin, __nosave_end] for the nosave data
49 */
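/*
 * Illustrative sketch (not part of the original header): generic code frees
 * the [__init_begin, __init_end] span once boot is complete.  The consumer
 * below is simplified and the example_* name is hypothetical; the helper
 * names and exact freeing logic differ per architecture and kernel version.
 */
#if 0
extern char __init_begin[], __init_end[];

static void example_free_initmem(void)
{
	/* hand every page between the two linker symbols back to the
	 * page allocator (real kernels go through free_initmem())
	 */
	unsigned long addr;

	for (addr = (unsigned long)__init_begin;
	     addr < (unsigned long)__init_end; addr += PAGE_SIZE)
		free_reserved_page(virt_to_page(addr));
}
#endif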
50
51 #ifndef LOAD_OFFSET
52 #define LOAD_OFFSET 0
53 #endif
54
55 #include <linux/export.h>
56
57 /* Align . to an 8 byte boundary, which equals the maximum function alignment. */
58 #define ALIGN_FUNCTION() . = ALIGN(8)
59
60 /*
61 * Align to a 32 byte boundary equal to the
62 * alignment gcc 4.5 uses for a struct
63 */
64 #define STRUCT_ALIGNMENT 32
65 #define STRUCT_ALIGN() . = ALIGN(STRUCT_ALIGNMENT)
66
67 /* The actual configuration determines whether the init/exit sections
68 * are handled as text/data or whether they can be discarded (which
69 * often happens at runtime)
70 */
71 #ifdef CONFIG_HOTPLUG_CPU
72 #define CPU_KEEP(sec) *(.cpu##sec)
73 #define CPU_DISCARD(sec)
74 #else
75 #define CPU_KEEP(sec)
76 #define CPU_DISCARD(sec) *(.cpu##sec)
77 #endif
78
79 #if defined(CONFIG_MEMORY_HOTPLUG)
80 #define MEM_KEEP(sec) *(.mem##sec)
81 #define MEM_DISCARD(sec)
82 #else
83 #define MEM_KEEP(sec)
84 #define MEM_DISCARD(sec) *(.mem##sec)
85 #endif
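/*
 * Illustrative sketch (simplified, not part of the original header): the
 * .cpu*init* and .mem*init* input sections matched above are produced by
 * the __cpuinit/__meminit family of annotations in <linux/init.h>, roughly
 * as below; the real definitions also provide __ref variants.
 */
#if 0
#define __cpuinit	__section(.cpuinit.text) __cold notrace
#define __meminitdata	__section(.meminit.data)

static int __cpuinit example_cpu_up_prepare(unsigned int cpu)
{
	return 0;	/* kept or discarded depending on CONFIG_HOTPLUG_CPU */
}
#endif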
86
87 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
88 #define MCOUNT_REC() . = ALIGN(8); \
89 VMLINUX_SYMBOL(__start_mcount_loc) = .; \
90 *(__mcount_loc) \
91 VMLINUX_SYMBOL(__stop_mcount_loc) = .;
92 #else
93 #define MCOUNT_REC()
94 #endif
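/*
 * Illustrative sketch (simplified): __start_mcount_loc/__stop_mcount_loc
 * bound the array of mcount call-site addresses collected at build time by
 * scripts/recordmcount; ftrace walks that array once at boot, roughly as in
 * kernel/trace/ftrace.c.  The example_* wrapper is hypothetical.
 */
#if 0
extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

void __init example_ftrace_init(void)
{
	ftrace_process_locs(NULL, __start_mcount_loc, __stop_mcount_loc);
}
#endif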
95
96 #ifdef CONFIG_TRACE_BRANCH_PROFILING
97 #define LIKELY_PROFILE() VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \
98 *(_ftrace_annotated_branch) \
99 VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .;
100 #else
101 #define LIKELY_PROFILE()
102 #endif
103
104 #ifdef CONFIG_PROFILE_ALL_BRANCHES
105 #define BRANCH_PROFILE() VMLINUX_SYMBOL(__start_branch_profile) = .; \
106 *(_ftrace_branch) \
107 VMLINUX_SYMBOL(__stop_branch_profile) = .;
108 #else
109 #define BRANCH_PROFILE()
110 #endif
111
112 #ifdef CONFIG_EVENT_TRACING
113 #define FTRACE_EVENTS() . = ALIGN(8); \
114 VMLINUX_SYMBOL(__start_ftrace_events) = .; \
115 *(_ftrace_events) \
116 VMLINUX_SYMBOL(__stop_ftrace_events) = .;
117 #else
118 #define FTRACE_EVENTS()
119 #endif
120
121 #ifdef CONFIG_TRACING
122 #define TRACE_PRINTKS() VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .; \
123 *(__trace_printk_fmt) /* trace_printk() format pointers */ \
124 VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
125 #else
126 #define TRACE_PRINTKS()
127 #endif
128
129 #ifdef CONFIG_FTRACE_SYSCALLS
130 #define TRACE_SYSCALLS() . = ALIGN(8); \
131 VMLINUX_SYMBOL(__start_syscalls_metadata) = .; \
132 *(__syscalls_metadata) \
133 VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
134 #else
135 #define TRACE_SYSCALLS()
136 #endif
137
138 #ifdef CONFIG_CLKSRC_OF
139 #define CLKSRC_OF_TABLES() . = ALIGN(8); \
140 VMLINUX_SYMBOL(__clksrc_of_table) = .; \
141 *(__clksrc_of_table) \
142 *(__clksrc_of_table_end)
143 #else
144 #define CLKSRC_OF_TABLES()
145 #endif
146
147 #ifdef CONFIG_IRQCHIP
148 #define IRQCHIP_OF_MATCH_TABLE() \
149 . = ALIGN(8); \
150 VMLINUX_SYMBOL(__irqchip_begin) = .; \
151 *(__irqchip_of_table) \
152 *(__irqchip_of_end)
153 #else
154 #define IRQCHIP_OF_MATCH_TABLE()
155 #endif
156
157 #ifdef CONFIG_COMMON_CLK
158 #define CLK_OF_TABLES() . = ALIGN(8); \
159 VMLINUX_SYMBOL(__clk_of_table) = .; \
160 *(__clk_of_table) \
161 *(__clk_of_table_end)
162 #else
163 #define CLK_OF_TABLES()
164 #endif
165
166 #define KERNEL_DTB() \
167 STRUCT_ALIGN(); \
168 VMLINUX_SYMBOL(__dtb_start) = .; \
169 *(.dtb.init.rodata) \
170 VMLINUX_SYMBOL(__dtb_end) = .;
171
172 /* .data section */
173 #define DATA_DATA \
174 *(.data) \
175 *(.ref.data) \
176 *(.data..shared_aligned) /* percpu related */ \
177 CPU_KEEP(init.data) \
178 CPU_KEEP(exit.data) \
179 MEM_KEEP(init.data) \
180 MEM_KEEP(exit.data) \
181 *(.data.unlikely) \
182 STRUCT_ALIGN(); \
183 *(__tracepoints) \
184 /* implement dynamic printk debug */ \
185 . = ALIGN(8); \
186 VMLINUX_SYMBOL(__start___jump_table) = .; \
187 *(__jump_table) \
188 VMLINUX_SYMBOL(__stop___jump_table) = .; \
189 . = ALIGN(8); \
190 VMLINUX_SYMBOL(__start___verbose) = .; \
191 *(__verbose) \
192 VMLINUX_SYMBOL(__stop___verbose) = .; \
193 LIKELY_PROFILE() \
194 BRANCH_PROFILE() \
195 TRACE_PRINTKS()
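/*
 * Illustrative sketch (simplified): the __start___jump_table and
 * __start___verbose bounds emitted above are consumed by the jump-label and
 * dynamic-debug cores respectively; a hypothetical consumer might iterate
 * them like this (see kernel/jump_label.c and lib/dynamic_debug.c).
 */
#if 0
extern struct jump_entry __start___jump_table[], __stop___jump_table[];
extern struct _ddebug __start___verbose[], __stop___verbose[];

static void __init example_walk_tables(void)
{
	struct jump_entry *jump;

	for (jump = __start___jump_table; jump < __stop___jump_table; jump++)
		/* kernel/jump_label.c patches each static-key site here */ ;
}
#endif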
196
197 /*
198 * Data section helpers
199 */
200 #define NOSAVE_DATA \
201 . = ALIGN(PAGE_SIZE); \
202 VMLINUX_SYMBOL(__nosave_begin) = .; \
203 *(.data..nosave) \
204 . = ALIGN(PAGE_SIZE); \
205 VMLINUX_SYMBOL(__nosave_end) = .;
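/*
 * Illustrative sketch (simplified): variables land in .data..nosave via the
 * __nosavedata annotation from <linux/init.h>; hibernation code then skips
 * the [__nosave_begin, __nosave_end] range when writing the image.  The
 * variable below is hypothetical.
 */
#if 0
static unsigned long example_boot_stamp __nosavedata;
#endif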
206
207 #define PAGE_ALIGNED_DATA(page_align) \
208 . = ALIGN(page_align); \
209 *(.data..page_aligned)
210
211 #define READ_MOSTLY_DATA(align) \
212 . = ALIGN(align); \
213 *(.data..read_mostly) \
214 . = ALIGN(align);
215
216 #define CACHELINE_ALIGNED_DATA(align) \
217 . = ALIGN(align); \
218 *(.data..cacheline_aligned)
219
220 #define INIT_TASK_DATA(align) \
221 . = ALIGN(align); \
222 *(.data..init_task)
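/*
 * Illustrative sketch (simplified): .data..init_task holds the initial
 * task's stack, placed there with the __init_task_data annotation from
 * <linux/init_task.h>, roughly:
 */
#if 0
union thread_union init_thread_union __init_task_data =
	{ INIT_THREAD_INFO(init_task) };
#endif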
223
224 /*
225 * Read only Data
226 */
227 #define RO_DATA_SECTION(align) \
228 . = ALIGN((align)); \
229 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
230 VMLINUX_SYMBOL(__start_rodata) = .; \
231 *(.rodata) *(.rodata.*) \
232 *(__vermagic) /* Kernel version magic */ \
233 . = ALIGN(8); \
234 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
235 *(__tracepoints_ptrs) /* Tracepoints: pointer array */\
236 VMLINUX_SYMBOL(__stop___tracepoints_ptrs) = .; \
237 *(__tracepoints_strings)/* Tracepoints: strings */ \
238 } \
239 \
240 .rodata1 : AT(ADDR(.rodata1) - LOAD_OFFSET) { \
241 *(.rodata1) \
242 } \
243 \
244 BUG_TABLE \
245 \
246 /* PCI quirks */ \
247 .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
248 VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \
249 *(.pci_fixup_early) \
250 VMLINUX_SYMBOL(__end_pci_fixups_early) = .; \
251 VMLINUX_SYMBOL(__start_pci_fixups_header) = .; \
252 *(.pci_fixup_header) \
253 VMLINUX_SYMBOL(__end_pci_fixups_header) = .; \
254 VMLINUX_SYMBOL(__start_pci_fixups_final) = .; \
255 *(.pci_fixup_final) \
256 VMLINUX_SYMBOL(__end_pci_fixups_final) = .; \
257 VMLINUX_SYMBOL(__start_pci_fixups_enable) = .; \
258 *(.pci_fixup_enable) \
259 VMLINUX_SYMBOL(__end_pci_fixups_enable) = .; \
260 VMLINUX_SYMBOL(__start_pci_fixups_resume) = .; \
261 *(.pci_fixup_resume) \
262 VMLINUX_SYMBOL(__end_pci_fixups_resume) = .; \
263 VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .; \
264 *(.pci_fixup_resume_early) \
265 VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .; \
266 VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .; \
267 *(.pci_fixup_suspend) \
268 VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .; \
269 } \
270 \
271 /* Built-in firmware blobs */ \
272 .builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \
273 VMLINUX_SYMBOL(__start_builtin_fw) = .; \
274 *(.builtin_fw) \
275 VMLINUX_SYMBOL(__end_builtin_fw) = .; \
276 } \
277 \
278 /* RapidIO route ops */ \
279 .rio_ops : AT(ADDR(.rio_ops) - LOAD_OFFSET) { \
280 VMLINUX_SYMBOL(__start_rio_switch_ops) = .; \
281 *(.rio_switch_ops) \
282 VMLINUX_SYMBOL(__end_rio_switch_ops) = .; \
283 } \
284 \
285 TRACEDATA \
286 \
287 /* Kernel symbol table: Normal symbols */ \
288 __ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
289 VMLINUX_SYMBOL(__start___ksymtab) = .; \
290 *(SORT(___ksymtab+*)) \
291 VMLINUX_SYMBOL(__stop___ksymtab) = .; \
292 } \
293 \
294 /* Kernel symbol table: GPL-only symbols */ \
295 __ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
296 VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \
297 *(SORT(___ksymtab_gpl+*)) \
298 VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \
299 } \
300 \
301 /* Kernel symbol table: Normal unused symbols */ \
302 __ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \
303 VMLINUX_SYMBOL(__start___ksymtab_unused) = .; \
304 *(SORT(___ksymtab_unused+*)) \
305 VMLINUX_SYMBOL(__stop___ksymtab_unused) = .; \
306 } \
307 \
308 /* Kernel symbol table: GPL-only unused symbols */ \
309 __ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
310 VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \
311 *(SORT(___ksymtab_unused_gpl+*)) \
312 VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \
313 } \
314 \
315 /* Kernel symbol table: GPL-future-only symbols */ \
316 __ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
317 VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \
318 *(SORT(___ksymtab_gpl_future+*)) \
319 VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \
320 } \
321 \
322 /* Kernel symbol table: Normal symbols */ \
323 __kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
324 VMLINUX_SYMBOL(__start___kcrctab) = .; \
325 *(SORT(___kcrctab+*)) \
326 VMLINUX_SYMBOL(__stop___kcrctab) = .; \
327 } \
328 \
329 /* Kernel symbol table: GPL-only symbols */ \
330 __kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
331 VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \
332 *(SORT(___kcrctab_gpl+*)) \
333 VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \
334 } \
335 \
336 /* Kernel symbol table: Normal unused symbols */ \
337 __kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \
338 VMLINUX_SYMBOL(__start___kcrctab_unused) = .; \
339 *(SORT(___kcrctab_unused+*)) \
340 VMLINUX_SYMBOL(__stop___kcrctab_unused) = .; \
341 } \
342 \
343 /* Kernel symbol table: GPL-only unused symbols */ \
344 __kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
345 VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \
346 *(SORT(___kcrctab_unused_gpl+*)) \
347 VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \
348 } \
349 \
350 /* Kernel symbol table: GPL-future-only symbols */ \
351 __kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
352 VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \
353 *(SORT(___kcrctab_gpl_future+*)) \
354 VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \
355 } \
356 \
357 /* Kernel symbol table: strings */ \
358 __ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \
359 *(__ksymtab_strings) \
360 } \
361 \
362 /* __*init sections */ \
363 __init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) { \
364 *(.ref.rodata) \
365 CPU_KEEP(init.rodata) \
366 CPU_KEEP(exit.rodata) \
367 MEM_KEEP(init.rodata) \
368 MEM_KEEP(exit.rodata) \
369 } \
370 \
371 /* Built-in module parameters. */ \
372 __param : AT(ADDR(__param) - LOAD_OFFSET) { \
373 VMLINUX_SYMBOL(__start___param) = .; \
374 *(__param) \
375 VMLINUX_SYMBOL(__stop___param) = .; \
376 } \
377 \
378 /* Built-in module versions. */ \
379 __modver : AT(ADDR(__modver) - LOAD_OFFSET) { \
380 VMLINUX_SYMBOL(__start___modver) = .; \
381 *(__modver) \
382 VMLINUX_SYMBOL(__stop___modver) = .; \
383 . = ALIGN((align)); \
384 VMLINUX_SYMBOL(__end_rodata) = .; \
385 } \
386 . = ALIGN((align));
387
388 /* RODATA & RO_DATA provided for backward compatibility.
389 * All archs are supposed to use RO_DATA() */
390 #define RODATA RO_DATA_SECTION(4096)
391 #define RO_DATA(align) RO_DATA_SECTION(align)
392
393 #define SECURITY_INIT \
394 .security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
395 VMLINUX_SYMBOL(__security_initcall_start) = .; \
396 *(.security_initcall.init) \
397 VMLINUX_SYMBOL(__security_initcall_end) = .; \
398 }
399
400 /* .text section. Map to function alignment to avoid address changes
401 * during the second ld pass when generating System.map. */
402 #define TEXT_TEXT \
403 ALIGN_FUNCTION(); \
404 *(.text.hot) \
405 *(.text) \
406 *(.ref.text) \
407 CPU_KEEP(init.text) \
408 CPU_KEEP(exit.text) \
409 MEM_KEEP(init.text) \
410 MEM_KEEP(exit.text) \
411 *(.text.unlikely)
412
413
414 /* sched.text is aligned to function alignment to ensure we have the same
415 * address even at the second ld pass when generating System.map. */
416 #define SCHED_TEXT \
417 ALIGN_FUNCTION(); \
418 VMLINUX_SYMBOL(__sched_text_start) = .; \
419 *(.sched.text) \
420 VMLINUX_SYMBOL(__sched_text_end) = .;
421
422 /* spinlock.text is aligned to function alignment to ensure we have the same
423 * address even at the second ld pass when generating System.map. */
424 #define LOCK_TEXT \
425 ALIGN_FUNCTION(); \
426 VMLINUX_SYMBOL(__lock_text_start) = .; \
427 *(.spinlock.text) \
428 VMLINUX_SYMBOL(__lock_text_end) = .;
429
430 #define KPROBES_TEXT \
431 ALIGN_FUNCTION(); \
432 VMLINUX_SYMBOL(__kprobes_text_start) = .; \
433 *(.kprobes.text) \
434 VMLINUX_SYMBOL(__kprobes_text_end) = .;
435
436 #define ENTRY_TEXT \
437 ALIGN_FUNCTION(); \
438 VMLINUX_SYMBOL(__entry_text_start) = .; \
439 *(.entry.text) \
440 VMLINUX_SYMBOL(__entry_text_end) = .;
441
442 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
443 #define IRQENTRY_TEXT \
444 ALIGN_FUNCTION(); \
445 VMLINUX_SYMBOL(__irqentry_text_start) = .; \
446 *(.irqentry.text) \
447 VMLINUX_SYMBOL(__irqentry_text_end) = .;
448 #else
449 #define IRQENTRY_TEXT
450 #endif
451
452 /* Section used for early init (in .S files) */
453 #define HEAD_TEXT *(.head.text)
454
455 #define HEAD_TEXT_SECTION \
456 .head.text : AT(ADDR(.head.text) - LOAD_OFFSET) { \
457 HEAD_TEXT \
458 }
459
460 /*
461 * Exception table
462 */
463 #define EXCEPTION_TABLE(align) \
464 . = ALIGN(align); \
465 __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { \
466 VMLINUX_SYMBOL(__start___ex_table) = .; \
467 *(__ex_table) \
468 VMLINUX_SYMBOL(__stop___ex_table) = .; \
469 }
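/*
 * Illustrative sketch (simplified): __ex_table entries are emitted next to
 * instructions that may fault (e.g. the _ASM_EXTABLE helpers on x86) and
 * looked up at fault time between the two symbols defined above.  The
 * example_* wrapper is hypothetical.
 */
#if 0
extern struct exception_table_entry __start___ex_table[];
extern struct exception_table_entry __stop___ex_table[];

const struct exception_table_entry *example_search(unsigned long addr)
{
	return search_extable(__start___ex_table,
			      __stop___ex_table - 1, addr);
}
#endif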
470
471 /*
472 * Init task
473 */
474 #define INIT_TASK_DATA_SECTION(align) \
475 . = ALIGN(align); \
476 .data..init_task : AT(ADDR(.data..init_task) - LOAD_OFFSET) { \
477 INIT_TASK_DATA(align) \
478 }
479
480 #ifdef CONFIG_CONSTRUCTORS
481 #define KERNEL_CTORS() . = ALIGN(8); \
482 VMLINUX_SYMBOL(__ctors_start) = .; \
483 *(.ctors) \
484 VMLINUX_SYMBOL(__ctors_end) = .;
485 #else
486 #define KERNEL_CTORS()
487 #endif
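/*
 * Illustrative sketch (simplified): with CONFIG_CONSTRUCTORS (used by gcov,
 * among others) init/main.c runs every constructor collected between
 * __ctors_start and __ctors_end, roughly:
 */
#if 0
typedef void (*ctor_fn_t)(void);
extern ctor_fn_t __ctors_start[], __ctors_end[];

static void example_do_ctors(void)
{
	ctor_fn_t *fn;

	for (fn = __ctors_start; fn < __ctors_end; fn++)
		(*fn)();
}
#endif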
488
489 /* init and exit section handling */
490 #define INIT_DATA \
491 *(.init.data) \
492 CPU_DISCARD(init.data) \
493 MEM_DISCARD(init.data) \
494 KERNEL_CTORS() \
495 MCOUNT_REC() \
496 *(.init.rodata) \
497 FTRACE_EVENTS() \
498 TRACE_SYSCALLS() \
499 CPU_DISCARD(init.rodata) \
500 MEM_DISCARD(init.rodata) \
501 CLK_OF_TABLES() \
502 CLKSRC_OF_TABLES() \
503 KERNEL_DTB() \
504 IRQCHIP_OF_MATCH_TABLE()
505
506 #define INIT_TEXT \
507 *(.init.text) \
508 CPU_DISCARD(init.text) \
509 MEM_DISCARD(init.text)
510
511 #define EXIT_DATA \
512 *(.exit.data) \
513 CPU_DISCARD(exit.data) \
514 CPU_DISCARD(exit.rodata) \
515 MEM_DISCARD(exit.data) \
516 MEM_DISCARD(exit.rodata)
517
518 #define EXIT_TEXT \
519 *(.exit.text) \
520 CPU_DISCARD(exit.text) \
521 MEM_DISCARD(exit.text)
522
523 #define EXIT_CALL \
524 *(.exitcall.exit)
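/*
 * Illustrative sketch (simplified): the .init and .exit input sections above
 * are fed by the usual <linux/init.h> annotations.  A typical driver looks
 * like this; for built-in code the exit path ends up in EXIT_TEXT/EXIT_DATA
 * and is normally discarded.  The example_* names are hypothetical.
 */
#if 0
static int __init example_driver_init(void)
{
	return 0;			/* goes to .init.text */
}

static void __exit example_driver_exit(void)
{
	/* goes to .exit.text */
}

module_init(example_driver_init);
module_exit(example_driver_exit);
#endif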
525
526 /*
527 * bss (Block Started by Symbol) - uninitialized data
528 * zeroed during startup
529 */
530 #define SBSS(sbss_align) \
531 . = ALIGN(sbss_align); \
532 .sbss : AT(ADDR(.sbss) - LOAD_OFFSET) { \
533 *(.sbss) \
534 *(.scommon) \
535 }
536
537 /*
538 * Allow architectures to redefine BSS_FIRST_SECTIONS to add extra
539 * sections to the front of bss.
540 */
541 #ifndef BSS_FIRST_SECTIONS
542 #define BSS_FIRST_SECTIONS
543 #endif
544
545 #define BSS(bss_align) \
546 . = ALIGN(bss_align); \
547 .bss : AT(ADDR(.bss) - LOAD_OFFSET) { \
548 BSS_FIRST_SECTIONS \
549 *(.bss..page_aligned) \
550 *(.dynbss) \
551 *(.bss) \
552 *(COMMON) \
553 }
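/*
 * Illustrative sketch (simplified): early architecture boot code zeroes the
 * range between __bss_start and __bss_stop (both defined by BSS_SECTION()
 * further down) before C code relies on it:
 */
#if 0
extern char __bss_start[], __bss_stop[];

static void __init example_clear_bss(void)
{
	memset(__bss_start, 0, __bss_stop - __bss_start);
}
#endif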
554
555 /*
556 * DWARF debug sections.
557 * Symbols in the DWARF debugging sections are relative to
558 * the beginning of the section so we begin them at 0.
559 */
560 #define DWARF_DEBUG \
561 /* DWARF 1 */ \
562 .debug 0 : { *(.debug) } \
563 .line 0 : { *(.line) } \
564 /* GNU DWARF 1 extensions */ \
565 .debug_srcinfo 0 : { *(.debug_srcinfo) } \
566 .debug_sfnames 0 : { *(.debug_sfnames) } \
567 /* DWARF 1.1 and DWARF 2 */ \
568 .debug_aranges 0 : { *(.debug_aranges) } \
569 .debug_pubnames 0 : { *(.debug_pubnames) } \
570 /* DWARF 2 */ \
571 .debug_info 0 : { *(.debug_info \
572 .gnu.linkonce.wi.*) } \
573 .debug_abbrev 0 : { *(.debug_abbrev) } \
574 .debug_line 0 : { *(.debug_line) } \
575 .debug_frame 0 : { *(.debug_frame) } \
576 .debug_str 0 : { *(.debug_str) } \
577 .debug_loc 0 : { *(.debug_loc) } \
578 .debug_macinfo 0 : { *(.debug_macinfo) } \
579 /* SGI/MIPS DWARF 2 extensions */ \
580 .debug_weaknames 0 : { *(.debug_weaknames) } \
581 .debug_funcnames 0 : { *(.debug_funcnames) } \
582 .debug_typenames 0 : { *(.debug_typenames) } \
583 .debug_varnames 0 : { *(.debug_varnames) } \
584
585 /* Stabs debugging sections. */
586 #define STABS_DEBUG \
587 .stab 0 : { *(.stab) } \
588 .stabstr 0 : { *(.stabstr) } \
589 .stab.excl 0 : { *(.stab.excl) } \
590 .stab.exclstr 0 : { *(.stab.exclstr) } \
591 .stab.index 0 : { *(.stab.index) } \
592 .stab.indexstr 0 : { *(.stab.indexstr) } \
593 .comment 0 : { *(.comment) }
594
595 #ifdef CONFIG_GENERIC_BUG
596 #define BUG_TABLE \
597 . = ALIGN(8); \
598 __bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) { \
599 VMLINUX_SYMBOL(__start___bug_table) = .; \
600 *(__bug_table) \
601 VMLINUX_SYMBOL(__stop___bug_table) = .; \
602 }
603 #else
604 #define BUG_TABLE
605 #endif
606
607 #ifdef CONFIG_PM_TRACE
608 #define TRACEDATA \
609 . = ALIGN(4); \
610 .tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) { \
611 VMLINUX_SYMBOL(__tracedata_start) = .; \
612 *(.tracedata) \
613 VMLINUX_SYMBOL(__tracedata_end) = .; \
614 }
615 #else
616 #define TRACEDATA
617 #endif
618
619 #define NOTES \
620 .notes : AT(ADDR(.notes) - LOAD_OFFSET) { \
621 VMLINUX_SYMBOL(__start_notes) = .; \
622 *(.note.*) \
623 VMLINUX_SYMBOL(__stop_notes) = .; \
624 }
625
626 #define INIT_SETUP(initsetup_align) \
627 . = ALIGN(initsetup_align); \
628 VMLINUX_SYMBOL(__setup_start) = .; \
629 *(.init.setup) \
630 VMLINUX_SYMBOL(__setup_end) = .;
631
632 #define INIT_CALLS_LEVEL(level) \
633 VMLINUX_SYMBOL(__initcall##level##_start) = .; \
634 *(.initcall##level##.init) \
635 *(.initcall##level##s.init) \
636
637 #define INIT_CALLS \
638 VMLINUX_SYMBOL(__initcall_start) = .; \
639 *(.initcallearly.init) \
640 INIT_CALLS_LEVEL(0) \
641 INIT_CALLS_LEVEL(1) \
642 INIT_CALLS_LEVEL(2) \
643 INIT_CALLS_LEVEL(3) \
644 INIT_CALLS_LEVEL(4) \
645 INIT_CALLS_LEVEL(5) \
646 INIT_CALLS_LEVEL(rootfs) \
647 INIT_CALLS_LEVEL(6) \
648 INIT_CALLS_LEVEL(7) \
649 VMLINUX_SYMBOL(__initcall_end) = .;
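/*
 * Illustrative sketch (simplified): core_initcall(), device_initcall() and
 * friends drop function pointers into the .initcallN.init sections collected
 * above; init/main.c builds a per-level table from the __initcallN_start
 * symbols and runs each level in order, roughly as below.  The example_*
 * names are hypothetical and only two levels are spelled out.
 */
#if 0
extern initcall_t __initcall_start[], __initcall0_start[],
		  __initcall1_start[], /* ... levels 2-7 ... */
		  __initcall_end[];

static initcall_t *example_levels[] = {
	__initcall_start,
	__initcall0_start,
	__initcall1_start,
	/* ... remaining levels ... */
	__initcall_end,
};

static void __init example_do_initcalls(void)
{
	int level;
	initcall_t *fn;

	for (level = 0; level < ARRAY_SIZE(example_levels) - 1; level++)
		for (fn = example_levels[level]; fn < example_levels[level + 1]; fn++)
			(*fn)();
}
#endif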
650
651 #define CON_INITCALL \
652 VMLINUX_SYMBOL(__con_initcall_start) = .; \
653 *(.con_initcall.init) \
654 VMLINUX_SYMBOL(__con_initcall_end) = .;
655
656 #define SECURITY_INITCALL \
657 VMLINUX_SYMBOL(__security_initcall_start) = .; \
658 *(.security_initcall.init) \
659 VMLINUX_SYMBOL(__security_initcall_end) = .;
660
661 #ifdef CONFIG_BLK_DEV_INITRD
662 #define INIT_RAM_FS \
663 . = ALIGN(4); \
664 VMLINUX_SYMBOL(__initramfs_start) = .; \
665 *(.init.ramfs) \
666 . = ALIGN(8); \
667 *(.init.ramfs.info)
668 #else
669 #define INIT_RAM_FS
670 #endif
671
672 /*
673 * Default discarded sections.
674 *
675 * Some archs want to discard exit text/data at runtime rather than
676 * link time due to cross-section references such as alt instructions,
677 * bug table, eh_frame, etc.  DISCARDS must be the last of the output
678 * section definitions so that such archs can put those sections in
679 * earlier definitions.
680 */
681 #define DISCARDS \
682 /DISCARD/ : { \
683 EXIT_TEXT \
684 EXIT_DATA \
685 EXIT_CALL \
686 *(.discard) \
687 *(.discard.*) \
688 }
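/*
 * Illustrative sketch (hypothetical fragment): an architecture that frees
 * exit code at runtime instead of discarding it at link time keeps EXIT_TEXT
 * in a real output section of its vmlinux.lds.S, so the earlier section
 * claims those inputs before DISCARDS, which still comes last:
 */
#if 0
	.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
		EXIT_TEXT
	}

	/* ... remaining output sections ... */

	DISCARDS	/* must stay last */
#endif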
689
690 /**
691 * PERCPU_INPUT - the percpu input sections
692 * @cacheline: cacheline size
693 *
694 * The core percpu section names and core symbols which do not rely
695 * directly upon load addresses.
696 *
697 * @cacheline is used to align subsections to avoid false cacheline
698 * sharing between subsections for different purposes.
699 */
700 #define PERCPU_INPUT(cacheline) \
701 VMLINUX_SYMBOL(__per_cpu_start) = .; \
702 *(.data..percpu..first) \
703 . = ALIGN(PAGE_SIZE); \
704 *(.data..percpu..page_aligned) \
705 . = ALIGN(cacheline); \
706 *(.data..percpu..readmostly) \
707 . = ALIGN(cacheline); \
708 *(.data..percpu) \
709 *(.data..percpu..shared_aligned) \
710 VMLINUX_SYMBOL(__per_cpu_end) = .;
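/*
 * Illustrative sketch (simplified): the DEFINE_PER_CPU() family in
 * <linux/percpu-defs.h> selects between the subsections gathered above.
 * The example_* variables are hypothetical.
 */
#if 0
DEFINE_PER_CPU(int, example_counter);			/* .data..percpu */
DEFINE_PER_CPU_READ_MOSTLY(int, example_flag);		/* ..readmostly */
DEFINE_PER_CPU_SHARED_ALIGNED(long, example_stat);	/* ..shared_aligned */
DEFINE_PER_CPU_PAGE_ALIGNED(char, example_buf[PAGE_SIZE]); /* ..page_aligned */
#endif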
711
712 /**
713 * PERCPU_VADDR - define output section for percpu area
714 * @cacheline: cacheline size
715 * @vaddr: explicit base address (optional)
716 * @phdr: destination PHDR (optional)
717 *
718 * Macro which expands to output section for percpu area.
719 *
720 * @cacheline is used to align subsections to avoid false cacheline
721 * sharing between subsections for different purposes.
722 *
723 * If @vaddr is not blank, it specifies explicit base address and all
724 * percpu symbols will be offset from the given address. If blank,
725 * @vaddr always equals @laddr + LOAD_OFFSET.
726 *
727 * @phdr defines the output PHDR to use if not blank. Be warned that
728 * output PHDR is sticky. If @phdr is specified, the next output
729 * section in the linker script will go there too. @phdr should have
730 * a leading colon.
731 *
732 * Note that this macro defines __per_cpu_load as an absolute symbol.
733 * If there is no need to put the percpu section at a predetermined
734 * address, use PERCPU_SECTION.
735 */
736 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
737 VMLINUX_SYMBOL(__per_cpu_load) = .; \
738 .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
739 - LOAD_OFFSET) { \
740 PERCPU_INPUT(cacheline) \
741 } phdr \
742 . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
743
744 /**
745 * PERCPU_SECTION - define output section for percpu area, simple version
746 * @cacheline: cacheline size
747 *
748 * Align to PAGE_SIZE and output the percpu area's output section.  This
749 * macro doesn't manipulate @vaddr or @phdr, and __per_cpu_load and
750 * __per_cpu_start will be identical.
751 *
752 * This macro is equivalent to ALIGN(PAGE_SIZE); PERCPU_VADDR(@cacheline,,)
753 * except that __per_cpu_load is defined as a relative symbol against
754 * .data..percpu which is required for relocatable x86_32 configuration.
755 */
756 #define PERCPU_SECTION(cacheline) \
757 . = ALIGN(PAGE_SIZE); \
758 .data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) { \
759 VMLINUX_SYMBOL(__per_cpu_load) = .; \
760 PERCPU_INPUT(cacheline) \
761 }
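/*
 * Illustrative sketch (hypothetical fragments): most architectures use the
 * simple form in their vmlinux.lds.S; a layout with a fixed percpu base
 * (historically x86_64) uses PERCPU_VADDR with a dedicated PHDR instead:
 */
#if 0
	/* common case */
	PERCPU_SECTION(L1_CACHE_BYTES)

	/* explicit base address and dedicated program header */
	PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
#endif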
762
763
764 /*
765 * Definition of the high level *_SECTION macros
766 * They will fit only a subset of the architectures
767 */
768
769
770 /*
771 * Writeable data.
772 * All sections are combined in a single .data section.
773 * The sections following CONSTRUCTORS are arranged so their
774 * typical alignment matches.
775 * A cacheline is typically/always smaller than a PAGE_SIZE, so
776 * the sections that have this restriction (or a similar one)
777 * are located before the ones requiring PAGE_SIZE alignment.
778 * NOSAVE_DATA starts and ends with a PAGE_SIZE alignment which
779 * matches the requirement of PAGE_ALIGNED_DATA.
780 *
781 * Use 0 as page_align if page-aligned data is not used. */
782 #define RW_DATA_SECTION(cacheline, pagealigned, inittask) \
783 . = ALIGN(PAGE_SIZE); \
784 .data : AT(ADDR(.data) - LOAD_OFFSET) { \
785 INIT_TASK_DATA(inittask) \
786 NOSAVE_DATA \
787 PAGE_ALIGNED_DATA(pagealigned) \
788 CACHELINE_ALIGNED_DATA(cacheline) \
789 READ_MOSTLY_DATA(cacheline) \
790 DATA_DATA \
791 CONSTRUCTORS \
792 }
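/*
 * Illustrative sketch (hypothetical values; each architecture picks its own
 * cacheline size, page alignment and init-task/stack size):
 */
#if 0
	_sdata = .;
	RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
	_edata = .;
#endif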
793
794 #define INIT_TEXT_SECTION(inittext_align) \
795 . = ALIGN(inittext_align); \
796 .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { \
797 VMLINUX_SYMBOL(_sinittext) = .; \
798 INIT_TEXT \
799 VMLINUX_SYMBOL(_einittext) = .; \
800 }
801
802 #define INIT_DATA_SECTION(initsetup_align) \
803 .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { \
804 INIT_DATA \
805 INIT_SETUP(initsetup_align) \
806 INIT_CALLS \
807 CON_INITCALL \
808 SECURITY_INITCALL \
809 INIT_RAM_FS \
810 }
811
812 #define BSS_SECTION(sbss_align, bss_align, stop_align) \
813 . = ALIGN(sbss_align); \
814 VMLINUX_SYMBOL(__bss_start) = .; \
815 SBSS(sbss_align) \
816 BSS(bss_align) \
817 . = ALIGN(stop_align); \
818 VMLINUX_SYMBOL(__bss_stop) = .;