/*
 * Helper macros to support writing architecture specific
 * linker scripts.
 *
 * A minimal linker script has the following content:
 * [This is a sample, architectures may have special requirements]
 *
 * OUTPUT_FORMAT(...)
 * OUTPUT_ARCH(...)
 * ENTRY(...)
 * SECTIONS
 * {
 *	. = START;
 *	__init_begin = .;
 *	HEAD_TEXT_SECTION
 *	INIT_TEXT_SECTION(PAGE_SIZE)
 *	INIT_DATA_SECTION(...)
 *	PERCPU(PAGE_SIZE)
 *	__init_end = .;
 *
 *	_stext = .;
 *	TEXT_SECTION = 0
 *	_etext = .;
 *
 *	_sdata = .;
 *	RO_DATA_SECTION(PAGE_SIZE)
 *	RW_DATA_SECTION(...)
 *	_edata = .;
 *
 *	EXCEPTION_TABLE(...)
 *	NOTES
 *
 *	__bss_start = .;
 *	BSS_SECTION(0, 0)
 *	__bss_stop = .;
 *	_end = .;
 *
 *	/DISCARD/ : {
 *		EXIT_TEXT
 *		EXIT_DATA
 *		EXIT_CALL
 *	}
 *	STABS_DEBUG
 *	DWARF_DEBUG
 * }
 *
 * [__init_begin, __init_end] is the init section that may be freed after init
 * [_stext, _etext] is the text section
 * [_sdata, _edata] is the data section
 *
 * Some of the included output sections have their own set of constants.
 * Examples are: [__initramfs_start, __initramfs_end] for initramfs and
 * [__nosave_begin, __nosave_end] for the nosave data
 */

#ifndef LOAD_OFFSET
#define LOAD_OFFSET 0
#endif

#ifndef VMLINUX_SYMBOL
#define VMLINUX_SYMBOL(_sym_) _sym_
#endif
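
/*
 * Example (illustrative sketch, not part of this header): an architecture
 * whose ABI prefixes C symbols with an underscore can override this macro
 * before including the file, so linker-script symbols match the names the
 * compiler actually emits:
 *
 *	#define VMLINUX_SYMBOL(_sym_) _##_sym_
 *
 * VMLINUX_SYMBOL(__bss_start) then expands to ___bss_start.
 */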

/* Align . to an 8 byte boundary, which equals the maximum function alignment. */
#define ALIGN_FUNCTION()  . = ALIGN(8)

/* The actual configuration determines whether the init/exit sections
 * are handled as text/data or can be discarded (which
 * often happens at runtime)
 */
#ifdef CONFIG_HOTPLUG
#define DEV_KEEP(sec)    *(.dev##sec)
#define DEV_DISCARD(sec)
#else
#define DEV_KEEP(sec)
#define DEV_DISCARD(sec) *(.dev##sec)
#endif

#ifdef CONFIG_HOTPLUG_CPU
#define CPU_KEEP(sec)    *(.cpu##sec)
#define CPU_DISCARD(sec)
#else
#define CPU_KEEP(sec)
#define CPU_DISCARD(sec) *(.cpu##sec)
#endif

#if defined(CONFIG_MEMORY_HOTPLUG)
#define MEM_KEEP(sec)    *(.mem##sec)
#define MEM_DISCARD(sec)
#else
#define MEM_KEEP(sec)
#define MEM_DISCARD(sec) *(.mem##sec)
#endif
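
/*
 * Example (illustrative, not part of this header): code annotated with
 * __meminit is compiled into .meminit.text. With CONFIG_MEMORY_HOTPLUG
 * set, the arch linker script keeps it via MEM_KEEP(init.text); otherwise
 * MEM_DISCARD(init.text) routes it to /DISCARD/ so it is never loaded.
 * The function below is a hypothetical sample of the annotation pattern:
 *
 *	static int __meminit my_hotplug_setup(int nid) { return 0; }
 */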

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
#define MCOUNT_REC()	VMLINUX_SYMBOL(__start_mcount_loc) = .; \
			*(__mcount_loc) \
			VMLINUX_SYMBOL(__stop_mcount_loc) = .;
#else
#define MCOUNT_REC()
#endif
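
/*
 * Example (illustrative sketch): ftrace walks the table bounded by the two
 * symbols above as an array of call-site addresses recorded by
 * scripts/recordmcount. A hypothetical consumer looks roughly like:
 *
 *	extern unsigned long __start_mcount_loc[], __stop_mcount_loc[];
 *	unsigned long *p;
 *	for (p = __start_mcount_loc; p < __stop_mcount_loc; p++)
 *		register_mcount_site(*p);	// hypothetical helper
 */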

#ifdef CONFIG_TRACE_BRANCH_PROFILING
#define LIKELY_PROFILE()	VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \
				*(_ftrace_annotated_branch) \
				VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .;
#else
#define LIKELY_PROFILE()
#endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
#define BRANCH_PROFILE()	VMLINUX_SYMBOL(__start_branch_profile) = .; \
				*(_ftrace_branch) \
				VMLINUX_SYMBOL(__stop_branch_profile) = .;
#else
#define BRANCH_PROFILE()
#endif

#ifdef CONFIG_EVENT_TRACING
#define FTRACE_EVENTS()	VMLINUX_SYMBOL(__start_ftrace_events) = .; \
			*(_ftrace_events) \
			VMLINUX_SYMBOL(__stop_ftrace_events) = .;
#else
#define FTRACE_EVENTS()
#endif

#ifdef CONFIG_TRACING
#define TRACE_PRINTKS()	VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .; \
			*(__trace_printk_fmt) /* Trace_printk fmt pointers */ \
			VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
#else
#define TRACE_PRINTKS()
#endif

#ifdef CONFIG_FTRACE_SYSCALLS
#define TRACE_SYSCALLS()	VMLINUX_SYMBOL(__start_syscalls_metadata) = .; \
				*(__syscalls_metadata) \
				VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
#else
#define TRACE_SYSCALLS()
#endif

/* .data section */
#define DATA_DATA \
	*(.data) \
	*(.ref.data) \
	DEV_KEEP(init.data) \
	DEV_KEEP(exit.data) \
	CPU_KEEP(init.data) \
	CPU_KEEP(exit.data) \
	MEM_KEEP(init.data) \
	MEM_KEEP(exit.data) \
	. = ALIGN(8); \
	VMLINUX_SYMBOL(__start___markers) = .; \
	*(__markers) \
	VMLINUX_SYMBOL(__stop___markers) = .; \
	. = ALIGN(32); \
	VMLINUX_SYMBOL(__start___tracepoints) = .; \
	*(__tracepoints) \
	VMLINUX_SYMBOL(__stop___tracepoints) = .; \
	/* implement dynamic printk debug */ \
	. = ALIGN(8); \
	VMLINUX_SYMBOL(__start___verbose) = .; \
	*(__verbose) \
	VMLINUX_SYMBOL(__stop___verbose) = .; \
	LIKELY_PROFILE() \
	BRANCH_PROFILE() \
	TRACE_PRINTKS() \
	FTRACE_EVENTS() \
	TRACE_SYSCALLS()
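
/*
 * Example (illustrative sketch): each __start_xxx/__stop_xxx pair emitted
 * above bounds an array that C code can walk. For the tracepoint table, a
 * consumer looks roughly like:
 *
 *	extern struct tracepoint __start___tracepoints[];
 *	extern struct tracepoint __stop___tracepoints[];
 *	struct tracepoint *tp;
 *	for (tp = __start___tracepoints; tp < __stop___tracepoints; tp++)
 *		visit(tp);	// hypothetical per-entry hook
 */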

/*
 * Data section helpers
 */
#define NOSAVE_DATA \
	. = ALIGN(PAGE_SIZE); \
	VMLINUX_SYMBOL(__nosave_begin) = .; \
	*(.data.nosave) \
	. = ALIGN(PAGE_SIZE); \
	VMLINUX_SYMBOL(__nosave_end) = .;

#define PAGE_ALIGNED_DATA(page_align) \
	. = ALIGN(page_align); \
	*(.data.page_aligned)

#define READ_MOSTLY_DATA(align) \
	. = ALIGN(align); \
	*(.data.read_mostly)

#define CACHELINE_ALIGNED_DATA(align) \
	. = ALIGN(align); \
	*(.data.cacheline_aligned)

#define INIT_TASK_DATA(align) \
	. = ALIGN(align); \
	*(.data.init_task)

/*
 * Read only Data
 */
#define RO_DATA_SECTION(align) \
	. = ALIGN((align)); \
	.rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start_rodata) = .; \
		*(.rodata) *(.rodata.*) \
		*(__vermagic)		/* Kernel version magic */ \
		*(__markers_strings)	/* Markers: strings */ \
		*(__tracepoints_strings)/* Tracepoints: strings */ \
	} \
	\
	.rodata1 : AT(ADDR(.rodata1) - LOAD_OFFSET) { \
		*(.rodata1) \
	} \
	\
	BUG_TABLE \
	\
	/* PCI quirks */ \
	.pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \
		*(.pci_fixup_early) \
		VMLINUX_SYMBOL(__end_pci_fixups_early) = .; \
		VMLINUX_SYMBOL(__start_pci_fixups_header) = .; \
		*(.pci_fixup_header) \
		VMLINUX_SYMBOL(__end_pci_fixups_header) = .; \
		VMLINUX_SYMBOL(__start_pci_fixups_final) = .; \
		*(.pci_fixup_final) \
		VMLINUX_SYMBOL(__end_pci_fixups_final) = .; \
		VMLINUX_SYMBOL(__start_pci_fixups_enable) = .; \
		*(.pci_fixup_enable) \
		VMLINUX_SYMBOL(__end_pci_fixups_enable) = .; \
		VMLINUX_SYMBOL(__start_pci_fixups_resume) = .; \
		*(.pci_fixup_resume) \
		VMLINUX_SYMBOL(__end_pci_fixups_resume) = .; \
		VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .; \
		*(.pci_fixup_resume_early) \
		VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .; \
		VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .; \
		*(.pci_fixup_suspend) \
		VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .; \
	} \
	\
	/* Built-in firmware blobs */ \
	.builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start_builtin_fw) = .; \
		*(.builtin_fw) \
		VMLINUX_SYMBOL(__end_builtin_fw) = .; \
	} \
	\
	/* RapidIO route ops */ \
	.rio_route : AT(ADDR(.rio_route) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start_rio_route_ops) = .; \
		*(.rio_route_ops) \
		VMLINUX_SYMBOL(__end_rio_route_ops) = .; \
	} \
	\
	TRACEDATA \
	\
	/* Kernel symbol table: Normal symbols */ \
	__ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab) = .; \
		*(__ksymtab) \
		VMLINUX_SYMBOL(__stop___ksymtab) = .; \
	} \
	\
	/* Kernel symbol table: GPL-only symbols */ \
	__ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \
		*(__ksymtab_gpl) \
		VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \
	} \
	\
	/* Kernel symbol table: Normal unused symbols */ \
	__ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_unused) = .; \
		*(__ksymtab_unused) \
		VMLINUX_SYMBOL(__stop___ksymtab_unused) = .; \
	} \
	\
	/* Kernel symbol table: GPL-only unused symbols */ \
	__ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \
		*(__ksymtab_unused_gpl) \
		VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \
	} \
	\
	/* Kernel symbol table: GPL-future-only symbols */ \
	__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \
		*(__ksymtab_gpl_future) \
		VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \
	} \
	\
	/* Kernel symbol table: Normal symbols */ \
	__kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab) = .; \
		*(__kcrctab) \
		VMLINUX_SYMBOL(__stop___kcrctab) = .; \
	} \
	\
	/* Kernel symbol table: GPL-only symbols */ \
	__kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \
		*(__kcrctab_gpl) \
		VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \
	} \
	\
	/* Kernel symbol table: Normal unused symbols */ \
	__kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_unused) = .; \
		*(__kcrctab_unused) \
		VMLINUX_SYMBOL(__stop___kcrctab_unused) = .; \
	} \
	\
	/* Kernel symbol table: GPL-only unused symbols */ \
	__kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \
		*(__kcrctab_unused_gpl) \
		VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \
	} \
	\
	/* Kernel symbol table: GPL-future-only symbols */ \
	__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \
		*(__kcrctab_gpl_future) \
		VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \
	} \
	\
	/* Kernel symbol table: strings */ \
	__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \
		*(__ksymtab_strings) \
	} \
	\
	/* __*init sections */ \
	__init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) { \
		*(.ref.rodata) \
		MCOUNT_REC() \
		DEV_KEEP(init.rodata) \
		DEV_KEEP(exit.rodata) \
		CPU_KEEP(init.rodata) \
		CPU_KEEP(exit.rodata) \
		MEM_KEEP(init.rodata) \
		MEM_KEEP(exit.rodata) \
	} \
	\
	/* Built-in module parameters. */ \
	__param : AT(ADDR(__param) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___param) = .; \
		*(__param) \
		VMLINUX_SYMBOL(__stop___param) = .; \
		. = ALIGN((align)); \
		VMLINUX_SYMBOL(__end_rodata) = .; \
	} \
	. = ALIGN((align));

/* RODATA & RO_DATA are provided for backward compatibility.
 * All archs are supposed to use RO_DATA() */
#define RODATA		RO_DATA_SECTION(4096)
#define RO_DATA(align)	RO_DATA_SECTION(align)

#define SECURITY_INIT \
	.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__security_initcall_start) = .; \
		*(.security_initcall.init) \
		VMLINUX_SYMBOL(__security_initcall_end) = .; \
	}

/* .text section. Map to function alignment to avoid address changes
 * during the second ld pass when generating System.map */
#define TEXT_TEXT \
	ALIGN_FUNCTION(); \
	*(.text.hot) \
	*(.text) \
	*(.ref.text) \
	DEV_KEEP(init.text) \
	DEV_KEEP(exit.text) \
	CPU_KEEP(init.text) \
	CPU_KEEP(exit.text) \
	MEM_KEEP(init.text) \
	MEM_KEEP(exit.text) \
	*(.text.unlikely)


/* sched.text is aligned to function alignment to ensure we have the same
 * address even at the second ld pass when generating System.map */
#define SCHED_TEXT \
	ALIGN_FUNCTION(); \
	VMLINUX_SYMBOL(__sched_text_start) = .; \
	*(.sched.text) \
	VMLINUX_SYMBOL(__sched_text_end) = .;

/* spinlock.text is aligned to function alignment to ensure we have the same
 * address even at the second ld pass when generating System.map */
#define LOCK_TEXT \
	ALIGN_FUNCTION(); \
	VMLINUX_SYMBOL(__lock_text_start) = .; \
	*(.spinlock.text) \
	VMLINUX_SYMBOL(__lock_text_end) = .;

#define KPROBES_TEXT \
	ALIGN_FUNCTION(); \
	VMLINUX_SYMBOL(__kprobes_text_start) = .; \
	*(.kprobes.text) \
	VMLINUX_SYMBOL(__kprobes_text_end) = .;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#define IRQENTRY_TEXT \
	ALIGN_FUNCTION(); \
	VMLINUX_SYMBOL(__irqentry_text_start) = .; \
	*(.irqentry.text) \
	VMLINUX_SYMBOL(__irqentry_text_end) = .;
#else
#define IRQENTRY_TEXT
#endif

/* Section used for early init (in .S files) */
#define HEAD_TEXT  *(.head.text)

#define HEAD_TEXT_SECTION \
	.head.text : AT(ADDR(.head.text) - LOAD_OFFSET) { \
		HEAD_TEXT \
	}

/*
 * Exception table
 */
#define EXCEPTION_TABLE(align) \
	. = ALIGN(align); \
	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ex_table) = .; \
		*(__ex_table) \
		VMLINUX_SYMBOL(__stop___ex_table) = .; \
	}
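
/*
 * Example (illustrative): an arch script that wants its exception table
 * aligned to 16 bytes simply invokes
 *
 *	EXCEPTION_TABLE(16)
 *
 * and the fault fixup path then searches the range
 * [__start___ex_table, __stop___ex_table) for the faulting address
 * (see search_exception_tables()).
 */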

/*
 * Init task
 */
#define INIT_TASK_DATA_SECTION(align) \
	. = ALIGN(align); \
	.data.init_task : { \
		INIT_TASK_DATA(align) \
	}

#ifdef CONFIG_CONSTRUCTORS
#define KERNEL_CTORS()	VMLINUX_SYMBOL(__ctors_start) = .; \
			*(.ctors) \
			VMLINUX_SYMBOL(__ctors_end) = .;
#else
#define KERNEL_CTORS()
#endif
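
/*
 * Example (illustrative sketch): with CONFIG_CONSTRUCTORS the kernel runs
 * the collected constructors at boot by walking the bounded array, roughly:
 *
 *	typedef void (*ctor_fn_t)(void);
 *	extern ctor_fn_t __ctors_start[], __ctors_end[];
 *	ctor_fn_t *fn;
 *	for (fn = __ctors_start; fn < __ctors_end; fn++)
 *		(*fn)();
 */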

/* init and exit section handling */
#define INIT_DATA \
	*(.init.data) \
	DEV_DISCARD(init.data) \
	CPU_DISCARD(init.data) \
	MEM_DISCARD(init.data) \
	KERNEL_CTORS() \
	*(.init.rodata) \
	DEV_DISCARD(init.rodata) \
	CPU_DISCARD(init.rodata) \
	MEM_DISCARD(init.rodata)

#define INIT_TEXT \
	*(.init.text) \
	DEV_DISCARD(init.text) \
	CPU_DISCARD(init.text) \
	MEM_DISCARD(init.text)

#define EXIT_DATA \
	*(.exit.data) \
	DEV_DISCARD(exit.data) \
	DEV_DISCARD(exit.rodata) \
	CPU_DISCARD(exit.data) \
	CPU_DISCARD(exit.rodata) \
	MEM_DISCARD(exit.data) \
	MEM_DISCARD(exit.rodata)

#define EXIT_TEXT \
	*(.exit.text) \
	DEV_DISCARD(exit.text) \
	CPU_DISCARD(exit.text) \
	MEM_DISCARD(exit.text)

#define EXIT_CALL \
	*(.exitcall.exit)

/*
 * bss (Block Started by Symbol) - uninitialized data
 * zeroed during startup
 */
#define SBSS \
	.sbss : AT(ADDR(.sbss) - LOAD_OFFSET) { \
		*(.sbss) \
		*(.scommon) \
	}

#define BSS(bss_align) \
	. = ALIGN(bss_align); \
	.bss : AT(ADDR(.bss) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__bss_start) = .; \
		*(.bss.page_aligned) \
		*(.dynbss) \
		*(.bss) \
		*(COMMON) \
		VMLINUX_SYMBOL(__bss_stop) = .; \
	}

/*
 * DWARF debug sections.
 * Symbols in the DWARF debugging sections are relative to
 * the beginning of the section so we begin them at 0.
 */
#define DWARF_DEBUG \
	/* DWARF 1 */ \
	.debug 0 : { *(.debug) } \
	.line 0 : { *(.line) } \
	/* GNU DWARF 1 extensions */ \
	.debug_srcinfo 0 : { *(.debug_srcinfo) } \
	.debug_sfnames 0 : { *(.debug_sfnames) } \
	/* DWARF 1.1 and DWARF 2 */ \
	.debug_aranges 0 : { *(.debug_aranges) } \
	.debug_pubnames 0 : { *(.debug_pubnames) } \
	/* DWARF 2 */ \
	.debug_info 0 : { *(.debug_info \
		.gnu.linkonce.wi.*) } \
	.debug_abbrev 0 : { *(.debug_abbrev) } \
	.debug_line 0 : { *(.debug_line) } \
	.debug_frame 0 : { *(.debug_frame) } \
	.debug_str 0 : { *(.debug_str) } \
	.debug_loc 0 : { *(.debug_loc) } \
	.debug_macinfo 0 : { *(.debug_macinfo) } \
	/* SGI/MIPS DWARF 2 extensions */ \
	.debug_weaknames 0 : { *(.debug_weaknames) } \
	.debug_funcnames 0 : { *(.debug_funcnames) } \
	.debug_typenames 0 : { *(.debug_typenames) } \
	.debug_varnames 0 : { *(.debug_varnames) }

/* Stabs debugging sections. */
#define STABS_DEBUG \
	.stab 0 : { *(.stab) } \
	.stabstr 0 : { *(.stabstr) } \
	.stab.excl 0 : { *(.stab.excl) } \
	.stab.exclstr 0 : { *(.stab.exclstr) } \
	.stab.index 0 : { *(.stab.index) } \
	.stab.indexstr 0 : { *(.stab.indexstr) } \
	.comment 0 : { *(.comment) }

#ifdef CONFIG_GENERIC_BUG
#define BUG_TABLE \
	. = ALIGN(8); \
	__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___bug_table) = .; \
		*(__bug_table) \
		VMLINUX_SYMBOL(__stop___bug_table) = .; \
	}
#else
#define BUG_TABLE
#endif

#ifdef CONFIG_PM_TRACE
#define TRACEDATA \
	. = ALIGN(4); \
	.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__tracedata_start) = .; \
		*(.tracedata) \
		VMLINUX_SYMBOL(__tracedata_end) = .; \
	}
#else
#define TRACEDATA
#endif

#define NOTES \
	.notes : AT(ADDR(.notes) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start_notes) = .; \
		*(.note.*) \
		VMLINUX_SYMBOL(__stop_notes) = .; \
	}

#define INIT_SETUP(initsetup_align) \
	. = ALIGN(initsetup_align); \
	VMLINUX_SYMBOL(__setup_start) = .; \
	*(.init.setup) \
	VMLINUX_SYMBOL(__setup_end) = .;

#define INITCALLS \
	*(.initcallearly.init) \
	VMLINUX_SYMBOL(__early_initcall_end) = .; \
	*(.initcall0.init) \
	*(.initcall0s.init) \
	*(.initcall1.init) \
	*(.initcall1s.init) \
	*(.initcall2.init) \
	*(.initcall2s.init) \
	*(.initcall3.init) \
	*(.initcall3s.init) \
	*(.initcall4.init) \
	*(.initcall4s.init) \
	*(.initcall5.init) \
	*(.initcall5s.init) \
	*(.initcallrootfs.init) \
	*(.initcall6.init) \
	*(.initcall6s.init) \
	*(.initcall7.init) \
	*(.initcall7s.init)

#define INIT_CALLS \
	VMLINUX_SYMBOL(__initcall_start) = .; \
	INITCALLS \
	VMLINUX_SYMBOL(__initcall_end) = .;
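
/*
 * Example (illustrative sketch): the .initcallN.init ordering above is what
 * makes core_initcall (level 1) entries run before device_initcall
 * (level 6) entries. At boot the kernel walks the bounded array, roughly:
 *
 *	typedef int (*initcall_t)(void);
 *	extern initcall_t __initcall_start[], __initcall_end[];
 *	initcall_t *call;
 *	for (call = __initcall_start; call < __initcall_end; call++)
 *		(*call)();
 */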

#define CON_INITCALL \
	VMLINUX_SYMBOL(__con_initcall_start) = .; \
	*(.con_initcall.init) \
	VMLINUX_SYMBOL(__con_initcall_end) = .;

#define SECURITY_INITCALL \
	VMLINUX_SYMBOL(__security_initcall_start) = .; \
	*(.security_initcall.init) \
	VMLINUX_SYMBOL(__security_initcall_end) = .;

#ifdef CONFIG_BLK_DEV_INITRD
#define INIT_RAM_FS \
	. = ALIGN(PAGE_SIZE); \
	VMLINUX_SYMBOL(__initramfs_start) = .; \
	*(.init.ramfs) \
	VMLINUX_SYMBOL(__initramfs_end) = .;
#else
#define INIT_RAM_FS
#endif

/**
 * PERCPU_VADDR - define output section for percpu area
 * @vaddr: explicit base address (optional)
 * @phdr: destination PHDR (optional)
 *
 * Macro which expands to output section for percpu area. If @vaddr
 * is not blank, it specifies an explicit base address and all percpu
 * symbols will be offset from the given address. If blank, @vaddr
 * always equals @laddr + LOAD_OFFSET.
 *
 * @phdr defines the output PHDR to use if not blank. Be warned that
 * output PHDR is sticky. If @phdr is specified, the next output
 * section in the linker script will go there too. @phdr should have
 * a leading colon.
 *
 * Note that this macro defines __per_cpu_load as an absolute symbol.
 * If there is no need to put the percpu section at a predetermined
 * address, use PERCPU().
 */
#define PERCPU_VADDR(vaddr, phdr) \
	VMLINUX_SYMBOL(__per_cpu_load) = .; \
	.data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
				- LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__per_cpu_start) = .; \
		*(.data.percpu.first) \
		*(.data.percpu.page_aligned) \
		*(.data.percpu) \
		*(.data.percpu.shared_aligned) \
		VMLINUX_SYMBOL(__per_cpu_end) = .; \
	} phdr \
	. = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
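
/*
 * Example (illustrative): x86_64 SMP historically placed the percpu area
 * at virtual address 0 in its own program header, e.g.
 *
 *	PERCPU_VADDR(0, :percpu)
 *
 * so that percpu symbols become small offsets added to a per-cpu base.
 */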

/**
 * PERCPU - define output section for percpu area, simple version
 * @align: required alignment
 *
 * Align to @align and define output section for percpu area. This
 * macro doesn't manipulate @vaddr or @phdr, so __per_cpu_load and
 * __per_cpu_start will be identical.
 *
 * This macro is equivalent to ALIGN(align); PERCPU_VADDR( , ) except
 * that __per_cpu_load is defined as a relative symbol against
 * .data.percpu, which is required for the relocatable x86_32
 * configuration.
 */
#define PERCPU(align) \
	. = ALIGN(align); \
	.data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__per_cpu_load) = .; \
		VMLINUX_SYMBOL(__per_cpu_start) = .; \
		*(.data.percpu.first) \
		*(.data.percpu.page_aligned) \
		*(.data.percpu) \
		*(.data.percpu.shared_aligned) \
		VMLINUX_SYMBOL(__per_cpu_end) = .; \
	}


/*
 * Definition of the high level *_SECTION macros
 * They will fit only a subset of the architectures
 */


/*
 * Writeable data.
 * All sections are combined in a single .data section.
 * The sections following CONSTRUCTORS are arranged so their
 * typical alignment matches.
 * A cacheline is typically/always less than a PAGE_SIZE, so
 * the sections that have this restriction (or a similar one)
 * are located before the ones requiring PAGE_SIZE alignment.
 * NOSAVE_DATA starts and ends with a PAGE_SIZE alignment, which
 * matches the requirement of PAGE_ALIGNED_DATA.
 *
 * Use 0 as page_align if page-aligned data is not used. */
#define RW_DATA_SECTION(cacheline, pagealigned, inittask) \
	. = ALIGN(PAGE_SIZE); \
	.data : AT(ADDR(.data) - LOAD_OFFSET) { \
		INIT_TASK_DATA(inittask) \
		CACHELINE_ALIGNED_DATA(cacheline) \
		READ_MOSTLY_DATA(cacheline) \
		DATA_DATA \
		CONSTRUCTORS \
		NOSAVE_DATA \
		PAGE_ALIGNED_DATA(pagealigned) \
	}
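
/*
 * Example (illustrative): a typical arch invocation, cacheline-aligning the
 * hot data and reserving THREAD_SIZE for the initial task's stack area:
 *
 *	RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
 */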

#define INIT_TEXT_SECTION(inittext_align) \
	. = ALIGN(inittext_align); \
	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(_sinittext) = .; \
		INIT_TEXT \
		VMLINUX_SYMBOL(_einittext) = .; \
	}

#define INIT_DATA_SECTION(initsetup_align) \
	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { \
		INIT_DATA \
		INIT_SETUP(initsetup_align) \
		INIT_CALLS \
		CON_INITCALL \
		SECURITY_INITCALL \
		INIT_RAM_FS \
	}

#define BSS_SECTION(sbss_align, bss_align) \
	SBSS \
	BSS(bss_align) \
	. = ALIGN(4);