clocksource: add common of_clksrc_init() function
[deliverable/linux.git] include/asm-generic/vmlinux.lds.h
1/*
2 * Helper macros to support writing architecture specific
3 * linker scripts.
4 *
5 * A minimal linker script has the following content:
6 * [This is a sample; architectures may have special requirements]
7 *
8 * OUTPUT_FORMAT(...)
9 * OUTPUT_ARCH(...)
10 * ENTRY(...)
11 * SECTIONS
12 * {
13 * . = START;
14 * __init_begin = .;
15 * HEAD_TEXT_SECTION
16 * INIT_TEXT_SECTION(PAGE_SIZE)
17 * INIT_DATA_SECTION(...)
18 * PERCPU_SECTION(CACHELINE_SIZE)
19 * __init_end = .;
20 *
21 * _stext = .;
22 * TEXT_SECTION = 0
23 * _etext = .;
24 *
25 * _sdata = .;
26 * RO_DATA_SECTION(PAGE_SIZE)
27 * RW_DATA_SECTION(...)
28 * _edata = .;
29 *
30 * EXCEPTION_TABLE(...)
31 * NOTES
32 *
33 * BSS_SECTION(0, 0, 0)
34 * _end = .;
35 *
36 * STABS_DEBUG
37 * DWARF_DEBUG
38 *
39 * DISCARDS // must be the last
40 * }
41 *
42 * [__init_begin, __init_end] is the init section that may be freed after init
43 * [_stext, _etext] is the text section
44 * [_sdata, _edata] is the data section
45 *
46 * Some of the included output sections have their own set of constants.
47 * Examples are: [__initramfs_start, __initramfs_end] for initramfs and
48 * [__nosave_begin, __nosave_end] for the nosave data
49 */
50
51#ifndef LOAD_OFFSET
52#define LOAD_OFFSET 0
53#endif
54
55#ifndef SYMBOL_PREFIX
56#define VMLINUX_SYMBOL(sym) sym
57#else
58#define PASTE2(x,y) x##y
59#define PASTE(x,y) PASTE2(x,y)
60#define VMLINUX_SYMBOL(sym) PASTE(SYMBOL_PREFIX, sym)
61#endif
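/*
 * Illustration (not part of the header itself): on an architecture whose
 * toolchain prefixes C symbols with an underscore, the build is assumed to
 * pass something like -DSYMBOL_PREFIX=_ (blackfin/h8300 style), so that
 *
 *	VMLINUX_SYMBOL(__start___ex_table) = .;
 *
 * expands to
 *
 *	___start___ex_table = .;
 *
 * and the linker-defined address matches the C declaration
 *	extern struct exception_table_entry __start___ex_table[];
 * once the compiler has added its own leading underscore.
 */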
62
63/* Align . to an 8 byte boundary, which equals the maximum function alignment. */
64#define ALIGN_FUNCTION() . = ALIGN(8)
65
66/*
67 * Align to a 32 byte boundary equal to the
68 * alignment gcc 4.5 uses for a struct
69 */
70#define STRUCT_ALIGNMENT 32
71#define STRUCT_ALIGN() . = ALIGN(STRUCT_ALIGNMENT)
72
73/* The actual configuration determines whether the init/exit sections
74 * are handled as text/data or whether they can be discarded (which
75 * often happens at runtime)
76 */
77#ifdef CONFIG_HOTPLUG
78#define DEV_KEEP(sec) *(.dev##sec)
79#define DEV_DISCARD(sec)
80#else
81#define DEV_KEEP(sec)
82#define DEV_DISCARD(sec) *(.dev##sec)
83#endif
84
85#ifdef CONFIG_HOTPLUG_CPU
86#define CPU_KEEP(sec) *(.cpu##sec)
87#define CPU_DISCARD(sec)
88#else
89#define CPU_KEEP(sec)
90#define CPU_DISCARD(sec) *(.cpu##sec)
91#endif
92
93#if defined(CONFIG_MEMORY_HOTPLUG)
94#define MEM_KEEP(sec) *(.mem##sec)
95#define MEM_DISCARD(sec)
96#else
97#define MEM_KEEP(sec)
98#define MEM_DISCARD(sec) *(.mem##sec)
99#endif
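/*
 * Hedged example of how code reaches these sections, assuming the old-style
 * annotations (__devinit and friends) expand to __section(.devinit.text) and
 * so on in this tree:
 *
 *	static int __devinit foo_probe(struct platform_device *pdev)
 *	{
 *		return 0;
 *	}
 *
 * foo_probe is emitted into .devinit.text.  With CONFIG_HOTPLUG=y,
 * DEV_KEEP(init.text) (used by TEXT_TEXT below) keeps it in .text so devices
 * can still be probed after boot; otherwise DEV_DISCARD(init.text) (used by
 * INIT_TEXT) places it in .init.text, which is freed once init completes.
 */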
100
101#ifdef CONFIG_FTRACE_MCOUNT_RECORD
102#define MCOUNT_REC() . = ALIGN(8); \
103 VMLINUX_SYMBOL(__start_mcount_loc) = .; \
104 *(__mcount_loc) \
105 VMLINUX_SYMBOL(__stop_mcount_loc) = .;
106#else
107#define MCOUNT_REC()
108#endif
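/*
 * A minimal sketch of how a start/stop pair such as the one above is consumed
 * from C.  Each __mcount_loc entry is the address of an mcount call site
 * recorded at build time; the real consumer is ftrace_init() in
 * kernel/trace/ftrace.c, so treat the loop below as illustrative only:
 *
 *	extern unsigned long __start_mcount_loc[];
 *	extern unsigned long __stop_mcount_loc[];
 *
 *	static void __init walk_mcount_records(void)
 *	{
 *		unsigned long *p;
 *
 *		for (p = __start_mcount_loc; p < __stop_mcount_loc; p++)
 *			record_call_site(*p);	// hypothetical helper
 *	}
 */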
109
110#ifdef CONFIG_TRACE_BRANCH_PROFILING
111#define LIKELY_PROFILE() VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \
112 *(_ftrace_annotated_branch) \
113 VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .;
114#else
115#define LIKELY_PROFILE()
116#endif
117
118#ifdef CONFIG_PROFILE_ALL_BRANCHES
119#define BRANCH_PROFILE() VMLINUX_SYMBOL(__start_branch_profile) = .; \
120 *(_ftrace_branch) \
121 VMLINUX_SYMBOL(__stop_branch_profile) = .;
122#else
123#define BRANCH_PROFILE()
124#endif
125
126#ifdef CONFIG_EVENT_TRACING
127#define FTRACE_EVENTS() . = ALIGN(8); \
128 VMLINUX_SYMBOL(__start_ftrace_events) = .; \
129 *(_ftrace_events) \
130 VMLINUX_SYMBOL(__stop_ftrace_events) = .;
131#else
132#define FTRACE_EVENTS()
133#endif
134
135#ifdef CONFIG_TRACING
136#define TRACE_PRINTKS() VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .; \
137 *(__trace_printk_fmt) /* Trace_printk fmt pointers */ \
138 VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
139#else
140#define TRACE_PRINTKS()
141#endif
142
143#ifdef CONFIG_FTRACE_SYSCALLS
144#define TRACE_SYSCALLS() . = ALIGN(8); \
145 VMLINUX_SYMBOL(__start_syscalls_metadata) = .; \
146 *(__syscalls_metadata) \
147 VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
148#else
149#define TRACE_SYSCALLS()
150#endif
151
152#ifdef CONFIG_CLKSRC_OF
153#define CLKSRC_OF_TABLES() . = ALIGN(8); \
154 VMLINUX_SYMBOL(__clksrc_of_table) = .; \
155 *(__clksrc_of_table) \
156 *(__clksrc_of_table_end)
157#else
158#define CLKSRC_OF_TABLES()
159#endif
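/*
 * These markers back the common of_clksrc_init() helper this commit adds (see
 * the subject line above).  A hedged sketch of the intended flow, assuming
 * drivers register a DT compatible string plus an init callback and that the
 * table is terminated by a sentinel entry placed in __clksrc_of_table_end
 * (exact macro, struct and callback signatures may differ in the driver code):
 *
 *	// driver side: contribute an of_device_id to __clksrc_of_table
 *	CLOCKSOURCE_OF_DECLARE(my_timer, "vendor,my-timer", my_timer_init);
 *
 *	// core side: match device-tree nodes against the table
 *	extern struct of_device_id __clksrc_of_table[];
 *
 *	void __init of_clksrc_init(void)
 *	{
 *		struct device_node *np;
 *		const struct of_device_id *match;
 *		void (*init_func)(struct device_node *);
 *
 *		for_each_matching_node_and_match(np, __clksrc_of_table, &match) {
 *			init_func = match->data;
 *			init_func(np);
 *		}
 *	}
 */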
160
161#define KERNEL_DTB() \
162 STRUCT_ALIGN(); \
163 VMLINUX_SYMBOL(__dtb_start) = .; \
164 *(.dtb.init.rodata) \
165 VMLINUX_SYMBOL(__dtb_end) = .;
166
167/* .data section */
168#define DATA_DATA \
169 *(.data) \
170 *(.ref.data) \
171 *(.data..shared_aligned) /* percpu related */ \
172 DEV_KEEP(init.data) \
173 DEV_KEEP(exit.data) \
174 CPU_KEEP(init.data) \
175 CPU_KEEP(exit.data) \
176 MEM_KEEP(init.data) \
177 MEM_KEEP(exit.data) \
178 *(.data.unlikely) \
179 STRUCT_ALIGN(); \
180 *(__tracepoints) \
181 /* implement dynamic printk debug */ \
182 . = ALIGN(8); \
183 VMLINUX_SYMBOL(__start___jump_table) = .; \
184 *(__jump_table) \
185 VMLINUX_SYMBOL(__stop___jump_table) = .; \
186 . = ALIGN(8); \
187 VMLINUX_SYMBOL(__start___verbose) = .; \
188 *(__verbose) \
189 VMLINUX_SYMBOL(__stop___verbose) = .; \
190 LIKELY_PROFILE() \
191 BRANCH_PROFILE() \
192 TRACE_PRINTKS()
193
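/*
 * The __start___verbose/__stop___verbose pair above bounds the dynamic printk
 * descriptors generated for pr_debug()/dev_dbg() call sites.  Rough sketch of
 * the consumer side; the real walk is dynamic_debug_init() in
 * lib/dynamic_debug.c, so the loop body is illustrative only:
 *
 *	extern struct _ddebug __start___verbose[];
 *	extern struct _ddebug __stop___verbose[];
 *
 *	static int __init dump_ddebug_sites(void)
 *	{
 *		struct _ddebug *d;
 *
 *		for (d = __start___verbose; d < __stop___verbose; d++)
 *			pr_info("%s:%u %s\n", d->filename, d->lineno, d->function);
 *		return 0;
 *	}
 */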
194/*
195 * Data section helpers
196 */
197#define NOSAVE_DATA \
198 . = ALIGN(PAGE_SIZE); \
199 VMLINUX_SYMBOL(__nosave_begin) = .; \
200 *(.data..nosave) \
201 . = ALIGN(PAGE_SIZE); \
202 VMLINUX_SYMBOL(__nosave_end) = .;
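/*
 * Example of placing data in this range, assuming the __nosavedata annotation
 * (expected to expand to __section(.data..nosave)):
 *
 *	static int in_suspend __nosavedata;
 *
 * The hibernation core then excludes the page frames between __nosave_begin
 * and __nosave_end from the snapshot, so such variables are neither saved nor
 * restored across a suspend-to-disk cycle.
 */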
203
204#define PAGE_ALIGNED_DATA(page_align) \
205 . = ALIGN(page_align); \
206 *(.data..page_aligned)
207
208#define READ_MOSTLY_DATA(align) \
209 . = ALIGN(align); \
210 *(.data..read_mostly) \
211 . = ALIGN(align);
212
213#define CACHELINE_ALIGNED_DATA(align) \
214 . = ALIGN(align); \
215 *(.data..cacheline_aligned)
216
217#define INIT_TASK_DATA(align) \
218 . = ALIGN(align); \
219 *(.data..init_task)
220
221/*
222 * Read only Data
223 */
224#define RO_DATA_SECTION(align) \
225 . = ALIGN((align)); \
226 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
227 VMLINUX_SYMBOL(__start_rodata) = .; \
228 *(.rodata) *(.rodata.*) \
229 *(__vermagic) /* Kernel version magic */ \
230 . = ALIGN(8); \
231 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
232 *(__tracepoints_ptrs) /* Tracepoints: pointer array */\
233 VMLINUX_SYMBOL(__stop___tracepoints_ptrs) = .; \
234 *(__tracepoints_strings)/* Tracepoints: strings */ \
235 } \
236 \
237 .rodata1 : AT(ADDR(.rodata1) - LOAD_OFFSET) { \
238 *(.rodata1) \
239 } \
240 \
241 BUG_TABLE \
242 \
243 /* PCI quirks */ \
244 .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
245 VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \
246 *(.pci_fixup_early) \
247 VMLINUX_SYMBOL(__end_pci_fixups_early) = .; \
248 VMLINUX_SYMBOL(__start_pci_fixups_header) = .; \
249 *(.pci_fixup_header) \
250 VMLINUX_SYMBOL(__end_pci_fixups_header) = .; \
251 VMLINUX_SYMBOL(__start_pci_fixups_final) = .; \
252 *(.pci_fixup_final) \
253 VMLINUX_SYMBOL(__end_pci_fixups_final) = .; \
254 VMLINUX_SYMBOL(__start_pci_fixups_enable) = .; \
255 *(.pci_fixup_enable) \
256 VMLINUX_SYMBOL(__end_pci_fixups_enable) = .; \
257 VMLINUX_SYMBOL(__start_pci_fixups_resume) = .; \
258 *(.pci_fixup_resume) \
259 VMLINUX_SYMBOL(__end_pci_fixups_resume) = .; \
260 VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .; \
261 *(.pci_fixup_resume_early) \
262 VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .; \
263 VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .; \
264 *(.pci_fixup_suspend) \
265 VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .; \
266 } \
267 \
268 /* Built-in firmware blobs */ \
269 .builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \
270 VMLINUX_SYMBOL(__start_builtin_fw) = .; \
271 *(.builtin_fw) \
272 VMLINUX_SYMBOL(__end_builtin_fw) = .; \
273 } \
274 \
275 /* RapidIO route ops */ \
276 .rio_ops : AT(ADDR(.rio_ops) - LOAD_OFFSET) { \
277 VMLINUX_SYMBOL(__start_rio_switch_ops) = .; \
278 *(.rio_switch_ops) \
279 VMLINUX_SYMBOL(__end_rio_switch_ops) = .; \
280 } \
281 \
282 TRACEDATA \
283 \
284 /* Kernel symbol table: Normal symbols */ \
285 __ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
286 VMLINUX_SYMBOL(__start___ksymtab) = .; \
287 *(SORT(___ksymtab+*)) \
288 VMLINUX_SYMBOL(__stop___ksymtab) = .; \
289 } \
290 \
291 /* Kernel symbol table: GPL-only symbols */ \
292 __ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
293 VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \
294 *(SORT(___ksymtab_gpl+*)) \
295 VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \
296 } \
297 \
298 /* Kernel symbol table: Normal unused symbols */ \
299 __ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \
300 VMLINUX_SYMBOL(__start___ksymtab_unused) = .; \
301 *(SORT(___ksymtab_unused+*)) \
302 VMLINUX_SYMBOL(__stop___ksymtab_unused) = .; \
303 } \
304 \
305 /* Kernel symbol table: GPL-only unused symbols */ \
306 __ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
307 VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \
308 *(SORT(___ksymtab_unused_gpl+*)) \
309 VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \
310 } \
311 \
312 /* Kernel symbol table: GPL-future-only symbols */ \
313 __ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
314 VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \
315 *(SORT(___ksymtab_gpl_future+*)) \
316 VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \
317 } \
318 \
319 /* Kernel symbol table: Normal symbols */ \
320 __kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
321 VMLINUX_SYMBOL(__start___kcrctab) = .; \
322 *(SORT(___kcrctab+*)) \
323 VMLINUX_SYMBOL(__stop___kcrctab) = .; \
324 } \
325 \
326 /* Kernel symbol table: GPL-only symbols */ \
327 __kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
328 VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \
329 *(SORT(___kcrctab_gpl+*)) \
330 VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \
331 } \
332 \
333 /* Kernel symbol table: Normal unused symbols */ \
334 __kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \
335 VMLINUX_SYMBOL(__start___kcrctab_unused) = .; \
336 *(SORT(___kcrctab_unused+*)) \
337 VMLINUX_SYMBOL(__stop___kcrctab_unused) = .; \
338 } \
339 \
340 /* Kernel symbol table: GPL-only unused symbols */ \
341 __kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
342 VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \
343 *(SORT(___kcrctab_unused_gpl+*)) \
344 VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \
345 } \
346 \
347 /* Kernel symbol table: GPL-future-only symbols */ \
348 __kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
349 VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \
350 *(SORT(___kcrctab_gpl_future+*)) \
351 VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \
352 } \
353 \
354 /* Kernel symbol table: strings */ \
355 __ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \
356 *(__ksymtab_strings) \
357 } \
358 \
359 /* __*init sections */ \
360 __init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) { \
361 *(.ref.rodata) \
362 DEV_KEEP(init.rodata) \
363 DEV_KEEP(exit.rodata) \
364 CPU_KEEP(init.rodata) \
365 CPU_KEEP(exit.rodata) \
366 MEM_KEEP(init.rodata) \
367 MEM_KEEP(exit.rodata) \
368 } \
369 \
370 /* Built-in module parameters. */ \
371 __param : AT(ADDR(__param) - LOAD_OFFSET) { \
372 VMLINUX_SYMBOL(__start___param) = .; \
373 *(__param) \
374 VMLINUX_SYMBOL(__stop___param) = .; \
375 } \
376 \
377 /* Built-in module versions. */ \
378 __modver : AT(ADDR(__modver) - LOAD_OFFSET) { \
379 VMLINUX_SYMBOL(__start___modver) = .; \
380 *(__modver) \
381 VMLINUX_SYMBOL(__stop___modver) = .; \
382 . = ALIGN((align)); \
383 VMLINUX_SYMBOL(__end_rodata) = .; \
384 } \
385 . = ALIGN((align));
386
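/*
 * Sketch of how the exported-symbol tables gathered by RO_DATA_SECTION()
 * above are produced and consumed.  EXPORT_SYMBOL(foo) (include/linux/export.h)
 * emits a struct kernel_symbol into a per-symbol section named ___ksymtab+foo
 * (hence the SORT(___ksymtab+*) input pattern), the name string into
 * __ksymtab_strings, and, with CONFIG_MODVERSIONS, a CRC into ___kcrctab+foo.
 * The module loader in kernel/module.c resolves undefined module symbols
 * against these ranges; the linear search below is a simplified stand-in:
 *
 *	struct kernel_symbol {
 *		unsigned long value;
 *		const char *name;
 *	};
 *
 *	extern const struct kernel_symbol __start___ksymtab[];
 *	extern const struct kernel_symbol __stop___ksymtab[];
 *
 *	static const struct kernel_symbol *lookup_exported(const char *name)
 *	{
 *		const struct kernel_symbol *s;
 *
 *		for (s = __start___ksymtab; s < __stop___ksymtab; s++)
 *			if (strcmp(s->name, name) == 0)
 *				return s;
 *		return NULL;	// GPL-only and unused tables are searched too
 *	}
 */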
387/* RODATA & RO_DATA provided for backward compatibility.
388 * All archs are supposed to use RO_DATA() */
389#define RODATA RO_DATA_SECTION(4096)
390#define RO_DATA(align) RO_DATA_SECTION(align)
391
392#define SECURITY_INIT \
393 .security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
394 VMLINUX_SYMBOL(__security_initcall_start) = .; \
395 *(.security_initcall.init) \
396 VMLINUX_SYMBOL(__security_initcall_end) = .; \
397 }
398
399/* .text section. Map to function alignment to avoid address changes
400 * during the second ld pass when generating System.map */
401#define TEXT_TEXT \
402 ALIGN_FUNCTION(); \
403 *(.text.hot) \
404 *(.text) \
405 *(.ref.text) \
406 DEV_KEEP(init.text) \
407 DEV_KEEP(exit.text) \
408 CPU_KEEP(init.text) \
409 CPU_KEEP(exit.text) \
410 MEM_KEEP(init.text) \
411 MEM_KEEP(exit.text) \
412 *(.text.unlikely)
413
414
415/* sched.text is aligned to function alignment to ensure we have the same
416 * address even on the second ld pass when generating System.map */
417#define SCHED_TEXT \
418 ALIGN_FUNCTION(); \
419 VMLINUX_SYMBOL(__sched_text_start) = .; \
420 *(.sched.text) \
421 VMLINUX_SYMBOL(__sched_text_end) = .;
422
423/* spinlock.text is aligned to function alignment to ensure we have the same
424 * address even on the second ld pass when generating System.map */
425#define LOCK_TEXT \
426 ALIGN_FUNCTION(); \
427 VMLINUX_SYMBOL(__lock_text_start) = .; \
428 *(.spinlock.text) \
429 VMLINUX_SYMBOL(__lock_text_end) = .;
430
431#define KPROBES_TEXT \
432 ALIGN_FUNCTION(); \
433 VMLINUX_SYMBOL(__kprobes_text_start) = .; \
434 *(.kprobes.text) \
435 VMLINUX_SYMBOL(__kprobes_text_end) = .;
436
437#define ENTRY_TEXT \
438 ALIGN_FUNCTION(); \
439 VMLINUX_SYMBOL(__entry_text_start) = .; \
440 *(.entry.text) \
441 VMLINUX_SYMBOL(__entry_text_end) = .;
442
443#ifdef CONFIG_FUNCTION_GRAPH_TRACER
444#define IRQENTRY_TEXT \
445 ALIGN_FUNCTION(); \
446 VMLINUX_SYMBOL(__irqentry_text_start) = .; \
447 *(.irqentry.text) \
448 VMLINUX_SYMBOL(__irqentry_text_end) = .;
449#else
450#define IRQENTRY_TEXT
451#endif
452
453/* Section used for early init (in .S files) */
454#define HEAD_TEXT *(.head.text)
455
456#define HEAD_TEXT_SECTION \
457 .head.text : AT(ADDR(.head.text) - LOAD_OFFSET) { \
458 HEAD_TEXT \
459 }
460
461/*
462 * Exception table
463 */
464#define EXCEPTION_TABLE(align) \
465 . = ALIGN(align); \
466 __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { \
467 VMLINUX_SYMBOL(__start___ex_table) = .; \
468 *(__ex_table) \
469 VMLINUX_SYMBOL(__stop___ex_table) = .; \
470 }
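/*
 * The fault handlers consume this table through search_exception_tables() in
 * kernel/extable.c, which binary-searches the (sorted) entries.  Simplified,
 * hedged sketch; struct exception_table_entry is architecture specific and
 * the insn/fixup field names below are just the common convention:
 *
 *	extern struct exception_table_entry __start___ex_table[];
 *	extern struct exception_table_entry __stop___ex_table[];
 *
 *	static const struct exception_table_entry *find_fixup(unsigned long addr)
 *	{
 *		struct exception_table_entry *e;
 *
 *		for (e = __start___ex_table; e < __stop___ex_table; e++)
 *			if (e->insn == addr)
 *				return e;	// e->fixup is where execution resumes
 *		return NULL;
 *	}
 */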
471
472/*
473 * Init task
474 */
475#define INIT_TASK_DATA_SECTION(align) \
476 . = ALIGN(align); \
477 .data..init_task : AT(ADDR(.data..init_task) - LOAD_OFFSET) { \
478 INIT_TASK_DATA(align) \
479 }
480
481#ifdef CONFIG_CONSTRUCTORS
482#define KERNEL_CTORS() . = ALIGN(8); \
483 VMLINUX_SYMBOL(__ctors_start) = .; \
484 *(.ctors) \
485 VMLINUX_SYMBOL(__ctors_end) = .;
486#else
487#define KERNEL_CTORS()
488#endif
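/*
 * These markers bound the .ctors function pointers, which early boot runs in
 * order.  This closely mirrors do_ctors() in init/main.c (sketch, using the
 * ctor_fn_t typedef from include/linux/init.h):
 *
 *	typedef void (*ctor_fn_t)(void);
 *	extern ctor_fn_t __ctors_start[], __ctors_end[];
 *
 *	static void __init do_ctors(void)
 *	{
 *		ctor_fn_t *fn;
 *
 *		for (fn = __ctors_start; fn < __ctors_end; fn++)
 *			(*fn)();
 *	}
 */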
489
490/* init and exit section handling */
491#define INIT_DATA \
492 *(.init.data) \
493 DEV_DISCARD(init.data) \
494 CPU_DISCARD(init.data) \
495 MEM_DISCARD(init.data) \
496 KERNEL_CTORS() \
497 MCOUNT_REC() \
498 *(.init.rodata) \
499 FTRACE_EVENTS() \
500 TRACE_SYSCALLS() \
501 DEV_DISCARD(init.rodata) \
502 CPU_DISCARD(init.rodata) \
503 MEM_DISCARD(init.rodata) \
504 CLKSRC_OF_TABLES() \
505 KERNEL_DTB()
506
507#define INIT_TEXT \
508 *(.init.text) \
509 DEV_DISCARD(init.text) \
510 CPU_DISCARD(init.text) \
511 MEM_DISCARD(init.text)
512
513#define EXIT_DATA \
514 *(.exit.data) \
515 DEV_DISCARD(exit.data) \
516 DEV_DISCARD(exit.rodata) \
517 CPU_DISCARD(exit.data) \
518 CPU_DISCARD(exit.rodata) \
519 MEM_DISCARD(exit.data) \
520 MEM_DISCARD(exit.rodata)
521
522#define EXIT_TEXT \
523 *(.exit.text) \
524 DEV_DISCARD(exit.text) \
525 CPU_DISCARD(exit.text) \
526 MEM_DISCARD(exit.text)
527
528#define EXIT_CALL \
529 *(.exitcall.exit)
530
531/*
532 * bss (Block Started by Symbol) - uninitialized data
533 * zeroed during startup
534 */
535#define SBSS(sbss_align) \
536 . = ALIGN(sbss_align); \
537 .sbss : AT(ADDR(.sbss) - LOAD_OFFSET) { \
538 *(.sbss) \
539 *(.scommon) \
540 }
541
542/*
543 * Allow architectures to redefine BSS_FIRST_SECTIONS to add extra
544 * sections to the front of bss.
545 */
546#ifndef BSS_FIRST_SECTIONS
547#define BSS_FIRST_SECTIONS
548#endif
549
550#define BSS(bss_align) \
551 . = ALIGN(bss_align); \
552 .bss : AT(ADDR(.bss) - LOAD_OFFSET) { \
553 BSS_FIRST_SECTIONS \
554 *(.bss..page_aligned) \
555 *(.dynbss) \
556 *(.bss) \
557 *(COMMON) \
558 }
559
560/*
561 * DWARF debug sections.
562 * Symbols in the DWARF debugging sections are relative to
563 * the beginning of the section so we begin them at 0.
564 */
565#define DWARF_DEBUG \
566 /* DWARF 1 */ \
567 .debug 0 : { *(.debug) } \
568 .line 0 : { *(.line) } \
569 /* GNU DWARF 1 extensions */ \
570 .debug_srcinfo 0 : { *(.debug_srcinfo) } \
571 .debug_sfnames 0 : { *(.debug_sfnames) } \
572 /* DWARF 1.1 and DWARF 2 */ \
573 .debug_aranges 0 : { *(.debug_aranges) } \
574 .debug_pubnames 0 : { *(.debug_pubnames) } \
575 /* DWARF 2 */ \
576 .debug_info 0 : { *(.debug_info \
577 .gnu.linkonce.wi.*) } \
578 .debug_abbrev 0 : { *(.debug_abbrev) } \
579 .debug_line 0 : { *(.debug_line) } \
580 .debug_frame 0 : { *(.debug_frame) } \
581 .debug_str 0 : { *(.debug_str) } \
582 .debug_loc 0 : { *(.debug_loc) } \
583 .debug_macinfo 0 : { *(.debug_macinfo) } \
584 /* SGI/MIPS DWARF 2 extensions */ \
585 .debug_weaknames 0 : { *(.debug_weaknames) } \
586 .debug_funcnames 0 : { *(.debug_funcnames) } \
587 .debug_typenames 0 : { *(.debug_typenames) } \
588 .debug_varnames 0 : { *(.debug_varnames) } \
589
590 /* Stabs debugging sections. */
591#define STABS_DEBUG \
592 .stab 0 : { *(.stab) } \
593 .stabstr 0 : { *(.stabstr) } \
594 .stab.excl 0 : { *(.stab.excl) } \
595 .stab.exclstr 0 : { *(.stab.exclstr) } \
596 .stab.index 0 : { *(.stab.index) } \
597 .stab.indexstr 0 : { *(.stab.indexstr) } \
598 .comment 0 : { *(.comment) }
599
600#ifdef CONFIG_GENERIC_BUG
601#define BUG_TABLE \
602 . = ALIGN(8); \
603 __bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) { \
604 VMLINUX_SYMBOL(__start___bug_table) = .; \
605 *(__bug_table) \
606 VMLINUX_SYMBOL(__stop___bug_table) = .; \
607 }
608#else
609#define BUG_TABLE
610#endif
611
612#ifdef CONFIG_PM_TRACE
613#define TRACEDATA \
614 . = ALIGN(4); \
615 .tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) { \
616 VMLINUX_SYMBOL(__tracedata_start) = .; \
617 *(.tracedata) \
618 VMLINUX_SYMBOL(__tracedata_end) = .; \
619 }
620#else
621#define TRACEDATA
622#endif
623
624#define NOTES \
625 .notes : AT(ADDR(.notes) - LOAD_OFFSET) { \
626 VMLINUX_SYMBOL(__start_notes) = .; \
627 *(.note.*) \
628 VMLINUX_SYMBOL(__stop_notes) = .; \
629 }
630
631#define INIT_SETUP(initsetup_align) \
632 . = ALIGN(initsetup_align); \
633 VMLINUX_SYMBOL(__setup_start) = .; \
634 *(.init.setup) \
635 VMLINUX_SYMBOL(__setup_end) = .;
636
637#define INIT_CALLS_LEVEL(level) \
638 VMLINUX_SYMBOL(__initcall##level##_start) = .; \
639 *(.initcall##level##.init) \
640 *(.initcall##level##s.init) \
641
642#define INIT_CALLS \
643 VMLINUX_SYMBOL(__initcall_start) = .; \
644 *(.initcallearly.init) \
645 INIT_CALLS_LEVEL(0) \
646 INIT_CALLS_LEVEL(1) \
647 INIT_CALLS_LEVEL(2) \
648 INIT_CALLS_LEVEL(3) \
649 INIT_CALLS_LEVEL(4) \
650 INIT_CALLS_LEVEL(5) \
651 INIT_CALLS_LEVEL(rootfs) \
652 INIT_CALLS_LEVEL(6) \
653 INIT_CALLS_LEVEL(7) \
654 VMLINUX_SYMBOL(__initcall_end) = .;
655
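/*
 * Each INIT_CALLS_LEVEL(n) above leaves an __initcall<n>_start marker, so the
 * boot code can run one level at a time.  Sketch of the per-level walk done by
 * do_initcalls()/do_initcall_level() in init/main.c (marker declarations
 * abridged; initcall_t comes from include/linux/init.h):
 *
 *	extern initcall_t __initcall0_start[], __initcall1_start[];
 *	// ... one start marker per level, with __initcall_end after the last
 *
 *	static void __init run_initcall_range(initcall_t *start, initcall_t *end)
 *	{
 *		initcall_t *fn;
 *
 *		for (fn = start; fn < end; fn++)
 *			do_one_initcall(*fn);
 *	}
 */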
656#define CON_INITCALL \
657 VMLINUX_SYMBOL(__con_initcall_start) = .; \
658 *(.con_initcall.init) \
659 VMLINUX_SYMBOL(__con_initcall_end) = .;
660
661#define SECURITY_INITCALL \
662 VMLINUX_SYMBOL(__security_initcall_start) = .; \
663 *(.security_initcall.init) \
664 VMLINUX_SYMBOL(__security_initcall_end) = .;
665
666#ifdef CONFIG_BLK_DEV_INITRD
667#define INIT_RAM_FS \
668 . = ALIGN(4); \
669 VMLINUX_SYMBOL(__initramfs_start) = .; \
670 *(.init.ramfs) \
671 . = ALIGN(8); \
672 *(.init.ramfs.info)
673#else
674#define INIT_RAM_FS
675#endif
676
677/*
678 * Default discarded sections.
679 *
680 * Some archs want to discard exit text/data at runtime rather than
681 * link time due to cross-section references such as alt instructions,
682 * bug table, eh_frame, etc. DISCARDS must be the last of output
683 * section definitions so that such archs put those in earlier section
684 * definitions.
685 */
686#define DISCARDS \
687 /DISCARD/ : { \
688 EXIT_TEXT \
689 EXIT_DATA \
690 EXIT_CALL \
691 *(.discard) \
692 *(.discard.*) \
693 }
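/*
 * Sketch of the pattern described above: an architecture that must keep exit
 * code until runtime claims it in its own vmlinux.lds.S before DISCARDS, e.g.
 * (hypothetical fragment):
 *
 *	.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
 *		EXIT_TEXT
 *	}
 *	...
 *	DISCARDS	// still last; .exit.text input sections were already
 *			// claimed above, so /DISCARD/ no longer matches them
 */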
694
695/**
696 * PERCPU_INPUT - the percpu input sections
697 * @cacheline: cacheline size
698 *
699 * The core percpu section names and core symbols which do not rely
700 * directly upon load addresses.
701 *
702 * @cacheline is used to align subsections to avoid false cacheline
703 * sharing between subsections for different purposes.
704 */
705#define PERCPU_INPUT(cacheline) \
706 VMLINUX_SYMBOL(__per_cpu_start) = .; \
707 *(.data..percpu..first) \
708 . = ALIGN(PAGE_SIZE); \
709 *(.data..percpu..page_aligned) \
710 . = ALIGN(cacheline); \
711 *(.data..percpu..readmostly) \
712 . = ALIGN(cacheline); \
713 *(.data..percpu) \
714 *(.data..percpu..shared_aligned) \
715 VMLINUX_SYMBOL(__per_cpu_end) = .;
716
717/**
718 * PERCPU_VADDR - define output section for percpu area
719 * @cacheline: cacheline size
720 * @vaddr: explicit base address (optional)
721 * @phdr: destination PHDR (optional)
722 *
723 * Macro which expands to output section for percpu area.
724 *
725 * @cacheline is used to align subsections to avoid false cacheline
726 * sharing between subsections for different purposes.
727 *
728 * If @vaddr is not blank, it specifies explicit base address and all
729 * percpu symbols will be offset from the given address. If blank,
730 * @vaddr always equals @laddr + LOAD_OFFSET.
731 *
732 * @phdr defines the output PHDR to use if not blank. Be warned that
733 * output PHDR is sticky. If @phdr is specified, the next output
734 * section in the linker script will go there too. @phdr should have
735 * a leading colon.
736 *
737 * Note that this macro defines __per_cpu_load as an absolute symbol.
738 * If there is no need to put the percpu section at a predetermined
739 * address, use PERCPU_SECTION.
740 */
741#define PERCPU_VADDR(cacheline, vaddr, phdr) \
742 VMLINUX_SYMBOL(__per_cpu_load) = .; \
743 .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
744 - LOAD_OFFSET) { \
745 PERCPU_INPUT(cacheline) \
746 } phdr \
747 . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
748
749/**
750 * PERCPU_SECTION - define output section for percpu area, simple version
751 * @cacheline: cacheline size
752 *
753 * Aligns to PAGE_SIZE and outputs the output section for the percpu area. This
754 * macro doesn't manipulate @vaddr or @phdr, and __per_cpu_load and
755 * __per_cpu_start will be identical.
756 *
757 * This macro is equivalent to ALIGN(PAGE_SIZE); PERCPU_VADDR(@cacheline,,)
758 * except that __per_cpu_load is defined as a relative symbol against
759 * .data..percpu which is required for relocatable x86_32 configuration.
760 */
761#define PERCPU_SECTION(cacheline) \
762 . = ALIGN(PAGE_SIZE); \
763 .data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) { \
764 VMLINUX_SYMBOL(__per_cpu_load) = .; \
765 PERCPU_INPUT(cacheline) \
766 }
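/*
 * Typical usage, as a sketch: most architectures simply place
 *
 *	PERCPU_SECTION(L1_CACHE_BYTES)
 *
 * in their vmlinux.lds.S between the init sections (as in the sample layout
 * at the top of this file), while an arch that needs the area at a fixed
 * address uses PERCPU_VADDR(cacheline, vaddr, :percpu) as documented above.
 */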
767
768
769/*
770 * Definition of the high level *_SECTION macros
771 * They will fit only a subset of the architectures
772 */
773
774
775/*
776 * Writeable data.
777 * All sections are combined in a single .data section.
778 * The sections following CONSTRUCTORS are arranged so their
779 * typical alignment matches.
780 * A cacheline is typically/always less than a PAGE_SIZE, so
781 * the sections that have this restriction (or a similar one)
782 * are located before the ones requiring PAGE_SIZE alignment.
783 * NOSAVE_DATA starts and ends with a PAGE_SIZE alignment which
784 * matches the requirement of PAGE_ALIGNED_DATA.
785 *
786 * use 0 as page_align if page_aligned data is not used */
787#define RW_DATA_SECTION(cacheline, pagealigned, inittask) \
788 . = ALIGN(PAGE_SIZE); \
789 .data : AT(ADDR(.data) - LOAD_OFFSET) { \
790 INIT_TASK_DATA(inittask) \
791 NOSAVE_DATA \
792 PAGE_ALIGNED_DATA(pagealigned) \
793 CACHELINE_ALIGNED_DATA(cacheline) \
794 READ_MOSTLY_DATA(cacheline) \
795 DATA_DATA \
796 CONSTRUCTORS \
797 }
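/*
 * Illustrative use (arguments: the cacheline size, the alignment for
 * page-aligned data, and the init-task alignment; pass 0 for the second one
 * if page-aligned data is unused, per the note above).  The values below are
 * what a typical arch passes, quoted from memory rather than from this tree:
 *
 *	RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
 */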
798
799#define INIT_TEXT_SECTION(inittext_align) \
800 . = ALIGN(inittext_align); \
801 .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { \
802 VMLINUX_SYMBOL(_sinittext) = .; \
803 INIT_TEXT \
804 VMLINUX_SYMBOL(_einittext) = .; \
805 }
806
807#define INIT_DATA_SECTION(initsetup_align) \
808 .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { \
809 INIT_DATA \
810 INIT_SETUP(initsetup_align) \
811 INIT_CALLS \
812 CON_INITCALL \
813 SECURITY_INITCALL \
814 INIT_RAM_FS \
815 }
816
817#define BSS_SECTION(sbss_align, bss_align, stop_align) \
818 . = ALIGN(sbss_align); \
819 VMLINUX_SYMBOL(__bss_start) = .; \
820 SBSS(sbss_align) \
821 BSS(bss_align) \
822 . = ALIGN(stop_align); \
823 VMLINUX_SYMBOL(__bss_stop) = .;