parisc: add ftrace (function and graph tracer) functionality
arch/parisc/kernel/vmlinux.lds.S
/* Kernel link layout for various "sections"
 *
 * Copyright (C) 1999-2003 Matthew Wilcox <willy at parisc-linux.org>
 * Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
 * Copyright (C) 2000 John Marvin <jsm at parisc-linux.org>
 * Copyright (C) 2000 Michael Ang <mang with subcarrier.org>
 * Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
 * Copyright (C) 2003 James Bottomley <jejb with parisc-linux.org>
 * Copyright (C) 2006 Helge Deller <deller@gmx.de>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <asm-generic/vmlinux.lds.h>
/* needed for the processor specific cache alignment size */
#include <asm/cache.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>

/* ld script to make hppa Linux kernel */
#ifndef CONFIG_64BIT
OUTPUT_FORMAT("elf32-hppa-linux")
OUTPUT_ARCH(hppa)
#else
OUTPUT_FORMAT("elf64-hppa-linux")
OUTPUT_ARCH(hppa:hppa2.0w)
#endif

ENTRY(_stext)
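/* On 32-bit kernels, "jiffies" is an alias for the low 32-bit word of the
 * 64-bit jiffies_64 counter; parisc is big-endian, so that word lives at
 * offset 4.
 */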
#ifndef CONFIG_64BIT
jiffies = jiffies_64 + 4;
#else
jiffies = jiffies_64;
#endif
SECTIONS
{
        . = KERNEL_BINARY_TEXT_START;

        _text = .;              /* Text and read-only data */
        .text ALIGN(16) : {
                HEAD_TEXT
                TEXT_TEXT
                SCHED_TEXT
                LOCK_TEXT
                KPROBES_TEXT
                IRQENTRY_TEXT
                *(.text.do_softirq)
                *(.text.sys_exit)
                *(.text.do_sigaltstack)
                *(.text.do_fork)
                *(.text.*)
                *(.fixup)
                *(.lock.text)           /* out-of-line lock text */
                *(.gnu.warning)
        } = 0
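        /* The "= 0" fill expression above makes the linker pad any alignment
         * gaps inside .text with zero bytes.
         */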
        /* End of text section */
        _etext = .;

        RODATA

        /* writeable */
        /* Make sure this is page aligned so
         * that we can properly leave these
         * as writable
         */
        . = ALIGN(PAGE_SIZE);
        data_start = .;
        . = ALIGN(16);
        /* Exception table */
        __ex_table : {
                __start___ex_table = .;
                *(__ex_table)
                __stop___ex_table = .;
        }
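        /* __start/__stop___ex_table bound the table of fixup entries that the
         * fault handler searches when a kernel instruction accessing user
         * space faults, so it can branch to the fixup code instead of oopsing.
         */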

        NOTES

        /* unwind info */
        .PARISC.unwind : {
                __start___unwind = .;
                *(.PARISC.unwind)
                __stop___unwind = .;
        }
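        /* The kernel's stack unwinder walks the records collected between
         * __start___unwind and __stop___unwind to produce backtraces.
         */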

        /* rarely changed data like cpu maps */
        . = ALIGN(16);
        .data.read_mostly : {
                *(.data.read_mostly)
        }
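        /* Grouping read-mostly data keeps it away from frequently written
         * variables, so its cache lines are not bounced between CPUs by
         * false sharing.
         */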

        . = ALIGN(L1_CACHE_BYTES);
        /* Data */
        .data : {
                DATA_DATA
                CONSTRUCTORS
        }

        . = ALIGN(L1_CACHE_BYTES);
        .data.cacheline_aligned : {
                *(.data.cacheline_aligned)
        }

        /* PA-RISC locks require 16-byte alignment */
        . = ALIGN(16);
        .data.lock_aligned : {
                *(.data.lock_aligned)
        }
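        /* The ldcw (load and clear word) instruction used to build spinlocks
         * requires its operand to be 16-byte aligned, hence the alignment of
         * this section.
         */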

        /* nosave data is really only used for software suspend...it's here
         * just in case we ever implement it
         */
        . = ALIGN(PAGE_SIZE);
        __nosave_begin = .;
        .data_nosave : {
                *(.data.nosave)
        }
        . = ALIGN(PAGE_SIZE);
        __nosave_end = .;

        /* End of data section */
        _edata = .;

        /* BSS */
        __bss_start = .;
        /* page table entries need to be PAGE_SIZE aligned */
        . = ALIGN(PAGE_SIZE);
        .data.vmpages : {
                *(.data.vm0.pmd)
                *(.data.vm0.pgd)
                *(.data.vm0.pte)
        }
        .bss : {
                *(.bss)
                *(COMMON)
        }
        __bss_stop = .;


        /* assembler code expects init_task to be 16k aligned */
        . = ALIGN(16384);
        /* init_task */
        .data.init_task : {
                *(.data.init_task)
        }
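        /* .data.init_task holds the statically allocated thread union (the
         * initial kernel stack plus thread_info) for the boot task; the
         * assembly entry code relies on the 16k alignment noted above.
         */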

#ifdef CONFIG_64BIT
        . = ALIGN(16);
        /* Linkage tables */
        .opd : {
                *(.opd)
        } PROVIDE (__gp = .);
        .plt : {
                *(.plt)
        }
        .dlt : {
                *(.dlt)
        }
#endif
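        /* On 64-bit hppa, function pointers refer to procedure descriptors
         * rather than code addresses; .opd collects those descriptors, and
         * __gp is the global pointer that generated code expects.
         */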

        /* reserve space for interrupt stack by aligning __init* to 16k */
        . = ALIGN(16384);
        __init_begin = .;
        .init.text : {
                _sinittext = .;
                INIT_TEXT
                _einittext = .;
        }
        .init.data : {
                INIT_DATA
        }
        . = ALIGN(16);
        .init.setup : {
                __setup_start = .;
                *(.init.setup)
                __setup_end = .;
        }
        .initcall.init : {
                __initcall_start = .;
                INITCALLS
                __initcall_end = .;
        }
        .con_initcall.init : {
                __con_initcall_start = .;
                *(.con_initcall.init)
                __con_initcall_end = .;
        }
        SECURITY_INIT

        /* alternate instruction replacement.  This is a mechanism x86 uses
         * to detect the CPU type and replace generic instruction sequences
         * with CPU specific ones.  We don't currently do this in PA, but
         * it seems like a good idea...
         */
        . = ALIGN(4);
        .altinstructions : {
                __alt_instructions = .;
                *(.altinstructions)
                __alt_instructions_end = .;
        }
        .altinstr_replacement : {
                *(.altinstr_replacement)
        }

        /* .exit.text is discarded at runtime, not link time, to deal with
         * references from .altinstructions and .eh_frame
         */
        .exit.text : {
                EXIT_TEXT
        }
        .exit.data : {
                EXIT_DATA
        }
#ifdef CONFIG_BLK_DEV_INITRD
        . = ALIGN(PAGE_SIZE);
        .init.ramfs : {
                __initramfs_start = .;
                *(.init.ramfs)
                __initramfs_end = .;
        }
#endif

        PERCPU(PAGE_SIZE)
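        /* PERCPU() from asm-generic/vmlinux.lds.h emits the per-CPU data
         * section (the template copied for each possible CPU at boot),
         * bounded by __per_cpu_start/__per_cpu_end and page aligned here.
         */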
        . = ALIGN(PAGE_SIZE);
        __init_end = .;
        /* freed after init ends here */
        _end = . ;

        /* Sections to be discarded */
        /DISCARD/ : {
                *(.exitcall.exit)
#ifdef CONFIG_64BIT
                /* temporary hack until binutils is fixed to not emit these
                 * for static binaries
                 */
                *(.interp)
                *(.dynsym)
                *(.dynstr)
                *(.dynamic)
                *(.hash)
                *(.gnu.hash)
#endif
        }

        STABS_DEBUG
        .note 0 : { *(.note) }
}