1 /* 32-bit ELF support for ARM
2 Copyright 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
3 2008, 2009, 2010 Free Software Foundation, Inc.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "sysdep.h"
23 #include <limits.h>
24
25 #include "bfd.h"
26 #include "libiberty.h"
27 #include "libbfd.h"
28 #include "elf-bfd.h"
29 #include "elf-vxworks.h"
30 #include "elf/arm.h"
31
32 /* Return the relocation section associated with NAME. HTAB is the
33 bfd's elf32_arm_link_hash_table. */
34 #define RELOC_SECTION(HTAB, NAME) \
35 ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
36
37 /* Return size of a relocation entry. HTAB is the bfd's
38 elf32_arm_link_hash_table. */
39 #define RELOC_SIZE(HTAB) \
40 ((HTAB)->use_rel \
41 ? sizeof (Elf32_External_Rel) \
42 : sizeof (Elf32_External_Rela))
43
44 /* Return function to swap relocations in. HTAB is the bfd's
45 elf32_arm_link_hash_table. */
46 #define SWAP_RELOC_IN(HTAB) \
47 ((HTAB)->use_rel \
48 ? bfd_elf32_swap_reloc_in \
49 : bfd_elf32_swap_reloca_in)
50
51 /* Return function to swap relocations out. HTAB is the bfd's
52 elf32_arm_link_hash_table. */
53 #define SWAP_RELOC_OUT(HTAB) \
54 ((HTAB)->use_rel \
55 ? bfd_elf32_swap_reloc_out \
56 : bfd_elf32_swap_reloca_out)
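
/* Editor's note: a minimal illustrative sketch (not part of the original
   file) of how the four helper macros above are typically used together.
   It is guarded out because struct elf32_arm_link_hash_table is only
   defined further down in the real file; the variable names here are
   hypothetical.  */
#if 0
static void
example_use_reloc_macros (bfd *output_bfd, bfd *dynobj,
                          struct elf32_arm_link_hash_table *htab)
{
  /* RELOC_SECTION picks ".rel.bss" or ".rela.bss" depending on whether
     this target uses REL or RELA dynamic relocations.  */
  asection *srelbss
    = bfd_get_section_by_name (dynobj, RELOC_SECTION (htab, ".bss"));

  if (srelbss != NULL)
    {
      Elf_Internal_Rela rel;
      bfd_byte *loc;

      /* RELOC_SIZE is the on-disk size of one Elf32_External_Rel(a),
	 so this points at the slot for the next relocation entry.  */
      loc = srelbss->contents + srelbss->reloc_count * RELOC_SIZE (htab);
      srelbss->reloc_count++;

      rel.r_offset = 0;
      rel.r_info = 0;
      rel.r_addend = 0;

      /* Write the entry out with the REL or RELA swapper, as appropriate.  */
      SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
    }
}
#endif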
57
58 #define elf_info_to_howto 0
59 #define elf_info_to_howto_rel elf32_arm_info_to_howto
60
61 #define ARM_ELF_ABI_VERSION 0
62 #define ARM_ELF_OS_ABI_VERSION ELFOSABI_ARM
63
64 static bfd_boolean elf32_arm_write_section (bfd *output_bfd,
65 struct bfd_link_info *link_info,
66 asection *sec,
67 bfd_byte *contents);
68
69 /* Note: code such as elf32_arm_reloc_type_lookup expects to use e.g.
70 R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO
71 in that slot. */
72
73 static reloc_howto_type elf32_arm_howto_table_1[] =
74 {
75 /* No relocation. */
76 HOWTO (R_ARM_NONE, /* type */
77 0, /* rightshift */
78 0, /* size (0 = byte, 1 = short, 2 = long) */
79 0, /* bitsize */
80 FALSE, /* pc_relative */
81 0, /* bitpos */
82 complain_overflow_dont,/* complain_on_overflow */
83 bfd_elf_generic_reloc, /* special_function */
84 "R_ARM_NONE", /* name */
85 FALSE, /* partial_inplace */
86 0, /* src_mask */
87 0, /* dst_mask */
88 FALSE), /* pcrel_offset */
89
90 HOWTO (R_ARM_PC24, /* type */
91 2, /* rightshift */
92 2, /* size (0 = byte, 1 = short, 2 = long) */
93 24, /* bitsize */
94 TRUE, /* pc_relative */
95 0, /* bitpos */
96 complain_overflow_signed,/* complain_on_overflow */
97 bfd_elf_generic_reloc, /* special_function */
98 "R_ARM_PC24", /* name */
99 FALSE, /* partial_inplace */
100 0x00ffffff, /* src_mask */
101 0x00ffffff, /* dst_mask */
102 TRUE), /* pcrel_offset */
103
104 /* 32 bit absolute */
105 HOWTO (R_ARM_ABS32, /* type */
106 0, /* rightshift */
107 2, /* size (0 = byte, 1 = short, 2 = long) */
108 32, /* bitsize */
109 FALSE, /* pc_relative */
110 0, /* bitpos */
111 complain_overflow_bitfield,/* complain_on_overflow */
112 bfd_elf_generic_reloc, /* special_function */
113 "R_ARM_ABS32", /* name */
114 FALSE, /* partial_inplace */
115 0xffffffff, /* src_mask */
116 0xffffffff, /* dst_mask */
117 FALSE), /* pcrel_offset */
118
119 /* standard 32bit pc-relative reloc */
120 HOWTO (R_ARM_REL32, /* type */
121 0, /* rightshift */
122 2, /* size (0 = byte, 1 = short, 2 = long) */
123 32, /* bitsize */
124 TRUE, /* pc_relative */
125 0, /* bitpos */
126 complain_overflow_bitfield,/* complain_on_overflow */
127 bfd_elf_generic_reloc, /* special_function */
128 "R_ARM_REL32", /* name */
129 FALSE, /* partial_inplace */
130 0xffffffff, /* src_mask */
131 0xffffffff, /* dst_mask */
132 TRUE), /* pcrel_offset */
133
134 /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */
135 HOWTO (R_ARM_LDR_PC_G0, /* type */
136 0, /* rightshift */
137 0, /* size (0 = byte, 1 = short, 2 = long) */
138 32, /* bitsize */
139 TRUE, /* pc_relative */
140 0, /* bitpos */
141 complain_overflow_dont,/* complain_on_overflow */
142 bfd_elf_generic_reloc, /* special_function */
143 "R_ARM_LDR_PC_G0", /* name */
144 FALSE, /* partial_inplace */
145 0xffffffff, /* src_mask */
146 0xffffffff, /* dst_mask */
147 TRUE), /* pcrel_offset */
148
149 /* 16 bit absolute */
150 HOWTO (R_ARM_ABS16, /* type */
151 0, /* rightshift */
152 1, /* size (0 = byte, 1 = short, 2 = long) */
153 16, /* bitsize */
154 FALSE, /* pc_relative */
155 0, /* bitpos */
156 complain_overflow_bitfield,/* complain_on_overflow */
157 bfd_elf_generic_reloc, /* special_function */
158 "R_ARM_ABS16", /* name */
159 FALSE, /* partial_inplace */
160 0x0000ffff, /* src_mask */
161 0x0000ffff, /* dst_mask */
162 FALSE), /* pcrel_offset */
163
164 /* 12 bit absolute */
165 HOWTO (R_ARM_ABS12, /* type */
166 0, /* rightshift */
167 2, /* size (0 = byte, 1 = short, 2 = long) */
168 12, /* bitsize */
169 FALSE, /* pc_relative */
170 0, /* bitpos */
171 complain_overflow_bitfield,/* complain_on_overflow */
172 bfd_elf_generic_reloc, /* special_function */
173 "R_ARM_ABS12", /* name */
174 FALSE, /* partial_inplace */
175 0x00000fff, /* src_mask */
176 0x00000fff, /* dst_mask */
177 FALSE), /* pcrel_offset */
178
179 HOWTO (R_ARM_THM_ABS5, /* type */
180 6, /* rightshift */
181 1, /* size (0 = byte, 1 = short, 2 = long) */
182 5, /* bitsize */
183 FALSE, /* pc_relative */
184 0, /* bitpos */
185 complain_overflow_bitfield,/* complain_on_overflow */
186 bfd_elf_generic_reloc, /* special_function */
187 "R_ARM_THM_ABS5", /* name */
188 FALSE, /* partial_inplace */
189 0x000007e0, /* src_mask */
190 0x000007e0, /* dst_mask */
191 FALSE), /* pcrel_offset */
192
193 /* 8 bit absolute */
194 HOWTO (R_ARM_ABS8, /* type */
195 0, /* rightshift */
196 0, /* size (0 = byte, 1 = short, 2 = long) */
197 8, /* bitsize */
198 FALSE, /* pc_relative */
199 0, /* bitpos */
200 complain_overflow_bitfield,/* complain_on_overflow */
201 bfd_elf_generic_reloc, /* special_function */
202 "R_ARM_ABS8", /* name */
203 FALSE, /* partial_inplace */
204 0x000000ff, /* src_mask */
205 0x000000ff, /* dst_mask */
206 FALSE), /* pcrel_offset */
207
208 HOWTO (R_ARM_SBREL32, /* type */
209 0, /* rightshift */
210 2, /* size (0 = byte, 1 = short, 2 = long) */
211 32, /* bitsize */
212 FALSE, /* pc_relative */
213 0, /* bitpos */
214 complain_overflow_dont,/* complain_on_overflow */
215 bfd_elf_generic_reloc, /* special_function */
216 "R_ARM_SBREL32", /* name */
217 FALSE, /* partial_inplace */
218 0xffffffff, /* src_mask */
219 0xffffffff, /* dst_mask */
220 FALSE), /* pcrel_offset */
221
222 HOWTO (R_ARM_THM_CALL, /* type */
223 1, /* rightshift */
224 2, /* size (0 = byte, 1 = short, 2 = long) */
225 24, /* bitsize */
226 TRUE, /* pc_relative */
227 0, /* bitpos */
228 complain_overflow_signed,/* complain_on_overflow */
229 bfd_elf_generic_reloc, /* special_function */
230 "R_ARM_THM_CALL", /* name */
231 FALSE, /* partial_inplace */
232 0x07ff07ff, /* src_mask */
233 0x07ff07ff, /* dst_mask */
234 TRUE), /* pcrel_offset */
235
236 HOWTO (R_ARM_THM_PC8, /* type */
237 1, /* rightshift */
238 1, /* size (0 = byte, 1 = short, 2 = long) */
239 8, /* bitsize */
240 TRUE, /* pc_relative */
241 0, /* bitpos */
242 complain_overflow_signed,/* complain_on_overflow */
243 bfd_elf_generic_reloc, /* special_function */
244 "R_ARM_THM_PC8", /* name */
245 FALSE, /* partial_inplace */
246 0x000000ff, /* src_mask */
247 0x000000ff, /* dst_mask */
248 TRUE), /* pcrel_offset */
249
250 HOWTO (R_ARM_BREL_ADJ, /* type */
251 1, /* rightshift */
252 1, /* size (0 = byte, 1 = short, 2 = long) */
253 32, /* bitsize */
254 FALSE, /* pc_relative */
255 0, /* bitpos */
256 complain_overflow_signed,/* complain_on_overflow */
257 bfd_elf_generic_reloc, /* special_function */
258 "R_ARM_BREL_ADJ", /* name */
259 FALSE, /* partial_inplace */
260 0xffffffff, /* src_mask */
261 0xffffffff, /* dst_mask */
262 FALSE), /* pcrel_offset */
263
264 HOWTO (R_ARM_SWI24, /* type */
265 0, /* rightshift */
266 0, /* size (0 = byte, 1 = short, 2 = long) */
267 0, /* bitsize */
268 FALSE, /* pc_relative */
269 0, /* bitpos */
270 complain_overflow_signed,/* complain_on_overflow */
271 bfd_elf_generic_reloc, /* special_function */
272 "R_ARM_SWI24", /* name */
273 FALSE, /* partial_inplace */
274 0x00000000, /* src_mask */
275 0x00000000, /* dst_mask */
276 FALSE), /* pcrel_offset */
277
278 HOWTO (R_ARM_THM_SWI8, /* type */
279 0, /* rightshift */
280 0, /* size (0 = byte, 1 = short, 2 = long) */
281 0, /* bitsize */
282 FALSE, /* pc_relative */
283 0, /* bitpos */
284 complain_overflow_signed,/* complain_on_overflow */
285 bfd_elf_generic_reloc, /* special_function */
286 "R_ARM_SWI8", /* name */
287 FALSE, /* partial_inplace */
288 0x00000000, /* src_mask */
289 0x00000000, /* dst_mask */
290 FALSE), /* pcrel_offset */
291
292 /* BLX instruction for the ARM. */
293 HOWTO (R_ARM_XPC25, /* type */
294 2, /* rightshift */
295 2, /* size (0 = byte, 1 = short, 2 = long) */
296 25, /* bitsize */
297 TRUE, /* pc_relative */
298 0, /* bitpos */
299 complain_overflow_signed,/* complain_on_overflow */
300 bfd_elf_generic_reloc, /* special_function */
301 "R_ARM_XPC25", /* name */
302 FALSE, /* partial_inplace */
303 0x00ffffff, /* src_mask */
304 0x00ffffff, /* dst_mask */
305 TRUE), /* pcrel_offset */
306
307 /* BLX instruction for the Thumb. */
308 HOWTO (R_ARM_THM_XPC22, /* type */
309 2, /* rightshift */
310 2, /* size (0 = byte, 1 = short, 2 = long) */
311 22, /* bitsize */
312 TRUE, /* pc_relative */
313 0, /* bitpos */
314 complain_overflow_signed,/* complain_on_overflow */
315 bfd_elf_generic_reloc, /* special_function */
316 "R_ARM_THM_XPC22", /* name */
317 FALSE, /* partial_inplace */
318 0x07ff07ff, /* src_mask */
319 0x07ff07ff, /* dst_mask */
320 TRUE), /* pcrel_offset */
321
322 /* Dynamic TLS relocations. */
323
324 HOWTO (R_ARM_TLS_DTPMOD32, /* type */
325 0, /* rightshift */
326 2, /* size (0 = byte, 1 = short, 2 = long) */
327 32, /* bitsize */
328 FALSE, /* pc_relative */
329 0, /* bitpos */
330 complain_overflow_bitfield,/* complain_on_overflow */
331 bfd_elf_generic_reloc, /* special_function */
332 "R_ARM_TLS_DTPMOD32", /* name */
333 TRUE, /* partial_inplace */
334 0xffffffff, /* src_mask */
335 0xffffffff, /* dst_mask */
336 FALSE), /* pcrel_offset */
337
338 HOWTO (R_ARM_TLS_DTPOFF32, /* type */
339 0, /* rightshift */
340 2, /* size (0 = byte, 1 = short, 2 = long) */
341 32, /* bitsize */
342 FALSE, /* pc_relative */
343 0, /* bitpos */
344 complain_overflow_bitfield,/* complain_on_overflow */
345 bfd_elf_generic_reloc, /* special_function */
346 "R_ARM_TLS_DTPOFF32", /* name */
347 TRUE, /* partial_inplace */
348 0xffffffff, /* src_mask */
349 0xffffffff, /* dst_mask */
350 FALSE), /* pcrel_offset */
351
352 HOWTO (R_ARM_TLS_TPOFF32, /* type */
353 0, /* rightshift */
354 2, /* size (0 = byte, 1 = short, 2 = long) */
355 32, /* bitsize */
356 FALSE, /* pc_relative */
357 0, /* bitpos */
358 complain_overflow_bitfield,/* complain_on_overflow */
359 bfd_elf_generic_reloc, /* special_function */
360 "R_ARM_TLS_TPOFF32", /* name */
361 TRUE, /* partial_inplace */
362 0xffffffff, /* src_mask */
363 0xffffffff, /* dst_mask */
364 FALSE), /* pcrel_offset */
365
366 /* Relocs used in ARM Linux */
367
368 HOWTO (R_ARM_COPY, /* type */
369 0, /* rightshift */
370 2, /* size (0 = byte, 1 = short, 2 = long) */
371 32, /* bitsize */
372 FALSE, /* pc_relative */
373 0, /* bitpos */
374 complain_overflow_bitfield,/* complain_on_overflow */
375 bfd_elf_generic_reloc, /* special_function */
376 "R_ARM_COPY", /* name */
377 TRUE, /* partial_inplace */
378 0xffffffff, /* src_mask */
379 0xffffffff, /* dst_mask */
380 FALSE), /* pcrel_offset */
381
382 HOWTO (R_ARM_GLOB_DAT, /* type */
383 0, /* rightshift */
384 2, /* size (0 = byte, 1 = short, 2 = long) */
385 32, /* bitsize */
386 FALSE, /* pc_relative */
387 0, /* bitpos */
388 complain_overflow_bitfield,/* complain_on_overflow */
389 bfd_elf_generic_reloc, /* special_function */
390 "R_ARM_GLOB_DAT", /* name */
391 TRUE, /* partial_inplace */
392 0xffffffff, /* src_mask */
393 0xffffffff, /* dst_mask */
394 FALSE), /* pcrel_offset */
395
396 HOWTO (R_ARM_JUMP_SLOT, /* type */
397 0, /* rightshift */
398 2, /* size (0 = byte, 1 = short, 2 = long) */
399 32, /* bitsize */
400 FALSE, /* pc_relative */
401 0, /* bitpos */
402 complain_overflow_bitfield,/* complain_on_overflow */
403 bfd_elf_generic_reloc, /* special_function */
404 "R_ARM_JUMP_SLOT", /* name */
405 TRUE, /* partial_inplace */
406 0xffffffff, /* src_mask */
407 0xffffffff, /* dst_mask */
408 FALSE), /* pcrel_offset */
409
410 HOWTO (R_ARM_RELATIVE, /* type */
411 0, /* rightshift */
412 2, /* size (0 = byte, 1 = short, 2 = long) */
413 32, /* bitsize */
414 FALSE, /* pc_relative */
415 0, /* bitpos */
416 complain_overflow_bitfield,/* complain_on_overflow */
417 bfd_elf_generic_reloc, /* special_function */
418 "R_ARM_RELATIVE", /* name */
419 TRUE, /* partial_inplace */
420 0xffffffff, /* src_mask */
421 0xffffffff, /* dst_mask */
422 FALSE), /* pcrel_offset */
423
424 HOWTO (R_ARM_GOTOFF32, /* type */
425 0, /* rightshift */
426 2, /* size (0 = byte, 1 = short, 2 = long) */
427 32, /* bitsize */
428 FALSE, /* pc_relative */
429 0, /* bitpos */
430 complain_overflow_bitfield,/* complain_on_overflow */
431 bfd_elf_generic_reloc, /* special_function */
432 "R_ARM_GOTOFF32", /* name */
433 TRUE, /* partial_inplace */
434 0xffffffff, /* src_mask */
435 0xffffffff, /* dst_mask */
436 FALSE), /* pcrel_offset */
437
438 HOWTO (R_ARM_GOTPC, /* type */
439 0, /* rightshift */
440 2, /* size (0 = byte, 1 = short, 2 = long) */
441 32, /* bitsize */
442 TRUE, /* pc_relative */
443 0, /* bitpos */
444 complain_overflow_bitfield,/* complain_on_overflow */
445 bfd_elf_generic_reloc, /* special_function */
446 "R_ARM_GOTPC", /* name */
447 TRUE, /* partial_inplace */
448 0xffffffff, /* src_mask */
449 0xffffffff, /* dst_mask */
450 TRUE), /* pcrel_offset */
451
452 HOWTO (R_ARM_GOT32, /* type */
453 0, /* rightshift */
454 2, /* size (0 = byte, 1 = short, 2 = long) */
455 32, /* bitsize */
456 FALSE, /* pc_relative */
457 0, /* bitpos */
458 complain_overflow_bitfield,/* complain_on_overflow */
459 bfd_elf_generic_reloc, /* special_function */
460 "R_ARM_GOT32", /* name */
461 TRUE, /* partial_inplace */
462 0xffffffff, /* src_mask */
463 0xffffffff, /* dst_mask */
464 FALSE), /* pcrel_offset */
465
466 HOWTO (R_ARM_PLT32, /* type */
467 2, /* rightshift */
468 2, /* size (0 = byte, 1 = short, 2 = long) */
469 24, /* bitsize */
470 TRUE, /* pc_relative */
471 0, /* bitpos */
472 complain_overflow_bitfield,/* complain_on_overflow */
473 bfd_elf_generic_reloc, /* special_function */
474 "R_ARM_PLT32", /* name */
475 FALSE, /* partial_inplace */
476 0x00ffffff, /* src_mask */
477 0x00ffffff, /* dst_mask */
478 TRUE), /* pcrel_offset */
479
480 HOWTO (R_ARM_CALL, /* type */
481 2, /* rightshift */
482 2, /* size (0 = byte, 1 = short, 2 = long) */
483 24, /* bitsize */
484 TRUE, /* pc_relative */
485 0, /* bitpos */
486 complain_overflow_signed,/* complain_on_overflow */
487 bfd_elf_generic_reloc, /* special_function */
488 "R_ARM_CALL", /* name */
489 FALSE, /* partial_inplace */
490 0x00ffffff, /* src_mask */
491 0x00ffffff, /* dst_mask */
492 TRUE), /* pcrel_offset */
493
494 HOWTO (R_ARM_JUMP24, /* type */
495 2, /* rightshift */
496 2, /* size (0 = byte, 1 = short, 2 = long) */
497 24, /* bitsize */
498 TRUE, /* pc_relative */
499 0, /* bitpos */
500 complain_overflow_signed,/* complain_on_overflow */
501 bfd_elf_generic_reloc, /* special_function */
502 "R_ARM_JUMP24", /* name */
503 FALSE, /* partial_inplace */
504 0x00ffffff, /* src_mask */
505 0x00ffffff, /* dst_mask */
506 TRUE), /* pcrel_offset */
507
508 HOWTO (R_ARM_THM_JUMP24, /* type */
509 1, /* rightshift */
510 2, /* size (0 = byte, 1 = short, 2 = long) */
511 24, /* bitsize */
512 TRUE, /* pc_relative */
513 0, /* bitpos */
514 complain_overflow_signed,/* complain_on_overflow */
515 bfd_elf_generic_reloc, /* special_function */
516 "R_ARM_THM_JUMP24", /* name */
517 FALSE, /* partial_inplace */
518 0x07ff2fff, /* src_mask */
519 0x07ff2fff, /* dst_mask */
520 TRUE), /* pcrel_offset */
521
522 HOWTO (R_ARM_BASE_ABS, /* type */
523 0, /* rightshift */
524 2, /* size (0 = byte, 1 = short, 2 = long) */
525 32, /* bitsize */
526 FALSE, /* pc_relative */
527 0, /* bitpos */
528 complain_overflow_dont,/* complain_on_overflow */
529 bfd_elf_generic_reloc, /* special_function */
530 "R_ARM_BASE_ABS", /* name */
531 FALSE, /* partial_inplace */
532 0xffffffff, /* src_mask */
533 0xffffffff, /* dst_mask */
534 FALSE), /* pcrel_offset */
535
536 HOWTO (R_ARM_ALU_PCREL7_0, /* type */
537 0, /* rightshift */
538 2, /* size (0 = byte, 1 = short, 2 = long) */
539 12, /* bitsize */
540 TRUE, /* pc_relative */
541 0, /* bitpos */
542 complain_overflow_dont,/* complain_on_overflow */
543 bfd_elf_generic_reloc, /* special_function */
544 "R_ARM_ALU_PCREL_7_0", /* name */
545 FALSE, /* partial_inplace */
546 0x00000fff, /* src_mask */
547 0x00000fff, /* dst_mask */
548 TRUE), /* pcrel_offset */
549
550 HOWTO (R_ARM_ALU_PCREL15_8, /* type */
551 0, /* rightshift */
552 2, /* size (0 = byte, 1 = short, 2 = long) */
553 12, /* bitsize */
554 TRUE, /* pc_relative */
555 8, /* bitpos */
556 complain_overflow_dont,/* complain_on_overflow */
557 bfd_elf_generic_reloc, /* special_function */
558 "R_ARM_ALU_PCREL_15_8",/* name */
559 FALSE, /* partial_inplace */
560 0x00000fff, /* src_mask */
561 0x00000fff, /* dst_mask */
562 TRUE), /* pcrel_offset */
563
564 HOWTO (R_ARM_ALU_PCREL23_15, /* type */
565 0, /* rightshift */
566 2, /* size (0 = byte, 1 = short, 2 = long) */
567 12, /* bitsize */
568 TRUE, /* pc_relative */
569 16, /* bitpos */
570 complain_overflow_dont,/* complain_on_overflow */
571 bfd_elf_generic_reloc, /* special_function */
572 "R_ARM_ALU_PCREL_23_15",/* name */
573 FALSE, /* partial_inplace */
574 0x00000fff, /* src_mask */
575 0x00000fff, /* dst_mask */
576 TRUE), /* pcrel_offset */
577
578 HOWTO (R_ARM_LDR_SBREL_11_0, /* type */
579 0, /* rightshift */
580 2, /* size (0 = byte, 1 = short, 2 = long) */
581 12, /* bitsize */
582 FALSE, /* pc_relative */
583 0, /* bitpos */
584 complain_overflow_dont,/* complain_on_overflow */
585 bfd_elf_generic_reloc, /* special_function */
586 "R_ARM_LDR_SBREL_11_0",/* name */
587 FALSE, /* partial_inplace */
588 0x00000fff, /* src_mask */
589 0x00000fff, /* dst_mask */
590 FALSE), /* pcrel_offset */
591
592 HOWTO (R_ARM_ALU_SBREL_19_12, /* type */
593 0, /* rightshift */
594 2, /* size (0 = byte, 1 = short, 2 = long) */
595 8, /* bitsize */
596 FALSE, /* pc_relative */
597 12, /* bitpos */
598 complain_overflow_dont,/* complain_on_overflow */
599 bfd_elf_generic_reloc, /* special_function */
600 "R_ARM_ALU_SBREL_19_12",/* name */
601 FALSE, /* partial_inplace */
602 0x000ff000, /* src_mask */
603 0x000ff000, /* dst_mask */
604 FALSE), /* pcrel_offset */
605
606 HOWTO (R_ARM_ALU_SBREL_27_20, /* type */
607 0, /* rightshift */
608 2, /* size (0 = byte, 1 = short, 2 = long) */
609 8, /* bitsize */
610 FALSE, /* pc_relative */
611 20, /* bitpos */
612 complain_overflow_dont,/* complain_on_overflow */
613 bfd_elf_generic_reloc, /* special_function */
614 "R_ARM_ALU_SBREL_27_20",/* name */
615 FALSE, /* partial_inplace */
616 0x0ff00000, /* src_mask */
617 0x0ff00000, /* dst_mask */
618 FALSE), /* pcrel_offset */
619
620 HOWTO (R_ARM_TARGET1, /* type */
621 0, /* rightshift */
622 2, /* size (0 = byte, 1 = short, 2 = long) */
623 32, /* bitsize */
624 FALSE, /* pc_relative */
625 0, /* bitpos */
626 complain_overflow_dont,/* complain_on_overflow */
627 bfd_elf_generic_reloc, /* special_function */
628 "R_ARM_TARGET1", /* name */
629 FALSE, /* partial_inplace */
630 0xffffffff, /* src_mask */
631 0xffffffff, /* dst_mask */
632 FALSE), /* pcrel_offset */
633
634 HOWTO (R_ARM_ROSEGREL32, /* type */
635 0, /* rightshift */
636 2, /* size (0 = byte, 1 = short, 2 = long) */
637 32, /* bitsize */
638 FALSE, /* pc_relative */
639 0, /* bitpos */
640 complain_overflow_dont,/* complain_on_overflow */
641 bfd_elf_generic_reloc, /* special_function */
642 "R_ARM_ROSEGREL32", /* name */
643 FALSE, /* partial_inplace */
644 0xffffffff, /* src_mask */
645 0xffffffff, /* dst_mask */
646 FALSE), /* pcrel_offset */
647
648 HOWTO (R_ARM_V4BX, /* type */
649 0, /* rightshift */
650 2, /* size (0 = byte, 1 = short, 2 = long) */
651 32, /* bitsize */
652 FALSE, /* pc_relative */
653 0, /* bitpos */
654 complain_overflow_dont,/* complain_on_overflow */
655 bfd_elf_generic_reloc, /* special_function */
656 "R_ARM_V4BX", /* name */
657 FALSE, /* partial_inplace */
658 0xffffffff, /* src_mask */
659 0xffffffff, /* dst_mask */
660 FALSE), /* pcrel_offset */
661
662 HOWTO (R_ARM_TARGET2, /* type */
663 0, /* rightshift */
664 2, /* size (0 = byte, 1 = short, 2 = long) */
665 32, /* bitsize */
666 FALSE, /* pc_relative */
667 0, /* bitpos */
668 complain_overflow_signed,/* complain_on_overflow */
669 bfd_elf_generic_reloc, /* special_function */
670 "R_ARM_TARGET2", /* name */
671 FALSE, /* partial_inplace */
672 0xffffffff, /* src_mask */
673 0xffffffff, /* dst_mask */
674 TRUE), /* pcrel_offset */
675
676 HOWTO (R_ARM_PREL31, /* type */
677 0, /* rightshift */
678 2, /* size (0 = byte, 1 = short, 2 = long) */
679 31, /* bitsize */
680 TRUE, /* pc_relative */
681 0, /* bitpos */
682 complain_overflow_signed,/* complain_on_overflow */
683 bfd_elf_generic_reloc, /* special_function */
684 "R_ARM_PREL31", /* name */
685 FALSE, /* partial_inplace */
686 0x7fffffff, /* src_mask */
687 0x7fffffff, /* dst_mask */
688 TRUE), /* pcrel_offset */
689
690 HOWTO (R_ARM_MOVW_ABS_NC, /* type */
691 0, /* rightshift */
692 2, /* size (0 = byte, 1 = short, 2 = long) */
693 16, /* bitsize */
694 FALSE, /* pc_relative */
695 0, /* bitpos */
696 complain_overflow_dont,/* complain_on_overflow */
697 bfd_elf_generic_reloc, /* special_function */
698 "R_ARM_MOVW_ABS_NC", /* name */
699 FALSE, /* partial_inplace */
700 0x000f0fff, /* src_mask */
701 0x000f0fff, /* dst_mask */
702 FALSE), /* pcrel_offset */
703
704 HOWTO (R_ARM_MOVT_ABS, /* type */
705 0, /* rightshift */
706 2, /* size (0 = byte, 1 = short, 2 = long) */
707 16, /* bitsize */
708 FALSE, /* pc_relative */
709 0, /* bitpos */
710 complain_overflow_bitfield,/* complain_on_overflow */
711 bfd_elf_generic_reloc, /* special_function */
712 "R_ARM_MOVT_ABS", /* name */
713 FALSE, /* partial_inplace */
714 0x000f0fff, /* src_mask */
715 0x000f0fff, /* dst_mask */
716 FALSE), /* pcrel_offset */
717
718 HOWTO (R_ARM_MOVW_PREL_NC, /* type */
719 0, /* rightshift */
720 2, /* size (0 = byte, 1 = short, 2 = long) */
721 16, /* bitsize */
722 TRUE, /* pc_relative */
723 0, /* bitpos */
724 complain_overflow_dont,/* complain_on_overflow */
725 bfd_elf_generic_reloc, /* special_function */
726 "R_ARM_MOVW_PREL_NC", /* name */
727 FALSE, /* partial_inplace */
728 0x000f0fff, /* src_mask */
729 0x000f0fff, /* dst_mask */
730 TRUE), /* pcrel_offset */
731
732 HOWTO (R_ARM_MOVT_PREL, /* type */
733 0, /* rightshift */
734 2, /* size (0 = byte, 1 = short, 2 = long) */
735 16, /* bitsize */
736 TRUE, /* pc_relative */
737 0, /* bitpos */
738 complain_overflow_bitfield,/* complain_on_overflow */
739 bfd_elf_generic_reloc, /* special_function */
740 "R_ARM_MOVT_PREL", /* name */
741 FALSE, /* partial_inplace */
742 0x000f0fff, /* src_mask */
743 0x000f0fff, /* dst_mask */
744 TRUE), /* pcrel_offset */
745
746 HOWTO (R_ARM_THM_MOVW_ABS_NC, /* type */
747 0, /* rightshift */
748 2, /* size (0 = byte, 1 = short, 2 = long) */
749 16, /* bitsize */
750 FALSE, /* pc_relative */
751 0, /* bitpos */
752 complain_overflow_dont,/* complain_on_overflow */
753 bfd_elf_generic_reloc, /* special_function */
754 "R_ARM_THM_MOVW_ABS_NC",/* name */
755 FALSE, /* partial_inplace */
756 0x040f70ff, /* src_mask */
757 0x040f70ff, /* dst_mask */
758 FALSE), /* pcrel_offset */
759
760 HOWTO (R_ARM_THM_MOVT_ABS, /* type */
761 0, /* rightshift */
762 2, /* size (0 = byte, 1 = short, 2 = long) */
763 16, /* bitsize */
764 FALSE, /* pc_relative */
765 0, /* bitpos */
766 complain_overflow_bitfield,/* complain_on_overflow */
767 bfd_elf_generic_reloc, /* special_function */
768 "R_ARM_THM_MOVT_ABS", /* name */
769 FALSE, /* partial_inplace */
770 0x040f70ff, /* src_mask */
771 0x040f70ff, /* dst_mask */
772 FALSE), /* pcrel_offset */
773
774 HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */
775 0, /* rightshift */
776 2, /* size (0 = byte, 1 = short, 2 = long) */
777 16, /* bitsize */
778 TRUE, /* pc_relative */
779 0, /* bitpos */
780 complain_overflow_dont,/* complain_on_overflow */
781 bfd_elf_generic_reloc, /* special_function */
782 "R_ARM_THM_MOVW_PREL_NC",/* name */
783 FALSE, /* partial_inplace */
784 0x040f70ff, /* src_mask */
785 0x040f70ff, /* dst_mask */
786 TRUE), /* pcrel_offset */
787
788 HOWTO (R_ARM_THM_MOVT_PREL, /* type */
789 0, /* rightshift */
790 2, /* size (0 = byte, 1 = short, 2 = long) */
791 16, /* bitsize */
792 TRUE, /* pc_relative */
793 0, /* bitpos */
794 complain_overflow_bitfield,/* complain_on_overflow */
795 bfd_elf_generic_reloc, /* special_function */
796 "R_ARM_THM_MOVT_PREL", /* name */
797 FALSE, /* partial_inplace */
798 0x040f70ff, /* src_mask */
799 0x040f70ff, /* dst_mask */
800 TRUE), /* pcrel_offset */
801
802 HOWTO (R_ARM_THM_JUMP19, /* type */
803 1, /* rightshift */
804 2, /* size (0 = byte, 1 = short, 2 = long) */
805 19, /* bitsize */
806 TRUE, /* pc_relative */
807 0, /* bitpos */
808 complain_overflow_signed,/* complain_on_overflow */
809 bfd_elf_generic_reloc, /* special_function */
810 "R_ARM_THM_JUMP19", /* name */
811 FALSE, /* partial_inplace */
812 0x043f2fff, /* src_mask */
813 0x043f2fff, /* dst_mask */
814 TRUE), /* pcrel_offset */
815
816 HOWTO (R_ARM_THM_JUMP6, /* type */
817 1, /* rightshift */
818 1, /* size (0 = byte, 1 = short, 2 = long) */
819 6, /* bitsize */
820 TRUE, /* pc_relative */
821 0, /* bitpos */
822 complain_overflow_unsigned,/* complain_on_overflow */
823 bfd_elf_generic_reloc, /* special_function */
824 "R_ARM_THM_JUMP6", /* name */
825 FALSE, /* partial_inplace */
826 0x02f8, /* src_mask */
827 0x02f8, /* dst_mask */
828 TRUE), /* pcrel_offset */
829
830 /* These are declared as 13-bit signed relocations because we can
831 address -4095 .. 4095(base) by altering ADDW to SUBW or vice
832 versa. */
833 HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */
834 0, /* rightshift */
835 2, /* size (0 = byte, 1 = short, 2 = long) */
836 13, /* bitsize */
837 TRUE, /* pc_relative */
838 0, /* bitpos */
839 complain_overflow_dont,/* complain_on_overflow */
840 bfd_elf_generic_reloc, /* special_function */
841 "R_ARM_THM_ALU_PREL_11_0",/* name */
842 FALSE, /* partial_inplace */
843 0xffffffff, /* src_mask */
844 0xffffffff, /* dst_mask */
845 TRUE), /* pcrel_offset */
846
847 HOWTO (R_ARM_THM_PC12, /* type */
848 0, /* rightshift */
849 2, /* size (0 = byte, 1 = short, 2 = long) */
850 13, /* bitsize */
851 TRUE, /* pc_relative */
852 0, /* bitpos */
853 complain_overflow_dont,/* complain_on_overflow */
854 bfd_elf_generic_reloc, /* special_function */
855 "R_ARM_THM_PC12", /* name */
856 FALSE, /* partial_inplace */
857 0xffffffff, /* src_mask */
858 0xffffffff, /* dst_mask */
859 TRUE), /* pcrel_offset */
860
861 HOWTO (R_ARM_ABS32_NOI, /* type */
862 0, /* rightshift */
863 2, /* size (0 = byte, 1 = short, 2 = long) */
864 32, /* bitsize */
865 FALSE, /* pc_relative */
866 0, /* bitpos */
867 complain_overflow_dont,/* complain_on_overflow */
868 bfd_elf_generic_reloc, /* special_function */
869 "R_ARM_ABS32_NOI", /* name */
870 FALSE, /* partial_inplace */
871 0xffffffff, /* src_mask */
872 0xffffffff, /* dst_mask */
873 FALSE), /* pcrel_offset */
874
875 HOWTO (R_ARM_REL32_NOI, /* type */
876 0, /* rightshift */
877 2, /* size (0 = byte, 1 = short, 2 = long) */
878 32, /* bitsize */
879 TRUE, /* pc_relative */
880 0, /* bitpos */
881 complain_overflow_dont,/* complain_on_overflow */
882 bfd_elf_generic_reloc, /* special_function */
883 "R_ARM_REL32_NOI", /* name */
884 FALSE, /* partial_inplace */
885 0xffffffff, /* src_mask */
886 0xffffffff, /* dst_mask */
887 FALSE), /* pcrel_offset */
888
889 /* Group relocations. */
890
891 HOWTO (R_ARM_ALU_PC_G0_NC, /* type */
892 0, /* rightshift */
893 2, /* size (0 = byte, 1 = short, 2 = long) */
894 32, /* bitsize */
895 TRUE, /* pc_relative */
896 0, /* bitpos */
897 complain_overflow_dont,/* complain_on_overflow */
898 bfd_elf_generic_reloc, /* special_function */
899 "R_ARM_ALU_PC_G0_NC", /* name */
900 FALSE, /* partial_inplace */
901 0xffffffff, /* src_mask */
902 0xffffffff, /* dst_mask */
903 TRUE), /* pcrel_offset */
904
905 HOWTO (R_ARM_ALU_PC_G0, /* type */
906 0, /* rightshift */
907 2, /* size (0 = byte, 1 = short, 2 = long) */
908 32, /* bitsize */
909 TRUE, /* pc_relative */
910 0, /* bitpos */
911 complain_overflow_dont,/* complain_on_overflow */
912 bfd_elf_generic_reloc, /* special_function */
913 "R_ARM_ALU_PC_G0", /* name */
914 FALSE, /* partial_inplace */
915 0xffffffff, /* src_mask */
916 0xffffffff, /* dst_mask */
917 TRUE), /* pcrel_offset */
918
919 HOWTO (R_ARM_ALU_PC_G1_NC, /* type */
920 0, /* rightshift */
921 2, /* size (0 = byte, 1 = short, 2 = long) */
922 32, /* bitsize */
923 TRUE, /* pc_relative */
924 0, /* bitpos */
925 complain_overflow_dont,/* complain_on_overflow */
926 bfd_elf_generic_reloc, /* special_function */
927 "R_ARM_ALU_PC_G1_NC", /* name */
928 FALSE, /* partial_inplace */
929 0xffffffff, /* src_mask */
930 0xffffffff, /* dst_mask */
931 TRUE), /* pcrel_offset */
932
933 HOWTO (R_ARM_ALU_PC_G1, /* type */
934 0, /* rightshift */
935 2, /* size (0 = byte, 1 = short, 2 = long) */
936 32, /* bitsize */
937 TRUE, /* pc_relative */
938 0, /* bitpos */
939 complain_overflow_dont,/* complain_on_overflow */
940 bfd_elf_generic_reloc, /* special_function */
941 "R_ARM_ALU_PC_G1", /* name */
942 FALSE, /* partial_inplace */
943 0xffffffff, /* src_mask */
944 0xffffffff, /* dst_mask */
945 TRUE), /* pcrel_offset */
946
947 HOWTO (R_ARM_ALU_PC_G2, /* type */
948 0, /* rightshift */
949 2, /* size (0 = byte, 1 = short, 2 = long) */
950 32, /* bitsize */
951 TRUE, /* pc_relative */
952 0, /* bitpos */
953 complain_overflow_dont,/* complain_on_overflow */
954 bfd_elf_generic_reloc, /* special_function */
955 "R_ARM_ALU_PC_G2", /* name */
956 FALSE, /* partial_inplace */
957 0xffffffff, /* src_mask */
958 0xffffffff, /* dst_mask */
959 TRUE), /* pcrel_offset */
960
961 HOWTO (R_ARM_LDR_PC_G1, /* type */
962 0, /* rightshift */
963 2, /* size (0 = byte, 1 = short, 2 = long) */
964 32, /* bitsize */
965 TRUE, /* pc_relative */
966 0, /* bitpos */
967 complain_overflow_dont,/* complain_on_overflow */
968 bfd_elf_generic_reloc, /* special_function */
969 "R_ARM_LDR_PC_G1", /* name */
970 FALSE, /* partial_inplace */
971 0xffffffff, /* src_mask */
972 0xffffffff, /* dst_mask */
973 TRUE), /* pcrel_offset */
974
975 HOWTO (R_ARM_LDR_PC_G2, /* type */
976 0, /* rightshift */
977 2, /* size (0 = byte, 1 = short, 2 = long) */
978 32, /* bitsize */
979 TRUE, /* pc_relative */
980 0, /* bitpos */
981 complain_overflow_dont,/* complain_on_overflow */
982 bfd_elf_generic_reloc, /* special_function */
983 "R_ARM_LDR_PC_G2", /* name */
984 FALSE, /* partial_inplace */
985 0xffffffff, /* src_mask */
986 0xffffffff, /* dst_mask */
987 TRUE), /* pcrel_offset */
988
989 HOWTO (R_ARM_LDRS_PC_G0, /* type */
990 0, /* rightshift */
991 2, /* size (0 = byte, 1 = short, 2 = long) */
992 32, /* bitsize */
993 TRUE, /* pc_relative */
994 0, /* bitpos */
995 complain_overflow_dont,/* complain_on_overflow */
996 bfd_elf_generic_reloc, /* special_function */
997 "R_ARM_LDRS_PC_G0", /* name */
998 FALSE, /* partial_inplace */
999 0xffffffff, /* src_mask */
1000 0xffffffff, /* dst_mask */
1001 TRUE), /* pcrel_offset */
1002
1003 HOWTO (R_ARM_LDRS_PC_G1, /* type */
1004 0, /* rightshift */
1005 2, /* size (0 = byte, 1 = short, 2 = long) */
1006 32, /* bitsize */
1007 TRUE, /* pc_relative */
1008 0, /* bitpos */
1009 complain_overflow_dont,/* complain_on_overflow */
1010 bfd_elf_generic_reloc, /* special_function */
1011 "R_ARM_LDRS_PC_G1", /* name */
1012 FALSE, /* partial_inplace */
1013 0xffffffff, /* src_mask */
1014 0xffffffff, /* dst_mask */
1015 TRUE), /* pcrel_offset */
1016
1017 HOWTO (R_ARM_LDRS_PC_G2, /* type */
1018 0, /* rightshift */
1019 2, /* size (0 = byte, 1 = short, 2 = long) */
1020 32, /* bitsize */
1021 TRUE, /* pc_relative */
1022 0, /* bitpos */
1023 complain_overflow_dont,/* complain_on_overflow */
1024 bfd_elf_generic_reloc, /* special_function */
1025 "R_ARM_LDRS_PC_G2", /* name */
1026 FALSE, /* partial_inplace */
1027 0xffffffff, /* src_mask */
1028 0xffffffff, /* dst_mask */
1029 TRUE), /* pcrel_offset */
1030
1031 HOWTO (R_ARM_LDC_PC_G0, /* type */
1032 0, /* rightshift */
1033 2, /* size (0 = byte, 1 = short, 2 = long) */
1034 32, /* bitsize */
1035 TRUE, /* pc_relative */
1036 0, /* bitpos */
1037 complain_overflow_dont,/* complain_on_overflow */
1038 bfd_elf_generic_reloc, /* special_function */
1039 "R_ARM_LDC_PC_G0", /* name */
1040 FALSE, /* partial_inplace */
1041 0xffffffff, /* src_mask */
1042 0xffffffff, /* dst_mask */
1043 TRUE), /* pcrel_offset */
1044
1045 HOWTO (R_ARM_LDC_PC_G1, /* type */
1046 0, /* rightshift */
1047 2, /* size (0 = byte, 1 = short, 2 = long) */
1048 32, /* bitsize */
1049 TRUE, /* pc_relative */
1050 0, /* bitpos */
1051 complain_overflow_dont,/* complain_on_overflow */
1052 bfd_elf_generic_reloc, /* special_function */
1053 "R_ARM_LDC_PC_G1", /* name */
1054 FALSE, /* partial_inplace */
1055 0xffffffff, /* src_mask */
1056 0xffffffff, /* dst_mask */
1057 TRUE), /* pcrel_offset */
1058
1059 HOWTO (R_ARM_LDC_PC_G2, /* type */
1060 0, /* rightshift */
1061 2, /* size (0 = byte, 1 = short, 2 = long) */
1062 32, /* bitsize */
1063 TRUE, /* pc_relative */
1064 0, /* bitpos */
1065 complain_overflow_dont,/* complain_on_overflow */
1066 bfd_elf_generic_reloc, /* special_function */
1067 "R_ARM_LDC_PC_G2", /* name */
1068 FALSE, /* partial_inplace */
1069 0xffffffff, /* src_mask */
1070 0xffffffff, /* dst_mask */
1071 TRUE), /* pcrel_offset */
1072
1073 HOWTO (R_ARM_ALU_SB_G0_NC, /* type */
1074 0, /* rightshift */
1075 2, /* size (0 = byte, 1 = short, 2 = long) */
1076 32, /* bitsize */
1077 TRUE, /* pc_relative */
1078 0, /* bitpos */
1079 complain_overflow_dont,/* complain_on_overflow */
1080 bfd_elf_generic_reloc, /* special_function */
1081 "R_ARM_ALU_SB_G0_NC", /* name */
1082 FALSE, /* partial_inplace */
1083 0xffffffff, /* src_mask */
1084 0xffffffff, /* dst_mask */
1085 TRUE), /* pcrel_offset */
1086
1087 HOWTO (R_ARM_ALU_SB_G0, /* type */
1088 0, /* rightshift */
1089 2, /* size (0 = byte, 1 = short, 2 = long) */
1090 32, /* bitsize */
1091 TRUE, /* pc_relative */
1092 0, /* bitpos */
1093 complain_overflow_dont,/* complain_on_overflow */
1094 bfd_elf_generic_reloc, /* special_function */
1095 "R_ARM_ALU_SB_G0", /* name */
1096 FALSE, /* partial_inplace */
1097 0xffffffff, /* src_mask */
1098 0xffffffff, /* dst_mask */
1099 TRUE), /* pcrel_offset */
1100
1101 HOWTO (R_ARM_ALU_SB_G1_NC, /* type */
1102 0, /* rightshift */
1103 2, /* size (0 = byte, 1 = short, 2 = long) */
1104 32, /* bitsize */
1105 TRUE, /* pc_relative */
1106 0, /* bitpos */
1107 complain_overflow_dont,/* complain_on_overflow */
1108 bfd_elf_generic_reloc, /* special_function */
1109 "R_ARM_ALU_SB_G1_NC", /* name */
1110 FALSE, /* partial_inplace */
1111 0xffffffff, /* src_mask */
1112 0xffffffff, /* dst_mask */
1113 TRUE), /* pcrel_offset */
1114
1115 HOWTO (R_ARM_ALU_SB_G1, /* type */
1116 0, /* rightshift */
1117 2, /* size (0 = byte, 1 = short, 2 = long) */
1118 32, /* bitsize */
1119 TRUE, /* pc_relative */
1120 0, /* bitpos */
1121 complain_overflow_dont,/* complain_on_overflow */
1122 bfd_elf_generic_reloc, /* special_function */
1123 "R_ARM_ALU_SB_G1", /* name */
1124 FALSE, /* partial_inplace */
1125 0xffffffff, /* src_mask */
1126 0xffffffff, /* dst_mask */
1127 TRUE), /* pcrel_offset */
1128
1129 HOWTO (R_ARM_ALU_SB_G2, /* type */
1130 0, /* rightshift */
1131 2, /* size (0 = byte, 1 = short, 2 = long) */
1132 32, /* bitsize */
1133 TRUE, /* pc_relative */
1134 0, /* bitpos */
1135 complain_overflow_dont,/* complain_on_overflow */
1136 bfd_elf_generic_reloc, /* special_function */
1137 "R_ARM_ALU_SB_G2", /* name */
1138 FALSE, /* partial_inplace */
1139 0xffffffff, /* src_mask */
1140 0xffffffff, /* dst_mask */
1141 TRUE), /* pcrel_offset */
1142
1143 HOWTO (R_ARM_LDR_SB_G0, /* type */
1144 0, /* rightshift */
1145 2, /* size (0 = byte, 1 = short, 2 = long) */
1146 32, /* bitsize */
1147 TRUE, /* pc_relative */
1148 0, /* bitpos */
1149 complain_overflow_dont,/* complain_on_overflow */
1150 bfd_elf_generic_reloc, /* special_function */
1151 "R_ARM_LDR_SB_G0", /* name */
1152 FALSE, /* partial_inplace */
1153 0xffffffff, /* src_mask */
1154 0xffffffff, /* dst_mask */
1155 TRUE), /* pcrel_offset */
1156
1157 HOWTO (R_ARM_LDR_SB_G1, /* type */
1158 0, /* rightshift */
1159 2, /* size (0 = byte, 1 = short, 2 = long) */
1160 32, /* bitsize */
1161 TRUE, /* pc_relative */
1162 0, /* bitpos */
1163 complain_overflow_dont,/* complain_on_overflow */
1164 bfd_elf_generic_reloc, /* special_function */
1165 "R_ARM_LDR_SB_G1", /* name */
1166 FALSE, /* partial_inplace */
1167 0xffffffff, /* src_mask */
1168 0xffffffff, /* dst_mask */
1169 TRUE), /* pcrel_offset */
1170
1171 HOWTO (R_ARM_LDR_SB_G2, /* type */
1172 0, /* rightshift */
1173 2, /* size (0 = byte, 1 = short, 2 = long) */
1174 32, /* bitsize */
1175 TRUE, /* pc_relative */
1176 0, /* bitpos */
1177 complain_overflow_dont,/* complain_on_overflow */
1178 bfd_elf_generic_reloc, /* special_function */
1179 "R_ARM_LDR_SB_G2", /* name */
1180 FALSE, /* partial_inplace */
1181 0xffffffff, /* src_mask */
1182 0xffffffff, /* dst_mask */
1183 TRUE), /* pcrel_offset */
1184
1185 HOWTO (R_ARM_LDRS_SB_G0, /* type */
1186 0, /* rightshift */
1187 2, /* size (0 = byte, 1 = short, 2 = long) */
1188 32, /* bitsize */
1189 TRUE, /* pc_relative */
1190 0, /* bitpos */
1191 complain_overflow_dont,/* complain_on_overflow */
1192 bfd_elf_generic_reloc, /* special_function */
1193 "R_ARM_LDRS_SB_G0", /* name */
1194 FALSE, /* partial_inplace */
1195 0xffffffff, /* src_mask */
1196 0xffffffff, /* dst_mask */
1197 TRUE), /* pcrel_offset */
1198
1199 HOWTO (R_ARM_LDRS_SB_G1, /* type */
1200 0, /* rightshift */
1201 2, /* size (0 = byte, 1 = short, 2 = long) */
1202 32, /* bitsize */
1203 TRUE, /* pc_relative */
1204 0, /* bitpos */
1205 complain_overflow_dont,/* complain_on_overflow */
1206 bfd_elf_generic_reloc, /* special_function */
1207 "R_ARM_LDRS_SB_G1", /* name */
1208 FALSE, /* partial_inplace */
1209 0xffffffff, /* src_mask */
1210 0xffffffff, /* dst_mask */
1211 TRUE), /* pcrel_offset */
1212
1213 HOWTO (R_ARM_LDRS_SB_G2, /* type */
1214 0, /* rightshift */
1215 2, /* size (0 = byte, 1 = short, 2 = long) */
1216 32, /* bitsize */
1217 TRUE, /* pc_relative */
1218 0, /* bitpos */
1219 complain_overflow_dont,/* complain_on_overflow */
1220 bfd_elf_generic_reloc, /* special_function */
1221 "R_ARM_LDRS_SB_G2", /* name */
1222 FALSE, /* partial_inplace */
1223 0xffffffff, /* src_mask */
1224 0xffffffff, /* dst_mask */
1225 TRUE), /* pcrel_offset */
1226
1227 HOWTO (R_ARM_LDC_SB_G0, /* type */
1228 0, /* rightshift */
1229 2, /* size (0 = byte, 1 = short, 2 = long) */
1230 32, /* bitsize */
1231 TRUE, /* pc_relative */
1232 0, /* bitpos */
1233 complain_overflow_dont,/* complain_on_overflow */
1234 bfd_elf_generic_reloc, /* special_function */
1235 "R_ARM_LDC_SB_G0", /* name */
1236 FALSE, /* partial_inplace */
1237 0xffffffff, /* src_mask */
1238 0xffffffff, /* dst_mask */
1239 TRUE), /* pcrel_offset */
1240
1241 HOWTO (R_ARM_LDC_SB_G1, /* type */
1242 0, /* rightshift */
1243 2, /* size (0 = byte, 1 = short, 2 = long) */
1244 32, /* bitsize */
1245 TRUE, /* pc_relative */
1246 0, /* bitpos */
1247 complain_overflow_dont,/* complain_on_overflow */
1248 bfd_elf_generic_reloc, /* special_function */
1249 "R_ARM_LDC_SB_G1", /* name */
1250 FALSE, /* partial_inplace */
1251 0xffffffff, /* src_mask */
1252 0xffffffff, /* dst_mask */
1253 TRUE), /* pcrel_offset */
1254
1255 HOWTO (R_ARM_LDC_SB_G2, /* type */
1256 0, /* rightshift */
1257 2, /* size (0 = byte, 1 = short, 2 = long) */
1258 32, /* bitsize */
1259 TRUE, /* pc_relative */
1260 0, /* bitpos */
1261 complain_overflow_dont,/* complain_on_overflow */
1262 bfd_elf_generic_reloc, /* special_function */
1263 "R_ARM_LDC_SB_G2", /* name */
1264 FALSE, /* partial_inplace */
1265 0xffffffff, /* src_mask */
1266 0xffffffff, /* dst_mask */
1267 TRUE), /* pcrel_offset */
1268
1269 /* End of group relocations. */
1270
1271 HOWTO (R_ARM_MOVW_BREL_NC, /* type */
1272 0, /* rightshift */
1273 2, /* size (0 = byte, 1 = short, 2 = long) */
1274 16, /* bitsize */
1275 FALSE, /* pc_relative */
1276 0, /* bitpos */
1277 complain_overflow_dont,/* complain_on_overflow */
1278 bfd_elf_generic_reloc, /* special_function */
1279 "R_ARM_MOVW_BREL_NC", /* name */
1280 FALSE, /* partial_inplace */
1281 0x0000ffff, /* src_mask */
1282 0x0000ffff, /* dst_mask */
1283 FALSE), /* pcrel_offset */
1284
1285 HOWTO (R_ARM_MOVT_BREL, /* type */
1286 0, /* rightshift */
1287 2, /* size (0 = byte, 1 = short, 2 = long) */
1288 16, /* bitsize */
1289 FALSE, /* pc_relative */
1290 0, /* bitpos */
1291 complain_overflow_bitfield,/* complain_on_overflow */
1292 bfd_elf_generic_reloc, /* special_function */
1293 "R_ARM_MOVT_BREL", /* name */
1294 FALSE, /* partial_inplace */
1295 0x0000ffff, /* src_mask */
1296 0x0000ffff, /* dst_mask */
1297 FALSE), /* pcrel_offset */
1298
1299 HOWTO (R_ARM_MOVW_BREL, /* type */
1300 0, /* rightshift */
1301 2, /* size (0 = byte, 1 = short, 2 = long) */
1302 16, /* bitsize */
1303 FALSE, /* pc_relative */
1304 0, /* bitpos */
1305 complain_overflow_dont,/* complain_on_overflow */
1306 bfd_elf_generic_reloc, /* special_function */
1307 "R_ARM_MOVW_BREL", /* name */
1308 FALSE, /* partial_inplace */
1309 0x0000ffff, /* src_mask */
1310 0x0000ffff, /* dst_mask */
1311 FALSE), /* pcrel_offset */
1312
1313 HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */
1314 0, /* rightshift */
1315 2, /* size (0 = byte, 1 = short, 2 = long) */
1316 16, /* bitsize */
1317 FALSE, /* pc_relative */
1318 0, /* bitpos */
1319 complain_overflow_dont,/* complain_on_overflow */
1320 bfd_elf_generic_reloc, /* special_function */
1321 "R_ARM_THM_MOVW_BREL_NC",/* name */
1322 FALSE, /* partial_inplace */
1323 0x040f70ff, /* src_mask */
1324 0x040f70ff, /* dst_mask */
1325 FALSE), /* pcrel_offset */
1326
1327 HOWTO (R_ARM_THM_MOVT_BREL, /* type */
1328 0, /* rightshift */
1329 2, /* size (0 = byte, 1 = short, 2 = long) */
1330 16, /* bitsize */
1331 FALSE, /* pc_relative */
1332 0, /* bitpos */
1333 complain_overflow_bitfield,/* complain_on_overflow */
1334 bfd_elf_generic_reloc, /* special_function */
1335 "R_ARM_THM_MOVT_BREL", /* name */
1336 FALSE, /* partial_inplace */
1337 0x040f70ff, /* src_mask */
1338 0x040f70ff, /* dst_mask */
1339 FALSE), /* pcrel_offset */
1340
1341 HOWTO (R_ARM_THM_MOVW_BREL, /* type */
1342 0, /* rightshift */
1343 2, /* size (0 = byte, 1 = short, 2 = long) */
1344 16, /* bitsize */
1345 FALSE, /* pc_relative */
1346 0, /* bitpos */
1347 complain_overflow_dont,/* complain_on_overflow */
1348 bfd_elf_generic_reloc, /* special_function */
1349 "R_ARM_THM_MOVW_BREL", /* name */
1350 FALSE, /* partial_inplace */
1351 0x040f70ff, /* src_mask */
1352 0x040f70ff, /* dst_mask */
1353 FALSE), /* pcrel_offset */
1354
1355 EMPTY_HOWTO (90), /* Unallocated. */
1356 EMPTY_HOWTO (91),
1357 EMPTY_HOWTO (92),
1358 EMPTY_HOWTO (93),
1359
1360 HOWTO (R_ARM_PLT32_ABS, /* type */
1361 0, /* rightshift */
1362 2, /* size (0 = byte, 1 = short, 2 = long) */
1363 32, /* bitsize */
1364 FALSE, /* pc_relative */
1365 0, /* bitpos */
1366 complain_overflow_dont,/* complain_on_overflow */
1367 bfd_elf_generic_reloc, /* special_function */
1368 "R_ARM_PLT32_ABS", /* name */
1369 FALSE, /* partial_inplace */
1370 0xffffffff, /* src_mask */
1371 0xffffffff, /* dst_mask */
1372 FALSE), /* pcrel_offset */
1373
1374 HOWTO (R_ARM_GOT_ABS, /* type */
1375 0, /* rightshift */
1376 2, /* size (0 = byte, 1 = short, 2 = long) */
1377 32, /* bitsize */
1378 FALSE, /* pc_relative */
1379 0, /* bitpos */
1380 complain_overflow_dont,/* complain_on_overflow */
1381 bfd_elf_generic_reloc, /* special_function */
1382 "R_ARM_GOT_ABS", /* name */
1383 FALSE, /* partial_inplace */
1384 0xffffffff, /* src_mask */
1385 0xffffffff, /* dst_mask */
1386 FALSE), /* pcrel_offset */
1387
1388 HOWTO (R_ARM_GOT_PREL, /* type */
1389 0, /* rightshift */
1390 2, /* size (0 = byte, 1 = short, 2 = long) */
1391 32, /* bitsize */
1392 TRUE, /* pc_relative */
1393 0, /* bitpos */
1394 complain_overflow_dont, /* complain_on_overflow */
1395 bfd_elf_generic_reloc, /* special_function */
1396 "R_ARM_GOT_PREL", /* name */
1397 FALSE, /* partial_inplace */
1398 0xffffffff, /* src_mask */
1399 0xffffffff, /* dst_mask */
1400 TRUE), /* pcrel_offset */
1401
1402 HOWTO (R_ARM_GOT_BREL12, /* type */
1403 0, /* rightshift */
1404 2, /* size (0 = byte, 1 = short, 2 = long) */
1405 12, /* bitsize */
1406 FALSE, /* pc_relative */
1407 0, /* bitpos */
1408 complain_overflow_bitfield,/* complain_on_overflow */
1409 bfd_elf_generic_reloc, /* special_function */
1410 "R_ARM_GOT_BREL12", /* name */
1411 FALSE, /* partial_inplace */
1412 0x00000fff, /* src_mask */
1413 0x00000fff, /* dst_mask */
1414 FALSE), /* pcrel_offset */
1415
1416 HOWTO (R_ARM_GOTOFF12, /* type */
1417 0, /* rightshift */
1418 2, /* size (0 = byte, 1 = short, 2 = long) */
1419 12, /* bitsize */
1420 FALSE, /* pc_relative */
1421 0, /* bitpos */
1422 complain_overflow_bitfield,/* complain_on_overflow */
1423 bfd_elf_generic_reloc, /* special_function */
1424 "R_ARM_GOTOFF12", /* name */
1425 FALSE, /* partial_inplace */
1426 0x00000fff, /* src_mask */
1427 0x00000fff, /* dst_mask */
1428 FALSE), /* pcrel_offset */
1429
1430 EMPTY_HOWTO (R_ARM_GOTRELAX), /* reserved for future GOT-load optimizations */
1431
1432 /* GNU extension to record C++ vtable member usage */
1433 HOWTO (R_ARM_GNU_VTENTRY, /* type */
1434 0, /* rightshift */
1435 2, /* size (0 = byte, 1 = short, 2 = long) */
1436 0, /* bitsize */
1437 FALSE, /* pc_relative */
1438 0, /* bitpos */
1439 complain_overflow_dont, /* complain_on_overflow */
1440 _bfd_elf_rel_vtable_reloc_fn, /* special_function */
1441 "R_ARM_GNU_VTENTRY", /* name */
1442 FALSE, /* partial_inplace */
1443 0, /* src_mask */
1444 0, /* dst_mask */
1445 FALSE), /* pcrel_offset */
1446
1447 /* GNU extension to record C++ vtable hierarchy */
1448 HOWTO (R_ARM_GNU_VTINHERIT, /* type */
1449 0, /* rightshift */
1450 2, /* size (0 = byte, 1 = short, 2 = long) */
1451 0, /* bitsize */
1452 FALSE, /* pc_relative */
1453 0, /* bitpos */
1454 complain_overflow_dont, /* complain_on_overflow */
1455 NULL, /* special_function */
1456 "R_ARM_GNU_VTINHERIT", /* name */
1457 FALSE, /* partial_inplace */
1458 0, /* src_mask */
1459 0, /* dst_mask */
1460 FALSE), /* pcrel_offset */
1461
1462 HOWTO (R_ARM_THM_JUMP11, /* type */
1463 1, /* rightshift */
1464 1, /* size (0 = byte, 1 = short, 2 = long) */
1465 11, /* bitsize */
1466 TRUE, /* pc_relative */
1467 0, /* bitpos */
1468 complain_overflow_signed, /* complain_on_overflow */
1469 bfd_elf_generic_reloc, /* special_function */
1470 "R_ARM_THM_JUMP11", /* name */
1471 FALSE, /* partial_inplace */
1472 0x000007ff, /* src_mask */
1473 0x000007ff, /* dst_mask */
1474 TRUE), /* pcrel_offset */
1475
1476 HOWTO (R_ARM_THM_JUMP8, /* type */
1477 1, /* rightshift */
1478 1, /* size (0 = byte, 1 = short, 2 = long) */
1479 8, /* bitsize */
1480 TRUE, /* pc_relative */
1481 0, /* bitpos */
1482 complain_overflow_signed, /* complain_on_overflow */
1483 bfd_elf_generic_reloc, /* special_function */
1484 "R_ARM_THM_JUMP8", /* name */
1485 FALSE, /* partial_inplace */
1486 0x000000ff, /* src_mask */
1487 0x000000ff, /* dst_mask */
1488 TRUE), /* pcrel_offset */
1489
1490 /* TLS relocations */
1491 HOWTO (R_ARM_TLS_GD32, /* type */
1492 0, /* rightshift */
1493 2, /* size (0 = byte, 1 = short, 2 = long) */
1494 32, /* bitsize */
1495 FALSE, /* pc_relative */
1496 0, /* bitpos */
1497 complain_overflow_bitfield,/* complain_on_overflow */
1498 NULL, /* special_function */
1499 "R_ARM_TLS_GD32", /* name */
1500 TRUE, /* partial_inplace */
1501 0xffffffff, /* src_mask */
1502 0xffffffff, /* dst_mask */
1503 FALSE), /* pcrel_offset */
1504
1505 HOWTO (R_ARM_TLS_LDM32, /* type */
1506 0, /* rightshift */
1507 2, /* size (0 = byte, 1 = short, 2 = long) */
1508 32, /* bitsize */
1509 FALSE, /* pc_relative */
1510 0, /* bitpos */
1511 complain_overflow_bitfield,/* complain_on_overflow */
1512 bfd_elf_generic_reloc, /* special_function */
1513 "R_ARM_TLS_LDM32", /* name */
1514 TRUE, /* partial_inplace */
1515 0xffffffff, /* src_mask */
1516 0xffffffff, /* dst_mask */
1517 FALSE), /* pcrel_offset */
1518
1519 HOWTO (R_ARM_TLS_LDO32, /* type */
1520 0, /* rightshift */
1521 2, /* size (0 = byte, 1 = short, 2 = long) */
1522 32, /* bitsize */
1523 FALSE, /* pc_relative */
1524 0, /* bitpos */
1525 complain_overflow_bitfield,/* complain_on_overflow */
1526 bfd_elf_generic_reloc, /* special_function */
1527 "R_ARM_TLS_LDO32", /* name */
1528 TRUE, /* partial_inplace */
1529 0xffffffff, /* src_mask */
1530 0xffffffff, /* dst_mask */
1531 FALSE), /* pcrel_offset */
1532
1533 HOWTO (R_ARM_TLS_IE32, /* type */
1534 0, /* rightshift */
1535 2, /* size (0 = byte, 1 = short, 2 = long) */
1536 32, /* bitsize */
1537 FALSE, /* pc_relative */
1538 0, /* bitpos */
1539 complain_overflow_bitfield,/* complain_on_overflow */
1540 NULL, /* special_function */
1541 "R_ARM_TLS_IE32", /* name */
1542 TRUE, /* partial_inplace */
1543 0xffffffff, /* src_mask */
1544 0xffffffff, /* dst_mask */
1545 FALSE), /* pcrel_offset */
1546
1547 HOWTO (R_ARM_TLS_LE32, /* type */
1548 0, /* rightshift */
1549 2, /* size (0 = byte, 1 = short, 2 = long) */
1550 32, /* bitsize */
1551 FALSE, /* pc_relative */
1552 0, /* bitpos */
1553 complain_overflow_bitfield,/* complain_on_overflow */
1554 bfd_elf_generic_reloc, /* special_function */
1555 "R_ARM_TLS_LE32", /* name */
1556 TRUE, /* partial_inplace */
1557 0xffffffff, /* src_mask */
1558 0xffffffff, /* dst_mask */
1559 FALSE), /* pcrel_offset */
1560
1561 HOWTO (R_ARM_TLS_LDO12, /* type */
1562 0, /* rightshift */
1563 2, /* size (0 = byte, 1 = short, 2 = long) */
1564 12, /* bitsize */
1565 FALSE, /* pc_relative */
1566 0, /* bitpos */
1567 complain_overflow_bitfield,/* complain_on_overflow */
1568 bfd_elf_generic_reloc, /* special_function */
1569 "R_ARM_TLS_LDO12", /* name */
1570 FALSE, /* partial_inplace */
1571 0x00000fff, /* src_mask */
1572 0x00000fff, /* dst_mask */
1573 FALSE), /* pcrel_offset */
1574
1575 HOWTO (R_ARM_TLS_LE12, /* type */
1576 0, /* rightshift */
1577 2, /* size (0 = byte, 1 = short, 2 = long) */
1578 12, /* bitsize */
1579 FALSE, /* pc_relative */
1580 0, /* bitpos */
1581 complain_overflow_bitfield,/* complain_on_overflow */
1582 bfd_elf_generic_reloc, /* special_function */
1583 "R_ARM_TLS_LE12", /* name */
1584 FALSE, /* partial_inplace */
1585 0x00000fff, /* src_mask */
1586 0x00000fff, /* dst_mask */
1587 FALSE), /* pcrel_offset */
1588
1589 HOWTO (R_ARM_TLS_IE12GP, /* type */
1590 0, /* rightshift */
1591 2, /* size (0 = byte, 1 = short, 2 = long) */
1592 12, /* bitsize */
1593 FALSE, /* pc_relative */
1594 0, /* bitpos */
1595 complain_overflow_bitfield,/* complain_on_overflow */
1596 bfd_elf_generic_reloc, /* special_function */
1597 "R_ARM_TLS_IE12GP", /* name */
1598 FALSE, /* partial_inplace */
1599 0x00000fff, /* src_mask */
1600 0x00000fff, /* dst_mask */
1601 FALSE), /* pcrel_offset */
1602 };
1603
1604 /* 112-127 private relocations
1605 128 R_ARM_ME_TOO, obsolete
1606 129-255 unallocated in AAELF.
1607
1608 249-255 extended, currently unused, relocations: */
1609
1610 static reloc_howto_type elf32_arm_howto_table_2[4] =
1611 {
1612 HOWTO (R_ARM_RREL32, /* type */
1613 0, /* rightshift */
1614 0, /* size (0 = byte, 1 = short, 2 = long) */
1615 0, /* bitsize */
1616 FALSE, /* pc_relative */
1617 0, /* bitpos */
1618 complain_overflow_dont,/* complain_on_overflow */
1619 bfd_elf_generic_reloc, /* special_function */
1620 "R_ARM_RREL32", /* name */
1621 FALSE, /* partial_inplace */
1622 0, /* src_mask */
1623 0, /* dst_mask */
1624 FALSE), /* pcrel_offset */
1625
1626 HOWTO (R_ARM_RABS32, /* type */
1627 0, /* rightshift */
1628 0, /* size (0 = byte, 1 = short, 2 = long) */
1629 0, /* bitsize */
1630 FALSE, /* pc_relative */
1631 0, /* bitpos */
1632 complain_overflow_dont,/* complain_on_overflow */
1633 bfd_elf_generic_reloc, /* special_function */
1634 "R_ARM_RABS32", /* name */
1635 FALSE, /* partial_inplace */
1636 0, /* src_mask */
1637 0, /* dst_mask */
1638 FALSE), /* pcrel_offset */
1639
1640 HOWTO (R_ARM_RPC24, /* type */
1641 0, /* rightshift */
1642 0, /* size (0 = byte, 1 = short, 2 = long) */
1643 0, /* bitsize */
1644 FALSE, /* pc_relative */
1645 0, /* bitpos */
1646 complain_overflow_dont,/* complain_on_overflow */
1647 bfd_elf_generic_reloc, /* special_function */
1648 "R_ARM_RPC24", /* name */
1649 FALSE, /* partial_inplace */
1650 0, /* src_mask */
1651 0, /* dst_mask */
1652 FALSE), /* pcrel_offset */
1653
1654 HOWTO (R_ARM_RBASE, /* type */
1655 0, /* rightshift */
1656 0, /* size (0 = byte, 1 = short, 2 = long) */
1657 0, /* bitsize */
1658 FALSE, /* pc_relative */
1659 0, /* bitpos */
1660 complain_overflow_dont,/* complain_on_overflow */
1661 bfd_elf_generic_reloc, /* special_function */
1662 "R_ARM_RBASE", /* name */
1663 FALSE, /* partial_inplace */
1664 0, /* src_mask */
1665 0, /* dst_mask */
1666 FALSE) /* pcrel_offset */
1667 };
1668
1669 static reloc_howto_type *
1670 elf32_arm_howto_from_type (unsigned int r_type)
1671 {
1672 if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
1673 return &elf32_arm_howto_table_1[r_type];
1674
1675 if (r_type >= R_ARM_RREL32
1676 && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_2))
1677 return &elf32_arm_howto_table_2[r_type - R_ARM_RREL32];
1678
1679 return NULL;
1680 }
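
/* Editor's note: an illustrative self-check (not part of the original
   file).  The comment above elf32_arm_howto_table_1 relies on the
   invariant that entry N of the table describes relocation type N, which
   elf32_arm_howto_from_type exploits by indexing directly.  A hypothetical
   consistency check such as this would catch an entry that drifted out of
   place; it is guarded out and purely a sketch.  */
#if 0
static void
example_check_howto_table (void)
{
  unsigned int i;

  for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
    /* EMPTY_HOWTO slots have a NULL name; skip those.  */
    if (elf32_arm_howto_table_1[i].name != NULL)
      BFD_ASSERT (elf32_arm_howto_table_1[i].type == i);
}
#endif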
1681
1682 static void
1683 elf32_arm_info_to_howto (bfd * abfd ATTRIBUTE_UNUSED, arelent * bfd_reloc,
1684 Elf_Internal_Rela * elf_reloc)
1685 {
1686 unsigned int r_type;
1687
1688 r_type = ELF32_R_TYPE (elf_reloc->r_info);
1689 bfd_reloc->howto = elf32_arm_howto_from_type (r_type);
1690 }
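
/* Editor's note: an illustrative sketch (not part of the original file)
   of the r_info decoding that elf32_arm_info_to_howto performs.
   ELF32_R_TYPE extracts the low eight bits of r_info, i.e. the R_ARM_*
   number, which is then mapped to a howto by elf32_arm_howto_from_type.
   The symbol index and type below are hypothetical values.  */
#if 0
static void
example_decode_r_info (void)
{
  Elf_Internal_Rela rel;

  /* Symbol index 5, relocation type R_ARM_ABS32.  */
  rel.r_info = ELF32_R_INFO (5, R_ARM_ABS32);

  BFD_ASSERT (ELF32_R_TYPE (rel.r_info) == R_ARM_ABS32);
  BFD_ASSERT (ELF32_R_SYM (rel.r_info) == 5);
}
#endif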
1691
1692 struct elf32_arm_reloc_map
1693 {
1694 bfd_reloc_code_real_type bfd_reloc_val;
1695 unsigned char elf_reloc_val;
1696 };
1697
1698 /* All entries in this list must also be present in elf32_arm_howto_table. */
1699 static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
1700 {
1701 {BFD_RELOC_NONE, R_ARM_NONE},
1702 {BFD_RELOC_ARM_PCREL_BRANCH, R_ARM_PC24},
1703 {BFD_RELOC_ARM_PCREL_CALL, R_ARM_CALL},
1704 {BFD_RELOC_ARM_PCREL_JUMP, R_ARM_JUMP24},
1705 {BFD_RELOC_ARM_PCREL_BLX, R_ARM_XPC25},
1706 {BFD_RELOC_THUMB_PCREL_BLX, R_ARM_THM_XPC22},
1707 {BFD_RELOC_32, R_ARM_ABS32},
1708 {BFD_RELOC_32_PCREL, R_ARM_REL32},
1709 {BFD_RELOC_8, R_ARM_ABS8},
1710 {BFD_RELOC_16, R_ARM_ABS16},
1711 {BFD_RELOC_ARM_OFFSET_IMM, R_ARM_ABS12},
1712 {BFD_RELOC_ARM_THUMB_OFFSET, R_ARM_THM_ABS5},
1713 {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
1714 {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
1715 {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
1716 {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
1717 {BFD_RELOC_THUMB_PCREL_BRANCH9, R_ARM_THM_JUMP8},
1718 {BFD_RELOC_THUMB_PCREL_BRANCH7, R_ARM_THM_JUMP6},
1719 {BFD_RELOC_ARM_GLOB_DAT, R_ARM_GLOB_DAT},
1720 {BFD_RELOC_ARM_JUMP_SLOT, R_ARM_JUMP_SLOT},
1721 {BFD_RELOC_ARM_RELATIVE, R_ARM_RELATIVE},
1722 {BFD_RELOC_ARM_GOTOFF, R_ARM_GOTOFF32},
1723 {BFD_RELOC_ARM_GOTPC, R_ARM_GOTPC},
1724 {BFD_RELOC_ARM_GOT_PREL, R_ARM_GOT_PREL},
1725 {BFD_RELOC_ARM_GOT32, R_ARM_GOT32},
1726 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1727 {BFD_RELOC_ARM_TARGET1, R_ARM_TARGET1},
1728 {BFD_RELOC_ARM_ROSEGREL32, R_ARM_ROSEGREL32},
1729 {BFD_RELOC_ARM_SBREL32, R_ARM_SBREL32},
1730 {BFD_RELOC_ARM_PREL31, R_ARM_PREL31},
1731 {BFD_RELOC_ARM_TARGET2, R_ARM_TARGET2},
1732 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1733 {BFD_RELOC_ARM_TLS_GD32, R_ARM_TLS_GD32},
1734 {BFD_RELOC_ARM_TLS_LDO32, R_ARM_TLS_LDO32},
1735 {BFD_RELOC_ARM_TLS_LDM32, R_ARM_TLS_LDM32},
1736 {BFD_RELOC_ARM_TLS_DTPMOD32, R_ARM_TLS_DTPMOD32},
1737 {BFD_RELOC_ARM_TLS_DTPOFF32, R_ARM_TLS_DTPOFF32},
1738 {BFD_RELOC_ARM_TLS_TPOFF32, R_ARM_TLS_TPOFF32},
1739 {BFD_RELOC_ARM_TLS_IE32, R_ARM_TLS_IE32},
1740 {BFD_RELOC_ARM_TLS_LE32, R_ARM_TLS_LE32},
1741 {BFD_RELOC_VTABLE_INHERIT, R_ARM_GNU_VTINHERIT},
1742 {BFD_RELOC_VTABLE_ENTRY, R_ARM_GNU_VTENTRY},
1743 {BFD_RELOC_ARM_MOVW, R_ARM_MOVW_ABS_NC},
1744 {BFD_RELOC_ARM_MOVT, R_ARM_MOVT_ABS},
1745 {BFD_RELOC_ARM_MOVW_PCREL, R_ARM_MOVW_PREL_NC},
1746 {BFD_RELOC_ARM_MOVT_PCREL, R_ARM_MOVT_PREL},
1747 {BFD_RELOC_ARM_THUMB_MOVW, R_ARM_THM_MOVW_ABS_NC},
1748 {BFD_RELOC_ARM_THUMB_MOVT, R_ARM_THM_MOVT_ABS},
1749 {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
1750 {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
1751 {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC},
1752 {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0},
1753 {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC},
1754 {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1},
1755 {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2},
1756 {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0},
1757 {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1},
1758 {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2},
1759 {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0},
1760 {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1},
1761 {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2},
1762 {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0},
1763 {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1},
1764 {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2},
1765 {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC},
1766 {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0},
1767 {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC},
1768 {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1},
1769 {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2},
1770 {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0},
1771 {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1},
1772 {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2},
1773 {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0},
1774 {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1},
1775 {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2},
1776 {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
1777 {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
1778 {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
1779 {BFD_RELOC_ARM_V4BX, R_ARM_V4BX}
1780 };
1781
1782 static reloc_howto_type *
1783 elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1784 bfd_reloc_code_real_type code)
1785 {
1786 unsigned int i;
1787
1788 for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
1789 if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
1790 return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);
1791
1792 return NULL;
1793 }
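/* Illustrative sketch only (not part of the original source): how a BFD
   client would use the lookup above.  The helper name below is
   hypothetical.  A request for the generic BFD_RELOC_32 code yields the
   howto entry registered for R_ARM_ABS32 in elf32_arm_howto_table_1.  */

static void
example_reloc_type_lookup_usage (bfd *abfd)
{
  reloc_howto_type *howto;

  /* Map the target-independent BFD reloc code onto the ELF32 ARM howto.  */
  howto = elf32_arm_reloc_type_lookup (abfd, BFD_RELOC_32);
  if (howto != NULL)
    BFD_ASSERT (strcmp (howto->name, "R_ARM_ABS32") == 0);
}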
1794
1795 static reloc_howto_type *
1796 elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1797 const char *r_name)
1798 {
1799 unsigned int i;
1800
1801 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
1802 if (elf32_arm_howto_table_1[i].name != NULL
1803 && strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
1804 return &elf32_arm_howto_table_1[i];
1805
1806 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
1807 if (elf32_arm_howto_table_2[i].name != NULL
1808 && strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
1809 return &elf32_arm_howto_table_2[i];
1810
1811 return NULL;
1812 }
1813
1814 /* Support for core dump NOTE sections. */
1815
1816 static bfd_boolean
1817 elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
1818 {
1819 int offset;
1820 size_t size;
1821
1822 switch (note->descsz)
1823 {
1824 default:
1825 return FALSE;
1826
1827 case 148: /* Linux/ARM 32-bit. */
1828 /* pr_cursig */
1829 elf_tdata (abfd)->core_signal = bfd_get_16 (abfd, note->descdata + 12);
1830
1831 /* pr_pid */
1832 elf_tdata (abfd)->core_pid = bfd_get_32 (abfd, note->descdata + 24);
1833
1834 /* pr_reg */
1835 offset = 72;
1836 size = 72;
1837
1838 break;
1839 }
1840
1841 /* Make a ".reg/999" section. */
1842 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
1843 size, note->descpos + offset);
1844 }
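/* A minimal layout sketch of the magic numbers used above, assuming the
   usual 32-bit Linux elf_prstatus: pr_cursig lives at offset 12, pr_pid
   at offset 24, and the 18 general registers (r0-r15, cpsr, orig_r0) of
   4 bytes each start at offset 72, leaving 4 trailing bytes for
   pr_fpvalid in the 148-byte note.  The typedef name is hypothetical and
   only serves as a compile-time cross-check of that arithmetic.  */
typedef char example_arm_prstatus_layout_check
  [(72 + 18 * 4 + 4 == 148) ? 1 : -1];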
1845
1846 static bfd_boolean
1847 elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
1848 {
1849 switch (note->descsz)
1850 {
1851 default:
1852 return FALSE;
1853
1854 case 124: /* Linux/ARM elf_prpsinfo. */
1855 elf_tdata (abfd)->core_program
1856 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
1857 elf_tdata (abfd)->core_command
1858 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
1859 }
1860
1861 /* Note that for some reason, some implementations (at least
1862 one, anyway) tack a spurious space onto the end of the args,
1863 so strip it off if it exists. */
1864 {
1865 char *command = elf_tdata (abfd)->core_command;
1866 int n = strlen (command);
1867
1868 if (0 < n && command[n - 1] == ' ')
1869 command[n - 1] = '\0';
1870 }
1871
1872 return TRUE;
1873 }
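/* Similarly (illustrative note, not part of the original source): in the
   124-byte Linux/ARM elf_prpsinfo assumed above, the 16-byte pr_fname
   field starts at offset 28 and the 80-byte pr_psargs field at offset 44,
   which is where the program name and command line are copied from.  The
   typedef name is hypothetical and only cross-checks that arithmetic.  */
typedef char example_arm_prpsinfo_layout_check
  [(28 + 16 == 44 && 44 + 80 == 124) ? 1 : -1];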
1874
1875 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_vec
1876 #define TARGET_LITTLE_NAME "elf32-littlearm"
1877 #define TARGET_BIG_SYM bfd_elf32_bigarm_vec
1878 #define TARGET_BIG_NAME "elf32-bigarm"
1879
1880 #define elf_backend_grok_prstatus elf32_arm_nabi_grok_prstatus
1881 #define elf_backend_grok_psinfo elf32_arm_nabi_grok_psinfo
1882
1883 typedef unsigned long int insn32;
1884 typedef unsigned short int insn16;
1885
1886 /* In lieu of proper flags, assume all EABIv4 or later objects are
1887 interworkable. */
1888 #define INTERWORK_FLAG(abfd) \
1889 (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
1890 || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
1891 || ((abfd)->flags & BFD_LINKER_CREATED))
1892
1893 /* The linker script knows the section names for placement.
1894 The entry_names are used to do simple name mangling on the stubs.
1895 Given a function name and its type, the stub can be found and the
1896 name can be changed; the only requirement is that the %s be present (see the sketch below). */
1897 #define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
1898 #define THUMB2ARM_GLUE_ENTRY_NAME "__%s_from_thumb"
1899
1900 #define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
1901 #define ARM2THUMB_GLUE_ENTRY_NAME "__%s_from_arm"
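/* A sketch of the name mangling described above (hypothetical helper,
   not part of the original code): the entry-name patterns are simply fed
   to sprintf, so an ARM-mode call to a Thumb function "foo" goes through
   a glue symbol named "__foo_from_arm".  */

static char *
example_arm2thumb_glue_name (const char *func_name)
{
  /* The pattern's "%s" is replaced by the function name; the two bytes
     occupied by "%s" in the pattern cover the terminating NUL.  */
  char *name = (char *) bfd_malloc (strlen (ARM2THUMB_GLUE_ENTRY_NAME)
				    + strlen (func_name) + 1);
  if (name != NULL)
    sprintf (name, ARM2THUMB_GLUE_ENTRY_NAME, func_name);
  return name;
}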
1902
1903 #define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
1904 #define VFP11_ERRATUM_VENEER_ENTRY_NAME "__vfp11_veneer_%x"
1905
1906 #define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
1907 #define ARM_BX_GLUE_ENTRY_NAME "__bx_r%d"
1908
1909 #define STUB_ENTRY_NAME "__%s_veneer"
1910
1911 /* The name of the dynamic interpreter. This is put in the .interp
1912 section. */
1913 #define ELF_DYNAMIC_INTERPRETER "/usr/lib/ld.so.1"
1914
1915 #ifdef FOUR_WORD_PLT
1916
1917 /* The first entry in a procedure linkage table looks like
1918 this. It is set up so that any shared library function that is
1919 called before the relocation has been set up calls the dynamic
1920 linker first. */
1921 static const bfd_vma elf32_arm_plt0_entry [] =
1922 {
1923 0xe52de004, /* str lr, [sp, #-4]! */
1924 0xe59fe010, /* ldr lr, [pc, #16] */
1925 0xe08fe00e, /* add lr, pc, lr */
1926 0xe5bef008, /* ldr pc, [lr, #8]! */
1927 };
1928
1929 /* Subsequent entries in a procedure linkage table look like
1930 this. */
1931 static const bfd_vma elf32_arm_plt_entry [] =
1932 {
1933 0xe28fc600, /* add ip, pc, #NN */
1934 0xe28cca00, /* add ip, ip, #NN */
1935 0xe5bcf000, /* ldr pc, [ip, #NN]! */
1936 0x00000000, /* unused */
1937 };
1938
1939 #else
1940
1941 /* The first entry in a procedure linkage table looks like
1942 this. It is set up so that any shared library function that is
1943 called before the relocation has been set up calls the dynamic
1944 linker first. */
1945 static const bfd_vma elf32_arm_plt0_entry [] =
1946 {
1947 0xe52de004, /* str lr, [sp, #-4]! */
1948 0xe59fe004, /* ldr lr, [pc, #4] */
1949 0xe08fe00e, /* add lr, pc, lr */
1950 0xe5bef008, /* ldr pc, [lr, #8]! */
1951 0x00000000, /* &GOT[0] - . */
1952 };
1953
1954 /* Subsequent entries in a procedure linkage table look like
1955 this. */
1956 static const bfd_vma elf32_arm_plt_entry [] =
1957 {
1958 0xe28fc600, /* add ip, pc, #0xNN00000 */
1959 0xe28cca00, /* add ip, ip, #0xNN000 */
1960 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
1961 };
1962
1963 #endif
1964
1965 /* The format of the first entry in the procedure linkage table
1966 for a VxWorks executable. */
1967 static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
1968 {
1969 0xe52dc008, /* str ip,[sp,#-8]! */
1970 0xe59fc000, /* ldr ip,[pc] */
1971 0xe59cf008, /* ldr pc,[ip,#8] */
1972 0x00000000, /* .long _GLOBAL_OFFSET_TABLE_ */
1973 };
1974
1975 /* The format of subsequent entries in a VxWorks executable. */
1976 static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
1977 {
1978 0xe59fc000, /* ldr ip,[pc] */
1979 0xe59cf000, /* ldr pc,[ip] */
1980 0x00000000, /* .long @got */
1981 0xe59fc000, /* ldr ip,[pc] */
1982 0xea000000, /* b _PLT */
1983 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
1984 };
1985
1986 /* The format of entries in a VxWorks shared library. */
1987 static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
1988 {
1989 0xe59fc000, /* ldr ip,[pc] */
1990 0xe79cf009, /* ldr pc,[ip,r9] */
1991 0x00000000, /* .long @got */
1992 0xe59fc000, /* ldr ip,[pc] */
1993 0xe599f008, /* ldr pc,[r9,#8] */
1994 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
1995 };
1996
1997 /* An initial stub used if the PLT entry is referenced from Thumb code. */
1998 #define PLT_THUMB_STUB_SIZE 4
1999 static const bfd_vma elf32_arm_plt_thumb_stub [] =
2000 {
2001 0x4778, /* bx pc */
2002 0x46c0 /* nop */
2003 };
2004
2005 /* The entries in a PLT when using a DLL-based target with multiple
2006 address spaces. */
2007 static const bfd_vma elf32_arm_symbian_plt_entry [] =
2008 {
2009 0xe51ff004, /* ldr pc, [pc, #-4] */
2010 0x00000000, /* dcd R_ARM_GLOB_DAT(X) */
2011 };
2012
2013 #define ARM_MAX_FWD_BRANCH_OFFSET ((((1 << 23) - 1) << 2) + 8)
2014 #define ARM_MAX_BWD_BRANCH_OFFSET ((-((1 << 23) << 2)) + 8)
2015 #define THM_MAX_FWD_BRANCH_OFFSET ((1 << 22) - 2 + 4)
2016 #define THM_MAX_BWD_BRANCH_OFFSET (-(1 << 22) + 4)
2017 #define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
2018 #define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
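/* Worked example of the limits above (illustrative note, not part of the
   original source): an ARM B/BL encodes a signed 24-bit word offset which
   is shifted left by two and applied to a PC that has already advanced
   8 bytes, so the reachable window is ((1 << 23) - 1) * 4 + 8 = +0x2000004
   bytes forward and -(1 << 23) * 4 + 8 = -0x1fffff8 bytes backward
   (roughly +/-32MB).  The Thumb and Thumb-2 limits follow the same pattern
   with a PC that is only 4 bytes ahead, giving roughly +/-4MB and +/-16MB
   respectively.  */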
2019
2020 enum stub_insn_type
2021 {
2022 THUMB16_TYPE = 1,
2023 THUMB32_TYPE,
2024 ARM_TYPE,
2025 DATA_TYPE
2026 };
2027
2028 #define THUMB16_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 0}
2029 /* A bit of a hack. A Thumb conditional branch, in which the proper condition
2030 is inserted in arm_build_one_stub(). */
2031 #define THUMB16_BCOND_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 1}
2032 #define THUMB32_INSN(X) {(X), THUMB32_TYPE, R_ARM_NONE, 0}
2033 #define THUMB32_B_INSN(X, Z) {(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
2034 #define ARM_INSN(X) {(X), ARM_TYPE, R_ARM_NONE, 0}
2035 #define ARM_REL_INSN(X, Z) {(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
2036 #define DATA_WORD(X,Y,Z) {(X), DATA_TYPE, (Y), (Z)}
2037
2038 typedef struct
2039 {
2040 bfd_vma data;
2041 enum stub_insn_type type;
2042 unsigned int r_type;
2043 int reloc_addend;
2044 } insn_sequence;
2045
2046 /* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
2047 to reach the stub if necessary. */
2048 static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
2049 {
2050 ARM_INSN(0xe51ff004), /* ldr pc, [pc, #-4] */
2051 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2052 };
2053
2054 /* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
2055 available. */
2056 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
2057 {
2058 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2059 ARM_INSN(0xe12fff1c), /* bx ip */
2060 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2061 };
2062
2063 /* Thumb -> Thumb long branch stub. Used on M-profile architectures. */
2064 static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
2065 {
2066 THUMB16_INSN(0xb401), /* push {r0} */
2067 THUMB16_INSN(0x4802), /* ldr r0, [pc, #8] */
2068 THUMB16_INSN(0x4684), /* mov ip, r0 */
2069 THUMB16_INSN(0xbc01), /* pop {r0} */
2070 THUMB16_INSN(0x4760), /* bx ip */
2071 THUMB16_INSN(0xbf00), /* nop */
2072 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2073 };
2074
2075 /* V4T Thumb -> Thumb long branch stub. Using the stack is not
2076 allowed. */
2077 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
2078 {
2079 THUMB16_INSN(0x4778), /* bx pc */
2080 THUMB16_INSN(0x46c0), /* nop */
2081 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2082 ARM_INSN(0xe12fff1c), /* bx ip */
2083 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2084 };
2085
2086 /* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
2087 available. */
2088 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
2089 {
2090 THUMB16_INSN(0x4778), /* bx pc */
2091 THUMB16_INSN(0x46c0), /* nop */
2092 ARM_INSN(0xe51ff004), /* ldr pc, [pc, #-4] */
2093 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2094 };
2095
2096 /* V4T Thumb -> ARM short branch stub. Shorter variant of the above
2097 one, when the destination is close enough. */
2098 static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
2099 {
2100 THUMB16_INSN(0x4778), /* bx pc */
2101 THUMB16_INSN(0x46c0), /* nop */
2102 ARM_REL_INSN(0xea000000, -8), /* b (X-8) */
2103 };
2104
2105 /* ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use
2106 blx to reach the stub if necessary. */
2107 static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
2108 {
2109 ARM_INSN(0xe59fc000), /* ldr r12, [pc] */
2110 ARM_INSN(0xe08ff00c), /* add pc, pc, ip */
2111 DATA_WORD(0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2112 };
2113
2114 /* ARM/Thumb -> Thumb long branch stub, PIC. On V5T and above, use
2115 blx to reach the stub if necessary. We cannot add into pc;
2116 that is not guaranteed to switch mode (the behaviour differs
2117 between ARMv6 and ARMv7). */
2118 static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
2119 {
2120 ARM_INSN(0xe59fc004), /* ldr r12, [pc, #4] */
2121 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2122 ARM_INSN(0xe12fff1c), /* bx ip */
2123 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2124 };
2125
2126 /* V4T ARM -> Thumb long branch stub, PIC. */
2127 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
2128 {
2129 ARM_INSN(0xe59fc004), /* ldr ip, [pc, #4] */
2130 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2131 ARM_INSN(0xe12fff1c), /* bx ip */
2132 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2133 };
2134
2135 /* V4T Thumb -> ARM long branch stub, PIC. */
2136 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
2137 {
2138 THUMB16_INSN(0x4778), /* bx pc */
2139 THUMB16_INSN(0x46c0), /* nop */
2140 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2141 ARM_INSN(0xe08cf00f), /* add pc, ip, pc */
2142 DATA_WORD(0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2143 };
2144
2145 /* Thumb -> Thumb long branch stub, PIC. Used on M-profile
2146 architectures. */
2147 static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
2148 {
2149 THUMB16_INSN(0xb401), /* push {r0} */
2150 THUMB16_INSN(0x4802), /* ldr r0, [pc, #8] */
2151 THUMB16_INSN(0x46fc), /* mov ip, pc */
2152 THUMB16_INSN(0x4484), /* add ip, r0 */
2153 THUMB16_INSN(0xbc01), /* pop {r0} */
2154 THUMB16_INSN(0x4760), /* bx ip */
2155 DATA_WORD(0, R_ARM_REL32, 4), /* dcd R_ARM_REL32(X) */
2156 };
2157
2158 /* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
2159 allowed. */
2160 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
2161 {
2162 THUMB16_INSN(0x4778), /* bx pc */
2163 THUMB16_INSN(0x46c0), /* nop */
2164 ARM_INSN(0xe59fc004), /* ldr ip, [pc, #4] */
2165 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2166 ARM_INSN(0xe12fff1c), /* bx ip */
2167 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2168 };
2169
2170 /* Cortex-A8 erratum-workaround stubs. */
2171
2172 /* Stub used for conditional branches (which may be beyond +/-1MB away, so we
2173 can't use a conditional branch to reach this stub). */
2174
2175 static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
2176 {
2177 THUMB16_BCOND_INSN(0xd001), /* b<cond>.n true. */
2178 THUMB32_B_INSN(0xf000b800, -4), /* b.w insn_after_original_branch. */
2179 THUMB32_B_INSN(0xf000b800, -4) /* true: b.w original_branch_dest. */
2180 };
2181
2182 /* Stub used for b.w and bl.w instructions. */
2183
2184 static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
2185 {
2186 THUMB32_B_INSN(0xf000b800, -4) /* b.w original_branch_dest. */
2187 };
2188
2189 static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
2190 {
2191 THUMB32_B_INSN(0xf000b800, -4) /* b.w original_branch_dest. */
2192 };
2193
2194 /* Stub used for Thumb-2 blx.w instructions. We modified the original blx.w
2195 instruction (which switches to ARM mode) to point to this stub. Jump to the
2196 real destination using an ARM-mode branch. */
2197
2198 static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
2199 {
2200 ARM_REL_INSN(0xea000000, -8) /* b original_branch_dest. */
2201 };
2202
2203 /* Section name for stubs is the associated section name plus this
2204 string. */
2205 #define STUB_SUFFIX ".stub"
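/* For example (illustrative note): stubs attached to an input section
   grouped under ".text" end up in a section named ".text.stub"; the name
   is built in elf32_arm_create_or_find_stub_sec below.  */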
2206
2207 /* One entry per long/short branch stub defined above. */
2208 #define DEF_STUBS \
2209 DEF_STUB(long_branch_any_any) \
2210 DEF_STUB(long_branch_v4t_arm_thumb) \
2211 DEF_STUB(long_branch_thumb_only) \
2212 DEF_STUB(long_branch_v4t_thumb_thumb) \
2213 DEF_STUB(long_branch_v4t_thumb_arm) \
2214 DEF_STUB(short_branch_v4t_thumb_arm) \
2215 DEF_STUB(long_branch_any_arm_pic) \
2216 DEF_STUB(long_branch_any_thumb_pic) \
2217 DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
2218 DEF_STUB(long_branch_v4t_arm_thumb_pic) \
2219 DEF_STUB(long_branch_v4t_thumb_arm_pic) \
2220 DEF_STUB(long_branch_thumb_only_pic) \
2221 DEF_STUB(a8_veneer_b_cond) \
2222 DEF_STUB(a8_veneer_b) \
2223 DEF_STUB(a8_veneer_bl) \
2224 DEF_STUB(a8_veneer_blx)
2225
2226 #define DEF_STUB(x) arm_stub_##x,
2227 enum elf32_arm_stub_type {
2228 arm_stub_none,
2229 DEF_STUBS
2230 /* Alias for the first Cortex-A8 erratum veneer type. */
2231 arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond
2232 };
2233 #undef DEF_STUB
2234
2235 typedef struct
2236 {
2237 const insn_sequence* template_sequence;
2238 int template_size;
2239 } stub_def;
2240
2241 #define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
2242 static const stub_def stub_definitions[] = {
2243 {NULL, 0},
2244 DEF_STUBS
2245 };
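/* Illustrative note (not part of the original source): DEF_STUBS is a
   classic X-macro.  With the first definition of DEF_STUB it expands the
   enum roughly as

     enum elf32_arm_stub_type {
       arm_stub_none,
       arm_stub_long_branch_any_any,
       arm_stub_long_branch_v4t_arm_thumb,
       ...
       arm_stub_a8_veneer_blx,
       arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond
     };

   and with the second definition it builds the parallel table

     static const stub_def stub_definitions[] = {
       {NULL, 0},
       {elf32_arm_stub_long_branch_any_any,
        ARRAY_SIZE (elf32_arm_stub_long_branch_any_any)},
       ...
     };

   so stub_definitions[stub_type] is the template for a given enum
   value.  */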
2246
2247 struct elf32_arm_stub_hash_entry
2248 {
2249 /* Base hash table entry structure. */
2250 struct bfd_hash_entry root;
2251
2252 /* The stub section. */
2253 asection *stub_sec;
2254
2255 /* Offset within stub_sec of the beginning of this stub. */
2256 bfd_vma stub_offset;
2257
2258 /* Given the symbol's value and its section we can determine its final
2259 value when building the stubs (so the stub knows where to jump). */
2260 bfd_vma target_value;
2261 asection *target_section;
2262
2263 /* Offset to apply to relocation referencing target_value. */
2264 bfd_vma target_addend;
2265
2266 /* The instruction which caused this stub to be generated (only valid for
2267 Cortex-A8 erratum workaround stubs at present). */
2268 unsigned long orig_insn;
2269
2270 /* The stub type. */
2271 enum elf32_arm_stub_type stub_type;
2272 /* Its encoding size in bytes. */
2273 int stub_size;
2274 /* Its template. */
2275 const insn_sequence *stub_template;
2276 /* The size of the template (number of entries). */
2277 int stub_template_size;
2278
2279 /* The symbol table entry, if any, that this was derived from. */
2280 struct elf32_arm_link_hash_entry *h;
2281
2282 /* Destination symbol type (STT_ARM_TFUNC, ...) */
2283 unsigned char st_type;
2284
2285 /* Where this stub is being called from, or, in the case of combined
2286 stub sections, the first input section in the group. */
2287 asection *id_sec;
2288
2289 /* The name for the local symbol at the start of this stub. The
2290 stub name in the hash table has to be unique; this one does not, so
2291 it can be friendlier. */
2292 char *output_name;
2293 };
2294
2295 /* Used to build a map of a section. This is required for mixed-endian
2296 code/data. */
2297
2298 typedef struct elf32_elf_section_map
2299 {
2300 bfd_vma vma;
2301 char type;
2302 }
2303 elf32_arm_section_map;
2304
2305 /* Information about a VFP11 erratum veneer, or a branch to such a veneer. */
2306
2307 typedef enum
2308 {
2309 VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
2310 VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
2311 VFP11_ERRATUM_ARM_VENEER,
2312 VFP11_ERRATUM_THUMB_VENEER
2313 }
2314 elf32_vfp11_erratum_type;
2315
2316 typedef struct elf32_vfp11_erratum_list
2317 {
2318 struct elf32_vfp11_erratum_list *next;
2319 bfd_vma vma;
2320 union
2321 {
2322 struct
2323 {
2324 struct elf32_vfp11_erratum_list *veneer;
2325 unsigned int vfp_insn;
2326 } b;
2327 struct
2328 {
2329 struct elf32_vfp11_erratum_list *branch;
2330 unsigned int id;
2331 } v;
2332 } u;
2333 elf32_vfp11_erratum_type type;
2334 }
2335 elf32_vfp11_erratum_list;
2336
2337 typedef enum
2338 {
2339 DELETE_EXIDX_ENTRY,
2340 INSERT_EXIDX_CANTUNWIND_AT_END
2341 }
2342 arm_unwind_edit_type;
2343
2344 /* A (sorted) list of edits to apply to an unwind table. */
2345 typedef struct arm_unwind_table_edit
2346 {
2347 arm_unwind_edit_type type;
2348 /* Note: we sometimes want to insert an unwind entry corresponding to a
2349 section different from the one we're currently writing out, so record the
2350 (text) section this edit relates to here. */
2351 asection *linked_section;
2352 unsigned int index;
2353 struct arm_unwind_table_edit *next;
2354 }
2355 arm_unwind_table_edit;
2356
2357 typedef struct _arm_elf_section_data
2358 {
2359 /* Information about mapping symbols. */
2360 struct bfd_elf_section_data elf;
2361 unsigned int mapcount;
2362 unsigned int mapsize;
2363 elf32_arm_section_map *map;
2364 /* Information about CPU errata. */
2365 unsigned int erratumcount;
2366 elf32_vfp11_erratum_list *erratumlist;
2367 /* Information about unwind tables. */
2368 union
2369 {
2370 /* Unwind info attached to a text section. */
2371 struct
2372 {
2373 asection *arm_exidx_sec;
2374 } text;
2375
2376 /* Unwind info attached to an .ARM.exidx section. */
2377 struct
2378 {
2379 arm_unwind_table_edit *unwind_edit_list;
2380 arm_unwind_table_edit *unwind_edit_tail;
2381 } exidx;
2382 } u;
2383 }
2384 _arm_elf_section_data;
2385
2386 #define elf32_arm_section_data(sec) \
2387 ((_arm_elf_section_data *) elf_section_data (sec))
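/* For example (illustrative note): the mapping-symbol count recorded for
   a section SEC is reached as elf32_arm_section_data (sec)->mapcount, and
   the unwind edit list for an .ARM.exidx section as
   elf32_arm_section_data (sec)->u.exidx.unwind_edit_list.  */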
2388
2389 /* A fix which might be required for the Cortex-A8 Thumb-2 branch/TLB erratum.
2390 These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
2391 and so may be created multiple times: whilst relaxing we keep these entries
2392 in an array which we can refresh easily, then create stubs for each
2393 potentially erratum-triggering instruction once we've settled on a solution. */
2394
2395 struct a8_erratum_fix {
2396 bfd *input_bfd;
2397 asection *section;
2398 bfd_vma offset;
2399 bfd_vma addend;
2400 unsigned long orig_insn;
2401 char *stub_name;
2402 enum elf32_arm_stub_type stub_type;
2403 int st_type;
2404 };
2405
2406 /* A table of relocs applied to branches which might trigger Cortex-A8
2407 erratum. */
2408
2409 struct a8_erratum_reloc {
2410 bfd_vma from;
2411 bfd_vma destination;
2412 unsigned int r_type;
2413 unsigned char st_type;
2414 const char *sym_name;
2415 bfd_boolean non_a8_stub;
2416 };
2417
2418 /* The size of the thread control block. */
2419 #define TCB_SIZE 8
2420
2421 struct elf_arm_obj_tdata
2422 {
2423 struct elf_obj_tdata root;
2424
2425 /* tls_type for each local got entry. */
2426 char *local_got_tls_type;
2427
2428 /* Zero to warn when linking objects with incompatible enum sizes. */
2429 int no_enum_size_warning;
2430
2431 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
2432 int no_wchar_size_warning;
2433 };
2434
2435 #define elf_arm_tdata(bfd) \
2436 ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)
2437
2438 #define elf32_arm_local_got_tls_type(bfd) \
2439 (elf_arm_tdata (bfd)->local_got_tls_type)
2440
2441 #define is_arm_elf(bfd) \
2442 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
2443 && elf_tdata (bfd) != NULL \
2444 && elf_object_id (bfd) == ARM_ELF_DATA)
2445
2446 static bfd_boolean
2447 elf32_arm_mkobject (bfd *abfd)
2448 {
2449 return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
2450 ARM_ELF_DATA);
2451 }
2452
2453 /* The ARM linker needs to keep track of the number of relocs that it
2454 decides to copy in check_relocs for each symbol. This is so that
2455 it can discard PC relative relocs if it doesn't need them when
2456 linking with -Bsymbolic. We store the information in a field
2457 extending the regular ELF linker hash table. */
2458
2459 /* This structure keeps track of the number of relocs we have copied
2460 for a given symbol. */
2461 struct elf32_arm_relocs_copied
2462 {
2463 /* Next section. */
2464 struct elf32_arm_relocs_copied * next;
2465 /* A section in dynobj. */
2466 asection * section;
2467 /* Number of relocs copied in this section. */
2468 bfd_size_type count;
2469 /* Number of PC-relative relocs copied in this section. */
2470 bfd_size_type pc_count;
2471 };
2472
2473 #define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))
2474
2475 /* Arm ELF linker hash entry. */
2476 struct elf32_arm_link_hash_entry
2477 {
2478 struct elf_link_hash_entry root;
2479
2480 /* Number of PC relative relocs copied for this symbol. */
2481 struct elf32_arm_relocs_copied * relocs_copied;
2482
2483 /* We reference count Thumb references to a PLT entry separately,
2484 so that we can emit the Thumb trampoline only if needed. */
2485 bfd_signed_vma plt_thumb_refcount;
2486
2487 /* Some references from Thumb code may be eliminated by BL->BLX
2488 conversion, so record them separately. */
2489 bfd_signed_vma plt_maybe_thumb_refcount;
2490
2491 /* Since PLT entries have variable size if the Thumb prologue is
2492 used, we need to record the index into .got.plt instead of
2493 recomputing it from the PLT offset. */
2494 bfd_signed_vma plt_got_offset;
2495
2496 #define GOT_UNKNOWN 0
2497 #define GOT_NORMAL 1
2498 #define GOT_TLS_GD 2
2499 #define GOT_TLS_IE 4
2500 unsigned char tls_type;
2501
2502 /* The symbol marking the real symbol location for exported thumb
2503 symbols with Arm stubs. */
2504 struct elf_link_hash_entry *export_glue;
2505
2506 /* A pointer to the most recently used stub hash entry against this
2507 symbol. */
2508 struct elf32_arm_stub_hash_entry *stub_cache;
2509 };
2510
2511 /* Traverse an arm ELF linker hash table. */
2512 #define elf32_arm_link_hash_traverse(table, func, info) \
2513 (elf_link_hash_traverse \
2514 (&(table)->root, \
2515 (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func), \
2516 (info)))
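/* Illustrative sketch of how the traversal macro above is used; the
   callback below is hypothetical and not part of the original code.  */

static bfd_boolean
example_count_thumb_plt_refs (struct elf32_arm_link_hash_entry *eh,
			      void *inf)
{
  bfd_signed_vma *total = (bfd_signed_vma *) inf;

  *total += eh->plt_thumb_refcount;
  return TRUE;
}

/* A caller holding a struct elf32_arm_link_hash_table *htab would then
   write:

     bfd_signed_vma total = 0;
     elf32_arm_link_hash_traverse (htab, example_count_thumb_plt_refs,
				   &total);  */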
2517
2518 /* Get the ARM elf linker hash table from a link_info structure. */
2519 #define elf32_arm_hash_table(info) \
2520 (elf_hash_table_id ((struct elf_link_hash_table *) ((info)->hash)) \
2521 == ARM_ELF_DATA ? ((struct elf32_arm_link_hash_table *) ((info)->hash)) : NULL)
2522
2523 #define arm_stub_hash_lookup(table, string, create, copy) \
2524 ((struct elf32_arm_stub_hash_entry *) \
2525 bfd_hash_lookup ((table), (string), (create), (copy)))
2526
2527 /* Array to keep track of which stub sections have been created, and
2528 information on stub grouping. */
2529 struct map_stub
2530 {
2531 /* This is the section to which stubs in the group will be
2532 attached. */
2533 asection *link_sec;
2534 /* The stub section. */
2535 asection *stub_sec;
2536 };
2537
2538 /* ARM ELF linker hash table. */
2539 struct elf32_arm_link_hash_table
2540 {
2541 /* The main hash table. */
2542 struct elf_link_hash_table root;
2543
2544 /* The size in bytes of the section containing the Thumb-to-ARM glue. */
2545 bfd_size_type thumb_glue_size;
2546
2547 /* The size in bytes of the section containing the ARM-to-Thumb glue. */
2548 bfd_size_type arm_glue_size;
2549
2550 /* The size in bytes of the section containing the ARMv4 BX veneers. */
2551 bfd_size_type bx_glue_size;
2552
2553 /* Offsets of ARMv4 BX veneers. Bit 1 is set if a veneer is present, and
2554 bit 0 is set once the veneer has been populated. */
2555 bfd_vma bx_glue_offset[15];
2556
2557 /* The size in bytes of the section containing glue for VFP11 erratum
2558 veneers. */
2559 bfd_size_type vfp11_erratum_glue_size;
2560
2561 /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum. This
2562 holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
2563 elf32_arm_write_section(). */
2564 struct a8_erratum_fix *a8_erratum_fixes;
2565 unsigned int num_a8_erratum_fixes;
2566
2567 /* An arbitrary input BFD chosen to hold the glue sections. */
2568 bfd * bfd_of_glue_owner;
2569
2570 /* Nonzero to output a BE8 image. */
2571 int byteswap_code;
2572
2573 /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
2574 Nonzero if R_ARM_TARGET1 means R_ARM_REL32. */
2575 int target1_is_rel;
2576
2577 /* The relocation to use for R_ARM_TARGET2 relocations. */
2578 int target2_reloc;
2579
2580 /* 0 = Ignore R_ARM_V4BX.
2581 1 = Convert BX to MOV PC.
2582 2 = Generate v4 interworking stubs. */
2583 int fix_v4bx;
2584
2585 /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum. */
2586 int fix_cortex_a8;
2587
2588 /* Nonzero if the ARM/Thumb BLX instructions are available for use. */
2589 int use_blx;
2590
2591 /* What sort of code sequences we should look for which may trigger the
2592 VFP11 denorm erratum. */
2593 bfd_arm_vfp11_fix vfp11_fix;
2594
2595 /* Global counter for the number of fixes we have emitted. */
2596 int num_vfp11_fixes;
2597
2598 /* Nonzero to force PIC branch veneers. */
2599 int pic_veneer;
2600
2601 /* The number of bytes in the initial entry in the PLT. */
2602 bfd_size_type plt_header_size;
2603
2604 /* The number of bytes in the subsequent PLT entries. */
2605 bfd_size_type plt_entry_size;
2606
2607 /* True if the target system is VxWorks. */
2608 int vxworks_p;
2609
2610 /* True if the target system is Symbian OS. */
2611 int symbian_p;
2612
2613 /* True if the target uses REL relocations. */
2614 int use_rel;
2615
2616 /* Short-cuts to get to dynamic linker sections. */
2617 asection *sgot;
2618 asection *sgotplt;
2619 asection *srelgot;
2620 asection *splt;
2621 asection *srelplt;
2622 asection *sdynbss;
2623 asection *srelbss;
2624
2625 /* The (unloaded but important) VxWorks .rela.plt.unloaded section. */
2626 asection *srelplt2;
2627
2628 /* Data for R_ARM_TLS_LDM32 relocations. */
2629 union
2630 {
2631 bfd_signed_vma refcount;
2632 bfd_vma offset;
2633 } tls_ldm_got;
2634
2635 /* Small local sym cache. */
2636 struct sym_cache sym_cache;
2637
2638 /* For convenience in allocate_dynrelocs. */
2639 bfd * obfd;
2640
2641 /* The stub hash table. */
2642 struct bfd_hash_table stub_hash_table;
2643
2644 /* Linker stub bfd. */
2645 bfd *stub_bfd;
2646
2647 /* Linker call-backs. */
2648 asection * (*add_stub_section) (const char *, asection *);
2649 void (*layout_sections_again) (void);
2650
2651 /* Array to keep track of which stub sections have been created, and
2652 information on stub grouping. */
2653 struct map_stub *stub_group;
2654
2655 /* Number of elements in stub_group. */
2656 int top_id;
2657
2658 /* Assorted information used by elf32_arm_size_stubs. */
2659 unsigned int bfd_count;
2660 int top_index;
2661 asection **input_list;
2662 };
2663
2664 /* Create an entry in an ARM ELF linker hash table. */
2665
2666 static struct bfd_hash_entry *
2667 elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
2668 struct bfd_hash_table * table,
2669 const char * string)
2670 {
2671 struct elf32_arm_link_hash_entry * ret =
2672 (struct elf32_arm_link_hash_entry *) entry;
2673
2674 /* Allocate the structure if it has not already been allocated by a
2675 subclass. */
2676 if (ret == NULL)
2677 ret = (struct elf32_arm_link_hash_entry *)
2678 bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
2679 if (ret == NULL)
2680 return (struct bfd_hash_entry *) ret;
2681
2682 /* Call the allocation method of the superclass. */
2683 ret = ((struct elf32_arm_link_hash_entry *)
2684 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
2685 table, string));
2686 if (ret != NULL)
2687 {
2688 ret->relocs_copied = NULL;
2689 ret->tls_type = GOT_UNKNOWN;
2690 ret->plt_thumb_refcount = 0;
2691 ret->plt_maybe_thumb_refcount = 0;
2692 ret->plt_got_offset = -1;
2693 ret->export_glue = NULL;
2694
2695 ret->stub_cache = NULL;
2696 }
2697
2698 return (struct bfd_hash_entry *) ret;
2699 }
2700
2701 /* Initialize an entry in the stub hash table. */
2702
2703 static struct bfd_hash_entry *
2704 stub_hash_newfunc (struct bfd_hash_entry *entry,
2705 struct bfd_hash_table *table,
2706 const char *string)
2707 {
2708 /* Allocate the structure if it has not already been allocated by a
2709 subclass. */
2710 if (entry == NULL)
2711 {
2712 entry = (struct bfd_hash_entry *)
2713 bfd_hash_allocate (table, sizeof (struct elf32_arm_stub_hash_entry));
2714 if (entry == NULL)
2715 return entry;
2716 }
2717
2718 /* Call the allocation method of the superclass. */
2719 entry = bfd_hash_newfunc (entry, table, string);
2720 if (entry != NULL)
2721 {
2722 struct elf32_arm_stub_hash_entry *eh;
2723
2724 /* Initialize the local fields. */
2725 eh = (struct elf32_arm_stub_hash_entry *) entry;
2726 eh->stub_sec = NULL;
2727 eh->stub_offset = 0;
2728 eh->target_value = 0;
2729 eh->target_section = NULL;
2730 eh->target_addend = 0;
2731 eh->orig_insn = 0;
2732 eh->stub_type = arm_stub_none;
2733 eh->stub_size = 0;
2734 eh->stub_template = NULL;
2735 eh->stub_template_size = 0;
2736 eh->h = NULL;
2737 eh->id_sec = NULL;
2738 eh->output_name = NULL;
2739 }
2740
2741 return entry;
2742 }
2743
2744 /* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
2745 shortcuts to them in our hash table. */
2746
2747 static bfd_boolean
2748 create_got_section (bfd *dynobj, struct bfd_link_info *info)
2749 {
2750 struct elf32_arm_link_hash_table *htab;
2751
2752 htab = elf32_arm_hash_table (info);
2753 if (htab == NULL)
2754 return FALSE;
2755
2756 /* BPABI objects never have a GOT, or associated sections. */
2757 if (htab->symbian_p)
2758 return TRUE;
2759
2760 if (! _bfd_elf_create_got_section (dynobj, info))
2761 return FALSE;
2762
2763 htab->sgot = bfd_get_section_by_name (dynobj, ".got");
2764 htab->sgotplt = bfd_get_section_by_name (dynobj, ".got.plt");
2765 if (!htab->sgot || !htab->sgotplt)
2766 abort ();
2767
2768 htab->srelgot = bfd_get_section_by_name (dynobj,
2769 RELOC_SECTION (htab, ".got"));
2770 if (htab->srelgot == NULL)
2771 return FALSE;
2772 return TRUE;
2773 }
2774
2775 /* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
2776 .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
2777 hash table. */
2778
2779 static bfd_boolean
2780 elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
2781 {
2782 struct elf32_arm_link_hash_table *htab;
2783
2784 htab = elf32_arm_hash_table (info);
2785 if (htab == NULL)
2786 return FALSE;
2787
2788 if (!htab->sgot && !create_got_section (dynobj, info))
2789 return FALSE;
2790
2791 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
2792 return FALSE;
2793
2794 htab->splt = bfd_get_section_by_name (dynobj, ".plt");
2795 htab->srelplt = bfd_get_section_by_name (dynobj,
2796 RELOC_SECTION (htab, ".plt"));
2797 htab->sdynbss = bfd_get_section_by_name (dynobj, ".dynbss");
2798 if (!info->shared)
2799 htab->srelbss = bfd_get_section_by_name (dynobj,
2800 RELOC_SECTION (htab, ".bss"));
2801
2802 if (htab->vxworks_p)
2803 {
2804 if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
2805 return FALSE;
2806
2807 if (info->shared)
2808 {
2809 htab->plt_header_size = 0;
2810 htab->plt_entry_size
2811 = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
2812 }
2813 else
2814 {
2815 htab->plt_header_size
2816 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
2817 htab->plt_entry_size
2818 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
2819 }
2820 }
2821
2822 if (!htab->splt
2823 || !htab->srelplt
2824 || !htab->sdynbss
2825 || (!info->shared && !htab->srelbss))
2826 abort ();
2827
2828 return TRUE;
2829 }
2830
2831 /* Copy the extra info we tack onto an elf_link_hash_entry. */
2832
2833 static void
2834 elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
2835 struct elf_link_hash_entry *dir,
2836 struct elf_link_hash_entry *ind)
2837 {
2838 struct elf32_arm_link_hash_entry *edir, *eind;
2839
2840 edir = (struct elf32_arm_link_hash_entry *) dir;
2841 eind = (struct elf32_arm_link_hash_entry *) ind;
2842
2843 if (eind->relocs_copied != NULL)
2844 {
2845 if (edir->relocs_copied != NULL)
2846 {
2847 struct elf32_arm_relocs_copied **pp;
2848 struct elf32_arm_relocs_copied *p;
2849
2850 /* Add reloc counts against the indirect sym to the direct sym
2851 list. Merge any entries against the same section. */
2852 for (pp = &eind->relocs_copied; (p = *pp) != NULL; )
2853 {
2854 struct elf32_arm_relocs_copied *q;
2855
2856 for (q = edir->relocs_copied; q != NULL; q = q->next)
2857 if (q->section == p->section)
2858 {
2859 q->pc_count += p->pc_count;
2860 q->count += p->count;
2861 *pp = p->next;
2862 break;
2863 }
2864 if (q == NULL)
2865 pp = &p->next;
2866 }
2867 *pp = edir->relocs_copied;
2868 }
2869
2870 edir->relocs_copied = eind->relocs_copied;
2871 eind->relocs_copied = NULL;
2872 }
2873
2874 if (ind->root.type == bfd_link_hash_indirect)
2875 {
2876 /* Copy over PLT info. */
2877 edir->plt_thumb_refcount += eind->plt_thumb_refcount;
2878 eind->plt_thumb_refcount = 0;
2879 edir->plt_maybe_thumb_refcount += eind->plt_maybe_thumb_refcount;
2880 eind->plt_maybe_thumb_refcount = 0;
2881
2882 if (dir->got.refcount <= 0)
2883 {
2884 edir->tls_type = eind->tls_type;
2885 eind->tls_type = GOT_UNKNOWN;
2886 }
2887 }
2888
2889 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
2890 }
2891
2892 /* Create an ARM elf linker hash table. */
2893
2894 static struct bfd_link_hash_table *
2895 elf32_arm_link_hash_table_create (bfd *abfd)
2896 {
2897 struct elf32_arm_link_hash_table *ret;
2898 bfd_size_type amt = sizeof (struct elf32_arm_link_hash_table);
2899
2900 ret = (struct elf32_arm_link_hash_table *) bfd_malloc (amt);
2901 if (ret == NULL)
2902 return NULL;
2903
2904 if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
2905 elf32_arm_link_hash_newfunc,
2906 sizeof (struct elf32_arm_link_hash_entry),
2907 ARM_ELF_DATA))
2908 {
2909 free (ret);
2910 return NULL;
2911 }
2912
2913 ret->sgot = NULL;
2914 ret->sgotplt = NULL;
2915 ret->srelgot = NULL;
2916 ret->splt = NULL;
2917 ret->srelplt = NULL;
2918 ret->sdynbss = NULL;
2919 ret->srelbss = NULL;
2920 ret->srelplt2 = NULL;
2921 ret->thumb_glue_size = 0;
2922 ret->arm_glue_size = 0;
2923 ret->bx_glue_size = 0;
2924 memset (ret->bx_glue_offset, 0, sizeof (ret->bx_glue_offset));
2925 ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
2926 ret->vfp11_erratum_glue_size = 0;
2927 ret->num_vfp11_fixes = 0;
2928 ret->fix_cortex_a8 = 0;
2929 ret->bfd_of_glue_owner = NULL;
2930 ret->byteswap_code = 0;
2931 ret->target1_is_rel = 0;
2932 ret->target2_reloc = R_ARM_NONE;
2933 #ifdef FOUR_WORD_PLT
2934 ret->plt_header_size = 16;
2935 ret->plt_entry_size = 16;
2936 #else
2937 ret->plt_header_size = 20;
2938 ret->plt_entry_size = 12;
2939 #endif
2940 ret->fix_v4bx = 0;
2941 ret->use_blx = 0;
2942 ret->vxworks_p = 0;
2943 ret->symbian_p = 0;
2944 ret->use_rel = 1;
2945 ret->sym_cache.abfd = NULL;
2946 ret->obfd = abfd;
2947 ret->tls_ldm_got.refcount = 0;
2948 ret->stub_bfd = NULL;
2949 ret->add_stub_section = NULL;
2950 ret->layout_sections_again = NULL;
2951 ret->stub_group = NULL;
2952 ret->top_id = 0;
2953 ret->bfd_count = 0;
2954 ret->top_index = 0;
2955 ret->input_list = NULL;
2956
2957 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
2958 sizeof (struct elf32_arm_stub_hash_entry)))
2959 {
2960 free (ret);
2961 return NULL;
2962 }
2963
2964 return &ret->root.root;
2965 }
2966
2967 /* Free the derived linker hash table. */
2968
2969 static void
2970 elf32_arm_hash_table_free (struct bfd_link_hash_table *hash)
2971 {
2972 struct elf32_arm_link_hash_table *ret
2973 = (struct elf32_arm_link_hash_table *) hash;
2974
2975 bfd_hash_table_free (&ret->stub_hash_table);
2976 _bfd_generic_link_hash_table_free (hash);
2977 }
2978
2979 /* Determine if we're dealing with a Thumb-only architecture. */
2980
2981 static bfd_boolean
2982 using_thumb_only (struct elf32_arm_link_hash_table *globals)
2983 {
2984 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2985 Tag_CPU_arch);
2986 int profile;
2987
2988 if (arch == TAG_CPU_ARCH_V6_M || arch == TAG_CPU_ARCH_V6S_M)
2989 return TRUE;
2990
2991 if (arch != TAG_CPU_ARCH_V7 && arch != TAG_CPU_ARCH_V7E_M)
2992 return FALSE;
2993
2994 profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2995 Tag_CPU_arch_profile);
2996
2997 return profile == 'M';
2998 }
2999
3000 /* Determine if we're dealing with a Thumb-2 object. */
3001
3002 static bfd_boolean
3003 using_thumb2 (struct elf32_arm_link_hash_table *globals)
3004 {
3005 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3006 Tag_CPU_arch);
3007 return arch == TAG_CPU_ARCH_V6T2 || arch >= TAG_CPU_ARCH_V7;
3008 }
3009
3010 /* Determine what kind of NOPs are available. */
3011
3012 static bfd_boolean
3013 arch_has_arm_nop (struct elf32_arm_link_hash_table *globals)
3014 {
3015 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3016 Tag_CPU_arch);
3017 return arch == TAG_CPU_ARCH_V6T2
3018 || arch == TAG_CPU_ARCH_V6K
3019 || arch == TAG_CPU_ARCH_V7
3020 || arch == TAG_CPU_ARCH_V7E_M;
3021 }
3022
3023 static bfd_boolean
3024 arch_has_thumb2_nop (struct elf32_arm_link_hash_table *globals)
3025 {
3026 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3027 Tag_CPU_arch);
3028 return (arch == TAG_CPU_ARCH_V6T2 || arch == TAG_CPU_ARCH_V7
3029 || arch == TAG_CPU_ARCH_V7E_M);
3030 }
3031
3032 static bfd_boolean
3033 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
3034 {
3035 switch (stub_type)
3036 {
3037 case arm_stub_long_branch_thumb_only:
3038 case arm_stub_long_branch_v4t_thumb_arm:
3039 case arm_stub_short_branch_v4t_thumb_arm:
3040 case arm_stub_long_branch_v4t_thumb_arm_pic:
3041 case arm_stub_long_branch_thumb_only_pic:
3042 return TRUE;
3043 case arm_stub_none:
3044 BFD_FAIL ();
3045 return FALSE;
3046 break;
3047 default:
3048 return FALSE;
3049 }
3050 }
3051
3052 /* Determine the type of stub needed, if any, for a call. */
3053
3054 static enum elf32_arm_stub_type
3055 arm_type_of_stub (struct bfd_link_info *info,
3056 asection *input_sec,
3057 const Elf_Internal_Rela *rel,
3058 int *actual_st_type,
3059 struct elf32_arm_link_hash_entry *hash,
3060 bfd_vma destination,
3061 asection *sym_sec,
3062 bfd *input_bfd,
3063 const char *name)
3064 {
3065 bfd_vma location;
3066 bfd_signed_vma branch_offset;
3067 unsigned int r_type;
3068 struct elf32_arm_link_hash_table * globals;
3069 int thumb2;
3070 int thumb_only;
3071 enum elf32_arm_stub_type stub_type = arm_stub_none;
3072 int use_plt = 0;
3073 int st_type = *actual_st_type;
3074
3075 /* If the destination is of type STT_SECTION we do not know its
3076 actual type: give up. */
3077 if (st_type == STT_SECTION)
3078 return stub_type;
3079
3080 globals = elf32_arm_hash_table (info);
3081 if (globals == NULL)
3082 return stub_type;
3083
3084 thumb_only = using_thumb_only (globals);
3085
3086 thumb2 = using_thumb2 (globals);
3087
3088 /* Determine where the call point is. */
3089 location = (input_sec->output_offset
3090 + input_sec->output_section->vma
3091 + rel->r_offset);
3092
3093 r_type = ELF32_R_TYPE (rel->r_info);
3094
3095 /* Keep a simpler condition, for the sake of clarity. */
3096 if (globals->splt != NULL
3097 && hash != NULL
3098 && hash->root.plt.offset != (bfd_vma) -1)
3099 {
3100 use_plt = 1;
3101
3102 /* Note when dealing with PLT entries: the main PLT stub is in
3103 ARM mode, so if the branch is in Thumb mode, another
3104 Thumb->ARM stub will be inserted later just before the ARM
3105 PLT stub. We don't take this extra distance into account
3106 here: if a long branch stub is needed, we'll add a
3107 Thumb->ARM one and branch directly to the ARM PLT entry,
3108 which avoids spreading offset corrections over several
3109 places. */
3110
3111 destination = (globals->splt->output_section->vma
3112 + globals->splt->output_offset
3113 + hash->root.plt.offset);
3114 st_type = STT_FUNC;
3115 }
3116
3117 branch_offset = (bfd_signed_vma)(destination - location);
3118
3119 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
3120 {
3121 /* Handle cases where:
3122 - this call goes too far (different Thumb/Thumb2 max
3123 distance)
3124 - it's a Thumb->Arm call and blx is not available, or it's a
3125 Thumb->Arm branch (not bl). A stub is needed in this case,
3126 but only if this call is not through a PLT entry. Indeed,
3127 PLT stubs handle mode switching already.
3128 */
3129 if ((!thumb2
3130 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
3131 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
3132 || (thumb2
3133 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
3134 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
3135 || ((st_type != STT_ARM_TFUNC)
3136 && (((r_type == R_ARM_THM_CALL) && !globals->use_blx)
3137 || (r_type == R_ARM_THM_JUMP24))
3138 && !use_plt))
3139 {
3140 if (st_type == STT_ARM_TFUNC)
3141 {
3142 /* Thumb to thumb. */
3143 if (!thumb_only)
3144 {
3145 stub_type = (info->shared | globals->pic_veneer)
3146 /* PIC stubs. */
3147 ? ((globals->use_blx
3148 && (r_type ==R_ARM_THM_CALL))
3149 /* V5T and above. Stub starts with ARM code, so
3150 we must be able to switch mode before
3151 reaching it, which is only possible for 'bl'
3152 (ie R_ARM_THM_CALL relocation). */
3153 ? arm_stub_long_branch_any_thumb_pic
3154 /* On V4T, use Thumb code only. */
3155 : arm_stub_long_branch_v4t_thumb_thumb_pic)
3156
3157 /* non-PIC stubs. */
3158 : ((globals->use_blx
3159 && (r_type ==R_ARM_THM_CALL))
3160 /* V5T and above. */
3161 ? arm_stub_long_branch_any_any
3162 /* V4T. */
3163 : arm_stub_long_branch_v4t_thumb_thumb);
3164 }
3165 else
3166 {
3167 stub_type = (info->shared | globals->pic_veneer)
3168 /* PIC stub. */
3169 ? arm_stub_long_branch_thumb_only_pic
3170 /* non-PIC stub. */
3171 : arm_stub_long_branch_thumb_only;
3172 }
3173 }
3174 else
3175 {
3176 /* Thumb to arm. */
3177 if (sym_sec != NULL
3178 && sym_sec->owner != NULL
3179 && !INTERWORK_FLAG (sym_sec->owner))
3180 {
3181 (*_bfd_error_handler)
3182 (_("%B(%s): warning: interworking not enabled.\n"
3183 " first occurrence: %B: Thumb call to ARM"),
3184 sym_sec->owner, input_bfd, name);
3185 }
3186
3187 stub_type = (info->shared | globals->pic_veneer)
3188 /* PIC stubs. */
3189 ? ((globals->use_blx
3190 && (r_type ==R_ARM_THM_CALL))
3191 /* V5T and above. */
3192 ? arm_stub_long_branch_any_arm_pic
3193 /* V4T PIC stub. */
3194 : arm_stub_long_branch_v4t_thumb_arm_pic)
3195
3196 /* non-PIC stubs. */
3197 : ((globals->use_blx
3198 && (r_type ==R_ARM_THM_CALL))
3199 /* V5T and above. */
3200 ? arm_stub_long_branch_any_any
3201 /* V4T. */
3202 : arm_stub_long_branch_v4t_thumb_arm);
3203
3204 /* Handle v4t short branches. */
3205 if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
3206 && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
3207 && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
3208 stub_type = arm_stub_short_branch_v4t_thumb_arm;
3209 }
3210 }
3211 }
3212 else if (r_type == R_ARM_CALL
3213 || r_type == R_ARM_JUMP24
3214 || r_type == R_ARM_PLT32)
3215 {
3216 if (st_type == STT_ARM_TFUNC)
3217 {
3218 /* Arm to thumb. */
3219
3220 if (sym_sec != NULL
3221 && sym_sec->owner != NULL
3222 && !INTERWORK_FLAG (sym_sec->owner))
3223 {
3224 (*_bfd_error_handler)
3225 (_("%B(%s): warning: interworking not enabled.\n"
3226 " first occurrence: %B: ARM call to Thumb"),
3227 sym_sec->owner, input_bfd, name);
3228 }
3229
3230 /* We have an extra 2 bytes of reach because of the mode
3231 change (the H bit, bit 24, of the BLX encoding). */
3232 if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
3233 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
3234 || ((r_type == R_ARM_CALL) && !globals->use_blx)
3235 || (r_type == R_ARM_JUMP24)
3236 || (r_type == R_ARM_PLT32))
3237 {
3238 stub_type = (info->shared | globals->pic_veneer)
3239 /* PIC stubs. */
3240 ? ((globals->use_blx)
3241 /* V5T and above. */
3242 ? arm_stub_long_branch_any_thumb_pic
3243 /* V4T stub. */
3244 : arm_stub_long_branch_v4t_arm_thumb_pic)
3245
3246 /* non-PIC stubs. */
3247 : ((globals->use_blx)
3248 /* V5T and above. */
3249 ? arm_stub_long_branch_any_any
3250 /* V4T. */
3251 : arm_stub_long_branch_v4t_arm_thumb);
3252 }
3253 }
3254 else
3255 {
3256 /* Arm to arm. */
3257 if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
3258 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
3259 {
3260 stub_type = (info->shared | globals->pic_veneer)
3261 /* PIC stubs. */
3262 ? arm_stub_long_branch_any_arm_pic
3263 /* non-PIC stubs. */
3264 : arm_stub_long_branch_any_any;
3265 }
3266 }
3267 }
3268
3269 /* If a stub is needed, record the actual destination type. */
3270 if (stub_type != arm_stub_none)
3271 {
3272 *actual_st_type = st_type;
3273 }
3274
3275 return stub_type;
3276 }
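/* Worked example of the selection above (illustrative note, not part of
   the original source): a Thumb "bl" (R_ARM_THM_CALL) to an ARM function
   more than about 16MB away in a non-PIC link picks
   arm_stub_long_branch_any_any when BLX is available (v5t and later), but
   arm_stub_long_branch_v4t_thumb_arm on v4t, possibly downgraded to the
   short variant if the target turns out to be within Thumb branch
   range.  */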
3277
3278 /* Build a name for an entry in the stub hash table. */
3279
3280 static char *
3281 elf32_arm_stub_name (const asection *input_section,
3282 const asection *sym_sec,
3283 const struct elf32_arm_link_hash_entry *hash,
3284 const Elf_Internal_Rela *rel,
3285 enum elf32_arm_stub_type stub_type)
3286 {
3287 char *stub_name;
3288 bfd_size_type len;
3289
3290 if (hash)
3291 {
3292 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1 + 2 + 1;
3293 stub_name = (char *) bfd_malloc (len);
3294 if (stub_name != NULL)
3295 sprintf (stub_name, "%08x_%s+%x_%d",
3296 input_section->id & 0xffffffff,
3297 hash->root.root.root.string,
3298 (int) rel->r_addend & 0xffffffff,
3299 (int) stub_type);
3300 }
3301 else
3302 {
3303 len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 2 + 1;
3304 stub_name = (char *) bfd_malloc (len);
3305 if (stub_name != NULL)
3306 sprintf (stub_name, "%08x_%x:%x+%x_%d",
3307 input_section->id & 0xffffffff,
3308 sym_sec->id & 0xffffffff,
3309 (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
3310 (int) rel->r_addend & 0xffffffff,
3311 (int) stub_type);
3312 }
3313
3314 return stub_name;
3315 }
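/* For instance (illustrative note with made-up values): a stub reaching
   the global symbol "printf" from the section group with id 0x24, addend
   0 and numeric stub type 3 would be named "00000024_printf+0_3", while
   stubs for local symbols use the section id and symbol index in place of
   the name.  */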
3316
3317 /* Look up an entry in the stub hash. Stub entries are cached because
3318 creating the stub name takes a bit of time. */
3319
3320 static struct elf32_arm_stub_hash_entry *
3321 elf32_arm_get_stub_entry (const asection *input_section,
3322 const asection *sym_sec,
3323 struct elf_link_hash_entry *hash,
3324 const Elf_Internal_Rela *rel,
3325 struct elf32_arm_link_hash_table *htab,
3326 enum elf32_arm_stub_type stub_type)
3327 {
3328 struct elf32_arm_stub_hash_entry *stub_entry;
3329 struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
3330 const asection *id_sec;
3331
3332 if ((input_section->flags & SEC_CODE) == 0)
3333 return NULL;
3334
3335 /* If this input section is part of a group of sections sharing one
3336 stub section, then use the id of the first section in the group.
3337 Stub names need to include a section id, as there may well be
3338 more than one stub used to reach, say, printf, and we need to
3339 distinguish between them. */
3340 id_sec = htab->stub_group[input_section->id].link_sec;
3341
3342 if (h != NULL && h->stub_cache != NULL
3343 && h->stub_cache->h == h
3344 && h->stub_cache->id_sec == id_sec
3345 && h->stub_cache->stub_type == stub_type)
3346 {
3347 stub_entry = h->stub_cache;
3348 }
3349 else
3350 {
3351 char *stub_name;
3352
3353 stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel, stub_type);
3354 if (stub_name == NULL)
3355 return NULL;
3356
3357 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
3358 stub_name, FALSE, FALSE);
3359 if (h != NULL)
3360 h->stub_cache = stub_entry;
3361
3362 free (stub_name);
3363 }
3364
3365 return stub_entry;
3366 }
3367
3368 /* Find or create a stub section. Returns a pointer to the stub section, and
3369 the section to which the stub section will be attached (in *LINK_SEC_P).
3370 LINK_SEC_P may be NULL. */
3371
3372 static asection *
3373 elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
3374 struct elf32_arm_link_hash_table *htab)
3375 {
3376 asection *link_sec;
3377 asection *stub_sec;
3378
3379 link_sec = htab->stub_group[section->id].link_sec;
3380 stub_sec = htab->stub_group[section->id].stub_sec;
3381 if (stub_sec == NULL)
3382 {
3383 stub_sec = htab->stub_group[link_sec->id].stub_sec;
3384 if (stub_sec == NULL)
3385 {
3386 size_t namelen;
3387 bfd_size_type len;
3388 char *s_name;
3389
3390 namelen = strlen (link_sec->name);
3391 len = namelen + sizeof (STUB_SUFFIX);
3392 s_name = (char *) bfd_alloc (htab->stub_bfd, len);
3393 if (s_name == NULL)
3394 return NULL;
3395
3396 memcpy (s_name, link_sec->name, namelen);
3397 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
3398 stub_sec = (*htab->add_stub_section) (s_name, link_sec);
3399 if (stub_sec == NULL)
3400 return NULL;
3401 htab->stub_group[link_sec->id].stub_sec = stub_sec;
3402 }
3403 htab->stub_group[section->id].stub_sec = stub_sec;
3404 }
3405
3406 if (link_sec_p)
3407 *link_sec_p = link_sec;
3408
3409 return stub_sec;
3410 }
3411
3412 /* Add a new stub entry to the stub hash. Not all fields of the new
3413 stub entry are initialised. */
3414
3415 static struct elf32_arm_stub_hash_entry *
3416 elf32_arm_add_stub (const char *stub_name,
3417 asection *section,
3418 struct elf32_arm_link_hash_table *htab)
3419 {
3420 asection *link_sec;
3421 asection *stub_sec;
3422 struct elf32_arm_stub_hash_entry *stub_entry;
3423
3424 stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab);
3425 if (stub_sec == NULL)
3426 return NULL;
3427
3428 /* Enter this entry into the linker stub hash table. */
3429 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
3430 TRUE, FALSE);
3431 if (stub_entry == NULL)
3432 {
3433 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
3434 section->owner,
3435 stub_name);
3436 return NULL;
3437 }
3438
3439 stub_entry->stub_sec = stub_sec;
3440 stub_entry->stub_offset = 0;
3441 stub_entry->id_sec = link_sec;
3442
3443 return stub_entry;
3444 }
3445
3446 /* Store an Arm insn into an output section not processed by
3447 elf32_arm_write_section. */
3448
3449 static void
3450 put_arm_insn (struct elf32_arm_link_hash_table * htab,
3451 bfd * output_bfd, bfd_vma val, void * ptr)
3452 {
3453 if (htab->byteswap_code != bfd_little_endian (output_bfd))
3454 bfd_putl32 (val, ptr);
3455 else
3456 bfd_putb32 (val, ptr);
3457 }
3458
3459 /* Store a 16-bit Thumb insn into an output section not processed by
3460 elf32_arm_write_section. */
3461
3462 static void
3463 put_thumb_insn (struct elf32_arm_link_hash_table * htab,
3464 bfd * output_bfd, bfd_vma val, void * ptr)
3465 {
3466 if (htab->byteswap_code != bfd_little_endian (output_bfd))
3467 bfd_putl16 (val, ptr);
3468 else
3469 bfd_putb16 (val, ptr);
3470 }
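/* Illustrative note on the two helpers above: byteswap_code is nonzero
   when producing a BE8 image, where instructions are stored little-endian
   even though the output BFD is big-endian.  The test therefore selects
   the "other" byte order only in that case:

     byteswap_code  output endian   insn written as
          0           little          little  (bfd_putl)
          0           big             big     (bfd_putb)
          1           big             little  (bfd_putl)  -- BE8  */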
3471
3472 static bfd_reloc_status_type elf32_arm_final_link_relocate
3473 (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
3474 Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
3475 const char *, int, struct elf_link_hash_entry *, bfd_boolean *, char **);
3476
3477 static bfd_boolean
3478 arm_build_one_stub (struct bfd_hash_entry *gen_entry,
3479 void * in_arg)
3480 {
3481 #define MAXRELOCS 2
3482 struct elf32_arm_stub_hash_entry *stub_entry;
3483 struct elf32_arm_link_hash_table *globals;
3484 struct bfd_link_info *info;
3485 asection *stub_sec;
3486 bfd *stub_bfd;
3487 bfd_vma stub_addr;
3488 bfd_byte *loc;
3489 bfd_vma sym_value;
3490 int template_size;
3491 int size;
3492 const insn_sequence *template_sequence;
3493 int i;
3494 int stub_reloc_idx[MAXRELOCS] = {-1, -1};
3495 int stub_reloc_offset[MAXRELOCS] = {0, 0};
3496 int nrelocs = 0;
3497
3498 /* Massage our args to the form they really have. */
3499 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
3500 info = (struct bfd_link_info *) in_arg;
3501
3502 globals = elf32_arm_hash_table (info);
3503 if (globals == NULL)
3504 return FALSE;
3505
3506 stub_sec = stub_entry->stub_sec;
3507
3508 if ((globals->fix_cortex_a8 < 0)
3509 != (stub_entry->stub_type >= arm_stub_a8_veneer_lwm))
3510 /* We have to do the a8 fixes last, as they are less aligned than
3511 the other veneers. */
3512 return TRUE;
3513
3514 /* Make a note of the offset within the stubs for this entry. */
3515 stub_entry->stub_offset = stub_sec->size;
3516 loc = stub_sec->contents + stub_entry->stub_offset;
3517
3518 stub_bfd = stub_sec->owner;
3519
3520 /* This is the address of the start of the stub. */
3521 stub_addr = stub_sec->output_section->vma + stub_sec->output_offset
3522 + stub_entry->stub_offset;
3523
3524 /* This is the address of the stub destination. */
3525 sym_value = (stub_entry->target_value
3526 + stub_entry->target_section->output_offset
3527 + stub_entry->target_section->output_section->vma);
3528
3529 template_sequence = stub_entry->stub_template;
3530 template_size = stub_entry->stub_template_size;
3531
3532 size = 0;
3533 for (i = 0; i < template_size; i++)
3534 {
3535 switch (template_sequence[i].type)
3536 {
3537 case THUMB16_TYPE:
3538 {
3539 bfd_vma data = (bfd_vma) template_sequence[i].data;
3540 if (template_sequence[i].reloc_addend != 0)
3541 {
3542 /* We've borrowed the reloc_addend field to mean we should
3543 insert a condition code into this (Thumb-1 branch)
3544 instruction. See THUMB16_BCOND_INSN. */
3545 BFD_ASSERT ((data & 0xff00) == 0xd000);
3546 data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
3547 }
3548 bfd_put_16 (stub_bfd, data, loc + size);
3549 size += 2;
3550 }
3551 break;
3552
3553 case THUMB32_TYPE:
3554 bfd_put_16 (stub_bfd,
3555 (template_sequence[i].data >> 16) & 0xffff,
3556 loc + size);
3557 bfd_put_16 (stub_bfd, template_sequence[i].data & 0xffff,
3558 loc + size + 2);
3559 if (template_sequence[i].r_type != R_ARM_NONE)
3560 {
3561 stub_reloc_idx[nrelocs] = i;
3562 stub_reloc_offset[nrelocs++] = size;
3563 }
3564 size += 4;
3565 break;
3566
3567 case ARM_TYPE:
3568 bfd_put_32 (stub_bfd, template_sequence[i].data,
3569 loc + size);
3570 /* Handle cases where the target is encoded within the
3571 instruction. */
3572 if (template_sequence[i].r_type == R_ARM_JUMP24)
3573 {
3574 stub_reloc_idx[nrelocs] = i;
3575 stub_reloc_offset[nrelocs++] = size;
3576 }
3577 size += 4;
3578 break;
3579
3580 case DATA_TYPE:
3581 bfd_put_32 (stub_bfd, template_sequence[i].data, loc + size);
3582 stub_reloc_idx[nrelocs] = i;
3583 stub_reloc_offset[nrelocs++] = size;
3584 size += 4;
3585 break;
3586
3587 default:
3588 BFD_FAIL ();
3589 return FALSE;
3590 }
3591 }
3592
3593 stub_sec->size += size;
3594
3595 /* Stub size has already been computed in arm_size_one_stub. Check
3596 consistency. */
3597 BFD_ASSERT (size == stub_entry->stub_size);
3598
3599 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
3600 if (stub_entry->st_type == STT_ARM_TFUNC)
3601 sym_value |= 1;
3602
3603   /* Assume there are between one and MAXRELOCS entries to relocate
3604      in each stub.  */
3605 BFD_ASSERT (nrelocs != 0 && nrelocs <= MAXRELOCS);
3606
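  /* Resolve the relocations recorded while emitting the template.  Thumb
     branch relocations are handled in the first arm of the conditional
     below with the symbol type forced to STT_ARM_TFUNC (left as zero for
     R_ARM_THM_XPC22 so the call interworks); the remaining template
     relocations -- ARM branches and literal data words -- take the else
     arm, which uses the stub entry's real symbol type and folds any
     template addend into the target address.  */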
3607 for (i = 0; i < nrelocs; i++)
3608 if (template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP24
3609 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP19
3610 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_CALL
3611 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_XPC22)
3612 {
3613 Elf_Internal_Rela rel;
3614 bfd_boolean unresolved_reloc;
3615 char *error_message;
3616 int sym_flags
3617 = (template_sequence[stub_reloc_idx[i]].r_type != R_ARM_THM_XPC22)
3618 ? STT_ARM_TFUNC : 0;
3619 bfd_vma points_to = sym_value + stub_entry->target_addend;
3620
3621 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
3622 rel.r_info = ELF32_R_INFO (0,
3623 template_sequence[stub_reloc_idx[i]].r_type);
3624 rel.r_addend = template_sequence[stub_reloc_idx[i]].reloc_addend;
3625
3626 if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
3627 /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
3628 template should refer back to the instruction after the original
3629 branch. */
3630 points_to = sym_value;
3631
3632 /* There may be unintended consequences if this is not true. */
3633 BFD_ASSERT (stub_entry->h == NULL);
3634
3635 /* Note: _bfd_final_link_relocate doesn't handle these relocations
3636 properly. We should probably use this function unconditionally,
3637 rather than only for certain relocations listed in the enclosing
3638 conditional, for the sake of consistency. */
3639 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
3640 (template_sequence[stub_reloc_idx[i]].r_type),
3641 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
3642 points_to, info, stub_entry->target_section, "", sym_flags,
3643 (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
3644 &error_message);
3645 }
3646 else
3647 {
3648 Elf_Internal_Rela rel;
3649 bfd_boolean unresolved_reloc;
3650 char *error_message;
3651 bfd_vma points_to = sym_value + stub_entry->target_addend
3652 + template_sequence[stub_reloc_idx[i]].reloc_addend;
3653
3654 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
3655 rel.r_info = ELF32_R_INFO (0,
3656 template_sequence[stub_reloc_idx[i]].r_type);
3657 rel.r_addend = 0;
3658
3659 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
3660 (template_sequence[stub_reloc_idx[i]].r_type),
3661 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
3662 points_to, info, stub_entry->target_section, "", stub_entry->st_type,
3663 (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
3664 &error_message);
3665 }
3666
3667 return TRUE;
3668 #undef MAXRELOCS
3669 }
3670
3671 /* Calculate the template, template size and instruction size for a stub.
3672    Return value is the total size of the stub in bytes.  */
3673
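/* Each THUMB16 template entry contributes 2 bytes to the stub, while ARM,
   THUMB32 and DATA entries contribute 4 bytes each.  For example, a
   template of one ARM instruction followed by one literal data word yields
   an 8-byte stub.  */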
3674 static unsigned int
3675 find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
3676 const insn_sequence **stub_template,
3677 int *stub_template_size)
3678 {
3679 const insn_sequence *template_sequence = NULL;
3680 int template_size = 0, i;
3681 unsigned int size;
3682
3683 template_sequence = stub_definitions[stub_type].template_sequence;
3684 template_size = stub_definitions[stub_type].template_size;
3685
3686 size = 0;
3687 for (i = 0; i < template_size; i++)
3688 {
3689 switch (template_sequence[i].type)
3690 {
3691 case THUMB16_TYPE:
3692 size += 2;
3693 break;
3694
3695 case ARM_TYPE:
3696 case THUMB32_TYPE:
3697 case DATA_TYPE:
3698 size += 4;
3699 break;
3700
3701 default:
3702 BFD_FAIL ();
3703 	  return 0;
3704 }
3705 }
3706
3707 if (stub_template)
3708 *stub_template = template_sequence;
3709
3710 if (stub_template_size)
3711 *stub_template_size = template_size;
3712
3713 return size;
3714 }
3715
3716 /* As above, but don't actually build the stub. Just bump offset so
3717 we know stub section sizes. */
3718
3719 static bfd_boolean
3720 arm_size_one_stub (struct bfd_hash_entry *gen_entry,
3721 void * in_arg)
3722 {
3723 struct elf32_arm_stub_hash_entry *stub_entry;
3724 struct elf32_arm_link_hash_table *htab;
3725 const insn_sequence *template_sequence;
3726 int template_size, size;
3727
3728 /* Massage our args to the form they really have. */
3729 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
3730 htab = (struct elf32_arm_link_hash_table *) in_arg;
3731
3732 BFD_ASSERT((stub_entry->stub_type > arm_stub_none)
3733 && stub_entry->stub_type < ARRAY_SIZE(stub_definitions));
3734
3735 size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence,
3736 &template_size);
3737
3738 stub_entry->stub_size = size;
3739 stub_entry->stub_template = template_sequence;
3740 stub_entry->stub_template_size = template_size;
3741
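  /* Pad each stub's allocation to an 8-byte boundary; the instructions
     emitted later by arm_build_one_stub may occupy less than the space
     reserved here.  */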
3742 size = (size + 7) & ~7;
3743 stub_entry->stub_sec->size += size;
3744
3745 return TRUE;
3746 }
3747
3748 /* External entry points for sizing and building linker stubs. */
3749
3750 /* Set up various things so that we can make a list of input sections
3751 for each output section included in the link. Returns -1 on error,
3752 0 when no stubs will be needed, and 1 on success. */
3753
3754 int
3755 elf32_arm_setup_section_lists (bfd *output_bfd,
3756 struct bfd_link_info *info)
3757 {
3758 bfd *input_bfd;
3759 unsigned int bfd_count;
3760 int top_id, top_index;
3761 asection *section;
3762 asection **input_list, **list;
3763 bfd_size_type amt;
3764 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
3765
3766 if (htab == NULL)
3767 return 0;
3768 if (! is_elf_hash_table (htab))
3769 return 0;
3770
3771 /* Count the number of input BFDs and find the top input section id. */
3772 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
3773 input_bfd != NULL;
3774 input_bfd = input_bfd->link_next)
3775 {
3776 bfd_count += 1;
3777 for (section = input_bfd->sections;
3778 section != NULL;
3779 section = section->next)
3780 {
3781 if (top_id < section->id)
3782 top_id = section->id;
3783 }
3784 }
3785 htab->bfd_count = bfd_count;
3786
3787 amt = sizeof (struct map_stub) * (top_id + 1);
3788 htab->stub_group = (struct map_stub *) bfd_zmalloc (amt);
3789 if (htab->stub_group == NULL)
3790 return -1;
3791 htab->top_id = top_id;
3792
3793 /* We can't use output_bfd->section_count here to find the top output
3794 section index as some sections may have been removed, and
3795 _bfd_strip_section_from_output doesn't renumber the indices. */
3796 for (section = output_bfd->sections, top_index = 0;
3797 section != NULL;
3798 section = section->next)
3799 {
3800 if (top_index < section->index)
3801 top_index = section->index;
3802 }
3803
3804 htab->top_index = top_index;
3805 amt = sizeof (asection *) * (top_index + 1);
3806 input_list = (asection **) bfd_malloc (amt);
3807 htab->input_list = input_list;
3808 if (input_list == NULL)
3809 return -1;
3810
3811 /* For sections we aren't interested in, mark their entries with a
3812 value we can check later. */
3813 list = input_list + top_index;
3814 do
3815 *list = bfd_abs_section_ptr;
3816 while (list-- != input_list);
3817
3818 for (section = output_bfd->sections;
3819 section != NULL;
3820 section = section->next)
3821 {
3822 if ((section->flags & SEC_CODE) != 0)
3823 input_list[section->index] = NULL;
3824 }
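  /* Every slot belonging to a code output section now holds NULL, ready to
     collect its input sections, while the remaining slots keep the
     bfd_abs_section_ptr marker and are ignored later.  */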
3825
3826 return 1;
3827 }
3828
3829 /* The linker repeatedly calls this function for each input section,
3830 in the order that input sections are linked into output sections.
3831 Build lists of input sections to determine groupings between which
3832 we may insert linker stubs. */
3833
3834 void
3835 elf32_arm_next_input_section (struct bfd_link_info *info,
3836 asection *isec)
3837 {
3838 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
3839
3840 if (htab == NULL)
3841 return;
3842
3843 if (isec->output_section->index <= htab->top_index)
3844 {
3845 asection **list = htab->input_list + isec->output_section->index;
3846
3847 if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0)
3848 {
3849 /* Steal the link_sec pointer for our list. */
3850 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
3851 /* This happens to make the list in reverse order,
3852 which we reverse later. */
3853 PREV_SEC (isec) = *list;
3854 *list = isec;
3855 }
3856 }
3857 }
3858
3859 /* See whether we can group stub sections together. Grouping stub
3860 sections may result in fewer stubs. More importantly, we need to
3861 put all .init* and .fini* stubs at the end of the .init or
3862 .fini output sections respectively, because glibc splits the
3863 _init and _fini functions into multiple parts. Putting a stub in
3864 the middle of a function is not a good idea. */
3865
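/* As a rough illustration: with a stub_group_size of 0x1000 and input
   sections at output offsets 0x0 (size 0x800), 0x800 (size 0x600) and
   0x2000 (size 0x100), the first two sections end up in one group (their
   link_sec entries both point at the second section), while the third
   section starts a group of its own.  */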
3866 static void
3867 group_sections (struct elf32_arm_link_hash_table *htab,
3868 bfd_size_type stub_group_size,
3869 bfd_boolean stubs_always_after_branch)
3870 {
3871 asection **list = htab->input_list;
3872
3873 do
3874 {
3875 asection *tail = *list;
3876 asection *head;
3877
3878 if (tail == bfd_abs_section_ptr)
3879 continue;
3880
3881 /* Reverse the list: we must avoid placing stubs at the
3882 beginning of the section because the beginning of the text
3883 section may be required for an interrupt vector in bare metal
3884 code. */
3885 #define NEXT_SEC PREV_SEC
3886 head = NULL;
3887 while (tail != NULL)
3888 {
3889 /* Pop from tail. */
3890 asection *item = tail;
3891 tail = PREV_SEC (item);
3892
3893 /* Push on head. */
3894 NEXT_SEC (item) = head;
3895 head = item;
3896 }
3897
3898 while (head != NULL)
3899 {
3900 asection *curr;
3901 asection *next;
3902 bfd_vma stub_group_start = head->output_offset;
3903 bfd_vma end_of_next;
3904
3905 curr = head;
3906 while (NEXT_SEC (curr) != NULL)
3907 {
3908 next = NEXT_SEC (curr);
3909 end_of_next = next->output_offset + next->size;
3910 if (end_of_next - stub_group_start >= stub_group_size)
3911 /* End of NEXT is too far from start, so stop. */
3912 break;
3913 /* Add NEXT to the group. */
3914 curr = next;
3915 }
3916
3917 /* OK, the size from the start to the start of CURR is less
3918 than stub_group_size and thus can be handled by one stub
3919 section. (Or the head section is itself larger than
3920 stub_group_size, in which case we may be toast.)
3921 We should really be keeping track of the total size of
3922 stubs added here, as stubs contribute to the final output
3923 section size. */
3924 do
3925 {
3926 next = NEXT_SEC (head);
3927 /* Set up this stub group. */
3928 htab->stub_group[head->id].link_sec = curr;
3929 }
3930 while (head != curr && (head = next) != NULL);
3931
3932 /* But wait, there's more! Input sections up to stub_group_size
3933 bytes after the stub section can be handled by it too. */
3934 if (!stubs_always_after_branch)
3935 {
3936 stub_group_start = curr->output_offset + curr->size;
3937
3938 while (next != NULL)
3939 {
3940 end_of_next = next->output_offset + next->size;
3941 if (end_of_next - stub_group_start >= stub_group_size)
3942 /* End of NEXT is too far from stubs, so stop. */
3943 break;
3944 /* Add NEXT to the stub group. */
3945 head = next;
3946 next = NEXT_SEC (head);
3947 htab->stub_group[head->id].link_sec = curr;
3948 }
3949 }
3950 head = next;
3951 }
3952 }
3953 while (list++ != htab->input_list + htab->top_index);
3954
3955 free (htab->input_list);
3956 #undef PREV_SEC
3957 #undef NEXT_SEC
3958 }
3959
3960 /* Comparison function for sorting/searching relocations relating to Cortex-A8
3961 erratum fix. */
3962
3963 static int
3964 a8_reloc_compare (const void *a, const void *b)
3965 {
3966 const struct a8_erratum_reloc *ra = (const struct a8_erratum_reloc *) a;
3967 const struct a8_erratum_reloc *rb = (const struct a8_erratum_reloc *) b;
3968
3969 if (ra->from < rb->from)
3970 return -1;
3971 else if (ra->from > rb->from)
3972 return 1;
3973 else
3974 return 0;
3975 }
3976
3977 static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
3978 const char *, char **);
3979
3980 /* Helper function to scan code for sequences which might trigger the Cortex-A8
3981 branch/TLB erratum. Fill in the table described by A8_FIXES_P,
3982 NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false
3983 otherwise. */
3984
3985 static bfd_boolean
3986 cortex_a8_erratum_scan (bfd *input_bfd,
3987 struct bfd_link_info *info,
3988 struct a8_erratum_fix **a8_fixes_p,
3989 unsigned int *num_a8_fixes_p,
3990 unsigned int *a8_fix_table_size_p,
3991 struct a8_erratum_reloc *a8_relocs,
3992 unsigned int num_a8_relocs,
3993 unsigned prev_num_a8_fixes,
3994 bfd_boolean *stub_changed_p)
3995 {
3996 asection *section;
3997 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
3998 struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
3999 unsigned int num_a8_fixes = *num_a8_fixes_p;
4000 unsigned int a8_fix_table_size = *a8_fix_table_size_p;
4001
4002 if (htab == NULL)
4003 return FALSE;
4004
4005 for (section = input_bfd->sections;
4006 section != NULL;
4007 section = section->next)
4008 {
4009 bfd_byte *contents = NULL;
4010 struct _arm_elf_section_data *sec_data;
4011 unsigned int span;
4012 bfd_vma base_vma;
4013
4014 if (elf_section_type (section) != SHT_PROGBITS
4015 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
4016 || (section->flags & SEC_EXCLUDE) != 0
4017 || (section->sec_info_type == ELF_INFO_TYPE_JUST_SYMS)
4018 || (section->output_section == bfd_abs_section_ptr))
4019 continue;
4020
4021 base_vma = section->output_section->vma + section->output_offset;
4022
4023 if (elf_section_data (section)->this_hdr.contents != NULL)
4024 contents = elf_section_data (section)->this_hdr.contents;
4025 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
4026 return TRUE;
4027
4028 sec_data = elf32_arm_section_data (section);
4029
4030 for (span = 0; span < sec_data->mapcount; span++)
4031 {
4032 unsigned int span_start = sec_data->map[span].vma;
4033 unsigned int span_end = (span == sec_data->mapcount - 1)
4034 ? section->size : sec_data->map[span + 1].vma;
4035 unsigned int i;
4036 char span_type = sec_data->map[span].type;
4037 bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE;
4038
4039 if (span_type != 't')
4040 continue;
4041
4042 /* Span is entirely within a single 4KB region: skip scanning. */
4043 if (((base_vma + span_start) & ~0xfff)
4044 == ((base_vma + span_end) & ~0xfff))
4045 continue;
4046
4047 /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
4048
4049 * The opcode is BLX.W, BL.W, B.W, Bcc.W
4050 * The branch target is in the same 4KB region as the
4051 first half of the branch.
4052 * The instruction before the branch is a 32-bit
4053 length non-branch instruction. */
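	  /* For example, a 32-bit branch whose first halfword sits at an
	     address ending in 0xffe has its two halves in different 4KB
	     regions; if its target lies back in the region holding that
	     first halfword, and the preceding instruction was a 32-bit
	     non-branch, a veneer is recorded below.  */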
4054 for (i = span_start; i < span_end;)
4055 {
4056 unsigned int insn = bfd_getl16 (&contents[i]);
4057 bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE;
4058 bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch;
4059
4060 if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
4061 insn_32bit = TRUE;
4062
4063 if (insn_32bit)
4064 {
4065 /* Load the rest of the insn (in manual-friendly order). */
4066 insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);
4067
4068 /* Encoding T4: B<c>.W. */
4069 is_b = (insn & 0xf800d000) == 0xf0009000;
4070 /* Encoding T1: BL<c>.W. */
4071 is_bl = (insn & 0xf800d000) == 0xf000d000;
4072 /* Encoding T2: BLX<c>.W. */
4073 is_blx = (insn & 0xf800d000) == 0xf000c000;
4074 /* Encoding T3: B<c>.W (not permitted in IT block). */
4075 is_bcc = (insn & 0xf800d000) == 0xf0008000
4076 && (insn & 0x07f00000) != 0x03800000;
4077 }
4078
4079 is_32bit_branch = is_b || is_bl || is_blx || is_bcc;
4080
4081 if (((base_vma + i) & 0xfff) == 0xffe
4082 && insn_32bit
4083 && is_32bit_branch
4084 && last_was_32bit
4085 && ! last_was_branch)
4086 {
4087 bfd_signed_vma offset = 0;
4088 bfd_boolean force_target_arm = FALSE;
4089 bfd_boolean force_target_thumb = FALSE;
4090 bfd_vma target;
4091 enum elf32_arm_stub_type stub_type = arm_stub_none;
4092 struct a8_erratum_reloc key, *found;
4093
4094 key.from = base_vma + i;
4095 found = (struct a8_erratum_reloc *)
4096 bsearch (&key, a8_relocs, num_a8_relocs,
4097 sizeof (struct a8_erratum_reloc),
4098 &a8_reloc_compare);
4099
4100 if (found)
4101 {
4102 char *error_message = NULL;
4103 struct elf_link_hash_entry *entry;
4104
4105 /* We don't care about the error returned from this
4106 function, only if there is glue or not. */
4107 entry = find_thumb_glue (info, found->sym_name,
4108 &error_message);
4109
4110 if (entry)
4111 found->non_a8_stub = TRUE;
4112
4113 if (found->r_type == R_ARM_THM_CALL
4114 && found->st_type != STT_ARM_TFUNC)
4115 force_target_arm = TRUE;
4116 else if (found->r_type == R_ARM_THM_CALL
4117 && found->st_type == STT_ARM_TFUNC)
4118 force_target_thumb = TRUE;
4119 }
4120
4121 /* Check if we have an offending branch instruction. */
4122
4123 if (found && found->non_a8_stub)
4124 /* We've already made a stub for this instruction, e.g.
4125 it's a long branch or a Thumb->ARM stub. Assume that
4126 stub will suffice to work around the A8 erratum (see
4127 setting of always_after_branch above). */
4128 ;
4129 else if (is_bcc)
4130 {
4131 offset = (insn & 0x7ff) << 1;
4132 offset |= (insn & 0x3f0000) >> 4;
4133 offset |= (insn & 0x2000) ? 0x40000 : 0;
4134 offset |= (insn & 0x800) ? 0x80000 : 0;
4135 offset |= (insn & 0x4000000) ? 0x100000 : 0;
4136 if (offset & 0x100000)
4137 offset |= ~ ((bfd_signed_vma) 0xfffff);
4138 stub_type = arm_stub_a8_veneer_b_cond;
4139 }
4140 else if (is_b || is_bl || is_blx)
4141 {
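		      /* Reassemble the signed branch offset from the
			 Thumb-2 encoding: I1 = NOT(J1 EOR S),
			 I2 = NOT(J2 EOR S), and the offset is
			 S:I1:I2:imm10:imm11:0, sign-extended.  */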
4142 int s = (insn & 0x4000000) != 0;
4143 int j1 = (insn & 0x2000) != 0;
4144 int j2 = (insn & 0x800) != 0;
4145 int i1 = !(j1 ^ s);
4146 int i2 = !(j2 ^ s);
4147
4148 offset = (insn & 0x7ff) << 1;
4149 offset |= (insn & 0x3ff0000) >> 4;
4150 offset |= i2 << 22;
4151 offset |= i1 << 23;
4152 offset |= s << 24;
4153 if (offset & 0x1000000)
4154 offset |= ~ ((bfd_signed_vma) 0xffffff);
4155
4156 if (is_blx)
4157 offset &= ~ ((bfd_signed_vma) 3);
4158
4159 stub_type = is_blx ? arm_stub_a8_veneer_blx :
4160 is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
4161 }
4162
4163 if (stub_type != arm_stub_none)
4164 {
4165 bfd_vma pc_for_insn = base_vma + i + 4;
4166
4167 /* The original instruction is a BL, but the target is
4168 an ARM instruction. If we were not making a stub,
4169 the BL would have been converted to a BLX. Use the
4170 BLX stub instead in that case. */
4171 if (htab->use_blx && force_target_arm
4172 && stub_type == arm_stub_a8_veneer_bl)
4173 {
4174 stub_type = arm_stub_a8_veneer_blx;
4175 is_blx = TRUE;
4176 is_bl = FALSE;
4177 }
4178 /* Conversely, if the original instruction was
4179 BLX but the target is Thumb mode, use the BL
4180 stub. */
4181 else if (force_target_thumb
4182 && stub_type == arm_stub_a8_veneer_blx)
4183 {
4184 stub_type = arm_stub_a8_veneer_bl;
4185 is_blx = FALSE;
4186 is_bl = TRUE;
4187 }
4188
4189 if (is_blx)
4190 pc_for_insn &= ~ ((bfd_vma) 3);
4191
4192 /* If we found a relocation, use the proper destination,
4193 not the offset in the (unrelocated) instruction.
4194 Note this is always done if we switched the stub type
4195 above. */
4196 if (found)
4197 offset =
4198 (bfd_signed_vma) (found->destination - pc_for_insn);
4199
4200 target = pc_for_insn + offset;
4201
4202 /* The BLX stub is ARM-mode code. Adjust the offset to
4203 take the different PC value (+8 instead of +4) into
4204 account. */
4205 if (stub_type == arm_stub_a8_veneer_blx)
4206 offset += 4;
4207
4208 if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
4209 {
4210 char *stub_name = NULL;
4211
4212 if (num_a8_fixes == a8_fix_table_size)
4213 {
4214 a8_fix_table_size *= 2;
4215 a8_fixes = (struct a8_erratum_fix *)
4216 bfd_realloc (a8_fixes,
4217 sizeof (struct a8_erratum_fix)
4218 * a8_fix_table_size);
4219 }
4220
4221 if (num_a8_fixes < prev_num_a8_fixes)
4222 {
4223 /* If we're doing a subsequent scan,
4224 check if we've found the same fix as
4225 before, and try and reuse the stub
4226 name. */
4227 stub_name = a8_fixes[num_a8_fixes].stub_name;
4228 if ((a8_fixes[num_a8_fixes].section != section)
4229 || (a8_fixes[num_a8_fixes].offset != i))
4230 {
4231 free (stub_name);
4232 stub_name = NULL;
4233 *stub_changed_p = TRUE;
4234 }
4235 }
4236
4237 if (!stub_name)
4238 {
4239 stub_name = (char *) bfd_malloc (8 + 1 + 8 + 1);
4240 if (stub_name != NULL)
4241 sprintf (stub_name, "%x:%x", section->id, i);
4242 }
4243
4244 a8_fixes[num_a8_fixes].input_bfd = input_bfd;
4245 a8_fixes[num_a8_fixes].section = section;
4246 a8_fixes[num_a8_fixes].offset = i;
4247 a8_fixes[num_a8_fixes].addend = offset;
4248 a8_fixes[num_a8_fixes].orig_insn = insn;
4249 a8_fixes[num_a8_fixes].stub_name = stub_name;
4250 a8_fixes[num_a8_fixes].stub_type = stub_type;
4251 a8_fixes[num_a8_fixes].st_type =
4252 is_blx ? STT_FUNC : STT_ARM_TFUNC;
4253
4254 num_a8_fixes++;
4255 }
4256 }
4257 }
4258
4259 i += insn_32bit ? 4 : 2;
4260 last_was_32bit = insn_32bit;
4261 last_was_branch = is_32bit_branch;
4262 }
4263 }
4264
4265 if (elf_section_data (section)->this_hdr.contents == NULL)
4266 free (contents);
4267 }
4268
4269 *a8_fixes_p = a8_fixes;
4270 *num_a8_fixes_p = num_a8_fixes;
4271 *a8_fix_table_size_p = a8_fix_table_size;
4272
4273 return FALSE;
4274 }
4275
4276 /* Determine and set the size of the stub section for a final link.
4277
4278 The basic idea here is to examine all the relocations looking for
4279 PC-relative calls to a target that is unreachable with a "bl"
4280 instruction. */
4281
4282 bfd_boolean
4283 elf32_arm_size_stubs (bfd *output_bfd,
4284 bfd *stub_bfd,
4285 struct bfd_link_info *info,
4286 bfd_signed_vma group_size,
4287 asection * (*add_stub_section) (const char *, asection *),
4288 void (*layout_sections_again) (void))
4289 {
4290 bfd_size_type stub_group_size;
4291 bfd_boolean stubs_always_after_branch;
4292 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4293 struct a8_erratum_fix *a8_fixes = NULL;
4294 unsigned int num_a8_fixes = 0, a8_fix_table_size = 10;
4295 struct a8_erratum_reloc *a8_relocs = NULL;
4296 unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;
4297
4298 if (htab == NULL)
4299 return FALSE;
4300
4301 if (htab->fix_cortex_a8)
4302 {
4303 a8_fixes = (struct a8_erratum_fix *)
4304 bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size);
4305 a8_relocs = (struct a8_erratum_reloc *)
4306 bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size);
4307 }
4308
4309 /* Propagate mach to stub bfd, because it may not have been
4310 finalized when we created stub_bfd. */
4311 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
4312 bfd_get_mach (output_bfd));
4313
4314 /* Stash our params away. */
4315 htab->stub_bfd = stub_bfd;
4316 htab->add_stub_section = add_stub_section;
4317 htab->layout_sections_again = layout_sections_again;
4318 stubs_always_after_branch = group_size < 0;
4319
4320 /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
4321 as the first half of a 32-bit branch straddling two 4K pages. This is a
4322 crude way of enforcing that. */
4323 if (htab->fix_cortex_a8)
4324 stubs_always_after_branch = 1;
4325
4326 if (group_size < 0)
4327 stub_group_size = -group_size;
4328 else
4329 stub_group_size = group_size;
4330
4331 if (stub_group_size == 1)
4332 {
4333 /* Default values. */
4334 	  /* The Thumb branch range of +-4MB has to be used as the default
4335 	     maximum group size (a given section can contain both ARM and Thumb
4336 	     code, so the worst case has to be taken into account).
4337
4338 This value is 24K less than that, which allows for 2025
4339 12-byte stubs. If we exceed that, then we will fail to link.
4340 The user will have to relink with an explicit group size
4341 option. */
4342 stub_group_size = 4170000;
4343 }
4344
4345 group_sections (htab, stub_group_size, stubs_always_after_branch);
4346
4347 /* If we're applying the cortex A8 fix, we need to determine the
4348 program header size now, because we cannot change it later --
4349 that could alter section placements. Notice the A8 erratum fix
4350 ends up requiring the section addresses to remain unchanged
4351 modulo the page size. That's something we cannot represent
4352 inside BFD, and we don't want to force the section alignment to
4353 be the page size. */
4354 if (htab->fix_cortex_a8)
4355 (*htab->layout_sections_again) ();
4356
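  /* Size the stubs iteratively.  Each pass scans every branch relocation,
     creates any stub hash entries that are still missing, recomputes the
     stub section sizes and asks the linker to lay the sections out again;
     the loop ends once a pass adds no new stubs and the set of Cortex-A8
     fixes is unchanged.  */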
4357 while (1)
4358 {
4359 bfd *input_bfd;
4360 unsigned int bfd_indx;
4361 asection *stub_sec;
4362 bfd_boolean stub_changed = FALSE;
4363 unsigned prev_num_a8_fixes = num_a8_fixes;
4364
4365 num_a8_fixes = 0;
4366 for (input_bfd = info->input_bfds, bfd_indx = 0;
4367 input_bfd != NULL;
4368 input_bfd = input_bfd->link_next, bfd_indx++)
4369 {
4370 Elf_Internal_Shdr *symtab_hdr;
4371 asection *section;
4372 Elf_Internal_Sym *local_syms = NULL;
4373
4374 num_a8_relocs = 0;
4375
4376 /* We'll need the symbol table in a second. */
4377 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
4378 if (symtab_hdr->sh_info == 0)
4379 continue;
4380
4381 /* Walk over each section attached to the input bfd. */
4382 for (section = input_bfd->sections;
4383 section != NULL;
4384 section = section->next)
4385 {
4386 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
4387
4388 /* If there aren't any relocs, then there's nothing more
4389 to do. */
4390 if ((section->flags & SEC_RELOC) == 0
4391 || section->reloc_count == 0
4392 || (section->flags & SEC_CODE) == 0)
4393 continue;
4394
4395 /* If this section is a link-once section that will be
4396 discarded, then don't create any stubs. */
4397 if (section->output_section == NULL
4398 || section->output_section->owner != output_bfd)
4399 continue;
4400
4401 /* Get the relocs. */
4402 internal_relocs
4403 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
4404 NULL, info->keep_memory);
4405 if (internal_relocs == NULL)
4406 goto error_ret_free_local;
4407
4408 /* Now examine each relocation. */
4409 irela = internal_relocs;
4410 irelaend = irela + section->reloc_count;
4411 for (; irela < irelaend; irela++)
4412 {
4413 unsigned int r_type, r_indx;
4414 enum elf32_arm_stub_type stub_type;
4415 struct elf32_arm_stub_hash_entry *stub_entry;
4416 asection *sym_sec;
4417 bfd_vma sym_value;
4418 bfd_vma destination;
4419 struct elf32_arm_link_hash_entry *hash;
4420 const char *sym_name;
4421 char *stub_name;
4422 const asection *id_sec;
4423 int st_type;
4424 bfd_boolean created_stub = FALSE;
4425
4426 r_type = ELF32_R_TYPE (irela->r_info);
4427 r_indx = ELF32_R_SYM (irela->r_info);
4428
4429 if (r_type >= (unsigned int) R_ARM_max)
4430 {
4431 bfd_set_error (bfd_error_bad_value);
4432 error_ret_free_internal:
4433 if (elf_section_data (section)->relocs == NULL)
4434 free (internal_relocs);
4435 goto error_ret_free_local;
4436 }
4437
4438 /* Only look for stubs on branch instructions. */
4439 if ((r_type != (unsigned int) R_ARM_CALL)
4440 && (r_type != (unsigned int) R_ARM_THM_CALL)
4441 && (r_type != (unsigned int) R_ARM_JUMP24)
4442 && (r_type != (unsigned int) R_ARM_THM_JUMP19)
4443 && (r_type != (unsigned int) R_ARM_THM_XPC22)
4444 && (r_type != (unsigned int) R_ARM_THM_JUMP24)
4445 && (r_type != (unsigned int) R_ARM_PLT32))
4446 continue;
4447
4448 /* Now determine the call target, its name, value,
4449 section. */
4450 sym_sec = NULL;
4451 sym_value = 0;
4452 destination = 0;
4453 hash = NULL;
4454 sym_name = NULL;
4455 if (r_indx < symtab_hdr->sh_info)
4456 {
4457 /* It's a local symbol. */
4458 Elf_Internal_Sym *sym;
4459 Elf_Internal_Shdr *hdr;
4460
4461 if (local_syms == NULL)
4462 {
4463 local_syms
4464 = (Elf_Internal_Sym *) symtab_hdr->contents;
4465 if (local_syms == NULL)
4466 local_syms
4467 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
4468 symtab_hdr->sh_info, 0,
4469 NULL, NULL, NULL);
4470 if (local_syms == NULL)
4471 goto error_ret_free_internal;
4472 }
4473
4474 sym = local_syms + r_indx;
4475 hdr = elf_elfsections (input_bfd)[sym->st_shndx];
4476 sym_sec = hdr->bfd_section;
4477 if (!sym_sec)
4478 /* This is an undefined symbol. It can never
4479 be resolved. */
4480 continue;
4481
4482 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
4483 sym_value = sym->st_value;
4484 destination = (sym_value + irela->r_addend
4485 + sym_sec->output_offset
4486 + sym_sec->output_section->vma);
4487 st_type = ELF_ST_TYPE (sym->st_info);
4488 sym_name
4489 = bfd_elf_string_from_elf_section (input_bfd,
4490 symtab_hdr->sh_link,
4491 sym->st_name);
4492 }
4493 else
4494 {
4495 /* It's an external symbol. */
4496 int e_indx;
4497
4498 e_indx = r_indx - symtab_hdr->sh_info;
4499 hash = ((struct elf32_arm_link_hash_entry *)
4500 elf_sym_hashes (input_bfd)[e_indx]);
4501
4502 while (hash->root.root.type == bfd_link_hash_indirect
4503 || hash->root.root.type == bfd_link_hash_warning)
4504 hash = ((struct elf32_arm_link_hash_entry *)
4505 hash->root.root.u.i.link);
4506
4507 if (hash->root.root.type == bfd_link_hash_defined
4508 || hash->root.root.type == bfd_link_hash_defweak)
4509 {
4510 sym_sec = hash->root.root.u.def.section;
4511 sym_value = hash->root.root.u.def.value;
4512
4513 struct elf32_arm_link_hash_table *globals =
4514 elf32_arm_hash_table (info);
4515
4516 /* For a destination in a shared library,
4517 use the PLT stub as target address to
4518 decide whether a branch stub is
4519 needed. */
4520 if (globals != NULL
4521 && globals->splt != NULL
4522 && hash != NULL
4523 && hash->root.plt.offset != (bfd_vma) -1)
4524 {
4525 sym_sec = globals->splt;
4526 sym_value = hash->root.plt.offset;
4527 if (sym_sec->output_section != NULL)
4528 destination = (sym_value
4529 + sym_sec->output_offset
4530 + sym_sec->output_section->vma);
4531 }
4532 else if (sym_sec->output_section != NULL)
4533 destination = (sym_value + irela->r_addend
4534 + sym_sec->output_offset
4535 + sym_sec->output_section->vma);
4536 }
4537 else if ((hash->root.root.type == bfd_link_hash_undefined)
4538 || (hash->root.root.type == bfd_link_hash_undefweak))
4539 {
4540 /* For a shared library, use the PLT stub as
4541 target address to decide whether a long
4542 branch stub is needed.
4543 For absolute code, they cannot be handled. */
4544 struct elf32_arm_link_hash_table *globals =
4545 elf32_arm_hash_table (info);
4546
4547 if (globals != NULL
4548 && globals->splt != NULL
4549 && hash != NULL
4550 && hash->root.plt.offset != (bfd_vma) -1)
4551 {
4552 sym_sec = globals->splt;
4553 sym_value = hash->root.plt.offset;
4554 if (sym_sec->output_section != NULL)
4555 destination = (sym_value
4556 + sym_sec->output_offset
4557 + sym_sec->output_section->vma);
4558 }
4559 else
4560 continue;
4561 }
4562 else
4563 {
4564 bfd_set_error (bfd_error_bad_value);
4565 goto error_ret_free_internal;
4566 }
4567 st_type = ELF_ST_TYPE (hash->root.type);
4568 sym_name = hash->root.root.root.string;
4569 }
4570
4571 do
4572 {
4573 /* Determine what (if any) linker stub is needed. */
4574 stub_type = arm_type_of_stub (info, section, irela,
4575 &st_type, hash,
4576 destination, sym_sec,
4577 input_bfd, sym_name);
4578 if (stub_type == arm_stub_none)
4579 break;
4580
4581 /* Support for grouping stub sections. */
4582 id_sec = htab->stub_group[section->id].link_sec;
4583
4584 /* Get the name of this stub. */
4585 stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash,
4586 irela, stub_type);
4587 if (!stub_name)
4588 goto error_ret_free_internal;
4589
4590 /* We've either created a stub for this reloc already,
4591 or we are about to. */
4592 created_stub = TRUE;
4593
4594 stub_entry = arm_stub_hash_lookup
4595 (&htab->stub_hash_table, stub_name,
4596 FALSE, FALSE);
4597 if (stub_entry != NULL)
4598 {
4599 /* The proper stub has already been created. */
4600 free (stub_name);
4601 stub_entry->target_value = sym_value;
4602 break;
4603 }
4604
4605 stub_entry = elf32_arm_add_stub (stub_name, section,
4606 htab);
4607 if (stub_entry == NULL)
4608 {
4609 free (stub_name);
4610 goto error_ret_free_internal;
4611 }
4612
4613 stub_entry->target_value = sym_value;
4614 stub_entry->target_section = sym_sec;
4615 stub_entry->stub_type = stub_type;
4616 stub_entry->h = hash;
4617 stub_entry->st_type = st_type;
4618
4619 if (sym_name == NULL)
4620 sym_name = "unnamed";
4621 stub_entry->output_name = (char *)
4622 bfd_alloc (htab->stub_bfd,
4623 sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
4624 + strlen (sym_name));
4625 if (stub_entry->output_name == NULL)
4626 {
4627 free (stub_name);
4628 goto error_ret_free_internal;
4629 }
4630
4631 /* For historical reasons, use the existing names for
4632 ARM-to-Thumb and Thumb-to-ARM stubs. */
4633 if ( ((r_type == (unsigned int) R_ARM_THM_CALL)
4634 || (r_type == (unsigned int) R_ARM_THM_JUMP24))
4635 && st_type != STT_ARM_TFUNC)
4636 sprintf (stub_entry->output_name,
4637 THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
4638 else if ( ((r_type == (unsigned int) R_ARM_CALL)
4639 || (r_type == (unsigned int) R_ARM_JUMP24))
4640 && st_type == STT_ARM_TFUNC)
4641 sprintf (stub_entry->output_name,
4642 ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
4643 else
4644 sprintf (stub_entry->output_name, STUB_ENTRY_NAME,
4645 sym_name);
4646
4647 stub_changed = TRUE;
4648 }
4649 while (0);
4650
4651 /* Look for relocations which might trigger Cortex-A8
4652 erratum. */
4653 if (htab->fix_cortex_a8
4654 && (r_type == (unsigned int) R_ARM_THM_JUMP24
4655 || r_type == (unsigned int) R_ARM_THM_JUMP19
4656 || r_type == (unsigned int) R_ARM_THM_CALL
4657 || r_type == (unsigned int) R_ARM_THM_XPC22))
4658 {
4659 bfd_vma from = section->output_section->vma
4660 + section->output_offset
4661 + irela->r_offset;
4662
4663 if ((from & 0xfff) == 0xffe)
4664 {
4665 /* Found a candidate. Note we haven't checked the
4666 destination is within 4K here: if we do so (and
4667 don't create an entry in a8_relocs) we can't tell
4668 that a branch should have been relocated when
4669 scanning later. */
4670 if (num_a8_relocs == a8_reloc_table_size)
4671 {
4672 a8_reloc_table_size *= 2;
4673 a8_relocs = (struct a8_erratum_reloc *)
4674 bfd_realloc (a8_relocs,
4675 sizeof (struct a8_erratum_reloc)
4676 * a8_reloc_table_size);
4677 }
4678
4679 a8_relocs[num_a8_relocs].from = from;
4680 a8_relocs[num_a8_relocs].destination = destination;
4681 a8_relocs[num_a8_relocs].r_type = r_type;
4682 a8_relocs[num_a8_relocs].st_type = st_type;
4683 a8_relocs[num_a8_relocs].sym_name = sym_name;
4684 a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
4685
4686 num_a8_relocs++;
4687 }
4688 }
4689 }
4690
4691 /* We're done with the internal relocs, free them. */
4692 if (elf_section_data (section)->relocs == NULL)
4693 free (internal_relocs);
4694 }
4695
4696 if (htab->fix_cortex_a8)
4697 {
4698 /* Sort relocs which might apply to Cortex-A8 erratum. */
4699 qsort (a8_relocs, num_a8_relocs,
4700 sizeof (struct a8_erratum_reloc),
4701 &a8_reloc_compare);
4702
4703 /* Scan for branches which might trigger Cortex-A8 erratum. */
4704 if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
4705 &num_a8_fixes, &a8_fix_table_size,
4706 a8_relocs, num_a8_relocs,
4707 prev_num_a8_fixes, &stub_changed)
4708 != 0)
4709 goto error_ret_free_local;
4710 }
4711 }
4712
4713 if (prev_num_a8_fixes != num_a8_fixes)
4714 stub_changed = TRUE;
4715
4716 if (!stub_changed)
4717 break;
4718
4719 /* OK, we've added some stubs. Find out the new size of the
4720 stub sections. */
4721 for (stub_sec = htab->stub_bfd->sections;
4722 stub_sec != NULL;
4723 stub_sec = stub_sec->next)
4724 {
4725 /* Ignore non-stub sections. */
4726 if (!strstr (stub_sec->name, STUB_SUFFIX))
4727 continue;
4728
4729 stub_sec->size = 0;
4730 }
4731
4732 bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
4733
4734 /* Add Cortex-A8 erratum veneers to stub section sizes too. */
4735 if (htab->fix_cortex_a8)
4736 for (i = 0; i < num_a8_fixes; i++)
4737 {
4738 stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
4739 a8_fixes[i].section, htab);
4740
4741 if (stub_sec == NULL)
4742 goto error_ret_free_local;
4743
4744 stub_sec->size
4745 += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
4746 NULL);
4747 }
4748
4749
4750 /* Ask the linker to do its stuff. */
4751 (*htab->layout_sections_again) ();
4752 }
4753
4754 /* Add stubs for Cortex-A8 erratum fixes now. */
4755 if (htab->fix_cortex_a8)
4756 {
4757 for (i = 0; i < num_a8_fixes; i++)
4758 {
4759 struct elf32_arm_stub_hash_entry *stub_entry;
4760 char *stub_name = a8_fixes[i].stub_name;
4761 asection *section = a8_fixes[i].section;
4762 unsigned int section_id = a8_fixes[i].section->id;
4763 asection *link_sec = htab->stub_group[section_id].link_sec;
4764 asection *stub_sec = htab->stub_group[section_id].stub_sec;
4765 const insn_sequence *template_sequence;
4766 int template_size, size = 0;
4767
4768 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
4769 TRUE, FALSE);
4770 if (stub_entry == NULL)
4771 {
4772 	      (*_bfd_error_handler) (_("%B: cannot create stub entry %s"),
4773 section->owner,
4774 stub_name);
4775 return FALSE;
4776 }
4777
4778 stub_entry->stub_sec = stub_sec;
4779 stub_entry->stub_offset = 0;
4780 stub_entry->id_sec = link_sec;
4781 stub_entry->stub_type = a8_fixes[i].stub_type;
4782 stub_entry->target_section = a8_fixes[i].section;
4783 stub_entry->target_value = a8_fixes[i].offset;
4784 stub_entry->target_addend = a8_fixes[i].addend;
4785 stub_entry->orig_insn = a8_fixes[i].orig_insn;
4786 stub_entry->st_type = a8_fixes[i].st_type;
4787
4788 size = find_stub_size_and_template (a8_fixes[i].stub_type,
4789 &template_sequence,
4790 &template_size);
4791
4792 stub_entry->stub_size = size;
4793 stub_entry->stub_template = template_sequence;
4794 stub_entry->stub_template_size = template_size;
4795 }
4796
4797 /* Stash the Cortex-A8 erratum fix array for use later in
4798 elf32_arm_write_section(). */
4799 htab->a8_erratum_fixes = a8_fixes;
4800 htab->num_a8_erratum_fixes = num_a8_fixes;
4801 }
4802 else
4803 {
4804 htab->a8_erratum_fixes = NULL;
4805 htab->num_a8_erratum_fixes = 0;
4806 }
4807 return TRUE;
4808
4809 error_ret_free_local:
4810 return FALSE;
4811 }
4812
4813 /* Build all the stubs associated with the current output file. The
4814 stubs are kept in a hash table attached to the main linker hash
4815 table. We also set up the .plt entries for statically linked PIC
4816 functions here. This function is called via arm_elf_finish in the
4817 linker. */
4818
4819 bfd_boolean
4820 elf32_arm_build_stubs (struct bfd_link_info *info)
4821 {
4822 asection *stub_sec;
4823 struct bfd_hash_table *table;
4824 struct elf32_arm_link_hash_table *htab;
4825
4826 htab = elf32_arm_hash_table (info);
4827 if (htab == NULL)
4828 return FALSE;
4829
4830 for (stub_sec = htab->stub_bfd->sections;
4831 stub_sec != NULL;
4832 stub_sec = stub_sec->next)
4833 {
4834 bfd_size_type size;
4835
4836 /* Ignore non-stub sections. */
4837 if (!strstr (stub_sec->name, STUB_SUFFIX))
4838 continue;
4839
4840 /* Allocate memory to hold the linker stubs. */
4841 size = stub_sec->size;
4842 stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size);
4843 if (stub_sec->contents == NULL && size != 0)
4844 return FALSE;
4845 stub_sec->size = 0;
4846 }
4847
4848 /* Build the stubs as directed by the stub hash table. */
4849 table = &htab->stub_hash_table;
4850 bfd_hash_traverse (table, arm_build_one_stub, info);
4851 if (htab->fix_cortex_a8)
4852 {
4853 /* Place the cortex a8 stubs last. */
4854 htab->fix_cortex_a8 = -1;
4855 bfd_hash_traverse (table, arm_build_one_stub, info);
4856 }
4857
4858 return TRUE;
4859 }
4860
4861 /* Locate the Thumb encoded calling stub for NAME. */
4862
4863 static struct elf_link_hash_entry *
4864 find_thumb_glue (struct bfd_link_info *link_info,
4865 const char *name,
4866 char **error_message)
4867 {
4868 char *tmp_name;
4869 struct elf_link_hash_entry *hash;
4870 struct elf32_arm_link_hash_table *hash_table;
4871
4872 /* We need a pointer to the armelf specific hash table. */
4873 hash_table = elf32_arm_hash_table (link_info);
4874 if (hash_table == NULL)
4875 return NULL;
4876
4877 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
4878 + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
4879
4880 BFD_ASSERT (tmp_name);
4881
4882 sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
4883
4884 hash = elf_link_hash_lookup
4885 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
4886
4887 if (hash == NULL
4888 && asprintf (error_message, _("unable to find THUMB glue '%s' for '%s'"),
4889 tmp_name, name) == -1)
4890 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
4891
4892 free (tmp_name);
4893
4894 return hash;
4895 }
4896
4897 /* Locate the ARM encoded calling stub for NAME. */
4898
4899 static struct elf_link_hash_entry *
4900 find_arm_glue (struct bfd_link_info *link_info,
4901 const char *name,
4902 char **error_message)
4903 {
4904 char *tmp_name;
4905 struct elf_link_hash_entry *myh;
4906 struct elf32_arm_link_hash_table *hash_table;
4907
4908 /* We need a pointer to the elfarm specific hash table. */
4909 hash_table = elf32_arm_hash_table (link_info);
4910 if (hash_table == NULL)
4911 return NULL;
4912
4913 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
4914 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
4915
4916 BFD_ASSERT (tmp_name);
4917
4918 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
4919
4920 myh = elf_link_hash_lookup
4921 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
4922
4923 if (myh == NULL
4924 && asprintf (error_message, _("unable to find ARM glue '%s' for '%s'"),
4925 tmp_name, name) == -1)
4926 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
4927
4928 free (tmp_name);
4929
4930 return myh;
4931 }
4932
4933 /* ARM->Thumb glue (static images):
4934
4935 .arm
4936 __func_from_arm:
4937 ldr r12, __func_addr
4938 bx r12
4939 __func_addr:
4940    .word func    @ behave as if you saw an ARM_32 reloc.
4941
4942 (v5t static images)
4943 .arm
4944 __func_from_arm:
4945 ldr pc, __func_addr
4946 __func_addr:
4947    .word func    @ behave as if you saw an ARM_32 reloc.
4948
4949 (relocatable images)
4950 .arm
4951 __func_from_arm:
4952 ldr r12, __func_offset
4953 add r12, r12, pc
4954 bx r12
4955 __func_offset:
4956 .word func - . */
4957
4958 #define ARM2THUMB_STATIC_GLUE_SIZE 12
4959 static const insn32 a2t1_ldr_insn = 0xe59fc000;
4960 static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;
4961 static const insn32 a2t3_func_addr_insn = 0x00000001;
4962
4963 #define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
4964 static const insn32 a2t1v5_ldr_insn = 0xe51ff004;
4965 static const insn32 a2t2v5_func_addr_insn = 0x00000001;
4966
4967 #define ARM2THUMB_PIC_GLUE_SIZE 16
4968 static const insn32 a2t1p_ldr_insn = 0xe59fc004;
4969 static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;
4970 static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;
4971
4972 /* Thumb->ARM: Thumb->(non-interworking aware) ARM
4973
4974 .thumb .thumb
4975 .align 2 .align 2
4976 __func_from_thumb: __func_from_thumb:
4977 bx pc push {r6, lr}
4978 nop ldr r6, __func_addr
4979 .arm mov lr, pc
4980 b func bx r6
4981 .arm
4982 ;; back_to_thumb
4983                                ldmia r13!, {r6, lr}
4984 bx lr
4985 __func_addr:
4986 .word func */
4987
4988 #define THUMB2ARM_GLUE_SIZE 8
4989 static const insn16 t2a1_bx_pc_insn = 0x4778;
4990 static const insn16 t2a2_noop_insn = 0x46c0;
4991 static const insn32 t2a3_b_insn = 0xea000000;
4992
4993 #define VFP11_ERRATUM_VENEER_SIZE 8
4994
4995 #define ARM_BX_VENEER_SIZE 12
4996 static const insn32 armbx1_tst_insn = 0xe3100001;
4997 static const insn32 armbx2_moveq_insn = 0x01a0f000;
4998 static const insn32 armbx3_bx_insn = 0xe12fff10;
4999
5000 #ifndef ELFARM_NABI_C_INCLUDED
5001 static void
5002 arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
5003 {
5004 asection * s;
5005 bfd_byte * contents;
5006
5007 if (size == 0)
5008 {
5009 /* Do not include empty glue sections in the output. */
5010 if (abfd != NULL)
5011 {
5012 s = bfd_get_section_by_name (abfd, name);
5013 if (s != NULL)
5014 s->flags |= SEC_EXCLUDE;
5015 }
5016 return;
5017 }
5018
5019 BFD_ASSERT (abfd != NULL);
5020
5021 s = bfd_get_section_by_name (abfd, name);
5022 BFD_ASSERT (s != NULL);
5023
5024 contents = (bfd_byte *) bfd_alloc (abfd, size);
5025
5026 BFD_ASSERT (s->size == size);
5027 s->contents = contents;
5028 }
5029
5030 bfd_boolean
5031 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
5032 {
5033 struct elf32_arm_link_hash_table * globals;
5034
5035 globals = elf32_arm_hash_table (info);
5036 BFD_ASSERT (globals != NULL);
5037
5038 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5039 globals->arm_glue_size,
5040 ARM2THUMB_GLUE_SECTION_NAME);
5041
5042 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5043 globals->thumb_glue_size,
5044 THUMB2ARM_GLUE_SECTION_NAME);
5045
5046 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5047 globals->vfp11_erratum_glue_size,
5048 VFP11_ERRATUM_VENEER_SECTION_NAME);
5049
5050 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5051 globals->bx_glue_size,
5052 ARM_BX_GLUE_SECTION_NAME);
5053
5054 return TRUE;
5055 }
5056
5057 /* Allocate space and symbols for calling a Thumb function from ARM mode.
5058    Returns the symbol identifying the stub.  */
5059
5060 static struct elf_link_hash_entry *
5061 record_arm_to_thumb_glue (struct bfd_link_info * link_info,
5062 struct elf_link_hash_entry * h)
5063 {
5064 const char * name = h->root.root.string;
5065 asection * s;
5066 char * tmp_name;
5067 struct elf_link_hash_entry * myh;
5068 struct bfd_link_hash_entry * bh;
5069 struct elf32_arm_link_hash_table * globals;
5070 bfd_vma val;
5071 bfd_size_type size;
5072
5073 globals = elf32_arm_hash_table (link_info);
5074 BFD_ASSERT (globals != NULL);
5075 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
5076
5077 s = bfd_get_section_by_name
5078 (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);
5079
5080 BFD_ASSERT (s != NULL);
5081
5082 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
5083 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
5084
5085 BFD_ASSERT (tmp_name);
5086
5087 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
5088
5089 myh = elf_link_hash_lookup
5090 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
5091
5092 if (myh != NULL)
5093 {
5094 /* We've already seen this guy. */
5095 free (tmp_name);
5096 return myh;
5097 }
5098
5099 /* The only trick here is using hash_table->arm_glue_size as the value.
5100 Even though the section isn't allocated yet, this is where we will be
5101 putting it. The +1 on the value marks that the stub has not been
5102 output yet - not that it is a Thumb function. */
5103 bh = NULL;
5104 val = globals->arm_glue_size + 1;
5105 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
5106 tmp_name, BSF_GLOBAL, s, val,
5107 NULL, TRUE, FALSE, &bh);
5108
5109 myh = (struct elf_link_hash_entry *) bh;
5110 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5111 myh->forced_local = 1;
5112
5113 free (tmp_name);
5114
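  /* The three sizes below correspond to the glue sequences shown earlier:
     16 bytes for the PIC sequence, 8 bytes when BLX is available, and
     12 bytes for the plain static sequence.  */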
5115 if (link_info->shared || globals->root.is_relocatable_executable
5116 || globals->pic_veneer)
5117 size = ARM2THUMB_PIC_GLUE_SIZE;
5118 else if (globals->use_blx)
5119 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
5120 else
5121 size = ARM2THUMB_STATIC_GLUE_SIZE;
5122
5123 s->size += size;
5124 globals->arm_glue_size += size;
5125
5126 return myh;
5127 }
5128
5129 /* Allocate space for ARMv4 BX veneers. */
5130
5131 static void
5132 record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
5133 {
5134 asection * s;
5135 struct elf32_arm_link_hash_table *globals;
5136 char *tmp_name;
5137 struct elf_link_hash_entry *myh;
5138 struct bfd_link_hash_entry *bh;
5139 bfd_vma val;
5140
5141 /* BX PC does not need a veneer. */
5142 if (reg == 15)
5143 return;
5144
5145 globals = elf32_arm_hash_table (link_info);
5146 BFD_ASSERT (globals != NULL);
5147 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
5148
5149 /* Check if this veneer has already been allocated. */
5150 if (globals->bx_glue_offset[reg])
5151 return;
5152
5153 s = bfd_get_section_by_name
5154 (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);
5155
5156 BFD_ASSERT (s != NULL);
5157
5158 /* Add symbol for veneer. */
5159 tmp_name = (char *)
5160 bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);
5161
5162 BFD_ASSERT (tmp_name);
5163
5164 sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);
5165
5166 myh = elf_link_hash_lookup
5167 (&(globals)->root, tmp_name, FALSE, FALSE, FALSE);
5168
5169 BFD_ASSERT (myh == NULL);
5170
5171 bh = NULL;
5172 val = globals->bx_glue_size;
5173 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
5174 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
5175 NULL, TRUE, FALSE, &bh);
5176
5177 myh = (struct elf_link_hash_entry *) bh;
5178 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5179 myh->forced_local = 1;
5180
5181 s->size += ARM_BX_VENEER_SIZE;
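  /* OR in 2 so the recorded value is non-zero even for the veneer at
     offset zero; the early-return check above treats any non-zero entry as
     "already allocated".  The true offset can be recovered by masking off
     the low bits, since veneers are 4-byte aligned.  */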
5182 globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
5183 globals->bx_glue_size += ARM_BX_VENEER_SIZE;
5184 }
5185
5186
5187 /* Add an entry to the code/data map for section SEC. */
5188
5189 static void
5190 elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
5191 {
5192 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
5193 unsigned int newidx;
5194
5195 if (sec_data->map == NULL)
5196 {
5197 sec_data->map = (elf32_arm_section_map *)
5198 bfd_malloc (sizeof (elf32_arm_section_map));
5199 sec_data->mapcount = 0;
5200 sec_data->mapsize = 1;
5201 }
5202
5203 newidx = sec_data->mapcount++;
5204
5205 if (sec_data->mapcount > sec_data->mapsize)
5206 {
5207 sec_data->mapsize *= 2;
5208 sec_data->map = (elf32_arm_section_map *)
5209 bfd_realloc_or_free (sec_data->map, sec_data->mapsize
5210 * sizeof (elf32_arm_section_map));
5211 }
5212
5213 if (sec_data->map)
5214 {
5215 sec_data->map[newidx].vma = vma;
5216 sec_data->map[newidx].type = type;
5217 }
5218 }
5219
5220
5221 /* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode
5222 veneers are handled for now. */
5223
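/* The routine below creates a local symbol naming the veneer in the glue
   section, a companion "_r" symbol at OFFSET + 4 in the calling section
   (the point the veneer returns to), and an erratum-list entry linking the
   veneer back to BRANCH so that elf32_arm_write_section can emit the final
   code.  */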
5224 static bfd_vma
5225 record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
5226 elf32_vfp11_erratum_list *branch,
5227 bfd *branch_bfd,
5228 asection *branch_sec,
5229 unsigned int offset)
5230 {
5231 asection *s;
5232 struct elf32_arm_link_hash_table *hash_table;
5233 char *tmp_name;
5234 struct elf_link_hash_entry *myh;
5235 struct bfd_link_hash_entry *bh;
5236 bfd_vma val;
5237 struct _arm_elf_section_data *sec_data;
5238 int errcount;
5239 elf32_vfp11_erratum_list *newerr;
5240
5241 hash_table = elf32_arm_hash_table (link_info);
5242 BFD_ASSERT (hash_table != NULL);
5243 BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
5244
5245 s = bfd_get_section_by_name
5246 (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);
5247
5248 BFD_ASSERT (s != NULL);
5249
5250 sec_data = elf32_arm_section_data (s);
5251
5252 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
5253 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
5254
5255 BFD_ASSERT (tmp_name);
5256
5257 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
5258 hash_table->num_vfp11_fixes);
5259
5260 myh = elf_link_hash_lookup
5261 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
5262
5263 BFD_ASSERT (myh == NULL);
5264
5265 bh = NULL;
5266 val = hash_table->vfp11_erratum_glue_size;
5267 _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
5268 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
5269 NULL, TRUE, FALSE, &bh);
5270
5271 myh = (struct elf_link_hash_entry *) bh;
5272 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5273 myh->forced_local = 1;
5274
5275 /* Link veneer back to calling location. */
5276 errcount = ++(sec_data->erratumcount);
5277 newerr = (elf32_vfp11_erratum_list *)
5278 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
5279
5280 newerr->type = VFP11_ERRATUM_ARM_VENEER;
5281 newerr->vma = -1;
5282 newerr->u.v.branch = branch;
5283 newerr->u.v.id = hash_table->num_vfp11_fixes;
5284 branch->u.b.veneer = newerr;
5285
5286 newerr->next = sec_data->erratumlist;
5287 sec_data->erratumlist = newerr;
5288
5289 /* A symbol for the return from the veneer. */
5290 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
5291 hash_table->num_vfp11_fixes);
5292
5293 myh = elf_link_hash_lookup
5294 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
5295
5296 if (myh != NULL)
5297 abort ();
5298
5299 bh = NULL;
5300 val = offset + 4;
5301 _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
5302 branch_sec, val, NULL, TRUE, FALSE, &bh);
5303
5304 myh = (struct elf_link_hash_entry *) bh;
5305 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5306 myh->forced_local = 1;
5307
5308 free (tmp_name);
5309
5310 /* Generate a mapping symbol for the veneer section, and explicitly add an
5311 entry for that symbol to the code/data map for the section. */
5312 if (hash_table->vfp11_erratum_glue_size == 0)
5313 {
5314 bh = NULL;
5315 /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it
5316 ever requires this erratum fix. */
5317 _bfd_generic_link_add_one_symbol (link_info,
5318 hash_table->bfd_of_glue_owner, "$a",
5319 BSF_LOCAL, s, 0, NULL,
5320 TRUE, FALSE, &bh);
5321
5322 myh = (struct elf_link_hash_entry *) bh;
5323 myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
5324 myh->forced_local = 1;
5325
5326 /* The elf32_arm_init_maps function only cares about symbols from input
5327 BFDs. We must make a note of this generated mapping symbol
5328 ourselves so that code byteswapping works properly in
5329 elf32_arm_write_section. */
5330 elf32_arm_section_map_add (s, 'a', 0);
5331 }
5332
5333 s->size += VFP11_ERRATUM_VENEER_SIZE;
5334 hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
5335 hash_table->num_vfp11_fixes++;
5336
5337 /* The offset of the veneer. */
5338 return val;
5339 }
5340
5341 #define ARM_GLUE_SECTION_FLAGS \
5342 (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
5343 | SEC_READONLY | SEC_LINKER_CREATED)
5344
5345 /* Create a fake section for use by the ARM backend of the linker. */
5346
5347 static bfd_boolean
5348 arm_make_glue_section (bfd * abfd, const char * name)
5349 {
5350 asection * sec;
5351
5352 sec = bfd_get_section_by_name (abfd, name);
5353 if (sec != NULL)
5354 /* Already made. */
5355 return TRUE;
5356
5357 sec = bfd_make_section_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);
5358
5359 if (sec == NULL
5360 || !bfd_set_section_alignment (abfd, sec, 2))
5361 return FALSE;
5362
5363 /* Set the gc mark to prevent the section from being removed by garbage
5364 collection, despite the fact that no relocs refer to this section. */
5365 sec->gc_mark = 1;
5366
5367 return TRUE;
5368 }
5369
5370 /* Add the glue sections to ABFD. This function is called from the
5371 linker scripts in ld/emultempl/{armelf}.em. */
5372
5373 bfd_boolean
5374 bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
5375 struct bfd_link_info *info)
5376 {
5377 /* If we are only performing a partial
5378 link do not bother adding the glue. */
5379 if (info->relocatable)
5380 return TRUE;
5381
5382 return arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
5383 && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
5384 && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
5385 && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
5386 }
5387
5388 /* Select a BFD to be used to hold the sections used by the glue code.
5389 This function is called from the linker scripts in ld/emultempl/
5390 {armelf/pe}.em. */
5391
5392 bfd_boolean
5393 bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
5394 {
5395 struct elf32_arm_link_hash_table *globals;
5396
5397 /* If we are only performing a partial link
5398 do not bother getting a bfd to hold the glue. */
5399 if (info->relocatable)
5400 return TRUE;
5401
5402 /* Make sure we don't attach the glue sections to a dynamic object. */
5403 BFD_ASSERT (!(abfd->flags & DYNAMIC));
5404
5405 globals = elf32_arm_hash_table (info);
5406 BFD_ASSERT (globals != NULL);
5407
5408 if (globals->bfd_of_glue_owner != NULL)
5409 return TRUE;
5410
5411 /* Save the bfd for later use. */
5412 globals->bfd_of_glue_owner = abfd;
5413
5414 return TRUE;
5415 }
5416
5417 static void
5418 check_use_blx (struct elf32_arm_link_hash_table *globals)
5419 {
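/* Tag_CPU_arch values above 2 (ARM v4T) correspond to ARM v5T and later,
   which provide the BLX instruction.  */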
5420 if (bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
5421 Tag_CPU_arch) > 2)
5422 globals->use_blx = 1;
5423 }
5424
5425 bfd_boolean
5426 bfd_elf32_arm_process_before_allocation (bfd *abfd,
5427 struct bfd_link_info *link_info)
5428 {
5429 Elf_Internal_Shdr *symtab_hdr;
5430 Elf_Internal_Rela *internal_relocs = NULL;
5431 Elf_Internal_Rela *irel, *irelend;
5432 bfd_byte *contents = NULL;
5433
5434 asection *sec;
5435 struct elf32_arm_link_hash_table *globals;
5436
5437 /* If we are only performing a partial link do not bother
5438 to construct any glue. */
5439 if (link_info->relocatable)
5440 return TRUE;
5441
5442 /* Here we have a bfd that is to be included on the link. We have a
5443 hook to do reloc rummaging, before section sizes are nailed down. */
5444 globals = elf32_arm_hash_table (link_info);
5445 BFD_ASSERT (globals != NULL);
5446
5447 check_use_blx (globals);
5448
5449 if (globals->byteswap_code && !bfd_big_endian (abfd))
5450 {
5451 _bfd_error_handler (_("%B: BE8 images only valid in big-endian mode."),
5452 abfd);
5453 return FALSE;
5454 }
5455
5456 /* PR 5398: If we have not decided to include any loadable sections in
5457 the output then we will not have a glue owner bfd. This is OK, it
5458 just means that there is nothing else for us to do here. */
5459 if (globals->bfd_of_glue_owner == NULL)
5460 return TRUE;
5461
5462 /* Rummage around all the relocs and map the glue vectors. */
5463 sec = abfd->sections;
5464
5465 if (sec == NULL)
5466 return TRUE;
5467
5468 for (; sec != NULL; sec = sec->next)
5469 {
5470 if (sec->reloc_count == 0)
5471 continue;
5472
5473 if ((sec->flags & SEC_EXCLUDE) != 0)
5474 continue;
5475
5476 symtab_hdr = & elf_symtab_hdr (abfd);
5477
5478 /* Load the relocs. */
5479 internal_relocs
5480 = _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, FALSE);
5481
5482 if (internal_relocs == NULL)
5483 goto error_return;
5484
5485 irelend = internal_relocs + sec->reloc_count;
5486 for (irel = internal_relocs; irel < irelend; irel++)
5487 {
5488 long r_type;
5489 unsigned long r_index;
5490
5491 struct elf_link_hash_entry *h;
5492
5493 r_type = ELF32_R_TYPE (irel->r_info);
5494 r_index = ELF32_R_SYM (irel->r_info);
5495
5496 /* These are the only relocation types we care about. */
5497 if ( r_type != R_ARM_PC24
5498 && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
5499 continue;
5500
5501 /* Get the section contents if we haven't done so already. */
5502 if (contents == NULL)
5503 {
5504 /* Get cached copy if it exists. */
5505 if (elf_section_data (sec)->this_hdr.contents != NULL)
5506 contents = elf_section_data (sec)->this_hdr.contents;
5507 else
5508 {
5509 /* Go get them off disk. */
5510 if (! bfd_malloc_and_get_section (abfd, sec, &contents))
5511 goto error_return;
5512 }
5513 }
5514
5515 if (r_type == R_ARM_V4BX)
5516 {
5517 int reg;
5518
5519 reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
5520 record_arm_bx_glue (link_info, reg);
5521 continue;
5522 }
5523
5524 /* If the relocation is not against a symbol it cannot concern us. */
5525 h = NULL;
5526
5527 /* We don't care about local symbols. */
5528 if (r_index < symtab_hdr->sh_info)
5529 continue;
5530
5531 /* This is an external symbol. */
5532 r_index -= symtab_hdr->sh_info;
5533 h = (struct elf_link_hash_entry *)
5534 elf_sym_hashes (abfd)[r_index];
5535
5536 /* If the relocation is against a static symbol it must be within
5537 the current section and so cannot be a cross ARM/Thumb relocation. */
5538 if (h == NULL)
5539 continue;
5540
5541 /* If the call will go through a PLT entry then we do not need
5542 glue. */
5543 if (globals->splt != NULL && h->plt.offset != (bfd_vma) -1)
5544 continue;
5545
5546 switch (r_type)
5547 {
5548 case R_ARM_PC24:
5549 /* This one is a call from arm code. We need to look up
5550 the target of the call. If it is a thumb target, we
5551 insert glue. */
5552 if (ELF_ST_TYPE (h->type) == STT_ARM_TFUNC)
5553 record_arm_to_thumb_glue (link_info, h);
5554 break;
5555
5556 default:
5557 abort ();
5558 }
5559 }
5560
5561 if (contents != NULL
5562 && elf_section_data (sec)->this_hdr.contents != contents)
5563 free (contents);
5564 contents = NULL;
5565
5566 if (internal_relocs != NULL
5567 && elf_section_data (sec)->relocs != internal_relocs)
5568 free (internal_relocs);
5569 internal_relocs = NULL;
5570 }
5571
5572 return TRUE;
5573
5574 error_return:
5575 if (contents != NULL
5576 && elf_section_data (sec)->this_hdr.contents != contents)
5577 free (contents);
5578 if (internal_relocs != NULL
5579 && elf_section_data (sec)->relocs != internal_relocs)
5580 free (internal_relocs);
5581
5582 return FALSE;
5583 }
5584 #endif
5585
5586
5587 /* Initialise maps of ARM/Thumb/data for input BFDs. */
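/* Editorial note: the maps are built from the ARM EABI mapping symbols
   $a (ARM code), $t (Thumb code) and $d (data); the character after the
   '$' is the type passed to elf32_arm_section_map_add below.  */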
5588
5589 void
5590 bfd_elf32_arm_init_maps (bfd *abfd)
5591 {
5592 Elf_Internal_Sym *isymbuf;
5593 Elf_Internal_Shdr *hdr;
5594 unsigned int i, localsyms;
5595
5596 /* PR 7093: Make sure that we are dealing with an arm elf binary. */
5597 if (! is_arm_elf (abfd))
5598 return;
5599
5600 if ((abfd->flags & DYNAMIC) != 0)
5601 return;
5602
5603 hdr = & elf_symtab_hdr (abfd);
5604 localsyms = hdr->sh_info;
5605
5606 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
5607 should contain the number of local symbols, which should come before any
5608 global symbols. Mapping symbols are always local. */
5609 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
5610 NULL);
5611
5612 /* No internal symbols read? Skip this BFD. */
5613 if (isymbuf == NULL)
5614 return;
5615
5616 for (i = 0; i < localsyms; i++)
5617 {
5618 Elf_Internal_Sym *isym = &isymbuf[i];
5619 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
5620 const char *name;
5621
5622 if (sec != NULL
5623 && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
5624 {
5625 name = bfd_elf_string_from_elf_section (abfd,
5626 hdr->sh_link, isym->st_name);
5627
5628 if (bfd_is_arm_special_symbol_name (name,
5629 BFD_ARM_SPECIAL_SYM_TYPE_MAP))
5630 elf32_arm_section_map_add (sec, name[1], isym->st_value);
5631 }
5632 }
5633 }
5634
5635
5636 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
5637 say what they wanted. */
5638
5639 void
5640 bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
5641 {
5642 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
5643 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
5644
5645 if (globals == NULL)
5646 return;
5647
5648 if (globals->fix_cortex_a8 == -1)
5649 {
5650 /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */
5651 if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
5652 && (out_attr[Tag_CPU_arch_profile].i == 'A'
5653 || out_attr[Tag_CPU_arch_profile].i == 0))
5654 globals->fix_cortex_a8 = 1;
5655 else
5656 globals->fix_cortex_a8 = 0;
5657 }
5658 }
5659
5660
5661 void
5662 bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
5663 {
5664 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
5665 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
5666
5667 if (globals == NULL)
5668 return;
5669 /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix. */
5670 if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
5671 {
5672 switch (globals->vfp11_fix)
5673 {
5674 case BFD_ARM_VFP11_FIX_DEFAULT:
5675 case BFD_ARM_VFP11_FIX_NONE:
5676 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
5677 break;
5678
5679 default:
5680 /* Give a warning, but do as the user requests anyway. */
5681 (*_bfd_error_handler) (_("%B: warning: selected VFP11 erratum "
5682 "workaround is not necessary for target architecture"), obfd);
5683 }
5684 }
5685 else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
5686 /* For earlier architectures, we might need the workaround, but do not
5687 enable it by default. If users is running with broken hardware, they
5688 must enable the erratum fix explicitly. */
5689 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
5690 }
5691
5692
5693 enum bfd_arm_vfp11_pipe
5694 {
5695 VFP11_FMAC,
5696 VFP11_LS,
5697 VFP11_DS,
5698 VFP11_BAD
5699 };
5700
5701 /* Return a VFP register number. This is encoded as RX:X for single-precision
5702 registers, or X:RX for double-precision registers, where RX is the group of
5703 four bits in the instruction encoding and X is the single extension bit.
5704 RX and X fields are specified using their lowest (starting) bit. The return
5705 value is:
5706
5707 0...31: single-precision registers s0...s31
5708 32...63: double-precision registers d0...d31.
5709
5710 Although X should be zero for VFP11 (encoding d0...d15 only), we might
5711 encounter VFP3 instructions, so we allow the full range for DP registers. */
5712
5713 static unsigned int
5714 bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx,
5715 unsigned int x)
5716 {
5717 if (is_double)
5718 return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
5719 else
5720 return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
5721 }
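
/* Editorial sketch (compiled out): a worked example of the RX:X register
   numbering implemented above.  The instruction word is hypothetical; only
   the field positions passed to bfd_arm_vfp11_regno matter.  */
#if 0
static void
bfd_arm_vfp11_regno_example (void)
{
  /* Fd field: bits 15:12 = 3, extension bit 22 = 1.  */
  unsigned int insn = (0x3 << 12) | (1 << 22);

  /* Single precision: (3 << 1) | 1 = 7, i.e. s7.  */
  BFD_ASSERT (bfd_arm_vfp11_regno (insn, FALSE, 12, 22) == 7);

  /* Double precision: (3 | (1 << 4)) + 32 = 51, i.e. d19.  */
  BFD_ASSERT (bfd_arm_vfp11_regno (insn, TRUE, 12, 22) == 51);
}
#endif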
5722
5723 /* Set bits in *WMASK according to a register number REG as encoded by
5724 bfd_arm_vfp11_regno(). Ignore d16-d31. */
5725
5726 static void
5727 bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
5728 {
5729 if (reg < 32)
5730 *wmask |= 1 << reg;
5731 else if (reg < 48)
5732 *wmask |= 3 << ((reg - 32) * 2);
5733 }
5734
5735 /* Return TRUE if WMASK overwrites anything in REGS. */
5736
5737 static bfd_boolean
5738 bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
5739 {
5740 int i;
5741
5742 for (i = 0; i < numregs; i++)
5743 {
5744 unsigned int reg = regs[i];
5745
5746 if (reg < 32 && (wmask & (1 << reg)) != 0)
5747 return TRUE;
5748
5749 reg -= 32;
5750
5751 if (reg >= 16)
5752 continue;
5753
5754 if ((wmask & (3 << (reg * 2))) != 0)
5755 return TRUE;
5756 }
5757
5758 return FALSE;
5759 }
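
/* Editorial sketch (compiled out): how the write mask and the
   antidependency test above fit together, using hypothetical registers.
   Writing d3 (flat number 35) sets the bits for s6 and s7, so a later
   write to d3 conflicts with an earlier instruction that read s6.  */
#if 0
static void
bfd_arm_vfp11_antidependency_example (void)
{
  unsigned int wmask = 0;
  int regs[1] = { 6 };			/* Earlier instruction read s6.  */

  bfd_arm_vfp11_write_mask (&wmask, 35);	/* Later instruction writes d3.  */
  BFD_ASSERT (wmask == (3u << 6));		/* Bits for s6 and s7.  */
  BFD_ASSERT (bfd_arm_vfp11_antidependency (wmask, regs, 1));
}
#endif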
5760
5761 /* In this function, we're interested in two things: finding input registers
5762 for VFP data-processing instructions, and finding the set of registers which
5763 arbitrary VFP instructions may write to. We use a 32-bit unsigned int to
5764 hold the written set, so FLDM etc. are easy to deal with (we're only
5765 interested in 32 SP registers or 16 DP registers, due to the VFP version
5766 implemented by the chip in question). DP registers are marked by setting
5767 both SP registers in the write mask. */
5768
5769 static enum bfd_arm_vfp11_pipe
5770 bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
5771 int *numregs)
5772 {
5773 enum bfd_arm_vfp11_pipe vpipe = VFP11_BAD;
5774 bfd_boolean is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;
5775
5776 if ((insn & 0x0f000e10) == 0x0e000a00) /* A data-processing insn. */
5777 {
5778 unsigned int pqrs;
5779 unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
5780 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
5781
5782 pqrs = ((insn & 0x00800000) >> 20)
5783 | ((insn & 0x00300000) >> 19)
5784 | ((insn & 0x00000040) >> 6);
5785
5786 switch (pqrs)
5787 {
5788 case 0: /* fmac[sd]. */
5789 case 1: /* fnmac[sd]. */
5790 case 2: /* fmsc[sd]. */
5791 case 3: /* fnmsc[sd]. */
5792 vpipe = VFP11_FMAC;
5793 bfd_arm_vfp11_write_mask (destmask, fd);
5794 regs[0] = fd;
5795 regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
5796 regs[2] = fm;
5797 *numregs = 3;
5798 break;
5799
5800 case 4: /* fmul[sd]. */
5801 case 5: /* fnmul[sd]. */
5802 case 6: /* fadd[sd]. */
5803 case 7: /* fsub[sd]. */
5804 vpipe = VFP11_FMAC;
5805 goto vfp_binop;
5806
5807 case 8: /* fdiv[sd]. */
5808 vpipe = VFP11_DS;
5809 vfp_binop:
5810 bfd_arm_vfp11_write_mask (destmask, fd);
5811 regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
5812 regs[1] = fm;
5813 *numregs = 2;
5814 break;
5815
5816 case 15: /* extended opcode. */
5817 {
5818 unsigned int extn = ((insn >> 15) & 0x1e)
5819 | ((insn >> 7) & 1);
5820
5821 switch (extn)
5822 {
5823 case 0: /* fcpy[sd]. */
5824 case 1: /* fabs[sd]. */
5825 case 2: /* fneg[sd]. */
5826 case 8: /* fcmp[sd]. */
5827 case 9: /* fcmpe[sd]. */
5828 case 10: /* fcmpz[sd]. */
5829 case 11: /* fcmpez[sd]. */
5830 case 16: /* fuito[sd]. */
5831 case 17: /* fsito[sd]. */
5832 case 24: /* ftoui[sd]. */
5833 case 25: /* ftouiz[sd]. */
5834 case 26: /* ftosi[sd]. */
5835 case 27: /* ftosiz[sd]. */
5836 /* These instructions will not bounce due to underflow. */
5837 *numregs = 0;
5838 vpipe = VFP11_FMAC;
5839 break;
5840
5841 case 3: /* fsqrt[sd]. */
5842 /* fsqrt cannot underflow, but it can (perhaps) overwrite the
5843 registers of previous instructions and so trigger the erratum. */
5844 bfd_arm_vfp11_write_mask (destmask, fd);
5845 vpipe = VFP11_DS;
5846 break;
5847
5848 case 15: /* fcvt{ds,sd}. */
5849 {
5850 int rnum = 0;
5851
5852 bfd_arm_vfp11_write_mask (destmask, fd);
5853
5854 /* Only FCVTSD can underflow. */
5855 if ((insn & 0x100) != 0)
5856 regs[rnum++] = fm;
5857
5858 *numregs = rnum;
5859
5860 vpipe = VFP11_FMAC;
5861 }
5862 break;
5863
5864 default:
5865 return VFP11_BAD;
5866 }
5867 }
5868 break;
5869
5870 default:
5871 return VFP11_BAD;
5872 }
5873 }
5874 /* Two-register transfer. */
5875 else if ((insn & 0x0fe00ed0) == 0x0c400a10)
5876 {
5877 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
5878
5879 if ((insn & 0x100000) == 0)
5880 {
5881 if (is_double)
5882 bfd_arm_vfp11_write_mask (destmask, fm);
5883 else
5884 {
5885 bfd_arm_vfp11_write_mask (destmask, fm);
5886 bfd_arm_vfp11_write_mask (destmask, fm + 1);
5887 }
5888 }
5889
5890 vpipe = VFP11_LS;
5891 }
5892 else if ((insn & 0x0e100e00) == 0x0c100a00) /* A load insn. */
5893 {
5894 int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
5895 unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);
5896
5897 switch (puw)
5898 {
5899 case 0: /* Two-reg transfer. We should catch these above. */
5900 abort ();
5901
5902 case 2: /* fldm[sdx]. */
5903 case 3:
5904 case 5:
5905 {
5906 unsigned int i, offset = insn & 0xff;
5907
5908 if (is_double)
5909 offset >>= 1;
5910
5911 for (i = fd; i < fd + offset; i++)
5912 bfd_arm_vfp11_write_mask (destmask, i);
5913 }
5914 break;
5915
5916 case 4: /* fld[sd]. */
5917 case 6:
5918 bfd_arm_vfp11_write_mask (destmask, fd);
5919 break;
5920
5921 default:
5922 return VFP11_BAD;
5923 }
5924
5925 vpipe = VFP11_LS;
5926 }
5927 /* Single-register transfer. Note L==0. */
5928 else if ((insn & 0x0f100e10) == 0x0e000a10)
5929 {
5930 unsigned int opcode = (insn >> 21) & 7;
5931 unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);
5932
5933 switch (opcode)
5934 {
5935 case 0: /* fmsr/fmdlr. */
5936 case 1: /* fmdhr. */
5937 /* Mark fmdhr and fmdlr as writing to the whole of the DP
5938 destination register. I don't know if this is exactly right,
5939 but it is the conservative choice. */
5940 bfd_arm_vfp11_write_mask (destmask, fn);
5941 break;
5942
5943 case 7: /* fmxr. */
5944 break;
5945 }
5946
5947 vpipe = VFP11_LS;
5948 }
5949
5950 return vpipe;
5951 }
5952
5953
5954 static int elf32_arm_compare_mapping (const void * a, const void * b);
5955
5956
5957 /* Look for potentially-troublesome code sequences which might trigger the
5958 VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet
5959 (available from ARM) for details of the erratum. A short version is
5960 described in ld.texinfo. */
5961
5962 bfd_boolean
5963 bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
5964 {
5965 asection *sec;
5966 bfd_byte *contents = NULL;
5967 int state = 0;
5968 int regs[3], numregs = 0;
5969 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
5970 int use_vector;
5971 
5972 if (globals == NULL)
5973 return FALSE;
5974 use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);
5975 /* We use a simple FSM to match troublesome VFP11 instruction sequences.
5976 The states transition as follows:
5977
5978 0 -> 1 (vector) or 0 -> 2 (scalar)
5979 A VFP FMAC-pipeline instruction has been seen. Fill
5980 regs[0]..regs[numregs-1] with its input operands. Remember this
5981 instruction in 'first_fmac'.
5982
5983 1 -> 2
5984 Any instruction, except for a VFP instruction which overwrites
5985 regs[*].
5986
5987 1 -> 3 [ -> 0 ] or
5988 2 -> 3 [ -> 0 ]
5989 A VFP instruction has been seen which overwrites any of regs[*].
5990 We must make a veneer! Reset state to 0 before examining next
5991 instruction.
5992
5993 2 -> 0
5994 If we fail to match anything in state 2, reset to state 0 and reset
5995 the instruction pointer to the instruction after 'first_fmac'.
5996
5997 If the VFP11 vector mode is in use, there must be at least two unrelated
5998 instructions between anti-dependent VFP11 instructions to properly avoid
5999 triggering the erratum, hence the use of the extra state 1. */
6000
6001 /* If we are only performing a partial link do not bother
6002 to construct any glue. */
6003 if (link_info->relocatable)
6004 return TRUE;
6005
6006 /* Skip if this bfd does not correspond to an ELF image. */
6007 if (! is_arm_elf (abfd))
6008 return TRUE;
6009
6010 /* We should have chosen a fix type by the time we get here. */
6011 BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);
6012
6013 if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
6014 return TRUE;
6015
6016 /* Skip this BFD if it corresponds to an executable or dynamic object. */
6017 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
6018 return TRUE;
6019
6020 for (sec = abfd->sections; sec != NULL; sec = sec->next)
6021 {
6022 unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
6023 struct _arm_elf_section_data *sec_data;
6024
6025 /* If we don't have executable progbits, we're not interested in this
6026 section. Also skip if section is to be excluded. */
6027 if (elf_section_type (sec) != SHT_PROGBITS
6028 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
6029 || (sec->flags & SEC_EXCLUDE) != 0
6030 || sec->sec_info_type == ELF_INFO_TYPE_JUST_SYMS
6031 || sec->output_section == bfd_abs_section_ptr
6032 || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
6033 continue;
6034
6035 sec_data = elf32_arm_section_data (sec);
6036
6037 if (sec_data->mapcount == 0)
6038 continue;
6039
6040 if (elf_section_data (sec)->this_hdr.contents != NULL)
6041 contents = elf_section_data (sec)->this_hdr.contents;
6042 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
6043 goto error_return;
6044
6045 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
6046 elf32_arm_compare_mapping);
6047
6048 for (span = 0; span < sec_data->mapcount; span++)
6049 {
6050 unsigned int span_start = sec_data->map[span].vma;
6051 unsigned int span_end = (span == sec_data->mapcount - 1)
6052 ? sec->size : sec_data->map[span + 1].vma;
6053 char span_type = sec_data->map[span].type;
6054
6055 /* FIXME: Only ARM mode is supported at present. We may need to
6056 support Thumb-2 mode also at some point. */
6057 if (span_type != 'a')
6058 continue;
6059
6060 for (i = span_start; i < span_end;)
6061 {
6062 unsigned int next_i = i + 4;
6063 unsigned int insn = bfd_big_endian (abfd)
6064 ? (contents[i] << 24)
6065 | (contents[i + 1] << 16)
6066 | (contents[i + 2] << 8)
6067 | contents[i + 3]
6068 : (contents[i + 3] << 24)
6069 | (contents[i + 2] << 16)
6070 | (contents[i + 1] << 8)
6071 | contents[i];
6072 unsigned int writemask = 0;
6073 enum bfd_arm_vfp11_pipe vpipe;
6074
6075 switch (state)
6076 {
6077 case 0:
6078 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
6079 &numregs);
6080 /* I'm assuming the VFP11 erratum can trigger with denorm
6081 operands on either the FMAC or the DS pipeline. This might
6082 lead to slightly overenthusiastic veneer insertion. */
6083 if (vpipe == VFP11_FMAC || vpipe == VFP11_DS)
6084 {
6085 state = use_vector ? 1 : 2;
6086 first_fmac = i;
6087 veneer_of_insn = insn;
6088 }
6089 break;
6090
6091 case 1:
6092 {
6093 int other_regs[3], other_numregs;
6094 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
6095 other_regs,
6096 &other_numregs);
6097 if (vpipe != VFP11_BAD
6098 && bfd_arm_vfp11_antidependency (writemask, regs,
6099 numregs))
6100 state = 3;
6101 else
6102 state = 2;
6103 }
6104 break;
6105
6106 case 2:
6107 {
6108 int other_regs[3], other_numregs;
6109 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
6110 other_regs,
6111 &other_numregs);
6112 if (vpipe != VFP11_BAD
6113 && bfd_arm_vfp11_antidependency (writemask, regs,
6114 numregs))
6115 state = 3;
6116 else
6117 {
6118 state = 0;
6119 next_i = first_fmac + 4;
6120 }
6121 }
6122 break;
6123
6124 case 3:
6125 abort (); /* Should be unreachable. */
6126 }
6127
6128 if (state == 3)
6129 {
6130 elf32_vfp11_erratum_list *newerr = (elf32_vfp11_erratum_list *)
6131 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
6132 int errcount;
6133
6134 errcount = ++(elf32_arm_section_data (sec)->erratumcount);
6135
6136 newerr->u.b.vfp_insn = veneer_of_insn;
6137
6138 switch (span_type)
6139 {
6140 case 'a':
6141 newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
6142 break;
6143
6144 default:
6145 abort ();
6146 }
6147
6148 record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
6149 first_fmac);
6150
6151 newerr->vma = -1;
6152
6153 newerr->next = sec_data->erratumlist;
6154 sec_data->erratumlist = newerr;
6155
6156 state = 0;
6157 }
6158
6159 i = next_i;
6160 }
6161 }
6162
6163 if (contents != NULL
6164 && elf_section_data (sec)->this_hdr.contents != contents)
6165 free (contents);
6166 contents = NULL;
6167 }
6168
6169 return TRUE;
6170
6171 error_return:
6172 if (contents != NULL
6173 && elf_section_data (sec)->this_hdr.contents != contents)
6174 free (contents);
6175
6176 return FALSE;
6177 }
6178
6179 /* Find virtual-memory addresses for VFP11 erratum veneers and return locations
6180 after sections have been laid out, using specially-named symbols. */
6181
6182 void
6183 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
6184 struct bfd_link_info *link_info)
6185 {
6186 asection *sec;
6187 struct elf32_arm_link_hash_table *globals;
6188 char *tmp_name;
6189
6190 if (link_info->relocatable)
6191 return;
6192
6193 /* Skip if this bfd does not correspond to an ELF image. */
6194 if (! is_arm_elf (abfd))
6195 return;
6196
6197 globals = elf32_arm_hash_table (link_info);
6198 if (globals == NULL)
6199 return;
6200
6201 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
6202 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
6203
6204 for (sec = abfd->sections; sec != NULL; sec = sec->next)
6205 {
6206 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
6207 elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
6208
6209 for (; errnode != NULL; errnode = errnode->next)
6210 {
6211 struct elf_link_hash_entry *myh;
6212 bfd_vma vma;
6213
6214 switch (errnode->type)
6215 {
6216 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
6217 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
6218 /* Find veneer symbol. */
6219 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
6220 errnode->u.b.veneer->u.v.id);
6221
6222 myh = elf_link_hash_lookup
6223 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6224
6225 if (myh == NULL)
6226 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
6227 "`%s'"), abfd, tmp_name);
6228
6229 vma = myh->root.u.def.section->output_section->vma
6230 + myh->root.u.def.section->output_offset
6231 + myh->root.u.def.value;
6232
6233 errnode->u.b.veneer->vma = vma;
6234 break;
6235
6236 case VFP11_ERRATUM_ARM_VENEER:
6237 case VFP11_ERRATUM_THUMB_VENEER:
6238 /* Find return location. */
6239 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
6240 errnode->u.v.id);
6241
6242 myh = elf_link_hash_lookup
6243 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6244
6245 if (myh == NULL)
6246 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
6247 "`%s'"), abfd, tmp_name);
6248
6249 vma = myh->root.u.def.section->output_section->vma
6250 + myh->root.u.def.section->output_offset
6251 + myh->root.u.def.value;
6252
6253 errnode->u.v.branch->vma = vma;
6254 break;
6255
6256 default:
6257 abort ();
6258 }
6259 }
6260 }
6261
6262 free (tmp_name);
6263 }
6264
6265
6266 /* Set target relocation values needed during linking. */
6267
6268 void
6269 bfd_elf32_arm_set_target_relocs (struct bfd *output_bfd,
6270 struct bfd_link_info *link_info,
6271 int target1_is_rel,
6272 char * target2_type,
6273 int fix_v4bx,
6274 int use_blx,
6275 bfd_arm_vfp11_fix vfp11_fix,
6276 int no_enum_warn, int no_wchar_warn,
6277 int pic_veneer, int fix_cortex_a8)
6278 {
6279 struct elf32_arm_link_hash_table *globals;
6280
6281 globals = elf32_arm_hash_table (link_info);
6282 if (globals == NULL)
6283 return;
6284
6285 globals->target1_is_rel = target1_is_rel;
6286 if (strcmp (target2_type, "rel") == 0)
6287 globals->target2_reloc = R_ARM_REL32;
6288 else if (strcmp (target2_type, "abs") == 0)
6289 globals->target2_reloc = R_ARM_ABS32;
6290 else if (strcmp (target2_type, "got-rel") == 0)
6291 globals->target2_reloc = R_ARM_GOT_PREL;
6292 else
6293 {
6294 _bfd_error_handler (_("Invalid TARGET2 relocation type '%s'."),
6295 target2_type);
6296 }
6297 globals->fix_v4bx = fix_v4bx;
6298 globals->use_blx |= use_blx;
6299 globals->vfp11_fix = vfp11_fix;
6300 globals->pic_veneer = pic_veneer;
6301 globals->fix_cortex_a8 = fix_cortex_a8;
6302
6303 BFD_ASSERT (is_arm_elf (output_bfd));
6304 elf_arm_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
6305 elf_arm_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
6306 }
6307
6308 /* Replace the target offset of a Thumb bl or b.w instruction. */
6309
6310 static void
6311 insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
6312 {
6313 bfd_vma upper;
6314 bfd_vma lower;
6315 int reloc_sign;
6316
6317 BFD_ASSERT ((offset & 1) == 0);
6318
6319 upper = bfd_get_16 (abfd, insn);
6320 lower = bfd_get_16 (abfd, insn + 2);
6321 reloc_sign = (offset < 0) ? 1 : 0;
6322 upper = (upper & ~(bfd_vma) 0x7ff)
6323 | ((offset >> 12) & 0x3ff)
6324 | (reloc_sign << 10);
6325 lower = (lower & ~(bfd_vma) 0x2fff)
6326 | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
6327 | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
6328 | ((offset >> 1) & 0x7ff);
6329 bfd_put_16 (abfd, upper, insn);
6330 bfd_put_16 (abfd, lower, insn + 2);
6331 }
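
/* Editorial note: the fields written above follow the Thumb-2 BL/B.W
   encoding.  The upper halfword holds S (bit 10) and imm10 (bits 9:0);
   the lower halfword holds J1 (bit 13), J2 (bit 11) and imm11 (bits 10:0),
   with J1 = NOT (I1) XOR S and J2 = NOT (I2) XOR S, where I1 and I2 are
   bits 23 and 22 of the byte offset.  */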
6332
6333 /* Thumb code calling an ARM function. */
6334
6335 static int
6336 elf32_thumb_to_arm_stub (struct bfd_link_info * info,
6337 const char * name,
6338 bfd * input_bfd,
6339 bfd * output_bfd,
6340 asection * input_section,
6341 bfd_byte * hit_data,
6342 asection * sym_sec,
6343 bfd_vma offset,
6344 bfd_signed_vma addend,
6345 bfd_vma val,
6346 char **error_message)
6347 {
6348 asection * s = 0;
6349 bfd_vma my_offset;
6350 long int ret_offset;
6351 struct elf_link_hash_entry * myh;
6352 struct elf32_arm_link_hash_table * globals;
6353
6354 myh = find_thumb_glue (info, name, error_message);
6355 if (myh == NULL)
6356 return FALSE;
6357
6358 globals = elf32_arm_hash_table (info);
6359 BFD_ASSERT (globals != NULL);
6360 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6361
6362 my_offset = myh->root.u.def.value;
6363
6364 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6365 THUMB2ARM_GLUE_SECTION_NAME);
6366
6367 BFD_ASSERT (s != NULL);
6368 BFD_ASSERT (s->contents != NULL);
6369 BFD_ASSERT (s->output_section != NULL);
6370
6371 if ((my_offset & 0x01) == 0x01)
6372 {
6373 if (sym_sec != NULL
6374 && sym_sec->owner != NULL
6375 && !INTERWORK_FLAG (sym_sec->owner))
6376 {
6377 (*_bfd_error_handler)
6378 (_("%B(%s): warning: interworking not enabled.\n"
6379 " first occurrence: %B: thumb call to arm"),
6380 sym_sec->owner, input_bfd, name);
6381
6382 return FALSE;
6383 }
6384
6385 --my_offset;
6386 myh->root.u.def.value = my_offset;
6387
6388 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
6389 s->contents + my_offset);
6390
6391 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
6392 s->contents + my_offset + 2);
6393
6394 ret_offset =
6395 /* Address of destination of the stub. */
6396 ((bfd_signed_vma) val)
6397 - ((bfd_signed_vma)
6398 /* Offset from the start of the current section
6399 to the start of the stubs. */
6400 (s->output_offset
6401 /* Offset of the start of this stub from the start of the stubs. */
6402 + my_offset
6403 /* Address of the start of the current section. */
6404 + s->output_section->vma)
6405 /* The branch instruction is 4 bytes into the stub. */
6406 + 4
6407 /* ARM branches work from the pc of the instruction + 8. */
6408 + 8);
6409
6410 put_arm_insn (globals, output_bfd,
6411 (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
6412 s->contents + my_offset + 4);
6413 }
6414
6415 BFD_ASSERT (my_offset <= globals->thumb_glue_size);
6416
6417 /* Now go back and fix up the original BL insn to point to here. */
6418 ret_offset =
6419 /* Address of where the stub is located. */
6420 (s->output_section->vma + s->output_offset + my_offset)
6421 /* Address of where the BL is located. */
6422 - (input_section->output_section->vma + input_section->output_offset
6423 + offset)
6424 /* Addend in the relocation. */
6425 - addend
6426 /* Biassing for PC-relative addressing. */
6427 - 8;
6428
6429 insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);
6430
6431 return TRUE;
6432 }
6433
6434 /* Populate an Arm to Thumb stub. Returns the stub symbol. */
6435
6436 static struct elf_link_hash_entry *
6437 elf32_arm_create_thumb_stub (struct bfd_link_info * info,
6438 const char * name,
6439 bfd * input_bfd,
6440 bfd * output_bfd,
6441 asection * sym_sec,
6442 bfd_vma val,
6443 asection * s,
6444 char ** error_message)
6445 {
6446 bfd_vma my_offset;
6447 long int ret_offset;
6448 struct elf_link_hash_entry * myh;
6449 struct elf32_arm_link_hash_table * globals;
6450
6451 myh = find_arm_glue (info, name, error_message);
6452 if (myh == NULL)
6453 return NULL;
6454
6455 globals = elf32_arm_hash_table (info);
6456 BFD_ASSERT (globals != NULL);
6457 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6458
6459 my_offset = myh->root.u.def.value;
6460
6461 if ((my_offset & 0x01) == 0x01)
6462 {
6463 if (sym_sec != NULL
6464 && sym_sec->owner != NULL
6465 && !INTERWORK_FLAG (sym_sec->owner))
6466 {
6467 (*_bfd_error_handler)
6468 (_("%B(%s): warning: interworking not enabled.\n"
6469 " first occurrence: %B: arm call to thumb"),
6470 sym_sec->owner, input_bfd, name);
6471 }
6472
6473 --my_offset;
6474 myh->root.u.def.value = my_offset;
6475
6476 if (info->shared || globals->root.is_relocatable_executable
6477 || globals->pic_veneer)
6478 {
6479 /* For relocatable objects we can't use absolute addresses,
6480 so construct the address from a relative offset. */
6481 /* TODO: If the offset is small it's probably worth
6482 constructing the address with adds. */
6483 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
6484 s->contents + my_offset);
6485 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
6486 s->contents + my_offset + 4);
6487 put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
6488 s->contents + my_offset + 8);
6489 /* Adjust the offset by 4 for the position of the add,
6490 and 8 for the pipeline offset. */
6491 ret_offset = (val - (s->output_offset
6492 + s->output_section->vma
6493 + my_offset + 12))
6494 | 1;
6495 bfd_put_32 (output_bfd, ret_offset,
6496 s->contents + my_offset + 12);
6497 }
6498 else if (globals->use_blx)
6499 {
6500 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
6501 s->contents + my_offset);
6502
6503 /* It's a thumb address. Add the low order bit. */
6504 bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
6505 s->contents + my_offset + 4);
6506 }
6507 else
6508 {
6509 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
6510 s->contents + my_offset);
6511
6512 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
6513 s->contents + my_offset + 4);
6514
6515 /* It's a thumb address. Add the low order bit. */
6516 bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
6517 s->contents + my_offset + 8);
6518
6519 my_offset += 12;
6520 }
6521 }
6522
6523 BFD_ASSERT (my_offset <= globals->arm_glue_size);
6524
6525 return myh;
6526 }
6527
6528 /* Arm code calling a Thumb function. */
6529
6530 static int
6531 elf32_arm_to_thumb_stub (struct bfd_link_info * info,
6532 const char * name,
6533 bfd * input_bfd,
6534 bfd * output_bfd,
6535 asection * input_section,
6536 bfd_byte * hit_data,
6537 asection * sym_sec,
6538 bfd_vma offset,
6539 bfd_signed_vma addend,
6540 bfd_vma val,
6541 char **error_message)
6542 {
6543 unsigned long int tmp;
6544 bfd_vma my_offset;
6545 asection * s;
6546 long int ret_offset;
6547 struct elf_link_hash_entry * myh;
6548 struct elf32_arm_link_hash_table * globals;
6549
6550 globals = elf32_arm_hash_table (info);
6551 BFD_ASSERT (globals != NULL);
6552 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6553
6554 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6555 ARM2THUMB_GLUE_SECTION_NAME);
6556 BFD_ASSERT (s != NULL);
6557 BFD_ASSERT (s->contents != NULL);
6558 BFD_ASSERT (s->output_section != NULL);
6559
6560 myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
6561 sym_sec, val, s, error_message);
6562 if (!myh)
6563 return FALSE;
6564
6565 my_offset = myh->root.u.def.value;
6566 tmp = bfd_get_32 (input_bfd, hit_data);
6567 tmp = tmp & 0xFF000000;
6568
6569 /* Somehow these are both 4 too far, so subtract 8. */
6570 ret_offset = (s->output_offset
6571 + my_offset
6572 + s->output_section->vma
6573 - (input_section->output_offset
6574 + input_section->output_section->vma
6575 + offset + addend)
6576 - 8);
6577
6578 tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);
6579
6580 bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);
6581
6582 return TRUE;
6583 }
6584
6585 /* Populate Arm stub for an exported Thumb function. */
6586
6587 static bfd_boolean
6588 elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)
6589 {
6590 struct bfd_link_info * info = (struct bfd_link_info *) inf;
6591 asection * s;
6592 struct elf_link_hash_entry * myh;
6593 struct elf32_arm_link_hash_entry *eh;
6594 struct elf32_arm_link_hash_table * globals;
6595 asection *sec;
6596 bfd_vma val;
6597 char *error_message;
6598
6599 eh = elf32_arm_hash_entry (h);
6600 /* Allocate stubs for exported Thumb functions on v4t. */
6601 if (eh->export_glue == NULL)
6602 return TRUE;
6603
6604 globals = elf32_arm_hash_table (info);
6605 BFD_ASSERT (globals != NULL);
6606 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6607
6608 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6609 ARM2THUMB_GLUE_SECTION_NAME);
6610 BFD_ASSERT (s != NULL);
6611 BFD_ASSERT (s->contents != NULL);
6612 BFD_ASSERT (s->output_section != NULL);
6613
6614 sec = eh->export_glue->root.u.def.section;
6615
6616 BFD_ASSERT (sec->output_section != NULL);
6617
6618 val = eh->export_glue->root.u.def.value + sec->output_offset
6619 + sec->output_section->vma;
6620
6621 myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
6622 h->root.u.def.section->owner,
6623 globals->obfd, sec, val, s,
6624 &error_message);
6625 BFD_ASSERT (myh);
6626 return TRUE;
6627 }
6628
6629 /* Populate ARMv4 BX veneers. Returns the absolute address of the veneer. */
6630
6631 static bfd_vma
6632 elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
6633 {
6634 bfd_byte *p;
6635 bfd_vma glue_addr;
6636 asection *s;
6637 struct elf32_arm_link_hash_table *globals;
6638
6639 globals = elf32_arm_hash_table (info);
6640 BFD_ASSERT (globals != NULL);
6641 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6642
6643 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6644 ARM_BX_GLUE_SECTION_NAME);
6645 BFD_ASSERT (s != NULL);
6646 BFD_ASSERT (s->contents != NULL);
6647 BFD_ASSERT (s->output_section != NULL);
6648
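/* bx_glue_offset[reg] keeps the veneer's offset in its upper bits; bit 1
   means a slot has been reserved for this register and bit 0 records that
   the veneer contents have already been written out.  */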
6649 BFD_ASSERT (globals->bx_glue_offset[reg] & 2);
6650
6651 glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;
6652
6653 if ((globals->bx_glue_offset[reg] & 1) == 0)
6654 {
6655 p = s->contents + glue_addr;
6656 bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
6657 bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
6658 bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
6659 globals->bx_glue_offset[reg] |= 1;
6660 }
6661
6662 return glue_addr + s->output_section->vma + s->output_offset;
6663 }
6664
6665 /* Generate Arm stubs for exported Thumb symbols. */
6666 static void
6667 elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
6668 struct bfd_link_info *link_info)
6669 {
6670 struct elf32_arm_link_hash_table * globals;
6671
6672 if (link_info == NULL)
6673 /* Ignore this if we are not called by the ELF backend linker. */
6674 return;
6675
6676 globals = elf32_arm_hash_table (link_info);
6677 if (globals == NULL)
6678 return;
6679
6680 /* If blx is available then exported Thumb symbols are OK and there is
6681 nothing to do. */
6682 if (globals->use_blx)
6683 return;
6684
6685 elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
6686 link_info);
6687 }
6688
6689 /* Some relocations map to different relocations depending on the
6690 target. Return the real relocation. */
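/* Editorial note: whether TARGET1 means REL32 or ABS32, and what TARGET2
   maps to, comes from the linker options --target1-rel, --target1-abs and
   --target2=<type>, as recorded by bfd_elf32_arm_set_target_relocs above.  */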
6691
6692 static int
6693 arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
6694 int r_type)
6695 {
6696 switch (r_type)
6697 {
6698 case R_ARM_TARGET1:
6699 if (globals->target1_is_rel)
6700 return R_ARM_REL32;
6701 else
6702 return R_ARM_ABS32;
6703
6704 case R_ARM_TARGET2:
6705 return globals->target2_reloc;
6706
6707 default:
6708 return r_type;
6709 }
6710 }
6711
6712 /* Return the base VMA address which should be subtracted from real addresses
6713 when resolving @dtpoff relocation.
6714 This is PT_TLS segment p_vaddr. */
6715
6716 static bfd_vma
6717 dtpoff_base (struct bfd_link_info *info)
6718 {
6719 /* If tls_sec is NULL, we should have signalled an error already. */
6720 if (elf_hash_table (info)->tls_sec == NULL)
6721 return 0;
6722 return elf_hash_table (info)->tls_sec->vma;
6723 }
6724
6725 /* Return the relocation value for @tpoff relocation
6726 if STT_TLS virtual address is ADDRESS. */
6727
6728 static bfd_vma
6729 tpoff (struct bfd_link_info *info, bfd_vma address)
6730 {
6731 struct elf_link_hash_table *htab = elf_hash_table (info);
6732 bfd_vma base;
6733
6734 /* If tls_sec is NULL, we should have signalled an error already. */
6735 if (htab->tls_sec == NULL)
6736 return 0;
6737 base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
6738 return address - htab->tls_sec->vma + base;
6739 }
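
/* Editorial note: this implements the ARM "variant 1" TLS layout: the
   thread pointer addresses a TCB_SIZE-byte control block and the TLS
   segment follows it at the segment's own alignment, so @tpoff values
   begin at align_power (TCB_SIZE, tls_sec->alignment_power) rather than
   at zero.  */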
6740
6741 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
6742 VALUE is the relocation value. */
6743
6744 static bfd_reloc_status_type
6745 elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
6746 {
6747 if (value > 0xfff)
6748 return bfd_reloc_overflow;
6749
6750 value |= bfd_get_32 (abfd, data) & 0xfffff000;
6751 bfd_put_32 (abfd, value, data);
6752 return bfd_reloc_ok;
6753 }
6754
6755 /* For a given value of n, calculate the value of G_n as required to
6756 deal with group relocations. We return it in the form of an
6757 encoded constant-and-rotation, together with the final residual. If n is
6758 specified as less than zero, then final_residual is filled with the
6759 input value and no further action is performed. */
6760
6761 static bfd_vma
6762 calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
6763 {
6764 int current_n;
6765 bfd_vma g_n;
6766 bfd_vma encoded_g_n = 0;
6767 bfd_vma residual = value; /* Also known as Y_n. */
6768
6769 for (current_n = 0; current_n <= n; current_n++)
6770 {
6771 int shift;
6772
6773 /* Calculate which part of the value to mask. */
6774 if (residual == 0)
6775 shift = 0;
6776 else
6777 {
6778 int msb;
6779
6780 /* Determine the most significant bit in the residual and
6781 align the resulting value to a 2-bit boundary. */
6782 for (msb = 30; msb >= 0; msb -= 2)
6783 if (residual & (3 << msb))
6784 break;
6785
6786 /* The desired shift is now (msb - 6), or zero, whichever
6787 is the greater. */
6788 shift = msb - 6;
6789 if (shift < 0)
6790 shift = 0;
6791 }
6792
6793 /* Calculate g_n in 32-bit as well as encoded constant+rotation form. */
6794 g_n = residual & (0xff << shift);
6795 encoded_g_n = (g_n >> shift)
6796 | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);
6797
6798 /* Calculate the residual for the next time around. */
6799 residual &= ~g_n;
6800 }
6801
6802 *final_residual = residual;
6803
6804 return encoded_g_n;
6805 }
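
/* Editorial sketch (compiled out): a worked example of the G_n computation
   above for the hypothetical value 0x12345.  G_0 is 0x12000 (imm8 0x48
   rotated left by 10, encoded as 0xb48) and G_1 is 0x344 (imm8 0xd1
   rotated left by 2, encoded as 0xfd1), leaving a final residual of 1.  */
#if 0
static void
calculate_group_reloc_mask_example (void)
{
  bfd_vma residual;

  BFD_ASSERT (calculate_group_reloc_mask (0x12345, 0, &residual) == 0xb48);
  BFD_ASSERT (residual == 0x345);

  BFD_ASSERT (calculate_group_reloc_mask (0x12345, 1, &residual) == 0xfd1);
  BFD_ASSERT (residual == 1);
}
#endif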
6806
6807 /* Given an ARM instruction, determine whether it is an ADD or a SUB.
6808 Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. */
6809
6810 static int
6811 identify_add_or_sub (bfd_vma insn)
6812 {
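/* The data-processing opcode field is bits 24:21; after masking with
   0x1e00000, ADD (0b0100) appears as 1 << 23 and SUB (0b0010) as 1 << 22.  */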
6813 int opcode = insn & 0x1e00000;
6814
6815 if (opcode == 1 << 23) /* ADD */
6816 return 1;
6817
6818 if (opcode == 1 << 22) /* SUB */
6819 return -1;
6820
6821 return 0;
6822 }
6823
6824 /* Perform a relocation as part of a final link. */
6825
6826 static bfd_reloc_status_type
6827 elf32_arm_final_link_relocate (reloc_howto_type * howto,
6828 bfd * input_bfd,
6829 bfd * output_bfd,
6830 asection * input_section,
6831 bfd_byte * contents,
6832 Elf_Internal_Rela * rel,
6833 bfd_vma value,
6834 struct bfd_link_info * info,
6835 asection * sym_sec,
6836 const char * sym_name,
6837 int sym_flags,
6838 struct elf_link_hash_entry * h,
6839 bfd_boolean * unresolved_reloc_p,
6840 char ** error_message)
6841 {
6842 unsigned long r_type = howto->type;
6843 unsigned long r_symndx;
6844 bfd_byte * hit_data = contents + rel->r_offset;
6845 bfd * dynobj = NULL;
6846 Elf_Internal_Shdr * symtab_hdr;
6847 struct elf_link_hash_entry ** sym_hashes;
6848 bfd_vma * local_got_offsets;
6849 asection * sgot = NULL;
6850 asection * splt = NULL;
6851 asection * sreloc = NULL;
6852 bfd_vma addend;
6853 bfd_signed_vma signed_addend;
6854 struct elf32_arm_link_hash_table * globals;
6855
6856 globals = elf32_arm_hash_table (info);
6857 if (globals == NULL)
6858 return bfd_reloc_notsupported;
6859
6860 BFD_ASSERT (is_arm_elf (input_bfd));
6861
6862 /* Some relocation types map to different relocations depending on the
6863 target. We pick the right one here. */
6864 r_type = arm_real_reloc_type (globals, r_type);
6865 if (r_type != howto->type)
6866 howto = elf32_arm_howto_from_type (r_type);
6867
6868 /* If the start address has been set, then set the EF_ARM_HASENTRY
6869 flag. Setting this more than once is redundant, but the cost is
6870 not too high, and it keeps the code simple.
6871
6872 The test is done here, rather than somewhere else, because the
6873 start address is only set just before the final link commences.
6874
6875 Note - if the user deliberately sets a start address of 0, the
6876 flag will not be set. */
6877 if (bfd_get_start_address (output_bfd) != 0)
6878 elf_elfheader (output_bfd)->e_flags |= EF_ARM_HASENTRY;
6879
6880 dynobj = elf_hash_table (info)->dynobj;
6881 if (dynobj)
6882 {
6883 sgot = bfd_get_section_by_name (dynobj, ".got");
6884 splt = bfd_get_section_by_name (dynobj, ".plt");
6885 }
6886 symtab_hdr = & elf_symtab_hdr (input_bfd);
6887 sym_hashes = elf_sym_hashes (input_bfd);
6888 local_got_offsets = elf_local_got_offsets (input_bfd);
6889 r_symndx = ELF32_R_SYM (rel->r_info);
6890
6891 if (globals->use_rel)
6892 {
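/* REL format: the addend is stored in the instruction itself, so extract
   the field selected by src_mask and sign-extend it by hand.  */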
6893 addend = bfd_get_32 (input_bfd, hit_data) & howto->src_mask;
6894
6895 if (addend & ((howto->src_mask + 1) >> 1))
6896 {
6897 signed_addend = -1;
6898 signed_addend &= ~ howto->src_mask;
6899 signed_addend |= addend;
6900 }
6901 else
6902 signed_addend = addend;
6903 }
6904 else
6905 addend = signed_addend = rel->r_addend;
6906
6907 switch (r_type)
6908 {
6909 case R_ARM_NONE:
6910 /* We don't need to find a value for this symbol. It's just a
6911 marker. */
6912 *unresolved_reloc_p = FALSE;
6913 return bfd_reloc_ok;
6914
6915 case R_ARM_ABS12:
6916 if (!globals->vxworks_p)
6917 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
6918
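/* Fall through.  On VxWorks, R_ARM_ABS12 shares the dynamic relocation
   handling below with the word-sized relocations.  */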
6919 case R_ARM_PC24:
6920 case R_ARM_ABS32:
6921 case R_ARM_ABS32_NOI:
6922 case R_ARM_REL32:
6923 case R_ARM_REL32_NOI:
6924 case R_ARM_CALL:
6925 case R_ARM_JUMP24:
6926 case R_ARM_XPC25:
6927 case R_ARM_PREL31:
6928 case R_ARM_PLT32:
6929 /* Handle relocations which should use the PLT entry. ABS32/REL32
6930 will use the symbol's value, which may point to a PLT entry, but we
6931 don't need to handle that here. If we created a PLT entry, all
6932 branches in this object should go to it, except if the PLT is too
6933 far away, in which case a long branch stub should be inserted. */
6934 if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
6935 && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
6936 && r_type != R_ARM_CALL
6937 && r_type != R_ARM_JUMP24
6938 && r_type != R_ARM_PLT32)
6939 && h != NULL
6940 && splt != NULL
6941 && h->plt.offset != (bfd_vma) -1)
6942 {
6943 /* If we've created a .plt section, and assigned a PLT entry to
6944 this function, it should not be known to bind locally. If
6945 it were, we would have cleared the PLT entry. */
6946 BFD_ASSERT (!SYMBOL_CALLS_LOCAL (info, h));
6947
6948 value = (splt->output_section->vma
6949 + splt->output_offset
6950 + h->plt.offset);
6951 *unresolved_reloc_p = FALSE;
6952 return _bfd_final_link_relocate (howto, input_bfd, input_section,
6953 contents, rel->r_offset, value,
6954 rel->r_addend);
6955 }
6956
6957 /* When generating a shared object or relocatable executable, these
6958 relocations are copied into the output file to be resolved at
6959 run time. */
6960 if ((info->shared || globals->root.is_relocatable_executable)
6961 && (input_section->flags & SEC_ALLOC)
6962 && !(globals->vxworks_p
6963 && strcmp (input_section->output_section->name,
6964 ".tls_vars") == 0)
6965 && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
6966 || !SYMBOL_CALLS_LOCAL (info, h))
6967 && (!strstr (input_section->name, STUB_SUFFIX))
6968 && (h == NULL
6969 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
6970 || h->root.type != bfd_link_hash_undefweak)
6971 && r_type != R_ARM_PC24
6972 && r_type != R_ARM_CALL
6973 && r_type != R_ARM_JUMP24
6974 && r_type != R_ARM_PREL31
6975 && r_type != R_ARM_PLT32)
6976 {
6977 Elf_Internal_Rela outrel;
6978 bfd_byte *loc;
6979 bfd_boolean skip, relocate;
6980
6981 *unresolved_reloc_p = FALSE;
6982
6983 if (sreloc == NULL)
6984 {
6985 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
6986 ! globals->use_rel);
6987
6988 if (sreloc == NULL)
6989 return bfd_reloc_notsupported;
6990 }
6991
6992 skip = FALSE;
6993 relocate = FALSE;
6994
6995 outrel.r_addend = addend;
6996 outrel.r_offset =
6997 _bfd_elf_section_offset (output_bfd, info, input_section,
6998 rel->r_offset);
6999 if (outrel.r_offset == (bfd_vma) -1)
7000 skip = TRUE;
7001 else if (outrel.r_offset == (bfd_vma) -2)
7002 skip = TRUE, relocate = TRUE;
7003 outrel.r_offset += (input_section->output_section->vma
7004 + input_section->output_offset);
7005
7006 if (skip)
7007 memset (&outrel, 0, sizeof outrel);
7008 else if (h != NULL
7009 && h->dynindx != -1
7010 && (!info->shared
7011 || !info->symbolic
7012 || !h->def_regular))
7013 outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
7014 else
7015 {
7016 int symbol;
7017
7018 /* This symbol is local, or marked to become local. */
7019 if (sym_flags == STT_ARM_TFUNC)
7020 value |= 1;
7021 if (globals->symbian_p)
7022 {
7023 asection *osec;
7024
7025 /* On Symbian OS, the data segment and text segment
7026 can be relocated independently. Therefore, we
7027 must indicate the segment to which this
7028 relocation is relative. The BPABI allows us to
7029 use any symbol in the right segment; we just use
7030 the section symbol as it is convenient. (We
7031 cannot use the symbol given by "h" directly as it
7032 will not appear in the dynamic symbol table.)
7033
7034 Note that the dynamic linker ignores the section
7035 symbol value, so we don't subtract osec->vma
7036 from the emitted reloc addend. */
7037 if (sym_sec)
7038 osec = sym_sec->output_section;
7039 else
7040 osec = input_section->output_section;
7041 symbol = elf_section_data (osec)->dynindx;
7042 if (symbol == 0)
7043 {
7044 struct elf_link_hash_table *htab = elf_hash_table (info);
7045
7046 if ((osec->flags & SEC_READONLY) == 0
7047 && htab->data_index_section != NULL)
7048 osec = htab->data_index_section;
7049 else
7050 osec = htab->text_index_section;
7051 symbol = elf_section_data (osec)->dynindx;
7052 }
7053 BFD_ASSERT (symbol != 0);
7054 }
7055 else
7056 /* On SVR4-ish systems, the dynamic loader cannot
7057 relocate the text and data segments independently,
7058 so the symbol does not matter. */
7059 symbol = 0;
7060 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
7061 if (globals->use_rel)
7062 relocate = TRUE;
7063 else
7064 outrel.r_addend += value;
7065 }
7066
7067 loc = sreloc->contents;
7068 loc += sreloc->reloc_count++ * RELOC_SIZE (globals);
7069 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7070
7071 /* If this reloc is against an external symbol, we do not want to
7072 fiddle with the addend. Otherwise, we need to include the symbol
7073 value so that it becomes an addend for the dynamic reloc. */
7074 if (! relocate)
7075 return bfd_reloc_ok;
7076
7077 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7078 contents, rel->r_offset, value,
7079 (bfd_vma) 0);
7080 }
7081 else switch (r_type)
7082 {
7083 case R_ARM_ABS12:
7084 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
7085
7086 case R_ARM_XPC25: /* Arm BLX instruction. */
7087 case R_ARM_CALL:
7088 case R_ARM_JUMP24:
7089 case R_ARM_PC24: /* Arm B/BL instruction. */
7090 case R_ARM_PLT32:
7091 {
7092 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
7093
7094 if (r_type == R_ARM_XPC25)
7095 {
7096 /* Check for Arm calling Arm function. */
7097 /* FIXME: Should we translate the instruction into a BL
7098 instruction instead? */
7099 if (sym_flags != STT_ARM_TFUNC)
7100 (*_bfd_error_handler)
7101 (_("\%B: Warning: Arm BLX instruction targets Arm function '%s'."),
7102 input_bfd,
7103 h ? h->root.root.string : "(local)");
7104 }
7105 else if (r_type == R_ARM_PC24)
7106 {
7107 /* Check for Arm calling Thumb function. */
7108 if (sym_flags == STT_ARM_TFUNC)
7109 {
7110 if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
7111 output_bfd, input_section,
7112 hit_data, sym_sec, rel->r_offset,
7113 signed_addend, value,
7114 error_message))
7115 return bfd_reloc_ok;
7116 else
7117 return bfd_reloc_dangerous;
7118 }
7119 }
7120
7121 /* Check if a stub has to be inserted because the
7122 destination is too far or we are changing mode. */
7123 if ( r_type == R_ARM_CALL
7124 || r_type == R_ARM_JUMP24
7125 || r_type == R_ARM_PLT32)
7126 {
7127 enum elf32_arm_stub_type stub_type = arm_stub_none;
7128 struct elf32_arm_link_hash_entry *hash;
7129
7130 hash = (struct elf32_arm_link_hash_entry *) h;
7131 stub_type = arm_type_of_stub (info, input_section, rel,
7132 &sym_flags, hash,
7133 value, sym_sec,
7134 input_bfd, sym_name);
7135
7136 if (stub_type != arm_stub_none)
7137 {
7138 /* The target is out of reach, so redirect the
7139 branch to the local stub for this function. */
7140
7141 stub_entry = elf32_arm_get_stub_entry (input_section,
7142 sym_sec, h,
7143 rel, globals,
7144 stub_type);
7145 if (stub_entry != NULL)
7146 value = (stub_entry->stub_offset
7147 + stub_entry->stub_sec->output_offset
7148 + stub_entry->stub_sec->output_section->vma);
7149 }
7150 else
7151 {
7152 /* If the call goes through a PLT entry, make sure to
7153 check distance to the right destination address. */
7154 if (h != NULL
7155 && splt != NULL
7156 && h->plt.offset != (bfd_vma) -1)
7157 {
7158 value = (splt->output_section->vma
7159 + splt->output_offset
7160 + h->plt.offset);
7161 *unresolved_reloc_p = FALSE;
7162 /* The PLT entry is in ARM mode, regardless of the
7163 target function. */
7164 sym_flags = STT_FUNC;
7165 }
7166 }
7167 }
7168
7169 /* The ARM ELF ABI says that this reloc is computed as: S - P + A
7170 where:
7171 S is the address of the symbol in the relocation.
7172 P is address of the instruction being relocated.
7173 A is the addend (extracted from the instruction) in bytes.
7174
7175 S is held in 'value'.
7176 P is the base address of the section containing the
7177 instruction plus the offset of the reloc into that
7178 section, ie:
7179 (input_section->output_section->vma +
7180 input_section->output_offset +
7181 rel->r_offset).
7182 A is the addend, converted into bytes, ie:
7183 (signed_addend * 4)
7184
7185 Note: None of these operations have knowledge of the pipeline
7186 size of the processor, thus it is up to the assembler to
7187 encode this information into the addend. */
7188 value -= (input_section->output_section->vma
7189 + input_section->output_offset);
7190 value -= rel->r_offset;
7191 if (globals->use_rel)
7192 value += (signed_addend << howto->size);
7193 else
7194 /* RELA addends do not have to be adjusted by howto->size. */
7195 value += signed_addend;
7196
7197 signed_addend = value;
7198 signed_addend >>= howto->rightshift;
7199
7200 /* A branch to an undefined weak symbol is turned into a jump to
7201 the next instruction unless a PLT entry will be created.
7202 Do the same for local undefined symbols.
7203 The jump to the next instruction is optimized as a NOP depending
7204 on the architecture. */
7205 if (h ? (h->root.type == bfd_link_hash_undefweak
7206 && !(splt != NULL && h->plt.offset != (bfd_vma) -1))
7207 : bfd_is_und_section (sym_sec))
7208 {
7209 value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000);
7210
7211 if (arch_has_arm_nop (globals))
7212 value |= 0x0320f000;
7213 else
7214 value |= 0x01a00000; /* Using pre-UAL nop: mov r0, r0. */
7215 }
7216 else
7217 {
7218 /* Perform a signed range check. */
7219 if ( signed_addend > ((bfd_signed_vma) (howto->dst_mask >> 1))
7220 || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
7221 return bfd_reloc_overflow;
7222
7223 addend = (value & 2);
7224
7225 value = (signed_addend & howto->dst_mask)
7226 | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));
7227
7228 if (r_type == R_ARM_CALL)
7229 {
7230 /* Set the H bit in the BLX instruction. */
7231 if (sym_flags == STT_ARM_TFUNC)
7232 {
7233 if (addend)
7234 value |= (1 << 24);
7235 else
7236 value &= ~(bfd_vma)(1 << 24);
7237 }
7238
7239 /* Select the correct instruction (BL or BLX). */
7240 /* Only if we are not handling a BL to a stub. In this
7241 case, mode switching is performed by the stub. */
7242 if (sym_flags == STT_ARM_TFUNC && !stub_entry)
7243 value |= (1 << 28);
7244 else
7245 {
7246 value &= ~(bfd_vma)(1 << 28);
7247 value |= (1 << 24);
7248 }
7249 }
7250 }
7251 }
7252 break;
7253
7254 case R_ARM_ABS32:
7255 value += addend;
7256 if (sym_flags == STT_ARM_TFUNC)
7257 value |= 1;
7258 break;
7259
7260 case R_ARM_ABS32_NOI:
7261 value += addend;
7262 break;
7263
7264 case R_ARM_REL32:
7265 value += addend;
7266 if (sym_flags == STT_ARM_TFUNC)
7267 value |= 1;
7268 value -= (input_section->output_section->vma
7269 + input_section->output_offset + rel->r_offset);
7270 break;
7271
7272 case R_ARM_REL32_NOI:
7273 value += addend;
7274 value -= (input_section->output_section->vma
7275 + input_section->output_offset + rel->r_offset);
7276 break;
7277
7278 case R_ARM_PREL31:
7279 value -= (input_section->output_section->vma
7280 + input_section->output_offset + rel->r_offset);
7281 value += signed_addend;
7282 if (! h || h->root.type != bfd_link_hash_undefweak)
7283 {
7284 /* Check for overflow. */
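	      /* VALUE ^ (VALUE >> 1) has bit 30 set exactly when bits 31
		 and 30 of VALUE differ, i.e. when the result does not fit
		 in a signed 31-bit field.  */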
7285 if ((value ^ (value >> 1)) & (1 << 30))
7286 return bfd_reloc_overflow;
7287 }
7288 value &= 0x7fffffff;
7289 value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
7290 if (sym_flags == STT_ARM_TFUNC)
7291 value |= 1;
7292 break;
7293 }
7294
7295 bfd_put_32 (input_bfd, value, hit_data);
7296 return bfd_reloc_ok;
7297
7298 case R_ARM_ABS8:
7299 value += addend;
7300
7301 /* There is no way to tell whether the user intended to use a signed or
7302 unsigned addend. When checking for overflow we accept either,
7303 as specified by the AAELF. */
7304 if ((long) value > 0xff || (long) value < -0x80)
7305 return bfd_reloc_overflow;
7306
7307 bfd_put_8 (input_bfd, value, hit_data);
7308 return bfd_reloc_ok;
7309
7310 case R_ARM_ABS16:
7311 value += addend;
7312
7313 /* See comment for R_ARM_ABS8. */
7314 if ((long) value > 0xffff || (long) value < -0x8000)
7315 return bfd_reloc_overflow;
7316
7317 bfd_put_16 (input_bfd, value, hit_data);
7318 return bfd_reloc_ok;
7319
7320 case R_ARM_THM_ABS5:
7321       /* Support ldr and str instructions for Thumb.  */
7322 if (globals->use_rel)
7323 {
7324 /* Need to refetch addend. */
7325 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
7326 /* ??? Need to determine shift amount from operand size. */
7327 addend >>= howto->rightshift;
7328 }
7329 value += addend;
7330
7331 /* ??? Isn't value unsigned? */
7332 if ((long) value > 0x1f || (long) value < -0x10)
7333 return bfd_reloc_overflow;
7334
7335 /* ??? Value needs to be properly shifted into place first. */
7336 value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
7337 bfd_put_16 (input_bfd, value, hit_data);
7338 return bfd_reloc_ok;
7339
7340 case R_ARM_THM_ALU_PREL_11_0:
7341 /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw). */
7342 {
7343 bfd_vma insn;
7344 bfd_signed_vma relocation;
7345
7346 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
7347 | bfd_get_16 (input_bfd, hit_data + 2);
7348
7349 if (globals->use_rel)
7350 {
7351 signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
7352 | ((insn & (1 << 26)) >> 15);
7353 if (insn & 0xf00000)
7354 signed_addend = -signed_addend;
7355 }
7356
7357 relocation = value + signed_addend;
7358 relocation -= (input_section->output_section->vma
7359 + input_section->output_offset
7360 + rel->r_offset);
7361
7362 value = abs (relocation);
7363
7364 if (value >= 0x1000)
7365 return bfd_reloc_overflow;
7366
7367 insn = (insn & 0xfb0f8f00) | (value & 0xff)
7368 | ((value & 0x700) << 4)
7369 | ((value & 0x800) << 15);
7370 if (relocation < 0)
7371 insn |= 0xa00000;
7372
7373 bfd_put_16 (input_bfd, insn >> 16, hit_data);
7374 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
7375
7376 return bfd_reloc_ok;
7377 }
7378
7379 case R_ARM_THM_PC8:
7380 /* PR 10073: This reloc is not generated by the GNU toolchain,
7381 but it is supported for compatibility with third party libraries
7382 	 generated by other compilers, specifically the ARM and IAR compilers.  */
7383 {
7384 bfd_vma insn;
7385 bfd_signed_vma relocation;
7386
7387 insn = bfd_get_16 (input_bfd, hit_data);
7388
7389 if (globals->use_rel)
7390 addend = (insn & 0x00ff) << 2;
7391
7392 relocation = value + addend;
7393 relocation -= (input_section->output_section->vma
7394 + input_section->output_offset
7395 + rel->r_offset);
7396
7397 value = abs (relocation);
7398
7399 /* We do not check for overflow of this reloc. Although strictly
7400 speaking this is incorrect, it appears to be necessary in order
7401 to work with IAR generated relocs. Since GCC and GAS do not
7402 generate R_ARM_THM_PC8 relocs, the lack of a check should not be
7403 a problem for them. */
7404 value &= 0x3fc;
7405
7406 insn = (insn & 0xff00) | (value >> 2);
7407
7408 bfd_put_16 (input_bfd, insn, hit_data);
7409
7410 return bfd_reloc_ok;
7411 }
7412
7413 case R_ARM_THM_PC12:
7414 /* Corresponds to: ldr.w reg, [pc, #offset]. */
7415 {
7416 bfd_vma insn;
7417 bfd_signed_vma relocation;
7418
7419 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
7420 | bfd_get_16 (input_bfd, hit_data + 2);
7421
7422 if (globals->use_rel)
7423 {
7424 signed_addend = insn & 0xfff;
7425 if (!(insn & (1 << 23)))
7426 signed_addend = -signed_addend;
7427 }
7428
7429 relocation = value + signed_addend;
7430 relocation -= (input_section->output_section->vma
7431 + input_section->output_offset
7432 + rel->r_offset);
7433
7434 value = abs (relocation);
7435
7436 if (value >= 0x1000)
7437 return bfd_reloc_overflow;
7438
7439 insn = (insn & 0xff7ff000) | value;
7440 if (relocation >= 0)
7441 insn |= (1 << 23);
7442
7443 bfd_put_16 (input_bfd, insn >> 16, hit_data);
7444 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
7445
7446 return bfd_reloc_ok;
7447 }
7448
7449 case R_ARM_THM_XPC22:
7450 case R_ARM_THM_CALL:
7451 case R_ARM_THM_JUMP24:
7452       /* Thumb BL (branch and link) and related branch instructions.  */
7453 {
7454 bfd_vma relocation;
7455 bfd_vma reloc_sign;
7456 bfd_boolean overflow = FALSE;
7457 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
7458 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
7459 bfd_signed_vma reloc_signed_max;
7460 bfd_signed_vma reloc_signed_min;
7461 bfd_vma check;
7462 bfd_signed_vma signed_check;
7463 int bitsize;
7464 const int thumb2 = using_thumb2 (globals);
7465
7466 /* A branch to an undefined weak symbol is turned into a jump to
7467 the next instruction unless a PLT entry will be created.
7468 The jump to the next instruction is optimized as a NOP.W for
7469 Thumb-2 enabled architectures. */
7470 if (h && h->root.type == bfd_link_hash_undefweak
7471 && !(splt != NULL && h->plt.offset != (bfd_vma) -1))
7472 {
7473 if (arch_has_thumb2_nop (globals))
7474 {
7475 bfd_put_16 (input_bfd, 0xf3af, hit_data);
7476 bfd_put_16 (input_bfd, 0x8000, hit_data + 2);
7477 }
7478 else
7479 {
7480 bfd_put_16 (input_bfd, 0xe000, hit_data);
7481 bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
7482 }
7483 return bfd_reloc_ok;
7484 }
7485
7486 /* Fetch the addend. We use the Thumb-2 encoding (backwards compatible
7487 with Thumb-1) involving the J1 and J2 bits. */
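	/* For reference (Thumb-2 BL/BLX encoding): the upper halfword holds
	   S (bit 10) and imm10 (bits 9:0); the lower halfword holds J1
	   (bit 13), J2 (bit 11) and imm11 (bits 10:0).  The 25-bit offset is
	     I1 = NOT (J1 XOR S),  I2 = NOT (J2 XOR S)
	     offset = SignExtend (S:I1:I2:imm10:imm11:'0')
	   which is what the code below reconstructs.  */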
7488 if (globals->use_rel)
7489 {
7490 bfd_vma s = (upper_insn & (1 << 10)) >> 10;
7491 bfd_vma upper = upper_insn & 0x3ff;
7492 bfd_vma lower = lower_insn & 0x7ff;
7493 bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
7494 bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
7495 bfd_vma i1 = j1 ^ s ? 0 : 1;
7496 bfd_vma i2 = j2 ^ s ? 0 : 1;
7497
7498 addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
7499 /* Sign extend. */
7500 addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);
7501
7502 signed_addend = addend;
7503 }
7504
7505 if (r_type == R_ARM_THM_XPC22)
7506 {
7507 /* Check for Thumb to Thumb call. */
7508 /* FIXME: Should we translate the instruction into a BL
7509 instruction instead ? */
7510 if (sym_flags == STT_ARM_TFUNC)
7511 (*_bfd_error_handler)
7512 (_("%B: Warning: Thumb BLX instruction targets thumb function '%s'."),
7513 input_bfd,
7514 h ? h->root.root.string : "(local)");
7515 }
7516 else
7517 {
7518 /* If it is not a call to Thumb, assume call to Arm.
7519 If it is a call relative to a section name, then it is not a
7520 function call at all, but rather a long jump. Calls through
7521 the PLT do not require stubs. */
7522 if (sym_flags != STT_ARM_TFUNC && sym_flags != STT_SECTION
7523 && (h == NULL || splt == NULL
7524 || h->plt.offset == (bfd_vma) -1))
7525 {
7526 if (globals->use_blx && r_type == R_ARM_THM_CALL)
7527 {
7528 /* Convert BL to BLX. */
7529 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7530 }
7531 else if (( r_type != R_ARM_THM_CALL)
7532 && (r_type != R_ARM_THM_JUMP24))
7533 {
7534 if (elf32_thumb_to_arm_stub
7535 (info, sym_name, input_bfd, output_bfd, input_section,
7536 hit_data, sym_sec, rel->r_offset, signed_addend, value,
7537 error_message))
7538 return bfd_reloc_ok;
7539 else
7540 return bfd_reloc_dangerous;
7541 }
7542 }
7543 else if (sym_flags == STT_ARM_TFUNC && globals->use_blx
7544 && r_type == R_ARM_THM_CALL)
7545 {
7546 /* Make sure this is a BL. */
7547 lower_insn |= 0x1800;
7548 }
7549 }
7550
7551 enum elf32_arm_stub_type stub_type = arm_stub_none;
7552 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
7553 {
7554 /* Check if a stub has to be inserted because the destination
7555 is too far. */
7556 struct elf32_arm_stub_hash_entry *stub_entry;
7557 struct elf32_arm_link_hash_entry *hash;
7558
7559 hash = (struct elf32_arm_link_hash_entry *) h;
7560
7561 stub_type = arm_type_of_stub (info, input_section, rel,
7562 &sym_flags, hash, value, sym_sec,
7563 input_bfd, sym_name);
7564
7565 if (stub_type != arm_stub_none)
7566 {
7567 /* The target is out of reach or we are changing modes, so
7568 redirect the branch to the local stub for this
7569 function. */
7570 stub_entry = elf32_arm_get_stub_entry (input_section,
7571 sym_sec, h,
7572 rel, globals,
7573 stub_type);
7574 if (stub_entry != NULL)
7575 value = (stub_entry->stub_offset
7576 + stub_entry->stub_sec->output_offset
7577 + stub_entry->stub_sec->output_section->vma);
7578
7579 /* If this call becomes a call to Arm, force BLX. */
7580 if (globals->use_blx && (r_type == R_ARM_THM_CALL))
7581 {
7582 if ((stub_entry
7583 && !arm_stub_is_thumb (stub_entry->stub_type))
7584 || (sym_flags != STT_ARM_TFUNC))
7585 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7586 }
7587 }
7588 }
7589
7590 /* Handle calls via the PLT. */
7591 if (stub_type == arm_stub_none
7592 && h != NULL
7593 && splt != NULL
7594 && h->plt.offset != (bfd_vma) -1)
7595 {
7596 value = (splt->output_section->vma
7597 + splt->output_offset
7598 + h->plt.offset);
7599
7600 if (globals->use_blx && r_type == R_ARM_THM_CALL)
7601 {
7602 /* If the Thumb BLX instruction is available, convert
7603 the BL to a BLX instruction to call the ARM-mode
7604 PLT entry. */
7605 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7606 sym_flags = STT_FUNC;
7607 }
7608 else
7609 {
7610 /* Target the Thumb stub before the ARM PLT entry. */
7611 value -= PLT_THUMB_STUB_SIZE;
7612 sym_flags = STT_ARM_TFUNC;
7613 }
7614 *unresolved_reloc_p = FALSE;
7615 }
7616
7617 relocation = value + signed_addend;
7618
7619 relocation -= (input_section->output_section->vma
7620 + input_section->output_offset
7621 + rel->r_offset);
7622
7623 check = relocation >> howto->rightshift;
7624
7625 /* If this is a signed value, the rightshift just dropped
7626 	 leading 1 bits (assuming two's complement).  */
7627 if ((bfd_signed_vma) relocation >= 0)
7628 signed_check = check;
7629 else
7630 signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);
7631
7632 	/* Calculate the permissible maximum and minimum values for
7633 this relocation according to whether we're relocating for
7634 Thumb-2 or not. */
7635 bitsize = howto->bitsize;
7636 if (!thumb2)
7637 bitsize -= 2;
7638 reloc_signed_max = (1 << (bitsize - 1)) - 1;
7639 reloc_signed_min = ~reloc_signed_max;
7640
7641 /* Assumes two's complement. */
7642 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7643 overflow = TRUE;
7644
7645 if ((lower_insn & 0x5000) == 0x4000)
7646 /* For a BLX instruction, make sure that the relocation is rounded up
7647 to a word boundary. This follows the semantics of the instruction
7648 which specifies that bit 1 of the target address will come from bit
7649 1 of the base address. */
7650 relocation = (relocation + 2) & ~ 3;
7651
7652 /* Put RELOCATION back into the insn. Assumes two's complement.
7653 We use the Thumb-2 encoding, which is safe even if dealing with
7654 a Thumb-1 instruction by virtue of our overflow check above. */
7655 reloc_sign = (signed_check < 0) ? 1 : 0;
7656 upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
7657 | ((relocation >> 12) & 0x3ff)
7658 | (reloc_sign << 10);
7659 lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
7660 | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
7661 | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
7662 | ((relocation >> 1) & 0x7ff);
7663
7664 /* Put the relocated value back in the object file: */
7665 bfd_put_16 (input_bfd, upper_insn, hit_data);
7666 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
7667
7668 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
7669 }
7670 break;
7671
7672 case R_ARM_THM_JUMP19:
7673 /* Thumb32 conditional branch instruction. */
7674 {
7675 bfd_vma relocation;
7676 bfd_boolean overflow = FALSE;
7677 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
7678 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
7679 bfd_signed_vma reloc_signed_max = 0xffffe;
7680 bfd_signed_vma reloc_signed_min = -0x100000;
7681 bfd_signed_vma signed_check;
7682
7683 /* Need to refetch the addend, reconstruct the top three bits,
7684 and squish the two 11 bit pieces together. */
7685 if (globals->use_rel)
7686 {
7687 bfd_vma S = (upper_insn & 0x0400) >> 10;
7688 bfd_vma upper = (upper_insn & 0x003f);
7689 bfd_vma J1 = (lower_insn & 0x2000) >> 13;
7690 bfd_vma J2 = (lower_insn & 0x0800) >> 11;
7691 bfd_vma lower = (lower_insn & 0x07ff);
7692
7693 upper |= J1 << 6;
7694 upper |= J2 << 7;
7695 upper |= (!S) << 8;
7696 upper -= 0x0100; /* Sign extend. */
7697
7698 addend = (upper << 12) | (lower << 1);
7699 signed_addend = addend;
7700 }
7701
7702 /* Handle calls via the PLT. */
7703 if (h != NULL && splt != NULL && h->plt.offset != (bfd_vma) -1)
7704 {
7705 value = (splt->output_section->vma
7706 + splt->output_offset
7707 + h->plt.offset);
7708 /* Target the Thumb stub before the ARM PLT entry. */
7709 value -= PLT_THUMB_STUB_SIZE;
7710 *unresolved_reloc_p = FALSE;
7711 }
7712
7713 /* ??? Should handle interworking? GCC might someday try to
7714 use this for tail calls. */
7715
7716 relocation = value + signed_addend;
7717 relocation -= (input_section->output_section->vma
7718 + input_section->output_offset
7719 + rel->r_offset);
7720 signed_check = (bfd_signed_vma) relocation;
7721
7722 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7723 overflow = TRUE;
7724
7725 /* Put RELOCATION back into the insn. */
7726 {
7727 bfd_vma S = (relocation & 0x00100000) >> 20;
7728 bfd_vma J2 = (relocation & 0x00080000) >> 19;
7729 bfd_vma J1 = (relocation & 0x00040000) >> 18;
7730 bfd_vma hi = (relocation & 0x0003f000) >> 12;
7731 bfd_vma lo = (relocation & 0x00000ffe) >> 1;
7732
7733 upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
7734 lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
7735 }
7736
7737 /* Put the relocated value back in the object file: */
7738 bfd_put_16 (input_bfd, upper_insn, hit_data);
7739 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
7740
7741 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
7742 }
7743
7744 case R_ARM_THM_JUMP11:
7745 case R_ARM_THM_JUMP8:
7746 case R_ARM_THM_JUMP6:
7747       /* Thumb B (branch) instruction.  */
7748 {
7749 bfd_signed_vma relocation;
7750 bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
7751 bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
7752 bfd_signed_vma signed_check;
7753
7754 	/* CBZ/CBNZ cannot jump backward.  */
7755 if (r_type == R_ARM_THM_JUMP6)
7756 reloc_signed_min = 0;
7757
7758 if (globals->use_rel)
7759 {
7760 /* Need to refetch addend. */
7761 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
7762 if (addend & ((howto->src_mask + 1) >> 1))
7763 {
7764 signed_addend = -1;
7765 signed_addend &= ~ howto->src_mask;
7766 signed_addend |= addend;
7767 }
7768 else
7769 signed_addend = addend;
7770 /* The value in the insn has been right shifted. We need to
7771 undo this, so that we can perform the address calculation
7772 in terms of bytes. */
7773 signed_addend <<= howto->rightshift;
7774 }
7775 relocation = value + signed_addend;
7776
7777 relocation -= (input_section->output_section->vma
7778 + input_section->output_offset
7779 + rel->r_offset);
7780
7781 relocation >>= howto->rightshift;
7782 signed_check = relocation;
7783
7784 if (r_type == R_ARM_THM_JUMP6)
7785 relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
7786 else
7787 relocation &= howto->dst_mask;
7788 relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));
7789
7790 bfd_put_16 (input_bfd, relocation, hit_data);
7791
7792 /* Assumes two's complement. */
7793 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7794 return bfd_reloc_overflow;
7795
7796 return bfd_reloc_ok;
7797 }
7798
7799 case R_ARM_ALU_PCREL7_0:
7800 case R_ARM_ALU_PCREL15_8:
7801 case R_ARM_ALU_PCREL23_15:
7802 {
7803 bfd_vma insn;
7804 bfd_vma relocation;
7805
7806 insn = bfd_get_32 (input_bfd, hit_data);
7807 if (globals->use_rel)
7808 {
7809 /* Extract the addend. */
7810 addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
7811 signed_addend = addend;
7812 }
7813 relocation = value + signed_addend;
7814
7815 relocation -= (input_section->output_section->vma
7816 + input_section->output_offset
7817 + rel->r_offset);
7818 insn = (insn & ~0xfff)
7819 | ((howto->bitpos << 7) & 0xf00)
7820 | ((relocation >> howto->bitpos) & 0xff);
7821 	bfd_put_32 (input_bfd, insn, hit_data);
7822 }
7823 return bfd_reloc_ok;
7824
7825 case R_ARM_GNU_VTINHERIT:
7826 case R_ARM_GNU_VTENTRY:
7827 return bfd_reloc_ok;
7828
7829 case R_ARM_GOTOFF32:
7830 /* Relocation is relative to the start of the
7831 global offset table. */
7832
7833 BFD_ASSERT (sgot != NULL);
7834 if (sgot == NULL)
7835 return bfd_reloc_notsupported;
7836
7837 /* If we are addressing a Thumb function, we need to adjust the
7838 address by one, so that attempts to call the function pointer will
7839 correctly interpret it as Thumb code. */
7840 if (sym_flags == STT_ARM_TFUNC)
7841 value += 1;
7842
7843 /* Note that sgot->output_offset is not involved in this
7844 calculation. We always want the start of .got. If we
7845 	 define _GLOBAL_OFFSET_TABLE_ in a different way, as is
7846 permitted by the ABI, we might have to change this
7847 calculation. */
7848 value -= sgot->output_section->vma;
7849 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7850 contents, rel->r_offset, value,
7851 rel->r_addend);
7852
7853 case R_ARM_GOTPC:
7854 /* Use global offset table as symbol value. */
7855 BFD_ASSERT (sgot != NULL);
7856
7857 if (sgot == NULL)
7858 return bfd_reloc_notsupported;
7859
7860 *unresolved_reloc_p = FALSE;
7861 value = sgot->output_section->vma;
7862 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7863 contents, rel->r_offset, value,
7864 rel->r_addend);
7865
7866 case R_ARM_GOT32:
7867 case R_ARM_GOT_PREL:
7868 /* Relocation is to the entry for this symbol in the
7869 global offset table. */
7870 if (sgot == NULL)
7871 return bfd_reloc_notsupported;
7872
7873 if (h != NULL)
7874 {
7875 bfd_vma off;
7876 bfd_boolean dyn;
7877
7878 off = h->got.offset;
7879 BFD_ASSERT (off != (bfd_vma) -1);
7880 dyn = globals->root.dynamic_sections_created;
7881
7882 if (! WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
7883 || (info->shared
7884 && SYMBOL_REFERENCES_LOCAL (info, h))
7885 || (ELF_ST_VISIBILITY (h->other)
7886 && h->root.type == bfd_link_hash_undefweak))
7887 {
7888 /* This is actually a static link, or it is a -Bsymbolic link
7889 and the symbol is defined locally. We must initialize this
7890 entry in the global offset table. Since the offset must
7891 always be a multiple of 4, we use the least significant bit
7892 to record whether we have initialized it already.
7893
7894 When doing a dynamic link, we create a .rel(a).got relocation
7895 entry to initialize the value. This is done in the
7896 finish_dynamic_symbol routine. */
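	    /* Sketch of the tagging idiom used here (OFF is always a
	       multiple of 4, so bit 0 is free):
		 h->got.offset |= 1;    mark the entry as initialized
		 off &= ~1;             recover the real offset.  */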
7897 if ((off & 1) != 0)
7898 off &= ~1;
7899 else
7900 {
7901 /* If we are addressing a Thumb function, we need to
7902 adjust the address by one, so that attempts to
7903 call the function pointer will correctly
7904 interpret it as Thumb code. */
7905 if (sym_flags == STT_ARM_TFUNC)
7906 value |= 1;
7907
7908 bfd_put_32 (output_bfd, value, sgot->contents + off);
7909 h->got.offset |= 1;
7910 }
7911 }
7912 else
7913 *unresolved_reloc_p = FALSE;
7914
7915 value = sgot->output_offset + off;
7916 }
7917 else
7918 {
7919 bfd_vma off;
7920
7921 BFD_ASSERT (local_got_offsets != NULL &&
7922 local_got_offsets[r_symndx] != (bfd_vma) -1);
7923
7924 off = local_got_offsets[r_symndx];
7925
7926 /* The offset must always be a multiple of 4. We use the
7927 least significant bit to record whether we have already
7928 generated the necessary reloc. */
7929 if ((off & 1) != 0)
7930 off &= ~1;
7931 else
7932 {
7933 /* If we are addressing a Thumb function, we need to
7934 adjust the address by one, so that attempts to
7935 call the function pointer will correctly
7936 interpret it as Thumb code. */
7937 if (sym_flags == STT_ARM_TFUNC)
7938 value |= 1;
7939
7940 if (globals->use_rel)
7941 bfd_put_32 (output_bfd, value, sgot->contents + off);
7942
7943 if (info->shared)
7944 {
7945 asection * srelgot;
7946 Elf_Internal_Rela outrel;
7947 bfd_byte *loc;
7948
7949 srelgot = (bfd_get_section_by_name
7950 (dynobj, RELOC_SECTION (globals, ".got")));
7951 BFD_ASSERT (srelgot != NULL);
7952
7953 outrel.r_addend = addend + value;
7954 outrel.r_offset = (sgot->output_section->vma
7955 + sgot->output_offset
7956 + off);
7957 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
7958 loc = srelgot->contents;
7959 loc += srelgot->reloc_count++ * RELOC_SIZE (globals);
7960 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7961 }
7962
7963 local_got_offsets[r_symndx] |= 1;
7964 }
7965
7966 value = sgot->output_offset + off;
7967 }
7968 if (r_type != R_ARM_GOT32)
7969 value += sgot->output_section->vma;
7970
7971 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7972 contents, rel->r_offset, value,
7973 rel->r_addend);
7974
7975 case R_ARM_TLS_LDO32:
7976 value = value - dtpoff_base (info);
7977
7978 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7979 contents, rel->r_offset, value,
7980 rel->r_addend);
7981
7982 case R_ARM_TLS_LDM32:
7983 {
7984 bfd_vma off;
7985
7986 if (globals->sgot == NULL)
7987 abort ();
7988
7989 off = globals->tls_ldm_got.offset;
7990
7991 if ((off & 1) != 0)
7992 off &= ~1;
7993 else
7994 {
7995 /* If we don't know the module number, create a relocation
7996 for it. */
7997 if (info->shared)
7998 {
7999 Elf_Internal_Rela outrel;
8000 bfd_byte *loc;
8001
8002 if (globals->srelgot == NULL)
8003 abort ();
8004
8005 outrel.r_addend = 0;
8006 outrel.r_offset = (globals->sgot->output_section->vma
8007 + globals->sgot->output_offset + off);
8008 outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);
8009
8010 if (globals->use_rel)
8011 bfd_put_32 (output_bfd, outrel.r_addend,
8012 globals->sgot->contents + off);
8013
8014 loc = globals->srelgot->contents;
8015 loc += globals->srelgot->reloc_count++ * RELOC_SIZE (globals);
8016 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
8017 }
8018 else
8019 bfd_put_32 (output_bfd, 1, globals->sgot->contents + off);
8020
8021 globals->tls_ldm_got.offset |= 1;
8022 }
8023
8024 value = globals->sgot->output_section->vma + globals->sgot->output_offset + off
8025 - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);
8026
8027 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8028 contents, rel->r_offset, value,
8029 rel->r_addend);
8030 }
8031
8032 case R_ARM_TLS_GD32:
8033 case R_ARM_TLS_IE32:
8034 {
8035 bfd_vma off;
8036 int indx;
8037 char tls_type;
8038
8039 if (globals->sgot == NULL)
8040 abort ();
8041
8042 indx = 0;
8043 if (h != NULL)
8044 {
8045 bfd_boolean dyn;
8046 dyn = globals->root.dynamic_sections_created;
8047 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
8048 && (!info->shared
8049 || !SYMBOL_REFERENCES_LOCAL (info, h)))
8050 {
8051 *unresolved_reloc_p = FALSE;
8052 indx = h->dynindx;
8053 }
8054 off = h->got.offset;
8055 tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
8056 }
8057 else
8058 {
8059 if (local_got_offsets == NULL)
8060 abort ();
8061 off = local_got_offsets[r_symndx];
8062 tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
8063 }
8064
8065 if (tls_type == GOT_UNKNOWN)
8066 abort ();
8067
8068 if ((off & 1) != 0)
8069 off &= ~1;
8070 else
8071 {
8072 bfd_boolean need_relocs = FALSE;
8073 Elf_Internal_Rela outrel;
8074 bfd_byte *loc = NULL;
8075 int cur_off = off;
8076
8077 /* The GOT entries have not been initialized yet. Do it
8078 now, and emit any relocations. If both an IE GOT and a
8079 GD GOT are necessary, we emit the GD first. */
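	    /* Sketch of the GOT slots involved (offsets relative to OFF):
		 GD:  [off + 0]  module ID  (R_ARM_TLS_DTPMOD32)
		      [off + 4]  dtp offset (R_ARM_TLS_DTPOFF32)
		 IE:  [off + 8]  tp offset  (R_ARM_TLS_TPOFF32, or a constant
					     tpoff () value when no reloc is
					     needed)
	       If only one of GD/IE is required its slots start at OFF
	       itself; CUR_OFF below tracks the slot being filled.  */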
8080
8081 if ((info->shared || indx != 0)
8082 && (h == NULL
8083 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
8084 || h->root.type != bfd_link_hash_undefweak))
8085 {
8086 need_relocs = TRUE;
8087 if (globals->srelgot == NULL)
8088 abort ();
8089 loc = globals->srelgot->contents;
8090 loc += globals->srelgot->reloc_count * RELOC_SIZE (globals);
8091 }
8092
8093 if (tls_type & GOT_TLS_GD)
8094 {
8095 if (need_relocs)
8096 {
8097 outrel.r_addend = 0;
8098 outrel.r_offset = (globals->sgot->output_section->vma
8099 + globals->sgot->output_offset
8100 + cur_off);
8101 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);
8102
8103 if (globals->use_rel)
8104 bfd_put_32 (output_bfd, outrel.r_addend,
8105 globals->sgot->contents + cur_off);
8106
8107 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
8108 globals->srelgot->reloc_count++;
8109 loc += RELOC_SIZE (globals);
8110
8111 if (indx == 0)
8112 bfd_put_32 (output_bfd, value - dtpoff_base (info),
8113 globals->sgot->contents + cur_off + 4);
8114 else
8115 {
8116 outrel.r_addend = 0;
8117 outrel.r_info = ELF32_R_INFO (indx,
8118 R_ARM_TLS_DTPOFF32);
8119 outrel.r_offset += 4;
8120
8121 if (globals->use_rel)
8122 bfd_put_32 (output_bfd, outrel.r_addend,
8123 globals->sgot->contents + cur_off + 4);
8124
8125
8126 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
8127 globals->srelgot->reloc_count++;
8128 loc += RELOC_SIZE (globals);
8129 }
8130 }
8131 else
8132 {
8133 /* If we are not emitting relocations for a
8134 general dynamic reference, then we must be in a
8135 static link or an executable link with the
8136 symbol binding locally. Mark it as belonging
8137 to module 1, the executable. */
8138 bfd_put_32 (output_bfd, 1,
8139 globals->sgot->contents + cur_off);
8140 bfd_put_32 (output_bfd, value - dtpoff_base (info),
8141 globals->sgot->contents + cur_off + 4);
8142 }
8143
8144 cur_off += 8;
8145 }
8146
8147 if (tls_type & GOT_TLS_IE)
8148 {
8149 if (need_relocs)
8150 {
8151 if (indx == 0)
8152 outrel.r_addend = value - dtpoff_base (info);
8153 else
8154 outrel.r_addend = 0;
8155 outrel.r_offset = (globals->sgot->output_section->vma
8156 + globals->sgot->output_offset
8157 + cur_off);
8158 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);
8159
8160 if (globals->use_rel)
8161 bfd_put_32 (output_bfd, outrel.r_addend,
8162 globals->sgot->contents + cur_off);
8163
8164 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
8165 globals->srelgot->reloc_count++;
8166 loc += RELOC_SIZE (globals);
8167 }
8168 else
8169 bfd_put_32 (output_bfd, tpoff (info, value),
8170 globals->sgot->contents + cur_off);
8171 cur_off += 4;
8172 }
8173
8174 if (h != NULL)
8175 h->got.offset |= 1;
8176 else
8177 local_got_offsets[r_symndx] |= 1;
8178 }
8179
8180 if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32)
8181 off += 8;
8182 value = globals->sgot->output_section->vma + globals->sgot->output_offset + off
8183 - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);
8184
8185 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8186 contents, rel->r_offset, value,
8187 rel->r_addend);
8188 }
8189
8190 case R_ARM_TLS_LE32:
8191 if (info->shared)
8192 {
8193 (*_bfd_error_handler)
8194 (_("%B(%A+0x%lx): R_ARM_TLS_LE32 relocation not permitted in shared object"),
8195 input_bfd, input_section,
8196 	   (long) rel->r_offset);
8197 return (bfd_reloc_status_type) FALSE;
8198 }
8199 else
8200 value = tpoff (info, value);
8201
8202 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8203 contents, rel->r_offset, value,
8204 rel->r_addend);
8205
8206 case R_ARM_V4BX:
8207 if (globals->fix_v4bx)
8208 {
8209 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8210
8211 /* Ensure that we have a BX instruction. */
8212 BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);
8213
8214 if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
8215 {
8216 /* Branch to veneer. */
8217 bfd_vma glue_addr;
8218 glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
8219 glue_addr -= input_section->output_section->vma
8220 + input_section->output_offset
8221 + rel->r_offset + 8;
8222 insn = (insn & 0xf0000000) | 0x0a000000
8223 | ((glue_addr >> 2) & 0x00ffffff);
8224 }
8225 else
8226 {
8227 /* Preserve Rm (lowest four bits) and the condition code
8228 (highest four bits). Other bits encode MOV PC,Rm. */
8229 insn = (insn & 0xf000000f) | 0x01a0f000;
8230 }
8231
8232 bfd_put_32 (input_bfd, insn, hit_data);
8233 }
8234 return bfd_reloc_ok;
8235
8236 case R_ARM_MOVW_ABS_NC:
8237 case R_ARM_MOVT_ABS:
8238 case R_ARM_MOVW_PREL_NC:
8239 case R_ARM_MOVT_PREL:
8240       /* Until we properly support segment-base-relative addressing, we
8241 	 assume the segment base to be zero, as for the group relocations.
8242 Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
8243 and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS. */
8244 case R_ARM_MOVW_BREL_NC:
8245 case R_ARM_MOVW_BREL:
8246 case R_ARM_MOVT_BREL:
8247 {
8248 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8249
8250 if (globals->use_rel)
8251 {
8252 addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
8253 signed_addend = (addend ^ 0x8000) - 0x8000;
8254 }
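	/* The ARM MOVW/MOVT encoding splits the 16-bit immediate into
	   imm4 (insn bits 19:16) and imm12 (insn bits 11:0), so the
	   extraction above is imm4:imm12, and (x ^ 0x8000) - 0x8000
	   sign-extends the 16-bit result.  Illustrative example only:
	   an insn holding #0x1234 has imm4 = 0x1 and imm12 = 0x234.  */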
8255
8256 value += signed_addend;
8257
8258 if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
8259 value -= (input_section->output_section->vma
8260 + input_section->output_offset + rel->r_offset);
8261
8262 if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
8263 return bfd_reloc_overflow;
8264
8265 if (sym_flags == STT_ARM_TFUNC)
8266 value |= 1;
8267
8268 if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
8269 || r_type == R_ARM_MOVT_BREL)
8270 value >>= 16;
8271
8272 insn &= 0xfff0f000;
8273 insn |= value & 0xfff;
8274 insn |= (value & 0xf000) << 4;
8275 bfd_put_32 (input_bfd, insn, hit_data);
8276 }
8277 return bfd_reloc_ok;
8278
8279 case R_ARM_THM_MOVW_ABS_NC:
8280 case R_ARM_THM_MOVT_ABS:
8281 case R_ARM_THM_MOVW_PREL_NC:
8282 case R_ARM_THM_MOVT_PREL:
8283       /* Until we properly support segment-base-relative addressing, we
8284 	 assume the segment base to be zero, as for the above relocations.
8285 Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
8286 R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
8287 as R_ARM_THM_MOVT_ABS. */
8288 case R_ARM_THM_MOVW_BREL_NC:
8289 case R_ARM_THM_MOVW_BREL:
8290 case R_ARM_THM_MOVT_BREL:
8291 {
8292 bfd_vma insn;
8293
8294 insn = bfd_get_16 (input_bfd, hit_data) << 16;
8295 insn |= bfd_get_16 (input_bfd, hit_data + 2);
8296
8297 if (globals->use_rel)
8298 {
8299 addend = ((insn >> 4) & 0xf000)
8300 | ((insn >> 15) & 0x0800)
8301 | ((insn >> 4) & 0x0700)
8302 | (insn & 0x00ff);
8303 signed_addend = (addend ^ 0x8000) - 0x8000;
8304 }
8305
8306 value += signed_addend;
8307
8308 if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
8309 value -= (input_section->output_section->vma
8310 + input_section->output_offset + rel->r_offset);
8311
8312 if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
8313 return bfd_reloc_overflow;
8314
8315 if (sym_flags == STT_ARM_TFUNC)
8316 value |= 1;
8317
8318 if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
8319 || r_type == R_ARM_THM_MOVT_BREL)
8320 value >>= 16;
8321
8322 insn &= 0xfbf08f00;
8323 insn |= (value & 0xf000) << 4;
8324 insn |= (value & 0x0800) << 15;
8325 insn |= (value & 0x0700) << 4;
8326 insn |= (value & 0x00ff);
8327
8328 bfd_put_16 (input_bfd, insn >> 16, hit_data);
8329 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
8330 }
8331 return bfd_reloc_ok;
8332
8333 case R_ARM_ALU_PC_G0_NC:
8334 case R_ARM_ALU_PC_G1_NC:
8335 case R_ARM_ALU_PC_G0:
8336 case R_ARM_ALU_PC_G1:
8337 case R_ARM_ALU_PC_G2:
8338 case R_ARM_ALU_SB_G0_NC:
8339 case R_ARM_ALU_SB_G1_NC:
8340 case R_ARM_ALU_SB_G0:
8341 case R_ARM_ALU_SB_G1:
8342 case R_ARM_ALU_SB_G2:
8343 {
8344 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8345 bfd_vma pc = input_section->output_section->vma
8346 + input_section->output_offset + rel->r_offset;
8347 /* sb should be the origin of the *segment* containing the symbol.
8348 It is not clear how to obtain this OS-dependent value, so we
8349 make an arbitrary choice of zero. */
8350 bfd_vma sb = 0;
8351 bfd_vma residual;
8352 bfd_vma g_n;
8353 bfd_signed_vma signed_value;
8354 int group = 0;
8355
8356 /* Determine which group of bits to select. */
8357 switch (r_type)
8358 {
8359 case R_ARM_ALU_PC_G0_NC:
8360 case R_ARM_ALU_PC_G0:
8361 case R_ARM_ALU_SB_G0_NC:
8362 case R_ARM_ALU_SB_G0:
8363 group = 0;
8364 break;
8365
8366 case R_ARM_ALU_PC_G1_NC:
8367 case R_ARM_ALU_PC_G1:
8368 case R_ARM_ALU_SB_G1_NC:
8369 case R_ARM_ALU_SB_G1:
8370 group = 1;
8371 break;
8372
8373 case R_ARM_ALU_PC_G2:
8374 case R_ARM_ALU_SB_G2:
8375 group = 2;
8376 break;
8377
8378 default:
8379 abort ();
8380 }
8381
8382 /* If REL, extract the addend from the insn. If RELA, it will
8383 have already been fetched for us. */
8384 if (globals->use_rel)
8385 {
8386 int negative;
8387 bfd_vma constant = insn & 0xff;
8388 bfd_vma rotation = (insn & 0xf00) >> 8;
8389
8390 if (rotation == 0)
8391 signed_addend = constant;
8392 else
8393 {
8394 /* Compensate for the fact that in the instruction, the
8395 rotation is stored in multiples of 2 bits. */
8396 rotation *= 2;
8397
8398 /* Rotate "constant" right by "rotation" bits. */
8399 signed_addend = (constant >> rotation) |
8400 (constant << (8 * sizeof (bfd_vma) - rotation));
8401 }
8402
8403 /* Determine if the instruction is an ADD or a SUB.
8404 (For REL, this determines the sign of the addend.) */
8405 negative = identify_add_or_sub (insn);
8406 if (negative == 0)
8407 {
8408 (*_bfd_error_handler)
8409 (_("%B(%A+0x%lx): Only ADD or SUB instructions are allowed for ALU group relocations"),
8410 input_bfd, input_section,
8411 (long) rel->r_offset, howto->name);
8412 return bfd_reloc_overflow;
8413 }
8414
8415 signed_addend *= negative;
8416 }
8417
8418 /* Compute the value (X) to go in the place. */
8419 if (r_type == R_ARM_ALU_PC_G0_NC
8420 || r_type == R_ARM_ALU_PC_G1_NC
8421 || r_type == R_ARM_ALU_PC_G0
8422 || r_type == R_ARM_ALU_PC_G1
8423 || r_type == R_ARM_ALU_PC_G2)
8424 /* PC relative. */
8425 signed_value = value - pc + signed_addend;
8426 else
8427 /* Section base relative. */
8428 signed_value = value - sb + signed_addend;
8429
8430 /* If the target symbol is a Thumb function, then set the
8431 Thumb bit in the address. */
8432 if (sym_flags == STT_ARM_TFUNC)
8433 signed_value |= 1;
8434
8435 /* Calculate the value of the relevant G_n, in encoded
8436 constant-with-rotation format. */
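	/* Illustrative example only: splitting the value 0x000a0b0c yields
	     G0 = 0x000a0000  (residual 0x00000b0c)
	     G1 = 0x00000b00  (residual 0x0000000c)
	     G2 = 0x0000000c  (residual 0)
	   i.e. each G_n is the most significant remaining chunk of up to
	   eight bits, positioned on an even bit boundary so that it can be
	   encoded as an ARM rotated immediate.  */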
8437 g_n = calculate_group_reloc_mask (abs (signed_value), group,
8438 &residual);
8439
8440 /* Check for overflow if required. */
8441 if ((r_type == R_ARM_ALU_PC_G0
8442 || r_type == R_ARM_ALU_PC_G1
8443 || r_type == R_ARM_ALU_PC_G2
8444 || r_type == R_ARM_ALU_SB_G0
8445 || r_type == R_ARM_ALU_SB_G1
8446 || r_type == R_ARM_ALU_SB_G2) && residual != 0)
8447 {
8448 (*_bfd_error_handler)
8449 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8450 input_bfd, input_section,
8451 (long) rel->r_offset, abs (signed_value), howto->name);
8452 return bfd_reloc_overflow;
8453 }
8454
8455 /* Mask out the value and the ADD/SUB part of the opcode; take care
8456 not to destroy the S bit. */
8457 insn &= 0xff1ff000;
8458
8459 /* Set the opcode according to whether the value to go in the
8460 place is negative. */
8461 if (signed_value < 0)
8462 insn |= 1 << 22;
8463 else
8464 insn |= 1 << 23;
8465
8466 /* Encode the offset. */
8467 insn |= g_n;
8468
8469 bfd_put_32 (input_bfd, insn, hit_data);
8470 }
8471 return bfd_reloc_ok;
8472
8473 case R_ARM_LDR_PC_G0:
8474 case R_ARM_LDR_PC_G1:
8475 case R_ARM_LDR_PC_G2:
8476 case R_ARM_LDR_SB_G0:
8477 case R_ARM_LDR_SB_G1:
8478 case R_ARM_LDR_SB_G2:
8479 {
8480 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8481 bfd_vma pc = input_section->output_section->vma
8482 + input_section->output_offset + rel->r_offset;
8483 bfd_vma sb = 0; /* See note above. */
8484 bfd_vma residual;
8485 bfd_signed_vma signed_value;
8486 int group = 0;
8487
8488 /* Determine which groups of bits to calculate. */
8489 switch (r_type)
8490 {
8491 case R_ARM_LDR_PC_G0:
8492 case R_ARM_LDR_SB_G0:
8493 group = 0;
8494 break;
8495
8496 case R_ARM_LDR_PC_G1:
8497 case R_ARM_LDR_SB_G1:
8498 group = 1;
8499 break;
8500
8501 case R_ARM_LDR_PC_G2:
8502 case R_ARM_LDR_SB_G2:
8503 group = 2;
8504 break;
8505
8506 default:
8507 abort ();
8508 }
8509
8510 /* If REL, extract the addend from the insn. If RELA, it will
8511 have already been fetched for us. */
8512 if (globals->use_rel)
8513 {
8514 int negative = (insn & (1 << 23)) ? 1 : -1;
8515 signed_addend = negative * (insn & 0xfff);
8516 }
8517
8518 /* Compute the value (X) to go in the place. */
8519 if (r_type == R_ARM_LDR_PC_G0
8520 || r_type == R_ARM_LDR_PC_G1
8521 || r_type == R_ARM_LDR_PC_G2)
8522 /* PC relative. */
8523 signed_value = value - pc + signed_addend;
8524 else
8525 /* Section base relative. */
8526 signed_value = value - sb + signed_addend;
8527
8528 /* Calculate the value of the relevant G_{n-1} to obtain
8529 the residual at that stage. */
8530 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8531
8532 /* Check for overflow. */
8533 if (residual >= 0x1000)
8534 {
8535 (*_bfd_error_handler)
8536 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8537 input_bfd, input_section,
8538 (long) rel->r_offset, abs (signed_value), howto->name);
8539 return bfd_reloc_overflow;
8540 }
8541
8542 /* Mask out the value and U bit. */
8543 insn &= 0xff7ff000;
8544
8545 /* Set the U bit if the value to go in the place is non-negative. */
8546 if (signed_value >= 0)
8547 insn |= 1 << 23;
8548
8549 /* Encode the offset. */
8550 insn |= residual;
8551
8552 bfd_put_32 (input_bfd, insn, hit_data);
8553 }
8554 return bfd_reloc_ok;
8555
8556 case R_ARM_LDRS_PC_G0:
8557 case R_ARM_LDRS_PC_G1:
8558 case R_ARM_LDRS_PC_G2:
8559 case R_ARM_LDRS_SB_G0:
8560 case R_ARM_LDRS_SB_G1:
8561 case R_ARM_LDRS_SB_G2:
8562 {
8563 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8564 bfd_vma pc = input_section->output_section->vma
8565 + input_section->output_offset + rel->r_offset;
8566 bfd_vma sb = 0; /* See note above. */
8567 bfd_vma residual;
8568 bfd_signed_vma signed_value;
8569 int group = 0;
8570
8571 /* Determine which groups of bits to calculate. */
8572 switch (r_type)
8573 {
8574 case R_ARM_LDRS_PC_G0:
8575 case R_ARM_LDRS_SB_G0:
8576 group = 0;
8577 break;
8578
8579 case R_ARM_LDRS_PC_G1:
8580 case R_ARM_LDRS_SB_G1:
8581 group = 1;
8582 break;
8583
8584 case R_ARM_LDRS_PC_G2:
8585 case R_ARM_LDRS_SB_G2:
8586 group = 2;
8587 break;
8588
8589 default:
8590 abort ();
8591 }
8592
8593 /* If REL, extract the addend from the insn. If RELA, it will
8594 have already been fetched for us. */
8595 if (globals->use_rel)
8596 {
8597 int negative = (insn & (1 << 23)) ? 1 : -1;
8598 signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
8599 }
8600
8601 /* Compute the value (X) to go in the place. */
8602 if (r_type == R_ARM_LDRS_PC_G0
8603 || r_type == R_ARM_LDRS_PC_G1
8604 || r_type == R_ARM_LDRS_PC_G2)
8605 /* PC relative. */
8606 signed_value = value - pc + signed_addend;
8607 else
8608 /* Section base relative. */
8609 signed_value = value - sb + signed_addend;
8610
8611 /* Calculate the value of the relevant G_{n-1} to obtain
8612 the residual at that stage. */
8613 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8614
8615 /* Check for overflow. */
8616 if (residual >= 0x100)
8617 {
8618 (*_bfd_error_handler)
8619 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8620 input_bfd, input_section,
8621 (long) rel->r_offset, abs (signed_value), howto->name);
8622 return bfd_reloc_overflow;
8623 }
8624
8625 /* Mask out the value and U bit. */
8626 insn &= 0xff7ff0f0;
8627
8628 /* Set the U bit if the value to go in the place is non-negative. */
8629 if (signed_value >= 0)
8630 insn |= 1 << 23;
8631
8632 /* Encode the offset. */
8633 insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
8634
8635 bfd_put_32 (input_bfd, insn, hit_data);
8636 }
8637 return bfd_reloc_ok;
8638
8639 case R_ARM_LDC_PC_G0:
8640 case R_ARM_LDC_PC_G1:
8641 case R_ARM_LDC_PC_G2:
8642 case R_ARM_LDC_SB_G0:
8643 case R_ARM_LDC_SB_G1:
8644 case R_ARM_LDC_SB_G2:
8645 {
8646 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8647 bfd_vma pc = input_section->output_section->vma
8648 + input_section->output_offset + rel->r_offset;
8649 bfd_vma sb = 0; /* See note above. */
8650 bfd_vma residual;
8651 bfd_signed_vma signed_value;
8652 int group = 0;
8653
8654 /* Determine which groups of bits to calculate. */
8655 switch (r_type)
8656 {
8657 case R_ARM_LDC_PC_G0:
8658 case R_ARM_LDC_SB_G0:
8659 group = 0;
8660 break;
8661
8662 case R_ARM_LDC_PC_G1:
8663 case R_ARM_LDC_SB_G1:
8664 group = 1;
8665 break;
8666
8667 case R_ARM_LDC_PC_G2:
8668 case R_ARM_LDC_SB_G2:
8669 group = 2;
8670 break;
8671
8672 default:
8673 abort ();
8674 }
8675
8676 /* If REL, extract the addend from the insn. If RELA, it will
8677 have already been fetched for us. */
8678 if (globals->use_rel)
8679 {
8680 int negative = (insn & (1 << 23)) ? 1 : -1;
8681 signed_addend = negative * ((insn & 0xff) << 2);
8682 }
8683
8684 /* Compute the value (X) to go in the place. */
8685 if (r_type == R_ARM_LDC_PC_G0
8686 || r_type == R_ARM_LDC_PC_G1
8687 || r_type == R_ARM_LDC_PC_G2)
8688 /* PC relative. */
8689 signed_value = value - pc + signed_addend;
8690 else
8691 /* Section base relative. */
8692 signed_value = value - sb + signed_addend;
8693
8694 /* Calculate the value of the relevant G_{n-1} to obtain
8695 the residual at that stage. */
8696 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8697
8698 /* Check for overflow. (The absolute value to go in the place must be
8699 divisible by four and, after having been divided by four, must
8700 fit in eight bits.) */
8701 if ((residual & 0x3) != 0 || residual >= 0x400)
8702 {
8703 (*_bfd_error_handler)
8704 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8705 input_bfd, input_section,
8706 (long) rel->r_offset, abs (signed_value), howto->name);
8707 return bfd_reloc_overflow;
8708 }
8709
8710 /* Mask out the value and U bit. */
8711 insn &= 0xff7fff00;
8712
8713 /* Set the U bit if the value to go in the place is non-negative. */
8714 if (signed_value >= 0)
8715 insn |= 1 << 23;
8716
8717 /* Encode the offset. */
8718 insn |= residual >> 2;
8719
8720 bfd_put_32 (input_bfd, insn, hit_data);
8721 }
8722 return bfd_reloc_ok;
8723
8724 default:
8725 return bfd_reloc_notsupported;
8726 }
8727 }
8728
8729 /* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS.  */
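/* Note that for R_ARM_THM_CALL / R_ARM_THM_JUMP24 the addend is held split
   across the two halfwords as a pair of 11-bit fields; the code below
   reassembles it as (upper << 12) | (lower << 1), adds the increment and
   splits it again, leaving the Thumb-2 J1/J2 bits untouched (i.e. the
   pre-Thumb-2 interpretation of the offset is assumed).  */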
8730 static void
8731 arm_add_to_rel (bfd * abfd,
8732 bfd_byte * address,
8733 reloc_howto_type * howto,
8734 bfd_signed_vma increment)
8735 {
8736 bfd_signed_vma addend;
8737
8738 if (howto->type == R_ARM_THM_CALL
8739 || howto->type == R_ARM_THM_JUMP24)
8740 {
8741 int upper_insn, lower_insn;
8742 int upper, lower;
8743
8744 upper_insn = bfd_get_16 (abfd, address);
8745 lower_insn = bfd_get_16 (abfd, address + 2);
8746 upper = upper_insn & 0x7ff;
8747 lower = lower_insn & 0x7ff;
8748
8749 addend = (upper << 12) | (lower << 1);
8750 addend += increment;
8751 addend >>= 1;
8752
8753 upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
8754 lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);
8755
8756 bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
8757 bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
8758 }
8759 else
8760 {
8761 bfd_vma contents;
8762
8763 contents = bfd_get_32 (abfd, address);
8764
8765 /* Get the (signed) value from the instruction. */
8766 addend = contents & howto->src_mask;
8767 if (addend & ((howto->src_mask + 1) >> 1))
8768 {
8769 bfd_signed_vma mask;
8770
8771 mask = -1;
8772 mask &= ~ howto->src_mask;
8773 addend |= mask;
8774 }
8775
8776       /* Add in the increment (which is a byte value).  */
8777 switch (howto->type)
8778 {
8779 default:
8780 addend += increment;
8781 break;
8782
8783 case R_ARM_PC24:
8784 case R_ARM_PLT32:
8785 case R_ARM_CALL:
8786 case R_ARM_JUMP24:
8787 addend <<= howto->size;
8788 addend += increment;
8789
8790 /* Should we check for overflow here ? */
8791
8792 /* Drop any undesired bits. */
8793 addend >>= howto->rightshift;
8794 break;
8795 }
8796
8797 contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);
8798
8799 bfd_put_32 (abfd, contents, address);
8800 }
8801 }
8802
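#if 0
/* Illustrative sketch only (not compiled): the sign-extension idiom used
   in arm_add_to_rel above, written out as a stand-alone helper.  SRC_MASK
   is assumed to be a contiguous low-order mask such as 0x00ffffff.  */

static bfd_signed_vma
sign_extend_field_example (bfd_vma field, bfd_vma src_mask)
{
  bfd_signed_vma result = field & src_mask;

  /* If the top bit of the field is set, OR in the complement of the mask
     to propagate the sign into the upper bits.  */
  if (result & ((src_mask + 1) >> 1))
    result |= ~ (bfd_signed_vma) src_mask;

  return result;
}
#endif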
8803 #define IS_ARM_TLS_RELOC(R_TYPE) \
8804 ((R_TYPE) == R_ARM_TLS_GD32 \
8805 || (R_TYPE) == R_ARM_TLS_LDO32 \
8806 || (R_TYPE) == R_ARM_TLS_LDM32 \
8807 || (R_TYPE) == R_ARM_TLS_DTPOFF32 \
8808 || (R_TYPE) == R_ARM_TLS_DTPMOD32 \
8809 || (R_TYPE) == R_ARM_TLS_TPOFF32 \
8810 || (R_TYPE) == R_ARM_TLS_LE32 \
8811 || (R_TYPE) == R_ARM_TLS_IE32)
8812
8813 /* Relocate an ARM ELF section. */
8814
8815 static bfd_boolean
8816 elf32_arm_relocate_section (bfd * output_bfd,
8817 struct bfd_link_info * info,
8818 bfd * input_bfd,
8819 asection * input_section,
8820 bfd_byte * contents,
8821 Elf_Internal_Rela * relocs,
8822 Elf_Internal_Sym * local_syms,
8823 asection ** local_sections)
8824 {
8825 Elf_Internal_Shdr *symtab_hdr;
8826 struct elf_link_hash_entry **sym_hashes;
8827 Elf_Internal_Rela *rel;
8828 Elf_Internal_Rela *relend;
8829 const char *name;
8830 struct elf32_arm_link_hash_table * globals;
8831
8832 globals = elf32_arm_hash_table (info);
8833 if (globals == NULL)
8834 return FALSE;
8835
8836 symtab_hdr = & elf_symtab_hdr (input_bfd);
8837 sym_hashes = elf_sym_hashes (input_bfd);
8838
8839 rel = relocs;
8840 relend = relocs + input_section->reloc_count;
8841 for (; rel < relend; rel++)
8842 {
8843 int r_type;
8844 reloc_howto_type * howto;
8845 unsigned long r_symndx;
8846 Elf_Internal_Sym * sym;
8847 asection * sec;
8848 struct elf_link_hash_entry * h;
8849 bfd_vma relocation;
8850 bfd_reloc_status_type r;
8851 arelent bfd_reloc;
8852 char sym_type;
8853 bfd_boolean unresolved_reloc = FALSE;
8854 char *error_message = NULL;
8855
8856 r_symndx = ELF32_R_SYM (rel->r_info);
8857 r_type = ELF32_R_TYPE (rel->r_info);
8858 r_type = arm_real_reloc_type (globals, r_type);
8859
8860 if ( r_type == R_ARM_GNU_VTENTRY
8861 || r_type == R_ARM_GNU_VTINHERIT)
8862 continue;
8863
8864 bfd_reloc.howto = elf32_arm_howto_from_type (r_type);
8865 howto = bfd_reloc.howto;
8866
8867 h = NULL;
8868 sym = NULL;
8869 sec = NULL;
8870
8871 if (r_symndx < symtab_hdr->sh_info)
8872 {
8873 sym = local_syms + r_symndx;
8874 sym_type = ELF32_ST_TYPE (sym->st_info);
8875 sec = local_sections[r_symndx];
8876
8877 /* An object file might have a reference to a local
8878 undefined symbol. This is a daft object file, but we
8879 should at least do something about it. V4BX & NONE
8880 relocations do not use the symbol and are explicitly
8881 allowed to use the undefined symbol, so allow those. */
8882 if (r_type != R_ARM_V4BX
8883 && r_type != R_ARM_NONE
8884 && bfd_is_und_section (sec)
8885 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
8886 {
8887 if (!info->callbacks->undefined_symbol
8888 (info, bfd_elf_string_from_elf_section
8889 (input_bfd, symtab_hdr->sh_link, sym->st_name),
8890 input_bfd, input_section,
8891 rel->r_offset, TRUE))
8892 return FALSE;
8893 }
8894
8895 if (globals->use_rel)
8896 {
8897 relocation = (sec->output_section->vma
8898 + sec->output_offset
8899 + sym->st_value);
8900 if (!info->relocatable
8901 && (sec->flags & SEC_MERGE)
8902 && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
8903 {
8904 asection *msec;
8905 bfd_vma addend, value;
8906
8907 switch (r_type)
8908 {
8909 case R_ARM_MOVW_ABS_NC:
8910 case R_ARM_MOVT_ABS:
8911 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
8912 addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
8913 addend = (addend ^ 0x8000) - 0x8000;
8914 break;
8915
8916 case R_ARM_THM_MOVW_ABS_NC:
8917 case R_ARM_THM_MOVT_ABS:
8918 value = bfd_get_16 (input_bfd, contents + rel->r_offset)
8919 << 16;
8920 value |= bfd_get_16 (input_bfd,
8921 contents + rel->r_offset + 2);
8922 addend = ((value & 0xf7000) >> 4) | (value & 0xff)
8923 | ((value & 0x04000000) >> 15);
8924 addend = (addend ^ 0x8000) - 0x8000;
8925 break;
8926
8927 default:
8928 if (howto->rightshift
8929 || (howto->src_mask & (howto->src_mask + 1)))
8930 {
8931 (*_bfd_error_handler)
8932 (_("%B(%A+0x%lx): %s relocation against SEC_MERGE section"),
8933 input_bfd, input_section,
8934 (long) rel->r_offset, howto->name);
8935 return FALSE;
8936 }
8937
8938 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
8939
8940 /* Get the (signed) value from the instruction. */
8941 addend = value & howto->src_mask;
8942 if (addend & ((howto->src_mask + 1) >> 1))
8943 {
8944 bfd_signed_vma mask;
8945
8946 mask = -1;
8947 mask &= ~ howto->src_mask;
8948 addend |= mask;
8949 }
8950 break;
8951 }
8952
8953 msec = sec;
8954 addend =
8955 _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
8956 - relocation;
8957 addend += msec->output_section->vma + msec->output_offset;
8958
8959 		  /* Cases here must match those in the preceding
8960 		     switch statement.  */
8961 switch (r_type)
8962 {
8963 case R_ARM_MOVW_ABS_NC:
8964 case R_ARM_MOVT_ABS:
8965 value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
8966 | (addend & 0xfff);
8967 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
8968 break;
8969
8970 case R_ARM_THM_MOVW_ABS_NC:
8971 case R_ARM_THM_MOVT_ABS:
8972 value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
8973 | (addend & 0xff) | ((addend & 0x0800) << 15);
8974 bfd_put_16 (input_bfd, value >> 16,
8975 contents + rel->r_offset);
8976 bfd_put_16 (input_bfd, value,
8977 contents + rel->r_offset + 2);
8978 break;
8979
8980 default:
8981 value = (value & ~ howto->dst_mask)
8982 | (addend & howto->dst_mask);
8983 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
8984 break;
8985 }
8986 }
8987 }
8988 else
8989 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
8990 }
8991 else
8992 {
8993 bfd_boolean warned;
8994
8995 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
8996 r_symndx, symtab_hdr, sym_hashes,
8997 h, sec, relocation,
8998 unresolved_reloc, warned);
8999
9000 sym_type = h->type;
9001 }
9002
9003 if (sec != NULL && elf_discarded_section (sec))
9004 {
9005 /* For relocs against symbols from removed linkonce sections,
9006 or sections discarded by a linker script, we just want the
9007 section contents zeroed. Avoid any special processing. */
9008 _bfd_clear_contents (howto, input_bfd, contents + rel->r_offset);
9009 rel->r_info = 0;
9010 rel->r_addend = 0;
9011 continue;
9012 }
9013
9014 if (info->relocatable)
9015 {
9016 /* This is a relocatable link. We don't have to change
9017 anything, unless the reloc is against a section symbol,
9018 in which case we have to adjust according to where the
9019 section symbol winds up in the output section. */
9020 if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
9021 {
9022 if (globals->use_rel)
9023 arm_add_to_rel (input_bfd, contents + rel->r_offset,
9024 howto, (bfd_signed_vma) sec->output_offset);
9025 else
9026 rel->r_addend += sec->output_offset;
9027 }
9028 continue;
9029 }
9030
9031 if (h != NULL)
9032 name = h->root.root.string;
9033 else
9034 {
9035 name = (bfd_elf_string_from_elf_section
9036 (input_bfd, symtab_hdr->sh_link, sym->st_name));
9037 if (name == NULL || *name == '\0')
9038 name = bfd_section_name (input_bfd, sec);
9039 }
9040
9041 if (r_symndx != 0
9042 && r_type != R_ARM_NONE
9043 && (h == NULL
9044 || h->root.type == bfd_link_hash_defined
9045 || h->root.type == bfd_link_hash_defweak)
9046 && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
9047 {
9048 (*_bfd_error_handler)
9049 ((sym_type == STT_TLS
9050 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
9051 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
9052 input_bfd,
9053 input_section,
9054 (long) rel->r_offset,
9055 howto->name,
9056 name);
9057 }
9058
9059 r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
9060 input_section, contents, rel,
9061 relocation, info, sec, name,
9062 (h ? ELF_ST_TYPE (h->type) :
9063 ELF_ST_TYPE (sym->st_info)), h,
9064 &unresolved_reloc, &error_message);
9065
9066 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
9067 because such sections are not SEC_ALLOC and thus ld.so will
9068 not process them. */
9069 if (unresolved_reloc
9070 && !((input_section->flags & SEC_DEBUGGING) != 0
9071 && h->def_dynamic))
9072 {
9073 (*_bfd_error_handler)
9074 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
9075 input_bfd,
9076 input_section,
9077 (long) rel->r_offset,
9078 howto->name,
9079 h->root.root.string);
9080 return FALSE;
9081 }
9082
9083 if (r != bfd_reloc_ok)
9084 {
9085 switch (r)
9086 {
9087 case bfd_reloc_overflow:
9088 /* If the overflowing reloc was to an undefined symbol,
9089 we have already printed one error message and there
9090 is no point complaining again. */
9091 if ((! h ||
9092 h->root.type != bfd_link_hash_undefined)
9093 && (!((*info->callbacks->reloc_overflow)
9094 (info, (h ? &h->root : NULL), name, howto->name,
9095 (bfd_vma) 0, input_bfd, input_section,
9096 rel->r_offset))))
9097 return FALSE;
9098 break;
9099
9100 case bfd_reloc_undefined:
9101 if (!((*info->callbacks->undefined_symbol)
9102 (info, name, input_bfd, input_section,
9103 rel->r_offset, TRUE)))
9104 return FALSE;
9105 break;
9106
9107 case bfd_reloc_outofrange:
9108 error_message = _("out of range");
9109 goto common_error;
9110
9111 case bfd_reloc_notsupported:
9112 error_message = _("unsupported relocation");
9113 goto common_error;
9114
9115 case bfd_reloc_dangerous:
9116 /* error_message should already be set. */
9117 goto common_error;
9118
9119 default:
9120 error_message = _("unknown error");
9121 /* Fall through. */
9122
9123 common_error:
9124 BFD_ASSERT (error_message != NULL);
9125 if (!((*info->callbacks->reloc_dangerous)
9126 (info, error_message, input_bfd, input_section,
9127 rel->r_offset)))
9128 return FALSE;
9129 break;
9130 }
9131 }
9132 }
9133
9134 return TRUE;
9135 }
9136
9137 /* Add a new unwind edit to the list described by HEAD, TAIL.  If TINDEX is zero,
9138    the edit is added to the start of the list.  (The list must be built in order of
9139 ascending TINDEX: the function's callers are primarily responsible for
9140 maintaining that condition). */
9141
9142 static void
9143 add_unwind_table_edit (arm_unwind_table_edit **head,
9144 arm_unwind_table_edit **tail,
9145 arm_unwind_edit_type type,
9146 asection *linked_section,
9147 unsigned int tindex)
9148 {
9149 arm_unwind_table_edit *new_edit = (arm_unwind_table_edit *)
9150 xmalloc (sizeof (arm_unwind_table_edit));
9151
9152 new_edit->type = type;
9153 new_edit->linked_section = linked_section;
9154 new_edit->index = tindex;
9155
9156 if (tindex > 0)
9157 {
9158 new_edit->next = NULL;
9159
9160 if (*tail)
9161 (*tail)->next = new_edit;
9162
9163 (*tail) = new_edit;
9164
9165 if (!*head)
9166 (*head) = new_edit;
9167 }
9168 else
9169 {
9170 new_edit->next = *head;
9171
9172 if (!*tail)
9173 *tail = new_edit;
9174
9175 *head = new_edit;
9176 }
9177 }
9178
9179 static _arm_elf_section_data *get_arm_elf_section_data (asection *);
9180
9181 /* Increase the size of EXIDX_SEC by ADJUST bytes.  ADJUST may be negative.  */
9182 static void
9183 adjust_exidx_size(asection *exidx_sec, int adjust)
9184 {
9185 asection *out_sec;
9186
9187 if (!exidx_sec->rawsize)
9188 exidx_sec->rawsize = exidx_sec->size;
9189
9190 bfd_set_section_size (exidx_sec->owner, exidx_sec, exidx_sec->size + adjust);
9191 out_sec = exidx_sec->output_section;
9192 /* Adjust size of output section. */
9193   bfd_set_section_size (out_sec->owner, out_sec, out_sec->size + adjust);
9194 }
9195
9196 /* Insert an EXIDX_CANTUNWIND marker at the end of a section. */
9197 static void
9198 insert_cantunwind_after(asection *text_sec, asection *exidx_sec)
9199 {
9200 struct _arm_elf_section_data *exidx_arm_data;
9201
9202 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
9203 add_unwind_table_edit (
9204 &exidx_arm_data->u.exidx.unwind_edit_list,
9205 &exidx_arm_data->u.exidx.unwind_edit_tail,
9206 INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
9207
9208 adjust_exidx_size(exidx_sec, 8);
9209 }
9210
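#if 0
/* Illustrative sketch only (not built): each .ARM.exidx entry is two
   words, and the scan in elf32_arm_fix_exidx_coverage below classifies
   an entry purely by its second word.  This helper mirrors that test;
   its name and the 0/1/2 return values (matching the local
   `unwind_type' variable used below) are invented for the example.  */

static int
example_exidx_unwind_type (unsigned int second_word)
{
  if (second_word == 1)
    return 0;	/* EXIDX_CANTUNWIND marker.  */
  if ((second_word & 0x80000000) != 0)
    return 1;	/* Unwind opcodes inlined in the index entry.  */
  return 2;	/* Normal entry (typically a reference into .ARM.extab).  */
}
#endif
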
9211 /* Scan .ARM.exidx tables, and create a list describing edits which should be
9212 made to those tables, such that:
9213
9214 1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
9215 2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
9216 codes which have been inlined into the index).
9217
9218 The edits are applied when the tables are written
9219 (in elf32_arm_write_section).
9220 */
9221
9222 bfd_boolean
9223 elf32_arm_fix_exidx_coverage (asection **text_section_order,
9224 unsigned int num_text_sections,
9225 struct bfd_link_info *info)
9226 {
9227 bfd *inp;
9228 unsigned int last_second_word = 0, i;
9229 asection *last_exidx_sec = NULL;
9230 asection *last_text_sec = NULL;
9231 int last_unwind_type = -1;
9232
9233 /* Walk over all EXIDX sections, and create backlinks from the corresponding
9234 text sections. */
9235 for (inp = info->input_bfds; inp != NULL; inp = inp->link_next)
9236 {
9237 asection *sec;
9238
9239 for (sec = inp->sections; sec != NULL; sec = sec->next)
9240 {
9241 struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
9242 Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;
9243
9244 if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
9245 continue;
9246
9247 if (elf_sec->linked_to)
9248 {
9249 Elf_Internal_Shdr *linked_hdr
9250 = &elf_section_data (elf_sec->linked_to)->this_hdr;
9251 struct _arm_elf_section_data *linked_sec_arm_data
9252 = get_arm_elf_section_data (linked_hdr->bfd_section);
9253
9254 if (linked_sec_arm_data == NULL)
9255 continue;
9256
9257 /* Link this .ARM.exidx section back from the text section it
9258 describes. */
9259 linked_sec_arm_data->u.text.arm_exidx_sec = sec;
9260 }
9261 }
9262 }
9263
9264 /* Walk all text sections in order of increasing VMA. Eliminate duplicate
9265 index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
9266 and add EXIDX_CANTUNWIND entries for sections with no unwind table data. */
9267
9268 for (i = 0; i < num_text_sections; i++)
9269 {
9270 asection *sec = text_section_order[i];
9271 asection *exidx_sec;
9272 struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
9273 struct _arm_elf_section_data *exidx_arm_data;
9274 bfd_byte *contents = NULL;
9275 int deleted_exidx_bytes = 0;
9276 bfd_vma j;
9277 arm_unwind_table_edit *unwind_edit_head = NULL;
9278 arm_unwind_table_edit *unwind_edit_tail = NULL;
9279 Elf_Internal_Shdr *hdr;
9280 bfd *ibfd;
9281
9282 if (arm_data == NULL)
9283 continue;
9284
9285 exidx_sec = arm_data->u.text.arm_exidx_sec;
9286 if (exidx_sec == NULL)
9287 {
9288 /* Section has no unwind data. */
9289 if (last_unwind_type == 0 || !last_exidx_sec)
9290 continue;
9291
9292 /* Ignore zero sized sections. */
9293 if (sec->size == 0)
9294 continue;
9295
9296 insert_cantunwind_after(last_text_sec, last_exidx_sec);
9297 last_unwind_type = 0;
9298 continue;
9299 }
9300
9301 /* Skip /DISCARD/ sections. */
9302 if (bfd_is_abs_section (exidx_sec->output_section))
9303 continue;
9304
9305 hdr = &elf_section_data (exidx_sec)->this_hdr;
9306 if (hdr->sh_type != SHT_ARM_EXIDX)
9307 continue;
9308
9309 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
9310 if (exidx_arm_data == NULL)
9311 continue;
9312
9313 ibfd = exidx_sec->owner;
9314
9315 if (hdr->contents != NULL)
9316 contents = hdr->contents;
9317 else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
9318 /* An error? */
9319 continue;
9320
9321 for (j = 0; j < hdr->sh_size; j += 8)
9322 {
9323 unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
9324 int unwind_type;
9325 int elide = 0;
9326
9327 /* An EXIDX_CANTUNWIND entry. */
9328 if (second_word == 1)
9329 {
9330 if (last_unwind_type == 0)
9331 elide = 1;
9332 unwind_type = 0;
9333 }
9334 /* Inlined unwinding data. Merge if equal to previous. */
9335 else if ((second_word & 0x80000000) != 0)
9336 {
9337 if (last_second_word == second_word && last_unwind_type == 1)
9338 elide = 1;
9339 unwind_type = 1;
9340 last_second_word = second_word;
9341 }
9342 /* Normal table entry. In theory we could merge these too,
9343 but duplicate entries are likely to be much less common. */
9344 else
9345 unwind_type = 2;
9346
9347 if (elide)
9348 {
9349 add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
9350 DELETE_EXIDX_ENTRY, NULL, j / 8);
9351
9352 deleted_exidx_bytes += 8;
9353 }
9354
9355 last_unwind_type = unwind_type;
9356 }
9357
9358 /* Free contents if we allocated it ourselves. */
9359 if (contents != hdr->contents)
9360 free (contents);
9361
9362 /* Record edits to be applied later (in elf32_arm_write_section). */
9363 exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
9364 exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;
9365
9366 if (deleted_exidx_bytes > 0)
9367 adjust_exidx_size(exidx_sec, -deleted_exidx_bytes);
9368
9369 last_exidx_sec = exidx_sec;
9370 last_text_sec = sec;
9371 }
9372
9373 /* Add terminating CANTUNWIND entry. */
9374 if (last_exidx_sec && last_unwind_type != 0)
9375 insert_cantunwind_after(last_text_sec, last_exidx_sec);
9376
9377 return TRUE;
9378 }
9379
9380 static bfd_boolean
9381 elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
9382 bfd *ibfd, const char *name)
9383 {
9384 asection *sec, *osec;
9385
9386 sec = bfd_get_section_by_name (ibfd, name);
9387 if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
9388 return TRUE;
9389
9390 osec = sec->output_section;
9391 if (elf32_arm_write_section (obfd, info, sec, sec->contents))
9392 return TRUE;
9393
9394 if (! bfd_set_section_contents (obfd, osec, sec->contents,
9395 sec->output_offset, sec->size))
9396 return FALSE;
9397
9398 return TRUE;
9399 }
9400
9401 static bfd_boolean
9402 elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
9403 {
9404 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
9405 asection *sec, *osec;
9406
9407 if (globals == NULL)
9408 return FALSE;
9409
9410 /* Invoke the regular ELF backend linker to do all the work. */
9411 if (!bfd_elf_final_link (abfd, info))
9412 return FALSE;
9413
9414 /* Process stub sections (e.g. BE8 encoding, ...). */
9415 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
9416 int i;
9417 for (i = 0; i < htab->top_id; i++) {
9418 sec = htab->stub_group[i].stub_sec;
9419 if (sec) {
9420 osec = sec->output_section;
9421 elf32_arm_write_section (abfd, info, sec, sec->contents);
9422 if (! bfd_set_section_contents (abfd, osec, sec->contents,
9423 sec->output_offset, sec->size))
9424 return FALSE;
9425 }
9426 }
9427
9428 /* Write out any glue sections now that we have created all the
9429 stubs. */
9430 if (globals->bfd_of_glue_owner != NULL)
9431 {
9432 if (! elf32_arm_output_glue_section (info, abfd,
9433 globals->bfd_of_glue_owner,
9434 ARM2THUMB_GLUE_SECTION_NAME))
9435 return FALSE;
9436
9437 if (! elf32_arm_output_glue_section (info, abfd,
9438 globals->bfd_of_glue_owner,
9439 THUMB2ARM_GLUE_SECTION_NAME))
9440 return FALSE;
9441
9442 if (! elf32_arm_output_glue_section (info, abfd,
9443 globals->bfd_of_glue_owner,
9444 VFP11_ERRATUM_VENEER_SECTION_NAME))
9445 return FALSE;
9446
9447 if (! elf32_arm_output_glue_section (info, abfd,
9448 globals->bfd_of_glue_owner,
9449 ARM_BX_GLUE_SECTION_NAME))
9450 return FALSE;
9451 }
9452
9453 return TRUE;
9454 }
9455
9456 /* Set the right machine number. */
9457
9458 static bfd_boolean
9459 elf32_arm_object_p (bfd *abfd)
9460 {
9461 unsigned int mach;
9462
9463 mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);
9464
9465 if (mach != bfd_mach_arm_unknown)
9466 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
9467
9468 else if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT)
9469 bfd_default_set_arch_mach (abfd, bfd_arch_arm, bfd_mach_arm_ep9312);
9470
9471 else
9472 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
9473
9474 return TRUE;
9475 }
9476
9477 /* Function to keep ARM specific flags in the ELF header. */
9478
9479 static bfd_boolean
9480 elf32_arm_set_private_flags (bfd *abfd, flagword flags)
9481 {
9482 if (elf_flags_init (abfd)
9483 && elf_elfheader (abfd)->e_flags != flags)
9484 {
9485 if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
9486 {
9487 if (flags & EF_ARM_INTERWORK)
9488 (*_bfd_error_handler)
9489 (_("Warning: Not setting interworking flag of %B since it has already been specified as non-interworking"),
9490 abfd);
9491 else
9492 _bfd_error_handler
9493 (_("Warning: Clearing the interworking flag of %B due to outside request"),
9494 abfd);
9495 }
9496 }
9497 else
9498 {
9499 elf_elfheader (abfd)->e_flags = flags;
9500 elf_flags_init (abfd) = TRUE;
9501 }
9502
9503 return TRUE;
9504 }
9505
9506 /* Copy backend specific data from one object module to another. */
9507
9508 static bfd_boolean
9509 elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
9510 {
9511 flagword in_flags;
9512 flagword out_flags;
9513
9514 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
9515 return TRUE;
9516
9517 in_flags = elf_elfheader (ibfd)->e_flags;
9518 out_flags = elf_elfheader (obfd)->e_flags;
9519
9520 if (elf_flags_init (obfd)
9521 && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
9522 && in_flags != out_flags)
9523 {
9524 /* Cannot mix APCS26 and APCS32 code. */
9525 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
9526 return FALSE;
9527
9528 /* Cannot mix float APCS and non-float APCS code. */
9529 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
9530 return FALSE;
9531
9532 /* If the src and dest have different interworking flags
9533 then turn off the interworking bit. */
9534 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
9535 {
9536 if (out_flags & EF_ARM_INTERWORK)
9537 _bfd_error_handler
9538 (_("Warning: Clearing the interworking flag of %B because non-interworking code in %B has been linked with it"),
9539 obfd, ibfd);
9540
9541 in_flags &= ~EF_ARM_INTERWORK;
9542 }
9543
9544 /* Likewise for PIC, though don't warn for this case. */
9545 if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
9546 in_flags &= ~EF_ARM_PIC;
9547 }
9548
9549 elf_elfheader (obfd)->e_flags = in_flags;
9550 elf_flags_init (obfd) = TRUE;
9551
9552 /* Also copy the EI_OSABI field. */
9553 elf_elfheader (obfd)->e_ident[EI_OSABI] =
9554 elf_elfheader (ibfd)->e_ident[EI_OSABI];
9555
9556 /* Copy object attributes. */
9557 _bfd_elf_copy_obj_attributes (ibfd, obfd);
9558
9559 return TRUE;
9560 }
9561
9562 /* Values for Tag_ABI_PCS_R9_use. */
9563 enum
9564 {
9565 AEABI_R9_V6,
9566 AEABI_R9_SB,
9567 AEABI_R9_TLS,
9568 AEABI_R9_unused
9569 };
9570
9571 /* Values for Tag_ABI_PCS_RW_data. */
9572 enum
9573 {
9574 AEABI_PCS_RW_data_absolute,
9575 AEABI_PCS_RW_data_PCrel,
9576 AEABI_PCS_RW_data_SBrel,
9577 AEABI_PCS_RW_data_unused
9578 };
9579
9580 /* Values for Tag_ABI_enum_size. */
9581 enum
9582 {
9583 AEABI_enum_unused,
9584 AEABI_enum_short,
9585 AEABI_enum_wide,
9586 AEABI_enum_forced_wide
9587 };
9588
9589 /* Determine whether an object attribute tag takes an integer, a
9590 string or both. */
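/* For example (assuming the usual AEABI tag numbering): Tag_CPU_arch (6)
   takes an integer, Tag_CPU_name (5) takes a string, and Tag_compatibility
   (32) takes both. */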
9591
9592 static int
9593 elf32_arm_obj_attrs_arg_type (int tag)
9594 {
9595 if (tag == Tag_compatibility)
9596 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
9597 else if (tag == Tag_nodefaults)
9598 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
9599 else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
9600 return ATTR_TYPE_FLAG_STR_VAL;
9601 else if (tag < 32)
9602 return ATTR_TYPE_FLAG_INT_VAL;
9603 else
9604 return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
9605 }
9606
9607 /* The ABI defines that Tag_conformance should be emitted first, and that
9608 Tag_nodefaults should be second (if either is defined). This sets those
9609 two positions, and bumps up the position of all the remaining tags to
9610 compensate. */
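/* As a worked example (assuming the usual tag values Tag_nodefaults == 64
   and Tag_conformance == 67): position 4 yields Tag_conformance, position 5
   yields Tag_nodefaults, positions 6..65 yield tags 4..63, and positions 66
   and 67 yield tags 65 and 66, so every tag is still emitted exactly once. */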
9611 static int
9612 elf32_arm_obj_attrs_order (int num)
9613 {
9614 if (num == 4)
9615 return Tag_conformance;
9616 if (num == 5)
9617 return Tag_nodefaults;
9618 if ((num - 2) < Tag_nodefaults)
9619 return num - 2;
9620 if ((num - 1) < Tag_conformance)
9621 return num - 1;
9622 return num;
9623 }
9624
9625 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
9626 Returns -1 if no architecture could be read. */
9627
9628 static int
9629 get_secondary_compatible_arch (bfd *abfd)
9630 {
9631 obj_attribute *attr =
9632 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
9633
9634 /* Note: the tag and its argument below are uleb128 values, though
9635 currently-defined values fit in one byte for each. */
9636 if (attr->s
9637 && attr->s[0] == Tag_CPU_arch
9638 && (attr->s[1] & 128) != 128
9639 && attr->s[2] == 0)
9640 return attr->s[1];
9641
9642 /* This tag is "safely ignorable", so don't complain if it looks funny. */
9643 return -1;
9644 }
9645
9646 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
9647 The tag is removed if ARCH is -1. */
9648
9649 static void
9650 set_secondary_compatible_arch (bfd *abfd, int arch)
9651 {
9652 obj_attribute *attr =
9653 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
9654
9655 if (arch == -1)
9656 {
9657 attr->s = NULL;
9658 return;
9659 }
9660
9661 /* Note: the tag and its argument below are uleb128 values, though
9662 currently-defined values fit in one byte for each. */
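  /* For example, arch == TAG_CPU_ARCH_V6_M is recorded below as the three
     bytes { Tag_CPU_arch, TAG_CPU_ARCH_V6_M, 0 }: the tag, its one-byte
     argument, and a terminating NUL. */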
9663 if (!attr->s)
9664 attr->s = (char *) bfd_alloc (abfd, 3);
9665 attr->s[0] = Tag_CPU_arch;
9666 attr->s[1] = arch;
9667 attr->s[2] = '\0';
9668 }
9669
9670 /* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
9671 into account. */
9672
9673 static int
9674 tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
9675 int newtag, int secondary_compat)
9676 {
9677 #define T(X) TAG_CPU_ARCH_##X
9678 int tagl, tagh, result;
9679 const int v6t2[] =
9680 {
9681 T(V6T2), /* PRE_V4. */
9682 T(V6T2), /* V4. */
9683 T(V6T2), /* V4T. */
9684 T(V6T2), /* V5T. */
9685 T(V6T2), /* V5TE. */
9686 T(V6T2), /* V5TEJ. */
9687 T(V6T2), /* V6. */
9688 T(V7), /* V6KZ. */
9689 T(V6T2) /* V6T2. */
9690 };
9691 const int v6k[] =
9692 {
9693 T(V6K), /* PRE_V4. */
9694 T(V6K), /* V4. */
9695 T(V6K), /* V4T. */
9696 T(V6K), /* V5T. */
9697 T(V6K), /* V5TE. */
9698 T(V6K), /* V5TEJ. */
9699 T(V6K), /* V6. */
9700 T(V6KZ), /* V6KZ. */
9701 T(V7), /* V6T2. */
9702 T(V6K) /* V6K. */
9703 };
9704 const int v7[] =
9705 {
9706 T(V7), /* PRE_V4. */
9707 T(V7), /* V4. */
9708 T(V7), /* V4T. */
9709 T(V7), /* V5T. */
9710 T(V7), /* V5TE. */
9711 T(V7), /* V5TEJ. */
9712 T(V7), /* V6. */
9713 T(V7), /* V6KZ. */
9714 T(V7), /* V6T2. */
9715 T(V7), /* V6K. */
9716 T(V7) /* V7. */
9717 };
9718 const int v6_m[] =
9719 {
9720 -1, /* PRE_V4. */
9721 -1, /* V4. */
9722 T(V6K), /* V4T. */
9723 T(V6K), /* V5T. */
9724 T(V6K), /* V5TE. */
9725 T(V6K), /* V5TEJ. */
9726 T(V6K), /* V6. */
9727 T(V6KZ), /* V6KZ. */
9728 T(V7), /* V6T2. */
9729 T(V6K), /* V6K. */
9730 T(V7), /* V7. */
9731 T(V6_M) /* V6_M. */
9732 };
9733 const int v6s_m[] =
9734 {
9735 -1, /* PRE_V4. */
9736 -1, /* V4. */
9737 T(V6K), /* V4T. */
9738 T(V6K), /* V5T. */
9739 T(V6K), /* V5TE. */
9740 T(V6K), /* V5TEJ. */
9741 T(V6K), /* V6. */
9742 T(V6KZ), /* V6KZ. */
9743 T(V7), /* V6T2. */
9744 T(V6K), /* V6K. */
9745 T(V7), /* V7. */
9746 T(V6S_M), /* V6_M. */
9747 T(V6S_M) /* V6S_M. */
9748 };
9749 const int v7e_m[] =
9750 {
9751 -1, /* PRE_V4. */
9752 -1, /* V4. */
9753 T(V7E_M), /* V4T. */
9754 T(V7E_M), /* V5T. */
9755 T(V7E_M), /* V5TE. */
9756 T(V7E_M), /* V5TEJ. */
9757 T(V7E_M), /* V6. */
9758 T(V7E_M), /* V6KZ. */
9759 T(V7E_M), /* V6T2. */
9760 T(V7E_M), /* V6K. */
9761 T(V7E_M), /* V7. */
9762 T(V7E_M), /* V6_M. */
9763 T(V7E_M), /* V6S_M. */
9764 T(V7E_M) /* V7E_M. */
9765 };
9766 const int v4t_plus_v6_m[] =
9767 {
9768 -1, /* PRE_V4. */
9769 -1, /* V4. */
9770 T(V4T), /* V4T. */
9771 T(V5T), /* V5T. */
9772 T(V5TE), /* V5TE. */
9773 T(V5TEJ), /* V5TEJ. */
9774 T(V6), /* V6. */
9775 T(V6KZ), /* V6KZ. */
9776 T(V6T2), /* V6T2. */
9777 T(V6K), /* V6K. */
9778 T(V7), /* V7. */
9779 T(V6_M), /* V6_M. */
9780 T(V6S_M), /* V6S_M. */
9781 T(V7E_M), /* V7E_M. */
9782 T(V4T_PLUS_V6_M) /* V4T plus V6_M. */
9783 };
9784 const int *comb[] =
9785 {
9786 v6t2,
9787 v6k,
9788 v7,
9789 v6_m,
9790 v6s_m,
9791 v7e_m,
9792 /* Pseudo-architecture. */
9793 v4t_plus_v6_m
9794 };
9795
9796 /* Check we've not got a higher architecture than we know about. */
9797
9798 if (oldtag > MAX_TAG_CPU_ARCH || newtag > MAX_TAG_CPU_ARCH)
9799 {
9800 _bfd_error_handler (_("error: %B: Unknown CPU architecture"), ibfd);
9801 return -1;
9802 }
9803
9804 /* Override old tag if we have a Tag_also_compatible_with on the output. */
9805
9806 if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
9807 || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
9808 oldtag = T(V4T_PLUS_V6_M);
9809
9810 /* And override the new tag if we have a Tag_also_compatible_with on the
9811 input. */
9812
9813 if ((newtag == T(V6_M) && secondary_compat == T(V4T))
9814 || (newtag == T(V4T) && secondary_compat == T(V6_M)))
9815 newtag = T(V4T_PLUS_V6_M);
9816
9817 tagl = (oldtag < newtag) ? oldtag : newtag;
9818 result = tagh = (oldtag > newtag) ? oldtag : newtag;
9819
9820 /* Architectures up to and including V6KZ add features monotonically. */
9821 if (tagh <= TAG_CPU_ARCH_V6KZ)
9822 return result;
9823
9824 result = comb[tagh - T(V6T2)][tagl];
9825
9826 /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
9827 as the canonical version. */
9828 if (result == T(V4T_PLUS_V6_M))
9829 {
9830 result = T(V4T);
9831 *secondary_compat_out = T(V6_M);
9832 }
9833 else
9834 *secondary_compat_out = -1;
9835
9836 if (result == -1)
9837 {
9838 _bfd_error_handler (_("error: %B: Conflicting CPU architectures %d/%d"),
9839 ibfd, oldtag, newtag);
9840 return -1;
9841 }
9842
9843 return result;
9844 #undef T
9845 }
9846
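#if 0
/* Illustrative sketch only (not built): two example combinations using the
   TAG_CPU_ARCH_* constants referenced above.  Architectures up to and
   including v6KZ add features monotonically, so v5TE merged with v6 simply
   yields v6; above that the tables are consulted, so v5TE merged with v6K
   yields v6K.  The function and variable names are invented for the
   example.  */

static void
example_combine_cpu_arch (bfd *ibfd)
{
  int secondary = -1;
  int r1, r2;

  r1 = tag_cpu_arch_combine (ibfd, TAG_CPU_ARCH_V5TE, &secondary,
                             TAG_CPU_ARCH_V6, -1);
  r2 = tag_cpu_arch_combine (ibfd, TAG_CPU_ARCH_V5TE, &secondary,
                             TAG_CPU_ARCH_V6K, -1);

  /* r1 == TAG_CPU_ARCH_V6, r2 == TAG_CPU_ARCH_V6K.  */
  (void) r1;
  (void) r2;
}
#endif
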
9847 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
9848 are conflicting attributes. */
9849
9850 static bfd_boolean
9851 elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
9852 {
9853 obj_attribute *in_attr;
9854 obj_attribute *out_attr;
9855 obj_attribute_list *in_list;
9856 obj_attribute_list *out_list;
9857 obj_attribute_list **out_listp;
9858 /* Some tags have 0 = don't care, 1 = strong requirement,
9859 2 = weak requirement. */
9860 static const int order_021[3] = {0, 2, 1};
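  /* order_021 maps an attribute value to its precedence, so a strong
     requirement (1) outranks a weak requirement (2), which outranks
     "don't care" (0); it is used for Tag_ABI_FP_denormal and
     Tag_ABI_PCS_GOT_use below. */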
9861 int i;
9862 bfd_boolean result = TRUE;
9863
9864 /* Skip the linker stubs file. This preserves previous behavior
9865 of accepting unknown attributes in the first input file - but
9866 is that a bug? */
9867 if (ibfd->flags & BFD_LINKER_CREATED)
9868 return TRUE;
9869
9870 if (!elf_known_obj_attributes_proc (obfd)[0].i)
9871 {
9872 /* This is the first object. Copy the attributes. */
9873 _bfd_elf_copy_obj_attributes (ibfd, obfd);
9874
9875 out_attr = elf_known_obj_attributes_proc (obfd);
9876
9877 /* Use the Tag_null value to indicate the attributes have been
9878 initialized. */
9879 out_attr[0].i = 1;
9880
9881 /* We do not output objects with Tag_MPextension_use_legacy - we move
9882 the attribute's value to Tag_MPextension_use. */
9883 if (out_attr[Tag_MPextension_use_legacy].i != 0)
9884 {
9885 if (out_attr[Tag_MPextension_use].i != 0
9886 && out_attr[Tag_MPextension_use_legacy].i
9887 != out_attr[Tag_MPextension_use].i)
9888 {
9889 _bfd_error_handler
9890 (_("Error: %B has both the current and legacy "
9891 "Tag_MPextension_use attributes"), ibfd);
9892 result = FALSE;
9893 }
9894
9895 out_attr[Tag_MPextension_use] =
9896 out_attr[Tag_MPextension_use_legacy];
9897 out_attr[Tag_MPextension_use_legacy].type = 0;
9898 out_attr[Tag_MPextension_use_legacy].i = 0;
9899 }
9900
9901 return result;
9902 }
9903
9904 in_attr = elf_known_obj_attributes_proc (ibfd);
9905 out_attr = elf_known_obj_attributes_proc (obfd);
9906 /* This needs to happen before Tag_ABI_FP_number_model is merged. */
9907 if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i)
9908 {
9909 /* Ignore mismatches if the object doesn't use floating point. */
9910 if (out_attr[Tag_ABI_FP_number_model].i == 0)
9911 out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i;
9912 else if (in_attr[Tag_ABI_FP_number_model].i != 0)
9913 {
9914 _bfd_error_handler
9915 (_("error: %B uses VFP register arguments, %B does not"),
9916 in_attr[Tag_ABI_VFP_args].i ? ibfd : obfd,
9917 in_attr[Tag_ABI_VFP_args].i ? obfd : ibfd);
9918 result = FALSE;
9919 }
9920 }
9921
9922 for (i = 4; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
9923 {
9924 /* Merge this attribute with existing attributes. */
9925 switch (i)
9926 {
9927 case Tag_CPU_raw_name:
9928 case Tag_CPU_name:
9929 /* These are merged after Tag_CPU_arch. */
9930 break;
9931
9932 case Tag_ABI_optimization_goals:
9933 case Tag_ABI_FP_optimization_goals:
9934 /* Use the first value seen. */
9935 break;
9936
9937 case Tag_CPU_arch:
9938 {
9939 int secondary_compat = -1, secondary_compat_out = -1;
9940 unsigned int saved_out_attr = out_attr[i].i;
9941 static const char *name_table[] = {
9942 /* These aren't real CPU names, but we can't guess
9943 that from the architecture version alone. */
9944 "Pre v4",
9945 "ARM v4",
9946 "ARM v4T",
9947 "ARM v5T",
9948 "ARM v5TE",
9949 "ARM v5TEJ",
9950 "ARM v6",
9951 "ARM v6KZ",
9952 "ARM v6T2",
9953 "ARM v6K",
9954 "ARM v7",
9955 "ARM v6-M",
9956 "ARM v6S-M"
9957 };
9958
9959 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
9960 secondary_compat = get_secondary_compatible_arch (ibfd);
9961 secondary_compat_out = get_secondary_compatible_arch (obfd);
9962 out_attr[i].i = tag_cpu_arch_combine (ibfd, out_attr[i].i,
9963 &secondary_compat_out,
9964 in_attr[i].i,
9965 secondary_compat);
9966 set_secondary_compatible_arch (obfd, secondary_compat_out);
9967
9968 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
9969 if (out_attr[i].i == saved_out_attr)
9970 ; /* Leave the names alone. */
9971 else if (out_attr[i].i == in_attr[i].i)
9972 {
9973 /* The output architecture has been changed to match the
9974 input architecture. Use the input names. */
9975 out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
9976 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
9977 : NULL;
9978 out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
9979 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
9980 : NULL;
9981 }
9982 else
9983 {
9984 out_attr[Tag_CPU_name].s = NULL;
9985 out_attr[Tag_CPU_raw_name].s = NULL;
9986 }
9987
9988 /* If we still don't have a value for Tag_CPU_name,
9989 make one up now. Tag_CPU_raw_name remains blank. */
9990 if (out_attr[Tag_CPU_name].s == NULL
9991 && out_attr[i].i < ARRAY_SIZE (name_table))
9992 out_attr[Tag_CPU_name].s =
9993 _bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
9994 }
9995 break;
9996
9997 case Tag_ARM_ISA_use:
9998 case Tag_THUMB_ISA_use:
9999 case Tag_WMMX_arch:
10000 case Tag_Advanced_SIMD_arch:
10001 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */
10002 case Tag_ABI_FP_rounding:
10003 case Tag_ABI_FP_exceptions:
10004 case Tag_ABI_FP_user_exceptions:
10005 case Tag_ABI_FP_number_model:
10006 case Tag_VFP_HP_extension:
10007 case Tag_CPU_unaligned_access:
10008 case Tag_T2EE_use:
10009 case Tag_Virtualization_use:
10010 case Tag_MPextension_use:
10011 /* Use the largest value specified. */
10012 if (in_attr[i].i > out_attr[i].i)
10013 out_attr[i].i = in_attr[i].i;
10014 break;
10015
10016 case Tag_ABI_align8_preserved:
10017 case Tag_ABI_PCS_RO_data:
10018 /* Use the smallest value specified. */
10019 if (in_attr[i].i < out_attr[i].i)
10020 out_attr[i].i = in_attr[i].i;
10021 break;
10022
10023 case Tag_ABI_align8_needed:
10024 if ((in_attr[i].i > 0 || out_attr[i].i > 0)
10025 && (in_attr[Tag_ABI_align8_preserved].i == 0
10026 || out_attr[Tag_ABI_align8_preserved].i == 0))
10027 {
10028 /* This error message should be enabled once all non-conformant
10029 binaries in the toolchain have had the attributes set
10030 properly.
10031 _bfd_error_handler
10032 (_("error: %B: 8-byte data alignment conflicts with %B"),
10033 obfd, ibfd);
10034 result = FALSE; */
10035 }
10036 /* Fall through. */
10037 case Tag_ABI_FP_denormal:
10038 case Tag_ABI_PCS_GOT_use:
10039 /* Use the "greatest" from the sequence 0, 2, 1, or the largest
10040 value if greater than 2 (for future-proofing). */
10041 if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
10042 || (in_attr[i].i <= 2 && out_attr[i].i <= 2
10043 && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
10044 out_attr[i].i = in_attr[i].i;
10045 break;
10046
10047
10048 case Tag_CPU_arch_profile:
10049 if (out_attr[i].i != in_attr[i].i)
10050 {
10051 /* 0 will merge with anything.
10052 'A' and 'S' merge to 'A'.
10053 'R' and 'S' merge to 'R'.
10054 'M' and 'A|R|S' is an error. */
10055 if (out_attr[i].i == 0
10056 || (out_attr[i].i == 'S'
10057 && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
10058 out_attr[i].i = in_attr[i].i;
10059 else if (in_attr[i].i == 0
10060 || (in_attr[i].i == 'S'
10061 && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
10062 ; /* Do nothing. */
10063 else
10064 {
10065 _bfd_error_handler
10066 (_("error: %B: Conflicting architecture profiles %c/%c"),
10067 ibfd,
10068 in_attr[i].i ? in_attr[i].i : '0',
10069 out_attr[i].i ? out_attr[i].i : '0');
10070 result = FALSE;
10071 }
10072 }
10073 break;
10074 case Tag_VFP_arch:
10075 {
10076 static const struct
10077 {
10078 int ver;
10079 int regs;
10080 } vfp_versions[7] =
10081 {
10082 {0, 0},
10083 {1, 16},
10084 {2, 16},
10085 {3, 32},
10086 {3, 16},
10087 {4, 32},
10088 {4, 16}
10089 };
10090 int ver;
10091 int regs;
10092 int newval;
10093
10094 /* Values greater than 6 aren't defined, so just pick the
10095 biggest. */
10096 if (in_attr[i].i > 6 && in_attr[i].i > out_attr[i].i)
10097 {
10098 out_attr[i] = in_attr[i];
10099 break;
10100 }
10101 /* The output uses the superset of input features
10102 (ISA version) and registers. */
10103 ver = vfp_versions[in_attr[i].i].ver;
10104 if (ver < vfp_versions[out_attr[i].i].ver)
10105 ver = vfp_versions[out_attr[i].i].ver;
10106 regs = vfp_versions[in_attr[i].i].regs;
10107 if (regs < vfp_versions[out_attr[i].i].regs)
10108 regs = vfp_versions[out_attr[i].i].regs;
10109 /* This assumes all possible supersets are also valid
10110 options. */
10111 for (newval = 6; newval > 0; newval--)
10112 {
10113 if (regs == vfp_versions[newval].regs
10114 && ver == vfp_versions[newval].ver)
10115 break;
10116 }
10117 out_attr[i].i = newval;
10118 }
10119 break;
10120 case Tag_PCS_config:
10121 if (out_attr[i].i == 0)
10122 out_attr[i].i = in_attr[i].i;
10123 else if (in_attr[i].i != 0 && out_attr[i].i != 0)
10124 {
10125 /* It's sometimes ok to mix different configs, so this is only
10126 a warning. */
10127 _bfd_error_handler
10128 (_("Warning: %B: Conflicting platform configuration"), ibfd);
10129 }
10130 break;
10131 case Tag_ABI_PCS_R9_use:
10132 if (in_attr[i].i != out_attr[i].i
10133 && out_attr[i].i != AEABI_R9_unused
10134 && in_attr[i].i != AEABI_R9_unused)
10135 {
10136 _bfd_error_handler
10137 (_("error: %B: Conflicting use of R9"), ibfd);
10138 result = FALSE;
10139 }
10140 if (out_attr[i].i == AEABI_R9_unused)
10141 out_attr[i].i = in_attr[i].i;
10142 break;
10143 case Tag_ABI_PCS_RW_data:
10144 if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
10145 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
10146 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
10147 {
10148 _bfd_error_handler
10149 (_("error: %B: SB relative addressing conflicts with use of R9"),
10150 ibfd);
10151 result = FALSE;
10152 }
10153 /* Use the smallest value specified. */
10154 if (in_attr[i].i < out_attr[i].i)
10155 out_attr[i].i = in_attr[i].i;
10156 break;
10157 case Tag_ABI_PCS_wchar_t:
10158 if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
10159 && !elf_arm_tdata (obfd)->no_wchar_size_warning)
10160 {
10161 _bfd_error_handler
10162 (_("warning: %B uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
10163 ibfd, in_attr[i].i, out_attr[i].i);
10164 }
10165 else if (in_attr[i].i && !out_attr[i].i)
10166 out_attr[i].i = in_attr[i].i;
10167 break;
10168 case Tag_ABI_enum_size:
10169 if (in_attr[i].i != AEABI_enum_unused)
10170 {
10171 if (out_attr[i].i == AEABI_enum_unused
10172 || out_attr[i].i == AEABI_enum_forced_wide)
10173 {
10174 /* The existing object is compatible with anything.
10175 Use whatever requirements the new object has. */
10176 out_attr[i].i = in_attr[i].i;
10177 }
10178 else if (in_attr[i].i != AEABI_enum_forced_wide
10179 && out_attr[i].i != in_attr[i].i
10180 && !elf_arm_tdata (obfd)->no_enum_size_warning)
10181 {
10182 static const char *aeabi_enum_names[] =
10183 { "", "variable-size", "32-bit", "" };
10184 const char *in_name =
10185 in_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
10186 ? aeabi_enum_names[in_attr[i].i]
10187 : "<unknown>";
10188 const char *out_name =
10189 out_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
10190 ? aeabi_enum_names[out_attr[i].i]
10191 : "<unknown>";
10192 _bfd_error_handler
10193 (_("warning: %B uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
10194 ibfd, in_name, out_name);
10195 }
10196 }
10197 break;
10198 case Tag_ABI_VFP_args:
10199 /* Already done. */
10200 break;
10201 case Tag_ABI_WMMX_args:
10202 if (in_attr[i].i != out_attr[i].i)
10203 {
10204 _bfd_error_handler
10205 (_("error: %B uses iWMMXt register arguments, %B does not"),
10206 ibfd, obfd);
10207 result = FALSE;
10208 }
10209 break;
10210 case Tag_compatibility:
10211 /* Merged in target-independent code. */
10212 break;
10213 case Tag_ABI_HardFP_use:
10214 /* 1 (SP) and 2 (DP) conflict, so combine to 3 (SP & DP). */
10215 if ((in_attr[i].i == 1 && out_attr[i].i == 2)
10216 || (in_attr[i].i == 2 && out_attr[i].i == 1))
10217 out_attr[i].i = 3;
10218 else if (in_attr[i].i > out_attr[i].i)
10219 out_attr[i].i = in_attr[i].i;
10220 break;
10221 case Tag_ABI_FP_16bit_format:
10222 if (in_attr[i].i != 0 && out_attr[i].i != 0)
10223 {
10224 if (in_attr[i].i != out_attr[i].i)
10225 {
10226 _bfd_error_handler
10227 (_("error: fp16 format mismatch between %B and %B"),
10228 ibfd, obfd);
10229 result = FALSE;
10230 }
10231 }
10232 if (in_attr[i].i != 0)
10233 out_attr[i].i = in_attr[i].i;
10234 break;
10235
10236 case Tag_DIV_use:
10237 /* This tag is set to zero if we can use UDIV and SDIV in Thumb
10238 mode on a v7-M or v7-R CPU; to one if we cannot use UDIV or
10239 SDIV at all; and to two if we can use UDIV or SDIV on a v7-A
10240 CPU. We will merge as follows: If the input attribute's value
10241 is one then the output attribute's value remains unchanged. If
10242 the input attribute's value is zero or two then if the output
10243 attribute's value is one the output value is set to the input
10244 value, otherwise the output value must be the same as the
10245 inputs. */
10246 if (in_attr[i].i != 1 && out_attr[i].i != 1)
10247 {
10248 if (in_attr[i].i != out_attr[i].i)
10249 {
10250 _bfd_error_handler
10251 (_("DIV usage mismatch between %B and %B"),
10252 ibfd, obfd);
10253 result = FALSE;
10254 }
10255 }
10256
10257 if (in_attr[i].i != 1)
10258 out_attr[i].i = in_attr[i].i;
10259
10260 break;
10261
10262 case Tag_MPextension_use_legacy:
10263 /* We don't output objects with Tag_MPextension_use_legacy - we
10264 move the value to Tag_MPextension_use. */
10265 if (in_attr[i].i != 0 && in_attr[Tag_MPextension_use].i != 0)
10266 {
10267 if (in_attr[Tag_MPextension_use].i != in_attr[i].i)
10268 {
10269 _bfd_error_handler
10270 (_("%B has has both the current and legacy "
10271 "Tag_MPextension_use attributes"),
10272 ibfd);
10273 result = FALSE;
10274 }
10275 }
10276
10277 if (in_attr[i].i > out_attr[Tag_MPextension_use].i)
10278 out_attr[Tag_MPextension_use] = in_attr[i];
10279
10280 break;
10281
10282 case Tag_nodefaults:
10283 /* This tag is set if it exists, but the value is unused (and is
10284 typically zero). We don't actually need to do anything here -
10285 the merge happens automatically when the type flags are merged
10286 below. */
10287 break;
10288 case Tag_also_compatible_with:
10289 /* Already done in Tag_CPU_arch. */
10290 break;
10291 case Tag_conformance:
10292 /* Keep the attribute if it matches. Throw it away otherwise.
10293 No attribute means no claim to conform. */
10294 if (!in_attr[i].s || !out_attr[i].s
10295 || strcmp (in_attr[i].s, out_attr[i].s) != 0)
10296 out_attr[i].s = NULL;
10297 break;
10298
10299 default:
10300 {
10301 bfd *err_bfd = NULL;
10302
10303 /* The "known_obj_attributes" table does contain some undefined
10304 attributes. Ensure that they are unused. */
10305 if (out_attr[i].i != 0 || out_attr[i].s != NULL)
10306 err_bfd = obfd;
10307 else if (in_attr[i].i != 0 || in_attr[i].s != NULL)
10308 err_bfd = ibfd;
10309
10310 if (err_bfd != NULL)
10311 {
10312 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
10313 if ((i & 127) < 64)
10314 {
10315 _bfd_error_handler
10316 (_("%B: Unknown mandatory EABI object attribute %d"),
10317 err_bfd, i);
10318 bfd_set_error (bfd_error_bad_value);
10319 result = FALSE;
10320 }
10321 else
10322 {
10323 _bfd_error_handler
10324 (_("Warning: %B: Unknown EABI object attribute %d"),
10325 err_bfd, i);
10326 }
10327 }
10328
10329 /* Only pass on attributes that match in both inputs. */
10330 if (in_attr[i].i != out_attr[i].i
10331 || in_attr[i].s != out_attr[i].s
10332 || (in_attr[i].s != NULL && out_attr[i].s != NULL
10333 && strcmp (in_attr[i].s, out_attr[i].s) != 0))
10334 {
10335 out_attr[i].i = 0;
10336 out_attr[i].s = NULL;
10337 }
10338 }
10339 }
10340
10341 /* If out_attr was copied from in_attr then it won't have a type yet. */
10342 if (in_attr[i].type && !out_attr[i].type)
10343 out_attr[i].type = in_attr[i].type;
10344 }
10345
10346 /* Merge Tag_compatibility attributes and any common GNU ones. */
10347 if (!_bfd_elf_merge_object_attributes (ibfd, obfd))
10348 return FALSE;
10349
10350 /* Check for any attributes not known on ARM. */
10351 in_list = elf_other_obj_attributes_proc (ibfd);
10352 out_listp = &elf_other_obj_attributes_proc (obfd);
10353 out_list = *out_listp;
10354
10355 for (; in_list || out_list; )
10356 {
10357 bfd *err_bfd = NULL;
10358 int err_tag = 0;
10359
10360 /* The tags for each list are in numerical order. */
10361 /* If the tags are equal, then merge. */
10362 if (out_list && (!in_list || in_list->tag > out_list->tag))
10363 {
10364 /* This attribute only exists in obfd. We can't merge, and we don't
10365 know what the tag means, so delete it. */
10366 err_bfd = obfd;
10367 err_tag = out_list->tag;
10368 *out_listp = out_list->next;
10369 out_list = *out_listp;
10370 }
10371 else if (in_list && (!out_list || in_list->tag < out_list->tag))
10372 {
10373 /* This attribute only exists in ibfd. We can't merge, and we don't
10374 know what the tag means, so ignore it. */
10375 err_bfd = ibfd;
10376 err_tag = in_list->tag;
10377 in_list = in_list->next;
10378 }
10379 else /* The tags are equal. */
10380 {
10381 /* At present, all attributes in the list are unknown, and
10382 therefore can't be merged meaningfully. */
10383 err_bfd = obfd;
10384 err_tag = out_list->tag;
10385
10386 /* Only pass on attributes that match in both inputs. */
10387 if (in_list->attr.i != out_list->attr.i
10388 || in_list->attr.s != out_list->attr.s
10389 || (in_list->attr.s && out_list->attr.s
10390 && strcmp (in_list->attr.s, out_list->attr.s) != 0))
10391 {
10392 /* No match. Delete the attribute. */
10393 *out_listp = out_list->next;
10394 out_list = *out_listp;
10395 }
10396 else
10397 {
10398 /* Matched. Keep the attribute and move to the next. */
10399 out_list = out_list->next;
10400 in_list = in_list->next;
10401 }
10402 }
10403
10404 if (err_bfd)
10405 {
10406 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
10407 if ((err_tag & 127) < 64)
10408 {
10409 _bfd_error_handler
10410 (_("%B: Unknown mandatory EABI object attribute %d"),
10411 err_bfd, err_tag);
10412 bfd_set_error (bfd_error_bad_value);
10413 result = FALSE;
10414 }
10415 else
10416 {
10417 _bfd_error_handler
10418 (_("Warning: %B: Unknown EABI object attribute %d"),
10419 err_bfd, err_tag);
10420 }
10421 }
10422 }
10423 return result;
10424 }
10425
10426
10427 /* Return TRUE if the two EABI versions are compatible. */
10428
10429 static bfd_boolean
10430 elf32_arm_versions_compatible (unsigned iver, unsigned over)
10431 {
10432 /* v4 and v5 are the same spec before and after it was released,
10433 so allow mixing them. */
10434 if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
10435 || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
10436 return TRUE;
10437
10438 return (iver == over);
10439 }
10440
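#if 0
/* Illustrative sketch only (not built): EABI v4 and v5 objects may be
   linked together, but a v5 object and an unversioned (GNU) object may
   not.  The function name is invented for the example.  */

static void
example_version_checks (void)
{
  BFD_ASSERT (elf32_arm_versions_compatible (EF_ARM_EABI_VER4,
                                             EF_ARM_EABI_VER5));
  BFD_ASSERT (! elf32_arm_versions_compatible (EF_ARM_EABI_VER5,
                                               EF_ARM_EABI_UNKNOWN));
}
#endif
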
10441 /* Merge backend specific data from an object file to the output
10442 object file when linking. */
10443
10444 static bfd_boolean
10445 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd);
10446
10447 /* Display the flags field. */
10448
10449 static bfd_boolean
10450 elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
10451 {
10452 FILE * file = (FILE *) ptr;
10453 unsigned long flags;
10454
10455 BFD_ASSERT (abfd != NULL && ptr != NULL);
10456
10457 /* Print normal ELF private data. */
10458 _bfd_elf_print_private_bfd_data (abfd, ptr);
10459
10460 flags = elf_elfheader (abfd)->e_flags;
10461 /* Ignore init flag - it may not be set, despite the flags field
10462 containing valid data. */
10463
10464 /* xgettext:c-format */
10465 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
10466
10467 switch (EF_ARM_EABI_VERSION (flags))
10468 {
10469 case EF_ARM_EABI_UNKNOWN:
10470 /* The following flag bits are GNU extensions and not part of the
10471 official ARM ELF extended ABI. Hence they are only decoded if
10472 the EABI version is not set. */
10473 if (flags & EF_ARM_INTERWORK)
10474 fprintf (file, _(" [interworking enabled]"));
10475
10476 if (flags & EF_ARM_APCS_26)
10477 fprintf (file, " [APCS-26]");
10478 else
10479 fprintf (file, " [APCS-32]");
10480
10481 if (flags & EF_ARM_VFP_FLOAT)
10482 fprintf (file, _(" [VFP float format]"));
10483 else if (flags & EF_ARM_MAVERICK_FLOAT)
10484 fprintf (file, _(" [Maverick float format]"));
10485 else
10486 fprintf (file, _(" [FPA float format]"));
10487
10488 if (flags & EF_ARM_APCS_FLOAT)
10489 fprintf (file, _(" [floats passed in float registers]"));
10490
10491 if (flags & EF_ARM_PIC)
10492 fprintf (file, _(" [position independent]"));
10493
10494 if (flags & EF_ARM_NEW_ABI)
10495 fprintf (file, _(" [new ABI]"));
10496
10497 if (flags & EF_ARM_OLD_ABI)
10498 fprintf (file, _(" [old ABI]"));
10499
10500 if (flags & EF_ARM_SOFT_FLOAT)
10501 fprintf (file, _(" [software FP]"));
10502
10503 flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
10504 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
10505 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
10506 | EF_ARM_MAVERICK_FLOAT);
10507 break;
10508
10509 case EF_ARM_EABI_VER1:
10510 fprintf (file, _(" [Version1 EABI]"));
10511
10512 if (flags & EF_ARM_SYMSARESORTED)
10513 fprintf (file, _(" [sorted symbol table]"));
10514 else
10515 fprintf (file, _(" [unsorted symbol table]"));
10516
10517 flags &= ~ EF_ARM_SYMSARESORTED;
10518 break;
10519
10520 case EF_ARM_EABI_VER2:
10521 fprintf (file, _(" [Version2 EABI]"));
10522
10523 if (flags & EF_ARM_SYMSARESORTED)
10524 fprintf (file, _(" [sorted symbol table]"));
10525 else
10526 fprintf (file, _(" [unsorted symbol table]"));
10527
10528 if (flags & EF_ARM_DYNSYMSUSESEGIDX)
10529 fprintf (file, _(" [dynamic symbols use segment index]"));
10530
10531 if (flags & EF_ARM_MAPSYMSFIRST)
10532 fprintf (file, _(" [mapping symbols precede others]"));
10533
10534 flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
10535 | EF_ARM_MAPSYMSFIRST);
10536 break;
10537
10538 case EF_ARM_EABI_VER3:
10539 fprintf (file, _(" [Version3 EABI]"));
10540 break;
10541
10542 case EF_ARM_EABI_VER4:
10543 fprintf (file, _(" [Version4 EABI]"));
10544 goto eabi;
10545
10546 case EF_ARM_EABI_VER5:
10547 fprintf (file, _(" [Version5 EABI]"));
10548 eabi:
10549 if (flags & EF_ARM_BE8)
10550 fprintf (file, _(" [BE8]"));
10551
10552 if (flags & EF_ARM_LE8)
10553 fprintf (file, _(" [LE8]"));
10554
10555 flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
10556 break;
10557
10558 default:
10559 fprintf (file, _(" <EABI version unrecognised>"));
10560 break;
10561 }
10562
10563 flags &= ~ EF_ARM_EABIMASK;
10564
10565 if (flags & EF_ARM_RELEXEC)
10566 fprintf (file, _(" [relocatable executable]"));
10567
10568 if (flags & EF_ARM_HASENTRY)
10569 fprintf (file, _(" [has entry point]"));
10570
10571 flags &= ~ (EF_ARM_RELEXEC | EF_ARM_HASENTRY);
10572
10573 if (flags)
10574 fprintf (file, _("<Unrecognised flag bits set>"));
10575
10576 fputc ('\n', file);
10577
10578 return TRUE;
10579 }
10580
10581 static int
10582 elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
10583 {
10584 switch (ELF_ST_TYPE (elf_sym->st_info))
10585 {
10586 case STT_ARM_TFUNC:
10587 return ELF_ST_TYPE (elf_sym->st_info);
10588
10589 case STT_ARM_16BIT:
10590 /* If the symbol is not an object, return the STT_ARM_16BIT flag.
10591 This allows us to distinguish between data used by Thumb instructions
10592 and non-data (which is probably code) inside Thumb regions of an
10593 executable. */
10594 if (type != STT_OBJECT && type != STT_TLS)
10595 return ELF_ST_TYPE (elf_sym->st_info);
10596 break;
10597
10598 default:
10599 break;
10600 }
10601
10602 return type;
10603 }
10604
10605 static asection *
10606 elf32_arm_gc_mark_hook (asection *sec,
10607 struct bfd_link_info *info,
10608 Elf_Internal_Rela *rel,
10609 struct elf_link_hash_entry *h,
10610 Elf_Internal_Sym *sym)
10611 {
10612 if (h != NULL)
10613 switch (ELF32_R_TYPE (rel->r_info))
10614 {
10615 case R_ARM_GNU_VTINHERIT:
10616 case R_ARM_GNU_VTENTRY:
10617 return NULL;
10618 }
10619
10620 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
10621 }
10622
10623 /* Update the got entry reference counts for the section being removed. */
10624
10625 static bfd_boolean
10626 elf32_arm_gc_sweep_hook (bfd * abfd,
10627 struct bfd_link_info * info,
10628 asection * sec,
10629 const Elf_Internal_Rela * relocs)
10630 {
10631 Elf_Internal_Shdr *symtab_hdr;
10632 struct elf_link_hash_entry **sym_hashes;
10633 bfd_signed_vma *local_got_refcounts;
10634 const Elf_Internal_Rela *rel, *relend;
10635 struct elf32_arm_link_hash_table * globals;
10636
10637 if (info->relocatable)
10638 return TRUE;
10639
10640 globals = elf32_arm_hash_table (info);
10641 if (globals == NULL)
10642 return FALSE;
10643
10644 elf_section_data (sec)->local_dynrel = NULL;
10645
10646 symtab_hdr = & elf_symtab_hdr (abfd);
10647 sym_hashes = elf_sym_hashes (abfd);
10648 local_got_refcounts = elf_local_got_refcounts (abfd);
10649
10650 check_use_blx (globals);
10651
10652 relend = relocs + sec->reloc_count;
10653 for (rel = relocs; rel < relend; rel++)
10654 {
10655 unsigned long r_symndx;
10656 struct elf_link_hash_entry *h = NULL;
10657 int r_type;
10658
10659 r_symndx = ELF32_R_SYM (rel->r_info);
10660 if (r_symndx >= symtab_hdr->sh_info)
10661 {
10662 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
10663 while (h->root.type == bfd_link_hash_indirect
10664 || h->root.type == bfd_link_hash_warning)
10665 h = (struct elf_link_hash_entry *) h->root.u.i.link;
10666 }
10667
10668 r_type = ELF32_R_TYPE (rel->r_info);
10669 r_type = arm_real_reloc_type (globals, r_type);
10670 switch (r_type)
10671 {
10672 case R_ARM_GOT32:
10673 case R_ARM_GOT_PREL:
10674 case R_ARM_TLS_GD32:
10675 case R_ARM_TLS_IE32:
10676 if (h != NULL)
10677 {
10678 if (h->got.refcount > 0)
10679 h->got.refcount -= 1;
10680 }
10681 else if (local_got_refcounts != NULL)
10682 {
10683 if (local_got_refcounts[r_symndx] > 0)
10684 local_got_refcounts[r_symndx] -= 1;
10685 }
10686 break;
10687
10688 case R_ARM_TLS_LDM32:
10689 globals->tls_ldm_got.refcount -= 1;
10690 break;
10691
10692 case R_ARM_ABS32:
10693 case R_ARM_ABS32_NOI:
10694 case R_ARM_REL32:
10695 case R_ARM_REL32_NOI:
10696 case R_ARM_PC24:
10697 case R_ARM_PLT32:
10698 case R_ARM_CALL:
10699 case R_ARM_JUMP24:
10700 case R_ARM_PREL31:
10701 case R_ARM_THM_CALL:
10702 case R_ARM_THM_JUMP24:
10703 case R_ARM_THM_JUMP19:
10704 case R_ARM_MOVW_ABS_NC:
10705 case R_ARM_MOVT_ABS:
10706 case R_ARM_MOVW_PREL_NC:
10707 case R_ARM_MOVT_PREL:
10708 case R_ARM_THM_MOVW_ABS_NC:
10709 case R_ARM_THM_MOVT_ABS:
10710 case R_ARM_THM_MOVW_PREL_NC:
10711 case R_ARM_THM_MOVT_PREL:
10712 /* Should the interworking branches be here also? */
10713
10714 if (h != NULL)
10715 {
10716 struct elf32_arm_link_hash_entry *eh;
10717 struct elf32_arm_relocs_copied **pp;
10718 struct elf32_arm_relocs_copied *p;
10719
10720 eh = (struct elf32_arm_link_hash_entry *) h;
10721
10722 if (h->plt.refcount > 0)
10723 {
10724 h->plt.refcount -= 1;
10725 if (r_type == R_ARM_THM_CALL)
10726 eh->plt_maybe_thumb_refcount--;
10727
10728 if (r_type == R_ARM_THM_JUMP24
10729 || r_type == R_ARM_THM_JUMP19)
10730 eh->plt_thumb_refcount--;
10731 }
10732
10733 if (r_type == R_ARM_ABS32
10734 || r_type == R_ARM_REL32
10735 || r_type == R_ARM_ABS32_NOI
10736 || r_type == R_ARM_REL32_NOI)
10737 {
10738 for (pp = &eh->relocs_copied; (p = *pp) != NULL;
10739 pp = &p->next)
10740 if (p->section == sec)
10741 {
10742 p->count -= 1;
10743 if (ELF32_R_TYPE (rel->r_info) == R_ARM_REL32
10744 || ELF32_R_TYPE (rel->r_info) == R_ARM_REL32_NOI)
10745 p->pc_count -= 1;
10746 if (p->count == 0)
10747 *pp = p->next;
10748 break;
10749 }
10750 }
10751 }
10752 break;
10753
10754 default:
10755 break;
10756 }
10757 }
10758
10759 return TRUE;
10760 }
10761
10762 /* Look through the relocs for a section during the first phase. */
10763
10764 static bfd_boolean
10765 elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
10766 asection *sec, const Elf_Internal_Rela *relocs)
10767 {
10768 Elf_Internal_Shdr *symtab_hdr;
10769 struct elf_link_hash_entry **sym_hashes;
10770 const Elf_Internal_Rela *rel;
10771 const Elf_Internal_Rela *rel_end;
10772 bfd *dynobj;
10773 asection *sreloc;
10774 bfd_vma *local_got_offsets;
10775 struct elf32_arm_link_hash_table *htab;
10776 bfd_boolean needs_plt;
10777 unsigned long nsyms;
10778
10779 if (info->relocatable)
10780 return TRUE;
10781
10782 BFD_ASSERT (is_arm_elf (abfd));
10783
10784 htab = elf32_arm_hash_table (info);
10785 if (htab == NULL)
10786 return FALSE;
10787
10788 sreloc = NULL;
10789
10790 /* Create dynamic sections for relocatable executables so that we can
10791 copy relocations. */
10792 if (htab->root.is_relocatable_executable
10793 && ! htab->root.dynamic_sections_created)
10794 {
10795 if (! _bfd_elf_link_create_dynamic_sections (abfd, info))
10796 return FALSE;
10797 }
10798
10799 dynobj = elf_hash_table (info)->dynobj;
10800 local_got_offsets = elf_local_got_offsets (abfd);
10801
10802 symtab_hdr = & elf_symtab_hdr (abfd);
10803 sym_hashes = elf_sym_hashes (abfd);
10804 nsyms = NUM_SHDR_ENTRIES (symtab_hdr);
10805
10806 rel_end = relocs + sec->reloc_count;
10807 for (rel = relocs; rel < rel_end; rel++)
10808 {
10809 struct elf_link_hash_entry *h;
10810 struct elf32_arm_link_hash_entry *eh;
10811 unsigned long r_symndx;
10812 int r_type;
10813
10814 r_symndx = ELF32_R_SYM (rel->r_info);
10815 r_type = ELF32_R_TYPE (rel->r_info);
10816 r_type = arm_real_reloc_type (htab, r_type);
10817
10818 if (r_symndx >= nsyms
10819 /* PR 9934: It is possible to have relocations that do not
10820 refer to symbols, thus it is also possible to have an
10821 object file containing relocations but no symbol table. */
10822 && (r_symndx > 0 || nsyms > 0))
10823 {
10824 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
10825 r_symndx);
10826 return FALSE;
10827 }
10828
10829 if (nsyms == 0 || r_symndx < symtab_hdr->sh_info)
10830 h = NULL;
10831 else
10832 {
10833 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
10834 while (h->root.type == bfd_link_hash_indirect
10835 || h->root.type == bfd_link_hash_warning)
10836 h = (struct elf_link_hash_entry *) h->root.u.i.link;
10837 }
10838
10839 eh = (struct elf32_arm_link_hash_entry *) h;
10840
10841 switch (r_type)
10842 {
10843 case R_ARM_GOT32:
10844 case R_ARM_GOT_PREL:
10845 case R_ARM_TLS_GD32:
10846 case R_ARM_TLS_IE32:
10847 /* This symbol requires a global offset table entry. */
10848 {
10849 int tls_type, old_tls_type;
10850
10851 switch (r_type)
10852 {
10853 case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;
10854 case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;
10855 default: tls_type = GOT_NORMAL; break;
10856 }
10857
10858 if (h != NULL)
10859 {
10860 h->got.refcount++;
10861 old_tls_type = elf32_arm_hash_entry (h)->tls_type;
10862 }
10863 else
10864 {
10865 bfd_signed_vma *local_got_refcounts;
10866
10867 /* This is a global offset table entry for a local symbol. */
10868 local_got_refcounts = elf_local_got_refcounts (abfd);
10869 if (local_got_refcounts == NULL)
10870 {
10871 bfd_size_type size;
10872
10873 size = symtab_hdr->sh_info;
10874 size *= (sizeof (bfd_signed_vma) + sizeof (char));
10875 local_got_refcounts = (bfd_signed_vma *)
10876 bfd_zalloc (abfd, size);
10877 if (local_got_refcounts == NULL)
10878 return FALSE;
10879 elf_local_got_refcounts (abfd) = local_got_refcounts;
10880 elf32_arm_local_got_tls_type (abfd)
10881 = (char *) (local_got_refcounts + symtab_hdr->sh_info);
10882 }
10883 local_got_refcounts[r_symndx] += 1;
10884 old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
10885 }
10886
10887 /* We will already have issued an error message if there is a
10888 TLS / non-TLS mismatch, based on the symbol type. We don't
10889 support any linker relaxations. So just combine any TLS
10890 types needed. */
10891 if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
10892 && tls_type != GOT_NORMAL)
10893 tls_type |= old_tls_type;
10894
10895 if (old_tls_type != tls_type)
10896 {
10897 if (h != NULL)
10898 elf32_arm_hash_entry (h)->tls_type = tls_type;
10899 else
10900 elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
10901 }
10902 }
10903 /* Fall through. */
10904
10905 case R_ARM_TLS_LDM32:
10906 if (r_type == R_ARM_TLS_LDM32)
10907 htab->tls_ldm_got.refcount++;
10908 /* Fall through. */
10909
10910 case R_ARM_GOTOFF32:
10911 case R_ARM_GOTPC:
10912 if (htab->sgot == NULL)
10913 {
10914 if (htab->root.dynobj == NULL)
10915 htab->root.dynobj = abfd;
10916 if (!create_got_section (htab->root.dynobj, info))
10917 return FALSE;
10918 }
10919 break;
10920
10921 case R_ARM_ABS12:
10922 /* VxWorks uses dynamic R_ARM_ABS12 relocations for
10923 ldr __GOTT_INDEX__ offsets. */
10924 if (!htab->vxworks_p)
10925 break;
10926 /* Fall through. */
10927
10928 case R_ARM_PC24:
10929 case R_ARM_PLT32:
10930 case R_ARM_CALL:
10931 case R_ARM_JUMP24:
10932 case R_ARM_PREL31:
10933 case R_ARM_THM_CALL:
10934 case R_ARM_THM_JUMP24:
10935 case R_ARM_THM_JUMP19:
10936 needs_plt = 1;
10937 goto normal_reloc;
10938
10939 case R_ARM_MOVW_ABS_NC:
10940 case R_ARM_MOVT_ABS:
10941 case R_ARM_THM_MOVW_ABS_NC:
10942 case R_ARM_THM_MOVT_ABS:
10943 if (info->shared)
10944 {
10945 (*_bfd_error_handler)
10946 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
10947 abfd, elf32_arm_howto_table_1[r_type].name,
10948 (h) ? h->root.root.string : "a local symbol");
10949 bfd_set_error (bfd_error_bad_value);
10950 return FALSE;
10951 }
10952
10953 /* Fall through. */
10954 case R_ARM_ABS32:
10955 case R_ARM_ABS32_NOI:
10956 case R_ARM_REL32:
10957 case R_ARM_REL32_NOI:
10958 case R_ARM_MOVW_PREL_NC:
10959 case R_ARM_MOVT_PREL:
10960 case R_ARM_THM_MOVW_PREL_NC:
10961 case R_ARM_THM_MOVT_PREL:
10962 needs_plt = 0;
10963 normal_reloc:
10964
10965 /* Should the interworking branches be listed here? */
10966 if (h != NULL)
10967 {
10968 /* If this reloc is in a read-only section, we might
10969 need a copy reloc. We can't check reliably at this
10970 stage whether the section is read-only, as input
10971 sections have not yet been mapped to output sections.
10972 Tentatively set the flag for now, and correct in
10973 adjust_dynamic_symbol. */
10974 if (!info->shared)
10975 h->non_got_ref = 1;
10976
10977 /* We may need a .plt entry if the function this reloc
10978 refers to is in a different object. We can't tell for
10979 sure yet, because something later might force the
10980 symbol local. */
10981 if (needs_plt)
10982 h->needs_plt = 1;
10983
10984 /* If we create a PLT entry, this relocation will reference
10985 it, even if it's an ABS32 relocation. */
10986 h->plt.refcount += 1;
10987
10988 /* It's too early to use htab->use_blx here, so we have to
10989 record possible blx references separately from
10990 relocs that definitely need a thumb stub. */
10991
10992 if (r_type == R_ARM_THM_CALL)
10993 eh->plt_maybe_thumb_refcount += 1;
10994
10995 if (r_type == R_ARM_THM_JUMP24
10996 || r_type == R_ARM_THM_JUMP19)
10997 eh->plt_thumb_refcount += 1;
10998 }
10999
11000 /* If we are creating a shared library or relocatable executable,
11001 and this is a reloc against a global symbol, or a non PC
11002 relative reloc against a local symbol, then we need to copy
11003 the reloc into the shared library. However, if we are linking
11004 with -Bsymbolic, we do not need to copy a reloc against a
11005 global symbol which is defined in an object we are
11006 including in the link (i.e., DEF_REGULAR is set). At
11007 this point we have not seen all the input files, so it is
11008 possible that DEF_REGULAR is not set now but will be set
11009 later (it is never cleared). We account for that
11010 possibility below by storing information in the
11011 relocs_copied field of the hash table entry. */
11012 if ((info->shared || htab->root.is_relocatable_executable)
11013 && (sec->flags & SEC_ALLOC) != 0
11014 && ((r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI)
11015 || (h != NULL && ! h->needs_plt
11016 && (! info->symbolic || ! h->def_regular))))
11017 {
11018 struct elf32_arm_relocs_copied *p, **head;
11019
11020 /* When creating a shared object, we must copy these
11021 reloc types into the output file. We create a reloc
11022 section in dynobj and make room for this reloc. */
11023 if (sreloc == NULL)
11024 {
11025 sreloc = _bfd_elf_make_dynamic_reloc_section
11026 (sec, dynobj, 2, abfd, ! htab->use_rel);
11027
11028 if (sreloc == NULL)
11029 return FALSE;
11030
11031 /* BPABI objects never have dynamic relocations mapped. */
11032 if (htab->symbian_p)
11033 {
11034 flagword flags;
11035
11036 flags = bfd_get_section_flags (dynobj, sreloc);
11037 flags &= ~(SEC_LOAD | SEC_ALLOC);
11038 bfd_set_section_flags (dynobj, sreloc, flags);
11039 }
11040 }
11041
11042 /* If this is a global symbol, we count the number of
11043 relocations we need for this symbol. */
11044 if (h != NULL)
11045 {
11046 head = &((struct elf32_arm_link_hash_entry *) h)->relocs_copied;
11047 }
11048 else
11049 {
11050 /* Track dynamic relocs needed for local syms too.
11051 We really need local syms available to do this
11052 easily. Oh well. */
11053 asection *s;
11054 void *vpp;
11055 Elf_Internal_Sym *isym;
11056
11057 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
11058 abfd, r_symndx);
11059 if (isym == NULL)
11060 return FALSE;
11061
11062 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
11063 if (s == NULL)
11064 s = sec;
11065
11066 vpp = &elf_section_data (s)->local_dynrel;
11067 head = (struct elf32_arm_relocs_copied **) vpp;
11068 }
11069
11070 p = *head;
11071 if (p == NULL || p->section != sec)
11072 {
11073 bfd_size_type amt = sizeof *p;
11074
11075 p = (struct elf32_arm_relocs_copied *)
11076 bfd_alloc (htab->root.dynobj, amt);
11077 if (p == NULL)
11078 return FALSE;
11079 p->next = *head;
11080 *head = p;
11081 p->section = sec;
11082 p->count = 0;
11083 p->pc_count = 0;
11084 }
11085
11086 if (r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
11087 p->pc_count += 1;
11088 p->count += 1;
11089 }
11090 break;
11091
11092 /* This relocation describes the C++ object vtable hierarchy.
11093 Reconstruct it for later use during GC. */
11094 case R_ARM_GNU_VTINHERIT:
11095 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
11096 return FALSE;
11097 break;
11098
11099 /* This relocation describes which C++ vtable entries are actually
11100 used. Record for later use during GC. */
11101 case R_ARM_GNU_VTENTRY:
11102 BFD_ASSERT (h != NULL);
11103 if (h != NULL
11104 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
11105 return FALSE;
11106 break;
11107 }
11108 }
11109
11110 return TRUE;
11111 }
11112
11113 /* Unwinding tables are not referenced directly. This pass marks them as
11114 required if the corresponding code section is marked. */
11115
11116 static bfd_boolean
11117 elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
11118 elf_gc_mark_hook_fn gc_mark_hook)
11119 {
11120 bfd *sub;
11121 Elf_Internal_Shdr **elf_shdrp;
11122 bfd_boolean again;
11123
11124 /* Marking EH data may cause additional code sections to be marked,
11125 requiring multiple passes. */
11126 again = TRUE;
11127 while (again)
11128 {
11129 again = FALSE;
11130 for (sub = info->input_bfds; sub != NULL; sub = sub->link_next)
11131 {
11132 asection *o;
11133
11134 if (! is_arm_elf (sub))
11135 continue;
11136
11137 elf_shdrp = elf_elfsections (sub);
11138 for (o = sub->sections; o != NULL; o = o->next)
11139 {
11140 Elf_Internal_Shdr *hdr;
11141
11142 hdr = &elf_section_data (o)->this_hdr;
11143 if (hdr->sh_type == SHT_ARM_EXIDX
11144 && hdr->sh_link
11145 && hdr->sh_link < elf_numsections (sub)
11146 && !o->gc_mark
11147 && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
11148 {
11149 again = TRUE;
11150 if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
11151 return FALSE;
11152 }
11153 }
11154 }
11155 }
11156
11157 return TRUE;
11158 }
11159
11160 /* Treat mapping symbols as special target symbols. */
11161
11162 static bfd_boolean
11163 elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
11164 {
11165 return bfd_is_arm_special_symbol_name (sym->name,
11166 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
11167 }
11168
11169 /* This is a copy of elf_find_function() from elf.c except that
11170 ARM mapping symbols are ignored when looking for function names
11171 and STT_ARM_TFUNC is considered to be a function type. */
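 /* Mapping symbols ($a, $t, $d and their variants) only mark instruction
    set and data regions; reporting one as the enclosing function would
    give messages naming "$a" rather than the real function, so they are
    filtered out below. */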
11172
11173 static bfd_boolean
11174 arm_elf_find_function (bfd * abfd ATTRIBUTE_UNUSED,
11175 asection * section,
11176 asymbol ** symbols,
11177 bfd_vma offset,
11178 const char ** filename_ptr,
11179 const char ** functionname_ptr)
11180 {
11181 const char * filename = NULL;
11182 asymbol * func = NULL;
11183 bfd_vma low_func = 0;
11184 asymbol ** p;
11185
11186 for (p = symbols; *p != NULL; p++)
11187 {
11188 elf_symbol_type *q;
11189
11190 q = (elf_symbol_type *) *p;
11191
11192 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
11193 {
11194 default:
11195 break;
11196 case STT_FILE:
11197 filename = bfd_asymbol_name (&q->symbol);
11198 break;
11199 case STT_FUNC:
11200 case STT_ARM_TFUNC:
11201 case STT_NOTYPE:
11202 /* Skip mapping symbols. */
11203 if ((q->symbol.flags & BSF_LOCAL)
11204 && bfd_is_arm_special_symbol_name (q->symbol.name,
11205 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
11206 continue;
11207 /* Fall through. */
11208 if (bfd_get_section (&q->symbol) == section
11209 && q->symbol.value >= low_func
11210 && q->symbol.value <= offset)
11211 {
11212 func = (asymbol *) q;
11213 low_func = q->symbol.value;
11214 }
11215 break;
11216 }
11217 }
11218
11219 if (func == NULL)
11220 return FALSE;
11221
11222 if (filename_ptr)
11223 *filename_ptr = filename;
11224 if (functionname_ptr)
11225 *functionname_ptr = bfd_asymbol_name (func);
11226
11227 return TRUE;
11228 }
11229
11230
11231 /* Find the nearest line to a particular section and offset, for error
11232 reporting. This code is a duplicate of the code in elf.c, except
11233 that it uses arm_elf_find_function. */
11234
11235 static bfd_boolean
11236 elf32_arm_find_nearest_line (bfd * abfd,
11237 asection * section,
11238 asymbol ** symbols,
11239 bfd_vma offset,
11240 const char ** filename_ptr,
11241 const char ** functionname_ptr,
11242 unsigned int * line_ptr)
11243 {
11244 bfd_boolean found = FALSE;
11245
11246 /* We skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain uses it. */
11247
11248 if (_bfd_dwarf2_find_nearest_line (abfd, section, symbols, offset,
11249 filename_ptr, functionname_ptr,
11250 line_ptr, 0,
11251 & elf_tdata (abfd)->dwarf2_find_line_info))
11252 {
11253 if (!*functionname_ptr)
11254 arm_elf_find_function (abfd, section, symbols, offset,
11255 *filename_ptr ? NULL : filename_ptr,
11256 functionname_ptr);
11257
11258 return TRUE;
11259 }
11260
11261 if (! _bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
11262 & found, filename_ptr,
11263 functionname_ptr, line_ptr,
11264 & elf_tdata (abfd)->line_info))
11265 return FALSE;
11266
11267 if (found && (*functionname_ptr || *line_ptr))
11268 return TRUE;
11269
11270 if (symbols == NULL)
11271 return FALSE;
11272
11273 if (! arm_elf_find_function (abfd, section, symbols, offset,
11274 filename_ptr, functionname_ptr))
11275 return FALSE;
11276
11277 *line_ptr = 0;
11278 return TRUE;
11279 }
11280
11281 static bfd_boolean
11282 elf32_arm_find_inliner_info (bfd * abfd,
11283 const char ** filename_ptr,
11284 const char ** functionname_ptr,
11285 unsigned int * line_ptr)
11286 {
11287 bfd_boolean found;
11288 found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
11289 functionname_ptr, line_ptr,
11290 & elf_tdata (abfd)->dwarf2_find_line_info);
11291 return found;
11292 }
11293
11294 /* Adjust a symbol defined by a dynamic object and referenced by a
11295 regular object. The current definition is in some section of the
11296 dynamic object, but we're not including those sections. We have to
11297 change the definition to something the rest of the link can
11298 understand. */
11299
11300 static bfd_boolean
11301 elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
11302 struct elf_link_hash_entry * h)
11303 {
11304 bfd * dynobj;
11305 asection * s;
11306 struct elf32_arm_link_hash_entry * eh;
11307 struct elf32_arm_link_hash_table *globals;
11308
11309 globals = elf32_arm_hash_table (info);
11310 if (globals == NULL)
11311 return FALSE;
11312
11313 dynobj = elf_hash_table (info)->dynobj;
11314
11315 /* Make sure we know what is going on here. */
11316 BFD_ASSERT (dynobj != NULL
11317 && (h->needs_plt
11318 || h->u.weakdef != NULL
11319 || (h->def_dynamic
11320 && h->ref_regular
11321 && !h->def_regular)));
11322
11323 eh = (struct elf32_arm_link_hash_entry *) h;
11324
11325 /* If this is a function, put it in the procedure linkage table. We
11326 will fill in the contents of the procedure linkage table later,
11327 when we know the address of the .got section. */
11328 if (h->type == STT_FUNC || h->type == STT_ARM_TFUNC
11329 || h->needs_plt)
11330 {
11331 if (h->plt.refcount <= 0
11332 || SYMBOL_CALLS_LOCAL (info, h)
11333 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
11334 && h->root.type == bfd_link_hash_undefweak))
11335 {
11336 /* This case can occur if we saw a PLT32 reloc in an input
11337 file, but the symbol was never referred to by a dynamic
11338 object, or if all references were garbage collected. In
11339 such a case, we don't actually need to build a procedure
11340 linkage table, and we can just do a PC24 reloc instead. */
11341 h->plt.offset = (bfd_vma) -1;
11342 eh->plt_thumb_refcount = 0;
11343 eh->plt_maybe_thumb_refcount = 0;
11344 h->needs_plt = 0;
11345 }
11346
11347 return TRUE;
11348 }
11349 else
11350 {
11351 /* It's possible that we incorrectly decided a .plt reloc was
11352 needed for an R_ARM_PC24 or similar reloc to a non-function sym
11353 in check_relocs. We can't decide accurately between function
11354 and non-function syms in check-relocs; Objects loaded later in
11355 the link may change h->type. So fix it now. */
11356 h->plt.offset = (bfd_vma) -1;
11357 eh->plt_thumb_refcount = 0;
11358 eh->plt_maybe_thumb_refcount = 0;
11359 }
11360
11361 /* If this is a weak symbol, and there is a real definition, the
11362 processor independent code will have arranged for us to see the
11363 real definition first, and we can just use the same value. */
11364 if (h->u.weakdef != NULL)
11365 {
11366 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
11367 || h->u.weakdef->root.type == bfd_link_hash_defweak);
11368 h->root.u.def.section = h->u.weakdef->root.u.def.section;
11369 h->root.u.def.value = h->u.weakdef->root.u.def.value;
11370 return TRUE;
11371 }
11372
11373 /* If there are no non-GOT references, we do not need a copy
11374 relocation. */
11375 if (!h->non_got_ref)
11376 return TRUE;
11377
11378 /* This is a reference to a symbol defined by a dynamic object which
11379 is not a function. */
11380
11381 /* If we are creating a shared library, we must presume that the
11382 only references to the symbol are via the global offset table.
11383 For such cases we need not do anything here; the relocations will
11384 be handled correctly by relocate_section. Relocatable executables
11385 can reference data in shared objects directly, so we don't need to
11386 do anything here. */
11387 if (info->shared || globals->root.is_relocatable_executable)
11388 return TRUE;
11389
11390 if (h->size == 0)
11391 {
11392 (*_bfd_error_handler) (_("dynamic variable `%s' is zero size"),
11393 h->root.root.string);
11394 return TRUE;
11395 }
11396
11397 /* We must allocate the symbol in our .dynbss section, which will
11398 become part of the .bss section of the executable. There will be
11399 an entry for this symbol in the .dynsym section. The dynamic
11400 object will contain position independent code, so all references
11401 from the dynamic object to this symbol will go through the global
11402 offset table. The dynamic linker will use the .dynsym entry to
11403 determine the address it must put in the global offset table, so
11404 both the dynamic object and the regular object will refer to the
11405 same memory location for the variable. */
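 /* Illustrative example: if a non-PIC executable reads "extern int var;"
    where var is defined in a shared library, the executable gets space
    for var in .dynbss plus an R_ARM_COPY reloc, and the dynamic linker
    copies the library's initial value into that space at start-up. */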
11406 s = bfd_get_section_by_name (dynobj, ".dynbss");
11407 BFD_ASSERT (s != NULL);
11408
11409 /* We must generate a R_ARM_COPY reloc to tell the dynamic linker to
11410 copy the initial value out of the dynamic object and into the
11411 runtime process image. We need to remember the offset into the
11412 .rel(a).bss section we are going to use. */
11413 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0)
11414 {
11415 asection *srel;
11416
11417 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (globals, ".bss"));
11418 BFD_ASSERT (srel != NULL);
11419 srel->size += RELOC_SIZE (globals);
11420 h->needs_copy = 1;
11421 }
11422
11423 return _bfd_elf_adjust_dynamic_copy (h, s);
11424 }
11425
11426 /* Allocate space in .plt, .got and associated reloc sections for
11427 dynamic relocs. */
11428
11429 static bfd_boolean
11430 allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf)
11431 {
11432 struct bfd_link_info *info;
11433 struct elf32_arm_link_hash_table *htab;
11434 struct elf32_arm_link_hash_entry *eh;
11435 struct elf32_arm_relocs_copied *p;
11436 bfd_signed_vma thumb_refs;
11437
11438 eh = (struct elf32_arm_link_hash_entry *) h;
11439
11440 if (h->root.type == bfd_link_hash_indirect)
11441 return TRUE;
11442
11443 if (h->root.type == bfd_link_hash_warning)
11444 /* When warning symbols are created, they **replace** the "real"
11445 entry in the hash table, thus we never get to see the real
11446 symbol in a hash traversal. So look at it now. */
11447 h = (struct elf_link_hash_entry *) h->root.u.i.link;
11448
11449 info = (struct bfd_link_info *) inf;
11450 htab = elf32_arm_hash_table (info);
11451 if (htab == NULL)
11452 return FALSE;
11453
11454 if (htab->root.dynamic_sections_created
11455 && h->plt.refcount > 0)
11456 {
11457 /* Make sure this symbol is output as a dynamic symbol.
11458 Undefined weak syms won't yet be marked as dynamic. */
11459 if (h->dynindx == -1
11460 && !h->forced_local)
11461 {
11462 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11463 return FALSE;
11464 }
11465
11466 if (info->shared
11467 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
11468 {
11469 asection *s = htab->splt;
11470
11471 /* If this is the first .plt entry, make room for the special
11472 first entry. */
11473 if (s->size == 0)
11474 s->size += htab->plt_header_size;
11475
11476 h->plt.offset = s->size;
11477
11478 /* If we will insert a Thumb trampoline before this PLT entry,
11479 leave room for it. */
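 /* Roughly speaking: without BLX, a Thumb caller cannot branch-and-link
    directly into ARM PLT code, so a short Thumb sequence (essentially
    "bx pc; nop") is placed immediately before the ARM entry and Thumb
    callers are directed there instead. */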
11480 thumb_refs = eh->plt_thumb_refcount;
11481 if (!htab->use_blx)
11482 thumb_refs += eh->plt_maybe_thumb_refcount;
11483
11484 if (thumb_refs > 0)
11485 {
11486 h->plt.offset += PLT_THUMB_STUB_SIZE;
11487 s->size += PLT_THUMB_STUB_SIZE;
11488 }
11489
11490 /* If this symbol is not defined in a regular file, and we are
11491 not generating a shared library, then set the symbol to this
11492 location in the .plt. This is required to make function
11493 pointers compare as equal between the normal executable and
11494 the shared library. */
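 /* For example, if both the executable and a shared library take the
    address of func, "&func" must compare equal in both; using the
    executable's PLT slot as the symbol's address achieves that. */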
11495 if (! info->shared
11496 && !h->def_regular)
11497 {
11498 h->root.u.def.section = s;
11499 h->root.u.def.value = h->plt.offset;
11500
11501 /* Make sure the function is not marked as Thumb, in case
11502 it is the target of an ABS32 relocation, which will
11503 point to the PLT entry. */
11504 if (ELF_ST_TYPE (h->type) == STT_ARM_TFUNC)
11505 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
11506 }
11507
11508 /* Make room for this entry. */
11509 s->size += htab->plt_entry_size;
11510
11511 if (!htab->symbian_p)
11512 {
11513 /* We also need to make an entry in the .got.plt section, which
11514 will be placed in the .got section by the linker script. */
11515 eh->plt_got_offset = htab->sgotplt->size;
11516 htab->sgotplt->size += 4;
11517 }
11518
11519 /* We also need to make an entry in the .rel(a).plt section. */
11520 htab->srelplt->size += RELOC_SIZE (htab);
11521
11522 /* VxWorks executables have a second set of relocations for
11523 each PLT entry. They go in a separate relocation section,
11524 which is processed by the kernel loader. */
11525 if (htab->vxworks_p && !info->shared)
11526 {
11527 /* There is a relocation for the initial PLT entry:
11528 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_. */
11529 if (h->plt.offset == htab->plt_header_size)
11530 htab->srelplt2->size += RELOC_SIZE (htab);
11531
11532 /* There are two extra relocations for each subsequent
11533 PLT entry: an R_ARM_32 relocation for the GOT entry,
11534 and an R_ARM_32 relocation for the PLT entry. */
11535 htab->srelplt2->size += RELOC_SIZE (htab) * 2;
11536 }
11537 }
11538 else
11539 {
11540 h->plt.offset = (bfd_vma) -1;
11541 h->needs_plt = 0;
11542 }
11543 }
11544 else
11545 {
11546 h->plt.offset = (bfd_vma) -1;
11547 h->needs_plt = 0;
11548 }
11549
11550 if (h->got.refcount > 0)
11551 {
11552 asection *s;
11553 bfd_boolean dyn;
11554 int tls_type = elf32_arm_hash_entry (h)->tls_type;
11555 int indx;
11556
11557 /* Make sure this symbol is output as a dynamic symbol.
11558 Undefined weak syms won't yet be marked as dynamic. */
11559 if (h->dynindx == -1
11560 && !h->forced_local)
11561 {
11562 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11563 return FALSE;
11564 }
11565
11566 if (!htab->symbian_p)
11567 {
11568 s = htab->sgot;
11569 h->got.offset = s->size;
11570
11571 if (tls_type == GOT_UNKNOWN)
11572 abort ();
11573
11574 if (tls_type == GOT_NORMAL)
11575 /* Non-TLS symbols need one GOT slot. */
11576 s->size += 4;
11577 else
11578 {
11579 if (tls_type & GOT_TLS_GD)
11580 /* R_ARM_TLS_GD32 needs 2 consecutive GOT slots. */
11581 s->size += 8;
11582 if (tls_type & GOT_TLS_IE)
11583 /* R_ARM_TLS_IE32 needs one GOT slot. */
11584 s->size += 4;
11585 }
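 /* For reference: a GD entry holds a (module index, offset within the
    module's TLS block) pair, hence two words, while an IE entry holds a
    single thread-pointer-relative offset, hence one word. */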
11586
11587 dyn = htab->root.dynamic_sections_created;
11588
11589 indx = 0;
11590 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
11591 && (!info->shared
11592 || !SYMBOL_REFERENCES_LOCAL (info, h)))
11593 indx = h->dynindx;
11594
11595 if (tls_type != GOT_NORMAL
11596 && (info->shared || indx != 0)
11597 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
11598 || h->root.type != bfd_link_hash_undefweak))
11599 {
11600 if (tls_type & GOT_TLS_IE)
11601 htab->srelgot->size += RELOC_SIZE (htab);
11602
11603 if (tls_type & GOT_TLS_GD)
11604 htab->srelgot->size += RELOC_SIZE (htab);
11605
11606 if ((tls_type & GOT_TLS_GD) && indx != 0)
11607 htab->srelgot->size += RELOC_SIZE (htab);
11608 }
11609 else if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
11610 || h->root.type != bfd_link_hash_undefweak)
11611 && (info->shared
11612 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
11613 htab->srelgot->size += RELOC_SIZE (htab);
11614 }
11615 }
11616 else
11617 h->got.offset = (bfd_vma) -1;
11618
11619 /* Allocate stubs for exported Thumb functions on v4t. */
11620 if (!htab->use_blx && h->dynindx != -1
11621 && h->def_regular
11622 && ELF_ST_TYPE (h->type) == STT_ARM_TFUNC
11623 && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
11624 {
11625 struct elf_link_hash_entry * th;
11626 struct bfd_link_hash_entry * bh;
11627 struct elf_link_hash_entry * myh;
11628 char name[1024];
11629 asection *s;
11630 bh = NULL;
11631 /* Create a new symbol to register the real location of the function. */
11632 s = h->root.u.def.section;
11633 sprintf (name, "__real_%s", h->root.root.string);
11634 _bfd_generic_link_add_one_symbol (info, s->owner,
11635 name, BSF_GLOBAL, s,
11636 h->root.u.def.value,
11637 NULL, TRUE, FALSE, &bh);
11638
11639 myh = (struct elf_link_hash_entry *) bh;
11640 myh->type = ELF_ST_INFO (STB_LOCAL, STT_ARM_TFUNC);
11641 myh->forced_local = 1;
11642 eh->export_glue = myh;
11643 th = record_arm_to_thumb_glue (info, h);
11644 /* Point the symbol at the stub. */
11645 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
11646 h->root.u.def.section = th->root.u.def.section;
11647 h->root.u.def.value = th->root.u.def.value & ~1;
11648 }
11649
11650 if (eh->relocs_copied == NULL)
11651 return TRUE;
11652
11653 /* In the shared -Bsymbolic case, discard space allocated for
11654 dynamic pc-relative relocs against symbols which turn out to be
11655 defined in regular objects. For the normal shared case, discard
11656 space for pc-relative relocs that have become local due to symbol
11657 visibility changes. */
11658
11659 if (info->shared || htab->root.is_relocatable_executable)
11660 {
11661 /* The only relocs that use pc_count are R_ARM_REL32 and
11662 R_ARM_REL32_NOI, which will appear on something like
11663 ".long foo - .". We want calls to protected symbols to resolve
11664 directly to the function rather than going via the plt. If people
11665 want function pointer comparisons to work as expected then they
11666 should avoid writing assembly like ".long foo - .". */
11667 if (SYMBOL_CALLS_LOCAL (info, h))
11668 {
11669 struct elf32_arm_relocs_copied **pp;
11670
11671 for (pp = &eh->relocs_copied; (p = *pp) != NULL; )
11672 {
11673 p->count -= p->pc_count;
11674 p->pc_count = 0;
11675 if (p->count == 0)
11676 *pp = p->next;
11677 else
11678 pp = &p->next;
11679 }
11680 }
11681
11682 if (htab->vxworks_p)
11683 {
11684 struct elf32_arm_relocs_copied **pp;
11685
11686 for (pp = &eh->relocs_copied; (p = *pp) != NULL; )
11687 {
11688 if (strcmp (p->section->output_section->name, ".tls_vars") == 0)
11689 *pp = p->next;
11690 else
11691 pp = &p->next;
11692 }
11693 }
11694
11695 /* Also discard relocs on undefined weak syms with non-default
11696 visibility. */
11697 if (eh->relocs_copied != NULL
11698 && h->root.type == bfd_link_hash_undefweak)
11699 {
11700 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
11701 eh->relocs_copied = NULL;
11702
11703 /* Make sure undefined weak symbols are output as a dynamic
11704 symbol in PIEs. */
11705 else if (h->dynindx == -1
11706 && !h->forced_local)
11707 {
11708 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11709 return FALSE;
11710 }
11711 }
11712
11713 else if (htab->root.is_relocatable_executable && h->dynindx == -1
11714 && h->root.type == bfd_link_hash_new)
11715 {
11716 /* Output absolute symbols so that we can create relocations
11717 against them. For normal symbols we output a relocation
11718 against the section that contains them. */
11719 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11720 return FALSE;
11721 }
11722
11723 }
11724 else
11725 {
11726 /* For the non-shared case, discard space for relocs against
11727 symbols which turn out to need copy relocs or are not
11728 dynamic. */
11729
11730 if (!h->non_got_ref
11731 && ((h->def_dynamic
11732 && !h->def_regular)
11733 || (htab->root.dynamic_sections_created
11734 && (h->root.type == bfd_link_hash_undefweak
11735 || h->root.type == bfd_link_hash_undefined))))
11736 {
11737 /* Make sure this symbol is output as a dynamic symbol.
11738 Undefined weak syms won't yet be marked as dynamic. */
11739 if (h->dynindx == -1
11740 && !h->forced_local)
11741 {
11742 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11743 return FALSE;
11744 }
11745
11746 /* If that succeeded, we know we'll be keeping all the
11747 relocs. */
11748 if (h->dynindx != -1)
11749 goto keep;
11750 }
11751
11752 eh->relocs_copied = NULL;
11753
11754 keep: ;
11755 }
11756
11757 /* Finally, allocate space. */
11758 for (p = eh->relocs_copied; p != NULL; p = p->next)
11759 {
11760 asection *sreloc = elf_section_data (p->section)->sreloc;
11761 sreloc->size += p->count * RELOC_SIZE (htab);
11762 }
11763
11764 return TRUE;
11765 }
11766
11767 /* Find any dynamic relocs that apply to read-only sections. */
11768
11769 static bfd_boolean
11770 elf32_arm_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf)
11771 {
11772 struct elf32_arm_link_hash_entry * eh;
11773 struct elf32_arm_relocs_copied * p;
11774
11775 if (h->root.type == bfd_link_hash_warning)
11776 h = (struct elf_link_hash_entry *) h->root.u.i.link;
11777
11778 eh = (struct elf32_arm_link_hash_entry *) h;
11779 for (p = eh->relocs_copied; p != NULL; p = p->next)
11780 {
11781 asection *s = p->section;
11782
11783 if (s != NULL && (s->flags & SEC_READONLY) != 0)
11784 {
11785 struct bfd_link_info *info = (struct bfd_link_info *) inf;
11786
11787 info->flags |= DF_TEXTREL;
11788
11789 /* Not an error, just cut short the traversal. */
11790 return FALSE;
11791 }
11792 }
11793 return TRUE;
11794 }
11795
11796 void
11797 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
11798 int byteswap_code)
11799 {
11800 struct elf32_arm_link_hash_table *globals;
11801
11802 globals = elf32_arm_hash_table (info);
11803 if (globals == NULL)
11804 return;
11805
11806 globals->byteswap_code = byteswap_code;
11807 }
11808
11809 /* Set the sizes of the dynamic sections. */
11810
11811 static bfd_boolean
11812 elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
11813 struct bfd_link_info * info)
11814 {
11815 bfd * dynobj;
11816 asection * s;
11817 bfd_boolean plt;
11818 bfd_boolean relocs;
11819 bfd *ibfd;
11820 struct elf32_arm_link_hash_table *htab;
11821
11822 htab = elf32_arm_hash_table (info);
11823 if (htab == NULL)
11824 return FALSE;
11825
11826 dynobj = elf_hash_table (info)->dynobj;
11827 BFD_ASSERT (dynobj != NULL);
11828 check_use_blx (htab);
11829
11830 if (elf_hash_table (info)->dynamic_sections_created)
11831 {
11832 /* Set the contents of the .interp section to the interpreter. */
11833 if (info->executable)
11834 {
11835 s = bfd_get_section_by_name (dynobj, ".interp");
11836 BFD_ASSERT (s != NULL);
11837 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
11838 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
11839 }
11840 }
11841
11842 /* Set up .got offsets for local syms, and space for local dynamic
11843 relocs. */
11844 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
11845 {
11846 bfd_signed_vma *local_got;
11847 bfd_signed_vma *end_local_got;
11848 char *local_tls_type;
11849 bfd_size_type locsymcount;
11850 Elf_Internal_Shdr *symtab_hdr;
11851 asection *srel;
11852 bfd_boolean is_vxworks = htab->vxworks_p;
11853
11854 if (! is_arm_elf (ibfd))
11855 continue;
11856
11857 for (s = ibfd->sections; s != NULL; s = s->next)
11858 {
11859 struct elf32_arm_relocs_copied *p;
11860
11861 for (p = (struct elf32_arm_relocs_copied *)
11862 elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
11863 {
11864 if (!bfd_is_abs_section (p->section)
11865 && bfd_is_abs_section (p->section->output_section))
11866 {
11867 /* Input section has been discarded, either because
11868 it is a copy of a linkonce section or due to
11869 linker script /DISCARD/, so we'll be discarding
11870 the relocs too. */
11871 }
11872 else if (is_vxworks
11873 && strcmp (p->section->output_section->name,
11874 ".tls_vars") == 0)
11875 {
11876 /* Relocations in vxworks .tls_vars sections are
11877 handled specially by the loader. */
11878 }
11879 else if (p->count != 0)
11880 {
11881 srel = elf_section_data (p->section)->sreloc;
11882 srel->size += p->count * RELOC_SIZE (htab);
11883 if ((p->section->output_section->flags & SEC_READONLY) != 0)
11884 info->flags |= DF_TEXTREL;
11885 }
11886 }
11887 }
11888
11889 local_got = elf_local_got_refcounts (ibfd);
11890 if (!local_got)
11891 continue;
11892
11893 symtab_hdr = & elf_symtab_hdr (ibfd);
11894 locsymcount = symtab_hdr->sh_info;
11895 end_local_got = local_got + locsymcount;
11896 local_tls_type = elf32_arm_local_got_tls_type (ibfd);
11897 s = htab->sgot;
11898 srel = htab->srelgot;
11899 for (; local_got < end_local_got; ++local_got, ++local_tls_type)
11900 {
11901 if (*local_got > 0)
11902 {
11903 *local_got = s->size;
11904 if (*local_tls_type & GOT_TLS_GD)
11905 /* TLS_GD relocs need an 8-byte structure in the GOT. */
11906 s->size += 8;
11907 if (*local_tls_type & GOT_TLS_IE)
11908 s->size += 4;
11909 if (*local_tls_type == GOT_NORMAL)
11910 s->size += 4;
11911
11912 if (info->shared || *local_tls_type == GOT_TLS_GD)
11913 srel->size += RELOC_SIZE (htab);
11914 }
11915 else
11916 *local_got = (bfd_vma) -1;
11917 }
11918 }
11919
11920 if (htab->tls_ldm_got.refcount > 0)
11921 {
11922 /* Allocate two GOT entries and one dynamic relocation (if necessary)
11923 for R_ARM_TLS_LDM32 relocations. */
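 /* The two words are the (module index, offset) pair passed to
    __tls_get_addr for local-dynamic accesses; the offset word is zero,
    and only the module index needs a dynamic reloc when linking shared,
    hence the single reloc added below. */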
11924 htab->tls_ldm_got.offset = htab->sgot->size;
11925 htab->sgot->size += 8;
11926 if (info->shared)
11927 htab->srelgot->size += RELOC_SIZE (htab);
11928 }
11929 else
11930 htab->tls_ldm_got.offset = -1;
11931
11932 /* Allocate global sym .plt and .got entries, and space for global
11933 sym dynamic relocs. */
11934 elf_link_hash_traverse (& htab->root, allocate_dynrelocs, info);
11935
11936 /* Here we rummage through the found bfds to collect glue information. */
11937 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
11938 {
11939 if (! is_arm_elf (ibfd))
11940 continue;
11941
11942 /* Initialise mapping tables for code/data. */
11943 bfd_elf32_arm_init_maps (ibfd);
11944
11945 if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
11946 || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info))
11947 /* xgettext:c-format */
11948 _bfd_error_handler (_("Errors encountered processing file %s"),
11949 ibfd->filename);
11950 }
11951
11952 /* Allocate space for the glue sections now that we've sized them. */
11953 bfd_elf32_arm_allocate_interworking_sections (info);
11954
11955 /* The check_relocs and adjust_dynamic_symbol entry points have
11956 determined the sizes of the various dynamic sections. Allocate
11957 memory for them. */
11958 plt = FALSE;
11959 relocs = FALSE;
11960 for (s = dynobj->sections; s != NULL; s = s->next)
11961 {
11962 const char * name;
11963
11964 if ((s->flags & SEC_LINKER_CREATED) == 0)
11965 continue;
11966
11967 /* It's OK to base decisions on the section name, because none
11968 of the dynobj section names depend upon the input files. */
11969 name = bfd_get_section_name (dynobj, s);
11970
11971 if (strcmp (name, ".plt") == 0)
11972 {
11973 /* Remember whether there is a PLT. */
11974 plt = s->size != 0;
11975 }
11976 else if (CONST_STRNEQ (name, ".rel"))
11977 {
11978 if (s->size != 0)
11979 {
11980 /* Remember whether there are any reloc sections other
11981 than .rel(a).plt and .rela.plt.unloaded. */
11982 if (s != htab->srelplt && s != htab->srelplt2)
11983 relocs = TRUE;
11984
11985 /* We use the reloc_count field as a counter if we need
11986 to copy relocs into the output file. */
11987 s->reloc_count = 0;
11988 }
11989 }
11990 else if (! CONST_STRNEQ (name, ".got")
11991 && strcmp (name, ".dynbss") != 0)
11992 {
11993 /* It's not one of our sections, so don't allocate space. */
11994 continue;
11995 }
11996
11997 if (s->size == 0)
11998 {
11999 /* If we don't need this section, strip it from the
12000 output file. This is mostly to handle .rel(a).bss and
12001 .rel(a).plt. We must create both sections in
12002 create_dynamic_sections, because they must be created
12003 before the linker maps input sections to output
12004 sections. The linker does that before
12005 adjust_dynamic_symbol is called, and it is that
12006 function which decides whether anything needs to go
12007 into these sections. */
12008 s->flags |= SEC_EXCLUDE;
12009 continue;
12010 }
12011
12012 if ((s->flags & SEC_HAS_CONTENTS) == 0)
12013 continue;
12014
12015 /* Allocate memory for the section contents. */
12016 s->contents = (unsigned char *) bfd_zalloc (dynobj, s->size);
12017 if (s->contents == NULL)
12018 return FALSE;
12019 }
12020
12021 if (elf_hash_table (info)->dynamic_sections_created)
12022 {
12023 /* Add some entries to the .dynamic section. We fill in the
12024 values later, in elf32_arm_finish_dynamic_sections, but we
12025 must add the entries now so that we get the correct size for
12026 the .dynamic section. The DT_DEBUG entry is filled in by the
12027 dynamic linker and used by the debugger. */
12028 #define add_dynamic_entry(TAG, VAL) \
12029 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
12030
12031 if (info->executable)
12032 {
12033 if (!add_dynamic_entry (DT_DEBUG, 0))
12034 return FALSE;
12035 }
12036
12037 if (plt)
12038 {
12039 if ( !add_dynamic_entry (DT_PLTGOT, 0)
12040 || !add_dynamic_entry (DT_PLTRELSZ, 0)
12041 || !add_dynamic_entry (DT_PLTREL,
12042 htab->use_rel ? DT_REL : DT_RELA)
12043 || !add_dynamic_entry (DT_JMPREL, 0))
12044 return FALSE;
12045 }
12046
12047 if (relocs)
12048 {
12049 if (htab->use_rel)
12050 {
12051 if (!add_dynamic_entry (DT_REL, 0)
12052 || !add_dynamic_entry (DT_RELSZ, 0)
12053 || !add_dynamic_entry (DT_RELENT, RELOC_SIZE (htab)))
12054 return FALSE;
12055 }
12056 else
12057 {
12058 if (!add_dynamic_entry (DT_RELA, 0)
12059 || !add_dynamic_entry (DT_RELASZ, 0)
12060 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
12061 return FALSE;
12062 }
12063 }
12064
12065 /* If any dynamic relocs apply to a read-only section,
12066 then we need a DT_TEXTREL entry. */
12067 if ((info->flags & DF_TEXTREL) == 0)
12068 elf_link_hash_traverse (& htab->root, elf32_arm_readonly_dynrelocs,
12069 info);
12070
12071 if ((info->flags & DF_TEXTREL) != 0)
12072 {
12073 if (!add_dynamic_entry (DT_TEXTREL, 0))
12074 return FALSE;
12075 }
12076 if (htab->vxworks_p
12077 && !elf_vxworks_add_dynamic_entries (output_bfd, info))
12078 return FALSE;
12079 }
12080 #undef add_dynamic_entry
12081
12082 return TRUE;
12083 }
12084
12085 /* Finish up dynamic symbol handling. We set the contents of various
12086 dynamic sections here. */
12087
12088 static bfd_boolean
12089 elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
12090 struct bfd_link_info * info,
12091 struct elf_link_hash_entry * h,
12092 Elf_Internal_Sym * sym)
12093 {
12094 bfd * dynobj;
12095 struct elf32_arm_link_hash_table *htab;
12096 struct elf32_arm_link_hash_entry *eh;
12097
12098 dynobj = elf_hash_table (info)->dynobj;
12099 htab = elf32_arm_hash_table (info);
12100 if (htab == NULL)
12101 return FALSE;
12102
12103 eh = (struct elf32_arm_link_hash_entry *) h;
12104
12105 if (h->plt.offset != (bfd_vma) -1)
12106 {
12107 asection * splt;
12108 asection * srel;
12109 bfd_byte *loc;
12110 bfd_vma plt_index;
12111 Elf_Internal_Rela rel;
12112
12113 /* This symbol has an entry in the procedure linkage table. Set
12114 it up. */
12115
12116 BFD_ASSERT (h->dynindx != -1);
12117
12118 splt = bfd_get_section_by_name (dynobj, ".plt");
12119 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (htab, ".plt"));
12120 BFD_ASSERT (splt != NULL && srel != NULL);
12121
12122 /* Fill in the entry in the procedure linkage table. */
12123 if (htab->symbian_p)
12124 {
12125 put_arm_insn (htab, output_bfd,
12126 elf32_arm_symbian_plt_entry[0],
12127 splt->contents + h->plt.offset);
12128 bfd_put_32 (output_bfd,
12129 elf32_arm_symbian_plt_entry[1],
12130 splt->contents + h->plt.offset + 4);
12131
12132 /* Fill in the entry in the .rel.plt section. */
12133 rel.r_offset = (splt->output_section->vma
12134 + splt->output_offset
12135 + h->plt.offset + 4);
12136 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
12137
12138 /* Get the index in the procedure linkage table which
12139 corresponds to this symbol. This is the index of this symbol
12140 in all the symbols for which we are making plt entries. The
12141 first entry in the procedure linkage table is reserved. */
12142 plt_index = ((h->plt.offset - htab->plt_header_size)
12143 / htab->plt_entry_size);
12144 }
12145 else
12146 {
12147 bfd_vma got_offset, got_address, plt_address;
12148 bfd_vma got_displacement;
12149 asection * sgot;
12150 bfd_byte * ptr;
12151
12152 sgot = bfd_get_section_by_name (dynobj, ".got.plt");
12153 BFD_ASSERT (sgot != NULL);
12154
12155 /* Get the offset into the .got.plt table of the entry that
12156 corresponds to this function. */
12157 got_offset = eh->plt_got_offset;
12158
12159 /* Get the index in the procedure linkage table which
12160 corresponds to this symbol. This is the index of this symbol
12161 in all the symbols for which we are making plt entries. The
12162 first three entries in .got.plt are reserved; after that
12163 symbols appear in the same order as in .plt. */
12164 plt_index = (got_offset - 12) / 4;
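 /* For example, with the three reserved words at offsets 0, 4 and 8,
    the first real entry has got_offset == 12 and plt_index == 0, the
    next has got_offset == 16 and plt_index == 1, and so on. */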
12165
12166 /* Calculate the address of the GOT entry. */
12167 got_address = (sgot->output_section->vma
12168 + sgot->output_offset
12169 + got_offset);
12170
12171 /* ...and the address of the PLT entry. */
12172 plt_address = (splt->output_section->vma
12173 + splt->output_offset
12174 + h->plt.offset);
12175
12176 ptr = htab->splt->contents + h->plt.offset;
12177 if (htab->vxworks_p && info->shared)
12178 {
12179 unsigned int i;
12180 bfd_vma val;
12181
12182 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
12183 {
12184 val = elf32_arm_vxworks_shared_plt_entry[i];
12185 if (i == 2)
12186 val |= got_address - sgot->output_section->vma;
12187 if (i == 5)
12188 val |= plt_index * RELOC_SIZE (htab);
12189 if (i == 2 || i == 5)
12190 bfd_put_32 (output_bfd, val, ptr);
12191 else
12192 put_arm_insn (htab, output_bfd, val, ptr);
12193 }
12194 }
12195 else if (htab->vxworks_p)
12196 {
12197 unsigned int i;
12198 bfd_vma val;
12199
12200 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
12201 {
12202 val = elf32_arm_vxworks_exec_plt_entry[i];
12203 if (i == 2)
12204 val |= got_address;
12205 if (i == 4)
12206 val |= 0xffffff & -((h->plt.offset + i * 4 + 8) >> 2);
12207 if (i == 5)
12208 val |= plt_index * RELOC_SIZE (htab);
12209 if (i == 2 || i == 5)
12210 bfd_put_32 (output_bfd, val, ptr);
12211 else
12212 put_arm_insn (htab, output_bfd, val, ptr);
12213 }
12214
12215 loc = (htab->srelplt2->contents
12216 + (plt_index * 2 + 1) * RELOC_SIZE (htab));
12217
12218 /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
12219 referencing the GOT for this PLT entry. */
12220 rel.r_offset = plt_address + 8;
12221 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12222 rel.r_addend = got_offset;
12223 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12224 loc += RELOC_SIZE (htab);
12225
12226 /* Create the R_ARM_ABS32 relocation referencing the
12227 beginning of the PLT for this GOT entry. */
12228 rel.r_offset = got_address;
12229 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
12230 rel.r_addend = 0;
12231 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12232 }
12233 else
12234 {
12235 bfd_signed_vma thumb_refs;
12236 /* Calculate the displacement between the PLT slot and the
12237 entry in the GOT. The eight-byte offset accounts for the fact
12238 that the PC reads as the address of the first PLT instruction
12239 plus eight when that instruction uses it as an operand. */
12240 got_displacement = got_address - (plt_address + 8);
12241
12242 BFD_ASSERT ((got_displacement & 0xf0000000) == 0);
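 /* A rough sketch of the entry built below (the exact encodings are in
    elf32_arm_plt_entry):
        add  ip, pc, #(G & 0x0ff00000)
        add  ip, ip, #(G & 0x000ff000)
        ldr  pc, [ip, #(G & 0x00000fff)]!
    where G is the GOT displacement; the three immediates cover 28 bits,
    which is why the assertion above must hold. */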
12243
12244 thumb_refs = eh->plt_thumb_refcount;
12245 if (!htab->use_blx)
12246 thumb_refs += eh->plt_maybe_thumb_refcount;
12247
12248 if (thumb_refs > 0)
12249 {
12250 put_thumb_insn (htab, output_bfd,
12251 elf32_arm_plt_thumb_stub[0], ptr - 4);
12252 put_thumb_insn (htab, output_bfd,
12253 elf32_arm_plt_thumb_stub[1], ptr - 2);
12254 }
12255
12256 put_arm_insn (htab, output_bfd,
12257 elf32_arm_plt_entry[0]
12258 | ((got_displacement & 0x0ff00000) >> 20),
12259 ptr + 0);
12260 put_arm_insn (htab, output_bfd,
12261 elf32_arm_plt_entry[1]
12262 | ((got_displacement & 0x000ff000) >> 12),
12263 ptr + 4);
12264 put_arm_insn (htab, output_bfd,
12265 elf32_arm_plt_entry[2]
12266 | (got_displacement & 0x00000fff),
12267 ptr + 8);
12268 #ifdef FOUR_WORD_PLT
12269 bfd_put_32 (output_bfd, elf32_arm_plt_entry[3], ptr + 12);
12270 #endif
12271 }
12272
12273 /* Fill in the entry in the global offset table. */
12274 bfd_put_32 (output_bfd,
12275 (splt->output_section->vma
12276 + splt->output_offset),
12277 sgot->contents + got_offset);
12278
12279 /* Fill in the entry in the .rel(a).plt section. */
12280 rel.r_addend = 0;
12281 rel.r_offset = got_address;
12282 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_JUMP_SLOT);
12283 }
12284
12285 loc = srel->contents + plt_index * RELOC_SIZE (htab);
12286 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12287
12288 if (!h->def_regular)
12289 {
12290 /* Mark the symbol as undefined, rather than as defined in
12291 the .plt section. Leave the value alone. */
12292 sym->st_shndx = SHN_UNDEF;
12293 /* If the symbol is weak, we do need to clear the value.
12294 Otherwise, the PLT entry would provide a definition for
12295 the symbol even if the symbol wasn't defined anywhere,
12296 and so the symbol's address would never compare equal to NULL. */
12297 if (!h->ref_regular_nonweak)
12298 sym->st_value = 0;
12299 }
12300 }
12301
12302 if (h->got.offset != (bfd_vma) -1
12303 && (elf32_arm_hash_entry (h)->tls_type & GOT_TLS_GD) == 0
12304 && (elf32_arm_hash_entry (h)->tls_type & GOT_TLS_IE) == 0)
12305 {
12306 asection * sgot;
12307 asection * srel;
12308 Elf_Internal_Rela rel;
12309 bfd_byte *loc;
12310 bfd_vma offset;
12311
12312 /* This symbol has an entry in the global offset table. Set it
12313 up. */
12314 sgot = bfd_get_section_by_name (dynobj, ".got");
12315 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (htab, ".got"));
12316 BFD_ASSERT (sgot != NULL && srel != NULL);
12317
12318 offset = (h->got.offset & ~(bfd_vma) 1);
12319 rel.r_addend = 0;
12320 rel.r_offset = (sgot->output_section->vma
12321 + sgot->output_offset
12322 + offset);
12323
12324 /* If this is a static link, or it is a -Bsymbolic link and the
12325 symbol is defined locally or was forced to be local because
12326 of a version file, we just want to emit a RELATIVE reloc.
12327 The entry in the global offset table will already have been
12328 initialized in the relocate_section function. */
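 /* An R_ARM_RELATIVE reloc needs no symbol lookup at run time: the
    dynamic linker simply adds the load base to a stored value. With REL
    relocs that value lives in the GOT slot itself; with RELA it is moved
    into r_addend and the slot is cleared, as the use_rel test below
    arranges. */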
12329 if (info->shared
12330 && SYMBOL_REFERENCES_LOCAL (info, h))
12331 {
12332 BFD_ASSERT ((h->got.offset & 1) != 0);
12333 rel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12334 if (!htab->use_rel)
12335 {
12336 rel.r_addend = bfd_get_32 (output_bfd, sgot->contents + offset);
12337 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + offset);
12338 }
12339 }
12340 else
12341 {
12342 BFD_ASSERT ((h->got.offset & 1) == 0);
12343 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + offset);
12344 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
12345 }
12346
12347 loc = srel->contents + srel->reloc_count++ * RELOC_SIZE (htab);
12348 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12349 }
12350
12351 if (h->needs_copy)
12352 {
12353 asection * s;
12354 Elf_Internal_Rela rel;
12355 bfd_byte *loc;
12356
12357 /* This symbol needs a copy reloc. Set it up. */
12358 BFD_ASSERT (h->dynindx != -1
12359 && (h->root.type == bfd_link_hash_defined
12360 || h->root.type == bfd_link_hash_defweak));
12361
12362 s = bfd_get_section_by_name (h->root.u.def.section->owner,
12363 RELOC_SECTION (htab, ".bss"));
12364 BFD_ASSERT (s != NULL);
12365
12366 rel.r_addend = 0;
12367 rel.r_offset = (h->root.u.def.value
12368 + h->root.u.def.section->output_section->vma
12369 + h->root.u.def.section->output_offset);
12370 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
12371 loc = s->contents + s->reloc_count++ * RELOC_SIZE (htab);
12372 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12373 }
12374
12375 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. On VxWorks,
12376 the _GLOBAL_OFFSET_TABLE_ symbol is not absolute: it is relative
12377 to the ".got" section. */
12378 if (strcmp (h->root.root.string, "_DYNAMIC") == 0
12379 || (!htab->vxworks_p && h == htab->root.hgot))
12380 sym->st_shndx = SHN_ABS;
12381
12382 return TRUE;
12383 }
12384
12385 /* Finish up the dynamic sections. */
12386
12387 static bfd_boolean
12388 elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info)
12389 {
12390 bfd * dynobj;
12391 asection * sgot;
12392 asection * sdyn;
12393 struct elf32_arm_link_hash_table *htab;
12394
12395 htab = elf32_arm_hash_table (info);
12396 if (htab == NULL)
12397 return FALSE;
12398
12399 dynobj = elf_hash_table (info)->dynobj;
12400
12401 sgot = bfd_get_section_by_name (dynobj, ".got.plt");
12402 BFD_ASSERT (htab->symbian_p || sgot != NULL);
12403 sdyn = bfd_get_section_by_name (dynobj, ".dynamic");
12404
12405 if (elf_hash_table (info)->dynamic_sections_created)
12406 {
12407 asection *splt;
12408 Elf32_External_Dyn *dyncon, *dynconend;
12409
12410 splt = bfd_get_section_by_name (dynobj, ".plt");
12411 BFD_ASSERT (splt != NULL && sdyn != NULL);
12412
12413 dyncon = (Elf32_External_Dyn *) sdyn->contents;
12414 dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);
12415
12416 for (; dyncon < dynconend; dyncon++)
12417 {
12418 Elf_Internal_Dyn dyn;
12419 const char * name;
12420 asection * s;
12421
12422 bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);
12423
12424 switch (dyn.d_tag)
12425 {
12426 unsigned int type;
12427
12428 default:
12429 if (htab->vxworks_p
12430 && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
12431 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12432 break;
12433
12434 case DT_HASH:
12435 name = ".hash";
12436 goto get_vma_if_bpabi;
12437 case DT_STRTAB:
12438 name = ".dynstr";
12439 goto get_vma_if_bpabi;
12440 case DT_SYMTAB:
12441 name = ".dynsym";
12442 goto get_vma_if_bpabi;
12443 case DT_VERSYM:
12444 name = ".gnu.version";
12445 goto get_vma_if_bpabi;
12446 case DT_VERDEF:
12447 name = ".gnu.version_d";
12448 goto get_vma_if_bpabi;
12449 case DT_VERNEED:
12450 name = ".gnu.version_r";
12451 goto get_vma_if_bpabi;
12452
12453 case DT_PLTGOT:
12454 name = ".got";
12455 goto get_vma;
12456 case DT_JMPREL:
12457 name = RELOC_SECTION (htab, ".plt");
12458 get_vma:
12459 s = bfd_get_section_by_name (output_bfd, name);
12460 BFD_ASSERT (s != NULL);
12461 if (!htab->symbian_p)
12462 dyn.d_un.d_ptr = s->vma;
12463 else
12464 /* In the BPABI, tags in the PT_DYNAMIC section point
12465 at the file offset, not the memory address, for the
12466 convenience of the post linker. */
12467 dyn.d_un.d_ptr = s->filepos;
12468 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12469 break;
12470
12471 get_vma_if_bpabi:
12472 if (htab->symbian_p)
12473 goto get_vma;
12474 break;
12475
12476 case DT_PLTRELSZ:
12477 s = bfd_get_section_by_name (output_bfd,
12478 RELOC_SECTION (htab, ".plt"));
12479 BFD_ASSERT (s != NULL);
12480 dyn.d_un.d_val = s->size;
12481 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12482 break;
12483
12484 case DT_RELSZ:
12485 case DT_RELASZ:
12486 if (!htab->symbian_p)
12487 {
12488 /* My reading of the SVR4 ABI indicates that the
12489 procedure linkage table relocs (DT_JMPREL) should be
12490 included in the overall relocs (DT_REL). This is
12491 what Solaris does. However, UnixWare cannot handle
12492 that case. Therefore, we override the DT_RELSZ entry
12493 here to make it not include the JMPREL relocs. Since
12494 the linker script arranges for .rel(a).plt to follow all
12495 other relocation sections, we don't have to worry
12496 about changing the DT_REL entry. */
12497 s = bfd_get_section_by_name (output_bfd,
12498 RELOC_SECTION (htab, ".plt"));
12499 if (s != NULL)
12500 dyn.d_un.d_val -= s->size;
12501 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12502 break;
12503 }
12504 /* Fall through. */
12505
12506 case DT_REL:
12507 case DT_RELA:
12508 /* In the BPABI, the DT_REL tag must point at the file
12509 offset, not the VMA, of the first relocation
12510 section. So, we use code similar to that in
12511 elflink.c, but do not check for SHF_ALLOC on the
12512 relocation section, since relocation sections are
12513 never allocated under the BPABI. The comments above
12514 about UnixWare notwithstanding, we include all of the
12515 relocations here. */
12516 if (htab->symbian_p)
12517 {
12518 unsigned int i;
12519 type = ((dyn.d_tag == DT_REL || dyn.d_tag == DT_RELSZ)
12520 ? SHT_REL : SHT_RELA);
12521 dyn.d_un.d_val = 0;
12522 for (i = 1; i < elf_numsections (output_bfd); i++)
12523 {
12524 Elf_Internal_Shdr *hdr
12525 = elf_elfsections (output_bfd)[i];
12526 if (hdr->sh_type == type)
12527 {
12528 if (dyn.d_tag == DT_RELSZ
12529 || dyn.d_tag == DT_RELASZ)
12530 dyn.d_un.d_val += hdr->sh_size;
12531 else if ((ufile_ptr) hdr->sh_offset
12532 <= dyn.d_un.d_val - 1)
12533 dyn.d_un.d_val = hdr->sh_offset;
12534 }
12535 }
12536 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12537 }
12538 break;
12539
12540 /* Set the bottom bit of DT_INIT/FINI if the
12541 corresponding function is Thumb. */
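 /* The dynamic linker branches to these addresses directly, so a Thumb
    init/fini function needs the low bit set for correct interworking. */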
12542 case DT_INIT:
12543 name = info->init_function;
12544 goto get_sym;
12545 case DT_FINI:
12546 name = info->fini_function;
12547 get_sym:
12548 /* If it wasn't set by elf_bfd_final_link
12549 then there is nothing to adjust. */
12550 if (dyn.d_un.d_val != 0)
12551 {
12552 struct elf_link_hash_entry * eh;
12553
12554 eh = elf_link_hash_lookup (elf_hash_table (info), name,
12555 FALSE, FALSE, TRUE);
12556 if (eh != NULL
12557 && ELF_ST_TYPE (eh->type) == STT_ARM_TFUNC)
12558 {
12559 dyn.d_un.d_val |= 1;
12560 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12561 }
12562 }
12563 break;
12564 }
12565 }
12566
12567 /* Fill in the first entry in the procedure linkage table. */
12568 if (splt->size > 0 && htab->plt_header_size)
12569 {
12570 const bfd_vma *plt0_entry;
12571 bfd_vma got_address, plt_address, got_displacement;
12572
12573 /* Calculate the addresses of the GOT and PLT. */
12574 got_address = sgot->output_section->vma + sgot->output_offset;
12575 plt_address = splt->output_section->vma + splt->output_offset;
12576
12577 if (htab->vxworks_p)
12578 {
12579 /* The VxWorks GOT is relocated by the dynamic linker.
12580 Therefore, we must emit relocations rather than simply
12581 computing the values now. */
12582 Elf_Internal_Rela rel;
12583
12584 plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
12585 put_arm_insn (htab, output_bfd, plt0_entry[0],
12586 splt->contents + 0);
12587 put_arm_insn (htab, output_bfd, plt0_entry[1],
12588 splt->contents + 4);
12589 put_arm_insn (htab, output_bfd, plt0_entry[2],
12590 splt->contents + 8);
12591 bfd_put_32 (output_bfd, got_address, splt->contents + 12);
12592
12593 /* Generate a relocation for _GLOBAL_OFFSET_TABLE_. */
12594 rel.r_offset = plt_address + 12;
12595 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12596 rel.r_addend = 0;
12597 SWAP_RELOC_OUT (htab) (output_bfd, &rel,
12598 htab->srelplt2->contents);
12599 }
12600 else
12601 {
12602 got_displacement = got_address - (plt_address + 16);
12603
12604 plt0_entry = elf32_arm_plt0_entry;
12605 put_arm_insn (htab, output_bfd, plt0_entry[0],
12606 splt->contents + 0);
12607 put_arm_insn (htab, output_bfd, plt0_entry[1],
12608 splt->contents + 4);
12609 put_arm_insn (htab, output_bfd, plt0_entry[2],
12610 splt->contents + 8);
12611 put_arm_insn (htab, output_bfd, plt0_entry[3],
12612 splt->contents + 12);
12613
12614 #ifdef FOUR_WORD_PLT
12615 /* The displacement value goes in the otherwise-unused
12616 last word of the second entry. */
12617 bfd_put_32 (output_bfd, got_displacement, splt->contents + 28);
12618 #else
12619 bfd_put_32 (output_bfd, got_displacement, splt->contents + 16);
12620 #endif
12621 }
12622 }
12623
12624 /* UnixWare sets the entsize of .plt to 4, although that doesn't
12625 really seem like the right value. */
12626 if (splt->output_section->owner == output_bfd)
12627 elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4;
12628
12629 if (htab->vxworks_p && !info->shared && htab->splt->size > 0)
12630 {
12631 /* Correct the .rel(a).plt.unloaded relocations. They will have
12632 incorrect symbol indexes. */
12633 int num_plts;
12634 unsigned char *p;
12635
12636 num_plts = ((htab->splt->size - htab->plt_header_size)
12637 / htab->plt_entry_size);
12638 p = htab->srelplt2->contents + RELOC_SIZE (htab);
12639
12640 for (; num_plts; num_plts--)
12641 {
12642 Elf_Internal_Rela rel;
12643
12644 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
12645 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12646 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
12647 p += RELOC_SIZE (htab);
12648
12649 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
12650 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
12651 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
12652 p += RELOC_SIZE (htab);
12653 }
12654 }
12655 }
12656
12657 /* Fill in the first three entries in the global offset table. */
12658 if (sgot)
12659 {
12660 if (sgot->size > 0)
12661 {
12662 if (sdyn == NULL)
12663 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
12664 else
12665 bfd_put_32 (output_bfd,
12666 sdyn->output_section->vma + sdyn->output_offset,
12667 sgot->contents);
12668 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
12669 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
12670 }
12671
12672 elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
12673 }
12674
12675 return TRUE;
12676 }
12677
12678 static void
12679 elf32_arm_post_process_headers (bfd * abfd, struct bfd_link_info * link_info ATTRIBUTE_UNUSED)
12680 {
12681 Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form. */
12682 struct elf32_arm_link_hash_table *globals;
12683
12684 i_ehdrp = elf_elfheader (abfd);
12685
12686 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
12687 i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
12688 else
12689 i_ehdrp->e_ident[EI_OSABI] = 0;
12690 i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;
12691
12692 if (link_info)
12693 {
12694 globals = elf32_arm_hash_table (link_info);
12695 if (globals != NULL && globals->byteswap_code)
12696 i_ehdrp->e_flags |= EF_ARM_BE8;
12697 }
12698 }
12699
12700 static enum elf_reloc_type_class
12701 elf32_arm_reloc_type_class (const Elf_Internal_Rela *rela)
12702 {
12703 switch ((int) ELF32_R_TYPE (rela->r_info))
12704 {
12705 case R_ARM_RELATIVE:
12706 return reloc_class_relative;
12707 case R_ARM_JUMP_SLOT:
12708 return reloc_class_plt;
12709 case R_ARM_COPY:
12710 return reloc_class_copy;
12711 default:
12712 return reloc_class_normal;
12713 }
12714 }
12715
12716 /* Set ARM-specific section flags: mark SHT_NOTE sections as link-once with identical contents so that duplicate notes are merged. */
12717
12718 static bfd_boolean
12719 elf32_arm_section_flags (flagword *flags, const Elf_Internal_Shdr *hdr)
12720 {
12721 if (hdr->sh_type == SHT_NOTE)
12722 *flags |= SEC_LINK_ONCE | SEC_LINK_DUPLICATES_SAME_CONTENTS;
12723
12724 return TRUE;
12725 }
12726
12727 static void
12728 elf32_arm_final_write_processing (bfd *abfd, bfd_boolean linker ATTRIBUTE_UNUSED)
12729 {
12730 bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
12731 }
12732
12733 /* Return TRUE if this is an unwinding table entry. */
12734
12735 static bfd_boolean
12736 is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
12737 {
12738 return (CONST_STRNEQ (name, ELF_STRING_ARM_unwind)
12739 || CONST_STRNEQ (name, ELF_STRING_ARM_unwind_once));
12740 }
12741
12742
12743 /* Set the type and flags for an ARM section. We base this on
12744 the section name, which is a hack, but ought to work. */
12745
12746 static bfd_boolean
12747 elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
12748 {
12749 const char * name;
12750
12751 name = bfd_get_section_name (abfd, sec);
12752
12753 if (is_arm_elf_unwind_section_name (abfd, name))
12754 {
12755 hdr->sh_type = SHT_ARM_EXIDX;
12756 hdr->sh_flags |= SHF_LINK_ORDER;
12757 }
12758 return TRUE;
12759 }
12760
12761 /* Handle an ARM specific section when reading an object file. This is
12762 called when bfd_section_from_shdr finds a section with an unknown
12763 type. */
12764
12765 static bfd_boolean
12766 elf32_arm_section_from_shdr (bfd *abfd,
12767 Elf_Internal_Shdr * hdr,
12768 const char *name,
12769 int shindex)
12770 {
12771 /* There ought to be a place to keep ELF backend specific flags, but
12772 at the moment there isn't one. We just keep track of the
12773 sections by their name, instead. Fortunately, the ABI gives
12774 names for all the ARM specific sections, so we will probably get
12775 away with this. */
12776 switch (hdr->sh_type)
12777 {
12778 case SHT_ARM_EXIDX:
12779 case SHT_ARM_PREEMPTMAP:
12780 case SHT_ARM_ATTRIBUTES:
12781 break;
12782
12783 default:
12784 return FALSE;
12785 }
12786
12787 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
12788 return FALSE;
12789
12790 return TRUE;
12791 }
12792
12793 static _arm_elf_section_data *
12794 get_arm_elf_section_data (asection * sec)
12795 {
12796 if (sec && sec->owner && is_arm_elf (sec->owner))
12797 return elf32_arm_section_data (sec);
12798 else
12799 return NULL;
12800 }
12801
12802 typedef struct
12803 {
12804 void *finfo;
12805 struct bfd_link_info *info;
12806 asection *sec;
12807 int sec_shndx;
12808 int (*func) (void *, const char *, Elf_Internal_Sym *,
12809 asection *, struct elf_link_hash_entry *);
12810 } output_arch_syminfo;
12811
12812 enum map_symbol_type
12813 {
12814 ARM_MAP_ARM,
12815 ARM_MAP_THUMB,
12816 ARM_MAP_DATA
12817 };
12818
12819
12820 /* Output a single mapping symbol. */
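 /* $a, $t and $d mark the start of ARM code, Thumb code and data runs
    respectively, as defined by the ARM ELF specification; consumers such
    as disassemblers and the linker's BE8 byte-swapping pass rely on them
    to tell code from data. */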
12821
12822 static bfd_boolean
12823 elf32_arm_output_map_sym (output_arch_syminfo *osi,
12824 enum map_symbol_type type,
12825 bfd_vma offset)
12826 {
12827 static const char *names[3] = {"$a", "$t", "$d"};
12828 Elf_Internal_Sym sym;
12829
12830 sym.st_value = osi->sec->output_section->vma
12831 + osi->sec->output_offset
12832 + offset;
12833 sym.st_size = 0;
12834 sym.st_other = 0;
12835 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
12836 sym.st_shndx = osi->sec_shndx;
12837 elf32_arm_section_map_add (osi->sec, names[type][1], offset);
12838 return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
12839 }
12840
12841
12842 /* Output mapping symbols for PLT entries associated with H. */
12843
12844 static bfd_boolean
12845 elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
12846 {
12847 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
12848 struct elf32_arm_link_hash_table *htab;
12849 struct elf32_arm_link_hash_entry *eh;
12850 bfd_vma addr;
12851
12852 if (h->root.type == bfd_link_hash_indirect)
12853 return TRUE;
12854
12855 if (h->root.type == bfd_link_hash_warning)
12856 /* When warning symbols are created, they **replace** the "real"
12857 entry in the hash table, thus we never get to see the real
12858 symbol in a hash traversal. So look at it now. */
12859 h = (struct elf_link_hash_entry *) h->root.u.i.link;
12860
12861 if (h->plt.offset == (bfd_vma) -1)
12862 return TRUE;
12863
12864 htab = elf32_arm_hash_table (osi->info);
12865 if (htab == NULL)
12866 return FALSE;
12867
12868 eh = (struct elf32_arm_link_hash_entry *) h;
12869 addr = h->plt.offset;
12870 if (htab->symbian_p)
12871 {
12872 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12873 return FALSE;
12874 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 4))
12875 return FALSE;
12876 }
12877 else if (htab->vxworks_p)
12878 {
12879 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12880 return FALSE;
12881 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
12882 return FALSE;
12883 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
12884 return FALSE;
12885 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
12886 return FALSE;
12887 }
12888 else
12889 {
12890 bfd_signed_vma thumb_refs;
12891
12892 thumb_refs = eh->plt_thumb_refcount;
12893 if (!htab->use_blx)
12894 thumb_refs += eh->plt_maybe_thumb_refcount;
12895
12896 if (thumb_refs > 0)
12897 {
12898 if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
12899 return FALSE;
12900 }
12901 #ifdef FOUR_WORD_PLT
12902 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12903 return FALSE;
12904 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
12905 return FALSE;
12906 #else
12907 /* A three-word PLT entry with no Thumb thunk contains only ARM code,
12908 so we only need to output a mapping symbol for the first PLT entry
12909 (addr == 20, just past the PLT header) and for entries with Thumb thunks. */
12910 if (thumb_refs > 0 || addr == 20)
12911 {
12912 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12913 return FALSE;
12914 }
12915 #endif
12916 }
12917
12918 return TRUE;
12919 }
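
/* Note, inferred from the mapping symbols emitted above and below rather
   than a statement about every PLT layout: in the default three-word-entry
   scheme the addr == 20 test marks the first PLT entry, i.e. the one that
   immediately follows the PLT header, whose trailing word at offset 16 is
   data and gets its own $d in elf32_arm_output_arch_local_syms.  Later
   three-word entries are pure ARM code, so they inherit the preceding $a
   unless a Thumb thunk precedes them, in which case a $t is placed at the
   thunk (addr - 4) and a fresh $a at the entry itself.  */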
12920
12921 /* Output a single local symbol for a generated stub. */
12922
12923 static bfd_boolean
12924 elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
12925 bfd_vma offset, bfd_vma size)
12926 {
12927 Elf_Internal_Sym sym;
12928
12929 sym.st_value = osi->sec->output_section->vma
12930 + osi->sec->output_offset
12931 + offset;
12932 sym.st_size = size;
12933 sym.st_other = 0;
12934 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
12935 sym.st_shndx = osi->sec_shndx;
12936 return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
12937 }
12938
12939 static bfd_boolean
12940 arm_map_one_stub (struct bfd_hash_entry * gen_entry,
12941 void * in_arg)
12942 {
12943 struct elf32_arm_stub_hash_entry *stub_entry;
12944 struct bfd_link_info *info;
12945 asection *stub_sec;
12946 bfd_vma addr;
12947 char *stub_name;
12948 output_arch_syminfo *osi;
12949 const insn_sequence *template_sequence;
12950 enum stub_insn_type prev_type;
12951 int size;
12952 int i;
12953 enum map_symbol_type sym_type;
12954
12955 /* Massage our args to the form they really have. */
12956 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
12957 osi = (output_arch_syminfo *) in_arg;
12958
12959 info = osi->info;
12960
12961 stub_sec = stub_entry->stub_sec;
12962
12963 /* Ensure this stub is attached to the current section being
12964 processed. */
12965 if (stub_sec != osi->sec)
12966 return TRUE;
12967
12968 addr = (bfd_vma) stub_entry->stub_offset;
12969 stub_name = stub_entry->output_name;
12970
12971 template_sequence = stub_entry->stub_template;
12972 switch (template_sequence[0].type)
12973 {
12974 case ARM_TYPE:
12975 if (!elf32_arm_output_stub_sym (osi, stub_name, addr, stub_entry->stub_size))
12976 return FALSE;
12977 break;
12978 case THUMB16_TYPE:
12979 case THUMB32_TYPE:
12980 if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
12981 stub_entry->stub_size))
12982 return FALSE;
12983 break;
12984 default:
12985 BFD_FAIL ();
12986 return FALSE;
12987 }
12988
12989 prev_type = DATA_TYPE;
12990 size = 0;
12991 for (i = 0; i < stub_entry->stub_template_size; i++)
12992 {
12993 switch (template_sequence[i].type)
12994 {
12995 case ARM_TYPE:
12996 sym_type = ARM_MAP_ARM;
12997 break;
12998
12999 case THUMB16_TYPE:
13000 case THUMB32_TYPE:
13001 sym_type = ARM_MAP_THUMB;
13002 break;
13003
13004 case DATA_TYPE:
13005 sym_type = ARM_MAP_DATA;
13006 break;
13007
13008 default:
13009 BFD_FAIL ();
13010 return FALSE;
13011 }
13012
13013 if (template_sequence[i].type != prev_type)
13014 {
13015 prev_type = template_sequence[i].type;
13016 if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
13017 return FALSE;
13018 }
13019
13020 switch (template_sequence[i].type)
13021 {
13022 case ARM_TYPE:
13023 case THUMB32_TYPE:
13024 size += 4;
13025 break;
13026
13027 case THUMB16_TYPE:
13028 size += 2;
13029 break;
13030
13031 case DATA_TYPE:
13032 size += 4;
13033 break;
13034
13035 default:
13036 BFD_FAIL ();
13037 return FALSE;
13038 }
13039 }
13040
13041 return TRUE;
13042 }
13043
13044 /* Output mapping symbols for linker generated sections,
13045 and for those data-only sections that do not have a
13046 $d. */
13047
13048 static bfd_boolean
13049 elf32_arm_output_arch_local_syms (bfd *output_bfd,
13050 struct bfd_link_info *info,
13051 void *finfo,
13052 int (*func) (void *, const char *,
13053 Elf_Internal_Sym *,
13054 asection *,
13055 struct elf_link_hash_entry *))
13056 {
13057 output_arch_syminfo osi;
13058 struct elf32_arm_link_hash_table *htab;
13059 bfd_vma offset;
13060 bfd_size_type size;
13061 bfd *input_bfd;
13062
13063 htab = elf32_arm_hash_table (info);
13064 if (htab == NULL)
13065 return FALSE;
13066
13067 check_use_blx (htab);
13068
13069 osi.finfo = finfo;
13070 osi.info = info;
13071 osi.func = func;
13072
13073 /* Add a $d mapping symbol to data-only sections that
13074 don't have any mapping symbol. This may result in (harmless) redundant
13075 mapping symbols. */
13076 for (input_bfd = info->input_bfds;
13077 input_bfd != NULL;
13078 input_bfd = input_bfd->link_next)
13079 {
13080 if ((input_bfd->flags & (BFD_LINKER_CREATED | HAS_SYMS)) == HAS_SYMS)
13081 for (osi.sec = input_bfd->sections;
13082 osi.sec != NULL;
13083 osi.sec = osi.sec->next)
13084 {
13085 if (osi.sec->output_section != NULL
13086 && ((osi.sec->output_section->flags & (SEC_ALLOC | SEC_CODE))
13087 != 0)
13088 && (osi.sec->flags & (SEC_HAS_CONTENTS | SEC_LINKER_CREATED))
13089 == SEC_HAS_CONTENTS
13090 && get_arm_elf_section_data (osi.sec) != NULL
13091 && get_arm_elf_section_data (osi.sec)->mapcount == 0
13092 && osi.sec->size > 0)
13093 {
13094 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13095 (output_bfd, osi.sec->output_section);
13096 if (osi.sec_shndx != (int)SHN_BAD)
13097 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 0);
13098 }
13099 }
13100 }
13101
13102 /* ARM->Thumb glue. */
13103 if (htab->arm_glue_size > 0)
13104 {
13105 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
13106 ARM2THUMB_GLUE_SECTION_NAME);
13107
13108 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13109 (output_bfd, osi.sec->output_section);
13110 if (info->shared || htab->root.is_relocatable_executable
13111 || htab->pic_veneer)
13112 size = ARM2THUMB_PIC_GLUE_SIZE;
13113 else if (htab->use_blx)
13114 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
13115 else
13116 size = ARM2THUMB_STATIC_GLUE_SIZE;
13117
13118 for (offset = 0; offset < htab->arm_glue_size; offset += size)
13119 {
13120 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
13121 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
13122 }
13123 }
13124
13125 /* Thumb->ARM glue. */
13126 if (htab->thumb_glue_size > 0)
13127 {
13128 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
13129 THUMB2ARM_GLUE_SECTION_NAME);
13130
13131 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13132 (output_bfd, osi.sec->output_section);
13133 size = THUMB2ARM_GLUE_SIZE;
13134
13135 for (offset = 0; offset < htab->thumb_glue_size; offset += size)
13136 {
13137 elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
13138 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
13139 }
13140 }
13141
13142 /* ARMv4 BX veneers. */
13143 if (htab->bx_glue_size > 0)
13144 {
13145 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
13146 ARM_BX_GLUE_SECTION_NAME);
13147
13148 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13149 (output_bfd, osi.sec->output_section);
13150
13151 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
13152 }
13153
13154 /* Long call stubs. */
13155 if (htab->stub_bfd && htab->stub_bfd->sections)
13156 {
13157 asection* stub_sec;
13158
13159 for (stub_sec = htab->stub_bfd->sections;
13160 stub_sec != NULL;
13161 stub_sec = stub_sec->next)
13162 {
13163 /* Ignore non-stub sections. */
13164 if (!strstr (stub_sec->name, STUB_SUFFIX))
13165 continue;
13166
13167 osi.sec = stub_sec;
13168
13169 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13170 (output_bfd, osi.sec->output_section);
13171
13172 bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
13173 }
13174 }
13175
13176 /* Finally, output mapping symbols for the PLT. */
13177 if (!htab->splt || htab->splt->size == 0)
13178 return TRUE;
13179
13180 osi.sec_shndx = _bfd_elf_section_from_bfd_section (output_bfd,
13181 htab->splt->output_section);
13182 osi.sec = htab->splt;
13183 /* Output mapping symbols for the PLT header. SymbianOS does not have a
13184 PLT header. */
13185 if (htab->vxworks_p)
13186 {
13187 /* VxWorks shared libraries have no PLT header. */
13188 if (!info->shared)
13189 {
13190 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
13191 return FALSE;
13192 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
13193 return FALSE;
13194 }
13195 }
13196 else if (!htab->symbian_p)
13197 {
13198 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
13199 return FALSE;
13200 #ifndef FOUR_WORD_PLT
13201 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
13202 return FALSE;
13203 #endif
13204 }
13205
13206 elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, (void *) &osi);
13207 return TRUE;
13208 }
13209
13210 /* Allocate target specific section data. */
13211
13212 static bfd_boolean
13213 elf32_arm_new_section_hook (bfd *abfd, asection *sec)
13214 {
13215 if (!sec->used_by_bfd)
13216 {
13217 _arm_elf_section_data *sdata;
13218 bfd_size_type amt = sizeof (*sdata);
13219
13220 sdata = (_arm_elf_section_data *) bfd_zalloc (abfd, amt);
13221 if (sdata == NULL)
13222 return FALSE;
13223 sec->used_by_bfd = sdata;
13224 }
13225
13226 return _bfd_elf_new_section_hook (abfd, sec);
13227 }
13228
13229
13230 /* Used to order a list of mapping symbols by address. */
13231
13232 static int
13233 elf32_arm_compare_mapping (const void * a, const void * b)
13234 {
13235 const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
13236 const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;
13237
13238 if (amap->vma > bmap->vma)
13239 return 1;
13240 else if (amap->vma < bmap->vma)
13241 return -1;
13242 else if (amap->type > bmap->type)
13243 /* Ensure results do not depend on the host qsort for objects with
13244 multiple mapping symbols at the same address by sorting on type
13245 after vma. */
13246 return 1;
13247 else if (amap->type < bmap->type)
13248 return -1;
13249 else
13250 return 0;
13251 }
13252
13253 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
13254
13255 static unsigned long
13256 offset_prel31 (unsigned long addr, bfd_vma offset)
13257 {
13258 return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
13259 }
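
/* Worked example (values purely illustrative):

     offset_prel31 (0x80000010, 4) == 0x80000014   bit 31 is preserved
     offset_prel31 (0x7ffffffc, 8) == 0x00000004   low 31 bits wrap around

   i.e. the addition is performed modulo 2^31 while the top bit of the
   original word is carried through unchanged, which is what an applied
   R_ARM_PREL31 field requires.  */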
13260
13261 /* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
13262 relocations. */
13263
13264 static void
13265 copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
13266 {
13267 unsigned long first_word = bfd_get_32 (output_bfd, from);
13268 unsigned long second_word = bfd_get_32 (output_bfd, from + 4);
13269
13270 /* High bit of first word is supposed to be zero. */
13271 if ((first_word & 0x80000000ul) == 0)
13272 first_word = offset_prel31 (first_word, offset);
13273
13274 /* If the high bit of the second word is clear, and the bit pattern is not 0x1
13275 (EXIDX_CANTUNWIND), this is an offset to an .ARM.extab entry. */
13276 if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
13277 second_word = offset_prel31 (second_word, offset);
13278
13279 bfd_put_32 (output_bfd, first_word, to);
13280 bfd_put_32 (output_bfd, second_word, to + 4);
13281 }
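
/* Roughly, each .ARM.exidx entry is a pair of words: the first is a
   prel31 offset to the code region it covers, and the second is either
   EXIDX_CANTUNWIND (0x1), an inline unwind descriptor (bit 31 set), or a
   prel31 offset to an .ARM.extab entry (bit 31 clear).  Only the prel31
   words need rebiasing when an entry is moved, which is exactly what
   copy_exidx_entry does above; see the ARM EHABI for the full layout.  */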
13282
13283 /* Data for make_branch_to_a8_stub(). */
13284
13285 struct a8_branch_to_stub_data {
13286 asection *writing_section;
13287 bfd_byte *contents;
13288 };
13289
13290
13291 /* Helper to insert branches to Cortex-A8 erratum stubs in the right
13292 places for a particular section. */
13293
13294 static bfd_boolean
13295 make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
13296 void *in_arg)
13297 {
13298 struct elf32_arm_stub_hash_entry *stub_entry;
13299 struct a8_branch_to_stub_data *data;
13300 bfd_byte *contents;
13301 unsigned long branch_insn;
13302 bfd_vma veneered_insn_loc, veneer_entry_loc;
13303 bfd_signed_vma branch_offset;
13304 bfd *abfd;
13305 unsigned int target;
13306
13307 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
13308 data = (struct a8_branch_to_stub_data *) in_arg;
13309
13310 if (stub_entry->target_section != data->writing_section
13311 || stub_entry->stub_type < arm_stub_a8_veneer_b_cond)
13312 return TRUE;
13313
13314 contents = data->contents;
13315
13316 veneered_insn_loc = stub_entry->target_section->output_section->vma
13317 + stub_entry->target_section->output_offset
13318 + stub_entry->target_value;
13319
13320 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
13321 + stub_entry->stub_sec->output_offset
13322 + stub_entry->stub_offset;
13323
13324 if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
13325 veneered_insn_loc &= ~3u;
13326
13327 branch_offset = veneer_entry_loc - veneered_insn_loc - 4;
13328
13329 abfd = stub_entry->target_section->owner;
13330 target = stub_entry->target_value;
13331
13332 /* We attempt to avoid this condition by setting stubs_always_after_branch
13333 in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
13334 This check is just to be on the safe side... */
13335 if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
13336 {
13337 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub is "
13338 "allocated in unsafe location"), abfd);
13339 return FALSE;
13340 }
13341
13342 switch (stub_entry->stub_type)
13343 {
13344 case arm_stub_a8_veneer_b:
13345 case arm_stub_a8_veneer_b_cond:
13346 branch_insn = 0xf0009000;
13347 goto jump24;
13348
13349 case arm_stub_a8_veneer_blx:
13350 branch_insn = 0xf000e800;
13351 goto jump24;
13352
13353 case arm_stub_a8_veneer_bl:
13354 {
13355 unsigned int i1, j1, i2, j2, s;
13356
13357 branch_insn = 0xf000d000;
13358
13359 jump24:
13360 if (branch_offset < -16777216 || branch_offset > 16777214)
13361 {
13362 /* There's not much we can do apart from complain if this
13363 happens. */
13364 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub out "
13365 "of range (input file too large)"), abfd);
13366 return FALSE;
13367 }
13368
13369 /* i1 = not(j1 eor s), so:
13370 not i1 = j1 eor s
13371 j1 = (not i1) eor s. */
13372
13373 branch_insn |= (branch_offset >> 1) & 0x7ff;
13374 branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
13375 i2 = (branch_offset >> 22) & 1;
13376 i1 = (branch_offset >> 23) & 1;
13377 s = (branch_offset >> 24) & 1;
13378 j1 = (!i1) ^ s;
13379 j2 = (!i2) ^ s;
13380 branch_insn |= j2 << 11;
13381 branch_insn |= j1 << 13;
13382 branch_insn |= s << 26;
13383 }
13384 break;
13385
13386 default:
13387 BFD_FAIL ();
13388 return FALSE;
13389 }
13390
13391 bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[target]);
13392 bfd_put_16 (abfd, branch_insn & 0xffff, &contents[target + 2]);
13393
13394 return TRUE;
13395 }
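
/* Quick sanity check of the jump24 field packing above (purely
   illustrative): with branch_offset == -4 we get imm11 = 0x7fe,
   imm10 = 0x3ff, i1 = i2 = s = 1 and hence j1 = j2 = 1, so the
   0xf0009000 (B.W) template becomes 0xf7ffbffe.  The two bfd_put_16
   calls then store the halfwords f7ff bffe in that order, which is the
   canonical Thumb-2 encoding of "b.w ." (a branch to itself).  */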
13396
13397 /* Do code byteswapping. Return FALSE afterwards so that the section is
13398 written out as normal. */
13399
13400 static bfd_boolean
13401 elf32_arm_write_section (bfd *output_bfd,
13402 struct bfd_link_info *link_info,
13403 asection *sec,
13404 bfd_byte *contents)
13405 {
13406 unsigned int mapcount, errcount;
13407 _arm_elf_section_data *arm_data;
13408 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
13409 elf32_arm_section_map *map;
13410 elf32_vfp11_erratum_list *errnode;
13411 bfd_vma ptr;
13412 bfd_vma end;
13413 bfd_vma offset = sec->output_section->vma + sec->output_offset;
13414 bfd_byte tmp;
13415 unsigned int i;
13416
13417 if (globals == NULL)
13418 return FALSE;
13419
13420 /* If this section has not been allocated an _arm_elf_section_data
13421 structure then we cannot record anything. */
13422 arm_data = get_arm_elf_section_data (sec);
13423 if (arm_data == NULL)
13424 return FALSE;
13425
13426 mapcount = arm_data->mapcount;
13427 map = arm_data->map;
13428 errcount = arm_data->erratumcount;
13429
13430 if (errcount != 0)
13431 {
13432 unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;
13433
13434 for (errnode = arm_data->erratumlist; errnode != 0;
13435 errnode = errnode->next)
13436 {
13437 bfd_vma target = errnode->vma - offset;
13438
13439 switch (errnode->type)
13440 {
13441 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
13442 {
13443 bfd_vma branch_to_veneer;
13444 /* Original condition code of instruction, plus bit mask for
13445 ARM B instruction. */
13446 unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
13447 | 0x0a000000;
13448
13449 /* The instruction is before the label. */
13450 target -= 4;
13451
13452 /* Above offset included in -4 below. */
13453 branch_to_veneer = errnode->u.b.veneer->vma
13454 - errnode->vma - 4;
13455
13456 if ((signed) branch_to_veneer < -(1 << 25)
13457 || (signed) branch_to_veneer >= (1 << 25))
13458 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
13459 "range"), output_bfd);
13460
13461 insn |= (branch_to_veneer >> 2) & 0xffffff;
13462 contents[endianflip ^ target] = insn & 0xff;
13463 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
13464 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
13465 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
13466 }
13467 break;
13468
13469 case VFP11_ERRATUM_ARM_VENEER:
13470 {
13471 bfd_vma branch_from_veneer;
13472 unsigned int insn;
13473
13474 /* Take size of veneer into account. */
13475 branch_from_veneer = errnode->u.v.branch->vma
13476 - errnode->vma - 12;
13477
13478 if ((signed) branch_from_veneer < -(1 << 25)
13479 || (signed) branch_from_veneer >= (1 << 25))
13480 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
13481 "range"), output_bfd);
13482
13483 /* Original instruction. */
13484 insn = errnode->u.v.branch->u.b.vfp_insn;
13485 contents[endianflip ^ target] = insn & 0xff;
13486 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
13487 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
13488 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
13489
13490 /* Branch back to insn after original insn. */
13491 insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
13492 contents[endianflip ^ (target + 4)] = insn & 0xff;
13493 contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff;
13494 contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff;
13495 contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff;
13496 }
13497 break;
13498
13499 default:
13500 abort ();
13501 }
13502 }
13503 }
13504
13505 if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
13506 {
13507 arm_unwind_table_edit *edit_node
13508 = arm_data->u.exidx.unwind_edit_list;
13509 /* Now, sec->size is the size of the section we will write. The original
13510 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
13511 markers) was sec->rawsize. (If we perform no edits, rawsize will be
13512 zero and we should use size instead.) */
13513 bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size);
13514 unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
13515 unsigned int in_index, out_index;
13516 bfd_vma add_to_offsets = 0;
13517
13518 for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
13519 {
13520 if (edit_node)
13521 {
13522 unsigned int edit_index = edit_node->index;
13523
13524 if (in_index < edit_index && in_index * 8 < input_size)
13525 {
13526 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
13527 contents + in_index * 8, add_to_offsets);
13528 out_index++;
13529 in_index++;
13530 }
13531 else if (in_index == edit_index
13532 || (in_index * 8 >= input_size
13533 && edit_index == UINT_MAX))
13534 {
13535 switch (edit_node->type)
13536 {
13537 case DELETE_EXIDX_ENTRY:
13538 in_index++;
13539 add_to_offsets += 8;
13540 break;
13541
13542 case INSERT_EXIDX_CANTUNWIND_AT_END:
13543 {
13544 asection *text_sec = edit_node->linked_section;
13545 bfd_vma text_offset = text_sec->output_section->vma
13546 + text_sec->output_offset
13547 + text_sec->size;
13548 bfd_vma exidx_offset = offset + out_index * 8;
13549 unsigned long prel31_offset;
13550
13551 /* Note: this is meant to be equivalent to an
13552 R_ARM_PREL31 relocation. These synthetic
13553 EXIDX_CANTUNWIND markers are not relocated by the
13554 usual BFD method. */
13555 prel31_offset = (text_offset - exidx_offset)
13556 & 0x7ffffffful;
13557
13558 /* First address we can't unwind. */
13559 bfd_put_32 (output_bfd, prel31_offset,
13560 &edited_contents[out_index * 8]);
13561
13562 /* Code for EXIDX_CANTUNWIND. */
13563 bfd_put_32 (output_bfd, 0x1,
13564 &edited_contents[out_index * 8 + 4]);
13565
13566 out_index++;
13567 add_to_offsets -= 8;
13568 }
13569 break;
13570 }
13571
13572 edit_node = edit_node->next;
13573 }
13574 }
13575 else
13576 {
13577 /* No more edits, copy remaining entries verbatim. */
13578 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
13579 contents + in_index * 8, add_to_offsets);
13580 out_index++;
13581 in_index++;
13582 }
13583 }
13584
13585 if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
13586 bfd_set_section_contents (output_bfd, sec->output_section,
13587 edited_contents,
13588 (file_ptr) sec->output_offset, sec->size);
13589
13590 return TRUE;
13591 }
13592
13593 /* Fix code to point to Cortex-A8 erratum stubs. */
13594 if (globals->fix_cortex_a8)
13595 {
13596 struct a8_branch_to_stub_data data;
13597
13598 data.writing_section = sec;
13599 data.contents = contents;
13600
13601 bfd_hash_traverse (&globals->stub_hash_table, make_branch_to_a8_stub,
13602 &data);
13603 }
13604
13605 if (mapcount == 0)
13606 return FALSE;
13607
13608 if (globals->byteswap_code)
13609 {
13610 qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);
13611
13612 ptr = map[0].vma;
13613 for (i = 0; i < mapcount; i++)
13614 {
13615 if (i == mapcount - 1)
13616 end = sec->size;
13617 else
13618 end = map[i + 1].vma;
13619
13620 switch (map[i].type)
13621 {
13622 case 'a':
13623 /* Byte swap code words. */
13624 while (ptr + 3 < end)
13625 {
13626 tmp = contents[ptr];
13627 contents[ptr] = contents[ptr + 3];
13628 contents[ptr + 3] = tmp;
13629 tmp = contents[ptr + 1];
13630 contents[ptr + 1] = contents[ptr + 2];
13631 contents[ptr + 2] = tmp;
13632 ptr += 4;
13633 }
13634 break;
13635
13636 case 't':
13637 /* Byte swap code halfwords. */
13638 while (ptr + 1 < end)
13639 {
13640 tmp = contents[ptr];
13641 contents[ptr] = contents[ptr + 1];
13642 contents[ptr + 1] = tmp;
13643 ptr += 2;
13644 }
13645 break;
13646
13647 case 'd':
13648 /* Leave data alone. */
13649 break;
13650 }
13651 ptr = end;
13652 }
13653 }
13654
13655 free (map);
13656 arm_data->mapcount = -1;
13657 arm_data->mapsize = 0;
13658 arm_data->map = NULL;
13659
13660 return FALSE;
13661 }
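
/* For illustration, what the byte swapping above does to little-endian
   input: in an $a region the code word stored as bytes 00 00 a0 e1
   (0xe1a00000, "mov r0, r0") becomes e1 a0 00 00; in a $t region the
   halfword 70 47 (0x4770, "bx lr") becomes 47 70; $d regions are left
   untouched, which is why correct mapping symbols matter here.  */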
13662
13663 /* Display STT_ARM_TFUNC symbols as functions. */
13664
13665 static void
13666 elf32_arm_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
13667 asymbol *asym)
13668 {
13669 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
13670
13671 if (ELF_ST_TYPE (elfsym->internal_elf_sym.st_info) == STT_ARM_TFUNC)
13672 elfsym->symbol.flags |= BSF_FUNCTION;
13673 }
13674
13675
13676 /* Mangle thumb function symbols as we read them in. */
13677
13678 static bfd_boolean
13679 elf32_arm_swap_symbol_in (bfd * abfd,
13680 const void *psrc,
13681 const void *pshn,
13682 Elf_Internal_Sym *dst)
13683 {
13684 if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
13685 return FALSE;
13686
13687 /* New EABI objects mark thumb function symbols by setting the low bit of
13688 the address. Turn these into STT_ARM_TFUNC. */
13689 if ((ELF_ST_TYPE (dst->st_info) == STT_FUNC)
13690 && (dst->st_value & 1))
13691 {
13692 dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_ARM_TFUNC);
13693 dst->st_value &= ~(bfd_vma) 1;
13694 }
13695 return TRUE;
13696 }
13697
13698
13699 /* Mangle thumb function symbols as we write them out. */
13700
13701 static void
13702 elf32_arm_swap_symbol_out (bfd *abfd,
13703 const Elf_Internal_Sym *src,
13704 void *cdst,
13705 void *shndx)
13706 {
13707 Elf_Internal_Sym newsym;
13708
13709 /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
13710 of the address set, as per the new EABI. We do this unconditionally
13711 because objcopy does not set the elf header flags until after
13712 it writes out the symbol table. */
13713 if (ELF_ST_TYPE (src->st_info) == STT_ARM_TFUNC)
13714 {
13715 newsym = *src;
13716 newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
13717 if (newsym.st_shndx != SHN_UNDEF)
13718 {
13719 /* Do this only for defined symbols. At link time, the static
13720 linker will simulate the work of the dynamic linker in resolving
13721 symbols and will carry over the thumbness of found symbols to
13722 the output symbol table. It's not clear how it happens, but
13723 the thumbness of undefined symbols can well be different at
13724 runtime, and writing '1' for them would be confusing for users
13725 and possibly for the dynamic linker itself. */
13727 newsym.st_value |= 1;
13728 }
13729
13730 src = &newsym;
13731 }
13732 bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
13733 }
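
/* Round-trip example (address purely illustrative): a defined Thumb
   function written by an EABI assembler as STT_FUNC with st_value
   0x8001 is read in above as STT_ARM_TFUNC with st_value 0x8000, and
   elf32_arm_swap_symbol_out writes it back out as STT_FUNC with the low
   bit restored.  Undefined STT_ARM_TFUNC symbols keep their value
   unchanged, for the reasons given in the comment above.  */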
13734
13735 /* Add the PT_ARM_EXIDX program header. */
13736
13737 static bfd_boolean
13738 elf32_arm_modify_segment_map (bfd *abfd,
13739 struct bfd_link_info *info ATTRIBUTE_UNUSED)
13740 {
13741 struct elf_segment_map *m;
13742 asection *sec;
13743
13744 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
13745 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
13746 {
13747 /* If there is already a PT_ARM_EXIDX header, then we do not
13748 want to add another one. This situation arises when running
13749 "strip"; the input binary already has the header. */
13750 m = elf_tdata (abfd)->segment_map;
13751 while (m && m->p_type != PT_ARM_EXIDX)
13752 m = m->next;
13753 if (!m)
13754 {
13755 m = (struct elf_segment_map *)
13756 bfd_zalloc (abfd, sizeof (struct elf_segment_map));
13757 if (m == NULL)
13758 return FALSE;
13759 m->p_type = PT_ARM_EXIDX;
13760 m->count = 1;
13761 m->sections[0] = sec;
13762
13763 m->next = elf_tdata (abfd)->segment_map;
13764 elf_tdata (abfd)->segment_map = m;
13765 }
13766 }
13767
13768 return TRUE;
13769 }
13770
13771 /* We may add a PT_ARM_EXIDX program header. */
13772
13773 static int
13774 elf32_arm_additional_program_headers (bfd *abfd,
13775 struct bfd_link_info *info ATTRIBUTE_UNUSED)
13776 {
13777 asection *sec;
13778
13779 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
13780 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
13781 return 1;
13782 else
13783 return 0;
13784 }
13785
13786 /* We have two function types: STT_FUNC and STT_ARM_TFUNC. */
13787
13788 static bfd_boolean
13789 elf32_arm_is_function_type (unsigned int type)
13790 {
13791 return (type == STT_FUNC) || (type == STT_ARM_TFUNC);
13792 }
13793
13794 /* We use this to override swap_symbol_in and swap_symbol_out. */
13795 const struct elf_size_info elf32_arm_size_info =
13796 {
13797 sizeof (Elf32_External_Ehdr),
13798 sizeof (Elf32_External_Phdr),
13799 sizeof (Elf32_External_Shdr),
13800 sizeof (Elf32_External_Rel),
13801 sizeof (Elf32_External_Rela),
13802 sizeof (Elf32_External_Sym),
13803 sizeof (Elf32_External_Dyn),
13804 sizeof (Elf_External_Note),
13805 4,
13806 1,
13807 32, 2,
13808 ELFCLASS32, EV_CURRENT,
13809 bfd_elf32_write_out_phdrs,
13810 bfd_elf32_write_shdrs_and_ehdr,
13811 bfd_elf32_checksum_contents,
13812 bfd_elf32_write_relocs,
13813 elf32_arm_swap_symbol_in,
13814 elf32_arm_swap_symbol_out,
13815 bfd_elf32_slurp_reloc_table,
13816 bfd_elf32_slurp_symbol_table,
13817 bfd_elf32_swap_dyn_in,
13818 bfd_elf32_swap_dyn_out,
13819 bfd_elf32_swap_reloc_in,
13820 bfd_elf32_swap_reloc_out,
13821 bfd_elf32_swap_reloca_in,
13822 bfd_elf32_swap_reloca_out
13823 };
13824
13825 #define ELF_ARCH bfd_arch_arm
13826 #define ELF_MACHINE_CODE EM_ARM
13827 #ifdef __QNXTARGET__
13828 #define ELF_MAXPAGESIZE 0x1000
13829 #else
13830 #define ELF_MAXPAGESIZE 0x8000
13831 #endif
13832 #define ELF_MINPAGESIZE 0x1000
13833 #define ELF_COMMONPAGESIZE 0x1000
13834
13835 #define bfd_elf32_mkobject elf32_arm_mkobject
13836
13837 #define bfd_elf32_bfd_copy_private_bfd_data elf32_arm_copy_private_bfd_data
13838 #define bfd_elf32_bfd_merge_private_bfd_data elf32_arm_merge_private_bfd_data
13839 #define bfd_elf32_bfd_set_private_flags elf32_arm_set_private_flags
13840 #define bfd_elf32_bfd_print_private_bfd_data elf32_arm_print_private_bfd_data
13841 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_link_hash_table_create
13842 #define bfd_elf32_bfd_link_hash_table_free elf32_arm_hash_table_free
13843 #define bfd_elf32_bfd_reloc_type_lookup elf32_arm_reloc_type_lookup
13844 #define bfd_elf32_bfd_reloc_name_lookup elf32_arm_reloc_name_lookup
13845 #define bfd_elf32_find_nearest_line elf32_arm_find_nearest_line
13846 #define bfd_elf32_find_inliner_info elf32_arm_find_inliner_info
13847 #define bfd_elf32_new_section_hook elf32_arm_new_section_hook
13848 #define bfd_elf32_bfd_is_target_special_symbol elf32_arm_is_target_special_symbol
13849 #define bfd_elf32_bfd_final_link elf32_arm_final_link
13850
13851 #define elf_backend_get_symbol_type elf32_arm_get_symbol_type
13852 #define elf_backend_gc_mark_hook elf32_arm_gc_mark_hook
13853 #define elf_backend_gc_mark_extra_sections elf32_arm_gc_mark_extra_sections
13854 #define elf_backend_gc_sweep_hook elf32_arm_gc_sweep_hook
13855 #define elf_backend_check_relocs elf32_arm_check_relocs
13856 #define elf_backend_relocate_section elf32_arm_relocate_section
13857 #define elf_backend_write_section elf32_arm_write_section
13858 #define elf_backend_adjust_dynamic_symbol elf32_arm_adjust_dynamic_symbol
13859 #define elf_backend_create_dynamic_sections elf32_arm_create_dynamic_sections
13860 #define elf_backend_finish_dynamic_symbol elf32_arm_finish_dynamic_symbol
13861 #define elf_backend_finish_dynamic_sections elf32_arm_finish_dynamic_sections
13862 #define elf_backend_size_dynamic_sections elf32_arm_size_dynamic_sections
13863 #define elf_backend_init_index_section _bfd_elf_init_2_index_sections
13864 #define elf_backend_post_process_headers elf32_arm_post_process_headers
13865 #define elf_backend_reloc_type_class elf32_arm_reloc_type_class
13866 #define elf_backend_object_p elf32_arm_object_p
13867 #define elf_backend_section_flags elf32_arm_section_flags
13868 #define elf_backend_fake_sections elf32_arm_fake_sections
13869 #define elf_backend_section_from_shdr elf32_arm_section_from_shdr
13870 #define elf_backend_final_write_processing elf32_arm_final_write_processing
13871 #define elf_backend_copy_indirect_symbol elf32_arm_copy_indirect_symbol
13872 #define elf_backend_symbol_processing elf32_arm_symbol_processing
13873 #define elf_backend_size_info elf32_arm_size_info
13874 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
13875 #define elf_backend_additional_program_headers elf32_arm_additional_program_headers
13876 #define elf_backend_output_arch_local_syms elf32_arm_output_arch_local_syms
13877 #define elf_backend_begin_write_processing elf32_arm_begin_write_processing
13878 #define elf_backend_is_function_type elf32_arm_is_function_type
13879
13880 #define elf_backend_can_refcount 1
13881 #define elf_backend_can_gc_sections 1
13882 #define elf_backend_plt_readonly 1
13883 #define elf_backend_want_got_plt 1
13884 #define elf_backend_want_plt_sym 0
13885 #define elf_backend_may_use_rel_p 1
13886 #define elf_backend_may_use_rela_p 0
13887 #define elf_backend_default_use_rela_p 0
13888
13889 #define elf_backend_got_header_size 12
13890
13891 #undef elf_backend_obj_attrs_vendor
13892 #define elf_backend_obj_attrs_vendor "aeabi"
13893 #undef elf_backend_obj_attrs_section
13894 #define elf_backend_obj_attrs_section ".ARM.attributes"
13895 #undef elf_backend_obj_attrs_arg_type
13896 #define elf_backend_obj_attrs_arg_type elf32_arm_obj_attrs_arg_type
13897 #undef elf_backend_obj_attrs_section_type
13898 #define elf_backend_obj_attrs_section_type SHT_ARM_ATTRIBUTES
13899 #define elf_backend_obj_attrs_order elf32_arm_obj_attrs_order
13900
13901 #include "elf32-target.h"
13902
13903 /* VxWorks Targets. */
13904
13905 #undef TARGET_LITTLE_SYM
13906 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_vxworks_vec
13907 #undef TARGET_LITTLE_NAME
13908 #define TARGET_LITTLE_NAME "elf32-littlearm-vxworks"
13909 #undef TARGET_BIG_SYM
13910 #define TARGET_BIG_SYM bfd_elf32_bigarm_vxworks_vec
13911 #undef TARGET_BIG_NAME
13912 #define TARGET_BIG_NAME "elf32-bigarm-vxworks"
13913
13914 /* Like elf32_arm_link_hash_table_create -- but overrides
13915 appropriately for VxWorks. */
13916
13917 static struct bfd_link_hash_table *
13918 elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
13919 {
13920 struct bfd_link_hash_table *ret;
13921
13922 ret = elf32_arm_link_hash_table_create (abfd);
13923 if (ret)
13924 {
13925 struct elf32_arm_link_hash_table *htab
13926 = (struct elf32_arm_link_hash_table *) ret;
13927 htab->use_rel = 0;
13928 htab->vxworks_p = 1;
13929 }
13930 return ret;
13931 }
13932
13933 static void
13934 elf32_arm_vxworks_final_write_processing (bfd *abfd, bfd_boolean linker)
13935 {
13936 elf32_arm_final_write_processing (abfd, linker);
13937 elf_vxworks_final_write_processing (abfd, linker);
13938 }
13939
13940 #undef elf32_bed
13941 #define elf32_bed elf32_arm_vxworks_bed
13942
13943 #undef bfd_elf32_bfd_link_hash_table_create
13944 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_vxworks_link_hash_table_create
13945 #undef elf_backend_add_symbol_hook
13946 #define elf_backend_add_symbol_hook elf_vxworks_add_symbol_hook
13947 #undef elf_backend_final_write_processing
13948 #define elf_backend_final_write_processing elf32_arm_vxworks_final_write_processing
13949 #undef elf_backend_emit_relocs
13950 #define elf_backend_emit_relocs elf_vxworks_emit_relocs
13951
13952 #undef elf_backend_may_use_rel_p
13953 #define elf_backend_may_use_rel_p 0
13954 #undef elf_backend_may_use_rela_p
13955 #define elf_backend_may_use_rela_p 1
13956 #undef elf_backend_default_use_rela_p
13957 #define elf_backend_default_use_rela_p 1
13958 #undef elf_backend_want_plt_sym
13959 #define elf_backend_want_plt_sym 1
13960 #undef ELF_MAXPAGESIZE
13961 #define ELF_MAXPAGESIZE 0x1000
13962
13963 #include "elf32-target.h"
13964
13965
13966 /* Merge backend specific data from an object file to the output
13967 object file when linking. */
13968
13969 static bfd_boolean
13970 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd)
13971 {
13972 flagword out_flags;
13973 flagword in_flags;
13974 bfd_boolean flags_compatible = TRUE;
13975 asection *sec;
13976
13977 /* Check if we have the same endianness. */
13978 if (! _bfd_generic_verify_endian_match (ibfd, obfd))
13979 return FALSE;
13980
13981 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
13982 return TRUE;
13983
13984 if (!elf32_arm_merge_eabi_attributes (ibfd, obfd))
13985 return FALSE;
13986
13987 /* The input BFD must have had its flags initialised. */
13988 /* The following seems bogus to me -- the flags are initialised in
13989 the assembler, but I don't think an elf_flags_init field is
13990 written into the object. */
13991 /* BFD_ASSERT (elf_flags_init (ibfd)); */
13992
13993 in_flags = elf_elfheader (ibfd)->e_flags;
13994 out_flags = elf_elfheader (obfd)->e_flags;
13995
13996 /* In theory there is no reason why we couldn't handle this. However
13997 in practice it isn't even close to working and there is no real
13998 reason to want it. */
13999 if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
14000 && !(ibfd->flags & DYNAMIC)
14001 && (in_flags & EF_ARM_BE8))
14002 {
14003 _bfd_error_handler (_("error: %B is already in final BE8 format"),
14004 ibfd);
14005 return FALSE;
14006 }
14007
14008 if (!elf_flags_init (obfd))
14009 {
14010 /* If the input is the default architecture and had the default
14011 flags then do not bother setting the flags for the output
14012 architecture; instead allow future merges to do this. If no
14013 future merges ever set these flags then they will retain their
14014 uninitialised values which, surprise surprise, correspond
14015 to the default values. */
14016 if (bfd_get_arch_info (ibfd)->the_default
14017 && elf_elfheader (ibfd)->e_flags == 0)
14018 return TRUE;
14019
14020 elf_flags_init (obfd) = TRUE;
14021 elf_elfheader (obfd)->e_flags = in_flags;
14022
14023 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
14024 && bfd_get_arch_info (obfd)->the_default)
14025 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));
14026
14027 return TRUE;
14028 }
14029
14030 /* Determine what should happen if the input ARM architecture
14031 does not match the output ARM architecture. */
14032 if (! bfd_arm_merge_machines (ibfd, obfd))
14033 return FALSE;
14034
14035 /* Identical flags must be compatible. */
14036 if (in_flags == out_flags)
14037 return TRUE;
14038
14039 /* Check to see if the input BFD actually contains any sections. If
14040 not, its flags may not have been initialised either, but it
14041 cannot actually cause any incompatibility. Do not short-circuit
14042 dynamic objects; their section list may be emptied by
14043 elf_link_add_object_symbols.
14044
14045 Also check to see if there are no code sections in the input.
14046 In this case there is no need to check for code specific flags.
14047 XXX - do we need to worry about floating-point format compatibility
14048 in data sections? */
14049 if (!(ibfd->flags & DYNAMIC))
14050 {
14051 bfd_boolean null_input_bfd = TRUE;
14052 bfd_boolean only_data_sections = TRUE;
14053
14054 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
14055 {
14056 /* Ignore synthetic glue sections. */
14057 if (strcmp (sec->name, ".glue_7")
14058 && strcmp (sec->name, ".glue_7t"))
14059 {
14060 if ((bfd_get_section_flags (ibfd, sec)
14061 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
14062 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
14063 only_data_sections = FALSE;
14064
14065 null_input_bfd = FALSE;
14066 break;
14067 }
14068 }
14069
14070 if (null_input_bfd || only_data_sections)
14071 return TRUE;
14072 }
14073
14074 /* Complain about various flag mismatches. */
14075 if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
14076 EF_ARM_EABI_VERSION (out_flags)))
14077 {
14078 _bfd_error_handler
14079 (_("error: Source object %B has EABI version %d, but target %B has EABI version %d"),
14080 ibfd, obfd,
14081 (in_flags & EF_ARM_EABIMASK) >> 24,
14082 (out_flags & EF_ARM_EABIMASK) >> 24);
14083 return FALSE;
14084 }
14085
14086 /* Not sure what needs to be checked for EABI versions >= 1. */
14087 /* VxWorks libraries do not use these flags. */
14088 if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
14089 && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
14090 && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
14091 {
14092 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
14093 {
14094 _bfd_error_handler
14095 (_("error: %B is compiled for APCS-%d, whereas target %B uses APCS-%d"),
14096 ibfd, obfd,
14097 in_flags & EF_ARM_APCS_26 ? 26 : 32,
14098 out_flags & EF_ARM_APCS_26 ? 26 : 32);
14099 flags_compatible = FALSE;
14100 }
14101
14102 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
14103 {
14104 if (in_flags & EF_ARM_APCS_FLOAT)
14105 _bfd_error_handler
14106 (_("error: %B passes floats in float registers, whereas %B passes them in integer registers"),
14107 ibfd, obfd);
14108 else
14109 _bfd_error_handler
14110 (_("error: %B passes floats in integer registers, whereas %B passes them in float registers"),
14111 ibfd, obfd);
14112
14113 flags_compatible = FALSE;
14114 }
14115
14116 if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
14117 {
14118 if (in_flags & EF_ARM_VFP_FLOAT)
14119 _bfd_error_handler
14120 (_("error: %B uses VFP instructions, whereas %B does not"),
14121 ibfd, obfd);
14122 else
14123 _bfd_error_handler
14124 (_("error: %B uses FPA instructions, whereas %B does not"),
14125 ibfd, obfd);
14126
14127 flags_compatible = FALSE;
14128 }
14129
14130 if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
14131 {
14132 if (in_flags & EF_ARM_MAVERICK_FLOAT)
14133 _bfd_error_handler
14134 (_("error: %B uses Maverick instructions, whereas %B does not"),
14135 ibfd, obfd);
14136 else
14137 _bfd_error_handler
14138 (_("error: %B does not use Maverick instructions, whereas %B does"),
14139 ibfd, obfd);
14140
14141 flags_compatible = FALSE;
14142 }
14143
14144 #ifdef EF_ARM_SOFT_FLOAT
14145 if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
14146 {
14147 /* We can allow interworking between code that uses the VFP format
14148 layout and either soft float or integer regs for
14149 passing floating point arguments and results. We already
14150 know that the APCS_FLOAT flags match; similarly for VFP
14151 flags. */
14152 if ((in_flags & EF_ARM_APCS_FLOAT) != 0
14153 || (in_flags & EF_ARM_VFP_FLOAT) == 0)
14154 {
14155 if (in_flags & EF_ARM_SOFT_FLOAT)
14156 _bfd_error_handler
14157 (_("error: %B uses software FP, whereas %B uses hardware FP"),
14158 ibfd, obfd);
14159 else
14160 _bfd_error_handler
14161 (_("error: %B uses hardware FP, whereas %B uses software FP"),
14162 ibfd, obfd);
14163
14164 flags_compatible = FALSE;
14165 }
14166 }
14167 #endif
14168
14169 /* Interworking mismatch is only a warning. */
14170 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
14171 {
14172 if (in_flags & EF_ARM_INTERWORK)
14173 {
14174 _bfd_error_handler
14175 (_("Warning: %B supports interworking, whereas %B does not"),
14176 ibfd, obfd);
14177 }
14178 else
14179 {
14180 _bfd_error_handler
14181 (_("Warning: %B does not support interworking, whereas %B does"),
14182 ibfd, obfd);
14183 }
14184 }
14185 }
14186
14187 return flags_compatible;
14188 }
14189
14190
14191 /* Symbian OS Targets. */
14192
14193 #undef TARGET_LITTLE_SYM
14194 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_symbian_vec
14195 #undef TARGET_LITTLE_NAME
14196 #define TARGET_LITTLE_NAME "elf32-littlearm-symbian"
14197 #undef TARGET_BIG_SYM
14198 #define TARGET_BIG_SYM bfd_elf32_bigarm_symbian_vec
14199 #undef TARGET_BIG_NAME
14200 #define TARGET_BIG_NAME "elf32-bigarm-symbian"
14201
14202 /* Like elf32_arm_link_hash_table_create -- but overrides
14203 appropriately for Symbian OS. */
14204
14205 static struct bfd_link_hash_table *
14206 elf32_arm_symbian_link_hash_table_create (bfd *abfd)
14207 {
14208 struct bfd_link_hash_table *ret;
14209
14210 ret = elf32_arm_link_hash_table_create (abfd);
14211 if (ret)
14212 {
14213 struct elf32_arm_link_hash_table *htab
14214 = (struct elf32_arm_link_hash_table *)ret;
14215 /* There is no PLT header for Symbian OS. */
14216 htab->plt_header_size = 0;
14217 /* The PLT entries are each one instruction and one word. */
14218 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry);
14219 htab->symbian_p = 1;
14220 /* Symbian uses armv5t or above, so use_blx is always true. */
14221 htab->use_blx = 1;
14222 htab->root.is_relocatable_executable = 1;
14223 }
14224 return ret;
14225 }
14226
14227 static const struct bfd_elf_special_section
14228 elf32_arm_symbian_special_sections[] =
14229 {
14230 /* In a BPABI executable, the dynamic linking sections do not go in
14231 the loadable read-only segment. The post-linker may wish to
14232 refer to these sections, but they are not part of the final
14233 program image. */
14234 { STRING_COMMA_LEN (".dynamic"), 0, SHT_DYNAMIC, 0 },
14235 { STRING_COMMA_LEN (".dynstr"), 0, SHT_STRTAB, 0 },
14236 { STRING_COMMA_LEN (".dynsym"), 0, SHT_DYNSYM, 0 },
14237 { STRING_COMMA_LEN (".got"), 0, SHT_PROGBITS, 0 },
14238 { STRING_COMMA_LEN (".hash"), 0, SHT_HASH, 0 },
14239 /* These sections do not need to be writable as the SymbianOS
14240 postlinker will arrange things so that no dynamic relocation is
14241 required. */
14242 { STRING_COMMA_LEN (".init_array"), 0, SHT_INIT_ARRAY, SHF_ALLOC },
14243 { STRING_COMMA_LEN (".fini_array"), 0, SHT_FINI_ARRAY, SHF_ALLOC },
14244 { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC },
14245 { NULL, 0, 0, 0, 0 }
14246 };
14247
14248 static void
14249 elf32_arm_symbian_begin_write_processing (bfd *abfd,
14250 struct bfd_link_info *link_info)
14251 {
14252 /* BPABI objects are never loaded directly by an OS kernel; they are
14253 processed by a postlinker first, into an OS-specific format. If
14254 the D_PAGED bit is set on the file, BFD will align segments on
14255 page boundaries, so that an OS can directly map the file. With
14256 BPABI objects, that just results in wasted space. In addition,
14257 because we clear the D_PAGED bit, map_sections_to_segments will
14258 recognize that the program headers should not be mapped into any
14259 loadable segment. */
14260 abfd->flags &= ~D_PAGED;
14261 elf32_arm_begin_write_processing (abfd, link_info);
14262 }
14263
14264 static bfd_boolean
14265 elf32_arm_symbian_modify_segment_map (bfd *abfd,
14266 struct bfd_link_info *info)
14267 {
14268 struct elf_segment_map *m;
14269 asection *dynsec;
14270
14271 /* BPABI shared libraries and executables should have a PT_DYNAMIC
14272 segment. However, because the .dynamic section is not marked
14273 with SEC_LOAD, the generic ELF code will not create such a
14274 segment. */
14275 dynsec = bfd_get_section_by_name (abfd, ".dynamic");
14276 if (dynsec)
14277 {
14278 for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
14279 if (m->p_type == PT_DYNAMIC)
14280 break;
14281
14282 if (m == NULL)
14283 {
14284 m = _bfd_elf_make_dynamic_segment (abfd, dynsec);
14285 m->next = elf_tdata (abfd)->segment_map;
14286 elf_tdata (abfd)->segment_map = m;
14287 }
14288 }
14289
14290 /* Also call the generic arm routine. */
14291 return elf32_arm_modify_segment_map (abfd, info);
14292 }
14293
14294 /* Return address for Ith PLT stub in section PLT, for relocation REL
14295 or (bfd_vma) -1 if it should not be included. */
14296
14297 static bfd_vma
14298 elf32_arm_symbian_plt_sym_val (bfd_vma i, const asection *plt,
14299 const arelent *rel ATTRIBUTE_UNUSED)
14300 {
14301 return plt->vma + 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) * i;
14302 }
14303
14304
14305 #undef elf32_bed
14306 #define elf32_bed elf32_arm_symbian_bed
14307
14308 /* The dynamic sections are not allocated on SymbianOS; the postlinker
14309 will process them and then discard them. */
14310 #undef ELF_DYNAMIC_SEC_FLAGS
14311 #define ELF_DYNAMIC_SEC_FLAGS \
14312 (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED)
14313
14314 #undef elf_backend_add_symbol_hook
14315 #undef elf_backend_emit_relocs
14316
14317 #undef bfd_elf32_bfd_link_hash_table_create
14318 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_symbian_link_hash_table_create
14319 #undef elf_backend_special_sections
14320 #define elf_backend_special_sections elf32_arm_symbian_special_sections
14321 #undef elf_backend_begin_write_processing
14322 #define elf_backend_begin_write_processing elf32_arm_symbian_begin_write_processing
14323 #undef elf_backend_final_write_processing
14324 #define elf_backend_final_write_processing elf32_arm_final_write_processing
14325
14326 #undef elf_backend_modify_segment_map
14327 #define elf_backend_modify_segment_map elf32_arm_symbian_modify_segment_map
14328
14329 /* There is no .got section for BPABI objects, and hence no header. */
14330 #undef elf_backend_got_header_size
14331 #define elf_backend_got_header_size 0
14332
14333 /* Similarly, there is no .got.plt section. */
14334 #undef elf_backend_want_got_plt
14335 #define elf_backend_want_got_plt 0
14336
14337 #undef elf_backend_plt_sym_val
14338 #define elf_backend_plt_sym_val elf32_arm_symbian_plt_sym_val
14339
14340 #undef elf_backend_may_use_rel_p
14341 #define elf_backend_may_use_rel_p 1
14342 #undef elf_backend_may_use_rela_p
14343 #define elf_backend_may_use_rela_p 0
14344 #undef elf_backend_default_use_rela_p
14345 #define elf_backend_default_use_rela_p 0
14346 #undef elf_backend_want_plt_sym
14347 #define elf_backend_want_plt_sym 0
14348 #undef ELF_MAXPAGESIZE
14349 #define ELF_MAXPAGESIZE 0x8000
14350
14351 #include "elf32-target.h"