1 /* 32-bit ELF support for ARM
2 Copyright (C) 1998-2016 Free Software Foundation, Inc.
4 This file is part of BFD, the Binary File Descriptor library.
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
19 MA 02110-1301, USA. */
25 #include "bfd_stdint.h"
26 #include "libiberty.h"
30 #include "elf-vxworks.h"
/* Return the name of the relocation section that pairs with section
   NAME: ".rel" sections when the target uses REL relocations, ".rela"
   sections otherwise.  HTAB is the bfd's elf32_arm_link_hash_entry.  */
#define RELOC_SECTION(HTAB, NAME) \
  ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
/* Return the on-disk size of one relocation entry (REL or RELA,
   depending on the target).  HTAB is the bfd's
   elf32_arm_link_hash_entry.

   NOTE(review): the visible text was missing the "((HTAB)->use_rel"
   condition line, leaving the macro body unbalanced; restored here.  */
#define RELOC_SIZE(HTAB) \
  ((HTAB)->use_rel \
   ? sizeof (Elf32_External_Rel) \
   : sizeof (Elf32_External_Rela))
/* Return the function used to swap relocations in (REL or RELA form,
   depending on the target).  HTAB is the bfd's
   elf32_arm_link_hash_entry.

   NOTE(review): the visible text was missing the "((HTAB)->use_rel"
   condition line, leaving the macro body unbalanced; restored here.  */
#define SWAP_RELOC_IN(HTAB) \
  ((HTAB)->use_rel \
   ? bfd_elf32_swap_reloc_in \
   : bfd_elf32_swap_reloca_in)
/* Return the function used to swap relocations out (REL or RELA form,
   depending on the target).  HTAB is the bfd's
   elf32_arm_link_hash_entry.

   NOTE(review): the visible text was missing the "((HTAB)->use_rel"
   condition line, leaving the macro body unbalanced; restored here.  */
#define SWAP_RELOC_OUT(HTAB) \
  ((HTAB)->use_rel \
   ? bfd_elf32_swap_reloc_out \
   : bfd_elf32_swap_reloca_out)
/* This backend decodes relocations via the REL-style hook only; the
   RELA-style info_to_howto entry point is unused.  */
#define elf_info_to_howto      0
#define elf_info_to_howto_rel  elf32_arm_info_to_howto

/* ELF header e_ident values used for ARM objects.  */
#define ARM_ELF_ABI_VERSION    0
#define ARM_ELF_OS_ABI_VERSION ELFOSABI_ARM

/* The Adjusted Place, as defined by AAELF: the place rounded down to a
   word boundary (bottom two bits cleared).  */
#define Pa(X) ((X) & 0xfffffffc)
68 static bfd_boolean
elf32_arm_write_section (bfd
*output_bfd
,
69 struct bfd_link_info
*link_info
,
/* Note: code such as elf32_arm_reloc_type_lookup expects to use e.g.
   R_ARM_PC24 as an index into this table, and to find the R_ARM_PC24
   HOWTO in that slot.  */
77 static reloc_howto_type elf32_arm_howto_table_1
[] =
80 HOWTO (R_ARM_NONE
, /* type */
82 3, /* size (0 = byte, 1 = short, 2 = long) */
84 FALSE
, /* pc_relative */
86 complain_overflow_dont
,/* complain_on_overflow */
87 bfd_elf_generic_reloc
, /* special_function */
88 "R_ARM_NONE", /* name */
89 FALSE
, /* partial_inplace */
92 FALSE
), /* pcrel_offset */
94 HOWTO (R_ARM_PC24
, /* type */
96 2, /* size (0 = byte, 1 = short, 2 = long) */
98 TRUE
, /* pc_relative */
100 complain_overflow_signed
,/* complain_on_overflow */
101 bfd_elf_generic_reloc
, /* special_function */
102 "R_ARM_PC24", /* name */
103 FALSE
, /* partial_inplace */
104 0x00ffffff, /* src_mask */
105 0x00ffffff, /* dst_mask */
106 TRUE
), /* pcrel_offset */
108 /* 32 bit absolute */
109 HOWTO (R_ARM_ABS32
, /* type */
111 2, /* size (0 = byte, 1 = short, 2 = long) */
113 FALSE
, /* pc_relative */
115 complain_overflow_bitfield
,/* complain_on_overflow */
116 bfd_elf_generic_reloc
, /* special_function */
117 "R_ARM_ABS32", /* name */
118 FALSE
, /* partial_inplace */
119 0xffffffff, /* src_mask */
120 0xffffffff, /* dst_mask */
121 FALSE
), /* pcrel_offset */
123 /* standard 32bit pc-relative reloc */
124 HOWTO (R_ARM_REL32
, /* type */
126 2, /* size (0 = byte, 1 = short, 2 = long) */
128 TRUE
, /* pc_relative */
130 complain_overflow_bitfield
,/* complain_on_overflow */
131 bfd_elf_generic_reloc
, /* special_function */
132 "R_ARM_REL32", /* name */
133 FALSE
, /* partial_inplace */
134 0xffffffff, /* src_mask */
135 0xffffffff, /* dst_mask */
136 TRUE
), /* pcrel_offset */
138 /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */
139 HOWTO (R_ARM_LDR_PC_G0
, /* type */
141 0, /* size (0 = byte, 1 = short, 2 = long) */
143 TRUE
, /* pc_relative */
145 complain_overflow_dont
,/* complain_on_overflow */
146 bfd_elf_generic_reloc
, /* special_function */
147 "R_ARM_LDR_PC_G0", /* name */
148 FALSE
, /* partial_inplace */
149 0xffffffff, /* src_mask */
150 0xffffffff, /* dst_mask */
151 TRUE
), /* pcrel_offset */
153 /* 16 bit absolute */
154 HOWTO (R_ARM_ABS16
, /* type */
156 1, /* size (0 = byte, 1 = short, 2 = long) */
158 FALSE
, /* pc_relative */
160 complain_overflow_bitfield
,/* complain_on_overflow */
161 bfd_elf_generic_reloc
, /* special_function */
162 "R_ARM_ABS16", /* name */
163 FALSE
, /* partial_inplace */
164 0x0000ffff, /* src_mask */
165 0x0000ffff, /* dst_mask */
166 FALSE
), /* pcrel_offset */
168 /* 12 bit absolute */
169 HOWTO (R_ARM_ABS12
, /* type */
171 2, /* size (0 = byte, 1 = short, 2 = long) */
173 FALSE
, /* pc_relative */
175 complain_overflow_bitfield
,/* complain_on_overflow */
176 bfd_elf_generic_reloc
, /* special_function */
177 "R_ARM_ABS12", /* name */
178 FALSE
, /* partial_inplace */
179 0x00000fff, /* src_mask */
180 0x00000fff, /* dst_mask */
181 FALSE
), /* pcrel_offset */
183 HOWTO (R_ARM_THM_ABS5
, /* type */
185 1, /* size (0 = byte, 1 = short, 2 = long) */
187 FALSE
, /* pc_relative */
189 complain_overflow_bitfield
,/* complain_on_overflow */
190 bfd_elf_generic_reloc
, /* special_function */
191 "R_ARM_THM_ABS5", /* name */
192 FALSE
, /* partial_inplace */
193 0x000007e0, /* src_mask */
194 0x000007e0, /* dst_mask */
195 FALSE
), /* pcrel_offset */
198 HOWTO (R_ARM_ABS8
, /* type */
200 0, /* size (0 = byte, 1 = short, 2 = long) */
202 FALSE
, /* pc_relative */
204 complain_overflow_bitfield
,/* complain_on_overflow */
205 bfd_elf_generic_reloc
, /* special_function */
206 "R_ARM_ABS8", /* name */
207 FALSE
, /* partial_inplace */
208 0x000000ff, /* src_mask */
209 0x000000ff, /* dst_mask */
210 FALSE
), /* pcrel_offset */
212 HOWTO (R_ARM_SBREL32
, /* type */
214 2, /* size (0 = byte, 1 = short, 2 = long) */
216 FALSE
, /* pc_relative */
218 complain_overflow_dont
,/* complain_on_overflow */
219 bfd_elf_generic_reloc
, /* special_function */
220 "R_ARM_SBREL32", /* name */
221 FALSE
, /* partial_inplace */
222 0xffffffff, /* src_mask */
223 0xffffffff, /* dst_mask */
224 FALSE
), /* pcrel_offset */
226 HOWTO (R_ARM_THM_CALL
, /* type */
228 2, /* size (0 = byte, 1 = short, 2 = long) */
230 TRUE
, /* pc_relative */
232 complain_overflow_signed
,/* complain_on_overflow */
233 bfd_elf_generic_reloc
, /* special_function */
234 "R_ARM_THM_CALL", /* name */
235 FALSE
, /* partial_inplace */
236 0x07ff2fff, /* src_mask */
237 0x07ff2fff, /* dst_mask */
238 TRUE
), /* pcrel_offset */
240 HOWTO (R_ARM_THM_PC8
, /* type */
242 1, /* size (0 = byte, 1 = short, 2 = long) */
244 TRUE
, /* pc_relative */
246 complain_overflow_signed
,/* complain_on_overflow */
247 bfd_elf_generic_reloc
, /* special_function */
248 "R_ARM_THM_PC8", /* name */
249 FALSE
, /* partial_inplace */
250 0x000000ff, /* src_mask */
251 0x000000ff, /* dst_mask */
252 TRUE
), /* pcrel_offset */
254 HOWTO (R_ARM_BREL_ADJ
, /* type */
256 1, /* size (0 = byte, 1 = short, 2 = long) */
258 FALSE
, /* pc_relative */
260 complain_overflow_signed
,/* complain_on_overflow */
261 bfd_elf_generic_reloc
, /* special_function */
262 "R_ARM_BREL_ADJ", /* name */
263 FALSE
, /* partial_inplace */
264 0xffffffff, /* src_mask */
265 0xffffffff, /* dst_mask */
266 FALSE
), /* pcrel_offset */
268 HOWTO (R_ARM_TLS_DESC
, /* type */
270 2, /* size (0 = byte, 1 = short, 2 = long) */
272 FALSE
, /* pc_relative */
274 complain_overflow_bitfield
,/* complain_on_overflow */
275 bfd_elf_generic_reloc
, /* special_function */
276 "R_ARM_TLS_DESC", /* name */
277 FALSE
, /* partial_inplace */
278 0xffffffff, /* src_mask */
279 0xffffffff, /* dst_mask */
280 FALSE
), /* pcrel_offset */
282 HOWTO (R_ARM_THM_SWI8
, /* type */
284 0, /* size (0 = byte, 1 = short, 2 = long) */
286 FALSE
, /* pc_relative */
288 complain_overflow_signed
,/* complain_on_overflow */
289 bfd_elf_generic_reloc
, /* special_function */
290 "R_ARM_SWI8", /* name */
291 FALSE
, /* partial_inplace */
292 0x00000000, /* src_mask */
293 0x00000000, /* dst_mask */
294 FALSE
), /* pcrel_offset */
296 /* BLX instruction for the ARM. */
297 HOWTO (R_ARM_XPC25
, /* type */
299 2, /* size (0 = byte, 1 = short, 2 = long) */
301 TRUE
, /* pc_relative */
303 complain_overflow_signed
,/* complain_on_overflow */
304 bfd_elf_generic_reloc
, /* special_function */
305 "R_ARM_XPC25", /* name */
306 FALSE
, /* partial_inplace */
307 0x00ffffff, /* src_mask */
308 0x00ffffff, /* dst_mask */
309 TRUE
), /* pcrel_offset */
311 /* BLX instruction for the Thumb. */
312 HOWTO (R_ARM_THM_XPC22
, /* type */
314 2, /* size (0 = byte, 1 = short, 2 = long) */
316 TRUE
, /* pc_relative */
318 complain_overflow_signed
,/* complain_on_overflow */
319 bfd_elf_generic_reloc
, /* special_function */
320 "R_ARM_THM_XPC22", /* name */
321 FALSE
, /* partial_inplace */
322 0x07ff2fff, /* src_mask */
323 0x07ff2fff, /* dst_mask */
324 TRUE
), /* pcrel_offset */
326 /* Dynamic TLS relocations. */
328 HOWTO (R_ARM_TLS_DTPMOD32
, /* type */
330 2, /* size (0 = byte, 1 = short, 2 = long) */
332 FALSE
, /* pc_relative */
334 complain_overflow_bitfield
,/* complain_on_overflow */
335 bfd_elf_generic_reloc
, /* special_function */
336 "R_ARM_TLS_DTPMOD32", /* name */
337 TRUE
, /* partial_inplace */
338 0xffffffff, /* src_mask */
339 0xffffffff, /* dst_mask */
340 FALSE
), /* pcrel_offset */
342 HOWTO (R_ARM_TLS_DTPOFF32
, /* type */
344 2, /* size (0 = byte, 1 = short, 2 = long) */
346 FALSE
, /* pc_relative */
348 complain_overflow_bitfield
,/* complain_on_overflow */
349 bfd_elf_generic_reloc
, /* special_function */
350 "R_ARM_TLS_DTPOFF32", /* name */
351 TRUE
, /* partial_inplace */
352 0xffffffff, /* src_mask */
353 0xffffffff, /* dst_mask */
354 FALSE
), /* pcrel_offset */
356 HOWTO (R_ARM_TLS_TPOFF32
, /* type */
358 2, /* size (0 = byte, 1 = short, 2 = long) */
360 FALSE
, /* pc_relative */
362 complain_overflow_bitfield
,/* complain_on_overflow */
363 bfd_elf_generic_reloc
, /* special_function */
364 "R_ARM_TLS_TPOFF32", /* name */
365 TRUE
, /* partial_inplace */
366 0xffffffff, /* src_mask */
367 0xffffffff, /* dst_mask */
368 FALSE
), /* pcrel_offset */
370 /* Relocs used in ARM Linux */
372 HOWTO (R_ARM_COPY
, /* type */
374 2, /* size (0 = byte, 1 = short, 2 = long) */
376 FALSE
, /* pc_relative */
378 complain_overflow_bitfield
,/* complain_on_overflow */
379 bfd_elf_generic_reloc
, /* special_function */
380 "R_ARM_COPY", /* name */
381 TRUE
, /* partial_inplace */
382 0xffffffff, /* src_mask */
383 0xffffffff, /* dst_mask */
384 FALSE
), /* pcrel_offset */
386 HOWTO (R_ARM_GLOB_DAT
, /* type */
388 2, /* size (0 = byte, 1 = short, 2 = long) */
390 FALSE
, /* pc_relative */
392 complain_overflow_bitfield
,/* complain_on_overflow */
393 bfd_elf_generic_reloc
, /* special_function */
394 "R_ARM_GLOB_DAT", /* name */
395 TRUE
, /* partial_inplace */
396 0xffffffff, /* src_mask */
397 0xffffffff, /* dst_mask */
398 FALSE
), /* pcrel_offset */
400 HOWTO (R_ARM_JUMP_SLOT
, /* type */
402 2, /* size (0 = byte, 1 = short, 2 = long) */
404 FALSE
, /* pc_relative */
406 complain_overflow_bitfield
,/* complain_on_overflow */
407 bfd_elf_generic_reloc
, /* special_function */
408 "R_ARM_JUMP_SLOT", /* name */
409 TRUE
, /* partial_inplace */
410 0xffffffff, /* src_mask */
411 0xffffffff, /* dst_mask */
412 FALSE
), /* pcrel_offset */
414 HOWTO (R_ARM_RELATIVE
, /* type */
416 2, /* size (0 = byte, 1 = short, 2 = long) */
418 FALSE
, /* pc_relative */
420 complain_overflow_bitfield
,/* complain_on_overflow */
421 bfd_elf_generic_reloc
, /* special_function */
422 "R_ARM_RELATIVE", /* name */
423 TRUE
, /* partial_inplace */
424 0xffffffff, /* src_mask */
425 0xffffffff, /* dst_mask */
426 FALSE
), /* pcrel_offset */
428 HOWTO (R_ARM_GOTOFF32
, /* type */
430 2, /* size (0 = byte, 1 = short, 2 = long) */
432 FALSE
, /* pc_relative */
434 complain_overflow_bitfield
,/* complain_on_overflow */
435 bfd_elf_generic_reloc
, /* special_function */
436 "R_ARM_GOTOFF32", /* name */
437 TRUE
, /* partial_inplace */
438 0xffffffff, /* src_mask */
439 0xffffffff, /* dst_mask */
440 FALSE
), /* pcrel_offset */
442 HOWTO (R_ARM_GOTPC
, /* type */
444 2, /* size (0 = byte, 1 = short, 2 = long) */
446 TRUE
, /* pc_relative */
448 complain_overflow_bitfield
,/* complain_on_overflow */
449 bfd_elf_generic_reloc
, /* special_function */
450 "R_ARM_GOTPC", /* name */
451 TRUE
, /* partial_inplace */
452 0xffffffff, /* src_mask */
453 0xffffffff, /* dst_mask */
454 TRUE
), /* pcrel_offset */
456 HOWTO (R_ARM_GOT32
, /* type */
458 2, /* size (0 = byte, 1 = short, 2 = long) */
460 FALSE
, /* pc_relative */
462 complain_overflow_bitfield
,/* complain_on_overflow */
463 bfd_elf_generic_reloc
, /* special_function */
464 "R_ARM_GOT32", /* name */
465 TRUE
, /* partial_inplace */
466 0xffffffff, /* src_mask */
467 0xffffffff, /* dst_mask */
468 FALSE
), /* pcrel_offset */
470 HOWTO (R_ARM_PLT32
, /* type */
472 2, /* size (0 = byte, 1 = short, 2 = long) */
474 TRUE
, /* pc_relative */
476 complain_overflow_bitfield
,/* complain_on_overflow */
477 bfd_elf_generic_reloc
, /* special_function */
478 "R_ARM_PLT32", /* name */
479 FALSE
, /* partial_inplace */
480 0x00ffffff, /* src_mask */
481 0x00ffffff, /* dst_mask */
482 TRUE
), /* pcrel_offset */
484 HOWTO (R_ARM_CALL
, /* type */
486 2, /* size (0 = byte, 1 = short, 2 = long) */
488 TRUE
, /* pc_relative */
490 complain_overflow_signed
,/* complain_on_overflow */
491 bfd_elf_generic_reloc
, /* special_function */
492 "R_ARM_CALL", /* name */
493 FALSE
, /* partial_inplace */
494 0x00ffffff, /* src_mask */
495 0x00ffffff, /* dst_mask */
496 TRUE
), /* pcrel_offset */
498 HOWTO (R_ARM_JUMP24
, /* type */
500 2, /* size (0 = byte, 1 = short, 2 = long) */
502 TRUE
, /* pc_relative */
504 complain_overflow_signed
,/* complain_on_overflow */
505 bfd_elf_generic_reloc
, /* special_function */
506 "R_ARM_JUMP24", /* name */
507 FALSE
, /* partial_inplace */
508 0x00ffffff, /* src_mask */
509 0x00ffffff, /* dst_mask */
510 TRUE
), /* pcrel_offset */
512 HOWTO (R_ARM_THM_JUMP24
, /* type */
514 2, /* size (0 = byte, 1 = short, 2 = long) */
516 TRUE
, /* pc_relative */
518 complain_overflow_signed
,/* complain_on_overflow */
519 bfd_elf_generic_reloc
, /* special_function */
520 "R_ARM_THM_JUMP24", /* name */
521 FALSE
, /* partial_inplace */
522 0x07ff2fff, /* src_mask */
523 0x07ff2fff, /* dst_mask */
524 TRUE
), /* pcrel_offset */
526 HOWTO (R_ARM_BASE_ABS
, /* type */
528 2, /* size (0 = byte, 1 = short, 2 = long) */
530 FALSE
, /* pc_relative */
532 complain_overflow_dont
,/* complain_on_overflow */
533 bfd_elf_generic_reloc
, /* special_function */
534 "R_ARM_BASE_ABS", /* name */
535 FALSE
, /* partial_inplace */
536 0xffffffff, /* src_mask */
537 0xffffffff, /* dst_mask */
538 FALSE
), /* pcrel_offset */
540 HOWTO (R_ARM_ALU_PCREL7_0
, /* type */
542 2, /* size (0 = byte, 1 = short, 2 = long) */
544 TRUE
, /* pc_relative */
546 complain_overflow_dont
,/* complain_on_overflow */
547 bfd_elf_generic_reloc
, /* special_function */
548 "R_ARM_ALU_PCREL_7_0", /* name */
549 FALSE
, /* partial_inplace */
550 0x00000fff, /* src_mask */
551 0x00000fff, /* dst_mask */
552 TRUE
), /* pcrel_offset */
554 HOWTO (R_ARM_ALU_PCREL15_8
, /* type */
556 2, /* size (0 = byte, 1 = short, 2 = long) */
558 TRUE
, /* pc_relative */
560 complain_overflow_dont
,/* complain_on_overflow */
561 bfd_elf_generic_reloc
, /* special_function */
562 "R_ARM_ALU_PCREL_15_8",/* name */
563 FALSE
, /* partial_inplace */
564 0x00000fff, /* src_mask */
565 0x00000fff, /* dst_mask */
566 TRUE
), /* pcrel_offset */
568 HOWTO (R_ARM_ALU_PCREL23_15
, /* type */
570 2, /* size (0 = byte, 1 = short, 2 = long) */
572 TRUE
, /* pc_relative */
574 complain_overflow_dont
,/* complain_on_overflow */
575 bfd_elf_generic_reloc
, /* special_function */
576 "R_ARM_ALU_PCREL_23_15",/* name */
577 FALSE
, /* partial_inplace */
578 0x00000fff, /* src_mask */
579 0x00000fff, /* dst_mask */
580 TRUE
), /* pcrel_offset */
582 HOWTO (R_ARM_LDR_SBREL_11_0
, /* type */
584 2, /* size (0 = byte, 1 = short, 2 = long) */
586 FALSE
, /* pc_relative */
588 complain_overflow_dont
,/* complain_on_overflow */
589 bfd_elf_generic_reloc
, /* special_function */
590 "R_ARM_LDR_SBREL_11_0",/* name */
591 FALSE
, /* partial_inplace */
592 0x00000fff, /* src_mask */
593 0x00000fff, /* dst_mask */
594 FALSE
), /* pcrel_offset */
596 HOWTO (R_ARM_ALU_SBREL_19_12
, /* type */
598 2, /* size (0 = byte, 1 = short, 2 = long) */
600 FALSE
, /* pc_relative */
602 complain_overflow_dont
,/* complain_on_overflow */
603 bfd_elf_generic_reloc
, /* special_function */
604 "R_ARM_ALU_SBREL_19_12",/* name */
605 FALSE
, /* partial_inplace */
606 0x000ff000, /* src_mask */
607 0x000ff000, /* dst_mask */
608 FALSE
), /* pcrel_offset */
610 HOWTO (R_ARM_ALU_SBREL_27_20
, /* type */
612 2, /* size (0 = byte, 1 = short, 2 = long) */
614 FALSE
, /* pc_relative */
616 complain_overflow_dont
,/* complain_on_overflow */
617 bfd_elf_generic_reloc
, /* special_function */
618 "R_ARM_ALU_SBREL_27_20",/* name */
619 FALSE
, /* partial_inplace */
620 0x0ff00000, /* src_mask */
621 0x0ff00000, /* dst_mask */
622 FALSE
), /* pcrel_offset */
624 HOWTO (R_ARM_TARGET1
, /* type */
626 2, /* size (0 = byte, 1 = short, 2 = long) */
628 FALSE
, /* pc_relative */
630 complain_overflow_dont
,/* complain_on_overflow */
631 bfd_elf_generic_reloc
, /* special_function */
632 "R_ARM_TARGET1", /* name */
633 FALSE
, /* partial_inplace */
634 0xffffffff, /* src_mask */
635 0xffffffff, /* dst_mask */
636 FALSE
), /* pcrel_offset */
638 HOWTO (R_ARM_ROSEGREL32
, /* type */
640 2, /* size (0 = byte, 1 = short, 2 = long) */
642 FALSE
, /* pc_relative */
644 complain_overflow_dont
,/* complain_on_overflow */
645 bfd_elf_generic_reloc
, /* special_function */
646 "R_ARM_ROSEGREL32", /* name */
647 FALSE
, /* partial_inplace */
648 0xffffffff, /* src_mask */
649 0xffffffff, /* dst_mask */
650 FALSE
), /* pcrel_offset */
652 HOWTO (R_ARM_V4BX
, /* type */
654 2, /* size (0 = byte, 1 = short, 2 = long) */
656 FALSE
, /* pc_relative */
658 complain_overflow_dont
,/* complain_on_overflow */
659 bfd_elf_generic_reloc
, /* special_function */
660 "R_ARM_V4BX", /* name */
661 FALSE
, /* partial_inplace */
662 0xffffffff, /* src_mask */
663 0xffffffff, /* dst_mask */
664 FALSE
), /* pcrel_offset */
666 HOWTO (R_ARM_TARGET2
, /* type */
668 2, /* size (0 = byte, 1 = short, 2 = long) */
670 FALSE
, /* pc_relative */
672 complain_overflow_signed
,/* complain_on_overflow */
673 bfd_elf_generic_reloc
, /* special_function */
674 "R_ARM_TARGET2", /* name */
675 FALSE
, /* partial_inplace */
676 0xffffffff, /* src_mask */
677 0xffffffff, /* dst_mask */
678 TRUE
), /* pcrel_offset */
680 HOWTO (R_ARM_PREL31
, /* type */
682 2, /* size (0 = byte, 1 = short, 2 = long) */
684 TRUE
, /* pc_relative */
686 complain_overflow_signed
,/* complain_on_overflow */
687 bfd_elf_generic_reloc
, /* special_function */
688 "R_ARM_PREL31", /* name */
689 FALSE
, /* partial_inplace */
690 0x7fffffff, /* src_mask */
691 0x7fffffff, /* dst_mask */
692 TRUE
), /* pcrel_offset */
694 HOWTO (R_ARM_MOVW_ABS_NC
, /* type */
696 2, /* size (0 = byte, 1 = short, 2 = long) */
698 FALSE
, /* pc_relative */
700 complain_overflow_dont
,/* complain_on_overflow */
701 bfd_elf_generic_reloc
, /* special_function */
702 "R_ARM_MOVW_ABS_NC", /* name */
703 FALSE
, /* partial_inplace */
704 0x000f0fff, /* src_mask */
705 0x000f0fff, /* dst_mask */
706 FALSE
), /* pcrel_offset */
708 HOWTO (R_ARM_MOVT_ABS
, /* type */
710 2, /* size (0 = byte, 1 = short, 2 = long) */
712 FALSE
, /* pc_relative */
714 complain_overflow_bitfield
,/* complain_on_overflow */
715 bfd_elf_generic_reloc
, /* special_function */
716 "R_ARM_MOVT_ABS", /* name */
717 FALSE
, /* partial_inplace */
718 0x000f0fff, /* src_mask */
719 0x000f0fff, /* dst_mask */
720 FALSE
), /* pcrel_offset */
722 HOWTO (R_ARM_MOVW_PREL_NC
, /* type */
724 2, /* size (0 = byte, 1 = short, 2 = long) */
726 TRUE
, /* pc_relative */
728 complain_overflow_dont
,/* complain_on_overflow */
729 bfd_elf_generic_reloc
, /* special_function */
730 "R_ARM_MOVW_PREL_NC", /* name */
731 FALSE
, /* partial_inplace */
732 0x000f0fff, /* src_mask */
733 0x000f0fff, /* dst_mask */
734 TRUE
), /* pcrel_offset */
736 HOWTO (R_ARM_MOVT_PREL
, /* type */
738 2, /* size (0 = byte, 1 = short, 2 = long) */
740 TRUE
, /* pc_relative */
742 complain_overflow_bitfield
,/* complain_on_overflow */
743 bfd_elf_generic_reloc
, /* special_function */
744 "R_ARM_MOVT_PREL", /* name */
745 FALSE
, /* partial_inplace */
746 0x000f0fff, /* src_mask */
747 0x000f0fff, /* dst_mask */
748 TRUE
), /* pcrel_offset */
750 HOWTO (R_ARM_THM_MOVW_ABS_NC
, /* type */
752 2, /* size (0 = byte, 1 = short, 2 = long) */
754 FALSE
, /* pc_relative */
756 complain_overflow_dont
,/* complain_on_overflow */
757 bfd_elf_generic_reloc
, /* special_function */
758 "R_ARM_THM_MOVW_ABS_NC",/* name */
759 FALSE
, /* partial_inplace */
760 0x040f70ff, /* src_mask */
761 0x040f70ff, /* dst_mask */
762 FALSE
), /* pcrel_offset */
764 HOWTO (R_ARM_THM_MOVT_ABS
, /* type */
766 2, /* size (0 = byte, 1 = short, 2 = long) */
768 FALSE
, /* pc_relative */
770 complain_overflow_bitfield
,/* complain_on_overflow */
771 bfd_elf_generic_reloc
, /* special_function */
772 "R_ARM_THM_MOVT_ABS", /* name */
773 FALSE
, /* partial_inplace */
774 0x040f70ff, /* src_mask */
775 0x040f70ff, /* dst_mask */
776 FALSE
), /* pcrel_offset */
778 HOWTO (R_ARM_THM_MOVW_PREL_NC
,/* type */
780 2, /* size (0 = byte, 1 = short, 2 = long) */
782 TRUE
, /* pc_relative */
784 complain_overflow_dont
,/* complain_on_overflow */
785 bfd_elf_generic_reloc
, /* special_function */
786 "R_ARM_THM_MOVW_PREL_NC",/* name */
787 FALSE
, /* partial_inplace */
788 0x040f70ff, /* src_mask */
789 0x040f70ff, /* dst_mask */
790 TRUE
), /* pcrel_offset */
792 HOWTO (R_ARM_THM_MOVT_PREL
, /* type */
794 2, /* size (0 = byte, 1 = short, 2 = long) */
796 TRUE
, /* pc_relative */
798 complain_overflow_bitfield
,/* complain_on_overflow */
799 bfd_elf_generic_reloc
, /* special_function */
800 "R_ARM_THM_MOVT_PREL", /* name */
801 FALSE
, /* partial_inplace */
802 0x040f70ff, /* src_mask */
803 0x040f70ff, /* dst_mask */
804 TRUE
), /* pcrel_offset */
806 HOWTO (R_ARM_THM_JUMP19
, /* type */
808 2, /* size (0 = byte, 1 = short, 2 = long) */
810 TRUE
, /* pc_relative */
812 complain_overflow_signed
,/* complain_on_overflow */
813 bfd_elf_generic_reloc
, /* special_function */
814 "R_ARM_THM_JUMP19", /* name */
815 FALSE
, /* partial_inplace */
816 0x043f2fff, /* src_mask */
817 0x043f2fff, /* dst_mask */
818 TRUE
), /* pcrel_offset */
820 HOWTO (R_ARM_THM_JUMP6
, /* type */
822 1, /* size (0 = byte, 1 = short, 2 = long) */
824 TRUE
, /* pc_relative */
826 complain_overflow_unsigned
,/* complain_on_overflow */
827 bfd_elf_generic_reloc
, /* special_function */
828 "R_ARM_THM_JUMP6", /* name */
829 FALSE
, /* partial_inplace */
830 0x02f8, /* src_mask */
831 0x02f8, /* dst_mask */
832 TRUE
), /* pcrel_offset */
/* These are declared as 13-bit signed relocations because we can
   address -4095 .. 4095 (base) by altering ADDW to SUBW or vice
   versa.  */
837 HOWTO (R_ARM_THM_ALU_PREL_11_0
,/* type */
839 2, /* size (0 = byte, 1 = short, 2 = long) */
841 TRUE
, /* pc_relative */
843 complain_overflow_dont
,/* complain_on_overflow */
844 bfd_elf_generic_reloc
, /* special_function */
845 "R_ARM_THM_ALU_PREL_11_0",/* name */
846 FALSE
, /* partial_inplace */
847 0xffffffff, /* src_mask */
848 0xffffffff, /* dst_mask */
849 TRUE
), /* pcrel_offset */
851 HOWTO (R_ARM_THM_PC12
, /* type */
853 2, /* size (0 = byte, 1 = short, 2 = long) */
855 TRUE
, /* pc_relative */
857 complain_overflow_dont
,/* complain_on_overflow */
858 bfd_elf_generic_reloc
, /* special_function */
859 "R_ARM_THM_PC12", /* name */
860 FALSE
, /* partial_inplace */
861 0xffffffff, /* src_mask */
862 0xffffffff, /* dst_mask */
863 TRUE
), /* pcrel_offset */
865 HOWTO (R_ARM_ABS32_NOI
, /* type */
867 2, /* size (0 = byte, 1 = short, 2 = long) */
869 FALSE
, /* pc_relative */
871 complain_overflow_dont
,/* complain_on_overflow */
872 bfd_elf_generic_reloc
, /* special_function */
873 "R_ARM_ABS32_NOI", /* name */
874 FALSE
, /* partial_inplace */
875 0xffffffff, /* src_mask */
876 0xffffffff, /* dst_mask */
877 FALSE
), /* pcrel_offset */
879 HOWTO (R_ARM_REL32_NOI
, /* type */
881 2, /* size (0 = byte, 1 = short, 2 = long) */
883 TRUE
, /* pc_relative */
885 complain_overflow_dont
,/* complain_on_overflow */
886 bfd_elf_generic_reloc
, /* special_function */
887 "R_ARM_REL32_NOI", /* name */
888 FALSE
, /* partial_inplace */
889 0xffffffff, /* src_mask */
890 0xffffffff, /* dst_mask */
891 FALSE
), /* pcrel_offset */
893 /* Group relocations. */
895 HOWTO (R_ARM_ALU_PC_G0_NC
, /* type */
897 2, /* size (0 = byte, 1 = short, 2 = long) */
899 TRUE
, /* pc_relative */
901 complain_overflow_dont
,/* complain_on_overflow */
902 bfd_elf_generic_reloc
, /* special_function */
903 "R_ARM_ALU_PC_G0_NC", /* name */
904 FALSE
, /* partial_inplace */
905 0xffffffff, /* src_mask */
906 0xffffffff, /* dst_mask */
907 TRUE
), /* pcrel_offset */
909 HOWTO (R_ARM_ALU_PC_G0
, /* type */
911 2, /* size (0 = byte, 1 = short, 2 = long) */
913 TRUE
, /* pc_relative */
915 complain_overflow_dont
,/* complain_on_overflow */
916 bfd_elf_generic_reloc
, /* special_function */
917 "R_ARM_ALU_PC_G0", /* name */
918 FALSE
, /* partial_inplace */
919 0xffffffff, /* src_mask */
920 0xffffffff, /* dst_mask */
921 TRUE
), /* pcrel_offset */
923 HOWTO (R_ARM_ALU_PC_G1_NC
, /* type */
925 2, /* size (0 = byte, 1 = short, 2 = long) */
927 TRUE
, /* pc_relative */
929 complain_overflow_dont
,/* complain_on_overflow */
930 bfd_elf_generic_reloc
, /* special_function */
931 "R_ARM_ALU_PC_G1_NC", /* name */
932 FALSE
, /* partial_inplace */
933 0xffffffff, /* src_mask */
934 0xffffffff, /* dst_mask */
935 TRUE
), /* pcrel_offset */
937 HOWTO (R_ARM_ALU_PC_G1
, /* type */
939 2, /* size (0 = byte, 1 = short, 2 = long) */
941 TRUE
, /* pc_relative */
943 complain_overflow_dont
,/* complain_on_overflow */
944 bfd_elf_generic_reloc
, /* special_function */
945 "R_ARM_ALU_PC_G1", /* name */
946 FALSE
, /* partial_inplace */
947 0xffffffff, /* src_mask */
948 0xffffffff, /* dst_mask */
949 TRUE
), /* pcrel_offset */
951 HOWTO (R_ARM_ALU_PC_G2
, /* type */
953 2, /* size (0 = byte, 1 = short, 2 = long) */
955 TRUE
, /* pc_relative */
957 complain_overflow_dont
,/* complain_on_overflow */
958 bfd_elf_generic_reloc
, /* special_function */
959 "R_ARM_ALU_PC_G2", /* name */
960 FALSE
, /* partial_inplace */
961 0xffffffff, /* src_mask */
962 0xffffffff, /* dst_mask */
963 TRUE
), /* pcrel_offset */
965 HOWTO (R_ARM_LDR_PC_G1
, /* type */
967 2, /* size (0 = byte, 1 = short, 2 = long) */
969 TRUE
, /* pc_relative */
971 complain_overflow_dont
,/* complain_on_overflow */
972 bfd_elf_generic_reloc
, /* special_function */
973 "R_ARM_LDR_PC_G1", /* name */
974 FALSE
, /* partial_inplace */
975 0xffffffff, /* src_mask */
976 0xffffffff, /* dst_mask */
977 TRUE
), /* pcrel_offset */
979 HOWTO (R_ARM_LDR_PC_G2
, /* type */
981 2, /* size (0 = byte, 1 = short, 2 = long) */
983 TRUE
, /* pc_relative */
985 complain_overflow_dont
,/* complain_on_overflow */
986 bfd_elf_generic_reloc
, /* special_function */
987 "R_ARM_LDR_PC_G2", /* name */
988 FALSE
, /* partial_inplace */
989 0xffffffff, /* src_mask */
990 0xffffffff, /* dst_mask */
991 TRUE
), /* pcrel_offset */
993 HOWTO (R_ARM_LDRS_PC_G0
, /* type */
995 2, /* size (0 = byte, 1 = short, 2 = long) */
997 TRUE
, /* pc_relative */
999 complain_overflow_dont
,/* complain_on_overflow */
1000 bfd_elf_generic_reloc
, /* special_function */
1001 "R_ARM_LDRS_PC_G0", /* name */
1002 FALSE
, /* partial_inplace */
1003 0xffffffff, /* src_mask */
1004 0xffffffff, /* dst_mask */
1005 TRUE
), /* pcrel_offset */
1007 HOWTO (R_ARM_LDRS_PC_G1
, /* type */
1009 2, /* size (0 = byte, 1 = short, 2 = long) */
1011 TRUE
, /* pc_relative */
1013 complain_overflow_dont
,/* complain_on_overflow */
1014 bfd_elf_generic_reloc
, /* special_function */
1015 "R_ARM_LDRS_PC_G1", /* name */
1016 FALSE
, /* partial_inplace */
1017 0xffffffff, /* src_mask */
1018 0xffffffff, /* dst_mask */
1019 TRUE
), /* pcrel_offset */
1021 HOWTO (R_ARM_LDRS_PC_G2
, /* type */
1023 2, /* size (0 = byte, 1 = short, 2 = long) */
1025 TRUE
, /* pc_relative */
1027 complain_overflow_dont
,/* complain_on_overflow */
1028 bfd_elf_generic_reloc
, /* special_function */
1029 "R_ARM_LDRS_PC_G2", /* name */
1030 FALSE
, /* partial_inplace */
1031 0xffffffff, /* src_mask */
1032 0xffffffff, /* dst_mask */
1033 TRUE
), /* pcrel_offset */
1035 HOWTO (R_ARM_LDC_PC_G0
, /* type */
1037 2, /* size (0 = byte, 1 = short, 2 = long) */
1039 TRUE
, /* pc_relative */
1041 complain_overflow_dont
,/* complain_on_overflow */
1042 bfd_elf_generic_reloc
, /* special_function */
1043 "R_ARM_LDC_PC_G0", /* name */
1044 FALSE
, /* partial_inplace */
1045 0xffffffff, /* src_mask */
1046 0xffffffff, /* dst_mask */
1047 TRUE
), /* pcrel_offset */
1049 HOWTO (R_ARM_LDC_PC_G1
, /* type */
1051 2, /* size (0 = byte, 1 = short, 2 = long) */
1053 TRUE
, /* pc_relative */
1055 complain_overflow_dont
,/* complain_on_overflow */
1056 bfd_elf_generic_reloc
, /* special_function */
1057 "R_ARM_LDC_PC_G1", /* name */
1058 FALSE
, /* partial_inplace */
1059 0xffffffff, /* src_mask */
1060 0xffffffff, /* dst_mask */
1061 TRUE
), /* pcrel_offset */
1063 HOWTO (R_ARM_LDC_PC_G2
, /* type */
1065 2, /* size (0 = byte, 1 = short, 2 = long) */
1067 TRUE
, /* pc_relative */
1069 complain_overflow_dont
,/* complain_on_overflow */
1070 bfd_elf_generic_reloc
, /* special_function */
1071 "R_ARM_LDC_PC_G2", /* name */
1072 FALSE
, /* partial_inplace */
1073 0xffffffff, /* src_mask */
1074 0xffffffff, /* dst_mask */
1075 TRUE
), /* pcrel_offset */
1077 HOWTO (R_ARM_ALU_SB_G0_NC
, /* type */
1079 2, /* size (0 = byte, 1 = short, 2 = long) */
1081 TRUE
, /* pc_relative */
1083 complain_overflow_dont
,/* complain_on_overflow */
1084 bfd_elf_generic_reloc
, /* special_function */
1085 "R_ARM_ALU_SB_G0_NC", /* name */
1086 FALSE
, /* partial_inplace */
1087 0xffffffff, /* src_mask */
1088 0xffffffff, /* dst_mask */
1089 TRUE
), /* pcrel_offset */
1091 HOWTO (R_ARM_ALU_SB_G0
, /* type */
1093 2, /* size (0 = byte, 1 = short, 2 = long) */
1095 TRUE
, /* pc_relative */
1097 complain_overflow_dont
,/* complain_on_overflow */
1098 bfd_elf_generic_reloc
, /* special_function */
1099 "R_ARM_ALU_SB_G0", /* name */
1100 FALSE
, /* partial_inplace */
1101 0xffffffff, /* src_mask */
1102 0xffffffff, /* dst_mask */
1103 TRUE
), /* pcrel_offset */
1105 HOWTO (R_ARM_ALU_SB_G1_NC
, /* type */
1107 2, /* size (0 = byte, 1 = short, 2 = long) */
1109 TRUE
, /* pc_relative */
1111 complain_overflow_dont
,/* complain_on_overflow */
1112 bfd_elf_generic_reloc
, /* special_function */
1113 "R_ARM_ALU_SB_G1_NC", /* name */
1114 FALSE
, /* partial_inplace */
1115 0xffffffff, /* src_mask */
1116 0xffffffff, /* dst_mask */
1117 TRUE
), /* pcrel_offset */
1119 HOWTO (R_ARM_ALU_SB_G1
, /* type */
1121 2, /* size (0 = byte, 1 = short, 2 = long) */
1123 TRUE
, /* pc_relative */
1125 complain_overflow_dont
,/* complain_on_overflow */
1126 bfd_elf_generic_reloc
, /* special_function */
1127 "R_ARM_ALU_SB_G1", /* name */
1128 FALSE
, /* partial_inplace */
1129 0xffffffff, /* src_mask */
1130 0xffffffff, /* dst_mask */
1131 TRUE
), /* pcrel_offset */
1133 HOWTO (R_ARM_ALU_SB_G2
, /* type */
1135 2, /* size (0 = byte, 1 = short, 2 = long) */
1137 TRUE
, /* pc_relative */
1139 complain_overflow_dont
,/* complain_on_overflow */
1140 bfd_elf_generic_reloc
, /* special_function */
1141 "R_ARM_ALU_SB_G2", /* name */
1142 FALSE
, /* partial_inplace */
1143 0xffffffff, /* src_mask */
1144 0xffffffff, /* dst_mask */
1145 TRUE
), /* pcrel_offset */
1147 HOWTO (R_ARM_LDR_SB_G0
, /* type */
1149 2, /* size (0 = byte, 1 = short, 2 = long) */
1151 TRUE
, /* pc_relative */
1153 complain_overflow_dont
,/* complain_on_overflow */
1154 bfd_elf_generic_reloc
, /* special_function */
1155 "R_ARM_LDR_SB_G0", /* name */
1156 FALSE
, /* partial_inplace */
1157 0xffffffff, /* src_mask */
1158 0xffffffff, /* dst_mask */
1159 TRUE
), /* pcrel_offset */
1161 HOWTO (R_ARM_LDR_SB_G1
, /* type */
1163 2, /* size (0 = byte, 1 = short, 2 = long) */
1165 TRUE
, /* pc_relative */
1167 complain_overflow_dont
,/* complain_on_overflow */
1168 bfd_elf_generic_reloc
, /* special_function */
1169 "R_ARM_LDR_SB_G1", /* name */
1170 FALSE
, /* partial_inplace */
1171 0xffffffff, /* src_mask */
1172 0xffffffff, /* dst_mask */
1173 TRUE
), /* pcrel_offset */
1175 HOWTO (R_ARM_LDR_SB_G2
, /* type */
1177 2, /* size (0 = byte, 1 = short, 2 = long) */
1179 TRUE
, /* pc_relative */
1181 complain_overflow_dont
,/* complain_on_overflow */
1182 bfd_elf_generic_reloc
, /* special_function */
1183 "R_ARM_LDR_SB_G2", /* name */
1184 FALSE
, /* partial_inplace */
1185 0xffffffff, /* src_mask */
1186 0xffffffff, /* dst_mask */
1187 TRUE
), /* pcrel_offset */
1189 HOWTO (R_ARM_LDRS_SB_G0
, /* type */
1191 2, /* size (0 = byte, 1 = short, 2 = long) */
1193 TRUE
, /* pc_relative */
1195 complain_overflow_dont
,/* complain_on_overflow */
1196 bfd_elf_generic_reloc
, /* special_function */
1197 "R_ARM_LDRS_SB_G0", /* name */
1198 FALSE
, /* partial_inplace */
1199 0xffffffff, /* src_mask */
1200 0xffffffff, /* dst_mask */
1201 TRUE
), /* pcrel_offset */
1203 HOWTO (R_ARM_LDRS_SB_G1
, /* type */
1205 2, /* size (0 = byte, 1 = short, 2 = long) */
1207 TRUE
, /* pc_relative */
1209 complain_overflow_dont
,/* complain_on_overflow */
1210 bfd_elf_generic_reloc
, /* special_function */
1211 "R_ARM_LDRS_SB_G1", /* name */
1212 FALSE
, /* partial_inplace */
1213 0xffffffff, /* src_mask */
1214 0xffffffff, /* dst_mask */
1215 TRUE
), /* pcrel_offset */
1217 HOWTO (R_ARM_LDRS_SB_G2
, /* type */
1219 2, /* size (0 = byte, 1 = short, 2 = long) */
1221 TRUE
, /* pc_relative */
1223 complain_overflow_dont
,/* complain_on_overflow */
1224 bfd_elf_generic_reloc
, /* special_function */
1225 "R_ARM_LDRS_SB_G2", /* name */
1226 FALSE
, /* partial_inplace */
1227 0xffffffff, /* src_mask */
1228 0xffffffff, /* dst_mask */
1229 TRUE
), /* pcrel_offset */
1231 HOWTO (R_ARM_LDC_SB_G0
, /* type */
1233 2, /* size (0 = byte, 1 = short, 2 = long) */
1235 TRUE
, /* pc_relative */
1237 complain_overflow_dont
,/* complain_on_overflow */
1238 bfd_elf_generic_reloc
, /* special_function */
1239 "R_ARM_LDC_SB_G0", /* name */
1240 FALSE
, /* partial_inplace */
1241 0xffffffff, /* src_mask */
1242 0xffffffff, /* dst_mask */
1243 TRUE
), /* pcrel_offset */
1245 HOWTO (R_ARM_LDC_SB_G1
, /* type */
1247 2, /* size (0 = byte, 1 = short, 2 = long) */
1249 TRUE
, /* pc_relative */
1251 complain_overflow_dont
,/* complain_on_overflow */
1252 bfd_elf_generic_reloc
, /* special_function */
1253 "R_ARM_LDC_SB_G1", /* name */
1254 FALSE
, /* partial_inplace */
1255 0xffffffff, /* src_mask */
1256 0xffffffff, /* dst_mask */
1257 TRUE
), /* pcrel_offset */
1259 HOWTO (R_ARM_LDC_SB_G2
, /* type */
1261 2, /* size (0 = byte, 1 = short, 2 = long) */
1263 TRUE
, /* pc_relative */
1265 complain_overflow_dont
,/* complain_on_overflow */
1266 bfd_elf_generic_reloc
, /* special_function */
1267 "R_ARM_LDC_SB_G2", /* name */
1268 FALSE
, /* partial_inplace */
1269 0xffffffff, /* src_mask */
1270 0xffffffff, /* dst_mask */
1271 TRUE
), /* pcrel_offset */
1273 /* End of group relocations. */
1275 HOWTO (R_ARM_MOVW_BREL_NC
, /* type */
1277 2, /* size (0 = byte, 1 = short, 2 = long) */
1279 FALSE
, /* pc_relative */
1281 complain_overflow_dont
,/* complain_on_overflow */
1282 bfd_elf_generic_reloc
, /* special_function */
1283 "R_ARM_MOVW_BREL_NC", /* name */
1284 FALSE
, /* partial_inplace */
1285 0x0000ffff, /* src_mask */
1286 0x0000ffff, /* dst_mask */
1287 FALSE
), /* pcrel_offset */
1289 HOWTO (R_ARM_MOVT_BREL
, /* type */
1291 2, /* size (0 = byte, 1 = short, 2 = long) */
1293 FALSE
, /* pc_relative */
1295 complain_overflow_bitfield
,/* complain_on_overflow */
1296 bfd_elf_generic_reloc
, /* special_function */
1297 "R_ARM_MOVT_BREL", /* name */
1298 FALSE
, /* partial_inplace */
1299 0x0000ffff, /* src_mask */
1300 0x0000ffff, /* dst_mask */
1301 FALSE
), /* pcrel_offset */
1303 HOWTO (R_ARM_MOVW_BREL
, /* type */
1305 2, /* size (0 = byte, 1 = short, 2 = long) */
1307 FALSE
, /* pc_relative */
1309 complain_overflow_dont
,/* complain_on_overflow */
1310 bfd_elf_generic_reloc
, /* special_function */
1311 "R_ARM_MOVW_BREL", /* name */
1312 FALSE
, /* partial_inplace */
1313 0x0000ffff, /* src_mask */
1314 0x0000ffff, /* dst_mask */
1315 FALSE
), /* pcrel_offset */
1317 HOWTO (R_ARM_THM_MOVW_BREL_NC
,/* type */
1319 2, /* size (0 = byte, 1 = short, 2 = long) */
1321 FALSE
, /* pc_relative */
1323 complain_overflow_dont
,/* complain_on_overflow */
1324 bfd_elf_generic_reloc
, /* special_function */
1325 "R_ARM_THM_MOVW_BREL_NC",/* name */
1326 FALSE
, /* partial_inplace */
1327 0x040f70ff, /* src_mask */
1328 0x040f70ff, /* dst_mask */
1329 FALSE
), /* pcrel_offset */
1331 HOWTO (R_ARM_THM_MOVT_BREL
, /* type */
1333 2, /* size (0 = byte, 1 = short, 2 = long) */
1335 FALSE
, /* pc_relative */
1337 complain_overflow_bitfield
,/* complain_on_overflow */
1338 bfd_elf_generic_reloc
, /* special_function */
1339 "R_ARM_THM_MOVT_BREL", /* name */
1340 FALSE
, /* partial_inplace */
1341 0x040f70ff, /* src_mask */
1342 0x040f70ff, /* dst_mask */
1343 FALSE
), /* pcrel_offset */
1345 HOWTO (R_ARM_THM_MOVW_BREL
, /* type */
1347 2, /* size (0 = byte, 1 = short, 2 = long) */
1349 FALSE
, /* pc_relative */
1351 complain_overflow_dont
,/* complain_on_overflow */
1352 bfd_elf_generic_reloc
, /* special_function */
1353 "R_ARM_THM_MOVW_BREL", /* name */
1354 FALSE
, /* partial_inplace */
1355 0x040f70ff, /* src_mask */
1356 0x040f70ff, /* dst_mask */
1357 FALSE
), /* pcrel_offset */
1359 HOWTO (R_ARM_TLS_GOTDESC
, /* type */
1361 2, /* size (0 = byte, 1 = short, 2 = long) */
1363 FALSE
, /* pc_relative */
1365 complain_overflow_bitfield
,/* complain_on_overflow */
1366 NULL
, /* special_function */
1367 "R_ARM_TLS_GOTDESC", /* name */
1368 TRUE
, /* partial_inplace */
1369 0xffffffff, /* src_mask */
1370 0xffffffff, /* dst_mask */
1371 FALSE
), /* pcrel_offset */
1373 HOWTO (R_ARM_TLS_CALL
, /* type */
1375 2, /* size (0 = byte, 1 = short, 2 = long) */
1377 FALSE
, /* pc_relative */
1379 complain_overflow_dont
,/* complain_on_overflow */
1380 bfd_elf_generic_reloc
, /* special_function */
1381 "R_ARM_TLS_CALL", /* name */
1382 FALSE
, /* partial_inplace */
1383 0x00ffffff, /* src_mask */
1384 0x00ffffff, /* dst_mask */
1385 FALSE
), /* pcrel_offset */
1387 HOWTO (R_ARM_TLS_DESCSEQ
, /* type */
1389 2, /* size (0 = byte, 1 = short, 2 = long) */
1391 FALSE
, /* pc_relative */
1393 complain_overflow_bitfield
,/* complain_on_overflow */
1394 bfd_elf_generic_reloc
, /* special_function */
1395 "R_ARM_TLS_DESCSEQ", /* name */
1396 FALSE
, /* partial_inplace */
1397 0x00000000, /* src_mask */
1398 0x00000000, /* dst_mask */
1399 FALSE
), /* pcrel_offset */
1401 HOWTO (R_ARM_THM_TLS_CALL
, /* type */
1403 2, /* size (0 = byte, 1 = short, 2 = long) */
1405 FALSE
, /* pc_relative */
1407 complain_overflow_dont
,/* complain_on_overflow */
1408 bfd_elf_generic_reloc
, /* special_function */
1409 "R_ARM_THM_TLS_CALL", /* name */
1410 FALSE
, /* partial_inplace */
1411 0x07ff07ff, /* src_mask */
1412 0x07ff07ff, /* dst_mask */
1413 FALSE
), /* pcrel_offset */
1415 HOWTO (R_ARM_PLT32_ABS
, /* type */
1417 2, /* size (0 = byte, 1 = short, 2 = long) */
1419 FALSE
, /* pc_relative */
1421 complain_overflow_dont
,/* complain_on_overflow */
1422 bfd_elf_generic_reloc
, /* special_function */
1423 "R_ARM_PLT32_ABS", /* name */
1424 FALSE
, /* partial_inplace */
1425 0xffffffff, /* src_mask */
1426 0xffffffff, /* dst_mask */
1427 FALSE
), /* pcrel_offset */
1429 HOWTO (R_ARM_GOT_ABS
, /* type */
1431 2, /* size (0 = byte, 1 = short, 2 = long) */
1433 FALSE
, /* pc_relative */
1435 complain_overflow_dont
,/* complain_on_overflow */
1436 bfd_elf_generic_reloc
, /* special_function */
1437 "R_ARM_GOT_ABS", /* name */
1438 FALSE
, /* partial_inplace */
1439 0xffffffff, /* src_mask */
1440 0xffffffff, /* dst_mask */
1441 FALSE
), /* pcrel_offset */
1443 HOWTO (R_ARM_GOT_PREL
, /* type */
1445 2, /* size (0 = byte, 1 = short, 2 = long) */
1447 TRUE
, /* pc_relative */
1449 complain_overflow_dont
, /* complain_on_overflow */
1450 bfd_elf_generic_reloc
, /* special_function */
1451 "R_ARM_GOT_PREL", /* name */
1452 FALSE
, /* partial_inplace */
1453 0xffffffff, /* src_mask */
1454 0xffffffff, /* dst_mask */
1455 TRUE
), /* pcrel_offset */
1457 HOWTO (R_ARM_GOT_BREL12
, /* type */
1459 2, /* size (0 = byte, 1 = short, 2 = long) */
1461 FALSE
, /* pc_relative */
1463 complain_overflow_bitfield
,/* complain_on_overflow */
1464 bfd_elf_generic_reloc
, /* special_function */
1465 "R_ARM_GOT_BREL12", /* name */
1466 FALSE
, /* partial_inplace */
1467 0x00000fff, /* src_mask */
1468 0x00000fff, /* dst_mask */
1469 FALSE
), /* pcrel_offset */
1471 HOWTO (R_ARM_GOTOFF12
, /* type */
1473 2, /* size (0 = byte, 1 = short, 2 = long) */
1475 FALSE
, /* pc_relative */
1477 complain_overflow_bitfield
,/* complain_on_overflow */
1478 bfd_elf_generic_reloc
, /* special_function */
1479 "R_ARM_GOTOFF12", /* name */
1480 FALSE
, /* partial_inplace */
1481 0x00000fff, /* src_mask */
1482 0x00000fff, /* dst_mask */
1483 FALSE
), /* pcrel_offset */
1485 EMPTY_HOWTO (R_ARM_GOTRELAX
), /* reserved for future GOT-load optimizations */
1487 /* GNU extension to record C++ vtable member usage */
1488 HOWTO (R_ARM_GNU_VTENTRY
, /* type */
1490 2, /* size (0 = byte, 1 = short, 2 = long) */
1492 FALSE
, /* pc_relative */
1494 complain_overflow_dont
, /* complain_on_overflow */
1495 _bfd_elf_rel_vtable_reloc_fn
, /* special_function */
1496 "R_ARM_GNU_VTENTRY", /* name */
1497 FALSE
, /* partial_inplace */
1500 FALSE
), /* pcrel_offset */
1502 /* GNU extension to record C++ vtable hierarchy */
1503 HOWTO (R_ARM_GNU_VTINHERIT
, /* type */
1505 2, /* size (0 = byte, 1 = short, 2 = long) */
1507 FALSE
, /* pc_relative */
1509 complain_overflow_dont
, /* complain_on_overflow */
1510 NULL
, /* special_function */
1511 "R_ARM_GNU_VTINHERIT", /* name */
1512 FALSE
, /* partial_inplace */
1515 FALSE
), /* pcrel_offset */
1517 HOWTO (R_ARM_THM_JUMP11
, /* type */
1519 1, /* size (0 = byte, 1 = short, 2 = long) */
1521 TRUE
, /* pc_relative */
1523 complain_overflow_signed
, /* complain_on_overflow */
1524 bfd_elf_generic_reloc
, /* special_function */
1525 "R_ARM_THM_JUMP11", /* name */
1526 FALSE
, /* partial_inplace */
1527 0x000007ff, /* src_mask */
1528 0x000007ff, /* dst_mask */
1529 TRUE
), /* pcrel_offset */
1531 HOWTO (R_ARM_THM_JUMP8
, /* type */
1533 1, /* size (0 = byte, 1 = short, 2 = long) */
1535 TRUE
, /* pc_relative */
1537 complain_overflow_signed
, /* complain_on_overflow */
1538 bfd_elf_generic_reloc
, /* special_function */
1539 "R_ARM_THM_JUMP8", /* name */
1540 FALSE
, /* partial_inplace */
1541 0x000000ff, /* src_mask */
1542 0x000000ff, /* dst_mask */
1543 TRUE
), /* pcrel_offset */
1545 /* TLS relocations */
1546 HOWTO (R_ARM_TLS_GD32
, /* type */
1548 2, /* size (0 = byte, 1 = short, 2 = long) */
1550 FALSE
, /* pc_relative */
1552 complain_overflow_bitfield
,/* complain_on_overflow */
1553 NULL
, /* special_function */
1554 "R_ARM_TLS_GD32", /* name */
1555 TRUE
, /* partial_inplace */
1556 0xffffffff, /* src_mask */
1557 0xffffffff, /* dst_mask */
1558 FALSE
), /* pcrel_offset */
1560 HOWTO (R_ARM_TLS_LDM32
, /* type */
1562 2, /* size (0 = byte, 1 = short, 2 = long) */
1564 FALSE
, /* pc_relative */
1566 complain_overflow_bitfield
,/* complain_on_overflow */
1567 bfd_elf_generic_reloc
, /* special_function */
1568 "R_ARM_TLS_LDM32", /* name */
1569 TRUE
, /* partial_inplace */
1570 0xffffffff, /* src_mask */
1571 0xffffffff, /* dst_mask */
1572 FALSE
), /* pcrel_offset */
1574 HOWTO (R_ARM_TLS_LDO32
, /* type */
1576 2, /* size (0 = byte, 1 = short, 2 = long) */
1578 FALSE
, /* pc_relative */
1580 complain_overflow_bitfield
,/* complain_on_overflow */
1581 bfd_elf_generic_reloc
, /* special_function */
1582 "R_ARM_TLS_LDO32", /* name */
1583 TRUE
, /* partial_inplace */
1584 0xffffffff, /* src_mask */
1585 0xffffffff, /* dst_mask */
1586 FALSE
), /* pcrel_offset */
1588 HOWTO (R_ARM_TLS_IE32
, /* type */
1590 2, /* size (0 = byte, 1 = short, 2 = long) */
1592 FALSE
, /* pc_relative */
1594 complain_overflow_bitfield
,/* complain_on_overflow */
1595 NULL
, /* special_function */
1596 "R_ARM_TLS_IE32", /* name */
1597 TRUE
, /* partial_inplace */
1598 0xffffffff, /* src_mask */
1599 0xffffffff, /* dst_mask */
1600 FALSE
), /* pcrel_offset */
1602 HOWTO (R_ARM_TLS_LE32
, /* type */
1604 2, /* size (0 = byte, 1 = short, 2 = long) */
1606 FALSE
, /* pc_relative */
1608 complain_overflow_bitfield
,/* complain_on_overflow */
1609 NULL
, /* special_function */
1610 "R_ARM_TLS_LE32", /* name */
1611 TRUE
, /* partial_inplace */
1612 0xffffffff, /* src_mask */
1613 0xffffffff, /* dst_mask */
1614 FALSE
), /* pcrel_offset */
1616 HOWTO (R_ARM_TLS_LDO12
, /* type */
1618 2, /* size (0 = byte, 1 = short, 2 = long) */
1620 FALSE
, /* pc_relative */
1622 complain_overflow_bitfield
,/* complain_on_overflow */
1623 bfd_elf_generic_reloc
, /* special_function */
1624 "R_ARM_TLS_LDO12", /* name */
1625 FALSE
, /* partial_inplace */
1626 0x00000fff, /* src_mask */
1627 0x00000fff, /* dst_mask */
1628 FALSE
), /* pcrel_offset */
1630 HOWTO (R_ARM_TLS_LE12
, /* type */
1632 2, /* size (0 = byte, 1 = short, 2 = long) */
1634 FALSE
, /* pc_relative */
1636 complain_overflow_bitfield
,/* complain_on_overflow */
1637 bfd_elf_generic_reloc
, /* special_function */
1638 "R_ARM_TLS_LE12", /* name */
1639 FALSE
, /* partial_inplace */
1640 0x00000fff, /* src_mask */
1641 0x00000fff, /* dst_mask */
1642 FALSE
), /* pcrel_offset */
1644 HOWTO (R_ARM_TLS_IE12GP
, /* type */
1646 2, /* size (0 = byte, 1 = short, 2 = long) */
1648 FALSE
, /* pc_relative */
1650 complain_overflow_bitfield
,/* complain_on_overflow */
1651 bfd_elf_generic_reloc
, /* special_function */
1652 "R_ARM_TLS_IE12GP", /* name */
1653 FALSE
, /* partial_inplace */
1654 0x00000fff, /* src_mask */
1655 0x00000fff, /* dst_mask */
1656 FALSE
), /* pcrel_offset */
1658 /* 112-127 private relocations. */
1676 /* R_ARM_ME_TOO, obsolete. */
1679 HOWTO (R_ARM_THM_TLS_DESCSEQ
, /* type */
1681 1, /* size (0 = byte, 1 = short, 2 = long) */
1683 FALSE
, /* pc_relative */
1685 complain_overflow_bitfield
,/* complain_on_overflow */
1686 bfd_elf_generic_reloc
, /* special_function */
1687 "R_ARM_THM_TLS_DESCSEQ",/* name */
1688 FALSE
, /* partial_inplace */
1689 0x00000000, /* src_mask */
1690 0x00000000, /* dst_mask */
1691 FALSE
), /* pcrel_offset */
1694 HOWTO (R_ARM_THM_ALU_ABS_G0_NC
,/* type. */
1695 0, /* rightshift. */
1696 1, /* size (0 = byte, 1 = short, 2 = long). */
1698 FALSE
, /* pc_relative. */
1700 complain_overflow_bitfield
,/* complain_on_overflow. */
1701 bfd_elf_generic_reloc
, /* special_function. */
1702 "R_ARM_THM_ALU_ABS_G0_NC",/* name. */
1703 FALSE
, /* partial_inplace. */
1704 0x00000000, /* src_mask. */
1705 0x00000000, /* dst_mask. */
1706 FALSE
), /* pcrel_offset. */
1707 HOWTO (R_ARM_THM_ALU_ABS_G1_NC
,/* type. */
1708 0, /* rightshift. */
1709 1, /* size (0 = byte, 1 = short, 2 = long). */
1711 FALSE
, /* pc_relative. */
1713 complain_overflow_bitfield
,/* complain_on_overflow. */
1714 bfd_elf_generic_reloc
, /* special_function. */
1715 "R_ARM_THM_ALU_ABS_G1_NC",/* name. */
1716 FALSE
, /* partial_inplace. */
1717 0x00000000, /* src_mask. */
1718 0x00000000, /* dst_mask. */
1719 FALSE
), /* pcrel_offset. */
1720 HOWTO (R_ARM_THM_ALU_ABS_G2_NC
,/* type. */
1721 0, /* rightshift. */
1722 1, /* size (0 = byte, 1 = short, 2 = long). */
1724 FALSE
, /* pc_relative. */
1726 complain_overflow_bitfield
,/* complain_on_overflow. */
1727 bfd_elf_generic_reloc
, /* special_function. */
1728 "R_ARM_THM_ALU_ABS_G2_NC",/* name. */
1729 FALSE
, /* partial_inplace. */
1730 0x00000000, /* src_mask. */
1731 0x00000000, /* dst_mask. */
1732 FALSE
), /* pcrel_offset. */
1733 HOWTO (R_ARM_THM_ALU_ABS_G3_NC
,/* type. */
1734 0, /* rightshift. */
1735 1, /* size (0 = byte, 1 = short, 2 = long). */
1737 FALSE
, /* pc_relative. */
1739 complain_overflow_bitfield
,/* complain_on_overflow. */
1740 bfd_elf_generic_reloc
, /* special_function. */
1741 "R_ARM_THM_ALU_ABS_G3_NC",/* name. */
1742 FALSE
, /* partial_inplace. */
1743 0x00000000, /* src_mask. */
1744 0x00000000, /* dst_mask. */
1745 FALSE
), /* pcrel_offset. */
1749 static reloc_howto_type elf32_arm_howto_table_2
[1] =
1751 HOWTO (R_ARM_IRELATIVE
, /* type */
1753 2, /* size (0 = byte, 1 = short, 2 = long) */
1755 FALSE
, /* pc_relative */
1757 complain_overflow_bitfield
,/* complain_on_overflow */
1758 bfd_elf_generic_reloc
, /* special_function */
1759 "R_ARM_IRELATIVE", /* name */
1760 TRUE
, /* partial_inplace */
1761 0xffffffff, /* src_mask */
1762 0xffffffff, /* dst_mask */
1763 FALSE
) /* pcrel_offset */
1766 /* 249-255 extended, currently unused, relocations: */
1767 static reloc_howto_type elf32_arm_howto_table_3
[4] =
1769 HOWTO (R_ARM_RREL32
, /* type */
1771 0, /* size (0 = byte, 1 = short, 2 = long) */
1773 FALSE
, /* pc_relative */
1775 complain_overflow_dont
,/* complain_on_overflow */
1776 bfd_elf_generic_reloc
, /* special_function */
1777 "R_ARM_RREL32", /* name */
1778 FALSE
, /* partial_inplace */
1781 FALSE
), /* pcrel_offset */
1783 HOWTO (R_ARM_RABS32
, /* type */
1785 0, /* size (0 = byte, 1 = short, 2 = long) */
1787 FALSE
, /* pc_relative */
1789 complain_overflow_dont
,/* complain_on_overflow */
1790 bfd_elf_generic_reloc
, /* special_function */
1791 "R_ARM_RABS32", /* name */
1792 FALSE
, /* partial_inplace */
1795 FALSE
), /* pcrel_offset */
1797 HOWTO (R_ARM_RPC24
, /* type */
1799 0, /* size (0 = byte, 1 = short, 2 = long) */
1801 FALSE
, /* pc_relative */
1803 complain_overflow_dont
,/* complain_on_overflow */
1804 bfd_elf_generic_reloc
, /* special_function */
1805 "R_ARM_RPC24", /* name */
1806 FALSE
, /* partial_inplace */
1809 FALSE
), /* pcrel_offset */
1811 HOWTO (R_ARM_RBASE
, /* type */
1813 0, /* size (0 = byte, 1 = short, 2 = long) */
1815 FALSE
, /* pc_relative */
1817 complain_overflow_dont
,/* complain_on_overflow */
1818 bfd_elf_generic_reloc
, /* special_function */
1819 "R_ARM_RBASE", /* name */
1820 FALSE
, /* partial_inplace */
1823 FALSE
) /* pcrel_offset */
1826 static reloc_howto_type
*
1827 elf32_arm_howto_from_type (unsigned int r_type
)
1829 if (r_type
< ARRAY_SIZE (elf32_arm_howto_table_1
))
1830 return &elf32_arm_howto_table_1
[r_type
];
1832 if (r_type
== R_ARM_IRELATIVE
)
1833 return &elf32_arm_howto_table_2
[r_type
- R_ARM_IRELATIVE
];
1835 if (r_type
>= R_ARM_RREL32
1836 && r_type
< R_ARM_RREL32
+ ARRAY_SIZE (elf32_arm_howto_table_3
))
1837 return &elf32_arm_howto_table_3
[r_type
- R_ARM_RREL32
];
1843 elf32_arm_info_to_howto (bfd
* abfd ATTRIBUTE_UNUSED
, arelent
* bfd_reloc
,
1844 Elf_Internal_Rela
* elf_reloc
)
1846 unsigned int r_type
;
1848 r_type
= ELF32_R_TYPE (elf_reloc
->r_info
);
1849 bfd_reloc
->howto
= elf32_arm_howto_from_type (r_type
);
1852 struct elf32_arm_reloc_map
1854 bfd_reloc_code_real_type bfd_reloc_val
;
1855 unsigned char elf_reloc_val
;
1858 /* All entries in this list must also be present in elf32_arm_howto_table. */
1859 static const struct elf32_arm_reloc_map elf32_arm_reloc_map
[] =
1861 {BFD_RELOC_NONE
, R_ARM_NONE
},
1862 {BFD_RELOC_ARM_PCREL_BRANCH
, R_ARM_PC24
},
1863 {BFD_RELOC_ARM_PCREL_CALL
, R_ARM_CALL
},
1864 {BFD_RELOC_ARM_PCREL_JUMP
, R_ARM_JUMP24
},
1865 {BFD_RELOC_ARM_PCREL_BLX
, R_ARM_XPC25
},
1866 {BFD_RELOC_THUMB_PCREL_BLX
, R_ARM_THM_XPC22
},
1867 {BFD_RELOC_32
, R_ARM_ABS32
},
1868 {BFD_RELOC_32_PCREL
, R_ARM_REL32
},
1869 {BFD_RELOC_8
, R_ARM_ABS8
},
1870 {BFD_RELOC_16
, R_ARM_ABS16
},
1871 {BFD_RELOC_ARM_OFFSET_IMM
, R_ARM_ABS12
},
1872 {BFD_RELOC_ARM_THUMB_OFFSET
, R_ARM_THM_ABS5
},
1873 {BFD_RELOC_THUMB_PCREL_BRANCH25
, R_ARM_THM_JUMP24
},
1874 {BFD_RELOC_THUMB_PCREL_BRANCH23
, R_ARM_THM_CALL
},
1875 {BFD_RELOC_THUMB_PCREL_BRANCH12
, R_ARM_THM_JUMP11
},
1876 {BFD_RELOC_THUMB_PCREL_BRANCH20
, R_ARM_THM_JUMP19
},
1877 {BFD_RELOC_THUMB_PCREL_BRANCH9
, R_ARM_THM_JUMP8
},
1878 {BFD_RELOC_THUMB_PCREL_BRANCH7
, R_ARM_THM_JUMP6
},
1879 {BFD_RELOC_ARM_GLOB_DAT
, R_ARM_GLOB_DAT
},
1880 {BFD_RELOC_ARM_JUMP_SLOT
, R_ARM_JUMP_SLOT
},
1881 {BFD_RELOC_ARM_RELATIVE
, R_ARM_RELATIVE
},
1882 {BFD_RELOC_ARM_GOTOFF
, R_ARM_GOTOFF32
},
1883 {BFD_RELOC_ARM_GOTPC
, R_ARM_GOTPC
},
1884 {BFD_RELOC_ARM_GOT_PREL
, R_ARM_GOT_PREL
},
1885 {BFD_RELOC_ARM_GOT32
, R_ARM_GOT32
},
1886 {BFD_RELOC_ARM_PLT32
, R_ARM_PLT32
},
1887 {BFD_RELOC_ARM_TARGET1
, R_ARM_TARGET1
},
1888 {BFD_RELOC_ARM_ROSEGREL32
, R_ARM_ROSEGREL32
},
1889 {BFD_RELOC_ARM_SBREL32
, R_ARM_SBREL32
},
1890 {BFD_RELOC_ARM_PREL31
, R_ARM_PREL31
},
1891 {BFD_RELOC_ARM_TARGET2
, R_ARM_TARGET2
},
1892 {BFD_RELOC_ARM_PLT32
, R_ARM_PLT32
},
1893 {BFD_RELOC_ARM_TLS_GOTDESC
, R_ARM_TLS_GOTDESC
},
1894 {BFD_RELOC_ARM_TLS_CALL
, R_ARM_TLS_CALL
},
1895 {BFD_RELOC_ARM_THM_TLS_CALL
, R_ARM_THM_TLS_CALL
},
1896 {BFD_RELOC_ARM_TLS_DESCSEQ
, R_ARM_TLS_DESCSEQ
},
1897 {BFD_RELOC_ARM_THM_TLS_DESCSEQ
, R_ARM_THM_TLS_DESCSEQ
},
1898 {BFD_RELOC_ARM_TLS_DESC
, R_ARM_TLS_DESC
},
1899 {BFD_RELOC_ARM_TLS_GD32
, R_ARM_TLS_GD32
},
1900 {BFD_RELOC_ARM_TLS_LDO32
, R_ARM_TLS_LDO32
},
1901 {BFD_RELOC_ARM_TLS_LDM32
, R_ARM_TLS_LDM32
},
1902 {BFD_RELOC_ARM_TLS_DTPMOD32
, R_ARM_TLS_DTPMOD32
},
1903 {BFD_RELOC_ARM_TLS_DTPOFF32
, R_ARM_TLS_DTPOFF32
},
1904 {BFD_RELOC_ARM_TLS_TPOFF32
, R_ARM_TLS_TPOFF32
},
1905 {BFD_RELOC_ARM_TLS_IE32
, R_ARM_TLS_IE32
},
1906 {BFD_RELOC_ARM_TLS_LE32
, R_ARM_TLS_LE32
},
1907 {BFD_RELOC_ARM_IRELATIVE
, R_ARM_IRELATIVE
},
1908 {BFD_RELOC_VTABLE_INHERIT
, R_ARM_GNU_VTINHERIT
},
1909 {BFD_RELOC_VTABLE_ENTRY
, R_ARM_GNU_VTENTRY
},
1910 {BFD_RELOC_ARM_MOVW
, R_ARM_MOVW_ABS_NC
},
1911 {BFD_RELOC_ARM_MOVT
, R_ARM_MOVT_ABS
},
1912 {BFD_RELOC_ARM_MOVW_PCREL
, R_ARM_MOVW_PREL_NC
},
1913 {BFD_RELOC_ARM_MOVT_PCREL
, R_ARM_MOVT_PREL
},
1914 {BFD_RELOC_ARM_THUMB_MOVW
, R_ARM_THM_MOVW_ABS_NC
},
1915 {BFD_RELOC_ARM_THUMB_MOVT
, R_ARM_THM_MOVT_ABS
},
1916 {BFD_RELOC_ARM_THUMB_MOVW_PCREL
, R_ARM_THM_MOVW_PREL_NC
},
1917 {BFD_RELOC_ARM_THUMB_MOVT_PCREL
, R_ARM_THM_MOVT_PREL
},
1918 {BFD_RELOC_ARM_ALU_PC_G0_NC
, R_ARM_ALU_PC_G0_NC
},
1919 {BFD_RELOC_ARM_ALU_PC_G0
, R_ARM_ALU_PC_G0
},
1920 {BFD_RELOC_ARM_ALU_PC_G1_NC
, R_ARM_ALU_PC_G1_NC
},
1921 {BFD_RELOC_ARM_ALU_PC_G1
, R_ARM_ALU_PC_G1
},
1922 {BFD_RELOC_ARM_ALU_PC_G2
, R_ARM_ALU_PC_G2
},
1923 {BFD_RELOC_ARM_LDR_PC_G0
, R_ARM_LDR_PC_G0
},
1924 {BFD_RELOC_ARM_LDR_PC_G1
, R_ARM_LDR_PC_G1
},
1925 {BFD_RELOC_ARM_LDR_PC_G2
, R_ARM_LDR_PC_G2
},
1926 {BFD_RELOC_ARM_LDRS_PC_G0
, R_ARM_LDRS_PC_G0
},
1927 {BFD_RELOC_ARM_LDRS_PC_G1
, R_ARM_LDRS_PC_G1
},
1928 {BFD_RELOC_ARM_LDRS_PC_G2
, R_ARM_LDRS_PC_G2
},
1929 {BFD_RELOC_ARM_LDC_PC_G0
, R_ARM_LDC_PC_G0
},
1930 {BFD_RELOC_ARM_LDC_PC_G1
, R_ARM_LDC_PC_G1
},
1931 {BFD_RELOC_ARM_LDC_PC_G2
, R_ARM_LDC_PC_G2
},
1932 {BFD_RELOC_ARM_ALU_SB_G0_NC
, R_ARM_ALU_SB_G0_NC
},
1933 {BFD_RELOC_ARM_ALU_SB_G0
, R_ARM_ALU_SB_G0
},
1934 {BFD_RELOC_ARM_ALU_SB_G1_NC
, R_ARM_ALU_SB_G1_NC
},
1935 {BFD_RELOC_ARM_ALU_SB_G1
, R_ARM_ALU_SB_G1
},
1936 {BFD_RELOC_ARM_ALU_SB_G2
, R_ARM_ALU_SB_G2
},
1937 {BFD_RELOC_ARM_LDR_SB_G0
, R_ARM_LDR_SB_G0
},
1938 {BFD_RELOC_ARM_LDR_SB_G1
, R_ARM_LDR_SB_G1
},
1939 {BFD_RELOC_ARM_LDR_SB_G2
, R_ARM_LDR_SB_G2
},
1940 {BFD_RELOC_ARM_LDRS_SB_G0
, R_ARM_LDRS_SB_G0
},
1941 {BFD_RELOC_ARM_LDRS_SB_G1
, R_ARM_LDRS_SB_G1
},
1942 {BFD_RELOC_ARM_LDRS_SB_G2
, R_ARM_LDRS_SB_G2
},
1943 {BFD_RELOC_ARM_LDC_SB_G0
, R_ARM_LDC_SB_G0
},
1944 {BFD_RELOC_ARM_LDC_SB_G1
, R_ARM_LDC_SB_G1
},
1945 {BFD_RELOC_ARM_LDC_SB_G2
, R_ARM_LDC_SB_G2
},
1946 {BFD_RELOC_ARM_V4BX
, R_ARM_V4BX
},
1947 {BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
, R_ARM_THM_ALU_ABS_G3_NC
},
1948 {BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
, R_ARM_THM_ALU_ABS_G2_NC
},
1949 {BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
, R_ARM_THM_ALU_ABS_G1_NC
},
1950 {BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
, R_ARM_THM_ALU_ABS_G0_NC
}
1953 static reloc_howto_type
*
1954 elf32_arm_reloc_type_lookup (bfd
*abfd ATTRIBUTE_UNUSED
,
1955 bfd_reloc_code_real_type code
)
1959 for (i
= 0; i
< ARRAY_SIZE (elf32_arm_reloc_map
); i
++)
1960 if (elf32_arm_reloc_map
[i
].bfd_reloc_val
== code
)
1961 return elf32_arm_howto_from_type (elf32_arm_reloc_map
[i
].elf_reloc_val
);
1966 static reloc_howto_type
*
1967 elf32_arm_reloc_name_lookup (bfd
*abfd ATTRIBUTE_UNUSED
,
1972 for (i
= 0; i
< ARRAY_SIZE (elf32_arm_howto_table_1
); i
++)
1973 if (elf32_arm_howto_table_1
[i
].name
!= NULL
1974 && strcasecmp (elf32_arm_howto_table_1
[i
].name
, r_name
) == 0)
1975 return &elf32_arm_howto_table_1
[i
];
1977 for (i
= 0; i
< ARRAY_SIZE (elf32_arm_howto_table_2
); i
++)
1978 if (elf32_arm_howto_table_2
[i
].name
!= NULL
1979 && strcasecmp (elf32_arm_howto_table_2
[i
].name
, r_name
) == 0)
1980 return &elf32_arm_howto_table_2
[i
];
1982 for (i
= 0; i
< ARRAY_SIZE (elf32_arm_howto_table_3
); i
++)
1983 if (elf32_arm_howto_table_3
[i
].name
!= NULL
1984 && strcasecmp (elf32_arm_howto_table_3
[i
].name
, r_name
) == 0)
1985 return &elf32_arm_howto_table_3
[i
];
1990 /* Support for core dump NOTE sections. */
1993 elf32_arm_nabi_grok_prstatus (bfd
*abfd
, Elf_Internal_Note
*note
)
1998 switch (note
->descsz
)
2003 case 148: /* Linux/ARM 32-bit. */
2005 elf_tdata (abfd
)->core
->signal
= bfd_get_16 (abfd
, note
->descdata
+ 12);
2008 elf_tdata (abfd
)->core
->lwpid
= bfd_get_32 (abfd
, note
->descdata
+ 24);
2017 /* Make a ".reg/999" section. */
2018 return _bfd_elfcore_make_pseudosection (abfd
, ".reg",
2019 size
, note
->descpos
+ offset
);
2023 elf32_arm_nabi_grok_psinfo (bfd
*abfd
, Elf_Internal_Note
*note
)
2025 switch (note
->descsz
)
2030 case 124: /* Linux/ARM elf_prpsinfo. */
2031 elf_tdata (abfd
)->core
->pid
2032 = bfd_get_32 (abfd
, note
->descdata
+ 12);
2033 elf_tdata (abfd
)->core
->program
2034 = _bfd_elfcore_strndup (abfd
, note
->descdata
+ 28, 16);
2035 elf_tdata (abfd
)->core
->command
2036 = _bfd_elfcore_strndup (abfd
, note
->descdata
+ 44, 80);
2039 /* Note that for some reason, a spurious space is tacked
2040 onto the end of the args in some (at least one anyway)
2041 implementations, so strip it off if it exists. */
2043 char *command
= elf_tdata (abfd
)->core
->command
;
2044 int n
= strlen (command
);
2046 if (0 < n
&& command
[n
- 1] == ' ')
2047 command
[n
- 1] = '\0';
2054 elf32_arm_nabi_write_core_note (bfd
*abfd
, char *buf
, int *bufsiz
,
2067 va_start (ap
, note_type
);
2068 memset (data
, 0, sizeof (data
));
2069 strncpy (data
+ 28, va_arg (ap
, const char *), 16);
2070 strncpy (data
+ 44, va_arg (ap
, const char *), 80);
2073 return elfcore_write_note (abfd
, buf
, bufsiz
,
2074 "CORE", note_type
, data
, sizeof (data
));
2085 va_start (ap
, note_type
);
2086 memset (data
, 0, sizeof (data
));
2087 pid
= va_arg (ap
, long);
2088 bfd_put_32 (abfd
, pid
, data
+ 24);
2089 cursig
= va_arg (ap
, int);
2090 bfd_put_16 (abfd
, cursig
, data
+ 12);
2091 greg
= va_arg (ap
, const void *);
2092 memcpy (data
+ 72, greg
, 72);
2095 return elfcore_write_note (abfd
, buf
, bufsiz
,
2096 "CORE", note_type
, data
, sizeof (data
));
2101 #define TARGET_LITTLE_SYM arm_elf32_le_vec
2102 #define TARGET_LITTLE_NAME "elf32-littlearm"
2103 #define TARGET_BIG_SYM arm_elf32_be_vec
2104 #define TARGET_BIG_NAME "elf32-bigarm"
2106 #define elf_backend_grok_prstatus elf32_arm_nabi_grok_prstatus
2107 #define elf_backend_grok_psinfo elf32_arm_nabi_grok_psinfo
2108 #define elf_backend_write_core_note elf32_arm_nabi_write_core_note
2110 typedef unsigned long int insn32
;
2111 typedef unsigned short int insn16
;
/* In lieu of proper flags, assume all EABIv4 or later objects are
   interworkable.  */
2115 #define INTERWORK_FLAG(abfd) \
2116 (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
2117 || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
2118 || ((abfd)->flags & BFD_LINKER_CREATED))
2120 /* The linker script knows the section names for placement.
2121 The entry_names are used to do simple name mangling on the stubs.
2122 Given a function name, and its type, the stub can be found. The
2123 name can be changed. The only requirement is the %s be present. */
2124 #define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
2125 #define THUMB2ARM_GLUE_ENTRY_NAME "__%s_from_thumb"
2127 #define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
2128 #define ARM2THUMB_GLUE_ENTRY_NAME "__%s_from_arm"
2130 #define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
2131 #define VFP11_ERRATUM_VENEER_ENTRY_NAME "__vfp11_veneer_%x"
2133 #define STM32L4XX_ERRATUM_VENEER_SECTION_NAME ".text.stm32l4xx_veneer"
2134 #define STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "__stm32l4xx_veneer_%x"
2136 #define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
2137 #define ARM_BX_GLUE_ENTRY_NAME "__bx_r%d"
2139 #define STUB_ENTRY_NAME "__%s_veneer"
/* The name of the dynamic interpreter.  This is put in the .interp
   section.  */
2143 #define ELF_DYNAMIC_INTERPRETER "/usr/lib/ld.so.1"
/* ARM code sequence used as a trampoline for TLS access.  */
static const unsigned long tls_trampoline [] =
{
  0xe08e0000,		/* add r0, lr, r0 */
  0xe5901004,		/* ldr r1, [r0,#4] */
  0xe12fff11,		/* bx  r1 */
};
/* Trampoline jumped to for lazy resolution of a TLS descriptor; loads the
   resolver address and GOT base from the two trailing literal words.  */
static const unsigned long dl_tlsdesc_lazy_trampoline [] =
{
  0xe52d2004,		/* push {r2}			*/
  0xe59f200c,		/* ldr  r2, [pc, #3f - . - 8]	*/
  0xe59f100c,		/* ldr  r1, [pc, #4f - . - 8]	*/
  0xe79f2002,		/* 1:   ldr  r2, [pc, r2]	*/
  0xe081100f,		/* 2:   add  r1, pc		*/
  0xe12fff12,		/* bx   r2			*/
  0x00000014,		/* 3:   .word  _GLOBAL_OFFSET_TABLE_ - 1b - 8
				       + dl_tlsdesc_lazy_resolver(GOT)   */
  0x00000018,		/* 4:   .word  _GLOBAL_OFFSET_TABLE_ - 2b - 8	*/
};
2165 #ifdef FOUR_WORD_PLT
2167 /* The first entry in a procedure linkage table looks like
2168 this. It is set up so that any shared library function that is
2169 called before the relocation has been set up calls the dynamic
2171 static const bfd_vma elf32_arm_plt0_entry
[] =
2173 0xe52de004, /* str lr, [sp, #-4]! */
2174 0xe59fe010, /* ldr lr, [pc, #16] */
2175 0xe08fe00e, /* add lr, pc, lr */
2176 0xe5bef008, /* ldr pc, [lr, #8]! */
2179 /* Subsequent entries in a procedure linkage table look like
2181 static const bfd_vma elf32_arm_plt_entry
[] =
2183 0xe28fc600, /* add ip, pc, #NN */
2184 0xe28cca00, /* add ip, ip, #NN */
2185 0xe5bcf000, /* ldr pc, [ip, #NN]! */
2186 0x00000000, /* unused */
2189 #else /* not FOUR_WORD_PLT */
2191 /* The first entry in a procedure linkage table looks like
2192 this. It is set up so that any shared library function that is
2193 called before the relocation has been set up calls the dynamic
2195 static const bfd_vma elf32_arm_plt0_entry
[] =
2197 0xe52de004, /* str lr, [sp, #-4]! */
2198 0xe59fe004, /* ldr lr, [pc, #4] */
2199 0xe08fe00e, /* add lr, pc, lr */
2200 0xe5bef008, /* ldr pc, [lr, #8]! */
2201 0x00000000, /* &GOT[0] - . */
2204 /* By default subsequent entries in a procedure linkage table look like
2205 this. Offsets that don't fit into 28 bits will cause link error. */
2206 static const bfd_vma elf32_arm_plt_entry_short
[] =
2208 0xe28fc600, /* add ip, pc, #0xNN00000 */
2209 0xe28cca00, /* add ip, ip, #0xNN000 */
2210 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
2213 /* When explicitly asked, we'll use this "long" entry format
2214 which can cope with arbitrary displacements. */
2215 static const bfd_vma elf32_arm_plt_entry_long
[] =
2217 0xe28fc200, /* add ip, pc, #0xN0000000 */
2218 0xe28cc600, /* add ip, ip, #0xNN00000 */
2219 0xe28cca00, /* add ip, ip, #0xNN000 */
2220 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
2223 static bfd_boolean elf32_arm_use_long_plt_entry
= FALSE
;
2225 #endif /* not FOUR_WORD_PLT */
2227 /* The first entry in a procedure linkage table looks like this.
2228 It is set up so that any shared library function that is called before the
2229 relocation has been set up calls the dynamic linker first. */
2230 static const bfd_vma elf32_thumb2_plt0_entry
[] =
2232 /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
2233 an instruction maybe encoded to one or two array elements. */
2234 0xf8dfb500, /* push {lr} */
2235 0x44fee008, /* ldr.w lr, [pc, #8] */
2237 0xff08f85e, /* ldr.w pc, [lr, #8]! */
2238 0x00000000, /* &GOT[0] - . */
2241 /* Subsequent entries in a procedure linkage table for thumb only target
2243 static const bfd_vma elf32_thumb2_plt_entry
[] =
2245 /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
2246 an instruction maybe encoded to one or two array elements. */
2247 0x0c00f240, /* movw ip, #0xNNNN */
2248 0x0c00f2c0, /* movt ip, #0xNNNN */
2249 0xf8dc44fc, /* add ip, pc */
2250 0xbf00f000 /* ldr.w pc, [ip] */
2254 /* The format of the first entry in the procedure linkage table
2255 for a VxWorks executable. */
2256 static const bfd_vma elf32_arm_vxworks_exec_plt0_entry
[] =
2258 0xe52dc008, /* str ip,[sp,#-8]! */
2259 0xe59fc000, /* ldr ip,[pc] */
2260 0xe59cf008, /* ldr pc,[ip,#8] */
2261 0x00000000, /* .long _GLOBAL_OFFSET_TABLE_ */
2264 /* The format of subsequent entries in a VxWorks executable. */
2265 static const bfd_vma elf32_arm_vxworks_exec_plt_entry
[] =
2267 0xe59fc000, /* ldr ip,[pc] */
2268 0xe59cf000, /* ldr pc,[ip] */
2269 0x00000000, /* .long @got */
2270 0xe59fc000, /* ldr ip,[pc] */
2271 0xea000000, /* b _PLT */
2272 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
2275 /* The format of entries in a VxWorks shared library. */
2276 static const bfd_vma elf32_arm_vxworks_shared_plt_entry
[] =
2278 0xe59fc000, /* ldr ip,[pc] */
2279 0xe79cf009, /* ldr pc,[ip,r9] */
2280 0x00000000, /* .long @got */
2281 0xe59fc000, /* ldr ip,[pc] */
2282 0xe599f008, /* ldr pc,[r9,#8] */
2283 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
2286 /* An initial stub used if the PLT entry is referenced from Thumb code. */
2287 #define PLT_THUMB_STUB_SIZE 4
2288 static const bfd_vma elf32_arm_plt_thumb_stub
[] =
2294 /* The entries in a PLT when using a DLL-based target with multiple
2296 static const bfd_vma elf32_arm_symbian_plt_entry
[] =
2298 0xe51ff004, /* ldr pc, [pc, #-4] */
2299 0x00000000, /* dcd R_ARM_GLOB_DAT(X) */
2302 /* The first entry in a procedure linkage table looks like
2303 this. It is set up so that any shared library function that is
2304 called before the relocation has been set up calls the dynamic
2306 static const bfd_vma elf32_arm_nacl_plt0_entry
[] =
2309 0xe300c000, /* movw ip, #:lower16:&GOT[2]-.+8 */
2310 0xe340c000, /* movt ip, #:upper16:&GOT[2]-.+8 */
2311 0xe08cc00f, /* add ip, ip, pc */
2312 0xe52dc008, /* str ip, [sp, #-8]! */
2313 /* Second bundle: */
2314 0xe3ccc103, /* bic ip, ip, #0xc0000000 */
2315 0xe59cc000, /* ldr ip, [ip] */
2316 0xe3ccc13f, /* bic ip, ip, #0xc000000f */
2317 0xe12fff1c, /* bx ip */
2319 0xe320f000, /* nop */
2320 0xe320f000, /* nop */
2321 0xe320f000, /* nop */
2323 0xe50dc004, /* str ip, [sp, #-4] */
2324 /* Fourth bundle: */
2325 0xe3ccc103, /* bic ip, ip, #0xc0000000 */
2326 0xe59cc000, /* ldr ip, [ip] */
2327 0xe3ccc13f, /* bic ip, ip, #0xc000000f */
2328 0xe12fff1c, /* bx ip */
2330 #define ARM_NACL_PLT_TAIL_OFFSET (11 * 4)
2332 /* Subsequent entries in a procedure linkage table look like this. */
2333 static const bfd_vma elf32_arm_nacl_plt_entry
[] =
2335 0xe300c000, /* movw ip, #:lower16:&GOT[n]-.+8 */
2336 0xe340c000, /* movt ip, #:upper16:&GOT[n]-.+8 */
2337 0xe08cc00f, /* add ip, ip, pc */
2338 0xea000000, /* b .Lplt_tail */
/* Maximum forward/backward reach of the various branch encodings,
   including the +4/+8 pipeline offset of the respective ISA.  */
#define ARM_MAX_FWD_BRANCH_OFFSET  ((((1 << 23) - 1) << 2) + 8)
#define ARM_MAX_BWD_BRANCH_OFFSET  ((-((1 << 23) << 2)) + 8)
#define THM_MAX_FWD_BRANCH_OFFSET  ((1 << 22) -2 + 4)
#define THM_MAX_BWD_BRANCH_OFFSET  (-(1 << 22) + 4)
#define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
#define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
#define THM2_MAX_FWD_COND_BRANCH_OFFSET (((1 << 20) -2) + 4)
#define THM2_MAX_BWD_COND_BRANCH_OFFSET (-(1 << 20) + 4)
/* Helpers that expand to insn_sequence initializers:
   {data, type, r_type, reloc_addend}.  */
#define THUMB16_INSN(X)		{(X), THUMB16_TYPE, R_ARM_NONE, 0}
/* A bit of a hack.  A Thumb conditional branch, in which the proper condition
   is inserted in arm_build_one_stub().  */
#define THUMB16_BCOND_INSN(X)	{(X), THUMB16_TYPE, R_ARM_NONE, 1}
#define THUMB32_INSN(X)		{(X), THUMB32_TYPE, R_ARM_NONE, 0}
#define THUMB32_B_INSN(X, Z)	{(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
#define ARM_INSN(X)		{(X), ARM_TYPE, R_ARM_NONE, 0}
#define ARM_REL_INSN(X, Z)	{(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
#define DATA_WORD(X,Y,Z)	{(X), DATA_TYPE, (Y), (Z)}
2371 enum stub_insn_type type
;
2372 unsigned int r_type
;
2376 /* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
2377 to reach the stub if necessary. */
2378 static const insn_sequence elf32_arm_stub_long_branch_any_any
[] =
2380 ARM_INSN (0xe51ff004), /* ldr pc, [pc, #-4] */
2381 DATA_WORD (0, R_ARM_ABS32
, 0), /* dcd R_ARM_ABS32(X) */
2384 /* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
2386 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb
[] =
2388 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2389 ARM_INSN (0xe12fff1c), /* bx ip */
2390 DATA_WORD (0, R_ARM_ABS32
, 0), /* dcd R_ARM_ABS32(X) */
2393 /* Thumb -> Thumb long branch stub. Used on M-profile architectures. */
2394 static const insn_sequence elf32_arm_stub_long_branch_thumb_only
[] =
2396 THUMB16_INSN (0xb401), /* push {r0} */
2397 THUMB16_INSN (0x4802), /* ldr r0, [pc, #8] */
2398 THUMB16_INSN (0x4684), /* mov ip, r0 */
2399 THUMB16_INSN (0xbc01), /* pop {r0} */
2400 THUMB16_INSN (0x4760), /* bx ip */
2401 THUMB16_INSN (0xbf00), /* nop */
2402 DATA_WORD (0, R_ARM_ABS32
, 0), /* dcd R_ARM_ABS32(X) */
2405 /* V4T Thumb -> Thumb long branch stub. Using the stack is not
2407 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb
[] =
2409 THUMB16_INSN (0x4778), /* bx pc */
2410 THUMB16_INSN (0x46c0), /* nop */
2411 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2412 ARM_INSN (0xe12fff1c), /* bx ip */
2413 DATA_WORD (0, R_ARM_ABS32
, 0), /* dcd R_ARM_ABS32(X) */
2416 /* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
2418 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm
[] =
2420 THUMB16_INSN (0x4778), /* bx pc */
2421 THUMB16_INSN (0x46c0), /* nop */
2422 ARM_INSN (0xe51ff004), /* ldr pc, [pc, #-4] */
2423 DATA_WORD (0, R_ARM_ABS32
, 0), /* dcd R_ARM_ABS32(X) */
2426 /* V4T Thumb -> ARM short branch stub. Shorter variant of the above
2427 one, when the destination is close enough. */
2428 static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm
[] =
2430 THUMB16_INSN (0x4778), /* bx pc */
2431 THUMB16_INSN (0x46c0), /* nop */
2432 ARM_REL_INSN (0xea000000, -8), /* b (X-8) */
2435 /* ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use
2436 blx to reach the stub if necessary. */
2437 static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic
[] =
2439 ARM_INSN (0xe59fc000), /* ldr ip, [pc] */
2440 ARM_INSN (0xe08ff00c), /* add pc, pc, ip */
2441 DATA_WORD (0, R_ARM_REL32
, -4), /* dcd R_ARM_REL32(X-4) */
2444 /* ARM/Thumb -> Thumb long branch stub, PIC. On V5T and above, use
2445 blx to reach the stub if necessary. We can not add into pc;
2446 it is not guaranteed to mode switch (different in ARMv6 and
2448 static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic
[] =
2450 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2451 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2452 ARM_INSN (0xe12fff1c), /* bx ip */
2453 DATA_WORD (0, R_ARM_REL32
, 0), /* dcd R_ARM_REL32(X) */
2456 /* V4T ARM -> ARM long branch stub, PIC. */
2457 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic
[] =
2459 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2460 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2461 ARM_INSN (0xe12fff1c), /* bx ip */
2462 DATA_WORD (0, R_ARM_REL32
, 0), /* dcd R_ARM_REL32(X) */
2465 /* V4T Thumb -> ARM long branch stub, PIC. */
2466 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic
[] =
2468 THUMB16_INSN (0x4778), /* bx pc */
2469 THUMB16_INSN (0x46c0), /* nop */
2470 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2471 ARM_INSN (0xe08cf00f), /* add pc, ip, pc */
2472 DATA_WORD (0, R_ARM_REL32
, -4), /* dcd R_ARM_REL32(X) */
2475 /* Thumb -> Thumb long branch stub, PIC. Used on M-profile
2477 static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic
[] =
2479 THUMB16_INSN (0xb401), /* push {r0} */
2480 THUMB16_INSN (0x4802), /* ldr r0, [pc, #8] */
2481 THUMB16_INSN (0x46fc), /* mov ip, pc */
2482 THUMB16_INSN (0x4484), /* add ip, r0 */
2483 THUMB16_INSN (0xbc01), /* pop {r0} */
2484 THUMB16_INSN (0x4760), /* bx ip */
2485 DATA_WORD (0, R_ARM_REL32
, 4), /* dcd R_ARM_REL32(X) */
2488 /* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
2490 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic
[] =
2492 THUMB16_INSN (0x4778), /* bx pc */
2493 THUMB16_INSN (0x46c0), /* nop */
2494 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2495 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2496 ARM_INSN (0xe12fff1c), /* bx ip */
2497 DATA_WORD (0, R_ARM_REL32
, 0), /* dcd R_ARM_REL32(X) */
2500 /* Thumb2/ARM -> TLS trampoline. Lowest common denominator, which is a
2501 long PIC stub. We can use r1 as a scratch -- and cannot use ip. */
2502 static const insn_sequence elf32_arm_stub_long_branch_any_tls_pic
[] =
2504 ARM_INSN (0xe59f1000), /* ldr r1, [pc] */
2505 ARM_INSN (0xe08ff001), /* add pc, pc, r1 */
2506 DATA_WORD (0, R_ARM_REL32
, -4), /* dcd R_ARM_REL32(X-4) */
2509 /* V4T Thumb -> TLS trampoline. lowest common denominator, which is a
2510 long PIC stub. We can use r1 as a scratch -- and cannot use ip. */
2511 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_tls_pic
[] =
2513 THUMB16_INSN (0x4778), /* bx pc */
2514 THUMB16_INSN (0x46c0), /* nop */
2515 ARM_INSN (0xe59f1000), /* ldr r1, [pc, #0] */
2516 ARM_INSN (0xe081f00f), /* add pc, r1, pc */
2517 DATA_WORD (0, R_ARM_REL32
, -4), /* dcd R_ARM_REL32(X) */
2520 /* NaCl ARM -> ARM long branch stub. */
2521 static const insn_sequence elf32_arm_stub_long_branch_arm_nacl
[] =
2523 ARM_INSN (0xe59fc00c), /* ldr ip, [pc, #12] */
2524 ARM_INSN (0xe3ccc13f), /* bic ip, ip, #0xc000000f */
2525 ARM_INSN (0xe12fff1c), /* bx ip */
2526 ARM_INSN (0xe320f000), /* nop */
2527 ARM_INSN (0xe125be70), /* bkpt 0x5be0 */
2528 DATA_WORD (0, R_ARM_ABS32
, 0), /* dcd R_ARM_ABS32(X) */
2529 DATA_WORD (0, R_ARM_NONE
, 0), /* .word 0 */
2530 DATA_WORD (0, R_ARM_NONE
, 0), /* .word 0 */
2533 /* NaCl ARM -> ARM long branch stub, PIC. */
2534 static const insn_sequence elf32_arm_stub_long_branch_arm_nacl_pic
[] =
2536 ARM_INSN (0xe59fc00c), /* ldr ip, [pc, #12] */
2537 ARM_INSN (0xe08cc00f), /* add ip, ip, pc */
2538 ARM_INSN (0xe3ccc13f), /* bic ip, ip, #0xc000000f */
2539 ARM_INSN (0xe12fff1c), /* bx ip */
2540 ARM_INSN (0xe125be70), /* bkpt 0x5be0 */
2541 DATA_WORD (0, R_ARM_REL32
, 8), /* dcd R_ARM_REL32(X+8) */
2542 DATA_WORD (0, R_ARM_NONE
, 0), /* .word 0 */
2543 DATA_WORD (0, R_ARM_NONE
, 0), /* .word 0 */
2547 /* Cortex-A8 erratum-workaround stubs. */
2549 /* Stub used for conditional branches (which may be beyond +/-1MB away, so we
2550 can't use a conditional branch to reach this stub). */
2552 static const insn_sequence elf32_arm_stub_a8_veneer_b_cond
[] =
2554 THUMB16_BCOND_INSN (0xd001), /* b<cond>.n true. */
2555 THUMB32_B_INSN (0xf000b800, -4), /* b.w insn_after_original_branch. */
2556 THUMB32_B_INSN (0xf000b800, -4) /* true: b.w original_branch_dest. */
2559 /* Stub used for b.w and bl.w instructions. */
2561 static const insn_sequence elf32_arm_stub_a8_veneer_b
[] =
2563 THUMB32_B_INSN (0xf000b800, -4) /* b.w original_branch_dest. */
2566 static const insn_sequence elf32_arm_stub_a8_veneer_bl
[] =
2568 THUMB32_B_INSN (0xf000b800, -4) /* b.w original_branch_dest. */
2571 /* Stub used for Thumb-2 blx.w instructions. We modified the original blx.w
2572 instruction (which switches to ARM mode) to point to this stub. Jump to the
2573 real destination using an ARM-mode branch. */
2575 static const insn_sequence elf32_arm_stub_a8_veneer_blx
[] =
2577 ARM_REL_INSN (0xea000000, -8) /* b original_branch_dest. */
/* For each section group there can be a specially created linker section
   to hold the stubs for that group.  The name of the stub section is based
   upon the name of another section within that group with the suffix below
   applied.

   PR 13049: STUB_SUFFIX used to be ".stub", but this allowed the user to
   create what appeared to be a linker stub section when it actually
   contained user code/data.  For example, consider this fragment:

     const char * stubborn_problems[] = { "np" };

   If this is compiled with "-fPIC -fdata-sections" then gcc produces a
   section called:

     .data.rel.local.stubborn_problems

   This then causes problems in arm32_arm_build_stubs() as it triggers:

      // Ignore non-stub sections.
      if (!strstr (stub_sec->name, STUB_SUFFIX))
	continue;

   And so the section would be ignored instead of being processed.  Hence
   the change in definition of STUB_SUFFIX to a name that cannot be a valid
   C identifier.  */
#define STUB_SUFFIX ".__stub"
/* One entry per long/short branch stub defined above.  */
#define DEF_STUBS \
  DEF_STUB(long_branch_any_any)	\
  DEF_STUB(long_branch_v4t_arm_thumb) \
  DEF_STUB(long_branch_thumb_only) \
  DEF_STUB(long_branch_v4t_thumb_thumb)	\
  DEF_STUB(long_branch_v4t_thumb_arm) \
  DEF_STUB(short_branch_v4t_thumb_arm) \
  DEF_STUB(long_branch_any_arm_pic) \
  DEF_STUB(long_branch_any_thumb_pic) \
  DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
  DEF_STUB(long_branch_v4t_arm_thumb_pic) \
  DEF_STUB(long_branch_v4t_thumb_arm_pic) \
  DEF_STUB(long_branch_thumb_only_pic) \
  DEF_STUB(long_branch_any_tls_pic) \
  DEF_STUB(long_branch_v4t_thumb_tls_pic) \
  DEF_STUB(long_branch_arm_nacl) \
  DEF_STUB(long_branch_arm_nacl_pic) \
  DEF_STUB(a8_veneer_b_cond) \
  DEF_STUB(a8_veneer_b) \
  DEF_STUB(a8_veneer_bl) \
  DEF_STUB(a8_veneer_blx)
2630 #define DEF_STUB(x) arm_stub_##x,
2631 enum elf32_arm_stub_type
2639 /* Note the first a8_veneer type. */
2640 const unsigned arm_stub_a8_veneer_lwm
= arm_stub_a8_veneer_b_cond
;
2644 const insn_sequence
* template_sequence
;
2648 #define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
2649 static const stub_def stub_definitions
[] =
2655 struct elf32_arm_stub_hash_entry
2657 /* Base hash table entry structure. */
2658 struct bfd_hash_entry root
;
2660 /* The stub section. */
2663 /* Offset within stub_sec of the beginning of this stub. */
2664 bfd_vma stub_offset
;
2666 /* Given the symbol's value and its section we can determine its final
2667 value when building the stubs (so the stub knows where to jump). */
2668 bfd_vma target_value
;
2669 asection
*target_section
;
2671 /* Same as above but for the source of the branch to the stub. Used for
2672 Cortex-A8 erratum workaround to patch it to branch to the stub. As
2673 such, source section does not need to be recorded since Cortex-A8 erratum
2674 workaround stubs are only generated when both source and target are in the
2676 bfd_vma source_value
;
2678 /* The instruction which caused this stub to be generated (only valid for
2679 Cortex-A8 erratum workaround stubs at present). */
2680 unsigned long orig_insn
;
2682 /* The stub type. */
2683 enum elf32_arm_stub_type stub_type
;
2684 /* Its encoding size in bytes. */
2687 const insn_sequence
*stub_template
;
2688 /* The size of the template (number of entries). */
2689 int stub_template_size
;
2691 /* The symbol table entry, if any, that this was derived from. */
2692 struct elf32_arm_link_hash_entry
*h
;
2694 /* Type of branch. */
2695 enum arm_st_branch_type branch_type
;
2697 /* Where this stub is being called from, or, in the case of combined
2698 stub sections, the first input section in the group. */
2701 /* The name for the local symbol at the start of this stub. The
2702 stub name in the hash table has to be unique; this does not, so
2703 it can be friendlier. */
2707 /* Used to build a map of a section. This is required for mixed-endian
2710 typedef struct elf32_elf_section_map
2715 elf32_arm_section_map
;
/* Information about a VFP11 erratum veneer, or a branch to such a veneer.  */

typedef enum
{
  VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
  VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
  VFP11_ERRATUM_ARM_VENEER,
  VFP11_ERRATUM_THUMB_VENEER
}
elf32_vfp11_erratum_type;
2728 typedef struct elf32_vfp11_erratum_list
2730 struct elf32_vfp11_erratum_list
*next
;
2736 struct elf32_vfp11_erratum_list
*veneer
;
2737 unsigned int vfp_insn
;
2741 struct elf32_vfp11_erratum_list
*branch
;
2745 elf32_vfp11_erratum_type type
;
2747 elf32_vfp11_erratum_list
;
/* Information about a STM32L4XX erratum veneer, or a branch to such a
   veneer.  */

typedef enum
{
  STM32L4XX_ERRATUM_BRANCH_TO_VENEER,
  STM32L4XX_ERRATUM_VENEER
}
elf32_stm32l4xx_erratum_type;
2758 typedef struct elf32_stm32l4xx_erratum_list
2760 struct elf32_stm32l4xx_erratum_list
*next
;
2766 struct elf32_stm32l4xx_erratum_list
*veneer
;
2771 struct elf32_stm32l4xx_erratum_list
*branch
;
2775 elf32_stm32l4xx_erratum_type type
;
2777 elf32_stm32l4xx_erratum_list
;
/* Kinds of edit that can be applied to an unwind (.ARM.exidx) table.  */
typedef enum
{
  DELETE_EXIDX_ENTRY,
  INSERT_EXIDX_CANTUNWIND_AT_END
}
arm_unwind_edit_type;
2786 /* A (sorted) list of edits to apply to an unwind table. */
2787 typedef struct arm_unwind_table_edit
2789 arm_unwind_edit_type type
;
2790 /* Note: we sometimes want to insert an unwind entry corresponding to a
2791 section different from the one we're currently writing out, so record the
2792 (text) section this edit relates to here. */
2793 asection
*linked_section
;
2795 struct arm_unwind_table_edit
*next
;
2797 arm_unwind_table_edit
;
2799 typedef struct _arm_elf_section_data
2801 /* Information about mapping symbols. */
2802 struct bfd_elf_section_data elf
;
2803 unsigned int mapcount
;
2804 unsigned int mapsize
;
2805 elf32_arm_section_map
*map
;
2806 /* Information about CPU errata. */
2807 unsigned int erratumcount
;
2808 elf32_vfp11_erratum_list
*erratumlist
;
2809 unsigned int stm32l4xx_erratumcount
;
2810 elf32_stm32l4xx_erratum_list
*stm32l4xx_erratumlist
;
2811 unsigned int additional_reloc_count
;
2812 /* Information about unwind tables. */
2815 /* Unwind info attached to a text section. */
2818 asection
*arm_exidx_sec
;
2821 /* Unwind info attached to an .ARM.exidx section. */
2824 arm_unwind_table_edit
*unwind_edit_list
;
2825 arm_unwind_table_edit
*unwind_edit_tail
;
2829 _arm_elf_section_data
;
2831 #define elf32_arm_section_data(sec) \
2832 ((_arm_elf_section_data *) elf_section_data (sec))
2834 /* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
2835 These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
2836 so may be created multiple times: we use an array of these entries whilst
2837 relaxing which we can refresh easily, then create stubs for each potentially
2838 erratum-triggering instruction once we've settled on a solution. */
2840 struct a8_erratum_fix
2845 bfd_vma target_offset
;
2846 unsigned long orig_insn
;
2848 enum elf32_arm_stub_type stub_type
;
2849 enum arm_st_branch_type branch_type
;
2852 /* A table of relocs applied to branches which might trigger Cortex-A8
2855 struct a8_erratum_reloc
2858 bfd_vma destination
;
2859 struct elf32_arm_link_hash_entry
*hash
;
2860 const char *sym_name
;
2861 unsigned int r_type
;
2862 enum arm_st_branch_type branch_type
;
2863 bfd_boolean non_a8_stub
;
/* The size of the thread control block.  */
#define TCB_SIZE	8
2869 /* ARM-specific information about a PLT entry, over and above the usual
2873 /* We reference count Thumb references to a PLT entry separately,
2874 so that we can emit the Thumb trampoline only if needed. */
2875 bfd_signed_vma thumb_refcount
;
2877 /* Some references from Thumb code may be eliminated by BL->BLX
2878 conversion, so record them separately. */
2879 bfd_signed_vma maybe_thumb_refcount
;
2881 /* How many of the recorded PLT accesses were from non-call relocations.
2882 This information is useful when deciding whether anything takes the
2883 address of an STT_GNU_IFUNC PLT. A value of 0 means that all
2884 non-call references to the function should resolve directly to the
2885 real runtime target. */
2886 unsigned int noncall_refcount
;
2888 /* Since PLT entries have variable size if the Thumb prologue is
2889 used, we need to record the index into .got.plt instead of
2890 recomputing it from the PLT offset. */
2891 bfd_signed_vma got_offset
;
2894 /* Information about an .iplt entry for a local STT_GNU_IFUNC symbol. */
2895 struct arm_local_iplt_info
2897 /* The information that is usually found in the generic ELF part of
2898 the hash table entry. */
2899 union gotplt_union root
;
2901 /* The information that is usually found in the ARM-specific part of
2902 the hash table entry. */
2903 struct arm_plt_info arm
;
2905 /* A list of all potential dynamic relocations against this symbol. */
2906 struct elf_dyn_relocs
*dyn_relocs
;
2909 struct elf_arm_obj_tdata
2911 struct elf_obj_tdata root
;
2913 /* tls_type for each local got entry. */
2914 char *local_got_tls_type
;
2916 /* GOTPLT entries for TLS descriptors. */
2917 bfd_vma
*local_tlsdesc_gotent
;
2919 /* Information for local symbols that need entries in .iplt. */
2920 struct arm_local_iplt_info
**local_iplt
;
2922 /* Zero to warn when linking objects with incompatible enum sizes. */
2923 int no_enum_size_warning
;
2925 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
2926 int no_wchar_size_warning
;
/* Accessors for the ARM-specific object tdata and its members.  */
#define elf_arm_tdata(bfd) \
  ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)

#define elf32_arm_local_got_tls_type(bfd) \
  (elf_arm_tdata (bfd)->local_got_tls_type)

#define elf32_arm_local_tlsdesc_gotent(bfd) \
  (elf_arm_tdata (bfd)->local_tlsdesc_gotent)

#define elf32_arm_local_iplt(bfd) \
  (elf_arm_tdata (bfd)->local_iplt)

#define is_arm_elf(bfd) \
  (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
   && elf_tdata (bfd) != NULL \
   && elf_object_id (bfd) == ARM_ELF_DATA)
2947 elf32_arm_mkobject (bfd
*abfd
)
2949 return bfd_elf_allocate_object (abfd
, sizeof (struct elf_arm_obj_tdata
),
2953 #define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))
2955 /* Arm ELF linker hash entry. */
2956 struct elf32_arm_link_hash_entry
2958 struct elf_link_hash_entry root
;
2960 /* Track dynamic relocs copied for this symbol. */
2961 struct elf_dyn_relocs
*dyn_relocs
;
2963 /* ARM-specific PLT information. */
2964 struct arm_plt_info plt
;
2966 #define GOT_UNKNOWN 0
2967 #define GOT_NORMAL 1
2968 #define GOT_TLS_GD 2
2969 #define GOT_TLS_IE 4
2970 #define GOT_TLS_GDESC 8
2971 #define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLS_GDESC))
2972 unsigned int tls_type
: 8;
2974 /* True if the symbol's PLT entry is in .iplt rather than .plt. */
2975 unsigned int is_iplt
: 1;
2977 unsigned int unused
: 23;
2979 /* Offset of the GOTPLT entry reserved for the TLS descriptor,
2980 starting at the end of the jump table. */
2981 bfd_vma tlsdesc_got
;
2983 /* The symbol marking the real symbol location for exported thumb
2984 symbols with Arm stubs. */
2985 struct elf_link_hash_entry
*export_glue
;
2987 /* A pointer to the most recently used stub hash entry against this
2989 struct elf32_arm_stub_hash_entry
*stub_cache
;
/* Traverse an arm ELF linker hash table.  */
#define elf32_arm_link_hash_traverse(table, func, info)			\
  (elf_link_hash_traverse						\
   (&(table)->root,							\
    (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func),	\
    (info)))

/* Get the ARM elf linker hash table from a link_info structure.  */
#define elf32_arm_hash_table(info) \
  (elf_hash_table_id ((struct elf_link_hash_table *) ((info)->hash)) \
  == ARM_ELF_DATA ? ((struct elf32_arm_link_hash_table *) ((info)->hash)) : NULL)

#define arm_stub_hash_lookup(table, string, create, copy) \
  ((struct elf32_arm_stub_hash_entry *) \
   bfd_hash_lookup ((table), (string), (create), (copy)))
3008 /* Array to keep track of which stub sections have been created, and
3009 information on stub grouping. */
3012 /* This is the section to which stubs in the group will be
3015 /* The stub section. */
3019 #define elf32_arm_compute_jump_table_size(htab) \
3020 ((htab)->next_tls_desc_index * 4)
3022 /* ARM ELF linker hash table. */
3023 struct elf32_arm_link_hash_table
3025 /* The main hash table. */
3026 struct elf_link_hash_table root
;
3028 /* The size in bytes of the section containing the Thumb-to-ARM glue. */
3029 bfd_size_type thumb_glue_size
;
3031 /* The size in bytes of the section containing the ARM-to-Thumb glue. */
3032 bfd_size_type arm_glue_size
;
3034 /* The size in bytes of section containing the ARMv4 BX veneers. */
3035 bfd_size_type bx_glue_size
;
3037 /* Offsets of ARMv4 BX veneers. Bit1 set if present, and Bit0 set when
3038 veneer has been populated. */
3039 bfd_vma bx_glue_offset
[15];
3041 /* The size in bytes of the section containing glue for VFP11 erratum
3043 bfd_size_type vfp11_erratum_glue_size
;
3045 /* The size in bytes of the section containing glue for STM32L4XX erratum
3047 bfd_size_type stm32l4xx_erratum_glue_size
;
3049 /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum. This
3050 holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
3051 elf32_arm_write_section(). */
3052 struct a8_erratum_fix
*a8_erratum_fixes
;
3053 unsigned int num_a8_erratum_fixes
;
3055 /* An arbitrary input BFD chosen to hold the glue sections. */
3056 bfd
* bfd_of_glue_owner
;
3058 /* Nonzero to output a BE8 image. */
3061 /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
3062 Nonzero if R_ARM_TARGET1 means R_ARM_REL32. */
3065 /* The relocation to use for R_ARM_TARGET2 relocations. */
3068 /* 0 = Ignore R_ARM_V4BX.
3069 1 = Convert BX to MOV PC.
3070 2 = Generate v4 interworing stubs. */
3073 /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum. */
3076 /* Whether we should fix the ARM1176 BLX immediate issue. */
3079 /* Nonzero if the ARM/Thumb BLX instructions are available for use. */
3082 /* What sort of code sequences we should look for which may trigger the
3083 VFP11 denorm erratum. */
3084 bfd_arm_vfp11_fix vfp11_fix
;
3086 /* Global counter for the number of fixes we have emitted. */
3087 int num_vfp11_fixes
;
3089 /* What sort of code sequences we should look for which may trigger the
3090 STM32L4XX erratum. */
3091 bfd_arm_stm32l4xx_fix stm32l4xx_fix
;
3093 /* Global counter for the number of fixes we have emitted. */
3094 int num_stm32l4xx_fixes
;
3096 /* Nonzero to force PIC branch veneers. */
3099 /* The number of bytes in the initial entry in the PLT. */
3100 bfd_size_type plt_header_size
;
3102 /* The number of bytes in the subsequent PLT etries. */
3103 bfd_size_type plt_entry_size
;
3105 /* True if the target system is VxWorks. */
3108 /* True if the target system is Symbian OS. */
3111 /* True if the target system is Native Client. */
3114 /* True if the target uses REL relocations. */
3117 /* The index of the next unused R_ARM_TLS_DESC slot in .rel.plt. */
3118 bfd_vma next_tls_desc_index
;
3120 /* How many R_ARM_TLS_DESC relocations were generated so far. */
3121 bfd_vma num_tls_desc
;
3123 /* Short-cuts to get to dynamic linker sections. */
3127 /* The (unloaded but important) VxWorks .rela.plt.unloaded section. */
3130 /* The offset into splt of the PLT entry for the TLS descriptor
3131 resolver. Special values are 0, if not necessary (or not found
3132 to be necessary yet), and -1 if needed but not determined
3134 bfd_vma dt_tlsdesc_plt
;
3136 /* The offset into sgot of the GOT entry used by the PLT entry
3138 bfd_vma dt_tlsdesc_got
;
3140 /* Offset in .plt section of tls_arm_trampoline. */
3141 bfd_vma tls_trampoline
;
3143 /* Data for R_ARM_TLS_LDM32 relocations. */
3146 bfd_signed_vma refcount
;
3150 /* Small local sym cache. */
3151 struct sym_cache sym_cache
;
3153 /* For convenience in allocate_dynrelocs. */
3156 /* The amount of space used by the reserved portion of the sgotplt
3157 section, plus whatever space is used by the jump slots. */
3158 bfd_vma sgotplt_jump_table_size
;
3160 /* The stub hash table. */
3161 struct bfd_hash_table stub_hash_table
;
3163 /* Linker stub bfd. */
3166 /* Linker call-backs. */
3167 asection
* (*add_stub_section
) (const char *, asection
*, asection
*,
3169 void (*layout_sections_again
) (void);
3171 /* Array to keep track of which stub sections have been created, and
3172 information on stub grouping. */
3173 struct map_stub
*stub_group
;
3175 /* Number of elements in stub_group. */
3176 unsigned int top_id
;
3178 /* Assorted information used by elf32_arm_size_stubs. */
3179 unsigned int bfd_count
;
3180 unsigned int top_index
;
3181 asection
**input_list
;
/* Count trailing zero bits in MASK; returns -1 when MASK is zero
   (generic fallback only; the builtin's result is undefined for 0).  */

static inline int
ctz (unsigned int mask)
{
#if GCC_VERSION >= 3004
  return __builtin_ctz (mask);
#else
  unsigned int i;

  for (i = 0; i < 8 * sizeof (mask); i++)
    {
      if (mask & 0x1)
	return i;
      mask = (mask >> 1);
    }
  return -1;
#endif
}
/* Count the number of set bits in MASK.  */

static inline int
popcount (unsigned int mask)
{
#if GCC_VERSION >= 3004
  return __builtin_popcount (mask);
#else
  unsigned int i, sum = 0;

  for (i = 0; i < 8 * sizeof (mask); i++)
    {
      if (mask & 0x1)
	sum++;
      mask = (mask >> 1);
    }
  return sum;
#endif
}
3220 /* Create an entry in an ARM ELF linker hash table. */
3222 static struct bfd_hash_entry
*
3223 elf32_arm_link_hash_newfunc (struct bfd_hash_entry
* entry
,
3224 struct bfd_hash_table
* table
,
3225 const char * string
)
3227 struct elf32_arm_link_hash_entry
* ret
=
3228 (struct elf32_arm_link_hash_entry
*) entry
;
3230 /* Allocate the structure if it has not already been allocated by a
3233 ret
= (struct elf32_arm_link_hash_entry
*)
3234 bfd_hash_allocate (table
, sizeof (struct elf32_arm_link_hash_entry
));
3236 return (struct bfd_hash_entry
*) ret
;
3238 /* Call the allocation method of the superclass. */
3239 ret
= ((struct elf32_arm_link_hash_entry
*)
3240 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry
*) ret
,
3244 ret
->dyn_relocs
= NULL
;
3245 ret
->tls_type
= GOT_UNKNOWN
;
3246 ret
->tlsdesc_got
= (bfd_vma
) -1;
3247 ret
->plt
.thumb_refcount
= 0;
3248 ret
->plt
.maybe_thumb_refcount
= 0;
3249 ret
->plt
.noncall_refcount
= 0;
3250 ret
->plt
.got_offset
= -1;
3251 ret
->is_iplt
= FALSE
;
3252 ret
->export_glue
= NULL
;
3254 ret
->stub_cache
= NULL
;
3257 return (struct bfd_hash_entry
*) ret
;
3260 /* Ensure that we have allocated bookkeeping structures for ABFD's local
3264 elf32_arm_allocate_local_sym_info (bfd
*abfd
)
3266 if (elf_local_got_refcounts (abfd
) == NULL
)
3268 bfd_size_type num_syms
;
3272 num_syms
= elf_tdata (abfd
)->symtab_hdr
.sh_info
;
3273 size
= num_syms
* (sizeof (bfd_signed_vma
)
3274 + sizeof (struct arm_local_iplt_info
*)
3277 data
= bfd_zalloc (abfd
, size
);
3281 elf_local_got_refcounts (abfd
) = (bfd_signed_vma
*) data
;
3282 data
+= num_syms
* sizeof (bfd_signed_vma
);
3284 elf32_arm_local_iplt (abfd
) = (struct arm_local_iplt_info
**) data
;
3285 data
+= num_syms
* sizeof (struct arm_local_iplt_info
*);
3287 elf32_arm_local_tlsdesc_gotent (abfd
) = (bfd_vma
*) data
;
3288 data
+= num_syms
* sizeof (bfd_vma
);
3290 elf32_arm_local_got_tls_type (abfd
) = data
;
3295 /* Return the .iplt information for local symbol R_SYMNDX, which belongs
3296 to input bfd ABFD. Create the information if it doesn't already exist.
3297 Return null if an allocation fails. */
3299 static struct arm_local_iplt_info
*
3300 elf32_arm_create_local_iplt (bfd
*abfd
, unsigned long r_symndx
)
3302 struct arm_local_iplt_info
**ptr
;
3304 if (!elf32_arm_allocate_local_sym_info (abfd
))
3307 BFD_ASSERT (r_symndx
< elf_tdata (abfd
)->symtab_hdr
.sh_info
);
3308 ptr
= &elf32_arm_local_iplt (abfd
)[r_symndx
];
3310 *ptr
= bfd_zalloc (abfd
, sizeof (**ptr
));
3314 /* Try to obtain PLT information for the symbol with index R_SYMNDX
3315 in ABFD's symbol table. If the symbol is global, H points to its
3316 hash table entry, otherwise H is null.
3318 Return true if the symbol does have PLT information. When returning
3319 true, point *ROOT_PLT at the target-independent reference count/offset
3320 union and *ARM_PLT at the ARM-specific information. */
3323 elf32_arm_get_plt_info (bfd
*abfd
, struct elf32_arm_link_hash_entry
*h
,
3324 unsigned long r_symndx
, union gotplt_union
**root_plt
,
3325 struct arm_plt_info
**arm_plt
)
3327 struct arm_local_iplt_info
*local_iplt
;
3331 *root_plt
= &h
->root
.plt
;
3336 if (elf32_arm_local_iplt (abfd
) == NULL
)
3339 local_iplt
= elf32_arm_local_iplt (abfd
)[r_symndx
];
3340 if (local_iplt
== NULL
)
3343 *root_plt
= &local_iplt
->root
;
3344 *arm_plt
= &local_iplt
->arm
;
3348 /* Return true if the PLT described by ARM_PLT requires a Thumb stub
3352 elf32_arm_plt_needs_thumb_stub_p (struct bfd_link_info
*info
,
3353 struct arm_plt_info
*arm_plt
)
3355 struct elf32_arm_link_hash_table
*htab
;
3357 htab
= elf32_arm_hash_table (info
);
3358 return (arm_plt
->thumb_refcount
!= 0
3359 || (!htab
->use_blx
&& arm_plt
->maybe_thumb_refcount
!= 0));
3362 /* Return a pointer to the head of the dynamic reloc list that should
3363 be used for local symbol ISYM, which is symbol number R_SYMNDX in
3364 ABFD's symbol table. Return null if an error occurs. */
3366 static struct elf_dyn_relocs
**
3367 elf32_arm_get_local_dynreloc_list (bfd
*abfd
, unsigned long r_symndx
,
3368 Elf_Internal_Sym
*isym
)
3370 if (ELF32_ST_TYPE (isym
->st_info
) == STT_GNU_IFUNC
)
3372 struct arm_local_iplt_info
*local_iplt
;
3374 local_iplt
= elf32_arm_create_local_iplt (abfd
, r_symndx
);
3375 if (local_iplt
== NULL
)
3377 return &local_iplt
->dyn_relocs
;
3381 /* Track dynamic relocs needed for local syms too.
3382 We really need local syms available to do this
3387 s
= bfd_section_from_elf_index (abfd
, isym
->st_shndx
);
3391 vpp
= &elf_section_data (s
)->local_dynrel
;
3392 return (struct elf_dyn_relocs
**) vpp
;
3396 /* Initialize an entry in the stub hash table. */
3398 static struct bfd_hash_entry
*
3399 stub_hash_newfunc (struct bfd_hash_entry
*entry
,
3400 struct bfd_hash_table
*table
,
3403 /* Allocate the structure if it has not already been allocated by a
3407 entry
= (struct bfd_hash_entry
*)
3408 bfd_hash_allocate (table
, sizeof (struct elf32_arm_stub_hash_entry
));
3413 /* Call the allocation method of the superclass. */
3414 entry
= bfd_hash_newfunc (entry
, table
, string
);
3417 struct elf32_arm_stub_hash_entry
*eh
;
3419 /* Initialize the local fields. */
3420 eh
= (struct elf32_arm_stub_hash_entry
*) entry
;
3421 eh
->stub_sec
= NULL
;
3422 eh
->stub_offset
= 0;
3423 eh
->source_value
= 0;
3424 eh
->target_value
= 0;
3425 eh
->target_section
= NULL
;
3427 eh
->stub_type
= arm_stub_none
;
3429 eh
->stub_template
= NULL
;
3430 eh
->stub_template_size
= 0;
3433 eh
->output_name
= NULL
;
3439 /* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
3440 shortcuts to them in our hash table. */
3443 create_got_section (bfd
*dynobj
, struct bfd_link_info
*info
)
3445 struct elf32_arm_link_hash_table
*htab
;
3447 htab
= elf32_arm_hash_table (info
);
3451 /* BPABI objects never have a GOT, or associated sections. */
3452 if (htab
->symbian_p
)
3455 if (! _bfd_elf_create_got_section (dynobj
, info
))
3461 /* Create the .iplt, .rel(a).iplt and .igot.plt sections. */
3464 create_ifunc_sections (struct bfd_link_info
*info
)
3466 struct elf32_arm_link_hash_table
*htab
;
3467 const struct elf_backend_data
*bed
;
3472 htab
= elf32_arm_hash_table (info
);
3473 dynobj
= htab
->root
.dynobj
;
3474 bed
= get_elf_backend_data (dynobj
);
3475 flags
= bed
->dynamic_sec_flags
;
3477 if (htab
->root
.iplt
== NULL
)
3479 s
= bfd_make_section_anyway_with_flags (dynobj
, ".iplt",
3480 flags
| SEC_READONLY
| SEC_CODE
);
3482 || !bfd_set_section_alignment (dynobj
, s
, bed
->plt_alignment
))
3484 htab
->root
.iplt
= s
;
3487 if (htab
->root
.irelplt
== NULL
)
3489 s
= bfd_make_section_anyway_with_flags (dynobj
,
3490 RELOC_SECTION (htab
, ".iplt"),
3491 flags
| SEC_READONLY
);
3493 || !bfd_set_section_alignment (dynobj
, s
, bed
->s
->log_file_align
))
3495 htab
->root
.irelplt
= s
;
3498 if (htab
->root
.igotplt
== NULL
)
3500 s
= bfd_make_section_anyway_with_flags (dynobj
, ".igot.plt", flags
);
3502 || !bfd_set_section_alignment (dynobj
, s
, bed
->s
->log_file_align
))
3504 htab
->root
.igotplt
= s
;
3509 /* Determine if we're dealing with a Thumb only architecture. */
3512 using_thumb_only (struct elf32_arm_link_hash_table
*globals
)
3515 int profile
= bfd_elf_get_obj_attr_int (globals
->obfd
, OBJ_ATTR_PROC
,
3516 Tag_CPU_arch_profile
);
3519 return profile
== 'M';
3521 arch
= bfd_elf_get_obj_attr_int (globals
->obfd
, OBJ_ATTR_PROC
, Tag_CPU_arch
);
3523 if (arch
== TAG_CPU_ARCH_V6_M
3524 || arch
== TAG_CPU_ARCH_V6S_M
3525 || arch
== TAG_CPU_ARCH_V7E_M
3526 || arch
== TAG_CPU_ARCH_V8M_BASE
3527 || arch
== TAG_CPU_ARCH_V8M_MAIN
)
3533 /* Determine if we're dealing with a Thumb-2 object. */
3536 using_thumb2 (struct elf32_arm_link_hash_table
*globals
)
3538 int arch
= bfd_elf_get_obj_attr_int (globals
->obfd
, OBJ_ATTR_PROC
,
3540 return arch
== TAG_CPU_ARCH_V6T2
|| arch
>= TAG_CPU_ARCH_V7
;
3543 /* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
3544 .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
3548 elf32_arm_create_dynamic_sections (bfd
*dynobj
, struct bfd_link_info
*info
)
3550 struct elf32_arm_link_hash_table
*htab
;
3552 htab
= elf32_arm_hash_table (info
);
3556 if (!htab
->root
.sgot
&& !create_got_section (dynobj
, info
))
3559 if (!_bfd_elf_create_dynamic_sections (dynobj
, info
))
3562 htab
->sdynbss
= bfd_get_linker_section (dynobj
, ".dynbss");
3563 if (!bfd_link_pic (info
))
3564 htab
->srelbss
= bfd_get_linker_section (dynobj
,
3565 RELOC_SECTION (htab
, ".bss"));
3567 if (htab
->vxworks_p
)
3569 if (!elf_vxworks_create_dynamic_sections (dynobj
, info
, &htab
->srelplt2
))
3572 if (bfd_link_pic (info
))
3574 htab
->plt_header_size
= 0;
3575 htab
->plt_entry_size
3576 = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry
);
3580 htab
->plt_header_size
3581 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry
);
3582 htab
->plt_entry_size
3583 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry
);
3586 if (elf_elfheader (dynobj
))
3587 elf_elfheader (dynobj
)->e_ident
[EI_CLASS
] = ELFCLASS32
;
3592 Test for thumb only architectures. Note - we cannot just call
3593 using_thumb_only() as the attributes in the output bfd have not been
3594 initialised at this point, so instead we use the input bfd. */
3595 bfd
* saved_obfd
= htab
->obfd
;
3597 htab
->obfd
= dynobj
;
3598 if (using_thumb_only (htab
))
3600 htab
->plt_header_size
= 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry
);
3601 htab
->plt_entry_size
= 4 * ARRAY_SIZE (elf32_thumb2_plt_entry
);
3603 htab
->obfd
= saved_obfd
;
3606 if (!htab
->root
.splt
3607 || !htab
->root
.srelplt
3609 || (!bfd_link_pic (info
) && !htab
->srelbss
))
3615 /* Copy the extra info we tack onto an elf_link_hash_entry. */
3618 elf32_arm_copy_indirect_symbol (struct bfd_link_info
*info
,
3619 struct elf_link_hash_entry
*dir
,
3620 struct elf_link_hash_entry
*ind
)
3622 struct elf32_arm_link_hash_entry
*edir
, *eind
;
3624 edir
= (struct elf32_arm_link_hash_entry
*) dir
;
3625 eind
= (struct elf32_arm_link_hash_entry
*) ind
;
3627 if (eind
->dyn_relocs
!= NULL
)
3629 if (edir
->dyn_relocs
!= NULL
)
3631 struct elf_dyn_relocs
**pp
;
3632 struct elf_dyn_relocs
*p
;
3634 /* Add reloc counts against the indirect sym to the direct sym
3635 list. Merge any entries against the same section. */
3636 for (pp
= &eind
->dyn_relocs
; (p
= *pp
) != NULL
; )
3638 struct elf_dyn_relocs
*q
;
3640 for (q
= edir
->dyn_relocs
; q
!= NULL
; q
= q
->next
)
3641 if (q
->sec
== p
->sec
)
3643 q
->pc_count
+= p
->pc_count
;
3644 q
->count
+= p
->count
;
3651 *pp
= edir
->dyn_relocs
;
3654 edir
->dyn_relocs
= eind
->dyn_relocs
;
3655 eind
->dyn_relocs
= NULL
;
3658 if (ind
->root
.type
== bfd_link_hash_indirect
)
3660 /* Copy over PLT info. */
3661 edir
->plt
.thumb_refcount
+= eind
->plt
.thumb_refcount
;
3662 eind
->plt
.thumb_refcount
= 0;
3663 edir
->plt
.maybe_thumb_refcount
+= eind
->plt
.maybe_thumb_refcount
;
3664 eind
->plt
.maybe_thumb_refcount
= 0;
3665 edir
->plt
.noncall_refcount
+= eind
->plt
.noncall_refcount
;
3666 eind
->plt
.noncall_refcount
= 0;
3668 /* We should only allocate a function to .iplt once the final
3669 symbol information is known. */
3670 BFD_ASSERT (!eind
->is_iplt
);
3672 if (dir
->got
.refcount
<= 0)
3674 edir
->tls_type
= eind
->tls_type
;
3675 eind
->tls_type
= GOT_UNKNOWN
;
3679 _bfd_elf_link_hash_copy_indirect (info
, dir
, ind
);
3682 /* Destroy an ARM elf linker hash table. */
3685 elf32_arm_link_hash_table_free (bfd
*obfd
)
3687 struct elf32_arm_link_hash_table
*ret
3688 = (struct elf32_arm_link_hash_table
*) obfd
->link
.hash
;
3690 bfd_hash_table_free (&ret
->stub_hash_table
);
3691 _bfd_elf_link_hash_table_free (obfd
);
3694 /* Create an ARM elf linker hash table. */
3696 static struct bfd_link_hash_table
*
3697 elf32_arm_link_hash_table_create (bfd
*abfd
)
3699 struct elf32_arm_link_hash_table
*ret
;
3700 bfd_size_type amt
= sizeof (struct elf32_arm_link_hash_table
);
3702 ret
= (struct elf32_arm_link_hash_table
*) bfd_zmalloc (amt
);
3706 if (!_bfd_elf_link_hash_table_init (& ret
->root
, abfd
,
3707 elf32_arm_link_hash_newfunc
,
3708 sizeof (struct elf32_arm_link_hash_entry
),
3715 ret
->vfp11_fix
= BFD_ARM_VFP11_FIX_NONE
;
3716 ret
->stm32l4xx_fix
= BFD_ARM_STM32L4XX_FIX_NONE
;
3717 #ifdef FOUR_WORD_PLT
3718 ret
->plt_header_size
= 16;
3719 ret
->plt_entry_size
= 16;
3721 ret
->plt_header_size
= 20;
3722 ret
->plt_entry_size
= elf32_arm_use_long_plt_entry
? 16 : 12;
3727 if (!bfd_hash_table_init (&ret
->stub_hash_table
, stub_hash_newfunc
,
3728 sizeof (struct elf32_arm_stub_hash_entry
)))
3730 _bfd_elf_link_hash_table_free (abfd
);
3733 ret
->root
.root
.hash_table_free
= elf32_arm_link_hash_table_free
;
3735 return &ret
->root
.root
;
3738 /* Determine what kind of NOPs are available. */
3741 arch_has_arm_nop (struct elf32_arm_link_hash_table
*globals
)
3743 const int arch
= bfd_elf_get_obj_attr_int (globals
->obfd
, OBJ_ATTR_PROC
,
3745 return arch
== TAG_CPU_ARCH_V6T2
3746 || arch
== TAG_CPU_ARCH_V6K
3747 || arch
== TAG_CPU_ARCH_V7
3748 || arch
== TAG_CPU_ARCH_V7E_M
;
3752 arch_has_thumb2_nop (struct elf32_arm_link_hash_table
*globals
)
3754 const int arch
= bfd_elf_get_obj_attr_int (globals
->obfd
, OBJ_ATTR_PROC
,
3756 return (arch
== TAG_CPU_ARCH_V6T2
|| arch
== TAG_CPU_ARCH_V7
3757 || arch
== TAG_CPU_ARCH_V7E_M
);
3761 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type
)
3765 case arm_stub_long_branch_thumb_only
:
3766 case arm_stub_long_branch_v4t_thumb_arm
:
3767 case arm_stub_short_branch_v4t_thumb_arm
:
3768 case arm_stub_long_branch_v4t_thumb_arm_pic
:
3769 case arm_stub_long_branch_v4t_thumb_tls_pic
:
3770 case arm_stub_long_branch_thumb_only_pic
:
3781 /* Determine the type of stub needed, if any, for a call. */
3783 static enum elf32_arm_stub_type
3784 arm_type_of_stub (struct bfd_link_info
*info
,
3785 asection
*input_sec
,
3786 const Elf_Internal_Rela
*rel
,
3787 unsigned char st_type
,
3788 enum arm_st_branch_type
*actual_branch_type
,
3789 struct elf32_arm_link_hash_entry
*hash
,
3790 bfd_vma destination
,
3796 bfd_signed_vma branch_offset
;
3797 unsigned int r_type
;
3798 struct elf32_arm_link_hash_table
* globals
;
3801 enum elf32_arm_stub_type stub_type
= arm_stub_none
;
3803 enum arm_st_branch_type branch_type
= *actual_branch_type
;
3804 union gotplt_union
*root_plt
;
3805 struct arm_plt_info
*arm_plt
;
3807 if (branch_type
== ST_BRANCH_LONG
)
3810 globals
= elf32_arm_hash_table (info
);
3811 if (globals
== NULL
)
3814 thumb_only
= using_thumb_only (globals
);
3816 thumb2
= using_thumb2 (globals
);
3818 /* Determine where the call point is. */
3819 location
= (input_sec
->output_offset
3820 + input_sec
->output_section
->vma
3823 r_type
= ELF32_R_TYPE (rel
->r_info
);
3825 /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
3826 are considering a function call relocation. */
3827 if (thumb_only
&& (r_type
== R_ARM_THM_CALL
|| r_type
== R_ARM_THM_JUMP24
3828 || r_type
== R_ARM_THM_JUMP19
)
3829 && branch_type
== ST_BRANCH_TO_ARM
)
3830 branch_type
= ST_BRANCH_TO_THUMB
;
3832 /* For TLS call relocs, it is the caller's responsibility to provide
3833 the address of the appropriate trampoline. */
3834 if (r_type
!= R_ARM_TLS_CALL
3835 && r_type
!= R_ARM_THM_TLS_CALL
3836 && elf32_arm_get_plt_info (input_bfd
, hash
, ELF32_R_SYM (rel
->r_info
),
3837 &root_plt
, &arm_plt
)
3838 && root_plt
->offset
!= (bfd_vma
) -1)
3842 if (hash
== NULL
|| hash
->is_iplt
)
3843 splt
= globals
->root
.iplt
;
3845 splt
= globals
->root
.splt
;
3850 /* Note when dealing with PLT entries: the main PLT stub is in
3851 ARM mode, so if the branch is in Thumb mode, another
3852 Thumb->ARM stub will be inserted later just before the ARM
3853 PLT stub. We don't take this extra distance into account
3854 here, because if a long branch stub is needed, we'll add a
3855 Thumb->Arm one and branch directly to the ARM PLT entry
3856 because it avoids spreading offset corrections in several
3859 destination
= (splt
->output_section
->vma
3860 + splt
->output_offset
3861 + root_plt
->offset
);
3863 branch_type
= ST_BRANCH_TO_ARM
;
3866 /* Calls to STT_GNU_IFUNC symbols should go through a PLT. */
3867 BFD_ASSERT (st_type
!= STT_GNU_IFUNC
);
3869 branch_offset
= (bfd_signed_vma
)(destination
- location
);
3871 if (r_type
== R_ARM_THM_CALL
|| r_type
== R_ARM_THM_JUMP24
3872 || r_type
== R_ARM_THM_TLS_CALL
|| r_type
== R_ARM_THM_JUMP19
)
3874 /* Handle cases where:
3875 - this call goes too far (different Thumb/Thumb2 max
3877 - it's a Thumb->Arm call and blx is not available, or it's a
3878 Thumb->Arm branch (not bl). A stub is needed in this case,
3879 but only if this call is not through a PLT entry. Indeed,
3880 PLT stubs handle mode switching already.
3883 && (branch_offset
> THM_MAX_FWD_BRANCH_OFFSET
3884 || (branch_offset
< THM_MAX_BWD_BRANCH_OFFSET
)))
3886 && (branch_offset
> THM2_MAX_FWD_BRANCH_OFFSET
3887 || (branch_offset
< THM2_MAX_BWD_BRANCH_OFFSET
)))
3889 && (branch_offset
> THM2_MAX_FWD_COND_BRANCH_OFFSET
3890 || (branch_offset
< THM2_MAX_BWD_COND_BRANCH_OFFSET
))
3891 && (r_type
== R_ARM_THM_JUMP19
))
3892 || (branch_type
== ST_BRANCH_TO_ARM
3893 && (((r_type
== R_ARM_THM_CALL
3894 || r_type
== R_ARM_THM_TLS_CALL
) && !globals
->use_blx
)
3895 || (r_type
== R_ARM_THM_JUMP24
)
3896 || (r_type
== R_ARM_THM_JUMP19
))
3899 if (branch_type
== ST_BRANCH_TO_THUMB
)
3901 /* Thumb to thumb. */
3904 stub_type
= (bfd_link_pic (info
) | globals
->pic_veneer
)
3906 ? ((globals
->use_blx
3907 && (r_type
== R_ARM_THM_CALL
))
3908 /* V5T and above. Stub starts with ARM code, so
3909 we must be able to switch mode before
3910 reaching it, which is only possible for 'bl'
3911 (ie R_ARM_THM_CALL relocation). */
3912 ? arm_stub_long_branch_any_thumb_pic
3913 /* On V4T, use Thumb code only. */
3914 : arm_stub_long_branch_v4t_thumb_thumb_pic
)
3916 /* non-PIC stubs. */
3917 : ((globals
->use_blx
3918 && (r_type
== R_ARM_THM_CALL
))
3919 /* V5T and above. */
3920 ? arm_stub_long_branch_any_any
3922 : arm_stub_long_branch_v4t_thumb_thumb
);
3926 stub_type
= (bfd_link_pic (info
) | globals
->pic_veneer
)
3928 ? arm_stub_long_branch_thumb_only_pic
3930 : arm_stub_long_branch_thumb_only
;
3937 && sym_sec
->owner
!= NULL
3938 && !INTERWORK_FLAG (sym_sec
->owner
))
3940 (*_bfd_error_handler
)
3941 (_("%B(%s): warning: interworking not enabled.\n"
3942 " first occurrence: %B: Thumb call to ARM"),
3943 sym_sec
->owner
, input_bfd
, name
);
3947 (bfd_link_pic (info
) | globals
->pic_veneer
)
3949 ? (r_type
== R_ARM_THM_TLS_CALL
3950 /* TLS PIC stubs. */
3951 ? (globals
->use_blx
? arm_stub_long_branch_any_tls_pic
3952 : arm_stub_long_branch_v4t_thumb_tls_pic
)
3953 : ((globals
->use_blx
&& r_type
== R_ARM_THM_CALL
)
3954 /* V5T PIC and above. */
3955 ? arm_stub_long_branch_any_arm_pic
3957 : arm_stub_long_branch_v4t_thumb_arm_pic
))
3959 /* non-PIC stubs. */
3960 : ((globals
->use_blx
&& r_type
== R_ARM_THM_CALL
)
3961 /* V5T and above. */
3962 ? arm_stub_long_branch_any_any
3964 : arm_stub_long_branch_v4t_thumb_arm
);
3966 /* Handle v4t short branches. */
3967 if ((stub_type
== arm_stub_long_branch_v4t_thumb_arm
)
3968 && (branch_offset
<= THM_MAX_FWD_BRANCH_OFFSET
)
3969 && (branch_offset
>= THM_MAX_BWD_BRANCH_OFFSET
))
3970 stub_type
= arm_stub_short_branch_v4t_thumb_arm
;
3974 else if (r_type
== R_ARM_CALL
3975 || r_type
== R_ARM_JUMP24
3976 || r_type
== R_ARM_PLT32
3977 || r_type
== R_ARM_TLS_CALL
)
3979 if (branch_type
== ST_BRANCH_TO_THUMB
)
3984 && sym_sec
->owner
!= NULL
3985 && !INTERWORK_FLAG (sym_sec
->owner
))
3987 (*_bfd_error_handler
)
3988 (_("%B(%s): warning: interworking not enabled.\n"
3989 " first occurrence: %B: ARM call to Thumb"),
3990 sym_sec
->owner
, input_bfd
, name
);
3993 /* We have an extra 2-bytes reach because of
3994 the mode change (bit 24 (H) of BLX encoding). */
3995 if (branch_offset
> (ARM_MAX_FWD_BRANCH_OFFSET
+ 2)
3996 || (branch_offset
< ARM_MAX_BWD_BRANCH_OFFSET
)
3997 || (r_type
== R_ARM_CALL
&& !globals
->use_blx
)
3998 || (r_type
== R_ARM_JUMP24
)
3999 || (r_type
== R_ARM_PLT32
))
4001 stub_type
= (bfd_link_pic (info
) | globals
->pic_veneer
)
4003 ? ((globals
->use_blx
)
4004 /* V5T and above. */
4005 ? arm_stub_long_branch_any_thumb_pic
4007 : arm_stub_long_branch_v4t_arm_thumb_pic
)
4009 /* non-PIC stubs. */
4010 : ((globals
->use_blx
)
4011 /* V5T and above. */
4012 ? arm_stub_long_branch_any_any
4014 : arm_stub_long_branch_v4t_arm_thumb
);
4020 if (branch_offset
> ARM_MAX_FWD_BRANCH_OFFSET
4021 || (branch_offset
< ARM_MAX_BWD_BRANCH_OFFSET
))
4024 (bfd_link_pic (info
) | globals
->pic_veneer
)
4026 ? (r_type
== R_ARM_TLS_CALL
4028 ? arm_stub_long_branch_any_tls_pic
4030 ? arm_stub_long_branch_arm_nacl_pic
4031 : arm_stub_long_branch_any_arm_pic
))
4032 /* non-PIC stubs. */
4034 ? arm_stub_long_branch_arm_nacl
4035 : arm_stub_long_branch_any_any
);
4040 /* If a stub is needed, record the actual destination type. */
4041 if (stub_type
!= arm_stub_none
)
4042 *actual_branch_type
= branch_type
;
4047 /* Build a name for an entry in the stub hash table. */
4050 elf32_arm_stub_name (const asection
*input_section
,
4051 const asection
*sym_sec
,
4052 const struct elf32_arm_link_hash_entry
*hash
,
4053 const Elf_Internal_Rela
*rel
,
4054 enum elf32_arm_stub_type stub_type
)
4061 len
= 8 + 1 + strlen (hash
->root
.root
.root
.string
) + 1 + 8 + 1 + 2 + 1;
4062 stub_name
= (char *) bfd_malloc (len
);
4063 if (stub_name
!= NULL
)
4064 sprintf (stub_name
, "%08x_%s+%x_%d",
4065 input_section
->id
& 0xffffffff,
4066 hash
->root
.root
.root
.string
,
4067 (int) rel
->r_addend
& 0xffffffff,
4072 len
= 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 2 + 1;
4073 stub_name
= (char *) bfd_malloc (len
);
4074 if (stub_name
!= NULL
)
4075 sprintf (stub_name
, "%08x_%x:%x+%x_%d",
4076 input_section
->id
& 0xffffffff,
4077 sym_sec
->id
& 0xffffffff,
4078 ELF32_R_TYPE (rel
->r_info
) == R_ARM_TLS_CALL
4079 || ELF32_R_TYPE (rel
->r_info
) == R_ARM_THM_TLS_CALL
4080 ? 0 : (int) ELF32_R_SYM (rel
->r_info
) & 0xffffffff,
4081 (int) rel
->r_addend
& 0xffffffff,
4088 /* Look up an entry in the stub hash. Stub entries are cached because
4089 creating the stub name takes a bit of time. */
4091 static struct elf32_arm_stub_hash_entry
*
4092 elf32_arm_get_stub_entry (const asection
*input_section
,
4093 const asection
*sym_sec
,
4094 struct elf_link_hash_entry
*hash
,
4095 const Elf_Internal_Rela
*rel
,
4096 struct elf32_arm_link_hash_table
*htab
,
4097 enum elf32_arm_stub_type stub_type
)
4099 struct elf32_arm_stub_hash_entry
*stub_entry
;
4100 struct elf32_arm_link_hash_entry
*h
= (struct elf32_arm_link_hash_entry
*) hash
;
4101 const asection
*id_sec
;
4103 if ((input_section
->flags
& SEC_CODE
) == 0)
4106 /* If this input section is part of a group of sections sharing one
4107 stub section, then use the id of the first section in the group.
4108 Stub names need to include a section id, as there may well be
4109 more than one stub used to reach say, printf, and we need to
4110 distinguish between them. */
4111 id_sec
= htab
->stub_group
[input_section
->id
].link_sec
;
4113 if (h
!= NULL
&& h
->stub_cache
!= NULL
4114 && h
->stub_cache
->h
== h
4115 && h
->stub_cache
->id_sec
== id_sec
4116 && h
->stub_cache
->stub_type
== stub_type
)
4118 stub_entry
= h
->stub_cache
;
4124 stub_name
= elf32_arm_stub_name (id_sec
, sym_sec
, h
, rel
, stub_type
);
4125 if (stub_name
== NULL
)
4128 stub_entry
= arm_stub_hash_lookup (&htab
->stub_hash_table
,
4129 stub_name
, FALSE
, FALSE
);
4131 h
->stub_cache
= stub_entry
;
4139 /* Whether veneers of type STUB_TYPE require to be in a dedicated output
4143 arm_dedicated_stub_output_section_required (enum elf32_arm_stub_type stub_type
)
4145 if (stub_type
>= max_stub_type
)
4146 abort (); /* Should be unreachable. */
4151 /* Required alignment (as a power of 2) for the dedicated section holding
4152 veneers of type STUB_TYPE, or 0 if veneers of this type are interspersed
4153 with input sections. */
4156 arm_dedicated_stub_output_section_required_alignment
4157 (enum elf32_arm_stub_type stub_type
)
4159 if (stub_type
>= max_stub_type
)
4160 abort (); /* Should be unreachable. */
4162 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type
));
4166 /* Name of the dedicated output section to put veneers of type STUB_TYPE, or
4167 NULL if veneers of this type are interspersed with input sections. */
4170 arm_dedicated_stub_output_section_name (enum elf32_arm_stub_type stub_type
)
4172 if (stub_type
>= max_stub_type
)
4173 abort (); /* Should be unreachable. */
4175 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type
));
4179 /* If veneers of type STUB_TYPE should go in a dedicated output section,
4180 returns the address of the hash table field in HTAB holding a pointer to the
4181 corresponding input section. Otherwise, returns NULL. */
4184 arm_dedicated_stub_input_section_ptr
4185 (struct elf32_arm_link_hash_table
*htab ATTRIBUTE_UNUSED
,
4186 enum elf32_arm_stub_type stub_type
)
4188 if (stub_type
>= max_stub_type
)
4189 abort (); /* Should be unreachable. */
4191 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type
));
4195 /* Find or create a stub section to contain a stub of type STUB_TYPE. SECTION
4196 is the section that branch into veneer and can be NULL if stub should go in
4197 a dedicated output section. Returns a pointer to the stub section, and the
4198 section to which the stub section will be attached (in *LINK_SEC_P).
4199 LINK_SEC_P may be NULL. */
4202 elf32_arm_create_or_find_stub_sec (asection
**link_sec_p
, asection
*section
,
4203 struct elf32_arm_link_hash_table
*htab
,
4204 enum elf32_arm_stub_type stub_type
)
4206 asection
*link_sec
, *out_sec
, **stub_sec_p
;
4207 const char *stub_sec_prefix
;
4208 bfd_boolean dedicated_output_section
=
4209 arm_dedicated_stub_output_section_required (stub_type
);
4212 if (dedicated_output_section
)
4214 bfd
*output_bfd
= htab
->obfd
;
4215 const char *out_sec_name
=
4216 arm_dedicated_stub_output_section_name (stub_type
);
4218 stub_sec_p
= arm_dedicated_stub_input_section_ptr (htab
, stub_type
);
4219 stub_sec_prefix
= out_sec_name
;
4220 align
= arm_dedicated_stub_output_section_required_alignment (stub_type
);
4221 out_sec
= bfd_get_section_by_name (output_bfd
, out_sec_name
);
4222 if (out_sec
== NULL
)
4224 (*_bfd_error_handler
) (_("No address assigned to the veneers output "
4225 "section %s"), out_sec_name
);
4231 link_sec
= htab
->stub_group
[section
->id
].link_sec
;
4232 BFD_ASSERT (link_sec
!= NULL
);
4233 stub_sec_p
= &htab
->stub_group
[section
->id
].stub_sec
;
4234 if (*stub_sec_p
== NULL
)
4235 stub_sec_p
= &htab
->stub_group
[link_sec
->id
].stub_sec
;
4236 stub_sec_prefix
= link_sec
->name
;
4237 out_sec
= link_sec
->output_section
;
4238 align
= htab
->nacl_p
? 4 : 3;
4241 if (*stub_sec_p
== NULL
)
4247 namelen
= strlen (stub_sec_prefix
);
4248 len
= namelen
+ sizeof (STUB_SUFFIX
);
4249 s_name
= (char *) bfd_alloc (htab
->stub_bfd
, len
);
4253 memcpy (s_name
, stub_sec_prefix
, namelen
);
4254 memcpy (s_name
+ namelen
, STUB_SUFFIX
, sizeof (STUB_SUFFIX
));
4255 *stub_sec_p
= (*htab
->add_stub_section
) (s_name
, out_sec
, link_sec
,
4257 if (*stub_sec_p
== NULL
)
4260 out_sec
->flags
|= SEC_ALLOC
| SEC_LOAD
| SEC_READONLY
| SEC_CODE
4261 | SEC_HAS_CONTENTS
| SEC_RELOC
| SEC_IN_MEMORY
4265 if (!dedicated_output_section
)
4266 htab
->stub_group
[section
->id
].stub_sec
= *stub_sec_p
;
4269 *link_sec_p
= link_sec
;
4274 /* Add a new stub entry to the stub hash. Not all fields of the new
4275 stub entry are initialised. */
4277 static struct elf32_arm_stub_hash_entry
*
4278 elf32_arm_add_stub (const char *stub_name
, asection
*section
,
4279 struct elf32_arm_link_hash_table
*htab
,
4280 enum elf32_arm_stub_type stub_type
)
4284 struct elf32_arm_stub_hash_entry
*stub_entry
;
4286 stub_sec
= elf32_arm_create_or_find_stub_sec (&link_sec
, section
, htab
,
4288 if (stub_sec
== NULL
)
4291 /* Enter this entry into the linker stub hash table. */
4292 stub_entry
= arm_stub_hash_lookup (&htab
->stub_hash_table
, stub_name
,
4294 if (stub_entry
== NULL
)
4296 if (section
== NULL
)
4298 (*_bfd_error_handler
) (_("%s: cannot create stub entry %s"),
4304 stub_entry
->stub_sec
= stub_sec
;
4305 stub_entry
->stub_offset
= 0;
4306 stub_entry
->id_sec
= link_sec
;
4311 /* Store an Arm insn into an output section not processed by
4312 elf32_arm_write_section. */
4315 put_arm_insn (struct elf32_arm_link_hash_table
* htab
,
4316 bfd
* output_bfd
, bfd_vma val
, void * ptr
)
4318 if (htab
->byteswap_code
!= bfd_little_endian (output_bfd
))
4319 bfd_putl32 (val
, ptr
);
4321 bfd_putb32 (val
, ptr
);
4324 /* Store a 16-bit Thumb insn into an output section not processed by
4325 elf32_arm_write_section. */
4328 put_thumb_insn (struct elf32_arm_link_hash_table
* htab
,
4329 bfd
* output_bfd
, bfd_vma val
, void * ptr
)
4331 if (htab
->byteswap_code
!= bfd_little_endian (output_bfd
))
4332 bfd_putl16 (val
, ptr
);
4334 bfd_putb16 (val
, ptr
);
4337 /* Store a Thumb2 insn into an output section not processed by
4338 elf32_arm_write_section. */
4341 put_thumb2_insn (struct elf32_arm_link_hash_table
* htab
,
4342 bfd
* output_bfd
, bfd_vma val
, bfd_byte
* ptr
)
4344 /* T2 instructions are 16-bit streamed. */
4345 if (htab
->byteswap_code
!= bfd_little_endian (output_bfd
))
4347 bfd_putl16 ((val
>> 16) & 0xffff, ptr
);
4348 bfd_putl16 ((val
& 0xffff), ptr
+ 2);
4352 bfd_putb16 ((val
>> 16) & 0xffff, ptr
);
4353 bfd_putb16 ((val
& 0xffff), ptr
+ 2);
4357 /* If it's possible to change R_TYPE to a more efficient access
4358 model, return the new reloc type. */
4361 elf32_arm_tls_transition (struct bfd_link_info
*info
, int r_type
,
4362 struct elf_link_hash_entry
*h
)
4364 int is_local
= (h
== NULL
);
4366 if (bfd_link_pic (info
)
4367 || (h
&& h
->root
.type
== bfd_link_hash_undefweak
))
4370 /* We do not support relaxations for Old TLS models. */
4373 case R_ARM_TLS_GOTDESC
:
4374 case R_ARM_TLS_CALL
:
4375 case R_ARM_THM_TLS_CALL
:
4376 case R_ARM_TLS_DESCSEQ
:
4377 case R_ARM_THM_TLS_DESCSEQ
:
4378 return is_local
? R_ARM_TLS_LE32
: R_ARM_TLS_IE32
;
4384 static bfd_reloc_status_type elf32_arm_final_link_relocate
4385 (reloc_howto_type
*, bfd
*, bfd
*, asection
*, bfd_byte
*,
4386 Elf_Internal_Rela
*, bfd_vma
, struct bfd_link_info
*, asection
*,
4387 const char *, unsigned char, enum arm_st_branch_type
,
4388 struct elf_link_hash_entry
*, bfd_boolean
*, char **);
4391 arm_stub_required_alignment (enum elf32_arm_stub_type stub_type
)
4395 case arm_stub_a8_veneer_b_cond
:
4396 case arm_stub_a8_veneer_b
:
4397 case arm_stub_a8_veneer_bl
:
4400 case arm_stub_long_branch_any_any
:
4401 case arm_stub_long_branch_v4t_arm_thumb
:
4402 case arm_stub_long_branch_thumb_only
:
4403 case arm_stub_long_branch_v4t_thumb_thumb
:
4404 case arm_stub_long_branch_v4t_thumb_arm
:
4405 case arm_stub_short_branch_v4t_thumb_arm
:
4406 case arm_stub_long_branch_any_arm_pic
:
4407 case arm_stub_long_branch_any_thumb_pic
:
4408 case arm_stub_long_branch_v4t_thumb_thumb_pic
:
4409 case arm_stub_long_branch_v4t_arm_thumb_pic
:
4410 case arm_stub_long_branch_v4t_thumb_arm_pic
:
4411 case arm_stub_long_branch_thumb_only_pic
:
4412 case arm_stub_long_branch_any_tls_pic
:
4413 case arm_stub_long_branch_v4t_thumb_tls_pic
:
4414 case arm_stub_a8_veneer_blx
:
4417 case arm_stub_long_branch_arm_nacl
:
4418 case arm_stub_long_branch_arm_nacl_pic
:
4422 abort (); /* Should be unreachable. */
4426 /* Returns whether stubs of type STUB_TYPE take over the symbol they are
4427 veneering (TRUE) or have their own symbol (FALSE). */
4430 arm_stub_sym_claimed (enum elf32_arm_stub_type stub_type
)
4432 if (stub_type
>= max_stub_type
)
4433 abort (); /* Should be unreachable. */
4439 arm_build_one_stub (struct bfd_hash_entry
*gen_entry
,
4443 struct elf32_arm_stub_hash_entry
*stub_entry
;
4444 struct elf32_arm_link_hash_table
*globals
;
4445 struct bfd_link_info
*info
;
4452 const insn_sequence
*template_sequence
;
4454 int stub_reloc_idx
[MAXRELOCS
] = {-1, -1};
4455 int stub_reloc_offset
[MAXRELOCS
] = {0, 0};
4458 /* Massage our args to the form they really have. */
4459 stub_entry
= (struct elf32_arm_stub_hash_entry
*) gen_entry
;
4460 info
= (struct bfd_link_info
*) in_arg
;
4462 globals
= elf32_arm_hash_table (info
);
4463 if (globals
== NULL
)
4466 stub_sec
= stub_entry
->stub_sec
;
4468 if ((globals
->fix_cortex_a8
< 0)
4469 != (arm_stub_required_alignment (stub_entry
->stub_type
) == 2))
4470 /* We have to do less-strictly-aligned fixes last. */
4473 /* Make a note of the offset within the stubs for this entry. */
4474 stub_entry
->stub_offset
= stub_sec
->size
;
4475 loc
= stub_sec
->contents
+ stub_entry
->stub_offset
;
4477 stub_bfd
= stub_sec
->owner
;
4479 /* This is the address of the stub destination. */
4480 sym_value
= (stub_entry
->target_value
4481 + stub_entry
->target_section
->output_offset
4482 + stub_entry
->target_section
->output_section
->vma
);
4484 template_sequence
= stub_entry
->stub_template
;
4485 template_size
= stub_entry
->stub_template_size
;
4488 for (i
= 0; i
< template_size
; i
++)
4490 switch (template_sequence
[i
].type
)
4494 bfd_vma data
= (bfd_vma
) template_sequence
[i
].data
;
4495 if (template_sequence
[i
].reloc_addend
!= 0)
4497 /* We've borrowed the reloc_addend field to mean we should
4498 insert a condition code into this (Thumb-1 branch)
4499 instruction. See THUMB16_BCOND_INSN. */
4500 BFD_ASSERT ((data
& 0xff00) == 0xd000);
4501 data
|= ((stub_entry
->orig_insn
>> 22) & 0xf) << 8;
4503 bfd_put_16 (stub_bfd
, data
, loc
+ size
);
4509 bfd_put_16 (stub_bfd
,
4510 (template_sequence
[i
].data
>> 16) & 0xffff,
4512 bfd_put_16 (stub_bfd
, template_sequence
[i
].data
& 0xffff,
4514 if (template_sequence
[i
].r_type
!= R_ARM_NONE
)
4516 stub_reloc_idx
[nrelocs
] = i
;
4517 stub_reloc_offset
[nrelocs
++] = size
;
4523 bfd_put_32 (stub_bfd
, template_sequence
[i
].data
,
4525 /* Handle cases where the target is encoded within the
4527 if (template_sequence
[i
].r_type
== R_ARM_JUMP24
)
4529 stub_reloc_idx
[nrelocs
] = i
;
4530 stub_reloc_offset
[nrelocs
++] = size
;
4536 bfd_put_32 (stub_bfd
, template_sequence
[i
].data
, loc
+ size
);
4537 stub_reloc_idx
[nrelocs
] = i
;
4538 stub_reloc_offset
[nrelocs
++] = size
;
4548 stub_sec
->size
+= size
;
4550 /* Stub size has already been computed in arm_size_one_stub. Check
4552 BFD_ASSERT (size
== stub_entry
->stub_size
);
4554 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
4555 if (stub_entry
->branch_type
== ST_BRANCH_TO_THUMB
)
4558 /* Assume there is at least one and at most MAXRELOCS entries to relocate
4560 BFD_ASSERT (nrelocs
!= 0 && nrelocs
<= MAXRELOCS
);
4562 for (i
= 0; i
< nrelocs
; i
++)
4564 Elf_Internal_Rela rel
;
4565 bfd_boolean unresolved_reloc
;
4566 char *error_message
;
4568 sym_value
+ template_sequence
[stub_reloc_idx
[i
]].reloc_addend
;
4570 rel
.r_offset
= stub_entry
->stub_offset
+ stub_reloc_offset
[i
];
4571 rel
.r_info
= ELF32_R_INFO (0,
4572 template_sequence
[stub_reloc_idx
[i
]].r_type
);
4575 if (stub_entry
->stub_type
== arm_stub_a8_veneer_b_cond
&& i
== 0)
4576 /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
4577 template should refer back to the instruction after the original
4578 branch. We use target_section as Cortex-A8 erratum workaround stubs
4579 are only generated when both source and target are in the same
4581 points_to
= stub_entry
->target_section
->output_section
->vma
4582 + stub_entry
->target_section
->output_offset
4583 + stub_entry
->source_value
;
4585 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
4586 (template_sequence
[stub_reloc_idx
[i
]].r_type
),
4587 stub_bfd
, info
->output_bfd
, stub_sec
, stub_sec
->contents
, &rel
,
4588 points_to
, info
, stub_entry
->target_section
, "", STT_FUNC
,
4589 stub_entry
->branch_type
,
4590 (struct elf_link_hash_entry
*) stub_entry
->h
, &unresolved_reloc
,
4598 /* Calculate the template, template size and instruction size for a stub.
4599 Return value is the instruction size. */
4602 find_stub_size_and_template (enum elf32_arm_stub_type stub_type
,
4603 const insn_sequence
**stub_template
,
4604 int *stub_template_size
)
4606 const insn_sequence
*template_sequence
= NULL
;
4607 int template_size
= 0, i
;
4610 template_sequence
= stub_definitions
[stub_type
].template_sequence
;
4612 *stub_template
= template_sequence
;
4614 template_size
= stub_definitions
[stub_type
].template_size
;
4615 if (stub_template_size
)
4616 *stub_template_size
= template_size
;
4619 for (i
= 0; i
< template_size
; i
++)
4621 switch (template_sequence
[i
].type
)
4642 /* As above, but don't actually build the stub. Just bump offset so
4643 we know stub section sizes. */
4646 arm_size_one_stub (struct bfd_hash_entry
*gen_entry
,
4647 void *in_arg ATTRIBUTE_UNUSED
)
4649 struct elf32_arm_stub_hash_entry
*stub_entry
;
4650 const insn_sequence
*template_sequence
;
4651 int template_size
, size
;
4653 /* Massage our args to the form they really have. */
4654 stub_entry
= (struct elf32_arm_stub_hash_entry
*) gen_entry
;
4656 BFD_ASSERT((stub_entry
->stub_type
> arm_stub_none
)
4657 && stub_entry
->stub_type
< ARRAY_SIZE(stub_definitions
));
4659 size
= find_stub_size_and_template (stub_entry
->stub_type
, &template_sequence
,
4662 stub_entry
->stub_size
= size
;
4663 stub_entry
->stub_template
= template_sequence
;
4664 stub_entry
->stub_template_size
= template_size
;
4666 size
= (size
+ 7) & ~7;
4667 stub_entry
->stub_sec
->size
+= size
;
4672 /* External entry points for sizing and building linker stubs. */
4674 /* Set up various things so that we can make a list of input sections
4675 for each output section included in the link. Returns -1 on error,
4676 0 when no stubs will be needed, and 1 on success. */
4679 elf32_arm_setup_section_lists (bfd
*output_bfd
,
4680 struct bfd_link_info
*info
)
4683 unsigned int bfd_count
;
4684 unsigned int top_id
, top_index
;
4686 asection
**input_list
, **list
;
4688 struct elf32_arm_link_hash_table
*htab
= elf32_arm_hash_table (info
);
4692 if (! is_elf_hash_table (htab
))
4695 /* Count the number of input BFDs and find the top input section id. */
4696 for (input_bfd
= info
->input_bfds
, bfd_count
= 0, top_id
= 0;
4698 input_bfd
= input_bfd
->link
.next
)
4701 for (section
= input_bfd
->sections
;
4703 section
= section
->next
)
4705 if (top_id
< section
->id
)
4706 top_id
= section
->id
;
4709 htab
->bfd_count
= bfd_count
;
4711 amt
= sizeof (struct map_stub
) * (top_id
+ 1);
4712 htab
->stub_group
= (struct map_stub
*) bfd_zmalloc (amt
);
4713 if (htab
->stub_group
== NULL
)
4715 htab
->top_id
= top_id
;
4717 /* We can't use output_bfd->section_count here to find the top output
4718 section index as some sections may have been removed, and
4719 _bfd_strip_section_from_output doesn't renumber the indices. */
4720 for (section
= output_bfd
->sections
, top_index
= 0;
4722 section
= section
->next
)
4724 if (top_index
< section
->index
)
4725 top_index
= section
->index
;
4728 htab
->top_index
= top_index
;
4729 amt
= sizeof (asection
*) * (top_index
+ 1);
4730 input_list
= (asection
**) bfd_malloc (amt
);
4731 htab
->input_list
= input_list
;
4732 if (input_list
== NULL
)
4735 /* For sections we aren't interested in, mark their entries with a
4736 value we can check later. */
4737 list
= input_list
+ top_index
;
4739 *list
= bfd_abs_section_ptr
;
4740 while (list
-- != input_list
);
4742 for (section
= output_bfd
->sections
;
4744 section
= section
->next
)
4746 if ((section
->flags
& SEC_CODE
) != 0)
4747 input_list
[section
->index
] = NULL
;
4753 /* The linker repeatedly calls this function for each input section,
4754 in the order that input sections are linked into output sections.
4755 Build lists of input sections to determine groupings between which
4756 we may insert linker stubs. */
4759 elf32_arm_next_input_section (struct bfd_link_info
*info
,
4762 struct elf32_arm_link_hash_table
*htab
= elf32_arm_hash_table (info
);
4767 if (isec
->output_section
->index
<= htab
->top_index
)
4769 asection
**list
= htab
->input_list
+ isec
->output_section
->index
;
4771 if (*list
!= bfd_abs_section_ptr
&& (isec
->flags
& SEC_CODE
) != 0)
4773 /* Steal the link_sec pointer for our list. */
4774 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
4775 /* This happens to make the list in reverse order,
4776 which we reverse later. */
4777 PREV_SEC (isec
) = *list
;
4783 /* See whether we can group stub sections together. Grouping stub
4784 sections may result in fewer stubs. More importantly, we need to
4785 put all .init* and .fini* stubs at the end of the .init or
4786 .fini output sections respectively, because glibc splits the
4787 _init and _fini functions into multiple parts. Putting a stub in
4788 the middle of a function is not a good idea. */
4791 group_sections (struct elf32_arm_link_hash_table
*htab
,
4792 bfd_size_type stub_group_size
,
4793 bfd_boolean stubs_always_after_branch
)
4795 asection
**list
= htab
->input_list
;
4799 asection
*tail
= *list
;
4802 if (tail
== bfd_abs_section_ptr
)
4805 /* Reverse the list: we must avoid placing stubs at the
4806 beginning of the section because the beginning of the text
4807 section may be required for an interrupt vector in bare metal
4809 #define NEXT_SEC PREV_SEC
4811 while (tail
!= NULL
)
4813 /* Pop from tail. */
4814 asection
*item
= tail
;
4815 tail
= PREV_SEC (item
);
4818 NEXT_SEC (item
) = head
;
4822 while (head
!= NULL
)
4826 bfd_vma stub_group_start
= head
->output_offset
;
4827 bfd_vma end_of_next
;
4830 while (NEXT_SEC (curr
) != NULL
)
4832 next
= NEXT_SEC (curr
);
4833 end_of_next
= next
->output_offset
+ next
->size
;
4834 if (end_of_next
- stub_group_start
>= stub_group_size
)
4835 /* End of NEXT is too far from start, so stop. */
4837 /* Add NEXT to the group. */
4841 /* OK, the size from the start to the start of CURR is less
4842 than stub_group_size and thus can be handled by one stub
4843 section. (Or the head section is itself larger than
4844 stub_group_size, in which case we may be toast.)
4845 We should really be keeping track of the total size of
4846 stubs added here, as stubs contribute to the final output
4850 next
= NEXT_SEC (head
);
4851 /* Set up this stub group. */
4852 htab
->stub_group
[head
->id
].link_sec
= curr
;
4854 while (head
!= curr
&& (head
= next
) != NULL
);
4856 /* But wait, there's more! Input sections up to stub_group_size
4857 bytes after the stub section can be handled by it too. */
4858 if (!stubs_always_after_branch
)
4860 stub_group_start
= curr
->output_offset
+ curr
->size
;
4862 while (next
!= NULL
)
4864 end_of_next
= next
->output_offset
+ next
->size
;
4865 if (end_of_next
- stub_group_start
>= stub_group_size
)
4866 /* End of NEXT is too far from stubs, so stop. */
4868 /* Add NEXT to the stub group. */
4870 next
= NEXT_SEC (head
);
4871 htab
->stub_group
[head
->id
].link_sec
= curr
;
4877 while (list
++ != htab
->input_list
+ htab
->top_index
);
4879 free (htab
->input_list
);
4884 /* Comparison function for sorting/searching relocations relating to Cortex-A8
4888 a8_reloc_compare (const void *a
, const void *b
)
4890 const struct a8_erratum_reloc
*ra
= (const struct a8_erratum_reloc
*) a
;
4891 const struct a8_erratum_reloc
*rb
= (const struct a8_erratum_reloc
*) b
;
4893 if (ra
->from
< rb
->from
)
4895 else if (ra
->from
> rb
->from
)
4901 static struct elf_link_hash_entry
*find_thumb_glue (struct bfd_link_info
*,
4902 const char *, char **);
4904 /* Helper function to scan code for sequences which might trigger the Cortex-A8
4905 branch/TLB erratum. Fill in the table described by A8_FIXES_P,
4906 NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false
4910 cortex_a8_erratum_scan (bfd
*input_bfd
,
4911 struct bfd_link_info
*info
,
4912 struct a8_erratum_fix
**a8_fixes_p
,
4913 unsigned int *num_a8_fixes_p
,
4914 unsigned int *a8_fix_table_size_p
,
4915 struct a8_erratum_reloc
*a8_relocs
,
4916 unsigned int num_a8_relocs
,
4917 unsigned prev_num_a8_fixes
,
4918 bfd_boolean
*stub_changed_p
)
4921 struct elf32_arm_link_hash_table
*htab
= elf32_arm_hash_table (info
);
4922 struct a8_erratum_fix
*a8_fixes
= *a8_fixes_p
;
4923 unsigned int num_a8_fixes
= *num_a8_fixes_p
;
4924 unsigned int a8_fix_table_size
= *a8_fix_table_size_p
;
4929 for (section
= input_bfd
->sections
;
4931 section
= section
->next
)
4933 bfd_byte
*contents
= NULL
;
4934 struct _arm_elf_section_data
*sec_data
;
4938 if (elf_section_type (section
) != SHT_PROGBITS
4939 || (elf_section_flags (section
) & SHF_EXECINSTR
) == 0
4940 || (section
->flags
& SEC_EXCLUDE
) != 0
4941 || (section
->sec_info_type
== SEC_INFO_TYPE_JUST_SYMS
)
4942 || (section
->output_section
== bfd_abs_section_ptr
))
4945 base_vma
= section
->output_section
->vma
+ section
->output_offset
;
4947 if (elf_section_data (section
)->this_hdr
.contents
!= NULL
)
4948 contents
= elf_section_data (section
)->this_hdr
.contents
;
4949 else if (! bfd_malloc_and_get_section (input_bfd
, section
, &contents
))
4952 sec_data
= elf32_arm_section_data (section
);
4954 for (span
= 0; span
< sec_data
->mapcount
; span
++)
4956 unsigned int span_start
= sec_data
->map
[span
].vma
;
4957 unsigned int span_end
= (span
== sec_data
->mapcount
- 1)
4958 ? section
->size
: sec_data
->map
[span
+ 1].vma
;
4960 char span_type
= sec_data
->map
[span
].type
;
4961 bfd_boolean last_was_32bit
= FALSE
, last_was_branch
= FALSE
;
4963 if (span_type
!= 't')
4966 /* Span is entirely within a single 4KB region: skip scanning. */
4967 if (((base_vma
+ span_start
) & ~0xfff)
4968 == ((base_vma
+ span_end
) & ~0xfff))
4971 /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
4973 * The opcode is BLX.W, BL.W, B.W, Bcc.W
4974 * The branch target is in the same 4KB region as the
4975 first half of the branch.
4976 * The instruction before the branch is a 32-bit
4977 length non-branch instruction. */
4978 for (i
= span_start
; i
< span_end
;)
4980 unsigned int insn
= bfd_getl16 (&contents
[i
]);
4981 bfd_boolean insn_32bit
= FALSE
, is_blx
= FALSE
, is_b
= FALSE
;
4982 bfd_boolean is_bl
= FALSE
, is_bcc
= FALSE
, is_32bit_branch
;
4984 if ((insn
& 0xe000) == 0xe000 && (insn
& 0x1800) != 0x0000)
4989 /* Load the rest of the insn (in manual-friendly order). */
4990 insn
= (insn
<< 16) | bfd_getl16 (&contents
[i
+ 2]);
4992 /* Encoding T4: B<c>.W. */
4993 is_b
= (insn
& 0xf800d000) == 0xf0009000;
4994 /* Encoding T1: BL<c>.W. */
4995 is_bl
= (insn
& 0xf800d000) == 0xf000d000;
4996 /* Encoding T2: BLX<c>.W. */
4997 is_blx
= (insn
& 0xf800d000) == 0xf000c000;
4998 /* Encoding T3: B<c>.W (not permitted in IT block). */
4999 is_bcc
= (insn
& 0xf800d000) == 0xf0008000
5000 && (insn
& 0x07f00000) != 0x03800000;
5003 is_32bit_branch
= is_b
|| is_bl
|| is_blx
|| is_bcc
;
5005 if (((base_vma
+ i
) & 0xfff) == 0xffe
5009 && ! last_was_branch
)
5011 bfd_signed_vma offset
= 0;
5012 bfd_boolean force_target_arm
= FALSE
;
5013 bfd_boolean force_target_thumb
= FALSE
;
5015 enum elf32_arm_stub_type stub_type
= arm_stub_none
;
5016 struct a8_erratum_reloc key
, *found
;
5017 bfd_boolean use_plt
= FALSE
;
5019 key
.from
= base_vma
+ i
;
5020 found
= (struct a8_erratum_reloc
*)
5021 bsearch (&key
, a8_relocs
, num_a8_relocs
,
5022 sizeof (struct a8_erratum_reloc
),
5027 char *error_message
= NULL
;
5028 struct elf_link_hash_entry
*entry
;
5030 /* We don't care about the error returned from this
5031 function, only if there is glue or not. */
5032 entry
= find_thumb_glue (info
, found
->sym_name
,
5036 found
->non_a8_stub
= TRUE
;
5038 /* Keep a simpler condition, for the sake of clarity. */
5039 if (htab
->root
.splt
!= NULL
&& found
->hash
!= NULL
5040 && found
->hash
->root
.plt
.offset
!= (bfd_vma
) -1)
5043 if (found
->r_type
== R_ARM_THM_CALL
)
5045 if (found
->branch_type
== ST_BRANCH_TO_ARM
5047 force_target_arm
= TRUE
;
5049 force_target_thumb
= TRUE
;
5053 /* Check if we have an offending branch instruction. */
5055 if (found
&& found
->non_a8_stub
)
5056 /* We've already made a stub for this instruction, e.g.
5057 it's a long branch or a Thumb->ARM stub. Assume that
5058 stub will suffice to work around the A8 erratum (see
5059 setting of always_after_branch above). */
5063 offset
= (insn
& 0x7ff) << 1;
5064 offset
|= (insn
& 0x3f0000) >> 4;
5065 offset
|= (insn
& 0x2000) ? 0x40000 : 0;
5066 offset
|= (insn
& 0x800) ? 0x80000 : 0;
5067 offset
|= (insn
& 0x4000000) ? 0x100000 : 0;
5068 if (offset
& 0x100000)
5069 offset
|= ~ ((bfd_signed_vma
) 0xfffff);
5070 stub_type
= arm_stub_a8_veneer_b_cond
;
5072 else if (is_b
|| is_bl
|| is_blx
)
5074 int s
= (insn
& 0x4000000) != 0;
5075 int j1
= (insn
& 0x2000) != 0;
5076 int j2
= (insn
& 0x800) != 0;
5080 offset
= (insn
& 0x7ff) << 1;
5081 offset
|= (insn
& 0x3ff0000) >> 4;
5085 if (offset
& 0x1000000)
5086 offset
|= ~ ((bfd_signed_vma
) 0xffffff);
5089 offset
&= ~ ((bfd_signed_vma
) 3);
5091 stub_type
= is_blx
? arm_stub_a8_veneer_blx
:
5092 is_bl
? arm_stub_a8_veneer_bl
: arm_stub_a8_veneer_b
;
5095 if (stub_type
!= arm_stub_none
)
5097 bfd_vma pc_for_insn
= base_vma
+ i
+ 4;
5099 /* The original instruction is a BL, but the target is
5100 an ARM instruction. If we were not making a stub,
5101 the BL would have been converted to a BLX. Use the
5102 BLX stub instead in that case. */
5103 if (htab
->use_blx
&& force_target_arm
5104 && stub_type
== arm_stub_a8_veneer_bl
)
5106 stub_type
= arm_stub_a8_veneer_blx
;
5110 /* Conversely, if the original instruction was
5111 BLX but the target is Thumb mode, use the BL
5113 else if (force_target_thumb
5114 && stub_type
== arm_stub_a8_veneer_blx
)
5116 stub_type
= arm_stub_a8_veneer_bl
;
5122 pc_for_insn
&= ~ ((bfd_vma
) 3);
5124 /* If we found a relocation, use the proper destination,
5125 not the offset in the (unrelocated) instruction.
5126 Note this is always done if we switched the stub type
5130 (bfd_signed_vma
) (found
->destination
- pc_for_insn
);
5132 /* If the stub will use a Thumb-mode branch to a
5133 PLT target, redirect it to the preceding Thumb
5135 if (stub_type
!= arm_stub_a8_veneer_blx
&& use_plt
)
5136 offset
-= PLT_THUMB_STUB_SIZE
;
5138 target
= pc_for_insn
+ offset
;
5140 /* The BLX stub is ARM-mode code. Adjust the offset to
5141 take the different PC value (+8 instead of +4) into
5143 if (stub_type
== arm_stub_a8_veneer_blx
)
5146 if (((base_vma
+ i
) & ~0xfff) == (target
& ~0xfff))
5148 char *stub_name
= NULL
;
5150 if (num_a8_fixes
== a8_fix_table_size
)
5152 a8_fix_table_size
*= 2;
5153 a8_fixes
= (struct a8_erratum_fix
*)
5154 bfd_realloc (a8_fixes
,
5155 sizeof (struct a8_erratum_fix
)
5156 * a8_fix_table_size
);
5159 if (num_a8_fixes
< prev_num_a8_fixes
)
5161 /* If we're doing a subsequent scan,
5162 check if we've found the same fix as
5163 before, and try and reuse the stub
5165 stub_name
= a8_fixes
[num_a8_fixes
].stub_name
;
5166 if ((a8_fixes
[num_a8_fixes
].section
!= section
)
5167 || (a8_fixes
[num_a8_fixes
].offset
!= i
))
5171 *stub_changed_p
= TRUE
;
5177 stub_name
= (char *) bfd_malloc (8 + 1 + 8 + 1);
5178 if (stub_name
!= NULL
)
5179 sprintf (stub_name
, "%x:%x", section
->id
, i
);
5182 a8_fixes
[num_a8_fixes
].input_bfd
= input_bfd
;
5183 a8_fixes
[num_a8_fixes
].section
= section
;
5184 a8_fixes
[num_a8_fixes
].offset
= i
;
5185 a8_fixes
[num_a8_fixes
].target_offset
=
5187 a8_fixes
[num_a8_fixes
].orig_insn
= insn
;
5188 a8_fixes
[num_a8_fixes
].stub_name
= stub_name
;
5189 a8_fixes
[num_a8_fixes
].stub_type
= stub_type
;
5190 a8_fixes
[num_a8_fixes
].branch_type
=
5191 is_blx
? ST_BRANCH_TO_ARM
: ST_BRANCH_TO_THUMB
;
5198 i
+= insn_32bit
? 4 : 2;
5199 last_was_32bit
= insn_32bit
;
5200 last_was_branch
= is_32bit_branch
;
5204 if (elf_section_data (section
)->this_hdr
.contents
== NULL
)
5208 *a8_fixes_p
= a8_fixes
;
5209 *num_a8_fixes_p
= num_a8_fixes
;
5210 *a8_fix_table_size_p
= a8_fix_table_size
;
5215 /* Create or update a stub entry depending on whether the stub can already be
5216 found in HTAB. The stub is identified by:
5217 - its type STUB_TYPE
5218 - its source branch (note that several can share the same stub) whose
5219 section and relocation (if any) are given by SECTION and IRELA
5221 - its target symbol whose input section, hash, name, value and branch type
5222 are given in SYM_SEC, HASH, SYM_NAME, SYM_VALUE and BRANCH_TYPE
5225 If found, the value of the stub's target symbol is updated from SYM_VALUE
5226 and *NEW_STUB is set to FALSE. Otherwise, *NEW_STUB is set to
5227 TRUE and the stub entry is initialized.
5229 Returns whether the stub could be successfully created or updated, or FALSE
5230 if an error occured. */
5233 elf32_arm_create_stub (struct elf32_arm_link_hash_table
*htab
,
5234 enum elf32_arm_stub_type stub_type
, asection
*section
,
5235 Elf_Internal_Rela
*irela
, asection
*sym_sec
,
5236 struct elf32_arm_link_hash_entry
*hash
, char *sym_name
,
5237 bfd_vma sym_value
, enum arm_st_branch_type branch_type
,
5238 bfd_boolean
*new_stub
)
5240 const asection
*id_sec
;
5242 struct elf32_arm_stub_hash_entry
*stub_entry
;
5243 unsigned int r_type
;
5244 bfd_boolean sym_claimed
= arm_stub_sym_claimed (stub_type
);
5246 BFD_ASSERT (stub_type
!= arm_stub_none
);
5250 stub_name
= sym_name
;
5254 BFD_ASSERT (section
);
5256 /* Support for grouping stub sections. */
5257 id_sec
= htab
->stub_group
[section
->id
].link_sec
;
5259 /* Get the name of this stub. */
5260 stub_name
= elf32_arm_stub_name (id_sec
, sym_sec
, hash
, irela
,
5266 stub_entry
= arm_stub_hash_lookup (&htab
->stub_hash_table
, stub_name
, FALSE
,
5268 /* The proper stub has already been created, just update its value. */
5269 if (stub_entry
!= NULL
)
5273 stub_entry
->target_value
= sym_value
;
5277 stub_entry
= elf32_arm_add_stub (stub_name
, section
, htab
, stub_type
);
5278 if (stub_entry
== NULL
)
5285 stub_entry
->target_value
= sym_value
;
5286 stub_entry
->target_section
= sym_sec
;
5287 stub_entry
->stub_type
= stub_type
;
5288 stub_entry
->h
= hash
;
5289 stub_entry
->branch_type
= branch_type
;
5292 stub_entry
->output_name
= sym_name
;
5295 if (sym_name
== NULL
)
5296 sym_name
= "unnamed";
5297 stub_entry
->output_name
= (char *)
5298 bfd_alloc (htab
->stub_bfd
, sizeof (THUMB2ARM_GLUE_ENTRY_NAME
)
5299 + strlen (sym_name
));
5300 if (stub_entry
->output_name
== NULL
)
5306 /* For historical reasons, use the existing names for ARM-to-Thumb and
5307 Thumb-to-ARM stubs. */
5308 r_type
= ELF32_R_TYPE (irela
->r_info
);
5309 if ((r_type
== (unsigned int) R_ARM_THM_CALL
5310 || r_type
== (unsigned int) R_ARM_THM_JUMP24
5311 || r_type
== (unsigned int) R_ARM_THM_JUMP19
)
5312 && branch_type
== ST_BRANCH_TO_ARM
)
5313 sprintf (stub_entry
->output_name
, THUMB2ARM_GLUE_ENTRY_NAME
, sym_name
);
5314 else if ((r_type
== (unsigned int) R_ARM_CALL
5315 || r_type
== (unsigned int) R_ARM_JUMP24
)
5316 && branch_type
== ST_BRANCH_TO_THUMB
)
5317 sprintf (stub_entry
->output_name
, ARM2THUMB_GLUE_ENTRY_NAME
, sym_name
);
5319 sprintf (stub_entry
->output_name
, STUB_ENTRY_NAME
, sym_name
);
5326 /* Determine and set the size of the stub section for a final link.
5328 The basic idea here is to examine all the relocations looking for
5329 PC-relative calls to a target that is unreachable with a "bl"
5333 elf32_arm_size_stubs (bfd
*output_bfd
,
5335 struct bfd_link_info
*info
,
5336 bfd_signed_vma group_size
,
5337 asection
* (*add_stub_section
) (const char *, asection
*,
5340 void (*layout_sections_again
) (void))
5342 bfd_size_type stub_group_size
;
5343 bfd_boolean stubs_always_after_branch
;
5344 struct elf32_arm_link_hash_table
*htab
= elf32_arm_hash_table (info
);
5345 struct a8_erratum_fix
*a8_fixes
= NULL
;
5346 unsigned int num_a8_fixes
= 0, a8_fix_table_size
= 10;
5347 struct a8_erratum_reloc
*a8_relocs
= NULL
;
5348 unsigned int num_a8_relocs
= 0, a8_reloc_table_size
= 10, i
;
5353 if (htab
->fix_cortex_a8
)
5355 a8_fixes
= (struct a8_erratum_fix
*)
5356 bfd_zmalloc (sizeof (struct a8_erratum_fix
) * a8_fix_table_size
);
5357 a8_relocs
= (struct a8_erratum_reloc
*)
5358 bfd_zmalloc (sizeof (struct a8_erratum_reloc
) * a8_reloc_table_size
);
5361 /* Propagate mach to stub bfd, because it may not have been
5362 finalized when we created stub_bfd. */
5363 bfd_set_arch_mach (stub_bfd
, bfd_get_arch (output_bfd
),
5364 bfd_get_mach (output_bfd
));
5366 /* Stash our params away. */
5367 htab
->stub_bfd
= stub_bfd
;
5368 htab
->add_stub_section
= add_stub_section
;
5369 htab
->layout_sections_again
= layout_sections_again
;
5370 stubs_always_after_branch
= group_size
< 0;
5372 /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
5373 as the first half of a 32-bit branch straddling two 4K pages. This is a
5374 crude way of enforcing that. */
5375 if (htab
->fix_cortex_a8
)
5376 stubs_always_after_branch
= 1;
5379 stub_group_size
= -group_size
;
5381 stub_group_size
= group_size
;
5383 if (stub_group_size
== 1)
5385 /* Default values. */
5386 /* Thumb branch range is +-4MB has to be used as the default
5387 maximum size (a given section can contain both ARM and Thumb
5388 code, so the worst case has to be taken into account).
5390 This value is 24K less than that, which allows for 2025
5391 12-byte stubs. If we exceed that, then we will fail to link.
5392 The user will have to relink with an explicit group size
5394 stub_group_size
= 4170000;
5397 group_sections (htab
, stub_group_size
, stubs_always_after_branch
);
5399 /* If we're applying the cortex A8 fix, we need to determine the
5400 program header size now, because we cannot change it later --
5401 that could alter section placements. Notice the A8 erratum fix
5402 ends up requiring the section addresses to remain unchanged
5403 modulo the page size. That's something we cannot represent
5404 inside BFD, and we don't want to force the section alignment to
5405 be the page size. */
5406 if (htab
->fix_cortex_a8
)
5407 (*htab
->layout_sections_again
) ();
5412 unsigned int bfd_indx
;
5414 bfd_boolean stub_changed
= FALSE
;
5415 unsigned prev_num_a8_fixes
= num_a8_fixes
;
5418 for (input_bfd
= info
->input_bfds
, bfd_indx
= 0;
5420 input_bfd
= input_bfd
->link
.next
, bfd_indx
++)
5422 Elf_Internal_Shdr
*symtab_hdr
;
5424 Elf_Internal_Sym
*local_syms
= NULL
;
5426 if (!is_arm_elf (input_bfd
))
5431 /* We'll need the symbol table in a second. */
5432 symtab_hdr
= &elf_tdata (input_bfd
)->symtab_hdr
;
5433 if (symtab_hdr
->sh_info
== 0)
5436 /* Walk over each section attached to the input bfd. */
5437 for (section
= input_bfd
->sections
;
5439 section
= section
->next
)
5441 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
5443 /* If there aren't any relocs, then there's nothing more
5445 if ((section
->flags
& SEC_RELOC
) == 0
5446 || section
->reloc_count
== 0
5447 || (section
->flags
& SEC_CODE
) == 0)
5450 /* If this section is a link-once section that will be
5451 discarded, then don't create any stubs. */
5452 if (section
->output_section
== NULL
5453 || section
->output_section
->owner
!= output_bfd
)
5456 /* Get the relocs. */
5458 = _bfd_elf_link_read_relocs (input_bfd
, section
, NULL
,
5459 NULL
, info
->keep_memory
);
5460 if (internal_relocs
== NULL
)
5461 goto error_ret_free_local
;
5463 /* Now examine each relocation. */
5464 irela
= internal_relocs
;
5465 irelaend
= irela
+ section
->reloc_count
;
5466 for (; irela
< irelaend
; irela
++)
5468 unsigned int r_type
, r_indx
;
5469 enum elf32_arm_stub_type stub_type
;
5472 bfd_vma destination
;
5473 struct elf32_arm_link_hash_entry
*hash
;
5474 const char *sym_name
;
5475 unsigned char st_type
;
5476 enum arm_st_branch_type branch_type
;
5477 bfd_boolean created_stub
= FALSE
;
5479 r_type
= ELF32_R_TYPE (irela
->r_info
);
5480 r_indx
= ELF32_R_SYM (irela
->r_info
);
5482 if (r_type
>= (unsigned int) R_ARM_max
)
5484 bfd_set_error (bfd_error_bad_value
);
5485 error_ret_free_internal
:
5486 if (elf_section_data (section
)->relocs
== NULL
)
5487 free (internal_relocs
);
5489 error_ret_free_local
:
5490 if (local_syms
!= NULL
5491 && (symtab_hdr
->contents
5492 != (unsigned char *) local_syms
))
5498 if (r_indx
>= symtab_hdr
->sh_info
)
5499 hash
= elf32_arm_hash_entry
5500 (elf_sym_hashes (input_bfd
)
5501 [r_indx
- symtab_hdr
->sh_info
]);
5503 /* Only look for stubs on branch instructions, or
5504 non-relaxed TLSCALL */
5505 if ((r_type
!= (unsigned int) R_ARM_CALL
)
5506 && (r_type
!= (unsigned int) R_ARM_THM_CALL
)
5507 && (r_type
!= (unsigned int) R_ARM_JUMP24
)
5508 && (r_type
!= (unsigned int) R_ARM_THM_JUMP19
)
5509 && (r_type
!= (unsigned int) R_ARM_THM_XPC22
)
5510 && (r_type
!= (unsigned int) R_ARM_THM_JUMP24
)
5511 && (r_type
!= (unsigned int) R_ARM_PLT32
)
5512 && !((r_type
== (unsigned int) R_ARM_TLS_CALL
5513 || r_type
== (unsigned int) R_ARM_THM_TLS_CALL
)
5514 && r_type
== elf32_arm_tls_transition
5515 (info
, r_type
, &hash
->root
)
5516 && ((hash
? hash
->tls_type
5517 : (elf32_arm_local_got_tls_type
5518 (input_bfd
)[r_indx
]))
5519 & GOT_TLS_GDESC
) != 0))
5522 /* Now determine the call target, its name, value,
5529 if (r_type
== (unsigned int) R_ARM_TLS_CALL
5530 || r_type
== (unsigned int) R_ARM_THM_TLS_CALL
)
5532 /* A non-relaxed TLS call. The target is the
5533 plt-resident trampoline and nothing to do
5535 BFD_ASSERT (htab
->tls_trampoline
> 0);
5536 sym_sec
= htab
->root
.splt
;
5537 sym_value
= htab
->tls_trampoline
;
5540 branch_type
= ST_BRANCH_TO_ARM
;
5544 /* It's a local symbol. */
5545 Elf_Internal_Sym
*sym
;
5547 if (local_syms
== NULL
)
5550 = (Elf_Internal_Sym
*) symtab_hdr
->contents
;
5551 if (local_syms
== NULL
)
5553 = bfd_elf_get_elf_syms (input_bfd
, symtab_hdr
,
5554 symtab_hdr
->sh_info
, 0,
5556 if (local_syms
== NULL
)
5557 goto error_ret_free_internal
;
5560 sym
= local_syms
+ r_indx
;
5561 if (sym
->st_shndx
== SHN_UNDEF
)
5562 sym_sec
= bfd_und_section_ptr
;
5563 else if (sym
->st_shndx
== SHN_ABS
)
5564 sym_sec
= bfd_abs_section_ptr
;
5565 else if (sym
->st_shndx
== SHN_COMMON
)
5566 sym_sec
= bfd_com_section_ptr
;
5569 bfd_section_from_elf_index (input_bfd
, sym
->st_shndx
);
5572 /* This is an undefined symbol. It can never
5576 if (ELF_ST_TYPE (sym
->st_info
) != STT_SECTION
)
5577 sym_value
= sym
->st_value
;
5578 destination
= (sym_value
+ irela
->r_addend
5579 + sym_sec
->output_offset
5580 + sym_sec
->output_section
->vma
);
5581 st_type
= ELF_ST_TYPE (sym
->st_info
);
5583 ARM_GET_SYM_BRANCH_TYPE (sym
->st_target_internal
);
5585 = bfd_elf_string_from_elf_section (input_bfd
,
5586 symtab_hdr
->sh_link
,
5591 /* It's an external symbol. */
5592 while (hash
->root
.root
.type
== bfd_link_hash_indirect
5593 || hash
->root
.root
.type
== bfd_link_hash_warning
)
5594 hash
= ((struct elf32_arm_link_hash_entry
*)
5595 hash
->root
.root
.u
.i
.link
);
5597 if (hash
->root
.root
.type
== bfd_link_hash_defined
5598 || hash
->root
.root
.type
== bfd_link_hash_defweak
)
5600 sym_sec
= hash
->root
.root
.u
.def
.section
;
5601 sym_value
= hash
->root
.root
.u
.def
.value
;
5603 struct elf32_arm_link_hash_table
*globals
=
5604 elf32_arm_hash_table (info
);
5606 /* For a destination in a shared library,
5607 use the PLT stub as target address to
5608 decide whether a branch stub is
5611 && globals
->root
.splt
!= NULL
5613 && hash
->root
.plt
.offset
!= (bfd_vma
) -1)
5615 sym_sec
= globals
->root
.splt
;
5616 sym_value
= hash
->root
.plt
.offset
;
5617 if (sym_sec
->output_section
!= NULL
)
5618 destination
= (sym_value
5619 + sym_sec
->output_offset
5620 + sym_sec
->output_section
->vma
);
5622 else if (sym_sec
->output_section
!= NULL
)
5623 destination
= (sym_value
+ irela
->r_addend
5624 + sym_sec
->output_offset
5625 + sym_sec
->output_section
->vma
);
5627 else if ((hash
->root
.root
.type
== bfd_link_hash_undefined
)
5628 || (hash
->root
.root
.type
== bfd_link_hash_undefweak
))
5630 /* For a shared library, use the PLT stub as
5631 target address to decide whether a long
5632 branch stub is needed.
5633 For absolute code, they cannot be handled. */
5634 struct elf32_arm_link_hash_table
*globals
=
5635 elf32_arm_hash_table (info
);
5638 && globals
->root
.splt
!= NULL
5640 && hash
->root
.plt
.offset
!= (bfd_vma
) -1)
5642 sym_sec
= globals
->root
.splt
;
5643 sym_value
= hash
->root
.plt
.offset
;
5644 if (sym_sec
->output_section
!= NULL
)
5645 destination
= (sym_value
5646 + sym_sec
->output_offset
5647 + sym_sec
->output_section
->vma
);
5654 bfd_set_error (bfd_error_bad_value
);
5655 goto error_ret_free_internal
;
5657 st_type
= hash
->root
.type
;
5659 ARM_GET_SYM_BRANCH_TYPE (hash
->root
.target_internal
);
5660 sym_name
= hash
->root
.root
.root
.string
;
5665 bfd_boolean new_stub
;
5667 /* Determine what (if any) linker stub is needed. */
5668 stub_type
= arm_type_of_stub (info
, section
, irela
,
5669 st_type
, &branch_type
,
5670 hash
, destination
, sym_sec
,
5671 input_bfd
, sym_name
);
5672 if (stub_type
== arm_stub_none
)
5675 /* We've either created a stub for this reloc already,
5676 or we are about to. */
5678 elf32_arm_create_stub (htab
, stub_type
, section
, irela
,
5680 (char *) sym_name
, sym_value
,
5681 branch_type
, &new_stub
);
5684 goto error_ret_free_internal
;
5688 stub_changed
= TRUE
;
5692 /* Look for relocations which might trigger Cortex-A8
5694 if (htab
->fix_cortex_a8
5695 && (r_type
== (unsigned int) R_ARM_THM_JUMP24
5696 || r_type
== (unsigned int) R_ARM_THM_JUMP19
5697 || r_type
== (unsigned int) R_ARM_THM_CALL
5698 || r_type
== (unsigned int) R_ARM_THM_XPC22
))
5700 bfd_vma from
= section
->output_section
->vma
5701 + section
->output_offset
5704 if ((from
& 0xfff) == 0xffe)
5706 /* Found a candidate. Note we haven't checked the
5707 destination is within 4K here: if we do so (and
5708 don't create an entry in a8_relocs) we can't tell
5709 that a branch should have been relocated when
5711 if (num_a8_relocs
== a8_reloc_table_size
)
5713 a8_reloc_table_size
*= 2;
5714 a8_relocs
= (struct a8_erratum_reloc
*)
5715 bfd_realloc (a8_relocs
,
5716 sizeof (struct a8_erratum_reloc
)
5717 * a8_reloc_table_size
);
5720 a8_relocs
[num_a8_relocs
].from
= from
;
5721 a8_relocs
[num_a8_relocs
].destination
= destination
;
5722 a8_relocs
[num_a8_relocs
].r_type
= r_type
;
5723 a8_relocs
[num_a8_relocs
].branch_type
= branch_type
;
5724 a8_relocs
[num_a8_relocs
].sym_name
= sym_name
;
5725 a8_relocs
[num_a8_relocs
].non_a8_stub
= created_stub
;
5726 a8_relocs
[num_a8_relocs
].hash
= hash
;
5733 /* We're done with the internal relocs, free them. */
5734 if (elf_section_data (section
)->relocs
== NULL
)
5735 free (internal_relocs
);
5738 if (htab
->fix_cortex_a8
)
5740 /* Sort relocs which might apply to Cortex-A8 erratum. */
5741 qsort (a8_relocs
, num_a8_relocs
,
5742 sizeof (struct a8_erratum_reloc
),
5745 /* Scan for branches which might trigger Cortex-A8 erratum. */
5746 if (cortex_a8_erratum_scan (input_bfd
, info
, &a8_fixes
,
5747 &num_a8_fixes
, &a8_fix_table_size
,
5748 a8_relocs
, num_a8_relocs
,
5749 prev_num_a8_fixes
, &stub_changed
)
5751 goto error_ret_free_local
;
5754 if (local_syms
!= NULL
5755 && symtab_hdr
->contents
!= (unsigned char *) local_syms
)
5757 if (!info
->keep_memory
)
5760 symtab_hdr
->contents
= (unsigned char *) local_syms
;
5764 if (prev_num_a8_fixes
!= num_a8_fixes
)
5765 stub_changed
= TRUE
;
5770 /* OK, we've added some stubs. Find out the new size of the
5772 for (stub_sec
= htab
->stub_bfd
->sections
;
5774 stub_sec
= stub_sec
->next
)
5776 /* Ignore non-stub sections. */
5777 if (!strstr (stub_sec
->name
, STUB_SUFFIX
))
5783 bfd_hash_traverse (&htab
->stub_hash_table
, arm_size_one_stub
, htab
);
5785 /* Add Cortex-A8 erratum veneers to stub section sizes too. */
5786 if (htab
->fix_cortex_a8
)
5787 for (i
= 0; i
< num_a8_fixes
; i
++)
5789 stub_sec
= elf32_arm_create_or_find_stub_sec (NULL
,
5790 a8_fixes
[i
].section
, htab
, a8_fixes
[i
].stub_type
);
5792 if (stub_sec
== NULL
)
5796 += find_stub_size_and_template (a8_fixes
[i
].stub_type
, NULL
,
5801 /* Ask the linker to do its stuff. */
5802 (*htab
->layout_sections_again
) ();
5805 /* Add stubs for Cortex-A8 erratum fixes now. */
5806 if (htab
->fix_cortex_a8
)
5808 for (i
= 0; i
< num_a8_fixes
; i
++)
5810 struct elf32_arm_stub_hash_entry
*stub_entry
;
5811 char *stub_name
= a8_fixes
[i
].stub_name
;
5812 asection
*section
= a8_fixes
[i
].section
;
5813 unsigned int section_id
= a8_fixes
[i
].section
->id
;
5814 asection
*link_sec
= htab
->stub_group
[section_id
].link_sec
;
5815 asection
*stub_sec
= htab
->stub_group
[section_id
].stub_sec
;
5816 const insn_sequence
*template_sequence
;
5817 int template_size
, size
= 0;
5819 stub_entry
= arm_stub_hash_lookup (&htab
->stub_hash_table
, stub_name
,
5821 if (stub_entry
== NULL
)
5823 (*_bfd_error_handler
) (_("%s: cannot create stub entry %s"),
5829 stub_entry
->stub_sec
= stub_sec
;
5830 stub_entry
->stub_offset
= 0;
5831 stub_entry
->id_sec
= link_sec
;
5832 stub_entry
->stub_type
= a8_fixes
[i
].stub_type
;
5833 stub_entry
->source_value
= a8_fixes
[i
].offset
;
5834 stub_entry
->target_section
= a8_fixes
[i
].section
;
5835 stub_entry
->target_value
= a8_fixes
[i
].target_offset
;
5836 stub_entry
->orig_insn
= a8_fixes
[i
].orig_insn
;
5837 stub_entry
->branch_type
= a8_fixes
[i
].branch_type
;
5839 size
= find_stub_size_and_template (a8_fixes
[i
].stub_type
,
5843 stub_entry
->stub_size
= size
;
5844 stub_entry
->stub_template
= template_sequence
;
5845 stub_entry
->stub_template_size
= template_size
;
5848 /* Stash the Cortex-A8 erratum fix array for use later in
5849 elf32_arm_write_section(). */
5850 htab
->a8_erratum_fixes
= a8_fixes
;
5851 htab
->num_a8_erratum_fixes
= num_a8_fixes
;
5855 htab
->a8_erratum_fixes
= NULL
;
5856 htab
->num_a8_erratum_fixes
= 0;
5861 /* Build all the stubs associated with the current output file. The
5862 stubs are kept in a hash table attached to the main linker hash
5863 table. We also set up the .plt entries for statically linked PIC
5864 functions here. This function is called via arm_elf_finish in the
5868 elf32_arm_build_stubs (struct bfd_link_info
*info
)
5871 struct bfd_hash_table
*table
;
5872 struct elf32_arm_link_hash_table
*htab
;
5874 htab
= elf32_arm_hash_table (info
);
5878 for (stub_sec
= htab
->stub_bfd
->sections
;
5880 stub_sec
= stub_sec
->next
)
5884 /* Ignore non-stub sections. */
5885 if (!strstr (stub_sec
->name
, STUB_SUFFIX
))
5888 /* Allocate memory to hold the linker stubs. */
5889 size
= stub_sec
->size
;
5890 stub_sec
->contents
= (unsigned char *) bfd_zalloc (htab
->stub_bfd
, size
);
5891 if (stub_sec
->contents
== NULL
&& size
!= 0)
5896 /* Build the stubs as directed by the stub hash table. */
5897 table
= &htab
->stub_hash_table
;
5898 bfd_hash_traverse (table
, arm_build_one_stub
, info
);
5899 if (htab
->fix_cortex_a8
)
5901 /* Place the cortex a8 stubs last. */
5902 htab
->fix_cortex_a8
= -1;
5903 bfd_hash_traverse (table
, arm_build_one_stub
, info
);
5909 /* Locate the Thumb encoded calling stub for NAME. */
5911 static struct elf_link_hash_entry
*
5912 find_thumb_glue (struct bfd_link_info
*link_info
,
5914 char **error_message
)
5917 struct elf_link_hash_entry
*hash
;
5918 struct elf32_arm_link_hash_table
*hash_table
;
5920 /* We need a pointer to the armelf specific hash table. */
5921 hash_table
= elf32_arm_hash_table (link_info
);
5922 if (hash_table
== NULL
)
5925 tmp_name
= (char *) bfd_malloc ((bfd_size_type
) strlen (name
)
5926 + strlen (THUMB2ARM_GLUE_ENTRY_NAME
) + 1);
5928 BFD_ASSERT (tmp_name
);
5930 sprintf (tmp_name
, THUMB2ARM_GLUE_ENTRY_NAME
, name
);
5932 hash
= elf_link_hash_lookup
5933 (&(hash_table
)->root
, tmp_name
, FALSE
, FALSE
, TRUE
);
5936 && asprintf (error_message
, _("unable to find THUMB glue '%s' for '%s'"),
5937 tmp_name
, name
) == -1)
5938 *error_message
= (char *) bfd_errmsg (bfd_error_system_call
);
5945 /* Locate the ARM encoded calling stub for NAME. */
5947 static struct elf_link_hash_entry
*
5948 find_arm_glue (struct bfd_link_info
*link_info
,
5950 char **error_message
)
5953 struct elf_link_hash_entry
*myh
;
5954 struct elf32_arm_link_hash_table
*hash_table
;
5956 /* We need a pointer to the elfarm specific hash table. */
5957 hash_table
= elf32_arm_hash_table (link_info
);
5958 if (hash_table
== NULL
)
5961 tmp_name
= (char *) bfd_malloc ((bfd_size_type
) strlen (name
)
5962 + strlen (ARM2THUMB_GLUE_ENTRY_NAME
) + 1);
5964 BFD_ASSERT (tmp_name
);
5966 sprintf (tmp_name
, ARM2THUMB_GLUE_ENTRY_NAME
, name
);
5968 myh
= elf_link_hash_lookup
5969 (&(hash_table
)->root
, tmp_name
, FALSE
, FALSE
, TRUE
);
5972 && asprintf (error_message
, _("unable to find ARM glue '%s' for '%s'"),
5973 tmp_name
, name
) == -1)
5974 *error_message
= (char *) bfd_errmsg (bfd_error_system_call
);
5981 /* ARM->Thumb glue (static images):
5985 ldr r12, __func_addr
5988 .word func @ behave as if you saw a ARM_32 reloc.
5995 .word func @ behave as if you saw a ARM_32 reloc.
5997 (relocatable images)
6000 ldr r12, __func_offset
6006 #define ARM2THUMB_STATIC_GLUE_SIZE 12
6007 static const insn32 a2t1_ldr_insn
= 0xe59fc000;
6008 static const insn32 a2t2_bx_r12_insn
= 0xe12fff1c;
6009 static const insn32 a2t3_func_addr_insn
= 0x00000001;
6011 #define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
6012 static const insn32 a2t1v5_ldr_insn
= 0xe51ff004;
6013 static const insn32 a2t2v5_func_addr_insn
= 0x00000001;
6015 #define ARM2THUMB_PIC_GLUE_SIZE 16
6016 static const insn32 a2t1p_ldr_insn
= 0xe59fc004;
6017 static const insn32 a2t2p_add_pc_insn
= 0xe08cc00f;
6018 static const insn32 a2t3p_bx_r12_insn
= 0xe12fff1c;
6020 /* Thumb->ARM: Thumb->(non-interworking aware) ARM
6024 __func_from_thumb: __func_from_thumb:
6026 nop ldr r6, __func_addr
6036 #define THUMB2ARM_GLUE_SIZE 8
6037 static const insn16 t2a1_bx_pc_insn
= 0x4778;
6038 static const insn16 t2a2_noop_insn
= 0x46c0;
6039 static const insn32 t2a3_b_insn
= 0xea000000;
6041 #define VFP11_ERRATUM_VENEER_SIZE 8
6042 #define STM32L4XX_ERRATUM_LDM_VENEER_SIZE 16
6043 #define STM32L4XX_ERRATUM_VLDM_VENEER_SIZE 24
6045 #define ARM_BX_VENEER_SIZE 12
6046 static const insn32 armbx1_tst_insn
= 0xe3100001;
6047 static const insn32 armbx2_moveq_insn
= 0x01a0f000;
6048 static const insn32 armbx3_bx_insn
= 0xe12fff10;
6050 #ifndef ELFARM_NABI_C_INCLUDED
6052 arm_allocate_glue_section_space (bfd
* abfd
, bfd_size_type size
, const char * name
)
6055 bfd_byte
* contents
;
6059 /* Do not include empty glue sections in the output. */
6062 s
= bfd_get_linker_section (abfd
, name
);
6064 s
->flags
|= SEC_EXCLUDE
;
6069 BFD_ASSERT (abfd
!= NULL
);
6071 s
= bfd_get_linker_section (abfd
, name
);
6072 BFD_ASSERT (s
!= NULL
);
6074 contents
= (bfd_byte
*) bfd_alloc (abfd
, size
);
6076 BFD_ASSERT (s
->size
== size
);
6077 s
->contents
= contents
;
6081 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info
* info
)
6083 struct elf32_arm_link_hash_table
* globals
;
6085 globals
= elf32_arm_hash_table (info
);
6086 BFD_ASSERT (globals
!= NULL
);
6088 arm_allocate_glue_section_space (globals
->bfd_of_glue_owner
,
6089 globals
->arm_glue_size
,
6090 ARM2THUMB_GLUE_SECTION_NAME
);
6092 arm_allocate_glue_section_space (globals
->bfd_of_glue_owner
,
6093 globals
->thumb_glue_size
,
6094 THUMB2ARM_GLUE_SECTION_NAME
);
6096 arm_allocate_glue_section_space (globals
->bfd_of_glue_owner
,
6097 globals
->vfp11_erratum_glue_size
,
6098 VFP11_ERRATUM_VENEER_SECTION_NAME
);
6100 arm_allocate_glue_section_space (globals
->bfd_of_glue_owner
,
6101 globals
->stm32l4xx_erratum_glue_size
,
6102 STM32L4XX_ERRATUM_VENEER_SECTION_NAME
);
6104 arm_allocate_glue_section_space (globals
->bfd_of_glue_owner
,
6105 globals
->bx_glue_size
,
6106 ARM_BX_GLUE_SECTION_NAME
);
6111 /* Allocate space and symbols for calling a Thumb function from Arm mode.
6112 returns the symbol identifying the stub. */
6114 static struct elf_link_hash_entry
*
6115 record_arm_to_thumb_glue (struct bfd_link_info
* link_info
,
6116 struct elf_link_hash_entry
* h
)
6118 const char * name
= h
->root
.root
.string
;
6121 struct elf_link_hash_entry
* myh
;
6122 struct bfd_link_hash_entry
* bh
;
6123 struct elf32_arm_link_hash_table
* globals
;
6127 globals
= elf32_arm_hash_table (link_info
);
6128 BFD_ASSERT (globals
!= NULL
);
6129 BFD_ASSERT (globals
->bfd_of_glue_owner
!= NULL
);
6131 s
= bfd_get_linker_section
6132 (globals
->bfd_of_glue_owner
, ARM2THUMB_GLUE_SECTION_NAME
);
6134 BFD_ASSERT (s
!= NULL
);
6136 tmp_name
= (char *) bfd_malloc ((bfd_size_type
) strlen (name
)
6137 + strlen (ARM2THUMB_GLUE_ENTRY_NAME
) + 1);
6139 BFD_ASSERT (tmp_name
);
6141 sprintf (tmp_name
, ARM2THUMB_GLUE_ENTRY_NAME
, name
);
6143 myh
= elf_link_hash_lookup
6144 (&(globals
)->root
, tmp_name
, FALSE
, FALSE
, TRUE
);
6148 /* We've already seen this guy. */
6153 /* The only trick here is using hash_table->arm_glue_size as the value.
6154 Even though the section isn't allocated yet, this is where we will be
6155 putting it. The +1 on the value marks that the stub has not been
6156 output yet - not that it is a Thumb function. */
6158 val
= globals
->arm_glue_size
+ 1;
6159 _bfd_generic_link_add_one_symbol (link_info
, globals
->bfd_of_glue_owner
,
6160 tmp_name
, BSF_GLOBAL
, s
, val
,
6161 NULL
, TRUE
, FALSE
, &bh
);
6163 myh
= (struct elf_link_hash_entry
*) bh
;
6164 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
6165 myh
->forced_local
= 1;
6169 if (bfd_link_pic (link_info
)
6170 || globals
->root
.is_relocatable_executable
6171 || globals
->pic_veneer
)
6172 size
= ARM2THUMB_PIC_GLUE_SIZE
;
6173 else if (globals
->use_blx
)
6174 size
= ARM2THUMB_V5_STATIC_GLUE_SIZE
;
6176 size
= ARM2THUMB_STATIC_GLUE_SIZE
;
6179 globals
->arm_glue_size
+= size
;
6184 /* Allocate space for ARMv4 BX veneers. */
6187 record_arm_bx_glue (struct bfd_link_info
* link_info
, int reg
)
6190 struct elf32_arm_link_hash_table
*globals
;
6192 struct elf_link_hash_entry
*myh
;
6193 struct bfd_link_hash_entry
*bh
;
6196 /* BX PC does not need a veneer. */
6200 globals
= elf32_arm_hash_table (link_info
);
6201 BFD_ASSERT (globals
!= NULL
);
6202 BFD_ASSERT (globals
->bfd_of_glue_owner
!= NULL
);
6204 /* Check if this veneer has already been allocated. */
6205 if (globals
->bx_glue_offset
[reg
])
6208 s
= bfd_get_linker_section
6209 (globals
->bfd_of_glue_owner
, ARM_BX_GLUE_SECTION_NAME
);
6211 BFD_ASSERT (s
!= NULL
);
6213 /* Add symbol for veneer. */
6215 bfd_malloc ((bfd_size_type
) strlen (ARM_BX_GLUE_ENTRY_NAME
) + 1);
6217 BFD_ASSERT (tmp_name
);
6219 sprintf (tmp_name
, ARM_BX_GLUE_ENTRY_NAME
, reg
);
6221 myh
= elf_link_hash_lookup
6222 (&(globals
)->root
, tmp_name
, FALSE
, FALSE
, FALSE
);
6224 BFD_ASSERT (myh
== NULL
);
6227 val
= globals
->bx_glue_size
;
6228 _bfd_generic_link_add_one_symbol (link_info
, globals
->bfd_of_glue_owner
,
6229 tmp_name
, BSF_FUNCTION
| BSF_LOCAL
, s
, val
,
6230 NULL
, TRUE
, FALSE
, &bh
);
6232 myh
= (struct elf_link_hash_entry
*) bh
;
6233 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
6234 myh
->forced_local
= 1;
6236 s
->size
+= ARM_BX_VENEER_SIZE
;
6237 globals
->bx_glue_offset
[reg
] = globals
->bx_glue_size
| 2;
6238 globals
->bx_glue_size
+= ARM_BX_VENEER_SIZE
;
6242 /* Add an entry to the code/data map for section SEC. */
6245 elf32_arm_section_map_add (asection
*sec
, char type
, bfd_vma vma
)
6247 struct _arm_elf_section_data
*sec_data
= elf32_arm_section_data (sec
);
6248 unsigned int newidx
;
6250 if (sec_data
->map
== NULL
)
6252 sec_data
->map
= (elf32_arm_section_map
*)
6253 bfd_malloc (sizeof (elf32_arm_section_map
));
6254 sec_data
->mapcount
= 0;
6255 sec_data
->mapsize
= 1;
6258 newidx
= sec_data
->mapcount
++;
6260 if (sec_data
->mapcount
> sec_data
->mapsize
)
6262 sec_data
->mapsize
*= 2;
6263 sec_data
->map
= (elf32_arm_section_map
*)
6264 bfd_realloc_or_free (sec_data
->map
, sec_data
->mapsize
6265 * sizeof (elf32_arm_section_map
));
6270 sec_data
->map
[newidx
].vma
= vma
;
6271 sec_data
->map
[newidx
].type
= type
;
6276 /* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode
6277 veneers are handled for now. */
6280 record_vfp11_erratum_veneer (struct bfd_link_info
*link_info
,
6281 elf32_vfp11_erratum_list
*branch
,
6283 asection
*branch_sec
,
6284 unsigned int offset
)
6287 struct elf32_arm_link_hash_table
*hash_table
;
6289 struct elf_link_hash_entry
*myh
;
6290 struct bfd_link_hash_entry
*bh
;
6292 struct _arm_elf_section_data
*sec_data
;
6293 elf32_vfp11_erratum_list
*newerr
;
6295 hash_table
= elf32_arm_hash_table (link_info
);
6296 BFD_ASSERT (hash_table
!= NULL
);
6297 BFD_ASSERT (hash_table
->bfd_of_glue_owner
!= NULL
);
6299 s
= bfd_get_linker_section
6300 (hash_table
->bfd_of_glue_owner
, VFP11_ERRATUM_VENEER_SECTION_NAME
);
6302 sec_data
= elf32_arm_section_data (s
);
6304 BFD_ASSERT (s
!= NULL
);
6306 tmp_name
= (char *) bfd_malloc ((bfd_size_type
) strlen
6307 (VFP11_ERRATUM_VENEER_ENTRY_NAME
) + 10);
6309 BFD_ASSERT (tmp_name
);
6311 sprintf (tmp_name
, VFP11_ERRATUM_VENEER_ENTRY_NAME
,
6312 hash_table
->num_vfp11_fixes
);
6314 myh
= elf_link_hash_lookup
6315 (&(hash_table
)->root
, tmp_name
, FALSE
, FALSE
, FALSE
);
6317 BFD_ASSERT (myh
== NULL
);
6320 val
= hash_table
->vfp11_erratum_glue_size
;
6321 _bfd_generic_link_add_one_symbol (link_info
, hash_table
->bfd_of_glue_owner
,
6322 tmp_name
, BSF_FUNCTION
| BSF_LOCAL
, s
, val
,
6323 NULL
, TRUE
, FALSE
, &bh
);
6325 myh
= (struct elf_link_hash_entry
*) bh
;
6326 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
6327 myh
->forced_local
= 1;
6329 /* Link veneer back to calling location. */
6330 sec_data
->erratumcount
+= 1;
6331 newerr
= (elf32_vfp11_erratum_list
*)
6332 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list
));
6334 newerr
->type
= VFP11_ERRATUM_ARM_VENEER
;
6336 newerr
->u
.v
.branch
= branch
;
6337 newerr
->u
.v
.id
= hash_table
->num_vfp11_fixes
;
6338 branch
->u
.b
.veneer
= newerr
;
6340 newerr
->next
= sec_data
->erratumlist
;
6341 sec_data
->erratumlist
= newerr
;
6343 /* A symbol for the return from the veneer. */
6344 sprintf (tmp_name
, VFP11_ERRATUM_VENEER_ENTRY_NAME
"_r",
6345 hash_table
->num_vfp11_fixes
);
6347 myh
= elf_link_hash_lookup
6348 (&(hash_table
)->root
, tmp_name
, FALSE
, FALSE
, FALSE
);
6355 _bfd_generic_link_add_one_symbol (link_info
, branch_bfd
, tmp_name
, BSF_LOCAL
,
6356 branch_sec
, val
, NULL
, TRUE
, FALSE
, &bh
);
6358 myh
= (struct elf_link_hash_entry
*) bh
;
6359 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
6360 myh
->forced_local
= 1;
6364 /* Generate a mapping symbol for the veneer section, and explicitly add an
6365 entry for that symbol to the code/data map for the section. */
6366 if (hash_table
->vfp11_erratum_glue_size
== 0)
6369 /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it
6370 ever requires this erratum fix. */
6371 _bfd_generic_link_add_one_symbol (link_info
,
6372 hash_table
->bfd_of_glue_owner
, "$a",
6373 BSF_LOCAL
, s
, 0, NULL
,
6376 myh
= (struct elf_link_hash_entry
*) bh
;
6377 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_NOTYPE
);
6378 myh
->forced_local
= 1;
6380 /* The elf32_arm_init_maps function only cares about symbols from input
6381 BFDs. We must make a note of this generated mapping symbol
6382 ourselves so that code byteswapping works properly in
6383 elf32_arm_write_section. */
6384 elf32_arm_section_map_add (s
, 'a', 0);
6387 s
->size
+= VFP11_ERRATUM_VENEER_SIZE
;
6388 hash_table
->vfp11_erratum_glue_size
+= VFP11_ERRATUM_VENEER_SIZE
;
6389 hash_table
->num_vfp11_fixes
++;
6391 /* The offset of the veneer. */
6395 /* Record information about a STM32L4XX STM erratum veneer. Only THUMB-mode
6396 veneers need to be handled because used only in Cortex-M. */
6399 record_stm32l4xx_erratum_veneer (struct bfd_link_info
*link_info
,
6400 elf32_stm32l4xx_erratum_list
*branch
,
6402 asection
*branch_sec
,
6403 unsigned int offset
,
6404 bfd_size_type veneer_size
)
6407 struct elf32_arm_link_hash_table
*hash_table
;
6409 struct elf_link_hash_entry
*myh
;
6410 struct bfd_link_hash_entry
*bh
;
6412 struct _arm_elf_section_data
*sec_data
;
6413 elf32_stm32l4xx_erratum_list
*newerr
;
6415 hash_table
= elf32_arm_hash_table (link_info
);
6416 BFD_ASSERT (hash_table
!= NULL
);
6417 BFD_ASSERT (hash_table
->bfd_of_glue_owner
!= NULL
);
6419 s
= bfd_get_linker_section
6420 (hash_table
->bfd_of_glue_owner
, STM32L4XX_ERRATUM_VENEER_SECTION_NAME
);
6422 BFD_ASSERT (s
!= NULL
);
6424 sec_data
= elf32_arm_section_data (s
);
6426 tmp_name
= (char *) bfd_malloc ((bfd_size_type
) strlen
6427 (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME
) + 10);
6429 BFD_ASSERT (tmp_name
);
6431 sprintf (tmp_name
, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME
,
6432 hash_table
->num_stm32l4xx_fixes
);
6434 myh
= elf_link_hash_lookup
6435 (&(hash_table
)->root
, tmp_name
, FALSE
, FALSE
, FALSE
);
6437 BFD_ASSERT (myh
== NULL
);
6440 val
= hash_table
->stm32l4xx_erratum_glue_size
;
6441 _bfd_generic_link_add_one_symbol (link_info
, hash_table
->bfd_of_glue_owner
,
6442 tmp_name
, BSF_FUNCTION
| BSF_LOCAL
, s
, val
,
6443 NULL
, TRUE
, FALSE
, &bh
);
6445 myh
= (struct elf_link_hash_entry
*) bh
;
6446 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
6447 myh
->forced_local
= 1;
6449 /* Link veneer back to calling location. */
6450 sec_data
->stm32l4xx_erratumcount
+= 1;
6451 newerr
= (elf32_stm32l4xx_erratum_list
*)
6452 bfd_zmalloc (sizeof (elf32_stm32l4xx_erratum_list
));
6454 newerr
->type
= STM32L4XX_ERRATUM_VENEER
;
6456 newerr
->u
.v
.branch
= branch
;
6457 newerr
->u
.v
.id
= hash_table
->num_stm32l4xx_fixes
;
6458 branch
->u
.b
.veneer
= newerr
;
6460 newerr
->next
= sec_data
->stm32l4xx_erratumlist
;
6461 sec_data
->stm32l4xx_erratumlist
= newerr
;
6463 /* A symbol for the return from the veneer. */
6464 sprintf (tmp_name
, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME
"_r",
6465 hash_table
->num_stm32l4xx_fixes
);
6467 myh
= elf_link_hash_lookup
6468 (&(hash_table
)->root
, tmp_name
, FALSE
, FALSE
, FALSE
);
6475 _bfd_generic_link_add_one_symbol (link_info
, branch_bfd
, tmp_name
, BSF_LOCAL
,
6476 branch_sec
, val
, NULL
, TRUE
, FALSE
, &bh
);
6478 myh
= (struct elf_link_hash_entry
*) bh
;
6479 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
6480 myh
->forced_local
= 1;
6484 /* Generate a mapping symbol for the veneer section, and explicitly add an
6485 entry for that symbol to the code/data map for the section. */
6486 if (hash_table
->stm32l4xx_erratum_glue_size
== 0)
6489 /* Creates a THUMB symbol since there is no other choice. */
6490 _bfd_generic_link_add_one_symbol (link_info
,
6491 hash_table
->bfd_of_glue_owner
, "$t",
6492 BSF_LOCAL
, s
, 0, NULL
,
6495 myh
= (struct elf_link_hash_entry
*) bh
;
6496 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_NOTYPE
);
6497 myh
->forced_local
= 1;
6499 /* The elf32_arm_init_maps function only cares about symbols from input
6500 BFDs. We must make a note of this generated mapping symbol
6501 ourselves so that code byteswapping works properly in
6502 elf32_arm_write_section. */
6503 elf32_arm_section_map_add (s
, 't', 0);
6506 s
->size
+= veneer_size
;
6507 hash_table
->stm32l4xx_erratum_glue_size
+= veneer_size
;
6508 hash_table
->num_stm32l4xx_fixes
++;
6510 /* The offset of the veneer. */
6514 #define ARM_GLUE_SECTION_FLAGS \
6515 (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
6516 | SEC_READONLY | SEC_LINKER_CREATED)
6518 /* Create a fake section for use by the ARM backend of the linker. */
6521 arm_make_glue_section (bfd
* abfd
, const char * name
)
6525 sec
= bfd_get_linker_section (abfd
, name
);
6530 sec
= bfd_make_section_anyway_with_flags (abfd
, name
, ARM_GLUE_SECTION_FLAGS
);
6533 || !bfd_set_section_alignment (abfd
, sec
, 2))
6536 /* Set the gc mark to prevent the section from being removed by garbage
6537 collection, despite the fact that no relocs refer to this section. */
6543 /* Set size of .plt entries. This function is called from the
6544 linker scripts in ld/emultempl/{armelf}.em. */
6547 bfd_elf32_arm_use_long_plt (void)
6549 elf32_arm_use_long_plt_entry
= TRUE
;
6552 /* Add the glue sections to ABFD. This function is called from the
6553 linker scripts in ld/emultempl/{armelf}.em. */
6556 bfd_elf32_arm_add_glue_sections_to_bfd (bfd
*abfd
,
6557 struct bfd_link_info
*info
)
6559 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (info
);
6560 bfd_boolean dostm32l4xx
= globals
6561 && globals
->stm32l4xx_fix
!= BFD_ARM_STM32L4XX_FIX_NONE
;
6562 bfd_boolean addglue
;
6564 /* If we are only performing a partial
6565 link do not bother adding the glue. */
6566 if (bfd_link_relocatable (info
))
6569 addglue
= arm_make_glue_section (abfd
, ARM2THUMB_GLUE_SECTION_NAME
)
6570 && arm_make_glue_section (abfd
, THUMB2ARM_GLUE_SECTION_NAME
)
6571 && arm_make_glue_section (abfd
, VFP11_ERRATUM_VENEER_SECTION_NAME
)
6572 && arm_make_glue_section (abfd
, ARM_BX_GLUE_SECTION_NAME
);
6578 && arm_make_glue_section (abfd
, STM32L4XX_ERRATUM_VENEER_SECTION_NAME
);
6581 /* Mark output sections of veneers needing a dedicated one with SEC_KEEP. This
6582 ensures they are not marked for deletion by
6583 strip_excluded_output_sections () when veneers are going to be created
6584 later. Not doing so would trigger assert on empty section size in
6585 lang_size_sections_1 (). */
6588 bfd_elf32_arm_keep_private_stub_output_sections (struct bfd_link_info
*info
)
6590 enum elf32_arm_stub_type stub_type
;
6592 /* If we are only performing a partial
6593 link do not bother adding the glue. */
6594 if (bfd_link_relocatable (info
))
6597 for (stub_type
= arm_stub_none
+ 1; stub_type
< max_stub_type
; stub_type
++)
6600 const char *out_sec_name
;
6602 if (!arm_dedicated_stub_output_section_required (stub_type
))
6605 out_sec_name
= arm_dedicated_stub_output_section_name (stub_type
);
6606 out_sec
= bfd_get_section_by_name (info
->output_bfd
, out_sec_name
);
6607 if (out_sec
!= NULL
)
6608 out_sec
->flags
|= SEC_KEEP
;
6612 /* Select a BFD to be used to hold the sections used by the glue code.
6613 This function is called from the linker scripts in ld/emultempl/
6617 bfd_elf32_arm_get_bfd_for_interworking (bfd
*abfd
, struct bfd_link_info
*info
)
6619 struct elf32_arm_link_hash_table
*globals
;
6621 /* If we are only performing a partial link
6622 do not bother getting a bfd to hold the glue. */
6623 if (bfd_link_relocatable (info
))
6626 /* Make sure we don't attach the glue sections to a dynamic object. */
6627 BFD_ASSERT (!(abfd
->flags
& DYNAMIC
));
6629 globals
= elf32_arm_hash_table (info
);
6630 BFD_ASSERT (globals
!= NULL
);
6632 if (globals
->bfd_of_glue_owner
!= NULL
)
6635 /* Save the bfd for later use. */
6636 globals
->bfd_of_glue_owner
= abfd
;
6642 check_use_blx (struct elf32_arm_link_hash_table
*globals
)
6646 cpu_arch
= bfd_elf_get_obj_attr_int (globals
->obfd
, OBJ_ATTR_PROC
,
6649 if (globals
->fix_arm1176
)
6651 if (cpu_arch
== TAG_CPU_ARCH_V6T2
|| cpu_arch
> TAG_CPU_ARCH_V6K
)
6652 globals
->use_blx
= 1;
6656 if (cpu_arch
> TAG_CPU_ARCH_V4T
)
6657 globals
->use_blx
= 1;
6662 bfd_elf32_arm_process_before_allocation (bfd
*abfd
,
6663 struct bfd_link_info
*link_info
)
6665 Elf_Internal_Shdr
*symtab_hdr
;
6666 Elf_Internal_Rela
*internal_relocs
= NULL
;
6667 Elf_Internal_Rela
*irel
, *irelend
;
6668 bfd_byte
*contents
= NULL
;
6671 struct elf32_arm_link_hash_table
*globals
;
6673 /* If we are only performing a partial link do not bother
6674 to construct any glue. */
6675 if (bfd_link_relocatable (link_info
))
6678 /* Here we have a bfd that is to be included on the link. We have a
6679 hook to do reloc rummaging, before section sizes are nailed down. */
6680 globals
= elf32_arm_hash_table (link_info
);
6681 BFD_ASSERT (globals
!= NULL
);
6683 check_use_blx (globals
);
6685 if (globals
->byteswap_code
&& !bfd_big_endian (abfd
))
6687 _bfd_error_handler (_("%B: BE8 images only valid in big-endian mode."),
6692 /* PR 5398: If we have not decided to include any loadable sections in
6693 the output then we will not have a glue owner bfd. This is OK, it
6694 just means that there is nothing else for us to do here. */
6695 if (globals
->bfd_of_glue_owner
== NULL
)
6698 /* Rummage around all the relocs and map the glue vectors. */
6699 sec
= abfd
->sections
;
6704 for (; sec
!= NULL
; sec
= sec
->next
)
6706 if (sec
->reloc_count
== 0)
6709 if ((sec
->flags
& SEC_EXCLUDE
) != 0)
6712 symtab_hdr
= & elf_symtab_hdr (abfd
);
6714 /* Load the relocs. */
6716 = _bfd_elf_link_read_relocs (abfd
, sec
, NULL
, NULL
, FALSE
);
6718 if (internal_relocs
== NULL
)
6721 irelend
= internal_relocs
+ sec
->reloc_count
;
6722 for (irel
= internal_relocs
; irel
< irelend
; irel
++)
6725 unsigned long r_index
;
6727 struct elf_link_hash_entry
*h
;
6729 r_type
= ELF32_R_TYPE (irel
->r_info
);
6730 r_index
= ELF32_R_SYM (irel
->r_info
);
6732 /* These are the only relocation types we care about. */
6733 if ( r_type
!= R_ARM_PC24
6734 && (r_type
!= R_ARM_V4BX
|| globals
->fix_v4bx
< 2))
6737 /* Get the section contents if we haven't done so already. */
6738 if (contents
== NULL
)
6740 /* Get cached copy if it exists. */
6741 if (elf_section_data (sec
)->this_hdr
.contents
!= NULL
)
6742 contents
= elf_section_data (sec
)->this_hdr
.contents
;
6745 /* Go get them off disk. */
6746 if (! bfd_malloc_and_get_section (abfd
, sec
, &contents
))
6751 if (r_type
== R_ARM_V4BX
)
6755 reg
= bfd_get_32 (abfd
, contents
+ irel
->r_offset
) & 0xf;
6756 record_arm_bx_glue (link_info
, reg
);
6760 /* If the relocation is not against a symbol it cannot concern us. */
6763 /* We don't care about local symbols. */
6764 if (r_index
< symtab_hdr
->sh_info
)
6767 /* This is an external symbol. */
6768 r_index
-= symtab_hdr
->sh_info
;
6769 h
= (struct elf_link_hash_entry
*)
6770 elf_sym_hashes (abfd
)[r_index
];
6772 /* If the relocation is against a static symbol it must be within
6773 the current section and so cannot be a cross ARM/Thumb relocation. */
6777 /* If the call will go through a PLT entry then we do not need
6779 if (globals
->root
.splt
!= NULL
&& h
->plt
.offset
!= (bfd_vma
) -1)
6785 /* This one is a call from arm code. We need to look up
6786 the target of the call. If it is a thumb target, we
6788 if (ARM_GET_SYM_BRANCH_TYPE (h
->target_internal
)
6789 == ST_BRANCH_TO_THUMB
)
6790 record_arm_to_thumb_glue (link_info
, h
);
6798 if (contents
!= NULL
6799 && elf_section_data (sec
)->this_hdr
.contents
!= contents
)
6803 if (internal_relocs
!= NULL
6804 && elf_section_data (sec
)->relocs
!= internal_relocs
)
6805 free (internal_relocs
);
6806 internal_relocs
= NULL
;
6812 if (contents
!= NULL
6813 && elf_section_data (sec
)->this_hdr
.contents
!= contents
)
6815 if (internal_relocs
!= NULL
6816 && elf_section_data (sec
)->relocs
!= internal_relocs
)
6817 free (internal_relocs
);
6824 /* Initialise maps of ARM/Thumb/data for input BFDs. */
6827 bfd_elf32_arm_init_maps (bfd
*abfd
)
6829 Elf_Internal_Sym
*isymbuf
;
6830 Elf_Internal_Shdr
*hdr
;
6831 unsigned int i
, localsyms
;
6833 /* PR 7093: Make sure that we are dealing with an arm elf binary. */
6834 if (! is_arm_elf (abfd
))
6837 if ((abfd
->flags
& DYNAMIC
) != 0)
6840 hdr
= & elf_symtab_hdr (abfd
);
6841 localsyms
= hdr
->sh_info
;
6843 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
6844 should contain the number of local symbols, which should come before any
6845 global symbols. Mapping symbols are always local. */
6846 isymbuf
= bfd_elf_get_elf_syms (abfd
, hdr
, localsyms
, 0, NULL
, NULL
,
6849 /* No internal symbols read? Skip this BFD. */
6850 if (isymbuf
== NULL
)
6853 for (i
= 0; i
< localsyms
; i
++)
6855 Elf_Internal_Sym
*isym
= &isymbuf
[i
];
6856 asection
*sec
= bfd_section_from_elf_index (abfd
, isym
->st_shndx
);
6860 && ELF_ST_BIND (isym
->st_info
) == STB_LOCAL
)
6862 name
= bfd_elf_string_from_elf_section (abfd
,
6863 hdr
->sh_link
, isym
->st_name
);
6865 if (bfd_is_arm_special_symbol_name (name
,
6866 BFD_ARM_SPECIAL_SYM_TYPE_MAP
))
6867 elf32_arm_section_map_add (sec
, name
[1], isym
->st_value
);
6873 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
6874 say what they wanted. */
6877 bfd_elf32_arm_set_cortex_a8_fix (bfd
*obfd
, struct bfd_link_info
*link_info
)
6879 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (link_info
);
6880 obj_attribute
*out_attr
= elf_known_obj_attributes_proc (obfd
);
6882 if (globals
== NULL
)
6885 if (globals
->fix_cortex_a8
== -1)
6887 /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */
6888 if (out_attr
[Tag_CPU_arch
].i
== TAG_CPU_ARCH_V7
6889 && (out_attr
[Tag_CPU_arch_profile
].i
== 'A'
6890 || out_attr
[Tag_CPU_arch_profile
].i
== 0))
6891 globals
->fix_cortex_a8
= 1;
6893 globals
->fix_cortex_a8
= 0;
6899 bfd_elf32_arm_set_vfp11_fix (bfd
*obfd
, struct bfd_link_info
*link_info
)
6901 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (link_info
);
6902 obj_attribute
*out_attr
= elf_known_obj_attributes_proc (obfd
);
6904 if (globals
== NULL
)
6906 /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix. */
6907 if (out_attr
[Tag_CPU_arch
].i
>= TAG_CPU_ARCH_V7
)
6909 switch (globals
->vfp11_fix
)
6911 case BFD_ARM_VFP11_FIX_DEFAULT
:
6912 case BFD_ARM_VFP11_FIX_NONE
:
6913 globals
->vfp11_fix
= BFD_ARM_VFP11_FIX_NONE
;
6917 /* Give a warning, but do as the user requests anyway. */
6918 (*_bfd_error_handler
) (_("%B: warning: selected VFP11 erratum "
6919 "workaround is not necessary for target architecture"), obfd
);
6922 else if (globals
->vfp11_fix
== BFD_ARM_VFP11_FIX_DEFAULT
)
6923 /* For earlier architectures, we might need the workaround, but do not
6924 enable it by default. If users is running with broken hardware, they
6925 must enable the erratum fix explicitly. */
6926 globals
->vfp11_fix
= BFD_ARM_VFP11_FIX_NONE
;
6930 bfd_elf32_arm_set_stm32l4xx_fix (bfd
*obfd
, struct bfd_link_info
*link_info
)
6932 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (link_info
);
6933 obj_attribute
*out_attr
= elf_known_obj_attributes_proc (obfd
);
6935 if (globals
== NULL
)
6938 /* We assume only Cortex-M4 may require the fix. */
6939 if (out_attr
[Tag_CPU_arch
].i
!= TAG_CPU_ARCH_V7E_M
6940 || out_attr
[Tag_CPU_arch_profile
].i
!= 'M')
6942 if (globals
->stm32l4xx_fix
!= BFD_ARM_STM32L4XX_FIX_NONE
)
6943 /* Give a warning, but do as the user requests anyway. */
6944 (*_bfd_error_handler
)
6945 (_("%B: warning: selected STM32L4XX erratum "
6946 "workaround is not necessary for target architecture"), obfd
);
6950 enum bfd_arm_vfp11_pipe
6958 /* Return a VFP register number. This is encoded as RX:X for single-precision
6959 registers, or X:RX for double-precision registers, where RX is the group of
6960 four bits in the instruction encoding and X is the single extension bit.
6961 RX and X fields are specified using their lowest (starting) bit. The return
6964 0...31: single-precision registers s0...s31
6965 32...63: double-precision registers d0...d31.
6967 Although X should be zero for VFP11 (encoding d0...d15 only), we might
6968 encounter VFP3 instructions, so we allow the full range for DP registers. */
6971 bfd_arm_vfp11_regno (unsigned int insn
, bfd_boolean is_double
, unsigned int rx
,
6975 return (((insn
>> rx
) & 0xf) | (((insn
>> x
) & 1) << 4)) + 32;
6977 return (((insn
>> rx
) & 0xf) << 1) | ((insn
>> x
) & 1);
/* Set bits in *WMASK according to a register number REG as encoded by
   bfd_arm_vfp11_regno().  Ignore d16-d31 (REG >= 48), which have no
   representation in the 32-bit mask.  */

static void
bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
{
  if (reg < 32)
    /* Single-precision register: one bit.  */
    *wmask |= 1 << reg;
  else if (reg < 48)
    /* Double-precision register d0-d15: mark the overlapping SP pair.  */
    *wmask |= 3 << ((reg - 32) * 2);
}
6992 /* Return TRUE if WMASK overwrites anything in REGS. */
6995 bfd_arm_vfp11_antidependency (unsigned int wmask
, int *regs
, int numregs
)
6999 for (i
= 0; i
< numregs
; i
++)
7001 unsigned int reg
= regs
[i
];
7003 if (reg
< 32 && (wmask
& (1 << reg
)) != 0)
7011 if ((wmask
& (3 << (reg
* 2))) != 0)
7018 /* In this function, we're interested in two things: finding input registers
7019 for VFP data-processing instructions, and finding the set of registers which
7020 arbitrary VFP instructions may write to. We use a 32-bit unsigned int to
7021 hold the written set, so FLDM etc. are easy to deal with (we're only
7022 interested in 32 SP registers or 16 dp registers, due to the VFP version
7023 implemented by the chip in question). DP registers are marked by setting
7024 both SP registers in the write mask). */
7026 static enum bfd_arm_vfp11_pipe
7027 bfd_arm_vfp11_insn_decode (unsigned int insn
, unsigned int *destmask
, int *regs
,
7030 enum bfd_arm_vfp11_pipe vpipe
= VFP11_BAD
;
7031 bfd_boolean is_double
= ((insn
& 0xf00) == 0xb00) ? 1 : 0;
7033 if ((insn
& 0x0f000e10) == 0x0e000a00) /* A data-processing insn. */
7036 unsigned int fd
= bfd_arm_vfp11_regno (insn
, is_double
, 12, 22);
7037 unsigned int fm
= bfd_arm_vfp11_regno (insn
, is_double
, 0, 5);
7039 pqrs
= ((insn
& 0x00800000) >> 20)
7040 | ((insn
& 0x00300000) >> 19)
7041 | ((insn
& 0x00000040) >> 6);
7045 case 0: /* fmac[sd]. */
7046 case 1: /* fnmac[sd]. */
7047 case 2: /* fmsc[sd]. */
7048 case 3: /* fnmsc[sd]. */
7050 bfd_arm_vfp11_write_mask (destmask
, fd
);
7052 regs
[1] = bfd_arm_vfp11_regno (insn
, is_double
, 16, 7); /* Fn. */
7057 case 4: /* fmul[sd]. */
7058 case 5: /* fnmul[sd]. */
7059 case 6: /* fadd[sd]. */
7060 case 7: /* fsub[sd]. */
7064 case 8: /* fdiv[sd]. */
7067 bfd_arm_vfp11_write_mask (destmask
, fd
);
7068 regs
[0] = bfd_arm_vfp11_regno (insn
, is_double
, 16, 7); /* Fn. */
7073 case 15: /* extended opcode. */
7075 unsigned int extn
= ((insn
>> 15) & 0x1e)
7076 | ((insn
>> 7) & 1);
7080 case 0: /* fcpy[sd]. */
7081 case 1: /* fabs[sd]. */
7082 case 2: /* fneg[sd]. */
7083 case 8: /* fcmp[sd]. */
7084 case 9: /* fcmpe[sd]. */
7085 case 10: /* fcmpz[sd]. */
7086 case 11: /* fcmpez[sd]. */
7087 case 16: /* fuito[sd]. */
7088 case 17: /* fsito[sd]. */
7089 case 24: /* ftoui[sd]. */
7090 case 25: /* ftouiz[sd]. */
7091 case 26: /* ftosi[sd]. */
7092 case 27: /* ftosiz[sd]. */
7093 /* These instructions will not bounce due to underflow. */
7098 case 3: /* fsqrt[sd]. */
7099 /* fsqrt cannot underflow, but it can (perhaps) overwrite
7100 registers to cause the erratum in previous instructions. */
7101 bfd_arm_vfp11_write_mask (destmask
, fd
);
7105 case 15: /* fcvt{ds,sd}. */
7109 bfd_arm_vfp11_write_mask (destmask
, fd
);
7111 /* Only FCVTSD can underflow. */
7112 if ((insn
& 0x100) != 0)
7131 /* Two-register transfer. */
7132 else if ((insn
& 0x0fe00ed0) == 0x0c400a10)
7134 unsigned int fm
= bfd_arm_vfp11_regno (insn
, is_double
, 0, 5);
7136 if ((insn
& 0x100000) == 0)
7139 bfd_arm_vfp11_write_mask (destmask
, fm
);
7142 bfd_arm_vfp11_write_mask (destmask
, fm
);
7143 bfd_arm_vfp11_write_mask (destmask
, fm
+ 1);
7149 else if ((insn
& 0x0e100e00) == 0x0c100a00) /* A load insn. */
7151 int fd
= bfd_arm_vfp11_regno (insn
, is_double
, 12, 22);
7152 unsigned int puw
= ((insn
>> 21) & 0x1) | (((insn
>> 23) & 3) << 1);
7156 case 0: /* Two-reg transfer. We should catch these above. */
7159 case 2: /* fldm[sdx]. */
7163 unsigned int i
, offset
= insn
& 0xff;
7168 for (i
= fd
; i
< fd
+ offset
; i
++)
7169 bfd_arm_vfp11_write_mask (destmask
, i
);
7173 case 4: /* fld[sd]. */
7175 bfd_arm_vfp11_write_mask (destmask
, fd
);
7184 /* Single-register transfer. Note L==0. */
7185 else if ((insn
& 0x0f100e10) == 0x0e000a10)
7187 unsigned int opcode
= (insn
>> 21) & 7;
7188 unsigned int fn
= bfd_arm_vfp11_regno (insn
, is_double
, 16, 7);
7192 case 0: /* fmsr/fmdlr. */
7193 case 1: /* fmdhr. */
7194 /* Mark fmdhr and fmdlr as writing to the whole of the DP
7195 destination register. I don't know if this is exactly right,
7196 but it is the conservative choice. */
7197 bfd_arm_vfp11_write_mask (destmask
, fn
);
7211 static int elf32_arm_compare_mapping (const void * a
, const void * b
);
7214 /* Look for potentially-troublesome code sequences which might trigger the
7215 VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet
7216 (available from ARM) for details of the erratum. A short version is
7217 described in ld.texinfo. */
7220 bfd_elf32_arm_vfp11_erratum_scan (bfd
*abfd
, struct bfd_link_info
*link_info
)
7223 bfd_byte
*contents
= NULL
;
7225 int regs
[3], numregs
= 0;
7226 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (link_info
);
7227 int use_vector
= (globals
->vfp11_fix
== BFD_ARM_VFP11_FIX_VECTOR
);
7229 if (globals
== NULL
)
7232 /* We use a simple FSM to match troublesome VFP11 instruction sequences.
7233 The states transition as follows:
7235 0 -> 1 (vector) or 0 -> 2 (scalar)
7236 A VFP FMAC-pipeline instruction has been seen. Fill
7237 regs[0]..regs[numregs-1] with its input operands. Remember this
7238 instruction in 'first_fmac'.
7241 Any instruction, except for a VFP instruction which overwrites
7246 A VFP instruction has been seen which overwrites any of regs[*].
7247 We must make a veneer! Reset state to 0 before examining next
7251 If we fail to match anything in state 2, reset to state 0 and reset
7252 the instruction pointer to the instruction after 'first_fmac'.
7254 If the VFP11 vector mode is in use, there must be at least two unrelated
7255 instructions between anti-dependent VFP11 instructions to properly avoid
7256 triggering the erratum, hence the use of the extra state 1. */
7258 /* If we are only performing a partial link do not bother
7259 to construct any glue. */
7260 if (bfd_link_relocatable (link_info
))
7263 /* Skip if this bfd does not correspond to an ELF image. */
7264 if (! is_arm_elf (abfd
))
7267 /* We should have chosen a fix type by the time we get here. */
7268 BFD_ASSERT (globals
->vfp11_fix
!= BFD_ARM_VFP11_FIX_DEFAULT
);
7270 if (globals
->vfp11_fix
== BFD_ARM_VFP11_FIX_NONE
)
7273 /* Skip this BFD if it corresponds to an executable or dynamic object. */
7274 if ((abfd
->flags
& (EXEC_P
| DYNAMIC
)) != 0)
7277 for (sec
= abfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
7279 unsigned int i
, span
, first_fmac
= 0, veneer_of_insn
= 0;
7280 struct _arm_elf_section_data
*sec_data
;
7282 /* If we don't have executable progbits, we're not interested in this
7283 section. Also skip if section is to be excluded. */
7284 if (elf_section_type (sec
) != SHT_PROGBITS
7285 || (elf_section_flags (sec
) & SHF_EXECINSTR
) == 0
7286 || (sec
->flags
& SEC_EXCLUDE
) != 0
7287 || sec
->sec_info_type
== SEC_INFO_TYPE_JUST_SYMS
7288 || sec
->output_section
== bfd_abs_section_ptr
7289 || strcmp (sec
->name
, VFP11_ERRATUM_VENEER_SECTION_NAME
) == 0)
7292 sec_data
= elf32_arm_section_data (sec
);
7294 if (sec_data
->mapcount
== 0)
7297 if (elf_section_data (sec
)->this_hdr
.contents
!= NULL
)
7298 contents
= elf_section_data (sec
)->this_hdr
.contents
;
7299 else if (! bfd_malloc_and_get_section (abfd
, sec
, &contents
))
7302 qsort (sec_data
->map
, sec_data
->mapcount
, sizeof (elf32_arm_section_map
),
7303 elf32_arm_compare_mapping
);
7305 for (span
= 0; span
< sec_data
->mapcount
; span
++)
7307 unsigned int span_start
= sec_data
->map
[span
].vma
;
7308 unsigned int span_end
= (span
== sec_data
->mapcount
- 1)
7309 ? sec
->size
: sec_data
->map
[span
+ 1].vma
;
7310 char span_type
= sec_data
->map
[span
].type
;
7312 /* FIXME: Only ARM mode is supported at present. We may need to
7313 support Thumb-2 mode also at some point. */
7314 if (span_type
!= 'a')
7317 for (i
= span_start
; i
< span_end
;)
7319 unsigned int next_i
= i
+ 4;
7320 unsigned int insn
= bfd_big_endian (abfd
)
7321 ? (contents
[i
] << 24)
7322 | (contents
[i
+ 1] << 16)
7323 | (contents
[i
+ 2] << 8)
7325 : (contents
[i
+ 3] << 24)
7326 | (contents
[i
+ 2] << 16)
7327 | (contents
[i
+ 1] << 8)
7329 unsigned int writemask
= 0;
7330 enum bfd_arm_vfp11_pipe vpipe
;
7335 vpipe
= bfd_arm_vfp11_insn_decode (insn
, &writemask
, regs
,
7337 /* I'm assuming the VFP11 erratum can trigger with denorm
7338 operands on either the FMAC or the DS pipeline. This might
7339 lead to slightly overenthusiastic veneer insertion. */
7340 if (vpipe
== VFP11_FMAC
|| vpipe
== VFP11_DS
)
7342 state
= use_vector
? 1 : 2;
7344 veneer_of_insn
= insn
;
7350 int other_regs
[3], other_numregs
;
7351 vpipe
= bfd_arm_vfp11_insn_decode (insn
, &writemask
,
7354 if (vpipe
!= VFP11_BAD
7355 && bfd_arm_vfp11_antidependency (writemask
, regs
,
7365 int other_regs
[3], other_numregs
;
7366 vpipe
= bfd_arm_vfp11_insn_decode (insn
, &writemask
,
7369 if (vpipe
!= VFP11_BAD
7370 && bfd_arm_vfp11_antidependency (writemask
, regs
,
7376 next_i
= first_fmac
+ 4;
7382 abort (); /* Should be unreachable. */
7387 elf32_vfp11_erratum_list
*newerr
=(elf32_vfp11_erratum_list
*)
7388 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list
));
7390 elf32_arm_section_data (sec
)->erratumcount
+= 1;
7392 newerr
->u
.b
.vfp_insn
= veneer_of_insn
;
7397 newerr
->type
= VFP11_ERRATUM_BRANCH_TO_ARM_VENEER
;
7404 record_vfp11_erratum_veneer (link_info
, newerr
, abfd
, sec
,
7409 newerr
->next
= sec_data
->erratumlist
;
7410 sec_data
->erratumlist
= newerr
;
7419 if (contents
!= NULL
7420 && elf_section_data (sec
)->this_hdr
.contents
!= contents
)
7428 if (contents
!= NULL
7429 && elf_section_data (sec
)->this_hdr
.contents
!= contents
)
7435 /* Find virtual-memory addresses for VFP11 erratum veneers and return locations
7436 after sections have been laid out, using specially-named symbols. */
7439 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd
*abfd
,
7440 struct bfd_link_info
*link_info
)
7443 struct elf32_arm_link_hash_table
*globals
;
7446 if (bfd_link_relocatable (link_info
))
7449 /* Skip if this bfd does not correspond to an ELF image. */
7450 if (! is_arm_elf (abfd
))
7453 globals
= elf32_arm_hash_table (link_info
);
7454 if (globals
== NULL
)
7457 tmp_name
= (char *) bfd_malloc ((bfd_size_type
) strlen
7458 (VFP11_ERRATUM_VENEER_ENTRY_NAME
) + 10);
7460 for (sec
= abfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
7462 struct _arm_elf_section_data
*sec_data
= elf32_arm_section_data (sec
);
7463 elf32_vfp11_erratum_list
*errnode
= sec_data
->erratumlist
;
7465 for (; errnode
!= NULL
; errnode
= errnode
->next
)
7467 struct elf_link_hash_entry
*myh
;
7470 switch (errnode
->type
)
7472 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER
:
7473 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER
:
7474 /* Find veneer symbol. */
7475 sprintf (tmp_name
, VFP11_ERRATUM_VENEER_ENTRY_NAME
,
7476 errnode
->u
.b
.veneer
->u
.v
.id
);
7478 myh
= elf_link_hash_lookup
7479 (&(globals
)->root
, tmp_name
, FALSE
, FALSE
, TRUE
);
7482 (*_bfd_error_handler
) (_("%B: unable to find VFP11 veneer "
7483 "`%s'"), abfd
, tmp_name
);
7485 vma
= myh
->root
.u
.def
.section
->output_section
->vma
7486 + myh
->root
.u
.def
.section
->output_offset
7487 + myh
->root
.u
.def
.value
;
7489 errnode
->u
.b
.veneer
->vma
= vma
;
7492 case VFP11_ERRATUM_ARM_VENEER
:
7493 case VFP11_ERRATUM_THUMB_VENEER
:
7494 /* Find return location. */
7495 sprintf (tmp_name
, VFP11_ERRATUM_VENEER_ENTRY_NAME
"_r",
7498 myh
= elf_link_hash_lookup
7499 (&(globals
)->root
, tmp_name
, FALSE
, FALSE
, TRUE
);
7502 (*_bfd_error_handler
) (_("%B: unable to find VFP11 veneer "
7503 "`%s'"), abfd
, tmp_name
);
7505 vma
= myh
->root
.u
.def
.section
->output_section
->vma
7506 + myh
->root
.u
.def
.section
->output_offset
7507 + myh
->root
.u
.def
.value
;
7509 errnode
->u
.v
.branch
->vma
= vma
;
7521 /* Find virtual-memory addresses for STM32L4XX erratum veneers and
7522 return locations after sections have been laid out, using
7523 specially-named symbols. */
7526 bfd_elf32_arm_stm32l4xx_fix_veneer_locations (bfd
*abfd
,
7527 struct bfd_link_info
*link_info
)
7530 struct elf32_arm_link_hash_table
*globals
;
7533 if (bfd_link_relocatable (link_info
))
7536 /* Skip if this bfd does not correspond to an ELF image. */
7537 if (! is_arm_elf (abfd
))
7540 globals
= elf32_arm_hash_table (link_info
);
7541 if (globals
== NULL
)
7544 tmp_name
= (char *) bfd_malloc ((bfd_size_type
) strlen
7545 (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME
) + 10);
7547 for (sec
= abfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
7549 struct _arm_elf_section_data
*sec_data
= elf32_arm_section_data (sec
);
7550 elf32_stm32l4xx_erratum_list
*errnode
= sec_data
->stm32l4xx_erratumlist
;
7552 for (; errnode
!= NULL
; errnode
= errnode
->next
)
7554 struct elf_link_hash_entry
*myh
;
7557 switch (errnode
->type
)
7559 case STM32L4XX_ERRATUM_BRANCH_TO_VENEER
:
7560 /* Find veneer symbol. */
7561 sprintf (tmp_name
, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME
,
7562 errnode
->u
.b
.veneer
->u
.v
.id
);
7564 myh
= elf_link_hash_lookup
7565 (&(globals
)->root
, tmp_name
, FALSE
, FALSE
, TRUE
);
7568 (*_bfd_error_handler
) (_("%B: unable to find STM32L4XX veneer "
7569 "`%s'"), abfd
, tmp_name
);
7571 vma
= myh
->root
.u
.def
.section
->output_section
->vma
7572 + myh
->root
.u
.def
.section
->output_offset
7573 + myh
->root
.u
.def
.value
;
7575 errnode
->u
.b
.veneer
->vma
= vma
;
7578 case STM32L4XX_ERRATUM_VENEER
:
7579 /* Find return location. */
7580 sprintf (tmp_name
, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME
"_r",
7583 myh
= elf_link_hash_lookup
7584 (&(globals
)->root
, tmp_name
, FALSE
, FALSE
, TRUE
);
7587 (*_bfd_error_handler
) (_("%B: unable to find STM32L4XX veneer "
7588 "`%s'"), abfd
, tmp_name
);
7590 vma
= myh
->root
.u
.def
.section
->output_section
->vma
7591 + myh
->root
.u
.def
.section
->output_offset
7592 + myh
->root
.u
.def
.value
;
7594 errnode
->u
.v
.branch
->vma
= vma
;
7606 static inline bfd_boolean
7607 is_thumb2_ldmia (const insn32 insn
)
7609 /* Encoding T2: LDM<c>.W <Rn>{!},<registers>
7610 1110 - 1000 - 10W1 - rrrr - PM (0) l - llll - llll - llll. */
7611 return (insn
& 0xffd02000) == 0xe8900000;
7614 static inline bfd_boolean
7615 is_thumb2_ldmdb (const insn32 insn
)
7617 /* Encoding T1: LDMDB<c> <Rn>{!},<registers>
7618 1110 - 1001 - 00W1 - rrrr - PM (0) l - llll - llll - llll. */
7619 return (insn
& 0xffd02000) == 0xe9100000;
7622 static inline bfd_boolean
7623 is_thumb2_vldm (const insn32 insn
)
7625 /* A6.5 Extension register load or store instruction
7627 We look for SP 32-bit and DP 64-bit registers.
7628 Encoding T1 VLDM{mode}<c> <Rn>{!}, <list>
7629 <list> is consecutive 64-bit registers
7630 1110 - 110P - UDW1 - rrrr - vvvv - 1011 - iiii - iiii
7631 Encoding T2 VLDM{mode}<c> <Rn>{!}, <list>
7632 <list> is consecutive 32-bit registers
7633 1110 - 110P - UDW1 - rrrr - vvvv - 1010 - iiii - iiii
7634 if P==0 && U==1 && W==1 && Rn=1101 VPOP
7635 if PUW=010 || PUW=011 || PUW=101 VLDM. */
7637 (((insn
& 0xfe100f00) == 0xec100b00) ||
7638 ((insn
& 0xfe100f00) == 0xec100a00))
7639 && /* (IA without !). */
7640 (((((insn
<< 7) >> 28) & 0xd) == 0x4)
7641 /* (IA with !), includes VPOP (when reg number is SP). */
7642 || ((((insn
<< 7) >> 28) & 0xd) == 0x5)
7644 || ((((insn
<< 7) >> 28) & 0xd) == 0x9));
7647 /* STM STM32L4XX erratum : This function assumes that it receives an LDM or
7649 - computes the number and the mode of memory accesses
7650 - decides if the replacement should be done:
7651 . replaces only if > 8-word accesses
7652 . or (testing purposes only) replaces all accesses. */
7655 stm32l4xx_need_create_replacing_stub (const insn32 insn
,
7656 bfd_arm_stm32l4xx_fix stm32l4xx_fix
)
7660 /* The field encoding the register list is the same for both LDMIA
7661 and LDMDB encodings. */
7662 if (is_thumb2_ldmia (insn
) || is_thumb2_ldmdb (insn
))
7663 nb_words
= popcount (insn
& 0x0000ffff);
7664 else if (is_thumb2_vldm (insn
))
7665 nb_words
= (insn
& 0xff);
7667 /* DEFAULT mode accounts for the real bug condition situation,
7668 ALL mode inserts stubs for each LDM/VLDM instruction (testing). */
7670 (stm32l4xx_fix
== BFD_ARM_STM32L4XX_FIX_DEFAULT
) ? nb_words
> 8 :
7671 (stm32l4xx_fix
== BFD_ARM_STM32L4XX_FIX_ALL
) ? TRUE
: FALSE
;
7674 /* Look for potentially-troublesome code sequences which might trigger
7675 the STM STM32L4XX erratum. */
7678 bfd_elf32_arm_stm32l4xx_erratum_scan (bfd
*abfd
,
7679 struct bfd_link_info
*link_info
)
7682 bfd_byte
*contents
= NULL
;
7683 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (link_info
);
7685 if (globals
== NULL
)
7688 /* If we are only performing a partial link do not bother
7689 to construct any glue. */
7690 if (bfd_link_relocatable (link_info
))
7693 /* Skip if this bfd does not correspond to an ELF image. */
7694 if (! is_arm_elf (abfd
))
7697 if (globals
->stm32l4xx_fix
== BFD_ARM_STM32L4XX_FIX_NONE
)
7700 /* Skip this BFD if it corresponds to an executable or dynamic object. */
7701 if ((abfd
->flags
& (EXEC_P
| DYNAMIC
)) != 0)
7704 for (sec
= abfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
7706 unsigned int i
, span
;
7707 struct _arm_elf_section_data
*sec_data
;
7709 /* If we don't have executable progbits, we're not interested in this
7710 section. Also skip if section is to be excluded. */
7711 if (elf_section_type (sec
) != SHT_PROGBITS
7712 || (elf_section_flags (sec
) & SHF_EXECINSTR
) == 0
7713 || (sec
->flags
& SEC_EXCLUDE
) != 0
7714 || sec
->sec_info_type
== SEC_INFO_TYPE_JUST_SYMS
7715 || sec
->output_section
== bfd_abs_section_ptr
7716 || strcmp (sec
->name
, STM32L4XX_ERRATUM_VENEER_SECTION_NAME
) == 0)
7719 sec_data
= elf32_arm_section_data (sec
);
7721 if (sec_data
->mapcount
== 0)
7724 if (elf_section_data (sec
)->this_hdr
.contents
!= NULL
)
7725 contents
= elf_section_data (sec
)->this_hdr
.contents
;
7726 else if (! bfd_malloc_and_get_section (abfd
, sec
, &contents
))
7729 qsort (sec_data
->map
, sec_data
->mapcount
, sizeof (elf32_arm_section_map
),
7730 elf32_arm_compare_mapping
);
7732 for (span
= 0; span
< sec_data
->mapcount
; span
++)
7734 unsigned int span_start
= sec_data
->map
[span
].vma
;
7735 unsigned int span_end
= (span
== sec_data
->mapcount
- 1)
7736 ? sec
->size
: sec_data
->map
[span
+ 1].vma
;
7737 char span_type
= sec_data
->map
[span
].type
;
7738 int itblock_current_pos
= 0;
7740 /* Only Thumb2 mode need be supported with this CM4 specific
7741 code, we should not encounter any arm mode eg span_type
7743 if (span_type
!= 't')
7746 for (i
= span_start
; i
< span_end
;)
7748 unsigned int insn
= bfd_get_16 (abfd
, &contents
[i
]);
7749 bfd_boolean insn_32bit
= FALSE
;
7750 bfd_boolean is_ldm
= FALSE
;
7751 bfd_boolean is_vldm
= FALSE
;
7752 bfd_boolean is_not_last_in_it_block
= FALSE
;
7754 /* The first 16-bits of all 32-bit thumb2 instructions start
7755 with opcode[15..13]=0b111 and the encoded op1 can be anything
7756 except opcode[12..11]!=0b00.
7757 See 32-bit Thumb instruction encoding. */
7758 if ((insn
& 0xe000) == 0xe000 && (insn
& 0x1800) != 0x0000)
7761 /* Compute the predicate that tells if the instruction
7762 is concerned by the IT block
7763 - Creates an error if there is a ldm that is not
7764 last in the IT block thus cannot be replaced
7765 - Otherwise we can create a branch at the end of the
7766 IT block, it will be controlled naturally by IT
7767 with the proper pseudo-predicate
7768 - So the only interesting predicate is the one that
7769 tells that we are not on the last item of an IT
7771 if (itblock_current_pos
!= 0)
7772 is_not_last_in_it_block
= !!--itblock_current_pos
;
7776 /* Load the rest of the insn (in manual-friendly order). */
7777 insn
= (insn
<< 16) | bfd_get_16 (abfd
, &contents
[i
+ 2]);
7778 is_ldm
= is_thumb2_ldmia (insn
) || is_thumb2_ldmdb (insn
);
7779 is_vldm
= is_thumb2_vldm (insn
);
7781 /* Veneers are created for (v)ldm depending on
7782 option flags and memory accesses conditions; but
7783 if the instruction is not the last instruction of
7784 an IT block, we cannot create a jump there, so we
7786 if ((is_ldm
|| is_vldm
) &&
7787 stm32l4xx_need_create_replacing_stub
7788 (insn
, globals
->stm32l4xx_fix
))
7790 if (is_not_last_in_it_block
)
7792 (*_bfd_error_handler
)
7793 /* Note - overlong line used here to allow for translation. */
7795 %B(%A+0x%lx): error: multiple load detected in non-last IT block instruction : STM32L4XX veneer cannot be generated.\n"
7796 "Use gcc option -mrestrict-it to generate only one instruction per IT block.\n"),
7797 abfd
, sec
, (long)i
);
7801 elf32_stm32l4xx_erratum_list
*newerr
=
7802 (elf32_stm32l4xx_erratum_list
*)
7804 (sizeof (elf32_stm32l4xx_erratum_list
));
7806 elf32_arm_section_data (sec
)
7807 ->stm32l4xx_erratumcount
+= 1;
7808 newerr
->u
.b
.insn
= insn
;
7809 /* We create only thumb branches. */
7811 STM32L4XX_ERRATUM_BRANCH_TO_VENEER
;
7812 record_stm32l4xx_erratum_veneer
7813 (link_info
, newerr
, abfd
, sec
,
7816 STM32L4XX_ERRATUM_LDM_VENEER_SIZE
:
7817 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE
);
7819 newerr
->next
= sec_data
->stm32l4xx_erratumlist
;
7820 sec_data
->stm32l4xx_erratumlist
= newerr
;
7827 IT blocks are only encoded in T1
7828 Encoding T1: IT{x{y{z}}} <firstcond>
7829 1 0 1 1 - 1 1 1 1 - firstcond - mask
7830 if mask = '0000' then see 'related encodings'
7831 We don't deal with UNPREDICTABLE, just ignore these.
7832 There can be no nested IT blocks so an IT block
7833 is naturally a new one for which it is worth
7834 computing its size. */
7835 bfd_boolean is_newitblock
= ((insn
& 0xff00) == 0xbf00) &&
7836 ((insn
& 0x000f) != 0x0000);
7837 /* If we have a new IT block we compute its size. */
7840 /* Compute the number of instructions controlled
7841 by the IT block, it will be used to decide
7842 whether we are inside an IT block or not. */
7843 unsigned int mask
= insn
& 0x000f;
7844 itblock_current_pos
= 4 - ctz (mask
);
7848 i
+= insn_32bit
? 4 : 2;
7852 if (contents
!= NULL
7853 && elf_section_data (sec
)->this_hdr
.contents
!= contents
)
7861 if (contents
!= NULL
7862 && elf_section_data (sec
)->this_hdr
.contents
!= contents
)
7868 /* Set target relocation values needed during linking. */
7871 bfd_elf32_arm_set_target_relocs (struct bfd
*output_bfd
,
7872 struct bfd_link_info
*link_info
,
7874 char * target2_type
,
7877 bfd_arm_vfp11_fix vfp11_fix
,
7878 bfd_arm_stm32l4xx_fix stm32l4xx_fix
,
7879 int no_enum_warn
, int no_wchar_warn
,
7880 int pic_veneer
, int fix_cortex_a8
,
7883 struct elf32_arm_link_hash_table
*globals
;
7885 globals
= elf32_arm_hash_table (link_info
);
7886 if (globals
== NULL
)
7889 globals
->target1_is_rel
= target1_is_rel
;
7890 if (strcmp (target2_type
, "rel") == 0)
7891 globals
->target2_reloc
= R_ARM_REL32
;
7892 else if (strcmp (target2_type
, "abs") == 0)
7893 globals
->target2_reloc
= R_ARM_ABS32
;
7894 else if (strcmp (target2_type
, "got-rel") == 0)
7895 globals
->target2_reloc
= R_ARM_GOT_PREL
;
7898 _bfd_error_handler (_("Invalid TARGET2 relocation type '%s'."),
7901 globals
->fix_v4bx
= fix_v4bx
;
7902 globals
->use_blx
|= use_blx
;
7903 globals
->vfp11_fix
= vfp11_fix
;
7904 globals
->stm32l4xx_fix
= stm32l4xx_fix
;
7905 globals
->pic_veneer
= pic_veneer
;
7906 globals
->fix_cortex_a8
= fix_cortex_a8
;
7907 globals
->fix_arm1176
= fix_arm1176
;
7909 BFD_ASSERT (is_arm_elf (output_bfd
));
7910 elf_arm_tdata (output_bfd
)->no_enum_size_warning
= no_enum_warn
;
7911 elf_arm_tdata (output_bfd
)->no_wchar_size_warning
= no_wchar_warn
;
7914 /* Replace the target offset of a Thumb bl or b.w instruction. */
7917 insert_thumb_branch (bfd
*abfd
, long int offset
, bfd_byte
*insn
)
7923 BFD_ASSERT ((offset
& 1) == 0);
7925 upper
= bfd_get_16 (abfd
, insn
);
7926 lower
= bfd_get_16 (abfd
, insn
+ 2);
7927 reloc_sign
= (offset
< 0) ? 1 : 0;
7928 upper
= (upper
& ~(bfd_vma
) 0x7ff)
7929 | ((offset
>> 12) & 0x3ff)
7930 | (reloc_sign
<< 10);
7931 lower
= (lower
& ~(bfd_vma
) 0x2fff)
7932 | (((!((offset
>> 23) & 1)) ^ reloc_sign
) << 13)
7933 | (((!((offset
>> 22) & 1)) ^ reloc_sign
) << 11)
7934 | ((offset
>> 1) & 0x7ff);
7935 bfd_put_16 (abfd
, upper
, insn
);
7936 bfd_put_16 (abfd
, lower
, insn
+ 2);
7939 /* Thumb code calling an ARM function. */
7942 elf32_thumb_to_arm_stub (struct bfd_link_info
* info
,
7946 asection
* input_section
,
7947 bfd_byte
* hit_data
,
7950 bfd_signed_vma addend
,
7952 char **error_message
)
7956 long int ret_offset
;
7957 struct elf_link_hash_entry
* myh
;
7958 struct elf32_arm_link_hash_table
* globals
;
7960 myh
= find_thumb_glue (info
, name
, error_message
);
7964 globals
= elf32_arm_hash_table (info
);
7965 BFD_ASSERT (globals
!= NULL
);
7966 BFD_ASSERT (globals
->bfd_of_glue_owner
!= NULL
);
7968 my_offset
= myh
->root
.u
.def
.value
;
7970 s
= bfd_get_linker_section (globals
->bfd_of_glue_owner
,
7971 THUMB2ARM_GLUE_SECTION_NAME
);
7973 BFD_ASSERT (s
!= NULL
);
7974 BFD_ASSERT (s
->contents
!= NULL
);
7975 BFD_ASSERT (s
->output_section
!= NULL
);
7977 if ((my_offset
& 0x01) == 0x01)
7980 && sym_sec
->owner
!= NULL
7981 && !INTERWORK_FLAG (sym_sec
->owner
))
7983 (*_bfd_error_handler
)
7984 (_("%B(%s): warning: interworking not enabled.\n"
7985 " first occurrence: %B: Thumb call to ARM"),
7986 sym_sec
->owner
, input_bfd
, name
);
7992 myh
->root
.u
.def
.value
= my_offset
;
7994 put_thumb_insn (globals
, output_bfd
, (bfd_vma
) t2a1_bx_pc_insn
,
7995 s
->contents
+ my_offset
);
7997 put_thumb_insn (globals
, output_bfd
, (bfd_vma
) t2a2_noop_insn
,
7998 s
->contents
+ my_offset
+ 2);
8001 /* Address of destination of the stub. */
8002 ((bfd_signed_vma
) val
)
8004 /* Offset from the start of the current section
8005 to the start of the stubs. */
8007 /* Offset of the start of this stub from the start of the stubs. */
8009 /* Address of the start of the current section. */
8010 + s
->output_section
->vma
)
8011 /* The branch instruction is 4 bytes into the stub. */
8013 /* ARM branches work from the pc of the instruction + 8. */
8016 put_arm_insn (globals
, output_bfd
,
8017 (bfd_vma
) t2a3_b_insn
| ((ret_offset
>> 2) & 0x00FFFFFF),
8018 s
->contents
+ my_offset
+ 4);
8021 BFD_ASSERT (my_offset
<= globals
->thumb_glue_size
);
8023 /* Now go back and fix up the original BL insn to point to here. */
8025 /* Address of where the stub is located. */
8026 (s
->output_section
->vma
+ s
->output_offset
+ my_offset
)
8027 /* Address of where the BL is located. */
8028 - (input_section
->output_section
->vma
+ input_section
->output_offset
8030 /* Addend in the relocation. */
8032 /* Biassing for PC-relative addressing. */
8035 insert_thumb_branch (input_bfd
, ret_offset
, hit_data
- input_section
->vma
);
8040 /* Populate an Arm to Thumb stub. Returns the stub symbol. */
8042 static struct elf_link_hash_entry
*
8043 elf32_arm_create_thumb_stub (struct bfd_link_info
* info
,
8050 char ** error_message
)
8053 long int ret_offset
;
8054 struct elf_link_hash_entry
* myh
;
8055 struct elf32_arm_link_hash_table
* globals
;
8057 myh
= find_arm_glue (info
, name
, error_message
);
8061 globals
= elf32_arm_hash_table (info
);
8062 BFD_ASSERT (globals
!= NULL
);
8063 BFD_ASSERT (globals
->bfd_of_glue_owner
!= NULL
);
8065 my_offset
= myh
->root
.u
.def
.value
;
8067 if ((my_offset
& 0x01) == 0x01)
8070 && sym_sec
->owner
!= NULL
8071 && !INTERWORK_FLAG (sym_sec
->owner
))
8073 (*_bfd_error_handler
)
8074 (_("%B(%s): warning: interworking not enabled.\n"
8075 " first occurrence: %B: arm call to thumb"),
8076 sym_sec
->owner
, input_bfd
, name
);
8080 myh
->root
.u
.def
.value
= my_offset
;
8082 if (bfd_link_pic (info
)
8083 || globals
->root
.is_relocatable_executable
8084 || globals
->pic_veneer
)
8086 /* For relocatable objects we can't use absolute addresses,
8087 so construct the address from a relative offset. */
8088 /* TODO: If the offset is small it's probably worth
8089 constructing the address with adds. */
8090 put_arm_insn (globals
, output_bfd
, (bfd_vma
) a2t1p_ldr_insn
,
8091 s
->contents
+ my_offset
);
8092 put_arm_insn (globals
, output_bfd
, (bfd_vma
) a2t2p_add_pc_insn
,
8093 s
->contents
+ my_offset
+ 4);
8094 put_arm_insn (globals
, output_bfd
, (bfd_vma
) a2t3p_bx_r12_insn
,
8095 s
->contents
+ my_offset
+ 8);
8096 /* Adjust the offset by 4 for the position of the add,
8097 and 8 for the pipeline offset. */
8098 ret_offset
= (val
- (s
->output_offset
8099 + s
->output_section
->vma
8102 bfd_put_32 (output_bfd
, ret_offset
,
8103 s
->contents
+ my_offset
+ 12);
8105 else if (globals
->use_blx
)
8107 put_arm_insn (globals
, output_bfd
, (bfd_vma
) a2t1v5_ldr_insn
,
8108 s
->contents
+ my_offset
);
8110 /* It's a thumb address. Add the low order bit. */
8111 bfd_put_32 (output_bfd
, val
| a2t2v5_func_addr_insn
,
8112 s
->contents
+ my_offset
+ 4);
8116 put_arm_insn (globals
, output_bfd
, (bfd_vma
) a2t1_ldr_insn
,
8117 s
->contents
+ my_offset
);
8119 put_arm_insn (globals
, output_bfd
, (bfd_vma
) a2t2_bx_r12_insn
,
8120 s
->contents
+ my_offset
+ 4);
8122 /* It's a thumb address. Add the low order bit. */
8123 bfd_put_32 (output_bfd
, val
| a2t3_func_addr_insn
,
8124 s
->contents
+ my_offset
+ 8);
8130 BFD_ASSERT (my_offset
<= globals
->arm_glue_size
);
8135 /* Arm code calling a Thumb function. */
8138 elf32_arm_to_thumb_stub (struct bfd_link_info
* info
,
8142 asection
* input_section
,
8143 bfd_byte
* hit_data
,
8146 bfd_signed_vma addend
,
8148 char **error_message
)
8150 unsigned long int tmp
;
8153 long int ret_offset
;
8154 struct elf_link_hash_entry
* myh
;
8155 struct elf32_arm_link_hash_table
* globals
;
8157 globals
= elf32_arm_hash_table (info
);
8158 BFD_ASSERT (globals
!= NULL
);
8159 BFD_ASSERT (globals
->bfd_of_glue_owner
!= NULL
);
8161 s
= bfd_get_linker_section (globals
->bfd_of_glue_owner
,
8162 ARM2THUMB_GLUE_SECTION_NAME
);
8163 BFD_ASSERT (s
!= NULL
);
8164 BFD_ASSERT (s
->contents
!= NULL
);
8165 BFD_ASSERT (s
->output_section
!= NULL
);
8167 myh
= elf32_arm_create_thumb_stub (info
, name
, input_bfd
, output_bfd
,
8168 sym_sec
, val
, s
, error_message
);
8172 my_offset
= myh
->root
.u
.def
.value
;
8173 tmp
= bfd_get_32 (input_bfd
, hit_data
);
8174 tmp
= tmp
& 0xFF000000;
8176 /* Somehow these are both 4 too far, so subtract 8. */
8177 ret_offset
= (s
->output_offset
8179 + s
->output_section
->vma
8180 - (input_section
->output_offset
8181 + input_section
->output_section
->vma
8185 tmp
= tmp
| ((ret_offset
>> 2) & 0x00FFFFFF);
8187 bfd_put_32 (output_bfd
, (bfd_vma
) tmp
, hit_data
- input_section
->vma
);
8192 /* Populate Arm stub for an exported Thumb function. */
8195 elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry
*h
, void * inf
)
8197 struct bfd_link_info
* info
= (struct bfd_link_info
*) inf
;
8199 struct elf_link_hash_entry
* myh
;
8200 struct elf32_arm_link_hash_entry
*eh
;
8201 struct elf32_arm_link_hash_table
* globals
;
8204 char *error_message
;
8206 eh
= elf32_arm_hash_entry (h
);
8207 /* Allocate stubs for exported Thumb functions on v4t. */
8208 if (eh
->export_glue
== NULL
)
8211 globals
= elf32_arm_hash_table (info
);
8212 BFD_ASSERT (globals
!= NULL
);
8213 BFD_ASSERT (globals
->bfd_of_glue_owner
!= NULL
);
8215 s
= bfd_get_linker_section (globals
->bfd_of_glue_owner
,
8216 ARM2THUMB_GLUE_SECTION_NAME
);
8217 BFD_ASSERT (s
!= NULL
);
8218 BFD_ASSERT (s
->contents
!= NULL
);
8219 BFD_ASSERT (s
->output_section
!= NULL
);
8221 sec
= eh
->export_glue
->root
.u
.def
.section
;
8223 BFD_ASSERT (sec
->output_section
!= NULL
);
8225 val
= eh
->export_glue
->root
.u
.def
.value
+ sec
->output_offset
8226 + sec
->output_section
->vma
;
8228 myh
= elf32_arm_create_thumb_stub (info
, h
->root
.root
.string
,
8229 h
->root
.u
.def
.section
->owner
,
8230 globals
->obfd
, sec
, val
, s
,
8236 /* Populate ARMv4 BX veneers. Returns the absolute adress of the veneer. */
8239 elf32_arm_bx_glue (struct bfd_link_info
* info
, int reg
)
8244 struct elf32_arm_link_hash_table
*globals
;
8246 globals
= elf32_arm_hash_table (info
);
8247 BFD_ASSERT (globals
!= NULL
);
8248 BFD_ASSERT (globals
->bfd_of_glue_owner
!= NULL
);
8250 s
= bfd_get_linker_section (globals
->bfd_of_glue_owner
,
8251 ARM_BX_GLUE_SECTION_NAME
);
8252 BFD_ASSERT (s
!= NULL
);
8253 BFD_ASSERT (s
->contents
!= NULL
);
8254 BFD_ASSERT (s
->output_section
!= NULL
);
8256 BFD_ASSERT (globals
->bx_glue_offset
[reg
] & 2);
8258 glue_addr
= globals
->bx_glue_offset
[reg
] & ~(bfd_vma
)3;
8260 if ((globals
->bx_glue_offset
[reg
] & 1) == 0)
8262 p
= s
->contents
+ glue_addr
;
8263 bfd_put_32 (globals
->obfd
, armbx1_tst_insn
+ (reg
<< 16), p
);
8264 bfd_put_32 (globals
->obfd
, armbx2_moveq_insn
+ reg
, p
+ 4);
8265 bfd_put_32 (globals
->obfd
, armbx3_bx_insn
+ reg
, p
+ 8);
8266 globals
->bx_glue_offset
[reg
] |= 1;
8269 return glue_addr
+ s
->output_section
->vma
+ s
->output_offset
;
8272 /* Generate Arm stubs for exported Thumb symbols. */
8274 elf32_arm_begin_write_processing (bfd
*abfd ATTRIBUTE_UNUSED
,
8275 struct bfd_link_info
*link_info
)
8277 struct elf32_arm_link_hash_table
* globals
;
8279 if (link_info
== NULL
)
8280 /* Ignore this if we are not called by the ELF backend linker. */
8283 globals
= elf32_arm_hash_table (link_info
);
8284 if (globals
== NULL
)
8287 /* If blx is available then exported Thumb symbols are OK and there is
8289 if (globals
->use_blx
)
8292 elf_link_hash_traverse (&globals
->root
, elf32_arm_to_thumb_export_stub
,
8296 /* Reserve space for COUNT dynamic relocations in relocation selection
8300 elf32_arm_allocate_dynrelocs (struct bfd_link_info
*info
, asection
*sreloc
,
8301 bfd_size_type count
)
8303 struct elf32_arm_link_hash_table
*htab
;
8305 htab
= elf32_arm_hash_table (info
);
8306 BFD_ASSERT (htab
->root
.dynamic_sections_created
);
8309 sreloc
->size
+= RELOC_SIZE (htab
) * count
;
8312 /* Reserve space for COUNT R_ARM_IRELATIVE relocations. If the link is
8313 dynamic, the relocations should go in SRELOC, otherwise they should
8314 go in the special .rel.iplt section. */
8317 elf32_arm_allocate_irelocs (struct bfd_link_info
*info
, asection
*sreloc
,
8318 bfd_size_type count
)
8320 struct elf32_arm_link_hash_table
*htab
;
8322 htab
= elf32_arm_hash_table (info
);
8323 if (!htab
->root
.dynamic_sections_created
)
8324 htab
->root
.irelplt
->size
+= RELOC_SIZE (htab
) * count
;
8327 BFD_ASSERT (sreloc
!= NULL
);
8328 sreloc
->size
+= RELOC_SIZE (htab
) * count
;
8332 /* Add relocation REL to the end of relocation section SRELOC. */
8335 elf32_arm_add_dynreloc (bfd
*output_bfd
, struct bfd_link_info
*info
,
8336 asection
*sreloc
, Elf_Internal_Rela
*rel
)
8339 struct elf32_arm_link_hash_table
*htab
;
8341 htab
= elf32_arm_hash_table (info
);
8342 if (!htab
->root
.dynamic_sections_created
8343 && ELF32_R_TYPE (rel
->r_info
) == R_ARM_IRELATIVE
)
8344 sreloc
= htab
->root
.irelplt
;
8347 loc
= sreloc
->contents
;
8348 loc
+= sreloc
->reloc_count
++ * RELOC_SIZE (htab
);
8349 if (sreloc
->reloc_count
* RELOC_SIZE (htab
) > sreloc
->size
)
8351 SWAP_RELOC_OUT (htab
) (output_bfd
, rel
, loc
);
8354 /* Allocate room for a PLT entry described by ROOT_PLT and ARM_PLT.
8355 IS_IPLT_ENTRY says whether the entry belongs to .iplt rather than
8359 elf32_arm_allocate_plt_entry (struct bfd_link_info
*info
,
8360 bfd_boolean is_iplt_entry
,
8361 union gotplt_union
*root_plt
,
8362 struct arm_plt_info
*arm_plt
)
8364 struct elf32_arm_link_hash_table
*htab
;
8368 htab
= elf32_arm_hash_table (info
);
8372 splt
= htab
->root
.iplt
;
8373 sgotplt
= htab
->root
.igotplt
;
8375 /* NaCl uses a special first entry in .iplt too. */
8376 if (htab
->nacl_p
&& splt
->size
== 0)
8377 splt
->size
+= htab
->plt_header_size
;
8379 /* Allocate room for an R_ARM_IRELATIVE relocation in .rel.iplt. */
8380 elf32_arm_allocate_irelocs (info
, htab
->root
.irelplt
, 1);
8384 splt
= htab
->root
.splt
;
8385 sgotplt
= htab
->root
.sgotplt
;
8387 /* Allocate room for an R_JUMP_SLOT relocation in .rel.plt. */
8388 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelplt
, 1);
8390 /* If this is the first .plt entry, make room for the special
8392 if (splt
->size
== 0)
8393 splt
->size
+= htab
->plt_header_size
;
8395 htab
->next_tls_desc_index
++;
8398 /* Allocate the PLT entry itself, including any leading Thumb stub. */
8399 if (elf32_arm_plt_needs_thumb_stub_p (info
, arm_plt
))
8400 splt
->size
+= PLT_THUMB_STUB_SIZE
;
8401 root_plt
->offset
= splt
->size
;
8402 splt
->size
+= htab
->plt_entry_size
;
8404 if (!htab
->symbian_p
)
8406 /* We also need to make an entry in the .got.plt section, which
8407 will be placed in the .got section by the linker script. */
8409 arm_plt
->got_offset
= sgotplt
->size
;
8411 arm_plt
->got_offset
= sgotplt
->size
- 8 * htab
->num_tls_desc
;
8417 arm_movw_immediate (bfd_vma value
)
8419 return (value
& 0x00000fff) | ((value
& 0x0000f000) << 4);
8423 arm_movt_immediate (bfd_vma value
)
8425 return ((value
& 0x0fff0000) >> 16) | ((value
& 0xf0000000) >> 12);
8428 /* Fill in a PLT entry and its associated GOT slot. If DYNINDX == -1,
8429 the entry lives in .iplt and resolves to (*SYM_VALUE)().
8430 Otherwise, DYNINDX is the index of the symbol in the dynamic
8431 symbol table and SYM_VALUE is undefined.
8433 ROOT_PLT points to the offset of the PLT entry from the start of its
8434 section (.iplt or .plt). ARM_PLT points to the symbol's ARM-specific
8435 bookkeeping information.
8437 Returns FALSE if there was a problem. */
8440 elf32_arm_populate_plt_entry (bfd
*output_bfd
, struct bfd_link_info
*info
,
8441 union gotplt_union
*root_plt
,
8442 struct arm_plt_info
*arm_plt
,
8443 int dynindx
, bfd_vma sym_value
)
8445 struct elf32_arm_link_hash_table
*htab
;
8451 Elf_Internal_Rela rel
;
8452 bfd_vma plt_header_size
;
8453 bfd_vma got_header_size
;
8455 htab
= elf32_arm_hash_table (info
);
8457 /* Pick the appropriate sections and sizes. */
8460 splt
= htab
->root
.iplt
;
8461 sgot
= htab
->root
.igotplt
;
8462 srel
= htab
->root
.irelplt
;
8464 /* There are no reserved entries in .igot.plt, and no special
8465 first entry in .iplt. */
8466 got_header_size
= 0;
8467 plt_header_size
= 0;
8471 splt
= htab
->root
.splt
;
8472 sgot
= htab
->root
.sgotplt
;
8473 srel
= htab
->root
.srelplt
;
8475 got_header_size
= get_elf_backend_data (output_bfd
)->got_header_size
;
8476 plt_header_size
= htab
->plt_header_size
;
8478 BFD_ASSERT (splt
!= NULL
&& srel
!= NULL
);
8480 /* Fill in the entry in the procedure linkage table. */
8481 if (htab
->symbian_p
)
8483 BFD_ASSERT (dynindx
>= 0);
8484 put_arm_insn (htab
, output_bfd
,
8485 elf32_arm_symbian_plt_entry
[0],
8486 splt
->contents
+ root_plt
->offset
);
8487 bfd_put_32 (output_bfd
,
8488 elf32_arm_symbian_plt_entry
[1],
8489 splt
->contents
+ root_plt
->offset
+ 4);
8491 /* Fill in the entry in the .rel.plt section. */
8492 rel
.r_offset
= (splt
->output_section
->vma
8493 + splt
->output_offset
8494 + root_plt
->offset
+ 4);
8495 rel
.r_info
= ELF32_R_INFO (dynindx
, R_ARM_GLOB_DAT
);
8497 /* Get the index in the procedure linkage table which
8498 corresponds to this symbol. This is the index of this symbol
8499 in all the symbols for which we are making plt entries. The
8500 first entry in the procedure linkage table is reserved. */
8501 plt_index
= ((root_plt
->offset
- plt_header_size
)
8502 / htab
->plt_entry_size
);
8506 bfd_vma got_offset
, got_address
, plt_address
;
8507 bfd_vma got_displacement
, initial_got_entry
;
8510 BFD_ASSERT (sgot
!= NULL
);
8512 /* Get the offset into the .(i)got.plt table of the entry that
8513 corresponds to this function. */
8514 got_offset
= (arm_plt
->got_offset
& -2);
8516 /* Get the index in the procedure linkage table which
8517 corresponds to this symbol. This is the index of this symbol
8518 in all the symbols for which we are making plt entries.
8519 After the reserved .got.plt entries, all symbols appear in
8520 the same order as in .plt. */
8521 plt_index
= (got_offset
- got_header_size
) / 4;
8523 /* Calculate the address of the GOT entry. */
8524 got_address
= (sgot
->output_section
->vma
8525 + sgot
->output_offset
8528 /* ...and the address of the PLT entry. */
8529 plt_address
= (splt
->output_section
->vma
8530 + splt
->output_offset
8531 + root_plt
->offset
);
8533 ptr
= splt
->contents
+ root_plt
->offset
;
8534 if (htab
->vxworks_p
&& bfd_link_pic (info
))
8539 for (i
= 0; i
!= htab
->plt_entry_size
/ 4; i
++, ptr
+= 4)
8541 val
= elf32_arm_vxworks_shared_plt_entry
[i
];
8543 val
|= got_address
- sgot
->output_section
->vma
;
8545 val
|= plt_index
* RELOC_SIZE (htab
);
8546 if (i
== 2 || i
== 5)
8547 bfd_put_32 (output_bfd
, val
, ptr
);
8549 put_arm_insn (htab
, output_bfd
, val
, ptr
);
8552 else if (htab
->vxworks_p
)
8557 for (i
= 0; i
!= htab
->plt_entry_size
/ 4; i
++, ptr
+= 4)
8559 val
= elf32_arm_vxworks_exec_plt_entry
[i
];
8563 val
|= 0xffffff & -((root_plt
->offset
+ i
* 4 + 8) >> 2);
8565 val
|= plt_index
* RELOC_SIZE (htab
);
8566 if (i
== 2 || i
== 5)
8567 bfd_put_32 (output_bfd
, val
, ptr
);
8569 put_arm_insn (htab
, output_bfd
, val
, ptr
);
8572 loc
= (htab
->srelplt2
->contents
8573 + (plt_index
* 2 + 1) * RELOC_SIZE (htab
));
8575 /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
8576 referencing the GOT for this PLT entry. */
8577 rel
.r_offset
= plt_address
+ 8;
8578 rel
.r_info
= ELF32_R_INFO (htab
->root
.hgot
->indx
, R_ARM_ABS32
);
8579 rel
.r_addend
= got_offset
;
8580 SWAP_RELOC_OUT (htab
) (output_bfd
, &rel
, loc
);
8581 loc
+= RELOC_SIZE (htab
);
8583 /* Create the R_ARM_ABS32 relocation referencing the
8584 beginning of the PLT for this GOT entry. */
8585 rel
.r_offset
= got_address
;
8586 rel
.r_info
= ELF32_R_INFO (htab
->root
.hplt
->indx
, R_ARM_ABS32
);
8588 SWAP_RELOC_OUT (htab
) (output_bfd
, &rel
, loc
);
8590 else if (htab
->nacl_p
)
8592 /* Calculate the displacement between the PLT slot and the
8593 common tail that's part of the special initial PLT slot. */
8594 int32_t tail_displacement
8595 = ((splt
->output_section
->vma
+ splt
->output_offset
8596 + ARM_NACL_PLT_TAIL_OFFSET
)
8597 - (plt_address
+ htab
->plt_entry_size
+ 4));
8598 BFD_ASSERT ((tail_displacement
& 3) == 0);
8599 tail_displacement
>>= 2;
8601 BFD_ASSERT ((tail_displacement
& 0xff000000) == 0
8602 || (-tail_displacement
& 0xff000000) == 0);
8604 /* Calculate the displacement between the PLT slot and the entry
8605 in the GOT. The offset accounts for the value produced by
8606 adding to pc in the penultimate instruction of the PLT stub. */
8607 got_displacement
= (got_address
8608 - (plt_address
+ htab
->plt_entry_size
));
8610 /* NaCl does not support interworking at all. */
8611 BFD_ASSERT (!elf32_arm_plt_needs_thumb_stub_p (info
, arm_plt
));
8613 put_arm_insn (htab
, output_bfd
,
8614 elf32_arm_nacl_plt_entry
[0]
8615 | arm_movw_immediate (got_displacement
),
8617 put_arm_insn (htab
, output_bfd
,
8618 elf32_arm_nacl_plt_entry
[1]
8619 | arm_movt_immediate (got_displacement
),
8621 put_arm_insn (htab
, output_bfd
,
8622 elf32_arm_nacl_plt_entry
[2],
8624 put_arm_insn (htab
, output_bfd
,
8625 elf32_arm_nacl_plt_entry
[3]
8626 | (tail_displacement
& 0x00ffffff),
8629 else if (using_thumb_only (htab
))
8631 /* PR ld/16017: Generate thumb only PLT entries. */
8632 if (!using_thumb2 (htab
))
8634 /* FIXME: We ought to be able to generate thumb-1 PLT
8636 _bfd_error_handler (_("%B: Warning: thumb-1 mode PLT generation not currently supported"),
8641 /* Calculate the displacement between the PLT slot and the entry in
8642 the GOT. The 12-byte offset accounts for the value produced by
8643 adding to pc in the 3rd instruction of the PLT stub. */
8644 got_displacement
= got_address
- (plt_address
+ 12);
8646 /* As we are using 32 bit instructions we have to use 'put_arm_insn'
8647 instead of 'put_thumb_insn'. */
8648 put_arm_insn (htab
, output_bfd
,
8649 elf32_thumb2_plt_entry
[0]
8650 | ((got_displacement
& 0x000000ff) << 16)
8651 | ((got_displacement
& 0x00000700) << 20)
8652 | ((got_displacement
& 0x00000800) >> 1)
8653 | ((got_displacement
& 0x0000f000) >> 12),
8655 put_arm_insn (htab
, output_bfd
,
8656 elf32_thumb2_plt_entry
[1]
8657 | ((got_displacement
& 0x00ff0000) )
8658 | ((got_displacement
& 0x07000000) << 4)
8659 | ((got_displacement
& 0x08000000) >> 17)
8660 | ((got_displacement
& 0xf0000000) >> 28),
8662 put_arm_insn (htab
, output_bfd
,
8663 elf32_thumb2_plt_entry
[2],
8665 put_arm_insn (htab
, output_bfd
,
8666 elf32_thumb2_plt_entry
[3],
8671 /* Calculate the displacement between the PLT slot and the
8672 entry in the GOT. The eight-byte offset accounts for the
8673 value produced by adding to pc in the first instruction
8675 got_displacement
= got_address
- (plt_address
+ 8);
8677 if (elf32_arm_plt_needs_thumb_stub_p (info
, arm_plt
))
8679 put_thumb_insn (htab
, output_bfd
,
8680 elf32_arm_plt_thumb_stub
[0], ptr
- 4);
8681 put_thumb_insn (htab
, output_bfd
,
8682 elf32_arm_plt_thumb_stub
[1], ptr
- 2);
8685 if (!elf32_arm_use_long_plt_entry
)
8687 BFD_ASSERT ((got_displacement
& 0xf0000000) == 0);
8689 put_arm_insn (htab
, output_bfd
,
8690 elf32_arm_plt_entry_short
[0]
8691 | ((got_displacement
& 0x0ff00000) >> 20),
8693 put_arm_insn (htab
, output_bfd
,
8694 elf32_arm_plt_entry_short
[1]
8695 | ((got_displacement
& 0x000ff000) >> 12),
8697 put_arm_insn (htab
, output_bfd
,
8698 elf32_arm_plt_entry_short
[2]
8699 | (got_displacement
& 0x00000fff),
8701 #ifdef FOUR_WORD_PLT
8702 bfd_put_32 (output_bfd
, elf32_arm_plt_entry_short
[3], ptr
+ 12);
8707 put_arm_insn (htab
, output_bfd
,
8708 elf32_arm_plt_entry_long
[0]
8709 | ((got_displacement
& 0xf0000000) >> 28),
8711 put_arm_insn (htab
, output_bfd
,
8712 elf32_arm_plt_entry_long
[1]
8713 | ((got_displacement
& 0x0ff00000) >> 20),
8715 put_arm_insn (htab
, output_bfd
,
8716 elf32_arm_plt_entry_long
[2]
8717 | ((got_displacement
& 0x000ff000) >> 12),
8719 put_arm_insn (htab
, output_bfd
,
8720 elf32_arm_plt_entry_long
[3]
8721 | (got_displacement
& 0x00000fff),
8726 /* Fill in the entry in the .rel(a).(i)plt section. */
8727 rel
.r_offset
= got_address
;
8731 /* .igot.plt entries use IRELATIVE relocations against SYM_VALUE.
8732 The dynamic linker or static executable then calls SYM_VALUE
8733 to determine the correct run-time value of the .igot.plt entry. */
8734 rel
.r_info
= ELF32_R_INFO (0, R_ARM_IRELATIVE
);
8735 initial_got_entry
= sym_value
;
8739 rel
.r_info
= ELF32_R_INFO (dynindx
, R_ARM_JUMP_SLOT
);
8740 initial_got_entry
= (splt
->output_section
->vma
8741 + splt
->output_offset
);
8744 /* Fill in the entry in the global offset table. */
8745 bfd_put_32 (output_bfd
, initial_got_entry
,
8746 sgot
->contents
+ got_offset
);
8750 elf32_arm_add_dynreloc (output_bfd
, info
, srel
, &rel
);
8753 loc
= srel
->contents
+ plt_index
* RELOC_SIZE (htab
);
8754 SWAP_RELOC_OUT (htab
) (output_bfd
, &rel
, loc
);
8760 /* Some relocations map to different relocations depending on the
8761 target. Return the real relocation. */
8764 arm_real_reloc_type (struct elf32_arm_link_hash_table
* globals
,
8770 if (globals
->target1_is_rel
)
8776 return globals
->target2_reloc
;
8783 /* Return the base VMA address which should be subtracted from real addresses
8784 when resolving @dtpoff relocation.
8785 This is PT_TLS segment p_vaddr. */
8788 dtpoff_base (struct bfd_link_info
*info
)
8790 /* If tls_sec is NULL, we should have signalled an error already. */
8791 if (elf_hash_table (info
)->tls_sec
== NULL
)
8793 return elf_hash_table (info
)->tls_sec
->vma
;
8796 /* Return the relocation value for @tpoff relocation
8797 if STT_TLS virtual address is ADDRESS. */
8800 tpoff (struct bfd_link_info
*info
, bfd_vma address
)
8802 struct elf_link_hash_table
*htab
= elf_hash_table (info
);
8805 /* If tls_sec is NULL, we should have signalled an error already. */
8806 if (htab
->tls_sec
== NULL
)
8808 base
= align_power ((bfd_vma
) TCB_SIZE
, htab
->tls_sec
->alignment_power
);
8809 return address
- htab
->tls_sec
->vma
+ base
;
8812 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
8813 VALUE is the relocation value. */
8815 static bfd_reloc_status_type
8816 elf32_arm_abs12_reloc (bfd
*abfd
, void *data
, bfd_vma value
)
8819 return bfd_reloc_overflow
;
8821 value
|= bfd_get_32 (abfd
, data
) & 0xfffff000;
8822 bfd_put_32 (abfd
, value
, data
);
8823 return bfd_reloc_ok
;
8826 /* Handle TLS relaxations. Relaxing is possible for symbols that use
8827 R_ARM_GOTDESC, R_ARM_{,THM_}TLS_CALL or
8828 R_ARM_{,THM_}TLS_DESCSEQ relocations, during a static link.
8830 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
8831 is to then call final_link_relocate. Return other values in the
8834 FIXME:When --emit-relocs is in effect, we'll emit relocs describing
8835 the pre-relaxed code. It would be nice if the relocs were updated
8836 to match the optimization. */
8838 static bfd_reloc_status_type
8839 elf32_arm_tls_relax (struct elf32_arm_link_hash_table
*globals
,
8840 bfd
*input_bfd
, asection
*input_sec
, bfd_byte
*contents
,
8841 Elf_Internal_Rela
*rel
, unsigned long is_local
)
8845 switch (ELF32_R_TYPE (rel
->r_info
))
8848 return bfd_reloc_notsupported
;
8850 case R_ARM_TLS_GOTDESC
:
8855 insn
= bfd_get_32 (input_bfd
, contents
+ rel
->r_offset
);
8857 insn
-= 5; /* THUMB */
8859 insn
-= 8; /* ARM */
8861 bfd_put_32 (input_bfd
, insn
, contents
+ rel
->r_offset
);
8862 return bfd_reloc_continue
;
8864 case R_ARM_THM_TLS_DESCSEQ
:
8866 insn
= bfd_get_16 (input_bfd
, contents
+ rel
->r_offset
);
8867 if ((insn
& 0xff78) == 0x4478) /* add rx, pc */
8871 bfd_put_16 (input_bfd
, 0x46c0, contents
+ rel
->r_offset
);
8873 else if ((insn
& 0xffc0) == 0x6840) /* ldr rx,[ry,#4] */
8877 bfd_put_16 (input_bfd
, 0x46c0, contents
+ rel
->r_offset
);
8880 bfd_put_16 (input_bfd
, insn
& 0xf83f, contents
+ rel
->r_offset
);
8882 else if ((insn
& 0xff87) == 0x4780) /* blx rx */
8886 bfd_put_16 (input_bfd
, 0x46c0, contents
+ rel
->r_offset
);
8889 bfd_put_16 (input_bfd
, 0x4600 | (insn
& 0x78),
8890 contents
+ rel
->r_offset
);
8894 if ((insn
& 0xf000) == 0xf000 || (insn
& 0xf800) == 0xe800)
8895 /* It's a 32 bit instruction, fetch the rest of it for
8896 error generation. */
8898 | bfd_get_16 (input_bfd
, contents
+ rel
->r_offset
+ 2);
8899 (*_bfd_error_handler
)
8900 (_("%B(%A+0x%lx):unexpected Thumb instruction '0x%x' in TLS trampoline"),
8901 input_bfd
, input_sec
, (unsigned long)rel
->r_offset
, insn
);
8902 return bfd_reloc_notsupported
;
8906 case R_ARM_TLS_DESCSEQ
:
8908 insn
= bfd_get_32 (input_bfd
, contents
+ rel
->r_offset
);
8909 if ((insn
& 0xffff0ff0) == 0xe08f0000) /* add rx,pc,ry */
8913 bfd_put_32 (input_bfd
, 0xe1a00000 | (insn
& 0xffff),
8914 contents
+ rel
->r_offset
);
8916 else if ((insn
& 0xfff00fff) == 0xe5900004) /* ldr rx,[ry,#4]*/
8920 bfd_put_32 (input_bfd
, 0xe1a00000, contents
+ rel
->r_offset
);
8923 bfd_put_32 (input_bfd
, insn
& 0xfffff000,
8924 contents
+ rel
->r_offset
);
8926 else if ((insn
& 0xfffffff0) == 0xe12fff30) /* blx rx */
8930 bfd_put_32 (input_bfd
, 0xe1a00000, contents
+ rel
->r_offset
);
8933 bfd_put_32 (input_bfd
, 0xe1a00000 | (insn
& 0xf),
8934 contents
+ rel
->r_offset
);
8938 (*_bfd_error_handler
)
8939 (_("%B(%A+0x%lx):unexpected ARM instruction '0x%x' in TLS trampoline"),
8940 input_bfd
, input_sec
, (unsigned long)rel
->r_offset
, insn
);
8941 return bfd_reloc_notsupported
;
8945 case R_ARM_TLS_CALL
:
8946 /* GD->IE relaxation, turn the instruction into 'nop' or
8947 'ldr r0, [pc,r0]' */
8948 insn
= is_local
? 0xe1a00000 : 0xe79f0000;
8949 bfd_put_32 (input_bfd
, insn
, contents
+ rel
->r_offset
);
8952 case R_ARM_THM_TLS_CALL
:
8953 /* GD->IE relaxation. */
8955 /* add r0,pc; ldr r0, [r0] */
8957 else if (arch_has_thumb2_nop (globals
))
8964 bfd_put_16 (input_bfd
, insn
>> 16, contents
+ rel
->r_offset
);
8965 bfd_put_16 (input_bfd
, insn
& 0xffff, contents
+ rel
->r_offset
+ 2);
8968 return bfd_reloc_ok
;
8971 /* For a given value of n, calculate the value of G_n as required to
8972 deal with group relocations. We return it in the form of an
8973 encoded constant-and-rotation, together with the final residual. If n is
8974 specified as less than zero, then final_residual is filled with the
8975 input value and no further action is performed. */
8978 calculate_group_reloc_mask (bfd_vma value
, int n
, bfd_vma
*final_residual
)
8982 bfd_vma encoded_g_n
= 0;
8983 bfd_vma residual
= value
; /* Also known as Y_n. */
8985 for (current_n
= 0; current_n
<= n
; current_n
++)
8989 /* Calculate which part of the value to mask. */
8996 /* Determine the most significant bit in the residual and
8997 align the resulting value to a 2-bit boundary. */
8998 for (msb
= 30; msb
>= 0; msb
-= 2)
8999 if (residual
& (3 << msb
))
9002 /* The desired shift is now (msb - 6), or zero, whichever
9009 /* Calculate g_n in 32-bit as well as encoded constant+rotation form. */
9010 g_n
= residual
& (0xff << shift
);
9011 encoded_g_n
= (g_n
>> shift
)
9012 | ((g_n
<= 0xff ? 0 : (32 - shift
) / 2) << 8);
9014 /* Calculate the residual for the next time around. */
9018 *final_residual
= residual
;
9023 /* Given an ARM instruction, determine whether it is an ADD or a SUB.
9024 Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. */
9027 identify_add_or_sub (bfd_vma insn
)
9029 int opcode
= insn
& 0x1e00000;
9031 if (opcode
== 1 << 23) /* ADD */
9034 if (opcode
== 1 << 22) /* SUB */
9040 /* Perform a relocation as part of a final link. */
9042 static bfd_reloc_status_type
9043 elf32_arm_final_link_relocate (reloc_howto_type
* howto
,
9046 asection
* input_section
,
9047 bfd_byte
* contents
,
9048 Elf_Internal_Rela
* rel
,
9050 struct bfd_link_info
* info
,
9052 const char * sym_name
,
9053 unsigned char st_type
,
9054 enum arm_st_branch_type branch_type
,
9055 struct elf_link_hash_entry
* h
,
9056 bfd_boolean
* unresolved_reloc_p
,
9057 char ** error_message
)
9059 unsigned long r_type
= howto
->type
;
9060 unsigned long r_symndx
;
9061 bfd_byte
* hit_data
= contents
+ rel
->r_offset
;
9062 bfd_vma
* local_got_offsets
;
9063 bfd_vma
* local_tlsdesc_gotents
;
9066 asection
* sreloc
= NULL
;
9069 bfd_signed_vma signed_addend
;
9070 unsigned char dynreloc_st_type
;
9071 bfd_vma dynreloc_value
;
9072 struct elf32_arm_link_hash_table
* globals
;
9073 struct elf32_arm_link_hash_entry
*eh
;
9074 union gotplt_union
*root_plt
;
9075 struct arm_plt_info
*arm_plt
;
9077 bfd_vma gotplt_offset
;
9078 bfd_boolean has_iplt_entry
;
9080 globals
= elf32_arm_hash_table (info
);
9081 if (globals
== NULL
)
9082 return bfd_reloc_notsupported
;
9084 BFD_ASSERT (is_arm_elf (input_bfd
));
9086 /* Some relocation types map to different relocations depending on the
9087 target. We pick the right one here. */
9088 r_type
= arm_real_reloc_type (globals
, r_type
);
9090 /* It is possible to have linker relaxations on some TLS access
9091 models. Update our information here. */
9092 r_type
= elf32_arm_tls_transition (info
, r_type
, h
);
9094 if (r_type
!= howto
->type
)
9095 howto
= elf32_arm_howto_from_type (r_type
);
9097 eh
= (struct elf32_arm_link_hash_entry
*) h
;
9098 sgot
= globals
->root
.sgot
;
9099 local_got_offsets
= elf_local_got_offsets (input_bfd
);
9100 local_tlsdesc_gotents
= elf32_arm_local_tlsdesc_gotent (input_bfd
);
9102 if (globals
->root
.dynamic_sections_created
)
9103 srelgot
= globals
->root
.srelgot
;
9107 r_symndx
= ELF32_R_SYM (rel
->r_info
);
9109 if (globals
->use_rel
)
9111 addend
= bfd_get_32 (input_bfd
, hit_data
) & howto
->src_mask
;
9113 if (addend
& ((howto
->src_mask
+ 1) >> 1))
9116 signed_addend
&= ~ howto
->src_mask
;
9117 signed_addend
|= addend
;
9120 signed_addend
= addend
;
9123 addend
= signed_addend
= rel
->r_addend
;
9125 /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
9126 are resolving a function call relocation. */
9127 if (using_thumb_only (globals
)
9128 && (r_type
== R_ARM_THM_CALL
9129 || r_type
== R_ARM_THM_JUMP24
)
9130 && branch_type
== ST_BRANCH_TO_ARM
)
9131 branch_type
= ST_BRANCH_TO_THUMB
;
9133 /* Record the symbol information that should be used in dynamic
9135 dynreloc_st_type
= st_type
;
9136 dynreloc_value
= value
;
9137 if (branch_type
== ST_BRANCH_TO_THUMB
)
9138 dynreloc_value
|= 1;
9140 /* Find out whether the symbol has a PLT. Set ST_VALUE, BRANCH_TYPE and
9141 VALUE appropriately for relocations that we resolve at link time. */
9142 has_iplt_entry
= FALSE
;
9143 if (elf32_arm_get_plt_info (input_bfd
, eh
, r_symndx
, &root_plt
, &arm_plt
)
9144 && root_plt
->offset
!= (bfd_vma
) -1)
9146 plt_offset
= root_plt
->offset
;
9147 gotplt_offset
= arm_plt
->got_offset
;
9149 if (h
== NULL
|| eh
->is_iplt
)
9151 has_iplt_entry
= TRUE
;
9152 splt
= globals
->root
.iplt
;
9154 /* Populate .iplt entries here, because not all of them will
9155 be seen by finish_dynamic_symbol. The lower bit is set if
9156 we have already populated the entry. */
9161 if (elf32_arm_populate_plt_entry (output_bfd
, info
, root_plt
, arm_plt
,
9162 -1, dynreloc_value
))
9163 root_plt
->offset
|= 1;
9165 return bfd_reloc_notsupported
;
9168 /* Static relocations always resolve to the .iplt entry. */
9170 value
= (splt
->output_section
->vma
9171 + splt
->output_offset
9173 branch_type
= ST_BRANCH_TO_ARM
;
9175 /* If there are non-call relocations that resolve to the .iplt
9176 entry, then all dynamic ones must too. */
9177 if (arm_plt
->noncall_refcount
!= 0)
9179 dynreloc_st_type
= st_type
;
9180 dynreloc_value
= value
;
9184 /* We populate the .plt entry in finish_dynamic_symbol. */
9185 splt
= globals
->root
.splt
;
9190 plt_offset
= (bfd_vma
) -1;
9191 gotplt_offset
= (bfd_vma
) -1;
9197 /* We don't need to find a value for this symbol. It's just a
9199 *unresolved_reloc_p
= FALSE
;
9200 return bfd_reloc_ok
;
9203 if (!globals
->vxworks_p
)
9204 return elf32_arm_abs12_reloc (input_bfd
, hit_data
, value
+ addend
);
9208 case R_ARM_ABS32_NOI
:
9210 case R_ARM_REL32_NOI
:
9216 /* Handle relocations which should use the PLT entry. ABS32/REL32
9217 will use the symbol's value, which may point to a PLT entry, but we
9218 don't need to handle that here. If we created a PLT entry, all
9219 branches in this object should go to it, except if the PLT is too
9220 far away, in which case a long branch stub should be inserted. */
9221 if ((r_type
!= R_ARM_ABS32
&& r_type
!= R_ARM_REL32
9222 && r_type
!= R_ARM_ABS32_NOI
&& r_type
!= R_ARM_REL32_NOI
9223 && r_type
!= R_ARM_CALL
9224 && r_type
!= R_ARM_JUMP24
9225 && r_type
!= R_ARM_PLT32
)
9226 && plt_offset
!= (bfd_vma
) -1)
9228 /* If we've created a .plt section, and assigned a PLT entry
9229 to this function, it must either be a STT_GNU_IFUNC reference
9230 or not be known to bind locally. In other cases, we should
9231 have cleared the PLT entry by now. */
9232 BFD_ASSERT (has_iplt_entry
|| !SYMBOL_CALLS_LOCAL (info
, h
));
9234 value
= (splt
->output_section
->vma
9235 + splt
->output_offset
9237 *unresolved_reloc_p
= FALSE
;
9238 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
9239 contents
, rel
->r_offset
, value
,
9243 /* When generating a shared object or relocatable executable, these
9244 relocations are copied into the output file to be resolved at
9246 if ((bfd_link_pic (info
)
9247 || globals
->root
.is_relocatable_executable
)
9248 && (input_section
->flags
& SEC_ALLOC
)
9249 && !(globals
->vxworks_p
9250 && strcmp (input_section
->output_section
->name
,
9252 && ((r_type
!= R_ARM_REL32
&& r_type
!= R_ARM_REL32_NOI
)
9253 || !SYMBOL_CALLS_LOCAL (info
, h
))
9254 && !(input_bfd
== globals
->stub_bfd
9255 && strstr (input_section
->name
, STUB_SUFFIX
))
9257 || ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
9258 || h
->root
.type
!= bfd_link_hash_undefweak
)
9259 && r_type
!= R_ARM_PC24
9260 && r_type
!= R_ARM_CALL
9261 && r_type
!= R_ARM_JUMP24
9262 && r_type
!= R_ARM_PREL31
9263 && r_type
!= R_ARM_PLT32
)
9265 Elf_Internal_Rela outrel
;
9266 bfd_boolean skip
, relocate
;
9268 if ((r_type
== R_ARM_REL32
|| r_type
== R_ARM_REL32_NOI
)
9271 char *v
= _("shared object");
9273 if (bfd_link_executable (info
))
9274 v
= _("PIE executable");
9276 (*_bfd_error_handler
)
9277 (_("%B: relocation %s against external or undefined symbol `%s'"
9278 " can not be used when making a %s; recompile with -fPIC"), input_bfd
,
9279 elf32_arm_howto_table_1
[r_type
].name
, h
->root
.root
.string
, v
);
9280 return bfd_reloc_notsupported
;
9283 *unresolved_reloc_p
= FALSE
;
9285 if (sreloc
== NULL
&& globals
->root
.dynamic_sections_created
)
9287 sreloc
= _bfd_elf_get_dynamic_reloc_section (input_bfd
, input_section
,
9288 ! globals
->use_rel
);
9291 return bfd_reloc_notsupported
;
9297 outrel
.r_addend
= addend
;
9299 _bfd_elf_section_offset (output_bfd
, info
, input_section
,
9301 if (outrel
.r_offset
== (bfd_vma
) -1)
9303 else if (outrel
.r_offset
== (bfd_vma
) -2)
9304 skip
= TRUE
, relocate
= TRUE
;
9305 outrel
.r_offset
+= (input_section
->output_section
->vma
9306 + input_section
->output_offset
);
9309 memset (&outrel
, 0, sizeof outrel
);
9312 && (!bfd_link_pic (info
)
9313 || !SYMBOLIC_BIND (info
, h
)
9314 || !h
->def_regular
))
9315 outrel
.r_info
= ELF32_R_INFO (h
->dynindx
, r_type
);
9320 /* This symbol is local, or marked to become local. */
9321 BFD_ASSERT (r_type
== R_ARM_ABS32
|| r_type
== R_ARM_ABS32_NOI
);
9322 if (globals
->symbian_p
)
9326 /* On Symbian OS, the data segment and text segement
9327 can be relocated independently. Therefore, we
9328 must indicate the segment to which this
9329 relocation is relative. The BPABI allows us to
9330 use any symbol in the right segment; we just use
9331 the section symbol as it is convenient. (We
9332 cannot use the symbol given by "h" directly as it
9333 will not appear in the dynamic symbol table.)
9335 Note that the dynamic linker ignores the section
9336 symbol value, so we don't subtract osec->vma
9337 from the emitted reloc addend. */
9339 osec
= sym_sec
->output_section
;
9341 osec
= input_section
->output_section
;
9342 symbol
= elf_section_data (osec
)->dynindx
;
9345 struct elf_link_hash_table
*htab
= elf_hash_table (info
);
9347 if ((osec
->flags
& SEC_READONLY
) == 0
9348 && htab
->data_index_section
!= NULL
)
9349 osec
= htab
->data_index_section
;
9351 osec
= htab
->text_index_section
;
9352 symbol
= elf_section_data (osec
)->dynindx
;
9354 BFD_ASSERT (symbol
!= 0);
9357 /* On SVR4-ish systems, the dynamic loader cannot
9358 relocate the text and data segments independently,
9359 so the symbol does not matter. */
9361 if (dynreloc_st_type
== STT_GNU_IFUNC
)
9362 /* We have an STT_GNU_IFUNC symbol that doesn't resolve
9363 to the .iplt entry. Instead, every non-call reference
9364 must use an R_ARM_IRELATIVE relocation to obtain the
9365 correct run-time address. */
9366 outrel
.r_info
= ELF32_R_INFO (symbol
, R_ARM_IRELATIVE
);
9368 outrel
.r_info
= ELF32_R_INFO (symbol
, R_ARM_RELATIVE
);
9369 if (globals
->use_rel
)
9372 outrel
.r_addend
+= dynreloc_value
;
9375 elf32_arm_add_dynreloc (output_bfd
, info
, sreloc
, &outrel
);
9377 /* If this reloc is against an external symbol, we do not want to
9378 fiddle with the addend. Otherwise, we need to include the symbol
9379 value so that it becomes an addend for the dynamic reloc. */
9381 return bfd_reloc_ok
;
9383 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
9384 contents
, rel
->r_offset
,
9385 dynreloc_value
, (bfd_vma
) 0);
9387 else switch (r_type
)
9390 return elf32_arm_abs12_reloc (input_bfd
, hit_data
, value
+ addend
);
9392 case R_ARM_XPC25
: /* Arm BLX instruction. */
9395 case R_ARM_PC24
: /* Arm B/BL instruction. */
9398 struct elf32_arm_stub_hash_entry
*stub_entry
= NULL
;
9400 if (r_type
== R_ARM_XPC25
)
9402 /* Check for Arm calling Arm function. */
9403 /* FIXME: Should we translate the instruction into a BL
9404 instruction instead ? */
9405 if (branch_type
!= ST_BRANCH_TO_THUMB
)
9406 (*_bfd_error_handler
)
9407 (_("\%B: Warning: Arm BLX instruction targets Arm function '%s'."),
9409 h
? h
->root
.root
.string
: "(local)");
9411 else if (r_type
== R_ARM_PC24
)
9413 /* Check for Arm calling Thumb function. */
9414 if (branch_type
== ST_BRANCH_TO_THUMB
)
9416 if (elf32_arm_to_thumb_stub (info
, sym_name
, input_bfd
,
9417 output_bfd
, input_section
,
9418 hit_data
, sym_sec
, rel
->r_offset
,
9419 signed_addend
, value
,
9421 return bfd_reloc_ok
;
9423 return bfd_reloc_dangerous
;
9427 /* Check if a stub has to be inserted because the
9428 destination is too far or we are changing mode. */
9429 if ( r_type
== R_ARM_CALL
9430 || r_type
== R_ARM_JUMP24
9431 || r_type
== R_ARM_PLT32
)
9433 enum elf32_arm_stub_type stub_type
= arm_stub_none
;
9434 struct elf32_arm_link_hash_entry
*hash
;
9436 hash
= (struct elf32_arm_link_hash_entry
*) h
;
9437 stub_type
= arm_type_of_stub (info
, input_section
, rel
,
9438 st_type
, &branch_type
,
9439 hash
, value
, sym_sec
,
9440 input_bfd
, sym_name
);
9442 if (stub_type
!= arm_stub_none
)
9444 /* The target is out of reach, so redirect the
9445 branch to the local stub for this function. */
9446 stub_entry
= elf32_arm_get_stub_entry (input_section
,
9451 if (stub_entry
!= NULL
)
9452 value
= (stub_entry
->stub_offset
9453 + stub_entry
->stub_sec
->output_offset
9454 + stub_entry
->stub_sec
->output_section
->vma
);
9456 if (plt_offset
!= (bfd_vma
) -1)
9457 *unresolved_reloc_p
= FALSE
;
9462 /* If the call goes through a PLT entry, make sure to
9463 check distance to the right destination address. */
9464 if (plt_offset
!= (bfd_vma
) -1)
9466 value
= (splt
->output_section
->vma
9467 + splt
->output_offset
9469 *unresolved_reloc_p
= FALSE
;
9470 /* The PLT entry is in ARM mode, regardless of the
9472 branch_type
= ST_BRANCH_TO_ARM
;
9477 /* The ARM ELF ABI says that this reloc is computed as: S - P + A
9479 S is the address of the symbol in the relocation.
9480 P is address of the instruction being relocated.
9481 A is the addend (extracted from the instruction) in bytes.
9483 S is held in 'value'.
9484 P is the base address of the section containing the
9485 instruction plus the offset of the reloc into that
9487 (input_section->output_section->vma +
9488 input_section->output_offset +
9490 A is the addend, converted into bytes, ie:
9493 Note: None of these operations have knowledge of the pipeline
9494 size of the processor, thus it is up to the assembler to
9495 encode this information into the addend. */
9496 value
-= (input_section
->output_section
->vma
9497 + input_section
->output_offset
);
9498 value
-= rel
->r_offset
;
9499 if (globals
->use_rel
)
9500 value
+= (signed_addend
<< howto
->size
);
9502 /* RELA addends do not have to be adjusted by howto->size. */
9503 value
+= signed_addend
;
9505 signed_addend
= value
;
9506 signed_addend
>>= howto
->rightshift
;
9508 /* A branch to an undefined weak symbol is turned into a jump to
9509 the next instruction unless a PLT entry will be created.
9510 Do the same for local undefined symbols (but not for STN_UNDEF).
9511 The jump to the next instruction is optimized as a NOP depending
9512 on the architecture. */
9513 if (h
? (h
->root
.type
== bfd_link_hash_undefweak
9514 && plt_offset
== (bfd_vma
) -1)
9515 : r_symndx
!= STN_UNDEF
&& bfd_is_und_section (sym_sec
))
9517 value
= (bfd_get_32 (input_bfd
, hit_data
) & 0xf0000000);
9519 if (arch_has_arm_nop (globals
))
9520 value
|= 0x0320f000;
9522 value
|= 0x01a00000; /* Using pre-UAL nop: mov r0, r0. */
9526 /* Perform a signed range check. */
9527 if ( signed_addend
> ((bfd_signed_vma
) (howto
->dst_mask
>> 1))
9528 || signed_addend
< - ((bfd_signed_vma
) ((howto
->dst_mask
+ 1) >> 1)))
9529 return bfd_reloc_overflow
;
9531 addend
= (value
& 2);
9533 value
= (signed_addend
& howto
->dst_mask
)
9534 | (bfd_get_32 (input_bfd
, hit_data
) & (~ howto
->dst_mask
));
9536 if (r_type
== R_ARM_CALL
)
9538 /* Set the H bit in the BLX instruction. */
9539 if (branch_type
== ST_BRANCH_TO_THUMB
)
9544 value
&= ~(bfd_vma
)(1 << 24);
9547 /* Select the correct instruction (BL or BLX). */
9548 /* Only if we are not handling a BL to a stub. In this
9549 case, mode switching is performed by the stub. */
9550 if (branch_type
== ST_BRANCH_TO_THUMB
&& !stub_entry
)
9552 else if (stub_entry
|| branch_type
!= ST_BRANCH_UNKNOWN
)
9554 value
&= ~(bfd_vma
)(1 << 28);
9564 if (branch_type
== ST_BRANCH_TO_THUMB
)
9568 case R_ARM_ABS32_NOI
:
9574 if (branch_type
== ST_BRANCH_TO_THUMB
)
9576 value
-= (input_section
->output_section
->vma
9577 + input_section
->output_offset
+ rel
->r_offset
);
9580 case R_ARM_REL32_NOI
:
9582 value
-= (input_section
->output_section
->vma
9583 + input_section
->output_offset
+ rel
->r_offset
);
9587 value
-= (input_section
->output_section
->vma
9588 + input_section
->output_offset
+ rel
->r_offset
);
9589 value
+= signed_addend
;
9590 if (! h
|| h
->root
.type
!= bfd_link_hash_undefweak
)
9592 /* Check for overflow. */
9593 if ((value
^ (value
>> 1)) & (1 << 30))
9594 return bfd_reloc_overflow
;
9596 value
&= 0x7fffffff;
9597 value
|= (bfd_get_32 (input_bfd
, hit_data
) & 0x80000000);
9598 if (branch_type
== ST_BRANCH_TO_THUMB
)
9603 bfd_put_32 (input_bfd
, value
, hit_data
);
9604 return bfd_reloc_ok
;
9607 /* PR 16202: Refetch the addend using the correct size. */
9608 if (globals
->use_rel
)
9609 addend
= bfd_get_8 (input_bfd
, hit_data
);
9612 /* There is no way to tell whether the user intended to use a signed or
9613 unsigned addend. When checking for overflow we accept either,
9614 as specified by the AAELF. */
9615 if ((long) value
> 0xff || (long) value
< -0x80)
9616 return bfd_reloc_overflow
;
9618 bfd_put_8 (input_bfd
, value
, hit_data
);
9619 return bfd_reloc_ok
;
9622 /* PR 16202: Refetch the addend using the correct size. */
9623 if (globals
->use_rel
)
9624 addend
= bfd_get_16 (input_bfd
, hit_data
);
9627 /* See comment for R_ARM_ABS8. */
9628 if ((long) value
> 0xffff || (long) value
< -0x8000)
9629 return bfd_reloc_overflow
;
9631 bfd_put_16 (input_bfd
, value
, hit_data
);
9632 return bfd_reloc_ok
;
9634 case R_ARM_THM_ABS5
:
9635 /* Support ldr and str instructions for the thumb. */
9636 if (globals
->use_rel
)
9638 /* Need to refetch addend. */
9639 addend
= bfd_get_16 (input_bfd
, hit_data
) & howto
->src_mask
;
9640 /* ??? Need to determine shift amount from operand size. */
9641 addend
>>= howto
->rightshift
;
9645 /* ??? Isn't value unsigned? */
9646 if ((long) value
> 0x1f || (long) value
< -0x10)
9647 return bfd_reloc_overflow
;
9649 /* ??? Value needs to be properly shifted into place first. */
9650 value
|= bfd_get_16 (input_bfd
, hit_data
) & 0xf83f;
9651 bfd_put_16 (input_bfd
, value
, hit_data
);
9652 return bfd_reloc_ok
;
9654 case R_ARM_THM_ALU_PREL_11_0
:
9655 /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw). */
9658 bfd_signed_vma relocation
;
9660 insn
= (bfd_get_16 (input_bfd
, hit_data
) << 16)
9661 | bfd_get_16 (input_bfd
, hit_data
+ 2);
9663 if (globals
->use_rel
)
9665 signed_addend
= (insn
& 0xff) | ((insn
& 0x7000) >> 4)
9666 | ((insn
& (1 << 26)) >> 15);
9667 if (insn
& 0xf00000)
9668 signed_addend
= -signed_addend
;
9671 relocation
= value
+ signed_addend
;
9672 relocation
-= Pa (input_section
->output_section
->vma
9673 + input_section
->output_offset
9678 if (value
>= 0x1000)
9679 return bfd_reloc_overflow
;
9681 insn
= (insn
& 0xfb0f8f00) | (value
& 0xff)
9682 | ((value
& 0x700) << 4)
9683 | ((value
& 0x800) << 15);
9687 bfd_put_16 (input_bfd
, insn
>> 16, hit_data
);
9688 bfd_put_16 (input_bfd
, insn
& 0xffff, hit_data
+ 2);
9690 return bfd_reloc_ok
;
9694 /* PR 10073: This reloc is not generated by the GNU toolchain,
9695 but it is supported for compatibility with third party libraries
9696 generated by other compilers, specifically the ARM/IAR. */
9699 bfd_signed_vma relocation
;
9701 insn
= bfd_get_16 (input_bfd
, hit_data
);
9703 if (globals
->use_rel
)
9704 addend
= ((((insn
& 0x00ff) << 2) + 4) & 0x3ff) -4;
9706 relocation
= value
+ addend
;
9707 relocation
-= Pa (input_section
->output_section
->vma
9708 + input_section
->output_offset
9713 /* We do not check for overflow of this reloc. Although strictly
9714 speaking this is incorrect, it appears to be necessary in order
9715 to work with IAR generated relocs. Since GCC and GAS do not
9716 generate R_ARM_THM_PC8 relocs, the lack of a check should not be
9717 a problem for them. */
9720 insn
= (insn
& 0xff00) | (value
>> 2);
9722 bfd_put_16 (input_bfd
, insn
, hit_data
);
9724 return bfd_reloc_ok
;
9727 case R_ARM_THM_PC12
:
9728 /* Corresponds to: ldr.w reg, [pc, #offset]. */
9731 bfd_signed_vma relocation
;
9733 insn
= (bfd_get_16 (input_bfd
, hit_data
) << 16)
9734 | bfd_get_16 (input_bfd
, hit_data
+ 2);
9736 if (globals
->use_rel
)
9738 signed_addend
= insn
& 0xfff;
9739 if (!(insn
& (1 << 23)))
9740 signed_addend
= -signed_addend
;
9743 relocation
= value
+ signed_addend
;
9744 relocation
-= Pa (input_section
->output_section
->vma
9745 + input_section
->output_offset
9750 if (value
>= 0x1000)
9751 return bfd_reloc_overflow
;
9753 insn
= (insn
& 0xff7ff000) | value
;
9754 if (relocation
>= 0)
9757 bfd_put_16 (input_bfd
, insn
>> 16, hit_data
);
9758 bfd_put_16 (input_bfd
, insn
& 0xffff, hit_data
+ 2);
9760 return bfd_reloc_ok
;
9763 case R_ARM_THM_XPC22
:
9764 case R_ARM_THM_CALL
:
9765 case R_ARM_THM_JUMP24
:
9766 /* Thumb BL (branch long instruction). */
9770 bfd_boolean overflow
= FALSE
;
9771 bfd_vma upper_insn
= bfd_get_16 (input_bfd
, hit_data
);
9772 bfd_vma lower_insn
= bfd_get_16 (input_bfd
, hit_data
+ 2);
9773 bfd_signed_vma reloc_signed_max
;
9774 bfd_signed_vma reloc_signed_min
;
9776 bfd_signed_vma signed_check
;
9778 const int thumb2
= using_thumb2 (globals
);
9780 /* A branch to an undefined weak symbol is turned into a jump to
9781 the next instruction unless a PLT entry will be created.
9782 The jump to the next instruction is optimized as a NOP.W for
9783 Thumb-2 enabled architectures. */
9784 if (h
&& h
->root
.type
== bfd_link_hash_undefweak
9785 && plt_offset
== (bfd_vma
) -1)
9787 if (arch_has_thumb2_nop (globals
))
9789 bfd_put_16 (input_bfd
, 0xf3af, hit_data
);
9790 bfd_put_16 (input_bfd
, 0x8000, hit_data
+ 2);
9794 bfd_put_16 (input_bfd
, 0xe000, hit_data
);
9795 bfd_put_16 (input_bfd
, 0xbf00, hit_data
+ 2);
9797 return bfd_reloc_ok
;
9800 /* Fetch the addend. We use the Thumb-2 encoding (backwards compatible
9801 with Thumb-1) involving the J1 and J2 bits. */
9802 if (globals
->use_rel
)
9804 bfd_vma s
= (upper_insn
& (1 << 10)) >> 10;
9805 bfd_vma upper
= upper_insn
& 0x3ff;
9806 bfd_vma lower
= lower_insn
& 0x7ff;
9807 bfd_vma j1
= (lower_insn
& (1 << 13)) >> 13;
9808 bfd_vma j2
= (lower_insn
& (1 << 11)) >> 11;
9809 bfd_vma i1
= j1
^ s
? 0 : 1;
9810 bfd_vma i2
= j2
^ s
? 0 : 1;
9812 addend
= (i1
<< 23) | (i2
<< 22) | (upper
<< 12) | (lower
<< 1);
9814 addend
= (addend
| ((s
? 0 : 1) << 24)) - (1 << 24);
9816 signed_addend
= addend
;
9819 if (r_type
== R_ARM_THM_XPC22
)
9821 /* Check for Thumb to Thumb call. */
9822 /* FIXME: Should we translate the instruction into a BL
9823 instruction instead ? */
9824 if (branch_type
== ST_BRANCH_TO_THUMB
)
9825 (*_bfd_error_handler
)
9826 (_("%B: Warning: Thumb BLX instruction targets thumb function '%s'."),
9828 h
? h
->root
.root
.string
: "(local)");
9832 /* If it is not a call to Thumb, assume call to Arm.
9833 If it is a call relative to a section name, then it is not a
9834 function call at all, but rather a long jump. Calls through
9835 the PLT do not require stubs. */
9836 if (branch_type
== ST_BRANCH_TO_ARM
&& plt_offset
== (bfd_vma
) -1)
9838 if (globals
->use_blx
&& r_type
== R_ARM_THM_CALL
)
9840 /* Convert BL to BLX. */
9841 lower_insn
= (lower_insn
& ~0x1000) | 0x0800;
9843 else if (( r_type
!= R_ARM_THM_CALL
)
9844 && (r_type
!= R_ARM_THM_JUMP24
))
9846 if (elf32_thumb_to_arm_stub
9847 (info
, sym_name
, input_bfd
, output_bfd
, input_section
,
9848 hit_data
, sym_sec
, rel
->r_offset
, signed_addend
, value
,
9850 return bfd_reloc_ok
;
9852 return bfd_reloc_dangerous
;
9855 else if (branch_type
== ST_BRANCH_TO_THUMB
9857 && r_type
== R_ARM_THM_CALL
)
9859 /* Make sure this is a BL. */
9860 lower_insn
|= 0x1800;
9864 enum elf32_arm_stub_type stub_type
= arm_stub_none
;
9865 if (r_type
== R_ARM_THM_CALL
|| r_type
== R_ARM_THM_JUMP24
)
9867 /* Check if a stub has to be inserted because the destination
9869 struct elf32_arm_stub_hash_entry
*stub_entry
;
9870 struct elf32_arm_link_hash_entry
*hash
;
9872 hash
= (struct elf32_arm_link_hash_entry
*) h
;
9874 stub_type
= arm_type_of_stub (info
, input_section
, rel
,
9875 st_type
, &branch_type
,
9876 hash
, value
, sym_sec
,
9877 input_bfd
, sym_name
);
9879 if (stub_type
!= arm_stub_none
)
9881 /* The target is out of reach or we are changing modes, so
9882 redirect the branch to the local stub for this
9884 stub_entry
= elf32_arm_get_stub_entry (input_section
,
9888 if (stub_entry
!= NULL
)
9890 value
= (stub_entry
->stub_offset
9891 + stub_entry
->stub_sec
->output_offset
9892 + stub_entry
->stub_sec
->output_section
->vma
);
9894 if (plt_offset
!= (bfd_vma
) -1)
9895 *unresolved_reloc_p
= FALSE
;
9898 /* If this call becomes a call to Arm, force BLX. */
9899 if (globals
->use_blx
&& (r_type
== R_ARM_THM_CALL
))
9902 && !arm_stub_is_thumb (stub_entry
->stub_type
))
9903 || branch_type
!= ST_BRANCH_TO_THUMB
)
9904 lower_insn
= (lower_insn
& ~0x1000) | 0x0800;
9909 /* Handle calls via the PLT. */
9910 if (stub_type
== arm_stub_none
&& plt_offset
!= (bfd_vma
) -1)
9912 value
= (splt
->output_section
->vma
9913 + splt
->output_offset
9916 if (globals
->use_blx
9917 && r_type
== R_ARM_THM_CALL
9918 && ! using_thumb_only (globals
))
9920 /* If the Thumb BLX instruction is available, convert
9921 the BL to a BLX instruction to call the ARM-mode
9923 lower_insn
= (lower_insn
& ~0x1000) | 0x0800;
9924 branch_type
= ST_BRANCH_TO_ARM
;
9928 if (! using_thumb_only (globals
))
9929 /* Target the Thumb stub before the ARM PLT entry. */
9930 value
-= PLT_THUMB_STUB_SIZE
;
9931 branch_type
= ST_BRANCH_TO_THUMB
;
9933 *unresolved_reloc_p
= FALSE
;
9936 relocation
= value
+ signed_addend
;
9938 relocation
-= (input_section
->output_section
->vma
9939 + input_section
->output_offset
9942 check
= relocation
>> howto
->rightshift
;
9944 /* If this is a signed value, the rightshift just dropped
9945 leading 1 bits (assuming twos complement). */
9946 if ((bfd_signed_vma
) relocation
>= 0)
9947 signed_check
= check
;
9949 signed_check
= check
| ~((bfd_vma
) -1 >> howto
->rightshift
);
9951 /* Calculate the permissible maximum and minimum values for
9952 this relocation according to whether we're relocating for
9954 bitsize
= howto
->bitsize
;
9957 reloc_signed_max
= (1 << (bitsize
- 1)) - 1;
9958 reloc_signed_min
= ~reloc_signed_max
;
9960 /* Assumes two's complement. */
9961 if (signed_check
> reloc_signed_max
|| signed_check
< reloc_signed_min
)
9964 if ((lower_insn
& 0x5000) == 0x4000)
9965 /* For a BLX instruction, make sure that the relocation is rounded up
9966 to a word boundary. This follows the semantics of the instruction
9967 which specifies that bit 1 of the target address will come from bit
9968 1 of the base address. */
9969 relocation
= (relocation
+ 2) & ~ 3;
9971 /* Put RELOCATION back into the insn. Assumes two's complement.
9972 We use the Thumb-2 encoding, which is safe even if dealing with
9973 a Thumb-1 instruction by virtue of our overflow check above. */
9974 reloc_sign
= (signed_check
< 0) ? 1 : 0;
9975 upper_insn
= (upper_insn
& ~(bfd_vma
) 0x7ff)
9976 | ((relocation
>> 12) & 0x3ff)
9977 | (reloc_sign
<< 10);
9978 lower_insn
= (lower_insn
& ~(bfd_vma
) 0x2fff)
9979 | (((!((relocation
>> 23) & 1)) ^ reloc_sign
) << 13)
9980 | (((!((relocation
>> 22) & 1)) ^ reloc_sign
) << 11)
9981 | ((relocation
>> 1) & 0x7ff);
9983 /* Put the relocated value back in the object file: */
9984 bfd_put_16 (input_bfd
, upper_insn
, hit_data
);
9985 bfd_put_16 (input_bfd
, lower_insn
, hit_data
+ 2);
9987 return (overflow
? bfd_reloc_overflow
: bfd_reloc_ok
);
9991 case R_ARM_THM_JUMP19
:
9992 /* Thumb32 conditional branch instruction. */
9995 bfd_boolean overflow
= FALSE
;
9996 bfd_vma upper_insn
= bfd_get_16 (input_bfd
, hit_data
);
9997 bfd_vma lower_insn
= bfd_get_16 (input_bfd
, hit_data
+ 2);
9998 bfd_signed_vma reloc_signed_max
= 0xffffe;
9999 bfd_signed_vma reloc_signed_min
= -0x100000;
10000 bfd_signed_vma signed_check
;
10001 enum elf32_arm_stub_type stub_type
= arm_stub_none
;
10002 struct elf32_arm_stub_hash_entry
*stub_entry
;
10003 struct elf32_arm_link_hash_entry
*hash
;
10005 /* Need to refetch the addend, reconstruct the top three bits,
10006 and squish the two 11 bit pieces together. */
10007 if (globals
->use_rel
)
10009 bfd_vma S
= (upper_insn
& 0x0400) >> 10;
10010 bfd_vma upper
= (upper_insn
& 0x003f);
10011 bfd_vma J1
= (lower_insn
& 0x2000) >> 13;
10012 bfd_vma J2
= (lower_insn
& 0x0800) >> 11;
10013 bfd_vma lower
= (lower_insn
& 0x07ff);
10017 upper
|= (!S
) << 8;
10018 upper
-= 0x0100; /* Sign extend. */
10020 addend
= (upper
<< 12) | (lower
<< 1);
10021 signed_addend
= addend
;
10024 /* Handle calls via the PLT. */
10025 if (plt_offset
!= (bfd_vma
) -1)
10027 value
= (splt
->output_section
->vma
10028 + splt
->output_offset
10030 /* Target the Thumb stub before the ARM PLT entry. */
10031 value
-= PLT_THUMB_STUB_SIZE
;
10032 *unresolved_reloc_p
= FALSE
;
10035 hash
= (struct elf32_arm_link_hash_entry
*)h
;
10037 stub_type
= arm_type_of_stub (info
, input_section
, rel
,
10038 st_type
, &branch_type
,
10039 hash
, value
, sym_sec
,
10040 input_bfd
, sym_name
);
10041 if (stub_type
!= arm_stub_none
)
10043 stub_entry
= elf32_arm_get_stub_entry (input_section
,
10047 if (stub_entry
!= NULL
)
10049 value
= (stub_entry
->stub_offset
10050 + stub_entry
->stub_sec
->output_offset
10051 + stub_entry
->stub_sec
->output_section
->vma
);
10055 relocation
= value
+ signed_addend
;
10056 relocation
-= (input_section
->output_section
->vma
10057 + input_section
->output_offset
10059 signed_check
= (bfd_signed_vma
) relocation
;
10061 if (signed_check
> reloc_signed_max
|| signed_check
< reloc_signed_min
)
10064 /* Put RELOCATION back into the insn. */
10066 bfd_vma S
= (relocation
& 0x00100000) >> 20;
10067 bfd_vma J2
= (relocation
& 0x00080000) >> 19;
10068 bfd_vma J1
= (relocation
& 0x00040000) >> 18;
10069 bfd_vma hi
= (relocation
& 0x0003f000) >> 12;
10070 bfd_vma lo
= (relocation
& 0x00000ffe) >> 1;
10072 upper_insn
= (upper_insn
& 0xfbc0) | (S
<< 10) | hi
;
10073 lower_insn
= (lower_insn
& 0xd000) | (J1
<< 13) | (J2
<< 11) | lo
;
10076 /* Put the relocated value back in the object file: */
10077 bfd_put_16 (input_bfd
, upper_insn
, hit_data
);
10078 bfd_put_16 (input_bfd
, lower_insn
, hit_data
+ 2);
10080 return (overflow
? bfd_reloc_overflow
: bfd_reloc_ok
);
10083 case R_ARM_THM_JUMP11
:
10084 case R_ARM_THM_JUMP8
:
10085 case R_ARM_THM_JUMP6
:
10086 /* Thumb B (branch) instruction. */
10088 bfd_signed_vma relocation
;
10089 bfd_signed_vma reloc_signed_max
= (1 << (howto
->bitsize
- 1)) - 1;
10090 bfd_signed_vma reloc_signed_min
= ~ reloc_signed_max
;
10091 bfd_signed_vma signed_check
;
10093 /* CZB cannot jump backward. */
10094 if (r_type
== R_ARM_THM_JUMP6
)
10095 reloc_signed_min
= 0;
10097 if (globals
->use_rel
)
10099 /* Need to refetch addend. */
10100 addend
= bfd_get_16 (input_bfd
, hit_data
) & howto
->src_mask
;
10101 if (addend
& ((howto
->src_mask
+ 1) >> 1))
10103 signed_addend
= -1;
10104 signed_addend
&= ~ howto
->src_mask
;
10105 signed_addend
|= addend
;
10108 signed_addend
= addend
;
10109 /* The value in the insn has been right shifted. We need to
10110 undo this, so that we can perform the address calculation
10111 in terms of bytes. */
10112 signed_addend
<<= howto
->rightshift
;
10114 relocation
= value
+ signed_addend
;
10116 relocation
-= (input_section
->output_section
->vma
10117 + input_section
->output_offset
10120 relocation
>>= howto
->rightshift
;
10121 signed_check
= relocation
;
10123 if (r_type
== R_ARM_THM_JUMP6
)
10124 relocation
= ((relocation
& 0x0020) << 4) | ((relocation
& 0x001f) << 3);
10126 relocation
&= howto
->dst_mask
;
10127 relocation
|= (bfd_get_16 (input_bfd
, hit_data
) & (~ howto
->dst_mask
));
10129 bfd_put_16 (input_bfd
, relocation
, hit_data
);
10131 /* Assumes two's complement. */
10132 if (signed_check
> reloc_signed_max
|| signed_check
< reloc_signed_min
)
10133 return bfd_reloc_overflow
;
10135 return bfd_reloc_ok
;
10138 case R_ARM_ALU_PCREL7_0
:
10139 case R_ARM_ALU_PCREL15_8
:
10140 case R_ARM_ALU_PCREL23_15
:
10143 bfd_vma relocation
;
10145 insn
= bfd_get_32 (input_bfd
, hit_data
);
10146 if (globals
->use_rel
)
10148 /* Extract the addend. */
10149 addend
= (insn
& 0xff) << ((insn
& 0xf00) >> 7);
10150 signed_addend
= addend
;
10152 relocation
= value
+ signed_addend
;
10154 relocation
-= (input_section
->output_section
->vma
10155 + input_section
->output_offset
10157 insn
= (insn
& ~0xfff)
10158 | ((howto
->bitpos
<< 7) & 0xf00)
10159 | ((relocation
>> howto
->bitpos
) & 0xff);
10160 bfd_put_32 (input_bfd
, value
, hit_data
);
10162 return bfd_reloc_ok
;
10164 case R_ARM_GNU_VTINHERIT
:
10165 case R_ARM_GNU_VTENTRY
:
10166 return bfd_reloc_ok
;
10168 case R_ARM_GOTOFF32
:
10169 /* Relocation is relative to the start of the
10170 global offset table. */
10172 BFD_ASSERT (sgot
!= NULL
);
10174 return bfd_reloc_notsupported
;
10176 /* If we are addressing a Thumb function, we need to adjust the
10177 address by one, so that attempts to call the function pointer will
10178 correctly interpret it as Thumb code. */
10179 if (branch_type
== ST_BRANCH_TO_THUMB
)
10182 /* Note that sgot->output_offset is not involved in this
10183 calculation. We always want the start of .got. If we
10184 define _GLOBAL_OFFSET_TABLE in a different way, as is
10185 permitted by the ABI, we might have to change this
10187 value
-= sgot
->output_section
->vma
;
10188 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
10189 contents
, rel
->r_offset
, value
,
10193 /* Use global offset table as symbol value. */
10194 BFD_ASSERT (sgot
!= NULL
);
10197 return bfd_reloc_notsupported
;
10199 *unresolved_reloc_p
= FALSE
;
10200 value
= sgot
->output_section
->vma
;
10201 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
10202 contents
, rel
->r_offset
, value
,
10206 case R_ARM_GOT_PREL
:
10207 /* Relocation is to the entry for this symbol in the
10208 global offset table. */
10210 return bfd_reloc_notsupported
;
10212 if (dynreloc_st_type
== STT_GNU_IFUNC
10213 && plt_offset
!= (bfd_vma
) -1
10214 && (h
== NULL
|| SYMBOL_REFERENCES_LOCAL (info
, h
)))
10216 /* We have a relocation against a locally-binding STT_GNU_IFUNC
10217 symbol, and the relocation resolves directly to the runtime
10218 target rather than to the .iplt entry. This means that any
10219 .got entry would be the same value as the .igot.plt entry,
10220 so there's no point creating both. */
10221 sgot
= globals
->root
.igotplt
;
10222 value
= sgot
->output_offset
+ gotplt_offset
;
10224 else if (h
!= NULL
)
10228 off
= h
->got
.offset
;
10229 BFD_ASSERT (off
!= (bfd_vma
) -1);
10230 if ((off
& 1) != 0)
10232 /* We have already processed one GOT relocation against
10235 if (globals
->root
.dynamic_sections_created
10236 && !SYMBOL_REFERENCES_LOCAL (info
, h
))
10237 *unresolved_reloc_p
= FALSE
;
10241 Elf_Internal_Rela outrel
;
10243 if (h
->dynindx
!= -1 && !SYMBOL_REFERENCES_LOCAL (info
, h
))
10245 /* If the symbol doesn't resolve locally in a static
10246 object, we have an undefined reference. If the
10247 symbol doesn't resolve locally in a dynamic object,
10248 it should be resolved by the dynamic linker. */
10249 if (globals
->root
.dynamic_sections_created
)
10251 outrel
.r_info
= ELF32_R_INFO (h
->dynindx
, R_ARM_GLOB_DAT
);
10252 *unresolved_reloc_p
= FALSE
;
10256 outrel
.r_addend
= 0;
10260 if (dynreloc_st_type
== STT_GNU_IFUNC
)
10261 outrel
.r_info
= ELF32_R_INFO (0, R_ARM_IRELATIVE
);
10262 else if (bfd_link_pic (info
) &&
10263 (ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
10264 || h
->root
.type
!= bfd_link_hash_undefweak
))
10265 outrel
.r_info
= ELF32_R_INFO (0, R_ARM_RELATIVE
);
10268 outrel
.r_addend
= dynreloc_value
;
10271 /* The GOT entry is initialized to zero by default.
10272 See if we should install a different value. */
10273 if (outrel
.r_addend
!= 0
10274 && (outrel
.r_info
== 0 || globals
->use_rel
))
10276 bfd_put_32 (output_bfd
, outrel
.r_addend
,
10277 sgot
->contents
+ off
);
10278 outrel
.r_addend
= 0;
10281 if (outrel
.r_info
!= 0)
10283 outrel
.r_offset
= (sgot
->output_section
->vma
10284 + sgot
->output_offset
10286 elf32_arm_add_dynreloc (output_bfd
, info
, srelgot
, &outrel
);
10288 h
->got
.offset
|= 1;
10290 value
= sgot
->output_offset
+ off
;
10296 BFD_ASSERT (local_got_offsets
!= NULL
&&
10297 local_got_offsets
[r_symndx
] != (bfd_vma
) -1);
10299 off
= local_got_offsets
[r_symndx
];
10301 /* The offset must always be a multiple of 4. We use the
10302 least significant bit to record whether we have already
10303 generated the necessary reloc. */
10304 if ((off
& 1) != 0)
10308 if (globals
->use_rel
)
10309 bfd_put_32 (output_bfd
, dynreloc_value
, sgot
->contents
+ off
);
10311 if (bfd_link_pic (info
) || dynreloc_st_type
== STT_GNU_IFUNC
)
10313 Elf_Internal_Rela outrel
;
10315 outrel
.r_addend
= addend
+ dynreloc_value
;
10316 outrel
.r_offset
= (sgot
->output_section
->vma
10317 + sgot
->output_offset
10319 if (dynreloc_st_type
== STT_GNU_IFUNC
)
10320 outrel
.r_info
= ELF32_R_INFO (0, R_ARM_IRELATIVE
);
10322 outrel
.r_info
= ELF32_R_INFO (0, R_ARM_RELATIVE
);
10323 elf32_arm_add_dynreloc (output_bfd
, info
, srelgot
, &outrel
);
10326 local_got_offsets
[r_symndx
] |= 1;
10329 value
= sgot
->output_offset
+ off
;
10331 if (r_type
!= R_ARM_GOT32
)
10332 value
+= sgot
->output_section
->vma
;
10334 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
10335 contents
, rel
->r_offset
, value
,
10338 case R_ARM_TLS_LDO32
:
10339 value
= value
- dtpoff_base (info
);
10341 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
10342 contents
, rel
->r_offset
, value
,
10345 case R_ARM_TLS_LDM32
:
10352 off
= globals
->tls_ldm_got
.offset
;
10354 if ((off
& 1) != 0)
10358 /* If we don't know the module number, create a relocation
10360 if (bfd_link_pic (info
))
10362 Elf_Internal_Rela outrel
;
10364 if (srelgot
== NULL
)
10367 outrel
.r_addend
= 0;
10368 outrel
.r_offset
= (sgot
->output_section
->vma
10369 + sgot
->output_offset
+ off
);
10370 outrel
.r_info
= ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32
);
10372 if (globals
->use_rel
)
10373 bfd_put_32 (output_bfd
, outrel
.r_addend
,
10374 sgot
->contents
+ off
);
10376 elf32_arm_add_dynreloc (output_bfd
, info
, srelgot
, &outrel
);
10379 bfd_put_32 (output_bfd
, 1, sgot
->contents
+ off
);
10381 globals
->tls_ldm_got
.offset
|= 1;
10384 value
= sgot
->output_section
->vma
+ sgot
->output_offset
+ off
10385 - (input_section
->output_section
->vma
+ input_section
->output_offset
+ rel
->r_offset
);
10387 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
10388 contents
, rel
->r_offset
, value
,
10392 case R_ARM_TLS_CALL
:
10393 case R_ARM_THM_TLS_CALL
:
10394 case R_ARM_TLS_GD32
:
10395 case R_ARM_TLS_IE32
:
10396 case R_ARM_TLS_GOTDESC
:
10397 case R_ARM_TLS_DESCSEQ
:
10398 case R_ARM_THM_TLS_DESCSEQ
:
10400 bfd_vma off
, offplt
;
10404 BFD_ASSERT (sgot
!= NULL
);
10409 dyn
= globals
->root
.dynamic_sections_created
;
10410 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn
,
10411 bfd_link_pic (info
),
10413 && (!bfd_link_pic (info
)
10414 || !SYMBOL_REFERENCES_LOCAL (info
, h
)))
10416 *unresolved_reloc_p
= FALSE
;
10419 off
= h
->got
.offset
;
10420 offplt
= elf32_arm_hash_entry (h
)->tlsdesc_got
;
10421 tls_type
= ((struct elf32_arm_link_hash_entry
*) h
)->tls_type
;
10425 BFD_ASSERT (local_got_offsets
!= NULL
);
10426 off
= local_got_offsets
[r_symndx
];
10427 offplt
= local_tlsdesc_gotents
[r_symndx
];
10428 tls_type
= elf32_arm_local_got_tls_type (input_bfd
)[r_symndx
];
10431 /* Linker relaxations happens from one of the
10432 R_ARM_{GOTDESC,CALL,DESCSEQ} relocations to IE or LE. */
10433 if (ELF32_R_TYPE(rel
->r_info
) != r_type
)
10434 tls_type
= GOT_TLS_IE
;
10436 BFD_ASSERT (tls_type
!= GOT_UNKNOWN
);
10438 if ((off
& 1) != 0)
10442 bfd_boolean need_relocs
= FALSE
;
10443 Elf_Internal_Rela outrel
;
10446 /* The GOT entries have not been initialized yet. Do it
10447 now, and emit any relocations. If both an IE GOT and a
10448 GD GOT are necessary, we emit the GD first. */
10450 if ((bfd_link_pic (info
) || indx
!= 0)
10452 || ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
10453 || h
->root
.type
!= bfd_link_hash_undefweak
))
10455 need_relocs
= TRUE
;
10456 BFD_ASSERT (srelgot
!= NULL
);
10459 if (tls_type
& GOT_TLS_GDESC
)
10463 /* We should have relaxed, unless this is an undefined
10465 BFD_ASSERT ((h
&& (h
->root
.type
== bfd_link_hash_undefweak
))
10466 || bfd_link_pic (info
));
10467 BFD_ASSERT (globals
->sgotplt_jump_table_size
+ offplt
+ 8
10468 <= globals
->root
.sgotplt
->size
);
10470 outrel
.r_addend
= 0;
10471 outrel
.r_offset
= (globals
->root
.sgotplt
->output_section
->vma
10472 + globals
->root
.sgotplt
->output_offset
10474 + globals
->sgotplt_jump_table_size
);
10476 outrel
.r_info
= ELF32_R_INFO (indx
, R_ARM_TLS_DESC
);
10477 sreloc
= globals
->root
.srelplt
;
10478 loc
= sreloc
->contents
;
10479 loc
+= globals
->next_tls_desc_index
++ * RELOC_SIZE (globals
);
10480 BFD_ASSERT (loc
+ RELOC_SIZE (globals
)
10481 <= sreloc
->contents
+ sreloc
->size
);
10483 SWAP_RELOC_OUT (globals
) (output_bfd
, &outrel
, loc
);
10485 /* For globals, the first word in the relocation gets
10486 the relocation index and the top bit set, or zero,
10487 if we're binding now. For locals, it gets the
10488 symbol's offset in the tls section. */
10489 bfd_put_32 (output_bfd
,
10490 !h
? value
- elf_hash_table (info
)->tls_sec
->vma
10491 : info
->flags
& DF_BIND_NOW
? 0
10492 : 0x80000000 | ELF32_R_SYM (outrel
.r_info
),
10493 globals
->root
.sgotplt
->contents
+ offplt
10494 + globals
->sgotplt_jump_table_size
);
10496 /* Second word in the relocation is always zero. */
10497 bfd_put_32 (output_bfd
, 0,
10498 globals
->root
.sgotplt
->contents
+ offplt
10499 + globals
->sgotplt_jump_table_size
+ 4);
10501 if (tls_type
& GOT_TLS_GD
)
10505 outrel
.r_addend
= 0;
10506 outrel
.r_offset
= (sgot
->output_section
->vma
10507 + sgot
->output_offset
10509 outrel
.r_info
= ELF32_R_INFO (indx
, R_ARM_TLS_DTPMOD32
);
10511 if (globals
->use_rel
)
10512 bfd_put_32 (output_bfd
, outrel
.r_addend
,
10513 sgot
->contents
+ cur_off
);
10515 elf32_arm_add_dynreloc (output_bfd
, info
, srelgot
, &outrel
);
10518 bfd_put_32 (output_bfd
, value
- dtpoff_base (info
),
10519 sgot
->contents
+ cur_off
+ 4);
10522 outrel
.r_addend
= 0;
10523 outrel
.r_info
= ELF32_R_INFO (indx
,
10524 R_ARM_TLS_DTPOFF32
);
10525 outrel
.r_offset
+= 4;
10527 if (globals
->use_rel
)
10528 bfd_put_32 (output_bfd
, outrel
.r_addend
,
10529 sgot
->contents
+ cur_off
+ 4);
10531 elf32_arm_add_dynreloc (output_bfd
, info
,
10537 /* If we are not emitting relocations for a
10538 general dynamic reference, then we must be in a
10539 static link or an executable link with the
10540 symbol binding locally. Mark it as belonging
10541 to module 1, the executable. */
10542 bfd_put_32 (output_bfd
, 1,
10543 sgot
->contents
+ cur_off
);
10544 bfd_put_32 (output_bfd
, value
- dtpoff_base (info
),
10545 sgot
->contents
+ cur_off
+ 4);
10551 if (tls_type
& GOT_TLS_IE
)
10556 outrel
.r_addend
= value
- dtpoff_base (info
);
10558 outrel
.r_addend
= 0;
10559 outrel
.r_offset
= (sgot
->output_section
->vma
10560 + sgot
->output_offset
10562 outrel
.r_info
= ELF32_R_INFO (indx
, R_ARM_TLS_TPOFF32
);
10564 if (globals
->use_rel
)
10565 bfd_put_32 (output_bfd
, outrel
.r_addend
,
10566 sgot
->contents
+ cur_off
);
10568 elf32_arm_add_dynreloc (output_bfd
, info
, srelgot
, &outrel
);
10571 bfd_put_32 (output_bfd
, tpoff (info
, value
),
10572 sgot
->contents
+ cur_off
);
10577 h
->got
.offset
|= 1;
10579 local_got_offsets
[r_symndx
] |= 1;
10582 if ((tls_type
& GOT_TLS_GD
) && r_type
!= R_ARM_TLS_GD32
)
10584 else if (tls_type
& GOT_TLS_GDESC
)
10587 if (ELF32_R_TYPE(rel
->r_info
) == R_ARM_TLS_CALL
10588 || ELF32_R_TYPE(rel
->r_info
) == R_ARM_THM_TLS_CALL
)
10590 bfd_signed_vma offset
;
10591 /* TLS stubs are arm mode. The original symbol is a
10592 data object, so branch_type is bogus. */
10593 branch_type
= ST_BRANCH_TO_ARM
;
10594 enum elf32_arm_stub_type stub_type
10595 = arm_type_of_stub (info
, input_section
, rel
,
10596 st_type
, &branch_type
,
10597 (struct elf32_arm_link_hash_entry
*)h
,
10598 globals
->tls_trampoline
, globals
->root
.splt
,
10599 input_bfd
, sym_name
);
10601 if (stub_type
!= arm_stub_none
)
10603 struct elf32_arm_stub_hash_entry
*stub_entry
10604 = elf32_arm_get_stub_entry
10605 (input_section
, globals
->root
.splt
, 0, rel
,
10606 globals
, stub_type
);
10607 offset
= (stub_entry
->stub_offset
10608 + stub_entry
->stub_sec
->output_offset
10609 + stub_entry
->stub_sec
->output_section
->vma
);
10612 offset
= (globals
->root
.splt
->output_section
->vma
10613 + globals
->root
.splt
->output_offset
10614 + globals
->tls_trampoline
);
10616 if (ELF32_R_TYPE(rel
->r_info
) == R_ARM_TLS_CALL
)
10618 unsigned long inst
;
10620 offset
-= (input_section
->output_section
->vma
10621 + input_section
->output_offset
10622 + rel
->r_offset
+ 8);
10624 inst
= offset
>> 2;
10625 inst
&= 0x00ffffff;
10626 value
= inst
| (globals
->use_blx
? 0xfa000000 : 0xeb000000);
10630 /* Thumb blx encodes the offset in a complicated
10632 unsigned upper_insn
, lower_insn
;
10635 offset
-= (input_section
->output_section
->vma
10636 + input_section
->output_offset
10637 + rel
->r_offset
+ 4);
10639 if (stub_type
!= arm_stub_none
10640 && arm_stub_is_thumb (stub_type
))
10642 lower_insn
= 0xd000;
10646 lower_insn
= 0xc000;
10647 /* Round up the offset to a word boundary. */
10648 offset
= (offset
+ 2) & ~2;
10652 upper_insn
= (0xf000
10653 | ((offset
>> 12) & 0x3ff)
10655 lower_insn
|= (((!((offset
>> 23) & 1)) ^ neg
) << 13)
10656 | (((!((offset
>> 22) & 1)) ^ neg
) << 11)
10657 | ((offset
>> 1) & 0x7ff);
10658 bfd_put_16 (input_bfd
, upper_insn
, hit_data
);
10659 bfd_put_16 (input_bfd
, lower_insn
, hit_data
+ 2);
10660 return bfd_reloc_ok
;
10663 /* These relocations needs special care, as besides the fact
10664 they point somewhere in .gotplt, the addend must be
10665 adjusted accordingly depending on the type of instruction
10667 else if ((r_type
== R_ARM_TLS_GOTDESC
) && (tls_type
& GOT_TLS_GDESC
))
10669 unsigned long data
, insn
;
10672 data
= bfd_get_32 (input_bfd
, hit_data
);
10678 insn
= bfd_get_16 (input_bfd
, contents
+ rel
->r_offset
- data
);
10679 if ((insn
& 0xf000) == 0xf000 || (insn
& 0xf800) == 0xe800)
10680 insn
= (insn
<< 16)
10681 | bfd_get_16 (input_bfd
,
10682 contents
+ rel
->r_offset
- data
+ 2);
10683 if ((insn
& 0xf800c000) == 0xf000c000)
10686 else if ((insn
& 0xffffff00) == 0x4400)
10691 (*_bfd_error_handler
)
10692 (_("%B(%A+0x%lx):unexpected Thumb instruction '0x%x' referenced by TLS_GOTDESC"),
10693 input_bfd
, input_section
,
10694 (unsigned long)rel
->r_offset
, insn
);
10695 return bfd_reloc_notsupported
;
10700 insn
= bfd_get_32 (input_bfd
, contents
+ rel
->r_offset
- data
);
10702 switch (insn
>> 24)
10704 case 0xeb: /* bl */
10705 case 0xfa: /* blx */
10709 case 0xe0: /* add */
10714 (*_bfd_error_handler
)
10715 (_("%B(%A+0x%lx):unexpected ARM instruction '0x%x' referenced by TLS_GOTDESC"),
10716 input_bfd
, input_section
,
10717 (unsigned long)rel
->r_offset
, insn
);
10718 return bfd_reloc_notsupported
;
10722 value
+= ((globals
->root
.sgotplt
->output_section
->vma
10723 + globals
->root
.sgotplt
->output_offset
+ off
)
10724 - (input_section
->output_section
->vma
10725 + input_section
->output_offset
10727 + globals
->sgotplt_jump_table_size
);
10730 value
= ((globals
->root
.sgot
->output_section
->vma
10731 + globals
->root
.sgot
->output_offset
+ off
)
10732 - (input_section
->output_section
->vma
10733 + input_section
->output_offset
+ rel
->r_offset
));
10735 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
10736 contents
, rel
->r_offset
, value
,
10740 case R_ARM_TLS_LE32
:
10741 if (bfd_link_dll (info
))
10743 (*_bfd_error_handler
)
10744 (_("%B(%A+0x%lx): R_ARM_TLS_LE32 relocation not permitted in shared object"),
10745 input_bfd
, input_section
,
10746 (long) rel
->r_offset
, howto
->name
);
10747 return bfd_reloc_notsupported
;
10750 value
= tpoff (info
, value
);
10752 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
10753 contents
, rel
->r_offset
, value
,
10757 if (globals
->fix_v4bx
)
10759 bfd_vma insn
= bfd_get_32 (input_bfd
, hit_data
);
10761 /* Ensure that we have a BX instruction. */
10762 BFD_ASSERT ((insn
& 0x0ffffff0) == 0x012fff10);
10764 if (globals
->fix_v4bx
== 2 && (insn
& 0xf) != 0xf)
10766 /* Branch to veneer. */
10768 glue_addr
= elf32_arm_bx_glue (info
, insn
& 0xf);
10769 glue_addr
-= input_section
->output_section
->vma
10770 + input_section
->output_offset
10771 + rel
->r_offset
+ 8;
10772 insn
= (insn
& 0xf0000000) | 0x0a000000
10773 | ((glue_addr
>> 2) & 0x00ffffff);
10777 /* Preserve Rm (lowest four bits) and the condition code
10778 (highest four bits). Other bits encode MOV PC,Rm. */
10779 insn
= (insn
& 0xf000000f) | 0x01a0f000;
10782 bfd_put_32 (input_bfd
, insn
, hit_data
);
10784 return bfd_reloc_ok
;
10786 case R_ARM_MOVW_ABS_NC
:
10787 case R_ARM_MOVT_ABS
:
10788 case R_ARM_MOVW_PREL_NC
:
10789 case R_ARM_MOVT_PREL
:
10790 /* Until we properly support segment-base-relative addressing then
10791 we assume the segment base to be zero, as for the group relocations.
10792 Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
10793 and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS. */
10794 case R_ARM_MOVW_BREL_NC
:
10795 case R_ARM_MOVW_BREL
:
10796 case R_ARM_MOVT_BREL
:
10798 bfd_vma insn
= bfd_get_32 (input_bfd
, hit_data
);
10800 if (globals
->use_rel
)
10802 addend
= ((insn
>> 4) & 0xf000) | (insn
& 0xfff);
10803 signed_addend
= (addend
^ 0x8000) - 0x8000;
10806 value
+= signed_addend
;
10808 if (r_type
== R_ARM_MOVW_PREL_NC
|| r_type
== R_ARM_MOVT_PREL
)
10809 value
-= (input_section
->output_section
->vma
10810 + input_section
->output_offset
+ rel
->r_offset
);
10812 if (r_type
== R_ARM_MOVW_BREL
&& value
>= 0x10000)
10813 return bfd_reloc_overflow
;
10815 if (branch_type
== ST_BRANCH_TO_THUMB
)
10818 if (r_type
== R_ARM_MOVT_ABS
|| r_type
== R_ARM_MOVT_PREL
10819 || r_type
== R_ARM_MOVT_BREL
)
10822 insn
&= 0xfff0f000;
10823 insn
|= value
& 0xfff;
10824 insn
|= (value
& 0xf000) << 4;
10825 bfd_put_32 (input_bfd
, insn
, hit_data
);
10827 return bfd_reloc_ok
;
10829 case R_ARM_THM_MOVW_ABS_NC
:
10830 case R_ARM_THM_MOVT_ABS
:
10831 case R_ARM_THM_MOVW_PREL_NC
:
10832 case R_ARM_THM_MOVT_PREL
:
10833 /* Until we properly support segment-base-relative addressing then
10834 we assume the segment base to be zero, as for the above relocations.
10835 Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
10836 R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
10837 as R_ARM_THM_MOVT_ABS. */
10838 case R_ARM_THM_MOVW_BREL_NC
:
10839 case R_ARM_THM_MOVW_BREL
:
10840 case R_ARM_THM_MOVT_BREL
:
10844 insn
= bfd_get_16 (input_bfd
, hit_data
) << 16;
10845 insn
|= bfd_get_16 (input_bfd
, hit_data
+ 2);
10847 if (globals
->use_rel
)
10849 addend
= ((insn
>> 4) & 0xf000)
10850 | ((insn
>> 15) & 0x0800)
10851 | ((insn
>> 4) & 0x0700)
10853 signed_addend
= (addend
^ 0x8000) - 0x8000;
10856 value
+= signed_addend
;
10858 if (r_type
== R_ARM_THM_MOVW_PREL_NC
|| r_type
== R_ARM_THM_MOVT_PREL
)
10859 value
-= (input_section
->output_section
->vma
10860 + input_section
->output_offset
+ rel
->r_offset
);
10862 if (r_type
== R_ARM_THM_MOVW_BREL
&& value
>= 0x10000)
10863 return bfd_reloc_overflow
;
10865 if (branch_type
== ST_BRANCH_TO_THUMB
)
10868 if (r_type
== R_ARM_THM_MOVT_ABS
|| r_type
== R_ARM_THM_MOVT_PREL
10869 || r_type
== R_ARM_THM_MOVT_BREL
)
10872 insn
&= 0xfbf08f00;
10873 insn
|= (value
& 0xf000) << 4;
10874 insn
|= (value
& 0x0800) << 15;
10875 insn
|= (value
& 0x0700) << 4;
10876 insn
|= (value
& 0x00ff);
10878 bfd_put_16 (input_bfd
, insn
>> 16, hit_data
);
10879 bfd_put_16 (input_bfd
, insn
& 0xffff, hit_data
+ 2);
10881 return bfd_reloc_ok
;
10883 case R_ARM_ALU_PC_G0_NC
:
10884 case R_ARM_ALU_PC_G1_NC
:
10885 case R_ARM_ALU_PC_G0
:
10886 case R_ARM_ALU_PC_G1
:
10887 case R_ARM_ALU_PC_G2
:
10888 case R_ARM_ALU_SB_G0_NC
:
10889 case R_ARM_ALU_SB_G1_NC
:
10890 case R_ARM_ALU_SB_G0
:
10891 case R_ARM_ALU_SB_G1
:
10892 case R_ARM_ALU_SB_G2
:
10894 bfd_vma insn
= bfd_get_32 (input_bfd
, hit_data
);
10895 bfd_vma pc
= input_section
->output_section
->vma
10896 + input_section
->output_offset
+ rel
->r_offset
;
10897 /* sb is the origin of the *segment* containing the symbol. */
10898 bfd_vma sb
= sym_sec
? sym_sec
->output_section
->vma
: 0;
10901 bfd_signed_vma signed_value
;
10904 /* Determine which group of bits to select. */
10907 case R_ARM_ALU_PC_G0_NC
:
10908 case R_ARM_ALU_PC_G0
:
10909 case R_ARM_ALU_SB_G0_NC
:
10910 case R_ARM_ALU_SB_G0
:
10914 case R_ARM_ALU_PC_G1_NC
:
10915 case R_ARM_ALU_PC_G1
:
10916 case R_ARM_ALU_SB_G1_NC
:
10917 case R_ARM_ALU_SB_G1
:
10921 case R_ARM_ALU_PC_G2
:
10922 case R_ARM_ALU_SB_G2
:
10930 /* If REL, extract the addend from the insn. If RELA, it will
10931 have already been fetched for us. */
10932 if (globals
->use_rel
)
10935 bfd_vma constant
= insn
& 0xff;
10936 bfd_vma rotation
= (insn
& 0xf00) >> 8;
10939 signed_addend
= constant
;
10942 /* Compensate for the fact that in the instruction, the
10943 rotation is stored in multiples of 2 bits. */
10946 /* Rotate "constant" right by "rotation" bits. */
10947 signed_addend
= (constant
>> rotation
) |
10948 (constant
<< (8 * sizeof (bfd_vma
) - rotation
));
10951 /* Determine if the instruction is an ADD or a SUB.
10952 (For REL, this determines the sign of the addend.) */
10953 negative
= identify_add_or_sub (insn
);
10956 (*_bfd_error_handler
)
10957 (_("%B(%A+0x%lx): Only ADD or SUB instructions are allowed for ALU group relocations"),
10958 input_bfd
, input_section
,
10959 (long) rel
->r_offset
, howto
->name
);
10960 return bfd_reloc_overflow
;
10963 signed_addend
*= negative
;
10966 /* Compute the value (X) to go in the place. */
10967 if (r_type
== R_ARM_ALU_PC_G0_NC
10968 || r_type
== R_ARM_ALU_PC_G1_NC
10969 || r_type
== R_ARM_ALU_PC_G0
10970 || r_type
== R_ARM_ALU_PC_G1
10971 || r_type
== R_ARM_ALU_PC_G2
)
10973 signed_value
= value
- pc
+ signed_addend
;
10975 /* Section base relative. */
10976 signed_value
= value
- sb
+ signed_addend
;
10978 /* If the target symbol is a Thumb function, then set the
10979 Thumb bit in the address. */
10980 if (branch_type
== ST_BRANCH_TO_THUMB
)
10983 /* Calculate the value of the relevant G_n, in encoded
10984 constant-with-rotation format. */
10985 g_n
= calculate_group_reloc_mask (signed_value
< 0 ? - signed_value
: signed_value
,
10988 /* Check for overflow if required. */
10989 if ((r_type
== R_ARM_ALU_PC_G0
10990 || r_type
== R_ARM_ALU_PC_G1
10991 || r_type
== R_ARM_ALU_PC_G2
10992 || r_type
== R_ARM_ALU_SB_G0
10993 || r_type
== R_ARM_ALU_SB_G1
10994 || r_type
== R_ARM_ALU_SB_G2
) && residual
!= 0)
10996 (*_bfd_error_handler
)
10997 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
10998 input_bfd
, input_section
,
10999 (long) rel
->r_offset
, signed_value
< 0 ? - signed_value
: signed_value
,
11001 return bfd_reloc_overflow
;
11004 /* Mask out the value and the ADD/SUB part of the opcode; take care
11005 not to destroy the S bit. */
11006 insn
&= 0xff1ff000;
11008 /* Set the opcode according to whether the value to go in the
11009 place is negative. */
11010 if (signed_value
< 0)
11015 /* Encode the offset. */
11018 bfd_put_32 (input_bfd
, insn
, hit_data
);
11020 return bfd_reloc_ok
;
11022 case R_ARM_LDR_PC_G0
:
11023 case R_ARM_LDR_PC_G1
:
11024 case R_ARM_LDR_PC_G2
:
11025 case R_ARM_LDR_SB_G0
:
11026 case R_ARM_LDR_SB_G1
:
11027 case R_ARM_LDR_SB_G2
:
11029 bfd_vma insn
= bfd_get_32 (input_bfd
, hit_data
);
11030 bfd_vma pc
= input_section
->output_section
->vma
11031 + input_section
->output_offset
+ rel
->r_offset
;
11032 /* sb is the origin of the *segment* containing the symbol. */
11033 bfd_vma sb
= sym_sec
? sym_sec
->output_section
->vma
: 0;
11035 bfd_signed_vma signed_value
;
11038 /* Determine which groups of bits to calculate. */
11041 case R_ARM_LDR_PC_G0
:
11042 case R_ARM_LDR_SB_G0
:
11046 case R_ARM_LDR_PC_G1
:
11047 case R_ARM_LDR_SB_G1
:
11051 case R_ARM_LDR_PC_G2
:
11052 case R_ARM_LDR_SB_G2
:
11060 /* If REL, extract the addend from the insn. If RELA, it will
11061 have already been fetched for us. */
11062 if (globals
->use_rel
)
11064 int negative
= (insn
& (1 << 23)) ? 1 : -1;
11065 signed_addend
= negative
* (insn
& 0xfff);
11068 /* Compute the value (X) to go in the place. */
11069 if (r_type
== R_ARM_LDR_PC_G0
11070 || r_type
== R_ARM_LDR_PC_G1
11071 || r_type
== R_ARM_LDR_PC_G2
)
11073 signed_value
= value
- pc
+ signed_addend
;
11075 /* Section base relative. */
11076 signed_value
= value
- sb
+ signed_addend
;
11078 /* Calculate the value of the relevant G_{n-1} to obtain
11079 the residual at that stage. */
11080 calculate_group_reloc_mask (signed_value
< 0 ? - signed_value
: signed_value
,
11081 group
- 1, &residual
);
11083 /* Check for overflow. */
11084 if (residual
>= 0x1000)
11086 (*_bfd_error_handler
)
11087 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
11088 input_bfd
, input_section
,
11089 (long) rel
->r_offset
, labs (signed_value
), howto
->name
);
11090 return bfd_reloc_overflow
;
11093 /* Mask out the value and U bit. */
11094 insn
&= 0xff7ff000;
11096 /* Set the U bit if the value to go in the place is non-negative. */
11097 if (signed_value
>= 0)
11100 /* Encode the offset. */
11103 bfd_put_32 (input_bfd
, insn
, hit_data
);
11105 return bfd_reloc_ok
;
11107 case R_ARM_LDRS_PC_G0
:
11108 case R_ARM_LDRS_PC_G1
:
11109 case R_ARM_LDRS_PC_G2
:
11110 case R_ARM_LDRS_SB_G0
:
11111 case R_ARM_LDRS_SB_G1
:
11112 case R_ARM_LDRS_SB_G2
:
11114 bfd_vma insn
= bfd_get_32 (input_bfd
, hit_data
);
11115 bfd_vma pc
= input_section
->output_section
->vma
11116 + input_section
->output_offset
+ rel
->r_offset
;
11117 /* sb is the origin of the *segment* containing the symbol. */
11118 bfd_vma sb
= sym_sec
? sym_sec
->output_section
->vma
: 0;
11120 bfd_signed_vma signed_value
;
11123 /* Determine which groups of bits to calculate. */
11126 case R_ARM_LDRS_PC_G0
:
11127 case R_ARM_LDRS_SB_G0
:
11131 case R_ARM_LDRS_PC_G1
:
11132 case R_ARM_LDRS_SB_G1
:
11136 case R_ARM_LDRS_PC_G2
:
11137 case R_ARM_LDRS_SB_G2
:
11145 /* If REL, extract the addend from the insn. If RELA, it will
11146 have already been fetched for us. */
11147 if (globals
->use_rel
)
11149 int negative
= (insn
& (1 << 23)) ? 1 : -1;
11150 signed_addend
= negative
* (((insn
& 0xf00) >> 4) + (insn
& 0xf));
11153 /* Compute the value (X) to go in the place. */
11154 if (r_type
== R_ARM_LDRS_PC_G0
11155 || r_type
== R_ARM_LDRS_PC_G1
11156 || r_type
== R_ARM_LDRS_PC_G2
)
11158 signed_value
= value
- pc
+ signed_addend
;
11160 /* Section base relative. */
11161 signed_value
= value
- sb
+ signed_addend
;
11163 /* Calculate the value of the relevant G_{n-1} to obtain
11164 the residual at that stage. */
11165 calculate_group_reloc_mask (signed_value
< 0 ? - signed_value
: signed_value
,
11166 group
- 1, &residual
);
11168 /* Check for overflow. */
11169 if (residual
>= 0x100)
11171 (*_bfd_error_handler
)
11172 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
11173 input_bfd
, input_section
,
11174 (long) rel
->r_offset
, labs (signed_value
), howto
->name
);
11175 return bfd_reloc_overflow
;
11178 /* Mask out the value and U bit. */
11179 insn
&= 0xff7ff0f0;
11181 /* Set the U bit if the value to go in the place is non-negative. */
11182 if (signed_value
>= 0)
11185 /* Encode the offset. */
11186 insn
|= ((residual
& 0xf0) << 4) | (residual
& 0xf);
11188 bfd_put_32 (input_bfd
, insn
, hit_data
);
11190 return bfd_reloc_ok
;
11192 case R_ARM_LDC_PC_G0
:
11193 case R_ARM_LDC_PC_G1
:
11194 case R_ARM_LDC_PC_G2
:
11195 case R_ARM_LDC_SB_G0
:
11196 case R_ARM_LDC_SB_G1
:
11197 case R_ARM_LDC_SB_G2
:
11199 bfd_vma insn
= bfd_get_32 (input_bfd
, hit_data
);
11200 bfd_vma pc
= input_section
->output_section
->vma
11201 + input_section
->output_offset
+ rel
->r_offset
;
11202 /* sb is the origin of the *segment* containing the symbol. */
11203 bfd_vma sb
= sym_sec
? sym_sec
->output_section
->vma
: 0;
11205 bfd_signed_vma signed_value
;
11208 /* Determine which groups of bits to calculate. */
11211 case R_ARM_LDC_PC_G0
:
11212 case R_ARM_LDC_SB_G0
:
11216 case R_ARM_LDC_PC_G1
:
11217 case R_ARM_LDC_SB_G1
:
11221 case R_ARM_LDC_PC_G2
:
11222 case R_ARM_LDC_SB_G2
:
11230 /* If REL, extract the addend from the insn. If RELA, it will
11231 have already been fetched for us. */
11232 if (globals
->use_rel
)
11234 int negative
= (insn
& (1 << 23)) ? 1 : -1;
11235 signed_addend
= negative
* ((insn
& 0xff) << 2);
11238 /* Compute the value (X) to go in the place. */
11239 if (r_type
== R_ARM_LDC_PC_G0
11240 || r_type
== R_ARM_LDC_PC_G1
11241 || r_type
== R_ARM_LDC_PC_G2
)
11243 signed_value
= value
- pc
+ signed_addend
;
11245 /* Section base relative. */
11246 signed_value
= value
- sb
+ signed_addend
;
11248 /* Calculate the value of the relevant G_{n-1} to obtain
11249 the residual at that stage. */
11250 calculate_group_reloc_mask (signed_value
< 0 ? - signed_value
: signed_value
,
11251 group
- 1, &residual
);
11253 /* Check for overflow. (The absolute value to go in the place must be
11254 divisible by four and, after having been divided by four, must
11255 fit in eight bits.) */
11256 if ((residual
& 0x3) != 0 || residual
>= 0x400)
11258 (*_bfd_error_handler
)
11259 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
11260 input_bfd
, input_section
,
11261 (long) rel
->r_offset
, labs (signed_value
), howto
->name
);
11262 return bfd_reloc_overflow
;
11265 /* Mask out the value and U bit. */
11266 insn
&= 0xff7fff00;
11268 /* Set the U bit if the value to go in the place is non-negative. */
11269 if (signed_value
>= 0)
11272 /* Encode the offset. */
11273 insn
|= residual
>> 2;
11275 bfd_put_32 (input_bfd
, insn
, hit_data
);
11277 return bfd_reloc_ok
;
11279 case R_ARM_THM_ALU_ABS_G0_NC
:
11280 case R_ARM_THM_ALU_ABS_G1_NC
:
11281 case R_ARM_THM_ALU_ABS_G2_NC
:
11282 case R_ARM_THM_ALU_ABS_G3_NC
:
11284 const int shift_array
[4] = {0, 8, 16, 24};
11285 bfd_vma insn
= bfd_get_16 (input_bfd
, hit_data
);
11286 bfd_vma addr
= value
;
11287 int shift
= shift_array
[r_type
- R_ARM_THM_ALU_ABS_G0_NC
];
11289 /* Compute address. */
11290 if (globals
->use_rel
)
11291 signed_addend
= insn
& 0xff;
11292 addr
+= signed_addend
;
11293 if (branch_type
== ST_BRANCH_TO_THUMB
)
11295 /* Clean imm8 insn. */
11297 /* And update with correct part of address. */
11298 insn
|= (addr
>> shift
) & 0xff;
11300 bfd_put_16 (input_bfd
, insn
, hit_data
);
11303 *unresolved_reloc_p
= FALSE
;
11304 return bfd_reloc_ok
;
11307 return bfd_reloc_notsupported
;
11311 /* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS. */
11313 arm_add_to_rel (bfd
* abfd
,
11314 bfd_byte
* address
,
11315 reloc_howto_type
* howto
,
11316 bfd_signed_vma increment
)
11318 bfd_signed_vma addend
;
11320 if (howto
->type
== R_ARM_THM_CALL
11321 || howto
->type
== R_ARM_THM_JUMP24
)
11323 int upper_insn
, lower_insn
;
11326 upper_insn
= bfd_get_16 (abfd
, address
);
11327 lower_insn
= bfd_get_16 (abfd
, address
+ 2);
11328 upper
= upper_insn
& 0x7ff;
11329 lower
= lower_insn
& 0x7ff;
11331 addend
= (upper
<< 12) | (lower
<< 1);
11332 addend
+= increment
;
11335 upper_insn
= (upper_insn
& 0xf800) | ((addend
>> 11) & 0x7ff);
11336 lower_insn
= (lower_insn
& 0xf800) | (addend
& 0x7ff);
11338 bfd_put_16 (abfd
, (bfd_vma
) upper_insn
, address
);
11339 bfd_put_16 (abfd
, (bfd_vma
) lower_insn
, address
+ 2);
11345 contents
= bfd_get_32 (abfd
, address
);
11347 /* Get the (signed) value from the instruction. */
11348 addend
= contents
& howto
->src_mask
;
11349 if (addend
& ((howto
->src_mask
+ 1) >> 1))
11351 bfd_signed_vma mask
;
11354 mask
&= ~ howto
->src_mask
;
11358 /* Add in the increment, (which is a byte value). */
11359 switch (howto
->type
)
11362 addend
+= increment
;
11369 addend
<<= howto
->size
;
11370 addend
+= increment
;
11372 /* Should we check for overflow here ? */
11374 /* Drop any undesired bits. */
11375 addend
>>= howto
->rightshift
;
11379 contents
= (contents
& ~ howto
->dst_mask
) | (addend
& howto
->dst_mask
);
11381 bfd_put_32 (abfd
, contents
, address
);
/* Nonzero if R_TYPE is any TLS-related relocation, including the
   GNU TLS-descriptor dialect covered by IS_ARM_TLS_GNU_RELOC.  */
#define IS_ARM_TLS_RELOC(R_TYPE)	\
  ((R_TYPE) == R_ARM_TLS_GD32		\
   || (R_TYPE) == R_ARM_TLS_LDO32	\
   || (R_TYPE) == R_ARM_TLS_LDM32	\
   || (R_TYPE) == R_ARM_TLS_DTPOFF32	\
   || (R_TYPE) == R_ARM_TLS_DTPMOD32	\
   || (R_TYPE) == R_ARM_TLS_TPOFF32	\
   || (R_TYPE) == R_ARM_TLS_LE32	\
   || (R_TYPE) == R_ARM_TLS_IE32	\
   || IS_ARM_TLS_GNU_RELOC (R_TYPE))

/* Specific set of relocations for the gnu tls dialect.  */
#define IS_ARM_TLS_GNU_RELOC(R_TYPE)	\
  ((R_TYPE) == R_ARM_TLS_GOTDESC	\
   || (R_TYPE) == R_ARM_TLS_CALL	\
   || (R_TYPE) == R_ARM_THM_TLS_CALL	\
   || (R_TYPE) == R_ARM_TLS_DESCSEQ	\
   || (R_TYPE) == R_ARM_THM_TLS_DESCSEQ)
11404 /* Relocate an ARM ELF section. */
11407 elf32_arm_relocate_section (bfd
* output_bfd
,
11408 struct bfd_link_info
* info
,
11410 asection
* input_section
,
11411 bfd_byte
* contents
,
11412 Elf_Internal_Rela
* relocs
,
11413 Elf_Internal_Sym
* local_syms
,
11414 asection
** local_sections
)
11416 Elf_Internal_Shdr
*symtab_hdr
;
11417 struct elf_link_hash_entry
**sym_hashes
;
11418 Elf_Internal_Rela
*rel
;
11419 Elf_Internal_Rela
*relend
;
11421 struct elf32_arm_link_hash_table
* globals
;
11423 globals
= elf32_arm_hash_table (info
);
11424 if (globals
== NULL
)
11427 symtab_hdr
= & elf_symtab_hdr (input_bfd
);
11428 sym_hashes
= elf_sym_hashes (input_bfd
);
11431 relend
= relocs
+ input_section
->reloc_count
;
11432 for (; rel
< relend
; rel
++)
11435 reloc_howto_type
* howto
;
11436 unsigned long r_symndx
;
11437 Elf_Internal_Sym
* sym
;
11439 struct elf_link_hash_entry
* h
;
11440 bfd_vma relocation
;
11441 bfd_reloc_status_type r
;
11444 bfd_boolean unresolved_reloc
= FALSE
;
11445 char *error_message
= NULL
;
11447 r_symndx
= ELF32_R_SYM (rel
->r_info
);
11448 r_type
= ELF32_R_TYPE (rel
->r_info
);
11449 r_type
= arm_real_reloc_type (globals
, r_type
);
11451 if ( r_type
== R_ARM_GNU_VTENTRY
11452 || r_type
== R_ARM_GNU_VTINHERIT
)
11455 bfd_reloc
.howto
= elf32_arm_howto_from_type (r_type
);
11456 howto
= bfd_reloc
.howto
;
11462 if (r_symndx
< symtab_hdr
->sh_info
)
11464 sym
= local_syms
+ r_symndx
;
11465 sym_type
= ELF32_ST_TYPE (sym
->st_info
);
11466 sec
= local_sections
[r_symndx
];
11468 /* An object file might have a reference to a local
11469 undefined symbol. This is a daft object file, but we
11470 should at least do something about it. V4BX & NONE
11471 relocations do not use the symbol and are explicitly
11472 allowed to use the undefined symbol, so allow those.
11473 Likewise for relocations against STN_UNDEF. */
11474 if (r_type
!= R_ARM_V4BX
11475 && r_type
!= R_ARM_NONE
11476 && r_symndx
!= STN_UNDEF
11477 && bfd_is_und_section (sec
)
11478 && ELF_ST_BIND (sym
->st_info
) != STB_WEAK
)
11480 if (!info
->callbacks
->undefined_symbol
11481 (info
, bfd_elf_string_from_elf_section
11482 (input_bfd
, symtab_hdr
->sh_link
, sym
->st_name
),
11483 input_bfd
, input_section
,
11484 rel
->r_offset
, TRUE
))
11488 if (globals
->use_rel
)
11490 relocation
= (sec
->output_section
->vma
11491 + sec
->output_offset
11493 if (!bfd_link_relocatable (info
)
11494 && (sec
->flags
& SEC_MERGE
)
11495 && ELF_ST_TYPE (sym
->st_info
) == STT_SECTION
)
11498 bfd_vma addend
, value
;
11502 case R_ARM_MOVW_ABS_NC
:
11503 case R_ARM_MOVT_ABS
:
11504 value
= bfd_get_32 (input_bfd
, contents
+ rel
->r_offset
);
11505 addend
= ((value
& 0xf0000) >> 4) | (value
& 0xfff);
11506 addend
= (addend
^ 0x8000) - 0x8000;
11509 case R_ARM_THM_MOVW_ABS_NC
:
11510 case R_ARM_THM_MOVT_ABS
:
11511 value
= bfd_get_16 (input_bfd
, contents
+ rel
->r_offset
)
11513 value
|= bfd_get_16 (input_bfd
,
11514 contents
+ rel
->r_offset
+ 2);
11515 addend
= ((value
& 0xf7000) >> 4) | (value
& 0xff)
11516 | ((value
& 0x04000000) >> 15);
11517 addend
= (addend
^ 0x8000) - 0x8000;
11521 if (howto
->rightshift
11522 || (howto
->src_mask
& (howto
->src_mask
+ 1)))
11524 (*_bfd_error_handler
)
11525 (_("%B(%A+0x%lx): %s relocation against SEC_MERGE section"),
11526 input_bfd
, input_section
,
11527 (long) rel
->r_offset
, howto
->name
);
11531 value
= bfd_get_32 (input_bfd
, contents
+ rel
->r_offset
);
11533 /* Get the (signed) value from the instruction. */
11534 addend
= value
& howto
->src_mask
;
11535 if (addend
& ((howto
->src_mask
+ 1) >> 1))
11537 bfd_signed_vma mask
;
11540 mask
&= ~ howto
->src_mask
;
11548 _bfd_elf_rel_local_sym (output_bfd
, sym
, &msec
, addend
)
11550 addend
+= msec
->output_section
->vma
+ msec
->output_offset
;
11552 /* Cases here must match those in the preceding
11553 switch statement. */
11556 case R_ARM_MOVW_ABS_NC
:
11557 case R_ARM_MOVT_ABS
:
11558 value
= (value
& 0xfff0f000) | ((addend
& 0xf000) << 4)
11559 | (addend
& 0xfff);
11560 bfd_put_32 (input_bfd
, value
, contents
+ rel
->r_offset
);
11563 case R_ARM_THM_MOVW_ABS_NC
:
11564 case R_ARM_THM_MOVT_ABS
:
11565 value
= (value
& 0xfbf08f00) | ((addend
& 0xf700) << 4)
11566 | (addend
& 0xff) | ((addend
& 0x0800) << 15);
11567 bfd_put_16 (input_bfd
, value
>> 16,
11568 contents
+ rel
->r_offset
);
11569 bfd_put_16 (input_bfd
, value
,
11570 contents
+ rel
->r_offset
+ 2);
11574 value
= (value
& ~ howto
->dst_mask
)
11575 | (addend
& howto
->dst_mask
);
11576 bfd_put_32 (input_bfd
, value
, contents
+ rel
->r_offset
);
11582 relocation
= _bfd_elf_rela_local_sym (output_bfd
, sym
, &sec
, rel
);
11586 bfd_boolean warned
, ignored
;
11588 RELOC_FOR_GLOBAL_SYMBOL (info
, input_bfd
, input_section
, rel
,
11589 r_symndx
, symtab_hdr
, sym_hashes
,
11590 h
, sec
, relocation
,
11591 unresolved_reloc
, warned
, ignored
);
11593 sym_type
= h
->type
;
11596 if (sec
!= NULL
&& discarded_section (sec
))
11597 RELOC_AGAINST_DISCARDED_SECTION (info
, input_bfd
, input_section
,
11598 rel
, 1, relend
, howto
, 0, contents
);
11600 if (bfd_link_relocatable (info
))
11602 /* This is a relocatable link. We don't have to change
11603 anything, unless the reloc is against a section symbol,
11604 in which case we have to adjust according to where the
11605 section symbol winds up in the output section. */
11606 if (sym
!= NULL
&& ELF_ST_TYPE (sym
->st_info
) == STT_SECTION
)
11608 if (globals
->use_rel
)
11609 arm_add_to_rel (input_bfd
, contents
+ rel
->r_offset
,
11610 howto
, (bfd_signed_vma
) sec
->output_offset
);
11612 rel
->r_addend
+= sec
->output_offset
;
11618 name
= h
->root
.root
.string
;
11621 name
= (bfd_elf_string_from_elf_section
11622 (input_bfd
, symtab_hdr
->sh_link
, sym
->st_name
));
11623 if (name
== NULL
|| *name
== '\0')
11624 name
= bfd_section_name (input_bfd
, sec
);
11627 if (r_symndx
!= STN_UNDEF
11628 && r_type
!= R_ARM_NONE
11630 || h
->root
.type
== bfd_link_hash_defined
11631 || h
->root
.type
== bfd_link_hash_defweak
)
11632 && IS_ARM_TLS_RELOC (r_type
) != (sym_type
== STT_TLS
))
11634 (*_bfd_error_handler
)
11635 ((sym_type
== STT_TLS
11636 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
11637 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
11640 (long) rel
->r_offset
,
11645 /* We call elf32_arm_final_link_relocate unless we're completely
11646 done, i.e., the relaxation produced the final output we want,
11647 and we won't let anybody mess with it. Also, we have to do
11648 addend adjustments in case of a R_ARM_TLS_GOTDESC relocation
11649 both in relaxed and non-relaxed cases. */
11650 if ((elf32_arm_tls_transition (info
, r_type
, h
) != (unsigned)r_type
)
11651 || (IS_ARM_TLS_GNU_RELOC (r_type
)
11652 && !((h
? elf32_arm_hash_entry (h
)->tls_type
:
11653 elf32_arm_local_got_tls_type (input_bfd
)[r_symndx
])
11656 r
= elf32_arm_tls_relax (globals
, input_bfd
, input_section
,
11657 contents
, rel
, h
== NULL
);
11658 /* This may have been marked unresolved because it came from
11659 a shared library. But we've just dealt with that. */
11660 unresolved_reloc
= 0;
11663 r
= bfd_reloc_continue
;
11665 if (r
== bfd_reloc_continue
)
11667 unsigned char branch_type
=
11668 h
? ARM_GET_SYM_BRANCH_TYPE (h
->target_internal
)
11669 : ARM_GET_SYM_BRANCH_TYPE (sym
->st_target_internal
);
11671 r
= elf32_arm_final_link_relocate (howto
, input_bfd
, output_bfd
,
11672 input_section
, contents
, rel
,
11673 relocation
, info
, sec
, name
,
11674 sym_type
, branch_type
, h
,
11679 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
11680 because such sections are not SEC_ALLOC and thus ld.so will
11681 not process them. */
11682 if (unresolved_reloc
11683 && !((input_section
->flags
& SEC_DEBUGGING
) != 0
11685 && _bfd_elf_section_offset (output_bfd
, info
, input_section
,
11686 rel
->r_offset
) != (bfd_vma
) -1)
11688 (*_bfd_error_handler
)
11689 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
11692 (long) rel
->r_offset
,
11694 h
->root
.root
.string
);
11698 if (r
!= bfd_reloc_ok
)
11702 case bfd_reloc_overflow
:
11703 /* If the overflowing reloc was to an undefined symbol,
11704 we have already printed one error message and there
11705 is no point complaining again. */
11707 h
->root
.type
!= bfd_link_hash_undefined
)
11708 && (!((*info
->callbacks
->reloc_overflow
)
11709 (info
, (h
? &h
->root
: NULL
), name
, howto
->name
,
11710 (bfd_vma
) 0, input_bfd
, input_section
,
11715 case bfd_reloc_undefined
:
11716 if (!((*info
->callbacks
->undefined_symbol
)
11717 (info
, name
, input_bfd
, input_section
,
11718 rel
->r_offset
, TRUE
)))
11722 case bfd_reloc_outofrange
:
11723 error_message
= _("out of range");
11726 case bfd_reloc_notsupported
:
11727 error_message
= _("unsupported relocation");
11730 case bfd_reloc_dangerous
:
11731 /* error_message should already be set. */
11735 error_message
= _("unknown error");
11736 /* Fall through. */
11739 BFD_ASSERT (error_message
!= NULL
);
11740 if (!((*info
->callbacks
->reloc_dangerous
)
11741 (info
, error_message
, input_bfd
, input_section
,
11752 /* Add a new unwind edit to the list described by HEAD, TAIL. If TINDEX is zero,
11753 adds the edit to the start of the list. (The list must be built in order of
11754 ascending TINDEX: the function's callers are primarily responsible for
11755 maintaining that condition). */
11758 add_unwind_table_edit (arm_unwind_table_edit
**head
,
11759 arm_unwind_table_edit
**tail
,
11760 arm_unwind_edit_type type
,
11761 asection
*linked_section
,
11762 unsigned int tindex
)
11764 arm_unwind_table_edit
*new_edit
= (arm_unwind_table_edit
*)
11765 xmalloc (sizeof (arm_unwind_table_edit
));
11767 new_edit
->type
= type
;
11768 new_edit
->linked_section
= linked_section
;
11769 new_edit
->index
= tindex
;
11773 new_edit
->next
= NULL
;
11776 (*tail
)->next
= new_edit
;
11778 (*tail
) = new_edit
;
11781 (*head
) = new_edit
;
11785 new_edit
->next
= *head
;
11794 static _arm_elf_section_data
*get_arm_elf_section_data (asection
*);
11796 /* Increase the size of EXIDX_SEC by ADJUST bytes. ADJUST mau be negative. */
11798 adjust_exidx_size(asection
*exidx_sec
, int adjust
)
11802 if (!exidx_sec
->rawsize
)
11803 exidx_sec
->rawsize
= exidx_sec
->size
;
11805 bfd_set_section_size (exidx_sec
->owner
, exidx_sec
, exidx_sec
->size
+ adjust
);
11806 out_sec
= exidx_sec
->output_section
;
11807 /* Adjust size of output section. */
11808 bfd_set_section_size (out_sec
->owner
, out_sec
, out_sec
->size
+adjust
);
11811 /* Insert an EXIDX_CANTUNWIND marker at the end of a section. */
11813 insert_cantunwind_after(asection
*text_sec
, asection
*exidx_sec
)
11815 struct _arm_elf_section_data
*exidx_arm_data
;
11817 exidx_arm_data
= get_arm_elf_section_data (exidx_sec
);
11818 add_unwind_table_edit (
11819 &exidx_arm_data
->u
.exidx
.unwind_edit_list
,
11820 &exidx_arm_data
->u
.exidx
.unwind_edit_tail
,
11821 INSERT_EXIDX_CANTUNWIND_AT_END
, text_sec
, UINT_MAX
);
11823 exidx_arm_data
->additional_reloc_count
++;
11825 adjust_exidx_size(exidx_sec
, 8);
11828 /* Scan .ARM.exidx tables, and create a list describing edits which should be
11829 made to those tables, such that:
11831 1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
11832 2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
11833 codes which have been inlined into the index).
11835 If MERGE_EXIDX_ENTRIES is false, duplicate entries are not merged.
11837 The edits are applied when the tables are written
11838 (in elf32_arm_write_section). */
11841 elf32_arm_fix_exidx_coverage (asection
**text_section_order
,
11842 unsigned int num_text_sections
,
11843 struct bfd_link_info
*info
,
11844 bfd_boolean merge_exidx_entries
)
11847 unsigned int last_second_word
= 0, i
;
11848 asection
*last_exidx_sec
= NULL
;
11849 asection
*last_text_sec
= NULL
;
11850 int last_unwind_type
= -1;
11852 /* Walk over all EXIDX sections, and create backlinks from the corrsponding
11854 for (inp
= info
->input_bfds
; inp
!= NULL
; inp
= inp
->link
.next
)
11858 for (sec
= inp
->sections
; sec
!= NULL
; sec
= sec
->next
)
11860 struct bfd_elf_section_data
*elf_sec
= elf_section_data (sec
);
11861 Elf_Internal_Shdr
*hdr
= &elf_sec
->this_hdr
;
11863 if (!hdr
|| hdr
->sh_type
!= SHT_ARM_EXIDX
)
11866 if (elf_sec
->linked_to
)
11868 Elf_Internal_Shdr
*linked_hdr
11869 = &elf_section_data (elf_sec
->linked_to
)->this_hdr
;
11870 struct _arm_elf_section_data
*linked_sec_arm_data
11871 = get_arm_elf_section_data (linked_hdr
->bfd_section
);
11873 if (linked_sec_arm_data
== NULL
)
11876 /* Link this .ARM.exidx section back from the text section it
11878 linked_sec_arm_data
->u
.text
.arm_exidx_sec
= sec
;
11883 /* Walk all text sections in order of increasing VMA. Eilminate duplicate
11884 index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
11885 and add EXIDX_CANTUNWIND entries for sections with no unwind table data. */
11887 for (i
= 0; i
< num_text_sections
; i
++)
11889 asection
*sec
= text_section_order
[i
];
11890 asection
*exidx_sec
;
11891 struct _arm_elf_section_data
*arm_data
= get_arm_elf_section_data (sec
);
11892 struct _arm_elf_section_data
*exidx_arm_data
;
11893 bfd_byte
*contents
= NULL
;
11894 int deleted_exidx_bytes
= 0;
11896 arm_unwind_table_edit
*unwind_edit_head
= NULL
;
11897 arm_unwind_table_edit
*unwind_edit_tail
= NULL
;
11898 Elf_Internal_Shdr
*hdr
;
11901 if (arm_data
== NULL
)
11904 exidx_sec
= arm_data
->u
.text
.arm_exidx_sec
;
11905 if (exidx_sec
== NULL
)
11907 /* Section has no unwind data. */
11908 if (last_unwind_type
== 0 || !last_exidx_sec
)
11911 /* Ignore zero sized sections. */
11912 if (sec
->size
== 0)
11915 insert_cantunwind_after(last_text_sec
, last_exidx_sec
);
11916 last_unwind_type
= 0;
11920 /* Skip /DISCARD/ sections. */
11921 if (bfd_is_abs_section (exidx_sec
->output_section
))
11924 hdr
= &elf_section_data (exidx_sec
)->this_hdr
;
11925 if (hdr
->sh_type
!= SHT_ARM_EXIDX
)
11928 exidx_arm_data
= get_arm_elf_section_data (exidx_sec
);
11929 if (exidx_arm_data
== NULL
)
11932 ibfd
= exidx_sec
->owner
;
11934 if (hdr
->contents
!= NULL
)
11935 contents
= hdr
->contents
;
11936 else if (! bfd_malloc_and_get_section (ibfd
, exidx_sec
, &contents
))
11940 if (last_unwind_type
> 0)
11942 unsigned int first_word
= bfd_get_32 (ibfd
, contents
);
11943 /* Add cantunwind if first unwind item does not match section
11945 if (first_word
!= sec
->vma
)
11947 insert_cantunwind_after (last_text_sec
, last_exidx_sec
);
11948 last_unwind_type
= 0;
11952 for (j
= 0; j
< hdr
->sh_size
; j
+= 8)
11954 unsigned int second_word
= bfd_get_32 (ibfd
, contents
+ j
+ 4);
11958 /* An EXIDX_CANTUNWIND entry. */
11959 if (second_word
== 1)
11961 if (last_unwind_type
== 0)
11965 /* Inlined unwinding data. Merge if equal to previous. */
11966 else if ((second_word
& 0x80000000) != 0)
11968 if (merge_exidx_entries
11969 && last_second_word
== second_word
&& last_unwind_type
== 1)
11972 last_second_word
= second_word
;
11974 /* Normal table entry. In theory we could merge these too,
11975 but duplicate entries are likely to be much less common. */
11979 if (elide
&& !bfd_link_relocatable (info
))
11981 add_unwind_table_edit (&unwind_edit_head
, &unwind_edit_tail
,
11982 DELETE_EXIDX_ENTRY
, NULL
, j
/ 8);
11984 deleted_exidx_bytes
+= 8;
11987 last_unwind_type
= unwind_type
;
11990 /* Free contents if we allocated it ourselves. */
11991 if (contents
!= hdr
->contents
)
11994 /* Record edits to be applied later (in elf32_arm_write_section). */
11995 exidx_arm_data
->u
.exidx
.unwind_edit_list
= unwind_edit_head
;
11996 exidx_arm_data
->u
.exidx
.unwind_edit_tail
= unwind_edit_tail
;
11998 if (deleted_exidx_bytes
> 0)
11999 adjust_exidx_size(exidx_sec
, -deleted_exidx_bytes
);
12001 last_exidx_sec
= exidx_sec
;
12002 last_text_sec
= sec
;
12005 /* Add terminating CANTUNWIND entry. */
12006 if (!bfd_link_relocatable (info
) && last_exidx_sec
12007 && last_unwind_type
!= 0)
12008 insert_cantunwind_after(last_text_sec
, last_exidx_sec
);
12014 elf32_arm_output_glue_section (struct bfd_link_info
*info
, bfd
*obfd
,
12015 bfd
*ibfd
, const char *name
)
12017 asection
*sec
, *osec
;
12019 sec
= bfd_get_linker_section (ibfd
, name
);
12020 if (sec
== NULL
|| (sec
->flags
& SEC_EXCLUDE
) != 0)
12023 osec
= sec
->output_section
;
12024 if (elf32_arm_write_section (obfd
, info
, sec
, sec
->contents
))
12027 if (! bfd_set_section_contents (obfd
, osec
, sec
->contents
,
12028 sec
->output_offset
, sec
->size
))
12035 elf32_arm_final_link (bfd
*abfd
, struct bfd_link_info
*info
)
12037 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (info
);
12038 asection
*sec
, *osec
;
12040 if (globals
== NULL
)
12043 /* Invoke the regular ELF backend linker to do all the work. */
12044 if (!bfd_elf_final_link (abfd
, info
))
12047 /* Process stub sections (eg BE8 encoding, ...). */
12048 struct elf32_arm_link_hash_table
*htab
= elf32_arm_hash_table (info
);
12050 for (i
=0; i
<htab
->top_id
; i
++)
12052 sec
= htab
->stub_group
[i
].stub_sec
;
12053 /* Only process it once, in its link_sec slot. */
12054 if (sec
&& i
== htab
->stub_group
[i
].link_sec
->id
)
12056 osec
= sec
->output_section
;
12057 elf32_arm_write_section (abfd
, info
, sec
, sec
->contents
);
12058 if (! bfd_set_section_contents (abfd
, osec
, sec
->contents
,
12059 sec
->output_offset
, sec
->size
))
12064 /* Write out any glue sections now that we have created all the
12066 if (globals
->bfd_of_glue_owner
!= NULL
)
12068 if (! elf32_arm_output_glue_section (info
, abfd
,
12069 globals
->bfd_of_glue_owner
,
12070 ARM2THUMB_GLUE_SECTION_NAME
))
12073 if (! elf32_arm_output_glue_section (info
, abfd
,
12074 globals
->bfd_of_glue_owner
,
12075 THUMB2ARM_GLUE_SECTION_NAME
))
12078 if (! elf32_arm_output_glue_section (info
, abfd
,
12079 globals
->bfd_of_glue_owner
,
12080 VFP11_ERRATUM_VENEER_SECTION_NAME
))
12083 if (! elf32_arm_output_glue_section (info
, abfd
,
12084 globals
->bfd_of_glue_owner
,
12085 STM32L4XX_ERRATUM_VENEER_SECTION_NAME
))
12088 if (! elf32_arm_output_glue_section (info
, abfd
,
12089 globals
->bfd_of_glue_owner
,
12090 ARM_BX_GLUE_SECTION_NAME
))
12097 /* Return a best guess for the machine number based on the attributes. */
12099 static unsigned int
12100 bfd_arm_get_mach_from_attributes (bfd
* abfd
)
12102 int arch
= bfd_elf_get_obj_attr_int (abfd
, OBJ_ATTR_PROC
, Tag_CPU_arch
);
12106 case TAG_CPU_ARCH_V4
: return bfd_mach_arm_4
;
12107 case TAG_CPU_ARCH_V4T
: return bfd_mach_arm_4T
;
12108 case TAG_CPU_ARCH_V5T
: return bfd_mach_arm_5T
;
12110 case TAG_CPU_ARCH_V5TE
:
12114 BFD_ASSERT (Tag_CPU_name
< NUM_KNOWN_OBJ_ATTRIBUTES
);
12115 name
= elf_known_obj_attributes (abfd
) [OBJ_ATTR_PROC
][Tag_CPU_name
].s
;
12119 if (strcmp (name
, "IWMMXT2") == 0)
12120 return bfd_mach_arm_iWMMXt2
;
12122 if (strcmp (name
, "IWMMXT") == 0)
12123 return bfd_mach_arm_iWMMXt
;
12125 if (strcmp (name
, "XSCALE") == 0)
12129 BFD_ASSERT (Tag_WMMX_arch
< NUM_KNOWN_OBJ_ATTRIBUTES
);
12130 wmmx
= elf_known_obj_attributes (abfd
) [OBJ_ATTR_PROC
][Tag_WMMX_arch
].i
;
12133 case 1: return bfd_mach_arm_iWMMXt
;
12134 case 2: return bfd_mach_arm_iWMMXt2
;
12135 default: return bfd_mach_arm_XScale
;
12140 return bfd_mach_arm_5TE
;
12144 return bfd_mach_arm_unknown
;
12148 /* Set the right machine number. */
12151 elf32_arm_object_p (bfd
*abfd
)
12155 mach
= bfd_arm_get_mach_from_notes (abfd
, ARM_NOTE_SECTION
);
12157 if (mach
== bfd_mach_arm_unknown
)
12159 if (elf_elfheader (abfd
)->e_flags
& EF_ARM_MAVERICK_FLOAT
)
12160 mach
= bfd_mach_arm_ep9312
;
12162 mach
= bfd_arm_get_mach_from_attributes (abfd
);
12165 bfd_default_set_arch_mach (abfd
, bfd_arch_arm
, mach
);
12169 /* Function to keep ARM specific flags in the ELF header. */
12172 elf32_arm_set_private_flags (bfd
*abfd
, flagword flags
)
12174 if (elf_flags_init (abfd
)
12175 && elf_elfheader (abfd
)->e_flags
!= flags
)
12177 if (EF_ARM_EABI_VERSION (flags
) == EF_ARM_EABI_UNKNOWN
)
12179 if (flags
& EF_ARM_INTERWORK
)
12180 (*_bfd_error_handler
)
12181 (_("Warning: Not setting interworking flag of %B since it has already been specified as non-interworking"),
12185 (_("Warning: Clearing the interworking flag of %B due to outside request"),
12191 elf_elfheader (abfd
)->e_flags
= flags
;
12192 elf_flags_init (abfd
) = TRUE
;
12198 /* Copy backend specific data from one object module to another. */
12201 elf32_arm_copy_private_bfd_data (bfd
*ibfd
, bfd
*obfd
)
12204 flagword out_flags
;
12206 if (! is_arm_elf (ibfd
) || ! is_arm_elf (obfd
))
12209 in_flags
= elf_elfheader (ibfd
)->e_flags
;
12210 out_flags
= elf_elfheader (obfd
)->e_flags
;
12212 if (elf_flags_init (obfd
)
12213 && EF_ARM_EABI_VERSION (out_flags
) == EF_ARM_EABI_UNKNOWN
12214 && in_flags
!= out_flags
)
12216 /* Cannot mix APCS26 and APCS32 code. */
12217 if ((in_flags
& EF_ARM_APCS_26
) != (out_flags
& EF_ARM_APCS_26
))
12220 /* Cannot mix float APCS and non-float APCS code. */
12221 if ((in_flags
& EF_ARM_APCS_FLOAT
) != (out_flags
& EF_ARM_APCS_FLOAT
))
12224 /* If the src and dest have different interworking flags
12225 then turn off the interworking bit. */
12226 if ((in_flags
& EF_ARM_INTERWORK
) != (out_flags
& EF_ARM_INTERWORK
))
12228 if (out_flags
& EF_ARM_INTERWORK
)
12230 (_("Warning: Clearing the interworking flag of %B because non-interworking code in %B has been linked with it"),
12233 in_flags
&= ~EF_ARM_INTERWORK
;
12236 /* Likewise for PIC, though don't warn for this case. */
12237 if ((in_flags
& EF_ARM_PIC
) != (out_flags
& EF_ARM_PIC
))
12238 in_flags
&= ~EF_ARM_PIC
;
12241 elf_elfheader (obfd
)->e_flags
= in_flags
;
12242 elf_flags_init (obfd
) = TRUE
;
12244 return _bfd_elf_copy_private_bfd_data (ibfd
, obfd
);
/* Values for Tag_ABI_PCS_R9_use.  */
enum
{
  AEABI_R9_V6,
  AEABI_R9_SB,
  AEABI_R9_TLS,
  AEABI_R9_unused
};

/* Values for Tag_ABI_PCS_RW_data.  */
enum
{
  AEABI_PCS_RW_data_absolute,
  AEABI_PCS_RW_data_PCrel,
  AEABI_PCS_RW_data_SBrel,
  AEABI_PCS_RW_data_unused
};

/* Values for Tag_ABI_enum_size.  */
enum
{
  AEABI_enum_unused,
  AEABI_enum_short,
  AEABI_enum_wide,
  AEABI_enum_forced_wide
};
12274 /* Determine whether an object attribute tag takes an integer, a
12278 elf32_arm_obj_attrs_arg_type (int tag
)
12280 if (tag
== Tag_compatibility
)
12281 return ATTR_TYPE_FLAG_INT_VAL
| ATTR_TYPE_FLAG_STR_VAL
;
12282 else if (tag
== Tag_nodefaults
)
12283 return ATTR_TYPE_FLAG_INT_VAL
| ATTR_TYPE_FLAG_NO_DEFAULT
;
12284 else if (tag
== Tag_CPU_raw_name
|| tag
== Tag_CPU_name
)
12285 return ATTR_TYPE_FLAG_STR_VAL
;
12287 return ATTR_TYPE_FLAG_INT_VAL
;
12289 return (tag
& 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL
: ATTR_TYPE_FLAG_INT_VAL
;
12292 /* The ABI defines that Tag_conformance should be emitted first, and that
12293 Tag_nodefaults should be second (if either is defined). This sets those
12294 two positions, and bumps up the position of all the remaining tags to
12297 elf32_arm_obj_attrs_order (int num
)
12299 if (num
== LEAST_KNOWN_OBJ_ATTRIBUTE
)
12300 return Tag_conformance
;
12301 if (num
== LEAST_KNOWN_OBJ_ATTRIBUTE
+ 1)
12302 return Tag_nodefaults
;
12303 if ((num
- 2) < Tag_nodefaults
)
12305 if ((num
- 1) < Tag_conformance
)
12310 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
12312 elf32_arm_obj_attrs_handle_unknown (bfd
*abfd
, int tag
)
12314 if ((tag
& 127) < 64)
12317 (_("%B: Unknown mandatory EABI object attribute %d"),
12319 bfd_set_error (bfd_error_bad_value
);
12325 (_("Warning: %B: Unknown EABI object attribute %d"),
12331 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
12332 Returns -1 if no architecture could be read. */
12335 get_secondary_compatible_arch (bfd
*abfd
)
12337 obj_attribute
*attr
=
12338 &elf_known_obj_attributes_proc (abfd
)[Tag_also_compatible_with
];
12340 /* Note: the tag and its argument below are uleb128 values, though
12341 currently-defined values fit in one byte for each. */
12343 && attr
->s
[0] == Tag_CPU_arch
12344 && (attr
->s
[1] & 128) != 128
12345 && attr
->s
[2] == 0)
12348 /* This tag is "safely ignorable", so don't complain if it looks funny. */
12352 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
12353 The tag is removed if ARCH is -1. */
12356 set_secondary_compatible_arch (bfd
*abfd
, int arch
)
12358 obj_attribute
*attr
=
12359 &elf_known_obj_attributes_proc (abfd
)[Tag_also_compatible_with
];
12367 /* Note: the tag and its argument below are uleb128 values, though
12368 currently-defined values fit in one byte for each. */
12370 attr
->s
= (char *) bfd_alloc (abfd
, 3);
12371 attr
->s
[0] = Tag_CPU_arch
;
/* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
   (Tag_also_compatible_with) into account.  Returns the merged tag, or -1
   on an unmergeable conflict (after reporting an error against IBFD).
   NOTE(review): this region is extraction-garbled and many of the
   per-architecture merge-table rows are missing; the code tokens below
   are preserved verbatim rather than reconstructed, because a wrong
   table entry would silently change link-time architecture merging.  */
12376 /* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
12380 tag_cpu_arch_combine (bfd
*ibfd
, int oldtag
, int *secondary_compat_out
,
12381 int newtag
, int secondary_compat
)
12383 #define T(X) TAG_CPU_ARCH_##X
12384 int tagl
, tagh
, result
;
/* Merge tables: comb[tagh - T(V6T2)][tagl] gives the result when the
   higher tag is V6T2 or later.  Each row below is indexed by the lower
   tag.  Entries of -1 (not all visible here) mark invalid combinations.  */
12387 T(V6T2
), /* PRE_V4. */
12389 T(V6T2
), /* V4T. */
12390 T(V6T2
), /* V5T. */
12391 T(V6T2
), /* V5TE. */
12392 T(V6T2
), /* V5TEJ. */
12395 T(V6T2
) /* V6T2. */
12399 T(V6K
), /* PRE_V4. */
12403 T(V6K
), /* V5TE. */
12404 T(V6K
), /* V5TEJ. */
12406 T(V6KZ
), /* V6KZ. */
12412 T(V7
), /* PRE_V4. */
12417 T(V7
), /* V5TEJ. */
12430 T(V6K
), /* V5TE. */
12431 T(V6K
), /* V5TEJ. */
12433 T(V6KZ
), /* V6KZ. */
12437 T(V6_M
) /* V6_M. */
12439 const int v6s_m
[] =
12445 T(V6K
), /* V5TE. */
12446 T(V6K
), /* V5TEJ. */
12448 T(V6KZ
), /* V6KZ. */
12452 T(V6S_M
), /* V6_M. */
12453 T(V6S_M
) /* V6S_M. */
12455 const int v7e_m
[] =
12459 T(V7E_M
), /* V4T. */
12460 T(V7E_M
), /* V5T. */
12461 T(V7E_M
), /* V5TE. */
12462 T(V7E_M
), /* V5TEJ. */
12463 T(V7E_M
), /* V6. */
12464 T(V7E_M
), /* V6KZ. */
12465 T(V7E_M
), /* V6T2. */
12466 T(V7E_M
), /* V6K. */
12467 T(V7E_M
), /* V7. */
12468 T(V7E_M
), /* V6_M. */
12469 T(V7E_M
), /* V6S_M. */
12470 T(V7E_M
) /* V7E_M. */
12474 T(V8
), /* PRE_V4. */
12479 T(V8
), /* V5TEJ. */
12486 T(V8
), /* V6S_M. */
12487 T(V8
), /* V7E_M. */
12490 const int v8m_baseline
[] =
12503 T(V8M_BASE
), /* V6_M. */
12504 T(V8M_BASE
), /* V6S_M. */
12508 T(V8M_BASE
) /* V8-M BASELINE. */
12510 const int v8m_mainline
[] =
12522 T(V8M_MAIN
), /* V7. */
12523 T(V8M_MAIN
), /* V6_M. */
12524 T(V8M_MAIN
), /* V6S_M. */
12525 T(V8M_MAIN
), /* V7E_M. */
12528 T(V8M_MAIN
), /* V8-M BASELINE. */
12529 T(V8M_MAIN
) /* V8-M MAINLINE. */
/* V4T_PLUS_V6_M is a pseudo-architecture representing an object
   compatible with both V4T and V6_M; see the canonicalisation below.  */
12531 const int v4t_plus_v6_m
[] =
12537 T(V5TE
), /* V5TE. */
12538 T(V5TEJ
), /* V5TEJ. */
12540 T(V6KZ
), /* V6KZ. */
12541 T(V6T2
), /* V6T2. */
12544 T(V6_M
), /* V6_M. */
12545 T(V6S_M
), /* V6S_M. */
12546 T(V7E_M
), /* V7E_M. */
12549 T(V8M_BASE
), /* V8-M BASELINE. */
12550 T(V8M_MAIN
), /* V8-M MAINLINE. */
12551 T(V4T_PLUS_V6_M
) /* V4T plus V6_M. */
12553 const int *comb
[] =
12565 /* Pseudo-architecture. */
12569 /* Check we've not got a higher architecture than we know about. */
12571 if (oldtag
> MAX_TAG_CPU_ARCH
|| newtag
> MAX_TAG_CPU_ARCH
)
12573 _bfd_error_handler (_("error: %B: Unknown CPU architecture"), ibfd
);
12577 /* Override old tag if we have a Tag_also_compatible_with on the output. */
12579 if ((oldtag
== T(V6_M
) && *secondary_compat_out
== T(V4T
))
12580 || (oldtag
== T(V4T
) && *secondary_compat_out
== T(V6_M
)))
12581 oldtag
= T(V4T_PLUS_V6_M
);
12583 /* And override the new tag if we have a Tag_also_compatible_with on the
12586 if ((newtag
== T(V6_M
) && secondary_compat
== T(V4T
))
12587 || (newtag
== T(V4T
) && secondary_compat
== T(V6_M
)))
12588 newtag
= T(V4T_PLUS_V6_M
);
/* tagl/tagh are the lower/higher of the two tags; the result defaults
   to the higher tag, refined via the merge tables below.  */
12590 tagl
= (oldtag
< newtag
) ? oldtag
: newtag
;
12591 result
= tagh
= (oldtag
> newtag
) ? oldtag
: newtag
;
12593 /* Architectures before V6KZ add features monotonically. */
12594 if (tagh
<= TAG_CPU_ARCH_V6KZ
)
12597 result
= comb
[tagh
- T(V6T2
)] ? comb
[tagh
- T(V6T2
)][tagl
] : -1;
12599 /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
12600 as the canonical version. */
12601 if (result
== T(V4T_PLUS_V6_M
))
12604 *secondary_compat_out
= T(V6_M
);
12607 *secondary_compat_out
= -1;
/* result == -1: the two architectures cannot be merged.  */
12611 _bfd_error_handler (_("error: %B: Conflicting CPU architectures %d/%d"),
12612 ibfd
, oldtag
, newtag
);
12620 /* Query attributes object to see if integer divide instructions may be
12621 present in an object. */
12623 elf32_arm_attributes_accept_div (const obj_attribute
*attr
)
12625 int arch
= attr
[Tag_CPU_arch
].i
;
12626 int profile
= attr
[Tag_CPU_arch_profile
].i
;
12628 switch (attr
[Tag_DIV_use
].i
)
12631 /* Integer divide allowed if instruction contained in archetecture. */
12632 if (arch
== TAG_CPU_ARCH_V7
&& (profile
== 'R' || profile
== 'M'))
12634 else if (arch
>= TAG_CPU_ARCH_V7E_M
)
12640 /* Integer divide explicitly prohibited. */
12644 /* Unrecognised case - treat as allowing divide everywhere. */
12646 /* Integer divide allowed in ARM state. */
12651 /* Query attributes object to see if integer divide instructions are
12652 forbidden to be in the object. This is not the inverse of
12653 elf32_arm_attributes_accept_div. */
12655 elf32_arm_attributes_forbid_div (const obj_attribute
*attr
)
12657 return attr
[Tag_DIV_use
].i
== 1;
12660 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
12661 are conflicting attributes. */
12664 elf32_arm_merge_eabi_attributes (bfd
*ibfd
, bfd
*obfd
)
12666 obj_attribute
*in_attr
;
12667 obj_attribute
*out_attr
;
12668 /* Some tags have 0 = don't care, 1 = strong requirement,
12669 2 = weak requirement. */
12670 static const int order_021
[3] = {0, 2, 1};
12672 bfd_boolean result
= TRUE
;
12673 const char *sec_name
= get_elf_backend_data (ibfd
)->obj_attrs_section
;
12675 /* Skip the linker stubs file. This preserves previous behavior
12676 of accepting unknown attributes in the first input file - but
12678 if (ibfd
->flags
& BFD_LINKER_CREATED
)
12681 /* Skip any input that hasn't attribute section.
12682 This enables to link object files without attribute section with
12684 if (bfd_get_section_by_name (ibfd
, sec_name
) == NULL
)
12687 if (!elf_known_obj_attributes_proc (obfd
)[0].i
)
12689 /* This is the first object. Copy the attributes. */
12690 _bfd_elf_copy_obj_attributes (ibfd
, obfd
);
12692 out_attr
= elf_known_obj_attributes_proc (obfd
);
12694 /* Use the Tag_null value to indicate the attributes have been
12698 /* We do not output objects with Tag_MPextension_use_legacy - we move
12699 the attribute's value to Tag_MPextension_use. */
12700 if (out_attr
[Tag_MPextension_use_legacy
].i
!= 0)
12702 if (out_attr
[Tag_MPextension_use
].i
!= 0
12703 && out_attr
[Tag_MPextension_use_legacy
].i
12704 != out_attr
[Tag_MPextension_use
].i
)
12707 (_("Error: %B has both the current and legacy "
12708 "Tag_MPextension_use attributes"), ibfd
);
12712 out_attr
[Tag_MPextension_use
] =
12713 out_attr
[Tag_MPextension_use_legacy
];
12714 out_attr
[Tag_MPextension_use_legacy
].type
= 0;
12715 out_attr
[Tag_MPextension_use_legacy
].i
= 0;
12721 in_attr
= elf_known_obj_attributes_proc (ibfd
);
12722 out_attr
= elf_known_obj_attributes_proc (obfd
);
12723 /* This needs to happen before Tag_ABI_FP_number_model is merged. */
12724 if (in_attr
[Tag_ABI_VFP_args
].i
!= out_attr
[Tag_ABI_VFP_args
].i
)
12726 /* Ignore mismatches if the object doesn't use floating point or is
12727 floating point ABI independent. */
12728 if (out_attr
[Tag_ABI_FP_number_model
].i
== AEABI_FP_number_model_none
12729 || (in_attr
[Tag_ABI_FP_number_model
].i
!= AEABI_FP_number_model_none
12730 && out_attr
[Tag_ABI_VFP_args
].i
== AEABI_VFP_args_compatible
))
12731 out_attr
[Tag_ABI_VFP_args
].i
= in_attr
[Tag_ABI_VFP_args
].i
;
12732 else if (in_attr
[Tag_ABI_FP_number_model
].i
!= AEABI_FP_number_model_none
12733 && in_attr
[Tag_ABI_VFP_args
].i
!= AEABI_VFP_args_compatible
)
12736 (_("error: %B uses VFP register arguments, %B does not"),
12737 in_attr
[Tag_ABI_VFP_args
].i
? ibfd
: obfd
,
12738 in_attr
[Tag_ABI_VFP_args
].i
? obfd
: ibfd
);
12743 for (i
= LEAST_KNOWN_OBJ_ATTRIBUTE
; i
< NUM_KNOWN_OBJ_ATTRIBUTES
; i
++)
12745 /* Merge this attribute with existing attributes. */
12748 case Tag_CPU_raw_name
:
12750 /* These are merged after Tag_CPU_arch. */
12753 case Tag_ABI_optimization_goals
:
12754 case Tag_ABI_FP_optimization_goals
:
12755 /* Use the first value seen. */
12760 int secondary_compat
= -1, secondary_compat_out
= -1;
12761 unsigned int saved_out_attr
= out_attr
[i
].i
;
12763 static const char *name_table
[] =
12765 /* These aren't real CPU names, but we can't guess
12766 that from the architecture version alone. */
12782 "ARM v8-M.baseline",
12783 "ARM v8-M.mainline",
12786 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
12787 secondary_compat
= get_secondary_compatible_arch (ibfd
);
12788 secondary_compat_out
= get_secondary_compatible_arch (obfd
);
12789 arch_attr
= tag_cpu_arch_combine (ibfd
, out_attr
[i
].i
,
12790 &secondary_compat_out
,
12794 /* Return with error if failed to merge. */
12795 if (arch_attr
== -1)
12798 out_attr
[i
].i
= arch_attr
;
12800 set_secondary_compatible_arch (obfd
, secondary_compat_out
);
12802 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
12803 if (out_attr
[i
].i
== saved_out_attr
)
12804 ; /* Leave the names alone. */
12805 else if (out_attr
[i
].i
== in_attr
[i
].i
)
12807 /* The output architecture has been changed to match the
12808 input architecture. Use the input names. */
12809 out_attr
[Tag_CPU_name
].s
= in_attr
[Tag_CPU_name
].s
12810 ? _bfd_elf_attr_strdup (obfd
, in_attr
[Tag_CPU_name
].s
)
12812 out_attr
[Tag_CPU_raw_name
].s
= in_attr
[Tag_CPU_raw_name
].s
12813 ? _bfd_elf_attr_strdup (obfd
, in_attr
[Tag_CPU_raw_name
].s
)
12818 out_attr
[Tag_CPU_name
].s
= NULL
;
12819 out_attr
[Tag_CPU_raw_name
].s
= NULL
;
12822 /* If we still don't have a value for Tag_CPU_name,
12823 make one up now. Tag_CPU_raw_name remains blank. */
12824 if (out_attr
[Tag_CPU_name
].s
== NULL
12825 && out_attr
[i
].i
< ARRAY_SIZE (name_table
))
12826 out_attr
[Tag_CPU_name
].s
=
12827 _bfd_elf_attr_strdup (obfd
, name_table
[out_attr
[i
].i
]);
12831 case Tag_ARM_ISA_use
:
12832 case Tag_THUMB_ISA_use
:
12833 case Tag_WMMX_arch
:
12834 case Tag_Advanced_SIMD_arch
:
12835 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */
12836 case Tag_ABI_FP_rounding
:
12837 case Tag_ABI_FP_exceptions
:
12838 case Tag_ABI_FP_user_exceptions
:
12839 case Tag_ABI_FP_number_model
:
12840 case Tag_FP_HP_extension
:
12841 case Tag_CPU_unaligned_access
:
12843 case Tag_MPextension_use
:
12844 /* Use the largest value specified. */
12845 if (in_attr
[i
].i
> out_attr
[i
].i
)
12846 out_attr
[i
].i
= in_attr
[i
].i
;
12849 case Tag_ABI_align_preserved
:
12850 case Tag_ABI_PCS_RO_data
:
12851 /* Use the smallest value specified. */
12852 if (in_attr
[i
].i
< out_attr
[i
].i
)
12853 out_attr
[i
].i
= in_attr
[i
].i
;
12856 case Tag_ABI_align_needed
:
12857 if ((in_attr
[i
].i
> 0 || out_attr
[i
].i
> 0)
12858 && (in_attr
[Tag_ABI_align_preserved
].i
== 0
12859 || out_attr
[Tag_ABI_align_preserved
].i
== 0))
12861 /* This error message should be enabled once all non-conformant
12862 binaries in the toolchain have had the attributes set
12865 (_("error: %B: 8-byte data alignment conflicts with %B"),
12869 /* Fall through. */
12870 case Tag_ABI_FP_denormal
:
12871 case Tag_ABI_PCS_GOT_use
:
12872 /* Use the "greatest" from the sequence 0, 2, 1, or the largest
12873 value if greater than 2 (for future-proofing). */
12874 if ((in_attr
[i
].i
> 2 && in_attr
[i
].i
> out_attr
[i
].i
)
12875 || (in_attr
[i
].i
<= 2 && out_attr
[i
].i
<= 2
12876 && order_021
[in_attr
[i
].i
] > order_021
[out_attr
[i
].i
]))
12877 out_attr
[i
].i
= in_attr
[i
].i
;
12880 case Tag_Virtualization_use
:
12881 /* The virtualization tag effectively stores two bits of
12882 information: the intended use of TrustZone (in bit 0), and the
12883 intended use of Virtualization (in bit 1). */
12884 if (out_attr
[i
].i
== 0)
12885 out_attr
[i
].i
= in_attr
[i
].i
;
12886 else if (in_attr
[i
].i
!= 0
12887 && in_attr
[i
].i
!= out_attr
[i
].i
)
12889 if (in_attr
[i
].i
<= 3 && out_attr
[i
].i
<= 3)
12894 (_("error: %B: unable to merge virtualization attributes "
12902 case Tag_CPU_arch_profile
:
12903 if (out_attr
[i
].i
!= in_attr
[i
].i
)
12905 /* 0 will merge with anything.
12906 'A' and 'S' merge to 'A'.
12907 'R' and 'S' merge to 'R'.
12908 'M' and 'A|R|S' is an error. */
12909 if (out_attr
[i
].i
== 0
12910 || (out_attr
[i
].i
== 'S'
12911 && (in_attr
[i
].i
== 'A' || in_attr
[i
].i
== 'R')))
12912 out_attr
[i
].i
= in_attr
[i
].i
;
12913 else if (in_attr
[i
].i
== 0
12914 || (in_attr
[i
].i
== 'S'
12915 && (out_attr
[i
].i
== 'A' || out_attr
[i
].i
== 'R')))
12916 ; /* Do nothing. */
12920 (_("error: %B: Conflicting architecture profiles %c/%c"),
12922 in_attr
[i
].i
? in_attr
[i
].i
: '0',
12923 out_attr
[i
].i
? out_attr
[i
].i
: '0');
12929 case Tag_DSP_extension
:
12930 /* No need to change output value if any of:
12931 - pre (<=) ARMv5T input architecture (do not have DSP)
12932 - M input profile not ARMv7E-M and do not have DSP. */
12933 if (in_attr
[Tag_CPU_arch
].i
<= 3
12934 || (in_attr
[Tag_CPU_arch_profile
].i
== 'M'
12935 && in_attr
[Tag_CPU_arch
].i
!= 13
12936 && in_attr
[i
].i
== 0))
12937 ; /* Do nothing. */
12938 /* Output value should be 0 if DSP part of architecture, ie.
12939 - post (>=) ARMv5te architecture output
12940 - A, R or S profile output or ARMv7E-M output architecture. */
12941 else if (out_attr
[Tag_CPU_arch
].i
>= 4
12942 && (out_attr
[Tag_CPU_arch_profile
].i
== 'A'
12943 || out_attr
[Tag_CPU_arch_profile
].i
== 'R'
12944 || out_attr
[Tag_CPU_arch_profile
].i
== 'S'
12945 || out_attr
[Tag_CPU_arch
].i
== 13))
12947 /* Otherwise, DSP instructions are added and not part of output
12955 /* Tag_ABI_HardFP_use is handled along with Tag_FP_arch since
12956 the meaning of Tag_ABI_HardFP_use depends on Tag_FP_arch
12957 when it's 0. It might mean absence of FP hardware if
12958 Tag_FP_arch is zero. */
12960 #define VFP_VERSION_COUNT 9
12961 static const struct
12965 } vfp_versions
[VFP_VERSION_COUNT
] =
12981 /* If the output has no requirement about FP hardware,
12982 follow the requirement of the input. */
12983 if (out_attr
[i
].i
== 0)
12985 BFD_ASSERT (out_attr
[Tag_ABI_HardFP_use
].i
== 0);
12986 out_attr
[i
].i
= in_attr
[i
].i
;
12987 out_attr
[Tag_ABI_HardFP_use
].i
12988 = in_attr
[Tag_ABI_HardFP_use
].i
;
12991 /* If the input has no requirement about FP hardware, do
12993 else if (in_attr
[i
].i
== 0)
12995 BFD_ASSERT (in_attr
[Tag_ABI_HardFP_use
].i
== 0);
12999 /* Both the input and the output have nonzero Tag_FP_arch.
13000 So Tag_ABI_HardFP_use is implied by Tag_FP_arch when it's zero. */
13002 /* If both the input and the output have zero Tag_ABI_HardFP_use,
13004 if (in_attr
[Tag_ABI_HardFP_use
].i
== 0
13005 && out_attr
[Tag_ABI_HardFP_use
].i
== 0)
13007 /* If the input and the output have different Tag_ABI_HardFP_use,
13008 the combination of them is 0 (implied by Tag_FP_arch). */
13009 else if (in_attr
[Tag_ABI_HardFP_use
].i
13010 != out_attr
[Tag_ABI_HardFP_use
].i
)
13011 out_attr
[Tag_ABI_HardFP_use
].i
= 0;
13013 /* Now we can handle Tag_FP_arch. */
13015 /* Values of VFP_VERSION_COUNT or more aren't defined, so just
13016 pick the biggest. */
13017 if (in_attr
[i
].i
>= VFP_VERSION_COUNT
13018 && in_attr
[i
].i
> out_attr
[i
].i
)
13020 out_attr
[i
] = in_attr
[i
];
13023 /* The output uses the superset of input features
13024 (ISA version) and registers. */
13025 ver
= vfp_versions
[in_attr
[i
].i
].ver
;
13026 if (ver
< vfp_versions
[out_attr
[i
].i
].ver
)
13027 ver
= vfp_versions
[out_attr
[i
].i
].ver
;
13028 regs
= vfp_versions
[in_attr
[i
].i
].regs
;
13029 if (regs
< vfp_versions
[out_attr
[i
].i
].regs
)
13030 regs
= vfp_versions
[out_attr
[i
].i
].regs
;
13031 /* This assumes all possible supersets are also a valid
13033 for (newval
= VFP_VERSION_COUNT
- 1; newval
> 0; newval
--)
13035 if (regs
== vfp_versions
[newval
].regs
13036 && ver
== vfp_versions
[newval
].ver
)
13039 out_attr
[i
].i
= newval
;
13042 case Tag_PCS_config
:
13043 if (out_attr
[i
].i
== 0)
13044 out_attr
[i
].i
= in_attr
[i
].i
;
13045 else if (in_attr
[i
].i
!= 0 && out_attr
[i
].i
!= in_attr
[i
].i
)
13047 /* It's sometimes ok to mix different configs, so this is only
13050 (_("Warning: %B: Conflicting platform configuration"), ibfd
);
13053 case Tag_ABI_PCS_R9_use
:
13054 if (in_attr
[i
].i
!= out_attr
[i
].i
13055 && out_attr
[i
].i
!= AEABI_R9_unused
13056 && in_attr
[i
].i
!= AEABI_R9_unused
)
13059 (_("error: %B: Conflicting use of R9"), ibfd
);
13062 if (out_attr
[i
].i
== AEABI_R9_unused
)
13063 out_attr
[i
].i
= in_attr
[i
].i
;
13065 case Tag_ABI_PCS_RW_data
:
13066 if (in_attr
[i
].i
== AEABI_PCS_RW_data_SBrel
13067 && out_attr
[Tag_ABI_PCS_R9_use
].i
!= AEABI_R9_SB
13068 && out_attr
[Tag_ABI_PCS_R9_use
].i
!= AEABI_R9_unused
)
13071 (_("error: %B: SB relative addressing conflicts with use of R9"),
13075 /* Use the smallest value specified. */
13076 if (in_attr
[i
].i
< out_attr
[i
].i
)
13077 out_attr
[i
].i
= in_attr
[i
].i
;
13079 case Tag_ABI_PCS_wchar_t
:
13080 if (out_attr
[i
].i
&& in_attr
[i
].i
&& out_attr
[i
].i
!= in_attr
[i
].i
13081 && !elf_arm_tdata (obfd
)->no_wchar_size_warning
)
13084 (_("warning: %B uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
13085 ibfd
, in_attr
[i
].i
, out_attr
[i
].i
);
13087 else if (in_attr
[i
].i
&& !out_attr
[i
].i
)
13088 out_attr
[i
].i
= in_attr
[i
].i
;
13090 case Tag_ABI_enum_size
:
13091 if (in_attr
[i
].i
!= AEABI_enum_unused
)
13093 if (out_attr
[i
].i
== AEABI_enum_unused
13094 || out_attr
[i
].i
== AEABI_enum_forced_wide
)
13096 /* The existing object is compatible with anything.
13097 Use whatever requirements the new object has. */
13098 out_attr
[i
].i
= in_attr
[i
].i
;
13100 else if (in_attr
[i
].i
!= AEABI_enum_forced_wide
13101 && out_attr
[i
].i
!= in_attr
[i
].i
13102 && !elf_arm_tdata (obfd
)->no_enum_size_warning
)
13104 static const char *aeabi_enum_names
[] =
13105 { "", "variable-size", "32-bit", "" };
13106 const char *in_name
=
13107 in_attr
[i
].i
< ARRAY_SIZE(aeabi_enum_names
)
13108 ? aeabi_enum_names
[in_attr
[i
].i
]
13110 const char *out_name
=
13111 out_attr
[i
].i
< ARRAY_SIZE(aeabi_enum_names
)
13112 ? aeabi_enum_names
[out_attr
[i
].i
]
13115 (_("warning: %B uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
13116 ibfd
, in_name
, out_name
);
13120 case Tag_ABI_VFP_args
:
13123 case Tag_ABI_WMMX_args
:
13124 if (in_attr
[i
].i
!= out_attr
[i
].i
)
13127 (_("error: %B uses iWMMXt register arguments, %B does not"),
13132 case Tag_compatibility
:
13133 /* Merged in target-independent code. */
13135 case Tag_ABI_HardFP_use
:
13136 /* This is handled along with Tag_FP_arch. */
13138 case Tag_ABI_FP_16bit_format
:
13139 if (in_attr
[i
].i
!= 0 && out_attr
[i
].i
!= 0)
13141 if (in_attr
[i
].i
!= out_attr
[i
].i
)
13144 (_("error: fp16 format mismatch between %B and %B"),
13149 if (in_attr
[i
].i
!= 0)
13150 out_attr
[i
].i
= in_attr
[i
].i
;
13154 /* A value of zero on input means that the divide instruction may
13155 be used if available in the base architecture as specified via
13156 Tag_CPU_arch and Tag_CPU_arch_profile. A value of 1 means that
13157 the user did not want divide instructions. A value of 2
13158 explicitly means that divide instructions were allowed in ARM
13159 and Thumb state. */
13160 if (in_attr
[i
].i
== out_attr
[i
].i
)
13161 /* Do nothing. */ ;
13162 else if (elf32_arm_attributes_forbid_div (in_attr
)
13163 && !elf32_arm_attributes_accept_div (out_attr
))
13165 else if (elf32_arm_attributes_forbid_div (out_attr
)
13166 && elf32_arm_attributes_accept_div (in_attr
))
13167 out_attr
[i
].i
= in_attr
[i
].i
;
13168 else if (in_attr
[i
].i
== 2)
13169 out_attr
[i
].i
= in_attr
[i
].i
;
13172 case Tag_MPextension_use_legacy
:
13173 /* We don't output objects with Tag_MPextension_use_legacy - we
13174 move the value to Tag_MPextension_use. */
13175 if (in_attr
[i
].i
!= 0 && in_attr
[Tag_MPextension_use
].i
!= 0)
13177 if (in_attr
[Tag_MPextension_use
].i
!= in_attr
[i
].i
)
13180 (_("%B has has both the current and legacy "
13181 "Tag_MPextension_use attributes"),
13187 if (in_attr
[i
].i
> out_attr
[Tag_MPextension_use
].i
)
13188 out_attr
[Tag_MPextension_use
] = in_attr
[i
];
13192 case Tag_nodefaults
:
13193 /* This tag is set if it exists, but the value is unused (and is
13194 typically zero). We don't actually need to do anything here -
13195 the merge happens automatically when the type flags are merged
13198 case Tag_also_compatible_with
:
13199 /* Already done in Tag_CPU_arch. */
13201 case Tag_conformance
:
13202 /* Keep the attribute if it matches. Throw it away otherwise.
13203 No attribute means no claim to conform. */
13204 if (!in_attr
[i
].s
|| !out_attr
[i
].s
13205 || strcmp (in_attr
[i
].s
, out_attr
[i
].s
) != 0)
13206 out_attr
[i
].s
= NULL
;
13211 = result
&& _bfd_elf_merge_unknown_attribute_low (ibfd
, obfd
, i
);
13214 /* If out_attr was copied from in_attr then it won't have a type yet. */
13215 if (in_attr
[i
].type
&& !out_attr
[i
].type
)
13216 out_attr
[i
].type
= in_attr
[i
].type
;
13219 /* Merge Tag_compatibility attributes and any common GNU ones. */
13220 if (!_bfd_elf_merge_object_attributes (ibfd
, obfd
))
13223 /* Check for any attributes not known on ARM. */
13224 result
&= _bfd_elf_merge_unknown_attribute_list (ibfd
, obfd
);
13230 /* Return TRUE if the two EABI versions are incompatible. */
13233 elf32_arm_versions_compatible (unsigned iver
, unsigned over
)
13235 /* v4 and v5 are the same spec before and after it was released,
13236 so allow mixing them. */
13237 if ((iver
== EF_ARM_EABI_VER4
&& over
== EF_ARM_EABI_VER5
)
13238 || (iver
== EF_ARM_EABI_VER5
&& over
== EF_ARM_EABI_VER4
))
13241 return (iver
== over
);
13244 /* Merge backend specific data from an object file to the output
13245 object file when linking. */
13248 elf32_arm_merge_private_bfd_data (bfd
* ibfd
, bfd
* obfd
);
13250 /* Display the flags field. */
13253 elf32_arm_print_private_bfd_data (bfd
*abfd
, void * ptr
)
13255 FILE * file
= (FILE *) ptr
;
13256 unsigned long flags
;
13258 BFD_ASSERT (abfd
!= NULL
&& ptr
!= NULL
);
13260 /* Print normal ELF private data. */
13261 _bfd_elf_print_private_bfd_data (abfd
, ptr
);
13263 flags
= elf_elfheader (abfd
)->e_flags
;
13264 /* Ignore init flag - it may not be set, despite the flags field
13265 containing valid data. */
13267 /* xgettext:c-format */
13268 fprintf (file
, _("private flags = %lx:"), elf_elfheader (abfd
)->e_flags
);
13270 switch (EF_ARM_EABI_VERSION (flags
))
13272 case EF_ARM_EABI_UNKNOWN
:
13273 /* The following flag bits are GNU extensions and not part of the
13274 official ARM ELF extended ABI. Hence they are only decoded if
13275 the EABI version is not set. */
13276 if (flags
& EF_ARM_INTERWORK
)
13277 fprintf (file
, _(" [interworking enabled]"));
13279 if (flags
& EF_ARM_APCS_26
)
13280 fprintf (file
, " [APCS-26]");
13282 fprintf (file
, " [APCS-32]");
13284 if (flags
& EF_ARM_VFP_FLOAT
)
13285 fprintf (file
, _(" [VFP float format]"));
13286 else if (flags
& EF_ARM_MAVERICK_FLOAT
)
13287 fprintf (file
, _(" [Maverick float format]"));
13289 fprintf (file
, _(" [FPA float format]"));
13291 if (flags
& EF_ARM_APCS_FLOAT
)
13292 fprintf (file
, _(" [floats passed in float registers]"));
13294 if (flags
& EF_ARM_PIC
)
13295 fprintf (file
, _(" [position independent]"));
13297 if (flags
& EF_ARM_NEW_ABI
)
13298 fprintf (file
, _(" [new ABI]"));
13300 if (flags
& EF_ARM_OLD_ABI
)
13301 fprintf (file
, _(" [old ABI]"));
13303 if (flags
& EF_ARM_SOFT_FLOAT
)
13304 fprintf (file
, _(" [software FP]"));
13306 flags
&= ~(EF_ARM_INTERWORK
| EF_ARM_APCS_26
| EF_ARM_APCS_FLOAT
13307 | EF_ARM_PIC
| EF_ARM_NEW_ABI
| EF_ARM_OLD_ABI
13308 | EF_ARM_SOFT_FLOAT
| EF_ARM_VFP_FLOAT
13309 | EF_ARM_MAVERICK_FLOAT
);
13312 case EF_ARM_EABI_VER1
:
13313 fprintf (file
, _(" [Version1 EABI]"));
13315 if (flags
& EF_ARM_SYMSARESORTED
)
13316 fprintf (file
, _(" [sorted symbol table]"));
13318 fprintf (file
, _(" [unsorted symbol table]"));
13320 flags
&= ~ EF_ARM_SYMSARESORTED
;
13323 case EF_ARM_EABI_VER2
:
13324 fprintf (file
, _(" [Version2 EABI]"));
13326 if (flags
& EF_ARM_SYMSARESORTED
)
13327 fprintf (file
, _(" [sorted symbol table]"));
13329 fprintf (file
, _(" [unsorted symbol table]"));
13331 if (flags
& EF_ARM_DYNSYMSUSESEGIDX
)
13332 fprintf (file
, _(" [dynamic symbols use segment index]"));
13334 if (flags
& EF_ARM_MAPSYMSFIRST
)
13335 fprintf (file
, _(" [mapping symbols precede others]"));
13337 flags
&= ~(EF_ARM_SYMSARESORTED
| EF_ARM_DYNSYMSUSESEGIDX
13338 | EF_ARM_MAPSYMSFIRST
);
13341 case EF_ARM_EABI_VER3
:
13342 fprintf (file
, _(" [Version3 EABI]"));
13345 case EF_ARM_EABI_VER4
:
13346 fprintf (file
, _(" [Version4 EABI]"));
13349 case EF_ARM_EABI_VER5
:
13350 fprintf (file
, _(" [Version5 EABI]"));
13352 if (flags
& EF_ARM_ABI_FLOAT_SOFT
)
13353 fprintf (file
, _(" [soft-float ABI]"));
13355 if (flags
& EF_ARM_ABI_FLOAT_HARD
)
13356 fprintf (file
, _(" [hard-float ABI]"));
13358 flags
&= ~(EF_ARM_ABI_FLOAT_SOFT
| EF_ARM_ABI_FLOAT_HARD
);
13361 if (flags
& EF_ARM_BE8
)
13362 fprintf (file
, _(" [BE8]"));
13364 if (flags
& EF_ARM_LE8
)
13365 fprintf (file
, _(" [LE8]"));
13367 flags
&= ~(EF_ARM_LE8
| EF_ARM_BE8
);
13371 fprintf (file
, _(" <EABI version unrecognised>"));
13375 flags
&= ~ EF_ARM_EABIMASK
;
13377 if (flags
& EF_ARM_RELEXEC
)
13378 fprintf (file
, _(" [relocatable executable]"));
13380 flags
&= ~EF_ARM_RELEXEC
;
13383 fprintf (file
, _("<Unrecognised flag bits set>"));
13385 fputc ('\n', file
);
13391 elf32_arm_get_symbol_type (Elf_Internal_Sym
* elf_sym
, int type
)
13393 switch (ELF_ST_TYPE (elf_sym
->st_info
))
13395 case STT_ARM_TFUNC
:
13396 return ELF_ST_TYPE (elf_sym
->st_info
);
13398 case STT_ARM_16BIT
:
13399 /* If the symbol is not an object, return the STT_ARM_16BIT flag.
13400 This allows us to distinguish between data used by Thumb instructions
13401 and non-data (which is probably code) inside Thumb regions of an
13403 if (type
!= STT_OBJECT
&& type
!= STT_TLS
)
13404 return ELF_ST_TYPE (elf_sym
->st_info
);
13415 elf32_arm_gc_mark_hook (asection
*sec
,
13416 struct bfd_link_info
*info
,
13417 Elf_Internal_Rela
*rel
,
13418 struct elf_link_hash_entry
*h
,
13419 Elf_Internal_Sym
*sym
)
13422 switch (ELF32_R_TYPE (rel
->r_info
))
13424 case R_ARM_GNU_VTINHERIT
:
13425 case R_ARM_GNU_VTENTRY
:
13429 return _bfd_elf_gc_mark_hook (sec
, info
, rel
, h
, sym
);
13432 /* Update the got entry reference counts for the section being removed. */
13435 elf32_arm_gc_sweep_hook (bfd
* abfd
,
13436 struct bfd_link_info
* info
,
13438 const Elf_Internal_Rela
* relocs
)
13440 Elf_Internal_Shdr
*symtab_hdr
;
13441 struct elf_link_hash_entry
**sym_hashes
;
13442 bfd_signed_vma
*local_got_refcounts
;
13443 const Elf_Internal_Rela
*rel
, *relend
;
13444 struct elf32_arm_link_hash_table
* globals
;
13446 if (bfd_link_relocatable (info
))
13449 globals
= elf32_arm_hash_table (info
);
13450 if (globals
== NULL
)
13453 elf_section_data (sec
)->local_dynrel
= NULL
;
13455 symtab_hdr
= & elf_symtab_hdr (abfd
);
13456 sym_hashes
= elf_sym_hashes (abfd
);
13457 local_got_refcounts
= elf_local_got_refcounts (abfd
);
13459 check_use_blx (globals
);
13461 relend
= relocs
+ sec
->reloc_count
;
13462 for (rel
= relocs
; rel
< relend
; rel
++)
13464 unsigned long r_symndx
;
13465 struct elf_link_hash_entry
*h
= NULL
;
13466 struct elf32_arm_link_hash_entry
*eh
;
13468 bfd_boolean call_reloc_p
;
13469 bfd_boolean may_become_dynamic_p
;
13470 bfd_boolean may_need_local_target_p
;
13471 union gotplt_union
*root_plt
;
13472 struct arm_plt_info
*arm_plt
;
13474 r_symndx
= ELF32_R_SYM (rel
->r_info
);
13475 if (r_symndx
>= symtab_hdr
->sh_info
)
13477 h
= sym_hashes
[r_symndx
- symtab_hdr
->sh_info
];
13478 while (h
->root
.type
== bfd_link_hash_indirect
13479 || h
->root
.type
== bfd_link_hash_warning
)
13480 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
13482 eh
= (struct elf32_arm_link_hash_entry
*) h
;
13484 call_reloc_p
= FALSE
;
13485 may_become_dynamic_p
= FALSE
;
13486 may_need_local_target_p
= FALSE
;
13488 r_type
= ELF32_R_TYPE (rel
->r_info
);
13489 r_type
= arm_real_reloc_type (globals
, r_type
);
13493 case R_ARM_GOT_PREL
:
13494 case R_ARM_TLS_GD32
:
13495 case R_ARM_TLS_IE32
:
13498 if (h
->got
.refcount
> 0)
13499 h
->got
.refcount
-= 1;
13501 else if (local_got_refcounts
!= NULL
)
13503 if (local_got_refcounts
[r_symndx
] > 0)
13504 local_got_refcounts
[r_symndx
] -= 1;
13508 case R_ARM_TLS_LDM32
:
13509 globals
->tls_ldm_got
.refcount
-= 1;
13517 case R_ARM_THM_CALL
:
13518 case R_ARM_THM_JUMP24
:
13519 case R_ARM_THM_JUMP19
:
13520 call_reloc_p
= TRUE
;
13521 may_need_local_target_p
= TRUE
;
13525 if (!globals
->vxworks_p
)
13527 may_need_local_target_p
= TRUE
;
13530 /* Fall through. */
13532 case R_ARM_ABS32_NOI
:
13534 case R_ARM_REL32_NOI
:
13535 case R_ARM_MOVW_ABS_NC
:
13536 case R_ARM_MOVT_ABS
:
13537 case R_ARM_MOVW_PREL_NC
:
13538 case R_ARM_MOVT_PREL
:
13539 case R_ARM_THM_MOVW_ABS_NC
:
13540 case R_ARM_THM_MOVT_ABS
:
13541 case R_ARM_THM_MOVW_PREL_NC
:
13542 case R_ARM_THM_MOVT_PREL
:
13543 /* Should the interworking branches be here also? */
13544 if ((bfd_link_pic (info
) || globals
->root
.is_relocatable_executable
)
13545 && (sec
->flags
& SEC_ALLOC
) != 0)
13548 && elf32_arm_howto_from_type (r_type
)->pc_relative
)
13550 call_reloc_p
= TRUE
;
13551 may_need_local_target_p
= TRUE
;
13554 may_become_dynamic_p
= TRUE
;
13557 may_need_local_target_p
= TRUE
;
13564 if (may_need_local_target_p
13565 && elf32_arm_get_plt_info (abfd
, eh
, r_symndx
, &root_plt
, &arm_plt
))
13567 /* If PLT refcount book-keeping is wrong and too low, we'll
13568 see a zero value (going to -1) for the root PLT reference
13570 if (root_plt
->refcount
>= 0)
13572 BFD_ASSERT (root_plt
->refcount
!= 0);
13573 root_plt
->refcount
-= 1;
13576 /* A value of -1 means the symbol has become local, forced
13577 or seeing a hidden definition. Any other negative value
13579 BFD_ASSERT (root_plt
->refcount
== -1);
13582 arm_plt
->noncall_refcount
--;
13584 if (r_type
== R_ARM_THM_CALL
)
13585 arm_plt
->maybe_thumb_refcount
--;
13587 if (r_type
== R_ARM_THM_JUMP24
13588 || r_type
== R_ARM_THM_JUMP19
)
13589 arm_plt
->thumb_refcount
--;
13592 if (may_become_dynamic_p
)
13594 struct elf_dyn_relocs
**pp
;
13595 struct elf_dyn_relocs
*p
;
13598 pp
= &(eh
->dyn_relocs
);
13601 Elf_Internal_Sym
*isym
;
13603 isym
= bfd_sym_from_r_symndx (&globals
->sym_cache
,
13607 pp
= elf32_arm_get_local_dynreloc_list (abfd
, r_symndx
, isym
);
13611 for (; (p
= *pp
) != NULL
; pp
= &p
->next
)
13614 /* Everything must go for SEC. */
13624 /* Look through the relocs for a section during the first phase. */
13627 elf32_arm_check_relocs (bfd
*abfd
, struct bfd_link_info
*info
,
13628 asection
*sec
, const Elf_Internal_Rela
*relocs
)
13630 Elf_Internal_Shdr
*symtab_hdr
;
13631 struct elf_link_hash_entry
**sym_hashes
;
13632 const Elf_Internal_Rela
*rel
;
13633 const Elf_Internal_Rela
*rel_end
;
13636 struct elf32_arm_link_hash_table
*htab
;
13637 bfd_boolean call_reloc_p
;
13638 bfd_boolean may_become_dynamic_p
;
13639 bfd_boolean may_need_local_target_p
;
13640 unsigned long nsyms
;
13642 if (bfd_link_relocatable (info
))
13645 BFD_ASSERT (is_arm_elf (abfd
));
13647 htab
= elf32_arm_hash_table (info
);
13653 /* Create dynamic sections for relocatable executables so that we can
13654 copy relocations. */
13655 if (htab
->root
.is_relocatable_executable
13656 && ! htab
->root
.dynamic_sections_created
)
13658 if (! _bfd_elf_link_create_dynamic_sections (abfd
, info
))
13662 if (htab
->root
.dynobj
== NULL
)
13663 htab
->root
.dynobj
= abfd
;
13664 if (!create_ifunc_sections (info
))
13667 dynobj
= htab
->root
.dynobj
;
13669 symtab_hdr
= & elf_symtab_hdr (abfd
);
13670 sym_hashes
= elf_sym_hashes (abfd
);
13671 nsyms
= NUM_SHDR_ENTRIES (symtab_hdr
);
13673 rel_end
= relocs
+ sec
->reloc_count
;
13674 for (rel
= relocs
; rel
< rel_end
; rel
++)
13676 Elf_Internal_Sym
*isym
;
13677 struct elf_link_hash_entry
*h
;
13678 struct elf32_arm_link_hash_entry
*eh
;
13679 unsigned long r_symndx
;
13682 r_symndx
= ELF32_R_SYM (rel
->r_info
);
13683 r_type
= ELF32_R_TYPE (rel
->r_info
);
13684 r_type
= arm_real_reloc_type (htab
, r_type
);
13686 if (r_symndx
>= nsyms
13687 /* PR 9934: It is possible to have relocations that do not
13688 refer to symbols, thus it is also possible to have an
13689 object file containing relocations but no symbol table. */
13690 && (r_symndx
> STN_UNDEF
|| nsyms
> 0))
13692 (*_bfd_error_handler
) (_("%B: bad symbol index: %d"), abfd
,
13701 if (r_symndx
< symtab_hdr
->sh_info
)
13703 /* A local symbol. */
13704 isym
= bfd_sym_from_r_symndx (&htab
->sym_cache
,
13711 h
= sym_hashes
[r_symndx
- symtab_hdr
->sh_info
];
13712 while (h
->root
.type
== bfd_link_hash_indirect
13713 || h
->root
.type
== bfd_link_hash_warning
)
13714 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
13716 /* PR15323, ref flags aren't set for references in the
13718 h
->root
.non_ir_ref
= 1;
13722 eh
= (struct elf32_arm_link_hash_entry
*) h
;
13724 call_reloc_p
= FALSE
;
13725 may_become_dynamic_p
= FALSE
;
13726 may_need_local_target_p
= FALSE
;
13728 /* Could be done earlier, if h were already available. */
13729 r_type
= elf32_arm_tls_transition (info
, r_type
, h
);
13733 case R_ARM_GOT_PREL
:
13734 case R_ARM_TLS_GD32
:
13735 case R_ARM_TLS_IE32
:
13736 case R_ARM_TLS_GOTDESC
:
13737 case R_ARM_TLS_DESCSEQ
:
13738 case R_ARM_THM_TLS_DESCSEQ
:
13739 case R_ARM_TLS_CALL
:
13740 case R_ARM_THM_TLS_CALL
:
13741 /* This symbol requires a global offset table entry. */
13743 int tls_type
, old_tls_type
;
13747 case R_ARM_TLS_GD32
: tls_type
= GOT_TLS_GD
; break;
13749 case R_ARM_TLS_IE32
: tls_type
= GOT_TLS_IE
; break;
13751 case R_ARM_TLS_GOTDESC
:
13752 case R_ARM_TLS_CALL
: case R_ARM_THM_TLS_CALL
:
13753 case R_ARM_TLS_DESCSEQ
: case R_ARM_THM_TLS_DESCSEQ
:
13754 tls_type
= GOT_TLS_GDESC
; break;
13756 default: tls_type
= GOT_NORMAL
; break;
13759 if (!bfd_link_executable (info
) && (tls_type
& GOT_TLS_IE
))
13760 info
->flags
|= DF_STATIC_TLS
;
13765 old_tls_type
= elf32_arm_hash_entry (h
)->tls_type
;
13769 /* This is a global offset table entry for a local symbol. */
13770 if (!elf32_arm_allocate_local_sym_info (abfd
))
13772 elf_local_got_refcounts (abfd
)[r_symndx
] += 1;
13773 old_tls_type
= elf32_arm_local_got_tls_type (abfd
) [r_symndx
];
13776 /* If a variable is accessed with both tls methods, two
13777 slots may be created. */
13778 if (GOT_TLS_GD_ANY_P (old_tls_type
)
13779 && GOT_TLS_GD_ANY_P (tls_type
))
13780 tls_type
|= old_tls_type
;
13782 /* We will already have issued an error message if there
13783 is a TLS/non-TLS mismatch, based on the symbol
13784 type. So just combine any TLS types needed. */
13785 if (old_tls_type
!= GOT_UNKNOWN
&& old_tls_type
!= GOT_NORMAL
13786 && tls_type
!= GOT_NORMAL
)
13787 tls_type
|= old_tls_type
;
13789 /* If the symbol is accessed in both IE and GDESC
13790 method, we're able to relax. Turn off the GDESC flag,
13791 without messing up with any other kind of tls types
13792 that may be involved. */
13793 if ((tls_type
& GOT_TLS_IE
) && (tls_type
& GOT_TLS_GDESC
))
13794 tls_type
&= ~GOT_TLS_GDESC
;
13796 if (old_tls_type
!= tls_type
)
13799 elf32_arm_hash_entry (h
)->tls_type
= tls_type
;
13801 elf32_arm_local_got_tls_type (abfd
) [r_symndx
] = tls_type
;
13804 /* Fall through. */
13806 case R_ARM_TLS_LDM32
:
13807 if (r_type
== R_ARM_TLS_LDM32
)
13808 htab
->tls_ldm_got
.refcount
++;
13809 /* Fall through. */
13811 case R_ARM_GOTOFF32
:
13813 if (htab
->root
.sgot
== NULL
13814 && !create_got_section (htab
->root
.dynobj
, info
))
13823 case R_ARM_THM_CALL
:
13824 case R_ARM_THM_JUMP24
:
13825 case R_ARM_THM_JUMP19
:
13826 call_reloc_p
= TRUE
;
13827 may_need_local_target_p
= TRUE
;
13831 /* VxWorks uses dynamic R_ARM_ABS12 relocations for
13832 ldr __GOTT_INDEX__ offsets. */
13833 if (!htab
->vxworks_p
)
13835 may_need_local_target_p
= TRUE
;
13838 else goto jump_over
;
13840 /* Fall through. */
13842 case R_ARM_MOVW_ABS_NC
:
13843 case R_ARM_MOVT_ABS
:
13844 case R_ARM_THM_MOVW_ABS_NC
:
13845 case R_ARM_THM_MOVT_ABS
:
13846 if (bfd_link_pic (info
))
13848 (*_bfd_error_handler
)
13849 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
13850 abfd
, elf32_arm_howto_table_1
[r_type
].name
,
13851 (h
) ? h
->root
.root
.string
: "a local symbol");
13852 bfd_set_error (bfd_error_bad_value
);
13856 /* Fall through. */
13858 case R_ARM_ABS32_NOI
:
13860 if (h
!= NULL
&& bfd_link_executable (info
))
13862 h
->pointer_equality_needed
= 1;
13864 /* Fall through. */
13866 case R_ARM_REL32_NOI
:
13867 case R_ARM_MOVW_PREL_NC
:
13868 case R_ARM_MOVT_PREL
:
13869 case R_ARM_THM_MOVW_PREL_NC
:
13870 case R_ARM_THM_MOVT_PREL
:
13872 /* Should the interworking branches be listed here? */
13873 if ((bfd_link_pic (info
) || htab
->root
.is_relocatable_executable
)
13874 && (sec
->flags
& SEC_ALLOC
) != 0)
13877 && elf32_arm_howto_from_type (r_type
)->pc_relative
)
13879 /* In shared libraries and relocatable executables,
13880 we treat local relative references as calls;
13881 see the related SYMBOL_CALLS_LOCAL code in
13882 allocate_dynrelocs. */
13883 call_reloc_p
= TRUE
;
13884 may_need_local_target_p
= TRUE
;
13887 /* We are creating a shared library or relocatable
13888 executable, and this is a reloc against a global symbol,
13889 or a non-PC-relative reloc against a local symbol.
13890 We may need to copy the reloc into the output. */
13891 may_become_dynamic_p
= TRUE
;
13894 may_need_local_target_p
= TRUE
;
13897 /* This relocation describes the C++ object vtable hierarchy.
13898 Reconstruct it for later use during GC. */
13899 case R_ARM_GNU_VTINHERIT
:
13900 if (!bfd_elf_gc_record_vtinherit (abfd
, sec
, h
, rel
->r_offset
))
13904 /* This relocation describes which C++ vtable entries are actually
13905 used. Record for later use during GC. */
13906 case R_ARM_GNU_VTENTRY
:
13907 BFD_ASSERT (h
!= NULL
);
13909 && !bfd_elf_gc_record_vtentry (abfd
, sec
, h
, rel
->r_offset
))
13917 /* We may need a .plt entry if the function this reloc
13918 refers to is in a different object, regardless of the
13919 symbol's type. We can't tell for sure yet, because
13920 something later might force the symbol local. */
13922 else if (may_need_local_target_p
)
13923 /* If this reloc is in a read-only section, we might
13924 need a copy reloc. We can't check reliably at this
13925 stage whether the section is read-only, as input
13926 sections have not yet been mapped to output sections.
13927 Tentatively set the flag for now, and correct in
13928 adjust_dynamic_symbol. */
13929 h
->non_got_ref
= 1;
13932 if (may_need_local_target_p
13933 && (h
!= NULL
|| ELF32_ST_TYPE (isym
->st_info
) == STT_GNU_IFUNC
))
13935 union gotplt_union
*root_plt
;
13936 struct arm_plt_info
*arm_plt
;
13937 struct arm_local_iplt_info
*local_iplt
;
13941 root_plt
= &h
->plt
;
13942 arm_plt
= &eh
->plt
;
13946 local_iplt
= elf32_arm_create_local_iplt (abfd
, r_symndx
);
13947 if (local_iplt
== NULL
)
13949 root_plt
= &local_iplt
->root
;
13950 arm_plt
= &local_iplt
->arm
;
13953 /* If the symbol is a function that doesn't bind locally,
13954 this relocation will need a PLT entry. */
13955 if (root_plt
->refcount
!= -1)
13956 root_plt
->refcount
+= 1;
13959 arm_plt
->noncall_refcount
++;
13961 /* It's too early to use htab->use_blx here, so we have to
13962 record possible blx references separately from
13963 relocs that definitely need a thumb stub. */
13965 if (r_type
== R_ARM_THM_CALL
)
13966 arm_plt
->maybe_thumb_refcount
+= 1;
13968 if (r_type
== R_ARM_THM_JUMP24
13969 || r_type
== R_ARM_THM_JUMP19
)
13970 arm_plt
->thumb_refcount
+= 1;
13973 if (may_become_dynamic_p
)
13975 struct elf_dyn_relocs
*p
, **head
;
13977 /* Create a reloc section in dynobj. */
13978 if (sreloc
== NULL
)
13980 sreloc
= _bfd_elf_make_dynamic_reloc_section
13981 (sec
, dynobj
, 2, abfd
, ! htab
->use_rel
);
13983 if (sreloc
== NULL
)
13986 /* BPABI objects never have dynamic relocations mapped. */
13987 if (htab
->symbian_p
)
13991 flags
= bfd_get_section_flags (dynobj
, sreloc
);
13992 flags
&= ~(SEC_LOAD
| SEC_ALLOC
);
13993 bfd_set_section_flags (dynobj
, sreloc
, flags
);
13997 /* If this is a global symbol, count the number of
13998 relocations we need for this symbol. */
14000 head
= &((struct elf32_arm_link_hash_entry
*) h
)->dyn_relocs
;
14003 head
= elf32_arm_get_local_dynreloc_list (abfd
, r_symndx
, isym
);
14009 if (p
== NULL
|| p
->sec
!= sec
)
14011 bfd_size_type amt
= sizeof *p
;
14013 p
= (struct elf_dyn_relocs
*) bfd_alloc (htab
->root
.dynobj
, amt
);
14023 if (elf32_arm_howto_from_type (r_type
)->pc_relative
)
14032 /* Unwinding tables are not referenced directly. This pass marks them as
14033 required if the corresponding code section is marked. */
14036 elf32_arm_gc_mark_extra_sections (struct bfd_link_info
*info
,
14037 elf_gc_mark_hook_fn gc_mark_hook
)
14040 Elf_Internal_Shdr
**elf_shdrp
;
14043 _bfd_elf_gc_mark_extra_sections (info
, gc_mark_hook
);
14045 /* Marking EH data may cause additional code sections to be marked,
14046 requiring multiple passes. */
14051 for (sub
= info
->input_bfds
; sub
!= NULL
; sub
= sub
->link
.next
)
14055 if (! is_arm_elf (sub
))
14058 elf_shdrp
= elf_elfsections (sub
);
14059 for (o
= sub
->sections
; o
!= NULL
; o
= o
->next
)
14061 Elf_Internal_Shdr
*hdr
;
14063 hdr
= &elf_section_data (o
)->this_hdr
;
14064 if (hdr
->sh_type
== SHT_ARM_EXIDX
14066 && hdr
->sh_link
< elf_numsections (sub
)
14068 && elf_shdrp
[hdr
->sh_link
]->bfd_section
->gc_mark
)
14071 if (!_bfd_elf_gc_mark (info
, o
, gc_mark_hook
))
14081 /* Treat mapping symbols as special target symbols. */
14084 elf32_arm_is_target_special_symbol (bfd
* abfd ATTRIBUTE_UNUSED
, asymbol
* sym
)
14086 return bfd_is_arm_special_symbol_name (sym
->name
,
14087 BFD_ARM_SPECIAL_SYM_TYPE_ANY
);
14090 /* This is a copy of elf_find_function() from elf.c except that
14091 ARM mapping symbols are ignored when looking for function names
14092 and STT_ARM_TFUNC is considered to a function type. */
14095 arm_elf_find_function (bfd
* abfd ATTRIBUTE_UNUSED
,
14096 asymbol
** symbols
,
14097 asection
* section
,
14099 const char ** filename_ptr
,
14100 const char ** functionname_ptr
)
14102 const char * filename
= NULL
;
14103 asymbol
* func
= NULL
;
14104 bfd_vma low_func
= 0;
14107 for (p
= symbols
; *p
!= NULL
; p
++)
14109 elf_symbol_type
*q
;
14111 q
= (elf_symbol_type
*) *p
;
14113 switch (ELF_ST_TYPE (q
->internal_elf_sym
.st_info
))
14118 filename
= bfd_asymbol_name (&q
->symbol
);
14121 case STT_ARM_TFUNC
:
14123 /* Skip mapping symbols. */
14124 if ((q
->symbol
.flags
& BSF_LOCAL
)
14125 && bfd_is_arm_special_symbol_name (q
->symbol
.name
,
14126 BFD_ARM_SPECIAL_SYM_TYPE_ANY
))
14128 /* Fall through. */
14129 if (bfd_get_section (&q
->symbol
) == section
14130 && q
->symbol
.value
>= low_func
14131 && q
->symbol
.value
<= offset
)
14133 func
= (asymbol
*) q
;
14134 low_func
= q
->symbol
.value
;
14144 *filename_ptr
= filename
;
14145 if (functionname_ptr
)
14146 *functionname_ptr
= bfd_asymbol_name (func
);
14152 /* Find the nearest line to a particular section and offset, for error
14153 reporting. This code is a duplicate of the code in elf.c, except
14154 that it uses arm_elf_find_function. */
14157 elf32_arm_find_nearest_line (bfd
* abfd
,
14158 asymbol
** symbols
,
14159 asection
* section
,
14161 const char ** filename_ptr
,
14162 const char ** functionname_ptr
,
14163 unsigned int * line_ptr
,
14164 unsigned int * discriminator_ptr
)
14166 bfd_boolean found
= FALSE
;
14168 if (_bfd_dwarf2_find_nearest_line (abfd
, symbols
, NULL
, section
, offset
,
14169 filename_ptr
, functionname_ptr
,
14170 line_ptr
, discriminator_ptr
,
14171 dwarf_debug_sections
, 0,
14172 & elf_tdata (abfd
)->dwarf2_find_line_info
))
14174 if (!*functionname_ptr
)
14175 arm_elf_find_function (abfd
, symbols
, section
, offset
,
14176 *filename_ptr
? NULL
: filename_ptr
,
14182 /* Skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain
14185 if (! _bfd_stab_section_find_nearest_line (abfd
, symbols
, section
, offset
,
14186 & found
, filename_ptr
,
14187 functionname_ptr
, line_ptr
,
14188 & elf_tdata (abfd
)->line_info
))
14191 if (found
&& (*functionname_ptr
|| *line_ptr
))
14194 if (symbols
== NULL
)
14197 if (! arm_elf_find_function (abfd
, symbols
, section
, offset
,
14198 filename_ptr
, functionname_ptr
))
14206 elf32_arm_find_inliner_info (bfd
* abfd
,
14207 const char ** filename_ptr
,
14208 const char ** functionname_ptr
,
14209 unsigned int * line_ptr
)
14212 found
= _bfd_dwarf2_find_inliner_info (abfd
, filename_ptr
,
14213 functionname_ptr
, line_ptr
,
14214 & elf_tdata (abfd
)->dwarf2_find_line_info
);
14218 /* Adjust a symbol defined by a dynamic object and referenced by a
14219 regular object. The current definition is in some section of the
14220 dynamic object, but we're not including those sections. We have to
14221 change the definition to something the rest of the link can
14225 elf32_arm_adjust_dynamic_symbol (struct bfd_link_info
* info
,
14226 struct elf_link_hash_entry
* h
)
14230 struct elf32_arm_link_hash_entry
* eh
;
14231 struct elf32_arm_link_hash_table
*globals
;
14233 globals
= elf32_arm_hash_table (info
);
14234 if (globals
== NULL
)
14237 dynobj
= elf_hash_table (info
)->dynobj
;
14239 /* Make sure we know what is going on here. */
14240 BFD_ASSERT (dynobj
!= NULL
14242 || h
->type
== STT_GNU_IFUNC
14243 || h
->u
.weakdef
!= NULL
14246 && !h
->def_regular
)));
14248 eh
= (struct elf32_arm_link_hash_entry
*) h
;
14250 /* If this is a function, put it in the procedure linkage table. We
14251 will fill in the contents of the procedure linkage table later,
14252 when we know the address of the .got section. */
14253 if (h
->type
== STT_FUNC
|| h
->type
== STT_GNU_IFUNC
|| h
->needs_plt
)
14255 /* Calls to STT_GNU_IFUNC symbols always use a PLT, even if the
14256 symbol binds locally. */
14257 if (h
->plt
.refcount
<= 0
14258 || (h
->type
!= STT_GNU_IFUNC
14259 && (SYMBOL_CALLS_LOCAL (info
, h
)
14260 || (ELF_ST_VISIBILITY (h
->other
) != STV_DEFAULT
14261 && h
->root
.type
== bfd_link_hash_undefweak
))))
14263 /* This case can occur if we saw a PLT32 reloc in an input
14264 file, but the symbol was never referred to by a dynamic
14265 object, or if all references were garbage collected. In
14266 such a case, we don't actually need to build a procedure
14267 linkage table, and we can just do a PC24 reloc instead. */
14268 h
->plt
.offset
= (bfd_vma
) -1;
14269 eh
->plt
.thumb_refcount
= 0;
14270 eh
->plt
.maybe_thumb_refcount
= 0;
14271 eh
->plt
.noncall_refcount
= 0;
14279 /* It's possible that we incorrectly decided a .plt reloc was
14280 needed for an R_ARM_PC24 or similar reloc to a non-function sym
14281 in check_relocs. We can't decide accurately between function
14282 and non-function syms in check-relocs; Objects loaded later in
14283 the link may change h->type. So fix it now. */
14284 h
->plt
.offset
= (bfd_vma
) -1;
14285 eh
->plt
.thumb_refcount
= 0;
14286 eh
->plt
.maybe_thumb_refcount
= 0;
14287 eh
->plt
.noncall_refcount
= 0;
14290 /* If this is a weak symbol, and there is a real definition, the
14291 processor independent code will have arranged for us to see the
14292 real definition first, and we can just use the same value. */
14293 if (h
->u
.weakdef
!= NULL
)
14295 BFD_ASSERT (h
->u
.weakdef
->root
.type
== bfd_link_hash_defined
14296 || h
->u
.weakdef
->root
.type
== bfd_link_hash_defweak
);
14297 h
->root
.u
.def
.section
= h
->u
.weakdef
->root
.u
.def
.section
;
14298 h
->root
.u
.def
.value
= h
->u
.weakdef
->root
.u
.def
.value
;
14302 /* If there are no non-GOT references, we do not need a copy
14304 if (!h
->non_got_ref
)
14307 /* This is a reference to a symbol defined by a dynamic object which
14308 is not a function. */
14310 /* If we are creating a shared library, we must presume that the
14311 only references to the symbol are via the global offset table.
14312 For such cases we need not do anything here; the relocations will
14313 be handled correctly by relocate_section. Relocatable executables
14314 can reference data in shared objects directly, so we don't need to
14315 do anything here. */
14316 if (bfd_link_pic (info
) || globals
->root
.is_relocatable_executable
)
14319 /* We must allocate the symbol in our .dynbss section, which will
14320 become part of the .bss section of the executable. There will be
14321 an entry for this symbol in the .dynsym section. The dynamic
14322 object will contain position independent code, so all references
14323 from the dynamic object to this symbol will go through the global
14324 offset table. The dynamic linker will use the .dynsym entry to
14325 determine the address it must put in the global offset table, so
14326 both the dynamic object and the regular object will refer to the
14327 same memory location for the variable. */
14328 s
= bfd_get_linker_section (dynobj
, ".dynbss");
14329 BFD_ASSERT (s
!= NULL
);
14331 /* If allowed, we must generate a R_ARM_COPY reloc to tell the dynamic
14332 linker to copy the initial value out of the dynamic object and into
14333 the runtime process image. We need to remember the offset into the
14334 .rel(a).bss section we are going to use. */
14335 if (info
->nocopyreloc
== 0
14336 && (h
->root
.u
.def
.section
->flags
& SEC_ALLOC
) != 0
14341 srel
= bfd_get_linker_section (dynobj
, RELOC_SECTION (globals
, ".bss"));
14342 elf32_arm_allocate_dynrelocs (info
, srel
, 1);
14346 return _bfd_elf_adjust_dynamic_copy (info
, h
, s
);
14349 /* Allocate space in .plt, .got and associated reloc sections for
14353 allocate_dynrelocs_for_symbol (struct elf_link_hash_entry
*h
, void * inf
)
14355 struct bfd_link_info
*info
;
14356 struct elf32_arm_link_hash_table
*htab
;
14357 struct elf32_arm_link_hash_entry
*eh
;
14358 struct elf_dyn_relocs
*p
;
14360 if (h
->root
.type
== bfd_link_hash_indirect
)
14363 eh
= (struct elf32_arm_link_hash_entry
*) h
;
14365 info
= (struct bfd_link_info
*) inf
;
14366 htab
= elf32_arm_hash_table (info
);
14370 if ((htab
->root
.dynamic_sections_created
|| h
->type
== STT_GNU_IFUNC
)
14371 && h
->plt
.refcount
> 0)
14373 /* Make sure this symbol is output as a dynamic symbol.
14374 Undefined weak syms won't yet be marked as dynamic. */
14375 if (h
->dynindx
== -1
14376 && !h
->forced_local
)
14378 if (! bfd_elf_link_record_dynamic_symbol (info
, h
))
14382 /* If the call in the PLT entry binds locally, the associated
14383 GOT entry should use an R_ARM_IRELATIVE relocation instead of
14384 the usual R_ARM_JUMP_SLOT. Put it in the .iplt section rather
14385 than the .plt section. */
14386 if (h
->type
== STT_GNU_IFUNC
&& SYMBOL_CALLS_LOCAL (info
, h
))
14389 if (eh
->plt
.noncall_refcount
== 0
14390 && SYMBOL_REFERENCES_LOCAL (info
, h
))
14391 /* All non-call references can be resolved directly.
14392 This means that they can (and in some cases, must)
14393 resolve directly to the run-time target, rather than
14394 to the PLT. That in turns means that any .got entry
14395 would be equal to the .igot.plt entry, so there's
14396 no point having both. */
14397 h
->got
.refcount
= 0;
14400 if (bfd_link_pic (info
)
14402 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h
))
14404 elf32_arm_allocate_plt_entry (info
, eh
->is_iplt
, &h
->plt
, &eh
->plt
);
14406 /* If this symbol is not defined in a regular file, and we are
14407 not generating a shared library, then set the symbol to this
14408 location in the .plt. This is required to make function
14409 pointers compare as equal between the normal executable and
14410 the shared library. */
14411 if (! bfd_link_pic (info
)
14412 && !h
->def_regular
)
14414 h
->root
.u
.def
.section
= htab
->root
.splt
;
14415 h
->root
.u
.def
.value
= h
->plt
.offset
;
14417 /* Make sure the function is not marked as Thumb, in case
14418 it is the target of an ABS32 relocation, which will
14419 point to the PLT entry. */
14420 ARM_SET_SYM_BRANCH_TYPE (h
->target_internal
, ST_BRANCH_TO_ARM
);
14423 /* VxWorks executables have a second set of relocations for
14424 each PLT entry. They go in a separate relocation section,
14425 which is processed by the kernel loader. */
14426 if (htab
->vxworks_p
&& !bfd_link_pic (info
))
14428 /* There is a relocation for the initial PLT entry:
14429 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_. */
14430 if (h
->plt
.offset
== htab
->plt_header_size
)
14431 elf32_arm_allocate_dynrelocs (info
, htab
->srelplt2
, 1);
14433 /* There are two extra relocations for each subsequent
14434 PLT entry: an R_ARM_32 relocation for the GOT entry,
14435 and an R_ARM_32 relocation for the PLT entry. */
14436 elf32_arm_allocate_dynrelocs (info
, htab
->srelplt2
, 2);
14441 h
->plt
.offset
= (bfd_vma
) -1;
14447 h
->plt
.offset
= (bfd_vma
) -1;
14451 eh
= (struct elf32_arm_link_hash_entry
*) h
;
14452 eh
->tlsdesc_got
= (bfd_vma
) -1;
14454 if (h
->got
.refcount
> 0)
14458 int tls_type
= elf32_arm_hash_entry (h
)->tls_type
;
14461 /* Make sure this symbol is output as a dynamic symbol.
14462 Undefined weak syms won't yet be marked as dynamic. */
14463 if (h
->dynindx
== -1
14464 && !h
->forced_local
)
14466 if (! bfd_elf_link_record_dynamic_symbol (info
, h
))
14470 if (!htab
->symbian_p
)
14472 s
= htab
->root
.sgot
;
14473 h
->got
.offset
= s
->size
;
14475 if (tls_type
== GOT_UNKNOWN
)
14478 if (tls_type
== GOT_NORMAL
)
14479 /* Non-TLS symbols need one GOT slot. */
14483 if (tls_type
& GOT_TLS_GDESC
)
14485 /* R_ARM_TLS_DESC needs 2 GOT slots. */
14487 = (htab
->root
.sgotplt
->size
14488 - elf32_arm_compute_jump_table_size (htab
));
14489 htab
->root
.sgotplt
->size
+= 8;
14490 h
->got
.offset
= (bfd_vma
) -2;
14491 /* plt.got_offset needs to know there's a TLS_DESC
14492 reloc in the middle of .got.plt. */
14493 htab
->num_tls_desc
++;
14496 if (tls_type
& GOT_TLS_GD
)
14498 /* R_ARM_TLS_GD32 needs 2 consecutive GOT slots. If
14499 the symbol is both GD and GDESC, got.offset may
14500 have been overwritten. */
14501 h
->got
.offset
= s
->size
;
14505 if (tls_type
& GOT_TLS_IE
)
14506 /* R_ARM_TLS_IE32 needs one GOT slot. */
14510 dyn
= htab
->root
.dynamic_sections_created
;
14513 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn
,
14514 bfd_link_pic (info
),
14516 && (!bfd_link_pic (info
)
14517 || !SYMBOL_REFERENCES_LOCAL (info
, h
)))
14520 if (tls_type
!= GOT_NORMAL
14521 && (bfd_link_pic (info
) || indx
!= 0)
14522 && (ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
14523 || h
->root
.type
!= bfd_link_hash_undefweak
))
14525 if (tls_type
& GOT_TLS_IE
)
14526 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelgot
, 1);
14528 if (tls_type
& GOT_TLS_GD
)
14529 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelgot
, 1);
14531 if (tls_type
& GOT_TLS_GDESC
)
14533 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelplt
, 1);
14534 /* GDESC needs a trampoline to jump to. */
14535 htab
->tls_trampoline
= -1;
14538 /* Only GD needs it. GDESC just emits one relocation per
14540 if ((tls_type
& GOT_TLS_GD
) && indx
!= 0)
14541 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelgot
, 1);
14543 else if (indx
!= -1 && !SYMBOL_REFERENCES_LOCAL (info
, h
))
14545 if (htab
->root
.dynamic_sections_created
)
14546 /* Reserve room for the GOT entry's R_ARM_GLOB_DAT relocation. */
14547 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelgot
, 1);
14549 else if (h
->type
== STT_GNU_IFUNC
14550 && eh
->plt
.noncall_refcount
== 0)
14551 /* No non-call references resolve the STT_GNU_IFUNC's PLT entry;
14552 they all resolve dynamically instead. Reserve room for the
14553 GOT entry's R_ARM_IRELATIVE relocation. */
14554 elf32_arm_allocate_irelocs (info
, htab
->root
.srelgot
, 1);
14555 else if (bfd_link_pic (info
)
14556 && (ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
14557 || h
->root
.type
!= bfd_link_hash_undefweak
))
14558 /* Reserve room for the GOT entry's R_ARM_RELATIVE relocation. */
14559 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelgot
, 1);
14563 h
->got
.offset
= (bfd_vma
) -1;
14565 /* Allocate stubs for exported Thumb functions on v4t. */
14566 if (!htab
->use_blx
&& h
->dynindx
!= -1
14568 && ARM_GET_SYM_BRANCH_TYPE (h
->target_internal
) == ST_BRANCH_TO_THUMB
14569 && ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
)
14571 struct elf_link_hash_entry
* th
;
14572 struct bfd_link_hash_entry
* bh
;
14573 struct elf_link_hash_entry
* myh
;
14577 /* Create a new symbol to regist the real location of the function. */
14578 s
= h
->root
.u
.def
.section
;
14579 sprintf (name
, "__real_%s", h
->root
.root
.string
);
14580 _bfd_generic_link_add_one_symbol (info
, s
->owner
,
14581 name
, BSF_GLOBAL
, s
,
14582 h
->root
.u
.def
.value
,
14583 NULL
, TRUE
, FALSE
, &bh
);
14585 myh
= (struct elf_link_hash_entry
*) bh
;
14586 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
14587 myh
->forced_local
= 1;
14588 ARM_SET_SYM_BRANCH_TYPE (myh
->target_internal
, ST_BRANCH_TO_THUMB
);
14589 eh
->export_glue
= myh
;
14590 th
= record_arm_to_thumb_glue (info
, h
);
14591 /* Point the symbol at the stub. */
14592 h
->type
= ELF_ST_INFO (ELF_ST_BIND (h
->type
), STT_FUNC
);
14593 ARM_SET_SYM_BRANCH_TYPE (h
->target_internal
, ST_BRANCH_TO_ARM
);
14594 h
->root
.u
.def
.section
= th
->root
.u
.def
.section
;
14595 h
->root
.u
.def
.value
= th
->root
.u
.def
.value
& ~1;
14598 if (eh
->dyn_relocs
== NULL
)
14601 /* In the shared -Bsymbolic case, discard space allocated for
14602 dynamic pc-relative relocs against symbols which turn out to be
14603 defined in regular objects. For the normal shared case, discard
14604 space for pc-relative relocs that have become local due to symbol
14605 visibility changes. */
14607 if (bfd_link_pic (info
) || htab
->root
.is_relocatable_executable
)
14609 /* Relocs that use pc_count are PC-relative forms, which will appear
14610 on something like ".long foo - ." or "movw REG, foo - .". We want
14611 calls to protected symbols to resolve directly to the function
14612 rather than going via the plt. If people want function pointer
14613 comparisons to work as expected then they should avoid writing
14614 assembly like ".long foo - .". */
14615 if (SYMBOL_CALLS_LOCAL (info
, h
))
14617 struct elf_dyn_relocs
**pp
;
14619 for (pp
= &eh
->dyn_relocs
; (p
= *pp
) != NULL
; )
14621 p
->count
-= p
->pc_count
;
14630 if (htab
->vxworks_p
)
14632 struct elf_dyn_relocs
**pp
;
14634 for (pp
= &eh
->dyn_relocs
; (p
= *pp
) != NULL
; )
14636 if (strcmp (p
->sec
->output_section
->name
, ".tls_vars") == 0)
14643 /* Also discard relocs on undefined weak syms with non-default
14645 if (eh
->dyn_relocs
!= NULL
14646 && h
->root
.type
== bfd_link_hash_undefweak
)
14648 if (ELF_ST_VISIBILITY (h
->other
) != STV_DEFAULT
)
14649 eh
->dyn_relocs
= NULL
;
14651 /* Make sure undefined weak symbols are output as a dynamic
14653 else if (h
->dynindx
== -1
14654 && !h
->forced_local
)
14656 if (! bfd_elf_link_record_dynamic_symbol (info
, h
))
14661 else if (htab
->root
.is_relocatable_executable
&& h
->dynindx
== -1
14662 && h
->root
.type
== bfd_link_hash_new
)
14664 /* Output absolute symbols so that we can create relocations
14665 against them. For normal symbols we output a relocation
14666 against the section that contains them. */
14667 if (! bfd_elf_link_record_dynamic_symbol (info
, h
))
14674 /* For the non-shared case, discard space for relocs against
14675 symbols which turn out to need copy relocs or are not
14678 if (!h
->non_got_ref
14679 && ((h
->def_dynamic
14680 && !h
->def_regular
)
14681 || (htab
->root
.dynamic_sections_created
14682 && (h
->root
.type
== bfd_link_hash_undefweak
14683 || h
->root
.type
== bfd_link_hash_undefined
))))
14685 /* Make sure this symbol is output as a dynamic symbol.
14686 Undefined weak syms won't yet be marked as dynamic. */
14687 if (h
->dynindx
== -1
14688 && !h
->forced_local
)
14690 if (! bfd_elf_link_record_dynamic_symbol (info
, h
))
14694 /* If that succeeded, we know we'll be keeping all the
14696 if (h
->dynindx
!= -1)
14700 eh
->dyn_relocs
= NULL
;
14705 /* Finally, allocate space. */
14706 for (p
= eh
->dyn_relocs
; p
!= NULL
; p
= p
->next
)
14708 asection
*sreloc
= elf_section_data (p
->sec
)->sreloc
;
14709 if (h
->type
== STT_GNU_IFUNC
14710 && eh
->plt
.noncall_refcount
== 0
14711 && SYMBOL_REFERENCES_LOCAL (info
, h
))
14712 elf32_arm_allocate_irelocs (info
, sreloc
, p
->count
);
14714 elf32_arm_allocate_dynrelocs (info
, sreloc
, p
->count
);
14720 /* Find any dynamic relocs that apply to read-only sections. */
14723 elf32_arm_readonly_dynrelocs (struct elf_link_hash_entry
* h
, void * inf
)
14725 struct elf32_arm_link_hash_entry
* eh
;
14726 struct elf_dyn_relocs
* p
;
14728 eh
= (struct elf32_arm_link_hash_entry
*) h
;
14729 for (p
= eh
->dyn_relocs
; p
!= NULL
; p
= p
->next
)
14731 asection
*s
= p
->sec
;
14733 if (s
!= NULL
&& (s
->flags
& SEC_READONLY
) != 0)
14735 struct bfd_link_info
*info
= (struct bfd_link_info
*) inf
;
14737 info
->flags
|= DF_TEXTREL
;
14739 /* Not an error, just cut short the traversal. */
14747 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info
*info
,
14750 struct elf32_arm_link_hash_table
*globals
;
14752 globals
= elf32_arm_hash_table (info
);
14753 if (globals
== NULL
)
14756 globals
->byteswap_code
= byteswap_code
;
14759 /* Set the sizes of the dynamic sections. */
14762 elf32_arm_size_dynamic_sections (bfd
* output_bfd ATTRIBUTE_UNUSED
,
14763 struct bfd_link_info
* info
)
14768 bfd_boolean relocs
;
14770 struct elf32_arm_link_hash_table
*htab
;
14772 htab
= elf32_arm_hash_table (info
);
14776 dynobj
= elf_hash_table (info
)->dynobj
;
14777 BFD_ASSERT (dynobj
!= NULL
);
14778 check_use_blx (htab
);
14780 if (elf_hash_table (info
)->dynamic_sections_created
)
14782 /* Set the contents of the .interp section to the interpreter. */
14783 if (bfd_link_executable (info
) && !info
->nointerp
)
14785 s
= bfd_get_linker_section (dynobj
, ".interp");
14786 BFD_ASSERT (s
!= NULL
);
14787 s
->size
= sizeof ELF_DYNAMIC_INTERPRETER
;
14788 s
->contents
= (unsigned char *) ELF_DYNAMIC_INTERPRETER
;
14792 /* Set up .got offsets for local syms, and space for local dynamic
14794 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link
.next
)
14796 bfd_signed_vma
*local_got
;
14797 bfd_signed_vma
*end_local_got
;
14798 struct arm_local_iplt_info
**local_iplt_ptr
, *local_iplt
;
14799 char *local_tls_type
;
14800 bfd_vma
*local_tlsdesc_gotent
;
14801 bfd_size_type locsymcount
;
14802 Elf_Internal_Shdr
*symtab_hdr
;
14804 bfd_boolean is_vxworks
= htab
->vxworks_p
;
14805 unsigned int symndx
;
14807 if (! is_arm_elf (ibfd
))
14810 for (s
= ibfd
->sections
; s
!= NULL
; s
= s
->next
)
14812 struct elf_dyn_relocs
*p
;
14814 for (p
= (struct elf_dyn_relocs
*)
14815 elf_section_data (s
)->local_dynrel
; p
!= NULL
; p
= p
->next
)
14817 if (!bfd_is_abs_section (p
->sec
)
14818 && bfd_is_abs_section (p
->sec
->output_section
))
14820 /* Input section has been discarded, either because
14821 it is a copy of a linkonce section or due to
14822 linker script /DISCARD/, so we'll be discarding
14825 else if (is_vxworks
14826 && strcmp (p
->sec
->output_section
->name
,
14829 /* Relocations in vxworks .tls_vars sections are
14830 handled specially by the loader. */
14832 else if (p
->count
!= 0)
14834 srel
= elf_section_data (p
->sec
)->sreloc
;
14835 elf32_arm_allocate_dynrelocs (info
, srel
, p
->count
);
14836 if ((p
->sec
->output_section
->flags
& SEC_READONLY
) != 0)
14837 info
->flags
|= DF_TEXTREL
;
14842 local_got
= elf_local_got_refcounts (ibfd
);
14846 symtab_hdr
= & elf_symtab_hdr (ibfd
);
14847 locsymcount
= symtab_hdr
->sh_info
;
14848 end_local_got
= local_got
+ locsymcount
;
14849 local_iplt_ptr
= elf32_arm_local_iplt (ibfd
);
14850 local_tls_type
= elf32_arm_local_got_tls_type (ibfd
);
14851 local_tlsdesc_gotent
= elf32_arm_local_tlsdesc_gotent (ibfd
);
14853 s
= htab
->root
.sgot
;
14854 srel
= htab
->root
.srelgot
;
14855 for (; local_got
< end_local_got
;
14856 ++local_got
, ++local_iplt_ptr
, ++local_tls_type
,
14857 ++local_tlsdesc_gotent
, ++symndx
)
14859 *local_tlsdesc_gotent
= (bfd_vma
) -1;
14860 local_iplt
= *local_iplt_ptr
;
14861 if (local_iplt
!= NULL
)
14863 struct elf_dyn_relocs
*p
;
14865 if (local_iplt
->root
.refcount
> 0)
14867 elf32_arm_allocate_plt_entry (info
, TRUE
,
14870 if (local_iplt
->arm
.noncall_refcount
== 0)
14871 /* All references to the PLT are calls, so all
14872 non-call references can resolve directly to the
14873 run-time target. This means that the .got entry
14874 would be the same as the .igot.plt entry, so there's
14875 no point creating both. */
14880 BFD_ASSERT (local_iplt
->arm
.noncall_refcount
== 0);
14881 local_iplt
->root
.offset
= (bfd_vma
) -1;
14884 for (p
= local_iplt
->dyn_relocs
; p
!= NULL
; p
= p
->next
)
14888 psrel
= elf_section_data (p
->sec
)->sreloc
;
14889 if (local_iplt
->arm
.noncall_refcount
== 0)
14890 elf32_arm_allocate_irelocs (info
, psrel
, p
->count
);
14892 elf32_arm_allocate_dynrelocs (info
, psrel
, p
->count
);
14895 if (*local_got
> 0)
14897 Elf_Internal_Sym
*isym
;
14899 *local_got
= s
->size
;
14900 if (*local_tls_type
& GOT_TLS_GD
)
14901 /* TLS_GD relocs need an 8-byte structure in the GOT. */
14903 if (*local_tls_type
& GOT_TLS_GDESC
)
14905 *local_tlsdesc_gotent
= htab
->root
.sgotplt
->size
14906 - elf32_arm_compute_jump_table_size (htab
);
14907 htab
->root
.sgotplt
->size
+= 8;
14908 *local_got
= (bfd_vma
) -2;
14909 /* plt.got_offset needs to know there's a TLS_DESC
14910 reloc in the middle of .got.plt. */
14911 htab
->num_tls_desc
++;
14913 if (*local_tls_type
& GOT_TLS_IE
)
14916 if (*local_tls_type
& GOT_NORMAL
)
14918 /* If the symbol is both GD and GDESC, *local_got
14919 may have been overwritten. */
14920 *local_got
= s
->size
;
14924 isym
= bfd_sym_from_r_symndx (&htab
->sym_cache
, ibfd
, symndx
);
14928 /* If all references to an STT_GNU_IFUNC PLT are calls,
14929 then all non-call references, including this GOT entry,
14930 resolve directly to the run-time target. */
14931 if (ELF32_ST_TYPE (isym
->st_info
) == STT_GNU_IFUNC
14932 && (local_iplt
== NULL
14933 || local_iplt
->arm
.noncall_refcount
== 0))
14934 elf32_arm_allocate_irelocs (info
, srel
, 1);
14935 else if (bfd_link_pic (info
) || output_bfd
->flags
& DYNAMIC
)
14937 if ((bfd_link_pic (info
) && !(*local_tls_type
& GOT_TLS_GDESC
))
14938 || *local_tls_type
& GOT_TLS_GD
)
14939 elf32_arm_allocate_dynrelocs (info
, srel
, 1);
14941 if (bfd_link_pic (info
) && *local_tls_type
& GOT_TLS_GDESC
)
14943 elf32_arm_allocate_dynrelocs (info
,
14944 htab
->root
.srelplt
, 1);
14945 htab
->tls_trampoline
= -1;
14950 *local_got
= (bfd_vma
) -1;
14954 if (htab
->tls_ldm_got
.refcount
> 0)
14956 /* Allocate two GOT entries and one dynamic relocation (if necessary)
14957 for R_ARM_TLS_LDM32 relocations. */
14958 htab
->tls_ldm_got
.offset
= htab
->root
.sgot
->size
;
14959 htab
->root
.sgot
->size
+= 8;
14960 if (bfd_link_pic (info
))
14961 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelgot
, 1);
14964 htab
->tls_ldm_got
.offset
= -1;
14966 /* Allocate global sym .plt and .got entries, and space for global
14967 sym dynamic relocs. */
14968 elf_link_hash_traverse (& htab
->root
, allocate_dynrelocs_for_symbol
, info
);
14970 /* Here we rummage through the found bfds to collect glue information. */
14971 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link
.next
)
14973 if (! is_arm_elf (ibfd
))
14976 /* Initialise mapping tables for code/data. */
14977 bfd_elf32_arm_init_maps (ibfd
);
14979 if (!bfd_elf32_arm_process_before_allocation (ibfd
, info
)
14980 || !bfd_elf32_arm_vfp11_erratum_scan (ibfd
, info
)
14981 || !bfd_elf32_arm_stm32l4xx_erratum_scan (ibfd
, info
))
14982 /* xgettext:c-format */
14983 _bfd_error_handler (_("Errors encountered processing file %s"),
14987 /* Allocate space for the glue sections now that we've sized them. */
14988 bfd_elf32_arm_allocate_interworking_sections (info
);
14990 /* For every jump slot reserved in the sgotplt, reloc_count is
14991 incremented. However, when we reserve space for TLS descriptors,
14992 it's not incremented, so in order to compute the space reserved
14993 for them, it suffices to multiply the reloc count by the jump
14995 if (htab
->root
.srelplt
)
14996 htab
->sgotplt_jump_table_size
= elf32_arm_compute_jump_table_size(htab
);
14998 if (htab
->tls_trampoline
)
15000 if (htab
->root
.splt
->size
== 0)
15001 htab
->root
.splt
->size
+= htab
->plt_header_size
;
15003 htab
->tls_trampoline
= htab
->root
.splt
->size
;
15004 htab
->root
.splt
->size
+= htab
->plt_entry_size
;
15006 /* If we're not using lazy TLS relocations, don't generate the
15007 PLT and GOT entries they require. */
15008 if (!(info
->flags
& DF_BIND_NOW
))
15010 htab
->dt_tlsdesc_got
= htab
->root
.sgot
->size
;
15011 htab
->root
.sgot
->size
+= 4;
15013 htab
->dt_tlsdesc_plt
= htab
->root
.splt
->size
;
15014 htab
->root
.splt
->size
+= 4 * ARRAY_SIZE (dl_tlsdesc_lazy_trampoline
);
15018 /* The check_relocs and adjust_dynamic_symbol entry points have
15019 determined the sizes of the various dynamic sections. Allocate
15020 memory for them. */
15023 for (s
= dynobj
->sections
; s
!= NULL
; s
= s
->next
)
15027 if ((s
->flags
& SEC_LINKER_CREATED
) == 0)
15030 /* It's OK to base decisions on the section name, because none
15031 of the dynobj section names depend upon the input files. */
15032 name
= bfd_get_section_name (dynobj
, s
);
15034 if (s
== htab
->root
.splt
)
15036 /* Remember whether there is a PLT. */
15037 plt
= s
->size
!= 0;
15039 else if (CONST_STRNEQ (name
, ".rel"))
15043 /* Remember whether there are any reloc sections other
15044 than .rel(a).plt and .rela.plt.unloaded. */
15045 if (s
!= htab
->root
.srelplt
&& s
!= htab
->srelplt2
)
15048 /* We use the reloc_count field as a counter if we need
15049 to copy relocs into the output file. */
15050 s
->reloc_count
= 0;
15053 else if (s
!= htab
->root
.sgot
15054 && s
!= htab
->root
.sgotplt
15055 && s
!= htab
->root
.iplt
15056 && s
!= htab
->root
.igotplt
15057 && s
!= htab
->sdynbss
)
15059 /* It's not one of our sections, so don't allocate space. */
15065 /* If we don't need this section, strip it from the
15066 output file. This is mostly to handle .rel(a).bss and
15067 .rel(a).plt. We must create both sections in
15068 create_dynamic_sections, because they must be created
15069 before the linker maps input sections to output
15070 sections. The linker does that before
15071 adjust_dynamic_symbol is called, and it is that
15072 function which decides whether anything needs to go
15073 into these sections. */
15074 s
->flags
|= SEC_EXCLUDE
;
15078 if ((s
->flags
& SEC_HAS_CONTENTS
) == 0)
15081 /* Allocate memory for the section contents. */
15082 s
->contents
= (unsigned char *) bfd_zalloc (dynobj
, s
->size
);
15083 if (s
->contents
== NULL
)
15087 if (elf_hash_table (info
)->dynamic_sections_created
)
15089 /* Add some entries to the .dynamic section. We fill in the
15090 values later, in elf32_arm_finish_dynamic_sections, but we
15091 must add the entries now so that we get the correct size for
15092 the .dynamic section. The DT_DEBUG entry is filled in by the
15093 dynamic linker and used by the debugger. */
15094 #define add_dynamic_entry(TAG, VAL) \
15095 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
15097 if (bfd_link_executable (info
))
15099 if (!add_dynamic_entry (DT_DEBUG
, 0))
15105 if ( !add_dynamic_entry (DT_PLTGOT
, 0)
15106 || !add_dynamic_entry (DT_PLTRELSZ
, 0)
15107 || !add_dynamic_entry (DT_PLTREL
,
15108 htab
->use_rel
? DT_REL
: DT_RELA
)
15109 || !add_dynamic_entry (DT_JMPREL
, 0))
15112 if (htab
->dt_tlsdesc_plt
&&
15113 (!add_dynamic_entry (DT_TLSDESC_PLT
,0)
15114 || !add_dynamic_entry (DT_TLSDESC_GOT
,0)))
15122 if (!add_dynamic_entry (DT_REL
, 0)
15123 || !add_dynamic_entry (DT_RELSZ
, 0)
15124 || !add_dynamic_entry (DT_RELENT
, RELOC_SIZE (htab
)))
15129 if (!add_dynamic_entry (DT_RELA
, 0)
15130 || !add_dynamic_entry (DT_RELASZ
, 0)
15131 || !add_dynamic_entry (DT_RELAENT
, RELOC_SIZE (htab
)))
15136 /* If any dynamic relocs apply to a read-only section,
15137 then we need a DT_TEXTREL entry. */
15138 if ((info
->flags
& DF_TEXTREL
) == 0)
15139 elf_link_hash_traverse (& htab
->root
, elf32_arm_readonly_dynrelocs
,
15142 if ((info
->flags
& DF_TEXTREL
) != 0)
15144 if (!add_dynamic_entry (DT_TEXTREL
, 0))
15147 if (htab
->vxworks_p
15148 && !elf_vxworks_add_dynamic_entries (output_bfd
, info
))
15151 #undef add_dynamic_entry
15156 /* Size sections even though they're not dynamic. We use it to setup
15157 _TLS_MODULE_BASE_, if needed. */
15160 elf32_arm_always_size_sections (bfd
*output_bfd
,
15161 struct bfd_link_info
*info
)
15165 if (bfd_link_relocatable (info
))
15168 tls_sec
= elf_hash_table (info
)->tls_sec
;
15172 struct elf_link_hash_entry
*tlsbase
;
15174 tlsbase
= elf_link_hash_lookup
15175 (elf_hash_table (info
), "_TLS_MODULE_BASE_", TRUE
, TRUE
, FALSE
);
15179 struct bfd_link_hash_entry
*bh
= NULL
;
15180 const struct elf_backend_data
*bed
15181 = get_elf_backend_data (output_bfd
);
15183 if (!(_bfd_generic_link_add_one_symbol
15184 (info
, output_bfd
, "_TLS_MODULE_BASE_", BSF_LOCAL
,
15185 tls_sec
, 0, NULL
, FALSE
,
15186 bed
->collect
, &bh
)))
15189 tlsbase
->type
= STT_TLS
;
15190 tlsbase
= (struct elf_link_hash_entry
*)bh
;
15191 tlsbase
->def_regular
= 1;
15192 tlsbase
->other
= STV_HIDDEN
;
15193 (*bed
->elf_backend_hide_symbol
) (info
, tlsbase
, TRUE
);
15199 /* Finish up dynamic symbol handling. We set the contents of various
15200 dynamic sections here. */
15203 elf32_arm_finish_dynamic_symbol (bfd
* output_bfd
,
15204 struct bfd_link_info
* info
,
15205 struct elf_link_hash_entry
* h
,
15206 Elf_Internal_Sym
* sym
)
15208 struct elf32_arm_link_hash_table
*htab
;
15209 struct elf32_arm_link_hash_entry
*eh
;
15211 htab
= elf32_arm_hash_table (info
);
15215 eh
= (struct elf32_arm_link_hash_entry
*) h
;
15217 if (h
->plt
.offset
!= (bfd_vma
) -1)
15221 BFD_ASSERT (h
->dynindx
!= -1);
15222 if (! elf32_arm_populate_plt_entry (output_bfd
, info
, &h
->plt
, &eh
->plt
,
15227 if (!h
->def_regular
)
15229 /* Mark the symbol as undefined, rather than as defined in
15230 the .plt section. */
15231 sym
->st_shndx
= SHN_UNDEF
;
15232 /* If the symbol is weak we need to clear the value.
15233 Otherwise, the PLT entry would provide a definition for
15234 the symbol even if the symbol wasn't defined anywhere,
15235 and so the symbol would never be NULL. Leave the value if
15236 there were any relocations where pointer equality matters
15237 (this is a clue for the dynamic linker, to make function
15238 pointer comparisons work between an application and shared
15240 if (!h
->ref_regular_nonweak
|| !h
->pointer_equality_needed
)
15243 else if (eh
->is_iplt
&& eh
->plt
.noncall_refcount
!= 0)
15245 /* At least one non-call relocation references this .iplt entry,
15246 so the .iplt entry is the function's canonical address. */
15247 sym
->st_info
= ELF_ST_INFO (ELF_ST_BIND (sym
->st_info
), STT_FUNC
);
15248 ARM_SET_SYM_BRANCH_TYPE (sym
->st_target_internal
, ST_BRANCH_TO_ARM
);
15249 sym
->st_shndx
= (_bfd_elf_section_from_bfd_section
15250 (output_bfd
, htab
->root
.iplt
->output_section
));
15251 sym
->st_value
= (h
->plt
.offset
15252 + htab
->root
.iplt
->output_section
->vma
15253 + htab
->root
.iplt
->output_offset
);
15260 Elf_Internal_Rela rel
;
15262 /* This symbol needs a copy reloc. Set it up. */
15263 BFD_ASSERT (h
->dynindx
!= -1
15264 && (h
->root
.type
== bfd_link_hash_defined
15265 || h
->root
.type
== bfd_link_hash_defweak
));
15268 BFD_ASSERT (s
!= NULL
);
15271 rel
.r_offset
= (h
->root
.u
.def
.value
15272 + h
->root
.u
.def
.section
->output_section
->vma
15273 + h
->root
.u
.def
.section
->output_offset
);
15274 rel
.r_info
= ELF32_R_INFO (h
->dynindx
, R_ARM_COPY
);
15275 elf32_arm_add_dynreloc (output_bfd
, info
, s
, &rel
);
15278 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. On VxWorks,
15279 the _GLOBAL_OFFSET_TABLE_ symbol is not absolute: it is relative
15280 to the ".got" section. */
15281 if (h
== htab
->root
.hdynamic
15282 || (!htab
->vxworks_p
&& h
== htab
->root
.hgot
))
15283 sym
->st_shndx
= SHN_ABS
;
15289 arm_put_trampoline (struct elf32_arm_link_hash_table
*htab
, bfd
*output_bfd
,
15291 const unsigned long *template, unsigned count
)
15295 for (ix
= 0; ix
!= count
; ix
++)
15297 unsigned long insn
= template[ix
];
15299 /* Emit mov pc,rx if bx is not permitted. */
15300 if (htab
->fix_v4bx
== 1 && (insn
& 0x0ffffff0) == 0x012fff10)
15301 insn
= (insn
& 0xf000000f) | 0x01a0f000;
15302 put_arm_insn (htab
, output_bfd
, insn
, (char *)contents
+ ix
*4);
15306 /* Install the special first PLT entry for elf32-arm-nacl. Unlike
15307 other variants, NaCl needs this entry in a static executable's
15308 .iplt too. When we're handling that case, GOT_DISPLACEMENT is
15309 zero. For .iplt really only the last bundle is useful, and .iplt
15310 could have a shorter first entry, with each individual PLT entry's
15311 relative branch calculated differently so it targets the last
15312 bundle instead of the instruction before it (labelled .Lplt_tail
15313 above). But it's simpler to keep the size and layout of PLT0
15314 consistent with the dynamic case, at the cost of some dead code at
15315 the start of .iplt and the one dead store to the stack at the start
15318 arm_nacl_put_plt0 (struct elf32_arm_link_hash_table
*htab
, bfd
*output_bfd
,
15319 asection
*plt
, bfd_vma got_displacement
)
15323 put_arm_insn (htab
, output_bfd
,
15324 elf32_arm_nacl_plt0_entry
[0]
15325 | arm_movw_immediate (got_displacement
),
15326 plt
->contents
+ 0);
15327 put_arm_insn (htab
, output_bfd
,
15328 elf32_arm_nacl_plt0_entry
[1]
15329 | arm_movt_immediate (got_displacement
),
15330 plt
->contents
+ 4);
15332 for (i
= 2; i
< ARRAY_SIZE (elf32_arm_nacl_plt0_entry
); ++i
)
15333 put_arm_insn (htab
, output_bfd
,
15334 elf32_arm_nacl_plt0_entry
[i
],
15335 plt
->contents
+ (i
* 4));
15338 /* Finish up the dynamic sections. */
15341 elf32_arm_finish_dynamic_sections (bfd
* output_bfd
, struct bfd_link_info
* info
)
15346 struct elf32_arm_link_hash_table
*htab
;
15348 htab
= elf32_arm_hash_table (info
);
15352 dynobj
= elf_hash_table (info
)->dynobj
;
15354 sgot
= htab
->root
.sgotplt
;
15355 /* A broken linker script might have discarded the dynamic sections.
15356 Catch this here so that we do not seg-fault later on. */
15357 if (sgot
!= NULL
&& bfd_is_abs_section (sgot
->output_section
))
15359 sdyn
= bfd_get_linker_section (dynobj
, ".dynamic");
15361 if (elf_hash_table (info
)->dynamic_sections_created
)
15364 Elf32_External_Dyn
*dyncon
, *dynconend
;
15366 splt
= htab
->root
.splt
;
15367 BFD_ASSERT (splt
!= NULL
&& sdyn
!= NULL
);
15368 BFD_ASSERT (htab
->symbian_p
|| sgot
!= NULL
);
15370 dyncon
= (Elf32_External_Dyn
*) sdyn
->contents
;
15371 dynconend
= (Elf32_External_Dyn
*) (sdyn
->contents
+ sdyn
->size
);
15373 for (; dyncon
< dynconend
; dyncon
++)
15375 Elf_Internal_Dyn dyn
;
15379 bfd_elf32_swap_dyn_in (dynobj
, dyncon
, &dyn
);
15386 if (htab
->vxworks_p
15387 && elf_vxworks_finish_dynamic_entry (output_bfd
, &dyn
))
15388 bfd_elf32_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
15393 goto get_vma_if_bpabi
;
15396 goto get_vma_if_bpabi
;
15399 goto get_vma_if_bpabi
;
15401 name
= ".gnu.version";
15402 goto get_vma_if_bpabi
;
15404 name
= ".gnu.version_d";
15405 goto get_vma_if_bpabi
;
15407 name
= ".gnu.version_r";
15408 goto get_vma_if_bpabi
;
15411 name
= htab
->symbian_p
? ".got" : ".got.plt";
15414 name
= RELOC_SECTION (htab
, ".plt");
15416 s
= bfd_get_linker_section (dynobj
, name
);
15419 (*_bfd_error_handler
)
15420 (_("could not find section %s"), name
);
15421 bfd_set_error (bfd_error_invalid_operation
);
15424 if (!htab
->symbian_p
)
15425 dyn
.d_un
.d_ptr
= s
->output_section
->vma
+ s
->output_offset
;
15427 /* In the BPABI, tags in the PT_DYNAMIC section point
15428 at the file offset, not the memory address, for the
15429 convenience of the post linker. */
15430 dyn
.d_un
.d_ptr
= s
->output_section
->filepos
+ s
->output_offset
;
15431 bfd_elf32_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
15435 if (htab
->symbian_p
)
15440 s
= htab
->root
.srelplt
;
15441 BFD_ASSERT (s
!= NULL
);
15442 dyn
.d_un
.d_val
= s
->size
;
15443 bfd_elf32_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
15448 if (!htab
->symbian_p
)
15450 /* My reading of the SVR4 ABI indicates that the
15451 procedure linkage table relocs (DT_JMPREL) should be
15452 included in the overall relocs (DT_REL). This is
15453 what Solaris does. However, UnixWare can not handle
15454 that case. Therefore, we override the DT_RELSZ entry
15455 here to make it not include the JMPREL relocs. Since
15456 the linker script arranges for .rel(a).plt to follow all
15457 other relocation sections, we don't have to worry
15458 about changing the DT_REL entry. */
15459 s
= htab
->root
.srelplt
;
15461 dyn
.d_un
.d_val
-= s
->size
;
15462 bfd_elf32_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
15465 /* Fall through. */
15469 /* In the BPABI, the DT_REL tag must point at the file
15470 offset, not the VMA, of the first relocation
15471 section. So, we use code similar to that in
15472 elflink.c, but do not check for SHF_ALLOC on the
15473 relcoation section, since relocations sections are
15474 never allocated under the BPABI. The comments above
15475 about Unixware notwithstanding, we include all of the
15476 relocations here. */
15477 if (htab
->symbian_p
)
15480 type
= ((dyn
.d_tag
== DT_REL
|| dyn
.d_tag
== DT_RELSZ
)
15481 ? SHT_REL
: SHT_RELA
);
15482 dyn
.d_un
.d_val
= 0;
15483 for (i
= 1; i
< elf_numsections (output_bfd
); i
++)
15485 Elf_Internal_Shdr
*hdr
15486 = elf_elfsections (output_bfd
)[i
];
15487 if (hdr
->sh_type
== type
)
15489 if (dyn
.d_tag
== DT_RELSZ
15490 || dyn
.d_tag
== DT_RELASZ
)
15491 dyn
.d_un
.d_val
+= hdr
->sh_size
;
15492 else if ((ufile_ptr
) hdr
->sh_offset
15493 <= dyn
.d_un
.d_val
- 1)
15494 dyn
.d_un
.d_val
= hdr
->sh_offset
;
15497 bfd_elf32_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
15501 case DT_TLSDESC_PLT
:
15502 s
= htab
->root
.splt
;
15503 dyn
.d_un
.d_ptr
= (s
->output_section
->vma
+ s
->output_offset
15504 + htab
->dt_tlsdesc_plt
);
15505 bfd_elf32_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
15508 case DT_TLSDESC_GOT
:
15509 s
= htab
->root
.sgot
;
15510 dyn
.d_un
.d_ptr
= (s
->output_section
->vma
+ s
->output_offset
15511 + htab
->dt_tlsdesc_got
);
15512 bfd_elf32_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
15515 /* Set the bottom bit of DT_INIT/FINI if the
15516 corresponding function is Thumb. */
15518 name
= info
->init_function
;
15521 name
= info
->fini_function
;
15523 /* If it wasn't set by elf_bfd_final_link
15524 then there is nothing to adjust. */
15525 if (dyn
.d_un
.d_val
!= 0)
15527 struct elf_link_hash_entry
* eh
;
15529 eh
= elf_link_hash_lookup (elf_hash_table (info
), name
,
15530 FALSE
, FALSE
, TRUE
);
15532 && ARM_GET_SYM_BRANCH_TYPE (eh
->target_internal
)
15533 == ST_BRANCH_TO_THUMB
)
15535 dyn
.d_un
.d_val
|= 1;
15536 bfd_elf32_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
15543 /* Fill in the first entry in the procedure linkage table. */
15544 if (splt
->size
> 0 && htab
->plt_header_size
)
15546 const bfd_vma
*plt0_entry
;
15547 bfd_vma got_address
, plt_address
, got_displacement
;
15549 /* Calculate the addresses of the GOT and PLT. */
15550 got_address
= sgot
->output_section
->vma
+ sgot
->output_offset
;
15551 plt_address
= splt
->output_section
->vma
+ splt
->output_offset
;
15553 if (htab
->vxworks_p
)
15555 /* The VxWorks GOT is relocated by the dynamic linker.
15556 Therefore, we must emit relocations rather than simply
15557 computing the values now. */
15558 Elf_Internal_Rela rel
;
15560 plt0_entry
= elf32_arm_vxworks_exec_plt0_entry
;
15561 put_arm_insn (htab
, output_bfd
, plt0_entry
[0],
15562 splt
->contents
+ 0);
15563 put_arm_insn (htab
, output_bfd
, plt0_entry
[1],
15564 splt
->contents
+ 4);
15565 put_arm_insn (htab
, output_bfd
, plt0_entry
[2],
15566 splt
->contents
+ 8);
15567 bfd_put_32 (output_bfd
, got_address
, splt
->contents
+ 12);
15569 /* Generate a relocation for _GLOBAL_OFFSET_TABLE_. */
15570 rel
.r_offset
= plt_address
+ 12;
15571 rel
.r_info
= ELF32_R_INFO (htab
->root
.hgot
->indx
, R_ARM_ABS32
);
15573 SWAP_RELOC_OUT (htab
) (output_bfd
, &rel
,
15574 htab
->srelplt2
->contents
);
15576 else if (htab
->nacl_p
)
15577 arm_nacl_put_plt0 (htab
, output_bfd
, splt
,
15578 got_address
+ 8 - (plt_address
+ 16));
15579 else if (using_thumb_only (htab
))
15581 got_displacement
= got_address
- (plt_address
+ 12);
15583 plt0_entry
= elf32_thumb2_plt0_entry
;
15584 put_arm_insn (htab
, output_bfd
, plt0_entry
[0],
15585 splt
->contents
+ 0);
15586 put_arm_insn (htab
, output_bfd
, plt0_entry
[1],
15587 splt
->contents
+ 4);
15588 put_arm_insn (htab
, output_bfd
, plt0_entry
[2],
15589 splt
->contents
+ 8);
15591 bfd_put_32 (output_bfd
, got_displacement
, splt
->contents
+ 12);
15595 got_displacement
= got_address
- (plt_address
+ 16);
15597 plt0_entry
= elf32_arm_plt0_entry
;
15598 put_arm_insn (htab
, output_bfd
, plt0_entry
[0],
15599 splt
->contents
+ 0);
15600 put_arm_insn (htab
, output_bfd
, plt0_entry
[1],
15601 splt
->contents
+ 4);
15602 put_arm_insn (htab
, output_bfd
, plt0_entry
[2],
15603 splt
->contents
+ 8);
15604 put_arm_insn (htab
, output_bfd
, plt0_entry
[3],
15605 splt
->contents
+ 12);
15607 #ifdef FOUR_WORD_PLT
15608 /* The displacement value goes in the otherwise-unused
15609 last word of the second entry. */
15610 bfd_put_32 (output_bfd
, got_displacement
, splt
->contents
+ 28);
15612 bfd_put_32 (output_bfd
, got_displacement
, splt
->contents
+ 16);
15617 /* UnixWare sets the entsize of .plt to 4, although that doesn't
15618 really seem like the right value. */
15619 if (splt
->output_section
->owner
== output_bfd
)
15620 elf_section_data (splt
->output_section
)->this_hdr
.sh_entsize
= 4;
15622 if (htab
->dt_tlsdesc_plt
)
15624 bfd_vma got_address
15625 = sgot
->output_section
->vma
+ sgot
->output_offset
;
15626 bfd_vma gotplt_address
= (htab
->root
.sgot
->output_section
->vma
15627 + htab
->root
.sgot
->output_offset
);
15628 bfd_vma plt_address
15629 = splt
->output_section
->vma
+ splt
->output_offset
;
15631 arm_put_trampoline (htab
, output_bfd
,
15632 splt
->contents
+ htab
->dt_tlsdesc_plt
,
15633 dl_tlsdesc_lazy_trampoline
, 6);
15635 bfd_put_32 (output_bfd
,
15636 gotplt_address
+ htab
->dt_tlsdesc_got
15637 - (plt_address
+ htab
->dt_tlsdesc_plt
)
15638 - dl_tlsdesc_lazy_trampoline
[6],
15639 splt
->contents
+ htab
->dt_tlsdesc_plt
+ 24);
15640 bfd_put_32 (output_bfd
,
15641 got_address
- (plt_address
+ htab
->dt_tlsdesc_plt
)
15642 - dl_tlsdesc_lazy_trampoline
[7],
15643 splt
->contents
+ htab
->dt_tlsdesc_plt
+ 24 + 4);
15646 if (htab
->tls_trampoline
)
15648 arm_put_trampoline (htab
, output_bfd
,
15649 splt
->contents
+ htab
->tls_trampoline
,
15650 tls_trampoline
, 3);
15651 #ifdef FOUR_WORD_PLT
15652 bfd_put_32 (output_bfd
, 0x00000000,
15653 splt
->contents
+ htab
->tls_trampoline
+ 12);
15657 if (htab
->vxworks_p
15658 && !bfd_link_pic (info
)
15659 && htab
->root
.splt
->size
> 0)
15661 /* Correct the .rel(a).plt.unloaded relocations. They will have
15662 incorrect symbol indexes. */
15666 num_plts
= ((htab
->root
.splt
->size
- htab
->plt_header_size
)
15667 / htab
->plt_entry_size
);
15668 p
= htab
->srelplt2
->contents
+ RELOC_SIZE (htab
);
15670 for (; num_plts
; num_plts
--)
15672 Elf_Internal_Rela rel
;
15674 SWAP_RELOC_IN (htab
) (output_bfd
, p
, &rel
);
15675 rel
.r_info
= ELF32_R_INFO (htab
->root
.hgot
->indx
, R_ARM_ABS32
);
15676 SWAP_RELOC_OUT (htab
) (output_bfd
, &rel
, p
);
15677 p
+= RELOC_SIZE (htab
);
15679 SWAP_RELOC_IN (htab
) (output_bfd
, p
, &rel
);
15680 rel
.r_info
= ELF32_R_INFO (htab
->root
.hplt
->indx
, R_ARM_ABS32
);
15681 SWAP_RELOC_OUT (htab
) (output_bfd
, &rel
, p
);
15682 p
+= RELOC_SIZE (htab
);
15687 if (htab
->nacl_p
&& htab
->root
.iplt
!= NULL
&& htab
->root
.iplt
->size
> 0)
15688 /* NaCl uses a special first entry in .iplt too. */
15689 arm_nacl_put_plt0 (htab
, output_bfd
, htab
->root
.iplt
, 0);
15691 /* Fill in the first three entries in the global offset table. */
15694 if (sgot
->size
> 0)
15697 bfd_put_32 (output_bfd
, (bfd_vma
) 0, sgot
->contents
);
15699 bfd_put_32 (output_bfd
,
15700 sdyn
->output_section
->vma
+ sdyn
->output_offset
,
15702 bfd_put_32 (output_bfd
, (bfd_vma
) 0, sgot
->contents
+ 4);
15703 bfd_put_32 (output_bfd
, (bfd_vma
) 0, sgot
->contents
+ 8);
15706 elf_section_data (sgot
->output_section
)->this_hdr
.sh_entsize
= 4;
/* Finalize the ELF header for ABFD just before it is written out.
   Sets the ARM OSABI marker for pre-EABI objects, the ABI version,
   the BE8 flag when byte-swapped code was requested, the hard/soft
   float EABI flag for v5 executables and shared objects, and marks
   execute-only segments.  */

static void
elf32_arm_post_process_headers (bfd * abfd, struct bfd_link_info * link_info ATTRIBUTE_UNUSED)
{
  Elf_Internal_Ehdr * i_ehdrp;	/* ELF file header, internal form.  */
  struct elf32_arm_link_hash_table *globals;
  struct elf_segment_map *m;

  i_ehdrp = elf_elfheader (abfd);

  /* Old (pre-EABI) objects are identified by the OSABI byte instead of
     the e_flags version field.  */
  if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
    i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
  else
    _bfd_elf_post_process_headers (abfd, link_info);
  i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;

  if (link_info)
    {
      globals = elf32_arm_hash_table (link_info);
      /* --be8: code sections have been byte-swapped to little-endian.  */
      if (globals != NULL && globals->byteswap_code)
	i_ehdrp->e_flags |= EF_ARM_BE8;
    }

  /* Record the VFP-argument calling convention for v5 EABI executables
     and shared libraries, read from the build attributes.  */
  if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_VER5
      && ((i_ehdrp->e_type == ET_DYN) || (i_ehdrp->e_type == ET_EXEC)))
    {
      int abi = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_ABI_VFP_args);
      if (abi == AEABI_VFP_args_vfp)
	i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_HARD;
      else
	i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_SOFT;
    }

  /* Scan segment to set p_flags attribute if it contains only sections with
     SHF_ARM_NOREAD flag.  */
  for (m = elf_seg_map (abfd); m != NULL; m = m->next)
    {
      unsigned int j;

      if (m->count == 0)
	continue;
      for (j = 0; j < m->count; j++)
	{
	  if (!(elf_section_flags (m->sections[j]) & SHF_ARM_NOREAD))
	    break;
	}
      /* Every section in the segment is execute-only: make the segment
	 itself execute-only (no read permission).  */
      if (j == m->count)
	{
	  m->p_flags = PF_X;
	  m->p_flags_valid = 1;
	}
    }
}
15765 static enum elf_reloc_type_class
15766 elf32_arm_reloc_type_class (const struct bfd_link_info
*info ATTRIBUTE_UNUSED
,
15767 const asection
*rel_sec ATTRIBUTE_UNUSED
,
15768 const Elf_Internal_Rela
*rela
)
15770 switch ((int) ELF32_R_TYPE (rela
->r_info
))
15772 case R_ARM_RELATIVE
:
15773 return reloc_class_relative
;
15774 case R_ARM_JUMP_SLOT
:
15775 return reloc_class_plt
;
15777 return reloc_class_copy
;
15778 case R_ARM_IRELATIVE
:
15779 return reloc_class_ifunc
;
15781 return reloc_class_normal
;
/* Final processing before the output file is written: bring any
   ARM_NOTE_SECTION note in ABFD up to date.  LINKER is unused.  */

static void
elf32_arm_final_write_processing (bfd *abfd, bfd_boolean linker ATTRIBUTE_UNUSED)
{
  bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
}
15791 /* Return TRUE if this is an unwinding table entry. */
15794 is_arm_elf_unwind_section_name (bfd
* abfd ATTRIBUTE_UNUSED
, const char * name
)
15796 return (CONST_STRNEQ (name
, ELF_STRING_ARM_unwind
)
15797 || CONST_STRNEQ (name
, ELF_STRING_ARM_unwind_once
));
15801 /* Set the type and flags for an ARM section. We do this by
15802 the section name, which is a hack, but ought to work. */
15805 elf32_arm_fake_sections (bfd
* abfd
, Elf_Internal_Shdr
* hdr
, asection
* sec
)
15809 name
= bfd_get_section_name (abfd
, sec
);
15811 if (is_arm_elf_unwind_section_name (abfd
, name
))
15813 hdr
->sh_type
= SHT_ARM_EXIDX
;
15814 hdr
->sh_flags
|= SHF_LINK_ORDER
;
15817 if (sec
->flags
& SEC_ELF_NOREAD
)
15818 hdr
->sh_flags
|= SHF_ARM_NOREAD
;
/* Handle an ARM specific section when reading an object file.  This is
   called when bfd_section_from_shdr finds a section with an unknown
   type.  Returns TRUE if the section was claimed, FALSE otherwise.  */

static bfd_boolean
elf32_arm_section_from_shdr (bfd *abfd,
			     Elf_Internal_Shdr * hdr,
			     const char *name,
			     int shindex)
{
  /* There ought to be a place to keep ELF backend specific flags, but
     at the moment there isn't one.  We just keep track of the
     sections by their name, instead.  Fortunately, the ABI gives
     names for all the ARM specific sections, so we will probably get
     away with this.  */
  switch (hdr->sh_type)
    {
    case SHT_ARM_EXIDX:
    case SHT_ARM_PREEMPTMAP:
    case SHT_ARM_ATTRIBUTES:
      break;

    default:
      /* Not one of ours: let other handlers have a go.  */
      return FALSE;
    }

  if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
    return FALSE;

  return TRUE;
}
15855 static _arm_elf_section_data
*
15856 get_arm_elf_section_data (asection
* sec
)
15858 if (sec
&& sec
->owner
&& is_arm_elf (sec
->owner
))
15859 return elf32_arm_section_data (sec
);
15867 struct bfd_link_info
*info
;
15870 int (*func
) (void *, const char *, Elf_Internal_Sym
*,
15871 asection
*, struct elf_link_hash_entry
*);
15872 } output_arch_syminfo
;
15874 enum map_symbol_type
/* Output a single mapping symbol ($a, $t or $d according to TYPE) at
   OFFSET within the section recorded in OSI, both to the output symbol
   table (via osi->func) and to the section's own map.  Returns TRUE on
   success.  */

static bfd_boolean
elf32_arm_output_map_sym (output_arch_syminfo *osi,
			  enum map_symbol_type type,
			  bfd_vma offset)
{
  /* Indexed by map_symbol_type: ARM code, Thumb code, data.  */
  static const char *names[3] = {"$a", "$t", "$d"};
  Elf_Internal_Sym sym;

  sym.st_value = osi->sec->output_section->vma
		 + osi->sec->output_offset
		 + offset;
  sym.st_size = 0;
  sym.st_other = 0;
  sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
  sym.st_shndx = osi->sec_shndx;
  sym.st_target_internal = 0;
  /* Record the state character ('a', 't' or 'd') in the section map.  */
  elf32_arm_section_map_add (osi->sec, names[type][1], offset);
  return osi->func (osi->flaginfo, names[type], &sym, osi->sec, NULL) == 1;
}
/* Output mapping symbols for the PLT entry described by ROOT_PLT and ARM_PLT.
   IS_IPLT_ENTRY_P says whether the PLT is in .iplt rather than .plt.
   The pattern of symbols differs per target flavor (Symbian, VxWorks,
   NaCl, Thumb-only, classic ARM).  Returns TRUE on success.  */

static bfd_boolean
elf32_arm_output_plt_map_1 (output_arch_syminfo *osi,
			    bfd_boolean is_iplt_entry_p,
			    union gotplt_union *root_plt,
			    struct arm_plt_info *arm_plt)
{
  struct elf32_arm_link_hash_table *htab;
  bfd_vma addr, plt_header_size;

  /* No PLT entry was allocated for this symbol.  */
  if (root_plt->offset == (bfd_vma) -1)
    return TRUE;

  htab = elf32_arm_hash_table (osi->info);
  if (htab == NULL)
    return FALSE;

  if (is_iplt_entry_p)
    {
      osi->sec = htab->root.iplt;
      plt_header_size = 0;
    }
  else
    {
      osi->sec = htab->root.splt;
      plt_header_size = htab->plt_header_size;
    }
  osi->sec_shndx = (_bfd_elf_section_from_bfd_section
		    (osi->info->output_bfd, osi->sec->output_section));

  /* Clear the Thumb bit to get the entry address proper.  */
  addr = root_plt->offset & -2;
  if (htab->symbian_p)
    {
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 4))
	return FALSE;
    }
  else if (htab->vxworks_p)
    {
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
	return FALSE;
    }
  else if (htab->nacl_p)
    {
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
    }
  else if (using_thumb_only (htab))
    {
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr))
	return FALSE;
    }
  else
    {
      bfd_boolean thumb_stub_p;

      thumb_stub_p = elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt);
      if (thumb_stub_p)
	{
	  /* The Thumb entry stub sits 4 bytes before the ARM entry.  */
	  if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
	    return FALSE;
	}
#ifdef FOUR_WORD_PLT
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
	return FALSE;
#else
      /* A three-word PLT with no Thumb thunk contains only Arm code,
	 so only need to output a mapping symbol for the first PLT entry and
	 entries with thumb thunks.  */
      if (thumb_stub_p || addr == plt_header_size)
	{
	  if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	    return FALSE;
	}
#endif
    }

  return TRUE;
}
/* Output mapping symbols for PLT entries associated with H.  Callback
   for elf_link_hash_traverse; INF is the output_arch_syminfo state.  */

static bfd_boolean
elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
{
  output_arch_syminfo *osi = (output_arch_syminfo *) inf;
  struct elf32_arm_link_hash_entry *eh;

  if (h->root.type == bfd_link_hash_indirect)
    return TRUE;

  if (h->root.type == bfd_link_hash_warning)
    /* When warning symbols are created, they **replace** the "real"
       entry in the hash table, thus we never get to see the real
       symbol in a hash traversal.  So look at it now.  */
    h = (struct elf_link_hash_entry *) h->root.u.i.link;

  eh = (struct elf32_arm_link_hash_entry *) h;
  /* A symbol that always resolves locally uses the .iplt scheme.  */
  return elf32_arm_output_plt_map_1 (osi, SYMBOL_CALLS_LOCAL (osi->info, h),
				     &h->plt, &eh->plt);
}
/* Bind a veneered symbol to its veneer identified by its hash entry
   STUB_ENTRY.  The veneered location thus loses its symbol: the symbol's
   definition is redirected to point at the stub itself.  */

static void
arm_stub_claim_sym (struct elf32_arm_stub_hash_entry *stub_entry)
{
  struct elf32_arm_link_hash_entry *hash = stub_entry->h;

  BFD_ASSERT (hash);
  hash->root.root.u.def.section = stub_entry->stub_sec;
  hash->root.root.u.def.value = stub_entry->stub_offset;
  hash->root.size = stub_entry->stub_size;
}
/* Output a single local STT_FUNC symbol NAME of SIZE bytes for a
   generated stub at OFFSET within the section recorded in OSI.
   Returns TRUE on success.  */

static bfd_boolean
elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
			   bfd_vma offset, bfd_vma size)
{
  Elf_Internal_Sym sym;

  sym.st_value = osi->sec->output_section->vma
		 + osi->sec->output_offset
		 + offset;
  sym.st_size = size;
  sym.st_other = 0;
  sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  sym.st_shndx = osi->sec_shndx;
  sym.st_target_internal = 0;
  return osi->func (osi->flaginfo, name, &sym, osi->sec, NULL) == 1;
}
/* Output a symbol and mapping symbols for one long-branch stub.
   Callback for bfd_hash_traverse over the stub hash table; IN_ARG is
   the output_arch_syminfo state.  Emits either a claimed symbol or a
   local STT_FUNC symbol for the stub, then one mapping symbol per
   state change ($a/$t/$d) across the stub template.  */

static bfd_boolean
arm_map_one_stub (struct bfd_hash_entry * gen_entry,
		  void * in_arg)
{
  struct elf32_arm_stub_hash_entry *stub_entry;
  asection *stub_sec;
  bfd_vma addr;
  char *stub_name;
  output_arch_syminfo *osi;
  const insn_sequence *template_sequence;
  enum stub_insn_type prev_type;
  int size;
  int i;
  enum map_symbol_type sym_type;

  /* Massage our args to the form they really have.  */
  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
  osi = (output_arch_syminfo *) in_arg;

  stub_sec = stub_entry->stub_sec;

  /* Ensure this stub is attached to the current section being
     processed.  */
  if (stub_sec != osi->sec)
    return TRUE;

  addr = (bfd_vma) stub_entry->stub_offset;
  template_sequence = stub_entry->stub_template;

  if (arm_stub_sym_claimed (stub_entry->stub_type))
    arm_stub_claim_sym (stub_entry);
  else
    {
      stub_name = stub_entry->output_name;
      switch (template_sequence[0].type)
	{
	case ARM_TYPE:
	  if (!elf32_arm_output_stub_sym (osi, stub_name, addr,
					  stub_entry->stub_size))
	    return FALSE;
	  break;
	case THUMB16_TYPE:
	case THUMB32_TYPE:
	  /* Thumb entry points carry the low bit set in their value.  */
	  if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
					  stub_entry->stub_size))
	    return FALSE;
	  break;
	default:
	  BFD_FAIL ();
	  return 0;
	}
    }

  /* Walk the template and emit a mapping symbol whenever the
     instruction state changes.  */
  prev_type = DATA_TYPE;
  size = 0;
  for (i = 0; i < stub_entry->stub_template_size; i++)
    {
      switch (template_sequence[i].type)
	{
	case ARM_TYPE:
	  sym_type = ARM_MAP_ARM;
	  break;

	case THUMB16_TYPE:
	case THUMB32_TYPE:
	  sym_type = ARM_MAP_THUMB;
	  break;

	case DATA_TYPE:
	  sym_type = ARM_MAP_DATA;
	  break;

	default:
	  BFD_FAIL ();
	  return FALSE;
	}

      if (template_sequence[i].type != prev_type)
	{
	  prev_type = template_sequence[i].type;
	  if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
	    return FALSE;
	}

      /* Advance by the size of this template element.  */
      switch (template_sequence[i].type)
	{
	case ARM_TYPE:
	case THUMB32_TYPE:
	  size += 4;
	  break;

	case THUMB16_TYPE:
	  size += 2;
	  break;

	case DATA_TYPE:
	  size += 4;
	  break;

	default:
	  BFD_FAIL ();
	  return FALSE;
	}
    }

  return TRUE;
}
/* Output mapping symbols for linker generated sections,
   and for those data-only sections that do not have a
   $d mapping symbol of their own.  Called once at final-link time;
   FUNC is the callback that actually emits each symbol.  Covers
   ARM<->Thumb glue, BX veneers, long-branch stub sections, the PLT
   header, PLT/iPLT entries and the TLS trampolines.  */

static bfd_boolean
elf32_arm_output_arch_local_syms (bfd *output_bfd,
				  struct bfd_link_info *info,
				  void *flaginfo,
				  int (*func) (void *, const char *,
					       Elf_Internal_Sym *,
					       asection *,
					       struct elf_link_hash_entry *))
{
  output_arch_syminfo osi;
  struct elf32_arm_link_hash_table *htab;
  bfd_vma offset;
  bfd_size_type size;
  bfd *input_bfd;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  check_use_blx (htab);

  osi.flaginfo = flaginfo;
  osi.info = info;
  osi.func = func;

  /* Add a $d mapping symbol to data-only sections that
     don't have any mapping symbol.  This may result in (harmless) redundant
     mapping symbols.  */
  for (input_bfd = info->input_bfds;
       input_bfd != NULL;
       input_bfd = input_bfd->link.next)
    {
      if ((input_bfd->flags & (BFD_LINKER_CREATED | HAS_SYMS)) == HAS_SYMS)
	for (osi.sec = input_bfd->sections;
	     osi.sec != NULL;
	     osi.sec = osi.sec->next)
	  {
	    if (osi.sec->output_section != NULL
		&& ((osi.sec->output_section->flags & (SEC_ALLOC | SEC_CODE))
		    != 0)
		&& (osi.sec->flags & (SEC_HAS_CONTENTS | SEC_LINKER_CREATED))
		   == SEC_HAS_CONTENTS
		&& get_arm_elf_section_data (osi.sec) != NULL
		&& get_arm_elf_section_data (osi.sec)->mapcount == 0
		&& osi.sec->size > 0
		&& (osi.sec->flags & SEC_EXCLUDE) == 0)
	      {
		osi.sec_shndx = _bfd_elf_section_from_bfd_section
		  (output_bfd, osi.sec->output_section);
		if (osi.sec_shndx != (int)SHN_BAD)
		  elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 0);
	      }
	  }
    }

  /* ARM->Thumb glue.  */
  if (htab->arm_glue_size > 0)
    {
      osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
					ARM2THUMB_GLUE_SECTION_NAME);

      osi.sec_shndx = _bfd_elf_section_from_bfd_section
	(output_bfd, osi.sec->output_section);
      /* The glue entry size depends on which flavor of veneer is used.  */
      if (bfd_link_pic (info) || htab->root.is_relocatable_executable
	  || htab->pic_veneer)
	size = ARM2THUMB_PIC_GLUE_SIZE;
      else if (htab->use_blx)
	size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
      else
	size = ARM2THUMB_STATIC_GLUE_SIZE;

      for (offset = 0; offset < htab->arm_glue_size; offset += size)
	{
	  /* Each veneer is ARM code followed by a data word.  */
	  elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
	  elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
	}
    }

  /* Thumb->ARM glue.  */
  if (htab->thumb_glue_size > 0)
    {
      osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
					THUMB2ARM_GLUE_SECTION_NAME);

      osi.sec_shndx = _bfd_elf_section_from_bfd_section
	(output_bfd, osi.sec->output_section);
      size = THUMB2ARM_GLUE_SIZE;

      for (offset = 0; offset < htab->thumb_glue_size; offset += size)
	{
	  /* Each veneer starts with Thumb code and switches to ARM.  */
	  elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
	  elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
	}
    }

  /* ARMv4 BX veneers.  */
  if (htab->bx_glue_size > 0)
    {
      osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
					ARM_BX_GLUE_SECTION_NAME);

      osi.sec_shndx = _bfd_elf_section_from_bfd_section
	(output_bfd, osi.sec->output_section);

      elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
    }

  /* Long calls stubs.  */
  if (htab->stub_bfd && htab->stub_bfd->sections)
    {
      asection* stub_sec;

      for (stub_sec = htab->stub_bfd->sections;
	   stub_sec != NULL;
	   stub_sec = stub_sec->next)
	{
	  /* Ignore non-stub sections.  */
	  if (!strstr (stub_sec->name, STUB_SUFFIX))
	    continue;

	  osi.sec = stub_sec;

	  osi.sec_shndx = _bfd_elf_section_from_bfd_section
	    (output_bfd, osi.sec->output_section);

	  bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
	}
    }

  /* Finally, output mapping symbols for the PLT.  */
  if (htab->root.splt && htab->root.splt->size > 0)
    {
      osi.sec = htab->root.splt;
      osi.sec_shndx = (_bfd_elf_section_from_bfd_section
		       (output_bfd, osi.sec->output_section));

      /* Output mapping symbols for the plt header.  SymbianOS does not have a
	 plt header.  */
      if (htab->vxworks_p)
	{
	  /* VxWorks shared libraries have no PLT header.  */
	  if (!bfd_link_pic (info))
	    {
	      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
		return FALSE;
	      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
		return FALSE;
	    }
	}
      else if (htab->nacl_p)
	{
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
	    return FALSE;
	}
      else if (using_thumb_only (htab))
	{
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 0))
	    return FALSE;
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
	    return FALSE;
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 16))
	    return FALSE;
	}
      else if (!htab->symbian_p)
	{
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
	    return FALSE;
#ifndef FOUR_WORD_PLT
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
	    return FALSE;
#endif
	}
    }
  if (htab->nacl_p && htab->root.iplt && htab->root.iplt->size > 0)
    {
      /* NaCl uses a special first entry in .iplt too.  */
      osi.sec = htab->root.iplt;
      osi.sec_shndx = (_bfd_elf_section_from_bfd_section
		       (output_bfd, osi.sec->output_section));
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
	return FALSE;
    }
  if ((htab->root.splt && htab->root.splt->size > 0)
      || (htab->root.iplt && htab->root.iplt->size > 0))
    {
      /* Global PLT entries, then local iplt entries per input bfd.  */
      elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, &osi);
      for (input_bfd = info->input_bfds;
	   input_bfd != NULL;
	   input_bfd = input_bfd->link.next)
	{
	  struct arm_local_iplt_info **local_iplt;
	  unsigned int i, num_syms;

	  local_iplt = elf32_arm_local_iplt (input_bfd);
	  if (local_iplt != NULL)
	    {
	      num_syms = elf_symtab_hdr (input_bfd).sh_info;
	      for (i = 0; i < num_syms; i++)
		if (local_iplt[i] != NULL
		    && !elf32_arm_output_plt_map_1 (&osi, TRUE,
						    &local_iplt[i]->root,
						    &local_iplt[i]->arm))
		  return FALSE;
	    }
	}
    }
  if (htab->dt_tlsdesc_plt != 0)
    {
      /* Mapping symbols for the lazy tls trampoline.  */
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->dt_tlsdesc_plt))
	return FALSE;

      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
				     htab->dt_tlsdesc_plt + 24))
	return FALSE;
    }
  if (htab->tls_trampoline != 0)
    {
      /* Mapping symbols for the tls trampoline.  */
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->tls_trampoline))
	return FALSE;
#ifdef FOUR_WORD_PLT
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
				     htab->tls_trampoline + 12))
	return FALSE;
#endif
    }

  return TRUE;
}
/* Allocate target specific section data.  Attaches a zeroed
   _arm_elf_section_data to SEC (if none is present yet) before
   delegating to the generic ELF hook.  */

static bfd_boolean
elf32_arm_new_section_hook (bfd *abfd, asection *sec)
{
  if (!sec->used_by_bfd)
    {
      _arm_elf_section_data *sdata;
      bfd_size_type amt = sizeof (*sdata);

      sdata = (_arm_elf_section_data *) bfd_zalloc (abfd, amt);
      if (sdata == NULL)
	return FALSE;
      sec->used_by_bfd = sdata;
    }

  return _bfd_elf_new_section_hook (abfd, sec);
}
16413 /* Used to order a list of mapping symbols by address. */
16416 elf32_arm_compare_mapping (const void * a
, const void * b
)
16418 const elf32_arm_section_map
*amap
= (const elf32_arm_section_map
*) a
;
16419 const elf32_arm_section_map
*bmap
= (const elf32_arm_section_map
*) b
;
16421 if (amap
->vma
> bmap
->vma
)
16423 else if (amap
->vma
< bmap
->vma
)
16425 else if (amap
->type
> bmap
->type
)
16426 /* Ensure results do not depend on the host qsort for objects with
16427 multiple mapping symbols at the same address by sorting on type
16430 else if (amap
->type
< bmap
->type
)
16436 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
16438 static unsigned long
16439 offset_prel31 (unsigned long addr
, bfd_vma offset
)
16441 return (addr
& ~0x7ffffffful
) | ((addr
+ offset
) & 0x7ffffffful
);
/* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
   relocations.  FROM and TO point at the 8-byte source and destination
   entries; OUTPUT_BFD supplies the byte order.  */

static void
copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
{
  unsigned long first_word = bfd_get_32 (output_bfd, from);
  unsigned long second_word = bfd_get_32 (output_bfd, from + 4);

  /* High bit of first word is supposed to be zero.  */
  if ((first_word & 0x80000000ul) == 0)
    first_word = offset_prel31 (first_word, offset);

  /* If the high bit of the second word is clear, and the bit pattern is not
     0x1 (EXIDX_CANTUNWIND), this is an offset to an .ARM.extab entry.  */
  if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
    second_word = offset_prel31 (second_word, offset);

  bfd_put_32 (output_bfd, first_word, to);
  bfd_put_32 (output_bfd, second_word, to + 4);
}
/* Data for make_branch_to_a8_stub().  */

struct a8_branch_to_stub_data
{
  /* Section whose contents are currently being patched.  */
  asection *writing_section;
  /* In-memory copy of that section's contents.  */
  bfd_byte *contents;
};
/* Helper to insert branches to Cortex-A8 erratum stubs in the right
   places for a particular section.  Callback for bfd_hash_traverse
   over the stub hash table; IN_ARG is a struct a8_branch_to_stub_data.
   Rewrites the offending 32-bit Thumb branch at the recorded source
   location so that it targets the veneer instead.  */

static bfd_boolean
make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
			void *in_arg)
{
  struct elf32_arm_stub_hash_entry *stub_entry;
  struct a8_branch_to_stub_data *data;
  bfd_byte *contents;
  unsigned long branch_insn;
  bfd_vma veneered_insn_loc, veneer_entry_loc;
  bfd_signed_vma branch_offset;
  bfd *abfd;
  bfd_vma loc;

  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
  data = (struct a8_branch_to_stub_data *) in_arg;

  /* Only process A8 erratum stubs belonging to the section we are
     currently writing.  */
  if (stub_entry->target_section != data->writing_section
      || stub_entry->stub_type < arm_stub_a8_veneer_lwm)
    return TRUE;

  contents = data->contents;

  /* We use target_section as Cortex-A8 erratum workaround stubs are only
     generated when both source and target are in the same section.  */
  veneered_insn_loc = stub_entry->target_section->output_section->vma
		      + stub_entry->target_section->output_offset
		      + stub_entry->source_value;

  veneer_entry_loc = stub_entry->stub_sec->output_section->vma
		     + stub_entry->stub_sec->output_offset
		     + stub_entry->stub_offset;

  /* BLX targets are always word-aligned.  */
  if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
    veneered_insn_loc &= ~3u;

  branch_offset = veneer_entry_loc - veneered_insn_loc - 4;

  abfd = stub_entry->target_section->owner;
  loc = stub_entry->source_value;

  /* We attempt to avoid this condition by setting stubs_always_after_branch
     in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
     This check is just to be on the safe side...  */
  if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
    {
      (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub is "
			       "allocated in unsafe location"), abfd);
      return FALSE;
    }

  switch (stub_entry->stub_type)
    {
    case arm_stub_a8_veneer_b:
    case arm_stub_a8_veneer_b_cond:
      branch_insn = 0xf0009000;
      goto jump24;

    case arm_stub_a8_veneer_blx:
      branch_insn = 0xf000e800;
      goto jump24;

    case arm_stub_a8_veneer_bl:
      {
	unsigned int i1, j1, i2, j2, s;

	branch_insn = 0xf000d000;

      jump24:
	if (branch_offset < -16777216 || branch_offset > 16777214)
	  {
	    /* There's not much we can do apart from complain if this
	       happens.  */
	    (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub out "
				     "of range (input file too large)"), abfd);
	    return FALSE;
	  }

	/* Pack the offset into the Thumb-2 24-bit branch encoding:
	   i1 = not(j1 eor s), so:
	   not i1 = j1 eor s
	   j1 = (not i1) eor s.  */
	branch_insn |= (branch_offset >> 1) & 0x7ff;
	branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
	i2 = (branch_offset >> 22) & 1;
	i1 = (branch_offset >> 23) & 1;
	s = (branch_offset >> 24) & 1;
	j1 = (!i1) ^ s;
	j2 = (!i2) ^ s;
	branch_insn |= j2 << 11;
	branch_insn |= j1 << 13;
	branch_insn |= s << 26;
      }
      break;

    default:
      BFD_FAIL ();
      return FALSE;
    }

  /* Write the two Thumb halfwords of the rewritten branch.  */
  bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[loc]);
  bfd_put_16 (abfd, branch_insn & 0xffff, &contents[loc + 2]);

  return TRUE;
}
16583 /* Beginning of stm32l4xx work-around. */
16585 /* Functions encoding instructions necessary for the emission of the
16586 fix-stm32l4xx-629360.
16587 Encoding is extracted from the
16588 ARM (C) Architecture Reference Manual
16589 ARMv7-A and ARMv7-R edition
16590 ARM DDI 0406C.b (ID072512). */
/* Encode a Thumb-2 B.W (encoding T4) with the given signed
   BRANCH_OFFSET in bytes; returns the 32-bit instruction word.  */

static inline bfd_vma
create_instruction_branch_absolute (int branch_offset)
{
  /* A8.8.18 B (A8-334)
     B target_address (Encoding T4).  */
  /* 1111 - 0Sii - iiii - iiii - 10J1 - Jiii - iiii - iiii.  */
  /* jump offset is:  S:I1:I2:imm10:imm11:0.  */
  /* with : I1 = NOT (J1 EOR S) I2 = NOT (J2 EOR S).  */

  int s = ((branch_offset & 0x1000000) >> 24);
  int j1 = s ^ !((branch_offset & 0x800000) >> 23);
  int j2 = s ^ !((branch_offset & 0x400000) >> 22);

  /* The +/-16MB encodable range for a Thumb-2 B.W.  */
  if (branch_offset < -(1 << 24) || branch_offset >= (1 << 24))
    BFD_ASSERT (0 && "Error: branch out of range.  Cannot create branch.");

  bfd_vma patched_inst = 0xf0009000
    | s << 26 /* S.  */
    | (((unsigned long) (branch_offset) >> 12) & 0x3ff) << 16 /* imm10.  */
    | j1 << 13 /* J1.  */
    | j2 << 11 /* J2.  */
    | (((unsigned long) (branch_offset) >> 1) & 0x7ff); /* imm11.  */

  return patched_inst;
}
/* Encode a Thumb-2 LDMIA (encoding T2) loading the registers in
   REG_MASK from BASE_REG, with writeback when WBACK is non-zero.  */

static inline bfd_vma
create_instruction_ldmia (int base_reg, int wback, int reg_mask)
{
  /* A8.8.57 LDM/LDMIA/LDMFD (A8-396)
     LDMIA Rn!, {Ra, Rb, Rc, ...} (Encoding T2).  */
  bfd_vma patched_inst = 0xe8900000
    | (/*W=*/wback << 21)
    | (base_reg << 16)
    | (reg_mask & 0x0000ffff);

  return patched_inst;
}
/* Encode a Thumb-2 LDMDB (encoding T1) loading the registers in
   REG_MASK from BASE_REG, with writeback when WBACK is non-zero.  */

static inline bfd_vma
create_instruction_ldmdb (int base_reg, int wback, int reg_mask)
{
  /* A8.8.60 LDMDB/LDMEA (A8-402)
     LDMDB Rn!, {Ra, Rb, Rc, ...} (Encoding T1).  */
  bfd_vma patched_inst = 0xe9100000
    | (/*W=*/wback << 21)
    | (base_reg << 16)
    | (reg_mask & 0x0000ffff);

  return patched_inst;
}
/* Encode a 16-bit Thumb MOV (register, encoding T1) copying SOURCE_REG
   into TARGET_REG.  TARGET_REG may be a high register (bit 3 becomes D).  */

static inline bfd_vma
create_instruction_mov (int target_reg, int source_reg)
{
  /* A8.8.103 MOV (register) (A8-486)
     MOV Rd, Rm (Encoding T1).  */
  bfd_vma patched_inst = 0x4600
    | (target_reg & 0x7)
    | ((target_reg & 0x8) >> 3) << 7
    | (source_reg << 3);

  return patched_inst;
}
/* Encode a Thumb-2 SUB (immediate, encoding T3):
   TARGET_REG = SOURCE_REG - VALUE, flags unchanged (S bit clear).
   VALUE is split into the i:imm3:imm8 fields of the encoding.  */

static inline bfd_vma
create_instruction_sub (int target_reg, int source_reg, int value)
{
  /* A8.8.221 SUB (immediate) (A8-708)
     SUB Rd, Rn, #value (Encoding T3).  */
  bfd_vma patched_inst = 0xf1a00000
    | (target_reg << 8)
    | (source_reg << 16)
    | (/*S=*/0 << 20)
    | ((value & 0x800) >> 11) << 26
    | ((value & 0x700) >>  8) << 12
    | (value & 0x0ff);

  return patched_inst;
}
/* Encode a VLDMIA loading NUM_WORDS words starting at FIRST_REG from
   BASE_REG.  IS_DP selects double-precision (T1) vs single-precision
   (T2) register lists; WBACK sets the writeback bit.  */

static inline bfd_vma
create_instruction_vldmia (int base_reg, int is_dp, int wback, int num_words,
			   int first_reg)
{
  /* A8.8.332 VLDM (A8-922)
     VLMD{MODE} Rn{!}, {list} (Encoding T1 or T2).  */
  bfd_vma patched_inst = (is_dp ? 0xec900b00 : 0xec900a00)
    | (/*W=*/wback << 21)
    | (base_reg << 16)
    | (num_words & 0x000000ff)
    | (((unsigned)first_reg >> 1) & 0x0000000f) << 12
    | (first_reg & 0x00000001) << 22;

  return patched_inst;
}
/* Encode a VLDMDB (decrement-before, writeback always set in the base
   opcode) loading NUM_WORDS words starting at FIRST_REG from BASE_REG.
   IS_DP selects double- vs single-precision register lists.  */

static inline bfd_vma
create_instruction_vldmdb (int base_reg, int is_dp, int num_words,
			   int first_reg)
{
  /* A8.8.332 VLDM (A8-922)
     VLMD{MODE} Rn!, {} (Encoding T1 or T2).  */
  bfd_vma patched_inst = (is_dp ? 0xed300b00 : 0xed300a00)
    | (base_reg << 16)
    | (num_words & 0x000000ff)
    | (((unsigned)first_reg >>1 ) & 0x0000000f) << 12
    | (first_reg & 0x00000001) << 22;

  return patched_inst;
}
/* Encode a 32-bit Thumb-2 UDF.W (permanently undefined, encoding T2)
   carrying VALUE in its immediate fields.  Only ever called with 0 in
   this file.  */

static inline bfd_vma
create_instruction_udf_w (int value)
{
  /* A8.8.247 UDF (A8-758)
     Undefined (Encoding T2).  */
  /* NOTE(review): the "<< 16" below shifts bits 16-19 of VALUE out of
     the 32-bit word rather than into the imm4 field — harmless while
     VALUE is always 0, but looks wrong for non-zero values; verify
     against the T2 encoding before reusing with VALUE != 0.  */
  bfd_vma patched_inst = 0xf7f0a000
    | (value & 0x00000fff)
    | (value & 0x000f0000) << 16;

  return patched_inst;
}
/* Encode a 16-bit Thumb UDF (permanently undefined, encoding T1)
   carrying the low byte of VALUE as its immediate.  */

static inline bfd_vma
create_instruction_udf (int value)
{
  /* A8.8.247 UDF (A8-758)
     Undefined (Encoding T1).  */
  bfd_vma patched_inst = 0xde00
    | (value & 0xff);

  return patched_inst;
}
16727 /* Functions writing an instruction in memory, returning the next
16728 memory position to write to. */
/* Write the 32-bit Thumb-2 instruction INSN at PT and return the next
   write position (PT + 4).  */

static inline bfd_byte *
push_thumb2_insn32 (struct elf32_arm_link_hash_table * htab,
		    bfd * output_bfd, bfd_byte *pt, insn32 insn)
{
  put_thumb2_insn (htab, output_bfd, insn, pt);
  return pt + 4;
}
/* Write the 16-bit Thumb instruction INSN at PT and return the next
   write position (PT + 2).  */

static inline bfd_byte *
push_thumb2_insn16 (struct elf32_arm_link_hash_table * htab,
		    bfd * output_bfd, bfd_byte *pt, insn32 insn)
{
  put_thumb_insn (htab, output_bfd, insn, pt);
  return pt + 2;
}
/* Function filling up a region in memory with T1 and T2 UDFs taking
   care of alignment.  Pads from FROM_STUB_CONTENTS up to
   END_STUB_CONTENTS; BASE_STUB_CONTENTS gives the stub start so that
   alignment within the stub can be computed.  Returns the final write
   position.  */

static bfd_byte *
stm32l4xx_fill_stub_udf (struct elf32_arm_link_hash_table * htab,
			 bfd * output_bfd,
			 const bfd_byte * const base_stub_contents,
			 bfd_byte * const from_stub_contents,
			 const bfd_byte * const end_stub_contents)
{
  bfd_byte *current_stub_contents = from_stub_contents;

  /* Fill the remaining of the stub with deterministic contents : UDF
     instructions.
     Check if realignment is needed on modulo 4 frontier using T1, to
     further use T2.  */
  if ((current_stub_contents < end_stub_contents)
      && !((current_stub_contents - base_stub_contents) % 2)
      && ((current_stub_contents - base_stub_contents) % 4))
    current_stub_contents =
      push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
			  create_instruction_udf (0));

  /* Now aligned: fill the rest with 32-bit UDF.W instructions.  */
  for (; current_stub_contents < end_stub_contents;)
    current_stub_contents =
      push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			  create_instruction_udf_w (0));

  return current_stub_contents;
}
16777 /* Functions writing the stream of instructions equivalent to the
16778 derived sequence for ldmia, ldmdb, vldm respectively. */
static void
stm32l4xx_create_replacing_stub_ldmia (struct elf32_arm_link_hash_table * htab,
				       bfd * output_bfd,
				       const insn32 initial_insn,
				       const bfd_byte *const initial_insn_addr,
				       bfd_byte *const base_stub_contents)
{
  /* Emit, at BASE_STUB_CONTENTS, a veneer that replaces the wide Thumb-2
     LDMIA instruction INITIAL_INSN (located at INITIAL_INSN_ADDR), working
     around the STM32L4xx erratum by splitting loads of more than 8
     registers into two smaller LDMs.  */
  int wback = (initial_insn & 0x00200000) >> 21;
  int ri, rn = (initial_insn & 0x000F0000) >> 16;
  int insn_all_registers = initial_insn & 0x0000ffff;
  int insn_low_registers, insn_high_registers;
  int usable_register_mask;
  int nb_registers = popcount (insn_all_registers);
  int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
  int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_ldmia (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 registers load sequences that do not cause the
     hardware issue.  */
  if (nb_registers <= 8)
    {
      /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));

      /* Fill the remaining of the stub with deterministic contents.  */
      current_stub_contents =
	stm32l4xx_fill_stub_udf (htab, output_bfd,
				 base_stub_contents, current_stub_contents,
				 base_stub_contents +
				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

      return;
    }

  /* - reg_list[13] == 0.  */
  BFD_ASSERT ((insn_all_registers & (1 << 13))==0);

  /* - reg_list[14] & reg_list[15] != 1.  */
  BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);

  /* - if (wback==1) reg_list[rn] == 0.  */
  BFD_ASSERT (!wback || !restore_rn);

  /* - nb_registers > 8.  */
  BFD_ASSERT (popcount (insn_all_registers) > 8);

  /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */

  /* In the following algorithm, we split this wide LDM using 2 LDM insns:
    - One with the 7 lowest registers (register mask 0x007F)
      This LDM will finally contain between 2 and 7 registers
    - One with the 7 highest registers (register mask 0xDF80)
      This ldm will finally contain between 2 and 7 registers.  */
  insn_low_registers = insn_all_registers & 0x007F;
  insn_high_registers = insn_all_registers & 0xDF80;

  /* A spare register may be needed during this veneer to temporarily
     handle the base register.  This register will be restored with the
     last LDM operation.
     The usable register may be any general purpose register (that
     excludes PC, SP, LR : register mask is 0x1FFF).  */
  usable_register_mask = 0x1FFF;

  /* Generate the stub function.  */
  if (wback)
    {
      /* With writeback the base register advances itself, so the two
	 half-loads can both use Rn! directly.  */

      /* LDMIA Rn!, {R-low-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (rn, /*wback=*/1, insn_low_registers));

      /* LDMIA Rn!, {R-high-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (rn, /*wback=*/1, insn_high_registers));

      if (!restore_pc)
	{
	  /* B initial_insn_addr+4.  */
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_branch_absolute
				(initial_insn_addr - current_stub_contents));
	}
    }
  else /* if (!wback).  */
    {
      ri = rn;

      /* If Rn is not part of the high-register-list, move it there.  */
      if (!(insn_high_registers & (1 << rn)))
	{
	  /* Choose a Ri in the high-register-list that will be restored.  */
	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

	  /* MOV Ri, Rn.  */
	  current_stub_contents =
	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
				create_instruction_mov (ri, rn));
	}

      /* LDMIA Ri!, {R-low-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list} : (Encoding T2).
	 No writeback on the last load so Ri itself is overwritten by the
	 load, restoring the program's view of the registers.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));

      if (!restore_pc)
	{
	  /* B initial_insn_addr+4.  */
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_branch_absolute
				(initial_insn_addr - current_stub_contents));
	}
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
}
static void
stm32l4xx_create_replacing_stub_ldmdb (struct elf32_arm_link_hash_table * htab,
				       bfd * output_bfd,
				       const insn32 initial_insn,
				       const bfd_byte *const initial_insn_addr,
				       bfd_byte *const base_stub_contents)
{
  /* Emit, at BASE_STUB_CONTENTS, a veneer replacing the wide Thumb-2
     LDMDB instruction INITIAL_INSN (at INITIAL_INSN_ADDR), splitting
     loads of more than 8 registers to work around the STM32L4xx
     erratum.  The case analysis below covers every legal combination of
     writeback, PC-in-list and Rn-in-list.  */
  int wback = (initial_insn & 0x00200000) >> 21;
  int ri, rn = (initial_insn & 0x000f0000) >> 16;
  int insn_all_registers = initial_insn & 0x0000ffff;
  int insn_low_registers, insn_high_registers;
  int usable_register_mask;
  int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
  int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
  int nb_registers = popcount (insn_all_registers);
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_ldmdb (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 registers load sequences that do not cause the
     hardware issue.  */
  if (nb_registers <= 8)
    {
      /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));

      /* Fill the remaining of the stub with deterministic contents.  */
      current_stub_contents =
	stm32l4xx_fill_stub_udf (htab, output_bfd,
				 base_stub_contents, current_stub_contents,
				 base_stub_contents +
				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

      return;
    }

  /* - reg_list[13] == 0.  */
  BFD_ASSERT ((insn_all_registers & (1 << 13)) == 0);

  /* - reg_list[14] & reg_list[15] != 1.  */
  BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);

  /* - if (wback==1) reg_list[rn] == 0.  */
  BFD_ASSERT (!wback || !restore_rn);

  /* - nb_registers > 8.  */
  BFD_ASSERT (popcount (insn_all_registers) > 8);

  /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */

  /* In the following algorithm, we split this wide LDM using 2 LDM insn:
    - One with the 7 lowest registers (register mask 0x007F)
      This LDM will finally contain between 2 and 7 registers
    - One with the 7 highest registers (register mask 0xDF80)
      This ldm will finally contain between 2 and 7 registers.  */
  insn_low_registers = insn_all_registers & 0x007F;
  insn_high_registers = insn_all_registers & 0xDF80;

  /* A spare register may be needed during this veneer to temporarily
     handle the base register.  This register will be restored with
     the last LDM operation.
     The usable register may be any general purpose register (that excludes
     PC, SP, LR : register mask is 0x1FFF).  */
  usable_register_mask = 0x1FFF;

  /* Generate the stub function.  */
  if (!wback && !restore_pc && !restore_rn)
    {
      /* Choose a Ri in the low-register-list that will be restored.  */
      ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));

      /* MOV Ri, Rn.  */
      current_stub_contents =
	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
			    create_instruction_mov (ri, rn));

      /* LDMDB Ri!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/1, insn_high_registers));

      /* LDMDB Ri, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/0, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (wback && !restore_pc && !restore_rn)
    {
      /* Writeback, DB direction: the high half is consumed first so Rn!
	 can be used for both halves.  */

      /* LDMDB Rn!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (rn, /*wback=*/1, insn_high_registers));

      /* LDMDB Rn!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (rn, /*wback=*/1, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (!wback && restore_pc && !restore_rn)
    {
      /* Choose a Ri in the high-register-list that will be restored.  */
      ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

      /* SUB Ri, Rn, #(4*nb_registers).
	 Rewind to the lowest address so the loads can run as two
	 incrementing LDMIAs; the final one loads PC so no branch-back is
	 needed.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (ri, rn, (4 * nb_registers)));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (wback && restore_pc && !restore_rn)
    {
      /* Choose a Ri in the high-register-list that will be restored.  */
      ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

      /* SUB Rn, Rn, #(4*nb_registers)  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (rn, rn, (4 * nb_registers)));

      /* MOV Ri, Rn.  */
      current_stub_contents =
	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
			    create_instruction_mov (ri, rn));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (!wback && !restore_pc && restore_rn)
    {
      ri = rn;
      if (!(insn_low_registers & (1 << rn)))
	{
	  /* Choose a Ri in the low-register-list that will be restored.  */
	  ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));

	  /* MOV Ri, Rn.  */
	  current_stub_contents =
	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
				create_instruction_mov (ri, rn));
	}

      /* LDMDB Ri!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/1, insn_high_registers));

      /* LDMDB Ri, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/0, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (!wback && restore_pc && restore_rn)
    {
      ri = rn;
      if (!(insn_high_registers & (1 << rn)))
	{
	  /* Choose a Ri in the high-register-list that will be restored.  */
	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
	}

      /* SUB Ri, Rn, #(4*nb_registers).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (ri, rn, (4 * nb_registers)));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (wback && restore_rn)
    {
      /* The assembler should not have accepted to encode this.  */
      BFD_ASSERT (0 && "Cannot patch an instruction that has an "
		  "undefined behavior.\n");
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
}
static void
stm32l4xx_create_replacing_stub_vldm (struct elf32_arm_link_hash_table * htab,
				      bfd * output_bfd,
				      const insn32 initial_insn,
				      const bfd_byte *const initial_insn_addr,
				      bfd_byte *const base_stub_contents)
{
  /* Emit, at BASE_STUB_CONTENTS, a veneer replacing the Thumb-2 VLDM
     instruction INITIAL_INSN (at INITIAL_INSN_ADDR), splitting loads of
     more than 8 words into 8-word chunks for the STM32L4xx erratum.  */
  int num_words = ((unsigned int) initial_insn << 24) >> 24;
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_vldm (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 words load sequences that do not cause the
     hardware issue.  */
  if (num_words <= 8)
    {
      /* Untouched instruction.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else
    {
      bfd_boolean is_dp = /* DP encoding. */
	(initial_insn & 0xfe100f00) == 0xec100b00;
      bfd_boolean is_ia_nobang = /* (IA without !).  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x4;
      bfd_boolean is_ia_bang = /* (IA with !) - includes VPOP.  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x5;
      bfd_boolean is_db_bang = /* (DB with !).  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x9;
      int base_reg = ((unsigned int) initial_insn << 12) >> 28;
      /* d = UInt (Vd:D);.  */
      int first_reg = ((((unsigned int) initial_insn << 16) >> 28) << 1)
	| (((unsigned int)initial_insn << 9) >> 31);

      /* Compute the number of 8-words chunks needed to split.  */
      int chunks = (num_words % 8) ? (num_words / 8 + 1) : (num_words / 8);
      int chunk;

      /* The test coverage has been done assuming the following
	 hypothesis that exactly one of the previous is_ predicates is
	 true.  */
      BFD_ASSERT (    (is_ia_nobang ^ is_ia_bang ^ is_db_bang)
		  && !(is_ia_nobang & is_ia_bang & is_db_bang));

      /* We treat the cutting of the words in one pass for all
	 cases, then we emit the adjustments:

	 vldm rx, {...}
	 -> vldm rx!, {8_words_or_less} for each needed 8_word
	 -> sub rx, rx, #size (list)

	 vldm rx!, {...}
	 -> vldm rx!, {8_words_or_less} for each needed 8_word
	 This also handles vpop instruction (when rx is sp)

	 vldmd rx!, {...}
	 -> vldmb rx!, {8_words_or_less} for each needed 8_word.  */
      for (chunk = 0; chunk < chunks; ++chunk)
	{
	  bfd_vma new_insn = 0;

	  if (is_ia_nobang || is_ia_bang)
	    {
	      /* Last chunk may carry fewer than 8 words.  */
	      new_insn = create_instruction_vldmia
		(base_reg,
		 is_dp,
		 /*wback= .  */1,
		 chunks - (chunk + 1) ?
		 8 : num_words - chunk * 8,
		 first_reg + chunk * 8);
	    }
	  else if (is_db_bang)
	    {
	      new_insn = create_instruction_vldmdb
		(base_reg,
		 is_dp,
		 chunks - (chunk + 1) ?
		 8 : num_words - chunk * 8,
		 first_reg + chunk * 8);
	    }

	  if (new_insn)
	    current_stub_contents =
	      push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				  new_insn);
	}

      /* Only this case requires the base register compensation
	 subtract.  */
      if (is_ia_nobang)
	{
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_sub
				(base_reg, base_reg, 4*num_words));
	}

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
}
static void
stm32l4xx_create_replacing_stub (struct elf32_arm_link_hash_table * htab,
				 bfd * output_bfd,
				 const insn32 wrong_insn,
				 const bfd_byte *const wrong_insn_addr,
				 bfd_byte *const stub_contents)
{
  /* Dispatch on the kind of erratum-triggering instruction and emit the
     matching replacement veneer at STUB_CONTENTS.  */
  if (is_thumb2_ldmia (wrong_insn))
    stm32l4xx_create_replacing_stub_ldmia (htab, output_bfd,
					   wrong_insn, wrong_insn_addr,
					   stub_contents);
  else if (is_thumb2_ldmdb (wrong_insn))
    stm32l4xx_create_replacing_stub_ldmdb (htab, output_bfd,
					   wrong_insn, wrong_insn_addr,
					   stub_contents);
  else if (is_thumb2_vldm (wrong_insn))
    stm32l4xx_create_replacing_stub_vldm (htab, output_bfd,
					  wrong_insn, wrong_insn_addr,
					  stub_contents);
}
17313 /* End of stm32l4xx work-around. */
static void
elf32_arm_add_relocation (bfd *output_bfd, struct bfd_link_info *info,
			  asection *output_sec, Elf_Internal_Rela *rel)
{
  /* Append the relocation REL to the relocation section attached to
     OUTPUT_SEC, using whichever of .rel / .rela headers the section
     owns, and bump its count.  */
  BFD_ASSERT (output_sec && rel);
  struct bfd_elf_section_reloc_data *output_reldata;
  struct elf32_arm_link_hash_table *htab;
  struct bfd_elf_section_data *oesd = elf_section_data (output_sec);
  Elf_Internal_Shdr *rel_hdr;

  if (oesd->rel.hdr)
    {
      rel_hdr = oesd->rel.hdr;
      output_reldata = &(oesd->rel);
    }
  else if (oesd->rela.hdr)
    {
      rel_hdr = oesd->rela.hdr;
      output_reldata = &(oesd->rela);
    }
  else
    {
      /* The section must have a reloc header of one flavour.  */
      abort ();
    }

  /* Write the new entry at the current end of the reloc contents.  */
  bfd_byte *erel = rel_hdr->contents;
  erel += output_reldata->count * rel_hdr->sh_entsize;
  htab = elf32_arm_hash_table (info);
  SWAP_RELOC_OUT (htab) (output_bfd, rel, erel);
  output_reldata->count++;
}
17349 /* Do code byteswapping. Return FALSE afterwards so that the section is
17350 written out as normal. */
static bfd_boolean
elf32_arm_write_section (bfd *output_bfd,
			 struct bfd_link_info *link_info,
			 asection *sec,
			 bfd_byte *contents)
{
  /* Final fixups applied as SEC is written out: patch VFP11 and
     STM32L4XX erratum veneers, rewrite edited .ARM.exidx contents,
     redirect Cortex-A8 erratum branches, and byteswap code regions when
     --be8 style byteswapping is requested.  Returns FALSE to let the
     generic code write the (possibly modified) CONTENTS, TRUE when the
     section was written here (edited EXIDX case).  */
  unsigned int mapcount, errcount;
  _arm_elf_section_data *arm_data;
  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
  elf32_arm_section_map *map;
  elf32_vfp11_erratum_list *errnode;
  elf32_stm32l4xx_erratum_list *stm32l4xx_errnode;
  bfd_vma ptr;
  bfd_vma end;
  bfd_vma offset = sec->output_section->vma + sec->output_offset;
  bfd_byte tmp;
  unsigned int i;

  if (globals == NULL)
    return FALSE;

  /* If this section has not been allocated an _arm_elf_section_data
     structure then we cannot record anything.  */
  arm_data = get_arm_elf_section_data (sec);
  if (arm_data == NULL)
    return FALSE;

  mapcount = arm_data->mapcount;
  map = arm_data->map;
  errcount = arm_data->erratumcount;

  if (errcount != 0)
    {
      unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;

      for (errnode = arm_data->erratumlist; errnode != 0;
	   errnode = errnode->next)
	{
	  bfd_vma target = errnode->vma - offset;

	  switch (errnode->type)
	    {
	    case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
	      {
		bfd_vma branch_to_veneer;
		/* Original condition code of instruction, plus bit mask for
		   ARM B instruction.  */
		unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
				    | 0x0a000000;

		/* The instruction is before the label.  */
		target -= 4;

		/* Above offset included in -4 below.  */
		branch_to_veneer = errnode->u.b.veneer->vma
				   - errnode->vma - 4;

		if ((signed) branch_to_veneer < -(1 << 25)
		    || (signed) branch_to_veneer >= (1 << 25))
		  (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
					   "range"), output_bfd);

		insn |= (branch_to_veneer >> 2) & 0xffffff;
		contents[endianflip ^ target] = insn & 0xff;
		contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
		contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
		contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
	      }
	      break;

	    case VFP11_ERRATUM_ARM_VENEER:
	      {
		bfd_vma branch_from_veneer;
		unsigned int insn;

		/* Take size of veneer into account.  */
		branch_from_veneer = errnode->u.v.branch->vma
				     - errnode->vma - 12;

		if ((signed) branch_from_veneer < -(1 << 25)
		    || (signed) branch_from_veneer >= (1 << 25))
		  (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
					   "range"), output_bfd);

		/* Original instruction.  */
		insn = errnode->u.v.branch->u.b.vfp_insn;
		contents[endianflip ^ target] = insn & 0xff;
		contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
		contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
		contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;

		/* Branch back to insn after original insn.  */
		insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
		contents[endianflip ^ (target + 4)] = insn & 0xff;
		contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff;
		contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff;
		contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff;
	      }
	      break;

	    default:
	      abort ();
	    }
	}
    }

  if (arm_data->stm32l4xx_erratumcount != 0)
    {
      for (stm32l4xx_errnode = arm_data->stm32l4xx_erratumlist;
	   stm32l4xx_errnode != 0;
	   stm32l4xx_errnode = stm32l4xx_errnode->next)
	{
	  bfd_vma target = stm32l4xx_errnode->vma - offset;

	  switch (stm32l4xx_errnode->type)
	    {
	    case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
	      {
		unsigned int insn;
		bfd_vma branch_to_veneer =
		  stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma;

		if ((signed) branch_to_veneer < -(1 << 24)
		    || (signed) branch_to_veneer >= (1 << 24))
		  {
		    bfd_vma out_of_range =
		      ((signed) branch_to_veneer < -(1 << 24)) ?
		      - branch_to_veneer - (1 << 24) :
		      ((signed) branch_to_veneer >= (1 << 24)) ?
		      branch_to_veneer - (1 << 24) : 0;

		    (*_bfd_error_handler)
		      (_("%B(%#x): error: Cannot create STM32L4XX veneer. "
			 "Jump out of range by %ld bytes. "
			 "Cannot encode branch instruction. "),
		       output_bfd,
		       (long) (stm32l4xx_errnode->vma - 4),
		       out_of_range);
		    continue;
		  }

		insn = create_instruction_branch_absolute
		  (stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma);

		/* The instruction is before the label.  */
		target -= 4;

		put_thumb2_insn (globals, output_bfd,
				 (bfd_vma) insn, contents + target);
	      }
	      break;

	    case STM32L4XX_ERRATUM_VENEER:
	      {
		bfd_byte * veneer;
		bfd_byte * veneer_r;
		unsigned int insn;

		veneer = contents + target;
		/* veneer_r: the return point (just past the original
		   instruction) relative to the veneer start.  */
		veneer_r = veneer
		  + stm32l4xx_errnode->u.b.veneer->vma
		  - stm32l4xx_errnode->vma - 4;

		if ((signed) (veneer_r - veneer -
			      STM32L4XX_ERRATUM_VLDM_VENEER_SIZE >
			      STM32L4XX_ERRATUM_LDM_VENEER_SIZE ?
			      STM32L4XX_ERRATUM_VLDM_VENEER_SIZE :
			      STM32L4XX_ERRATUM_LDM_VENEER_SIZE) < -(1 << 24)
		    || (signed) (veneer_r - veneer) >= (1 << 24))
		  {
		    (*_bfd_error_handler) (_("%B: error: Cannot create STM32L4XX "
					     "veneer."), output_bfd);
		    continue;
		  }

		/* Original instruction.  */
		insn = stm32l4xx_errnode->u.v.branch->u.b.insn;

		stm32l4xx_create_replacing_stub
		  (globals, output_bfd, insn, (void*)veneer_r, (void*)veneer);
	      }
	      break;

	    default:
	      abort ();
	    }
	}
    }

  if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
    {
      arm_unwind_table_edit *edit_node
	= arm_data->u.exidx.unwind_edit_list;
      /* Now, sec->size is the size of the section we will write.  The original
	 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
	 markers) was sec->rawsize.  (This isn't the case if we perform no
	 edits, then rawsize will be zero and we should use size).  */
      bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size);
      unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
      unsigned int in_index, out_index;
      bfd_vma add_to_offsets = 0;

      for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
	{
	  if (edit_node)
	    {
	      unsigned int edit_index = edit_node->index;

	      if (in_index < edit_index && in_index * 8 < input_size)
		{
		  /* Copy entries untouched by the edit list.  */
		  copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
				    contents + in_index * 8, add_to_offsets);
		  out_index++;
		  in_index++;
		}
	      else if (in_index == edit_index
		       || (in_index * 8 >= input_size
			   && edit_index == UINT_MAX))
		{
		  switch (edit_node->type)
		    {
		    case DELETE_EXIDX_ENTRY:
		      in_index++;
		      add_to_offsets += 8;
		      break;

		    case INSERT_EXIDX_CANTUNWIND_AT_END:
		      {
			asection *text_sec = edit_node->linked_section;
			bfd_vma text_offset = text_sec->output_section->vma
					      + text_sec->output_offset
					      + text_sec->size;
			bfd_vma exidx_offset = offset + out_index * 8;
			unsigned long prel31_offset;

			/* Note: this is meant to be equivalent to an
			   R_ARM_PREL31 relocation.  These synthetic
			   EXIDX_CANTUNWIND markers are not relocated by the
			   usual BFD method.  */
			prel31_offset = (text_offset - exidx_offset)
					& 0x7ffffffful;
			if (bfd_link_relocatable (link_info))
			  {
			    /* Here relocation for new EXIDX_CANTUNWIND is
			       created, so there is no need to
			       adjust offset by hand.  */
			    prel31_offset = text_sec->output_offset
					    + text_sec->size;

			    /* New relocation entity.  */
			    asection *text_out = text_sec->output_section;
			    Elf_Internal_Rela rel;
			    rel.r_addend = 0;
			    rel.r_offset = exidx_offset;
			    rel.r_info = ELF32_R_INFO (text_out->target_index,
						       R_ARM_PREL31);

			    elf32_arm_add_relocation (output_bfd, link_info,
						      sec->output_section,
						      &rel);
			  }

			/* First address we can't unwind.  */
			bfd_put_32 (output_bfd, prel31_offset,
				    &edited_contents[out_index * 8]);

			/* Code for EXIDX_CANTUNWIND.  */
			bfd_put_32 (output_bfd, 0x1,
				    &edited_contents[out_index * 8 + 4]);

			out_index++;
			add_to_offsets -= 8;
		      }
		      break;
		    }

		  edit_node = edit_node->next;
		}
	    }
	  else
	    {
	      /* No more edits, copy remaining entries verbatim.  */
	      copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
				contents + in_index * 8, add_to_offsets);
	      out_index++;
	      in_index++;
	    }
	}

      if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
	bfd_set_section_contents (output_bfd, sec->output_section,
				  edited_contents,
				  (file_ptr) sec->output_offset, sec->size);

      return TRUE;
    }

  /* Fix code to point to Cortex-A8 erratum stubs.  */
  if (globals->fix_cortex_a8)
    {
      struct a8_branch_to_stub_data data;

      data.writing_section = sec;
      data.contents = contents;

      bfd_hash_traverse (& globals->stub_hash_table, make_branch_to_a8_stub,
			 & data);
    }

  if (mapcount == 0)
    return FALSE;

  if (globals->byteswap_code)
    {
      qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);

      ptr = map[0].vma;
      for (i = 0; i < mapcount; i++)
	{
	  if (i == mapcount - 1)
	    end = sec->size;
	  else
	    end = map[i + 1].vma;

	  switch (map[i].type)
	    {
	    case 'a':
	      /* Byte swap code words.  */
	      while (ptr + 3 < end)
		{
		  tmp = contents[ptr];
		  contents[ptr] = contents[ptr + 3];
		  contents[ptr + 3] = tmp;
		  tmp = contents[ptr + 1];
		  contents[ptr + 1] = contents[ptr + 2];
		  contents[ptr + 2] = tmp;
		  ptr += 4;
		}
	      break;

	    case 't':
	      /* Byte swap code halfwords.  */
	      while (ptr + 1 < end)
		{
		  tmp = contents[ptr];
		  contents[ptr] = contents[ptr + 1];
		  contents[ptr + 1] = tmp;
		  ptr += 2;
		}
	      break;

	    case 'd':
	      /* Leave data alone.  */
	      break;
	    }
	  ptr = end;
	}
    }

  free (map);
  arm_data->mapcount = -1;
  arm_data->mapsize = 0;
  arm_data->map = NULL;

  return FALSE;
}
17719 /* Mangle thumb function symbols as we read them in. */
static bfd_boolean
elf32_arm_swap_symbol_in (bfd * abfd,
			  const void *psrc,
			  const void *pshn,
			  Elf_Internal_Sym *dst)
{
  /* Read a symbol via the generic swapper, then classify its branch
     type (ARM/Thumb) in st_target_internal and strip the Thumb bit from
     the value.  */
  if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
    return FALSE;
  dst->st_target_internal = 0;

  /* New EABI objects mark thumb function symbols by setting the low bit of
     the address.  */
  if (ELF_ST_TYPE (dst->st_info) == STT_FUNC
      || ELF_ST_TYPE (dst->st_info) == STT_GNU_IFUNC)
    {
      if (dst->st_value & 1)
	{
	  dst->st_value &= ~(bfd_vma) 1;
	  ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal,
				   ST_BRANCH_TO_THUMB);
	}
      else
	ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_ARM);
    }
  else if (ELF_ST_TYPE (dst->st_info) == STT_ARM_TFUNC)
    {
      /* Legacy marking: convert STT_ARM_TFUNC to STT_FUNC + Thumb.  */
      dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_FUNC);
      ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_THUMB);
    }
  else if (ELF_ST_TYPE (dst->st_info) == STT_SECTION)
    ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_LONG);
  else
    ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_UNKNOWN);

  return TRUE;
}
17759 /* Mangle thumb function symbols as we write them out. */
static void
elf32_arm_swap_symbol_out (bfd *abfd,
			   const Elf_Internal_Sym *src,
			   void *cdst,
			   void *shndx)
{
  Elf_Internal_Sym newsym;

  /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
     of the address set, as per the new EABI.  We do this unconditionally
     because objcopy does not set the elf header flags until after
     it writes out the symbol table.  */
  if (ARM_GET_SYM_BRANCH_TYPE (src->st_target_internal) == ST_BRANCH_TO_THUMB)
    {
      newsym = *src;
      if (ELF_ST_TYPE (src->st_info) != STT_GNU_IFUNC)
	newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
      if (newsym.st_shndx != SHN_UNDEF)
	{
	  /* Do this only for defined symbols.  At link type, the static
	     linker will simulate the work of dynamic linker of resolving
	     symbols and will carry over the thumbness of found symbols to
	     the output symbol table.  It's not clear how it happens, but
	     the thumbness of undefined symbols can well be different at
	     runtime, and writing '1' for them will be confusing for users
	     and possibly for dynamic linker itself.  */
	  newsym.st_value |= 1;
	}

      src = &newsym;
    }
  bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
}
17796 /* Add the PT_ARM_EXIDX program header. */
static bfd_boolean
elf32_arm_modify_segment_map (bfd *abfd,
			      struct bfd_link_info *info ATTRIBUTE_UNUSED)
{
  /* Add a PT_ARM_EXIDX program header covering .ARM.exidx, if the
     section exists, is loadable, and no such header exists yet.  */
  struct elf_segment_map *m;
  asection *sec;

  sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
  if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
    {
      /* If there is already a PT_ARM_EXIDX header, then we do not
	 want to add another one.  This situation arises when running
	 "strip"; the input binary already has the header.  */
      m = elf_seg_map (abfd);
      while (m && m->p_type != PT_ARM_EXIDX)
	m = m->next;
      if (!m)
	{
	  m = (struct elf_segment_map *)
	      bfd_zalloc (abfd, sizeof (struct elf_segment_map));
	  if (m == NULL)
	    return FALSE;
	  m->p_type = PT_ARM_EXIDX;
	  m->count = 1;
	  m->sections[0] = sec;

	  /* Prepend so the header is emitted before the others.  */
	  m->next = elf_seg_map (abfd);
	  elf_seg_map (abfd) = m;
	}
    }

  return TRUE;
}
17832 /* We may add a PT_ARM_EXIDX program header. */
17835 elf32_arm_additional_program_headers (bfd
*abfd
,
17836 struct bfd_link_info
*info ATTRIBUTE_UNUSED
)
17840 sec
= bfd_get_section_by_name (abfd
, ".ARM.exidx");
17841 if (sec
!= NULL
&& (sec
->flags
& SEC_LOAD
) != 0)
17847 /* Hook called by the linker routine which adds symbols from an object
17851 elf32_arm_add_symbol_hook (bfd
*abfd
, struct bfd_link_info
*info
,
17852 Elf_Internal_Sym
*sym
, const char **namep
,
17853 flagword
*flagsp
, asection
**secp
, bfd_vma
*valp
)
17855 if (ELF_ST_TYPE (sym
->st_info
) == STT_GNU_IFUNC
17856 && (abfd
->flags
& DYNAMIC
) == 0
17857 && bfd_get_flavour (info
->output_bfd
) == bfd_target_elf_flavour
)
17858 elf_tdata (info
->output_bfd
)->has_gnu_symbols
|= elf_gnu_symbol_ifunc
;
17860 if (elf32_arm_hash_table (info
) == NULL
)
17863 if (elf32_arm_hash_table (info
)->vxworks_p
17864 && !elf_vxworks_add_symbol_hook (abfd
, info
, sym
, namep
,
17865 flagsp
, secp
, valp
))
17871 /* We use this to override swap_symbol_in and swap_symbol_out. */
17872 const struct elf_size_info elf32_arm_size_info
=
17874 sizeof (Elf32_External_Ehdr
),
17875 sizeof (Elf32_External_Phdr
),
17876 sizeof (Elf32_External_Shdr
),
17877 sizeof (Elf32_External_Rel
),
17878 sizeof (Elf32_External_Rela
),
17879 sizeof (Elf32_External_Sym
),
17880 sizeof (Elf32_External_Dyn
),
17881 sizeof (Elf_External_Note
),
17885 ELFCLASS32
, EV_CURRENT
,
17886 bfd_elf32_write_out_phdrs
,
17887 bfd_elf32_write_shdrs_and_ehdr
,
17888 bfd_elf32_checksum_contents
,
17889 bfd_elf32_write_relocs
,
17890 elf32_arm_swap_symbol_in
,
17891 elf32_arm_swap_symbol_out
,
17892 bfd_elf32_slurp_reloc_table
,
17893 bfd_elf32_slurp_symbol_table
,
17894 bfd_elf32_swap_dyn_in
,
17895 bfd_elf32_swap_dyn_out
,
17896 bfd_elf32_swap_reloc_in
,
17897 bfd_elf32_swap_reloc_out
,
17898 bfd_elf32_swap_reloca_in
,
17899 bfd_elf32_swap_reloca_out
17903 read_code32 (const bfd
*abfd
, const bfd_byte
*addr
)
17905 /* V7 BE8 code is always little endian. */
17906 if ((elf_elfheader (abfd
)->e_flags
& EF_ARM_BE8
) != 0)
17907 return bfd_getl32 (addr
);
17909 return bfd_get_32 (abfd
, addr
);
17913 read_code16 (const bfd
*abfd
, const bfd_byte
*addr
)
17915 /* V7 BE8 code is always little endian. */
17916 if ((elf_elfheader (abfd
)->e_flags
& EF_ARM_BE8
) != 0)
17917 return bfd_getl16 (addr
);
17919 return bfd_get_16 (abfd
, addr
);
17922 /* Return size of plt0 entry starting at ADDR
17923 or (bfd_vma) -1 if size can not be determined. */
17926 elf32_arm_plt0_size (const bfd
*abfd
, const bfd_byte
*addr
)
17928 bfd_vma first_word
;
17931 first_word
= read_code32 (abfd
, addr
);
17933 if (first_word
== elf32_arm_plt0_entry
[0])
17934 plt0_size
= 4 * ARRAY_SIZE (elf32_arm_plt0_entry
);
17935 else if (first_word
== elf32_thumb2_plt0_entry
[0])
17936 plt0_size
= 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry
);
17938 /* We don't yet handle this PLT format. */
17939 return (bfd_vma
) -1;
17944 /* Return size of plt entry starting at offset OFFSET
17945 of plt section located at address START
17946 or (bfd_vma) -1 if size can not be determined. */
17949 elf32_arm_plt_size (const bfd
*abfd
, const bfd_byte
*start
, bfd_vma offset
)
17951 bfd_vma first_insn
;
17952 bfd_vma plt_size
= 0;
17953 const bfd_byte
*addr
= start
+ offset
;
17955 /* PLT entry size if fixed on Thumb-only platforms. */
17956 if (read_code32 (abfd
, start
) == elf32_thumb2_plt0_entry
[0])
17957 return 4 * ARRAY_SIZE (elf32_thumb2_plt_entry
);
17959 /* Respect Thumb stub if necessary. */
17960 if (read_code16 (abfd
, addr
) == elf32_arm_plt_thumb_stub
[0])
17962 plt_size
+= 2 * ARRAY_SIZE(elf32_arm_plt_thumb_stub
);
17965 /* Strip immediate from first add. */
17966 first_insn
= read_code32 (abfd
, addr
+ plt_size
) & 0xffffff00;
17968 #ifdef FOUR_WORD_PLT
17969 if (first_insn
== elf32_arm_plt_entry
[0])
17970 plt_size
+= 4 * ARRAY_SIZE (elf32_arm_plt_entry
);
17972 if (first_insn
== elf32_arm_plt_entry_long
[0])
17973 plt_size
+= 4 * ARRAY_SIZE (elf32_arm_plt_entry_long
);
17974 else if (first_insn
== elf32_arm_plt_entry_short
[0])
17975 plt_size
+= 4 * ARRAY_SIZE (elf32_arm_plt_entry_short
);
17978 /* We don't yet handle this PLT format. */
17979 return (bfd_vma
) -1;
17984 /* Implementation is shamelessly borrowed from _bfd_elf_get_synthetic_symtab. */
17987 elf32_arm_get_synthetic_symtab (bfd
*abfd
,
17988 long symcount ATTRIBUTE_UNUSED
,
17989 asymbol
**syms ATTRIBUTE_UNUSED
,
17999 Elf_Internal_Shdr
*hdr
;
18007 if ((abfd
->flags
& (DYNAMIC
| EXEC_P
)) == 0)
18010 if (dynsymcount
<= 0)
18013 relplt
= bfd_get_section_by_name (abfd
, ".rel.plt");
18014 if (relplt
== NULL
)
18017 hdr
= &elf_section_data (relplt
)->this_hdr
;
18018 if (hdr
->sh_link
!= elf_dynsymtab (abfd
)
18019 || (hdr
->sh_type
!= SHT_REL
&& hdr
->sh_type
!= SHT_RELA
))
18022 plt
= bfd_get_section_by_name (abfd
, ".plt");
18026 if (!elf32_arm_size_info
.slurp_reloc_table (abfd
, relplt
, dynsyms
, TRUE
))
18029 data
= plt
->contents
;
18032 if (!bfd_get_full_section_contents(abfd
, (asection
*) plt
, &data
) || data
== NULL
)
18034 bfd_cache_section_contents((asection
*) plt
, data
);
18037 count
= relplt
->size
/ hdr
->sh_entsize
;
18038 size
= count
* sizeof (asymbol
);
18039 p
= relplt
->relocation
;
18040 for (i
= 0; i
< count
; i
++, p
+= elf32_arm_size_info
.int_rels_per_ext_rel
)
18042 size
+= strlen ((*p
->sym_ptr_ptr
)->name
) + sizeof ("@plt");
18043 if (p
->addend
!= 0)
18044 size
+= sizeof ("+0x") - 1 + 8;
18047 s
= *ret
= (asymbol
*) bfd_malloc (size
);
18051 offset
= elf32_arm_plt0_size (abfd
, data
);
18052 if (offset
== (bfd_vma
) -1)
18055 names
= (char *) (s
+ count
);
18056 p
= relplt
->relocation
;
18058 for (i
= 0; i
< count
; i
++, p
+= elf32_arm_size_info
.int_rels_per_ext_rel
)
18062 bfd_vma plt_size
= elf32_arm_plt_size (abfd
, data
, offset
);
18063 if (plt_size
== (bfd_vma
) -1)
18066 *s
= **p
->sym_ptr_ptr
;
18067 /* Undefined syms won't have BSF_LOCAL or BSF_GLOBAL set. Since
18068 we are defining a symbol, ensure one of them is set. */
18069 if ((s
->flags
& BSF_LOCAL
) == 0)
18070 s
->flags
|= BSF_GLOBAL
;
18071 s
->flags
|= BSF_SYNTHETIC
;
18076 len
= strlen ((*p
->sym_ptr_ptr
)->name
);
18077 memcpy (names
, (*p
->sym_ptr_ptr
)->name
, len
);
18079 if (p
->addend
!= 0)
18083 memcpy (names
, "+0x", sizeof ("+0x") - 1);
18084 names
+= sizeof ("+0x") - 1;
18085 bfd_sprintf_vma (abfd
, buf
, p
->addend
);
18086 for (a
= buf
; *a
== '0'; ++a
)
18089 memcpy (names
, a
, len
);
18092 memcpy (names
, "@plt", sizeof ("@plt"));
18093 names
+= sizeof ("@plt");
18095 offset
+= plt_size
;
18102 elf32_arm_section_flags (flagword
*flags
, const Elf_Internal_Shdr
* hdr
)
18104 if (hdr
->sh_flags
& SHF_ARM_NOREAD
)
18105 *flags
|= SEC_ELF_NOREAD
;
18110 elf32_arm_lookup_section_flags (char *flag_name
)
18112 if (!strcmp (flag_name
, "SHF_ARM_NOREAD"))
18113 return SHF_ARM_NOREAD
;
18115 return SEC_NO_FLAGS
;
18118 static unsigned int
18119 elf32_arm_count_additional_relocs (asection
*sec
)
18121 struct _arm_elf_section_data
*arm_data
;
18122 arm_data
= get_arm_elf_section_data (sec
);
18123 return arm_data
->additional_reloc_count
;
18126 /* Called to set the sh_flags, sh_link and sh_info fields of OSECTION which
18127 has a type >= SHT_LOOS. Returns TRUE if these fields were initialised
18128 FALSE otherwise. ISECTION is the best guess matching section from the
18129 input bfd IBFD, but it might be NULL. */
18132 elf32_arm_copy_special_section_fields (const bfd
*ibfd ATTRIBUTE_UNUSED
,
18133 bfd
*obfd ATTRIBUTE_UNUSED
,
18134 const Elf_Internal_Shdr
*isection ATTRIBUTE_UNUSED
,
18135 Elf_Internal_Shdr
*osection
)
18137 switch (osection
->sh_type
)
18139 case SHT_ARM_EXIDX
:
18141 Elf_Internal_Shdr
**oheaders
= elf_elfsections (obfd
);
18142 Elf_Internal_Shdr
**iheaders
= elf_elfsections (ibfd
);
18145 osection
->sh_flags
= SHF_ALLOC
| SHF_LINK_ORDER
;
18146 osection
->sh_info
= 0;
18148 /* The sh_link field must be set to the text section associated with
18149 this index section. Unfortunately the ARM EHABI does not specify
18150 exactly how to determine this association. Our caller does try
18151 to match up OSECTION with its corresponding input section however
18152 so that is a good first guess. */
18153 if (isection
!= NULL
18154 && osection
->bfd_section
!= NULL
18155 && isection
->bfd_section
!= NULL
18156 && isection
->bfd_section
->output_section
!= NULL
18157 && isection
->bfd_section
->output_section
== osection
->bfd_section
18158 && iheaders
!= NULL
18159 && isection
->sh_link
> 0
18160 && isection
->sh_link
< elf_numsections (ibfd
)
18161 && iheaders
[isection
->sh_link
]->bfd_section
!= NULL
18162 && iheaders
[isection
->sh_link
]->bfd_section
->output_section
!= NULL
18165 for (i
= elf_numsections (obfd
); i
-- > 0;)
18166 if (oheaders
[i
]->bfd_section
18167 == iheaders
[isection
->sh_link
]->bfd_section
->output_section
)
18173 /* Failing that we have to find a matching section ourselves. If
18174 we had the output section name available we could compare that
18175 with input section names. Unfortunately we don't. So instead
18176 we use a simple heuristic and look for the nearest executable
18177 section before this one. */
18178 for (i
= elf_numsections (obfd
); i
-- > 0;)
18179 if (oheaders
[i
] == osection
)
18185 if (oheaders
[i
]->sh_type
== SHT_PROGBITS
18186 && (oheaders
[i
]->sh_flags
& (SHF_ALLOC
| SHF_EXECINSTR
))
18187 == (SHF_ALLOC
| SHF_EXECINSTR
))
18193 osection
->sh_link
= i
;
18194 /* If the text section was part of a group
18195 then the index section should be too. */
18196 if (oheaders
[i
]->sh_flags
& SHF_GROUP
)
18197 osection
->sh_flags
|= SHF_GROUP
;
18203 case SHT_ARM_PREEMPTMAP
:
18204 osection
->sh_flags
= SHF_ALLOC
;
18207 case SHT_ARM_ATTRIBUTES
:
18208 case SHT_ARM_DEBUGOVERLAY
:
18209 case SHT_ARM_OVERLAYSECTION
:
18217 #undef elf_backend_copy_special_section_fields
18218 #define elf_backend_copy_special_section_fields elf32_arm_copy_special_section_fields
18220 #define ELF_ARCH bfd_arch_arm
18221 #define ELF_TARGET_ID ARM_ELF_DATA
18222 #define ELF_MACHINE_CODE EM_ARM
18223 #ifdef __QNXTARGET__
18224 #define ELF_MAXPAGESIZE 0x1000
18226 #define ELF_MAXPAGESIZE 0x10000
18228 #define ELF_MINPAGESIZE 0x1000
18229 #define ELF_COMMONPAGESIZE 0x1000
18231 #define bfd_elf32_mkobject elf32_arm_mkobject
18233 #define bfd_elf32_bfd_copy_private_bfd_data elf32_arm_copy_private_bfd_data
18234 #define bfd_elf32_bfd_merge_private_bfd_data elf32_arm_merge_private_bfd_data
18235 #define bfd_elf32_bfd_set_private_flags elf32_arm_set_private_flags
18236 #define bfd_elf32_bfd_print_private_bfd_data elf32_arm_print_private_bfd_data
18237 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_link_hash_table_create
18238 #define bfd_elf32_bfd_reloc_type_lookup elf32_arm_reloc_type_lookup
18239 #define bfd_elf32_bfd_reloc_name_lookup elf32_arm_reloc_name_lookup
18240 #define bfd_elf32_find_nearest_line elf32_arm_find_nearest_line
18241 #define bfd_elf32_find_inliner_info elf32_arm_find_inliner_info
18242 #define bfd_elf32_new_section_hook elf32_arm_new_section_hook
18243 #define bfd_elf32_bfd_is_target_special_symbol elf32_arm_is_target_special_symbol
18244 #define bfd_elf32_bfd_final_link elf32_arm_final_link
18245 #define bfd_elf32_get_synthetic_symtab elf32_arm_get_synthetic_symtab
18247 #define elf_backend_get_symbol_type elf32_arm_get_symbol_type
18248 #define elf_backend_gc_mark_hook elf32_arm_gc_mark_hook
18249 #define elf_backend_gc_mark_extra_sections elf32_arm_gc_mark_extra_sections
18250 #define elf_backend_gc_sweep_hook elf32_arm_gc_sweep_hook
18251 #define elf_backend_check_relocs elf32_arm_check_relocs
18252 #define elf_backend_relocate_section elf32_arm_relocate_section
18253 #define elf_backend_write_section elf32_arm_write_section
18254 #define elf_backend_adjust_dynamic_symbol elf32_arm_adjust_dynamic_symbol
18255 #define elf_backend_create_dynamic_sections elf32_arm_create_dynamic_sections
18256 #define elf_backend_finish_dynamic_symbol elf32_arm_finish_dynamic_symbol
18257 #define elf_backend_finish_dynamic_sections elf32_arm_finish_dynamic_sections
18258 #define elf_backend_size_dynamic_sections elf32_arm_size_dynamic_sections
18259 #define elf_backend_always_size_sections elf32_arm_always_size_sections
18260 #define elf_backend_init_index_section _bfd_elf_init_2_index_sections
18261 #define elf_backend_post_process_headers elf32_arm_post_process_headers
18262 #define elf_backend_reloc_type_class elf32_arm_reloc_type_class
18263 #define elf_backend_object_p elf32_arm_object_p
18264 #define elf_backend_fake_sections elf32_arm_fake_sections
18265 #define elf_backend_section_from_shdr elf32_arm_section_from_shdr
18266 #define elf_backend_final_write_processing elf32_arm_final_write_processing
18267 #define elf_backend_copy_indirect_symbol elf32_arm_copy_indirect_symbol
18268 #define elf_backend_size_info elf32_arm_size_info
18269 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
18270 #define elf_backend_additional_program_headers elf32_arm_additional_program_headers
18271 #define elf_backend_output_arch_local_syms elf32_arm_output_arch_local_syms
18272 #define elf_backend_begin_write_processing elf32_arm_begin_write_processing
18273 #define elf_backend_add_symbol_hook elf32_arm_add_symbol_hook
18274 #define elf_backend_count_additional_relocs elf32_arm_count_additional_relocs
18276 #define elf_backend_can_refcount 1
18277 #define elf_backend_can_gc_sections 1
18278 #define elf_backend_plt_readonly 1
18279 #define elf_backend_want_got_plt 1
18280 #define elf_backend_want_plt_sym 0
18281 #define elf_backend_may_use_rel_p 1
18282 #define elf_backend_may_use_rela_p 0
18283 #define elf_backend_default_use_rela_p 0
18285 #define elf_backend_got_header_size 12
18286 #define elf_backend_extern_protected_data 1
18288 #undef elf_backend_obj_attrs_vendor
18289 #define elf_backend_obj_attrs_vendor "aeabi"
18290 #undef elf_backend_obj_attrs_section
18291 #define elf_backend_obj_attrs_section ".ARM.attributes"
18292 #undef elf_backend_obj_attrs_arg_type
18293 #define elf_backend_obj_attrs_arg_type elf32_arm_obj_attrs_arg_type
18294 #undef elf_backend_obj_attrs_section_type
18295 #define elf_backend_obj_attrs_section_type SHT_ARM_ATTRIBUTES
18296 #define elf_backend_obj_attrs_order elf32_arm_obj_attrs_order
18297 #define elf_backend_obj_attrs_handle_unknown elf32_arm_obj_attrs_handle_unknown
18299 #undef elf_backend_section_flags
18300 #define elf_backend_section_flags elf32_arm_section_flags
18301 #undef elf_backend_lookup_section_flags_hook
18302 #define elf_backend_lookup_section_flags_hook elf32_arm_lookup_section_flags
18304 #include "elf32-target.h"
18306 /* Native Client targets. */
18308 #undef TARGET_LITTLE_SYM
18309 #define TARGET_LITTLE_SYM arm_elf32_nacl_le_vec
18310 #undef TARGET_LITTLE_NAME
18311 #define TARGET_LITTLE_NAME "elf32-littlearm-nacl"
18312 #undef TARGET_BIG_SYM
18313 #define TARGET_BIG_SYM arm_elf32_nacl_be_vec
18314 #undef TARGET_BIG_NAME
18315 #define TARGET_BIG_NAME "elf32-bigarm-nacl"
18317 /* Like elf32_arm_link_hash_table_create -- but overrides
18318 appropriately for NaCl. */
18320 static struct bfd_link_hash_table
*
18321 elf32_arm_nacl_link_hash_table_create (bfd
*abfd
)
18323 struct bfd_link_hash_table
*ret
;
18325 ret
= elf32_arm_link_hash_table_create (abfd
);
18328 struct elf32_arm_link_hash_table
*htab
18329 = (struct elf32_arm_link_hash_table
*) ret
;
18333 htab
->plt_header_size
= 4 * ARRAY_SIZE (elf32_arm_nacl_plt0_entry
);
18334 htab
->plt_entry_size
= 4 * ARRAY_SIZE (elf32_arm_nacl_plt_entry
);
18339 /* Since NaCl doesn't use the ARM-specific unwind format, we don't
18340 really need to use elf32_arm_modify_segment_map. But we do it
18341 anyway just to reduce gratuitous differences with the stock ARM backend. */
18344 elf32_arm_nacl_modify_segment_map (bfd
*abfd
, struct bfd_link_info
*info
)
18346 return (elf32_arm_modify_segment_map (abfd
, info
)
18347 && nacl_modify_segment_map (abfd
, info
));
18351 elf32_arm_nacl_final_write_processing (bfd
*abfd
, bfd_boolean linker
)
18353 elf32_arm_final_write_processing (abfd
, linker
);
18354 nacl_final_write_processing (abfd
, linker
);
18358 elf32_arm_nacl_plt_sym_val (bfd_vma i
, const asection
*plt
,
18359 const arelent
*rel ATTRIBUTE_UNUSED
)
18362 + 4 * (ARRAY_SIZE (elf32_arm_nacl_plt0_entry
) +
18363 i
* ARRAY_SIZE (elf32_arm_nacl_plt_entry
));
18367 #define elf32_bed elf32_arm_nacl_bed
18368 #undef bfd_elf32_bfd_link_hash_table_create
18369 #define bfd_elf32_bfd_link_hash_table_create \
18370 elf32_arm_nacl_link_hash_table_create
18371 #undef elf_backend_plt_alignment
18372 #define elf_backend_plt_alignment 4
18373 #undef elf_backend_modify_segment_map
18374 #define elf_backend_modify_segment_map elf32_arm_nacl_modify_segment_map
18375 #undef elf_backend_modify_program_headers
18376 #define elf_backend_modify_program_headers nacl_modify_program_headers
18377 #undef elf_backend_final_write_processing
18378 #define elf_backend_final_write_processing elf32_arm_nacl_final_write_processing
18379 #undef bfd_elf32_get_synthetic_symtab
18380 #undef elf_backend_plt_sym_val
18381 #define elf_backend_plt_sym_val elf32_arm_nacl_plt_sym_val
18382 #undef elf_backend_copy_special_section_fields
18384 #undef ELF_MINPAGESIZE
18385 #undef ELF_COMMONPAGESIZE
18388 #include "elf32-target.h"
18390 /* Reset to defaults. */
18391 #undef elf_backend_plt_alignment
18392 #undef elf_backend_modify_segment_map
18393 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
18394 #undef elf_backend_modify_program_headers
18395 #undef elf_backend_final_write_processing
18396 #define elf_backend_final_write_processing elf32_arm_final_write_processing
18397 #undef ELF_MINPAGESIZE
18398 #define ELF_MINPAGESIZE 0x1000
18399 #undef ELF_COMMONPAGESIZE
18400 #define ELF_COMMONPAGESIZE 0x1000
18403 /* VxWorks Targets. */
18405 #undef TARGET_LITTLE_SYM
18406 #define TARGET_LITTLE_SYM arm_elf32_vxworks_le_vec
18407 #undef TARGET_LITTLE_NAME
18408 #define TARGET_LITTLE_NAME "elf32-littlearm-vxworks"
18409 #undef TARGET_BIG_SYM
18410 #define TARGET_BIG_SYM arm_elf32_vxworks_be_vec
18411 #undef TARGET_BIG_NAME
18412 #define TARGET_BIG_NAME "elf32-bigarm-vxworks"
18414 /* Like elf32_arm_link_hash_table_create -- but overrides
18415 appropriately for VxWorks. */
18417 static struct bfd_link_hash_table
*
18418 elf32_arm_vxworks_link_hash_table_create (bfd
*abfd
)
18420 struct bfd_link_hash_table
*ret
;
18422 ret
= elf32_arm_link_hash_table_create (abfd
);
18425 struct elf32_arm_link_hash_table
*htab
18426 = (struct elf32_arm_link_hash_table
*) ret
;
18428 htab
->vxworks_p
= 1;
18434 elf32_arm_vxworks_final_write_processing (bfd
*abfd
, bfd_boolean linker
)
18436 elf32_arm_final_write_processing (abfd
, linker
);
18437 elf_vxworks_final_write_processing (abfd
, linker
);
18441 #define elf32_bed elf32_arm_vxworks_bed
18443 #undef bfd_elf32_bfd_link_hash_table_create
18444 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_vxworks_link_hash_table_create
18445 #undef elf_backend_final_write_processing
18446 #define elf_backend_final_write_processing elf32_arm_vxworks_final_write_processing
18447 #undef elf_backend_emit_relocs
18448 #define elf_backend_emit_relocs elf_vxworks_emit_relocs
18450 #undef elf_backend_may_use_rel_p
18451 #define elf_backend_may_use_rel_p 0
18452 #undef elf_backend_may_use_rela_p
18453 #define elf_backend_may_use_rela_p 1
18454 #undef elf_backend_default_use_rela_p
18455 #define elf_backend_default_use_rela_p 1
18456 #undef elf_backend_want_plt_sym
18457 #define elf_backend_want_plt_sym 1
18458 #undef ELF_MAXPAGESIZE
18459 #define ELF_MAXPAGESIZE 0x1000
18461 #include "elf32-target.h"
18464 /* Merge backend specific data from an object file to the output
18465 object file when linking. */
18468 elf32_arm_merge_private_bfd_data (bfd
* ibfd
, bfd
* obfd
)
18470 flagword out_flags
;
18472 bfd_boolean flags_compatible
= TRUE
;
18475 /* Check if we have the same endianness. */
18476 if (! _bfd_generic_verify_endian_match (ibfd
, obfd
))
18479 if (! is_arm_elf (ibfd
) || ! is_arm_elf (obfd
))
18482 if (!elf32_arm_merge_eabi_attributes (ibfd
, obfd
))
18485 /* The input BFD must have had its flags initialised. */
18486 /* The following seems bogus to me -- The flags are initialized in
18487 the assembler but I don't think an elf_flags_init field is
18488 written into the object. */
18489 /* BFD_ASSERT (elf_flags_init (ibfd)); */
18491 in_flags
= elf_elfheader (ibfd
)->e_flags
;
18492 out_flags
= elf_elfheader (obfd
)->e_flags
;
18494 /* In theory there is no reason why we couldn't handle this. However
18495 in practice it isn't even close to working and there is no real
18496 reason to want it. */
18497 if (EF_ARM_EABI_VERSION (in_flags
) >= EF_ARM_EABI_VER4
18498 && !(ibfd
->flags
& DYNAMIC
)
18499 && (in_flags
& EF_ARM_BE8
))
18501 _bfd_error_handler (_("error: %B is already in final BE8 format"),
18506 if (!elf_flags_init (obfd
))
18508 /* If the input is the default architecture and had the default
18509 flags then do not bother setting the flags for the output
18510 architecture, instead allow future merges to do this. If no
18511 future merges ever set these flags then they will retain their
18512 uninitialised values, which surprise surprise, correspond
18513 to the default values. */
18514 if (bfd_get_arch_info (ibfd
)->the_default
18515 && elf_elfheader (ibfd
)->e_flags
== 0)
18518 elf_flags_init (obfd
) = TRUE
;
18519 elf_elfheader (obfd
)->e_flags
= in_flags
;
18521 if (bfd_get_arch (obfd
) == bfd_get_arch (ibfd
)
18522 && bfd_get_arch_info (obfd
)->the_default
)
18523 return bfd_set_arch_mach (obfd
, bfd_get_arch (ibfd
), bfd_get_mach (ibfd
));
18528 /* Determine what should happen if the input ARM architecture
18529 does not match the output ARM architecture. */
18530 if (! bfd_arm_merge_machines (ibfd
, obfd
))
18533 /* Identical flags must be compatible. */
18534 if (in_flags
== out_flags
)
18537 /* Check to see if the input BFD actually contains any sections. If
18538 not, its flags may not have been initialised either, but it
18539 cannot actually cause any incompatiblity. Do not short-circuit
18540 dynamic objects; their section list may be emptied by
18541 elf_link_add_object_symbols.
18543 Also check to see if there are no code sections in the input.
18544 In this case there is no need to check for code specific flags.
18545 XXX - do we need to worry about floating-point format compatability
18546 in data sections ? */
18547 if (!(ibfd
->flags
& DYNAMIC
))
18549 bfd_boolean null_input_bfd
= TRUE
;
18550 bfd_boolean only_data_sections
= TRUE
;
18552 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
18554 /* Ignore synthetic glue sections. */
18555 if (strcmp (sec
->name
, ".glue_7")
18556 && strcmp (sec
->name
, ".glue_7t"))
18558 if ((bfd_get_section_flags (ibfd
, sec
)
18559 & (SEC_LOAD
| SEC_CODE
| SEC_HAS_CONTENTS
))
18560 == (SEC_LOAD
| SEC_CODE
| SEC_HAS_CONTENTS
))
18561 only_data_sections
= FALSE
;
18563 null_input_bfd
= FALSE
;
18568 if (null_input_bfd
|| only_data_sections
)
18572 /* Complain about various flag mismatches. */
18573 if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags
),
18574 EF_ARM_EABI_VERSION (out_flags
)))
18577 (_("error: Source object %B has EABI version %d, but target %B has EABI version %d"),
18579 (in_flags
& EF_ARM_EABIMASK
) >> 24,
18580 (out_flags
& EF_ARM_EABIMASK
) >> 24);
18584 /* Not sure what needs to be checked for EABI versions >= 1. */
18585 /* VxWorks libraries do not use these flags. */
18586 if (get_elf_backend_data (obfd
) != &elf32_arm_vxworks_bed
18587 && get_elf_backend_data (ibfd
) != &elf32_arm_vxworks_bed
18588 && EF_ARM_EABI_VERSION (in_flags
) == EF_ARM_EABI_UNKNOWN
)
18590 if ((in_flags
& EF_ARM_APCS_26
) != (out_flags
& EF_ARM_APCS_26
))
18593 (_("error: %B is compiled for APCS-%d, whereas target %B uses APCS-%d"),
18595 in_flags
& EF_ARM_APCS_26
? 26 : 32,
18596 out_flags
& EF_ARM_APCS_26
? 26 : 32);
18597 flags_compatible
= FALSE
;
18600 if ((in_flags
& EF_ARM_APCS_FLOAT
) != (out_flags
& EF_ARM_APCS_FLOAT
))
18602 if (in_flags
& EF_ARM_APCS_FLOAT
)
18604 (_("error: %B passes floats in float registers, whereas %B passes them in integer registers"),
18608 (_("error: %B passes floats in integer registers, whereas %B passes them in float registers"),
18611 flags_compatible
= FALSE
;
18614 if ((in_flags
& EF_ARM_VFP_FLOAT
) != (out_flags
& EF_ARM_VFP_FLOAT
))
18616 if (in_flags
& EF_ARM_VFP_FLOAT
)
18618 (_("error: %B uses VFP instructions, whereas %B does not"),
18622 (_("error: %B uses FPA instructions, whereas %B does not"),
18625 flags_compatible
= FALSE
;
18628 if ((in_flags
& EF_ARM_MAVERICK_FLOAT
) != (out_flags
& EF_ARM_MAVERICK_FLOAT
))
18630 if (in_flags
& EF_ARM_MAVERICK_FLOAT
)
18632 (_("error: %B uses Maverick instructions, whereas %B does not"),
18636 (_("error: %B does not use Maverick instructions, whereas %B does"),
18639 flags_compatible
= FALSE
;
18642 #ifdef EF_ARM_SOFT_FLOAT
18643 if ((in_flags
& EF_ARM_SOFT_FLOAT
) != (out_flags
& EF_ARM_SOFT_FLOAT
))
18645 /* We can allow interworking between code that is VFP format
18646 layout, and uses either soft float or integer regs for
18647 passing floating point arguments and results. We already
18648 know that the APCS_FLOAT flags match; similarly for VFP
18650 if ((in_flags
& EF_ARM_APCS_FLOAT
) != 0
18651 || (in_flags
& EF_ARM_VFP_FLOAT
) == 0)
18653 if (in_flags
& EF_ARM_SOFT_FLOAT
)
18655 (_("error: %B uses software FP, whereas %B uses hardware FP"),
18659 (_("error: %B uses hardware FP, whereas %B uses software FP"),
18662 flags_compatible
= FALSE
;
18667 /* Interworking mismatch is only a warning. */
18668 if ((in_flags
& EF_ARM_INTERWORK
) != (out_flags
& EF_ARM_INTERWORK
))
18670 if (in_flags
& EF_ARM_INTERWORK
)
18673 (_("Warning: %B supports interworking, whereas %B does not"),
18679 (_("Warning: %B does not support interworking, whereas %B does"),
18685 return flags_compatible
;
18689 /* Symbian OS Targets. */
18691 #undef TARGET_LITTLE_SYM
18692 #define TARGET_LITTLE_SYM arm_elf32_symbian_le_vec
18693 #undef TARGET_LITTLE_NAME
18694 #define TARGET_LITTLE_NAME "elf32-littlearm-symbian"
18695 #undef TARGET_BIG_SYM
18696 #define TARGET_BIG_SYM arm_elf32_symbian_be_vec
18697 #undef TARGET_BIG_NAME
18698 #define TARGET_BIG_NAME "elf32-bigarm-symbian"
18700 /* Like elf32_arm_link_hash_table_create -- but overrides
18701 appropriately for Symbian OS. */
18703 static struct bfd_link_hash_table
*
18704 elf32_arm_symbian_link_hash_table_create (bfd
*abfd
)
18706 struct bfd_link_hash_table
*ret
;
18708 ret
= elf32_arm_link_hash_table_create (abfd
);
18711 struct elf32_arm_link_hash_table
*htab
18712 = (struct elf32_arm_link_hash_table
*)ret
;
18713 /* There is no PLT header for Symbian OS. */
18714 htab
->plt_header_size
= 0;
18715 /* The PLT entries are each one instruction and one word. */
18716 htab
->plt_entry_size
= 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry
);
18717 htab
->symbian_p
= 1;
18718 /* Symbian uses armv5t or above, so use_blx is always true. */
18720 htab
->root
.is_relocatable_executable
= 1;
18725 static const struct bfd_elf_special_section
18726 elf32_arm_symbian_special_sections
[] =
18728 /* In a BPABI executable, the dynamic linking sections do not go in
18729 the loadable read-only segment. The post-linker may wish to
18730 refer to these sections, but they are not part of the final
18732 { STRING_COMMA_LEN (".dynamic"), 0, SHT_DYNAMIC
, 0 },
18733 { STRING_COMMA_LEN (".dynstr"), 0, SHT_STRTAB
, 0 },
18734 { STRING_COMMA_LEN (".dynsym"), 0, SHT_DYNSYM
, 0 },
18735 { STRING_COMMA_LEN (".got"), 0, SHT_PROGBITS
, 0 },
18736 { STRING_COMMA_LEN (".hash"), 0, SHT_HASH
, 0 },
18737 /* These sections do not need to be writable as the SymbianOS
18738 postlinker will arrange things so that no dynamic relocation is
18740 { STRING_COMMA_LEN (".init_array"), 0, SHT_INIT_ARRAY
, SHF_ALLOC
},
18741 { STRING_COMMA_LEN (".fini_array"), 0, SHT_FINI_ARRAY
, SHF_ALLOC
},
18742 { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY
, SHF_ALLOC
},
18743 { NULL
, 0, 0, 0, 0 }
18747 elf32_arm_symbian_begin_write_processing (bfd
*abfd
,
18748 struct bfd_link_info
*link_info
)
18750 /* BPABI objects are never loaded directly by an OS kernel; they are
18751 processed by a postlinker first, into an OS-specific format. If
18752 the D_PAGED bit is set on the file, BFD will align segments on
18753 page boundaries, so that an OS can directly map the file. With
18754 BPABI objects, that just results in wasted space. In addition,
18755 because we clear the D_PAGED bit, map_sections_to_segments will
18756 recognize that the program headers should not be mapped into any
18757 loadable segment. */
18758 abfd
->flags
&= ~D_PAGED
;
18759 elf32_arm_begin_write_processing (abfd
, link_info
);
18763 elf32_arm_symbian_modify_segment_map (bfd
*abfd
,
18764 struct bfd_link_info
*info
)
18766 struct elf_segment_map
*m
;
18769 /* BPABI shared libraries and executables should have a PT_DYNAMIC
18770 segment. However, because the .dynamic section is not marked
18771 with SEC_LOAD, the generic ELF code will not create such a
18773 dynsec
= bfd_get_section_by_name (abfd
, ".dynamic");
18776 for (m
= elf_seg_map (abfd
); m
!= NULL
; m
= m
->next
)
18777 if (m
->p_type
== PT_DYNAMIC
)
18782 m
= _bfd_elf_make_dynamic_segment (abfd
, dynsec
);
18783 m
->next
= elf_seg_map (abfd
);
18784 elf_seg_map (abfd
) = m
;
18788 /* Also call the generic arm routine. */
18789 return elf32_arm_modify_segment_map (abfd
, info
);
18792 /* Return address for Ith PLT stub in section PLT, for relocation REL
18793 or (bfd_vma) -1 if it should not be included. */
18796 elf32_arm_symbian_plt_sym_val (bfd_vma i
, const asection
*plt
,
18797 const arelent
*rel ATTRIBUTE_UNUSED
)
18799 return plt
->vma
+ 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry
) * i
;
18803 #define elf32_bed elf32_arm_symbian_bed
18805 /* The dynamic sections are not allocated on SymbianOS; the postlinker
18806 will process them and then discard them. */
18807 #undef ELF_DYNAMIC_SEC_FLAGS
18808 #define ELF_DYNAMIC_SEC_FLAGS \
18809 (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED)
18811 #undef elf_backend_emit_relocs
18813 #undef bfd_elf32_bfd_link_hash_table_create
18814 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_symbian_link_hash_table_create
18815 #undef elf_backend_special_sections
18816 #define elf_backend_special_sections elf32_arm_symbian_special_sections
18817 #undef elf_backend_begin_write_processing
18818 #define elf_backend_begin_write_processing elf32_arm_symbian_begin_write_processing
18819 #undef elf_backend_final_write_processing
18820 #define elf_backend_final_write_processing elf32_arm_final_write_processing
18822 #undef elf_backend_modify_segment_map
18823 #define elf_backend_modify_segment_map elf32_arm_symbian_modify_segment_map
18825 /* There is no .got section for BPABI objects, and hence no header. */
18826 #undef elf_backend_got_header_size
18827 #define elf_backend_got_header_size 0
18829 /* Similarly, there is no .got.plt section. */
18830 #undef elf_backend_want_got_plt
18831 #define elf_backend_want_got_plt 0
18833 #undef elf_backend_plt_sym_val
18834 #define elf_backend_plt_sym_val elf32_arm_symbian_plt_sym_val
18836 #undef elf_backend_may_use_rel_p
18837 #define elf_backend_may_use_rel_p 1
18838 #undef elf_backend_may_use_rela_p
18839 #define elf_backend_may_use_rela_p 0
18840 #undef elf_backend_default_use_rela_p
18841 #define elf_backend_default_use_rela_p 0
18842 #undef elf_backend_want_plt_sym
18843 #define elf_backend_want_plt_sym 0
18844 #undef ELF_MAXPAGESIZE
18845 #define ELF_MAXPAGESIZE 0x8000
18847 #include "elf32-target.h"