/* 32-bit ELF support for ARM
   Copyright (C) 1998-2016 Free Software Foundation, Inc.

   This file is part of BFD, the Binary File Descriptor library.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
   MA 02110-1301, USA.  */
25 #include "bfd_stdint.h"
26 #include "libiberty.h"
30 #include "elf-vxworks.h"
/* Return the name of the relocation section that holds relocs for the
   section named NAME.  HTAB is the bfd's elf32_arm_link_hash_table;
   its use_rel flag selects REL (".rel") vs. RELA (".rela") naming.  */
#define RELOC_SECTION(HTAB, NAME) \
  ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
/* Return the size of an external relocation entry.  HTAB is the bfd's
   elf32_arm_link_hash_table; use_rel selects Elf32_External_Rel
   (REL, no addend field) vs. Elf32_External_Rela (RELA).
   NOTE: the conditional's test line was missing from the original
   define; restored to match the sibling RELOC_SECTION macro.  */
#define RELOC_SIZE(HTAB) \
  ((HTAB)->use_rel \
   ? sizeof (Elf32_External_Rel) \
   : sizeof (Elf32_External_Rela))
/* Return the function used to swap relocations in.  HTAB is the bfd's
   elf32_arm_link_hash_table; use_rel selects the REL vs. RELA variant.
   NOTE: the conditional's test line was missing from the original
   define; restored to match the sibling RELOC_SECTION macro.  */
#define SWAP_RELOC_IN(HTAB) \
  ((HTAB)->use_rel \
   ? bfd_elf32_swap_reloc_in \
   : bfd_elf32_swap_reloca_in)
/* Return the function used to swap relocations out.  HTAB is the bfd's
   elf32_arm_link_hash_table; use_rel selects the REL vs. RELA variant.
   NOTE: the conditional's test line was missing from the original
   define; restored to match the sibling RELOC_SECTION macro.  */
#define SWAP_RELOC_OUT(HTAB) \
  ((HTAB)->use_rel \
   ? bfd_elf32_swap_reloc_out \
   : bfd_elf32_swap_reloca_out)
/* Reloc-info conversion hooks: this target converts REL relocs via
   elf32_arm_info_to_howto; the generic (RELA) hook is unused here
   (NOTE(review): 0 disables it — confirm against the backend vector).  */
#define elf_info_to_howto		0
#define elf_info_to_howto_rel		elf32_arm_info_to_howto

/* ELF header identification values for ARM objects.  */
#define ARM_ELF_ABI_VERSION		0
#define ARM_ELF_OS_ABI_VERSION		ELFOSABI_ARM

/* The Adjusted Place, as defined by AAELF: the place with its low two
   bits cleared (word-aligned).  */
#define Pa(X)				((X) & 0xfffffffc)
68 static bfd_boolean
elf32_arm_write_section (bfd
*output_bfd
,
69 struct bfd_link_info
*link_info
,
/* Note: code such as elf32_arm_reloc_type_lookup expect to use e.g.
   R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO
   in that slot.  */
77 static reloc_howto_type elf32_arm_howto_table_1
[] =
80 HOWTO (R_ARM_NONE
, /* type */
82 3, /* size (0 = byte, 1 = short, 2 = long) */
84 FALSE
, /* pc_relative */
86 complain_overflow_dont
,/* complain_on_overflow */
87 bfd_elf_generic_reloc
, /* special_function */
88 "R_ARM_NONE", /* name */
89 FALSE
, /* partial_inplace */
92 FALSE
), /* pcrel_offset */
94 HOWTO (R_ARM_PC24
, /* type */
96 2, /* size (0 = byte, 1 = short, 2 = long) */
98 TRUE
, /* pc_relative */
100 complain_overflow_signed
,/* complain_on_overflow */
101 bfd_elf_generic_reloc
, /* special_function */
102 "R_ARM_PC24", /* name */
103 FALSE
, /* partial_inplace */
104 0x00ffffff, /* src_mask */
105 0x00ffffff, /* dst_mask */
106 TRUE
), /* pcrel_offset */
108 /* 32 bit absolute */
109 HOWTO (R_ARM_ABS32
, /* type */
111 2, /* size (0 = byte, 1 = short, 2 = long) */
113 FALSE
, /* pc_relative */
115 complain_overflow_bitfield
,/* complain_on_overflow */
116 bfd_elf_generic_reloc
, /* special_function */
117 "R_ARM_ABS32", /* name */
118 FALSE
, /* partial_inplace */
119 0xffffffff, /* src_mask */
120 0xffffffff, /* dst_mask */
121 FALSE
), /* pcrel_offset */
123 /* standard 32bit pc-relative reloc */
124 HOWTO (R_ARM_REL32
, /* type */
126 2, /* size (0 = byte, 1 = short, 2 = long) */
128 TRUE
, /* pc_relative */
130 complain_overflow_bitfield
,/* complain_on_overflow */
131 bfd_elf_generic_reloc
, /* special_function */
132 "R_ARM_REL32", /* name */
133 FALSE
, /* partial_inplace */
134 0xffffffff, /* src_mask */
135 0xffffffff, /* dst_mask */
136 TRUE
), /* pcrel_offset */
138 /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */
139 HOWTO (R_ARM_LDR_PC_G0
, /* type */
141 0, /* size (0 = byte, 1 = short, 2 = long) */
143 TRUE
, /* pc_relative */
145 complain_overflow_dont
,/* complain_on_overflow */
146 bfd_elf_generic_reloc
, /* special_function */
147 "R_ARM_LDR_PC_G0", /* name */
148 FALSE
, /* partial_inplace */
149 0xffffffff, /* src_mask */
150 0xffffffff, /* dst_mask */
151 TRUE
), /* pcrel_offset */
153 /* 16 bit absolute */
154 HOWTO (R_ARM_ABS16
, /* type */
156 1, /* size (0 = byte, 1 = short, 2 = long) */
158 FALSE
, /* pc_relative */
160 complain_overflow_bitfield
,/* complain_on_overflow */
161 bfd_elf_generic_reloc
, /* special_function */
162 "R_ARM_ABS16", /* name */
163 FALSE
, /* partial_inplace */
164 0x0000ffff, /* src_mask */
165 0x0000ffff, /* dst_mask */
166 FALSE
), /* pcrel_offset */
168 /* 12 bit absolute */
169 HOWTO (R_ARM_ABS12
, /* type */
171 2, /* size (0 = byte, 1 = short, 2 = long) */
173 FALSE
, /* pc_relative */
175 complain_overflow_bitfield
,/* complain_on_overflow */
176 bfd_elf_generic_reloc
, /* special_function */
177 "R_ARM_ABS12", /* name */
178 FALSE
, /* partial_inplace */
179 0x00000fff, /* src_mask */
180 0x00000fff, /* dst_mask */
181 FALSE
), /* pcrel_offset */
183 HOWTO (R_ARM_THM_ABS5
, /* type */
185 1, /* size (0 = byte, 1 = short, 2 = long) */
187 FALSE
, /* pc_relative */
189 complain_overflow_bitfield
,/* complain_on_overflow */
190 bfd_elf_generic_reloc
, /* special_function */
191 "R_ARM_THM_ABS5", /* name */
192 FALSE
, /* partial_inplace */
193 0x000007e0, /* src_mask */
194 0x000007e0, /* dst_mask */
195 FALSE
), /* pcrel_offset */
198 HOWTO (R_ARM_ABS8
, /* type */
200 0, /* size (0 = byte, 1 = short, 2 = long) */
202 FALSE
, /* pc_relative */
204 complain_overflow_bitfield
,/* complain_on_overflow */
205 bfd_elf_generic_reloc
, /* special_function */
206 "R_ARM_ABS8", /* name */
207 FALSE
, /* partial_inplace */
208 0x000000ff, /* src_mask */
209 0x000000ff, /* dst_mask */
210 FALSE
), /* pcrel_offset */
212 HOWTO (R_ARM_SBREL32
, /* type */
214 2, /* size (0 = byte, 1 = short, 2 = long) */
216 FALSE
, /* pc_relative */
218 complain_overflow_dont
,/* complain_on_overflow */
219 bfd_elf_generic_reloc
, /* special_function */
220 "R_ARM_SBREL32", /* name */
221 FALSE
, /* partial_inplace */
222 0xffffffff, /* src_mask */
223 0xffffffff, /* dst_mask */
224 FALSE
), /* pcrel_offset */
226 HOWTO (R_ARM_THM_CALL
, /* type */
228 2, /* size (0 = byte, 1 = short, 2 = long) */
230 TRUE
, /* pc_relative */
232 complain_overflow_signed
,/* complain_on_overflow */
233 bfd_elf_generic_reloc
, /* special_function */
234 "R_ARM_THM_CALL", /* name */
235 FALSE
, /* partial_inplace */
236 0x07ff2fff, /* src_mask */
237 0x07ff2fff, /* dst_mask */
238 TRUE
), /* pcrel_offset */
240 HOWTO (R_ARM_THM_PC8
, /* type */
242 1, /* size (0 = byte, 1 = short, 2 = long) */
244 TRUE
, /* pc_relative */
246 complain_overflow_signed
,/* complain_on_overflow */
247 bfd_elf_generic_reloc
, /* special_function */
248 "R_ARM_THM_PC8", /* name */
249 FALSE
, /* partial_inplace */
250 0x000000ff, /* src_mask */
251 0x000000ff, /* dst_mask */
252 TRUE
), /* pcrel_offset */
254 HOWTO (R_ARM_BREL_ADJ
, /* type */
256 1, /* size (0 = byte, 1 = short, 2 = long) */
258 FALSE
, /* pc_relative */
260 complain_overflow_signed
,/* complain_on_overflow */
261 bfd_elf_generic_reloc
, /* special_function */
262 "R_ARM_BREL_ADJ", /* name */
263 FALSE
, /* partial_inplace */
264 0xffffffff, /* src_mask */
265 0xffffffff, /* dst_mask */
266 FALSE
), /* pcrel_offset */
268 HOWTO (R_ARM_TLS_DESC
, /* type */
270 2, /* size (0 = byte, 1 = short, 2 = long) */
272 FALSE
, /* pc_relative */
274 complain_overflow_bitfield
,/* complain_on_overflow */
275 bfd_elf_generic_reloc
, /* special_function */
276 "R_ARM_TLS_DESC", /* name */
277 FALSE
, /* partial_inplace */
278 0xffffffff, /* src_mask */
279 0xffffffff, /* dst_mask */
280 FALSE
), /* pcrel_offset */
282 HOWTO (R_ARM_THM_SWI8
, /* type */
284 0, /* size (0 = byte, 1 = short, 2 = long) */
286 FALSE
, /* pc_relative */
288 complain_overflow_signed
,/* complain_on_overflow */
289 bfd_elf_generic_reloc
, /* special_function */
290 "R_ARM_SWI8", /* name */
291 FALSE
, /* partial_inplace */
292 0x00000000, /* src_mask */
293 0x00000000, /* dst_mask */
294 FALSE
), /* pcrel_offset */
296 /* BLX instruction for the ARM. */
297 HOWTO (R_ARM_XPC25
, /* type */
299 2, /* size (0 = byte, 1 = short, 2 = long) */
301 TRUE
, /* pc_relative */
303 complain_overflow_signed
,/* complain_on_overflow */
304 bfd_elf_generic_reloc
, /* special_function */
305 "R_ARM_XPC25", /* name */
306 FALSE
, /* partial_inplace */
307 0x00ffffff, /* src_mask */
308 0x00ffffff, /* dst_mask */
309 TRUE
), /* pcrel_offset */
311 /* BLX instruction for the Thumb. */
312 HOWTO (R_ARM_THM_XPC22
, /* type */
314 2, /* size (0 = byte, 1 = short, 2 = long) */
316 TRUE
, /* pc_relative */
318 complain_overflow_signed
,/* complain_on_overflow */
319 bfd_elf_generic_reloc
, /* special_function */
320 "R_ARM_THM_XPC22", /* name */
321 FALSE
, /* partial_inplace */
322 0x07ff2fff, /* src_mask */
323 0x07ff2fff, /* dst_mask */
324 TRUE
), /* pcrel_offset */
326 /* Dynamic TLS relocations. */
328 HOWTO (R_ARM_TLS_DTPMOD32
, /* type */
330 2, /* size (0 = byte, 1 = short, 2 = long) */
332 FALSE
, /* pc_relative */
334 complain_overflow_bitfield
,/* complain_on_overflow */
335 bfd_elf_generic_reloc
, /* special_function */
336 "R_ARM_TLS_DTPMOD32", /* name */
337 TRUE
, /* partial_inplace */
338 0xffffffff, /* src_mask */
339 0xffffffff, /* dst_mask */
340 FALSE
), /* pcrel_offset */
342 HOWTO (R_ARM_TLS_DTPOFF32
, /* type */
344 2, /* size (0 = byte, 1 = short, 2 = long) */
346 FALSE
, /* pc_relative */
348 complain_overflow_bitfield
,/* complain_on_overflow */
349 bfd_elf_generic_reloc
, /* special_function */
350 "R_ARM_TLS_DTPOFF32", /* name */
351 TRUE
, /* partial_inplace */
352 0xffffffff, /* src_mask */
353 0xffffffff, /* dst_mask */
354 FALSE
), /* pcrel_offset */
356 HOWTO (R_ARM_TLS_TPOFF32
, /* type */
358 2, /* size (0 = byte, 1 = short, 2 = long) */
360 FALSE
, /* pc_relative */
362 complain_overflow_bitfield
,/* complain_on_overflow */
363 bfd_elf_generic_reloc
, /* special_function */
364 "R_ARM_TLS_TPOFF32", /* name */
365 TRUE
, /* partial_inplace */
366 0xffffffff, /* src_mask */
367 0xffffffff, /* dst_mask */
368 FALSE
), /* pcrel_offset */
370 /* Relocs used in ARM Linux */
372 HOWTO (R_ARM_COPY
, /* type */
374 2, /* size (0 = byte, 1 = short, 2 = long) */
376 FALSE
, /* pc_relative */
378 complain_overflow_bitfield
,/* complain_on_overflow */
379 bfd_elf_generic_reloc
, /* special_function */
380 "R_ARM_COPY", /* name */
381 TRUE
, /* partial_inplace */
382 0xffffffff, /* src_mask */
383 0xffffffff, /* dst_mask */
384 FALSE
), /* pcrel_offset */
386 HOWTO (R_ARM_GLOB_DAT
, /* type */
388 2, /* size (0 = byte, 1 = short, 2 = long) */
390 FALSE
, /* pc_relative */
392 complain_overflow_bitfield
,/* complain_on_overflow */
393 bfd_elf_generic_reloc
, /* special_function */
394 "R_ARM_GLOB_DAT", /* name */
395 TRUE
, /* partial_inplace */
396 0xffffffff, /* src_mask */
397 0xffffffff, /* dst_mask */
398 FALSE
), /* pcrel_offset */
400 HOWTO (R_ARM_JUMP_SLOT
, /* type */
402 2, /* size (0 = byte, 1 = short, 2 = long) */
404 FALSE
, /* pc_relative */
406 complain_overflow_bitfield
,/* complain_on_overflow */
407 bfd_elf_generic_reloc
, /* special_function */
408 "R_ARM_JUMP_SLOT", /* name */
409 TRUE
, /* partial_inplace */
410 0xffffffff, /* src_mask */
411 0xffffffff, /* dst_mask */
412 FALSE
), /* pcrel_offset */
414 HOWTO (R_ARM_RELATIVE
, /* type */
416 2, /* size (0 = byte, 1 = short, 2 = long) */
418 FALSE
, /* pc_relative */
420 complain_overflow_bitfield
,/* complain_on_overflow */
421 bfd_elf_generic_reloc
, /* special_function */
422 "R_ARM_RELATIVE", /* name */
423 TRUE
, /* partial_inplace */
424 0xffffffff, /* src_mask */
425 0xffffffff, /* dst_mask */
426 FALSE
), /* pcrel_offset */
428 HOWTO (R_ARM_GOTOFF32
, /* type */
430 2, /* size (0 = byte, 1 = short, 2 = long) */
432 FALSE
, /* pc_relative */
434 complain_overflow_bitfield
,/* complain_on_overflow */
435 bfd_elf_generic_reloc
, /* special_function */
436 "R_ARM_GOTOFF32", /* name */
437 TRUE
, /* partial_inplace */
438 0xffffffff, /* src_mask */
439 0xffffffff, /* dst_mask */
440 FALSE
), /* pcrel_offset */
442 HOWTO (R_ARM_GOTPC
, /* type */
444 2, /* size (0 = byte, 1 = short, 2 = long) */
446 TRUE
, /* pc_relative */
448 complain_overflow_bitfield
,/* complain_on_overflow */
449 bfd_elf_generic_reloc
, /* special_function */
450 "R_ARM_GOTPC", /* name */
451 TRUE
, /* partial_inplace */
452 0xffffffff, /* src_mask */
453 0xffffffff, /* dst_mask */
454 TRUE
), /* pcrel_offset */
456 HOWTO (R_ARM_GOT32
, /* type */
458 2, /* size (0 = byte, 1 = short, 2 = long) */
460 FALSE
, /* pc_relative */
462 complain_overflow_bitfield
,/* complain_on_overflow */
463 bfd_elf_generic_reloc
, /* special_function */
464 "R_ARM_GOT32", /* name */
465 TRUE
, /* partial_inplace */
466 0xffffffff, /* src_mask */
467 0xffffffff, /* dst_mask */
468 FALSE
), /* pcrel_offset */
470 HOWTO (R_ARM_PLT32
, /* type */
472 2, /* size (0 = byte, 1 = short, 2 = long) */
474 TRUE
, /* pc_relative */
476 complain_overflow_bitfield
,/* complain_on_overflow */
477 bfd_elf_generic_reloc
, /* special_function */
478 "R_ARM_PLT32", /* name */
479 FALSE
, /* partial_inplace */
480 0x00ffffff, /* src_mask */
481 0x00ffffff, /* dst_mask */
482 TRUE
), /* pcrel_offset */
484 HOWTO (R_ARM_CALL
, /* type */
486 2, /* size (0 = byte, 1 = short, 2 = long) */
488 TRUE
, /* pc_relative */
490 complain_overflow_signed
,/* complain_on_overflow */
491 bfd_elf_generic_reloc
, /* special_function */
492 "R_ARM_CALL", /* name */
493 FALSE
, /* partial_inplace */
494 0x00ffffff, /* src_mask */
495 0x00ffffff, /* dst_mask */
496 TRUE
), /* pcrel_offset */
498 HOWTO (R_ARM_JUMP24
, /* type */
500 2, /* size (0 = byte, 1 = short, 2 = long) */
502 TRUE
, /* pc_relative */
504 complain_overflow_signed
,/* complain_on_overflow */
505 bfd_elf_generic_reloc
, /* special_function */
506 "R_ARM_JUMP24", /* name */
507 FALSE
, /* partial_inplace */
508 0x00ffffff, /* src_mask */
509 0x00ffffff, /* dst_mask */
510 TRUE
), /* pcrel_offset */
512 HOWTO (R_ARM_THM_JUMP24
, /* type */
514 2, /* size (0 = byte, 1 = short, 2 = long) */
516 TRUE
, /* pc_relative */
518 complain_overflow_signed
,/* complain_on_overflow */
519 bfd_elf_generic_reloc
, /* special_function */
520 "R_ARM_THM_JUMP24", /* name */
521 FALSE
, /* partial_inplace */
522 0x07ff2fff, /* src_mask */
523 0x07ff2fff, /* dst_mask */
524 TRUE
), /* pcrel_offset */
526 HOWTO (R_ARM_BASE_ABS
, /* type */
528 2, /* size (0 = byte, 1 = short, 2 = long) */
530 FALSE
, /* pc_relative */
532 complain_overflow_dont
,/* complain_on_overflow */
533 bfd_elf_generic_reloc
, /* special_function */
534 "R_ARM_BASE_ABS", /* name */
535 FALSE
, /* partial_inplace */
536 0xffffffff, /* src_mask */
537 0xffffffff, /* dst_mask */
538 FALSE
), /* pcrel_offset */
540 HOWTO (R_ARM_ALU_PCREL7_0
, /* type */
542 2, /* size (0 = byte, 1 = short, 2 = long) */
544 TRUE
, /* pc_relative */
546 complain_overflow_dont
,/* complain_on_overflow */
547 bfd_elf_generic_reloc
, /* special_function */
548 "R_ARM_ALU_PCREL_7_0", /* name */
549 FALSE
, /* partial_inplace */
550 0x00000fff, /* src_mask */
551 0x00000fff, /* dst_mask */
552 TRUE
), /* pcrel_offset */
554 HOWTO (R_ARM_ALU_PCREL15_8
, /* type */
556 2, /* size (0 = byte, 1 = short, 2 = long) */
558 TRUE
, /* pc_relative */
560 complain_overflow_dont
,/* complain_on_overflow */
561 bfd_elf_generic_reloc
, /* special_function */
562 "R_ARM_ALU_PCREL_15_8",/* name */
563 FALSE
, /* partial_inplace */
564 0x00000fff, /* src_mask */
565 0x00000fff, /* dst_mask */
566 TRUE
), /* pcrel_offset */
568 HOWTO (R_ARM_ALU_PCREL23_15
, /* type */
570 2, /* size (0 = byte, 1 = short, 2 = long) */
572 TRUE
, /* pc_relative */
574 complain_overflow_dont
,/* complain_on_overflow */
575 bfd_elf_generic_reloc
, /* special_function */
576 "R_ARM_ALU_PCREL_23_15",/* name */
577 FALSE
, /* partial_inplace */
578 0x00000fff, /* src_mask */
579 0x00000fff, /* dst_mask */
580 TRUE
), /* pcrel_offset */
582 HOWTO (R_ARM_LDR_SBREL_11_0
, /* type */
584 2, /* size (0 = byte, 1 = short, 2 = long) */
586 FALSE
, /* pc_relative */
588 complain_overflow_dont
,/* complain_on_overflow */
589 bfd_elf_generic_reloc
, /* special_function */
590 "R_ARM_LDR_SBREL_11_0",/* name */
591 FALSE
, /* partial_inplace */
592 0x00000fff, /* src_mask */
593 0x00000fff, /* dst_mask */
594 FALSE
), /* pcrel_offset */
596 HOWTO (R_ARM_ALU_SBREL_19_12
, /* type */
598 2, /* size (0 = byte, 1 = short, 2 = long) */
600 FALSE
, /* pc_relative */
602 complain_overflow_dont
,/* complain_on_overflow */
603 bfd_elf_generic_reloc
, /* special_function */
604 "R_ARM_ALU_SBREL_19_12",/* name */
605 FALSE
, /* partial_inplace */
606 0x000ff000, /* src_mask */
607 0x000ff000, /* dst_mask */
608 FALSE
), /* pcrel_offset */
610 HOWTO (R_ARM_ALU_SBREL_27_20
, /* type */
612 2, /* size (0 = byte, 1 = short, 2 = long) */
614 FALSE
, /* pc_relative */
616 complain_overflow_dont
,/* complain_on_overflow */
617 bfd_elf_generic_reloc
, /* special_function */
618 "R_ARM_ALU_SBREL_27_20",/* name */
619 FALSE
, /* partial_inplace */
620 0x0ff00000, /* src_mask */
621 0x0ff00000, /* dst_mask */
622 FALSE
), /* pcrel_offset */
624 HOWTO (R_ARM_TARGET1
, /* type */
626 2, /* size (0 = byte, 1 = short, 2 = long) */
628 FALSE
, /* pc_relative */
630 complain_overflow_dont
,/* complain_on_overflow */
631 bfd_elf_generic_reloc
, /* special_function */
632 "R_ARM_TARGET1", /* name */
633 FALSE
, /* partial_inplace */
634 0xffffffff, /* src_mask */
635 0xffffffff, /* dst_mask */
636 FALSE
), /* pcrel_offset */
638 HOWTO (R_ARM_ROSEGREL32
, /* type */
640 2, /* size (0 = byte, 1 = short, 2 = long) */
642 FALSE
, /* pc_relative */
644 complain_overflow_dont
,/* complain_on_overflow */
645 bfd_elf_generic_reloc
, /* special_function */
646 "R_ARM_ROSEGREL32", /* name */
647 FALSE
, /* partial_inplace */
648 0xffffffff, /* src_mask */
649 0xffffffff, /* dst_mask */
650 FALSE
), /* pcrel_offset */
652 HOWTO (R_ARM_V4BX
, /* type */
654 2, /* size (0 = byte, 1 = short, 2 = long) */
656 FALSE
, /* pc_relative */
658 complain_overflow_dont
,/* complain_on_overflow */
659 bfd_elf_generic_reloc
, /* special_function */
660 "R_ARM_V4BX", /* name */
661 FALSE
, /* partial_inplace */
662 0xffffffff, /* src_mask */
663 0xffffffff, /* dst_mask */
664 FALSE
), /* pcrel_offset */
666 HOWTO (R_ARM_TARGET2
, /* type */
668 2, /* size (0 = byte, 1 = short, 2 = long) */
670 FALSE
, /* pc_relative */
672 complain_overflow_signed
,/* complain_on_overflow */
673 bfd_elf_generic_reloc
, /* special_function */
674 "R_ARM_TARGET2", /* name */
675 FALSE
, /* partial_inplace */
676 0xffffffff, /* src_mask */
677 0xffffffff, /* dst_mask */
678 TRUE
), /* pcrel_offset */
680 HOWTO (R_ARM_PREL31
, /* type */
682 2, /* size (0 = byte, 1 = short, 2 = long) */
684 TRUE
, /* pc_relative */
686 complain_overflow_signed
,/* complain_on_overflow */
687 bfd_elf_generic_reloc
, /* special_function */
688 "R_ARM_PREL31", /* name */
689 FALSE
, /* partial_inplace */
690 0x7fffffff, /* src_mask */
691 0x7fffffff, /* dst_mask */
692 TRUE
), /* pcrel_offset */
694 HOWTO (R_ARM_MOVW_ABS_NC
, /* type */
696 2, /* size (0 = byte, 1 = short, 2 = long) */
698 FALSE
, /* pc_relative */
700 complain_overflow_dont
,/* complain_on_overflow */
701 bfd_elf_generic_reloc
, /* special_function */
702 "R_ARM_MOVW_ABS_NC", /* name */
703 FALSE
, /* partial_inplace */
704 0x000f0fff, /* src_mask */
705 0x000f0fff, /* dst_mask */
706 FALSE
), /* pcrel_offset */
708 HOWTO (R_ARM_MOVT_ABS
, /* type */
710 2, /* size (0 = byte, 1 = short, 2 = long) */
712 FALSE
, /* pc_relative */
714 complain_overflow_bitfield
,/* complain_on_overflow */
715 bfd_elf_generic_reloc
, /* special_function */
716 "R_ARM_MOVT_ABS", /* name */
717 FALSE
, /* partial_inplace */
718 0x000f0fff, /* src_mask */
719 0x000f0fff, /* dst_mask */
720 FALSE
), /* pcrel_offset */
722 HOWTO (R_ARM_MOVW_PREL_NC
, /* type */
724 2, /* size (0 = byte, 1 = short, 2 = long) */
726 TRUE
, /* pc_relative */
728 complain_overflow_dont
,/* complain_on_overflow */
729 bfd_elf_generic_reloc
, /* special_function */
730 "R_ARM_MOVW_PREL_NC", /* name */
731 FALSE
, /* partial_inplace */
732 0x000f0fff, /* src_mask */
733 0x000f0fff, /* dst_mask */
734 TRUE
), /* pcrel_offset */
736 HOWTO (R_ARM_MOVT_PREL
, /* type */
738 2, /* size (0 = byte, 1 = short, 2 = long) */
740 TRUE
, /* pc_relative */
742 complain_overflow_bitfield
,/* complain_on_overflow */
743 bfd_elf_generic_reloc
, /* special_function */
744 "R_ARM_MOVT_PREL", /* name */
745 FALSE
, /* partial_inplace */
746 0x000f0fff, /* src_mask */
747 0x000f0fff, /* dst_mask */
748 TRUE
), /* pcrel_offset */
750 HOWTO (R_ARM_THM_MOVW_ABS_NC
, /* type */
752 2, /* size (0 = byte, 1 = short, 2 = long) */
754 FALSE
, /* pc_relative */
756 complain_overflow_dont
,/* complain_on_overflow */
757 bfd_elf_generic_reloc
, /* special_function */
758 "R_ARM_THM_MOVW_ABS_NC",/* name */
759 FALSE
, /* partial_inplace */
760 0x040f70ff, /* src_mask */
761 0x040f70ff, /* dst_mask */
762 FALSE
), /* pcrel_offset */
764 HOWTO (R_ARM_THM_MOVT_ABS
, /* type */
766 2, /* size (0 = byte, 1 = short, 2 = long) */
768 FALSE
, /* pc_relative */
770 complain_overflow_bitfield
,/* complain_on_overflow */
771 bfd_elf_generic_reloc
, /* special_function */
772 "R_ARM_THM_MOVT_ABS", /* name */
773 FALSE
, /* partial_inplace */
774 0x040f70ff, /* src_mask */
775 0x040f70ff, /* dst_mask */
776 FALSE
), /* pcrel_offset */
778 HOWTO (R_ARM_THM_MOVW_PREL_NC
,/* type */
780 2, /* size (0 = byte, 1 = short, 2 = long) */
782 TRUE
, /* pc_relative */
784 complain_overflow_dont
,/* complain_on_overflow */
785 bfd_elf_generic_reloc
, /* special_function */
786 "R_ARM_THM_MOVW_PREL_NC",/* name */
787 FALSE
, /* partial_inplace */
788 0x040f70ff, /* src_mask */
789 0x040f70ff, /* dst_mask */
790 TRUE
), /* pcrel_offset */
792 HOWTO (R_ARM_THM_MOVT_PREL
, /* type */
794 2, /* size (0 = byte, 1 = short, 2 = long) */
796 TRUE
, /* pc_relative */
798 complain_overflow_bitfield
,/* complain_on_overflow */
799 bfd_elf_generic_reloc
, /* special_function */
800 "R_ARM_THM_MOVT_PREL", /* name */
801 FALSE
, /* partial_inplace */
802 0x040f70ff, /* src_mask */
803 0x040f70ff, /* dst_mask */
804 TRUE
), /* pcrel_offset */
806 HOWTO (R_ARM_THM_JUMP19
, /* type */
808 2, /* size (0 = byte, 1 = short, 2 = long) */
810 TRUE
, /* pc_relative */
812 complain_overflow_signed
,/* complain_on_overflow */
813 bfd_elf_generic_reloc
, /* special_function */
814 "R_ARM_THM_JUMP19", /* name */
815 FALSE
, /* partial_inplace */
816 0x043f2fff, /* src_mask */
817 0x043f2fff, /* dst_mask */
818 TRUE
), /* pcrel_offset */
820 HOWTO (R_ARM_THM_JUMP6
, /* type */
822 1, /* size (0 = byte, 1 = short, 2 = long) */
824 TRUE
, /* pc_relative */
826 complain_overflow_unsigned
,/* complain_on_overflow */
827 bfd_elf_generic_reloc
, /* special_function */
828 "R_ARM_THM_JUMP6", /* name */
829 FALSE
, /* partial_inplace */
830 0x02f8, /* src_mask */
831 0x02f8, /* dst_mask */
832 TRUE
), /* pcrel_offset */
  /* These are declared as 13-bit signed relocations because we can
     address -4095 .. 4095(base) by altering ADDW to SUBW or vice
     versa.  */
837 HOWTO (R_ARM_THM_ALU_PREL_11_0
,/* type */
839 2, /* size (0 = byte, 1 = short, 2 = long) */
841 TRUE
, /* pc_relative */
843 complain_overflow_dont
,/* complain_on_overflow */
844 bfd_elf_generic_reloc
, /* special_function */
845 "R_ARM_THM_ALU_PREL_11_0",/* name */
846 FALSE
, /* partial_inplace */
847 0xffffffff, /* src_mask */
848 0xffffffff, /* dst_mask */
849 TRUE
), /* pcrel_offset */
851 HOWTO (R_ARM_THM_PC12
, /* type */
853 2, /* size (0 = byte, 1 = short, 2 = long) */
855 TRUE
, /* pc_relative */
857 complain_overflow_dont
,/* complain_on_overflow */
858 bfd_elf_generic_reloc
, /* special_function */
859 "R_ARM_THM_PC12", /* name */
860 FALSE
, /* partial_inplace */
861 0xffffffff, /* src_mask */
862 0xffffffff, /* dst_mask */
863 TRUE
), /* pcrel_offset */
865 HOWTO (R_ARM_ABS32_NOI
, /* type */
867 2, /* size (0 = byte, 1 = short, 2 = long) */
869 FALSE
, /* pc_relative */
871 complain_overflow_dont
,/* complain_on_overflow */
872 bfd_elf_generic_reloc
, /* special_function */
873 "R_ARM_ABS32_NOI", /* name */
874 FALSE
, /* partial_inplace */
875 0xffffffff, /* src_mask */
876 0xffffffff, /* dst_mask */
877 FALSE
), /* pcrel_offset */
879 HOWTO (R_ARM_REL32_NOI
, /* type */
881 2, /* size (0 = byte, 1 = short, 2 = long) */
883 TRUE
, /* pc_relative */
885 complain_overflow_dont
,/* complain_on_overflow */
886 bfd_elf_generic_reloc
, /* special_function */
887 "R_ARM_REL32_NOI", /* name */
888 FALSE
, /* partial_inplace */
889 0xffffffff, /* src_mask */
890 0xffffffff, /* dst_mask */
891 FALSE
), /* pcrel_offset */
893 /* Group relocations. */
895 HOWTO (R_ARM_ALU_PC_G0_NC
, /* type */
897 2, /* size (0 = byte, 1 = short, 2 = long) */
899 TRUE
, /* pc_relative */
901 complain_overflow_dont
,/* complain_on_overflow */
902 bfd_elf_generic_reloc
, /* special_function */
903 "R_ARM_ALU_PC_G0_NC", /* name */
904 FALSE
, /* partial_inplace */
905 0xffffffff, /* src_mask */
906 0xffffffff, /* dst_mask */
907 TRUE
), /* pcrel_offset */
909 HOWTO (R_ARM_ALU_PC_G0
, /* type */
911 2, /* size (0 = byte, 1 = short, 2 = long) */
913 TRUE
, /* pc_relative */
915 complain_overflow_dont
,/* complain_on_overflow */
916 bfd_elf_generic_reloc
, /* special_function */
917 "R_ARM_ALU_PC_G0", /* name */
918 FALSE
, /* partial_inplace */
919 0xffffffff, /* src_mask */
920 0xffffffff, /* dst_mask */
921 TRUE
), /* pcrel_offset */
923 HOWTO (R_ARM_ALU_PC_G1_NC
, /* type */
925 2, /* size (0 = byte, 1 = short, 2 = long) */
927 TRUE
, /* pc_relative */
929 complain_overflow_dont
,/* complain_on_overflow */
930 bfd_elf_generic_reloc
, /* special_function */
931 "R_ARM_ALU_PC_G1_NC", /* name */
932 FALSE
, /* partial_inplace */
933 0xffffffff, /* src_mask */
934 0xffffffff, /* dst_mask */
935 TRUE
), /* pcrel_offset */
937 HOWTO (R_ARM_ALU_PC_G1
, /* type */
939 2, /* size (0 = byte, 1 = short, 2 = long) */
941 TRUE
, /* pc_relative */
943 complain_overflow_dont
,/* complain_on_overflow */
944 bfd_elf_generic_reloc
, /* special_function */
945 "R_ARM_ALU_PC_G1", /* name */
946 FALSE
, /* partial_inplace */
947 0xffffffff, /* src_mask */
948 0xffffffff, /* dst_mask */
949 TRUE
), /* pcrel_offset */
951 HOWTO (R_ARM_ALU_PC_G2
, /* type */
953 2, /* size (0 = byte, 1 = short, 2 = long) */
955 TRUE
, /* pc_relative */
957 complain_overflow_dont
,/* complain_on_overflow */
958 bfd_elf_generic_reloc
, /* special_function */
959 "R_ARM_ALU_PC_G2", /* name */
960 FALSE
, /* partial_inplace */
961 0xffffffff, /* src_mask */
962 0xffffffff, /* dst_mask */
963 TRUE
), /* pcrel_offset */
965 HOWTO (R_ARM_LDR_PC_G1
, /* type */
967 2, /* size (0 = byte, 1 = short, 2 = long) */
969 TRUE
, /* pc_relative */
971 complain_overflow_dont
,/* complain_on_overflow */
972 bfd_elf_generic_reloc
, /* special_function */
973 "R_ARM_LDR_PC_G1", /* name */
974 FALSE
, /* partial_inplace */
975 0xffffffff, /* src_mask */
976 0xffffffff, /* dst_mask */
977 TRUE
), /* pcrel_offset */
979 HOWTO (R_ARM_LDR_PC_G2
, /* type */
981 2, /* size (0 = byte, 1 = short, 2 = long) */
983 TRUE
, /* pc_relative */
985 complain_overflow_dont
,/* complain_on_overflow */
986 bfd_elf_generic_reloc
, /* special_function */
987 "R_ARM_LDR_PC_G2", /* name */
988 FALSE
, /* partial_inplace */
989 0xffffffff, /* src_mask */
990 0xffffffff, /* dst_mask */
991 TRUE
), /* pcrel_offset */
993 HOWTO (R_ARM_LDRS_PC_G0
, /* type */
995 2, /* size (0 = byte, 1 = short, 2 = long) */
997 TRUE
, /* pc_relative */
999 complain_overflow_dont
,/* complain_on_overflow */
1000 bfd_elf_generic_reloc
, /* special_function */
1001 "R_ARM_LDRS_PC_G0", /* name */
1002 FALSE
, /* partial_inplace */
1003 0xffffffff, /* src_mask */
1004 0xffffffff, /* dst_mask */
1005 TRUE
), /* pcrel_offset */
1007 HOWTO (R_ARM_LDRS_PC_G1
, /* type */
1009 2, /* size (0 = byte, 1 = short, 2 = long) */
1011 TRUE
, /* pc_relative */
1013 complain_overflow_dont
,/* complain_on_overflow */
1014 bfd_elf_generic_reloc
, /* special_function */
1015 "R_ARM_LDRS_PC_G1", /* name */
1016 FALSE
, /* partial_inplace */
1017 0xffffffff, /* src_mask */
1018 0xffffffff, /* dst_mask */
1019 TRUE
), /* pcrel_offset */
1021 HOWTO (R_ARM_LDRS_PC_G2
, /* type */
1023 2, /* size (0 = byte, 1 = short, 2 = long) */
1025 TRUE
, /* pc_relative */
1027 complain_overflow_dont
,/* complain_on_overflow */
1028 bfd_elf_generic_reloc
, /* special_function */
1029 "R_ARM_LDRS_PC_G2", /* name */
1030 FALSE
, /* partial_inplace */
1031 0xffffffff, /* src_mask */
1032 0xffffffff, /* dst_mask */
1033 TRUE
), /* pcrel_offset */
1035 HOWTO (R_ARM_LDC_PC_G0
, /* type */
1037 2, /* size (0 = byte, 1 = short, 2 = long) */
1039 TRUE
, /* pc_relative */
1041 complain_overflow_dont
,/* complain_on_overflow */
1042 bfd_elf_generic_reloc
, /* special_function */
1043 "R_ARM_LDC_PC_G0", /* name */
1044 FALSE
, /* partial_inplace */
1045 0xffffffff, /* src_mask */
1046 0xffffffff, /* dst_mask */
1047 TRUE
), /* pcrel_offset */
1049 HOWTO (R_ARM_LDC_PC_G1
, /* type */
1051 2, /* size (0 = byte, 1 = short, 2 = long) */
1053 TRUE
, /* pc_relative */
1055 complain_overflow_dont
,/* complain_on_overflow */
1056 bfd_elf_generic_reloc
, /* special_function */
1057 "R_ARM_LDC_PC_G1", /* name */
1058 FALSE
, /* partial_inplace */
1059 0xffffffff, /* src_mask */
1060 0xffffffff, /* dst_mask */
1061 TRUE
), /* pcrel_offset */
1063 HOWTO (R_ARM_LDC_PC_G2
, /* type */
1065 2, /* size (0 = byte, 1 = short, 2 = long) */
1067 TRUE
, /* pc_relative */
1069 complain_overflow_dont
,/* complain_on_overflow */
1070 bfd_elf_generic_reloc
, /* special_function */
1071 "R_ARM_LDC_PC_G2", /* name */
1072 FALSE
, /* partial_inplace */
1073 0xffffffff, /* src_mask */
1074 0xffffffff, /* dst_mask */
1075 TRUE
), /* pcrel_offset */
1077 HOWTO (R_ARM_ALU_SB_G0_NC
, /* type */
1079 2, /* size (0 = byte, 1 = short, 2 = long) */
1081 TRUE
, /* pc_relative */
1083 complain_overflow_dont
,/* complain_on_overflow */
1084 bfd_elf_generic_reloc
, /* special_function */
1085 "R_ARM_ALU_SB_G0_NC", /* name */
1086 FALSE
, /* partial_inplace */
1087 0xffffffff, /* src_mask */
1088 0xffffffff, /* dst_mask */
1089 TRUE
), /* pcrel_offset */
1091 HOWTO (R_ARM_ALU_SB_G0
, /* type */
1093 2, /* size (0 = byte, 1 = short, 2 = long) */
1095 TRUE
, /* pc_relative */
1097 complain_overflow_dont
,/* complain_on_overflow */
1098 bfd_elf_generic_reloc
, /* special_function */
1099 "R_ARM_ALU_SB_G0", /* name */
1100 FALSE
, /* partial_inplace */
1101 0xffffffff, /* src_mask */
1102 0xffffffff, /* dst_mask */
1103 TRUE
), /* pcrel_offset */
1105 HOWTO (R_ARM_ALU_SB_G1_NC
, /* type */
1107 2, /* size (0 = byte, 1 = short, 2 = long) */
1109 TRUE
, /* pc_relative */
1111 complain_overflow_dont
,/* complain_on_overflow */
1112 bfd_elf_generic_reloc
, /* special_function */
1113 "R_ARM_ALU_SB_G1_NC", /* name */
1114 FALSE
, /* partial_inplace */
1115 0xffffffff, /* src_mask */
1116 0xffffffff, /* dst_mask */
1117 TRUE
), /* pcrel_offset */
1119 HOWTO (R_ARM_ALU_SB_G1
, /* type */
1121 2, /* size (0 = byte, 1 = short, 2 = long) */
1123 TRUE
, /* pc_relative */
1125 complain_overflow_dont
,/* complain_on_overflow */
1126 bfd_elf_generic_reloc
, /* special_function */
1127 "R_ARM_ALU_SB_G1", /* name */
1128 FALSE
, /* partial_inplace */
1129 0xffffffff, /* src_mask */
1130 0xffffffff, /* dst_mask */
1131 TRUE
), /* pcrel_offset */
1133 HOWTO (R_ARM_ALU_SB_G2
, /* type */
1135 2, /* size (0 = byte, 1 = short, 2 = long) */
1137 TRUE
, /* pc_relative */
1139 complain_overflow_dont
,/* complain_on_overflow */
1140 bfd_elf_generic_reloc
, /* special_function */
1141 "R_ARM_ALU_SB_G2", /* name */
1142 FALSE
, /* partial_inplace */
1143 0xffffffff, /* src_mask */
1144 0xffffffff, /* dst_mask */
1145 TRUE
), /* pcrel_offset */
1147 HOWTO (R_ARM_LDR_SB_G0
, /* type */
1149 2, /* size (0 = byte, 1 = short, 2 = long) */
1151 TRUE
, /* pc_relative */
1153 complain_overflow_dont
,/* complain_on_overflow */
1154 bfd_elf_generic_reloc
, /* special_function */
1155 "R_ARM_LDR_SB_G0", /* name */
1156 FALSE
, /* partial_inplace */
1157 0xffffffff, /* src_mask */
1158 0xffffffff, /* dst_mask */
1159 TRUE
), /* pcrel_offset */
1161 HOWTO (R_ARM_LDR_SB_G1
, /* type */
1163 2, /* size (0 = byte, 1 = short, 2 = long) */
1165 TRUE
, /* pc_relative */
1167 complain_overflow_dont
,/* complain_on_overflow */
1168 bfd_elf_generic_reloc
, /* special_function */
1169 "R_ARM_LDR_SB_G1", /* name */
1170 FALSE
, /* partial_inplace */
1171 0xffffffff, /* src_mask */
1172 0xffffffff, /* dst_mask */
1173 TRUE
), /* pcrel_offset */
1175 HOWTO (R_ARM_LDR_SB_G2
, /* type */
1177 2, /* size (0 = byte, 1 = short, 2 = long) */
1179 TRUE
, /* pc_relative */
1181 complain_overflow_dont
,/* complain_on_overflow */
1182 bfd_elf_generic_reloc
, /* special_function */
1183 "R_ARM_LDR_SB_G2", /* name */
1184 FALSE
, /* partial_inplace */
1185 0xffffffff, /* src_mask */
1186 0xffffffff, /* dst_mask */
1187 TRUE
), /* pcrel_offset */
1189 HOWTO (R_ARM_LDRS_SB_G0
, /* type */
1191 2, /* size (0 = byte, 1 = short, 2 = long) */
1193 TRUE
, /* pc_relative */
1195 complain_overflow_dont
,/* complain_on_overflow */
1196 bfd_elf_generic_reloc
, /* special_function */
1197 "R_ARM_LDRS_SB_G0", /* name */
1198 FALSE
, /* partial_inplace */
1199 0xffffffff, /* src_mask */
1200 0xffffffff, /* dst_mask */
1201 TRUE
), /* pcrel_offset */
1203 HOWTO (R_ARM_LDRS_SB_G1
, /* type */
1205 2, /* size (0 = byte, 1 = short, 2 = long) */
1207 TRUE
, /* pc_relative */
1209 complain_overflow_dont
,/* complain_on_overflow */
1210 bfd_elf_generic_reloc
, /* special_function */
1211 "R_ARM_LDRS_SB_G1", /* name */
1212 FALSE
, /* partial_inplace */
1213 0xffffffff, /* src_mask */
1214 0xffffffff, /* dst_mask */
1215 TRUE
), /* pcrel_offset */
1217 HOWTO (R_ARM_LDRS_SB_G2
, /* type */
1219 2, /* size (0 = byte, 1 = short, 2 = long) */
1221 TRUE
, /* pc_relative */
1223 complain_overflow_dont
,/* complain_on_overflow */
1224 bfd_elf_generic_reloc
, /* special_function */
1225 "R_ARM_LDRS_SB_G2", /* name */
1226 FALSE
, /* partial_inplace */
1227 0xffffffff, /* src_mask */
1228 0xffffffff, /* dst_mask */
1229 TRUE
), /* pcrel_offset */
1231 HOWTO (R_ARM_LDC_SB_G0
, /* type */
1233 2, /* size (0 = byte, 1 = short, 2 = long) */
1235 TRUE
, /* pc_relative */
1237 complain_overflow_dont
,/* complain_on_overflow */
1238 bfd_elf_generic_reloc
, /* special_function */
1239 "R_ARM_LDC_SB_G0", /* name */
1240 FALSE
, /* partial_inplace */
1241 0xffffffff, /* src_mask */
1242 0xffffffff, /* dst_mask */
1243 TRUE
), /* pcrel_offset */
1245 HOWTO (R_ARM_LDC_SB_G1
, /* type */
1247 2, /* size (0 = byte, 1 = short, 2 = long) */
1249 TRUE
, /* pc_relative */
1251 complain_overflow_dont
,/* complain_on_overflow */
1252 bfd_elf_generic_reloc
, /* special_function */
1253 "R_ARM_LDC_SB_G1", /* name */
1254 FALSE
, /* partial_inplace */
1255 0xffffffff, /* src_mask */
1256 0xffffffff, /* dst_mask */
1257 TRUE
), /* pcrel_offset */
1259 HOWTO (R_ARM_LDC_SB_G2
, /* type */
1261 2, /* size (0 = byte, 1 = short, 2 = long) */
1263 TRUE
, /* pc_relative */
1265 complain_overflow_dont
,/* complain_on_overflow */
1266 bfd_elf_generic_reloc
, /* special_function */
1267 "R_ARM_LDC_SB_G2", /* name */
1268 FALSE
, /* partial_inplace */
1269 0xffffffff, /* src_mask */
1270 0xffffffff, /* dst_mask */
1271 TRUE
), /* pcrel_offset */
1273 /* End of group relocations. */
1275 HOWTO (R_ARM_MOVW_BREL_NC
, /* type */
1277 2, /* size (0 = byte, 1 = short, 2 = long) */
1279 FALSE
, /* pc_relative */
1281 complain_overflow_dont
,/* complain_on_overflow */
1282 bfd_elf_generic_reloc
, /* special_function */
1283 "R_ARM_MOVW_BREL_NC", /* name */
1284 FALSE
, /* partial_inplace */
1285 0x0000ffff, /* src_mask */
1286 0x0000ffff, /* dst_mask */
1287 FALSE
), /* pcrel_offset */
1289 HOWTO (R_ARM_MOVT_BREL
, /* type */
1291 2, /* size (0 = byte, 1 = short, 2 = long) */
1293 FALSE
, /* pc_relative */
1295 complain_overflow_bitfield
,/* complain_on_overflow */
1296 bfd_elf_generic_reloc
, /* special_function */
1297 "R_ARM_MOVT_BREL", /* name */
1298 FALSE
, /* partial_inplace */
1299 0x0000ffff, /* src_mask */
1300 0x0000ffff, /* dst_mask */
1301 FALSE
), /* pcrel_offset */
1303 HOWTO (R_ARM_MOVW_BREL
, /* type */
1305 2, /* size (0 = byte, 1 = short, 2 = long) */
1307 FALSE
, /* pc_relative */
1309 complain_overflow_dont
,/* complain_on_overflow */
1310 bfd_elf_generic_reloc
, /* special_function */
1311 "R_ARM_MOVW_BREL", /* name */
1312 FALSE
, /* partial_inplace */
1313 0x0000ffff, /* src_mask */
1314 0x0000ffff, /* dst_mask */
1315 FALSE
), /* pcrel_offset */
1317 HOWTO (R_ARM_THM_MOVW_BREL_NC
,/* type */
1319 2, /* size (0 = byte, 1 = short, 2 = long) */
1321 FALSE
, /* pc_relative */
1323 complain_overflow_dont
,/* complain_on_overflow */
1324 bfd_elf_generic_reloc
, /* special_function */
1325 "R_ARM_THM_MOVW_BREL_NC",/* name */
1326 FALSE
, /* partial_inplace */
1327 0x040f70ff, /* src_mask */
1328 0x040f70ff, /* dst_mask */
1329 FALSE
), /* pcrel_offset */
1331 HOWTO (R_ARM_THM_MOVT_BREL
, /* type */
1333 2, /* size (0 = byte, 1 = short, 2 = long) */
1335 FALSE
, /* pc_relative */
1337 complain_overflow_bitfield
,/* complain_on_overflow */
1338 bfd_elf_generic_reloc
, /* special_function */
1339 "R_ARM_THM_MOVT_BREL", /* name */
1340 FALSE
, /* partial_inplace */
1341 0x040f70ff, /* src_mask */
1342 0x040f70ff, /* dst_mask */
1343 FALSE
), /* pcrel_offset */
1345 HOWTO (R_ARM_THM_MOVW_BREL
, /* type */
1347 2, /* size (0 = byte, 1 = short, 2 = long) */
1349 FALSE
, /* pc_relative */
1351 complain_overflow_dont
,/* complain_on_overflow */
1352 bfd_elf_generic_reloc
, /* special_function */
1353 "R_ARM_THM_MOVW_BREL", /* name */
1354 FALSE
, /* partial_inplace */
1355 0x040f70ff, /* src_mask */
1356 0x040f70ff, /* dst_mask */
1357 FALSE
), /* pcrel_offset */
1359 HOWTO (R_ARM_TLS_GOTDESC
, /* type */
1361 2, /* size (0 = byte, 1 = short, 2 = long) */
1363 FALSE
, /* pc_relative */
1365 complain_overflow_bitfield
,/* complain_on_overflow */
1366 NULL
, /* special_function */
1367 "R_ARM_TLS_GOTDESC", /* name */
1368 TRUE
, /* partial_inplace */
1369 0xffffffff, /* src_mask */
1370 0xffffffff, /* dst_mask */
1371 FALSE
), /* pcrel_offset */
1373 HOWTO (R_ARM_TLS_CALL
, /* type */
1375 2, /* size (0 = byte, 1 = short, 2 = long) */
1377 FALSE
, /* pc_relative */
1379 complain_overflow_dont
,/* complain_on_overflow */
1380 bfd_elf_generic_reloc
, /* special_function */
1381 "R_ARM_TLS_CALL", /* name */
1382 FALSE
, /* partial_inplace */
1383 0x00ffffff, /* src_mask */
1384 0x00ffffff, /* dst_mask */
1385 FALSE
), /* pcrel_offset */
1387 HOWTO (R_ARM_TLS_DESCSEQ
, /* type */
1389 2, /* size (0 = byte, 1 = short, 2 = long) */
1391 FALSE
, /* pc_relative */
1393 complain_overflow_bitfield
,/* complain_on_overflow */
1394 bfd_elf_generic_reloc
, /* special_function */
1395 "R_ARM_TLS_DESCSEQ", /* name */
1396 FALSE
, /* partial_inplace */
1397 0x00000000, /* src_mask */
1398 0x00000000, /* dst_mask */
1399 FALSE
), /* pcrel_offset */
1401 HOWTO (R_ARM_THM_TLS_CALL
, /* type */
1403 2, /* size (0 = byte, 1 = short, 2 = long) */
1405 FALSE
, /* pc_relative */
1407 complain_overflow_dont
,/* complain_on_overflow */
1408 bfd_elf_generic_reloc
, /* special_function */
1409 "R_ARM_THM_TLS_CALL", /* name */
1410 FALSE
, /* partial_inplace */
1411 0x07ff07ff, /* src_mask */
1412 0x07ff07ff, /* dst_mask */
1413 FALSE
), /* pcrel_offset */
1415 HOWTO (R_ARM_PLT32_ABS
, /* type */
1417 2, /* size (0 = byte, 1 = short, 2 = long) */
1419 FALSE
, /* pc_relative */
1421 complain_overflow_dont
,/* complain_on_overflow */
1422 bfd_elf_generic_reloc
, /* special_function */
1423 "R_ARM_PLT32_ABS", /* name */
1424 FALSE
, /* partial_inplace */
1425 0xffffffff, /* src_mask */
1426 0xffffffff, /* dst_mask */
1427 FALSE
), /* pcrel_offset */
1429 HOWTO (R_ARM_GOT_ABS
, /* type */
1431 2, /* size (0 = byte, 1 = short, 2 = long) */
1433 FALSE
, /* pc_relative */
1435 complain_overflow_dont
,/* complain_on_overflow */
1436 bfd_elf_generic_reloc
, /* special_function */
1437 "R_ARM_GOT_ABS", /* name */
1438 FALSE
, /* partial_inplace */
1439 0xffffffff, /* src_mask */
1440 0xffffffff, /* dst_mask */
1441 FALSE
), /* pcrel_offset */
1443 HOWTO (R_ARM_GOT_PREL
, /* type */
1445 2, /* size (0 = byte, 1 = short, 2 = long) */
1447 TRUE
, /* pc_relative */
1449 complain_overflow_dont
, /* complain_on_overflow */
1450 bfd_elf_generic_reloc
, /* special_function */
1451 "R_ARM_GOT_PREL", /* name */
1452 FALSE
, /* partial_inplace */
1453 0xffffffff, /* src_mask */
1454 0xffffffff, /* dst_mask */
1455 TRUE
), /* pcrel_offset */
1457 HOWTO (R_ARM_GOT_BREL12
, /* type */
1459 2, /* size (0 = byte, 1 = short, 2 = long) */
1461 FALSE
, /* pc_relative */
1463 complain_overflow_bitfield
,/* complain_on_overflow */
1464 bfd_elf_generic_reloc
, /* special_function */
1465 "R_ARM_GOT_BREL12", /* name */
1466 FALSE
, /* partial_inplace */
1467 0x00000fff, /* src_mask */
1468 0x00000fff, /* dst_mask */
1469 FALSE
), /* pcrel_offset */
1471 HOWTO (R_ARM_GOTOFF12
, /* type */
1473 2, /* size (0 = byte, 1 = short, 2 = long) */
1475 FALSE
, /* pc_relative */
1477 complain_overflow_bitfield
,/* complain_on_overflow */
1478 bfd_elf_generic_reloc
, /* special_function */
1479 "R_ARM_GOTOFF12", /* name */
1480 FALSE
, /* partial_inplace */
1481 0x00000fff, /* src_mask */
1482 0x00000fff, /* dst_mask */
1483 FALSE
), /* pcrel_offset */
1485 EMPTY_HOWTO (R_ARM_GOTRELAX
), /* reserved for future GOT-load optimizations */
1487 /* GNU extension to record C++ vtable member usage */
1488 HOWTO (R_ARM_GNU_VTENTRY
, /* type */
1490 2, /* size (0 = byte, 1 = short, 2 = long) */
1492 FALSE
, /* pc_relative */
1494 complain_overflow_dont
, /* complain_on_overflow */
1495 _bfd_elf_rel_vtable_reloc_fn
, /* special_function */
1496 "R_ARM_GNU_VTENTRY", /* name */
1497 FALSE
, /* partial_inplace */
1500 FALSE
), /* pcrel_offset */
1502 /* GNU extension to record C++ vtable hierarchy */
1503 HOWTO (R_ARM_GNU_VTINHERIT
, /* type */
1505 2, /* size (0 = byte, 1 = short, 2 = long) */
1507 FALSE
, /* pc_relative */
1509 complain_overflow_dont
, /* complain_on_overflow */
1510 NULL
, /* special_function */
1511 "R_ARM_GNU_VTINHERIT", /* name */
1512 FALSE
, /* partial_inplace */
1515 FALSE
), /* pcrel_offset */
1517 HOWTO (R_ARM_THM_JUMP11
, /* type */
1519 1, /* size (0 = byte, 1 = short, 2 = long) */
1521 TRUE
, /* pc_relative */
1523 complain_overflow_signed
, /* complain_on_overflow */
1524 bfd_elf_generic_reloc
, /* special_function */
1525 "R_ARM_THM_JUMP11", /* name */
1526 FALSE
, /* partial_inplace */
1527 0x000007ff, /* src_mask */
1528 0x000007ff, /* dst_mask */
1529 TRUE
), /* pcrel_offset */
1531 HOWTO (R_ARM_THM_JUMP8
, /* type */
1533 1, /* size (0 = byte, 1 = short, 2 = long) */
1535 TRUE
, /* pc_relative */
1537 complain_overflow_signed
, /* complain_on_overflow */
1538 bfd_elf_generic_reloc
, /* special_function */
1539 "R_ARM_THM_JUMP8", /* name */
1540 FALSE
, /* partial_inplace */
1541 0x000000ff, /* src_mask */
1542 0x000000ff, /* dst_mask */
1543 TRUE
), /* pcrel_offset */
1545 /* TLS relocations */
1546 HOWTO (R_ARM_TLS_GD32
, /* type */
1548 2, /* size (0 = byte, 1 = short, 2 = long) */
1550 FALSE
, /* pc_relative */
1552 complain_overflow_bitfield
,/* complain_on_overflow */
1553 NULL
, /* special_function */
1554 "R_ARM_TLS_GD32", /* name */
1555 TRUE
, /* partial_inplace */
1556 0xffffffff, /* src_mask */
1557 0xffffffff, /* dst_mask */
1558 FALSE
), /* pcrel_offset */
1560 HOWTO (R_ARM_TLS_LDM32
, /* type */
1562 2, /* size (0 = byte, 1 = short, 2 = long) */
1564 FALSE
, /* pc_relative */
1566 complain_overflow_bitfield
,/* complain_on_overflow */
1567 bfd_elf_generic_reloc
, /* special_function */
1568 "R_ARM_TLS_LDM32", /* name */
1569 TRUE
, /* partial_inplace */
1570 0xffffffff, /* src_mask */
1571 0xffffffff, /* dst_mask */
1572 FALSE
), /* pcrel_offset */
1574 HOWTO (R_ARM_TLS_LDO32
, /* type */
1576 2, /* size (0 = byte, 1 = short, 2 = long) */
1578 FALSE
, /* pc_relative */
1580 complain_overflow_bitfield
,/* complain_on_overflow */
1581 bfd_elf_generic_reloc
, /* special_function */
1582 "R_ARM_TLS_LDO32", /* name */
1583 TRUE
, /* partial_inplace */
1584 0xffffffff, /* src_mask */
1585 0xffffffff, /* dst_mask */
1586 FALSE
), /* pcrel_offset */
1588 HOWTO (R_ARM_TLS_IE32
, /* type */
1590 2, /* size (0 = byte, 1 = short, 2 = long) */
1592 FALSE
, /* pc_relative */
1594 complain_overflow_bitfield
,/* complain_on_overflow */
1595 NULL
, /* special_function */
1596 "R_ARM_TLS_IE32", /* name */
1597 TRUE
, /* partial_inplace */
1598 0xffffffff, /* src_mask */
1599 0xffffffff, /* dst_mask */
1600 FALSE
), /* pcrel_offset */
1602 HOWTO (R_ARM_TLS_LE32
, /* type */
1604 2, /* size (0 = byte, 1 = short, 2 = long) */
1606 FALSE
, /* pc_relative */
1608 complain_overflow_bitfield
,/* complain_on_overflow */
1609 NULL
, /* special_function */
1610 "R_ARM_TLS_LE32", /* name */
1611 TRUE
, /* partial_inplace */
1612 0xffffffff, /* src_mask */
1613 0xffffffff, /* dst_mask */
1614 FALSE
), /* pcrel_offset */
1616 HOWTO (R_ARM_TLS_LDO12
, /* type */
1618 2, /* size (0 = byte, 1 = short, 2 = long) */
1620 FALSE
, /* pc_relative */
1622 complain_overflow_bitfield
,/* complain_on_overflow */
1623 bfd_elf_generic_reloc
, /* special_function */
1624 "R_ARM_TLS_LDO12", /* name */
1625 FALSE
, /* partial_inplace */
1626 0x00000fff, /* src_mask */
1627 0x00000fff, /* dst_mask */
1628 FALSE
), /* pcrel_offset */
1630 HOWTO (R_ARM_TLS_LE12
, /* type */
1632 2, /* size (0 = byte, 1 = short, 2 = long) */
1634 FALSE
, /* pc_relative */
1636 complain_overflow_bitfield
,/* complain_on_overflow */
1637 bfd_elf_generic_reloc
, /* special_function */
1638 "R_ARM_TLS_LE12", /* name */
1639 FALSE
, /* partial_inplace */
1640 0x00000fff, /* src_mask */
1641 0x00000fff, /* dst_mask */
1642 FALSE
), /* pcrel_offset */
1644 HOWTO (R_ARM_TLS_IE12GP
, /* type */
1646 2, /* size (0 = byte, 1 = short, 2 = long) */
1648 FALSE
, /* pc_relative */
1650 complain_overflow_bitfield
,/* complain_on_overflow */
1651 bfd_elf_generic_reloc
, /* special_function */
1652 "R_ARM_TLS_IE12GP", /* name */
1653 FALSE
, /* partial_inplace */
1654 0x00000fff, /* src_mask */
1655 0x00000fff, /* dst_mask */
1656 FALSE
), /* pcrel_offset */
1658 /* 112-127 private relocations. */
1676 /* R_ARM_ME_TOO, obsolete. */
1679 HOWTO (R_ARM_THM_TLS_DESCSEQ
, /* type */
1681 1, /* size (0 = byte, 1 = short, 2 = long) */
1683 FALSE
, /* pc_relative */
1685 complain_overflow_bitfield
,/* complain_on_overflow */
1686 bfd_elf_generic_reloc
, /* special_function */
1687 "R_ARM_THM_TLS_DESCSEQ",/* name */
1688 FALSE
, /* partial_inplace */
1689 0x00000000, /* src_mask */
1690 0x00000000, /* dst_mask */
1691 FALSE
), /* pcrel_offset */
1694 HOWTO (R_ARM_THM_ALU_ABS_G0_NC
,/* type. */
1695 0, /* rightshift. */
1696 1, /* size (0 = byte, 1 = short, 2 = long). */
1698 FALSE
, /* pc_relative. */
1700 complain_overflow_bitfield
,/* complain_on_overflow. */
1701 bfd_elf_generic_reloc
, /* special_function. */
1702 "R_ARM_THM_ALU_ABS_G0_NC",/* name. */
1703 FALSE
, /* partial_inplace. */
1704 0x00000000, /* src_mask. */
1705 0x00000000, /* dst_mask. */
1706 FALSE
), /* pcrel_offset. */
1707 HOWTO (R_ARM_THM_ALU_ABS_G1_NC
,/* type. */
1708 0, /* rightshift. */
1709 1, /* size (0 = byte, 1 = short, 2 = long). */
1711 FALSE
, /* pc_relative. */
1713 complain_overflow_bitfield
,/* complain_on_overflow. */
1714 bfd_elf_generic_reloc
, /* special_function. */
1715 "R_ARM_THM_ALU_ABS_G1_NC",/* name. */
1716 FALSE
, /* partial_inplace. */
1717 0x00000000, /* src_mask. */
1718 0x00000000, /* dst_mask. */
1719 FALSE
), /* pcrel_offset. */
1720 HOWTO (R_ARM_THM_ALU_ABS_G2_NC
,/* type. */
1721 0, /* rightshift. */
1722 1, /* size (0 = byte, 1 = short, 2 = long). */
1724 FALSE
, /* pc_relative. */
1726 complain_overflow_bitfield
,/* complain_on_overflow. */
1727 bfd_elf_generic_reloc
, /* special_function. */
1728 "R_ARM_THM_ALU_ABS_G2_NC",/* name. */
1729 FALSE
, /* partial_inplace. */
1730 0x00000000, /* src_mask. */
1731 0x00000000, /* dst_mask. */
1732 FALSE
), /* pcrel_offset. */
1733 HOWTO (R_ARM_THM_ALU_ABS_G3_NC
,/* type. */
1734 0, /* rightshift. */
1735 1, /* size (0 = byte, 1 = short, 2 = long). */
1737 FALSE
, /* pc_relative. */
1739 complain_overflow_bitfield
,/* complain_on_overflow. */
1740 bfd_elf_generic_reloc
, /* special_function. */
1741 "R_ARM_THM_ALU_ABS_G3_NC",/* name. */
1742 FALSE
, /* partial_inplace. */
1743 0x00000000, /* src_mask. */
1744 0x00000000, /* dst_mask. */
1745 FALSE
), /* pcrel_offset. */
1749 static reloc_howto_type elf32_arm_howto_table_2
[1] =
1751 HOWTO (R_ARM_IRELATIVE
, /* type */
1753 2, /* size (0 = byte, 1 = short, 2 = long) */
1755 FALSE
, /* pc_relative */
1757 complain_overflow_bitfield
,/* complain_on_overflow */
1758 bfd_elf_generic_reloc
, /* special_function */
1759 "R_ARM_IRELATIVE", /* name */
1760 TRUE
, /* partial_inplace */
1761 0xffffffff, /* src_mask */
1762 0xffffffff, /* dst_mask */
1763 FALSE
) /* pcrel_offset */
1766 /* 249-255 extended, currently unused, relocations: */
1767 static reloc_howto_type elf32_arm_howto_table_3
[4] =
1769 HOWTO (R_ARM_RREL32
, /* type */
1771 0, /* size (0 = byte, 1 = short, 2 = long) */
1773 FALSE
, /* pc_relative */
1775 complain_overflow_dont
,/* complain_on_overflow */
1776 bfd_elf_generic_reloc
, /* special_function */
1777 "R_ARM_RREL32", /* name */
1778 FALSE
, /* partial_inplace */
1781 FALSE
), /* pcrel_offset */
1783 HOWTO (R_ARM_RABS32
, /* type */
1785 0, /* size (0 = byte, 1 = short, 2 = long) */
1787 FALSE
, /* pc_relative */
1789 complain_overflow_dont
,/* complain_on_overflow */
1790 bfd_elf_generic_reloc
, /* special_function */
1791 "R_ARM_RABS32", /* name */
1792 FALSE
, /* partial_inplace */
1795 FALSE
), /* pcrel_offset */
1797 HOWTO (R_ARM_RPC24
, /* type */
1799 0, /* size (0 = byte, 1 = short, 2 = long) */
1801 FALSE
, /* pc_relative */
1803 complain_overflow_dont
,/* complain_on_overflow */
1804 bfd_elf_generic_reloc
, /* special_function */
1805 "R_ARM_RPC24", /* name */
1806 FALSE
, /* partial_inplace */
1809 FALSE
), /* pcrel_offset */
1811 HOWTO (R_ARM_RBASE
, /* type */
1813 0, /* size (0 = byte, 1 = short, 2 = long) */
1815 FALSE
, /* pc_relative */
1817 complain_overflow_dont
,/* complain_on_overflow */
1818 bfd_elf_generic_reloc
, /* special_function */
1819 "R_ARM_RBASE", /* name */
1820 FALSE
, /* partial_inplace */
1823 FALSE
) /* pcrel_offset */
1826 static reloc_howto_type
*
1827 elf32_arm_howto_from_type (unsigned int r_type
)
1829 if (r_type
< ARRAY_SIZE (elf32_arm_howto_table_1
))
1830 return &elf32_arm_howto_table_1
[r_type
];
1832 if (r_type
== R_ARM_IRELATIVE
)
1833 return &elf32_arm_howto_table_2
[r_type
- R_ARM_IRELATIVE
];
1835 if (r_type
>= R_ARM_RREL32
1836 && r_type
< R_ARM_RREL32
+ ARRAY_SIZE (elf32_arm_howto_table_3
))
1837 return &elf32_arm_howto_table_3
[r_type
- R_ARM_RREL32
];
1843 elf32_arm_info_to_howto (bfd
* abfd ATTRIBUTE_UNUSED
, arelent
* bfd_reloc
,
1844 Elf_Internal_Rela
* elf_reloc
)
1846 unsigned int r_type
;
1848 r_type
= ELF32_R_TYPE (elf_reloc
->r_info
);
1849 bfd_reloc
->howto
= elf32_arm_howto_from_type (r_type
);
1852 struct elf32_arm_reloc_map
1854 bfd_reloc_code_real_type bfd_reloc_val
;
1855 unsigned char elf_reloc_val
;
1858 /* All entries in this list must also be present in elf32_arm_howto_table. */
1859 static const struct elf32_arm_reloc_map elf32_arm_reloc_map
[] =
1861 {BFD_RELOC_NONE
, R_ARM_NONE
},
1862 {BFD_RELOC_ARM_PCREL_BRANCH
, R_ARM_PC24
},
1863 {BFD_RELOC_ARM_PCREL_CALL
, R_ARM_CALL
},
1864 {BFD_RELOC_ARM_PCREL_JUMP
, R_ARM_JUMP24
},
1865 {BFD_RELOC_ARM_PCREL_BLX
, R_ARM_XPC25
},
1866 {BFD_RELOC_THUMB_PCREL_BLX
, R_ARM_THM_XPC22
},
1867 {BFD_RELOC_32
, R_ARM_ABS32
},
1868 {BFD_RELOC_32_PCREL
, R_ARM_REL32
},
1869 {BFD_RELOC_8
, R_ARM_ABS8
},
1870 {BFD_RELOC_16
, R_ARM_ABS16
},
1871 {BFD_RELOC_ARM_OFFSET_IMM
, R_ARM_ABS12
},
1872 {BFD_RELOC_ARM_THUMB_OFFSET
, R_ARM_THM_ABS5
},
1873 {BFD_RELOC_THUMB_PCREL_BRANCH25
, R_ARM_THM_JUMP24
},
1874 {BFD_RELOC_THUMB_PCREL_BRANCH23
, R_ARM_THM_CALL
},
1875 {BFD_RELOC_THUMB_PCREL_BRANCH12
, R_ARM_THM_JUMP11
},
1876 {BFD_RELOC_THUMB_PCREL_BRANCH20
, R_ARM_THM_JUMP19
},
1877 {BFD_RELOC_THUMB_PCREL_BRANCH9
, R_ARM_THM_JUMP8
},
1878 {BFD_RELOC_THUMB_PCREL_BRANCH7
, R_ARM_THM_JUMP6
},
1879 {BFD_RELOC_ARM_GLOB_DAT
, R_ARM_GLOB_DAT
},
1880 {BFD_RELOC_ARM_JUMP_SLOT
, R_ARM_JUMP_SLOT
},
1881 {BFD_RELOC_ARM_RELATIVE
, R_ARM_RELATIVE
},
1882 {BFD_RELOC_ARM_GOTOFF
, R_ARM_GOTOFF32
},
1883 {BFD_RELOC_ARM_GOTPC
, R_ARM_GOTPC
},
1884 {BFD_RELOC_ARM_GOT_PREL
, R_ARM_GOT_PREL
},
1885 {BFD_RELOC_ARM_GOT32
, R_ARM_GOT32
},
1886 {BFD_RELOC_ARM_PLT32
, R_ARM_PLT32
},
1887 {BFD_RELOC_ARM_TARGET1
, R_ARM_TARGET1
},
1888 {BFD_RELOC_ARM_ROSEGREL32
, R_ARM_ROSEGREL32
},
1889 {BFD_RELOC_ARM_SBREL32
, R_ARM_SBREL32
},
1890 {BFD_RELOC_ARM_PREL31
, R_ARM_PREL31
},
1891 {BFD_RELOC_ARM_TARGET2
, R_ARM_TARGET2
},
1892 {BFD_RELOC_ARM_PLT32
, R_ARM_PLT32
},
1893 {BFD_RELOC_ARM_TLS_GOTDESC
, R_ARM_TLS_GOTDESC
},
1894 {BFD_RELOC_ARM_TLS_CALL
, R_ARM_TLS_CALL
},
1895 {BFD_RELOC_ARM_THM_TLS_CALL
, R_ARM_THM_TLS_CALL
},
1896 {BFD_RELOC_ARM_TLS_DESCSEQ
, R_ARM_TLS_DESCSEQ
},
1897 {BFD_RELOC_ARM_THM_TLS_DESCSEQ
, R_ARM_THM_TLS_DESCSEQ
},
1898 {BFD_RELOC_ARM_TLS_DESC
, R_ARM_TLS_DESC
},
1899 {BFD_RELOC_ARM_TLS_GD32
, R_ARM_TLS_GD32
},
1900 {BFD_RELOC_ARM_TLS_LDO32
, R_ARM_TLS_LDO32
},
1901 {BFD_RELOC_ARM_TLS_LDM32
, R_ARM_TLS_LDM32
},
1902 {BFD_RELOC_ARM_TLS_DTPMOD32
, R_ARM_TLS_DTPMOD32
},
1903 {BFD_RELOC_ARM_TLS_DTPOFF32
, R_ARM_TLS_DTPOFF32
},
1904 {BFD_RELOC_ARM_TLS_TPOFF32
, R_ARM_TLS_TPOFF32
},
1905 {BFD_RELOC_ARM_TLS_IE32
, R_ARM_TLS_IE32
},
1906 {BFD_RELOC_ARM_TLS_LE32
, R_ARM_TLS_LE32
},
1907 {BFD_RELOC_ARM_IRELATIVE
, R_ARM_IRELATIVE
},
1908 {BFD_RELOC_VTABLE_INHERIT
, R_ARM_GNU_VTINHERIT
},
1909 {BFD_RELOC_VTABLE_ENTRY
, R_ARM_GNU_VTENTRY
},
1910 {BFD_RELOC_ARM_MOVW
, R_ARM_MOVW_ABS_NC
},
1911 {BFD_RELOC_ARM_MOVT
, R_ARM_MOVT_ABS
},
1912 {BFD_RELOC_ARM_MOVW_PCREL
, R_ARM_MOVW_PREL_NC
},
1913 {BFD_RELOC_ARM_MOVT_PCREL
, R_ARM_MOVT_PREL
},
1914 {BFD_RELOC_ARM_THUMB_MOVW
, R_ARM_THM_MOVW_ABS_NC
},
1915 {BFD_RELOC_ARM_THUMB_MOVT
, R_ARM_THM_MOVT_ABS
},
1916 {BFD_RELOC_ARM_THUMB_MOVW_PCREL
, R_ARM_THM_MOVW_PREL_NC
},
1917 {BFD_RELOC_ARM_THUMB_MOVT_PCREL
, R_ARM_THM_MOVT_PREL
},
1918 {BFD_RELOC_ARM_ALU_PC_G0_NC
, R_ARM_ALU_PC_G0_NC
},
1919 {BFD_RELOC_ARM_ALU_PC_G0
, R_ARM_ALU_PC_G0
},
1920 {BFD_RELOC_ARM_ALU_PC_G1_NC
, R_ARM_ALU_PC_G1_NC
},
1921 {BFD_RELOC_ARM_ALU_PC_G1
, R_ARM_ALU_PC_G1
},
1922 {BFD_RELOC_ARM_ALU_PC_G2
, R_ARM_ALU_PC_G2
},
1923 {BFD_RELOC_ARM_LDR_PC_G0
, R_ARM_LDR_PC_G0
},
1924 {BFD_RELOC_ARM_LDR_PC_G1
, R_ARM_LDR_PC_G1
},
1925 {BFD_RELOC_ARM_LDR_PC_G2
, R_ARM_LDR_PC_G2
},
1926 {BFD_RELOC_ARM_LDRS_PC_G0
, R_ARM_LDRS_PC_G0
},
1927 {BFD_RELOC_ARM_LDRS_PC_G1
, R_ARM_LDRS_PC_G1
},
1928 {BFD_RELOC_ARM_LDRS_PC_G2
, R_ARM_LDRS_PC_G2
},
1929 {BFD_RELOC_ARM_LDC_PC_G0
, R_ARM_LDC_PC_G0
},
1930 {BFD_RELOC_ARM_LDC_PC_G1
, R_ARM_LDC_PC_G1
},
1931 {BFD_RELOC_ARM_LDC_PC_G2
, R_ARM_LDC_PC_G2
},
1932 {BFD_RELOC_ARM_ALU_SB_G0_NC
, R_ARM_ALU_SB_G0_NC
},
1933 {BFD_RELOC_ARM_ALU_SB_G0
, R_ARM_ALU_SB_G0
},
1934 {BFD_RELOC_ARM_ALU_SB_G1_NC
, R_ARM_ALU_SB_G1_NC
},
1935 {BFD_RELOC_ARM_ALU_SB_G1
, R_ARM_ALU_SB_G1
},
1936 {BFD_RELOC_ARM_ALU_SB_G2
, R_ARM_ALU_SB_G2
},
1937 {BFD_RELOC_ARM_LDR_SB_G0
, R_ARM_LDR_SB_G0
},
1938 {BFD_RELOC_ARM_LDR_SB_G1
, R_ARM_LDR_SB_G1
},
1939 {BFD_RELOC_ARM_LDR_SB_G2
, R_ARM_LDR_SB_G2
},
1940 {BFD_RELOC_ARM_LDRS_SB_G0
, R_ARM_LDRS_SB_G0
},
1941 {BFD_RELOC_ARM_LDRS_SB_G1
, R_ARM_LDRS_SB_G1
},
1942 {BFD_RELOC_ARM_LDRS_SB_G2
, R_ARM_LDRS_SB_G2
},
1943 {BFD_RELOC_ARM_LDC_SB_G0
, R_ARM_LDC_SB_G0
},
1944 {BFD_RELOC_ARM_LDC_SB_G1
, R_ARM_LDC_SB_G1
},
1945 {BFD_RELOC_ARM_LDC_SB_G2
, R_ARM_LDC_SB_G2
},
1946 {BFD_RELOC_ARM_V4BX
, R_ARM_V4BX
},
1947 {BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
, R_ARM_THM_ALU_ABS_G3_NC
},
1948 {BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
, R_ARM_THM_ALU_ABS_G2_NC
},
1949 {BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
, R_ARM_THM_ALU_ABS_G1_NC
},
1950 {BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
, R_ARM_THM_ALU_ABS_G0_NC
}
1953 static reloc_howto_type
*
1954 elf32_arm_reloc_type_lookup (bfd
*abfd ATTRIBUTE_UNUSED
,
1955 bfd_reloc_code_real_type code
)
1959 for (i
= 0; i
< ARRAY_SIZE (elf32_arm_reloc_map
); i
++)
1960 if (elf32_arm_reloc_map
[i
].bfd_reloc_val
== code
)
1961 return elf32_arm_howto_from_type (elf32_arm_reloc_map
[i
].elf_reloc_val
);
1966 static reloc_howto_type
*
1967 elf32_arm_reloc_name_lookup (bfd
*abfd ATTRIBUTE_UNUSED
,
1972 for (i
= 0; i
< ARRAY_SIZE (elf32_arm_howto_table_1
); i
++)
1973 if (elf32_arm_howto_table_1
[i
].name
!= NULL
1974 && strcasecmp (elf32_arm_howto_table_1
[i
].name
, r_name
) == 0)
1975 return &elf32_arm_howto_table_1
[i
];
1977 for (i
= 0; i
< ARRAY_SIZE (elf32_arm_howto_table_2
); i
++)
1978 if (elf32_arm_howto_table_2
[i
].name
!= NULL
1979 && strcasecmp (elf32_arm_howto_table_2
[i
].name
, r_name
) == 0)
1980 return &elf32_arm_howto_table_2
[i
];
1982 for (i
= 0; i
< ARRAY_SIZE (elf32_arm_howto_table_3
); i
++)
1983 if (elf32_arm_howto_table_3
[i
].name
!= NULL
1984 && strcasecmp (elf32_arm_howto_table_3
[i
].name
, r_name
) == 0)
1985 return &elf32_arm_howto_table_3
[i
];
1990 /* Support for core dump NOTE sections. */
1993 elf32_arm_nabi_grok_prstatus (bfd
*abfd
, Elf_Internal_Note
*note
)
1998 switch (note
->descsz
)
2003 case 148: /* Linux/ARM 32-bit. */
2005 elf_tdata (abfd
)->core
->signal
= bfd_get_16 (abfd
, note
->descdata
+ 12);
2008 elf_tdata (abfd
)->core
->lwpid
= bfd_get_32 (abfd
, note
->descdata
+ 24);
2017 /* Make a ".reg/999" section. */
2018 return _bfd_elfcore_make_pseudosection (abfd
, ".reg",
2019 size
, note
->descpos
+ offset
);
2023 elf32_arm_nabi_grok_psinfo (bfd
*abfd
, Elf_Internal_Note
*note
)
2025 switch (note
->descsz
)
2030 case 124: /* Linux/ARM elf_prpsinfo. */
2031 elf_tdata (abfd
)->core
->pid
2032 = bfd_get_32 (abfd
, note
->descdata
+ 12);
2033 elf_tdata (abfd
)->core
->program
2034 = _bfd_elfcore_strndup (abfd
, note
->descdata
+ 28, 16);
2035 elf_tdata (abfd
)->core
->command
2036 = _bfd_elfcore_strndup (abfd
, note
->descdata
+ 44, 80);
2039 /* Note that for some reason, a spurious space is tacked
2040 onto the end of the args in some (at least one anyway)
2041 implementations, so strip it off if it exists. */
2043 char *command
= elf_tdata (abfd
)->core
->command
;
2044 int n
= strlen (command
);
2046 if (0 < n
&& command
[n
- 1] == ' ')
2047 command
[n
- 1] = '\0';
2054 elf32_arm_nabi_write_core_note (bfd
*abfd
, char *buf
, int *bufsiz
,
2067 va_start (ap
, note_type
);
2068 memset (data
, 0, sizeof (data
));
2069 strncpy (data
+ 28, va_arg (ap
, const char *), 16);
2070 strncpy (data
+ 44, va_arg (ap
, const char *), 80);
2073 return elfcore_write_note (abfd
, buf
, bufsiz
,
2074 "CORE", note_type
, data
, sizeof (data
));
2085 va_start (ap
, note_type
);
2086 memset (data
, 0, sizeof (data
));
2087 pid
= va_arg (ap
, long);
2088 bfd_put_32 (abfd
, pid
, data
+ 24);
2089 cursig
= va_arg (ap
, int);
2090 bfd_put_16 (abfd
, cursig
, data
+ 12);
2091 greg
= va_arg (ap
, const void *);
2092 memcpy (data
+ 72, greg
, 72);
2095 return elfcore_write_note (abfd
, buf
, bufsiz
,
2096 "CORE", note_type
, data
, sizeof (data
));
2101 #define TARGET_LITTLE_SYM arm_elf32_le_vec
2102 #define TARGET_LITTLE_NAME "elf32-littlearm"
2103 #define TARGET_BIG_SYM arm_elf32_be_vec
2104 #define TARGET_BIG_NAME "elf32-bigarm"
2106 #define elf_backend_grok_prstatus elf32_arm_nabi_grok_prstatus
2107 #define elf_backend_grok_psinfo elf32_arm_nabi_grok_psinfo
2108 #define elf_backend_write_core_note elf32_arm_nabi_write_core_note
/* Convenience names for 32-bit ARM and 16-bit Thumb instruction words.  */
typedef unsigned long int  insn32;
typedef unsigned short int insn16;
2113 /* In lieu of proper flags, assume all EABIv4 or later objects are
2115 #define INTERWORK_FLAG(abfd) \
2116 (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
2117 || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
2118 || ((abfd)->flags & BFD_LINKER_CREATED))
2120 /* The linker script knows the section names for placement.
2121 The entry_names are used to do simple name mangling on the stubs.
2122 Given a function name, and its type, the stub can be found. The
2123 name can be changed. The only requirement is the %s be present. */
2124 #define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
2125 #define THUMB2ARM_GLUE_ENTRY_NAME "__%s_from_thumb"
2127 #define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
2128 #define ARM2THUMB_GLUE_ENTRY_NAME "__%s_from_arm"
2130 #define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
2131 #define VFP11_ERRATUM_VENEER_ENTRY_NAME "__vfp11_veneer_%x"
2133 #define STM32L4XX_ERRATUM_VENEER_SECTION_NAME ".text.stm32l4xx_veneer"
2134 #define STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "__stm32l4xx_veneer_%x"
2136 #define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
2137 #define ARM_BX_GLUE_ENTRY_NAME "__bx_r%d"
2139 #define STUB_ENTRY_NAME "__%s_veneer"
2141 /* The name of the dynamic interpreter. This is put in the .interp
2143 #define ELF_DYNAMIC_INTERPRETER "/usr/lib/ld.so.1"
/* ARM TLS trampoline: jump to the TLS entry whose GOT offset arrives
   in r0, relative to lr.  */
static const unsigned long tls_trampoline [] =
{
  0xe08e0000,		/* add r0, lr, r0  */
  0xe5901004,		/* ldr r1, [r0,#4] */
  0xe12fff11,		/* bx  r1	   */
};
/* Lazy TLS-descriptor resolution trampoline: loads the lazy resolver
   address from the GOT (PC-relative via the two trailing .word offsets)
   and tail-calls it.  */
static const unsigned long dl_tlsdesc_lazy_trampoline [] =
{
  0xe52d2004,		/*	push {r2}			*/
  0xe59f200c,		/*	ldr r2, [pc, #3f - . - 8]	*/
  0xe59f100c,		/*	ldr r1, [pc, #4f - . - 8]	*/
  0xe79f2002,		/* 1:	ldr r2, [pc, r2]		*/
  0xe081100f,		/* 2:	add r1, pc			*/
  0xe12fff12,		/*	bx r2				*/
  0x00000014,		/* 3:	.word _GLOBAL_OFFSET_TABLE_ - 1b - 8
				      + dl_tlsdesc_lazy_resolver(GOT) */
  0x00000018,		/* 4:	.word _GLOBAL_OFFSET_TABLE_ - 2b - 8 */
};
2165 #ifdef FOUR_WORD_PLT
2167 /* The first entry in a procedure linkage table looks like
2168 this. It is set up so that any shared library function that is
2169 called before the relocation has been set up calls the dynamic
2171 static const bfd_vma elf32_arm_plt0_entry
[] =
2173 0xe52de004, /* str lr, [sp, #-4]! */
2174 0xe59fe010, /* ldr lr, [pc, #16] */
2175 0xe08fe00e, /* add lr, pc, lr */
2176 0xe5bef008, /* ldr pc, [lr, #8]! */
2179 /* Subsequent entries in a procedure linkage table look like
2181 static const bfd_vma elf32_arm_plt_entry
[] =
2183 0xe28fc600, /* add ip, pc, #NN */
2184 0xe28cca00, /* add ip, ip, #NN */
2185 0xe5bcf000, /* ldr pc, [ip, #NN]! */
2186 0x00000000, /* unused */
2189 #else /* not FOUR_WORD_PLT */
2191 /* The first entry in a procedure linkage table looks like
2192 this. It is set up so that any shared library function that is
2193 called before the relocation has been set up calls the dynamic
2195 static const bfd_vma elf32_arm_plt0_entry
[] =
2197 0xe52de004, /* str lr, [sp, #-4]! */
2198 0xe59fe004, /* ldr lr, [pc, #4] */
2199 0xe08fe00e, /* add lr, pc, lr */
2200 0xe5bef008, /* ldr pc, [lr, #8]! */
2201 0x00000000, /* &GOT[0] - . */
2204 /* By default subsequent entries in a procedure linkage table look like
2205 this. Offsets that don't fit into 28 bits will cause link error. */
2206 static const bfd_vma elf32_arm_plt_entry_short
[] =
2208 0xe28fc600, /* add ip, pc, #0xNN00000 */
2209 0xe28cca00, /* add ip, ip, #0xNN000 */
2210 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
2213 /* When explicitly asked, we'll use this "long" entry format
2214 which can cope with arbitrary displacements. */
2215 static const bfd_vma elf32_arm_plt_entry_long
[] =
2217 0xe28fc200, /* add ip, pc, #0xN0000000 */
2218 0xe28cc600, /* add ip, ip, #0xNN00000 */
2219 0xe28cca00, /* add ip, ip, #0xNN000 */
2220 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
2223 static bfd_boolean elf32_arm_use_long_plt_entry
= FALSE
;
2225 #endif /* not FOUR_WORD_PLT */
2227 /* The first entry in a procedure linkage table looks like this.
2228 It is set up so that any shared library function that is called before the
2229 relocation has been set up calls the dynamic linker first. */
2230 static const bfd_vma elf32_thumb2_plt0_entry
[] =
2232 /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
2233 an instruction maybe encoded to one or two array elements. */
2234 0xf8dfb500, /* push {lr} */
2235 0x44fee008, /* ldr.w lr, [pc, #8] */
2237 0xff08f85e, /* ldr.w pc, [lr, #8]! */
2238 0x00000000, /* &GOT[0] - . */
2241 /* Subsequent entries in a procedure linkage table for thumb only target
2243 static const bfd_vma elf32_thumb2_plt_entry
[] =
2245 /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
2246 an instruction maybe encoded to one or two array elements. */
2247 0x0c00f240, /* movw ip, #0xNNNN */
2248 0x0c00f2c0, /* movt ip, #0xNNNN */
2249 0xf8dc44fc, /* add ip, pc */
2250 0xbf00f000 /* ldr.w pc, [ip] */
2254 /* The format of the first entry in the procedure linkage table
2255 for a VxWorks executable. */
2256 static const bfd_vma elf32_arm_vxworks_exec_plt0_entry
[] =
2258 0xe52dc008, /* str ip,[sp,#-8]! */
2259 0xe59fc000, /* ldr ip,[pc] */
2260 0xe59cf008, /* ldr pc,[ip,#8] */
2261 0x00000000, /* .long _GLOBAL_OFFSET_TABLE_ */
2264 /* The format of subsequent entries in a VxWorks executable. */
2265 static const bfd_vma elf32_arm_vxworks_exec_plt_entry
[] =
2267 0xe59fc000, /* ldr ip,[pc] */
2268 0xe59cf000, /* ldr pc,[ip] */
2269 0x00000000, /* .long @got */
2270 0xe59fc000, /* ldr ip,[pc] */
2271 0xea000000, /* b _PLT */
2272 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
2275 /* The format of entries in a VxWorks shared library. */
2276 static const bfd_vma elf32_arm_vxworks_shared_plt_entry
[] =
2278 0xe59fc000, /* ldr ip,[pc] */
2279 0xe79cf009, /* ldr pc,[ip,r9] */
2280 0x00000000, /* .long @got */
2281 0xe59fc000, /* ldr ip,[pc] */
2282 0xe599f008, /* ldr pc,[r9,#8] */
2283 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
2286 /* An initial stub used if the PLT entry is referenced from Thumb code. */
2287 #define PLT_THUMB_STUB_SIZE 4
2288 static const bfd_vma elf32_arm_plt_thumb_stub
[] =
2294 /* The entries in a PLT when using a DLL-based target with multiple
2296 static const bfd_vma elf32_arm_symbian_plt_entry
[] =
2298 0xe51ff004, /* ldr pc, [pc, #-4] */
2299 0x00000000, /* dcd R_ARM_GLOB_DAT(X) */
2302 /* The first entry in a procedure linkage table looks like
2303 this. It is set up so that any shared library function that is
2304 called before the relocation has been set up calls the dynamic
2306 static const bfd_vma elf32_arm_nacl_plt0_entry
[] =
2309 0xe300c000, /* movw ip, #:lower16:&GOT[2]-.+8 */
2310 0xe340c000, /* movt ip, #:upper16:&GOT[2]-.+8 */
2311 0xe08cc00f, /* add ip, ip, pc */
2312 0xe52dc008, /* str ip, [sp, #-8]! */
2313 /* Second bundle: */
2314 0xe3ccc103, /* bic ip, ip, #0xc0000000 */
2315 0xe59cc000, /* ldr ip, [ip] */
2316 0xe3ccc13f, /* bic ip, ip, #0xc000000f */
2317 0xe12fff1c, /* bx ip */
2319 0xe320f000, /* nop */
2320 0xe320f000, /* nop */
2321 0xe320f000, /* nop */
2323 0xe50dc004, /* str ip, [sp, #-4] */
2324 /* Fourth bundle: */
2325 0xe3ccc103, /* bic ip, ip, #0xc0000000 */
2326 0xe59cc000, /* ldr ip, [ip] */
2327 0xe3ccc13f, /* bic ip, ip, #0xc000000f */
2328 0xe12fff1c, /* bx ip */
2330 #define ARM_NACL_PLT_TAIL_OFFSET (11 * 4)
2332 /* Subsequent entries in a procedure linkage table look like this. */
2333 static const bfd_vma elf32_arm_nacl_plt_entry
[] =
2335 0xe300c000, /* movw ip, #:lower16:&GOT[n]-.+8 */
2336 0xe340c000, /* movt ip, #:upper16:&GOT[n]-.+8 */
2337 0xe08cc00f, /* add ip, ip, pc */
2338 0xea000000, /* b .Lplt_tail */
2341 #define ARM_MAX_FWD_BRANCH_OFFSET ((((1 << 23) - 1) << 2) + 8)
2342 #define ARM_MAX_BWD_BRANCH_OFFSET ((-((1 << 23) << 2)) + 8)
2343 #define THM_MAX_FWD_BRANCH_OFFSET ((1 << 22) -2 + 4)
2344 #define THM_MAX_BWD_BRANCH_OFFSET (-(1 << 22) + 4)
2345 #define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
2346 #define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
2347 #define THM2_MAX_FWD_COND_BRANCH_OFFSET (((1 << 20) -2) + 4)
2348 #define THM2_MAX_BWD_COND_BRANCH_OFFSET (-(1 << 20) + 4)
2358 #define THUMB16_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 0}
2359 /* A bit of a hack. A Thumb conditional branch, in which the proper condition
2360 is inserted in arm_build_one_stub(). */
2361 #define THUMB16_BCOND_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 1}
2362 #define THUMB32_INSN(X) {(X), THUMB32_TYPE, R_ARM_NONE, 0}
2363 #define THUMB32_MOVT(X) {(X), THUMB32_TYPE, R_ARM_THM_MOVT_ABS, 0}
2364 #define THUMB32_MOVW(X) {(X), THUMB32_TYPE, R_ARM_THM_MOVW_ABS_NC, 0}
2365 #define THUMB32_B_INSN(X, Z) {(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
2366 #define ARM_INSN(X) {(X), ARM_TYPE, R_ARM_NONE, 0}
2367 #define ARM_REL_INSN(X, Z) {(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
2368 #define DATA_WORD(X,Y,Z) {(X), DATA_TYPE, (Y), (Z)}
2373 enum stub_insn_type type
;
2374 unsigned int r_type
;
2378 /* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
2379 to reach the stub if necessary. */
2380 static const insn_sequence elf32_arm_stub_long_branch_any_any
[] =
2382 ARM_INSN (0xe51ff004), /* ldr pc, [pc, #-4] */
2383 DATA_WORD (0, R_ARM_ABS32
, 0), /* dcd R_ARM_ABS32(X) */
2386 /* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
2388 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb
[] =
2390 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2391 ARM_INSN (0xe12fff1c), /* bx ip */
2392 DATA_WORD (0, R_ARM_ABS32
, 0), /* dcd R_ARM_ABS32(X) */
2395 /* Thumb -> Thumb long branch stub. Used on M-profile architectures. */
2396 static const insn_sequence elf32_arm_stub_long_branch_thumb_only
[] =
2398 THUMB16_INSN (0xb401), /* push {r0} */
2399 THUMB16_INSN (0x4802), /* ldr r0, [pc, #8] */
2400 THUMB16_INSN (0x4684), /* mov ip, r0 */
2401 THUMB16_INSN (0xbc01), /* pop {r0} */
2402 THUMB16_INSN (0x4760), /* bx ip */
2403 THUMB16_INSN (0xbf00), /* nop */
2404 DATA_WORD (0, R_ARM_ABS32
, 0), /* dcd R_ARM_ABS32(X) */
2407 /* Thumb -> Thumb long branch stub in thumb2 encoding. Used on armv7. */
2408 static const insn_sequence elf32_arm_stub_long_branch_thumb2_only
[] =
2410 THUMB32_INSN (0xf85ff000), /* ldr.w pc, [pc, #-0] */
2411 DATA_WORD (0, R_ARM_ABS32
, 0), /* dcd R_ARM_ABS32(x) */
2414 /* Thumb -> Thumb long branch stub. Used for PureCode sections on Thumb2
2415 M-profile architectures. */
2416 static const insn_sequence elf32_arm_stub_long_branch_thumb2_only_pure
[] =
2418 THUMB32_MOVW (0xf2400c00), /* mov.w ip, R_ARM_MOVW_ABS_NC */
2419 THUMB32_MOVT (0xf2c00c00), /* movt ip, R_ARM_MOVT_ABS << 16 */
2420 THUMB16_INSN (0x4760), /* bx ip */
2423 /* V4T Thumb -> Thumb long branch stub. Using the stack is not
2425 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb
[] =
2427 THUMB16_INSN (0x4778), /* bx pc */
2428 THUMB16_INSN (0x46c0), /* nop */
2429 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2430 ARM_INSN (0xe12fff1c), /* bx ip */
2431 DATA_WORD (0, R_ARM_ABS32
, 0), /* dcd R_ARM_ABS32(X) */
2434 /* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
2436 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm
[] =
2438 THUMB16_INSN (0x4778), /* bx pc */
2439 THUMB16_INSN (0x46c0), /* nop */
2440 ARM_INSN (0xe51ff004), /* ldr pc, [pc, #-4] */
2441 DATA_WORD (0, R_ARM_ABS32
, 0), /* dcd R_ARM_ABS32(X) */
2444 /* V4T Thumb -> ARM short branch stub. Shorter variant of the above
2445 one, when the destination is close enough. */
2446 static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm
[] =
2448 THUMB16_INSN (0x4778), /* bx pc */
2449 THUMB16_INSN (0x46c0), /* nop */
2450 ARM_REL_INSN (0xea000000, -8), /* b (X-8) */
2453 /* ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use
2454 blx to reach the stub if necessary. */
2455 static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic
[] =
2457 ARM_INSN (0xe59fc000), /* ldr ip, [pc] */
2458 ARM_INSN (0xe08ff00c), /* add pc, pc, ip */
2459 DATA_WORD (0, R_ARM_REL32
, -4), /* dcd R_ARM_REL32(X-4) */
2462 /* ARM/Thumb -> Thumb long branch stub, PIC. On V5T and above, use
2463 blx to reach the stub if necessary. We can not add into pc;
2464 it is not guaranteed to mode switch (different in ARMv6 and
2466 static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic
[] =
2468 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2469 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2470 ARM_INSN (0xe12fff1c), /* bx ip */
2471 DATA_WORD (0, R_ARM_REL32
, 0), /* dcd R_ARM_REL32(X) */
2474 /* V4T ARM -> ARM long branch stub, PIC. */
2475 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic
[] =
2477 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2478 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2479 ARM_INSN (0xe12fff1c), /* bx ip */
2480 DATA_WORD (0, R_ARM_REL32
, 0), /* dcd R_ARM_REL32(X) */
2483 /* V4T Thumb -> ARM long branch stub, PIC. */
2484 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic
[] =
2486 THUMB16_INSN (0x4778), /* bx pc */
2487 THUMB16_INSN (0x46c0), /* nop */
2488 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2489 ARM_INSN (0xe08cf00f), /* add pc, ip, pc */
2490 DATA_WORD (0, R_ARM_REL32
, -4), /* dcd R_ARM_REL32(X) */
2493 /* Thumb -> Thumb long branch stub, PIC. Used on M-profile
2495 static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic
[] =
2497 THUMB16_INSN (0xb401), /* push {r0} */
2498 THUMB16_INSN (0x4802), /* ldr r0, [pc, #8] */
2499 THUMB16_INSN (0x46fc), /* mov ip, pc */
2500 THUMB16_INSN (0x4484), /* add ip, r0 */
2501 THUMB16_INSN (0xbc01), /* pop {r0} */
2502 THUMB16_INSN (0x4760), /* bx ip */
2503 DATA_WORD (0, R_ARM_REL32
, 4), /* dcd R_ARM_REL32(X) */
2506 /* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
2508 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic
[] =
2510 THUMB16_INSN (0x4778), /* bx pc */
2511 THUMB16_INSN (0x46c0), /* nop */
2512 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2513 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2514 ARM_INSN (0xe12fff1c), /* bx ip */
2515 DATA_WORD (0, R_ARM_REL32
, 0), /* dcd R_ARM_REL32(X) */
2518 /* Thumb2/ARM -> TLS trampoline. Lowest common denominator, which is a
2519 long PIC stub. We can use r1 as a scratch -- and cannot use ip. */
2520 static const insn_sequence elf32_arm_stub_long_branch_any_tls_pic
[] =
2522 ARM_INSN (0xe59f1000), /* ldr r1, [pc] */
2523 ARM_INSN (0xe08ff001), /* add pc, pc, r1 */
2524 DATA_WORD (0, R_ARM_REL32
, -4), /* dcd R_ARM_REL32(X-4) */
2527 /* V4T Thumb -> TLS trampoline. lowest common denominator, which is a
2528 long PIC stub. We can use r1 as a scratch -- and cannot use ip. */
2529 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_tls_pic
[] =
2531 THUMB16_INSN (0x4778), /* bx pc */
2532 THUMB16_INSN (0x46c0), /* nop */
2533 ARM_INSN (0xe59f1000), /* ldr r1, [pc, #0] */
2534 ARM_INSN (0xe081f00f), /* add pc, r1, pc */
2535 DATA_WORD (0, R_ARM_REL32
, -4), /* dcd R_ARM_REL32(X) */
2538 /* NaCl ARM -> ARM long branch stub. */
2539 static const insn_sequence elf32_arm_stub_long_branch_arm_nacl
[] =
2541 ARM_INSN (0xe59fc00c), /* ldr ip, [pc, #12] */
2542 ARM_INSN (0xe3ccc13f), /* bic ip, ip, #0xc000000f */
2543 ARM_INSN (0xe12fff1c), /* bx ip */
2544 ARM_INSN (0xe320f000), /* nop */
2545 ARM_INSN (0xe125be70), /* bkpt 0x5be0 */
2546 DATA_WORD (0, R_ARM_ABS32
, 0), /* dcd R_ARM_ABS32(X) */
2547 DATA_WORD (0, R_ARM_NONE
, 0), /* .word 0 */
2548 DATA_WORD (0, R_ARM_NONE
, 0), /* .word 0 */
2551 /* NaCl ARM -> ARM long branch stub, PIC. */
2552 static const insn_sequence elf32_arm_stub_long_branch_arm_nacl_pic
[] =
2554 ARM_INSN (0xe59fc00c), /* ldr ip, [pc, #12] */
2555 ARM_INSN (0xe08cc00f), /* add ip, ip, pc */
2556 ARM_INSN (0xe3ccc13f), /* bic ip, ip, #0xc000000f */
2557 ARM_INSN (0xe12fff1c), /* bx ip */
2558 ARM_INSN (0xe125be70), /* bkpt 0x5be0 */
2559 DATA_WORD (0, R_ARM_REL32
, 8), /* dcd R_ARM_REL32(X+8) */
2560 DATA_WORD (0, R_ARM_NONE
, 0), /* .word 0 */
2561 DATA_WORD (0, R_ARM_NONE
, 0), /* .word 0 */
2565 /* Cortex-A8 erratum-workaround stubs. */
2567 /* Stub used for conditional branches (which may be beyond +/-1MB away, so we
2568 can't use a conditional branch to reach this stub). */
2570 static const insn_sequence elf32_arm_stub_a8_veneer_b_cond
[] =
2572 THUMB16_BCOND_INSN (0xd001), /* b<cond>.n true. */
2573 THUMB32_B_INSN (0xf000b800, -4), /* b.w insn_after_original_branch. */
2574 THUMB32_B_INSN (0xf000b800, -4) /* true: b.w original_branch_dest. */
2577 /* Stub used for b.w and bl.w instructions. */
2579 static const insn_sequence elf32_arm_stub_a8_veneer_b
[] =
2581 THUMB32_B_INSN (0xf000b800, -4) /* b.w original_branch_dest. */
2584 static const insn_sequence elf32_arm_stub_a8_veneer_bl
[] =
2586 THUMB32_B_INSN (0xf000b800, -4) /* b.w original_branch_dest. */
2589 /* Stub used for Thumb-2 blx.w instructions. We modified the original blx.w
2590 instruction (which switches to ARM mode) to point to this stub. Jump to the
2591 real destination using an ARM-mode branch. */
2593 static const insn_sequence elf32_arm_stub_a8_veneer_blx
[] =
2595 ARM_REL_INSN (0xea000000, -8) /* b original_branch_dest. */
2598 /* For each section group there can be a specially created linker section
2599 to hold the stubs for that group. The name of the stub section is based
2600 upon the name of another section within that group with the suffix below
2603 PR 13049: STUB_SUFFIX used to be ".stub", but this allowed the user to
2604 create what appeared to be a linker stub section when it actually
2605 contained user code/data. For example, consider this fragment:
2607 const char * stubborn_problems[] = { "np" };
2609 If this is compiled with "-fPIC -fdata-sections" then gcc produces a
2612 .data.rel.local.stubborn_problems
2614 This then causes problems in arm32_arm_build_stubs() as it triggers:
2616 // Ignore non-stub sections.
2617 if (!strstr (stub_sec->name, STUB_SUFFIX))
2620 And so the section would be ignored instead of being processed. Hence
2621 the change in definition of STUB_SUFFIX to a name that cannot be a valid
2623 #define STUB_SUFFIX ".__stub"
/* One entry per long/short branch stub defined above.  */
#define DEF_STUBS \
  DEF_STUB(long_branch_any_any)	\
  DEF_STUB(long_branch_v4t_arm_thumb) \
  DEF_STUB(long_branch_thumb_only) \
  DEF_STUB(long_branch_v4t_thumb_thumb)	\
  DEF_STUB(long_branch_v4t_thumb_arm) \
  DEF_STUB(short_branch_v4t_thumb_arm) \
  DEF_STUB(long_branch_any_arm_pic) \
  DEF_STUB(long_branch_any_thumb_pic) \
  DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
  DEF_STUB(long_branch_v4t_arm_thumb_pic) \
  DEF_STUB(long_branch_v4t_thumb_arm_pic) \
  DEF_STUB(long_branch_thumb_only_pic) \
  DEF_STUB(long_branch_any_tls_pic) \
  DEF_STUB(long_branch_v4t_thumb_tls_pic) \
  DEF_STUB(long_branch_arm_nacl) \
  DEF_STUB(long_branch_arm_nacl_pic) \
  DEF_STUB(a8_veneer_b_cond) \
  DEF_STUB(a8_veneer_b) \
  DEF_STUB(a8_veneer_bl) \
  DEF_STUB(a8_veneer_blx) \
  DEF_STUB(long_branch_thumb2_only) \
  DEF_STUB(long_branch_thumb2_only_pure)
2650 #define DEF_STUB(x) arm_stub_##x,
2651 enum elf32_arm_stub_type
2659 /* Note the first a8_veneer type. */
2660 const unsigned arm_stub_a8_veneer_lwm
= arm_stub_a8_veneer_b_cond
;
2664 const insn_sequence
* template_sequence
;
2668 #define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
2669 static const stub_def stub_definitions
[] =
2675 struct elf32_arm_stub_hash_entry
2677 /* Base hash table entry structure. */
2678 struct bfd_hash_entry root
;
2680 /* The stub section. */
2683 /* Offset within stub_sec of the beginning of this stub. */
2684 bfd_vma stub_offset
;
2686 /* Given the symbol's value and its section we can determine its final
2687 value when building the stubs (so the stub knows where to jump). */
2688 bfd_vma target_value
;
2689 asection
*target_section
;
2691 /* Same as above but for the source of the branch to the stub. Used for
2692 Cortex-A8 erratum workaround to patch it to branch to the stub. As
2693 such, source section does not need to be recorded since Cortex-A8 erratum
2694 workaround stubs are only generated when both source and target are in the
2696 bfd_vma source_value
;
2698 /* The instruction which caused this stub to be generated (only valid for
2699 Cortex-A8 erratum workaround stubs at present). */
2700 unsigned long orig_insn
;
2702 /* The stub type. */
2703 enum elf32_arm_stub_type stub_type
;
2704 /* Its encoding size in bytes. */
2707 const insn_sequence
*stub_template
;
2708 /* The size of the template (number of entries). */
2709 int stub_template_size
;
2711 /* The symbol table entry, if any, that this was derived from. */
2712 struct elf32_arm_link_hash_entry
*h
;
2714 /* Type of branch. */
2715 enum arm_st_branch_type branch_type
;
2717 /* Where this stub is being called from, or, in the case of combined
2718 stub sections, the first input section in the group. */
2721 /* The name for the local symbol at the start of this stub. The
2722 stub name in the hash table has to be unique; this does not, so
2723 it can be friendlier. */
2727 /* Used to build a map of a section. This is required for mixed-endian
2730 typedef struct elf32_elf_section_map
2735 elf32_arm_section_map
;
2737 /* Information about a VFP11 erratum veneer, or a branch to such a veneer. */
2741 VFP11_ERRATUM_BRANCH_TO_ARM_VENEER
,
2742 VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER
,
2743 VFP11_ERRATUM_ARM_VENEER
,
2744 VFP11_ERRATUM_THUMB_VENEER
2746 elf32_vfp11_erratum_type
;
2748 typedef struct elf32_vfp11_erratum_list
2750 struct elf32_vfp11_erratum_list
*next
;
2756 struct elf32_vfp11_erratum_list
*veneer
;
2757 unsigned int vfp_insn
;
2761 struct elf32_vfp11_erratum_list
*branch
;
2765 elf32_vfp11_erratum_type type
;
2767 elf32_vfp11_erratum_list
;
2769 /* Information about a STM32L4XX erratum veneer, or a branch to such a
2773 STM32L4XX_ERRATUM_BRANCH_TO_VENEER
,
2774 STM32L4XX_ERRATUM_VENEER
2776 elf32_stm32l4xx_erratum_type
;
2778 typedef struct elf32_stm32l4xx_erratum_list
2780 struct elf32_stm32l4xx_erratum_list
*next
;
2786 struct elf32_stm32l4xx_erratum_list
*veneer
;
2791 struct elf32_stm32l4xx_erratum_list
*branch
;
2795 elf32_stm32l4xx_erratum_type type
;
2797 elf32_stm32l4xx_erratum_list
;
2802 INSERT_EXIDX_CANTUNWIND_AT_END
2804 arm_unwind_edit_type
;
2806 /* A (sorted) list of edits to apply to an unwind table. */
2807 typedef struct arm_unwind_table_edit
2809 arm_unwind_edit_type type
;
2810 /* Note: we sometimes want to insert an unwind entry corresponding to a
2811 section different from the one we're currently writing out, so record the
2812 (text) section this edit relates to here. */
2813 asection
*linked_section
;
2815 struct arm_unwind_table_edit
*next
;
2817 arm_unwind_table_edit
;
2819 typedef struct _arm_elf_section_data
2821 /* Information about mapping symbols. */
2822 struct bfd_elf_section_data elf
;
2823 unsigned int mapcount
;
2824 unsigned int mapsize
;
2825 elf32_arm_section_map
*map
;
2826 /* Information about CPU errata. */
2827 unsigned int erratumcount
;
2828 elf32_vfp11_erratum_list
*erratumlist
;
2829 unsigned int stm32l4xx_erratumcount
;
2830 elf32_stm32l4xx_erratum_list
*stm32l4xx_erratumlist
;
2831 unsigned int additional_reloc_count
;
2832 /* Information about unwind tables. */
2835 /* Unwind info attached to a text section. */
2838 asection
*arm_exidx_sec
;
2841 /* Unwind info attached to an .ARM.exidx section. */
2844 arm_unwind_table_edit
*unwind_edit_list
;
2845 arm_unwind_table_edit
*unwind_edit_tail
;
2849 _arm_elf_section_data
;
2851 #define elf32_arm_section_data(sec) \
2852 ((_arm_elf_section_data *) elf_section_data (sec))
2854 /* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
2855 These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
2856 so may be created multiple times: we use an array of these entries whilst
2857 relaxing which we can refresh easily, then create stubs for each potentially
2858 erratum-triggering instruction once we've settled on a solution. */
2860 struct a8_erratum_fix
2865 bfd_vma target_offset
;
2866 unsigned long orig_insn
;
2868 enum elf32_arm_stub_type stub_type
;
2869 enum arm_st_branch_type branch_type
;
2872 /* A table of relocs applied to branches which might trigger Cortex-A8
2875 struct a8_erratum_reloc
2878 bfd_vma destination
;
2879 struct elf32_arm_link_hash_entry
*hash
;
2880 const char *sym_name
;
2881 unsigned int r_type
;
2882 enum arm_st_branch_type branch_type
;
2883 bfd_boolean non_a8_stub
;
2886 /* The size of the thread control block. */
2889 /* ARM-specific information about a PLT entry, over and above the usual
2893 /* We reference count Thumb references to a PLT entry separately,
2894 so that we can emit the Thumb trampoline only if needed. */
2895 bfd_signed_vma thumb_refcount
;
2897 /* Some references from Thumb code may be eliminated by BL->BLX
2898 conversion, so record them separately. */
2899 bfd_signed_vma maybe_thumb_refcount
;
2901 /* How many of the recorded PLT accesses were from non-call relocations.
2902 This information is useful when deciding whether anything takes the
2903 address of an STT_GNU_IFUNC PLT. A value of 0 means that all
2904 non-call references to the function should resolve directly to the
2905 real runtime target. */
2906 unsigned int noncall_refcount
;
2908 /* Since PLT entries have variable size if the Thumb prologue is
2909 used, we need to record the index into .got.plt instead of
2910 recomputing it from the PLT offset. */
2911 bfd_signed_vma got_offset
;
2914 /* Information about an .iplt entry for a local STT_GNU_IFUNC symbol. */
2915 struct arm_local_iplt_info
2917 /* The information that is usually found in the generic ELF part of
2918 the hash table entry. */
2919 union gotplt_union root
;
2921 /* The information that is usually found in the ARM-specific part of
2922 the hash table entry. */
2923 struct arm_plt_info arm
;
2925 /* A list of all potential dynamic relocations against this symbol. */
2926 struct elf_dyn_relocs
*dyn_relocs
;
2929 struct elf_arm_obj_tdata
2931 struct elf_obj_tdata root
;
2933 /* tls_type for each local got entry. */
2934 char *local_got_tls_type
;
2936 /* GOTPLT entries for TLS descriptors. */
2937 bfd_vma
*local_tlsdesc_gotent
;
2939 /* Information for local symbols that need entries in .iplt. */
2940 struct arm_local_iplt_info
**local_iplt
;
2942 /* Zero to warn when linking objects with incompatible enum sizes. */
2943 int no_enum_size_warning
;
2945 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
2946 int no_wchar_size_warning
;
2949 #define elf_arm_tdata(bfd) \
2950 ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)
2952 #define elf32_arm_local_got_tls_type(bfd) \
2953 (elf_arm_tdata (bfd)->local_got_tls_type)
2955 #define elf32_arm_local_tlsdesc_gotent(bfd) \
2956 (elf_arm_tdata (bfd)->local_tlsdesc_gotent)
2958 #define elf32_arm_local_iplt(bfd) \
2959 (elf_arm_tdata (bfd)->local_iplt)
2961 #define is_arm_elf(bfd) \
2962 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
2963 && elf_tdata (bfd) != NULL \
2964 && elf_object_id (bfd) == ARM_ELF_DATA)
2967 elf32_arm_mkobject (bfd
*abfd
)
2969 return bfd_elf_allocate_object (abfd
, sizeof (struct elf_arm_obj_tdata
),
2973 #define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))
2975 /* Arm ELF linker hash entry. */
2976 struct elf32_arm_link_hash_entry
2978 struct elf_link_hash_entry root
;
2980 /* Track dynamic relocs copied for this symbol. */
2981 struct elf_dyn_relocs
*dyn_relocs
;
2983 /* ARM-specific PLT information. */
2984 struct arm_plt_info plt
;
2986 #define GOT_UNKNOWN 0
2987 #define GOT_NORMAL 1
2988 #define GOT_TLS_GD 2
2989 #define GOT_TLS_IE 4
2990 #define GOT_TLS_GDESC 8
2991 #define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLS_GDESC))
2992 unsigned int tls_type
: 8;
2994 /* True if the symbol's PLT entry is in .iplt rather than .plt. */
2995 unsigned int is_iplt
: 1;
2997 unsigned int unused
: 23;
2999 /* Offset of the GOTPLT entry reserved for the TLS descriptor,
3000 starting at the end of the jump table. */
3001 bfd_vma tlsdesc_got
;
3003 /* The symbol marking the real symbol location for exported thumb
3004 symbols with Arm stubs. */
3005 struct elf_link_hash_entry
*export_glue
;
3007 /* A pointer to the most recently used stub hash entry against this
3009 struct elf32_arm_stub_hash_entry
*stub_cache
;
/* Traverse an arm ELF linker hash table.  */
#define elf32_arm_link_hash_traverse(table, func, info)			\
  (elf_link_hash_traverse						\
   (&(table)->root,							\
    (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func),	\
    (info)))
3019 /* Get the ARM elf linker hash table from a link_info structure. */
3020 #define elf32_arm_hash_table(info) \
3021 (elf_hash_table_id ((struct elf_link_hash_table *) ((info)->hash)) \
3022 == ARM_ELF_DATA ? ((struct elf32_arm_link_hash_table *) ((info)->hash)) : NULL)
3024 #define arm_stub_hash_lookup(table, string, create, copy) \
3025 ((struct elf32_arm_stub_hash_entry *) \
3026 bfd_hash_lookup ((table), (string), (create), (copy)))
3028 /* Array to keep track of which stub sections have been created, and
3029 information on stub grouping. */
3032 /* This is the section to which stubs in the group will be
3035 /* The stub section. */
3039 #define elf32_arm_compute_jump_table_size(htab) \
3040 ((htab)->next_tls_desc_index * 4)
3042 /* ARM ELF linker hash table. */
3043 struct elf32_arm_link_hash_table
3045 /* The main hash table. */
3046 struct elf_link_hash_table root
;
3048 /* The size in bytes of the section containing the Thumb-to-ARM glue. */
3049 bfd_size_type thumb_glue_size
;
3051 /* The size in bytes of the section containing the ARM-to-Thumb glue. */
3052 bfd_size_type arm_glue_size
;
3054 /* The size in bytes of section containing the ARMv4 BX veneers. */
3055 bfd_size_type bx_glue_size
;
3057 /* Offsets of ARMv4 BX veneers. Bit1 set if present, and Bit0 set when
3058 veneer has been populated. */
3059 bfd_vma bx_glue_offset
[15];
3061 /* The size in bytes of the section containing glue for VFP11 erratum
3063 bfd_size_type vfp11_erratum_glue_size
;
3065 /* The size in bytes of the section containing glue for STM32L4XX erratum
3067 bfd_size_type stm32l4xx_erratum_glue_size
;
3069 /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum. This
3070 holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
3071 elf32_arm_write_section(). */
3072 struct a8_erratum_fix
*a8_erratum_fixes
;
3073 unsigned int num_a8_erratum_fixes
;
3075 /* An arbitrary input BFD chosen to hold the glue sections. */
3076 bfd
* bfd_of_glue_owner
;
3078 /* Nonzero to output a BE8 image. */
3081 /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
3082 Nonzero if R_ARM_TARGET1 means R_ARM_REL32. */
3085 /* The relocation to use for R_ARM_TARGET2 relocations. */
3088 /* 0 = Ignore R_ARM_V4BX.
3089 1 = Convert BX to MOV PC.
3090 2 = Generate v4 interworing stubs. */
3093 /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum. */
3096 /* Whether we should fix the ARM1176 BLX immediate issue. */
3099 /* Nonzero if the ARM/Thumb BLX instructions are available for use. */
3102 /* What sort of code sequences we should look for which may trigger the
3103 VFP11 denorm erratum. */
3104 bfd_arm_vfp11_fix vfp11_fix
;
3106 /* Global counter for the number of fixes we have emitted. */
3107 int num_vfp11_fixes
;
3109 /* What sort of code sequences we should look for which may trigger the
3110 STM32L4XX erratum. */
3111 bfd_arm_stm32l4xx_fix stm32l4xx_fix
;
3113 /* Global counter for the number of fixes we have emitted. */
3114 int num_stm32l4xx_fixes
;
3116 /* Nonzero to force PIC branch veneers. */
3119 /* The number of bytes in the initial entry in the PLT. */
3120 bfd_size_type plt_header_size
;
3122 /* The number of bytes in the subsequent PLT etries. */
3123 bfd_size_type plt_entry_size
;
3125 /* True if the target system is VxWorks. */
3128 /* True if the target system is Symbian OS. */
3131 /* True if the target system is Native Client. */
3134 /* True if the target uses REL relocations. */
3137 /* The index of the next unused R_ARM_TLS_DESC slot in .rel.plt. */
3138 bfd_vma next_tls_desc_index
;
3140 /* How many R_ARM_TLS_DESC relocations were generated so far. */
3141 bfd_vma num_tls_desc
;
3143 /* Short-cuts to get to dynamic linker sections. */
3147 /* The (unloaded but important) VxWorks .rela.plt.unloaded section. */
3150 /* The offset into splt of the PLT entry for the TLS descriptor
3151 resolver. Special values are 0, if not necessary (or not found
3152 to be necessary yet), and -1 if needed but not determined
3154 bfd_vma dt_tlsdesc_plt
;
3156 /* The offset into sgot of the GOT entry used by the PLT entry
3158 bfd_vma dt_tlsdesc_got
;
3160 /* Offset in .plt section of tls_arm_trampoline. */
3161 bfd_vma tls_trampoline
;
3163 /* Data for R_ARM_TLS_LDM32 relocations. */
3166 bfd_signed_vma refcount
;
3170 /* Small local sym cache. */
3171 struct sym_cache sym_cache
;
3173 /* For convenience in allocate_dynrelocs. */
3176 /* The amount of space used by the reserved portion of the sgotplt
3177 section, plus whatever space is used by the jump slots. */
3178 bfd_vma sgotplt_jump_table_size
;
3180 /* The stub hash table. */
3181 struct bfd_hash_table stub_hash_table
;
3183 /* Linker stub bfd. */
3186 /* Linker call-backs. */
3187 asection
* (*add_stub_section
) (const char *, asection
*, asection
*,
3189 void (*layout_sections_again
) (void);
3191 /* Array to keep track of which stub sections have been created, and
3192 information on stub grouping. */
3193 struct map_stub
*stub_group
;
3195 /* Number of elements in stub_group. */
3196 unsigned int top_id
;
3198 /* Assorted information used by elf32_arm_size_stubs. */
3199 unsigned int bfd_count
;
3200 unsigned int top_index
;
3201 asection
**input_list
;
/* Count trailing zero bits of MASK.  Falls back to a portable loop when
   __builtin_ctz is unavailable; the fallback returns the bit width of
   MASK (8 * sizeof) when MASK is zero, whereas __builtin_ctz (0) is
   undefined -- callers must not pass zero.  */

static inline int
ctz (unsigned int mask)
{
#if GCC_VERSION >= 3004
  return __builtin_ctz (mask);
#else
  unsigned int i;

  for (i = 0; i < 8 * sizeof (mask); i++)
    {
      if (mask & 0x1)
	break;
      mask = (mask >> 1);
    }
  return i;
#endif
}
/* Count the number of set bits in MASK.  Falls back to a portable loop
   when __builtin_popcount is unavailable.  */

static inline int
popcount (unsigned int mask)
{
#if GCC_VERSION >= 3004
  return __builtin_popcount (mask);
#else
  unsigned int i, sum = 0;

  for (i = 0; i < 8 * sizeof (mask); i++)
    {
      if (mask & 0x1)
	sum++;
      mask = (mask >> 1);
    }
  return sum;
#endif
}
3240 /* Create an entry in an ARM ELF linker hash table. */
3242 static struct bfd_hash_entry
*
3243 elf32_arm_link_hash_newfunc (struct bfd_hash_entry
* entry
,
3244 struct bfd_hash_table
* table
,
3245 const char * string
)
3247 struct elf32_arm_link_hash_entry
* ret
=
3248 (struct elf32_arm_link_hash_entry
*) entry
;
3250 /* Allocate the structure if it has not already been allocated by a
3253 ret
= (struct elf32_arm_link_hash_entry
*)
3254 bfd_hash_allocate (table
, sizeof (struct elf32_arm_link_hash_entry
));
3256 return (struct bfd_hash_entry
*) ret
;
3258 /* Call the allocation method of the superclass. */
3259 ret
= ((struct elf32_arm_link_hash_entry
*)
3260 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry
*) ret
,
3264 ret
->dyn_relocs
= NULL
;
3265 ret
->tls_type
= GOT_UNKNOWN
;
3266 ret
->tlsdesc_got
= (bfd_vma
) -1;
3267 ret
->plt
.thumb_refcount
= 0;
3268 ret
->plt
.maybe_thumb_refcount
= 0;
3269 ret
->plt
.noncall_refcount
= 0;
3270 ret
->plt
.got_offset
= -1;
3271 ret
->is_iplt
= FALSE
;
3272 ret
->export_glue
= NULL
;
3274 ret
->stub_cache
= NULL
;
3277 return (struct bfd_hash_entry
*) ret
;
3280 /* Ensure that we have allocated bookkeeping structures for ABFD's local
3284 elf32_arm_allocate_local_sym_info (bfd
*abfd
)
3286 if (elf_local_got_refcounts (abfd
) == NULL
)
3288 bfd_size_type num_syms
;
3292 num_syms
= elf_tdata (abfd
)->symtab_hdr
.sh_info
;
3293 size
= num_syms
* (sizeof (bfd_signed_vma
)
3294 + sizeof (struct arm_local_iplt_info
*)
3297 data
= bfd_zalloc (abfd
, size
);
3301 elf_local_got_refcounts (abfd
) = (bfd_signed_vma
*) data
;
3302 data
+= num_syms
* sizeof (bfd_signed_vma
);
3304 elf32_arm_local_iplt (abfd
) = (struct arm_local_iplt_info
**) data
;
3305 data
+= num_syms
* sizeof (struct arm_local_iplt_info
*);
3307 elf32_arm_local_tlsdesc_gotent (abfd
) = (bfd_vma
*) data
;
3308 data
+= num_syms
* sizeof (bfd_vma
);
3310 elf32_arm_local_got_tls_type (abfd
) = data
;
3315 /* Return the .iplt information for local symbol R_SYMNDX, which belongs
3316 to input bfd ABFD. Create the information if it doesn't already exist.
3317 Return null if an allocation fails. */
3319 static struct arm_local_iplt_info
*
3320 elf32_arm_create_local_iplt (bfd
*abfd
, unsigned long r_symndx
)
3322 struct arm_local_iplt_info
**ptr
;
3324 if (!elf32_arm_allocate_local_sym_info (abfd
))
3327 BFD_ASSERT (r_symndx
< elf_tdata (abfd
)->symtab_hdr
.sh_info
);
3328 ptr
= &elf32_arm_local_iplt (abfd
)[r_symndx
];
3330 *ptr
= bfd_zalloc (abfd
, sizeof (**ptr
));
3334 /* Try to obtain PLT information for the symbol with index R_SYMNDX
3335 in ABFD's symbol table. If the symbol is global, H points to its
3336 hash table entry, otherwise H is null.
3338 Return true if the symbol does have PLT information. When returning
3339 true, point *ROOT_PLT at the target-independent reference count/offset
3340 union and *ARM_PLT at the ARM-specific information. */
3343 elf32_arm_get_plt_info (bfd
*abfd
, struct elf32_arm_link_hash_entry
*h
,
3344 unsigned long r_symndx
, union gotplt_union
**root_plt
,
3345 struct arm_plt_info
**arm_plt
)
3347 struct arm_local_iplt_info
*local_iplt
;
3351 *root_plt
= &h
->root
.plt
;
3356 if (elf32_arm_local_iplt (abfd
) == NULL
)
3359 local_iplt
= elf32_arm_local_iplt (abfd
)[r_symndx
];
3360 if (local_iplt
== NULL
)
3363 *root_plt
= &local_iplt
->root
;
3364 *arm_plt
= &local_iplt
->arm
;
3368 /* Return true if the PLT described by ARM_PLT requires a Thumb stub
3372 elf32_arm_plt_needs_thumb_stub_p (struct bfd_link_info
*info
,
3373 struct arm_plt_info
*arm_plt
)
3375 struct elf32_arm_link_hash_table
*htab
;
3377 htab
= elf32_arm_hash_table (info
);
3378 return (arm_plt
->thumb_refcount
!= 0
3379 || (!htab
->use_blx
&& arm_plt
->maybe_thumb_refcount
!= 0));
3382 /* Return a pointer to the head of the dynamic reloc list that should
3383 be used for local symbol ISYM, which is symbol number R_SYMNDX in
3384 ABFD's symbol table. Return null if an error occurs. */
3386 static struct elf_dyn_relocs
**
3387 elf32_arm_get_local_dynreloc_list (bfd
*abfd
, unsigned long r_symndx
,
3388 Elf_Internal_Sym
*isym
)
3390 if (ELF32_ST_TYPE (isym
->st_info
) == STT_GNU_IFUNC
)
3392 struct arm_local_iplt_info
*local_iplt
;
3394 local_iplt
= elf32_arm_create_local_iplt (abfd
, r_symndx
);
3395 if (local_iplt
== NULL
)
3397 return &local_iplt
->dyn_relocs
;
3401 /* Track dynamic relocs needed for local syms too.
3402 We really need local syms available to do this
3407 s
= bfd_section_from_elf_index (abfd
, isym
->st_shndx
);
3411 vpp
= &elf_section_data (s
)->local_dynrel
;
3412 return (struct elf_dyn_relocs
**) vpp
;
3416 /* Initialize an entry in the stub hash table. */
3418 static struct bfd_hash_entry
*
3419 stub_hash_newfunc (struct bfd_hash_entry
*entry
,
3420 struct bfd_hash_table
*table
,
3423 /* Allocate the structure if it has not already been allocated by a
3427 entry
= (struct bfd_hash_entry
*)
3428 bfd_hash_allocate (table
, sizeof (struct elf32_arm_stub_hash_entry
));
3433 /* Call the allocation method of the superclass. */
3434 entry
= bfd_hash_newfunc (entry
, table
, string
);
3437 struct elf32_arm_stub_hash_entry
*eh
;
3439 /* Initialize the local fields. */
3440 eh
= (struct elf32_arm_stub_hash_entry
*) entry
;
3441 eh
->stub_sec
= NULL
;
3442 eh
->stub_offset
= 0;
3443 eh
->source_value
= 0;
3444 eh
->target_value
= 0;
3445 eh
->target_section
= NULL
;
3447 eh
->stub_type
= arm_stub_none
;
3449 eh
->stub_template
= NULL
;
3450 eh
->stub_template_size
= 0;
3453 eh
->output_name
= NULL
;
3459 /* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
3460 shortcuts to them in our hash table. */
3463 create_got_section (bfd
*dynobj
, struct bfd_link_info
*info
)
3465 struct elf32_arm_link_hash_table
*htab
;
3467 htab
= elf32_arm_hash_table (info
);
3471 /* BPABI objects never have a GOT, or associated sections. */
3472 if (htab
->symbian_p
)
3475 if (! _bfd_elf_create_got_section (dynobj
, info
))
3481 /* Create the .iplt, .rel(a).iplt and .igot.plt sections. */
3484 create_ifunc_sections (struct bfd_link_info
*info
)
3486 struct elf32_arm_link_hash_table
*htab
;
3487 const struct elf_backend_data
*bed
;
3492 htab
= elf32_arm_hash_table (info
);
3493 dynobj
= htab
->root
.dynobj
;
3494 bed
= get_elf_backend_data (dynobj
);
3495 flags
= bed
->dynamic_sec_flags
;
3497 if (htab
->root
.iplt
== NULL
)
3499 s
= bfd_make_section_anyway_with_flags (dynobj
, ".iplt",
3500 flags
| SEC_READONLY
| SEC_CODE
);
3502 || !bfd_set_section_alignment (dynobj
, s
, bed
->plt_alignment
))
3504 htab
->root
.iplt
= s
;
3507 if (htab
->root
.irelplt
== NULL
)
3509 s
= bfd_make_section_anyway_with_flags (dynobj
,
3510 RELOC_SECTION (htab
, ".iplt"),
3511 flags
| SEC_READONLY
);
3513 || !bfd_set_section_alignment (dynobj
, s
, bed
->s
->log_file_align
))
3515 htab
->root
.irelplt
= s
;
3518 if (htab
->root
.igotplt
== NULL
)
3520 s
= bfd_make_section_anyway_with_flags (dynobj
, ".igot.plt", flags
);
3522 || !bfd_set_section_alignment (dynobj
, s
, bed
->s
->log_file_align
))
3524 htab
->root
.igotplt
= s
;
3529 /* Determine if we're dealing with a Thumb only architecture. */
3532 using_thumb_only (struct elf32_arm_link_hash_table
*globals
)
3535 int profile
= bfd_elf_get_obj_attr_int (globals
->obfd
, OBJ_ATTR_PROC
,
3536 Tag_CPU_arch_profile
);
3539 return profile
== 'M';
3541 arch
= bfd_elf_get_obj_attr_int (globals
->obfd
, OBJ_ATTR_PROC
, Tag_CPU_arch
);
3543 /* Force return logic to be reviewed for each new architecture. */
3544 BFD_ASSERT (arch
<= TAG_CPU_ARCH_V8
3545 || arch
== TAG_CPU_ARCH_V8M_BASE
3546 || arch
== TAG_CPU_ARCH_V8M_MAIN
);
3548 if (arch
== TAG_CPU_ARCH_V6_M
3549 || arch
== TAG_CPU_ARCH_V6S_M
3550 || arch
== TAG_CPU_ARCH_V7E_M
3551 || arch
== TAG_CPU_ARCH_V8M_BASE
3552 || arch
== TAG_CPU_ARCH_V8M_MAIN
)
3558 /* Determine if we're dealing with a Thumb-2 object. */
3561 using_thumb2 (struct elf32_arm_link_hash_table
*globals
)
3564 int thumb_isa
= bfd_elf_get_obj_attr_int (globals
->obfd
, OBJ_ATTR_PROC
,
3568 return thumb_isa
== 2;
3570 arch
= bfd_elf_get_obj_attr_int (globals
->obfd
, OBJ_ATTR_PROC
, Tag_CPU_arch
);
3572 /* Force return logic to be reviewed for each new architecture. */
3573 BFD_ASSERT (arch
<= TAG_CPU_ARCH_V8
3574 || arch
== TAG_CPU_ARCH_V8M_BASE
3575 || arch
== TAG_CPU_ARCH_V8M_MAIN
);
3577 return (arch
== TAG_CPU_ARCH_V6T2
3578 || arch
== TAG_CPU_ARCH_V7
3579 || arch
== TAG_CPU_ARCH_V7E_M
3580 || arch
== TAG_CPU_ARCH_V8
3581 || arch
== TAG_CPU_ARCH_V8M_MAIN
);
3584 /* Determine whether Thumb-2 BL instruction is available. */
3587 using_thumb2_bl (struct elf32_arm_link_hash_table
*globals
)
3590 bfd_elf_get_obj_attr_int (globals
->obfd
, OBJ_ATTR_PROC
, Tag_CPU_arch
);
3592 /* Force return logic to be reviewed for each new architecture. */
3593 BFD_ASSERT (arch
<= TAG_CPU_ARCH_V8
3594 || arch
== TAG_CPU_ARCH_V8M_BASE
3595 || arch
== TAG_CPU_ARCH_V8M_MAIN
);
3597 /* Architecture was introduced after ARMv6T2 (eg. ARMv6-M). */
3598 return (arch
== TAG_CPU_ARCH_V6T2
3599 || arch
>= TAG_CPU_ARCH_V7
);
3602 /* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
3603 .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
3607 elf32_arm_create_dynamic_sections (bfd
*dynobj
, struct bfd_link_info
*info
)
3609 struct elf32_arm_link_hash_table
*htab
;
3611 htab
= elf32_arm_hash_table (info
);
3615 if (!htab
->root
.sgot
&& !create_got_section (dynobj
, info
))
3618 if (!_bfd_elf_create_dynamic_sections (dynobj
, info
))
3621 htab
->sdynbss
= bfd_get_linker_section (dynobj
, ".dynbss");
3622 if (!bfd_link_pic (info
))
3623 htab
->srelbss
= bfd_get_linker_section (dynobj
,
3624 RELOC_SECTION (htab
, ".bss"));
3626 if (htab
->vxworks_p
)
3628 if (!elf_vxworks_create_dynamic_sections (dynobj
, info
, &htab
->srelplt2
))
3631 if (bfd_link_pic (info
))
3633 htab
->plt_header_size
= 0;
3634 htab
->plt_entry_size
3635 = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry
);
3639 htab
->plt_header_size
3640 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry
);
3641 htab
->plt_entry_size
3642 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry
);
3645 if (elf_elfheader (dynobj
))
3646 elf_elfheader (dynobj
)->e_ident
[EI_CLASS
] = ELFCLASS32
;
3651 Test for thumb only architectures. Note - we cannot just call
3652 using_thumb_only() as the attributes in the output bfd have not been
3653 initialised at this point, so instead we use the input bfd. */
3654 bfd
* saved_obfd
= htab
->obfd
;
3656 htab
->obfd
= dynobj
;
3657 if (using_thumb_only (htab
))
3659 htab
->plt_header_size
= 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry
);
3660 htab
->plt_entry_size
= 4 * ARRAY_SIZE (elf32_thumb2_plt_entry
);
3662 htab
->obfd
= saved_obfd
;
3665 if (!htab
->root
.splt
3666 || !htab
->root
.srelplt
3668 || (!bfd_link_pic (info
) && !htab
->srelbss
))
3674 /* Copy the extra info we tack onto an elf_link_hash_entry. */
3677 elf32_arm_copy_indirect_symbol (struct bfd_link_info
*info
,
3678 struct elf_link_hash_entry
*dir
,
3679 struct elf_link_hash_entry
*ind
)
3681 struct elf32_arm_link_hash_entry
*edir
, *eind
;
3683 edir
= (struct elf32_arm_link_hash_entry
*) dir
;
3684 eind
= (struct elf32_arm_link_hash_entry
*) ind
;
3686 if (eind
->dyn_relocs
!= NULL
)
3688 if (edir
->dyn_relocs
!= NULL
)
3690 struct elf_dyn_relocs
**pp
;
3691 struct elf_dyn_relocs
*p
;
3693 /* Add reloc counts against the indirect sym to the direct sym
3694 list. Merge any entries against the same section. */
3695 for (pp
= &eind
->dyn_relocs
; (p
= *pp
) != NULL
; )
3697 struct elf_dyn_relocs
*q
;
3699 for (q
= edir
->dyn_relocs
; q
!= NULL
; q
= q
->next
)
3700 if (q
->sec
== p
->sec
)
3702 q
->pc_count
+= p
->pc_count
;
3703 q
->count
+= p
->count
;
3710 *pp
= edir
->dyn_relocs
;
3713 edir
->dyn_relocs
= eind
->dyn_relocs
;
3714 eind
->dyn_relocs
= NULL
;
3717 if (ind
->root
.type
== bfd_link_hash_indirect
)
3719 /* Copy over PLT info. */
3720 edir
->plt
.thumb_refcount
+= eind
->plt
.thumb_refcount
;
3721 eind
->plt
.thumb_refcount
= 0;
3722 edir
->plt
.maybe_thumb_refcount
+= eind
->plt
.maybe_thumb_refcount
;
3723 eind
->plt
.maybe_thumb_refcount
= 0;
3724 edir
->plt
.noncall_refcount
+= eind
->plt
.noncall_refcount
;
3725 eind
->plt
.noncall_refcount
= 0;
3727 /* We should only allocate a function to .iplt once the final
3728 symbol information is known. */
3729 BFD_ASSERT (!eind
->is_iplt
);
3731 if (dir
->got
.refcount
<= 0)
3733 edir
->tls_type
= eind
->tls_type
;
3734 eind
->tls_type
= GOT_UNKNOWN
;
3738 _bfd_elf_link_hash_copy_indirect (info
, dir
, ind
);
3741 /* Destroy an ARM elf linker hash table. */
3744 elf32_arm_link_hash_table_free (bfd
*obfd
)
3746 struct elf32_arm_link_hash_table
*ret
3747 = (struct elf32_arm_link_hash_table
*) obfd
->link
.hash
;
3749 bfd_hash_table_free (&ret
->stub_hash_table
);
3750 _bfd_elf_link_hash_table_free (obfd
);
3753 /* Create an ARM elf linker hash table. */
3755 static struct bfd_link_hash_table
*
3756 elf32_arm_link_hash_table_create (bfd
*abfd
)
3758 struct elf32_arm_link_hash_table
*ret
;
3759 bfd_size_type amt
= sizeof (struct elf32_arm_link_hash_table
);
3761 ret
= (struct elf32_arm_link_hash_table
*) bfd_zmalloc (amt
);
3765 if (!_bfd_elf_link_hash_table_init (& ret
->root
, abfd
,
3766 elf32_arm_link_hash_newfunc
,
3767 sizeof (struct elf32_arm_link_hash_entry
),
3774 ret
->vfp11_fix
= BFD_ARM_VFP11_FIX_NONE
;
3775 ret
->stm32l4xx_fix
= BFD_ARM_STM32L4XX_FIX_NONE
;
3776 #ifdef FOUR_WORD_PLT
3777 ret
->plt_header_size
= 16;
3778 ret
->plt_entry_size
= 16;
3780 ret
->plt_header_size
= 20;
3781 ret
->plt_entry_size
= elf32_arm_use_long_plt_entry
? 16 : 12;
3786 if (!bfd_hash_table_init (&ret
->stub_hash_table
, stub_hash_newfunc
,
3787 sizeof (struct elf32_arm_stub_hash_entry
)))
3789 _bfd_elf_link_hash_table_free (abfd
);
3792 ret
->root
.root
.hash_table_free
= elf32_arm_link_hash_table_free
;
3794 return &ret
->root
.root
;
3797 /* Determine what kind of NOPs are available. */
3800 arch_has_arm_nop (struct elf32_arm_link_hash_table
*globals
)
3802 const int arch
= bfd_elf_get_obj_attr_int (globals
->obfd
, OBJ_ATTR_PROC
,
3805 /* Force return logic to be reviewed for each new architecture. */
3806 BFD_ASSERT (arch
<= TAG_CPU_ARCH_V8
3807 || arch
== TAG_CPU_ARCH_V8M_BASE
3808 || arch
== TAG_CPU_ARCH_V8M_MAIN
);
3810 return (arch
== TAG_CPU_ARCH_V6T2
3811 || arch
== TAG_CPU_ARCH_V6K
3812 || arch
== TAG_CPU_ARCH_V7
3813 || arch
== TAG_CPU_ARCH_V8
);
3817 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type
)
3821 case arm_stub_long_branch_thumb_only
:
3822 case arm_stub_long_branch_thumb2_only
:
3823 case arm_stub_long_branch_thumb2_only_pure
:
3824 case arm_stub_long_branch_v4t_thumb_arm
:
3825 case arm_stub_short_branch_v4t_thumb_arm
:
3826 case arm_stub_long_branch_v4t_thumb_arm_pic
:
3827 case arm_stub_long_branch_v4t_thumb_tls_pic
:
3828 case arm_stub_long_branch_thumb_only_pic
:
3839 /* Determine the type of stub needed, if any, for a call. */
3841 static enum elf32_arm_stub_type
3842 arm_type_of_stub (struct bfd_link_info
*info
,
3843 asection
*input_sec
,
3844 const Elf_Internal_Rela
*rel
,
3845 unsigned char st_type
,
3846 enum arm_st_branch_type
*actual_branch_type
,
3847 struct elf32_arm_link_hash_entry
*hash
,
3848 bfd_vma destination
,
3854 bfd_signed_vma branch_offset
;
3855 unsigned int r_type
;
3856 struct elf32_arm_link_hash_table
* globals
;
3857 bfd_boolean thumb2
, thumb2_bl
, thumb_only
;
3858 enum elf32_arm_stub_type stub_type
= arm_stub_none
;
3860 enum arm_st_branch_type branch_type
= *actual_branch_type
;
3861 union gotplt_union
*root_plt
;
3862 struct arm_plt_info
*arm_plt
;
3866 if (branch_type
== ST_BRANCH_LONG
)
3869 globals
= elf32_arm_hash_table (info
);
3870 if (globals
== NULL
)
3873 thumb_only
= using_thumb_only (globals
);
3874 thumb2
= using_thumb2 (globals
);
3875 thumb2_bl
= using_thumb2_bl (globals
);
3877 arch
= bfd_elf_get_obj_attr_int (globals
->obfd
, OBJ_ATTR_PROC
, Tag_CPU_arch
);
3879 /* True for architectures that implement the thumb2 movw instruction. */
3880 thumb2_movw
= thumb2
|| (arch
== TAG_CPU_ARCH_V8M_BASE
);
3882 /* Determine where the call point is. */
3883 location
= (input_sec
->output_offset
3884 + input_sec
->output_section
->vma
3887 r_type
= ELF32_R_TYPE (rel
->r_info
);
3889 /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
3890 are considering a function call relocation. */
3891 if (thumb_only
&& (r_type
== R_ARM_THM_CALL
|| r_type
== R_ARM_THM_JUMP24
3892 || r_type
== R_ARM_THM_JUMP19
)
3893 && branch_type
== ST_BRANCH_TO_ARM
)
3894 branch_type
= ST_BRANCH_TO_THUMB
;
3896 /* For TLS call relocs, it is the caller's responsibility to provide
3897 the address of the appropriate trampoline. */
3898 if (r_type
!= R_ARM_TLS_CALL
3899 && r_type
!= R_ARM_THM_TLS_CALL
3900 && elf32_arm_get_plt_info (input_bfd
, hash
, ELF32_R_SYM (rel
->r_info
),
3901 &root_plt
, &arm_plt
)
3902 && root_plt
->offset
!= (bfd_vma
) -1)
3906 if (hash
== NULL
|| hash
->is_iplt
)
3907 splt
= globals
->root
.iplt
;
3909 splt
= globals
->root
.splt
;
3914 /* Note when dealing with PLT entries: the main PLT stub is in
3915 ARM mode, so if the branch is in Thumb mode, another
3916 Thumb->ARM stub will be inserted later just before the ARM
3917 PLT stub. We don't take this extra distance into account
3918 here, because if a long branch stub is needed, we'll add a
3919 Thumb->Arm one and branch directly to the ARM PLT entry
3920 because it avoids spreading offset corrections in several
3923 destination
= (splt
->output_section
->vma
3924 + splt
->output_offset
3925 + root_plt
->offset
);
3927 branch_type
= ST_BRANCH_TO_ARM
;
3930 /* Calls to STT_GNU_IFUNC symbols should go through a PLT. */
3931 BFD_ASSERT (st_type
!= STT_GNU_IFUNC
);
3933 branch_offset
= (bfd_signed_vma
)(destination
- location
);
3935 if (r_type
== R_ARM_THM_CALL
|| r_type
== R_ARM_THM_JUMP24
3936 || r_type
== R_ARM_THM_TLS_CALL
|| r_type
== R_ARM_THM_JUMP19
)
3938 /* Handle cases where:
3939 - this call goes too far (different Thumb/Thumb2 max
3941 - it's a Thumb->Arm call and blx is not available, or it's a
3942 Thumb->Arm branch (not bl). A stub is needed in this case,
3943 but only if this call is not through a PLT entry. Indeed,
3944 PLT stubs handle mode switching already.
3947 && (branch_offset
> THM_MAX_FWD_BRANCH_OFFSET
3948 || (branch_offset
< THM_MAX_BWD_BRANCH_OFFSET
)))
3950 && (branch_offset
> THM2_MAX_FWD_BRANCH_OFFSET
3951 || (branch_offset
< THM2_MAX_BWD_BRANCH_OFFSET
)))
3953 && (branch_offset
> THM2_MAX_FWD_COND_BRANCH_OFFSET
3954 || (branch_offset
< THM2_MAX_BWD_COND_BRANCH_OFFSET
))
3955 && (r_type
== R_ARM_THM_JUMP19
))
3956 || (branch_type
== ST_BRANCH_TO_ARM
3957 && (((r_type
== R_ARM_THM_CALL
3958 || r_type
== R_ARM_THM_TLS_CALL
) && !globals
->use_blx
)
3959 || (r_type
== R_ARM_THM_JUMP24
)
3960 || (r_type
== R_ARM_THM_JUMP19
))
3963 if (branch_type
== ST_BRANCH_TO_THUMB
)
3965 /* Thumb to thumb. */
3968 if (input_sec
->flags
& SEC_ELF_PURECODE
)
3969 (*_bfd_error_handler
) (_("%B(%s): warning: long branch "
3970 " veneers used in section with "
3971 "SHF_ARM_PURECODE section "
3972 "attribute is only supported"
3973 " for M-profile targets that "
3974 "implement the movw "
3977 stub_type
= (bfd_link_pic (info
) | globals
->pic_veneer
)
3979 ? ((globals
->use_blx
3980 && (r_type
== R_ARM_THM_CALL
))
3981 /* V5T and above. Stub starts with ARM code, so
3982 we must be able to switch mode before
3983 reaching it, which is only possible for 'bl'
3984 (ie R_ARM_THM_CALL relocation). */
3985 ? arm_stub_long_branch_any_thumb_pic
3986 /* On V4T, use Thumb code only. */
3987 : arm_stub_long_branch_v4t_thumb_thumb_pic
)
3989 /* non-PIC stubs. */
3990 : ((globals
->use_blx
3991 && (r_type
== R_ARM_THM_CALL
))
3992 /* V5T and above. */
3993 ? arm_stub_long_branch_any_any
3995 : arm_stub_long_branch_v4t_thumb_thumb
);
3999 if (thumb2_movw
&& (input_sec
->flags
& SEC_ELF_PURECODE
))
4000 stub_type
= arm_stub_long_branch_thumb2_only_pure
;
4003 if (input_sec
->flags
& SEC_ELF_PURECODE
)
4004 (*_bfd_error_handler
) (_("%B(%s): warning: long branch "
4005 " veneers used in section with "
4006 "SHF_ARM_PURECODE section "
4007 "attribute is only supported"
4008 " for M-profile targets that "
4009 "implement the movw "
4012 stub_type
= (bfd_link_pic (info
) | globals
->pic_veneer
)
4014 ? arm_stub_long_branch_thumb_only_pic
4016 : (thumb2
? arm_stub_long_branch_thumb2_only
4017 : arm_stub_long_branch_thumb_only
);
4023 if (input_sec
->flags
& SEC_ELF_PURECODE
)
4024 (*_bfd_error_handler
) (_("%B(%s): warning: long branch "
4025 " veneers used in section with "
4026 "SHF_ARM_PURECODE section "
4027 "attribute is only supported"
4028 " for M-profile targets that "
4029 "implement the movw "
4034 && sym_sec
->owner
!= NULL
4035 && !INTERWORK_FLAG (sym_sec
->owner
))
4037 (*_bfd_error_handler
)
4038 (_("%B(%s): warning: interworking not enabled.\n"
4039 " first occurrence: %B: Thumb call to ARM"),
4040 sym_sec
->owner
, input_bfd
, name
);
4044 (bfd_link_pic (info
) | globals
->pic_veneer
)
4046 ? (r_type
== R_ARM_THM_TLS_CALL
4047 /* TLS PIC stubs. */
4048 ? (globals
->use_blx
? arm_stub_long_branch_any_tls_pic
4049 : arm_stub_long_branch_v4t_thumb_tls_pic
)
4050 : ((globals
->use_blx
&& r_type
== R_ARM_THM_CALL
)
4051 /* V5T PIC and above. */
4052 ? arm_stub_long_branch_any_arm_pic
4054 : arm_stub_long_branch_v4t_thumb_arm_pic
))
4056 /* non-PIC stubs. */
4057 : ((globals
->use_blx
&& r_type
== R_ARM_THM_CALL
)
4058 /* V5T and above. */
4059 ? arm_stub_long_branch_any_any
4061 : arm_stub_long_branch_v4t_thumb_arm
);
4063 /* Handle v4t short branches. */
4064 if ((stub_type
== arm_stub_long_branch_v4t_thumb_arm
)
4065 && (branch_offset
<= THM_MAX_FWD_BRANCH_OFFSET
)
4066 && (branch_offset
>= THM_MAX_BWD_BRANCH_OFFSET
))
4067 stub_type
= arm_stub_short_branch_v4t_thumb_arm
;
4071 else if (r_type
== R_ARM_CALL
4072 || r_type
== R_ARM_JUMP24
4073 || r_type
== R_ARM_PLT32
4074 || r_type
== R_ARM_TLS_CALL
)
4076 if (input_sec
->flags
& SEC_ELF_PURECODE
)
4077 (*_bfd_error_handler
) (_("%B(%s): warning: long branch "
4078 " veneers used in section with "
4079 "SHF_ARM_PURECODE section "
4080 "attribute is only supported"
4081 " for M-profile targets that "
4082 "implement the movw "
4084 if (branch_type
== ST_BRANCH_TO_THUMB
)
4089 && sym_sec
->owner
!= NULL
4090 && !INTERWORK_FLAG (sym_sec
->owner
))
4092 (*_bfd_error_handler
)
4093 (_("%B(%s): warning: interworking not enabled.\n"
4094 " first occurrence: %B: ARM call to Thumb"),
4095 sym_sec
->owner
, input_bfd
, name
);
4098 /* We have an extra 2-bytes reach because of
4099 the mode change (bit 24 (H) of BLX encoding). */
4100 if (branch_offset
> (ARM_MAX_FWD_BRANCH_OFFSET
+ 2)
4101 || (branch_offset
< ARM_MAX_BWD_BRANCH_OFFSET
)
4102 || (r_type
== R_ARM_CALL
&& !globals
->use_blx
)
4103 || (r_type
== R_ARM_JUMP24
)
4104 || (r_type
== R_ARM_PLT32
))
4106 stub_type
= (bfd_link_pic (info
) | globals
->pic_veneer
)
4108 ? ((globals
->use_blx
)
4109 /* V5T and above. */
4110 ? arm_stub_long_branch_any_thumb_pic
4112 : arm_stub_long_branch_v4t_arm_thumb_pic
)
4114 /* non-PIC stubs. */
4115 : ((globals
->use_blx
)
4116 /* V5T and above. */
4117 ? arm_stub_long_branch_any_any
4119 : arm_stub_long_branch_v4t_arm_thumb
);
4125 if (branch_offset
> ARM_MAX_FWD_BRANCH_OFFSET
4126 || (branch_offset
< ARM_MAX_BWD_BRANCH_OFFSET
))
4129 (bfd_link_pic (info
) | globals
->pic_veneer
)
4131 ? (r_type
== R_ARM_TLS_CALL
4133 ? arm_stub_long_branch_any_tls_pic
4135 ? arm_stub_long_branch_arm_nacl_pic
4136 : arm_stub_long_branch_any_arm_pic
))
4137 /* non-PIC stubs. */
4139 ? arm_stub_long_branch_arm_nacl
4140 : arm_stub_long_branch_any_any
);
4145 /* If a stub is needed, record the actual destination type. */
4146 if (stub_type
!= arm_stub_none
)
4147 *actual_branch_type
= branch_type
;
4152 /* Build a name for an entry in the stub hash table. */
4155 elf32_arm_stub_name (const asection
*input_section
,
4156 const asection
*sym_sec
,
4157 const struct elf32_arm_link_hash_entry
*hash
,
4158 const Elf_Internal_Rela
*rel
,
4159 enum elf32_arm_stub_type stub_type
)
4166 len
= 8 + 1 + strlen (hash
->root
.root
.root
.string
) + 1 + 8 + 1 + 2 + 1;
4167 stub_name
= (char *) bfd_malloc (len
);
4168 if (stub_name
!= NULL
)
4169 sprintf (stub_name
, "%08x_%s+%x_%d",
4170 input_section
->id
& 0xffffffff,
4171 hash
->root
.root
.root
.string
,
4172 (int) rel
->r_addend
& 0xffffffff,
4177 len
= 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 2 + 1;
4178 stub_name
= (char *) bfd_malloc (len
);
4179 if (stub_name
!= NULL
)
4180 sprintf (stub_name
, "%08x_%x:%x+%x_%d",
4181 input_section
->id
& 0xffffffff,
4182 sym_sec
->id
& 0xffffffff,
4183 ELF32_R_TYPE (rel
->r_info
) == R_ARM_TLS_CALL
4184 || ELF32_R_TYPE (rel
->r_info
) == R_ARM_THM_TLS_CALL
4185 ? 0 : (int) ELF32_R_SYM (rel
->r_info
) & 0xffffffff,
4186 (int) rel
->r_addend
& 0xffffffff,
4193 /* Look up an entry in the stub hash. Stub entries are cached because
4194 creating the stub name takes a bit of time. */
4196 static struct elf32_arm_stub_hash_entry
*
4197 elf32_arm_get_stub_entry (const asection
*input_section
,
4198 const asection
*sym_sec
,
4199 struct elf_link_hash_entry
*hash
,
4200 const Elf_Internal_Rela
*rel
,
4201 struct elf32_arm_link_hash_table
*htab
,
4202 enum elf32_arm_stub_type stub_type
)
4204 struct elf32_arm_stub_hash_entry
*stub_entry
;
4205 struct elf32_arm_link_hash_entry
*h
= (struct elf32_arm_link_hash_entry
*) hash
;
4206 const asection
*id_sec
;
4208 if ((input_section
->flags
& SEC_CODE
) == 0)
4211 /* If this input section is part of a group of sections sharing one
4212 stub section, then use the id of the first section in the group.
4213 Stub names need to include a section id, as there may well be
4214 more than one stub used to reach say, printf, and we need to
4215 distinguish between them. */
4216 id_sec
= htab
->stub_group
[input_section
->id
].link_sec
;
4218 if (h
!= NULL
&& h
->stub_cache
!= NULL
4219 && h
->stub_cache
->h
== h
4220 && h
->stub_cache
->id_sec
== id_sec
4221 && h
->stub_cache
->stub_type
== stub_type
)
4223 stub_entry
= h
->stub_cache
;
4229 stub_name
= elf32_arm_stub_name (id_sec
, sym_sec
, h
, rel
, stub_type
);
4230 if (stub_name
== NULL
)
4233 stub_entry
= arm_stub_hash_lookup (&htab
->stub_hash_table
,
4234 stub_name
, FALSE
, FALSE
);
4236 h
->stub_cache
= stub_entry
;
4244 /* Whether veneers of type STUB_TYPE require to be in a dedicated output
4248 arm_dedicated_stub_output_section_required (enum elf32_arm_stub_type stub_type
)
4250 if (stub_type
>= max_stub_type
)
4251 abort (); /* Should be unreachable. */
4256 /* Required alignment (as a power of 2) for the dedicated section holding
4257 veneers of type STUB_TYPE, or 0 if veneers of this type are interspersed
4258 with input sections. */
4261 arm_dedicated_stub_output_section_required_alignment
4262 (enum elf32_arm_stub_type stub_type
)
4264 if (stub_type
>= max_stub_type
)
4265 abort (); /* Should be unreachable. */
4267 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type
));
4271 /* Name of the dedicated output section to put veneers of type STUB_TYPE, or
4272 NULL if veneers of this type are interspersed with input sections. */
4275 arm_dedicated_stub_output_section_name (enum elf32_arm_stub_type stub_type
)
4277 if (stub_type
>= max_stub_type
)
4278 abort (); /* Should be unreachable. */
4280 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type
));
4284 /* If veneers of type STUB_TYPE should go in a dedicated output section,
4285 returns the address of the hash table field in HTAB holding a pointer to the
4286 corresponding input section. Otherwise, returns NULL. */
4289 arm_dedicated_stub_input_section_ptr
4290 (struct elf32_arm_link_hash_table
*htab ATTRIBUTE_UNUSED
,
4291 enum elf32_arm_stub_type stub_type
)
4293 if (stub_type
>= max_stub_type
)
4294 abort (); /* Should be unreachable. */
4296 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type
));
4300 /* Find or create a stub section to contain a stub of type STUB_TYPE. SECTION
4301 is the section that branch into veneer and can be NULL if stub should go in
4302 a dedicated output section. Returns a pointer to the stub section, and the
4303 section to which the stub section will be attached (in *LINK_SEC_P).
4304 LINK_SEC_P may be NULL. */
4307 elf32_arm_create_or_find_stub_sec (asection
**link_sec_p
, asection
*section
,
4308 struct elf32_arm_link_hash_table
*htab
,
4309 enum elf32_arm_stub_type stub_type
)
4311 asection
*link_sec
, *out_sec
, **stub_sec_p
;
4312 const char *stub_sec_prefix
;
4313 bfd_boolean dedicated_output_section
=
4314 arm_dedicated_stub_output_section_required (stub_type
);
4317 if (dedicated_output_section
)
4319 bfd
*output_bfd
= htab
->obfd
;
4320 const char *out_sec_name
=
4321 arm_dedicated_stub_output_section_name (stub_type
);
4323 stub_sec_p
= arm_dedicated_stub_input_section_ptr (htab
, stub_type
);
4324 stub_sec_prefix
= out_sec_name
;
4325 align
= arm_dedicated_stub_output_section_required_alignment (stub_type
);
4326 out_sec
= bfd_get_section_by_name (output_bfd
, out_sec_name
);
4327 if (out_sec
== NULL
)
4329 (*_bfd_error_handler
) (_("No address assigned to the veneers output "
4330 "section %s"), out_sec_name
);
4336 link_sec
= htab
->stub_group
[section
->id
].link_sec
;
4337 BFD_ASSERT (link_sec
!= NULL
);
4338 stub_sec_p
= &htab
->stub_group
[section
->id
].stub_sec
;
4339 if (*stub_sec_p
== NULL
)
4340 stub_sec_p
= &htab
->stub_group
[link_sec
->id
].stub_sec
;
4341 stub_sec_prefix
= link_sec
->name
;
4342 out_sec
= link_sec
->output_section
;
4343 align
= htab
->nacl_p
? 4 : 3;
4346 if (*stub_sec_p
== NULL
)
4352 namelen
= strlen (stub_sec_prefix
);
4353 len
= namelen
+ sizeof (STUB_SUFFIX
);
4354 s_name
= (char *) bfd_alloc (htab
->stub_bfd
, len
);
4358 memcpy (s_name
, stub_sec_prefix
, namelen
);
4359 memcpy (s_name
+ namelen
, STUB_SUFFIX
, sizeof (STUB_SUFFIX
));
4360 *stub_sec_p
= (*htab
->add_stub_section
) (s_name
, out_sec
, link_sec
,
4362 if (*stub_sec_p
== NULL
)
4365 out_sec
->flags
|= SEC_ALLOC
| SEC_LOAD
| SEC_READONLY
| SEC_CODE
4366 | SEC_HAS_CONTENTS
| SEC_RELOC
| SEC_IN_MEMORY
4370 if (!dedicated_output_section
)
4371 htab
->stub_group
[section
->id
].stub_sec
= *stub_sec_p
;
4374 *link_sec_p
= link_sec
;
4379 /* Add a new stub entry to the stub hash. Not all fields of the new
4380 stub entry are initialised. */
4382 static struct elf32_arm_stub_hash_entry
*
4383 elf32_arm_add_stub (const char *stub_name
, asection
*section
,
4384 struct elf32_arm_link_hash_table
*htab
,
4385 enum elf32_arm_stub_type stub_type
)
4389 struct elf32_arm_stub_hash_entry
*stub_entry
;
4391 stub_sec
= elf32_arm_create_or_find_stub_sec (&link_sec
, section
, htab
,
4393 if (stub_sec
== NULL
)
4396 /* Enter this entry into the linker stub hash table. */
4397 stub_entry
= arm_stub_hash_lookup (&htab
->stub_hash_table
, stub_name
,
4399 if (stub_entry
== NULL
)
4401 if (section
== NULL
)
4403 (*_bfd_error_handler
) (_("%s: cannot create stub entry %s"),
4409 stub_entry
->stub_sec
= stub_sec
;
4410 stub_entry
->stub_offset
= 0;
4411 stub_entry
->id_sec
= link_sec
;
4416 /* Store an Arm insn into an output section not processed by
4417 elf32_arm_write_section. */
4420 put_arm_insn (struct elf32_arm_link_hash_table
* htab
,
4421 bfd
* output_bfd
, bfd_vma val
, void * ptr
)
4423 if (htab
->byteswap_code
!= bfd_little_endian (output_bfd
))
4424 bfd_putl32 (val
, ptr
);
4426 bfd_putb32 (val
, ptr
);
4429 /* Store a 16-bit Thumb insn into an output section not processed by
4430 elf32_arm_write_section. */
4433 put_thumb_insn (struct elf32_arm_link_hash_table
* htab
,
4434 bfd
* output_bfd
, bfd_vma val
, void * ptr
)
4436 if (htab
->byteswap_code
!= bfd_little_endian (output_bfd
))
4437 bfd_putl16 (val
, ptr
);
4439 bfd_putb16 (val
, ptr
);
4442 /* Store a Thumb2 insn into an output section not processed by
4443 elf32_arm_write_section. */
4446 put_thumb2_insn (struct elf32_arm_link_hash_table
* htab
,
4447 bfd
* output_bfd
, bfd_vma val
, bfd_byte
* ptr
)
4449 /* T2 instructions are 16-bit streamed. */
4450 if (htab
->byteswap_code
!= bfd_little_endian (output_bfd
))
4452 bfd_putl16 ((val
>> 16) & 0xffff, ptr
);
4453 bfd_putl16 ((val
& 0xffff), ptr
+ 2);
4457 bfd_putb16 ((val
>> 16) & 0xffff, ptr
);
4458 bfd_putb16 ((val
& 0xffff), ptr
+ 2);
4462 /* If it's possible to change R_TYPE to a more efficient access
4463 model, return the new reloc type. */
4466 elf32_arm_tls_transition (struct bfd_link_info
*info
, int r_type
,
4467 struct elf_link_hash_entry
*h
)
4469 int is_local
= (h
== NULL
);
4471 if (bfd_link_pic (info
)
4472 || (h
&& h
->root
.type
== bfd_link_hash_undefweak
))
4475 /* We do not support relaxations for Old TLS models. */
4478 case R_ARM_TLS_GOTDESC
:
4479 case R_ARM_TLS_CALL
:
4480 case R_ARM_THM_TLS_CALL
:
4481 case R_ARM_TLS_DESCSEQ
:
4482 case R_ARM_THM_TLS_DESCSEQ
:
4483 return is_local
? R_ARM_TLS_LE32
: R_ARM_TLS_IE32
;
4489 static bfd_reloc_status_type elf32_arm_final_link_relocate
4490 (reloc_howto_type
*, bfd
*, bfd
*, asection
*, bfd_byte
*,
4491 Elf_Internal_Rela
*, bfd_vma
, struct bfd_link_info
*, asection
*,
4492 const char *, unsigned char, enum arm_st_branch_type
,
4493 struct elf_link_hash_entry
*, bfd_boolean
*, char **);
4496 arm_stub_required_alignment (enum elf32_arm_stub_type stub_type
)
4500 case arm_stub_a8_veneer_b_cond
:
4501 case arm_stub_a8_veneer_b
:
4502 case arm_stub_a8_veneer_bl
:
4505 case arm_stub_long_branch_any_any
:
4506 case arm_stub_long_branch_v4t_arm_thumb
:
4507 case arm_stub_long_branch_thumb_only
:
4508 case arm_stub_long_branch_thumb2_only
:
4509 case arm_stub_long_branch_thumb2_only_pure
:
4510 case arm_stub_long_branch_v4t_thumb_thumb
:
4511 case arm_stub_long_branch_v4t_thumb_arm
:
4512 case arm_stub_short_branch_v4t_thumb_arm
:
4513 case arm_stub_long_branch_any_arm_pic
:
4514 case arm_stub_long_branch_any_thumb_pic
:
4515 case arm_stub_long_branch_v4t_thumb_thumb_pic
:
4516 case arm_stub_long_branch_v4t_arm_thumb_pic
:
4517 case arm_stub_long_branch_v4t_thumb_arm_pic
:
4518 case arm_stub_long_branch_thumb_only_pic
:
4519 case arm_stub_long_branch_any_tls_pic
:
4520 case arm_stub_long_branch_v4t_thumb_tls_pic
:
4521 case arm_stub_a8_veneer_blx
:
4524 case arm_stub_long_branch_arm_nacl
:
4525 case arm_stub_long_branch_arm_nacl_pic
:
4529 abort (); /* Should be unreachable. */
4533 /* Returns whether stubs of type STUB_TYPE take over the symbol they are
4534 veneering (TRUE) or have their own symbol (FALSE). */
4537 arm_stub_sym_claimed (enum elf32_arm_stub_type stub_type
)
4539 if (stub_type
>= max_stub_type
)
4540 abort (); /* Should be unreachable. */
4545 /* Returns the padding needed for the dedicated section used stubs of type
4549 arm_dedicated_stub_section_padding (enum elf32_arm_stub_type stub_type
)
4551 if (stub_type
>= max_stub_type
)
4552 abort (); /* Should be unreachable. */
4558 arm_build_one_stub (struct bfd_hash_entry
*gen_entry
,
4562 struct elf32_arm_stub_hash_entry
*stub_entry
;
4563 struct elf32_arm_link_hash_table
*globals
;
4564 struct bfd_link_info
*info
;
4571 const insn_sequence
*template_sequence
;
4573 int stub_reloc_idx
[MAXRELOCS
] = {-1, -1};
4574 int stub_reloc_offset
[MAXRELOCS
] = {0, 0};
4577 /* Massage our args to the form they really have. */
4578 stub_entry
= (struct elf32_arm_stub_hash_entry
*) gen_entry
;
4579 info
= (struct bfd_link_info
*) in_arg
;
4581 globals
= elf32_arm_hash_table (info
);
4582 if (globals
== NULL
)
4585 stub_sec
= stub_entry
->stub_sec
;
4587 if ((globals
->fix_cortex_a8
< 0)
4588 != (arm_stub_required_alignment (stub_entry
->stub_type
) == 2))
4589 /* We have to do less-strictly-aligned fixes last. */
4592 /* Make a note of the offset within the stubs for this entry. */
4593 stub_entry
->stub_offset
= stub_sec
->size
;
4594 loc
= stub_sec
->contents
+ stub_entry
->stub_offset
;
4596 stub_bfd
= stub_sec
->owner
;
4598 /* This is the address of the stub destination. */
4599 sym_value
= (stub_entry
->target_value
4600 + stub_entry
->target_section
->output_offset
4601 + stub_entry
->target_section
->output_section
->vma
);
4603 template_sequence
= stub_entry
->stub_template
;
4604 template_size
= stub_entry
->stub_template_size
;
4607 for (i
= 0; i
< template_size
; i
++)
4609 switch (template_sequence
[i
].type
)
4613 bfd_vma data
= (bfd_vma
) template_sequence
[i
].data
;
4614 if (template_sequence
[i
].reloc_addend
!= 0)
4616 /* We've borrowed the reloc_addend field to mean we should
4617 insert a condition code into this (Thumb-1 branch)
4618 instruction. See THUMB16_BCOND_INSN. */
4619 BFD_ASSERT ((data
& 0xff00) == 0xd000);
4620 data
|= ((stub_entry
->orig_insn
>> 22) & 0xf) << 8;
4622 bfd_put_16 (stub_bfd
, data
, loc
+ size
);
4628 bfd_put_16 (stub_bfd
,
4629 (template_sequence
[i
].data
>> 16) & 0xffff,
4631 bfd_put_16 (stub_bfd
, template_sequence
[i
].data
& 0xffff,
4633 if (template_sequence
[i
].r_type
!= R_ARM_NONE
)
4635 stub_reloc_idx
[nrelocs
] = i
;
4636 stub_reloc_offset
[nrelocs
++] = size
;
4642 bfd_put_32 (stub_bfd
, template_sequence
[i
].data
,
4644 /* Handle cases where the target is encoded within the
4646 if (template_sequence
[i
].r_type
== R_ARM_JUMP24
)
4648 stub_reloc_idx
[nrelocs
] = i
;
4649 stub_reloc_offset
[nrelocs
++] = size
;
4655 bfd_put_32 (stub_bfd
, template_sequence
[i
].data
, loc
+ size
);
4656 stub_reloc_idx
[nrelocs
] = i
;
4657 stub_reloc_offset
[nrelocs
++] = size
;
4667 stub_sec
->size
+= size
;
4669 /* Stub size has already been computed in arm_size_one_stub. Check
4671 BFD_ASSERT (size
== stub_entry
->stub_size
);
4673 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
4674 if (stub_entry
->branch_type
== ST_BRANCH_TO_THUMB
)
4677 /* Assume there is at least one and at most MAXRELOCS entries to relocate
4679 BFD_ASSERT (nrelocs
!= 0 && nrelocs
<= MAXRELOCS
);
4681 for (i
= 0; i
< nrelocs
; i
++)
4683 Elf_Internal_Rela rel
;
4684 bfd_boolean unresolved_reloc
;
4685 char *error_message
;
4687 sym_value
+ template_sequence
[stub_reloc_idx
[i
]].reloc_addend
;
4689 rel
.r_offset
= stub_entry
->stub_offset
+ stub_reloc_offset
[i
];
4690 rel
.r_info
= ELF32_R_INFO (0,
4691 template_sequence
[stub_reloc_idx
[i
]].r_type
);
4694 if (stub_entry
->stub_type
== arm_stub_a8_veneer_b_cond
&& i
== 0)
4695 /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
4696 template should refer back to the instruction after the original
4697 branch. We use target_section as Cortex-A8 erratum workaround stubs
4698 are only generated when both source and target are in the same
4700 points_to
= stub_entry
->target_section
->output_section
->vma
4701 + stub_entry
->target_section
->output_offset
4702 + stub_entry
->source_value
;
4704 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
4705 (template_sequence
[stub_reloc_idx
[i
]].r_type
),
4706 stub_bfd
, info
->output_bfd
, stub_sec
, stub_sec
->contents
, &rel
,
4707 points_to
, info
, stub_entry
->target_section
, "", STT_FUNC
,
4708 stub_entry
->branch_type
,
4709 (struct elf_link_hash_entry
*) stub_entry
->h
, &unresolved_reloc
,
4717 /* Calculate the template, template size and instruction size for a stub.
4718 Return value is the instruction size. */
4721 find_stub_size_and_template (enum elf32_arm_stub_type stub_type
,
4722 const insn_sequence
**stub_template
,
4723 int *stub_template_size
)
4725 const insn_sequence
*template_sequence
= NULL
;
4726 int template_size
= 0, i
;
4729 template_sequence
= stub_definitions
[stub_type
].template_sequence
;
4731 *stub_template
= template_sequence
;
4733 template_size
= stub_definitions
[stub_type
].template_size
;
4734 if (stub_template_size
)
4735 *stub_template_size
= template_size
;
4738 for (i
= 0; i
< template_size
; i
++)
4740 switch (template_sequence
[i
].type
)
4761 /* As above, but don't actually build the stub. Just bump offset so
4762 we know stub section sizes. */
4765 arm_size_one_stub (struct bfd_hash_entry
*gen_entry
,
4766 void *in_arg ATTRIBUTE_UNUSED
)
4768 struct elf32_arm_stub_hash_entry
*stub_entry
;
4769 const insn_sequence
*template_sequence
;
4770 int template_size
, size
;
4772 /* Massage our args to the form they really have. */
4773 stub_entry
= (struct elf32_arm_stub_hash_entry
*) gen_entry
;
4775 BFD_ASSERT((stub_entry
->stub_type
> arm_stub_none
)
4776 && stub_entry
->stub_type
< ARRAY_SIZE(stub_definitions
));
4778 size
= find_stub_size_and_template (stub_entry
->stub_type
, &template_sequence
,
4781 stub_entry
->stub_size
= size
;
4782 stub_entry
->stub_template
= template_sequence
;
4783 stub_entry
->stub_template_size
= template_size
;
4785 size
= (size
+ 7) & ~7;
4786 stub_entry
->stub_sec
->size
+= size
;
4791 /* External entry points for sizing and building linker stubs. */
4793 /* Set up various things so that we can make a list of input sections
4794 for each output section included in the link. Returns -1 on error,
4795 0 when no stubs will be needed, and 1 on success. */
4798 elf32_arm_setup_section_lists (bfd
*output_bfd
,
4799 struct bfd_link_info
*info
)
4802 unsigned int bfd_count
;
4803 unsigned int top_id
, top_index
;
4805 asection
**input_list
, **list
;
4807 struct elf32_arm_link_hash_table
*htab
= elf32_arm_hash_table (info
);
4811 if (! is_elf_hash_table (htab
))
4814 /* Count the number of input BFDs and find the top input section id. */
4815 for (input_bfd
= info
->input_bfds
, bfd_count
= 0, top_id
= 0;
4817 input_bfd
= input_bfd
->link
.next
)
4820 for (section
= input_bfd
->sections
;
4822 section
= section
->next
)
4824 if (top_id
< section
->id
)
4825 top_id
= section
->id
;
4828 htab
->bfd_count
= bfd_count
;
4830 amt
= sizeof (struct map_stub
) * (top_id
+ 1);
4831 htab
->stub_group
= (struct map_stub
*) bfd_zmalloc (amt
);
4832 if (htab
->stub_group
== NULL
)
4834 htab
->top_id
= top_id
;
4836 /* We can't use output_bfd->section_count here to find the top output
4837 section index as some sections may have been removed, and
4838 _bfd_strip_section_from_output doesn't renumber the indices. */
4839 for (section
= output_bfd
->sections
, top_index
= 0;
4841 section
= section
->next
)
4843 if (top_index
< section
->index
)
4844 top_index
= section
->index
;
4847 htab
->top_index
= top_index
;
4848 amt
= sizeof (asection
*) * (top_index
+ 1);
4849 input_list
= (asection
**) bfd_malloc (amt
);
4850 htab
->input_list
= input_list
;
4851 if (input_list
== NULL
)
4854 /* For sections we aren't interested in, mark their entries with a
4855 value we can check later. */
4856 list
= input_list
+ top_index
;
4858 *list
= bfd_abs_section_ptr
;
4859 while (list
-- != input_list
);
4861 for (section
= output_bfd
->sections
;
4863 section
= section
->next
)
4865 if ((section
->flags
& SEC_CODE
) != 0)
4866 input_list
[section
->index
] = NULL
;
4872 /* The linker repeatedly calls this function for each input section,
4873 in the order that input sections are linked into output sections.
4874 Build lists of input sections to determine groupings between which
4875 we may insert linker stubs. */
4878 elf32_arm_next_input_section (struct bfd_link_info
*info
,
4881 struct elf32_arm_link_hash_table
*htab
= elf32_arm_hash_table (info
);
4886 if (isec
->output_section
->index
<= htab
->top_index
)
4888 asection
**list
= htab
->input_list
+ isec
->output_section
->index
;
4890 if (*list
!= bfd_abs_section_ptr
&& (isec
->flags
& SEC_CODE
) != 0)
4892 /* Steal the link_sec pointer for our list. */
4893 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
4894 /* This happens to make the list in reverse order,
4895 which we reverse later. */
4896 PREV_SEC (isec
) = *list
;
4902 /* See whether we can group stub sections together. Grouping stub
4903 sections may result in fewer stubs. More importantly, we need to
4904 put all .init* and .fini* stubs at the end of the .init or
4905 .fini output sections respectively, because glibc splits the
4906 _init and _fini functions into multiple parts. Putting a stub in
4907 the middle of a function is not a good idea. */
4910 group_sections (struct elf32_arm_link_hash_table
*htab
,
4911 bfd_size_type stub_group_size
,
4912 bfd_boolean stubs_always_after_branch
)
4914 asection
**list
= htab
->input_list
;
4918 asection
*tail
= *list
;
4921 if (tail
== bfd_abs_section_ptr
)
4924 /* Reverse the list: we must avoid placing stubs at the
4925 beginning of the section because the beginning of the text
4926 section may be required for an interrupt vector in bare metal
4928 #define NEXT_SEC PREV_SEC
4930 while (tail
!= NULL
)
4932 /* Pop from tail. */
4933 asection
*item
= tail
;
4934 tail
= PREV_SEC (item
);
4937 NEXT_SEC (item
) = head
;
4941 while (head
!= NULL
)
4945 bfd_vma stub_group_start
= head
->output_offset
;
4946 bfd_vma end_of_next
;
4949 while (NEXT_SEC (curr
) != NULL
)
4951 next
= NEXT_SEC (curr
);
4952 end_of_next
= next
->output_offset
+ next
->size
;
4953 if (end_of_next
- stub_group_start
>= stub_group_size
)
4954 /* End of NEXT is too far from start, so stop. */
4956 /* Add NEXT to the group. */
4960 /* OK, the size from the start to the start of CURR is less
4961 than stub_group_size and thus can be handled by one stub
4962 section. (Or the head section is itself larger than
4963 stub_group_size, in which case we may be toast.)
4964 We should really be keeping track of the total size of
4965 stubs added here, as stubs contribute to the final output
4969 next
= NEXT_SEC (head
);
4970 /* Set up this stub group. */
4971 htab
->stub_group
[head
->id
].link_sec
= curr
;
4973 while (head
!= curr
&& (head
= next
) != NULL
);
4975 /* But wait, there's more! Input sections up to stub_group_size
4976 bytes after the stub section can be handled by it too. */
4977 if (!stubs_always_after_branch
)
4979 stub_group_start
= curr
->output_offset
+ curr
->size
;
4981 while (next
!= NULL
)
4983 end_of_next
= next
->output_offset
+ next
->size
;
4984 if (end_of_next
- stub_group_start
>= stub_group_size
)
4985 /* End of NEXT is too far from stubs, so stop. */
4987 /* Add NEXT to the stub group. */
4989 next
= NEXT_SEC (head
);
4990 htab
->stub_group
[head
->id
].link_sec
= curr
;
4996 while (list
++ != htab
->input_list
+ htab
->top_index
);
4998 free (htab
->input_list
);
5003 /* Comparison function for sorting/searching relocations relating to Cortex-A8
5007 a8_reloc_compare (const void *a
, const void *b
)
5009 const struct a8_erratum_reloc
*ra
= (const struct a8_erratum_reloc
*) a
;
5010 const struct a8_erratum_reloc
*rb
= (const struct a8_erratum_reloc
*) b
;
5012 if (ra
->from
< rb
->from
)
5014 else if (ra
->from
> rb
->from
)
5020 static struct elf_link_hash_entry
*find_thumb_glue (struct bfd_link_info
*,
5021 const char *, char **);
5023 /* Helper function to scan code for sequences which might trigger the Cortex-A8
5024 branch/TLB erratum. Fill in the table described by A8_FIXES_P,
5025 NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false
5029 cortex_a8_erratum_scan (bfd
*input_bfd
,
5030 struct bfd_link_info
*info
,
5031 struct a8_erratum_fix
**a8_fixes_p
,
5032 unsigned int *num_a8_fixes_p
,
5033 unsigned int *a8_fix_table_size_p
,
5034 struct a8_erratum_reloc
*a8_relocs
,
5035 unsigned int num_a8_relocs
,
5036 unsigned prev_num_a8_fixes
,
5037 bfd_boolean
*stub_changed_p
)
5040 struct elf32_arm_link_hash_table
*htab
= elf32_arm_hash_table (info
);
5041 struct a8_erratum_fix
*a8_fixes
= *a8_fixes_p
;
5042 unsigned int num_a8_fixes
= *num_a8_fixes_p
;
5043 unsigned int a8_fix_table_size
= *a8_fix_table_size_p
;
5048 for (section
= input_bfd
->sections
;
5050 section
= section
->next
)
5052 bfd_byte
*contents
= NULL
;
5053 struct _arm_elf_section_data
*sec_data
;
5057 if (elf_section_type (section
) != SHT_PROGBITS
5058 || (elf_section_flags (section
) & SHF_EXECINSTR
) == 0
5059 || (section
->flags
& SEC_EXCLUDE
) != 0
5060 || (section
->sec_info_type
== SEC_INFO_TYPE_JUST_SYMS
)
5061 || (section
->output_section
== bfd_abs_section_ptr
))
5064 base_vma
= section
->output_section
->vma
+ section
->output_offset
;
5066 if (elf_section_data (section
)->this_hdr
.contents
!= NULL
)
5067 contents
= elf_section_data (section
)->this_hdr
.contents
;
5068 else if (! bfd_malloc_and_get_section (input_bfd
, section
, &contents
))
5071 sec_data
= elf32_arm_section_data (section
);
5073 for (span
= 0; span
< sec_data
->mapcount
; span
++)
5075 unsigned int span_start
= sec_data
->map
[span
].vma
;
5076 unsigned int span_end
= (span
== sec_data
->mapcount
- 1)
5077 ? section
->size
: sec_data
->map
[span
+ 1].vma
;
5079 char span_type
= sec_data
->map
[span
].type
;
5080 bfd_boolean last_was_32bit
= FALSE
, last_was_branch
= FALSE
;
5082 if (span_type
!= 't')
5085 /* Span is entirely within a single 4KB region: skip scanning. */
5086 if (((base_vma
+ span_start
) & ~0xfff)
5087 == ((base_vma
+ span_end
) & ~0xfff))
5090 /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
5092 * The opcode is BLX.W, BL.W, B.W, Bcc.W
5093 * The branch target is in the same 4KB region as the
5094 first half of the branch.
5095 * The instruction before the branch is a 32-bit
5096 length non-branch instruction. */
5097 for (i
= span_start
; i
< span_end
;)
5099 unsigned int insn
= bfd_getl16 (&contents
[i
]);
5100 bfd_boolean insn_32bit
= FALSE
, is_blx
= FALSE
, is_b
= FALSE
;
5101 bfd_boolean is_bl
= FALSE
, is_bcc
= FALSE
, is_32bit_branch
;
5103 if ((insn
& 0xe000) == 0xe000 && (insn
& 0x1800) != 0x0000)
5108 /* Load the rest of the insn (in manual-friendly order). */
5109 insn
= (insn
<< 16) | bfd_getl16 (&contents
[i
+ 2]);
5111 /* Encoding T4: B<c>.W. */
5112 is_b
= (insn
& 0xf800d000) == 0xf0009000;
5113 /* Encoding T1: BL<c>.W. */
5114 is_bl
= (insn
& 0xf800d000) == 0xf000d000;
5115 /* Encoding T2: BLX<c>.W. */
5116 is_blx
= (insn
& 0xf800d000) == 0xf000c000;
5117 /* Encoding T3: B<c>.W (not permitted in IT block). */
5118 is_bcc
= (insn
& 0xf800d000) == 0xf0008000
5119 && (insn
& 0x07f00000) != 0x03800000;
5122 is_32bit_branch
= is_b
|| is_bl
|| is_blx
|| is_bcc
;
5124 if (((base_vma
+ i
) & 0xfff) == 0xffe
5128 && ! last_was_branch
)
5130 bfd_signed_vma offset
= 0;
5131 bfd_boolean force_target_arm
= FALSE
;
5132 bfd_boolean force_target_thumb
= FALSE
;
5134 enum elf32_arm_stub_type stub_type
= arm_stub_none
;
5135 struct a8_erratum_reloc key
, *found
;
5136 bfd_boolean use_plt
= FALSE
;
5138 key
.from
= base_vma
+ i
;
5139 found
= (struct a8_erratum_reloc
*)
5140 bsearch (&key
, a8_relocs
, num_a8_relocs
,
5141 sizeof (struct a8_erratum_reloc
),
5146 char *error_message
= NULL
;
5147 struct elf_link_hash_entry
*entry
;
5149 /* We don't care about the error returned from this
5150 function, only if there is glue or not. */
5151 entry
= find_thumb_glue (info
, found
->sym_name
,
5155 found
->non_a8_stub
= TRUE
;
5157 /* Keep a simpler condition, for the sake of clarity. */
5158 if (htab
->root
.splt
!= NULL
&& found
->hash
!= NULL
5159 && found
->hash
->root
.plt
.offset
!= (bfd_vma
) -1)
5162 if (found
->r_type
== R_ARM_THM_CALL
)
5164 if (found
->branch_type
== ST_BRANCH_TO_ARM
5166 force_target_arm
= TRUE
;
5168 force_target_thumb
= TRUE
;
5172 /* Check if we have an offending branch instruction. */
5174 if (found
&& found
->non_a8_stub
)
5175 /* We've already made a stub for this instruction, e.g.
5176 it's a long branch or a Thumb->ARM stub. Assume that
5177 stub will suffice to work around the A8 erratum (see
5178 setting of always_after_branch above). */
5182 offset
= (insn
& 0x7ff) << 1;
5183 offset
|= (insn
& 0x3f0000) >> 4;
5184 offset
|= (insn
& 0x2000) ? 0x40000 : 0;
5185 offset
|= (insn
& 0x800) ? 0x80000 : 0;
5186 offset
|= (insn
& 0x4000000) ? 0x100000 : 0;
5187 if (offset
& 0x100000)
5188 offset
|= ~ ((bfd_signed_vma
) 0xfffff);
5189 stub_type
= arm_stub_a8_veneer_b_cond
;
5191 else if (is_b
|| is_bl
|| is_blx
)
5193 int s
= (insn
& 0x4000000) != 0;
5194 int j1
= (insn
& 0x2000) != 0;
5195 int j2
= (insn
& 0x800) != 0;
5199 offset
= (insn
& 0x7ff) << 1;
5200 offset
|= (insn
& 0x3ff0000) >> 4;
5204 if (offset
& 0x1000000)
5205 offset
|= ~ ((bfd_signed_vma
) 0xffffff);
5208 offset
&= ~ ((bfd_signed_vma
) 3);
5210 stub_type
= is_blx
? arm_stub_a8_veneer_blx
:
5211 is_bl
? arm_stub_a8_veneer_bl
: arm_stub_a8_veneer_b
;
5214 if (stub_type
!= arm_stub_none
)
5216 bfd_vma pc_for_insn
= base_vma
+ i
+ 4;
5218 /* The original instruction is a BL, but the target is
5219 an ARM instruction. If we were not making a stub,
5220 the BL would have been converted to a BLX. Use the
5221 BLX stub instead in that case. */
5222 if (htab
->use_blx
&& force_target_arm
5223 && stub_type
== arm_stub_a8_veneer_bl
)
5225 stub_type
= arm_stub_a8_veneer_blx
;
5229 /* Conversely, if the original instruction was
5230 BLX but the target is Thumb mode, use the BL
5232 else if (force_target_thumb
5233 && stub_type
== arm_stub_a8_veneer_blx
)
5235 stub_type
= arm_stub_a8_veneer_bl
;
5241 pc_for_insn
&= ~ ((bfd_vma
) 3);
5243 /* If we found a relocation, use the proper destination,
5244 not the offset in the (unrelocated) instruction.
5245 Note this is always done if we switched the stub type
5249 (bfd_signed_vma
) (found
->destination
- pc_for_insn
);
5251 /* If the stub will use a Thumb-mode branch to a
5252 PLT target, redirect it to the preceding Thumb
5254 if (stub_type
!= arm_stub_a8_veneer_blx
&& use_plt
)
5255 offset
-= PLT_THUMB_STUB_SIZE
;
5257 target
= pc_for_insn
+ offset
;
5259 /* The BLX stub is ARM-mode code. Adjust the offset to
5260 take the different PC value (+8 instead of +4) into
5262 if (stub_type
== arm_stub_a8_veneer_blx
)
5265 if (((base_vma
+ i
) & ~0xfff) == (target
& ~0xfff))
5267 char *stub_name
= NULL
;
5269 if (num_a8_fixes
== a8_fix_table_size
)
5271 a8_fix_table_size
*= 2;
5272 a8_fixes
= (struct a8_erratum_fix
*)
5273 bfd_realloc (a8_fixes
,
5274 sizeof (struct a8_erratum_fix
)
5275 * a8_fix_table_size
);
5278 if (num_a8_fixes
< prev_num_a8_fixes
)
5280 /* If we're doing a subsequent scan,
5281 check if we've found the same fix as
5282 before, and try and reuse the stub
5284 stub_name
= a8_fixes
[num_a8_fixes
].stub_name
;
5285 if ((a8_fixes
[num_a8_fixes
].section
!= section
)
5286 || (a8_fixes
[num_a8_fixes
].offset
!= i
))
5290 *stub_changed_p
= TRUE
;
5296 stub_name
= (char *) bfd_malloc (8 + 1 + 8 + 1);
5297 if (stub_name
!= NULL
)
5298 sprintf (stub_name
, "%x:%x", section
->id
, i
);
5301 a8_fixes
[num_a8_fixes
].input_bfd
= input_bfd
;
5302 a8_fixes
[num_a8_fixes
].section
= section
;
5303 a8_fixes
[num_a8_fixes
].offset
= i
;
5304 a8_fixes
[num_a8_fixes
].target_offset
=
5306 a8_fixes
[num_a8_fixes
].orig_insn
= insn
;
5307 a8_fixes
[num_a8_fixes
].stub_name
= stub_name
;
5308 a8_fixes
[num_a8_fixes
].stub_type
= stub_type
;
5309 a8_fixes
[num_a8_fixes
].branch_type
=
5310 is_blx
? ST_BRANCH_TO_ARM
: ST_BRANCH_TO_THUMB
;
5317 i
+= insn_32bit
? 4 : 2;
5318 last_was_32bit
= insn_32bit
;
5319 last_was_branch
= is_32bit_branch
;
5323 if (elf_section_data (section
)->this_hdr
.contents
== NULL
)
5327 *a8_fixes_p
= a8_fixes
;
5328 *num_a8_fixes_p
= num_a8_fixes
;
5329 *a8_fix_table_size_p
= a8_fix_table_size
;
5334 /* Create or update a stub entry depending on whether the stub can already be
5335 found in HTAB. The stub is identified by:
5336 - its type STUB_TYPE
5337 - its source branch (note that several can share the same stub) whose
5338 section and relocation (if any) are given by SECTION and IRELA
5340 - its target symbol whose input section, hash, name, value and branch type
5341 are given in SYM_SEC, HASH, SYM_NAME, SYM_VALUE and BRANCH_TYPE
5344 If found, the value of the stub's target symbol is updated from SYM_VALUE
5345 and *NEW_STUB is set to FALSE. Otherwise, *NEW_STUB is set to
5346 TRUE and the stub entry is initialized.
5348 Returns whether the stub could be successfully created or updated, or FALSE
5349 if an error occured. */
5352 elf32_arm_create_stub (struct elf32_arm_link_hash_table
*htab
,
5353 enum elf32_arm_stub_type stub_type
, asection
*section
,
5354 Elf_Internal_Rela
*irela
, asection
*sym_sec
,
5355 struct elf32_arm_link_hash_entry
*hash
, char *sym_name
,
5356 bfd_vma sym_value
, enum arm_st_branch_type branch_type
,
5357 bfd_boolean
*new_stub
)
5359 const asection
*id_sec
;
5361 struct elf32_arm_stub_hash_entry
*stub_entry
;
5362 unsigned int r_type
;
5363 bfd_boolean sym_claimed
= arm_stub_sym_claimed (stub_type
);
5365 BFD_ASSERT (stub_type
!= arm_stub_none
);
5369 stub_name
= sym_name
;
5373 BFD_ASSERT (section
);
5375 /* Support for grouping stub sections. */
5376 id_sec
= htab
->stub_group
[section
->id
].link_sec
;
5378 /* Get the name of this stub. */
5379 stub_name
= elf32_arm_stub_name (id_sec
, sym_sec
, hash
, irela
,
5385 stub_entry
= arm_stub_hash_lookup (&htab
->stub_hash_table
, stub_name
, FALSE
,
5387 /* The proper stub has already been created, just update its value. */
5388 if (stub_entry
!= NULL
)
5392 stub_entry
->target_value
= sym_value
;
5396 stub_entry
= elf32_arm_add_stub (stub_name
, section
, htab
, stub_type
);
5397 if (stub_entry
== NULL
)
5404 stub_entry
->target_value
= sym_value
;
5405 stub_entry
->target_section
= sym_sec
;
5406 stub_entry
->stub_type
= stub_type
;
5407 stub_entry
->h
= hash
;
5408 stub_entry
->branch_type
= branch_type
;
5411 stub_entry
->output_name
= sym_name
;
5414 if (sym_name
== NULL
)
5415 sym_name
= "unnamed";
5416 stub_entry
->output_name
= (char *)
5417 bfd_alloc (htab
->stub_bfd
, sizeof (THUMB2ARM_GLUE_ENTRY_NAME
)
5418 + strlen (sym_name
));
5419 if (stub_entry
->output_name
== NULL
)
5425 /* For historical reasons, use the existing names for ARM-to-Thumb and
5426 Thumb-to-ARM stubs. */
5427 r_type
= ELF32_R_TYPE (irela
->r_info
);
5428 if ((r_type
== (unsigned int) R_ARM_THM_CALL
5429 || r_type
== (unsigned int) R_ARM_THM_JUMP24
5430 || r_type
== (unsigned int) R_ARM_THM_JUMP19
)
5431 && branch_type
== ST_BRANCH_TO_ARM
)
5432 sprintf (stub_entry
->output_name
, THUMB2ARM_GLUE_ENTRY_NAME
, sym_name
);
5433 else if ((r_type
== (unsigned int) R_ARM_CALL
5434 || r_type
== (unsigned int) R_ARM_JUMP24
)
5435 && branch_type
== ST_BRANCH_TO_THUMB
)
5436 sprintf (stub_entry
->output_name
, ARM2THUMB_GLUE_ENTRY_NAME
, sym_name
);
5438 sprintf (stub_entry
->output_name
, STUB_ENTRY_NAME
, sym_name
);
5445 /* Determine and set the size of the stub section for a final link.
5447 The basic idea here is to examine all the relocations looking for
5448 PC-relative calls to a target that is unreachable with a "bl"
5452 elf32_arm_size_stubs (bfd
*output_bfd
,
5454 struct bfd_link_info
*info
,
5455 bfd_signed_vma group_size
,
5456 asection
* (*add_stub_section
) (const char *, asection
*,
5459 void (*layout_sections_again
) (void))
5461 bfd_size_type stub_group_size
;
5462 bfd_boolean stubs_always_after_branch
;
5463 struct elf32_arm_link_hash_table
*htab
= elf32_arm_hash_table (info
);
5464 struct a8_erratum_fix
*a8_fixes
= NULL
;
5465 unsigned int num_a8_fixes
= 0, a8_fix_table_size
= 10;
5466 struct a8_erratum_reloc
*a8_relocs
= NULL
;
5467 unsigned int num_a8_relocs
= 0, a8_reloc_table_size
= 10, i
;
5472 if (htab
->fix_cortex_a8
)
5474 a8_fixes
= (struct a8_erratum_fix
*)
5475 bfd_zmalloc (sizeof (struct a8_erratum_fix
) * a8_fix_table_size
);
5476 a8_relocs
= (struct a8_erratum_reloc
*)
5477 bfd_zmalloc (sizeof (struct a8_erratum_reloc
) * a8_reloc_table_size
);
5480 /* Propagate mach to stub bfd, because it may not have been
5481 finalized when we created stub_bfd. */
5482 bfd_set_arch_mach (stub_bfd
, bfd_get_arch (output_bfd
),
5483 bfd_get_mach (output_bfd
));
5485 /* Stash our params away. */
5486 htab
->stub_bfd
= stub_bfd
;
5487 htab
->add_stub_section
= add_stub_section
;
5488 htab
->layout_sections_again
= layout_sections_again
;
5489 stubs_always_after_branch
= group_size
< 0;
5491 /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
5492 as the first half of a 32-bit branch straddling two 4K pages. This is a
5493 crude way of enforcing that. */
5494 if (htab
->fix_cortex_a8
)
5495 stubs_always_after_branch
= 1;
5498 stub_group_size
= -group_size
;
5500 stub_group_size
= group_size
;
5502 if (stub_group_size
== 1)
5504 /* Default values. */
5505 /* Thumb branch range is +-4MB has to be used as the default
5506 maximum size (a given section can contain both ARM and Thumb
5507 code, so the worst case has to be taken into account).
5509 This value is 24K less than that, which allows for 2025
5510 12-byte stubs. If we exceed that, then we will fail to link.
5511 The user will have to relink with an explicit group size
5513 stub_group_size
= 4170000;
5516 group_sections (htab
, stub_group_size
, stubs_always_after_branch
);
5518 /* If we're applying the cortex A8 fix, we need to determine the
5519 program header size now, because we cannot change it later --
5520 that could alter section placements. Notice the A8 erratum fix
5521 ends up requiring the section addresses to remain unchanged
5522 modulo the page size. That's something we cannot represent
5523 inside BFD, and we don't want to force the section alignment to
5524 be the page size. */
5525 if (htab
->fix_cortex_a8
)
5526 (*htab
->layout_sections_again
) ();
5531 unsigned int bfd_indx
;
5533 enum elf32_arm_stub_type stub_type
;
5534 bfd_boolean stub_changed
= FALSE
;
5535 unsigned prev_num_a8_fixes
= num_a8_fixes
;
5538 for (input_bfd
= info
->input_bfds
, bfd_indx
= 0;
5540 input_bfd
= input_bfd
->link
.next
, bfd_indx
++)
5542 Elf_Internal_Shdr
*symtab_hdr
;
5544 Elf_Internal_Sym
*local_syms
= NULL
;
5546 if (!is_arm_elf (input_bfd
))
5551 /* We'll need the symbol table in a second. */
5552 symtab_hdr
= &elf_tdata (input_bfd
)->symtab_hdr
;
5553 if (symtab_hdr
->sh_info
== 0)
5556 /* Walk over each section attached to the input bfd. */
5557 for (section
= input_bfd
->sections
;
5559 section
= section
->next
)
5561 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
5563 /* If there aren't any relocs, then there's nothing more
5565 if ((section
->flags
& SEC_RELOC
) == 0
5566 || section
->reloc_count
== 0
5567 || (section
->flags
& SEC_CODE
) == 0)
5570 /* If this section is a link-once section that will be
5571 discarded, then don't create any stubs. */
5572 if (section
->output_section
== NULL
5573 || section
->output_section
->owner
!= output_bfd
)
5576 /* Get the relocs. */
5578 = _bfd_elf_link_read_relocs (input_bfd
, section
, NULL
,
5579 NULL
, info
->keep_memory
);
5580 if (internal_relocs
== NULL
)
5581 goto error_ret_free_local
;
5583 /* Now examine each relocation. */
5584 irela
= internal_relocs
;
5585 irelaend
= irela
+ section
->reloc_count
;
5586 for (; irela
< irelaend
; irela
++)
5588 unsigned int r_type
, r_indx
;
5591 bfd_vma destination
;
5592 struct elf32_arm_link_hash_entry
*hash
;
5593 const char *sym_name
;
5594 unsigned char st_type
;
5595 enum arm_st_branch_type branch_type
;
5596 bfd_boolean created_stub
= FALSE
;
5598 r_type
= ELF32_R_TYPE (irela
->r_info
);
5599 r_indx
= ELF32_R_SYM (irela
->r_info
);
5601 if (r_type
>= (unsigned int) R_ARM_max
)
5603 bfd_set_error (bfd_error_bad_value
);
5604 error_ret_free_internal
:
5605 if (elf_section_data (section
)->relocs
== NULL
)
5606 free (internal_relocs
);
5608 error_ret_free_local
:
5609 if (local_syms
!= NULL
5610 && (symtab_hdr
->contents
5611 != (unsigned char *) local_syms
))
5617 if (r_indx
>= symtab_hdr
->sh_info
)
5618 hash
= elf32_arm_hash_entry
5619 (elf_sym_hashes (input_bfd
)
5620 [r_indx
- symtab_hdr
->sh_info
]);
5622 /* Only look for stubs on branch instructions, or
5623 non-relaxed TLSCALL */
5624 if ((r_type
!= (unsigned int) R_ARM_CALL
)
5625 && (r_type
!= (unsigned int) R_ARM_THM_CALL
)
5626 && (r_type
!= (unsigned int) R_ARM_JUMP24
)
5627 && (r_type
!= (unsigned int) R_ARM_THM_JUMP19
)
5628 && (r_type
!= (unsigned int) R_ARM_THM_XPC22
)
5629 && (r_type
!= (unsigned int) R_ARM_THM_JUMP24
)
5630 && (r_type
!= (unsigned int) R_ARM_PLT32
)
5631 && !((r_type
== (unsigned int) R_ARM_TLS_CALL
5632 || r_type
== (unsigned int) R_ARM_THM_TLS_CALL
)
5633 && r_type
== elf32_arm_tls_transition
5634 (info
, r_type
, &hash
->root
)
5635 && ((hash
? hash
->tls_type
5636 : (elf32_arm_local_got_tls_type
5637 (input_bfd
)[r_indx
]))
5638 & GOT_TLS_GDESC
) != 0))
5641 /* Now determine the call target, its name, value,
5648 if (r_type
== (unsigned int) R_ARM_TLS_CALL
5649 || r_type
== (unsigned int) R_ARM_THM_TLS_CALL
)
5651 /* A non-relaxed TLS call. The target is the
5652 plt-resident trampoline and nothing to do
5654 BFD_ASSERT (htab
->tls_trampoline
> 0);
5655 sym_sec
= htab
->root
.splt
;
5656 sym_value
= htab
->tls_trampoline
;
5659 branch_type
= ST_BRANCH_TO_ARM
;
5663 /* It's a local symbol. */
5664 Elf_Internal_Sym
*sym
;
5666 if (local_syms
== NULL
)
5669 = (Elf_Internal_Sym
*) symtab_hdr
->contents
;
5670 if (local_syms
== NULL
)
5672 = bfd_elf_get_elf_syms (input_bfd
, symtab_hdr
,
5673 symtab_hdr
->sh_info
, 0,
5675 if (local_syms
== NULL
)
5676 goto error_ret_free_internal
;
5679 sym
= local_syms
+ r_indx
;
5680 if (sym
->st_shndx
== SHN_UNDEF
)
5681 sym_sec
= bfd_und_section_ptr
;
5682 else if (sym
->st_shndx
== SHN_ABS
)
5683 sym_sec
= bfd_abs_section_ptr
;
5684 else if (sym
->st_shndx
== SHN_COMMON
)
5685 sym_sec
= bfd_com_section_ptr
;
5688 bfd_section_from_elf_index (input_bfd
, sym
->st_shndx
);
5691 /* This is an undefined symbol. It can never
5695 if (ELF_ST_TYPE (sym
->st_info
) != STT_SECTION
)
5696 sym_value
= sym
->st_value
;
5697 destination
= (sym_value
+ irela
->r_addend
5698 + sym_sec
->output_offset
5699 + sym_sec
->output_section
->vma
);
5700 st_type
= ELF_ST_TYPE (sym
->st_info
);
5702 ARM_GET_SYM_BRANCH_TYPE (sym
->st_target_internal
);
5704 = bfd_elf_string_from_elf_section (input_bfd
,
5705 symtab_hdr
->sh_link
,
5710 /* It's an external symbol. */
5711 while (hash
->root
.root
.type
== bfd_link_hash_indirect
5712 || hash
->root
.root
.type
== bfd_link_hash_warning
)
5713 hash
= ((struct elf32_arm_link_hash_entry
*)
5714 hash
->root
.root
.u
.i
.link
);
5716 if (hash
->root
.root
.type
== bfd_link_hash_defined
5717 || hash
->root
.root
.type
== bfd_link_hash_defweak
)
5719 sym_sec
= hash
->root
.root
.u
.def
.section
;
5720 sym_value
= hash
->root
.root
.u
.def
.value
;
5722 struct elf32_arm_link_hash_table
*globals
=
5723 elf32_arm_hash_table (info
);
5725 /* For a destination in a shared library,
5726 use the PLT stub as target address to
5727 decide whether a branch stub is
5730 && globals
->root
.splt
!= NULL
5732 && hash
->root
.plt
.offset
!= (bfd_vma
) -1)
5734 sym_sec
= globals
->root
.splt
;
5735 sym_value
= hash
->root
.plt
.offset
;
5736 if (sym_sec
->output_section
!= NULL
)
5737 destination
= (sym_value
5738 + sym_sec
->output_offset
5739 + sym_sec
->output_section
->vma
);
5741 else if (sym_sec
->output_section
!= NULL
)
5742 destination
= (sym_value
+ irela
->r_addend
5743 + sym_sec
->output_offset
5744 + sym_sec
->output_section
->vma
);
5746 else if ((hash
->root
.root
.type
== bfd_link_hash_undefined
)
5747 || (hash
->root
.root
.type
== bfd_link_hash_undefweak
))
5749 /* For a shared library, use the PLT stub as
5750 target address to decide whether a long
5751 branch stub is needed.
5752 For absolute code, they cannot be handled. */
5753 struct elf32_arm_link_hash_table
*globals
=
5754 elf32_arm_hash_table (info
);
5757 && globals
->root
.splt
!= NULL
5759 && hash
->root
.plt
.offset
!= (bfd_vma
) -1)
5761 sym_sec
= globals
->root
.splt
;
5762 sym_value
= hash
->root
.plt
.offset
;
5763 if (sym_sec
->output_section
!= NULL
)
5764 destination
= (sym_value
5765 + sym_sec
->output_offset
5766 + sym_sec
->output_section
->vma
);
5773 bfd_set_error (bfd_error_bad_value
);
5774 goto error_ret_free_internal
;
5776 st_type
= hash
->root
.type
;
5778 ARM_GET_SYM_BRANCH_TYPE (hash
->root
.target_internal
);
5779 sym_name
= hash
->root
.root
.root
.string
;
5784 bfd_boolean new_stub
;
5786 /* Determine what (if any) linker stub is needed. */
5787 stub_type
= arm_type_of_stub (info
, section
, irela
,
5788 st_type
, &branch_type
,
5789 hash
, destination
, sym_sec
,
5790 input_bfd
, sym_name
);
5791 if (stub_type
== arm_stub_none
)
5794 /* We've either created a stub for this reloc already,
5795 or we are about to. */
5797 elf32_arm_create_stub (htab
, stub_type
, section
, irela
,
5799 (char *) sym_name
, sym_value
,
5800 branch_type
, &new_stub
);
5803 goto error_ret_free_internal
;
5807 stub_changed
= TRUE
;
5811 /* Look for relocations which might trigger Cortex-A8
5813 if (htab
->fix_cortex_a8
5814 && (r_type
== (unsigned int) R_ARM_THM_JUMP24
5815 || r_type
== (unsigned int) R_ARM_THM_JUMP19
5816 || r_type
== (unsigned int) R_ARM_THM_CALL
5817 || r_type
== (unsigned int) R_ARM_THM_XPC22
))
5819 bfd_vma from
= section
->output_section
->vma
5820 + section
->output_offset
5823 if ((from
& 0xfff) == 0xffe)
5825 /* Found a candidate. Note we haven't checked the
5826 destination is within 4K here: if we do so (and
5827 don't create an entry in a8_relocs) we can't tell
5828 that a branch should have been relocated when
5830 if (num_a8_relocs
== a8_reloc_table_size
)
5832 a8_reloc_table_size
*= 2;
5833 a8_relocs
= (struct a8_erratum_reloc
*)
5834 bfd_realloc (a8_relocs
,
5835 sizeof (struct a8_erratum_reloc
)
5836 * a8_reloc_table_size
);
5839 a8_relocs
[num_a8_relocs
].from
= from
;
5840 a8_relocs
[num_a8_relocs
].destination
= destination
;
5841 a8_relocs
[num_a8_relocs
].r_type
= r_type
;
5842 a8_relocs
[num_a8_relocs
].branch_type
= branch_type
;
5843 a8_relocs
[num_a8_relocs
].sym_name
= sym_name
;
5844 a8_relocs
[num_a8_relocs
].non_a8_stub
= created_stub
;
5845 a8_relocs
[num_a8_relocs
].hash
= hash
;
5852 /* We're done with the internal relocs, free them. */
5853 if (elf_section_data (section
)->relocs
== NULL
)
5854 free (internal_relocs
);
5857 if (htab
->fix_cortex_a8
)
5859 /* Sort relocs which might apply to Cortex-A8 erratum. */
5860 qsort (a8_relocs
, num_a8_relocs
,
5861 sizeof (struct a8_erratum_reloc
),
5864 /* Scan for branches which might trigger Cortex-A8 erratum. */
5865 if (cortex_a8_erratum_scan (input_bfd
, info
, &a8_fixes
,
5866 &num_a8_fixes
, &a8_fix_table_size
,
5867 a8_relocs
, num_a8_relocs
,
5868 prev_num_a8_fixes
, &stub_changed
)
5870 goto error_ret_free_local
;
5873 if (local_syms
!= NULL
5874 && symtab_hdr
->contents
!= (unsigned char *) local_syms
)
5876 if (!info
->keep_memory
)
5879 symtab_hdr
->contents
= (unsigned char *) local_syms
;
5883 if (prev_num_a8_fixes
!= num_a8_fixes
)
5884 stub_changed
= TRUE
;
5889 /* OK, we've added some stubs. Find out the new size of the
5891 for (stub_sec
= htab
->stub_bfd
->sections
;
5893 stub_sec
= stub_sec
->next
)
5895 /* Ignore non-stub sections. */
5896 if (!strstr (stub_sec
->name
, STUB_SUFFIX
))
5902 /* Compute stub section size, considering padding. */
5903 bfd_hash_traverse (&htab
->stub_hash_table
, arm_size_one_stub
, htab
);
5904 for (stub_type
= arm_stub_none
+ 1; stub_type
< max_stub_type
;
5908 asection
**stub_sec_p
;
5910 padding
= arm_dedicated_stub_section_padding (stub_type
);
5911 stub_sec_p
= arm_dedicated_stub_input_section_ptr (htab
, stub_type
);
5912 /* Skip if no stub input section or no stub section padding
5914 if ((stub_sec_p
!= NULL
&& *stub_sec_p
== NULL
) || padding
== 0)
5916 /* Stub section padding required but no dedicated section. */
5917 BFD_ASSERT (stub_sec_p
);
5919 size
= (*stub_sec_p
)->size
;
5920 size
= (size
+ padding
- 1) & ~(padding
- 1);
5921 (*stub_sec_p
)->size
= size
;
5924 /* Add Cortex-A8 erratum veneers to stub section sizes too. */
5925 if (htab
->fix_cortex_a8
)
5926 for (i
= 0; i
< num_a8_fixes
; i
++)
5928 stub_sec
= elf32_arm_create_or_find_stub_sec (NULL
,
5929 a8_fixes
[i
].section
, htab
, a8_fixes
[i
].stub_type
);
5931 if (stub_sec
== NULL
)
5935 += find_stub_size_and_template (a8_fixes
[i
].stub_type
, NULL
,
5940 /* Ask the linker to do its stuff. */
5941 (*htab
->layout_sections_again
) ();
5944 /* Add stubs for Cortex-A8 erratum fixes now. */
5945 if (htab
->fix_cortex_a8
)
5947 for (i
= 0; i
< num_a8_fixes
; i
++)
5949 struct elf32_arm_stub_hash_entry
*stub_entry
;
5950 char *stub_name
= a8_fixes
[i
].stub_name
;
5951 asection
*section
= a8_fixes
[i
].section
;
5952 unsigned int section_id
= a8_fixes
[i
].section
->id
;
5953 asection
*link_sec
= htab
->stub_group
[section_id
].link_sec
;
5954 asection
*stub_sec
= htab
->stub_group
[section_id
].stub_sec
;
5955 const insn_sequence
*template_sequence
;
5956 int template_size
, size
= 0;
5958 stub_entry
= arm_stub_hash_lookup (&htab
->stub_hash_table
, stub_name
,
5960 if (stub_entry
== NULL
)
5962 (*_bfd_error_handler
) (_("%s: cannot create stub entry %s"),
5968 stub_entry
->stub_sec
= stub_sec
;
5969 stub_entry
->stub_offset
= 0;
5970 stub_entry
->id_sec
= link_sec
;
5971 stub_entry
->stub_type
= a8_fixes
[i
].stub_type
;
5972 stub_entry
->source_value
= a8_fixes
[i
].offset
;
5973 stub_entry
->target_section
= a8_fixes
[i
].section
;
5974 stub_entry
->target_value
= a8_fixes
[i
].target_offset
;
5975 stub_entry
->orig_insn
= a8_fixes
[i
].orig_insn
;
5976 stub_entry
->branch_type
= a8_fixes
[i
].branch_type
;
5978 size
= find_stub_size_and_template (a8_fixes
[i
].stub_type
,
5982 stub_entry
->stub_size
= size
;
5983 stub_entry
->stub_template
= template_sequence
;
5984 stub_entry
->stub_template_size
= template_size
;
5987 /* Stash the Cortex-A8 erratum fix array for use later in
5988 elf32_arm_write_section(). */
5989 htab
->a8_erratum_fixes
= a8_fixes
;
5990 htab
->num_a8_erratum_fixes
= num_a8_fixes
;
5994 htab
->a8_erratum_fixes
= NULL
;
5995 htab
->num_a8_erratum_fixes
= 0;
6000 /* Build all the stubs associated with the current output file. The
6001 stubs are kept in a hash table attached to the main linker hash
6002 table. We also set up the .plt entries for statically linked PIC
6003 functions here. This function is called via arm_elf_finish in the
6007 elf32_arm_build_stubs (struct bfd_link_info
*info
)
6010 struct bfd_hash_table
*table
;
6011 struct elf32_arm_link_hash_table
*htab
;
6013 htab
= elf32_arm_hash_table (info
);
6017 for (stub_sec
= htab
->stub_bfd
->sections
;
6019 stub_sec
= stub_sec
->next
)
6023 /* Ignore non-stub sections. */
6024 if (!strstr (stub_sec
->name
, STUB_SUFFIX
))
6027 /* Allocate memory to hold the linker stubs. Zeroing the stub sections
6028 must at least be done for stub section requiring padding. */
6029 size
= stub_sec
->size
;
6030 stub_sec
->contents
= (unsigned char *) bfd_zalloc (htab
->stub_bfd
, size
);
6031 if (stub_sec
->contents
== NULL
&& size
!= 0)
6036 /* Build the stubs as directed by the stub hash table. */
6037 table
= &htab
->stub_hash_table
;
6038 bfd_hash_traverse (table
, arm_build_one_stub
, info
);
6039 if (htab
->fix_cortex_a8
)
6041 /* Place the cortex a8 stubs last. */
6042 htab
->fix_cortex_a8
= -1;
6043 bfd_hash_traverse (table
, arm_build_one_stub
, info
);
6049 /* Locate the Thumb encoded calling stub for NAME. */
6051 static struct elf_link_hash_entry
*
6052 find_thumb_glue (struct bfd_link_info
*link_info
,
6054 char **error_message
)
6057 struct elf_link_hash_entry
*hash
;
6058 struct elf32_arm_link_hash_table
*hash_table
;
6060 /* We need a pointer to the armelf specific hash table. */
6061 hash_table
= elf32_arm_hash_table (link_info
);
6062 if (hash_table
== NULL
)
6065 tmp_name
= (char *) bfd_malloc ((bfd_size_type
) strlen (name
)
6066 + strlen (THUMB2ARM_GLUE_ENTRY_NAME
) + 1);
6068 BFD_ASSERT (tmp_name
);
6070 sprintf (tmp_name
, THUMB2ARM_GLUE_ENTRY_NAME
, name
);
6072 hash
= elf_link_hash_lookup
6073 (&(hash_table
)->root
, tmp_name
, FALSE
, FALSE
, TRUE
);
6076 && asprintf (error_message
, _("unable to find THUMB glue '%s' for '%s'"),
6077 tmp_name
, name
) == -1)
6078 *error_message
= (char *) bfd_errmsg (bfd_error_system_call
);
6085 /* Locate the ARM encoded calling stub for NAME. */
6087 static struct elf_link_hash_entry
*
6088 find_arm_glue (struct bfd_link_info
*link_info
,
6090 char **error_message
)
6093 struct elf_link_hash_entry
*myh
;
6094 struct elf32_arm_link_hash_table
*hash_table
;
6096 /* We need a pointer to the elfarm specific hash table. */
6097 hash_table
= elf32_arm_hash_table (link_info
);
6098 if (hash_table
== NULL
)
6101 tmp_name
= (char *) bfd_malloc ((bfd_size_type
) strlen (name
)
6102 + strlen (ARM2THUMB_GLUE_ENTRY_NAME
) + 1);
6104 BFD_ASSERT (tmp_name
);
6106 sprintf (tmp_name
, ARM2THUMB_GLUE_ENTRY_NAME
, name
);
6108 myh
= elf_link_hash_lookup
6109 (&(hash_table
)->root
, tmp_name
, FALSE
, FALSE
, TRUE
);
6112 && asprintf (error_message
, _("unable to find ARM glue '%s' for '%s'"),
6113 tmp_name
, name
) == -1)
6114 *error_message
= (char *) bfd_errmsg (bfd_error_system_call
);
6121 /* ARM->Thumb glue (static images):
6125 ldr r12, __func_addr
6128 .word func @ behave as if you saw a ARM_32 reloc.
6135 .word func @ behave as if you saw a ARM_32 reloc.
6137 (relocatable images)
6140 ldr r12, __func_offset
6146 #define ARM2THUMB_STATIC_GLUE_SIZE 12
6147 static const insn32 a2t1_ldr_insn
= 0xe59fc000;
6148 static const insn32 a2t2_bx_r12_insn
= 0xe12fff1c;
6149 static const insn32 a2t3_func_addr_insn
= 0x00000001;
6151 #define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
6152 static const insn32 a2t1v5_ldr_insn
= 0xe51ff004;
6153 static const insn32 a2t2v5_func_addr_insn
= 0x00000001;
6155 #define ARM2THUMB_PIC_GLUE_SIZE 16
6156 static const insn32 a2t1p_ldr_insn
= 0xe59fc004;
6157 static const insn32 a2t2p_add_pc_insn
= 0xe08cc00f;
6158 static const insn32 a2t3p_bx_r12_insn
= 0xe12fff1c;
6160 /* Thumb->ARM: Thumb->(non-interworking aware) ARM
6164 __func_from_thumb: __func_from_thumb:
6166 nop ldr r6, __func_addr
6176 #define THUMB2ARM_GLUE_SIZE 8
6177 static const insn16 t2a1_bx_pc_insn
= 0x4778;
6178 static const insn16 t2a2_noop_insn
= 0x46c0;
6179 static const insn32 t2a3_b_insn
= 0xea000000;
6181 #define VFP11_ERRATUM_VENEER_SIZE 8
6182 #define STM32L4XX_ERRATUM_LDM_VENEER_SIZE 16
6183 #define STM32L4XX_ERRATUM_VLDM_VENEER_SIZE 24
6185 #define ARM_BX_VENEER_SIZE 12
6186 static const insn32 armbx1_tst_insn
= 0xe3100001;
6187 static const insn32 armbx2_moveq_insn
= 0x01a0f000;
6188 static const insn32 armbx3_bx_insn
= 0xe12fff10;
6190 #ifndef ELFARM_NABI_C_INCLUDED
6192 arm_allocate_glue_section_space (bfd
* abfd
, bfd_size_type size
, const char * name
)
6195 bfd_byte
* contents
;
6199 /* Do not include empty glue sections in the output. */
6202 s
= bfd_get_linker_section (abfd
, name
);
6204 s
->flags
|= SEC_EXCLUDE
;
6209 BFD_ASSERT (abfd
!= NULL
);
6211 s
= bfd_get_linker_section (abfd
, name
);
6212 BFD_ASSERT (s
!= NULL
);
6214 contents
= (bfd_byte
*) bfd_alloc (abfd
, size
);
6216 BFD_ASSERT (s
->size
== size
);
6217 s
->contents
= contents
;
6221 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info
* info
)
6223 struct elf32_arm_link_hash_table
* globals
;
6225 globals
= elf32_arm_hash_table (info
);
6226 BFD_ASSERT (globals
!= NULL
);
6228 arm_allocate_glue_section_space (globals
->bfd_of_glue_owner
,
6229 globals
->arm_glue_size
,
6230 ARM2THUMB_GLUE_SECTION_NAME
);
6232 arm_allocate_glue_section_space (globals
->bfd_of_glue_owner
,
6233 globals
->thumb_glue_size
,
6234 THUMB2ARM_GLUE_SECTION_NAME
);
6236 arm_allocate_glue_section_space (globals
->bfd_of_glue_owner
,
6237 globals
->vfp11_erratum_glue_size
,
6238 VFP11_ERRATUM_VENEER_SECTION_NAME
);
6240 arm_allocate_glue_section_space (globals
->bfd_of_glue_owner
,
6241 globals
->stm32l4xx_erratum_glue_size
,
6242 STM32L4XX_ERRATUM_VENEER_SECTION_NAME
);
6244 arm_allocate_glue_section_space (globals
->bfd_of_glue_owner
,
6245 globals
->bx_glue_size
,
6246 ARM_BX_GLUE_SECTION_NAME
);
6251 /* Allocate space and symbols for calling a Thumb function from Arm mode.
6252 returns the symbol identifying the stub. */
6254 static struct elf_link_hash_entry
*
6255 record_arm_to_thumb_glue (struct bfd_link_info
* link_info
,
6256 struct elf_link_hash_entry
* h
)
6258 const char * name
= h
->root
.root
.string
;
6261 struct elf_link_hash_entry
* myh
;
6262 struct bfd_link_hash_entry
* bh
;
6263 struct elf32_arm_link_hash_table
* globals
;
6267 globals
= elf32_arm_hash_table (link_info
);
6268 BFD_ASSERT (globals
!= NULL
);
6269 BFD_ASSERT (globals
->bfd_of_glue_owner
!= NULL
);
6271 s
= bfd_get_linker_section
6272 (globals
->bfd_of_glue_owner
, ARM2THUMB_GLUE_SECTION_NAME
);
6274 BFD_ASSERT (s
!= NULL
);
6276 tmp_name
= (char *) bfd_malloc ((bfd_size_type
) strlen (name
)
6277 + strlen (ARM2THUMB_GLUE_ENTRY_NAME
) + 1);
6279 BFD_ASSERT (tmp_name
);
6281 sprintf (tmp_name
, ARM2THUMB_GLUE_ENTRY_NAME
, name
);
6283 myh
= elf_link_hash_lookup
6284 (&(globals
)->root
, tmp_name
, FALSE
, FALSE
, TRUE
);
6288 /* We've already seen this guy. */
6293 /* The only trick here is using hash_table->arm_glue_size as the value.
6294 Even though the section isn't allocated yet, this is where we will be
6295 putting it. The +1 on the value marks that the stub has not been
6296 output yet - not that it is a Thumb function. */
6298 val
= globals
->arm_glue_size
+ 1;
6299 _bfd_generic_link_add_one_symbol (link_info
, globals
->bfd_of_glue_owner
,
6300 tmp_name
, BSF_GLOBAL
, s
, val
,
6301 NULL
, TRUE
, FALSE
, &bh
);
6303 myh
= (struct elf_link_hash_entry
*) bh
;
6304 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
6305 myh
->forced_local
= 1;
6309 if (bfd_link_pic (link_info
)
6310 || globals
->root
.is_relocatable_executable
6311 || globals
->pic_veneer
)
6312 size
= ARM2THUMB_PIC_GLUE_SIZE
;
6313 else if (globals
->use_blx
)
6314 size
= ARM2THUMB_V5_STATIC_GLUE_SIZE
;
6316 size
= ARM2THUMB_STATIC_GLUE_SIZE
;
6319 globals
->arm_glue_size
+= size
;
6324 /* Allocate space for ARMv4 BX veneers. */
6327 record_arm_bx_glue (struct bfd_link_info
* link_info
, int reg
)
6330 struct elf32_arm_link_hash_table
*globals
;
6332 struct elf_link_hash_entry
*myh
;
6333 struct bfd_link_hash_entry
*bh
;
6336 /* BX PC does not need a veneer. */
6340 globals
= elf32_arm_hash_table (link_info
);
6341 BFD_ASSERT (globals
!= NULL
);
6342 BFD_ASSERT (globals
->bfd_of_glue_owner
!= NULL
);
6344 /* Check if this veneer has already been allocated. */
6345 if (globals
->bx_glue_offset
[reg
])
6348 s
= bfd_get_linker_section
6349 (globals
->bfd_of_glue_owner
, ARM_BX_GLUE_SECTION_NAME
);
6351 BFD_ASSERT (s
!= NULL
);
6353 /* Add symbol for veneer. */
6355 bfd_malloc ((bfd_size_type
) strlen (ARM_BX_GLUE_ENTRY_NAME
) + 1);
6357 BFD_ASSERT (tmp_name
);
6359 sprintf (tmp_name
, ARM_BX_GLUE_ENTRY_NAME
, reg
);
6361 myh
= elf_link_hash_lookup
6362 (&(globals
)->root
, tmp_name
, FALSE
, FALSE
, FALSE
);
6364 BFD_ASSERT (myh
== NULL
);
6367 val
= globals
->bx_glue_size
;
6368 _bfd_generic_link_add_one_symbol (link_info
, globals
->bfd_of_glue_owner
,
6369 tmp_name
, BSF_FUNCTION
| BSF_LOCAL
, s
, val
,
6370 NULL
, TRUE
, FALSE
, &bh
);
6372 myh
= (struct elf_link_hash_entry
*) bh
;
6373 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
6374 myh
->forced_local
= 1;
6376 s
->size
+= ARM_BX_VENEER_SIZE
;
6377 globals
->bx_glue_offset
[reg
] = globals
->bx_glue_size
| 2;
6378 globals
->bx_glue_size
+= ARM_BX_VENEER_SIZE
;
6382 /* Add an entry to the code/data map for section SEC. */
6385 elf32_arm_section_map_add (asection
*sec
, char type
, bfd_vma vma
)
6387 struct _arm_elf_section_data
*sec_data
= elf32_arm_section_data (sec
);
6388 unsigned int newidx
;
6390 if (sec_data
->map
== NULL
)
6392 sec_data
->map
= (elf32_arm_section_map
*)
6393 bfd_malloc (sizeof (elf32_arm_section_map
));
6394 sec_data
->mapcount
= 0;
6395 sec_data
->mapsize
= 1;
6398 newidx
= sec_data
->mapcount
++;
6400 if (sec_data
->mapcount
> sec_data
->mapsize
)
6402 sec_data
->mapsize
*= 2;
6403 sec_data
->map
= (elf32_arm_section_map
*)
6404 bfd_realloc_or_free (sec_data
->map
, sec_data
->mapsize
6405 * sizeof (elf32_arm_section_map
));
6410 sec_data
->map
[newidx
].vma
= vma
;
6411 sec_data
->map
[newidx
].type
= type
;
6416 /* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode
6417 veneers are handled for now. */
6420 record_vfp11_erratum_veneer (struct bfd_link_info
*link_info
,
6421 elf32_vfp11_erratum_list
*branch
,
6423 asection
*branch_sec
,
6424 unsigned int offset
)
6427 struct elf32_arm_link_hash_table
*hash_table
;
6429 struct elf_link_hash_entry
*myh
;
6430 struct bfd_link_hash_entry
*bh
;
6432 struct _arm_elf_section_data
*sec_data
;
6433 elf32_vfp11_erratum_list
*newerr
;
6435 hash_table
= elf32_arm_hash_table (link_info
);
6436 BFD_ASSERT (hash_table
!= NULL
);
6437 BFD_ASSERT (hash_table
->bfd_of_glue_owner
!= NULL
);
6439 s
= bfd_get_linker_section
6440 (hash_table
->bfd_of_glue_owner
, VFP11_ERRATUM_VENEER_SECTION_NAME
);
6442 sec_data
= elf32_arm_section_data (s
);
6444 BFD_ASSERT (s
!= NULL
);
6446 tmp_name
= (char *) bfd_malloc ((bfd_size_type
) strlen
6447 (VFP11_ERRATUM_VENEER_ENTRY_NAME
) + 10);
6449 BFD_ASSERT (tmp_name
);
6451 sprintf (tmp_name
, VFP11_ERRATUM_VENEER_ENTRY_NAME
,
6452 hash_table
->num_vfp11_fixes
);
6454 myh
= elf_link_hash_lookup
6455 (&(hash_table
)->root
, tmp_name
, FALSE
, FALSE
, FALSE
);
6457 BFD_ASSERT (myh
== NULL
);
6460 val
= hash_table
->vfp11_erratum_glue_size
;
6461 _bfd_generic_link_add_one_symbol (link_info
, hash_table
->bfd_of_glue_owner
,
6462 tmp_name
, BSF_FUNCTION
| BSF_LOCAL
, s
, val
,
6463 NULL
, TRUE
, FALSE
, &bh
);
6465 myh
= (struct elf_link_hash_entry
*) bh
;
6466 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
6467 myh
->forced_local
= 1;
6469 /* Link veneer back to calling location. */
6470 sec_data
->erratumcount
+= 1;
6471 newerr
= (elf32_vfp11_erratum_list
*)
6472 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list
));
6474 newerr
->type
= VFP11_ERRATUM_ARM_VENEER
;
6476 newerr
->u
.v
.branch
= branch
;
6477 newerr
->u
.v
.id
= hash_table
->num_vfp11_fixes
;
6478 branch
->u
.b
.veneer
= newerr
;
6480 newerr
->next
= sec_data
->erratumlist
;
6481 sec_data
->erratumlist
= newerr
;
6483 /* A symbol for the return from the veneer. */
6484 sprintf (tmp_name
, VFP11_ERRATUM_VENEER_ENTRY_NAME
"_r",
6485 hash_table
->num_vfp11_fixes
);
6487 myh
= elf_link_hash_lookup
6488 (&(hash_table
)->root
, tmp_name
, FALSE
, FALSE
, FALSE
);
6495 _bfd_generic_link_add_one_symbol (link_info
, branch_bfd
, tmp_name
, BSF_LOCAL
,
6496 branch_sec
, val
, NULL
, TRUE
, FALSE
, &bh
);
6498 myh
= (struct elf_link_hash_entry
*) bh
;
6499 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
6500 myh
->forced_local
= 1;
6504 /* Generate a mapping symbol for the veneer section, and explicitly add an
6505 entry for that symbol to the code/data map for the section. */
6506 if (hash_table
->vfp11_erratum_glue_size
== 0)
6509 /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it
6510 ever requires this erratum fix. */
6511 _bfd_generic_link_add_one_symbol (link_info
,
6512 hash_table
->bfd_of_glue_owner
, "$a",
6513 BSF_LOCAL
, s
, 0, NULL
,
6516 myh
= (struct elf_link_hash_entry
*) bh
;
6517 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_NOTYPE
);
6518 myh
->forced_local
= 1;
6520 /* The elf32_arm_init_maps function only cares about symbols from input
6521 BFDs. We must make a note of this generated mapping symbol
6522 ourselves so that code byteswapping works properly in
6523 elf32_arm_write_section. */
6524 elf32_arm_section_map_add (s
, 'a', 0);
6527 s
->size
+= VFP11_ERRATUM_VENEER_SIZE
;
6528 hash_table
->vfp11_erratum_glue_size
+= VFP11_ERRATUM_VENEER_SIZE
;
6529 hash_table
->num_vfp11_fixes
++;
6531 /* The offset of the veneer. */
6535 /* Record information about a STM32L4XX STM erratum veneer. Only THUMB-mode
6536 veneers need to be handled because used only in Cortex-M. */
6539 record_stm32l4xx_erratum_veneer (struct bfd_link_info
*link_info
,
6540 elf32_stm32l4xx_erratum_list
*branch
,
6542 asection
*branch_sec
,
6543 unsigned int offset
,
6544 bfd_size_type veneer_size
)
6547 struct elf32_arm_link_hash_table
*hash_table
;
6549 struct elf_link_hash_entry
*myh
;
6550 struct bfd_link_hash_entry
*bh
;
6552 struct _arm_elf_section_data
*sec_data
;
6553 elf32_stm32l4xx_erratum_list
*newerr
;
6555 hash_table
= elf32_arm_hash_table (link_info
);
6556 BFD_ASSERT (hash_table
!= NULL
);
6557 BFD_ASSERT (hash_table
->bfd_of_glue_owner
!= NULL
);
6559 s
= bfd_get_linker_section
6560 (hash_table
->bfd_of_glue_owner
, STM32L4XX_ERRATUM_VENEER_SECTION_NAME
);
6562 BFD_ASSERT (s
!= NULL
);
6564 sec_data
= elf32_arm_section_data (s
);
6566 tmp_name
= (char *) bfd_malloc ((bfd_size_type
) strlen
6567 (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME
) + 10);
6569 BFD_ASSERT (tmp_name
);
6571 sprintf (tmp_name
, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME
,
6572 hash_table
->num_stm32l4xx_fixes
);
6574 myh
= elf_link_hash_lookup
6575 (&(hash_table
)->root
, tmp_name
, FALSE
, FALSE
, FALSE
);
6577 BFD_ASSERT (myh
== NULL
);
6580 val
= hash_table
->stm32l4xx_erratum_glue_size
;
6581 _bfd_generic_link_add_one_symbol (link_info
, hash_table
->bfd_of_glue_owner
,
6582 tmp_name
, BSF_FUNCTION
| BSF_LOCAL
, s
, val
,
6583 NULL
, TRUE
, FALSE
, &bh
);
6585 myh
= (struct elf_link_hash_entry
*) bh
;
6586 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
6587 myh
->forced_local
= 1;
6589 /* Link veneer back to calling location. */
6590 sec_data
->stm32l4xx_erratumcount
+= 1;
6591 newerr
= (elf32_stm32l4xx_erratum_list
*)
6592 bfd_zmalloc (sizeof (elf32_stm32l4xx_erratum_list
));
6594 newerr
->type
= STM32L4XX_ERRATUM_VENEER
;
6596 newerr
->u
.v
.branch
= branch
;
6597 newerr
->u
.v
.id
= hash_table
->num_stm32l4xx_fixes
;
6598 branch
->u
.b
.veneer
= newerr
;
6600 newerr
->next
= sec_data
->stm32l4xx_erratumlist
;
6601 sec_data
->stm32l4xx_erratumlist
= newerr
;
6603 /* A symbol for the return from the veneer. */
6604 sprintf (tmp_name
, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME
"_r",
6605 hash_table
->num_stm32l4xx_fixes
);
6607 myh
= elf_link_hash_lookup
6608 (&(hash_table
)->root
, tmp_name
, FALSE
, FALSE
, FALSE
);
6615 _bfd_generic_link_add_one_symbol (link_info
, branch_bfd
, tmp_name
, BSF_LOCAL
,
6616 branch_sec
, val
, NULL
, TRUE
, FALSE
, &bh
);
6618 myh
= (struct elf_link_hash_entry
*) bh
;
6619 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
6620 myh
->forced_local
= 1;
6624 /* Generate a mapping symbol for the veneer section, and explicitly add an
6625 entry for that symbol to the code/data map for the section. */
6626 if (hash_table
->stm32l4xx_erratum_glue_size
== 0)
6629 /* Creates a THUMB symbol since there is no other choice. */
6630 _bfd_generic_link_add_one_symbol (link_info
,
6631 hash_table
->bfd_of_glue_owner
, "$t",
6632 BSF_LOCAL
, s
, 0, NULL
,
6635 myh
= (struct elf_link_hash_entry
*) bh
;
6636 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_NOTYPE
);
6637 myh
->forced_local
= 1;
6639 /* The elf32_arm_init_maps function only cares about symbols from input
6640 BFDs. We must make a note of this generated mapping symbol
6641 ourselves so that code byteswapping works properly in
6642 elf32_arm_write_section. */
6643 elf32_arm_section_map_add (s
, 't', 0);
6646 s
->size
+= veneer_size
;
6647 hash_table
->stm32l4xx_erratum_glue_size
+= veneer_size
;
6648 hash_table
->num_stm32l4xx_fixes
++;
6650 /* The offset of the veneer. */
6654 #define ARM_GLUE_SECTION_FLAGS \
6655 (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
6656 | SEC_READONLY | SEC_LINKER_CREATED)
6658 /* Create a fake section for use by the ARM backend of the linker. */
6661 arm_make_glue_section (bfd
* abfd
, const char * name
)
6665 sec
= bfd_get_linker_section (abfd
, name
);
6670 sec
= bfd_make_section_anyway_with_flags (abfd
, name
, ARM_GLUE_SECTION_FLAGS
);
6673 || !bfd_set_section_alignment (abfd
, sec
, 2))
6676 /* Set the gc mark to prevent the section from being removed by garbage
6677 collection, despite the fact that no relocs refer to this section. */
6683 /* Set size of .plt entries. This function is called from the
6684 linker scripts in ld/emultempl/{armelf}.em. */
6687 bfd_elf32_arm_use_long_plt (void)
6689 elf32_arm_use_long_plt_entry
= TRUE
;
6692 /* Add the glue sections to ABFD. This function is called from the
6693 linker scripts in ld/emultempl/{armelf}.em. */
6696 bfd_elf32_arm_add_glue_sections_to_bfd (bfd
*abfd
,
6697 struct bfd_link_info
*info
)
6699 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (info
);
6700 bfd_boolean dostm32l4xx
= globals
6701 && globals
->stm32l4xx_fix
!= BFD_ARM_STM32L4XX_FIX_NONE
;
6702 bfd_boolean addglue
;
6704 /* If we are only performing a partial
6705 link do not bother adding the glue. */
6706 if (bfd_link_relocatable (info
))
6709 addglue
= arm_make_glue_section (abfd
, ARM2THUMB_GLUE_SECTION_NAME
)
6710 && arm_make_glue_section (abfd
, THUMB2ARM_GLUE_SECTION_NAME
)
6711 && arm_make_glue_section (abfd
, VFP11_ERRATUM_VENEER_SECTION_NAME
)
6712 && arm_make_glue_section (abfd
, ARM_BX_GLUE_SECTION_NAME
);
6718 && arm_make_glue_section (abfd
, STM32L4XX_ERRATUM_VENEER_SECTION_NAME
);
6721 /* Mark output sections of veneers needing a dedicated one with SEC_KEEP. This
6722 ensures they are not marked for deletion by
6723 strip_excluded_output_sections () when veneers are going to be created
6724 later. Not doing so would trigger assert on empty section size in
6725 lang_size_sections_1 (). */
6728 bfd_elf32_arm_keep_private_stub_output_sections (struct bfd_link_info
*info
)
6730 enum elf32_arm_stub_type stub_type
;
6732 /* If we are only performing a partial
6733 link do not bother adding the glue. */
6734 if (bfd_link_relocatable (info
))
6737 for (stub_type
= arm_stub_none
+ 1; stub_type
< max_stub_type
; stub_type
++)
6740 const char *out_sec_name
;
6742 if (!arm_dedicated_stub_output_section_required (stub_type
))
6745 out_sec_name
= arm_dedicated_stub_output_section_name (stub_type
);
6746 out_sec
= bfd_get_section_by_name (info
->output_bfd
, out_sec_name
);
6747 if (out_sec
!= NULL
)
6748 out_sec
->flags
|= SEC_KEEP
;
6752 /* Select a BFD to be used to hold the sections used by the glue code.
6753 This function is called from the linker scripts in ld/emultempl/
6757 bfd_elf32_arm_get_bfd_for_interworking (bfd
*abfd
, struct bfd_link_info
*info
)
6759 struct elf32_arm_link_hash_table
*globals
;
6761 /* If we are only performing a partial link
6762 do not bother getting a bfd to hold the glue. */
6763 if (bfd_link_relocatable (info
))
6766 /* Make sure we don't attach the glue sections to a dynamic object. */
6767 BFD_ASSERT (!(abfd
->flags
& DYNAMIC
));
6769 globals
= elf32_arm_hash_table (info
);
6770 BFD_ASSERT (globals
!= NULL
);
6772 if (globals
->bfd_of_glue_owner
!= NULL
)
6775 /* Save the bfd for later use. */
6776 globals
->bfd_of_glue_owner
= abfd
;
6782 check_use_blx (struct elf32_arm_link_hash_table
*globals
)
6786 cpu_arch
= bfd_elf_get_obj_attr_int (globals
->obfd
, OBJ_ATTR_PROC
,
6789 if (globals
->fix_arm1176
)
6791 if (cpu_arch
== TAG_CPU_ARCH_V6T2
|| cpu_arch
> TAG_CPU_ARCH_V6K
)
6792 globals
->use_blx
= 1;
6796 if (cpu_arch
> TAG_CPU_ARCH_V4T
)
6797 globals
->use_blx
= 1;
6802 bfd_elf32_arm_process_before_allocation (bfd
*abfd
,
6803 struct bfd_link_info
*link_info
)
6805 Elf_Internal_Shdr
*symtab_hdr
;
6806 Elf_Internal_Rela
*internal_relocs
= NULL
;
6807 Elf_Internal_Rela
*irel
, *irelend
;
6808 bfd_byte
*contents
= NULL
;
6811 struct elf32_arm_link_hash_table
*globals
;
6813 /* If we are only performing a partial link do not bother
6814 to construct any glue. */
6815 if (bfd_link_relocatable (link_info
))
6818 /* Here we have a bfd that is to be included on the link. We have a
6819 hook to do reloc rummaging, before section sizes are nailed down. */
6820 globals
= elf32_arm_hash_table (link_info
);
6821 BFD_ASSERT (globals
!= NULL
);
6823 check_use_blx (globals
);
6825 if (globals
->byteswap_code
&& !bfd_big_endian (abfd
))
6827 _bfd_error_handler (_("%B: BE8 images only valid in big-endian mode."),
6832 /* PR 5398: If we have not decided to include any loadable sections in
6833 the output then we will not have a glue owner bfd. This is OK, it
6834 just means that there is nothing else for us to do here. */
6835 if (globals
->bfd_of_glue_owner
== NULL
)
6838 /* Rummage around all the relocs and map the glue vectors. */
6839 sec
= abfd
->sections
;
6844 for (; sec
!= NULL
; sec
= sec
->next
)
6846 if (sec
->reloc_count
== 0)
6849 if ((sec
->flags
& SEC_EXCLUDE
) != 0)
6852 symtab_hdr
= & elf_symtab_hdr (abfd
);
6854 /* Load the relocs. */
6856 = _bfd_elf_link_read_relocs (abfd
, sec
, NULL
, NULL
, FALSE
);
6858 if (internal_relocs
== NULL
)
6861 irelend
= internal_relocs
+ sec
->reloc_count
;
6862 for (irel
= internal_relocs
; irel
< irelend
; irel
++)
6865 unsigned long r_index
;
6867 struct elf_link_hash_entry
*h
;
6869 r_type
= ELF32_R_TYPE (irel
->r_info
);
6870 r_index
= ELF32_R_SYM (irel
->r_info
);
6872 /* These are the only relocation types we care about. */
6873 if ( r_type
!= R_ARM_PC24
6874 && (r_type
!= R_ARM_V4BX
|| globals
->fix_v4bx
< 2))
6877 /* Get the section contents if we haven't done so already. */
6878 if (contents
== NULL
)
6880 /* Get cached copy if it exists. */
6881 if (elf_section_data (sec
)->this_hdr
.contents
!= NULL
)
6882 contents
= elf_section_data (sec
)->this_hdr
.contents
;
6885 /* Go get them off disk. */
6886 if (! bfd_malloc_and_get_section (abfd
, sec
, &contents
))
6891 if (r_type
== R_ARM_V4BX
)
6895 reg
= bfd_get_32 (abfd
, contents
+ irel
->r_offset
) & 0xf;
6896 record_arm_bx_glue (link_info
, reg
);
6900 /* If the relocation is not against a symbol it cannot concern us. */
6903 /* We don't care about local symbols. */
6904 if (r_index
< symtab_hdr
->sh_info
)
6907 /* This is an external symbol. */
6908 r_index
-= symtab_hdr
->sh_info
;
6909 h
= (struct elf_link_hash_entry
*)
6910 elf_sym_hashes (abfd
)[r_index
];
6912 /* If the relocation is against a static symbol it must be within
6913 the current section and so cannot be a cross ARM/Thumb relocation. */
6917 /* If the call will go through a PLT entry then we do not need
6919 if (globals
->root
.splt
!= NULL
&& h
->plt
.offset
!= (bfd_vma
) -1)
6925 /* This one is a call from arm code. We need to look up
6926 the target of the call. If it is a thumb target, we
6928 if (ARM_GET_SYM_BRANCH_TYPE (h
->target_internal
)
6929 == ST_BRANCH_TO_THUMB
)
6930 record_arm_to_thumb_glue (link_info
, h
);
6938 if (contents
!= NULL
6939 && elf_section_data (sec
)->this_hdr
.contents
!= contents
)
6943 if (internal_relocs
!= NULL
6944 && elf_section_data (sec
)->relocs
!= internal_relocs
)
6945 free (internal_relocs
);
6946 internal_relocs
= NULL
;
6952 if (contents
!= NULL
6953 && elf_section_data (sec
)->this_hdr
.contents
!= contents
)
6955 if (internal_relocs
!= NULL
6956 && elf_section_data (sec
)->relocs
!= internal_relocs
)
6957 free (internal_relocs
);
6964 /* Initialise maps of ARM/Thumb/data for input BFDs. */
6967 bfd_elf32_arm_init_maps (bfd
*abfd
)
6969 Elf_Internal_Sym
*isymbuf
;
6970 Elf_Internal_Shdr
*hdr
;
6971 unsigned int i
, localsyms
;
6973 /* PR 7093: Make sure that we are dealing with an arm elf binary. */
6974 if (! is_arm_elf (abfd
))
6977 if ((abfd
->flags
& DYNAMIC
) != 0)
6980 hdr
= & elf_symtab_hdr (abfd
);
6981 localsyms
= hdr
->sh_info
;
6983 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
6984 should contain the number of local symbols, which should come before any
6985 global symbols. Mapping symbols are always local. */
6986 isymbuf
= bfd_elf_get_elf_syms (abfd
, hdr
, localsyms
, 0, NULL
, NULL
,
6989 /* No internal symbols read? Skip this BFD. */
6990 if (isymbuf
== NULL
)
6993 for (i
= 0; i
< localsyms
; i
++)
6995 Elf_Internal_Sym
*isym
= &isymbuf
[i
];
6996 asection
*sec
= bfd_section_from_elf_index (abfd
, isym
->st_shndx
);
7000 && ELF_ST_BIND (isym
->st_info
) == STB_LOCAL
)
7002 name
= bfd_elf_string_from_elf_section (abfd
,
7003 hdr
->sh_link
, isym
->st_name
);
7005 if (bfd_is_arm_special_symbol_name (name
,
7006 BFD_ARM_SPECIAL_SYM_TYPE_MAP
))
7007 elf32_arm_section_map_add (sec
, name
[1], isym
->st_value
);
7013 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
7014 say what they wanted. */
7017 bfd_elf32_arm_set_cortex_a8_fix (bfd
*obfd
, struct bfd_link_info
*link_info
)
7019 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (link_info
);
7020 obj_attribute
*out_attr
= elf_known_obj_attributes_proc (obfd
);
7022 if (globals
== NULL
)
7025 if (globals
->fix_cortex_a8
== -1)
7027 /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */
7028 if (out_attr
[Tag_CPU_arch
].i
== TAG_CPU_ARCH_V7
7029 && (out_attr
[Tag_CPU_arch_profile
].i
== 'A'
7030 || out_attr
[Tag_CPU_arch_profile
].i
== 0))
7031 globals
->fix_cortex_a8
= 1;
7033 globals
->fix_cortex_a8
= 0;
7039 bfd_elf32_arm_set_vfp11_fix (bfd
*obfd
, struct bfd_link_info
*link_info
)
7041 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (link_info
);
7042 obj_attribute
*out_attr
= elf_known_obj_attributes_proc (obfd
);
7044 if (globals
== NULL
)
7046 /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix. */
7047 if (out_attr
[Tag_CPU_arch
].i
>= TAG_CPU_ARCH_V7
)
7049 switch (globals
->vfp11_fix
)
7051 case BFD_ARM_VFP11_FIX_DEFAULT
:
7052 case BFD_ARM_VFP11_FIX_NONE
:
7053 globals
->vfp11_fix
= BFD_ARM_VFP11_FIX_NONE
;
7057 /* Give a warning, but do as the user requests anyway. */
7058 (*_bfd_error_handler
) (_("%B: warning: selected VFP11 erratum "
7059 "workaround is not necessary for target architecture"), obfd
);
7062 else if (globals
->vfp11_fix
== BFD_ARM_VFP11_FIX_DEFAULT
)
7063 /* For earlier architectures, we might need the workaround, but do not
7064 enable it by default. If users is running with broken hardware, they
7065 must enable the erratum fix explicitly. */
7066 globals
->vfp11_fix
= BFD_ARM_VFP11_FIX_NONE
;
7070 bfd_elf32_arm_set_stm32l4xx_fix (bfd
*obfd
, struct bfd_link_info
*link_info
)
7072 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (link_info
);
7073 obj_attribute
*out_attr
= elf_known_obj_attributes_proc (obfd
);
7075 if (globals
== NULL
)
7078 /* We assume only Cortex-M4 may require the fix. */
7079 if (out_attr
[Tag_CPU_arch
].i
!= TAG_CPU_ARCH_V7E_M
7080 || out_attr
[Tag_CPU_arch_profile
].i
!= 'M')
7082 if (globals
->stm32l4xx_fix
!= BFD_ARM_STM32L4XX_FIX_NONE
)
7083 /* Give a warning, but do as the user requests anyway. */
7084 (*_bfd_error_handler
)
7085 (_("%B: warning: selected STM32L4XX erratum "
7086 "workaround is not necessary for target architecture"), obfd
);
7090 enum bfd_arm_vfp11_pipe
7098 /* Return a VFP register number. This is encoded as RX:X for single-precision
7099 registers, or X:RX for double-precision registers, where RX is the group of
7100 four bits in the instruction encoding and X is the single extension bit.
7101 RX and X fields are specified using their lowest (starting) bit. The return
7104 0...31: single-precision registers s0...s31
7105 32...63: double-precision registers d0...d31.
7107 Although X should be zero for VFP11 (encoding d0...d15 only), we might
7108 encounter VFP3 instructions, so we allow the full range for DP registers. */
7111 bfd_arm_vfp11_regno (unsigned int insn
, bfd_boolean is_double
, unsigned int rx
,
7115 return (((insn
>> rx
) & 0xf) | (((insn
>> x
) & 1) << 4)) + 32;
7117 return (((insn
>> rx
) & 0xf) << 1) | ((insn
>> x
) & 1);
/* Set bits in *WMASK according to a register number REG as encoded by
   bfd_arm_vfp11_regno().  Ignore d16-d31.  */

static void
bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
{
  if (reg < 32)
    /* Single-precision register s0..s31: one bit per register.  */
    *wmask |= 1 << reg;
  else if (reg < 48)
    /* Double-precision d0..d15: mark both overlapping SP halves.  */
    *wmask |= 3 << ((reg - 32) * 2);
  /* d16-d31 (reg >= 48) do not exist on VFP11: deliberately ignored.  */
}
7132 /* Return TRUE if WMASK overwrites anything in REGS. */
7135 bfd_arm_vfp11_antidependency (unsigned int wmask
, int *regs
, int numregs
)
7139 for (i
= 0; i
< numregs
; i
++)
7141 unsigned int reg
= regs
[i
];
7143 if (reg
< 32 && (wmask
& (1 << reg
)) != 0)
7151 if ((wmask
& (3 << (reg
* 2))) != 0)
7158 /* In this function, we're interested in two things: finding input registers
7159 for VFP data-processing instructions, and finding the set of registers which
7160 arbitrary VFP instructions may write to. We use a 32-bit unsigned int to
7161 hold the written set, so FLDM etc. are easy to deal with (we're only
7162 interested in 32 SP registers or 16 dp registers, due to the VFP version
7163 implemented by the chip in question). DP registers are marked by setting
7164 both SP registers in the write mask). */
7166 static enum bfd_arm_vfp11_pipe
7167 bfd_arm_vfp11_insn_decode (unsigned int insn
, unsigned int *destmask
, int *regs
,
7170 enum bfd_arm_vfp11_pipe vpipe
= VFP11_BAD
;
7171 bfd_boolean is_double
= ((insn
& 0xf00) == 0xb00) ? 1 : 0;
7173 if ((insn
& 0x0f000e10) == 0x0e000a00) /* A data-processing insn. */
7176 unsigned int fd
= bfd_arm_vfp11_regno (insn
, is_double
, 12, 22);
7177 unsigned int fm
= bfd_arm_vfp11_regno (insn
, is_double
, 0, 5);
7179 pqrs
= ((insn
& 0x00800000) >> 20)
7180 | ((insn
& 0x00300000) >> 19)
7181 | ((insn
& 0x00000040) >> 6);
7185 case 0: /* fmac[sd]. */
7186 case 1: /* fnmac[sd]. */
7187 case 2: /* fmsc[sd]. */
7188 case 3: /* fnmsc[sd]. */
7190 bfd_arm_vfp11_write_mask (destmask
, fd
);
7192 regs
[1] = bfd_arm_vfp11_regno (insn
, is_double
, 16, 7); /* Fn. */
7197 case 4: /* fmul[sd]. */
7198 case 5: /* fnmul[sd]. */
7199 case 6: /* fadd[sd]. */
7200 case 7: /* fsub[sd]. */
7204 case 8: /* fdiv[sd]. */
7207 bfd_arm_vfp11_write_mask (destmask
, fd
);
7208 regs
[0] = bfd_arm_vfp11_regno (insn
, is_double
, 16, 7); /* Fn. */
7213 case 15: /* extended opcode. */
7215 unsigned int extn
= ((insn
>> 15) & 0x1e)
7216 | ((insn
>> 7) & 1);
7220 case 0: /* fcpy[sd]. */
7221 case 1: /* fabs[sd]. */
7222 case 2: /* fneg[sd]. */
7223 case 8: /* fcmp[sd]. */
7224 case 9: /* fcmpe[sd]. */
7225 case 10: /* fcmpz[sd]. */
7226 case 11: /* fcmpez[sd]. */
7227 case 16: /* fuito[sd]. */
7228 case 17: /* fsito[sd]. */
7229 case 24: /* ftoui[sd]. */
7230 case 25: /* ftouiz[sd]. */
7231 case 26: /* ftosi[sd]. */
7232 case 27: /* ftosiz[sd]. */
7233 /* These instructions will not bounce due to underflow. */
7238 case 3: /* fsqrt[sd]. */
7239 /* fsqrt cannot underflow, but it can (perhaps) overwrite
7240 registers to cause the erratum in previous instructions. */
7241 bfd_arm_vfp11_write_mask (destmask
, fd
);
7245 case 15: /* fcvt{ds,sd}. */
7249 bfd_arm_vfp11_write_mask (destmask
, fd
);
7251 /* Only FCVTSD can underflow. */
7252 if ((insn
& 0x100) != 0)
7271 /* Two-register transfer. */
7272 else if ((insn
& 0x0fe00ed0) == 0x0c400a10)
7274 unsigned int fm
= bfd_arm_vfp11_regno (insn
, is_double
, 0, 5);
7276 if ((insn
& 0x100000) == 0)
7279 bfd_arm_vfp11_write_mask (destmask
, fm
);
7282 bfd_arm_vfp11_write_mask (destmask
, fm
);
7283 bfd_arm_vfp11_write_mask (destmask
, fm
+ 1);
7289 else if ((insn
& 0x0e100e00) == 0x0c100a00) /* A load insn. */
7291 int fd
= bfd_arm_vfp11_regno (insn
, is_double
, 12, 22);
7292 unsigned int puw
= ((insn
>> 21) & 0x1) | (((insn
>> 23) & 3) << 1);
7296 case 0: /* Two-reg transfer. We should catch these above. */
7299 case 2: /* fldm[sdx]. */
7303 unsigned int i
, offset
= insn
& 0xff;
7308 for (i
= fd
; i
< fd
+ offset
; i
++)
7309 bfd_arm_vfp11_write_mask (destmask
, i
);
7313 case 4: /* fld[sd]. */
7315 bfd_arm_vfp11_write_mask (destmask
, fd
);
7324 /* Single-register transfer. Note L==0. */
7325 else if ((insn
& 0x0f100e10) == 0x0e000a10)
7327 unsigned int opcode
= (insn
>> 21) & 7;
7328 unsigned int fn
= bfd_arm_vfp11_regno (insn
, is_double
, 16, 7);
7332 case 0: /* fmsr/fmdlr. */
7333 case 1: /* fmdhr. */
7334 /* Mark fmdhr and fmdlr as writing to the whole of the DP
7335 destination register. I don't know if this is exactly right,
7336 but it is the conservative choice. */
7337 bfd_arm_vfp11_write_mask (destmask
, fn
);
7351 static int elf32_arm_compare_mapping (const void * a
, const void * b
);
7354 /* Look for potentially-troublesome code sequences which might trigger the
7355 VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet
7356 (available from ARM) for details of the erratum. A short version is
7357 described in ld.texinfo. */
7360 bfd_elf32_arm_vfp11_erratum_scan (bfd
*abfd
, struct bfd_link_info
*link_info
)
7363 bfd_byte
*contents
= NULL
;
7365 int regs
[3], numregs
= 0;
7366 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (link_info
);
7367 int use_vector
= (globals
->vfp11_fix
== BFD_ARM_VFP11_FIX_VECTOR
);
7369 if (globals
== NULL
)
7372 /* We use a simple FSM to match troublesome VFP11 instruction sequences.
7373 The states transition as follows:
7375 0 -> 1 (vector) or 0 -> 2 (scalar)
7376 A VFP FMAC-pipeline instruction has been seen. Fill
7377 regs[0]..regs[numregs-1] with its input operands. Remember this
7378 instruction in 'first_fmac'.
7381 Any instruction, except for a VFP instruction which overwrites
7386 A VFP instruction has been seen which overwrites any of regs[*].
7387 We must make a veneer! Reset state to 0 before examining next
7391 If we fail to match anything in state 2, reset to state 0 and reset
7392 the instruction pointer to the instruction after 'first_fmac'.
7394 If the VFP11 vector mode is in use, there must be at least two unrelated
7395 instructions between anti-dependent VFP11 instructions to properly avoid
7396 triggering the erratum, hence the use of the extra state 1. */
7398 /* If we are only performing a partial link do not bother
7399 to construct any glue. */
7400 if (bfd_link_relocatable (link_info
))
7403 /* Skip if this bfd does not correspond to an ELF image. */
7404 if (! is_arm_elf (abfd
))
7407 /* We should have chosen a fix type by the time we get here. */
7408 BFD_ASSERT (globals
->vfp11_fix
!= BFD_ARM_VFP11_FIX_DEFAULT
);
7410 if (globals
->vfp11_fix
== BFD_ARM_VFP11_FIX_NONE
)
7413 /* Skip this BFD if it corresponds to an executable or dynamic object. */
7414 if ((abfd
->flags
& (EXEC_P
| DYNAMIC
)) != 0)
7417 for (sec
= abfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
7419 unsigned int i
, span
, first_fmac
= 0, veneer_of_insn
= 0;
7420 struct _arm_elf_section_data
*sec_data
;
7422 /* If we don't have executable progbits, we're not interested in this
7423 section. Also skip if section is to be excluded. */
7424 if (elf_section_type (sec
) != SHT_PROGBITS
7425 || (elf_section_flags (sec
) & SHF_EXECINSTR
) == 0
7426 || (sec
->flags
& SEC_EXCLUDE
) != 0
7427 || sec
->sec_info_type
== SEC_INFO_TYPE_JUST_SYMS
7428 || sec
->output_section
== bfd_abs_section_ptr
7429 || strcmp (sec
->name
, VFP11_ERRATUM_VENEER_SECTION_NAME
) == 0)
7432 sec_data
= elf32_arm_section_data (sec
);
7434 if (sec_data
->mapcount
== 0)
7437 if (elf_section_data (sec
)->this_hdr
.contents
!= NULL
)
7438 contents
= elf_section_data (sec
)->this_hdr
.contents
;
7439 else if (! bfd_malloc_and_get_section (abfd
, sec
, &contents
))
7442 qsort (sec_data
->map
, sec_data
->mapcount
, sizeof (elf32_arm_section_map
),
7443 elf32_arm_compare_mapping
);
7445 for (span
= 0; span
< sec_data
->mapcount
; span
++)
7447 unsigned int span_start
= sec_data
->map
[span
].vma
;
7448 unsigned int span_end
= (span
== sec_data
->mapcount
- 1)
7449 ? sec
->size
: sec_data
->map
[span
+ 1].vma
;
7450 char span_type
= sec_data
->map
[span
].type
;
7452 /* FIXME: Only ARM mode is supported at present. We may need to
7453 support Thumb-2 mode also at some point. */
7454 if (span_type
!= 'a')
7457 for (i
= span_start
; i
< span_end
;)
7459 unsigned int next_i
= i
+ 4;
7460 unsigned int insn
= bfd_big_endian (abfd
)
7461 ? (contents
[i
] << 24)
7462 | (contents
[i
+ 1] << 16)
7463 | (contents
[i
+ 2] << 8)
7465 : (contents
[i
+ 3] << 24)
7466 | (contents
[i
+ 2] << 16)
7467 | (contents
[i
+ 1] << 8)
7469 unsigned int writemask
= 0;
7470 enum bfd_arm_vfp11_pipe vpipe
;
7475 vpipe
= bfd_arm_vfp11_insn_decode (insn
, &writemask
, regs
,
7477 /* I'm assuming the VFP11 erratum can trigger with denorm
7478 operands on either the FMAC or the DS pipeline. This might
7479 lead to slightly overenthusiastic veneer insertion. */
7480 if (vpipe
== VFP11_FMAC
|| vpipe
== VFP11_DS
)
7482 state
= use_vector
? 1 : 2;
7484 veneer_of_insn
= insn
;
7490 int other_regs
[3], other_numregs
;
7491 vpipe
= bfd_arm_vfp11_insn_decode (insn
, &writemask
,
7494 if (vpipe
!= VFP11_BAD
7495 && bfd_arm_vfp11_antidependency (writemask
, regs
,
7505 int other_regs
[3], other_numregs
;
7506 vpipe
= bfd_arm_vfp11_insn_decode (insn
, &writemask
,
7509 if (vpipe
!= VFP11_BAD
7510 && bfd_arm_vfp11_antidependency (writemask
, regs
,
7516 next_i
= first_fmac
+ 4;
7522 abort (); /* Should be unreachable. */
7527 elf32_vfp11_erratum_list
*newerr
=(elf32_vfp11_erratum_list
*)
7528 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list
));
7530 elf32_arm_section_data (sec
)->erratumcount
+= 1;
7532 newerr
->u
.b
.vfp_insn
= veneer_of_insn
;
7537 newerr
->type
= VFP11_ERRATUM_BRANCH_TO_ARM_VENEER
;
7544 record_vfp11_erratum_veneer (link_info
, newerr
, abfd
, sec
,
7549 newerr
->next
= sec_data
->erratumlist
;
7550 sec_data
->erratumlist
= newerr
;
7559 if (contents
!= NULL
7560 && elf_section_data (sec
)->this_hdr
.contents
!= contents
)
7568 if (contents
!= NULL
7569 && elf_section_data (sec
)->this_hdr
.contents
!= contents
)
7575 /* Find virtual-memory addresses for VFP11 erratum veneers and return locations
7576 after sections have been laid out, using specially-named symbols. */
7579 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd
*abfd
,
7580 struct bfd_link_info
*link_info
)
7583 struct elf32_arm_link_hash_table
*globals
;
7586 if (bfd_link_relocatable (link_info
))
7589 /* Skip if this bfd does not correspond to an ELF image. */
7590 if (! is_arm_elf (abfd
))
7593 globals
= elf32_arm_hash_table (link_info
);
7594 if (globals
== NULL
)
7597 tmp_name
= (char *) bfd_malloc ((bfd_size_type
) strlen
7598 (VFP11_ERRATUM_VENEER_ENTRY_NAME
) + 10);
7600 for (sec
= abfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
7602 struct _arm_elf_section_data
*sec_data
= elf32_arm_section_data (sec
);
7603 elf32_vfp11_erratum_list
*errnode
= sec_data
->erratumlist
;
7605 for (; errnode
!= NULL
; errnode
= errnode
->next
)
7607 struct elf_link_hash_entry
*myh
;
7610 switch (errnode
->type
)
7612 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER
:
7613 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER
:
7614 /* Find veneer symbol. */
7615 sprintf (tmp_name
, VFP11_ERRATUM_VENEER_ENTRY_NAME
,
7616 errnode
->u
.b
.veneer
->u
.v
.id
);
7618 myh
= elf_link_hash_lookup
7619 (&(globals
)->root
, tmp_name
, FALSE
, FALSE
, TRUE
);
7622 (*_bfd_error_handler
) (_("%B: unable to find VFP11 veneer "
7623 "`%s'"), abfd
, tmp_name
);
7625 vma
= myh
->root
.u
.def
.section
->output_section
->vma
7626 + myh
->root
.u
.def
.section
->output_offset
7627 + myh
->root
.u
.def
.value
;
7629 errnode
->u
.b
.veneer
->vma
= vma
;
7632 case VFP11_ERRATUM_ARM_VENEER
:
7633 case VFP11_ERRATUM_THUMB_VENEER
:
7634 /* Find return location. */
7635 sprintf (tmp_name
, VFP11_ERRATUM_VENEER_ENTRY_NAME
"_r",
7638 myh
= elf_link_hash_lookup
7639 (&(globals
)->root
, tmp_name
, FALSE
, FALSE
, TRUE
);
7642 (*_bfd_error_handler
) (_("%B: unable to find VFP11 veneer "
7643 "`%s'"), abfd
, tmp_name
);
7645 vma
= myh
->root
.u
.def
.section
->output_section
->vma
7646 + myh
->root
.u
.def
.section
->output_offset
7647 + myh
->root
.u
.def
.value
;
7649 errnode
->u
.v
.branch
->vma
= vma
;
7661 /* Find virtual-memory addresses for STM32L4XX erratum veneers and
7662 return locations after sections have been laid out, using
7663 specially-named symbols. */
7666 bfd_elf32_arm_stm32l4xx_fix_veneer_locations (bfd
*abfd
,
7667 struct bfd_link_info
*link_info
)
7670 struct elf32_arm_link_hash_table
*globals
;
7673 if (bfd_link_relocatable (link_info
))
7676 /* Skip if this bfd does not correspond to an ELF image. */
7677 if (! is_arm_elf (abfd
))
7680 globals
= elf32_arm_hash_table (link_info
);
7681 if (globals
== NULL
)
7684 tmp_name
= (char *) bfd_malloc ((bfd_size_type
) strlen
7685 (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME
) + 10);
7687 for (sec
= abfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
7689 struct _arm_elf_section_data
*sec_data
= elf32_arm_section_data (sec
);
7690 elf32_stm32l4xx_erratum_list
*errnode
= sec_data
->stm32l4xx_erratumlist
;
7692 for (; errnode
!= NULL
; errnode
= errnode
->next
)
7694 struct elf_link_hash_entry
*myh
;
7697 switch (errnode
->type
)
7699 case STM32L4XX_ERRATUM_BRANCH_TO_VENEER
:
7700 /* Find veneer symbol. */
7701 sprintf (tmp_name
, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME
,
7702 errnode
->u
.b
.veneer
->u
.v
.id
);
7704 myh
= elf_link_hash_lookup
7705 (&(globals
)->root
, tmp_name
, FALSE
, FALSE
, TRUE
);
7708 (*_bfd_error_handler
) (_("%B: unable to find STM32L4XX veneer "
7709 "`%s'"), abfd
, tmp_name
);
7711 vma
= myh
->root
.u
.def
.section
->output_section
->vma
7712 + myh
->root
.u
.def
.section
->output_offset
7713 + myh
->root
.u
.def
.value
;
7715 errnode
->u
.b
.veneer
->vma
= vma
;
7718 case STM32L4XX_ERRATUM_VENEER
:
7719 /* Find return location. */
7720 sprintf (tmp_name
, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME
"_r",
7723 myh
= elf_link_hash_lookup
7724 (&(globals
)->root
, tmp_name
, FALSE
, FALSE
, TRUE
);
7727 (*_bfd_error_handler
) (_("%B: unable to find STM32L4XX veneer "
7728 "`%s'"), abfd
, tmp_name
);
7730 vma
= myh
->root
.u
.def
.section
->output_section
->vma
7731 + myh
->root
.u
.def
.section
->output_offset
7732 + myh
->root
.u
.def
.value
;
7734 errnode
->u
.v
.branch
->vma
= vma
;
7746 static inline bfd_boolean
7747 is_thumb2_ldmia (const insn32 insn
)
7749 /* Encoding T2: LDM<c>.W <Rn>{!},<registers>
7750 1110 - 1000 - 10W1 - rrrr - PM (0) l - llll - llll - llll. */
7751 return (insn
& 0xffd02000) == 0xe8900000;
7754 static inline bfd_boolean
7755 is_thumb2_ldmdb (const insn32 insn
)
7757 /* Encoding T1: LDMDB<c> <Rn>{!},<registers>
7758 1110 - 1001 - 00W1 - rrrr - PM (0) l - llll - llll - llll. */
7759 return (insn
& 0xffd02000) == 0xe9100000;
7762 static inline bfd_boolean
7763 is_thumb2_vldm (const insn32 insn
)
7765 /* A6.5 Extension register load or store instruction
7767 We look for SP 32-bit and DP 64-bit registers.
7768 Encoding T1 VLDM{mode}<c> <Rn>{!}, <list>
7769 <list> is consecutive 64-bit registers
7770 1110 - 110P - UDW1 - rrrr - vvvv - 1011 - iiii - iiii
7771 Encoding T2 VLDM{mode}<c> <Rn>{!}, <list>
7772 <list> is consecutive 32-bit registers
7773 1110 - 110P - UDW1 - rrrr - vvvv - 1010 - iiii - iiii
7774 if P==0 && U==1 && W==1 && Rn=1101 VPOP
7775 if PUW=010 || PUW=011 || PUW=101 VLDM. */
7777 (((insn
& 0xfe100f00) == 0xec100b00) ||
7778 ((insn
& 0xfe100f00) == 0xec100a00))
7779 && /* (IA without !). */
7780 (((((insn
<< 7) >> 28) & 0xd) == 0x4)
7781 /* (IA with !), includes VPOP (when reg number is SP). */
7782 || ((((insn
<< 7) >> 28) & 0xd) == 0x5)
7784 || ((((insn
<< 7) >> 28) & 0xd) == 0x9));
7787 /* STM STM32L4XX erratum : This function assumes that it receives an LDM or
7789 - computes the number and the mode of memory accesses
7790 - decides if the replacement should be done:
7791 . replaces only if > 8-word accesses
7792 . or (testing purposes only) replaces all accesses. */
7795 stm32l4xx_need_create_replacing_stub (const insn32 insn
,
7796 bfd_arm_stm32l4xx_fix stm32l4xx_fix
)
7800 /* The field encoding the register list is the same for both LDMIA
7801 and LDMDB encodings. */
7802 if (is_thumb2_ldmia (insn
) || is_thumb2_ldmdb (insn
))
7803 nb_words
= popcount (insn
& 0x0000ffff);
7804 else if (is_thumb2_vldm (insn
))
7805 nb_words
= (insn
& 0xff);
7807 /* DEFAULT mode accounts for the real bug condition situation,
7808 ALL mode inserts stubs for each LDM/VLDM instruction (testing). */
7810 (stm32l4xx_fix
== BFD_ARM_STM32L4XX_FIX_DEFAULT
) ? nb_words
> 8 :
7811 (stm32l4xx_fix
== BFD_ARM_STM32L4XX_FIX_ALL
) ? TRUE
: FALSE
;
7814 /* Look for potentially-troublesome code sequences which might trigger
7815 the STM STM32L4XX erratum. */
7818 bfd_elf32_arm_stm32l4xx_erratum_scan (bfd
*abfd
,
7819 struct bfd_link_info
*link_info
)
7822 bfd_byte
*contents
= NULL
;
7823 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (link_info
);
7825 if (globals
== NULL
)
7828 /* If we are only performing a partial link do not bother
7829 to construct any glue. */
7830 if (bfd_link_relocatable (link_info
))
7833 /* Skip if this bfd does not correspond to an ELF image. */
7834 if (! is_arm_elf (abfd
))
7837 if (globals
->stm32l4xx_fix
== BFD_ARM_STM32L4XX_FIX_NONE
)
7840 /* Skip this BFD if it corresponds to an executable or dynamic object. */
7841 if ((abfd
->flags
& (EXEC_P
| DYNAMIC
)) != 0)
7844 for (sec
= abfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
7846 unsigned int i
, span
;
7847 struct _arm_elf_section_data
*sec_data
;
7849 /* If we don't have executable progbits, we're not interested in this
7850 section. Also skip if section is to be excluded. */
7851 if (elf_section_type (sec
) != SHT_PROGBITS
7852 || (elf_section_flags (sec
) & SHF_EXECINSTR
) == 0
7853 || (sec
->flags
& SEC_EXCLUDE
) != 0
7854 || sec
->sec_info_type
== SEC_INFO_TYPE_JUST_SYMS
7855 || sec
->output_section
== bfd_abs_section_ptr
7856 || strcmp (sec
->name
, STM32L4XX_ERRATUM_VENEER_SECTION_NAME
) == 0)
7859 sec_data
= elf32_arm_section_data (sec
);
7861 if (sec_data
->mapcount
== 0)
7864 if (elf_section_data (sec
)->this_hdr
.contents
!= NULL
)
7865 contents
= elf_section_data (sec
)->this_hdr
.contents
;
7866 else if (! bfd_malloc_and_get_section (abfd
, sec
, &contents
))
7869 qsort (sec_data
->map
, sec_data
->mapcount
, sizeof (elf32_arm_section_map
),
7870 elf32_arm_compare_mapping
);
7872 for (span
= 0; span
< sec_data
->mapcount
; span
++)
7874 unsigned int span_start
= sec_data
->map
[span
].vma
;
7875 unsigned int span_end
= (span
== sec_data
->mapcount
- 1)
7876 ? sec
->size
: sec_data
->map
[span
+ 1].vma
;
7877 char span_type
= sec_data
->map
[span
].type
;
7878 int itblock_current_pos
= 0;
7880 /* Only Thumb2 mode need be supported with this CM4 specific
7881 code, we should not encounter any arm mode eg span_type
7883 if (span_type
!= 't')
7886 for (i
= span_start
; i
< span_end
;)
7888 unsigned int insn
= bfd_get_16 (abfd
, &contents
[i
]);
7889 bfd_boolean insn_32bit
= FALSE
;
7890 bfd_boolean is_ldm
= FALSE
;
7891 bfd_boolean is_vldm
= FALSE
;
7892 bfd_boolean is_not_last_in_it_block
= FALSE
;
7894 /* The first 16-bits of all 32-bit thumb2 instructions start
7895 with opcode[15..13]=0b111 and the encoded op1 can be anything
7896 except opcode[12..11]!=0b00.
7897 See 32-bit Thumb instruction encoding. */
7898 if ((insn
& 0xe000) == 0xe000 && (insn
& 0x1800) != 0x0000)
7901 /* Compute the predicate that tells if the instruction
7902 is concerned by the IT block
7903 - Creates an error if there is a ldm that is not
7904 last in the IT block thus cannot be replaced
7905 - Otherwise we can create a branch at the end of the
7906 IT block, it will be controlled naturally by IT
7907 with the proper pseudo-predicate
7908 - So the only interesting predicate is the one that
7909 tells that we are not on the last item of an IT
7911 if (itblock_current_pos
!= 0)
7912 is_not_last_in_it_block
= !!--itblock_current_pos
;
7916 /* Load the rest of the insn (in manual-friendly order). */
7917 insn
= (insn
<< 16) | bfd_get_16 (abfd
, &contents
[i
+ 2]);
7918 is_ldm
= is_thumb2_ldmia (insn
) || is_thumb2_ldmdb (insn
);
7919 is_vldm
= is_thumb2_vldm (insn
);
7921 /* Veneers are created for (v)ldm depending on
7922 option flags and memory accesses conditions; but
7923 if the instruction is not the last instruction of
7924 an IT block, we cannot create a jump there, so we
7926 if ((is_ldm
|| is_vldm
) &&
7927 stm32l4xx_need_create_replacing_stub
7928 (insn
, globals
->stm32l4xx_fix
))
7930 if (is_not_last_in_it_block
)
7932 (*_bfd_error_handler
)
7933 /* Note - overlong line used here to allow for translation. */
7935 %B(%A+0x%lx): error: multiple load detected in non-last IT block instruction : STM32L4XX veneer cannot be generated.\n"
7936 "Use gcc option -mrestrict-it to generate only one instruction per IT block.\n"),
7937 abfd
, sec
, (long)i
);
7941 elf32_stm32l4xx_erratum_list
*newerr
=
7942 (elf32_stm32l4xx_erratum_list
*)
7944 (sizeof (elf32_stm32l4xx_erratum_list
));
7946 elf32_arm_section_data (sec
)
7947 ->stm32l4xx_erratumcount
+= 1;
7948 newerr
->u
.b
.insn
= insn
;
7949 /* We create only thumb branches. */
7951 STM32L4XX_ERRATUM_BRANCH_TO_VENEER
;
7952 record_stm32l4xx_erratum_veneer
7953 (link_info
, newerr
, abfd
, sec
,
7956 STM32L4XX_ERRATUM_LDM_VENEER_SIZE
:
7957 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE
);
7959 newerr
->next
= sec_data
->stm32l4xx_erratumlist
;
7960 sec_data
->stm32l4xx_erratumlist
= newerr
;
7967 IT blocks are only encoded in T1
7968 Encoding T1: IT{x{y{z}}} <firstcond>
7969 1 0 1 1 - 1 1 1 1 - firstcond - mask
7970 if mask = '0000' then see 'related encodings'
7971 We don't deal with UNPREDICTABLE, just ignore these.
7972 There can be no nested IT blocks so an IT block
7973 is naturally a new one for which it is worth
7974 computing its size. */
7975 bfd_boolean is_newitblock
= ((insn
& 0xff00) == 0xbf00) &&
7976 ((insn
& 0x000f) != 0x0000);
7977 /* If we have a new IT block we compute its size. */
7980 /* Compute the number of instructions controlled
7981 by the IT block, it will be used to decide
7982 whether we are inside an IT block or not. */
7983 unsigned int mask
= insn
& 0x000f;
7984 itblock_current_pos
= 4 - ctz (mask
);
7988 i
+= insn_32bit
? 4 : 2;
7992 if (contents
!= NULL
7993 && elf_section_data (sec
)->this_hdr
.contents
!= contents
)
8001 if (contents
!= NULL
8002 && elf_section_data (sec
)->this_hdr
.contents
!= contents
)
8008 /* Set target relocation values needed during linking. */
8011 bfd_elf32_arm_set_target_relocs (struct bfd
*output_bfd
,
8012 struct bfd_link_info
*link_info
,
8014 char * target2_type
,
8017 bfd_arm_vfp11_fix vfp11_fix
,
8018 bfd_arm_stm32l4xx_fix stm32l4xx_fix
,
8019 int no_enum_warn
, int no_wchar_warn
,
8020 int pic_veneer
, int fix_cortex_a8
,
8023 struct elf32_arm_link_hash_table
*globals
;
8025 globals
= elf32_arm_hash_table (link_info
);
8026 if (globals
== NULL
)
8029 globals
->target1_is_rel
= target1_is_rel
;
8030 if (strcmp (target2_type
, "rel") == 0)
8031 globals
->target2_reloc
= R_ARM_REL32
;
8032 else if (strcmp (target2_type
, "abs") == 0)
8033 globals
->target2_reloc
= R_ARM_ABS32
;
8034 else if (strcmp (target2_type
, "got-rel") == 0)
8035 globals
->target2_reloc
= R_ARM_GOT_PREL
;
8038 _bfd_error_handler (_("Invalid TARGET2 relocation type '%s'."),
8041 globals
->fix_v4bx
= fix_v4bx
;
8042 globals
->use_blx
|= use_blx
;
8043 globals
->vfp11_fix
= vfp11_fix
;
8044 globals
->stm32l4xx_fix
= stm32l4xx_fix
;
8045 globals
->pic_veneer
= pic_veneer
;
8046 globals
->fix_cortex_a8
= fix_cortex_a8
;
8047 globals
->fix_arm1176
= fix_arm1176
;
8049 BFD_ASSERT (is_arm_elf (output_bfd
));
8050 elf_arm_tdata (output_bfd
)->no_enum_size_warning
= no_enum_warn
;
8051 elf_arm_tdata (output_bfd
)->no_wchar_size_warning
= no_wchar_warn
;
8054 /* Replace the target offset of a Thumb bl or b.w instruction. */
8057 insert_thumb_branch (bfd
*abfd
, long int offset
, bfd_byte
*insn
)
8063 BFD_ASSERT ((offset
& 1) == 0);
8065 upper
= bfd_get_16 (abfd
, insn
);
8066 lower
= bfd_get_16 (abfd
, insn
+ 2);
8067 reloc_sign
= (offset
< 0) ? 1 : 0;
8068 upper
= (upper
& ~(bfd_vma
) 0x7ff)
8069 | ((offset
>> 12) & 0x3ff)
8070 | (reloc_sign
<< 10);
8071 lower
= (lower
& ~(bfd_vma
) 0x2fff)
8072 | (((!((offset
>> 23) & 1)) ^ reloc_sign
) << 13)
8073 | (((!((offset
>> 22) & 1)) ^ reloc_sign
) << 11)
8074 | ((offset
>> 1) & 0x7ff);
8075 bfd_put_16 (abfd
, upper
, insn
);
8076 bfd_put_16 (abfd
, lower
, insn
+ 2);
8079 /* Thumb code calling an ARM function. */
8082 elf32_thumb_to_arm_stub (struct bfd_link_info
* info
,
8086 asection
* input_section
,
8087 bfd_byte
* hit_data
,
8090 bfd_signed_vma addend
,
8092 char **error_message
)
8096 long int ret_offset
;
8097 struct elf_link_hash_entry
* myh
;
8098 struct elf32_arm_link_hash_table
* globals
;
8100 myh
= find_thumb_glue (info
, name
, error_message
);
8104 globals
= elf32_arm_hash_table (info
);
8105 BFD_ASSERT (globals
!= NULL
);
8106 BFD_ASSERT (globals
->bfd_of_glue_owner
!= NULL
);
8108 my_offset
= myh
->root
.u
.def
.value
;
8110 s
= bfd_get_linker_section (globals
->bfd_of_glue_owner
,
8111 THUMB2ARM_GLUE_SECTION_NAME
);
8113 BFD_ASSERT (s
!= NULL
);
8114 BFD_ASSERT (s
->contents
!= NULL
);
8115 BFD_ASSERT (s
->output_section
!= NULL
);
8117 if ((my_offset
& 0x01) == 0x01)
8120 && sym_sec
->owner
!= NULL
8121 && !INTERWORK_FLAG (sym_sec
->owner
))
8123 (*_bfd_error_handler
)
8124 (_("%B(%s): warning: interworking not enabled.\n"
8125 " first occurrence: %B: Thumb call to ARM"),
8126 sym_sec
->owner
, input_bfd
, name
);
8132 myh
->root
.u
.def
.value
= my_offset
;
8134 put_thumb_insn (globals
, output_bfd
, (bfd_vma
) t2a1_bx_pc_insn
,
8135 s
->contents
+ my_offset
);
8137 put_thumb_insn (globals
, output_bfd
, (bfd_vma
) t2a2_noop_insn
,
8138 s
->contents
+ my_offset
+ 2);
8141 /* Address of destination of the stub. */
8142 ((bfd_signed_vma
) val
)
8144 /* Offset from the start of the current section
8145 to the start of the stubs. */
8147 /* Offset of the start of this stub from the start of the stubs. */
8149 /* Address of the start of the current section. */
8150 + s
->output_section
->vma
)
8151 /* The branch instruction is 4 bytes into the stub. */
8153 /* ARM branches work from the pc of the instruction + 8. */
8156 put_arm_insn (globals
, output_bfd
,
8157 (bfd_vma
) t2a3_b_insn
| ((ret_offset
>> 2) & 0x00FFFFFF),
8158 s
->contents
+ my_offset
+ 4);
8161 BFD_ASSERT (my_offset
<= globals
->thumb_glue_size
);
8163 /* Now go back and fix up the original BL insn to point to here. */
8165 /* Address of where the stub is located. */
8166 (s
->output_section
->vma
+ s
->output_offset
+ my_offset
)
8167 /* Address of where the BL is located. */
8168 - (input_section
->output_section
->vma
+ input_section
->output_offset
8170 /* Addend in the relocation. */
8172 /* Biassing for PC-relative addressing. */
8175 insert_thumb_branch (input_bfd
, ret_offset
, hit_data
- input_section
->vma
);
8180 /* Populate an Arm to Thumb stub. Returns the stub symbol. */
8182 static struct elf_link_hash_entry
*
8183 elf32_arm_create_thumb_stub (struct bfd_link_info
* info
,
8190 char ** error_message
)
8193 long int ret_offset
;
8194 struct elf_link_hash_entry
* myh
;
8195 struct elf32_arm_link_hash_table
* globals
;
8197 myh
= find_arm_glue (info
, name
, error_message
);
8201 globals
= elf32_arm_hash_table (info
);
8202 BFD_ASSERT (globals
!= NULL
);
8203 BFD_ASSERT (globals
->bfd_of_glue_owner
!= NULL
);
8205 my_offset
= myh
->root
.u
.def
.value
;
8207 if ((my_offset
& 0x01) == 0x01)
8210 && sym_sec
->owner
!= NULL
8211 && !INTERWORK_FLAG (sym_sec
->owner
))
8213 (*_bfd_error_handler
)
8214 (_("%B(%s): warning: interworking not enabled.\n"
8215 " first occurrence: %B: arm call to thumb"),
8216 sym_sec
->owner
, input_bfd
, name
);
8220 myh
->root
.u
.def
.value
= my_offset
;
8222 if (bfd_link_pic (info
)
8223 || globals
->root
.is_relocatable_executable
8224 || globals
->pic_veneer
)
8226 /* For relocatable objects we can't use absolute addresses,
8227 so construct the address from a relative offset. */
8228 /* TODO: If the offset is small it's probably worth
8229 constructing the address with adds. */
8230 put_arm_insn (globals
, output_bfd
, (bfd_vma
) a2t1p_ldr_insn
,
8231 s
->contents
+ my_offset
);
8232 put_arm_insn (globals
, output_bfd
, (bfd_vma
) a2t2p_add_pc_insn
,
8233 s
->contents
+ my_offset
+ 4);
8234 put_arm_insn (globals
, output_bfd
, (bfd_vma
) a2t3p_bx_r12_insn
,
8235 s
->contents
+ my_offset
+ 8);
8236 /* Adjust the offset by 4 for the position of the add,
8237 and 8 for the pipeline offset. */
8238 ret_offset
= (val
- (s
->output_offset
8239 + s
->output_section
->vma
8242 bfd_put_32 (output_bfd
, ret_offset
,
8243 s
->contents
+ my_offset
+ 12);
8245 else if (globals
->use_blx
)
8247 put_arm_insn (globals
, output_bfd
, (bfd_vma
) a2t1v5_ldr_insn
,
8248 s
->contents
+ my_offset
);
8250 /* It's a thumb address. Add the low order bit. */
8251 bfd_put_32 (output_bfd
, val
| a2t2v5_func_addr_insn
,
8252 s
->contents
+ my_offset
+ 4);
8256 put_arm_insn (globals
, output_bfd
, (bfd_vma
) a2t1_ldr_insn
,
8257 s
->contents
+ my_offset
);
8259 put_arm_insn (globals
, output_bfd
, (bfd_vma
) a2t2_bx_r12_insn
,
8260 s
->contents
+ my_offset
+ 4);
8262 /* It's a thumb address. Add the low order bit. */
8263 bfd_put_32 (output_bfd
, val
| a2t3_func_addr_insn
,
8264 s
->contents
+ my_offset
+ 8);
8270 BFD_ASSERT (my_offset
<= globals
->arm_glue_size
);
8275 /* Arm code calling a Thumb function. */
8278 elf32_arm_to_thumb_stub (struct bfd_link_info
* info
,
8282 asection
* input_section
,
8283 bfd_byte
* hit_data
,
8286 bfd_signed_vma addend
,
8288 char **error_message
)
8290 unsigned long int tmp
;
8293 long int ret_offset
;
8294 struct elf_link_hash_entry
* myh
;
8295 struct elf32_arm_link_hash_table
* globals
;
8297 globals
= elf32_arm_hash_table (info
);
8298 BFD_ASSERT (globals
!= NULL
);
8299 BFD_ASSERT (globals
->bfd_of_glue_owner
!= NULL
);
8301 s
= bfd_get_linker_section (globals
->bfd_of_glue_owner
,
8302 ARM2THUMB_GLUE_SECTION_NAME
);
8303 BFD_ASSERT (s
!= NULL
);
8304 BFD_ASSERT (s
->contents
!= NULL
);
8305 BFD_ASSERT (s
->output_section
!= NULL
);
8307 myh
= elf32_arm_create_thumb_stub (info
, name
, input_bfd
, output_bfd
,
8308 sym_sec
, val
, s
, error_message
);
8312 my_offset
= myh
->root
.u
.def
.value
;
8313 tmp
= bfd_get_32 (input_bfd
, hit_data
);
8314 tmp
= tmp
& 0xFF000000;
8316 /* Somehow these are both 4 too far, so subtract 8. */
8317 ret_offset
= (s
->output_offset
8319 + s
->output_section
->vma
8320 - (input_section
->output_offset
8321 + input_section
->output_section
->vma
8325 tmp
= tmp
| ((ret_offset
>> 2) & 0x00FFFFFF);
8327 bfd_put_32 (output_bfd
, (bfd_vma
) tmp
, hit_data
- input_section
->vma
);
8332 /* Populate Arm stub for an exported Thumb function. */
8335 elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry
*h
, void * inf
)
8337 struct bfd_link_info
* info
= (struct bfd_link_info
*) inf
;
8339 struct elf_link_hash_entry
* myh
;
8340 struct elf32_arm_link_hash_entry
*eh
;
8341 struct elf32_arm_link_hash_table
* globals
;
8344 char *error_message
;
8346 eh
= elf32_arm_hash_entry (h
);
8347 /* Allocate stubs for exported Thumb functions on v4t. */
8348 if (eh
->export_glue
== NULL
)
8351 globals
= elf32_arm_hash_table (info
);
8352 BFD_ASSERT (globals
!= NULL
);
8353 BFD_ASSERT (globals
->bfd_of_glue_owner
!= NULL
);
8355 s
= bfd_get_linker_section (globals
->bfd_of_glue_owner
,
8356 ARM2THUMB_GLUE_SECTION_NAME
);
8357 BFD_ASSERT (s
!= NULL
);
8358 BFD_ASSERT (s
->contents
!= NULL
);
8359 BFD_ASSERT (s
->output_section
!= NULL
);
8361 sec
= eh
->export_glue
->root
.u
.def
.section
;
8363 BFD_ASSERT (sec
->output_section
!= NULL
);
8365 val
= eh
->export_glue
->root
.u
.def
.value
+ sec
->output_offset
8366 + sec
->output_section
->vma
;
8368 myh
= elf32_arm_create_thumb_stub (info
, h
->root
.root
.string
,
8369 h
->root
.u
.def
.section
->owner
,
8370 globals
->obfd
, sec
, val
, s
,
8376 /* Populate ARMv4 BX veneers. Returns the absolute adress of the veneer. */
8379 elf32_arm_bx_glue (struct bfd_link_info
* info
, int reg
)
8384 struct elf32_arm_link_hash_table
*globals
;
8386 globals
= elf32_arm_hash_table (info
);
8387 BFD_ASSERT (globals
!= NULL
);
8388 BFD_ASSERT (globals
->bfd_of_glue_owner
!= NULL
);
8390 s
= bfd_get_linker_section (globals
->bfd_of_glue_owner
,
8391 ARM_BX_GLUE_SECTION_NAME
);
8392 BFD_ASSERT (s
!= NULL
);
8393 BFD_ASSERT (s
->contents
!= NULL
);
8394 BFD_ASSERT (s
->output_section
!= NULL
);
8396 BFD_ASSERT (globals
->bx_glue_offset
[reg
] & 2);
8398 glue_addr
= globals
->bx_glue_offset
[reg
] & ~(bfd_vma
)3;
8400 if ((globals
->bx_glue_offset
[reg
] & 1) == 0)
8402 p
= s
->contents
+ glue_addr
;
8403 bfd_put_32 (globals
->obfd
, armbx1_tst_insn
+ (reg
<< 16), p
);
8404 bfd_put_32 (globals
->obfd
, armbx2_moveq_insn
+ reg
, p
+ 4);
8405 bfd_put_32 (globals
->obfd
, armbx3_bx_insn
+ reg
, p
+ 8);
8406 globals
->bx_glue_offset
[reg
] |= 1;
8409 return glue_addr
+ s
->output_section
->vma
+ s
->output_offset
;
8412 /* Generate Arm stubs for exported Thumb symbols. */
8414 elf32_arm_begin_write_processing (bfd
*abfd ATTRIBUTE_UNUSED
,
8415 struct bfd_link_info
*link_info
)
8417 struct elf32_arm_link_hash_table
* globals
;
8419 if (link_info
== NULL
)
8420 /* Ignore this if we are not called by the ELF backend linker. */
8423 globals
= elf32_arm_hash_table (link_info
);
8424 if (globals
== NULL
)
8427 /* If blx is available then exported Thumb symbols are OK and there is
8429 if (globals
->use_blx
)
8432 elf_link_hash_traverse (&globals
->root
, elf32_arm_to_thumb_export_stub
,
8436 /* Reserve space for COUNT dynamic relocations in relocation selection
8440 elf32_arm_allocate_dynrelocs (struct bfd_link_info
*info
, asection
*sreloc
,
8441 bfd_size_type count
)
8443 struct elf32_arm_link_hash_table
*htab
;
8445 htab
= elf32_arm_hash_table (info
);
8446 BFD_ASSERT (htab
->root
.dynamic_sections_created
);
8449 sreloc
->size
+= RELOC_SIZE (htab
) * count
;
8452 /* Reserve space for COUNT R_ARM_IRELATIVE relocations. If the link is
8453 dynamic, the relocations should go in SRELOC, otherwise they should
8454 go in the special .rel.iplt section. */
8457 elf32_arm_allocate_irelocs (struct bfd_link_info
*info
, asection
*sreloc
,
8458 bfd_size_type count
)
8460 struct elf32_arm_link_hash_table
*htab
;
8462 htab
= elf32_arm_hash_table (info
);
8463 if (!htab
->root
.dynamic_sections_created
)
8464 htab
->root
.irelplt
->size
+= RELOC_SIZE (htab
) * count
;
8467 BFD_ASSERT (sreloc
!= NULL
);
8468 sreloc
->size
+= RELOC_SIZE (htab
) * count
;
8472 /* Add relocation REL to the end of relocation section SRELOC. */
8475 elf32_arm_add_dynreloc (bfd
*output_bfd
, struct bfd_link_info
*info
,
8476 asection
*sreloc
, Elf_Internal_Rela
*rel
)
8479 struct elf32_arm_link_hash_table
*htab
;
8481 htab
= elf32_arm_hash_table (info
);
8482 if (!htab
->root
.dynamic_sections_created
8483 && ELF32_R_TYPE (rel
->r_info
) == R_ARM_IRELATIVE
)
8484 sreloc
= htab
->root
.irelplt
;
8487 loc
= sreloc
->contents
;
8488 loc
+= sreloc
->reloc_count
++ * RELOC_SIZE (htab
);
8489 if (sreloc
->reloc_count
* RELOC_SIZE (htab
) > sreloc
->size
)
8491 SWAP_RELOC_OUT (htab
) (output_bfd
, rel
, loc
);
8494 /* Allocate room for a PLT entry described by ROOT_PLT and ARM_PLT.
8495 IS_IPLT_ENTRY says whether the entry belongs to .iplt rather than
8499 elf32_arm_allocate_plt_entry (struct bfd_link_info
*info
,
8500 bfd_boolean is_iplt_entry
,
8501 union gotplt_union
*root_plt
,
8502 struct arm_plt_info
*arm_plt
)
8504 struct elf32_arm_link_hash_table
*htab
;
8508 htab
= elf32_arm_hash_table (info
);
8512 splt
= htab
->root
.iplt
;
8513 sgotplt
= htab
->root
.igotplt
;
8515 /* NaCl uses a special first entry in .iplt too. */
8516 if (htab
->nacl_p
&& splt
->size
== 0)
8517 splt
->size
+= htab
->plt_header_size
;
8519 /* Allocate room for an R_ARM_IRELATIVE relocation in .rel.iplt. */
8520 elf32_arm_allocate_irelocs (info
, htab
->root
.irelplt
, 1);
8524 splt
= htab
->root
.splt
;
8525 sgotplt
= htab
->root
.sgotplt
;
8527 /* Allocate room for an R_JUMP_SLOT relocation in .rel.plt. */
8528 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelplt
, 1);
8530 /* If this is the first .plt entry, make room for the special
8532 if (splt
->size
== 0)
8533 splt
->size
+= htab
->plt_header_size
;
8535 htab
->next_tls_desc_index
++;
8538 /* Allocate the PLT entry itself, including any leading Thumb stub. */
8539 if (elf32_arm_plt_needs_thumb_stub_p (info
, arm_plt
))
8540 splt
->size
+= PLT_THUMB_STUB_SIZE
;
8541 root_plt
->offset
= splt
->size
;
8542 splt
->size
+= htab
->plt_entry_size
;
8544 if (!htab
->symbian_p
)
8546 /* We also need to make an entry in the .got.plt section, which
8547 will be placed in the .got section by the linker script. */
8549 arm_plt
->got_offset
= sgotplt
->size
;
8551 arm_plt
->got_offset
= sgotplt
->size
- 8 * htab
->num_tls_desc
;
8557 arm_movw_immediate (bfd_vma value
)
8559 return (value
& 0x00000fff) | ((value
& 0x0000f000) << 4);
8563 arm_movt_immediate (bfd_vma value
)
8565 return ((value
& 0x0fff0000) >> 16) | ((value
& 0xf0000000) >> 12);
8568 /* Fill in a PLT entry and its associated GOT slot. If DYNINDX == -1,
8569 the entry lives in .iplt and resolves to (*SYM_VALUE)().
8570 Otherwise, DYNINDX is the index of the symbol in the dynamic
8571 symbol table and SYM_VALUE is undefined.
8573 ROOT_PLT points to the offset of the PLT entry from the start of its
8574 section (.iplt or .plt). ARM_PLT points to the symbol's ARM-specific
8575 bookkeeping information.
8577 Returns FALSE if there was a problem. */
8580 elf32_arm_populate_plt_entry (bfd
*output_bfd
, struct bfd_link_info
*info
,
8581 union gotplt_union
*root_plt
,
8582 struct arm_plt_info
*arm_plt
,
8583 int dynindx
, bfd_vma sym_value
)
8585 struct elf32_arm_link_hash_table
*htab
;
8591 Elf_Internal_Rela rel
;
8592 bfd_vma plt_header_size
;
8593 bfd_vma got_header_size
;
8595 htab
= elf32_arm_hash_table (info
);
8597 /* Pick the appropriate sections and sizes. */
8600 splt
= htab
->root
.iplt
;
8601 sgot
= htab
->root
.igotplt
;
8602 srel
= htab
->root
.irelplt
;
8604 /* There are no reserved entries in .igot.plt, and no special
8605 first entry in .iplt. */
8606 got_header_size
= 0;
8607 plt_header_size
= 0;
8611 splt
= htab
->root
.splt
;
8612 sgot
= htab
->root
.sgotplt
;
8613 srel
= htab
->root
.srelplt
;
8615 got_header_size
= get_elf_backend_data (output_bfd
)->got_header_size
;
8616 plt_header_size
= htab
->plt_header_size
;
8618 BFD_ASSERT (splt
!= NULL
&& srel
!= NULL
);
8620 /* Fill in the entry in the procedure linkage table. */
8621 if (htab
->symbian_p
)
8623 BFD_ASSERT (dynindx
>= 0);
8624 put_arm_insn (htab
, output_bfd
,
8625 elf32_arm_symbian_plt_entry
[0],
8626 splt
->contents
+ root_plt
->offset
);
8627 bfd_put_32 (output_bfd
,
8628 elf32_arm_symbian_plt_entry
[1],
8629 splt
->contents
+ root_plt
->offset
+ 4);
8631 /* Fill in the entry in the .rel.plt section. */
8632 rel
.r_offset
= (splt
->output_section
->vma
8633 + splt
->output_offset
8634 + root_plt
->offset
+ 4);
8635 rel
.r_info
= ELF32_R_INFO (dynindx
, R_ARM_GLOB_DAT
);
8637 /* Get the index in the procedure linkage table which
8638 corresponds to this symbol. This is the index of this symbol
8639 in all the symbols for which we are making plt entries. The
8640 first entry in the procedure linkage table is reserved. */
8641 plt_index
= ((root_plt
->offset
- plt_header_size
)
8642 / htab
->plt_entry_size
);
8646 bfd_vma got_offset
, got_address
, plt_address
;
8647 bfd_vma got_displacement
, initial_got_entry
;
8650 BFD_ASSERT (sgot
!= NULL
);
8652 /* Get the offset into the .(i)got.plt table of the entry that
8653 corresponds to this function. */
8654 got_offset
= (arm_plt
->got_offset
& -2);
8656 /* Get the index in the procedure linkage table which
8657 corresponds to this symbol. This is the index of this symbol
8658 in all the symbols for which we are making plt entries.
8659 After the reserved .got.plt entries, all symbols appear in
8660 the same order as in .plt. */
8661 plt_index
= (got_offset
- got_header_size
) / 4;
8663 /* Calculate the address of the GOT entry. */
8664 got_address
= (sgot
->output_section
->vma
8665 + sgot
->output_offset
8668 /* ...and the address of the PLT entry. */
8669 plt_address
= (splt
->output_section
->vma
8670 + splt
->output_offset
8671 + root_plt
->offset
);
8673 ptr
= splt
->contents
+ root_plt
->offset
;
8674 if (htab
->vxworks_p
&& bfd_link_pic (info
))
8679 for (i
= 0; i
!= htab
->plt_entry_size
/ 4; i
++, ptr
+= 4)
8681 val
= elf32_arm_vxworks_shared_plt_entry
[i
];
8683 val
|= got_address
- sgot
->output_section
->vma
;
8685 val
|= plt_index
* RELOC_SIZE (htab
);
8686 if (i
== 2 || i
== 5)
8687 bfd_put_32 (output_bfd
, val
, ptr
);
8689 put_arm_insn (htab
, output_bfd
, val
, ptr
);
8692 else if (htab
->vxworks_p
)
8697 for (i
= 0; i
!= htab
->plt_entry_size
/ 4; i
++, ptr
+= 4)
8699 val
= elf32_arm_vxworks_exec_plt_entry
[i
];
8703 val
|= 0xffffff & -((root_plt
->offset
+ i
* 4 + 8) >> 2);
8705 val
|= plt_index
* RELOC_SIZE (htab
);
8706 if (i
== 2 || i
== 5)
8707 bfd_put_32 (output_bfd
, val
, ptr
);
8709 put_arm_insn (htab
, output_bfd
, val
, ptr
);
8712 loc
= (htab
->srelplt2
->contents
8713 + (plt_index
* 2 + 1) * RELOC_SIZE (htab
));
8715 /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
8716 referencing the GOT for this PLT entry. */
8717 rel
.r_offset
= plt_address
+ 8;
8718 rel
.r_info
= ELF32_R_INFO (htab
->root
.hgot
->indx
, R_ARM_ABS32
);
8719 rel
.r_addend
= got_offset
;
8720 SWAP_RELOC_OUT (htab
) (output_bfd
, &rel
, loc
);
8721 loc
+= RELOC_SIZE (htab
);
8723 /* Create the R_ARM_ABS32 relocation referencing the
8724 beginning of the PLT for this GOT entry. */
8725 rel
.r_offset
= got_address
;
8726 rel
.r_info
= ELF32_R_INFO (htab
->root
.hplt
->indx
, R_ARM_ABS32
);
8728 SWAP_RELOC_OUT (htab
) (output_bfd
, &rel
, loc
);
8730 else if (htab
->nacl_p
)
8732 /* Calculate the displacement between the PLT slot and the
8733 common tail that's part of the special initial PLT slot. */
8734 int32_t tail_displacement
8735 = ((splt
->output_section
->vma
+ splt
->output_offset
8736 + ARM_NACL_PLT_TAIL_OFFSET
)
8737 - (plt_address
+ htab
->plt_entry_size
+ 4));
8738 BFD_ASSERT ((tail_displacement
& 3) == 0);
8739 tail_displacement
>>= 2;
8741 BFD_ASSERT ((tail_displacement
& 0xff000000) == 0
8742 || (-tail_displacement
& 0xff000000) == 0);
8744 /* Calculate the displacement between the PLT slot and the entry
8745 in the GOT. The offset accounts for the value produced by
8746 adding to pc in the penultimate instruction of the PLT stub. */
8747 got_displacement
= (got_address
8748 - (plt_address
+ htab
->plt_entry_size
));
8750 /* NaCl does not support interworking at all. */
8751 BFD_ASSERT (!elf32_arm_plt_needs_thumb_stub_p (info
, arm_plt
));
8753 put_arm_insn (htab
, output_bfd
,
8754 elf32_arm_nacl_plt_entry
[0]
8755 | arm_movw_immediate (got_displacement
),
8757 put_arm_insn (htab
, output_bfd
,
8758 elf32_arm_nacl_plt_entry
[1]
8759 | arm_movt_immediate (got_displacement
),
8761 put_arm_insn (htab
, output_bfd
,
8762 elf32_arm_nacl_plt_entry
[2],
8764 put_arm_insn (htab
, output_bfd
,
8765 elf32_arm_nacl_plt_entry
[3]
8766 | (tail_displacement
& 0x00ffffff),
8769 else if (using_thumb_only (htab
))
8771 /* PR ld/16017: Generate thumb only PLT entries. */
8772 if (!using_thumb2 (htab
))
8774 /* FIXME: We ought to be able to generate thumb-1 PLT
8776 _bfd_error_handler (_("%B: Warning: thumb-1 mode PLT generation not currently supported"),
8781 /* Calculate the displacement between the PLT slot and the entry in
8782 the GOT. The 12-byte offset accounts for the value produced by
8783 adding to pc in the 3rd instruction of the PLT stub. */
8784 got_displacement
= got_address
- (plt_address
+ 12);
8786 /* As we are using 32 bit instructions we have to use 'put_arm_insn'
8787 instead of 'put_thumb_insn'. */
8788 put_arm_insn (htab
, output_bfd
,
8789 elf32_thumb2_plt_entry
[0]
8790 | ((got_displacement
& 0x000000ff) << 16)
8791 | ((got_displacement
& 0x00000700) << 20)
8792 | ((got_displacement
& 0x00000800) >> 1)
8793 | ((got_displacement
& 0x0000f000) >> 12),
8795 put_arm_insn (htab
, output_bfd
,
8796 elf32_thumb2_plt_entry
[1]
8797 | ((got_displacement
& 0x00ff0000) )
8798 | ((got_displacement
& 0x07000000) << 4)
8799 | ((got_displacement
& 0x08000000) >> 17)
8800 | ((got_displacement
& 0xf0000000) >> 28),
8802 put_arm_insn (htab
, output_bfd
,
8803 elf32_thumb2_plt_entry
[2],
8805 put_arm_insn (htab
, output_bfd
,
8806 elf32_thumb2_plt_entry
[3],
8811 /* Calculate the displacement between the PLT slot and the
8812 entry in the GOT. The eight-byte offset accounts for the
8813 value produced by adding to pc in the first instruction
8815 got_displacement
= got_address
- (plt_address
+ 8);
8817 if (elf32_arm_plt_needs_thumb_stub_p (info
, arm_plt
))
8819 put_thumb_insn (htab
, output_bfd
,
8820 elf32_arm_plt_thumb_stub
[0], ptr
- 4);
8821 put_thumb_insn (htab
, output_bfd
,
8822 elf32_arm_plt_thumb_stub
[1], ptr
- 2);
8825 if (!elf32_arm_use_long_plt_entry
)
8827 BFD_ASSERT ((got_displacement
& 0xf0000000) == 0);
8829 put_arm_insn (htab
, output_bfd
,
8830 elf32_arm_plt_entry_short
[0]
8831 | ((got_displacement
& 0x0ff00000) >> 20),
8833 put_arm_insn (htab
, output_bfd
,
8834 elf32_arm_plt_entry_short
[1]
8835 | ((got_displacement
& 0x000ff000) >> 12),
8837 put_arm_insn (htab
, output_bfd
,
8838 elf32_arm_plt_entry_short
[2]
8839 | (got_displacement
& 0x00000fff),
8841 #ifdef FOUR_WORD_PLT
8842 bfd_put_32 (output_bfd
, elf32_arm_plt_entry_short
[3], ptr
+ 12);
8847 put_arm_insn (htab
, output_bfd
,
8848 elf32_arm_plt_entry_long
[0]
8849 | ((got_displacement
& 0xf0000000) >> 28),
8851 put_arm_insn (htab
, output_bfd
,
8852 elf32_arm_plt_entry_long
[1]
8853 | ((got_displacement
& 0x0ff00000) >> 20),
8855 put_arm_insn (htab
, output_bfd
,
8856 elf32_arm_plt_entry_long
[2]
8857 | ((got_displacement
& 0x000ff000) >> 12),
8859 put_arm_insn (htab
, output_bfd
,
8860 elf32_arm_plt_entry_long
[3]
8861 | (got_displacement
& 0x00000fff),
8866 /* Fill in the entry in the .rel(a).(i)plt section. */
8867 rel
.r_offset
= got_address
;
8871 /* .igot.plt entries use IRELATIVE relocations against SYM_VALUE.
8872 The dynamic linker or static executable then calls SYM_VALUE
8873 to determine the correct run-time value of the .igot.plt entry. */
8874 rel
.r_info
= ELF32_R_INFO (0, R_ARM_IRELATIVE
);
8875 initial_got_entry
= sym_value
;
8879 rel
.r_info
= ELF32_R_INFO (dynindx
, R_ARM_JUMP_SLOT
);
8880 initial_got_entry
= (splt
->output_section
->vma
8881 + splt
->output_offset
);
8884 /* Fill in the entry in the global offset table. */
8885 bfd_put_32 (output_bfd
, initial_got_entry
,
8886 sgot
->contents
+ got_offset
);
8890 elf32_arm_add_dynreloc (output_bfd
, info
, srel
, &rel
);
8893 loc
= srel
->contents
+ plt_index
* RELOC_SIZE (htab
);
8894 SWAP_RELOC_OUT (htab
) (output_bfd
, &rel
, loc
);
8900 /* Some relocations map to different relocations depending on the
8901 target. Return the real relocation. */
8904 arm_real_reloc_type (struct elf32_arm_link_hash_table
* globals
,
8910 if (globals
->target1_is_rel
)
8916 return globals
->target2_reloc
;
8923 /* Return the base VMA address which should be subtracted from real addresses
8924 when resolving @dtpoff relocation.
8925 This is PT_TLS segment p_vaddr. */
8928 dtpoff_base (struct bfd_link_info
*info
)
8930 /* If tls_sec is NULL, we should have signalled an error already. */
8931 if (elf_hash_table (info
)->tls_sec
== NULL
)
8933 return elf_hash_table (info
)->tls_sec
->vma
;
8936 /* Return the relocation value for @tpoff relocation
8937 if STT_TLS virtual address is ADDRESS. */
8940 tpoff (struct bfd_link_info
*info
, bfd_vma address
)
8942 struct elf_link_hash_table
*htab
= elf_hash_table (info
);
8945 /* If tls_sec is NULL, we should have signalled an error already. */
8946 if (htab
->tls_sec
== NULL
)
8948 base
= align_power ((bfd_vma
) TCB_SIZE
, htab
->tls_sec
->alignment_power
);
8949 return address
- htab
->tls_sec
->vma
+ base
;
8952 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
8953 VALUE is the relocation value. */
8955 static bfd_reloc_status_type
8956 elf32_arm_abs12_reloc (bfd
*abfd
, void *data
, bfd_vma value
)
8959 return bfd_reloc_overflow
;
8961 value
|= bfd_get_32 (abfd
, data
) & 0xfffff000;
8962 bfd_put_32 (abfd
, value
, data
);
8963 return bfd_reloc_ok
;
8966 /* Handle TLS relaxations. Relaxing is possible for symbols that use
8967 R_ARM_GOTDESC, R_ARM_{,THM_}TLS_CALL or
8968 R_ARM_{,THM_}TLS_DESCSEQ relocations, during a static link.
8970 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
8971 is to then call final_link_relocate. Return other values in the
8974 FIXME:When --emit-relocs is in effect, we'll emit relocs describing
8975 the pre-relaxed code. It would be nice if the relocs were updated
8976 to match the optimization. */
8978 static bfd_reloc_status_type
8979 elf32_arm_tls_relax (struct elf32_arm_link_hash_table
*globals
,
8980 bfd
*input_bfd
, asection
*input_sec
, bfd_byte
*contents
,
8981 Elf_Internal_Rela
*rel
, unsigned long is_local
)
8985 switch (ELF32_R_TYPE (rel
->r_info
))
8988 return bfd_reloc_notsupported
;
8990 case R_ARM_TLS_GOTDESC
:
8995 insn
= bfd_get_32 (input_bfd
, contents
+ rel
->r_offset
);
8997 insn
-= 5; /* THUMB */
8999 insn
-= 8; /* ARM */
9001 bfd_put_32 (input_bfd
, insn
, contents
+ rel
->r_offset
);
9002 return bfd_reloc_continue
;
9004 case R_ARM_THM_TLS_DESCSEQ
:
9006 insn
= bfd_get_16 (input_bfd
, contents
+ rel
->r_offset
);
9007 if ((insn
& 0xff78) == 0x4478) /* add rx, pc */
9011 bfd_put_16 (input_bfd
, 0x46c0, contents
+ rel
->r_offset
);
9013 else if ((insn
& 0xffc0) == 0x6840) /* ldr rx,[ry,#4] */
9017 bfd_put_16 (input_bfd
, 0x46c0, contents
+ rel
->r_offset
);
9020 bfd_put_16 (input_bfd
, insn
& 0xf83f, contents
+ rel
->r_offset
);
9022 else if ((insn
& 0xff87) == 0x4780) /* blx rx */
9026 bfd_put_16 (input_bfd
, 0x46c0, contents
+ rel
->r_offset
);
9029 bfd_put_16 (input_bfd
, 0x4600 | (insn
& 0x78),
9030 contents
+ rel
->r_offset
);
9034 if ((insn
& 0xf000) == 0xf000 || (insn
& 0xf800) == 0xe800)
9035 /* It's a 32 bit instruction, fetch the rest of it for
9036 error generation. */
9038 | bfd_get_16 (input_bfd
, contents
+ rel
->r_offset
+ 2);
9039 (*_bfd_error_handler
)
9040 (_("%B(%A+0x%lx):unexpected Thumb instruction '0x%x' in TLS trampoline"),
9041 input_bfd
, input_sec
, (unsigned long)rel
->r_offset
, insn
);
9042 return bfd_reloc_notsupported
;
9046 case R_ARM_TLS_DESCSEQ
:
9048 insn
= bfd_get_32 (input_bfd
, contents
+ rel
->r_offset
);
9049 if ((insn
& 0xffff0ff0) == 0xe08f0000) /* add rx,pc,ry */
9053 bfd_put_32 (input_bfd
, 0xe1a00000 | (insn
& 0xffff),
9054 contents
+ rel
->r_offset
);
9056 else if ((insn
& 0xfff00fff) == 0xe5900004) /* ldr rx,[ry,#4]*/
9060 bfd_put_32 (input_bfd
, 0xe1a00000, contents
+ rel
->r_offset
);
9063 bfd_put_32 (input_bfd
, insn
& 0xfffff000,
9064 contents
+ rel
->r_offset
);
9066 else if ((insn
& 0xfffffff0) == 0xe12fff30) /* blx rx */
9070 bfd_put_32 (input_bfd
, 0xe1a00000, contents
+ rel
->r_offset
);
9073 bfd_put_32 (input_bfd
, 0xe1a00000 | (insn
& 0xf),
9074 contents
+ rel
->r_offset
);
9078 (*_bfd_error_handler
)
9079 (_("%B(%A+0x%lx):unexpected ARM instruction '0x%x' in TLS trampoline"),
9080 input_bfd
, input_sec
, (unsigned long)rel
->r_offset
, insn
);
9081 return bfd_reloc_notsupported
;
9085 case R_ARM_TLS_CALL
:
9086 /* GD->IE relaxation, turn the instruction into 'nop' or
9087 'ldr r0, [pc,r0]' */
9088 insn
= is_local
? 0xe1a00000 : 0xe79f0000;
9089 bfd_put_32 (input_bfd
, insn
, contents
+ rel
->r_offset
);
9092 case R_ARM_THM_TLS_CALL
:
9093 /* GD->IE relaxation. */
9095 /* add r0,pc; ldr r0, [r0] */
9097 else if (using_thumb2 (globals
))
9104 bfd_put_16 (input_bfd
, insn
>> 16, contents
+ rel
->r_offset
);
9105 bfd_put_16 (input_bfd
, insn
& 0xffff, contents
+ rel
->r_offset
+ 2);
9108 return bfd_reloc_ok
;
9111 /* For a given value of n, calculate the value of G_n as required to
9112 deal with group relocations. We return it in the form of an
9113 encoded constant-and-rotation, together with the final residual. If n is
9114 specified as less than zero, then final_residual is filled with the
9115 input value and no further action is performed. */
9118 calculate_group_reloc_mask (bfd_vma value
, int n
, bfd_vma
*final_residual
)
9122 bfd_vma encoded_g_n
= 0;
9123 bfd_vma residual
= value
; /* Also known as Y_n. */
9125 for (current_n
= 0; current_n
<= n
; current_n
++)
9129 /* Calculate which part of the value to mask. */
9136 /* Determine the most significant bit in the residual and
9137 align the resulting value to a 2-bit boundary. */
9138 for (msb
= 30; msb
>= 0; msb
-= 2)
9139 if (residual
& (3 << msb
))
9142 /* The desired shift is now (msb - 6), or zero, whichever
9149 /* Calculate g_n in 32-bit as well as encoded constant+rotation form. */
9150 g_n
= residual
& (0xff << shift
);
9151 encoded_g_n
= (g_n
>> shift
)
9152 | ((g_n
<= 0xff ? 0 : (32 - shift
) / 2) << 8);
9154 /* Calculate the residual for the next time around. */
9158 *final_residual
= residual
;
9163 /* Given an ARM instruction, determine whether it is an ADD or a SUB.
9164 Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. */
9167 identify_add_or_sub (bfd_vma insn
)
9169 int opcode
= insn
& 0x1e00000;
9171 if (opcode
== 1 << 23) /* ADD */
9174 if (opcode
== 1 << 22) /* SUB */
9180 /* Perform a relocation as part of a final link. */
9182 static bfd_reloc_status_type
9183 elf32_arm_final_link_relocate (reloc_howto_type
* howto
,
9186 asection
* input_section
,
9187 bfd_byte
* contents
,
9188 Elf_Internal_Rela
* rel
,
9190 struct bfd_link_info
* info
,
9192 const char * sym_name
,
9193 unsigned char st_type
,
9194 enum arm_st_branch_type branch_type
,
9195 struct elf_link_hash_entry
* h
,
9196 bfd_boolean
* unresolved_reloc_p
,
9197 char ** error_message
)
9199 unsigned long r_type
= howto
->type
;
9200 unsigned long r_symndx
;
9201 bfd_byte
* hit_data
= contents
+ rel
->r_offset
;
9202 bfd_vma
* local_got_offsets
;
9203 bfd_vma
* local_tlsdesc_gotents
;
9206 asection
* sreloc
= NULL
;
9209 bfd_signed_vma signed_addend
;
9210 unsigned char dynreloc_st_type
;
9211 bfd_vma dynreloc_value
;
9212 struct elf32_arm_link_hash_table
* globals
;
9213 struct elf32_arm_link_hash_entry
*eh
;
9214 union gotplt_union
*root_plt
;
9215 struct arm_plt_info
*arm_plt
;
9217 bfd_vma gotplt_offset
;
9218 bfd_boolean has_iplt_entry
;
9220 globals
= elf32_arm_hash_table (info
);
9221 if (globals
== NULL
)
9222 return bfd_reloc_notsupported
;
9224 BFD_ASSERT (is_arm_elf (input_bfd
));
9226 /* Some relocation types map to different relocations depending on the
9227 target. We pick the right one here. */
9228 r_type
= arm_real_reloc_type (globals
, r_type
);
9230 /* It is possible to have linker relaxations on some TLS access
9231 models. Update our information here. */
9232 r_type
= elf32_arm_tls_transition (info
, r_type
, h
);
9234 if (r_type
!= howto
->type
)
9235 howto
= elf32_arm_howto_from_type (r_type
);
9237 eh
= (struct elf32_arm_link_hash_entry
*) h
;
9238 sgot
= globals
->root
.sgot
;
9239 local_got_offsets
= elf_local_got_offsets (input_bfd
);
9240 local_tlsdesc_gotents
= elf32_arm_local_tlsdesc_gotent (input_bfd
);
9242 if (globals
->root
.dynamic_sections_created
)
9243 srelgot
= globals
->root
.srelgot
;
9247 r_symndx
= ELF32_R_SYM (rel
->r_info
);
9249 if (globals
->use_rel
)
9251 addend
= bfd_get_32 (input_bfd
, hit_data
) & howto
->src_mask
;
9253 if (addend
& ((howto
->src_mask
+ 1) >> 1))
9256 signed_addend
&= ~ howto
->src_mask
;
9257 signed_addend
|= addend
;
9260 signed_addend
= addend
;
9263 addend
= signed_addend
= rel
->r_addend
;
9265 /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
9266 are resolving a function call relocation. */
9267 if (using_thumb_only (globals
)
9268 && (r_type
== R_ARM_THM_CALL
9269 || r_type
== R_ARM_THM_JUMP24
)
9270 && branch_type
== ST_BRANCH_TO_ARM
)
9271 branch_type
= ST_BRANCH_TO_THUMB
;
9273 /* Record the symbol information that should be used in dynamic
9275 dynreloc_st_type
= st_type
;
9276 dynreloc_value
= value
;
9277 if (branch_type
== ST_BRANCH_TO_THUMB
)
9278 dynreloc_value
|= 1;
9280 /* Find out whether the symbol has a PLT. Set ST_VALUE, BRANCH_TYPE and
9281 VALUE appropriately for relocations that we resolve at link time. */
9282 has_iplt_entry
= FALSE
;
9283 if (elf32_arm_get_plt_info (input_bfd
, eh
, r_symndx
, &root_plt
, &arm_plt
)
9284 && root_plt
->offset
!= (bfd_vma
) -1)
9286 plt_offset
= root_plt
->offset
;
9287 gotplt_offset
= arm_plt
->got_offset
;
9289 if (h
== NULL
|| eh
->is_iplt
)
9291 has_iplt_entry
= TRUE
;
9292 splt
= globals
->root
.iplt
;
9294 /* Populate .iplt entries here, because not all of them will
9295 be seen by finish_dynamic_symbol. The lower bit is set if
9296 we have already populated the entry. */
9301 if (elf32_arm_populate_plt_entry (output_bfd
, info
, root_plt
, arm_plt
,
9302 -1, dynreloc_value
))
9303 root_plt
->offset
|= 1;
9305 return bfd_reloc_notsupported
;
9308 /* Static relocations always resolve to the .iplt entry. */
9310 value
= (splt
->output_section
->vma
9311 + splt
->output_offset
9313 branch_type
= ST_BRANCH_TO_ARM
;
9315 /* If there are non-call relocations that resolve to the .iplt
9316 entry, then all dynamic ones must too. */
9317 if (arm_plt
->noncall_refcount
!= 0)
9319 dynreloc_st_type
= st_type
;
9320 dynreloc_value
= value
;
9324 /* We populate the .plt entry in finish_dynamic_symbol. */
9325 splt
= globals
->root
.splt
;
9330 plt_offset
= (bfd_vma
) -1;
9331 gotplt_offset
= (bfd_vma
) -1;
9337 /* We don't need to find a value for this symbol. It's just a
9339 *unresolved_reloc_p
= FALSE
;
9340 return bfd_reloc_ok
;
9343 if (!globals
->vxworks_p
)
9344 return elf32_arm_abs12_reloc (input_bfd
, hit_data
, value
+ addend
);
9348 case R_ARM_ABS32_NOI
:
9350 case R_ARM_REL32_NOI
:
9356 /* Handle relocations which should use the PLT entry. ABS32/REL32
9357 will use the symbol's value, which may point to a PLT entry, but we
9358 don't need to handle that here. If we created a PLT entry, all
9359 branches in this object should go to it, except if the PLT is too
9360 far away, in which case a long branch stub should be inserted. */
9361 if ((r_type
!= R_ARM_ABS32
&& r_type
!= R_ARM_REL32
9362 && r_type
!= R_ARM_ABS32_NOI
&& r_type
!= R_ARM_REL32_NOI
9363 && r_type
!= R_ARM_CALL
9364 && r_type
!= R_ARM_JUMP24
9365 && r_type
!= R_ARM_PLT32
)
9366 && plt_offset
!= (bfd_vma
) -1)
9368 /* If we've created a .plt section, and assigned a PLT entry
9369 to this function, it must either be a STT_GNU_IFUNC reference
9370 or not be known to bind locally. In other cases, we should
9371 have cleared the PLT entry by now. */
9372 BFD_ASSERT (has_iplt_entry
|| !SYMBOL_CALLS_LOCAL (info
, h
));
9374 value
= (splt
->output_section
->vma
9375 + splt
->output_offset
9377 *unresolved_reloc_p
= FALSE
;
9378 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
9379 contents
, rel
->r_offset
, value
,
9383 /* When generating a shared object or relocatable executable, these
9384 relocations are copied into the output file to be resolved at
9386 if ((bfd_link_pic (info
)
9387 || globals
->root
.is_relocatable_executable
)
9388 && (input_section
->flags
& SEC_ALLOC
)
9389 && !(globals
->vxworks_p
9390 && strcmp (input_section
->output_section
->name
,
9392 && ((r_type
!= R_ARM_REL32
&& r_type
!= R_ARM_REL32_NOI
)
9393 || !SYMBOL_CALLS_LOCAL (info
, h
))
9394 && !(input_bfd
== globals
->stub_bfd
9395 && strstr (input_section
->name
, STUB_SUFFIX
))
9397 || ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
9398 || h
->root
.type
!= bfd_link_hash_undefweak
)
9399 && r_type
!= R_ARM_PC24
9400 && r_type
!= R_ARM_CALL
9401 && r_type
!= R_ARM_JUMP24
9402 && r_type
!= R_ARM_PREL31
9403 && r_type
!= R_ARM_PLT32
)
9405 Elf_Internal_Rela outrel
;
9406 bfd_boolean skip
, relocate
;
9408 if ((r_type
== R_ARM_REL32
|| r_type
== R_ARM_REL32_NOI
)
9411 char *v
= _("shared object");
9413 if (bfd_link_executable (info
))
9414 v
= _("PIE executable");
9416 (*_bfd_error_handler
)
9417 (_("%B: relocation %s against external or undefined symbol `%s'"
9418 " can not be used when making a %s; recompile with -fPIC"), input_bfd
,
9419 elf32_arm_howto_table_1
[r_type
].name
, h
->root
.root
.string
, v
);
9420 return bfd_reloc_notsupported
;
9423 *unresolved_reloc_p
= FALSE
;
9425 if (sreloc
== NULL
&& globals
->root
.dynamic_sections_created
)
9427 sreloc
= _bfd_elf_get_dynamic_reloc_section (input_bfd
, input_section
,
9428 ! globals
->use_rel
);
9431 return bfd_reloc_notsupported
;
9437 outrel
.r_addend
= addend
;
9439 _bfd_elf_section_offset (output_bfd
, info
, input_section
,
9441 if (outrel
.r_offset
== (bfd_vma
) -1)
9443 else if (outrel
.r_offset
== (bfd_vma
) -2)
9444 skip
= TRUE
, relocate
= TRUE
;
9445 outrel
.r_offset
+= (input_section
->output_section
->vma
9446 + input_section
->output_offset
);
9449 memset (&outrel
, 0, sizeof outrel
);
9452 && (!bfd_link_pic (info
)
9453 || !SYMBOLIC_BIND (info
, h
)
9454 || !h
->def_regular
))
9455 outrel
.r_info
= ELF32_R_INFO (h
->dynindx
, r_type
);
9460 /* This symbol is local, or marked to become local. */
9461 BFD_ASSERT (r_type
== R_ARM_ABS32
|| r_type
== R_ARM_ABS32_NOI
);
9462 if (globals
->symbian_p
)
9466 /* On Symbian OS, the data segment and text segement
9467 can be relocated independently. Therefore, we
9468 must indicate the segment to which this
9469 relocation is relative. The BPABI allows us to
9470 use any symbol in the right segment; we just use
9471 the section symbol as it is convenient. (We
9472 cannot use the symbol given by "h" directly as it
9473 will not appear in the dynamic symbol table.)
9475 Note that the dynamic linker ignores the section
9476 symbol value, so we don't subtract osec->vma
9477 from the emitted reloc addend. */
9479 osec
= sym_sec
->output_section
;
9481 osec
= input_section
->output_section
;
9482 symbol
= elf_section_data (osec
)->dynindx
;
9485 struct elf_link_hash_table
*htab
= elf_hash_table (info
);
9487 if ((osec
->flags
& SEC_READONLY
) == 0
9488 && htab
->data_index_section
!= NULL
)
9489 osec
= htab
->data_index_section
;
9491 osec
= htab
->text_index_section
;
9492 symbol
= elf_section_data (osec
)->dynindx
;
9494 BFD_ASSERT (symbol
!= 0);
9497 /* On SVR4-ish systems, the dynamic loader cannot
9498 relocate the text and data segments independently,
9499 so the symbol does not matter. */
9501 if (dynreloc_st_type
== STT_GNU_IFUNC
)
9502 /* We have an STT_GNU_IFUNC symbol that doesn't resolve
9503 to the .iplt entry. Instead, every non-call reference
9504 must use an R_ARM_IRELATIVE relocation to obtain the
9505 correct run-time address. */
9506 outrel
.r_info
= ELF32_R_INFO (symbol
, R_ARM_IRELATIVE
);
9508 outrel
.r_info
= ELF32_R_INFO (symbol
, R_ARM_RELATIVE
);
9509 if (globals
->use_rel
)
9512 outrel
.r_addend
+= dynreloc_value
;
9515 elf32_arm_add_dynreloc (output_bfd
, info
, sreloc
, &outrel
);
9517 /* If this reloc is against an external symbol, we do not want to
9518 fiddle with the addend. Otherwise, we need to include the symbol
9519 value so that it becomes an addend for the dynamic reloc. */
9521 return bfd_reloc_ok
;
9523 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
9524 contents
, rel
->r_offset
,
9525 dynreloc_value
, (bfd_vma
) 0);
9527 else switch (r_type
)
9530 return elf32_arm_abs12_reloc (input_bfd
, hit_data
, value
+ addend
);
9532 case R_ARM_XPC25
: /* Arm BLX instruction. */
9535 case R_ARM_PC24
: /* Arm B/BL instruction. */
9538 struct elf32_arm_stub_hash_entry
*stub_entry
= NULL
;
9540 if (r_type
== R_ARM_XPC25
)
9542 /* Check for Arm calling Arm function. */
9543 /* FIXME: Should we translate the instruction into a BL
9544 instruction instead ? */
9545 if (branch_type
!= ST_BRANCH_TO_THUMB
)
9546 (*_bfd_error_handler
)
9547 (_("\%B: Warning: Arm BLX instruction targets Arm function '%s'."),
9549 h
? h
->root
.root
.string
: "(local)");
9551 else if (r_type
== R_ARM_PC24
)
9553 /* Check for Arm calling Thumb function. */
9554 if (branch_type
== ST_BRANCH_TO_THUMB
)
9556 if (elf32_arm_to_thumb_stub (info
, sym_name
, input_bfd
,
9557 output_bfd
, input_section
,
9558 hit_data
, sym_sec
, rel
->r_offset
,
9559 signed_addend
, value
,
9561 return bfd_reloc_ok
;
9563 return bfd_reloc_dangerous
;
9567 /* Check if a stub has to be inserted because the
9568 destination is too far or we are changing mode. */
9569 if ( r_type
== R_ARM_CALL
9570 || r_type
== R_ARM_JUMP24
9571 || r_type
== R_ARM_PLT32
)
9573 enum elf32_arm_stub_type stub_type
= arm_stub_none
;
9574 struct elf32_arm_link_hash_entry
*hash
;
9576 hash
= (struct elf32_arm_link_hash_entry
*) h
;
9577 stub_type
= arm_type_of_stub (info
, input_section
, rel
,
9578 st_type
, &branch_type
,
9579 hash
, value
, sym_sec
,
9580 input_bfd
, sym_name
);
9582 if (stub_type
!= arm_stub_none
)
9584 /* The target is out of reach, so redirect the
9585 branch to the local stub for this function. */
9586 stub_entry
= elf32_arm_get_stub_entry (input_section
,
9591 if (stub_entry
!= NULL
)
9592 value
= (stub_entry
->stub_offset
9593 + stub_entry
->stub_sec
->output_offset
9594 + stub_entry
->stub_sec
->output_section
->vma
);
9596 if (plt_offset
!= (bfd_vma
) -1)
9597 *unresolved_reloc_p
= FALSE
;
9602 /* If the call goes through a PLT entry, make sure to
9603 check distance to the right destination address. */
9604 if (plt_offset
!= (bfd_vma
) -1)
9606 value
= (splt
->output_section
->vma
9607 + splt
->output_offset
9609 *unresolved_reloc_p
= FALSE
;
9610 /* The PLT entry is in ARM mode, regardless of the
9612 branch_type
= ST_BRANCH_TO_ARM
;
9617 /* The ARM ELF ABI says that this reloc is computed as: S - P + A
9619 S is the address of the symbol in the relocation.
9620 P is address of the instruction being relocated.
9621 A is the addend (extracted from the instruction) in bytes.
9623 S is held in 'value'.
9624 P is the base address of the section containing the
9625 instruction plus the offset of the reloc into that
9627 (input_section->output_section->vma +
9628 input_section->output_offset +
9630 A is the addend, converted into bytes, ie:
9633 Note: None of these operations have knowledge of the pipeline
9634 size of the processor, thus it is up to the assembler to
9635 encode this information into the addend. */
9636 value
-= (input_section
->output_section
->vma
9637 + input_section
->output_offset
);
9638 value
-= rel
->r_offset
;
9639 if (globals
->use_rel
)
9640 value
+= (signed_addend
<< howto
->size
);
9642 /* RELA addends do not have to be adjusted by howto->size. */
9643 value
+= signed_addend
;
9645 signed_addend
= value
;
9646 signed_addend
>>= howto
->rightshift
;
9648 /* A branch to an undefined weak symbol is turned into a jump to
9649 the next instruction unless a PLT entry will be created.
9650 Do the same for local undefined symbols (but not for STN_UNDEF).
9651 The jump to the next instruction is optimized as a NOP depending
9652 on the architecture. */
9653 if (h
? (h
->root
.type
== bfd_link_hash_undefweak
9654 && plt_offset
== (bfd_vma
) -1)
9655 : r_symndx
!= STN_UNDEF
&& bfd_is_und_section (sym_sec
))
9657 value
= (bfd_get_32 (input_bfd
, hit_data
) & 0xf0000000);
9659 if (arch_has_arm_nop (globals
))
9660 value
|= 0x0320f000;
9662 value
|= 0x01a00000; /* Using pre-UAL nop: mov r0, r0. */
9666 /* Perform a signed range check. */
9667 if ( signed_addend
> ((bfd_signed_vma
) (howto
->dst_mask
>> 1))
9668 || signed_addend
< - ((bfd_signed_vma
) ((howto
->dst_mask
+ 1) >> 1)))
9669 return bfd_reloc_overflow
;
9671 addend
= (value
& 2);
9673 value
= (signed_addend
& howto
->dst_mask
)
9674 | (bfd_get_32 (input_bfd
, hit_data
) & (~ howto
->dst_mask
));
9676 if (r_type
== R_ARM_CALL
)
9678 /* Set the H bit in the BLX instruction. */
9679 if (branch_type
== ST_BRANCH_TO_THUMB
)
9684 value
&= ~(bfd_vma
)(1 << 24);
9687 /* Select the correct instruction (BL or BLX). */
9688 /* Only if we are not handling a BL to a stub. In this
9689 case, mode switching is performed by the stub. */
9690 if (branch_type
== ST_BRANCH_TO_THUMB
&& !stub_entry
)
9692 else if (stub_entry
|| branch_type
!= ST_BRANCH_UNKNOWN
)
9694 value
&= ~(bfd_vma
)(1 << 28);
9704 if (branch_type
== ST_BRANCH_TO_THUMB
)
9708 case R_ARM_ABS32_NOI
:
9714 if (branch_type
== ST_BRANCH_TO_THUMB
)
9716 value
-= (input_section
->output_section
->vma
9717 + input_section
->output_offset
+ rel
->r_offset
);
9720 case R_ARM_REL32_NOI
:
9722 value
-= (input_section
->output_section
->vma
9723 + input_section
->output_offset
+ rel
->r_offset
);
9727 value
-= (input_section
->output_section
->vma
9728 + input_section
->output_offset
+ rel
->r_offset
);
9729 value
+= signed_addend
;
9730 if (! h
|| h
->root
.type
!= bfd_link_hash_undefweak
)
9732 /* Check for overflow. */
9733 if ((value
^ (value
>> 1)) & (1 << 30))
9734 return bfd_reloc_overflow
;
9736 value
&= 0x7fffffff;
9737 value
|= (bfd_get_32 (input_bfd
, hit_data
) & 0x80000000);
9738 if (branch_type
== ST_BRANCH_TO_THUMB
)
9743 bfd_put_32 (input_bfd
, value
, hit_data
);
9744 return bfd_reloc_ok
;
9747 /* PR 16202: Refectch the addend using the correct size. */
9748 if (globals
->use_rel
)
9749 addend
= bfd_get_8 (input_bfd
, hit_data
);
9752 /* There is no way to tell whether the user intended to use a signed or
9753 unsigned addend. When checking for overflow we accept either,
9754 as specified by the AAELF. */
9755 if ((long) value
> 0xff || (long) value
< -0x80)
9756 return bfd_reloc_overflow
;
9758 bfd_put_8 (input_bfd
, value
, hit_data
);
9759 return bfd_reloc_ok
;
9762 /* PR 16202: Refectch the addend using the correct size. */
9763 if (globals
->use_rel
)
9764 addend
= bfd_get_16 (input_bfd
, hit_data
);
9767 /* See comment for R_ARM_ABS8. */
9768 if ((long) value
> 0xffff || (long) value
< -0x8000)
9769 return bfd_reloc_overflow
;
9771 bfd_put_16 (input_bfd
, value
, hit_data
);
9772 return bfd_reloc_ok
;
9774 case R_ARM_THM_ABS5
:
9775 /* Support ldr and str instructions for the thumb. */
9776 if (globals
->use_rel
)
9778 /* Need to refetch addend. */
9779 addend
= bfd_get_16 (input_bfd
, hit_data
) & howto
->src_mask
;
9780 /* ??? Need to determine shift amount from operand size. */
9781 addend
>>= howto
->rightshift
;
9785 /* ??? Isn't value unsigned? */
9786 if ((long) value
> 0x1f || (long) value
< -0x10)
9787 return bfd_reloc_overflow
;
9789 /* ??? Value needs to be properly shifted into place first. */
9790 value
|= bfd_get_16 (input_bfd
, hit_data
) & 0xf83f;
9791 bfd_put_16 (input_bfd
, value
, hit_data
);
9792 return bfd_reloc_ok
;
9794 case R_ARM_THM_ALU_PREL_11_0
:
9795 /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw). */
9798 bfd_signed_vma relocation
;
9800 insn
= (bfd_get_16 (input_bfd
, hit_data
) << 16)
9801 | bfd_get_16 (input_bfd
, hit_data
+ 2);
9803 if (globals
->use_rel
)
9805 signed_addend
= (insn
& 0xff) | ((insn
& 0x7000) >> 4)
9806 | ((insn
& (1 << 26)) >> 15);
9807 if (insn
& 0xf00000)
9808 signed_addend
= -signed_addend
;
9811 relocation
= value
+ signed_addend
;
9812 relocation
-= Pa (input_section
->output_section
->vma
9813 + input_section
->output_offset
9818 if (value
>= 0x1000)
9819 return bfd_reloc_overflow
;
9821 insn
= (insn
& 0xfb0f8f00) | (value
& 0xff)
9822 | ((value
& 0x700) << 4)
9823 | ((value
& 0x800) << 15);
9827 bfd_put_16 (input_bfd
, insn
>> 16, hit_data
);
9828 bfd_put_16 (input_bfd
, insn
& 0xffff, hit_data
+ 2);
9830 return bfd_reloc_ok
;
9834 /* PR 10073: This reloc is not generated by the GNU toolchain,
9835 but it is supported for compatibility with third party libraries
9836 generated by other compilers, specifically the ARM/IAR. */
9839 bfd_signed_vma relocation
;
9841 insn
= bfd_get_16 (input_bfd
, hit_data
);
9843 if (globals
->use_rel
)
9844 addend
= ((((insn
& 0x00ff) << 2) + 4) & 0x3ff) -4;
9846 relocation
= value
+ addend
;
9847 relocation
-= Pa (input_section
->output_section
->vma
9848 + input_section
->output_offset
9853 /* We do not check for overflow of this reloc. Although strictly
9854 speaking this is incorrect, it appears to be necessary in order
9855 to work with IAR generated relocs. Since GCC and GAS do not
9856 generate R_ARM_THM_PC8 relocs, the lack of a check should not be
9857 a problem for them. */
9860 insn
= (insn
& 0xff00) | (value
>> 2);
9862 bfd_put_16 (input_bfd
, insn
, hit_data
);
9864 return bfd_reloc_ok
;
9867 case R_ARM_THM_PC12
:
9868 /* Corresponds to: ldr.w reg, [pc, #offset]. */
9871 bfd_signed_vma relocation
;
9873 insn
= (bfd_get_16 (input_bfd
, hit_data
) << 16)
9874 | bfd_get_16 (input_bfd
, hit_data
+ 2);
9876 if (globals
->use_rel
)
9878 signed_addend
= insn
& 0xfff;
9879 if (!(insn
& (1 << 23)))
9880 signed_addend
= -signed_addend
;
9883 relocation
= value
+ signed_addend
;
9884 relocation
-= Pa (input_section
->output_section
->vma
9885 + input_section
->output_offset
9890 if (value
>= 0x1000)
9891 return bfd_reloc_overflow
;
9893 insn
= (insn
& 0xff7ff000) | value
;
9894 if (relocation
>= 0)
9897 bfd_put_16 (input_bfd
, insn
>> 16, hit_data
);
9898 bfd_put_16 (input_bfd
, insn
& 0xffff, hit_data
+ 2);
9900 return bfd_reloc_ok
;
9903 case R_ARM_THM_XPC22
:
9904 case R_ARM_THM_CALL
:
9905 case R_ARM_THM_JUMP24
:
9906 /* Thumb BL (branch long instruction). */
9910 bfd_boolean overflow
= FALSE
;
9911 bfd_vma upper_insn
= bfd_get_16 (input_bfd
, hit_data
);
9912 bfd_vma lower_insn
= bfd_get_16 (input_bfd
, hit_data
+ 2);
9913 bfd_signed_vma reloc_signed_max
;
9914 bfd_signed_vma reloc_signed_min
;
9916 bfd_signed_vma signed_check
;
9918 const int thumb2
= using_thumb2 (globals
);
9919 const int thumb2_bl
= using_thumb2_bl (globals
);
9921 /* A branch to an undefined weak symbol is turned into a jump to
9922 the next instruction unless a PLT entry will be created.
9923 The jump to the next instruction is optimized as a NOP.W for
9924 Thumb-2 enabled architectures. */
9925 if (h
&& h
->root
.type
== bfd_link_hash_undefweak
9926 && plt_offset
== (bfd_vma
) -1)
9930 bfd_put_16 (input_bfd
, 0xf3af, hit_data
);
9931 bfd_put_16 (input_bfd
, 0x8000, hit_data
+ 2);
9935 bfd_put_16 (input_bfd
, 0xe000, hit_data
);
9936 bfd_put_16 (input_bfd
, 0xbf00, hit_data
+ 2);
9938 return bfd_reloc_ok
;
9941 /* Fetch the addend. We use the Thumb-2 encoding (backwards compatible
9942 with Thumb-1) involving the J1 and J2 bits. */
9943 if (globals
->use_rel
)
9945 bfd_vma s
= (upper_insn
& (1 << 10)) >> 10;
9946 bfd_vma upper
= upper_insn
& 0x3ff;
9947 bfd_vma lower
= lower_insn
& 0x7ff;
9948 bfd_vma j1
= (lower_insn
& (1 << 13)) >> 13;
9949 bfd_vma j2
= (lower_insn
& (1 << 11)) >> 11;
9950 bfd_vma i1
= j1
^ s
? 0 : 1;
9951 bfd_vma i2
= j2
^ s
? 0 : 1;
9953 addend
= (i1
<< 23) | (i2
<< 22) | (upper
<< 12) | (lower
<< 1);
9955 addend
= (addend
| ((s
? 0 : 1) << 24)) - (1 << 24);
9957 signed_addend
= addend
;
9960 if (r_type
== R_ARM_THM_XPC22
)
9962 /* Check for Thumb to Thumb call. */
9963 /* FIXME: Should we translate the instruction into a BL
9964 instruction instead ? */
9965 if (branch_type
== ST_BRANCH_TO_THUMB
)
9966 (*_bfd_error_handler
)
9967 (_("%B: Warning: Thumb BLX instruction targets thumb function '%s'."),
9969 h
? h
->root
.root
.string
: "(local)");
9973 /* If it is not a call to Thumb, assume call to Arm.
9974 If it is a call relative to a section name, then it is not a
9975 function call at all, but rather a long jump. Calls through
9976 the PLT do not require stubs. */
9977 if (branch_type
== ST_BRANCH_TO_ARM
&& plt_offset
== (bfd_vma
) -1)
9979 if (globals
->use_blx
&& r_type
== R_ARM_THM_CALL
)
9981 /* Convert BL to BLX. */
9982 lower_insn
= (lower_insn
& ~0x1000) | 0x0800;
9984 else if (( r_type
!= R_ARM_THM_CALL
)
9985 && (r_type
!= R_ARM_THM_JUMP24
))
9987 if (elf32_thumb_to_arm_stub
9988 (info
, sym_name
, input_bfd
, output_bfd
, input_section
,
9989 hit_data
, sym_sec
, rel
->r_offset
, signed_addend
, value
,
9991 return bfd_reloc_ok
;
9993 return bfd_reloc_dangerous
;
9996 else if (branch_type
== ST_BRANCH_TO_THUMB
9998 && r_type
== R_ARM_THM_CALL
)
10000 /* Make sure this is a BL. */
10001 lower_insn
|= 0x1800;
10005 enum elf32_arm_stub_type stub_type
= arm_stub_none
;
10006 if (r_type
== R_ARM_THM_CALL
|| r_type
== R_ARM_THM_JUMP24
)
10008 /* Check if a stub has to be inserted because the destination
10010 struct elf32_arm_stub_hash_entry
*stub_entry
;
10011 struct elf32_arm_link_hash_entry
*hash
;
10013 hash
= (struct elf32_arm_link_hash_entry
*) h
;
10015 stub_type
= arm_type_of_stub (info
, input_section
, rel
,
10016 st_type
, &branch_type
,
10017 hash
, value
, sym_sec
,
10018 input_bfd
, sym_name
);
10020 if (stub_type
!= arm_stub_none
)
10022 /* The target is out of reach or we are changing modes, so
10023 redirect the branch to the local stub for this
10025 stub_entry
= elf32_arm_get_stub_entry (input_section
,
10029 if (stub_entry
!= NULL
)
10031 value
= (stub_entry
->stub_offset
10032 + stub_entry
->stub_sec
->output_offset
10033 + stub_entry
->stub_sec
->output_section
->vma
);
10035 if (plt_offset
!= (bfd_vma
) -1)
10036 *unresolved_reloc_p
= FALSE
;
10039 /* If this call becomes a call to Arm, force BLX. */
10040 if (globals
->use_blx
&& (r_type
== R_ARM_THM_CALL
))
10043 && !arm_stub_is_thumb (stub_entry
->stub_type
))
10044 || branch_type
!= ST_BRANCH_TO_THUMB
)
10045 lower_insn
= (lower_insn
& ~0x1000) | 0x0800;
10050 /* Handle calls via the PLT. */
10051 if (stub_type
== arm_stub_none
&& plt_offset
!= (bfd_vma
) -1)
10053 value
= (splt
->output_section
->vma
10054 + splt
->output_offset
10057 if (globals
->use_blx
10058 && r_type
== R_ARM_THM_CALL
10059 && ! using_thumb_only (globals
))
10061 /* If the Thumb BLX instruction is available, convert
10062 the BL to a BLX instruction to call the ARM-mode
10064 lower_insn
= (lower_insn
& ~0x1000) | 0x0800;
10065 branch_type
= ST_BRANCH_TO_ARM
;
10069 if (! using_thumb_only (globals
))
10070 /* Target the Thumb stub before the ARM PLT entry. */
10071 value
-= PLT_THUMB_STUB_SIZE
;
10072 branch_type
= ST_BRANCH_TO_THUMB
;
10074 *unresolved_reloc_p
= FALSE
;
10077 relocation
= value
+ signed_addend
;
10079 relocation
-= (input_section
->output_section
->vma
10080 + input_section
->output_offset
10083 check
= relocation
>> howto
->rightshift
;
10085 /* If this is a signed value, the rightshift just dropped
10086 leading 1 bits (assuming twos complement). */
10087 if ((bfd_signed_vma
) relocation
>= 0)
10088 signed_check
= check
;
10090 signed_check
= check
| ~((bfd_vma
) -1 >> howto
->rightshift
);
10092 /* Calculate the permissable maximum and minimum values for
10093 this relocation according to whether we're relocating for
10095 bitsize
= howto
->bitsize
;
10098 reloc_signed_max
= (1 << (bitsize
- 1)) - 1;
10099 reloc_signed_min
= ~reloc_signed_max
;
10101 /* Assumes two's complement. */
10102 if (signed_check
> reloc_signed_max
|| signed_check
< reloc_signed_min
)
10105 if ((lower_insn
& 0x5000) == 0x4000)
10106 /* For a BLX instruction, make sure that the relocation is rounded up
10107 to a word boundary. This follows the semantics of the instruction
10108 which specifies that bit 1 of the target address will come from bit
10109 1 of the base address. */
10110 relocation
= (relocation
+ 2) & ~ 3;
10112 /* Put RELOCATION back into the insn. Assumes two's complement.
10113 We use the Thumb-2 encoding, which is safe even if dealing with
10114 a Thumb-1 instruction by virtue of our overflow check above. */
10115 reloc_sign
= (signed_check
< 0) ? 1 : 0;
10116 upper_insn
= (upper_insn
& ~(bfd_vma
) 0x7ff)
10117 | ((relocation
>> 12) & 0x3ff)
10118 | (reloc_sign
<< 10);
10119 lower_insn
= (lower_insn
& ~(bfd_vma
) 0x2fff)
10120 | (((!((relocation
>> 23) & 1)) ^ reloc_sign
) << 13)
10121 | (((!((relocation
>> 22) & 1)) ^ reloc_sign
) << 11)
10122 | ((relocation
>> 1) & 0x7ff);
10124 /* Put the relocated value back in the object file: */
10125 bfd_put_16 (input_bfd
, upper_insn
, hit_data
);
10126 bfd_put_16 (input_bfd
, lower_insn
, hit_data
+ 2);
10128 return (overflow
? bfd_reloc_overflow
: bfd_reloc_ok
);
10132 case R_ARM_THM_JUMP19
:
10133 /* Thumb32 conditional branch instruction. */
10135 bfd_vma relocation
;
10136 bfd_boolean overflow
= FALSE
;
10137 bfd_vma upper_insn
= bfd_get_16 (input_bfd
, hit_data
);
10138 bfd_vma lower_insn
= bfd_get_16 (input_bfd
, hit_data
+ 2);
10139 bfd_signed_vma reloc_signed_max
= 0xffffe;
10140 bfd_signed_vma reloc_signed_min
= -0x100000;
10141 bfd_signed_vma signed_check
;
10142 enum elf32_arm_stub_type stub_type
= arm_stub_none
;
10143 struct elf32_arm_stub_hash_entry
*stub_entry
;
10144 struct elf32_arm_link_hash_entry
*hash
;
10146 /* Need to refetch the addend, reconstruct the top three bits,
10147 and squish the two 11 bit pieces together. */
10148 if (globals
->use_rel
)
10150 bfd_vma S
= (upper_insn
& 0x0400) >> 10;
10151 bfd_vma upper
= (upper_insn
& 0x003f);
10152 bfd_vma J1
= (lower_insn
& 0x2000) >> 13;
10153 bfd_vma J2
= (lower_insn
& 0x0800) >> 11;
10154 bfd_vma lower
= (lower_insn
& 0x07ff);
10158 upper
|= (!S
) << 8;
10159 upper
-= 0x0100; /* Sign extend. */
10161 addend
= (upper
<< 12) | (lower
<< 1);
10162 signed_addend
= addend
;
10165 /* Handle calls via the PLT. */
10166 if (plt_offset
!= (bfd_vma
) -1)
10168 value
= (splt
->output_section
->vma
10169 + splt
->output_offset
10171 /* Target the Thumb stub before the ARM PLT entry. */
10172 value
-= PLT_THUMB_STUB_SIZE
;
10173 *unresolved_reloc_p
= FALSE
;
10176 hash
= (struct elf32_arm_link_hash_entry
*)h
;
10178 stub_type
= arm_type_of_stub (info
, input_section
, rel
,
10179 st_type
, &branch_type
,
10180 hash
, value
, sym_sec
,
10181 input_bfd
, sym_name
);
10182 if (stub_type
!= arm_stub_none
)
10184 stub_entry
= elf32_arm_get_stub_entry (input_section
,
10188 if (stub_entry
!= NULL
)
10190 value
= (stub_entry
->stub_offset
10191 + stub_entry
->stub_sec
->output_offset
10192 + stub_entry
->stub_sec
->output_section
->vma
);
10196 relocation
= value
+ signed_addend
;
10197 relocation
-= (input_section
->output_section
->vma
10198 + input_section
->output_offset
10200 signed_check
= (bfd_signed_vma
) relocation
;
10202 if (signed_check
> reloc_signed_max
|| signed_check
< reloc_signed_min
)
10205 /* Put RELOCATION back into the insn. */
10207 bfd_vma S
= (relocation
& 0x00100000) >> 20;
10208 bfd_vma J2
= (relocation
& 0x00080000) >> 19;
10209 bfd_vma J1
= (relocation
& 0x00040000) >> 18;
10210 bfd_vma hi
= (relocation
& 0x0003f000) >> 12;
10211 bfd_vma lo
= (relocation
& 0x00000ffe) >> 1;
10213 upper_insn
= (upper_insn
& 0xfbc0) | (S
<< 10) | hi
;
10214 lower_insn
= (lower_insn
& 0xd000) | (J1
<< 13) | (J2
<< 11) | lo
;
10217 /* Put the relocated value back in the object file: */
10218 bfd_put_16 (input_bfd
, upper_insn
, hit_data
);
10219 bfd_put_16 (input_bfd
, lower_insn
, hit_data
+ 2);
10221 return (overflow
? bfd_reloc_overflow
: bfd_reloc_ok
);
10224 case R_ARM_THM_JUMP11
:
10225 case R_ARM_THM_JUMP8
:
10226 case R_ARM_THM_JUMP6
:
10227 /* Thumb B (branch) instruction). */
10229 bfd_signed_vma relocation
;
10230 bfd_signed_vma reloc_signed_max
= (1 << (howto
->bitsize
- 1)) - 1;
10231 bfd_signed_vma reloc_signed_min
= ~ reloc_signed_max
;
10232 bfd_signed_vma signed_check
;
10234 /* CZB cannot jump backward. */
10235 if (r_type
== R_ARM_THM_JUMP6
)
10236 reloc_signed_min
= 0;
10238 if (globals
->use_rel
)
10240 /* Need to refetch addend. */
10241 addend
= bfd_get_16 (input_bfd
, hit_data
) & howto
->src_mask
;
10242 if (addend
& ((howto
->src_mask
+ 1) >> 1))
10244 signed_addend
= -1;
10245 signed_addend
&= ~ howto
->src_mask
;
10246 signed_addend
|= addend
;
10249 signed_addend
= addend
;
10250 /* The value in the insn has been right shifted. We need to
10251 undo this, so that we can perform the address calculation
10252 in terms of bytes. */
10253 signed_addend
<<= howto
->rightshift
;
10255 relocation
= value
+ signed_addend
;
10257 relocation
-= (input_section
->output_section
->vma
10258 + input_section
->output_offset
10261 relocation
>>= howto
->rightshift
;
10262 signed_check
= relocation
;
10264 if (r_type
== R_ARM_THM_JUMP6
)
10265 relocation
= ((relocation
& 0x0020) << 4) | ((relocation
& 0x001f) << 3);
10267 relocation
&= howto
->dst_mask
;
10268 relocation
|= (bfd_get_16 (input_bfd
, hit_data
) & (~ howto
->dst_mask
));
10270 bfd_put_16 (input_bfd
, relocation
, hit_data
);
10272 /* Assumes two's complement. */
10273 if (signed_check
> reloc_signed_max
|| signed_check
< reloc_signed_min
)
10274 return bfd_reloc_overflow
;
10276 return bfd_reloc_ok
;
10279 case R_ARM_ALU_PCREL7_0
:
10280 case R_ARM_ALU_PCREL15_8
:
10281 case R_ARM_ALU_PCREL23_15
:
10284 bfd_vma relocation
;
10286 insn
= bfd_get_32 (input_bfd
, hit_data
);
10287 if (globals
->use_rel
)
10289 /* Extract the addend. */
10290 addend
= (insn
& 0xff) << ((insn
& 0xf00) >> 7);
10291 signed_addend
= addend
;
10293 relocation
= value
+ signed_addend
;
10295 relocation
-= (input_section
->output_section
->vma
10296 + input_section
->output_offset
10298 insn
= (insn
& ~0xfff)
10299 | ((howto
->bitpos
<< 7) & 0xf00)
10300 | ((relocation
>> howto
->bitpos
) & 0xff);
10301 bfd_put_32 (input_bfd
, value
, hit_data
);
10303 return bfd_reloc_ok
;
10305 case R_ARM_GNU_VTINHERIT
:
10306 case R_ARM_GNU_VTENTRY
:
10307 return bfd_reloc_ok
;
10309 case R_ARM_GOTOFF32
:
10310 /* Relocation is relative to the start of the
10311 global offset table. */
10313 BFD_ASSERT (sgot
!= NULL
);
10315 return bfd_reloc_notsupported
;
10317 /* If we are addressing a Thumb function, we need to adjust the
10318 address by one, so that attempts to call the function pointer will
10319 correctly interpret it as Thumb code. */
10320 if (branch_type
== ST_BRANCH_TO_THUMB
)
10323 /* Note that sgot->output_offset is not involved in this
10324 calculation. We always want the start of .got. If we
10325 define _GLOBAL_OFFSET_TABLE in a different way, as is
10326 permitted by the ABI, we might have to change this
10328 value
-= sgot
->output_section
->vma
;
10329 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
10330 contents
, rel
->r_offset
, value
,
10334 /* Use global offset table as symbol value. */
10335 BFD_ASSERT (sgot
!= NULL
);
10338 return bfd_reloc_notsupported
;
10340 *unresolved_reloc_p
= FALSE
;
10341 value
= sgot
->output_section
->vma
;
10342 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
10343 contents
, rel
->r_offset
, value
,
10347 case R_ARM_GOT_PREL
:
10348 /* Relocation is to the entry for this symbol in the
10349 global offset table. */
10351 return bfd_reloc_notsupported
;
10353 if (dynreloc_st_type
== STT_GNU_IFUNC
10354 && plt_offset
!= (bfd_vma
) -1
10355 && (h
== NULL
|| SYMBOL_REFERENCES_LOCAL (info
, h
)))
10357 /* We have a relocation against a locally-binding STT_GNU_IFUNC
10358 symbol, and the relocation resolves directly to the runtime
10359 target rather than to the .iplt entry. This means that any
10360 .got entry would be the same value as the .igot.plt entry,
10361 so there's no point creating both. */
10362 sgot
= globals
->root
.igotplt
;
10363 value
= sgot
->output_offset
+ gotplt_offset
;
10365 else if (h
!= NULL
)
10369 off
= h
->got
.offset
;
10370 BFD_ASSERT (off
!= (bfd_vma
) -1);
10371 if ((off
& 1) != 0)
10373 /* We have already processsed one GOT relocation against
10376 if (globals
->root
.dynamic_sections_created
10377 && !SYMBOL_REFERENCES_LOCAL (info
, h
))
10378 *unresolved_reloc_p
= FALSE
;
10382 Elf_Internal_Rela outrel
;
10384 if (h
->dynindx
!= -1 && !SYMBOL_REFERENCES_LOCAL (info
, h
))
10386 /* If the symbol doesn't resolve locally in a static
10387 object, we have an undefined reference. If the
10388 symbol doesn't resolve locally in a dynamic object,
10389 it should be resolved by the dynamic linker. */
10390 if (globals
->root
.dynamic_sections_created
)
10392 outrel
.r_info
= ELF32_R_INFO (h
->dynindx
, R_ARM_GLOB_DAT
);
10393 *unresolved_reloc_p
= FALSE
;
10397 outrel
.r_addend
= 0;
10401 if (dynreloc_st_type
== STT_GNU_IFUNC
)
10402 outrel
.r_info
= ELF32_R_INFO (0, R_ARM_IRELATIVE
);
10403 else if (bfd_link_pic (info
) &&
10404 (ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
10405 || h
->root
.type
!= bfd_link_hash_undefweak
))
10406 outrel
.r_info
= ELF32_R_INFO (0, R_ARM_RELATIVE
);
10409 outrel
.r_addend
= dynreloc_value
;
10412 /* The GOT entry is initialized to zero by default.
10413 See if we should install a different value. */
10414 if (outrel
.r_addend
!= 0
10415 && (outrel
.r_info
== 0 || globals
->use_rel
))
10417 bfd_put_32 (output_bfd
, outrel
.r_addend
,
10418 sgot
->contents
+ off
);
10419 outrel
.r_addend
= 0;
10422 if (outrel
.r_info
!= 0)
10424 outrel
.r_offset
= (sgot
->output_section
->vma
10425 + sgot
->output_offset
10427 elf32_arm_add_dynreloc (output_bfd
, info
, srelgot
, &outrel
);
10429 h
->got
.offset
|= 1;
10431 value
= sgot
->output_offset
+ off
;
10437 BFD_ASSERT (local_got_offsets
!= NULL
&&
10438 local_got_offsets
[r_symndx
] != (bfd_vma
) -1);
10440 off
= local_got_offsets
[r_symndx
];
10442 /* The offset must always be a multiple of 4. We use the
10443 least significant bit to record whether we have already
10444 generated the necessary reloc. */
10445 if ((off
& 1) != 0)
10449 if (globals
->use_rel
)
10450 bfd_put_32 (output_bfd
, dynreloc_value
, sgot
->contents
+ off
);
10452 if (bfd_link_pic (info
) || dynreloc_st_type
== STT_GNU_IFUNC
)
10454 Elf_Internal_Rela outrel
;
10456 outrel
.r_addend
= addend
+ dynreloc_value
;
10457 outrel
.r_offset
= (sgot
->output_section
->vma
10458 + sgot
->output_offset
10460 if (dynreloc_st_type
== STT_GNU_IFUNC
)
10461 outrel
.r_info
= ELF32_R_INFO (0, R_ARM_IRELATIVE
);
10463 outrel
.r_info
= ELF32_R_INFO (0, R_ARM_RELATIVE
);
10464 elf32_arm_add_dynreloc (output_bfd
, info
, srelgot
, &outrel
);
10467 local_got_offsets
[r_symndx
] |= 1;
10470 value
= sgot
->output_offset
+ off
;
10472 if (r_type
!= R_ARM_GOT32
)
10473 value
+= sgot
->output_section
->vma
;
10475 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
10476 contents
, rel
->r_offset
, value
,
10479 case R_ARM_TLS_LDO32
:
10480 value
= value
- dtpoff_base (info
);
10482 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
10483 contents
, rel
->r_offset
, value
,
10486 case R_ARM_TLS_LDM32
:
10493 off
= globals
->tls_ldm_got
.offset
;
10495 if ((off
& 1) != 0)
10499 /* If we don't know the module number, create a relocation
10501 if (bfd_link_pic (info
))
10503 Elf_Internal_Rela outrel
;
10505 if (srelgot
== NULL
)
10508 outrel
.r_addend
= 0;
10509 outrel
.r_offset
= (sgot
->output_section
->vma
10510 + sgot
->output_offset
+ off
);
10511 outrel
.r_info
= ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32
);
10513 if (globals
->use_rel
)
10514 bfd_put_32 (output_bfd
, outrel
.r_addend
,
10515 sgot
->contents
+ off
);
10517 elf32_arm_add_dynreloc (output_bfd
, info
, srelgot
, &outrel
);
10520 bfd_put_32 (output_bfd
, 1, sgot
->contents
+ off
);
10522 globals
->tls_ldm_got
.offset
|= 1;
10525 value
= sgot
->output_section
->vma
+ sgot
->output_offset
+ off
10526 - (input_section
->output_section
->vma
+ input_section
->output_offset
+ rel
->r_offset
);
10528 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
10529 contents
, rel
->r_offset
, value
,
10533 case R_ARM_TLS_CALL
:
10534 case R_ARM_THM_TLS_CALL
:
10535 case R_ARM_TLS_GD32
:
10536 case R_ARM_TLS_IE32
:
10537 case R_ARM_TLS_GOTDESC
:
10538 case R_ARM_TLS_DESCSEQ
:
10539 case R_ARM_THM_TLS_DESCSEQ
:
10541 bfd_vma off
, offplt
;
10545 BFD_ASSERT (sgot
!= NULL
);
10550 dyn
= globals
->root
.dynamic_sections_created
;
10551 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn
,
10552 bfd_link_pic (info
),
10554 && (!bfd_link_pic (info
)
10555 || !SYMBOL_REFERENCES_LOCAL (info
, h
)))
10557 *unresolved_reloc_p
= FALSE
;
10560 off
= h
->got
.offset
;
10561 offplt
= elf32_arm_hash_entry (h
)->tlsdesc_got
;
10562 tls_type
= ((struct elf32_arm_link_hash_entry
*) h
)->tls_type
;
10566 BFD_ASSERT (local_got_offsets
!= NULL
);
10567 off
= local_got_offsets
[r_symndx
];
10568 offplt
= local_tlsdesc_gotents
[r_symndx
];
10569 tls_type
= elf32_arm_local_got_tls_type (input_bfd
)[r_symndx
];
10572 /* Linker relaxations happens from one of the
10573 R_ARM_{GOTDESC,CALL,DESCSEQ} relocations to IE or LE. */
10574 if (ELF32_R_TYPE(rel
->r_info
) != r_type
)
10575 tls_type
= GOT_TLS_IE
;
10577 BFD_ASSERT (tls_type
!= GOT_UNKNOWN
);
10579 if ((off
& 1) != 0)
10583 bfd_boolean need_relocs
= FALSE
;
10584 Elf_Internal_Rela outrel
;
10587 /* The GOT entries have not been initialized yet. Do it
10588 now, and emit any relocations. If both an IE GOT and a
10589 GD GOT are necessary, we emit the GD first. */
10591 if ((bfd_link_pic (info
) || indx
!= 0)
10593 || ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
10594 || h
->root
.type
!= bfd_link_hash_undefweak
))
10596 need_relocs
= TRUE
;
10597 BFD_ASSERT (srelgot
!= NULL
);
10600 if (tls_type
& GOT_TLS_GDESC
)
10604 /* We should have relaxed, unless this is an undefined
10606 BFD_ASSERT ((h
&& (h
->root
.type
== bfd_link_hash_undefweak
))
10607 || bfd_link_pic (info
));
10608 BFD_ASSERT (globals
->sgotplt_jump_table_size
+ offplt
+ 8
10609 <= globals
->root
.sgotplt
->size
);
10611 outrel
.r_addend
= 0;
10612 outrel
.r_offset
= (globals
->root
.sgotplt
->output_section
->vma
10613 + globals
->root
.sgotplt
->output_offset
10615 + globals
->sgotplt_jump_table_size
);
10617 outrel
.r_info
= ELF32_R_INFO (indx
, R_ARM_TLS_DESC
);
10618 sreloc
= globals
->root
.srelplt
;
10619 loc
= sreloc
->contents
;
10620 loc
+= globals
->next_tls_desc_index
++ * RELOC_SIZE (globals
);
10621 BFD_ASSERT (loc
+ RELOC_SIZE (globals
)
10622 <= sreloc
->contents
+ sreloc
->size
);
10624 SWAP_RELOC_OUT (globals
) (output_bfd
, &outrel
, loc
);
10626 /* For globals, the first word in the relocation gets
10627 the relocation index and the top bit set, or zero,
10628 if we're binding now. For locals, it gets the
10629 symbol's offset in the tls section. */
10630 bfd_put_32 (output_bfd
,
10631 !h
? value
- elf_hash_table (info
)->tls_sec
->vma
10632 : info
->flags
& DF_BIND_NOW
? 0
10633 : 0x80000000 | ELF32_R_SYM (outrel
.r_info
),
10634 globals
->root
.sgotplt
->contents
+ offplt
10635 + globals
->sgotplt_jump_table_size
);
10637 /* Second word in the relocation is always zero. */
10638 bfd_put_32 (output_bfd
, 0,
10639 globals
->root
.sgotplt
->contents
+ offplt
10640 + globals
->sgotplt_jump_table_size
+ 4);
10642 if (tls_type
& GOT_TLS_GD
)
10646 outrel
.r_addend
= 0;
10647 outrel
.r_offset
= (sgot
->output_section
->vma
10648 + sgot
->output_offset
10650 outrel
.r_info
= ELF32_R_INFO (indx
, R_ARM_TLS_DTPMOD32
);
10652 if (globals
->use_rel
)
10653 bfd_put_32 (output_bfd
, outrel
.r_addend
,
10654 sgot
->contents
+ cur_off
);
10656 elf32_arm_add_dynreloc (output_bfd
, info
, srelgot
, &outrel
);
10659 bfd_put_32 (output_bfd
, value
- dtpoff_base (info
),
10660 sgot
->contents
+ cur_off
+ 4);
10663 outrel
.r_addend
= 0;
10664 outrel
.r_info
= ELF32_R_INFO (indx
,
10665 R_ARM_TLS_DTPOFF32
);
10666 outrel
.r_offset
+= 4;
10668 if (globals
->use_rel
)
10669 bfd_put_32 (output_bfd
, outrel
.r_addend
,
10670 sgot
->contents
+ cur_off
+ 4);
10672 elf32_arm_add_dynreloc (output_bfd
, info
,
10678 /* If we are not emitting relocations for a
10679 general dynamic reference, then we must be in a
10680 static link or an executable link with the
10681 symbol binding locally. Mark it as belonging
10682 to module 1, the executable. */
10683 bfd_put_32 (output_bfd
, 1,
10684 sgot
->contents
+ cur_off
);
10685 bfd_put_32 (output_bfd
, value
- dtpoff_base (info
),
10686 sgot
->contents
+ cur_off
+ 4);
10692 if (tls_type
& GOT_TLS_IE
)
10697 outrel
.r_addend
= value
- dtpoff_base (info
);
10699 outrel
.r_addend
= 0;
10700 outrel
.r_offset
= (sgot
->output_section
->vma
10701 + sgot
->output_offset
10703 outrel
.r_info
= ELF32_R_INFO (indx
, R_ARM_TLS_TPOFF32
);
10705 if (globals
->use_rel
)
10706 bfd_put_32 (output_bfd
, outrel
.r_addend
,
10707 sgot
->contents
+ cur_off
);
10709 elf32_arm_add_dynreloc (output_bfd
, info
, srelgot
, &outrel
);
10712 bfd_put_32 (output_bfd
, tpoff (info
, value
),
10713 sgot
->contents
+ cur_off
);
10718 h
->got
.offset
|= 1;
10720 local_got_offsets
[r_symndx
] |= 1;
10723 if ((tls_type
& GOT_TLS_GD
) && r_type
!= R_ARM_TLS_GD32
)
10725 else if (tls_type
& GOT_TLS_GDESC
)
10728 if (ELF32_R_TYPE(rel
->r_info
) == R_ARM_TLS_CALL
10729 || ELF32_R_TYPE(rel
->r_info
) == R_ARM_THM_TLS_CALL
)
10731 bfd_signed_vma offset
;
10732 /* TLS stubs are arm mode. The original symbol is a
10733 data object, so branch_type is bogus. */
10734 branch_type
= ST_BRANCH_TO_ARM
;
10735 enum elf32_arm_stub_type stub_type
10736 = arm_type_of_stub (info
, input_section
, rel
,
10737 st_type
, &branch_type
,
10738 (struct elf32_arm_link_hash_entry
*)h
,
10739 globals
->tls_trampoline
, globals
->root
.splt
,
10740 input_bfd
, sym_name
);
10742 if (stub_type
!= arm_stub_none
)
10744 struct elf32_arm_stub_hash_entry
*stub_entry
10745 = elf32_arm_get_stub_entry
10746 (input_section
, globals
->root
.splt
, 0, rel
,
10747 globals
, stub_type
);
10748 offset
= (stub_entry
->stub_offset
10749 + stub_entry
->stub_sec
->output_offset
10750 + stub_entry
->stub_sec
->output_section
->vma
);
10753 offset
= (globals
->root
.splt
->output_section
->vma
10754 + globals
->root
.splt
->output_offset
10755 + globals
->tls_trampoline
);
10757 if (ELF32_R_TYPE(rel
->r_info
) == R_ARM_TLS_CALL
)
10759 unsigned long inst
;
10761 offset
-= (input_section
->output_section
->vma
10762 + input_section
->output_offset
10763 + rel
->r_offset
+ 8);
10765 inst
= offset
>> 2;
10766 inst
&= 0x00ffffff;
10767 value
= inst
| (globals
->use_blx
? 0xfa000000 : 0xeb000000);
10771 /* Thumb blx encodes the offset in a complicated
10773 unsigned upper_insn
, lower_insn
;
10776 offset
-= (input_section
->output_section
->vma
10777 + input_section
->output_offset
10778 + rel
->r_offset
+ 4);
10780 if (stub_type
!= arm_stub_none
10781 && arm_stub_is_thumb (stub_type
))
10783 lower_insn
= 0xd000;
10787 lower_insn
= 0xc000;
10788 /* Round up the offset to a word boundary. */
10789 offset
= (offset
+ 2) & ~2;
10793 upper_insn
= (0xf000
10794 | ((offset
>> 12) & 0x3ff)
10796 lower_insn
|= (((!((offset
>> 23) & 1)) ^ neg
) << 13)
10797 | (((!((offset
>> 22) & 1)) ^ neg
) << 11)
10798 | ((offset
>> 1) & 0x7ff);
10799 bfd_put_16 (input_bfd
, upper_insn
, hit_data
);
10800 bfd_put_16 (input_bfd
, lower_insn
, hit_data
+ 2);
10801 return bfd_reloc_ok
;
10804 /* These relocations needs special care, as besides the fact
10805 they point somewhere in .gotplt, the addend must be
10806 adjusted accordingly depending on the type of instruction
10808 else if ((r_type
== R_ARM_TLS_GOTDESC
) && (tls_type
& GOT_TLS_GDESC
))
10810 unsigned long data
, insn
;
10813 data
= bfd_get_32 (input_bfd
, hit_data
);
10819 insn
= bfd_get_16 (input_bfd
, contents
+ rel
->r_offset
- data
);
10820 if ((insn
& 0xf000) == 0xf000 || (insn
& 0xf800) == 0xe800)
10821 insn
= (insn
<< 16)
10822 | bfd_get_16 (input_bfd
,
10823 contents
+ rel
->r_offset
- data
+ 2);
10824 if ((insn
& 0xf800c000) == 0xf000c000)
10827 else if ((insn
& 0xffffff00) == 0x4400)
10832 (*_bfd_error_handler
)
10833 (_("%B(%A+0x%lx):unexpected Thumb instruction '0x%x' referenced by TLS_GOTDESC"),
10834 input_bfd
, input_section
,
10835 (unsigned long)rel
->r_offset
, insn
);
10836 return bfd_reloc_notsupported
;
10841 insn
= bfd_get_32 (input_bfd
, contents
+ rel
->r_offset
- data
);
10843 switch (insn
>> 24)
10845 case 0xeb: /* bl */
10846 case 0xfa: /* blx */
10850 case 0xe0: /* add */
10855 (*_bfd_error_handler
)
10856 (_("%B(%A+0x%lx):unexpected ARM instruction '0x%x' referenced by TLS_GOTDESC"),
10857 input_bfd
, input_section
,
10858 (unsigned long)rel
->r_offset
, insn
);
10859 return bfd_reloc_notsupported
;
10863 value
+= ((globals
->root
.sgotplt
->output_section
->vma
10864 + globals
->root
.sgotplt
->output_offset
+ off
)
10865 - (input_section
->output_section
->vma
10866 + input_section
->output_offset
10868 + globals
->sgotplt_jump_table_size
);
10871 value
= ((globals
->root
.sgot
->output_section
->vma
10872 + globals
->root
.sgot
->output_offset
+ off
)
10873 - (input_section
->output_section
->vma
10874 + input_section
->output_offset
+ rel
->r_offset
));
10876 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
10877 contents
, rel
->r_offset
, value
,
10881 case R_ARM_TLS_LE32
:
10882 if (bfd_link_dll (info
))
10884 (*_bfd_error_handler
)
10885 (_("%B(%A+0x%lx): R_ARM_TLS_LE32 relocation not permitted in shared object"),
10886 input_bfd
, input_section
,
10887 (long) rel
->r_offset
, howto
->name
);
10888 return bfd_reloc_notsupported
;
10891 value
= tpoff (info
, value
);
10893 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
10894 contents
, rel
->r_offset
, value
,
10898 if (globals
->fix_v4bx
)
10900 bfd_vma insn
= bfd_get_32 (input_bfd
, hit_data
);
10902 /* Ensure that we have a BX instruction. */
10903 BFD_ASSERT ((insn
& 0x0ffffff0) == 0x012fff10);
10905 if (globals
->fix_v4bx
== 2 && (insn
& 0xf) != 0xf)
10907 /* Branch to veneer. */
10909 glue_addr
= elf32_arm_bx_glue (info
, insn
& 0xf);
10910 glue_addr
-= input_section
->output_section
->vma
10911 + input_section
->output_offset
10912 + rel
->r_offset
+ 8;
10913 insn
= (insn
& 0xf0000000) | 0x0a000000
10914 | ((glue_addr
>> 2) & 0x00ffffff);
10918 /* Preserve Rm (lowest four bits) and the condition code
10919 (highest four bits). Other bits encode MOV PC,Rm. */
10920 insn
= (insn
& 0xf000000f) | 0x01a0f000;
10923 bfd_put_32 (input_bfd
, insn
, hit_data
);
10925 return bfd_reloc_ok
;
10927 case R_ARM_MOVW_ABS_NC
:
10928 case R_ARM_MOVT_ABS
:
10929 case R_ARM_MOVW_PREL_NC
:
10930 case R_ARM_MOVT_PREL
:
10931 /* Until we properly support segment-base-relative addressing then
10932 we assume the segment base to be zero, as for the group relocations.
10933 Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
10934 and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS. */
10935 case R_ARM_MOVW_BREL_NC
:
10936 case R_ARM_MOVW_BREL
:
10937 case R_ARM_MOVT_BREL
:
10939 bfd_vma insn
= bfd_get_32 (input_bfd
, hit_data
);
10941 if (globals
->use_rel
)
10943 addend
= ((insn
>> 4) & 0xf000) | (insn
& 0xfff);
10944 signed_addend
= (addend
^ 0x8000) - 0x8000;
10947 value
+= signed_addend
;
10949 if (r_type
== R_ARM_MOVW_PREL_NC
|| r_type
== R_ARM_MOVT_PREL
)
10950 value
-= (input_section
->output_section
->vma
10951 + input_section
->output_offset
+ rel
->r_offset
);
10953 if (r_type
== R_ARM_MOVW_BREL
&& value
>= 0x10000)
10954 return bfd_reloc_overflow
;
10956 if (branch_type
== ST_BRANCH_TO_THUMB
)
10959 if (r_type
== R_ARM_MOVT_ABS
|| r_type
== R_ARM_MOVT_PREL
10960 || r_type
== R_ARM_MOVT_BREL
)
10963 insn
&= 0xfff0f000;
10964 insn
|= value
& 0xfff;
10965 insn
|= (value
& 0xf000) << 4;
10966 bfd_put_32 (input_bfd
, insn
, hit_data
);
10968 return bfd_reloc_ok
;
10970 case R_ARM_THM_MOVW_ABS_NC
:
10971 case R_ARM_THM_MOVT_ABS
:
10972 case R_ARM_THM_MOVW_PREL_NC
:
10973 case R_ARM_THM_MOVT_PREL
:
10974 /* Until we properly support segment-base-relative addressing then
10975 we assume the segment base to be zero, as for the above relocations.
10976 Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
10977 R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
10978 as R_ARM_THM_MOVT_ABS. */
10979 case R_ARM_THM_MOVW_BREL_NC
:
10980 case R_ARM_THM_MOVW_BREL
:
10981 case R_ARM_THM_MOVT_BREL
:
10985 insn
= bfd_get_16 (input_bfd
, hit_data
) << 16;
10986 insn
|= bfd_get_16 (input_bfd
, hit_data
+ 2);
10988 if (globals
->use_rel
)
10990 addend
= ((insn
>> 4) & 0xf000)
10991 | ((insn
>> 15) & 0x0800)
10992 | ((insn
>> 4) & 0x0700)
10994 signed_addend
= (addend
^ 0x8000) - 0x8000;
10997 value
+= signed_addend
;
10999 if (r_type
== R_ARM_THM_MOVW_PREL_NC
|| r_type
== R_ARM_THM_MOVT_PREL
)
11000 value
-= (input_section
->output_section
->vma
11001 + input_section
->output_offset
+ rel
->r_offset
);
11003 if (r_type
== R_ARM_THM_MOVW_BREL
&& value
>= 0x10000)
11004 return bfd_reloc_overflow
;
11006 if (branch_type
== ST_BRANCH_TO_THUMB
)
11009 if (r_type
== R_ARM_THM_MOVT_ABS
|| r_type
== R_ARM_THM_MOVT_PREL
11010 || r_type
== R_ARM_THM_MOVT_BREL
)
11013 insn
&= 0xfbf08f00;
11014 insn
|= (value
& 0xf000) << 4;
11015 insn
|= (value
& 0x0800) << 15;
11016 insn
|= (value
& 0x0700) << 4;
11017 insn
|= (value
& 0x00ff);
11019 bfd_put_16 (input_bfd
, insn
>> 16, hit_data
);
11020 bfd_put_16 (input_bfd
, insn
& 0xffff, hit_data
+ 2);
11022 return bfd_reloc_ok
;
11024 case R_ARM_ALU_PC_G0_NC
:
11025 case R_ARM_ALU_PC_G1_NC
:
11026 case R_ARM_ALU_PC_G0
:
11027 case R_ARM_ALU_PC_G1
:
11028 case R_ARM_ALU_PC_G2
:
11029 case R_ARM_ALU_SB_G0_NC
:
11030 case R_ARM_ALU_SB_G1_NC
:
11031 case R_ARM_ALU_SB_G0
:
11032 case R_ARM_ALU_SB_G1
:
11033 case R_ARM_ALU_SB_G2
:
11035 bfd_vma insn
= bfd_get_32 (input_bfd
, hit_data
);
11036 bfd_vma pc
= input_section
->output_section
->vma
11037 + input_section
->output_offset
+ rel
->r_offset
;
11038 /* sb is the origin of the *segment* containing the symbol. */
11039 bfd_vma sb
= sym_sec
? sym_sec
->output_section
->vma
: 0;
11042 bfd_signed_vma signed_value
;
11045 /* Determine which group of bits to select. */
11048 case R_ARM_ALU_PC_G0_NC
:
11049 case R_ARM_ALU_PC_G0
:
11050 case R_ARM_ALU_SB_G0_NC
:
11051 case R_ARM_ALU_SB_G0
:
11055 case R_ARM_ALU_PC_G1_NC
:
11056 case R_ARM_ALU_PC_G1
:
11057 case R_ARM_ALU_SB_G1_NC
:
11058 case R_ARM_ALU_SB_G1
:
11062 case R_ARM_ALU_PC_G2
:
11063 case R_ARM_ALU_SB_G2
:
11071 /* If REL, extract the addend from the insn. If RELA, it will
11072 have already been fetched for us. */
11073 if (globals
->use_rel
)
11076 bfd_vma constant
= insn
& 0xff;
11077 bfd_vma rotation
= (insn
& 0xf00) >> 8;
11080 signed_addend
= constant
;
11083 /* Compensate for the fact that in the instruction, the
11084 rotation is stored in multiples of 2 bits. */
11087 /* Rotate "constant" right by "rotation" bits. */
11088 signed_addend
= (constant
>> rotation
) |
11089 (constant
<< (8 * sizeof (bfd_vma
) - rotation
));
11092 /* Determine if the instruction is an ADD or a SUB.
11093 (For REL, this determines the sign of the addend.) */
11094 negative
= identify_add_or_sub (insn
);
11097 (*_bfd_error_handler
)
11098 (_("%B(%A+0x%lx): Only ADD or SUB instructions are allowed for ALU group relocations"),
11099 input_bfd
, input_section
,
11100 (long) rel
->r_offset
, howto
->name
);
11101 return bfd_reloc_overflow
;
11104 signed_addend
*= negative
;
11107 /* Compute the value (X) to go in the place. */
11108 if (r_type
== R_ARM_ALU_PC_G0_NC
11109 || r_type
== R_ARM_ALU_PC_G1_NC
11110 || r_type
== R_ARM_ALU_PC_G0
11111 || r_type
== R_ARM_ALU_PC_G1
11112 || r_type
== R_ARM_ALU_PC_G2
)
11114 signed_value
= value
- pc
+ signed_addend
;
11116 /* Section base relative. */
11117 signed_value
= value
- sb
+ signed_addend
;
11119 /* If the target symbol is a Thumb function, then set the
11120 Thumb bit in the address. */
11121 if (branch_type
== ST_BRANCH_TO_THUMB
)
11124 /* Calculate the value of the relevant G_n, in encoded
11125 constant-with-rotation format. */
11126 g_n
= calculate_group_reloc_mask (signed_value
< 0 ? - signed_value
: signed_value
,
11129 /* Check for overflow if required. */
11130 if ((r_type
== R_ARM_ALU_PC_G0
11131 || r_type
== R_ARM_ALU_PC_G1
11132 || r_type
== R_ARM_ALU_PC_G2
11133 || r_type
== R_ARM_ALU_SB_G0
11134 || r_type
== R_ARM_ALU_SB_G1
11135 || r_type
== R_ARM_ALU_SB_G2
) && residual
!= 0)
11137 (*_bfd_error_handler
)
11138 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
11139 input_bfd
, input_section
,
11140 (long) rel
->r_offset
, signed_value
< 0 ? - signed_value
: signed_value
,
11142 return bfd_reloc_overflow
;
11145 /* Mask out the value and the ADD/SUB part of the opcode; take care
11146 not to destroy the S bit. */
11147 insn
&= 0xff1ff000;
11149 /* Set the opcode according to whether the value to go in the
11150 place is negative. */
11151 if (signed_value
< 0)
11156 /* Encode the offset. */
11159 bfd_put_32 (input_bfd
, insn
, hit_data
);
11161 return bfd_reloc_ok
;
11163 case R_ARM_LDR_PC_G0
:
11164 case R_ARM_LDR_PC_G1
:
11165 case R_ARM_LDR_PC_G2
:
11166 case R_ARM_LDR_SB_G0
:
11167 case R_ARM_LDR_SB_G1
:
11168 case R_ARM_LDR_SB_G2
:
11170 bfd_vma insn
= bfd_get_32 (input_bfd
, hit_data
);
11171 bfd_vma pc
= input_section
->output_section
->vma
11172 + input_section
->output_offset
+ rel
->r_offset
;
11173 /* sb is the origin of the *segment* containing the symbol. */
11174 bfd_vma sb
= sym_sec
? sym_sec
->output_section
->vma
: 0;
11176 bfd_signed_vma signed_value
;
11179 /* Determine which groups of bits to calculate. */
11182 case R_ARM_LDR_PC_G0
:
11183 case R_ARM_LDR_SB_G0
:
11187 case R_ARM_LDR_PC_G1
:
11188 case R_ARM_LDR_SB_G1
:
11192 case R_ARM_LDR_PC_G2
:
11193 case R_ARM_LDR_SB_G2
:
11201 /* If REL, extract the addend from the insn. If RELA, it will
11202 have already been fetched for us. */
11203 if (globals
->use_rel
)
11205 int negative
= (insn
& (1 << 23)) ? 1 : -1;
11206 signed_addend
= negative
* (insn
& 0xfff);
11209 /* Compute the value (X) to go in the place. */
11210 if (r_type
== R_ARM_LDR_PC_G0
11211 || r_type
== R_ARM_LDR_PC_G1
11212 || r_type
== R_ARM_LDR_PC_G2
)
11214 signed_value
= value
- pc
+ signed_addend
;
11216 /* Section base relative. */
11217 signed_value
= value
- sb
+ signed_addend
;
11219 /* Calculate the value of the relevant G_{n-1} to obtain
11220 the residual at that stage. */
11221 calculate_group_reloc_mask (signed_value
< 0 ? - signed_value
: signed_value
,
11222 group
- 1, &residual
);
11224 /* Check for overflow. */
11225 if (residual
>= 0x1000)
11227 (*_bfd_error_handler
)
11228 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
11229 input_bfd
, input_section
,
11230 (long) rel
->r_offset
, labs (signed_value
), howto
->name
);
11231 return bfd_reloc_overflow
;
11234 /* Mask out the value and U bit. */
11235 insn
&= 0xff7ff000;
11237 /* Set the U bit if the value to go in the place is non-negative. */
11238 if (signed_value
>= 0)
11241 /* Encode the offset. */
11244 bfd_put_32 (input_bfd
, insn
, hit_data
);
11246 return bfd_reloc_ok
;
11248 case R_ARM_LDRS_PC_G0
:
11249 case R_ARM_LDRS_PC_G1
:
11250 case R_ARM_LDRS_PC_G2
:
11251 case R_ARM_LDRS_SB_G0
:
11252 case R_ARM_LDRS_SB_G1
:
11253 case R_ARM_LDRS_SB_G2
:
11255 bfd_vma insn
= bfd_get_32 (input_bfd
, hit_data
);
11256 bfd_vma pc
= input_section
->output_section
->vma
11257 + input_section
->output_offset
+ rel
->r_offset
;
11258 /* sb is the origin of the *segment* containing the symbol. */
11259 bfd_vma sb
= sym_sec
? sym_sec
->output_section
->vma
: 0;
11261 bfd_signed_vma signed_value
;
11264 /* Determine which groups of bits to calculate. */
11267 case R_ARM_LDRS_PC_G0
:
11268 case R_ARM_LDRS_SB_G0
:
11272 case R_ARM_LDRS_PC_G1
:
11273 case R_ARM_LDRS_SB_G1
:
11277 case R_ARM_LDRS_PC_G2
:
11278 case R_ARM_LDRS_SB_G2
:
11286 /* If REL, extract the addend from the insn. If RELA, it will
11287 have already been fetched for us. */
11288 if (globals
->use_rel
)
11290 int negative
= (insn
& (1 << 23)) ? 1 : -1;
11291 signed_addend
= negative
* (((insn
& 0xf00) >> 4) + (insn
& 0xf));
11294 /* Compute the value (X) to go in the place. */
11295 if (r_type
== R_ARM_LDRS_PC_G0
11296 || r_type
== R_ARM_LDRS_PC_G1
11297 || r_type
== R_ARM_LDRS_PC_G2
)
11299 signed_value
= value
- pc
+ signed_addend
;
11301 /* Section base relative. */
11302 signed_value
= value
- sb
+ signed_addend
;
11304 /* Calculate the value of the relevant G_{n-1} to obtain
11305 the residual at that stage. */
11306 calculate_group_reloc_mask (signed_value
< 0 ? - signed_value
: signed_value
,
11307 group
- 1, &residual
);
11309 /* Check for overflow. */
11310 if (residual
>= 0x100)
11312 (*_bfd_error_handler
)
11313 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
11314 input_bfd
, input_section
,
11315 (long) rel
->r_offset
, labs (signed_value
), howto
->name
);
11316 return bfd_reloc_overflow
;
11319 /* Mask out the value and U bit. */
11320 insn
&= 0xff7ff0f0;
11322 /* Set the U bit if the value to go in the place is non-negative. */
11323 if (signed_value
>= 0)
11326 /* Encode the offset. */
11327 insn
|= ((residual
& 0xf0) << 4) | (residual
& 0xf);
11329 bfd_put_32 (input_bfd
, insn
, hit_data
);
11331 return bfd_reloc_ok
;
11333 case R_ARM_LDC_PC_G0
:
11334 case R_ARM_LDC_PC_G1
:
11335 case R_ARM_LDC_PC_G2
:
11336 case R_ARM_LDC_SB_G0
:
11337 case R_ARM_LDC_SB_G1
:
11338 case R_ARM_LDC_SB_G2
:
11340 bfd_vma insn
= bfd_get_32 (input_bfd
, hit_data
);
11341 bfd_vma pc
= input_section
->output_section
->vma
11342 + input_section
->output_offset
+ rel
->r_offset
;
11343 /* sb is the origin of the *segment* containing the symbol. */
11344 bfd_vma sb
= sym_sec
? sym_sec
->output_section
->vma
: 0;
11346 bfd_signed_vma signed_value
;
11349 /* Determine which groups of bits to calculate. */
11352 case R_ARM_LDC_PC_G0
:
11353 case R_ARM_LDC_SB_G0
:
11357 case R_ARM_LDC_PC_G1
:
11358 case R_ARM_LDC_SB_G1
:
11362 case R_ARM_LDC_PC_G2
:
11363 case R_ARM_LDC_SB_G2
:
11371 /* If REL, extract the addend from the insn. If RELA, it will
11372 have already been fetched for us. */
11373 if (globals
->use_rel
)
11375 int negative
= (insn
& (1 << 23)) ? 1 : -1;
11376 signed_addend
= negative
* ((insn
& 0xff) << 2);
11379 /* Compute the value (X) to go in the place. */
11380 if (r_type
== R_ARM_LDC_PC_G0
11381 || r_type
== R_ARM_LDC_PC_G1
11382 || r_type
== R_ARM_LDC_PC_G2
)
11384 signed_value
= value
- pc
+ signed_addend
;
11386 /* Section base relative. */
11387 signed_value
= value
- sb
+ signed_addend
;
11389 /* Calculate the value of the relevant G_{n-1} to obtain
11390 the residual at that stage. */
11391 calculate_group_reloc_mask (signed_value
< 0 ? - signed_value
: signed_value
,
11392 group
- 1, &residual
);
11394 /* Check for overflow. (The absolute value to go in the place must be
11395 divisible by four and, after having been divided by four, must
11396 fit in eight bits.) */
11397 if ((residual
& 0x3) != 0 || residual
>= 0x400)
11399 (*_bfd_error_handler
)
11400 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
11401 input_bfd
, input_section
,
11402 (long) rel
->r_offset
, labs (signed_value
), howto
->name
);
11403 return bfd_reloc_overflow
;
11406 /* Mask out the value and U bit. */
11407 insn
&= 0xff7fff00;
11409 /* Set the U bit if the value to go in the place is non-negative. */
11410 if (signed_value
>= 0)
11413 /* Encode the offset. */
11414 insn
|= residual
>> 2;
11416 bfd_put_32 (input_bfd
, insn
, hit_data
);
11418 return bfd_reloc_ok
;
11420 case R_ARM_THM_ALU_ABS_G0_NC
:
11421 case R_ARM_THM_ALU_ABS_G1_NC
:
11422 case R_ARM_THM_ALU_ABS_G2_NC
:
11423 case R_ARM_THM_ALU_ABS_G3_NC
:
11425 const int shift_array
[4] = {0, 8, 16, 24};
11426 bfd_vma insn
= bfd_get_16 (input_bfd
, hit_data
);
11427 bfd_vma addr
= value
;
11428 int shift
= shift_array
[r_type
- R_ARM_THM_ALU_ABS_G0_NC
];
11430 /* Compute address. */
11431 if (globals
->use_rel
)
11432 signed_addend
= insn
& 0xff;
11433 addr
+= signed_addend
;
11434 if (branch_type
== ST_BRANCH_TO_THUMB
)
11436 /* Clean imm8 insn. */
11438 /* And update with correct part of address. */
11439 insn
|= (addr
>> shift
) & 0xff;
11441 bfd_put_16 (input_bfd
, insn
, hit_data
);
11444 *unresolved_reloc_p
= FALSE
;
11445 return bfd_reloc_ok
;
11448 return bfd_reloc_notsupported
;
11452 /* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS. */
11454 arm_add_to_rel (bfd
* abfd
,
11455 bfd_byte
* address
,
11456 reloc_howto_type
* howto
,
11457 bfd_signed_vma increment
)
11459 bfd_signed_vma addend
;
11461 if (howto
->type
== R_ARM_THM_CALL
11462 || howto
->type
== R_ARM_THM_JUMP24
)
11464 int upper_insn
, lower_insn
;
11467 upper_insn
= bfd_get_16 (abfd
, address
);
11468 lower_insn
= bfd_get_16 (abfd
, address
+ 2);
11469 upper
= upper_insn
& 0x7ff;
11470 lower
= lower_insn
& 0x7ff;
11472 addend
= (upper
<< 12) | (lower
<< 1);
11473 addend
+= increment
;
11476 upper_insn
= (upper_insn
& 0xf800) | ((addend
>> 11) & 0x7ff);
11477 lower_insn
= (lower_insn
& 0xf800) | (addend
& 0x7ff);
11479 bfd_put_16 (abfd
, (bfd_vma
) upper_insn
, address
);
11480 bfd_put_16 (abfd
, (bfd_vma
) lower_insn
, address
+ 2);
11486 contents
= bfd_get_32 (abfd
, address
);
11488 /* Get the (signed) value from the instruction. */
11489 addend
= contents
& howto
->src_mask
;
11490 if (addend
& ((howto
->src_mask
+ 1) >> 1))
11492 bfd_signed_vma mask
;
11495 mask
&= ~ howto
->src_mask
;
11499 /* Add in the increment, (which is a byte value). */
11500 switch (howto
->type
)
11503 addend
+= increment
;
11510 addend
<<= howto
->size
;
11511 addend
+= increment
;
11513 /* Should we check for overflow here ? */
11515 /* Drop any undesired bits. */
11516 addend
>>= howto
->rightshift
;
11520 contents
= (contents
& ~ howto
->dst_mask
) | (addend
& howto
->dst_mask
);
11522 bfd_put_32 (abfd
, contents
, address
);
/* Return non-zero iff R_TYPE is any of the ARM TLS relocations,
   covering both the traditional (GD/LD/IE/LE) relocations and the
   GNU descriptor-based dialect (via IS_ARM_TLS_GNU_RELOC).  R_TYPE
   may be evaluated several times, so it must be side-effect free.  */
#define IS_ARM_TLS_RELOC(R_TYPE)	\
  ((R_TYPE) == R_ARM_TLS_GD32		\
   || (R_TYPE) == R_ARM_TLS_LDO32	\
   || (R_TYPE) == R_ARM_TLS_LDM32	\
   || (R_TYPE) == R_ARM_TLS_DTPOFF32	\
   || (R_TYPE) == R_ARM_TLS_DTPMOD32	\
   || (R_TYPE) == R_ARM_TLS_TPOFF32	\
   || (R_TYPE) == R_ARM_TLS_LE32	\
   || (R_TYPE) == R_ARM_TLS_IE32	\
   || IS_ARM_TLS_GNU_RELOC (R_TYPE))

/* Specific set of relocations for the gnu tls dialect.  */
#define IS_ARM_TLS_GNU_RELOC(R_TYPE)	\
  ((R_TYPE) == R_ARM_TLS_GOTDESC	\
   || (R_TYPE) == R_ARM_TLS_CALL	\
   || (R_TYPE) == R_ARM_THM_TLS_CALL	\
   || (R_TYPE) == R_ARM_TLS_DESCSEQ	\
   || (R_TYPE) == R_ARM_THM_TLS_DESCSEQ)
11545 /* Relocate an ARM ELF section. */
11548 elf32_arm_relocate_section (bfd
* output_bfd
,
11549 struct bfd_link_info
* info
,
11551 asection
* input_section
,
11552 bfd_byte
* contents
,
11553 Elf_Internal_Rela
* relocs
,
11554 Elf_Internal_Sym
* local_syms
,
11555 asection
** local_sections
)
11557 Elf_Internal_Shdr
*symtab_hdr
;
11558 struct elf_link_hash_entry
**sym_hashes
;
11559 Elf_Internal_Rela
*rel
;
11560 Elf_Internal_Rela
*relend
;
11562 struct elf32_arm_link_hash_table
* globals
;
11564 globals
= elf32_arm_hash_table (info
);
11565 if (globals
== NULL
)
11568 symtab_hdr
= & elf_symtab_hdr (input_bfd
);
11569 sym_hashes
= elf_sym_hashes (input_bfd
);
11572 relend
= relocs
+ input_section
->reloc_count
;
11573 for (; rel
< relend
; rel
++)
11576 reloc_howto_type
* howto
;
11577 unsigned long r_symndx
;
11578 Elf_Internal_Sym
* sym
;
11580 struct elf_link_hash_entry
* h
;
11581 bfd_vma relocation
;
11582 bfd_reloc_status_type r
;
11585 bfd_boolean unresolved_reloc
= FALSE
;
11586 char *error_message
= NULL
;
11588 r_symndx
= ELF32_R_SYM (rel
->r_info
);
11589 r_type
= ELF32_R_TYPE (rel
->r_info
);
11590 r_type
= arm_real_reloc_type (globals
, r_type
);
11592 if ( r_type
== R_ARM_GNU_VTENTRY
11593 || r_type
== R_ARM_GNU_VTINHERIT
)
11596 bfd_reloc
.howto
= elf32_arm_howto_from_type (r_type
);
11597 howto
= bfd_reloc
.howto
;
11603 if (r_symndx
< symtab_hdr
->sh_info
)
11605 sym
= local_syms
+ r_symndx
;
11606 sym_type
= ELF32_ST_TYPE (sym
->st_info
);
11607 sec
= local_sections
[r_symndx
];
11609 /* An object file might have a reference to a local
11610 undefined symbol. This is a daft object file, but we
11611 should at least do something about it. V4BX & NONE
11612 relocations do not use the symbol and are explicitly
11613 allowed to use the undefined symbol, so allow those.
11614 Likewise for relocations against STN_UNDEF. */
11615 if (r_type
!= R_ARM_V4BX
11616 && r_type
!= R_ARM_NONE
11617 && r_symndx
!= STN_UNDEF
11618 && bfd_is_und_section (sec
)
11619 && ELF_ST_BIND (sym
->st_info
) != STB_WEAK
)
11620 (*info
->callbacks
->undefined_symbol
)
11621 (info
, bfd_elf_string_from_elf_section
11622 (input_bfd
, symtab_hdr
->sh_link
, sym
->st_name
),
11623 input_bfd
, input_section
,
11624 rel
->r_offset
, TRUE
);
11626 if (globals
->use_rel
)
11628 relocation
= (sec
->output_section
->vma
11629 + sec
->output_offset
11631 if (!bfd_link_relocatable (info
)
11632 && (sec
->flags
& SEC_MERGE
)
11633 && ELF_ST_TYPE (sym
->st_info
) == STT_SECTION
)
11636 bfd_vma addend
, value
;
11640 case R_ARM_MOVW_ABS_NC
:
11641 case R_ARM_MOVT_ABS
:
11642 value
= bfd_get_32 (input_bfd
, contents
+ rel
->r_offset
);
11643 addend
= ((value
& 0xf0000) >> 4) | (value
& 0xfff);
11644 addend
= (addend
^ 0x8000) - 0x8000;
11647 case R_ARM_THM_MOVW_ABS_NC
:
11648 case R_ARM_THM_MOVT_ABS
:
11649 value
= bfd_get_16 (input_bfd
, contents
+ rel
->r_offset
)
11651 value
|= bfd_get_16 (input_bfd
,
11652 contents
+ rel
->r_offset
+ 2);
11653 addend
= ((value
& 0xf7000) >> 4) | (value
& 0xff)
11654 | ((value
& 0x04000000) >> 15);
11655 addend
= (addend
^ 0x8000) - 0x8000;
11659 if (howto
->rightshift
11660 || (howto
->src_mask
& (howto
->src_mask
+ 1)))
11662 (*_bfd_error_handler
)
11663 (_("%B(%A+0x%lx): %s relocation against SEC_MERGE section"),
11664 input_bfd
, input_section
,
11665 (long) rel
->r_offset
, howto
->name
);
11669 value
= bfd_get_32 (input_bfd
, contents
+ rel
->r_offset
);
11671 /* Get the (signed) value from the instruction. */
11672 addend
= value
& howto
->src_mask
;
11673 if (addend
& ((howto
->src_mask
+ 1) >> 1))
11675 bfd_signed_vma mask
;
11678 mask
&= ~ howto
->src_mask
;
11686 _bfd_elf_rel_local_sym (output_bfd
, sym
, &msec
, addend
)
11688 addend
+= msec
->output_section
->vma
+ msec
->output_offset
;
11690 /* Cases here must match those in the preceding
11691 switch statement. */
11694 case R_ARM_MOVW_ABS_NC
:
11695 case R_ARM_MOVT_ABS
:
11696 value
= (value
& 0xfff0f000) | ((addend
& 0xf000) << 4)
11697 | (addend
& 0xfff);
11698 bfd_put_32 (input_bfd
, value
, contents
+ rel
->r_offset
);
11701 case R_ARM_THM_MOVW_ABS_NC
:
11702 case R_ARM_THM_MOVT_ABS
:
11703 value
= (value
& 0xfbf08f00) | ((addend
& 0xf700) << 4)
11704 | (addend
& 0xff) | ((addend
& 0x0800) << 15);
11705 bfd_put_16 (input_bfd
, value
>> 16,
11706 contents
+ rel
->r_offset
);
11707 bfd_put_16 (input_bfd
, value
,
11708 contents
+ rel
->r_offset
+ 2);
11712 value
= (value
& ~ howto
->dst_mask
)
11713 | (addend
& howto
->dst_mask
);
11714 bfd_put_32 (input_bfd
, value
, contents
+ rel
->r_offset
);
11720 relocation
= _bfd_elf_rela_local_sym (output_bfd
, sym
, &sec
, rel
);
11724 bfd_boolean warned
, ignored
;
11726 RELOC_FOR_GLOBAL_SYMBOL (info
, input_bfd
, input_section
, rel
,
11727 r_symndx
, symtab_hdr
, sym_hashes
,
11728 h
, sec
, relocation
,
11729 unresolved_reloc
, warned
, ignored
);
11731 sym_type
= h
->type
;
11734 if (sec
!= NULL
&& discarded_section (sec
))
11735 RELOC_AGAINST_DISCARDED_SECTION (info
, input_bfd
, input_section
,
11736 rel
, 1, relend
, howto
, 0, contents
);
11738 if (bfd_link_relocatable (info
))
11740 /* This is a relocatable link. We don't have to change
11741 anything, unless the reloc is against a section symbol,
11742 in which case we have to adjust according to where the
11743 section symbol winds up in the output section. */
11744 if (sym
!= NULL
&& ELF_ST_TYPE (sym
->st_info
) == STT_SECTION
)
11746 if (globals
->use_rel
)
11747 arm_add_to_rel (input_bfd
, contents
+ rel
->r_offset
,
11748 howto
, (bfd_signed_vma
) sec
->output_offset
);
11750 rel
->r_addend
+= sec
->output_offset
;
11756 name
= h
->root
.root
.string
;
11759 name
= (bfd_elf_string_from_elf_section
11760 (input_bfd
, symtab_hdr
->sh_link
, sym
->st_name
));
11761 if (name
== NULL
|| *name
== '\0')
11762 name
= bfd_section_name (input_bfd
, sec
);
11765 if (r_symndx
!= STN_UNDEF
11766 && r_type
!= R_ARM_NONE
11768 || h
->root
.type
== bfd_link_hash_defined
11769 || h
->root
.type
== bfd_link_hash_defweak
)
11770 && IS_ARM_TLS_RELOC (r_type
) != (sym_type
== STT_TLS
))
11772 (*_bfd_error_handler
)
11773 ((sym_type
== STT_TLS
11774 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
11775 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
11778 (long) rel
->r_offset
,
11783 /* We call elf32_arm_final_link_relocate unless we're completely
11784 done, i.e., the relaxation produced the final output we want,
11785 and we won't let anybody mess with it. Also, we have to do
11786 addend adjustments in case of a R_ARM_TLS_GOTDESC relocation
11787 both in relaxed and non-relaxed cases. */
11788 if ((elf32_arm_tls_transition (info
, r_type
, h
) != (unsigned)r_type
)
11789 || (IS_ARM_TLS_GNU_RELOC (r_type
)
11790 && !((h
? elf32_arm_hash_entry (h
)->tls_type
:
11791 elf32_arm_local_got_tls_type (input_bfd
)[r_symndx
])
11794 r
= elf32_arm_tls_relax (globals
, input_bfd
, input_section
,
11795 contents
, rel
, h
== NULL
);
11796 /* This may have been marked unresolved because it came from
11797 a shared library. But we've just dealt with that. */
11798 unresolved_reloc
= 0;
11801 r
= bfd_reloc_continue
;
11803 if (r
== bfd_reloc_continue
)
11805 unsigned char branch_type
=
11806 h
? ARM_GET_SYM_BRANCH_TYPE (h
->target_internal
)
11807 : ARM_GET_SYM_BRANCH_TYPE (sym
->st_target_internal
);
11809 r
= elf32_arm_final_link_relocate (howto
, input_bfd
, output_bfd
,
11810 input_section
, contents
, rel
,
11811 relocation
, info
, sec
, name
,
11812 sym_type
, branch_type
, h
,
11817 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
11818 because such sections are not SEC_ALLOC and thus ld.so will
11819 not process them. */
11820 if (unresolved_reloc
11821 && !((input_section
->flags
& SEC_DEBUGGING
) != 0
11823 && _bfd_elf_section_offset (output_bfd
, info
, input_section
,
11824 rel
->r_offset
) != (bfd_vma
) -1)
11826 (*_bfd_error_handler
)
11827 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
11830 (long) rel
->r_offset
,
11832 h
->root
.root
.string
);
11836 if (r
!= bfd_reloc_ok
)
11840 case bfd_reloc_overflow
:
11841 /* If the overflowing reloc was to an undefined symbol,
11842 we have already printed one error message and there
11843 is no point complaining again. */
11844 if (!h
|| h
->root
.type
!= bfd_link_hash_undefined
)
11845 (*info
->callbacks
->reloc_overflow
)
11846 (info
, (h
? &h
->root
: NULL
), name
, howto
->name
,
11847 (bfd_vma
) 0, input_bfd
, input_section
, rel
->r_offset
);
11850 case bfd_reloc_undefined
:
11851 (*info
->callbacks
->undefined_symbol
)
11852 (info
, name
, input_bfd
, input_section
, rel
->r_offset
, TRUE
);
11855 case bfd_reloc_outofrange
:
11856 error_message
= _("out of range");
11859 case bfd_reloc_notsupported
:
11860 error_message
= _("unsupported relocation");
11863 case bfd_reloc_dangerous
:
11864 /* error_message should already be set. */
11868 error_message
= _("unknown error");
11869 /* Fall through. */
11872 BFD_ASSERT (error_message
!= NULL
);
11873 (*info
->callbacks
->reloc_dangerous
)
11874 (info
, error_message
, input_bfd
, input_section
, rel
->r_offset
);
11883 /* Add a new unwind edit to the list described by HEAD, TAIL. If TINDEX is zero,
11884 adds the edit to the start of the list. (The list must be built in order of
11885 ascending TINDEX: the function's callers are primarily responsible for
11886 maintaining that condition). */
11889 add_unwind_table_edit (arm_unwind_table_edit
**head
,
11890 arm_unwind_table_edit
**tail
,
11891 arm_unwind_edit_type type
,
11892 asection
*linked_section
,
11893 unsigned int tindex
)
11895 arm_unwind_table_edit
*new_edit
= (arm_unwind_table_edit
*)
11896 xmalloc (sizeof (arm_unwind_table_edit
));
11898 new_edit
->type
= type
;
11899 new_edit
->linked_section
= linked_section
;
11900 new_edit
->index
= tindex
;
11904 new_edit
->next
= NULL
;
11907 (*tail
)->next
= new_edit
;
11909 (*tail
) = new_edit
;
11912 (*head
) = new_edit
;
11916 new_edit
->next
= *head
;
11925 static _arm_elf_section_data
*get_arm_elf_section_data (asection
*);
11927 /* Increase the size of EXIDX_SEC by ADJUST bytes. ADJUST mau be negative. */
11929 adjust_exidx_size(asection
*exidx_sec
, int adjust
)
11933 if (!exidx_sec
->rawsize
)
11934 exidx_sec
->rawsize
= exidx_sec
->size
;
11936 bfd_set_section_size (exidx_sec
->owner
, exidx_sec
, exidx_sec
->size
+ adjust
);
11937 out_sec
= exidx_sec
->output_section
;
11938 /* Adjust size of output section. */
11939 bfd_set_section_size (out_sec
->owner
, out_sec
, out_sec
->size
+adjust
);
11942 /* Insert an EXIDX_CANTUNWIND marker at the end of a section. */
11944 insert_cantunwind_after(asection
*text_sec
, asection
*exidx_sec
)
11946 struct _arm_elf_section_data
*exidx_arm_data
;
11948 exidx_arm_data
= get_arm_elf_section_data (exidx_sec
);
11949 add_unwind_table_edit (
11950 &exidx_arm_data
->u
.exidx
.unwind_edit_list
,
11951 &exidx_arm_data
->u
.exidx
.unwind_edit_tail
,
11952 INSERT_EXIDX_CANTUNWIND_AT_END
, text_sec
, UINT_MAX
);
11954 exidx_arm_data
->additional_reloc_count
++;
11956 adjust_exidx_size(exidx_sec
, 8);
11959 /* Scan .ARM.exidx tables, and create a list describing edits which should be
11960 made to those tables, such that:
11962 1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
11963 2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
11964 codes which have been inlined into the index).
11966 If MERGE_EXIDX_ENTRIES is false, duplicate entries are not merged.
11968 The edits are applied when the tables are written
11969 (in elf32_arm_write_section). */
11972 elf32_arm_fix_exidx_coverage (asection
**text_section_order
,
11973 unsigned int num_text_sections
,
11974 struct bfd_link_info
*info
,
11975 bfd_boolean merge_exidx_entries
)
11978 unsigned int last_second_word
= 0, i
;
11979 asection
*last_exidx_sec
= NULL
;
11980 asection
*last_text_sec
= NULL
;
11981 int last_unwind_type
= -1;
11983 /* Walk over all EXIDX sections, and create backlinks from the corrsponding
11985 for (inp
= info
->input_bfds
; inp
!= NULL
; inp
= inp
->link
.next
)
11989 for (sec
= inp
->sections
; sec
!= NULL
; sec
= sec
->next
)
11991 struct bfd_elf_section_data
*elf_sec
= elf_section_data (sec
);
11992 Elf_Internal_Shdr
*hdr
= &elf_sec
->this_hdr
;
11994 if (!hdr
|| hdr
->sh_type
!= SHT_ARM_EXIDX
)
11997 if (elf_sec
->linked_to
)
11999 Elf_Internal_Shdr
*linked_hdr
12000 = &elf_section_data (elf_sec
->linked_to
)->this_hdr
;
12001 struct _arm_elf_section_data
*linked_sec_arm_data
12002 = get_arm_elf_section_data (linked_hdr
->bfd_section
);
12004 if (linked_sec_arm_data
== NULL
)
12007 /* Link this .ARM.exidx section back from the text section it
12009 linked_sec_arm_data
->u
.text
.arm_exidx_sec
= sec
;
12014 /* Walk all text sections in order of increasing VMA. Eilminate duplicate
12015 index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
12016 and add EXIDX_CANTUNWIND entries for sections with no unwind table data. */
12018 for (i
= 0; i
< num_text_sections
; i
++)
12020 asection
*sec
= text_section_order
[i
];
12021 asection
*exidx_sec
;
12022 struct _arm_elf_section_data
*arm_data
= get_arm_elf_section_data (sec
);
12023 struct _arm_elf_section_data
*exidx_arm_data
;
12024 bfd_byte
*contents
= NULL
;
12025 int deleted_exidx_bytes
= 0;
12027 arm_unwind_table_edit
*unwind_edit_head
= NULL
;
12028 arm_unwind_table_edit
*unwind_edit_tail
= NULL
;
12029 Elf_Internal_Shdr
*hdr
;
12032 if (arm_data
== NULL
)
12035 exidx_sec
= arm_data
->u
.text
.arm_exidx_sec
;
12036 if (exidx_sec
== NULL
)
12038 /* Section has no unwind data. */
12039 if (last_unwind_type
== 0 || !last_exidx_sec
)
12042 /* Ignore zero sized sections. */
12043 if (sec
->size
== 0)
12046 insert_cantunwind_after(last_text_sec
, last_exidx_sec
);
12047 last_unwind_type
= 0;
12051 /* Skip /DISCARD/ sections. */
12052 if (bfd_is_abs_section (exidx_sec
->output_section
))
12055 hdr
= &elf_section_data (exidx_sec
)->this_hdr
;
12056 if (hdr
->sh_type
!= SHT_ARM_EXIDX
)
12059 exidx_arm_data
= get_arm_elf_section_data (exidx_sec
);
12060 if (exidx_arm_data
== NULL
)
12063 ibfd
= exidx_sec
->owner
;
12065 if (hdr
->contents
!= NULL
)
12066 contents
= hdr
->contents
;
12067 else if (! bfd_malloc_and_get_section (ibfd
, exidx_sec
, &contents
))
12071 if (last_unwind_type
> 0)
12073 unsigned int first_word
= bfd_get_32 (ibfd
, contents
);
12074 /* Add cantunwind if first unwind item does not match section
12076 if (first_word
!= sec
->vma
)
12078 insert_cantunwind_after (last_text_sec
, last_exidx_sec
);
12079 last_unwind_type
= 0;
12083 for (j
= 0; j
< hdr
->sh_size
; j
+= 8)
12085 unsigned int second_word
= bfd_get_32 (ibfd
, contents
+ j
+ 4);
12089 /* An EXIDX_CANTUNWIND entry. */
12090 if (second_word
== 1)
12092 if (last_unwind_type
== 0)
12096 /* Inlined unwinding data. Merge if equal to previous. */
12097 else if ((second_word
& 0x80000000) != 0)
12099 if (merge_exidx_entries
12100 && last_second_word
== second_word
&& last_unwind_type
== 1)
12103 last_second_word
= second_word
;
12105 /* Normal table entry. In theory we could merge these too,
12106 but duplicate entries are likely to be much less common. */
12110 if (elide
&& !bfd_link_relocatable (info
))
12112 add_unwind_table_edit (&unwind_edit_head
, &unwind_edit_tail
,
12113 DELETE_EXIDX_ENTRY
, NULL
, j
/ 8);
12115 deleted_exidx_bytes
+= 8;
12118 last_unwind_type
= unwind_type
;
12121 /* Free contents if we allocated it ourselves. */
12122 if (contents
!= hdr
->contents
)
12125 /* Record edits to be applied later (in elf32_arm_write_section). */
12126 exidx_arm_data
->u
.exidx
.unwind_edit_list
= unwind_edit_head
;
12127 exidx_arm_data
->u
.exidx
.unwind_edit_tail
= unwind_edit_tail
;
12129 if (deleted_exidx_bytes
> 0)
12130 adjust_exidx_size(exidx_sec
, -deleted_exidx_bytes
);
12132 last_exidx_sec
= exidx_sec
;
12133 last_text_sec
= sec
;
12136 /* Add terminating CANTUNWIND entry. */
12137 if (!bfd_link_relocatable (info
) && last_exidx_sec
12138 && last_unwind_type
!= 0)
12139 insert_cantunwind_after(last_text_sec
, last_exidx_sec
);
12145 elf32_arm_output_glue_section (struct bfd_link_info
*info
, bfd
*obfd
,
12146 bfd
*ibfd
, const char *name
)
12148 asection
*sec
, *osec
;
12150 sec
= bfd_get_linker_section (ibfd
, name
);
12151 if (sec
== NULL
|| (sec
->flags
& SEC_EXCLUDE
) != 0)
12154 osec
= sec
->output_section
;
12155 if (elf32_arm_write_section (obfd
, info
, sec
, sec
->contents
))
12158 if (! bfd_set_section_contents (obfd
, osec
, sec
->contents
,
12159 sec
->output_offset
, sec
->size
))
12166 elf32_arm_final_link (bfd
*abfd
, struct bfd_link_info
*info
)
12168 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (info
);
12169 asection
*sec
, *osec
;
12171 if (globals
== NULL
)
12174 /* Invoke the regular ELF backend linker to do all the work. */
12175 if (!bfd_elf_final_link (abfd
, info
))
12178 /* Process stub sections (eg BE8 encoding, ...). */
12179 struct elf32_arm_link_hash_table
*htab
= elf32_arm_hash_table (info
);
12181 for (i
=0; i
<htab
->top_id
; i
++)
12183 sec
= htab
->stub_group
[i
].stub_sec
;
12184 /* Only process it once, in its link_sec slot. */
12185 if (sec
&& i
== htab
->stub_group
[i
].link_sec
->id
)
12187 osec
= sec
->output_section
;
12188 elf32_arm_write_section (abfd
, info
, sec
, sec
->contents
);
12189 if (! bfd_set_section_contents (abfd
, osec
, sec
->contents
,
12190 sec
->output_offset
, sec
->size
))
12195 /* Write out any glue sections now that we have created all the
12197 if (globals
->bfd_of_glue_owner
!= NULL
)
12199 if (! elf32_arm_output_glue_section (info
, abfd
,
12200 globals
->bfd_of_glue_owner
,
12201 ARM2THUMB_GLUE_SECTION_NAME
))
12204 if (! elf32_arm_output_glue_section (info
, abfd
,
12205 globals
->bfd_of_glue_owner
,
12206 THUMB2ARM_GLUE_SECTION_NAME
))
12209 if (! elf32_arm_output_glue_section (info
, abfd
,
12210 globals
->bfd_of_glue_owner
,
12211 VFP11_ERRATUM_VENEER_SECTION_NAME
))
12214 if (! elf32_arm_output_glue_section (info
, abfd
,
12215 globals
->bfd_of_glue_owner
,
12216 STM32L4XX_ERRATUM_VENEER_SECTION_NAME
))
12219 if (! elf32_arm_output_glue_section (info
, abfd
,
12220 globals
->bfd_of_glue_owner
,
12221 ARM_BX_GLUE_SECTION_NAME
))
12228 /* Return a best guess for the machine number based on the attributes. */
12230 static unsigned int
12231 bfd_arm_get_mach_from_attributes (bfd
* abfd
)
12233 int arch
= bfd_elf_get_obj_attr_int (abfd
, OBJ_ATTR_PROC
, Tag_CPU_arch
);
12237 case TAG_CPU_ARCH_V4
: return bfd_mach_arm_4
;
12238 case TAG_CPU_ARCH_V4T
: return bfd_mach_arm_4T
;
12239 case TAG_CPU_ARCH_V5T
: return bfd_mach_arm_5T
;
12241 case TAG_CPU_ARCH_V5TE
:
12245 BFD_ASSERT (Tag_CPU_name
< NUM_KNOWN_OBJ_ATTRIBUTES
);
12246 name
= elf_known_obj_attributes (abfd
) [OBJ_ATTR_PROC
][Tag_CPU_name
].s
;
12250 if (strcmp (name
, "IWMMXT2") == 0)
12251 return bfd_mach_arm_iWMMXt2
;
12253 if (strcmp (name
, "IWMMXT") == 0)
12254 return bfd_mach_arm_iWMMXt
;
12256 if (strcmp (name
, "XSCALE") == 0)
12260 BFD_ASSERT (Tag_WMMX_arch
< NUM_KNOWN_OBJ_ATTRIBUTES
);
12261 wmmx
= elf_known_obj_attributes (abfd
) [OBJ_ATTR_PROC
][Tag_WMMX_arch
].i
;
12264 case 1: return bfd_mach_arm_iWMMXt
;
12265 case 2: return bfd_mach_arm_iWMMXt2
;
12266 default: return bfd_mach_arm_XScale
;
12271 return bfd_mach_arm_5TE
;
12275 return bfd_mach_arm_unknown
;
12279 /* Set the right machine number. */
12282 elf32_arm_object_p (bfd
*abfd
)
12286 mach
= bfd_arm_get_mach_from_notes (abfd
, ARM_NOTE_SECTION
);
12288 if (mach
== bfd_mach_arm_unknown
)
12290 if (elf_elfheader (abfd
)->e_flags
& EF_ARM_MAVERICK_FLOAT
)
12291 mach
= bfd_mach_arm_ep9312
;
12293 mach
= bfd_arm_get_mach_from_attributes (abfd
);
12296 bfd_default_set_arch_mach (abfd
, bfd_arch_arm
, mach
);
12300 /* Function to keep ARM specific flags in the ELF header. */
12303 elf32_arm_set_private_flags (bfd
*abfd
, flagword flags
)
12305 if (elf_flags_init (abfd
)
12306 && elf_elfheader (abfd
)->e_flags
!= flags
)
12308 if (EF_ARM_EABI_VERSION (flags
) == EF_ARM_EABI_UNKNOWN
)
12310 if (flags
& EF_ARM_INTERWORK
)
12311 (*_bfd_error_handler
)
12312 (_("Warning: Not setting interworking flag of %B since it has already been specified as non-interworking"),
12316 (_("Warning: Clearing the interworking flag of %B due to outside request"),
12322 elf_elfheader (abfd
)->e_flags
= flags
;
12323 elf_flags_init (abfd
) = TRUE
;
12329 /* Copy backend specific data from one object module to another. */
12332 elf32_arm_copy_private_bfd_data (bfd
*ibfd
, bfd
*obfd
)
12335 flagword out_flags
;
12337 if (! is_arm_elf (ibfd
) || ! is_arm_elf (obfd
))
12340 in_flags
= elf_elfheader (ibfd
)->e_flags
;
12341 out_flags
= elf_elfheader (obfd
)->e_flags
;
12343 if (elf_flags_init (obfd
)
12344 && EF_ARM_EABI_VERSION (out_flags
) == EF_ARM_EABI_UNKNOWN
12345 && in_flags
!= out_flags
)
12347 /* Cannot mix APCS26 and APCS32 code. */
12348 if ((in_flags
& EF_ARM_APCS_26
) != (out_flags
& EF_ARM_APCS_26
))
12351 /* Cannot mix float APCS and non-float APCS code. */
12352 if ((in_flags
& EF_ARM_APCS_FLOAT
) != (out_flags
& EF_ARM_APCS_FLOAT
))
12355 /* If the src and dest have different interworking flags
12356 then turn off the interworking bit. */
12357 if ((in_flags
& EF_ARM_INTERWORK
) != (out_flags
& EF_ARM_INTERWORK
))
12359 if (out_flags
& EF_ARM_INTERWORK
)
12361 (_("Warning: Clearing the interworking flag of %B because non-interworking code in %B has been linked with it"),
12364 in_flags
&= ~EF_ARM_INTERWORK
;
12367 /* Likewise for PIC, though don't warn for this case. */
12368 if ((in_flags
& EF_ARM_PIC
) != (out_flags
& EF_ARM_PIC
))
12369 in_flags
&= ~EF_ARM_PIC
;
12372 elf_elfheader (obfd
)->e_flags
= in_flags
;
12373 elf_flags_init (obfd
) = TRUE
;
12375 return _bfd_elf_copy_private_bfd_data (ibfd
, obfd
);
12378 /* Values for Tag_ABI_PCS_R9_use. */
12387 /* Values for Tag_ABI_PCS_RW_data. */
12390 AEABI_PCS_RW_data_absolute
,
12391 AEABI_PCS_RW_data_PCrel
,
12392 AEABI_PCS_RW_data_SBrel
,
12393 AEABI_PCS_RW_data_unused
12396 /* Values for Tag_ABI_enum_size. */
12402 AEABI_enum_forced_wide
12405 /* Determine whether an object attribute tag takes an integer, a
12409 elf32_arm_obj_attrs_arg_type (int tag
)
12411 if (tag
== Tag_compatibility
)
12412 return ATTR_TYPE_FLAG_INT_VAL
| ATTR_TYPE_FLAG_STR_VAL
;
12413 else if (tag
== Tag_nodefaults
)
12414 return ATTR_TYPE_FLAG_INT_VAL
| ATTR_TYPE_FLAG_NO_DEFAULT
;
12415 else if (tag
== Tag_CPU_raw_name
|| tag
== Tag_CPU_name
)
12416 return ATTR_TYPE_FLAG_STR_VAL
;
12418 return ATTR_TYPE_FLAG_INT_VAL
;
12420 return (tag
& 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL
: ATTR_TYPE_FLAG_INT_VAL
;
12423 /* The ABI defines that Tag_conformance should be emitted first, and that
12424 Tag_nodefaults should be second (if either is defined). This sets those
12425 two positions, and bumps up the position of all the remaining tags to
12428 elf32_arm_obj_attrs_order (int num
)
12430 if (num
== LEAST_KNOWN_OBJ_ATTRIBUTE
)
12431 return Tag_conformance
;
12432 if (num
== LEAST_KNOWN_OBJ_ATTRIBUTE
+ 1)
12433 return Tag_nodefaults
;
12434 if ((num
- 2) < Tag_nodefaults
)
12436 if ((num
- 1) < Tag_conformance
)
12441 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
12443 elf32_arm_obj_attrs_handle_unknown (bfd
*abfd
, int tag
)
12445 if ((tag
& 127) < 64)
12448 (_("%B: Unknown mandatory EABI object attribute %d"),
12450 bfd_set_error (bfd_error_bad_value
);
12456 (_("Warning: %B: Unknown EABI object attribute %d"),
12462 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
12463 Returns -1 if no architecture could be read. */
12466 get_secondary_compatible_arch (bfd
*abfd
)
12468 obj_attribute
*attr
=
12469 &elf_known_obj_attributes_proc (abfd
)[Tag_also_compatible_with
];
12471 /* Note: the tag and its argument below are uleb128 values, though
12472 currently-defined values fit in one byte for each. */
12474 && attr
->s
[0] == Tag_CPU_arch
12475 && (attr
->s
[1] & 128) != 128
12476 && attr
->s
[2] == 0)
12479 /* This tag is "safely ignorable", so don't complain if it looks funny. */
12483 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
12484 The tag is removed if ARCH is -1. */
12487 set_secondary_compatible_arch (bfd
*abfd
, int arch
)
12489 obj_attribute
*attr
=
12490 &elf_known_obj_attributes_proc (abfd
)[Tag_also_compatible_with
];
12498 /* Note: the tag and its argument below are uleb128 values, though
12499 currently-defined values fit in one byte for each. */
12501 attr
->s
= (char *) bfd_alloc (abfd
, 3);
12502 attr
->s
[0] = Tag_CPU_arch
;
12507 /* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
12511 tag_cpu_arch_combine (bfd
*ibfd
, int oldtag
, int *secondary_compat_out
,
12512 int newtag
, int secondary_compat
)
12514 #define T(X) TAG_CPU_ARCH_##X
12515 int tagl
, tagh
, result
;
12518 T(V6T2
), /* PRE_V4. */
12520 T(V6T2
), /* V4T. */
12521 T(V6T2
), /* V5T. */
12522 T(V6T2
), /* V5TE. */
12523 T(V6T2
), /* V5TEJ. */
12526 T(V6T2
) /* V6T2. */
12530 T(V6K
), /* PRE_V4. */
12534 T(V6K
), /* V5TE. */
12535 T(V6K
), /* V5TEJ. */
12537 T(V6KZ
), /* V6KZ. */
12543 T(V7
), /* PRE_V4. */
12548 T(V7
), /* V5TEJ. */
12561 T(V6K
), /* V5TE. */
12562 T(V6K
), /* V5TEJ. */
12564 T(V6KZ
), /* V6KZ. */
12568 T(V6_M
) /* V6_M. */
12570 const int v6s_m
[] =
12576 T(V6K
), /* V5TE. */
12577 T(V6K
), /* V5TEJ. */
12579 T(V6KZ
), /* V6KZ. */
12583 T(V6S_M
), /* V6_M. */
12584 T(V6S_M
) /* V6S_M. */
12586 const int v7e_m
[] =
12590 T(V7E_M
), /* V4T. */
12591 T(V7E_M
), /* V5T. */
12592 T(V7E_M
), /* V5TE. */
12593 T(V7E_M
), /* V5TEJ. */
12594 T(V7E_M
), /* V6. */
12595 T(V7E_M
), /* V6KZ. */
12596 T(V7E_M
), /* V6T2. */
12597 T(V7E_M
), /* V6K. */
12598 T(V7E_M
), /* V7. */
12599 T(V7E_M
), /* V6_M. */
12600 T(V7E_M
), /* V6S_M. */
12601 T(V7E_M
) /* V7E_M. */
12605 T(V8
), /* PRE_V4. */
12610 T(V8
), /* V5TEJ. */
12617 T(V8
), /* V6S_M. */
12618 T(V8
), /* V7E_M. */
12621 const int v8m_baseline
[] =
12634 T(V8M_BASE
), /* V6_M. */
12635 T(V8M_BASE
), /* V6S_M. */
12639 T(V8M_BASE
) /* V8-M BASELINE. */
12641 const int v8m_mainline
[] =
12653 T(V8M_MAIN
), /* V7. */
12654 T(V8M_MAIN
), /* V6_M. */
12655 T(V8M_MAIN
), /* V6S_M. */
12656 T(V8M_MAIN
), /* V7E_M. */
12659 T(V8M_MAIN
), /* V8-M BASELINE. */
12660 T(V8M_MAIN
) /* V8-M MAINLINE. */
12662 const int v4t_plus_v6_m
[] =
12668 T(V5TE
), /* V5TE. */
12669 T(V5TEJ
), /* V5TEJ. */
12671 T(V6KZ
), /* V6KZ. */
12672 T(V6T2
), /* V6T2. */
12675 T(V6_M
), /* V6_M. */
12676 T(V6S_M
), /* V6S_M. */
12677 T(V7E_M
), /* V7E_M. */
12680 T(V8M_BASE
), /* V8-M BASELINE. */
12681 T(V8M_MAIN
), /* V8-M MAINLINE. */
12682 T(V4T_PLUS_V6_M
) /* V4T plus V6_M. */
12684 const int *comb
[] =
12696 /* Pseudo-architecture. */
12700 /* Check we've not got a higher architecture than we know about. */
12702 if (oldtag
> MAX_TAG_CPU_ARCH
|| newtag
> MAX_TAG_CPU_ARCH
)
12704 _bfd_error_handler (_("error: %B: Unknown CPU architecture"), ibfd
);
12708 /* Override old tag if we have a Tag_also_compatible_with on the output. */
12710 if ((oldtag
== T(V6_M
) && *secondary_compat_out
== T(V4T
))
12711 || (oldtag
== T(V4T
) && *secondary_compat_out
== T(V6_M
)))
12712 oldtag
= T(V4T_PLUS_V6_M
);
12714 /* And override the new tag if we have a Tag_also_compatible_with on the
12717 if ((newtag
== T(V6_M
) && secondary_compat
== T(V4T
))
12718 || (newtag
== T(V4T
) && secondary_compat
== T(V6_M
)))
12719 newtag
= T(V4T_PLUS_V6_M
);
12721 tagl
= (oldtag
< newtag
) ? oldtag
: newtag
;
12722 result
= tagh
= (oldtag
> newtag
) ? oldtag
: newtag
;
12724 /* Architectures before V6KZ add features monotonically. */
12725 if (tagh
<= TAG_CPU_ARCH_V6KZ
)
12728 result
= comb
[tagh
- T(V6T2
)] ? comb
[tagh
- T(V6T2
)][tagl
] : -1;
12730 /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
12731 as the canonical version. */
12732 if (result
== T(V4T_PLUS_V6_M
))
12735 *secondary_compat_out
= T(V6_M
);
12738 *secondary_compat_out
= -1;
12742 _bfd_error_handler (_("error: %B: Conflicting CPU architectures %d/%d"),
12743 ibfd
, oldtag
, newtag
);
12751 /* Query attributes object to see if integer divide instructions may be
12752 present in an object. */
12754 elf32_arm_attributes_accept_div (const obj_attribute
*attr
)
12756 int arch
= attr
[Tag_CPU_arch
].i
;
12757 int profile
= attr
[Tag_CPU_arch_profile
].i
;
12759 switch (attr
[Tag_DIV_use
].i
)
12762 /* Integer divide allowed if instruction contained in archetecture. */
12763 if (arch
== TAG_CPU_ARCH_V7
&& (profile
== 'R' || profile
== 'M'))
12765 else if (arch
>= TAG_CPU_ARCH_V7E_M
)
12771 /* Integer divide explicitly prohibited. */
12775 /* Unrecognised case - treat as allowing divide everywhere. */
12777 /* Integer divide allowed in ARM state. */
12782 /* Query attributes object to see if integer divide instructions are
12783 forbidden to be in the object. This is not the inverse of
12784 elf32_arm_attributes_accept_div. */
12786 elf32_arm_attributes_forbid_div (const obj_attribute
*attr
)
12788 return attr
[Tag_DIV_use
].i
== 1;
12791 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
12792 are conflicting attributes. */
12795 elf32_arm_merge_eabi_attributes (bfd
*ibfd
, bfd
*obfd
)
12797 obj_attribute
*in_attr
;
12798 obj_attribute
*out_attr
;
12799 /* Some tags have 0 = don't care, 1 = strong requirement,
12800 2 = weak requirement. */
12801 static const int order_021
[3] = {0, 2, 1};
12803 bfd_boolean result
= TRUE
;
12804 const char *sec_name
= get_elf_backend_data (ibfd
)->obj_attrs_section
;
12806 /* Skip the linker stubs file. This preserves previous behavior
12807 of accepting unknown attributes in the first input file - but
12809 if (ibfd
->flags
& BFD_LINKER_CREATED
)
12812 /* Skip any input that hasn't attribute section.
12813 This enables to link object files without attribute section with
12815 if (bfd_get_section_by_name (ibfd
, sec_name
) == NULL
)
12818 if (!elf_known_obj_attributes_proc (obfd
)[0].i
)
12820 /* This is the first object. Copy the attributes. */
12821 _bfd_elf_copy_obj_attributes (ibfd
, obfd
);
12823 out_attr
= elf_known_obj_attributes_proc (obfd
);
12825 /* Use the Tag_null value to indicate the attributes have been
12829 /* We do not output objects with Tag_MPextension_use_legacy - we move
12830 the attribute's value to Tag_MPextension_use. */
12831 if (out_attr
[Tag_MPextension_use_legacy
].i
!= 0)
12833 if (out_attr
[Tag_MPextension_use
].i
!= 0
12834 && out_attr
[Tag_MPextension_use_legacy
].i
12835 != out_attr
[Tag_MPextension_use
].i
)
12838 (_("Error: %B has both the current and legacy "
12839 "Tag_MPextension_use attributes"), ibfd
);
12843 out_attr
[Tag_MPextension_use
] =
12844 out_attr
[Tag_MPextension_use_legacy
];
12845 out_attr
[Tag_MPextension_use_legacy
].type
= 0;
12846 out_attr
[Tag_MPextension_use_legacy
].i
= 0;
12852 in_attr
= elf_known_obj_attributes_proc (ibfd
);
12853 out_attr
= elf_known_obj_attributes_proc (obfd
);
12854 /* This needs to happen before Tag_ABI_FP_number_model is merged. */
12855 if (in_attr
[Tag_ABI_VFP_args
].i
!= out_attr
[Tag_ABI_VFP_args
].i
)
12857 /* Ignore mismatches if the object doesn't use floating point or is
12858 floating point ABI independent. */
12859 if (out_attr
[Tag_ABI_FP_number_model
].i
== AEABI_FP_number_model_none
12860 || (in_attr
[Tag_ABI_FP_number_model
].i
!= AEABI_FP_number_model_none
12861 && out_attr
[Tag_ABI_VFP_args
].i
== AEABI_VFP_args_compatible
))
12862 out_attr
[Tag_ABI_VFP_args
].i
= in_attr
[Tag_ABI_VFP_args
].i
;
12863 else if (in_attr
[Tag_ABI_FP_number_model
].i
!= AEABI_FP_number_model_none
12864 && in_attr
[Tag_ABI_VFP_args
].i
!= AEABI_VFP_args_compatible
)
12867 (_("error: %B uses VFP register arguments, %B does not"),
12868 in_attr
[Tag_ABI_VFP_args
].i
? ibfd
: obfd
,
12869 in_attr
[Tag_ABI_VFP_args
].i
? obfd
: ibfd
);
12874 for (i
= LEAST_KNOWN_OBJ_ATTRIBUTE
; i
< NUM_KNOWN_OBJ_ATTRIBUTES
; i
++)
12876 /* Merge this attribute with existing attributes. */
12879 case Tag_CPU_raw_name
:
12881 /* These are merged after Tag_CPU_arch. */
12884 case Tag_ABI_optimization_goals
:
12885 case Tag_ABI_FP_optimization_goals
:
12886 /* Use the first value seen. */
12891 int secondary_compat
= -1, secondary_compat_out
= -1;
12892 unsigned int saved_out_attr
= out_attr
[i
].i
;
12894 static const char *name_table
[] =
12896 /* These aren't real CPU names, but we can't guess
12897 that from the architecture version alone. */
12913 "ARM v8-M.baseline",
12914 "ARM v8-M.mainline",
12917 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
12918 secondary_compat
= get_secondary_compatible_arch (ibfd
);
12919 secondary_compat_out
= get_secondary_compatible_arch (obfd
);
12920 arch_attr
= tag_cpu_arch_combine (ibfd
, out_attr
[i
].i
,
12921 &secondary_compat_out
,
12925 /* Return with error if failed to merge. */
12926 if (arch_attr
== -1)
12929 out_attr
[i
].i
= arch_attr
;
12931 set_secondary_compatible_arch (obfd
, secondary_compat_out
);
12933 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
12934 if (out_attr
[i
].i
== saved_out_attr
)
12935 ; /* Leave the names alone. */
12936 else if (out_attr
[i
].i
== in_attr
[i
].i
)
12938 /* The output architecture has been changed to match the
12939 input architecture. Use the input names. */
12940 out_attr
[Tag_CPU_name
].s
= in_attr
[Tag_CPU_name
].s
12941 ? _bfd_elf_attr_strdup (obfd
, in_attr
[Tag_CPU_name
].s
)
12943 out_attr
[Tag_CPU_raw_name
].s
= in_attr
[Tag_CPU_raw_name
].s
12944 ? _bfd_elf_attr_strdup (obfd
, in_attr
[Tag_CPU_raw_name
].s
)
12949 out_attr
[Tag_CPU_name
].s
= NULL
;
12950 out_attr
[Tag_CPU_raw_name
].s
= NULL
;
12953 /* If we still don't have a value for Tag_CPU_name,
12954 make one up now. Tag_CPU_raw_name remains blank. */
12955 if (out_attr
[Tag_CPU_name
].s
== NULL
12956 && out_attr
[i
].i
< ARRAY_SIZE (name_table
))
12957 out_attr
[Tag_CPU_name
].s
=
12958 _bfd_elf_attr_strdup (obfd
, name_table
[out_attr
[i
].i
]);
12962 case Tag_ARM_ISA_use
:
12963 case Tag_THUMB_ISA_use
:
12964 case Tag_WMMX_arch
:
12965 case Tag_Advanced_SIMD_arch
:
12966 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */
12967 case Tag_ABI_FP_rounding
:
12968 case Tag_ABI_FP_exceptions
:
12969 case Tag_ABI_FP_user_exceptions
:
12970 case Tag_ABI_FP_number_model
:
12971 case Tag_FP_HP_extension
:
12972 case Tag_CPU_unaligned_access
:
12974 case Tag_MPextension_use
:
12975 /* Use the largest value specified. */
12976 if (in_attr
[i
].i
> out_attr
[i
].i
)
12977 out_attr
[i
].i
= in_attr
[i
].i
;
12980 case Tag_ABI_align_preserved
:
12981 case Tag_ABI_PCS_RO_data
:
12982 /* Use the smallest value specified. */
12983 if (in_attr
[i
].i
< out_attr
[i
].i
)
12984 out_attr
[i
].i
= in_attr
[i
].i
;
12987 case Tag_ABI_align_needed
:
12988 if ((in_attr
[i
].i
> 0 || out_attr
[i
].i
> 0)
12989 && (in_attr
[Tag_ABI_align_preserved
].i
== 0
12990 || out_attr
[Tag_ABI_align_preserved
].i
== 0))
12992 /* This error message should be enabled once all non-conformant
12993 binaries in the toolchain have had the attributes set
12996 (_("error: %B: 8-byte data alignment conflicts with %B"),
13000 /* Fall through. */
13001 case Tag_ABI_FP_denormal
:
13002 case Tag_ABI_PCS_GOT_use
:
13003 /* Use the "greatest" from the sequence 0, 2, 1, or the largest
13004 value if greater than 2 (for future-proofing). */
13005 if ((in_attr
[i
].i
> 2 && in_attr
[i
].i
> out_attr
[i
].i
)
13006 || (in_attr
[i
].i
<= 2 && out_attr
[i
].i
<= 2
13007 && order_021
[in_attr
[i
].i
] > order_021
[out_attr
[i
].i
]))
13008 out_attr
[i
].i
= in_attr
[i
].i
;
13011 case Tag_Virtualization_use
:
13012 /* The virtualization tag effectively stores two bits of
13013 information: the intended use of TrustZone (in bit 0), and the
13014 intended use of Virtualization (in bit 1). */
13015 if (out_attr
[i
].i
== 0)
13016 out_attr
[i
].i
= in_attr
[i
].i
;
13017 else if (in_attr
[i
].i
!= 0
13018 && in_attr
[i
].i
!= out_attr
[i
].i
)
13020 if (in_attr
[i
].i
<= 3 && out_attr
[i
].i
<= 3)
13025 (_("error: %B: unable to merge virtualization attributes "
13033 case Tag_CPU_arch_profile
:
13034 if (out_attr
[i
].i
!= in_attr
[i
].i
)
13036 /* 0 will merge with anything.
13037 'A' and 'S' merge to 'A'.
13038 'R' and 'S' merge to 'R'.
13039 'M' and 'A|R|S' is an error. */
13040 if (out_attr
[i
].i
== 0
13041 || (out_attr
[i
].i
== 'S'
13042 && (in_attr
[i
].i
== 'A' || in_attr
[i
].i
== 'R')))
13043 out_attr
[i
].i
= in_attr
[i
].i
;
13044 else if (in_attr
[i
].i
== 0
13045 || (in_attr
[i
].i
== 'S'
13046 && (out_attr
[i
].i
== 'A' || out_attr
[i
].i
== 'R')))
13047 ; /* Do nothing. */
13051 (_("error: %B: Conflicting architecture profiles %c/%c"),
13053 in_attr
[i
].i
? in_attr
[i
].i
: '0',
13054 out_attr
[i
].i
? out_attr
[i
].i
: '0');
13060 case Tag_DSP_extension
:
13061 /* No need to change output value if any of:
13062 - pre (<=) ARMv5T input architecture (do not have DSP)
13063 - M input profile not ARMv7E-M and do not have DSP. */
13064 if (in_attr
[Tag_CPU_arch
].i
<= 3
13065 || (in_attr
[Tag_CPU_arch_profile
].i
== 'M'
13066 && in_attr
[Tag_CPU_arch
].i
!= 13
13067 && in_attr
[i
].i
== 0))
13068 ; /* Do nothing. */
13069 /* Output value should be 0 if DSP part of architecture, ie.
13070 - post (>=) ARMv5te architecture output
13071 - A, R or S profile output or ARMv7E-M output architecture. */
13072 else if (out_attr
[Tag_CPU_arch
].i
>= 4
13073 && (out_attr
[Tag_CPU_arch_profile
].i
== 'A'
13074 || out_attr
[Tag_CPU_arch_profile
].i
== 'R'
13075 || out_attr
[Tag_CPU_arch_profile
].i
== 'S'
13076 || out_attr
[Tag_CPU_arch
].i
== 13))
13078 /* Otherwise, DSP instructions are added and not part of output
13086 /* Tag_ABI_HardFP_use is handled along with Tag_FP_arch since
13087 the meaning of Tag_ABI_HardFP_use depends on Tag_FP_arch
13088 when it's 0. It might mean absence of FP hardware if
13089 Tag_FP_arch is zero. */
13091 #define VFP_VERSION_COUNT 9
13092 static const struct
13096 } vfp_versions
[VFP_VERSION_COUNT
] =
13112 /* If the output has no requirement about FP hardware,
13113 follow the requirement of the input. */
13114 if (out_attr
[i
].i
== 0)
13116 BFD_ASSERT (out_attr
[Tag_ABI_HardFP_use
].i
== 0);
13117 out_attr
[i
].i
= in_attr
[i
].i
;
13118 out_attr
[Tag_ABI_HardFP_use
].i
13119 = in_attr
[Tag_ABI_HardFP_use
].i
;
13122 /* If the input has no requirement about FP hardware, do
13124 else if (in_attr
[i
].i
== 0)
13126 BFD_ASSERT (in_attr
[Tag_ABI_HardFP_use
].i
== 0);
13130 /* Both the input and the output have nonzero Tag_FP_arch.
13131 So Tag_ABI_HardFP_use is implied by Tag_FP_arch when it's zero. */
13133 /* If both the input and the output have zero Tag_ABI_HardFP_use,
13135 if (in_attr
[Tag_ABI_HardFP_use
].i
== 0
13136 && out_attr
[Tag_ABI_HardFP_use
].i
== 0)
13138 /* If the input and the output have different Tag_ABI_HardFP_use,
13139 the combination of them is 0 (implied by Tag_FP_arch). */
13140 else if (in_attr
[Tag_ABI_HardFP_use
].i
13141 != out_attr
[Tag_ABI_HardFP_use
].i
)
13142 out_attr
[Tag_ABI_HardFP_use
].i
= 0;
13144 /* Now we can handle Tag_FP_arch. */
13146 /* Values of VFP_VERSION_COUNT or more aren't defined, so just
13147 pick the biggest. */
13148 if (in_attr
[i
].i
>= VFP_VERSION_COUNT
13149 && in_attr
[i
].i
> out_attr
[i
].i
)
13151 out_attr
[i
] = in_attr
[i
];
13154 /* The output uses the superset of input features
13155 (ISA version) and registers. */
13156 ver
= vfp_versions
[in_attr
[i
].i
].ver
;
13157 if (ver
< vfp_versions
[out_attr
[i
].i
].ver
)
13158 ver
= vfp_versions
[out_attr
[i
].i
].ver
;
13159 regs
= vfp_versions
[in_attr
[i
].i
].regs
;
13160 if (regs
< vfp_versions
[out_attr
[i
].i
].regs
)
13161 regs
= vfp_versions
[out_attr
[i
].i
].regs
;
13162 /* This assumes all possible supersets are also a valid
13164 for (newval
= VFP_VERSION_COUNT
- 1; newval
> 0; newval
--)
13166 if (regs
== vfp_versions
[newval
].regs
13167 && ver
== vfp_versions
[newval
].ver
)
13170 out_attr
[i
].i
= newval
;
13173 case Tag_PCS_config
:
13174 if (out_attr
[i
].i
== 0)
13175 out_attr
[i
].i
= in_attr
[i
].i
;
13176 else if (in_attr
[i
].i
!= 0 && out_attr
[i
].i
!= in_attr
[i
].i
)
13178 /* It's sometimes ok to mix different configs, so this is only
13181 (_("Warning: %B: Conflicting platform configuration"), ibfd
);
13184 case Tag_ABI_PCS_R9_use
:
13185 if (in_attr
[i
].i
!= out_attr
[i
].i
13186 && out_attr
[i
].i
!= AEABI_R9_unused
13187 && in_attr
[i
].i
!= AEABI_R9_unused
)
13190 (_("error: %B: Conflicting use of R9"), ibfd
);
13193 if (out_attr
[i
].i
== AEABI_R9_unused
)
13194 out_attr
[i
].i
= in_attr
[i
].i
;
13196 case Tag_ABI_PCS_RW_data
:
13197 if (in_attr
[i
].i
== AEABI_PCS_RW_data_SBrel
13198 && out_attr
[Tag_ABI_PCS_R9_use
].i
!= AEABI_R9_SB
13199 && out_attr
[Tag_ABI_PCS_R9_use
].i
!= AEABI_R9_unused
)
13202 (_("error: %B: SB relative addressing conflicts with use of R9"),
13206 /* Use the smallest value specified. */
13207 if (in_attr
[i
].i
< out_attr
[i
].i
)
13208 out_attr
[i
].i
= in_attr
[i
].i
;
13210 case Tag_ABI_PCS_wchar_t
:
13211 if (out_attr
[i
].i
&& in_attr
[i
].i
&& out_attr
[i
].i
!= in_attr
[i
].i
13212 && !elf_arm_tdata (obfd
)->no_wchar_size_warning
)
13215 (_("warning: %B uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
13216 ibfd
, in_attr
[i
].i
, out_attr
[i
].i
);
13218 else if (in_attr
[i
].i
&& !out_attr
[i
].i
)
13219 out_attr
[i
].i
= in_attr
[i
].i
;
13221 case Tag_ABI_enum_size
:
13222 if (in_attr
[i
].i
!= AEABI_enum_unused
)
13224 if (out_attr
[i
].i
== AEABI_enum_unused
13225 || out_attr
[i
].i
== AEABI_enum_forced_wide
)
13227 /* The existing object is compatible with anything.
13228 Use whatever requirements the new object has. */
13229 out_attr
[i
].i
= in_attr
[i
].i
;
13231 else if (in_attr
[i
].i
!= AEABI_enum_forced_wide
13232 && out_attr
[i
].i
!= in_attr
[i
].i
13233 && !elf_arm_tdata (obfd
)->no_enum_size_warning
)
13235 static const char *aeabi_enum_names
[] =
13236 { "", "variable-size", "32-bit", "" };
13237 const char *in_name
=
13238 in_attr
[i
].i
< ARRAY_SIZE(aeabi_enum_names
)
13239 ? aeabi_enum_names
[in_attr
[i
].i
]
13241 const char *out_name
=
13242 out_attr
[i
].i
< ARRAY_SIZE(aeabi_enum_names
)
13243 ? aeabi_enum_names
[out_attr
[i
].i
]
13246 (_("warning: %B uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
13247 ibfd
, in_name
, out_name
);
13251 case Tag_ABI_VFP_args
:
13254 case Tag_ABI_WMMX_args
:
13255 if (in_attr
[i
].i
!= out_attr
[i
].i
)
13258 (_("error: %B uses iWMMXt register arguments, %B does not"),
13263 case Tag_compatibility
:
13264 /* Merged in target-independent code. */
13266 case Tag_ABI_HardFP_use
:
13267 /* This is handled along with Tag_FP_arch. */
13269 case Tag_ABI_FP_16bit_format
:
13270 if (in_attr
[i
].i
!= 0 && out_attr
[i
].i
!= 0)
13272 if (in_attr
[i
].i
!= out_attr
[i
].i
)
13275 (_("error: fp16 format mismatch between %B and %B"),
13280 if (in_attr
[i
].i
!= 0)
13281 out_attr
[i
].i
= in_attr
[i
].i
;
13285 /* A value of zero on input means that the divide instruction may
13286 be used if available in the base architecture as specified via
13287 Tag_CPU_arch and Tag_CPU_arch_profile. A value of 1 means that
13288 the user did not want divide instructions. A value of 2
13289 explicitly means that divide instructions were allowed in ARM
13290 and Thumb state. */
13291 if (in_attr
[i
].i
== out_attr
[i
].i
)
13292 /* Do nothing. */ ;
13293 else if (elf32_arm_attributes_forbid_div (in_attr
)
13294 && !elf32_arm_attributes_accept_div (out_attr
))
13296 else if (elf32_arm_attributes_forbid_div (out_attr
)
13297 && elf32_arm_attributes_accept_div (in_attr
))
13298 out_attr
[i
].i
= in_attr
[i
].i
;
13299 else if (in_attr
[i
].i
== 2)
13300 out_attr
[i
].i
= in_attr
[i
].i
;
13303 case Tag_MPextension_use_legacy
:
13304 /* We don't output objects with Tag_MPextension_use_legacy - we
13305 move the value to Tag_MPextension_use. */
13306 if (in_attr
[i
].i
!= 0 && in_attr
[Tag_MPextension_use
].i
!= 0)
13308 if (in_attr
[Tag_MPextension_use
].i
!= in_attr
[i
].i
)
13311 (_("%B has has both the current and legacy "
13312 "Tag_MPextension_use attributes"),
13318 if (in_attr
[i
].i
> out_attr
[Tag_MPextension_use
].i
)
13319 out_attr
[Tag_MPextension_use
] = in_attr
[i
];
13323 case Tag_nodefaults
:
13324 /* This tag is set if it exists, but the value is unused (and is
13325 typically zero). We don't actually need to do anything here -
13326 the merge happens automatically when the type flags are merged
13329 case Tag_also_compatible_with
:
13330 /* Already done in Tag_CPU_arch. */
13332 case Tag_conformance
:
13333 /* Keep the attribute if it matches. Throw it away otherwise.
13334 No attribute means no claim to conform. */
13335 if (!in_attr
[i
].s
|| !out_attr
[i
].s
13336 || strcmp (in_attr
[i
].s
, out_attr
[i
].s
) != 0)
13337 out_attr
[i
].s
= NULL
;
13342 = result
&& _bfd_elf_merge_unknown_attribute_low (ibfd
, obfd
, i
);
13345 /* If out_attr was copied from in_attr then it won't have a type yet. */
13346 if (in_attr
[i
].type
&& !out_attr
[i
].type
)
13347 out_attr
[i
].type
= in_attr
[i
].type
;
13350 /* Merge Tag_compatibility attributes and any common GNU ones. */
13351 if (!_bfd_elf_merge_object_attributes (ibfd
, obfd
))
13354 /* Check for any attributes not known on ARM. */
13355 result
&= _bfd_elf_merge_unknown_attribute_list (ibfd
, obfd
);
13361 /* Return TRUE if the two EABI versions are incompatible. */
13364 elf32_arm_versions_compatible (unsigned iver
, unsigned over
)
13366 /* v4 and v5 are the same spec before and after it was released,
13367 so allow mixing them. */
13368 if ((iver
== EF_ARM_EABI_VER4
&& over
== EF_ARM_EABI_VER5
)
13369 || (iver
== EF_ARM_EABI_VER5
&& over
== EF_ARM_EABI_VER4
))
13372 return (iver
== over
);
13375 /* Merge backend specific data from an object file to the output
13376 object file when linking. */
13379 elf32_arm_merge_private_bfd_data (bfd
* ibfd
, bfd
* obfd
);
13381 /* Display the flags field. */
13384 elf32_arm_print_private_bfd_data (bfd
*abfd
, void * ptr
)
13386 FILE * file
= (FILE *) ptr
;
13387 unsigned long flags
;
13389 BFD_ASSERT (abfd
!= NULL
&& ptr
!= NULL
);
13391 /* Print normal ELF private data. */
13392 _bfd_elf_print_private_bfd_data (abfd
, ptr
);
13394 flags
= elf_elfheader (abfd
)->e_flags
;
13395 /* Ignore init flag - it may not be set, despite the flags field
13396 containing valid data. */
13398 /* xgettext:c-format */
13399 fprintf (file
, _("private flags = %lx:"), elf_elfheader (abfd
)->e_flags
);
13401 switch (EF_ARM_EABI_VERSION (flags
))
13403 case EF_ARM_EABI_UNKNOWN
:
13404 /* The following flag bits are GNU extensions and not part of the
13405 official ARM ELF extended ABI. Hence they are only decoded if
13406 the EABI version is not set. */
13407 if (flags
& EF_ARM_INTERWORK
)
13408 fprintf (file
, _(" [interworking enabled]"));
13410 if (flags
& EF_ARM_APCS_26
)
13411 fprintf (file
, " [APCS-26]");
13413 fprintf (file
, " [APCS-32]");
13415 if (flags
& EF_ARM_VFP_FLOAT
)
13416 fprintf (file
, _(" [VFP float format]"));
13417 else if (flags
& EF_ARM_MAVERICK_FLOAT
)
13418 fprintf (file
, _(" [Maverick float format]"));
13420 fprintf (file
, _(" [FPA float format]"));
13422 if (flags
& EF_ARM_APCS_FLOAT
)
13423 fprintf (file
, _(" [floats passed in float registers]"));
13425 if (flags
& EF_ARM_PIC
)
13426 fprintf (file
, _(" [position independent]"));
13428 if (flags
& EF_ARM_NEW_ABI
)
13429 fprintf (file
, _(" [new ABI]"));
13431 if (flags
& EF_ARM_OLD_ABI
)
13432 fprintf (file
, _(" [old ABI]"));
13434 if (flags
& EF_ARM_SOFT_FLOAT
)
13435 fprintf (file
, _(" [software FP]"));
13437 flags
&= ~(EF_ARM_INTERWORK
| EF_ARM_APCS_26
| EF_ARM_APCS_FLOAT
13438 | EF_ARM_PIC
| EF_ARM_NEW_ABI
| EF_ARM_OLD_ABI
13439 | EF_ARM_SOFT_FLOAT
| EF_ARM_VFP_FLOAT
13440 | EF_ARM_MAVERICK_FLOAT
);
13443 case EF_ARM_EABI_VER1
:
13444 fprintf (file
, _(" [Version1 EABI]"));
13446 if (flags
& EF_ARM_SYMSARESORTED
)
13447 fprintf (file
, _(" [sorted symbol table]"));
13449 fprintf (file
, _(" [unsorted symbol table]"));
13451 flags
&= ~ EF_ARM_SYMSARESORTED
;
13454 case EF_ARM_EABI_VER2
:
13455 fprintf (file
, _(" [Version2 EABI]"));
13457 if (flags
& EF_ARM_SYMSARESORTED
)
13458 fprintf (file
, _(" [sorted symbol table]"));
13460 fprintf (file
, _(" [unsorted symbol table]"));
13462 if (flags
& EF_ARM_DYNSYMSUSESEGIDX
)
13463 fprintf (file
, _(" [dynamic symbols use segment index]"));
13465 if (flags
& EF_ARM_MAPSYMSFIRST
)
13466 fprintf (file
, _(" [mapping symbols precede others]"));
13468 flags
&= ~(EF_ARM_SYMSARESORTED
| EF_ARM_DYNSYMSUSESEGIDX
13469 | EF_ARM_MAPSYMSFIRST
);
13472 case EF_ARM_EABI_VER3
:
13473 fprintf (file
, _(" [Version3 EABI]"));
13476 case EF_ARM_EABI_VER4
:
13477 fprintf (file
, _(" [Version4 EABI]"));
13480 case EF_ARM_EABI_VER5
:
13481 fprintf (file
, _(" [Version5 EABI]"));
13483 if (flags
& EF_ARM_ABI_FLOAT_SOFT
)
13484 fprintf (file
, _(" [soft-float ABI]"));
13486 if (flags
& EF_ARM_ABI_FLOAT_HARD
)
13487 fprintf (file
, _(" [hard-float ABI]"));
13489 flags
&= ~(EF_ARM_ABI_FLOAT_SOFT
| EF_ARM_ABI_FLOAT_HARD
);
13492 if (flags
& EF_ARM_BE8
)
13493 fprintf (file
, _(" [BE8]"));
13495 if (flags
& EF_ARM_LE8
)
13496 fprintf (file
, _(" [LE8]"));
13498 flags
&= ~(EF_ARM_LE8
| EF_ARM_BE8
);
13502 fprintf (file
, _(" <EABI version unrecognised>"));
13506 flags
&= ~ EF_ARM_EABIMASK
;
13508 if (flags
& EF_ARM_RELEXEC
)
13509 fprintf (file
, _(" [relocatable executable]"));
13511 flags
&= ~EF_ARM_RELEXEC
;
13514 fprintf (file
, _("<Unrecognised flag bits set>"));
13516 fputc ('\n', file
);
13522 elf32_arm_get_symbol_type (Elf_Internal_Sym
* elf_sym
, int type
)
13524 switch (ELF_ST_TYPE (elf_sym
->st_info
))
13526 case STT_ARM_TFUNC
:
13527 return ELF_ST_TYPE (elf_sym
->st_info
);
13529 case STT_ARM_16BIT
:
13530 /* If the symbol is not an object, return the STT_ARM_16BIT flag.
13531 This allows us to distinguish between data used by Thumb instructions
13532 and non-data (which is probably code) inside Thumb regions of an
13534 if (type
!= STT_OBJECT
&& type
!= STT_TLS
)
13535 return ELF_ST_TYPE (elf_sym
->st_info
);
13546 elf32_arm_gc_mark_hook (asection
*sec
,
13547 struct bfd_link_info
*info
,
13548 Elf_Internal_Rela
*rel
,
13549 struct elf_link_hash_entry
*h
,
13550 Elf_Internal_Sym
*sym
)
13553 switch (ELF32_R_TYPE (rel
->r_info
))
13555 case R_ARM_GNU_VTINHERIT
:
13556 case R_ARM_GNU_VTENTRY
:
13560 return _bfd_elf_gc_mark_hook (sec
, info
, rel
, h
, sym
);
13563 /* Update the got entry reference counts for the section being removed. */
13566 elf32_arm_gc_sweep_hook (bfd
* abfd
,
13567 struct bfd_link_info
* info
,
13569 const Elf_Internal_Rela
* relocs
)
13571 Elf_Internal_Shdr
*symtab_hdr
;
13572 struct elf_link_hash_entry
**sym_hashes
;
13573 bfd_signed_vma
*local_got_refcounts
;
13574 const Elf_Internal_Rela
*rel
, *relend
;
13575 struct elf32_arm_link_hash_table
* globals
;
13577 if (bfd_link_relocatable (info
))
13580 globals
= elf32_arm_hash_table (info
);
13581 if (globals
== NULL
)
13584 elf_section_data (sec
)->local_dynrel
= NULL
;
13586 symtab_hdr
= & elf_symtab_hdr (abfd
);
13587 sym_hashes
= elf_sym_hashes (abfd
);
13588 local_got_refcounts
= elf_local_got_refcounts (abfd
);
13590 check_use_blx (globals
);
13592 relend
= relocs
+ sec
->reloc_count
;
13593 for (rel
= relocs
; rel
< relend
; rel
++)
13595 unsigned long r_symndx
;
13596 struct elf_link_hash_entry
*h
= NULL
;
13597 struct elf32_arm_link_hash_entry
*eh
;
13599 bfd_boolean call_reloc_p
;
13600 bfd_boolean may_become_dynamic_p
;
13601 bfd_boolean may_need_local_target_p
;
13602 union gotplt_union
*root_plt
;
13603 struct arm_plt_info
*arm_plt
;
13605 r_symndx
= ELF32_R_SYM (rel
->r_info
);
13606 if (r_symndx
>= symtab_hdr
->sh_info
)
13608 h
= sym_hashes
[r_symndx
- symtab_hdr
->sh_info
];
13609 while (h
->root
.type
== bfd_link_hash_indirect
13610 || h
->root
.type
== bfd_link_hash_warning
)
13611 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
13613 eh
= (struct elf32_arm_link_hash_entry
*) h
;
13615 call_reloc_p
= FALSE
;
13616 may_become_dynamic_p
= FALSE
;
13617 may_need_local_target_p
= FALSE
;
13619 r_type
= ELF32_R_TYPE (rel
->r_info
);
13620 r_type
= arm_real_reloc_type (globals
, r_type
);
13624 case R_ARM_GOT_PREL
:
13625 case R_ARM_TLS_GD32
:
13626 case R_ARM_TLS_IE32
:
13629 if (h
->got
.refcount
> 0)
13630 h
->got
.refcount
-= 1;
13632 else if (local_got_refcounts
!= NULL
)
13634 if (local_got_refcounts
[r_symndx
] > 0)
13635 local_got_refcounts
[r_symndx
] -= 1;
13639 case R_ARM_TLS_LDM32
:
13640 globals
->tls_ldm_got
.refcount
-= 1;
13648 case R_ARM_THM_CALL
:
13649 case R_ARM_THM_JUMP24
:
13650 case R_ARM_THM_JUMP19
:
13651 call_reloc_p
= TRUE
;
13652 may_need_local_target_p
= TRUE
;
13656 if (!globals
->vxworks_p
)
13658 may_need_local_target_p
= TRUE
;
13661 /* Fall through. */
13663 case R_ARM_ABS32_NOI
:
13665 case R_ARM_REL32_NOI
:
13666 case R_ARM_MOVW_ABS_NC
:
13667 case R_ARM_MOVT_ABS
:
13668 case R_ARM_MOVW_PREL_NC
:
13669 case R_ARM_MOVT_PREL
:
13670 case R_ARM_THM_MOVW_ABS_NC
:
13671 case R_ARM_THM_MOVT_ABS
:
13672 case R_ARM_THM_MOVW_PREL_NC
:
13673 case R_ARM_THM_MOVT_PREL
:
13674 /* Should the interworking branches be here also? */
13675 if ((bfd_link_pic (info
) || globals
->root
.is_relocatable_executable
)
13676 && (sec
->flags
& SEC_ALLOC
) != 0)
13679 && elf32_arm_howto_from_type (r_type
)->pc_relative
)
13681 call_reloc_p
= TRUE
;
13682 may_need_local_target_p
= TRUE
;
13685 may_become_dynamic_p
= TRUE
;
13688 may_need_local_target_p
= TRUE
;
13695 if (may_need_local_target_p
13696 && elf32_arm_get_plt_info (abfd
, eh
, r_symndx
, &root_plt
, &arm_plt
))
13698 /* If PLT refcount book-keeping is wrong and too low, we'll
13699 see a zero value (going to -1) for the root PLT reference
13701 if (root_plt
->refcount
>= 0)
13703 BFD_ASSERT (root_plt
->refcount
!= 0);
13704 root_plt
->refcount
-= 1;
13707 /* A value of -1 means the symbol has become local, forced
13708 or seeing a hidden definition. Any other negative value
13710 BFD_ASSERT (root_plt
->refcount
== -1);
13713 arm_plt
->noncall_refcount
--;
13715 if (r_type
== R_ARM_THM_CALL
)
13716 arm_plt
->maybe_thumb_refcount
--;
13718 if (r_type
== R_ARM_THM_JUMP24
13719 || r_type
== R_ARM_THM_JUMP19
)
13720 arm_plt
->thumb_refcount
--;
13723 if (may_become_dynamic_p
)
13725 struct elf_dyn_relocs
**pp
;
13726 struct elf_dyn_relocs
*p
;
13729 pp
= &(eh
->dyn_relocs
);
13732 Elf_Internal_Sym
*isym
;
13734 isym
= bfd_sym_from_r_symndx (&globals
->sym_cache
,
13738 pp
= elf32_arm_get_local_dynreloc_list (abfd
, r_symndx
, isym
);
13742 for (; (p
= *pp
) != NULL
; pp
= &p
->next
)
13745 /* Everything must go for SEC. */
13755 /* Look through the relocs for a section during the first phase. */
13758 elf32_arm_check_relocs (bfd
*abfd
, struct bfd_link_info
*info
,
13759 asection
*sec
, const Elf_Internal_Rela
*relocs
)
13761 Elf_Internal_Shdr
*symtab_hdr
;
13762 struct elf_link_hash_entry
**sym_hashes
;
13763 const Elf_Internal_Rela
*rel
;
13764 const Elf_Internal_Rela
*rel_end
;
13767 struct elf32_arm_link_hash_table
*htab
;
13768 bfd_boolean call_reloc_p
;
13769 bfd_boolean may_become_dynamic_p
;
13770 bfd_boolean may_need_local_target_p
;
13771 unsigned long nsyms
;
13773 if (bfd_link_relocatable (info
))
13776 BFD_ASSERT (is_arm_elf (abfd
));
13778 htab
= elf32_arm_hash_table (info
);
13784 /* Create dynamic sections for relocatable executables so that we can
13785 copy relocations. */
13786 if (htab
->root
.is_relocatable_executable
13787 && ! htab
->root
.dynamic_sections_created
)
13789 if (! _bfd_elf_link_create_dynamic_sections (abfd
, info
))
13793 if (htab
->root
.dynobj
== NULL
)
13794 htab
->root
.dynobj
= abfd
;
13795 if (!create_ifunc_sections (info
))
13798 dynobj
= htab
->root
.dynobj
;
13800 symtab_hdr
= & elf_symtab_hdr (abfd
);
13801 sym_hashes
= elf_sym_hashes (abfd
);
13802 nsyms
= NUM_SHDR_ENTRIES (symtab_hdr
);
13804 rel_end
= relocs
+ sec
->reloc_count
;
13805 for (rel
= relocs
; rel
< rel_end
; rel
++)
13807 Elf_Internal_Sym
*isym
;
13808 struct elf_link_hash_entry
*h
;
13809 struct elf32_arm_link_hash_entry
*eh
;
13810 unsigned long r_symndx
;
13813 r_symndx
= ELF32_R_SYM (rel
->r_info
);
13814 r_type
= ELF32_R_TYPE (rel
->r_info
);
13815 r_type
= arm_real_reloc_type (htab
, r_type
);
13817 if (r_symndx
>= nsyms
13818 /* PR 9934: It is possible to have relocations that do not
13819 refer to symbols, thus it is also possible to have an
13820 object file containing relocations but no symbol table. */
13821 && (r_symndx
> STN_UNDEF
|| nsyms
> 0))
13823 (*_bfd_error_handler
) (_("%B: bad symbol index: %d"), abfd
,
13832 if (r_symndx
< symtab_hdr
->sh_info
)
13834 /* A local symbol. */
13835 isym
= bfd_sym_from_r_symndx (&htab
->sym_cache
,
13842 h
= sym_hashes
[r_symndx
- symtab_hdr
->sh_info
];
13843 while (h
->root
.type
== bfd_link_hash_indirect
13844 || h
->root
.type
== bfd_link_hash_warning
)
13845 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
13847 /* PR15323, ref flags aren't set for references in the
13849 h
->root
.non_ir_ref
= 1;
13853 eh
= (struct elf32_arm_link_hash_entry
*) h
;
13855 call_reloc_p
= FALSE
;
13856 may_become_dynamic_p
= FALSE
;
13857 may_need_local_target_p
= FALSE
;
13859 /* Could be done earlier, if h were already available. */
13860 r_type
= elf32_arm_tls_transition (info
, r_type
, h
);
13864 case R_ARM_GOT_PREL
:
13865 case R_ARM_TLS_GD32
:
13866 case R_ARM_TLS_IE32
:
13867 case R_ARM_TLS_GOTDESC
:
13868 case R_ARM_TLS_DESCSEQ
:
13869 case R_ARM_THM_TLS_DESCSEQ
:
13870 case R_ARM_TLS_CALL
:
13871 case R_ARM_THM_TLS_CALL
:
13872 /* This symbol requires a global offset table entry. */
13874 int tls_type
, old_tls_type
;
13878 case R_ARM_TLS_GD32
: tls_type
= GOT_TLS_GD
; break;
13880 case R_ARM_TLS_IE32
: tls_type
= GOT_TLS_IE
; break;
13882 case R_ARM_TLS_GOTDESC
:
13883 case R_ARM_TLS_CALL
: case R_ARM_THM_TLS_CALL
:
13884 case R_ARM_TLS_DESCSEQ
: case R_ARM_THM_TLS_DESCSEQ
:
13885 tls_type
= GOT_TLS_GDESC
; break;
13887 default: tls_type
= GOT_NORMAL
; break;
13890 if (!bfd_link_executable (info
) && (tls_type
& GOT_TLS_IE
))
13891 info
->flags
|= DF_STATIC_TLS
;
13896 old_tls_type
= elf32_arm_hash_entry (h
)->tls_type
;
13900 /* This is a global offset table entry for a local symbol. */
13901 if (!elf32_arm_allocate_local_sym_info (abfd
))
13903 elf_local_got_refcounts (abfd
)[r_symndx
] += 1;
13904 old_tls_type
= elf32_arm_local_got_tls_type (abfd
) [r_symndx
];
13907 /* If a variable is accessed with both tls methods, two
13908 slots may be created. */
13909 if (GOT_TLS_GD_ANY_P (old_tls_type
)
13910 && GOT_TLS_GD_ANY_P (tls_type
))
13911 tls_type
|= old_tls_type
;
13913 /* We will already have issued an error message if there
13914 is a TLS/non-TLS mismatch, based on the symbol
13915 type. So just combine any TLS types needed. */
13916 if (old_tls_type
!= GOT_UNKNOWN
&& old_tls_type
!= GOT_NORMAL
13917 && tls_type
!= GOT_NORMAL
)
13918 tls_type
|= old_tls_type
;
13920 /* If the symbol is accessed in both IE and GDESC
13921 method, we're able to relax. Turn off the GDESC flag,
13922 without messing up with any other kind of tls types
13923 that may be involved. */
13924 if ((tls_type
& GOT_TLS_IE
) && (tls_type
& GOT_TLS_GDESC
))
13925 tls_type
&= ~GOT_TLS_GDESC
;
13927 if (old_tls_type
!= tls_type
)
13930 elf32_arm_hash_entry (h
)->tls_type
= tls_type
;
13932 elf32_arm_local_got_tls_type (abfd
) [r_symndx
] = tls_type
;
13935 /* Fall through. */
13937 case R_ARM_TLS_LDM32
:
13938 if (r_type
== R_ARM_TLS_LDM32
)
13939 htab
->tls_ldm_got
.refcount
++;
13940 /* Fall through. */
13942 case R_ARM_GOTOFF32
:
13944 if (htab
->root
.sgot
== NULL
13945 && !create_got_section (htab
->root
.dynobj
, info
))
13954 case R_ARM_THM_CALL
:
13955 case R_ARM_THM_JUMP24
:
13956 case R_ARM_THM_JUMP19
:
13957 call_reloc_p
= TRUE
;
13958 may_need_local_target_p
= TRUE
;
13962 /* VxWorks uses dynamic R_ARM_ABS12 relocations for
13963 ldr __GOTT_INDEX__ offsets. */
13964 if (!htab
->vxworks_p
)
13966 may_need_local_target_p
= TRUE
;
13969 else goto jump_over
;
13971 /* Fall through. */
13973 case R_ARM_MOVW_ABS_NC
:
13974 case R_ARM_MOVT_ABS
:
13975 case R_ARM_THM_MOVW_ABS_NC
:
13976 case R_ARM_THM_MOVT_ABS
:
13977 if (bfd_link_pic (info
))
13979 (*_bfd_error_handler
)
13980 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
13981 abfd
, elf32_arm_howto_table_1
[r_type
].name
,
13982 (h
) ? h
->root
.root
.string
: "a local symbol");
13983 bfd_set_error (bfd_error_bad_value
);
13987 /* Fall through. */
13989 case R_ARM_ABS32_NOI
:
13991 if (h
!= NULL
&& bfd_link_executable (info
))
13993 h
->pointer_equality_needed
= 1;
13995 /* Fall through. */
13997 case R_ARM_REL32_NOI
:
13998 case R_ARM_MOVW_PREL_NC
:
13999 case R_ARM_MOVT_PREL
:
14000 case R_ARM_THM_MOVW_PREL_NC
:
14001 case R_ARM_THM_MOVT_PREL
:
14003 /* Should the interworking branches be listed here? */
14004 if ((bfd_link_pic (info
) || htab
->root
.is_relocatable_executable
)
14005 && (sec
->flags
& SEC_ALLOC
) != 0)
14008 && elf32_arm_howto_from_type (r_type
)->pc_relative
)
14010 /* In shared libraries and relocatable executables,
14011 we treat local relative references as calls;
14012 see the related SYMBOL_CALLS_LOCAL code in
14013 allocate_dynrelocs. */
14014 call_reloc_p
= TRUE
;
14015 may_need_local_target_p
= TRUE
;
14018 /* We are creating a shared library or relocatable
14019 executable, and this is a reloc against a global symbol,
14020 or a non-PC-relative reloc against a local symbol.
14021 We may need to copy the reloc into the output. */
14022 may_become_dynamic_p
= TRUE
;
14025 may_need_local_target_p
= TRUE
;
14028 /* This relocation describes the C++ object vtable hierarchy.
14029 Reconstruct it for later use during GC. */
14030 case R_ARM_GNU_VTINHERIT
:
14031 if (!bfd_elf_gc_record_vtinherit (abfd
, sec
, h
, rel
->r_offset
))
14035 /* This relocation describes which C++ vtable entries are actually
14036 used. Record for later use during GC. */
14037 case R_ARM_GNU_VTENTRY
:
14038 BFD_ASSERT (h
!= NULL
);
14040 && !bfd_elf_gc_record_vtentry (abfd
, sec
, h
, rel
->r_offset
))
14048 /* We may need a .plt entry if the function this reloc
14049 refers to is in a different object, regardless of the
14050 symbol's type. We can't tell for sure yet, because
14051 something later might force the symbol local. */
14053 else if (may_need_local_target_p
)
14054 /* If this reloc is in a read-only section, we might
14055 need a copy reloc. We can't check reliably at this
14056 stage whether the section is read-only, as input
14057 sections have not yet been mapped to output sections.
14058 Tentatively set the flag for now, and correct in
14059 adjust_dynamic_symbol. */
14060 h
->non_got_ref
= 1;
14063 if (may_need_local_target_p
14064 && (h
!= NULL
|| ELF32_ST_TYPE (isym
->st_info
) == STT_GNU_IFUNC
))
14066 union gotplt_union
*root_plt
;
14067 struct arm_plt_info
*arm_plt
;
14068 struct arm_local_iplt_info
*local_iplt
;
14072 root_plt
= &h
->plt
;
14073 arm_plt
= &eh
->plt
;
14077 local_iplt
= elf32_arm_create_local_iplt (abfd
, r_symndx
);
14078 if (local_iplt
== NULL
)
14080 root_plt
= &local_iplt
->root
;
14081 arm_plt
= &local_iplt
->arm
;
14084 /* If the symbol is a function that doesn't bind locally,
14085 this relocation will need a PLT entry. */
14086 if (root_plt
->refcount
!= -1)
14087 root_plt
->refcount
+= 1;
14090 arm_plt
->noncall_refcount
++;
14092 /* It's too early to use htab->use_blx here, so we have to
14093 record possible blx references separately from
14094 relocs that definitely need a thumb stub. */
14096 if (r_type
== R_ARM_THM_CALL
)
14097 arm_plt
->maybe_thumb_refcount
+= 1;
14099 if (r_type
== R_ARM_THM_JUMP24
14100 || r_type
== R_ARM_THM_JUMP19
)
14101 arm_plt
->thumb_refcount
+= 1;
14104 if (may_become_dynamic_p
)
14106 struct elf_dyn_relocs
*p
, **head
;
14108 /* Create a reloc section in dynobj. */
14109 if (sreloc
== NULL
)
14111 sreloc
= _bfd_elf_make_dynamic_reloc_section
14112 (sec
, dynobj
, 2, abfd
, ! htab
->use_rel
);
14114 if (sreloc
== NULL
)
14117 /* BPABI objects never have dynamic relocations mapped. */
14118 if (htab
->symbian_p
)
14122 flags
= bfd_get_section_flags (dynobj
, sreloc
);
14123 flags
&= ~(SEC_LOAD
| SEC_ALLOC
);
14124 bfd_set_section_flags (dynobj
, sreloc
, flags
);
14128 /* If this is a global symbol, count the number of
14129 relocations we need for this symbol. */
14131 head
= &((struct elf32_arm_link_hash_entry
*) h
)->dyn_relocs
;
14134 head
= elf32_arm_get_local_dynreloc_list (abfd
, r_symndx
, isym
);
14140 if (p
== NULL
|| p
->sec
!= sec
)
14142 bfd_size_type amt
= sizeof *p
;
14144 p
= (struct elf_dyn_relocs
*) bfd_alloc (htab
->root
.dynobj
, amt
);
14154 if (elf32_arm_howto_from_type (r_type
)->pc_relative
)
14163 /* Unwinding tables are not referenced directly. This pass marks them as
14164 required if the corresponding code section is marked. */
14167 elf32_arm_gc_mark_extra_sections (struct bfd_link_info
*info
,
14168 elf_gc_mark_hook_fn gc_mark_hook
)
14171 Elf_Internal_Shdr
**elf_shdrp
;
14174 _bfd_elf_gc_mark_extra_sections (info
, gc_mark_hook
);
14176 /* Marking EH data may cause additional code sections to be marked,
14177 requiring multiple passes. */
14182 for (sub
= info
->input_bfds
; sub
!= NULL
; sub
= sub
->link
.next
)
14186 if (! is_arm_elf (sub
))
14189 elf_shdrp
= elf_elfsections (sub
);
14190 for (o
= sub
->sections
; o
!= NULL
; o
= o
->next
)
14192 Elf_Internal_Shdr
*hdr
;
14194 hdr
= &elf_section_data (o
)->this_hdr
;
14195 if (hdr
->sh_type
== SHT_ARM_EXIDX
14197 && hdr
->sh_link
< elf_numsections (sub
)
14199 && elf_shdrp
[hdr
->sh_link
]->bfd_section
->gc_mark
)
14202 if (!_bfd_elf_gc_mark (info
, o
, gc_mark_hook
))
14212 /* Treat mapping symbols as special target symbols. */
14215 elf32_arm_is_target_special_symbol (bfd
* abfd ATTRIBUTE_UNUSED
, asymbol
* sym
)
14217 return bfd_is_arm_special_symbol_name (sym
->name
,
14218 BFD_ARM_SPECIAL_SYM_TYPE_ANY
);
14221 /* This is a copy of elf_find_function() from elf.c except that
14222 ARM mapping symbols are ignored when looking for function names
14223 and STT_ARM_TFUNC is considered to a function type. */
14226 arm_elf_find_function (bfd
* abfd ATTRIBUTE_UNUSED
,
14227 asymbol
** symbols
,
14228 asection
* section
,
14230 const char ** filename_ptr
,
14231 const char ** functionname_ptr
)
14233 const char * filename
= NULL
;
14234 asymbol
* func
= NULL
;
14235 bfd_vma low_func
= 0;
14238 for (p
= symbols
; *p
!= NULL
; p
++)
14240 elf_symbol_type
*q
;
14242 q
= (elf_symbol_type
*) *p
;
14244 switch (ELF_ST_TYPE (q
->internal_elf_sym
.st_info
))
14249 filename
= bfd_asymbol_name (&q
->symbol
);
14252 case STT_ARM_TFUNC
:
14254 /* Skip mapping symbols. */
14255 if ((q
->symbol
.flags
& BSF_LOCAL
)
14256 && bfd_is_arm_special_symbol_name (q
->symbol
.name
,
14257 BFD_ARM_SPECIAL_SYM_TYPE_ANY
))
14259 /* Fall through. */
14260 if (bfd_get_section (&q
->symbol
) == section
14261 && q
->symbol
.value
>= low_func
14262 && q
->symbol
.value
<= offset
)
14264 func
= (asymbol
*) q
;
14265 low_func
= q
->symbol
.value
;
14275 *filename_ptr
= filename
;
14276 if (functionname_ptr
)
14277 *functionname_ptr
= bfd_asymbol_name (func
);
14283 /* Find the nearest line to a particular section and offset, for error
14284 reporting. This code is a duplicate of the code in elf.c, except
14285 that it uses arm_elf_find_function. */
14288 elf32_arm_find_nearest_line (bfd
* abfd
,
14289 asymbol
** symbols
,
14290 asection
* section
,
14292 const char ** filename_ptr
,
14293 const char ** functionname_ptr
,
14294 unsigned int * line_ptr
,
14295 unsigned int * discriminator_ptr
)
14297 bfd_boolean found
= FALSE
;
14299 if (_bfd_dwarf2_find_nearest_line (abfd
, symbols
, NULL
, section
, offset
,
14300 filename_ptr
, functionname_ptr
,
14301 line_ptr
, discriminator_ptr
,
14302 dwarf_debug_sections
, 0,
14303 & elf_tdata (abfd
)->dwarf2_find_line_info
))
14305 if (!*functionname_ptr
)
14306 arm_elf_find_function (abfd
, symbols
, section
, offset
,
14307 *filename_ptr
? NULL
: filename_ptr
,
14313 /* Skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain
14316 if (! _bfd_stab_section_find_nearest_line (abfd
, symbols
, section
, offset
,
14317 & found
, filename_ptr
,
14318 functionname_ptr
, line_ptr
,
14319 & elf_tdata (abfd
)->line_info
))
14322 if (found
&& (*functionname_ptr
|| *line_ptr
))
14325 if (symbols
== NULL
)
14328 if (! arm_elf_find_function (abfd
, symbols
, section
, offset
,
14329 filename_ptr
, functionname_ptr
))
14337 elf32_arm_find_inliner_info (bfd
* abfd
,
14338 const char ** filename_ptr
,
14339 const char ** functionname_ptr
,
14340 unsigned int * line_ptr
)
14343 found
= _bfd_dwarf2_find_inliner_info (abfd
, filename_ptr
,
14344 functionname_ptr
, line_ptr
,
14345 & elf_tdata (abfd
)->dwarf2_find_line_info
);
14349 /* Adjust a symbol defined by a dynamic object and referenced by a
14350 regular object. The current definition is in some section of the
14351 dynamic object, but we're not including those sections. We have to
14352 change the definition to something the rest of the link can
14356 elf32_arm_adjust_dynamic_symbol (struct bfd_link_info
* info
,
14357 struct elf_link_hash_entry
* h
)
14361 struct elf32_arm_link_hash_entry
* eh
;
14362 struct elf32_arm_link_hash_table
*globals
;
14364 globals
= elf32_arm_hash_table (info
);
14365 if (globals
== NULL
)
14368 dynobj
= elf_hash_table (info
)->dynobj
;
14370 /* Make sure we know what is going on here. */
14371 BFD_ASSERT (dynobj
!= NULL
14373 || h
->type
== STT_GNU_IFUNC
14374 || h
->u
.weakdef
!= NULL
14377 && !h
->def_regular
)));
14379 eh
= (struct elf32_arm_link_hash_entry
*) h
;
14381 /* If this is a function, put it in the procedure linkage table. We
14382 will fill in the contents of the procedure linkage table later,
14383 when we know the address of the .got section. */
14384 if (h
->type
== STT_FUNC
|| h
->type
== STT_GNU_IFUNC
|| h
->needs_plt
)
14386 /* Calls to STT_GNU_IFUNC symbols always use a PLT, even if the
14387 symbol binds locally. */
14388 if (h
->plt
.refcount
<= 0
14389 || (h
->type
!= STT_GNU_IFUNC
14390 && (SYMBOL_CALLS_LOCAL (info
, h
)
14391 || (ELF_ST_VISIBILITY (h
->other
) != STV_DEFAULT
14392 && h
->root
.type
== bfd_link_hash_undefweak
))))
14394 /* This case can occur if we saw a PLT32 reloc in an input
14395 file, but the symbol was never referred to by a dynamic
14396 object, or if all references were garbage collected. In
14397 such a case, we don't actually need to build a procedure
14398 linkage table, and we can just do a PC24 reloc instead. */
14399 h
->plt
.offset
= (bfd_vma
) -1;
14400 eh
->plt
.thumb_refcount
= 0;
14401 eh
->plt
.maybe_thumb_refcount
= 0;
14402 eh
->plt
.noncall_refcount
= 0;
14410 /* It's possible that we incorrectly decided a .plt reloc was
14411 needed for an R_ARM_PC24 or similar reloc to a non-function sym
14412 in check_relocs. We can't decide accurately between function
14413 and non-function syms in check-relocs; Objects loaded later in
14414 the link may change h->type. So fix it now. */
14415 h
->plt
.offset
= (bfd_vma
) -1;
14416 eh
->plt
.thumb_refcount
= 0;
14417 eh
->plt
.maybe_thumb_refcount
= 0;
14418 eh
->plt
.noncall_refcount
= 0;
14421 /* If this is a weak symbol, and there is a real definition, the
14422 processor independent code will have arranged for us to see the
14423 real definition first, and we can just use the same value. */
14424 if (h
->u
.weakdef
!= NULL
)
14426 BFD_ASSERT (h
->u
.weakdef
->root
.type
== bfd_link_hash_defined
14427 || h
->u
.weakdef
->root
.type
== bfd_link_hash_defweak
);
14428 h
->root
.u
.def
.section
= h
->u
.weakdef
->root
.u
.def
.section
;
14429 h
->root
.u
.def
.value
= h
->u
.weakdef
->root
.u
.def
.value
;
14433 /* If there are no non-GOT references, we do not need a copy
14435 if (!h
->non_got_ref
)
14438 /* This is a reference to a symbol defined by a dynamic object which
14439 is not a function. */
14441 /* If we are creating a shared library, we must presume that the
14442 only references to the symbol are via the global offset table.
14443 For such cases we need not do anything here; the relocations will
14444 be handled correctly by relocate_section. Relocatable executables
14445 can reference data in shared objects directly, so we don't need to
14446 do anything here. */
14447 if (bfd_link_pic (info
) || globals
->root
.is_relocatable_executable
)
14450 /* We must allocate the symbol in our .dynbss section, which will
14451 become part of the .bss section of the executable. There will be
14452 an entry for this symbol in the .dynsym section. The dynamic
14453 object will contain position independent code, so all references
14454 from the dynamic object to this symbol will go through the global
14455 offset table. The dynamic linker will use the .dynsym entry to
14456 determine the address it must put in the global offset table, so
14457 both the dynamic object and the regular object will refer to the
14458 same memory location for the variable. */
14459 s
= bfd_get_linker_section (dynobj
, ".dynbss");
14460 BFD_ASSERT (s
!= NULL
);
14462 /* If allowed, we must generate a R_ARM_COPY reloc to tell the dynamic
14463 linker to copy the initial value out of the dynamic object and into
14464 the runtime process image. We need to remember the offset into the
14465 .rel(a).bss section we are going to use. */
14466 if (info
->nocopyreloc
== 0
14467 && (h
->root
.u
.def
.section
->flags
& SEC_ALLOC
) != 0
14472 srel
= bfd_get_linker_section (dynobj
, RELOC_SECTION (globals
, ".bss"));
14473 elf32_arm_allocate_dynrelocs (info
, srel
, 1);
14477 return _bfd_elf_adjust_dynamic_copy (info
, h
, s
);
14480 /* Allocate space in .plt, .got and associated reloc sections for
14484 allocate_dynrelocs_for_symbol (struct elf_link_hash_entry
*h
, void * inf
)
14486 struct bfd_link_info
*info
;
14487 struct elf32_arm_link_hash_table
*htab
;
14488 struct elf32_arm_link_hash_entry
*eh
;
14489 struct elf_dyn_relocs
*p
;
14491 if (h
->root
.type
== bfd_link_hash_indirect
)
14494 eh
= (struct elf32_arm_link_hash_entry
*) h
;
14496 info
= (struct bfd_link_info
*) inf
;
14497 htab
= elf32_arm_hash_table (info
);
14501 if ((htab
->root
.dynamic_sections_created
|| h
->type
== STT_GNU_IFUNC
)
14502 && h
->plt
.refcount
> 0)
14504 /* Make sure this symbol is output as a dynamic symbol.
14505 Undefined weak syms won't yet be marked as dynamic. */
14506 if (h
->dynindx
== -1
14507 && !h
->forced_local
)
14509 if (! bfd_elf_link_record_dynamic_symbol (info
, h
))
14513 /* If the call in the PLT entry binds locally, the associated
14514 GOT entry should use an R_ARM_IRELATIVE relocation instead of
14515 the usual R_ARM_JUMP_SLOT. Put it in the .iplt section rather
14516 than the .plt section. */
14517 if (h
->type
== STT_GNU_IFUNC
&& SYMBOL_CALLS_LOCAL (info
, h
))
14520 if (eh
->plt
.noncall_refcount
== 0
14521 && SYMBOL_REFERENCES_LOCAL (info
, h
))
14522 /* All non-call references can be resolved directly.
14523 This means that they can (and in some cases, must)
14524 resolve directly to the run-time target, rather than
14525 to the PLT. That in turns means that any .got entry
14526 would be equal to the .igot.plt entry, so there's
14527 no point having both. */
14528 h
->got
.refcount
= 0;
14531 if (bfd_link_pic (info
)
14533 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h
))
14535 elf32_arm_allocate_plt_entry (info
, eh
->is_iplt
, &h
->plt
, &eh
->plt
);
14537 /* If this symbol is not defined in a regular file, and we are
14538 not generating a shared library, then set the symbol to this
14539 location in the .plt. This is required to make function
14540 pointers compare as equal between the normal executable and
14541 the shared library. */
14542 if (! bfd_link_pic (info
)
14543 && !h
->def_regular
)
14545 h
->root
.u
.def
.section
= htab
->root
.splt
;
14546 h
->root
.u
.def
.value
= h
->plt
.offset
;
14548 /* Make sure the function is not marked as Thumb, in case
14549 it is the target of an ABS32 relocation, which will
14550 point to the PLT entry. */
14551 ARM_SET_SYM_BRANCH_TYPE (h
->target_internal
, ST_BRANCH_TO_ARM
);
14554 /* VxWorks executables have a second set of relocations for
14555 each PLT entry. They go in a separate relocation section,
14556 which is processed by the kernel loader. */
14557 if (htab
->vxworks_p
&& !bfd_link_pic (info
))
14559 /* There is a relocation for the initial PLT entry:
14560 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_. */
14561 if (h
->plt
.offset
== htab
->plt_header_size
)
14562 elf32_arm_allocate_dynrelocs (info
, htab
->srelplt2
, 1);
14564 /* There are two extra relocations for each subsequent
14565 PLT entry: an R_ARM_32 relocation for the GOT entry,
14566 and an R_ARM_32 relocation for the PLT entry. */
14567 elf32_arm_allocate_dynrelocs (info
, htab
->srelplt2
, 2);
14572 h
->plt
.offset
= (bfd_vma
) -1;
14578 h
->plt
.offset
= (bfd_vma
) -1;
14582 eh
= (struct elf32_arm_link_hash_entry
*) h
;
14583 eh
->tlsdesc_got
= (bfd_vma
) -1;
14585 if (h
->got
.refcount
> 0)
14589 int tls_type
= elf32_arm_hash_entry (h
)->tls_type
;
14592 /* Make sure this symbol is output as a dynamic symbol.
14593 Undefined weak syms won't yet be marked as dynamic. */
14594 if (h
->dynindx
== -1
14595 && !h
->forced_local
)
14597 if (! bfd_elf_link_record_dynamic_symbol (info
, h
))
14601 if (!htab
->symbian_p
)
14603 s
= htab
->root
.sgot
;
14604 h
->got
.offset
= s
->size
;
14606 if (tls_type
== GOT_UNKNOWN
)
14609 if (tls_type
== GOT_NORMAL
)
14610 /* Non-TLS symbols need one GOT slot. */
14614 if (tls_type
& GOT_TLS_GDESC
)
14616 /* R_ARM_TLS_DESC needs 2 GOT slots. */
14618 = (htab
->root
.sgotplt
->size
14619 - elf32_arm_compute_jump_table_size (htab
));
14620 htab
->root
.sgotplt
->size
+= 8;
14621 h
->got
.offset
= (bfd_vma
) -2;
14622 /* plt.got_offset needs to know there's a TLS_DESC
14623 reloc in the middle of .got.plt. */
14624 htab
->num_tls_desc
++;
14627 if (tls_type
& GOT_TLS_GD
)
14629 /* R_ARM_TLS_GD32 needs 2 consecutive GOT slots. If
14630 the symbol is both GD and GDESC, got.offset may
14631 have been overwritten. */
14632 h
->got
.offset
= s
->size
;
14636 if (tls_type
& GOT_TLS_IE
)
14637 /* R_ARM_TLS_IE32 needs one GOT slot. */
14641 dyn
= htab
->root
.dynamic_sections_created
;
14644 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn
,
14645 bfd_link_pic (info
),
14647 && (!bfd_link_pic (info
)
14648 || !SYMBOL_REFERENCES_LOCAL (info
, h
)))
14651 if (tls_type
!= GOT_NORMAL
14652 && (bfd_link_pic (info
) || indx
!= 0)
14653 && (ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
14654 || h
->root
.type
!= bfd_link_hash_undefweak
))
14656 if (tls_type
& GOT_TLS_IE
)
14657 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelgot
, 1);
14659 if (tls_type
& GOT_TLS_GD
)
14660 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelgot
, 1);
14662 if (tls_type
& GOT_TLS_GDESC
)
14664 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelplt
, 1);
14665 /* GDESC needs a trampoline to jump to. */
14666 htab
->tls_trampoline
= -1;
14669 /* Only GD needs it. GDESC just emits one relocation per
14671 if ((tls_type
& GOT_TLS_GD
) && indx
!= 0)
14672 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelgot
, 1);
14674 else if (indx
!= -1 && !SYMBOL_REFERENCES_LOCAL (info
, h
))
14676 if (htab
->root
.dynamic_sections_created
)
14677 /* Reserve room for the GOT entry's R_ARM_GLOB_DAT relocation. */
14678 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelgot
, 1);
14680 else if (h
->type
== STT_GNU_IFUNC
14681 && eh
->plt
.noncall_refcount
== 0)
14682 /* No non-call references resolve the STT_GNU_IFUNC's PLT entry;
14683 they all resolve dynamically instead. Reserve room for the
14684 GOT entry's R_ARM_IRELATIVE relocation. */
14685 elf32_arm_allocate_irelocs (info
, htab
->root
.srelgot
, 1);
14686 else if (bfd_link_pic (info
)
14687 && (ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
14688 || h
->root
.type
!= bfd_link_hash_undefweak
))
14689 /* Reserve room for the GOT entry's R_ARM_RELATIVE relocation. */
14690 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelgot
, 1);
14694 h
->got
.offset
= (bfd_vma
) -1;
14696 /* Allocate stubs for exported Thumb functions on v4t. */
14697 if (!htab
->use_blx
&& h
->dynindx
!= -1
14699 && ARM_GET_SYM_BRANCH_TYPE (h
->target_internal
) == ST_BRANCH_TO_THUMB
14700 && ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
)
14702 struct elf_link_hash_entry
* th
;
14703 struct bfd_link_hash_entry
* bh
;
14704 struct elf_link_hash_entry
* myh
;
14708 /* Create a new symbol to regist the real location of the function. */
14709 s
= h
->root
.u
.def
.section
;
14710 sprintf (name
, "__real_%s", h
->root
.root
.string
);
14711 _bfd_generic_link_add_one_symbol (info
, s
->owner
,
14712 name
, BSF_GLOBAL
, s
,
14713 h
->root
.u
.def
.value
,
14714 NULL
, TRUE
, FALSE
, &bh
);
14716 myh
= (struct elf_link_hash_entry
*) bh
;
14717 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
14718 myh
->forced_local
= 1;
14719 ARM_SET_SYM_BRANCH_TYPE (myh
->target_internal
, ST_BRANCH_TO_THUMB
);
14720 eh
->export_glue
= myh
;
14721 th
= record_arm_to_thumb_glue (info
, h
);
14722 /* Point the symbol at the stub. */
14723 h
->type
= ELF_ST_INFO (ELF_ST_BIND (h
->type
), STT_FUNC
);
14724 ARM_SET_SYM_BRANCH_TYPE (h
->target_internal
, ST_BRANCH_TO_ARM
);
14725 h
->root
.u
.def
.section
= th
->root
.u
.def
.section
;
14726 h
->root
.u
.def
.value
= th
->root
.u
.def
.value
& ~1;
14729 if (eh
->dyn_relocs
== NULL
)
14732 /* In the shared -Bsymbolic case, discard space allocated for
14733 dynamic pc-relative relocs against symbols which turn out to be
14734 defined in regular objects. For the normal shared case, discard
14735 space for pc-relative relocs that have become local due to symbol
14736 visibility changes. */
14738 if (bfd_link_pic (info
) || htab
->root
.is_relocatable_executable
)
14740 /* Relocs that use pc_count are PC-relative forms, which will appear
14741 on something like ".long foo - ." or "movw REG, foo - .". We want
14742 calls to protected symbols to resolve directly to the function
14743 rather than going via the plt. If people want function pointer
14744 comparisons to work as expected then they should avoid writing
14745 assembly like ".long foo - .". */
14746 if (SYMBOL_CALLS_LOCAL (info
, h
))
14748 struct elf_dyn_relocs
**pp
;
14750 for (pp
= &eh
->dyn_relocs
; (p
= *pp
) != NULL
; )
14752 p
->count
-= p
->pc_count
;
14761 if (htab
->vxworks_p
)
14763 struct elf_dyn_relocs
**pp
;
14765 for (pp
= &eh
->dyn_relocs
; (p
= *pp
) != NULL
; )
14767 if (strcmp (p
->sec
->output_section
->name
, ".tls_vars") == 0)
14774 /* Also discard relocs on undefined weak syms with non-default
14776 if (eh
->dyn_relocs
!= NULL
14777 && h
->root
.type
== bfd_link_hash_undefweak
)
14779 if (ELF_ST_VISIBILITY (h
->other
) != STV_DEFAULT
)
14780 eh
->dyn_relocs
= NULL
;
14782 /* Make sure undefined weak symbols are output as a dynamic
14784 else if (h
->dynindx
== -1
14785 && !h
->forced_local
)
14787 if (! bfd_elf_link_record_dynamic_symbol (info
, h
))
14792 else if (htab
->root
.is_relocatable_executable
&& h
->dynindx
== -1
14793 && h
->root
.type
== bfd_link_hash_new
)
14795 /* Output absolute symbols so that we can create relocations
14796 against them. For normal symbols we output a relocation
14797 against the section that contains them. */
14798 if (! bfd_elf_link_record_dynamic_symbol (info
, h
))
14805 /* For the non-shared case, discard space for relocs against
14806 symbols which turn out to need copy relocs or are not
14809 if (!h
->non_got_ref
14810 && ((h
->def_dynamic
14811 && !h
->def_regular
)
14812 || (htab
->root
.dynamic_sections_created
14813 && (h
->root
.type
== bfd_link_hash_undefweak
14814 || h
->root
.type
== bfd_link_hash_undefined
))))
14816 /* Make sure this symbol is output as a dynamic symbol.
14817 Undefined weak syms won't yet be marked as dynamic. */
14818 if (h
->dynindx
== -1
14819 && !h
->forced_local
)
14821 if (! bfd_elf_link_record_dynamic_symbol (info
, h
))
14825 /* If that succeeded, we know we'll be keeping all the
14827 if (h
->dynindx
!= -1)
14831 eh
->dyn_relocs
= NULL
;
14836 /* Finally, allocate space. */
14837 for (p
= eh
->dyn_relocs
; p
!= NULL
; p
= p
->next
)
14839 asection
*sreloc
= elf_section_data (p
->sec
)->sreloc
;
14840 if (h
->type
== STT_GNU_IFUNC
14841 && eh
->plt
.noncall_refcount
== 0
14842 && SYMBOL_REFERENCES_LOCAL (info
, h
))
14843 elf32_arm_allocate_irelocs (info
, sreloc
, p
->count
);
14845 elf32_arm_allocate_dynrelocs (info
, sreloc
, p
->count
);
14851 /* Find any dynamic relocs that apply to read-only sections. */
/* NOTE(review): garbled extraction -- code bytes kept verbatim below;
   only comments were added.

   Fragment of elf32_arm_readonly_dynrelocs: an elf_link_hash_traverse
   callback (INF is really a struct bfd_link_info *).  It walks the
   symbol's recorded dynamic relocs and, if any lands in a SEC_READONLY
   section, sets DF_TEXTREL on the link info so that a DT_TEXTREL
   dynamic entry will be emitted.  */
14854 elf32_arm_readonly_dynrelocs (struct elf_link_hash_entry
* h
, void * inf
)
14856 struct elf32_arm_link_hash_entry
* eh
;
14857 struct elf_dyn_relocs
* p
;
14859 eh
= (struct elf32_arm_link_hash_entry
*) h
;
/* Scan every dynamic reloc recorded against this symbol.  */
14860 for (p
= eh
->dyn_relocs
; p
!= NULL
; p
= p
->next
)
14862 asection
*s
= p
->sec
;
14864 if (s
!= NULL
&& (s
->flags
& SEC_READONLY
) != 0)
14866 struct bfd_link_info
*info
= (struct bfd_link_info
*) inf
;
/* A dynamic reloc applies to a read-only section: flag the output
   as needing DT_TEXTREL.  */
14868 info
->flags
|= DF_TEXTREL
;
14870 /* Not an error, just cut short the traversal. */
/* NOTE(review): garbled extraction -- code bytes kept verbatim below.

   Fragment of bfd_elf32_arm_set_byteswap_code: records the caller's
   byteswap-code flag in the ARM link hash table so later passes can
   consult it.  The second parameter (byteswap_code) is on a line that
   the extraction dropped -- TODO confirm its exact declaration.  */
14878 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info
*info
,
14881 struct elf32_arm_link_hash_table
*globals
;
14883 globals
= elf32_arm_hash_table (info
);
/* Bail out when the ARM-specific hash table is unavailable; the
   dropped lines presumably return early here -- TODO confirm.  */
14884 if (globals
== NULL
)
14887 globals
->byteswap_code
= byteswap_code
;
14890 /* Set the sizes of the dynamic sections. */
14893 elf32_arm_size_dynamic_sections (bfd
* output_bfd ATTRIBUTE_UNUSED
,
14894 struct bfd_link_info
* info
)
14899 bfd_boolean relocs
;
14901 struct elf32_arm_link_hash_table
*htab
;
14903 htab
= elf32_arm_hash_table (info
);
14907 dynobj
= elf_hash_table (info
)->dynobj
;
14908 BFD_ASSERT (dynobj
!= NULL
);
14909 check_use_blx (htab
);
14911 if (elf_hash_table (info
)->dynamic_sections_created
)
14913 /* Set the contents of the .interp section to the interpreter. */
14914 if (bfd_link_executable (info
) && !info
->nointerp
)
14916 s
= bfd_get_linker_section (dynobj
, ".interp");
14917 BFD_ASSERT (s
!= NULL
);
14918 s
->size
= sizeof ELF_DYNAMIC_INTERPRETER
;
14919 s
->contents
= (unsigned char *) ELF_DYNAMIC_INTERPRETER
;
14923 /* Set up .got offsets for local syms, and space for local dynamic
14925 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link
.next
)
14927 bfd_signed_vma
*local_got
;
14928 bfd_signed_vma
*end_local_got
;
14929 struct arm_local_iplt_info
**local_iplt_ptr
, *local_iplt
;
14930 char *local_tls_type
;
14931 bfd_vma
*local_tlsdesc_gotent
;
14932 bfd_size_type locsymcount
;
14933 Elf_Internal_Shdr
*symtab_hdr
;
14935 bfd_boolean is_vxworks
= htab
->vxworks_p
;
14936 unsigned int symndx
;
14938 if (! is_arm_elf (ibfd
))
14941 for (s
= ibfd
->sections
; s
!= NULL
; s
= s
->next
)
14943 struct elf_dyn_relocs
*p
;
14945 for (p
= (struct elf_dyn_relocs
*)
14946 elf_section_data (s
)->local_dynrel
; p
!= NULL
; p
= p
->next
)
14948 if (!bfd_is_abs_section (p
->sec
)
14949 && bfd_is_abs_section (p
->sec
->output_section
))
14951 /* Input section has been discarded, either because
14952 it is a copy of a linkonce section or due to
14953 linker script /DISCARD/, so we'll be discarding
14956 else if (is_vxworks
14957 && strcmp (p
->sec
->output_section
->name
,
14960 /* Relocations in vxworks .tls_vars sections are
14961 handled specially by the loader. */
14963 else if (p
->count
!= 0)
14965 srel
= elf_section_data (p
->sec
)->sreloc
;
14966 elf32_arm_allocate_dynrelocs (info
, srel
, p
->count
);
14967 if ((p
->sec
->output_section
->flags
& SEC_READONLY
) != 0)
14968 info
->flags
|= DF_TEXTREL
;
14973 local_got
= elf_local_got_refcounts (ibfd
);
14977 symtab_hdr
= & elf_symtab_hdr (ibfd
);
14978 locsymcount
= symtab_hdr
->sh_info
;
14979 end_local_got
= local_got
+ locsymcount
;
14980 local_iplt_ptr
= elf32_arm_local_iplt (ibfd
);
14981 local_tls_type
= elf32_arm_local_got_tls_type (ibfd
);
14982 local_tlsdesc_gotent
= elf32_arm_local_tlsdesc_gotent (ibfd
);
14984 s
= htab
->root
.sgot
;
14985 srel
= htab
->root
.srelgot
;
14986 for (; local_got
< end_local_got
;
14987 ++local_got
, ++local_iplt_ptr
, ++local_tls_type
,
14988 ++local_tlsdesc_gotent
, ++symndx
)
14990 *local_tlsdesc_gotent
= (bfd_vma
) -1;
14991 local_iplt
= *local_iplt_ptr
;
14992 if (local_iplt
!= NULL
)
14994 struct elf_dyn_relocs
*p
;
14996 if (local_iplt
->root
.refcount
> 0)
14998 elf32_arm_allocate_plt_entry (info
, TRUE
,
15001 if (local_iplt
->arm
.noncall_refcount
== 0)
15002 /* All references to the PLT are calls, so all
15003 non-call references can resolve directly to the
15004 run-time target. This means that the .got entry
15005 would be the same as the .igot.plt entry, so there's
15006 no point creating both. */
15011 BFD_ASSERT (local_iplt
->arm
.noncall_refcount
== 0);
15012 local_iplt
->root
.offset
= (bfd_vma
) -1;
15015 for (p
= local_iplt
->dyn_relocs
; p
!= NULL
; p
= p
->next
)
15019 psrel
= elf_section_data (p
->sec
)->sreloc
;
15020 if (local_iplt
->arm
.noncall_refcount
== 0)
15021 elf32_arm_allocate_irelocs (info
, psrel
, p
->count
);
15023 elf32_arm_allocate_dynrelocs (info
, psrel
, p
->count
);
15026 if (*local_got
> 0)
15028 Elf_Internal_Sym
*isym
;
15030 *local_got
= s
->size
;
15031 if (*local_tls_type
& GOT_TLS_GD
)
15032 /* TLS_GD relocs need an 8-byte structure in the GOT. */
15034 if (*local_tls_type
& GOT_TLS_GDESC
)
15036 *local_tlsdesc_gotent
= htab
->root
.sgotplt
->size
15037 - elf32_arm_compute_jump_table_size (htab
);
15038 htab
->root
.sgotplt
->size
+= 8;
15039 *local_got
= (bfd_vma
) -2;
15040 /* plt.got_offset needs to know there's a TLS_DESC
15041 reloc in the middle of .got.plt. */
15042 htab
->num_tls_desc
++;
15044 if (*local_tls_type
& GOT_TLS_IE
)
15047 if (*local_tls_type
& GOT_NORMAL
)
15049 /* If the symbol is both GD and GDESC, *local_got
15050 may have been overwritten. */
15051 *local_got
= s
->size
;
15055 isym
= bfd_sym_from_r_symndx (&htab
->sym_cache
, ibfd
, symndx
);
15059 /* If all references to an STT_GNU_IFUNC PLT are calls,
15060 then all non-call references, including this GOT entry,
15061 resolve directly to the run-time target. */
15062 if (ELF32_ST_TYPE (isym
->st_info
) == STT_GNU_IFUNC
15063 && (local_iplt
== NULL
15064 || local_iplt
->arm
.noncall_refcount
== 0))
15065 elf32_arm_allocate_irelocs (info
, srel
, 1);
15066 else if (bfd_link_pic (info
) || output_bfd
->flags
& DYNAMIC
)
15068 if ((bfd_link_pic (info
) && !(*local_tls_type
& GOT_TLS_GDESC
))
15069 || *local_tls_type
& GOT_TLS_GD
)
15070 elf32_arm_allocate_dynrelocs (info
, srel
, 1);
15072 if (bfd_link_pic (info
) && *local_tls_type
& GOT_TLS_GDESC
)
15074 elf32_arm_allocate_dynrelocs (info
,
15075 htab
->root
.srelplt
, 1);
15076 htab
->tls_trampoline
= -1;
15081 *local_got
= (bfd_vma
) -1;
15085 if (htab
->tls_ldm_got
.refcount
> 0)
15087 /* Allocate two GOT entries and one dynamic relocation (if necessary)
15088 for R_ARM_TLS_LDM32 relocations. */
15089 htab
->tls_ldm_got
.offset
= htab
->root
.sgot
->size
;
15090 htab
->root
.sgot
->size
+= 8;
15091 if (bfd_link_pic (info
))
15092 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelgot
, 1);
15095 htab
->tls_ldm_got
.offset
= -1;
15097 /* Allocate global sym .plt and .got entries, and space for global
15098 sym dynamic relocs. */
15099 elf_link_hash_traverse (& htab
->root
, allocate_dynrelocs_for_symbol
, info
);
15101 /* Here we rummage through the found bfds to collect glue information. */
15102 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link
.next
)
15104 if (! is_arm_elf (ibfd
))
15107 /* Initialise mapping tables for code/data. */
15108 bfd_elf32_arm_init_maps (ibfd
);
15110 if (!bfd_elf32_arm_process_before_allocation (ibfd
, info
)
15111 || !bfd_elf32_arm_vfp11_erratum_scan (ibfd
, info
)
15112 || !bfd_elf32_arm_stm32l4xx_erratum_scan (ibfd
, info
))
15113 /* xgettext:c-format */
15114 _bfd_error_handler (_("Errors encountered processing file %s"),
15118 /* Allocate space for the glue sections now that we've sized them. */
15119 bfd_elf32_arm_allocate_interworking_sections (info
);
15121 /* For every jump slot reserved in the sgotplt, reloc_count is
15122 incremented. However, when we reserve space for TLS descriptors,
15123 it's not incremented, so in order to compute the space reserved
15124 for them, it suffices to multiply the reloc count by the jump
15126 if (htab
->root
.srelplt
)
15127 htab
->sgotplt_jump_table_size
= elf32_arm_compute_jump_table_size(htab
);
15129 if (htab
->tls_trampoline
)
15131 if (htab
->root
.splt
->size
== 0)
15132 htab
->root
.splt
->size
+= htab
->plt_header_size
;
15134 htab
->tls_trampoline
= htab
->root
.splt
->size
;
15135 htab
->root
.splt
->size
+= htab
->plt_entry_size
;
15137 /* If we're not using lazy TLS relocations, don't generate the
15138 PLT and GOT entries they require. */
15139 if (!(info
->flags
& DF_BIND_NOW
))
15141 htab
->dt_tlsdesc_got
= htab
->root
.sgot
->size
;
15142 htab
->root
.sgot
->size
+= 4;
15144 htab
->dt_tlsdesc_plt
= htab
->root
.splt
->size
;
15145 htab
->root
.splt
->size
+= 4 * ARRAY_SIZE (dl_tlsdesc_lazy_trampoline
);
15149 /* The check_relocs and adjust_dynamic_symbol entry points have
15150 determined the sizes of the various dynamic sections. Allocate
15151 memory for them. */
15154 for (s
= dynobj
->sections
; s
!= NULL
; s
= s
->next
)
15158 if ((s
->flags
& SEC_LINKER_CREATED
) == 0)
15161 /* It's OK to base decisions on the section name, because none
15162 of the dynobj section names depend upon the input files. */
15163 name
= bfd_get_section_name (dynobj
, s
);
15165 if (s
== htab
->root
.splt
)
15167 /* Remember whether there is a PLT. */
15168 plt
= s
->size
!= 0;
15170 else if (CONST_STRNEQ (name
, ".rel"))
15174 /* Remember whether there are any reloc sections other
15175 than .rel(a).plt and .rela.plt.unloaded. */
15176 if (s
!= htab
->root
.srelplt
&& s
!= htab
->srelplt2
)
15179 /* We use the reloc_count field as a counter if we need
15180 to copy relocs into the output file. */
15181 s
->reloc_count
= 0;
15184 else if (s
!= htab
->root
.sgot
15185 && s
!= htab
->root
.sgotplt
15186 && s
!= htab
->root
.iplt
15187 && s
!= htab
->root
.igotplt
15188 && s
!= htab
->sdynbss
)
15190 /* It's not one of our sections, so don't allocate space. */
15196 /* If we don't need this section, strip it from the
15197 output file. This is mostly to handle .rel(a).bss and
15198 .rel(a).plt. We must create both sections in
15199 create_dynamic_sections, because they must be created
15200 before the linker maps input sections to output
15201 sections. The linker does that before
15202 adjust_dynamic_symbol is called, and it is that
15203 function which decides whether anything needs to go
15204 into these sections. */
15205 s
->flags
|= SEC_EXCLUDE
;
15209 if ((s
->flags
& SEC_HAS_CONTENTS
) == 0)
15212 /* Allocate memory for the section contents. */
15213 s
->contents
= (unsigned char *) bfd_zalloc (dynobj
, s
->size
);
15214 if (s
->contents
== NULL
)
15218 if (elf_hash_table (info
)->dynamic_sections_created
)
15220 /* Add some entries to the .dynamic section. We fill in the
15221 values later, in elf32_arm_finish_dynamic_sections, but we
15222 must add the entries now so that we get the correct size for
15223 the .dynamic section. The DT_DEBUG entry is filled in by the
15224 dynamic linker and used by the debugger. */
15225 #define add_dynamic_entry(TAG, VAL) \
15226 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
15228 if (bfd_link_executable (info
))
15230 if (!add_dynamic_entry (DT_DEBUG
, 0))
15236 if ( !add_dynamic_entry (DT_PLTGOT
, 0)
15237 || !add_dynamic_entry (DT_PLTRELSZ
, 0)
15238 || !add_dynamic_entry (DT_PLTREL
,
15239 htab
->use_rel
? DT_REL
: DT_RELA
)
15240 || !add_dynamic_entry (DT_JMPREL
, 0))
15243 if (htab
->dt_tlsdesc_plt
&&
15244 (!add_dynamic_entry (DT_TLSDESC_PLT
,0)
15245 || !add_dynamic_entry (DT_TLSDESC_GOT
,0)))
15253 if (!add_dynamic_entry (DT_REL
, 0)
15254 || !add_dynamic_entry (DT_RELSZ
, 0)
15255 || !add_dynamic_entry (DT_RELENT
, RELOC_SIZE (htab
)))
15260 if (!add_dynamic_entry (DT_RELA
, 0)
15261 || !add_dynamic_entry (DT_RELASZ
, 0)
15262 || !add_dynamic_entry (DT_RELAENT
, RELOC_SIZE (htab
)))
15267 /* If any dynamic relocs apply to a read-only section,
15268 then we need a DT_TEXTREL entry. */
15269 if ((info
->flags
& DF_TEXTREL
) == 0)
15270 elf_link_hash_traverse (& htab
->root
, elf32_arm_readonly_dynrelocs
,
15273 if ((info
->flags
& DF_TEXTREL
) != 0)
15275 if (!add_dynamic_entry (DT_TEXTREL
, 0))
15278 if (htab
->vxworks_p
15279 && !elf_vxworks_add_dynamic_entries (output_bfd
, info
))
15282 #undef add_dynamic_entry
15287 /* Size sections even though they're not dynamic. We use it to setup
15288 _TLS_MODULE_BASE_, if needed. */
15291 elf32_arm_always_size_sections (bfd
*output_bfd
,
15292 struct bfd_link_info
*info
)
15296 if (bfd_link_relocatable (info
))
15299 tls_sec
= elf_hash_table (info
)->tls_sec
;
15303 struct elf_link_hash_entry
*tlsbase
;
15305 tlsbase
= elf_link_hash_lookup
15306 (elf_hash_table (info
), "_TLS_MODULE_BASE_", TRUE
, TRUE
, FALSE
);
15310 struct bfd_link_hash_entry
*bh
= NULL
;
15311 const struct elf_backend_data
*bed
15312 = get_elf_backend_data (output_bfd
);
15314 if (!(_bfd_generic_link_add_one_symbol
15315 (info
, output_bfd
, "_TLS_MODULE_BASE_", BSF_LOCAL
,
15316 tls_sec
, 0, NULL
, FALSE
,
15317 bed
->collect
, &bh
)))
15320 tlsbase
->type
= STT_TLS
;
15321 tlsbase
= (struct elf_link_hash_entry
*)bh
;
15322 tlsbase
->def_regular
= 1;
15323 tlsbase
->other
= STV_HIDDEN
;
15324 (*bed
->elf_backend_hide_symbol
) (info
, tlsbase
, TRUE
);
15330 /* Finish up dynamic symbol handling. We set the contents of various
15331 dynamic sections here. */
15334 elf32_arm_finish_dynamic_symbol (bfd
* output_bfd
,
15335 struct bfd_link_info
* info
,
15336 struct elf_link_hash_entry
* h
,
15337 Elf_Internal_Sym
* sym
)
15339 struct elf32_arm_link_hash_table
*htab
;
15340 struct elf32_arm_link_hash_entry
*eh
;
15342 htab
= elf32_arm_hash_table (info
);
15346 eh
= (struct elf32_arm_link_hash_entry
*) h
;
15348 if (h
->plt
.offset
!= (bfd_vma
) -1)
15352 BFD_ASSERT (h
->dynindx
!= -1);
15353 if (! elf32_arm_populate_plt_entry (output_bfd
, info
, &h
->plt
, &eh
->plt
,
15358 if (!h
->def_regular
)
15360 /* Mark the symbol as undefined, rather than as defined in
15361 the .plt section. */
15362 sym
->st_shndx
= SHN_UNDEF
;
15363 /* If the symbol is weak we need to clear the value.
15364 Otherwise, the PLT entry would provide a definition for
15365 the symbol even if the symbol wasn't defined anywhere,
15366 and so the symbol would never be NULL. Leave the value if
15367 there were any relocations where pointer equality matters
15368 (this is a clue for the dynamic linker, to make function
15369 pointer comparisons work between an application and shared
15371 if (!h
->ref_regular_nonweak
|| !h
->pointer_equality_needed
)
15374 else if (eh
->is_iplt
&& eh
->plt
.noncall_refcount
!= 0)
15376 /* At least one non-call relocation references this .iplt entry,
15377 so the .iplt entry is the function's canonical address. */
15378 sym
->st_info
= ELF_ST_INFO (ELF_ST_BIND (sym
->st_info
), STT_FUNC
);
15379 ARM_SET_SYM_BRANCH_TYPE (sym
->st_target_internal
, ST_BRANCH_TO_ARM
);
15380 sym
->st_shndx
= (_bfd_elf_section_from_bfd_section
15381 (output_bfd
, htab
->root
.iplt
->output_section
));
15382 sym
->st_value
= (h
->plt
.offset
15383 + htab
->root
.iplt
->output_section
->vma
15384 + htab
->root
.iplt
->output_offset
);
15391 Elf_Internal_Rela rel
;
15393 /* This symbol needs a copy reloc. Set it up. */
15394 BFD_ASSERT (h
->dynindx
!= -1
15395 && (h
->root
.type
== bfd_link_hash_defined
15396 || h
->root
.type
== bfd_link_hash_defweak
));
15399 BFD_ASSERT (s
!= NULL
);
15402 rel
.r_offset
= (h
->root
.u
.def
.value
15403 + h
->root
.u
.def
.section
->output_section
->vma
15404 + h
->root
.u
.def
.section
->output_offset
);
15405 rel
.r_info
= ELF32_R_INFO (h
->dynindx
, R_ARM_COPY
);
15406 elf32_arm_add_dynreloc (output_bfd
, info
, s
, &rel
);
15409 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. On VxWorks,
15410 the _GLOBAL_OFFSET_TABLE_ symbol is not absolute: it is relative
15411 to the ".got" section. */
15412 if (h
== htab
->root
.hdynamic
15413 || (!htab
->vxworks_p
&& h
== htab
->root
.hgot
))
15414 sym
->st_shndx
= SHN_ABS
;
/* NOTE(review): garbled extraction -- code bytes kept verbatim below.

   Fragment of arm_put_trampoline: copies COUNT instruction words from
   TEMPLATE into an output buffer (`contents', declared on a dropped
   line -- TODO confirm), rewriting each "bx rX" into "mov pc, rX"
   when htab->fix_v4bx == 1, i.e. when the target (ARMv4) lacks BX.  */
15420 arm_put_trampoline (struct elf32_arm_link_hash_table
*htab
, bfd
*output_bfd
,
15422 const unsigned long *template, unsigned count
)
15426 for (ix
= 0; ix
!= count
; ix
++)
15428 unsigned long insn
= template[ix
];
15430 /* Emit mov pc,rx if bx is not permitted. */
15431 if (htab
->fix_v4bx
== 1 && (insn
& 0x0ffffff0) == 0x012fff10)
15432 insn
= (insn
& 0xf000000f) | 0x01a0f000;
/* Write the (possibly rewritten) instruction at word index IX.  */
15433 put_arm_insn (htab
, output_bfd
, insn
, (char *)contents
+ ix
*4);
15437 /* Install the special first PLT entry for elf32-arm-nacl. Unlike
15438 other variants, NaCl needs this entry in a static executable's
15439 .iplt too. When we're handling that case, GOT_DISPLACEMENT is
15440 zero. For .iplt really only the last bundle is useful, and .iplt
15441 could have a shorter first entry, with each individual PLT entry's
15442 relative branch calculated differently so it targets the last
15443 bundle instead of the instruction before it (labelled .Lplt_tail
15444 above). But it's simpler to keep the size and layout of PLT0
15445 consistent with the dynamic case, at the cost of some dead code at
15446 the start of .iplt and the one dead store to the stack at the start
/* NOTE(review): garbled extraction -- code bytes kept verbatim below.
   The first two template words get the GOT displacement patched in as
   a movw/movt immediate pair; the remaining template words are copied
   unchanged.  */
15449 arm_nacl_put_plt0 (struct elf32_arm_link_hash_table
*htab
, bfd
*output_bfd
,
15450 asection
*plt
, bfd_vma got_displacement
)
/* Word 0: movw with the low half of the GOT displacement.  */
15454 put_arm_insn (htab
, output_bfd
,
15455 elf32_arm_nacl_plt0_entry
[0]
15456 | arm_movw_immediate (got_displacement
),
15457 plt
->contents
+ 0);
/* Word 1: movt with the high half of the GOT displacement.  */
15458 put_arm_insn (htab
, output_bfd
,
15459 elf32_arm_nacl_plt0_entry
[1]
15460 | arm_movt_immediate (got_displacement
),
15461 plt
->contents
+ 4);
/* Remaining words of the PLT0 template are emitted as-is.  */
15463 for (i
= 2; i
< ARRAY_SIZE (elf32_arm_nacl_plt0_entry
); ++i
)
15464 put_arm_insn (htab
, output_bfd
,
15465 elf32_arm_nacl_plt0_entry
[i
],
15466 plt
->contents
+ (i
* 4));
15469 /* Finish up the dynamic sections. */
15472 elf32_arm_finish_dynamic_sections (bfd
* output_bfd
, struct bfd_link_info
* info
)
15477 struct elf32_arm_link_hash_table
*htab
;
15479 htab
= elf32_arm_hash_table (info
);
15483 dynobj
= elf_hash_table (info
)->dynobj
;
15485 sgot
= htab
->root
.sgotplt
;
15486 /* A broken linker script might have discarded the dynamic sections.
15487 Catch this here so that we do not seg-fault later on. */
15488 if (sgot
!= NULL
&& bfd_is_abs_section (sgot
->output_section
))
15490 sdyn
= bfd_get_linker_section (dynobj
, ".dynamic");
15492 if (elf_hash_table (info
)->dynamic_sections_created
)
15495 Elf32_External_Dyn
*dyncon
, *dynconend
;
15497 splt
= htab
->root
.splt
;
15498 BFD_ASSERT (splt
!= NULL
&& sdyn
!= NULL
);
15499 BFD_ASSERT (htab
->symbian_p
|| sgot
!= NULL
);
15501 dyncon
= (Elf32_External_Dyn
*) sdyn
->contents
;
15502 dynconend
= (Elf32_External_Dyn
*) (sdyn
->contents
+ sdyn
->size
);
15504 for (; dyncon
< dynconend
; dyncon
++)
15506 Elf_Internal_Dyn dyn
;
15510 bfd_elf32_swap_dyn_in (dynobj
, dyncon
, &dyn
);
15517 if (htab
->vxworks_p
15518 && elf_vxworks_finish_dynamic_entry (output_bfd
, &dyn
))
15519 bfd_elf32_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
15524 goto get_vma_if_bpabi
;
15527 goto get_vma_if_bpabi
;
15530 goto get_vma_if_bpabi
;
15532 name
= ".gnu.version";
15533 goto get_vma_if_bpabi
;
15535 name
= ".gnu.version_d";
15536 goto get_vma_if_bpabi
;
15538 name
= ".gnu.version_r";
15539 goto get_vma_if_bpabi
;
15542 name
= htab
->symbian_p
? ".got" : ".got.plt";
15545 name
= RELOC_SECTION (htab
, ".plt");
15547 s
= bfd_get_linker_section (dynobj
, name
);
15550 (*_bfd_error_handler
)
15551 (_("could not find section %s"), name
);
15552 bfd_set_error (bfd_error_invalid_operation
);
15555 if (!htab
->symbian_p
)
15556 dyn
.d_un
.d_ptr
= s
->output_section
->vma
+ s
->output_offset
;
15558 /* In the BPABI, tags in the PT_DYNAMIC section point
15559 at the file offset, not the memory address, for the
15560 convenience of the post linker. */
15561 dyn
.d_un
.d_ptr
= s
->output_section
->filepos
+ s
->output_offset
;
15562 bfd_elf32_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
15566 if (htab
->symbian_p
)
15571 s
= htab
->root
.srelplt
;
15572 BFD_ASSERT (s
!= NULL
);
15573 dyn
.d_un
.d_val
= s
->size
;
15574 bfd_elf32_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
15579 if (!htab
->symbian_p
)
15581 /* My reading of the SVR4 ABI indicates that the
15582 procedure linkage table relocs (DT_JMPREL) should be
15583 included in the overall relocs (DT_REL). This is
15584 what Solaris does. However, UnixWare can not handle
15585 that case. Therefore, we override the DT_RELSZ entry
15586 here to make it not include the JMPREL relocs. Since
15587 the linker script arranges for .rel(a).plt to follow all
15588 other relocation sections, we don't have to worry
15589 about changing the DT_REL entry. */
15590 s
= htab
->root
.srelplt
;
15592 dyn
.d_un
.d_val
-= s
->size
;
15593 bfd_elf32_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
15596 /* Fall through. */
15600 /* In the BPABI, the DT_REL tag must point at the file
15601 offset, not the VMA, of the first relocation
15602 section. So, we use code similar to that in
15603 elflink.c, but do not check for SHF_ALLOC on the
15604 relcoation section, since relocations sections are
15605 never allocated under the BPABI. The comments above
15606 about Unixware notwithstanding, we include all of the
15607 relocations here. */
15608 if (htab
->symbian_p
)
15611 type
= ((dyn
.d_tag
== DT_REL
|| dyn
.d_tag
== DT_RELSZ
)
15612 ? SHT_REL
: SHT_RELA
);
15613 dyn
.d_un
.d_val
= 0;
15614 for (i
= 1; i
< elf_numsections (output_bfd
); i
++)
15616 Elf_Internal_Shdr
*hdr
15617 = elf_elfsections (output_bfd
)[i
];
15618 if (hdr
->sh_type
== type
)
15620 if (dyn
.d_tag
== DT_RELSZ
15621 || dyn
.d_tag
== DT_RELASZ
)
15622 dyn
.d_un
.d_val
+= hdr
->sh_size
;
15623 else if ((ufile_ptr
) hdr
->sh_offset
15624 <= dyn
.d_un
.d_val
- 1)
15625 dyn
.d_un
.d_val
= hdr
->sh_offset
;
15628 bfd_elf32_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
15632 case DT_TLSDESC_PLT
:
15633 s
= htab
->root
.splt
;
15634 dyn
.d_un
.d_ptr
= (s
->output_section
->vma
+ s
->output_offset
15635 + htab
->dt_tlsdesc_plt
);
15636 bfd_elf32_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
15639 case DT_TLSDESC_GOT
:
15640 s
= htab
->root
.sgot
;
15641 dyn
.d_un
.d_ptr
= (s
->output_section
->vma
+ s
->output_offset
15642 + htab
->dt_tlsdesc_got
);
15643 bfd_elf32_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
15646 /* Set the bottom bit of DT_INIT/FINI if the
15647 corresponding function is Thumb. */
15649 name
= info
->init_function
;
15652 name
= info
->fini_function
;
15654 /* If it wasn't set by elf_bfd_final_link
15655 then there is nothing to adjust. */
15656 if (dyn
.d_un
.d_val
!= 0)
15658 struct elf_link_hash_entry
* eh
;
15660 eh
= elf_link_hash_lookup (elf_hash_table (info
), name
,
15661 FALSE
, FALSE
, TRUE
);
15663 && ARM_GET_SYM_BRANCH_TYPE (eh
->target_internal
)
15664 == ST_BRANCH_TO_THUMB
)
15666 dyn
.d_un
.d_val
|= 1;
15667 bfd_elf32_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
15674 /* Fill in the first entry in the procedure linkage table. */
15675 if (splt
->size
> 0 && htab
->plt_header_size
)
15677 const bfd_vma
*plt0_entry
;
15678 bfd_vma got_address
, plt_address
, got_displacement
;
15680 /* Calculate the addresses of the GOT and PLT. */
15681 got_address
= sgot
->output_section
->vma
+ sgot
->output_offset
;
15682 plt_address
= splt
->output_section
->vma
+ splt
->output_offset
;
15684 if (htab
->vxworks_p
)
15686 /* The VxWorks GOT is relocated by the dynamic linker.
15687 Therefore, we must emit relocations rather than simply
15688 computing the values now. */
15689 Elf_Internal_Rela rel
;
15691 plt0_entry
= elf32_arm_vxworks_exec_plt0_entry
;
15692 put_arm_insn (htab
, output_bfd
, plt0_entry
[0],
15693 splt
->contents
+ 0);
15694 put_arm_insn (htab
, output_bfd
, plt0_entry
[1],
15695 splt
->contents
+ 4);
15696 put_arm_insn (htab
, output_bfd
, plt0_entry
[2],
15697 splt
->contents
+ 8);
15698 bfd_put_32 (output_bfd
, got_address
, splt
->contents
+ 12);
15700 /* Generate a relocation for _GLOBAL_OFFSET_TABLE_. */
15701 rel
.r_offset
= plt_address
+ 12;
15702 rel
.r_info
= ELF32_R_INFO (htab
->root
.hgot
->indx
, R_ARM_ABS32
);
15704 SWAP_RELOC_OUT (htab
) (output_bfd
, &rel
,
15705 htab
->srelplt2
->contents
);
15707 else if (htab
->nacl_p
)
15708 arm_nacl_put_plt0 (htab
, output_bfd
, splt
,
15709 got_address
+ 8 - (plt_address
+ 16));
15710 else if (using_thumb_only (htab
))
15712 got_displacement
= got_address
- (plt_address
+ 12);
15714 plt0_entry
= elf32_thumb2_plt0_entry
;
15715 put_arm_insn (htab
, output_bfd
, plt0_entry
[0],
15716 splt
->contents
+ 0);
15717 put_arm_insn (htab
, output_bfd
, plt0_entry
[1],
15718 splt
->contents
+ 4);
15719 put_arm_insn (htab
, output_bfd
, plt0_entry
[2],
15720 splt
->contents
+ 8);
15722 bfd_put_32 (output_bfd
, got_displacement
, splt
->contents
+ 12);
15726 got_displacement
= got_address
- (plt_address
+ 16);
15728 plt0_entry
= elf32_arm_plt0_entry
;
15729 put_arm_insn (htab
, output_bfd
, plt0_entry
[0],
15730 splt
->contents
+ 0);
15731 put_arm_insn (htab
, output_bfd
, plt0_entry
[1],
15732 splt
->contents
+ 4);
15733 put_arm_insn (htab
, output_bfd
, plt0_entry
[2],
15734 splt
->contents
+ 8);
15735 put_arm_insn (htab
, output_bfd
, plt0_entry
[3],
15736 splt
->contents
+ 12);
15738 #ifdef FOUR_WORD_PLT
15739 /* The displacement value goes in the otherwise-unused
15740 last word of the second entry. */
15741 bfd_put_32 (output_bfd
, got_displacement
, splt
->contents
+ 28);
15743 bfd_put_32 (output_bfd
, got_displacement
, splt
->contents
+ 16);
15748 /* UnixWare sets the entsize of .plt to 4, although that doesn't
15749 really seem like the right value. */
15750 if (splt
->output_section
->owner
== output_bfd
)
15751 elf_section_data (splt
->output_section
)->this_hdr
.sh_entsize
= 4;
15753 if (htab
->dt_tlsdesc_plt
)
15755 bfd_vma got_address
15756 = sgot
->output_section
->vma
+ sgot
->output_offset
;
15757 bfd_vma gotplt_address
= (htab
->root
.sgot
->output_section
->vma
15758 + htab
->root
.sgot
->output_offset
);
15759 bfd_vma plt_address
15760 = splt
->output_section
->vma
+ splt
->output_offset
;
15762 arm_put_trampoline (htab
, output_bfd
,
15763 splt
->contents
+ htab
->dt_tlsdesc_plt
,
15764 dl_tlsdesc_lazy_trampoline
, 6);
15766 bfd_put_32 (output_bfd
,
15767 gotplt_address
+ htab
->dt_tlsdesc_got
15768 - (plt_address
+ htab
->dt_tlsdesc_plt
)
15769 - dl_tlsdesc_lazy_trampoline
[6],
15770 splt
->contents
+ htab
->dt_tlsdesc_plt
+ 24);
15771 bfd_put_32 (output_bfd
,
15772 got_address
- (plt_address
+ htab
->dt_tlsdesc_plt
)
15773 - dl_tlsdesc_lazy_trampoline
[7],
15774 splt
->contents
+ htab
->dt_tlsdesc_plt
+ 24 + 4);
15777 if (htab
->tls_trampoline
)
15779 arm_put_trampoline (htab
, output_bfd
,
15780 splt
->contents
+ htab
->tls_trampoline
,
15781 tls_trampoline
, 3);
15782 #ifdef FOUR_WORD_PLT
15783 bfd_put_32 (output_bfd
, 0x00000000,
15784 splt
->contents
+ htab
->tls_trampoline
+ 12);
15788 if (htab
->vxworks_p
15789 && !bfd_link_pic (info
)
15790 && htab
->root
.splt
->size
> 0)
15792 /* Correct the .rel(a).plt.unloaded relocations. They will have
15793 incorrect symbol indexes. */
15797 num_plts
= ((htab
->root
.splt
->size
- htab
->plt_header_size
)
15798 / htab
->plt_entry_size
);
15799 p
= htab
->srelplt2
->contents
+ RELOC_SIZE (htab
);
15801 for (; num_plts
; num_plts
--)
15803 Elf_Internal_Rela rel
;
15805 SWAP_RELOC_IN (htab
) (output_bfd
, p
, &rel
);
15806 rel
.r_info
= ELF32_R_INFO (htab
->root
.hgot
->indx
, R_ARM_ABS32
);
15807 SWAP_RELOC_OUT (htab
) (output_bfd
, &rel
, p
);
15808 p
+= RELOC_SIZE (htab
);
15810 SWAP_RELOC_IN (htab
) (output_bfd
, p
, &rel
);
15811 rel
.r_info
= ELF32_R_INFO (htab
->root
.hplt
->indx
, R_ARM_ABS32
);
15812 SWAP_RELOC_OUT (htab
) (output_bfd
, &rel
, p
);
15813 p
+= RELOC_SIZE (htab
);
15818 if (htab
->nacl_p
&& htab
->root
.iplt
!= NULL
&& htab
->root
.iplt
->size
> 0)
15819 /* NaCl uses a special first entry in .iplt too. */
15820 arm_nacl_put_plt0 (htab
, output_bfd
, htab
->root
.iplt
, 0);
15822 /* Fill in the first three entries in the global offset table. */
15825 if (sgot
->size
> 0)
15828 bfd_put_32 (output_bfd
, (bfd_vma
) 0, sgot
->contents
);
15830 bfd_put_32 (output_bfd
,
15831 sdyn
->output_section
->vma
+ sdyn
->output_offset
,
15833 bfd_put_32 (output_bfd
, (bfd_vma
) 0, sgot
->contents
+ 4);
15834 bfd_put_32 (output_bfd
, (bfd_vma
) 0, sgot
->contents
+ 8);
15837 elf_section_data (sgot
->output_section
)->this_hdr
.sh_entsize
= 4;
15844 elf32_arm_post_process_headers (bfd
* abfd
, struct bfd_link_info
* link_info ATTRIBUTE_UNUSED
)
15846 Elf_Internal_Ehdr
* i_ehdrp
; /* ELF file header, internal form. */
15847 struct elf32_arm_link_hash_table
*globals
;
15848 struct elf_segment_map
*m
;
15850 i_ehdrp
= elf_elfheader (abfd
);
15852 if (EF_ARM_EABI_VERSION (i_ehdrp
->e_flags
) == EF_ARM_EABI_UNKNOWN
)
15853 i_ehdrp
->e_ident
[EI_OSABI
] = ELFOSABI_ARM
;
15855 _bfd_elf_post_process_headers (abfd
, link_info
);
15856 i_ehdrp
->e_ident
[EI_ABIVERSION
] = ARM_ELF_ABI_VERSION
;
15860 globals
= elf32_arm_hash_table (link_info
);
15861 if (globals
!= NULL
&& globals
->byteswap_code
)
15862 i_ehdrp
->e_flags
|= EF_ARM_BE8
;
15865 if (EF_ARM_EABI_VERSION (i_ehdrp
->e_flags
) == EF_ARM_EABI_VER5
15866 && ((i_ehdrp
->e_type
== ET_DYN
) || (i_ehdrp
->e_type
== ET_EXEC
)))
15868 int abi
= bfd_elf_get_obj_attr_int (abfd
, OBJ_ATTR_PROC
, Tag_ABI_VFP_args
);
15869 if (abi
== AEABI_VFP_args_vfp
)
15870 i_ehdrp
->e_flags
|= EF_ARM_ABI_FLOAT_HARD
;
15872 i_ehdrp
->e_flags
|= EF_ARM_ABI_FLOAT_SOFT
;
15875 /* Scan segment to set p_flags attribute if it contains only sections with
15876 SHF_ARM_PURECODE flag. */
15877 for (m
= elf_seg_map (abfd
); m
!= NULL
; m
= m
->next
)
15883 for (j
= 0; j
< m
->count
; j
++)
15885 if (!(elf_section_flags (m
->sections
[j
]) & SHF_ARM_PURECODE
))
15891 m
->p_flags_valid
= 1;
15896 static enum elf_reloc_type_class
15897 elf32_arm_reloc_type_class (const struct bfd_link_info
*info ATTRIBUTE_UNUSED
,
15898 const asection
*rel_sec ATTRIBUTE_UNUSED
,
15899 const Elf_Internal_Rela
*rela
)
15901 switch ((int) ELF32_R_TYPE (rela
->r_info
))
15903 case R_ARM_RELATIVE
:
15904 return reloc_class_relative
;
15905 case R_ARM_JUMP_SLOT
:
15906 return reloc_class_plt
;
15908 return reloc_class_copy
;
15909 case R_ARM_IRELATIVE
:
15910 return reloc_class_ifunc
;
15912 return reloc_class_normal
;
15917 elf32_arm_final_write_processing (bfd
*abfd
, bfd_boolean linker ATTRIBUTE_UNUSED
)
15919 bfd_arm_update_notes (abfd
, ARM_NOTE_SECTION
);
15922 /* Return TRUE if this is an unwinding table entry. */
15925 is_arm_elf_unwind_section_name (bfd
* abfd ATTRIBUTE_UNUSED
, const char * name
)
15927 return (CONST_STRNEQ (name
, ELF_STRING_ARM_unwind
)
15928 || CONST_STRNEQ (name
, ELF_STRING_ARM_unwind_once
));
15932 /* Set the type and flags for an ARM section. We do this by
15933 the section name, which is a hack, but ought to work. */
15936 elf32_arm_fake_sections (bfd
* abfd
, Elf_Internal_Shdr
* hdr
, asection
* sec
)
15940 name
= bfd_get_section_name (abfd
, sec
);
15942 if (is_arm_elf_unwind_section_name (abfd
, name
))
15944 hdr
->sh_type
= SHT_ARM_EXIDX
;
15945 hdr
->sh_flags
|= SHF_LINK_ORDER
;
15948 if (sec
->flags
& SEC_ELF_PURECODE
)
15949 hdr
->sh_flags
|= SHF_ARM_PURECODE
;
15954 /* Handle an ARM specific section when reading an object file. This is
15955 called when bfd_section_from_shdr finds a section with an unknown
15959 elf32_arm_section_from_shdr (bfd
*abfd
,
15960 Elf_Internal_Shdr
* hdr
,
15964 /* There ought to be a place to keep ELF backend specific flags, but
15965 at the moment there isn't one. We just keep track of the
15966 sections by their name, instead. Fortunately, the ABI gives
15967 names for all the ARM specific sections, so we will probably get
15969 switch (hdr
->sh_type
)
15971 case SHT_ARM_EXIDX
:
15972 case SHT_ARM_PREEMPTMAP
:
15973 case SHT_ARM_ATTRIBUTES
:
15980 if (! _bfd_elf_make_section_from_shdr (abfd
, hdr
, name
, shindex
))
15986 static _arm_elf_section_data
*
15987 get_arm_elf_section_data (asection
* sec
)
15989 if (sec
&& sec
->owner
&& is_arm_elf (sec
->owner
))
15990 return elf32_arm_section_data (sec
);
15998 struct bfd_link_info
*info
;
16001 int (*func
) (void *, const char *, Elf_Internal_Sym
*,
16002 asection
*, struct elf_link_hash_entry
*);
16003 } output_arch_syminfo
;
/* Kinds of AAELF mapping symbol; the values index the
   {"$a", "$t", "$d"} name table in elf32_arm_output_map_sym.  */
enum map_symbol_type
{
  ARM_MAP_ARM,
  ARM_MAP_THUMB,
  ARM_MAP_DATA
};
16016 elf32_arm_output_map_sym (output_arch_syminfo
*osi
,
16017 enum map_symbol_type type
,
16020 static const char *names
[3] = {"$a", "$t", "$d"};
16021 Elf_Internal_Sym sym
;
16023 sym
.st_value
= osi
->sec
->output_section
->vma
16024 + osi
->sec
->output_offset
16028 sym
.st_info
= ELF_ST_INFO (STB_LOCAL
, STT_NOTYPE
);
16029 sym
.st_shndx
= osi
->sec_shndx
;
16030 sym
.st_target_internal
= 0;
16031 elf32_arm_section_map_add (osi
->sec
, names
[type
][1], offset
);
16032 return osi
->func (osi
->flaginfo
, names
[type
], &sym
, osi
->sec
, NULL
) == 1;
16035 /* Output mapping symbols for the PLT entry described by ROOT_PLT and ARM_PLT.
16036 IS_IPLT_ENTRY_P says whether the PLT is in .iplt rather than .plt. */
16039 elf32_arm_output_plt_map_1 (output_arch_syminfo
*osi
,
16040 bfd_boolean is_iplt_entry_p
,
16041 union gotplt_union
*root_plt
,
16042 struct arm_plt_info
*arm_plt
)
16044 struct elf32_arm_link_hash_table
*htab
;
16045 bfd_vma addr
, plt_header_size
;
16047 if (root_plt
->offset
== (bfd_vma
) -1)
16050 htab
= elf32_arm_hash_table (osi
->info
);
16054 if (is_iplt_entry_p
)
16056 osi
->sec
= htab
->root
.iplt
;
16057 plt_header_size
= 0;
16061 osi
->sec
= htab
->root
.splt
;
16062 plt_header_size
= htab
->plt_header_size
;
16064 osi
->sec_shndx
= (_bfd_elf_section_from_bfd_section
16065 (osi
->info
->output_bfd
, osi
->sec
->output_section
));
16067 addr
= root_plt
->offset
& -2;
16068 if (htab
->symbian_p
)
16070 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_ARM
, addr
))
16072 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_DATA
, addr
+ 4))
16075 else if (htab
->vxworks_p
)
16077 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_ARM
, addr
))
16079 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_DATA
, addr
+ 8))
16081 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_ARM
, addr
+ 12))
16083 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_DATA
, addr
+ 20))
16086 else if (htab
->nacl_p
)
16088 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_ARM
, addr
))
16091 else if (using_thumb_only (htab
))
16093 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_THUMB
, addr
))
16098 bfd_boolean thumb_stub_p
;
16100 thumb_stub_p
= elf32_arm_plt_needs_thumb_stub_p (osi
->info
, arm_plt
);
16103 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_THUMB
, addr
- 4))
16106 #ifdef FOUR_WORD_PLT
16107 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_ARM
, addr
))
16109 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_DATA
, addr
+ 12))
16112 /* A three-word PLT with no Thumb thunk contains only Arm code,
16113 so only need to output a mapping symbol for the first PLT entry and
16114 entries with thumb thunks. */
16115 if (thumb_stub_p
|| addr
== plt_header_size
)
16117 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_ARM
, addr
))
16126 /* Output mapping symbols for PLT entries associated with H. */
16129 elf32_arm_output_plt_map (struct elf_link_hash_entry
*h
, void *inf
)
16131 output_arch_syminfo
*osi
= (output_arch_syminfo
*) inf
;
16132 struct elf32_arm_link_hash_entry
*eh
;
16134 if (h
->root
.type
== bfd_link_hash_indirect
)
16137 if (h
->root
.type
== bfd_link_hash_warning
)
16138 /* When warning symbols are created, they **replace** the "real"
16139 entry in the hash table, thus we never get to see the real
16140 symbol in a hash traversal. So look at it now. */
16141 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
16143 eh
= (struct elf32_arm_link_hash_entry
*) h
;
16144 return elf32_arm_output_plt_map_1 (osi
, SYMBOL_CALLS_LOCAL (osi
->info
, h
),
16145 &h
->plt
, &eh
->plt
);
16148 /* Bind a veneered symbol to its veneer identified by its hash entry
16149 STUB_ENTRY. The veneered location thus loose its symbol. */
16152 arm_stub_claim_sym (struct elf32_arm_stub_hash_entry
*stub_entry
)
16154 struct elf32_arm_link_hash_entry
*hash
= stub_entry
->h
;
16157 hash
->root
.root
.u
.def
.section
= stub_entry
->stub_sec
;
16158 hash
->root
.root
.u
.def
.value
= stub_entry
->stub_offset
;
16159 hash
->root
.size
= stub_entry
->stub_size
;
16162 /* Output a single local symbol for a generated stub. */
16165 elf32_arm_output_stub_sym (output_arch_syminfo
*osi
, const char *name
,
16166 bfd_vma offset
, bfd_vma size
)
16168 Elf_Internal_Sym sym
;
16170 sym
.st_value
= osi
->sec
->output_section
->vma
16171 + osi
->sec
->output_offset
16173 sym
.st_size
= size
;
16175 sym
.st_info
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
16176 sym
.st_shndx
= osi
->sec_shndx
;
16177 sym
.st_target_internal
= 0;
16178 return osi
->func (osi
->flaginfo
, name
, &sym
, osi
->sec
, NULL
) == 1;
16182 arm_map_one_stub (struct bfd_hash_entry
* gen_entry
,
16185 struct elf32_arm_stub_hash_entry
*stub_entry
;
16186 asection
*stub_sec
;
16189 output_arch_syminfo
*osi
;
16190 const insn_sequence
*template_sequence
;
16191 enum stub_insn_type prev_type
;
16194 enum map_symbol_type sym_type
;
16196 /* Massage our args to the form they really have. */
16197 stub_entry
= (struct elf32_arm_stub_hash_entry
*) gen_entry
;
16198 osi
= (output_arch_syminfo
*) in_arg
;
16200 stub_sec
= stub_entry
->stub_sec
;
16202 /* Ensure this stub is attached to the current section being
16204 if (stub_sec
!= osi
->sec
)
16207 addr
= (bfd_vma
) stub_entry
->stub_offset
;
16208 template_sequence
= stub_entry
->stub_template
;
16210 if (arm_stub_sym_claimed (stub_entry
->stub_type
))
16211 arm_stub_claim_sym (stub_entry
);
16214 stub_name
= stub_entry
->output_name
;
16215 switch (template_sequence
[0].type
)
16218 if (!elf32_arm_output_stub_sym (osi
, stub_name
, addr
,
16219 stub_entry
->stub_size
))
16224 if (!elf32_arm_output_stub_sym (osi
, stub_name
, addr
| 1,
16225 stub_entry
->stub_size
))
16234 prev_type
= DATA_TYPE
;
16236 for (i
= 0; i
< stub_entry
->stub_template_size
; i
++)
16238 switch (template_sequence
[i
].type
)
16241 sym_type
= ARM_MAP_ARM
;
16246 sym_type
= ARM_MAP_THUMB
;
16250 sym_type
= ARM_MAP_DATA
;
16258 if (template_sequence
[i
].type
!= prev_type
)
16260 prev_type
= template_sequence
[i
].type
;
16261 if (!elf32_arm_output_map_sym (osi
, sym_type
, addr
+ size
))
16265 switch (template_sequence
[i
].type
)
16289 /* Output mapping symbols for linker generated sections,
16290 and for those data-only sections that do not have a
16294 elf32_arm_output_arch_local_syms (bfd
*output_bfd
,
16295 struct bfd_link_info
*info
,
16297 int (*func
) (void *, const char *,
16298 Elf_Internal_Sym
*,
16300 struct elf_link_hash_entry
*))
16302 output_arch_syminfo osi
;
16303 struct elf32_arm_link_hash_table
*htab
;
16305 bfd_size_type size
;
16308 htab
= elf32_arm_hash_table (info
);
16312 check_use_blx (htab
);
16314 osi
.flaginfo
= flaginfo
;
16318 /* Add a $d mapping symbol to data-only sections that
16319 don't have any mapping symbol. This may result in (harmless) redundant
16320 mapping symbols. */
16321 for (input_bfd
= info
->input_bfds
;
16323 input_bfd
= input_bfd
->link
.next
)
16325 if ((input_bfd
->flags
& (BFD_LINKER_CREATED
| HAS_SYMS
)) == HAS_SYMS
)
16326 for (osi
.sec
= input_bfd
->sections
;
16328 osi
.sec
= osi
.sec
->next
)
16330 if (osi
.sec
->output_section
!= NULL
16331 && ((osi
.sec
->output_section
->flags
& (SEC_ALLOC
| SEC_CODE
))
16333 && (osi
.sec
->flags
& (SEC_HAS_CONTENTS
| SEC_LINKER_CREATED
))
16334 == SEC_HAS_CONTENTS
16335 && get_arm_elf_section_data (osi
.sec
) != NULL
16336 && get_arm_elf_section_data (osi
.sec
)->mapcount
== 0
16337 && osi
.sec
->size
> 0
16338 && (osi
.sec
->flags
& SEC_EXCLUDE
) == 0)
16340 osi
.sec_shndx
= _bfd_elf_section_from_bfd_section
16341 (output_bfd
, osi
.sec
->output_section
);
16342 if (osi
.sec_shndx
!= (int)SHN_BAD
)
16343 elf32_arm_output_map_sym (&osi
, ARM_MAP_DATA
, 0);
16348 /* ARM->Thumb glue. */
16349 if (htab
->arm_glue_size
> 0)
16351 osi
.sec
= bfd_get_linker_section (htab
->bfd_of_glue_owner
,
16352 ARM2THUMB_GLUE_SECTION_NAME
);
16354 osi
.sec_shndx
= _bfd_elf_section_from_bfd_section
16355 (output_bfd
, osi
.sec
->output_section
);
16356 if (bfd_link_pic (info
) || htab
->root
.is_relocatable_executable
16357 || htab
->pic_veneer
)
16358 size
= ARM2THUMB_PIC_GLUE_SIZE
;
16359 else if (htab
->use_blx
)
16360 size
= ARM2THUMB_V5_STATIC_GLUE_SIZE
;
16362 size
= ARM2THUMB_STATIC_GLUE_SIZE
;
16364 for (offset
= 0; offset
< htab
->arm_glue_size
; offset
+= size
)
16366 elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, offset
);
16367 elf32_arm_output_map_sym (&osi
, ARM_MAP_DATA
, offset
+ size
- 4);
16371 /* Thumb->ARM glue. */
16372 if (htab
->thumb_glue_size
> 0)
16374 osi
.sec
= bfd_get_linker_section (htab
->bfd_of_glue_owner
,
16375 THUMB2ARM_GLUE_SECTION_NAME
);
16377 osi
.sec_shndx
= _bfd_elf_section_from_bfd_section
16378 (output_bfd
, osi
.sec
->output_section
);
16379 size
= THUMB2ARM_GLUE_SIZE
;
16381 for (offset
= 0; offset
< htab
->thumb_glue_size
; offset
+= size
)
16383 elf32_arm_output_map_sym (&osi
, ARM_MAP_THUMB
, offset
);
16384 elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, offset
+ 4);
16388 /* ARMv4 BX veneers. */
16389 if (htab
->bx_glue_size
> 0)
16391 osi
.sec
= bfd_get_linker_section (htab
->bfd_of_glue_owner
,
16392 ARM_BX_GLUE_SECTION_NAME
);
16394 osi
.sec_shndx
= _bfd_elf_section_from_bfd_section
16395 (output_bfd
, osi
.sec
->output_section
);
16397 elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, 0);
16400 /* Long calls stubs. */
16401 if (htab
->stub_bfd
&& htab
->stub_bfd
->sections
)
16403 asection
* stub_sec
;
16405 for (stub_sec
= htab
->stub_bfd
->sections
;
16407 stub_sec
= stub_sec
->next
)
16409 /* Ignore non-stub sections. */
16410 if (!strstr (stub_sec
->name
, STUB_SUFFIX
))
16413 osi
.sec
= stub_sec
;
16415 osi
.sec_shndx
= _bfd_elf_section_from_bfd_section
16416 (output_bfd
, osi
.sec
->output_section
);
16418 bfd_hash_traverse (&htab
->stub_hash_table
, arm_map_one_stub
, &osi
);
16422 /* Finally, output mapping symbols for the PLT. */
16423 if (htab
->root
.splt
&& htab
->root
.splt
->size
> 0)
16425 osi
.sec
= htab
->root
.splt
;
16426 osi
.sec_shndx
= (_bfd_elf_section_from_bfd_section
16427 (output_bfd
, osi
.sec
->output_section
));
16429 /* Output mapping symbols for the plt header. SymbianOS does not have a
16431 if (htab
->vxworks_p
)
16433 /* VxWorks shared libraries have no PLT header. */
16434 if (!bfd_link_pic (info
))
16436 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, 0))
16438 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_DATA
, 12))
16442 else if (htab
->nacl_p
)
16444 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, 0))
16447 else if (using_thumb_only (htab
))
16449 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_THUMB
, 0))
16451 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_DATA
, 12))
16453 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_THUMB
, 16))
16456 else if (!htab
->symbian_p
)
16458 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, 0))
16460 #ifndef FOUR_WORD_PLT
16461 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_DATA
, 16))
16466 if (htab
->nacl_p
&& htab
->root
.iplt
&& htab
->root
.iplt
->size
> 0)
16468 /* NaCl uses a special first entry in .iplt too. */
16469 osi
.sec
= htab
->root
.iplt
;
16470 osi
.sec_shndx
= (_bfd_elf_section_from_bfd_section
16471 (output_bfd
, osi
.sec
->output_section
));
16472 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, 0))
16475 if ((htab
->root
.splt
&& htab
->root
.splt
->size
> 0)
16476 || (htab
->root
.iplt
&& htab
->root
.iplt
->size
> 0))
16478 elf_link_hash_traverse (&htab
->root
, elf32_arm_output_plt_map
, &osi
);
16479 for (input_bfd
= info
->input_bfds
;
16481 input_bfd
= input_bfd
->link
.next
)
16483 struct arm_local_iplt_info
**local_iplt
;
16484 unsigned int i
, num_syms
;
16486 local_iplt
= elf32_arm_local_iplt (input_bfd
);
16487 if (local_iplt
!= NULL
)
16489 num_syms
= elf_symtab_hdr (input_bfd
).sh_info
;
16490 for (i
= 0; i
< num_syms
; i
++)
16491 if (local_iplt
[i
] != NULL
16492 && !elf32_arm_output_plt_map_1 (&osi
, TRUE
,
16493 &local_iplt
[i
]->root
,
16494 &local_iplt
[i
]->arm
))
16499 if (htab
->dt_tlsdesc_plt
!= 0)
16501 /* Mapping symbols for the lazy tls trampoline. */
16502 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, htab
->dt_tlsdesc_plt
))
16505 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_DATA
,
16506 htab
->dt_tlsdesc_plt
+ 24))
16509 if (htab
->tls_trampoline
!= 0)
16511 /* Mapping symbols for the tls trampoline. */
16512 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, htab
->tls_trampoline
))
16514 #ifdef FOUR_WORD_PLT
16515 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_DATA
,
16516 htab
->tls_trampoline
+ 12))
16524 /* Allocate target specific section data. */
16527 elf32_arm_new_section_hook (bfd
*abfd
, asection
*sec
)
16529 if (!sec
->used_by_bfd
)
16531 _arm_elf_section_data
*sdata
;
16532 bfd_size_type amt
= sizeof (*sdata
);
16534 sdata
= (_arm_elf_section_data
*) bfd_zalloc (abfd
, amt
);
16537 sec
->used_by_bfd
= sdata
;
16540 return _bfd_elf_new_section_hook (abfd
, sec
);
16544 /* Used to order a list of mapping symbols by address. */
16547 elf32_arm_compare_mapping (const void * a
, const void * b
)
16549 const elf32_arm_section_map
*amap
= (const elf32_arm_section_map
*) a
;
16550 const elf32_arm_section_map
*bmap
= (const elf32_arm_section_map
*) b
;
16552 if (amap
->vma
> bmap
->vma
)
16554 else if (amap
->vma
< bmap
->vma
)
16556 else if (amap
->type
> bmap
->type
)
16557 /* Ensure results do not depend on the host qsort for objects with
16558 multiple mapping symbols at the same address by sorting on type
16561 else if (amap
->type
< bmap
->type
)
16567 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
16569 static unsigned long
16570 offset_prel31 (unsigned long addr
, bfd_vma offset
)
16572 return (addr
& ~0x7ffffffful
) | ((addr
+ offset
) & 0x7ffffffful
);
16575 /* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
16579 copy_exidx_entry (bfd
*output_bfd
, bfd_byte
*to
, bfd_byte
*from
, bfd_vma offset
)
16581 unsigned long first_word
= bfd_get_32 (output_bfd
, from
);
16582 unsigned long second_word
= bfd_get_32 (output_bfd
, from
+ 4);
16584 /* High bit of first word is supposed to be zero. */
16585 if ((first_word
& 0x80000000ul
) == 0)
16586 first_word
= offset_prel31 (first_word
, offset
);
16588 /* If the high bit of the first word is clear, and the bit pattern is not 0x1
16589 (EXIDX_CANTUNWIND), this is an offset to an .ARM.extab entry. */
16590 if ((second_word
!= 0x1) && ((second_word
& 0x80000000ul
) == 0))
16591 second_word
= offset_prel31 (second_word
, offset
);
16593 bfd_put_32 (output_bfd
, first_word
, to
);
16594 bfd_put_32 (output_bfd
, second_word
, to
+ 4);
16597 /* Data for make_branch_to_a8_stub(). */
16599 struct a8_branch_to_stub_data
16601 asection
*writing_section
;
16602 bfd_byte
*contents
;
16606 /* Helper to insert branches to Cortex-A8 erratum stubs in the right
16607 places for a particular section. */
16610 make_branch_to_a8_stub (struct bfd_hash_entry
*gen_entry
,
16613 struct elf32_arm_stub_hash_entry
*stub_entry
;
16614 struct a8_branch_to_stub_data
*data
;
16615 bfd_byte
*contents
;
16616 unsigned long branch_insn
;
16617 bfd_vma veneered_insn_loc
, veneer_entry_loc
;
16618 bfd_signed_vma branch_offset
;
16622 stub_entry
= (struct elf32_arm_stub_hash_entry
*) gen_entry
;
16623 data
= (struct a8_branch_to_stub_data
*) in_arg
;
16625 if (stub_entry
->target_section
!= data
->writing_section
16626 || stub_entry
->stub_type
< arm_stub_a8_veneer_lwm
)
16629 contents
= data
->contents
;
16631 /* We use target_section as Cortex-A8 erratum workaround stubs are only
16632 generated when both source and target are in the same section. */
16633 veneered_insn_loc
= stub_entry
->target_section
->output_section
->vma
16634 + stub_entry
->target_section
->output_offset
16635 + stub_entry
->source_value
;
16637 veneer_entry_loc
= stub_entry
->stub_sec
->output_section
->vma
16638 + stub_entry
->stub_sec
->output_offset
16639 + stub_entry
->stub_offset
;
16641 if (stub_entry
->stub_type
== arm_stub_a8_veneer_blx
)
16642 veneered_insn_loc
&= ~3u;
16644 branch_offset
= veneer_entry_loc
- veneered_insn_loc
- 4;
16646 abfd
= stub_entry
->target_section
->owner
;
16647 loc
= stub_entry
->source_value
;
16649 /* We attempt to avoid this condition by setting stubs_always_after_branch
16650 in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
16651 This check is just to be on the safe side... */
16652 if ((veneered_insn_loc
& ~0xfff) == (veneer_entry_loc
& ~0xfff))
16654 (*_bfd_error_handler
) (_("%B: error: Cortex-A8 erratum stub is "
16655 "allocated in unsafe location"), abfd
);
16659 switch (stub_entry
->stub_type
)
16661 case arm_stub_a8_veneer_b
:
16662 case arm_stub_a8_veneer_b_cond
:
16663 branch_insn
= 0xf0009000;
16666 case arm_stub_a8_veneer_blx
:
16667 branch_insn
= 0xf000e800;
16670 case arm_stub_a8_veneer_bl
:
16672 unsigned int i1
, j1
, i2
, j2
, s
;
16674 branch_insn
= 0xf000d000;
16677 if (branch_offset
< -16777216 || branch_offset
> 16777214)
16679 /* There's not much we can do apart from complain if this
16681 (*_bfd_error_handler
) (_("%B: error: Cortex-A8 erratum stub out "
16682 "of range (input file too large)"), abfd
);
16686 /* i1 = not(j1 eor s), so:
16688 j1 = (not i1) eor s. */
16690 branch_insn
|= (branch_offset
>> 1) & 0x7ff;
16691 branch_insn
|= ((branch_offset
>> 12) & 0x3ff) << 16;
16692 i2
= (branch_offset
>> 22) & 1;
16693 i1
= (branch_offset
>> 23) & 1;
16694 s
= (branch_offset
>> 24) & 1;
16697 branch_insn
|= j2
<< 11;
16698 branch_insn
|= j1
<< 13;
16699 branch_insn
|= s
<< 26;
16708 bfd_put_16 (abfd
, (branch_insn
>> 16) & 0xffff, &contents
[loc
]);
16709 bfd_put_16 (abfd
, branch_insn
& 0xffff, &contents
[loc
+ 2]);
16714 /* Beginning of stm32l4xx work-around. */
16716 /* Functions encoding instructions necessary for the emission of the
16717 fix-stm32l4xx-629360.
16718 Encoding is extracted from the
16719 ARM (C) Architecture Reference Manual
16720 ARMv7-A and ARMv7-R edition
16721 ARM DDI 0406C.b (ID072512). */
16723 static inline bfd_vma
16724 create_instruction_branch_absolute (int branch_offset
)
16726 /* A8.8.18 B (A8-334)
16727 B target_address (Encoding T4). */
16728 /* 1111 - 0Sii - iiii - iiii - 10J1 - Jiii - iiii - iiii. */
16729 /* jump offset is: S:I1:I2:imm10:imm11:0. */
16730 /* with : I1 = NOT (J1 EOR S) I2 = NOT (J2 EOR S). */
16732 int s
= ((branch_offset
& 0x1000000) >> 24);
16733 int j1
= s
^ !((branch_offset
& 0x800000) >> 23);
16734 int j2
= s
^ !((branch_offset
& 0x400000) >> 22);
16736 if (branch_offset
< -(1 << 24) || branch_offset
>= (1 << 24))
16737 BFD_ASSERT (0 && "Error: branch out of range. Cannot create branch.");
16739 bfd_vma patched_inst
= 0xf0009000
16741 | (((unsigned long) (branch_offset
) >> 12) & 0x3ff) << 16 /* imm10. */
16742 | j1
<< 13 /* J1. */
16743 | j2
<< 11 /* J2. */
16744 | (((unsigned long) (branch_offset
) >> 1) & 0x7ff); /* imm11. */
16746 return patched_inst
;
16749 static inline bfd_vma
16750 create_instruction_ldmia (int base_reg
, int wback
, int reg_mask
)
16752 /* A8.8.57 LDM/LDMIA/LDMFD (A8-396)
16753 LDMIA Rn!, {Ra, Rb, Rc, ...} (Encoding T2). */
16754 bfd_vma patched_inst
= 0xe8900000
16755 | (/*W=*/wback
<< 21)
16757 | (reg_mask
& 0x0000ffff);
16759 return patched_inst
;
16762 static inline bfd_vma
16763 create_instruction_ldmdb (int base_reg
, int wback
, int reg_mask
)
16765 /* A8.8.60 LDMDB/LDMEA (A8-402)
16766 LDMDB Rn!, {Ra, Rb, Rc, ...} (Encoding T1). */
16767 bfd_vma patched_inst
= 0xe9100000
16768 | (/*W=*/wback
<< 21)
16770 | (reg_mask
& 0x0000ffff);
16772 return patched_inst
;
16775 static inline bfd_vma
16776 create_instruction_mov (int target_reg
, int source_reg
)
16778 /* A8.8.103 MOV (register) (A8-486)
16779 MOV Rd, Rm (Encoding T1). */
16780 bfd_vma patched_inst
= 0x4600
16781 | (target_reg
& 0x7)
16782 | ((target_reg
& 0x8) >> 3) << 7
16783 | (source_reg
<< 3);
16785 return patched_inst
;
16788 static inline bfd_vma
16789 create_instruction_sub (int target_reg
, int source_reg
, int value
)
16791 /* A8.8.221 SUB (immediate) (A8-708)
16792 SUB Rd, Rn, #value (Encoding T3). */
16793 bfd_vma patched_inst
= 0xf1a00000
16794 | (target_reg
<< 8)
16795 | (source_reg
<< 16)
16797 | ((value
& 0x800) >> 11) << 26
16798 | ((value
& 0x700) >> 8) << 12
16801 return patched_inst
;
16804 static inline bfd_vma
16805 create_instruction_vldmia (int base_reg
, int is_dp
, int wback
, int num_words
,
16808 /* A8.8.332 VLDM (A8-922)
16809 VLMD{MODE} Rn{!}, {list} (Encoding T1 or T2). */
16810 bfd_vma patched_inst
= (is_dp
? 0xec900b00 : 0xec900a00)
16811 | (/*W=*/wback
<< 21)
16813 | (num_words
& 0x000000ff)
16814 | (((unsigned)first_reg
>> 1) & 0x0000000f) << 12
16815 | (first_reg
& 0x00000001) << 22;
16817 return patched_inst
;
16820 static inline bfd_vma
16821 create_instruction_vldmdb (int base_reg
, int is_dp
, int num_words
,
16824 /* A8.8.332 VLDM (A8-922)
16825 VLMD{MODE} Rn!, {} (Encoding T1 or T2). */
16826 bfd_vma patched_inst
= (is_dp
? 0xed300b00 : 0xed300a00)
16828 | (num_words
& 0x000000ff)
16829 | (((unsigned)first_reg
>>1 ) & 0x0000000f) << 12
16830 | (first_reg
& 0x00000001) << 22;
16832 return patched_inst
;
16835 static inline bfd_vma
16836 create_instruction_udf_w (int value
)
16838 /* A8.8.247 UDF (A8-758)
16839 Undefined (Encoding T2). */
16840 bfd_vma patched_inst
= 0xf7f0a000
16841 | (value
& 0x00000fff)
16842 | (value
& 0x000f0000) << 16;
16844 return patched_inst
;
16847 static inline bfd_vma
16848 create_instruction_udf (int value
)
16850 /* A8.8.247 UDF (A8-758)
16851 Undefined (Encoding T1). */
16852 bfd_vma patched_inst
= 0xde00
16855 return patched_inst
;
16858 /* Functions writing an instruction in memory, returning the next
16859 memory position to write to. */
16861 static inline bfd_byte
*
16862 push_thumb2_insn32 (struct elf32_arm_link_hash_table
* htab
,
16863 bfd
* output_bfd
, bfd_byte
*pt
, insn32 insn
)
16865 put_thumb2_insn (htab
, output_bfd
, insn
, pt
);
16869 static inline bfd_byte
*
16870 push_thumb2_insn16 (struct elf32_arm_link_hash_table
* htab
,
16871 bfd
* output_bfd
, bfd_byte
*pt
, insn32 insn
)
16873 put_thumb_insn (htab
, output_bfd
, insn
, pt
);
16877 /* Function filling up a region in memory with T1 and T2 UDFs taking
16878 care of alignment. */
16881 stm32l4xx_fill_stub_udf (struct elf32_arm_link_hash_table
* htab
,
16883 const bfd_byte
* const base_stub_contents
,
16884 bfd_byte
* const from_stub_contents
,
16885 const bfd_byte
* const end_stub_contents
)
16887 bfd_byte
*current_stub_contents
= from_stub_contents
;
16889 /* Fill the remaining of the stub with deterministic contents : UDF
16891 Check if realignment is needed on modulo 4 frontier using T1, to
16893 if ((current_stub_contents
< end_stub_contents
)
16894 && !((current_stub_contents
- base_stub_contents
) % 2)
16895 && ((current_stub_contents
- base_stub_contents
) % 4))
16896 current_stub_contents
=
16897 push_thumb2_insn16 (htab
, output_bfd
, current_stub_contents
,
16898 create_instruction_udf (0));
16900 for (; current_stub_contents
< end_stub_contents
;)
16901 current_stub_contents
=
16902 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
16903 create_instruction_udf_w (0));
16905 return current_stub_contents
;
16908 /* Functions writing the stream of instructions equivalent to the
16909 derived sequence for ldmia, ldmdb, vldm respectively. */
static void
stm32l4xx_create_replacing_stub_ldmia (struct elf32_arm_link_hash_table * htab,
				       bfd * output_bfd,
				       const insn32 initial_insn,
				       const bfd_byte *const initial_insn_addr,
				       bfd_byte *const base_stub_contents)
{
  /* Emit, at BASE_STUB_CONTENTS, a veneer that replaces the faulty
     wide LDMIA at INITIAL_INSN_ADDR with an equivalent sequence of
     narrower loads, then branches back past the original insn.  */
  int wback = (initial_insn & 0x00200000) >> 21;
  int ri, rn = (initial_insn & 0x000F0000) >> 16;
  int insn_all_registers = initial_insn & 0x0000ffff;
  int insn_low_registers, insn_high_registers;
  int usable_register_mask;
  int nb_registers = popcount (insn_all_registers);
  int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
  int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_ldmia (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 registers load sequences that do not cause the
     hardware issue.  */
  if (nb_registers <= 8)
    {
      /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));

      /* Fill the remaining of the stub with deterministic contents.  */
      current_stub_contents =
	stm32l4xx_fill_stub_udf (htab, output_bfd,
				 base_stub_contents, current_stub_contents,
				 base_stub_contents +
				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

      return;
    }

  /* Sanity-check the architectural constraints of the original insn:
     - reg_list[13] == 0.  */
  BFD_ASSERT ((insn_all_registers & (1 << 13)) == 0);

  /* - reg_list[14] & reg_list[15] != 1.  */
  BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);

  /* - if (wback==1) reg_list[rn] == 0.  */
  BFD_ASSERT (!wback || !restore_rn);

  /* - nb_registers > 8.  */
  BFD_ASSERT (popcount (insn_all_registers) > 8);

  /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */

  /* In the following algorithm, we split this wide LDM using 2 LDM insns:
    - One with the 7 lowest registers (register mask 0x007F)
      This LDM will finally contain between 2 and 7 registers
    - One with the 7 highest registers (register mask 0xDF80)
      This ldm will finally contain between 2 and 7 registers.  */
  insn_low_registers = insn_all_registers & 0x007F;
  insn_high_registers = insn_all_registers & 0xDF80;

  /* A spare register may be needed during this veneer to temporarily
     handle the base register.  This register will be restored with the
     last LDM operation.
     The usable register may be any general purpose register (that
     excludes PC, SP, LR : register mask is 0x1FFF).  */
  usable_register_mask = 0x1FFF;

  /* Generate the stub function.  */
  if (wback)
    {
      /* LDMIA Rn!, {R-low-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (rn, /*wback=*/1, insn_low_registers));

      /* LDMIA Rn!, {R-high-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (rn, /*wback=*/1, insn_high_registers));

      /* If PC is in the high list the branch back is taken care of by
	 the load itself; otherwise branch back explicitly.  */
      if (!restore_pc)
	{
	  /* B initial_insn_addr+4.  */
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_branch_absolute
				(initial_insn_addr - current_stub_contents));
	}
    }
  else /* if (!wback).  */
    {
      ri = rn;

      /* If Rn is not part of the high-register-list, move it there.  */
      if (!(insn_high_registers & (1 << rn)))
	{
	  /* Choose a Ri in the high-register-list that will be restored.  */
	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

	  /* MOV Ri, Rn.  */
	  current_stub_contents =
	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
				create_instruction_mov (ri, rn));
	}

      /* LDMIA Ri!, {R-low-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list} : (Encoding T2).
	 No write-back: the last load restores Ri itself when it is in
	 the high list.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));

      if (!restore_pc)
	{
	  /* B initial_insn_addr+4.  */
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_branch_absolute
				(initial_insn_addr - current_stub_contents));
	}
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
}
static void
stm32l4xx_create_replacing_stub_ldmdb (struct elf32_arm_link_hash_table * htab,
				       bfd * output_bfd,
				       const insn32 initial_insn,
				       const bfd_byte *const initial_insn_addr,
				       bfd_byte *const base_stub_contents)
{
  /* Emit, at BASE_STUB_CONTENTS, a veneer replacing the faulty wide
     LDMDB at INITIAL_INSN_ADDR.  The emitted sequence depends on the
     write-back flag and on whether PC and/or Rn are in the register
     list; each combination is handled by one branch below.  */
  int wback = (initial_insn & 0x00200000) >> 21;
  int ri, rn = (initial_insn & 0x000f0000) >> 16;
  int insn_all_registers = initial_insn & 0x0000ffff;
  int insn_low_registers, insn_high_registers;
  int usable_register_mask;
  int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
  int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
  int nb_registers = popcount (insn_all_registers);
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_ldmdb (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 registers load sequences that do not cause the
     hardware issue.  */
  if (nb_registers <= 8)
    {
      /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));

      /* Fill the remaining of the stub with deterministic contents.  */
      current_stub_contents =
	stm32l4xx_fill_stub_udf (htab, output_bfd,
				 base_stub_contents, current_stub_contents,
				 base_stub_contents +
				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

      return;
    }

  /* Sanity-check the architectural constraints of the original insn:
     - reg_list[13] == 0.  */
  BFD_ASSERT ((insn_all_registers & (1 << 13)) == 0);

  /* - reg_list[14] & reg_list[15] != 1.  */
  BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);

  /* - if (wback==1) reg_list[rn] == 0.  */
  BFD_ASSERT (!wback || !restore_rn);

  /* - nb_registers > 8.  */
  BFD_ASSERT (popcount (insn_all_registers) > 8);

  /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */

  /* In the following algorithm, we split this wide LDM using 2 LDM insn:
    - One with the 7 lowest registers (register mask 0x007F)
      This LDM will finally contain between 2 and 7 registers
    - One with the 7 highest registers (register mask 0xDF80)
      This ldm will finally contain between 2 and 7 registers.  */
  insn_low_registers = insn_all_registers & 0x007F;
  insn_high_registers = insn_all_registers & 0xDF80;

  /* A spare register may be needed during this veneer to temporarily
     handle the base register.  This register will be restored with
     the last LDM operation.
     The usable register may be any general purpose register (that excludes
     PC, SP, LR : register mask is 0x1FFF).  */
  usable_register_mask = 0x1FFF;

  /* Generate the stub function.  */
  if (!wback && !restore_pc && !restore_rn)
    {
      /* Choose a Ri in the low-register-list that will be restored.  */
      ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));

      /* MOV Ri, Rn.  */
      current_stub_contents =
	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
			    create_instruction_mov (ri, rn));

      /* LDMDB Ri!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/1, insn_high_registers));

      /* LDMDB Ri, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/0, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (wback && !restore_pc && !restore_rn)
    {
      /* LDMDB Rn!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (rn, /*wback=*/1, insn_high_registers));

      /* LDMDB Rn!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (rn, /*wback=*/1, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (!wback && restore_pc && !restore_rn)
    {
      /* Choose a Ri in the high-register-list that will be restored.  */
      ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

      /* SUB Ri, Rn, #(4*nb_registers).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (ri, rn, (4 * nb_registers)));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.
	 PC is in the high list, so the load itself returns to the
	 caller: no trailing branch is emitted.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (wback && restore_pc && !restore_rn)
    {
      /* Choose a Ri in the high-register-list that will be restored.  */
      ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

      /* SUB Rn, Rn, #(4*nb_registers)  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (rn, rn, (4 * nb_registers)));

      /* MOV Ri, Rn.  */
      current_stub_contents =
	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
			    create_instruction_mov (ri, rn));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.
	 PC is in the high list: the load returns to the caller.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (!wback && !restore_pc && restore_rn)
    {
      ri = rn;

      /* If Rn is not part of the low-register-list, move it there.  */
      if (!(insn_low_registers & (1 << rn)))
	{
	  /* Choose a Ri in the low-register-list that will be restored.  */
	  ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));

	  /* MOV Ri, Rn.  */
	  current_stub_contents =
	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
				create_instruction_mov (ri, rn));
	}

      /* LDMDB Ri!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/1, insn_high_registers));

      /* LDMDB Ri, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/0, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (!wback && restore_pc && restore_rn)
    {
      ri = rn;

      /* If Rn is not part of the high-register-list, move it there.  */
      if (!(insn_high_registers & (1 << rn)))
	{
	  /* Choose a Ri in the high-register-list that will be restored.  */
	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
	}

      /* SUB Ri, Rn, #(4*nb_registers).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (ri, rn, (4 * nb_registers)));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.
	 PC is in the high list: the load returns to the caller.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (wback && restore_rn)
    {
      /* The assembler should not have accepted to encode this.  */
      BFD_ASSERT (0 && "Cannot patch an instruction that has an "
		  "undefined behavior.\n");
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
}
static void
stm32l4xx_create_replacing_stub_vldm (struct elf32_arm_link_hash_table * htab,
				      bfd * output_bfd,
				      const insn32 initial_insn,
				      const bfd_byte *const initial_insn_addr,
				      bfd_byte *const base_stub_contents)
{
  /* Emit, at BASE_STUB_CONTENTS, a veneer replacing the faulty wide
     VLDM at INITIAL_INSN_ADDR by a sequence of VLDMs of at most 8
     words each, then branch back past the original insn.  */
  int num_words = ((unsigned int) initial_insn << 24) >> 24;
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_vldm (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 words load sequences that do not cause the
     hardware issue.  */
  if (num_words <= 8)
    {
      /* Untouched instruction.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else
    {
      bfd_boolean is_dp = /* DP encoding. */
	(initial_insn & 0xfe100f00) == 0xec100b00;
      bfd_boolean is_ia_nobang = /* (IA without !).  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x4;
      bfd_boolean is_ia_bang = /* (IA with !) - includes VPOP.  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x5;
      bfd_boolean is_db_bang = /* (DB with !).  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x9;
      int base_reg = ((unsigned int) initial_insn << 12) >> 28;
      /* d = UInt (Vd:D);.  */
      int first_reg = ((((unsigned int) initial_insn << 16) >> 28) << 1)
	| (((unsigned int)initial_insn << 9) >> 31);

      /* Compute the number of 8-words chunks needed to split.  */
      int chunks = (num_words % 8) ? (num_words / 8 + 1) : (num_words / 8);
      int chunk;

      /* The test coverage has been done assuming the following
	 hypothesis that exactly one of the previous is_ predicates is
	 true.  */
      BFD_ASSERT (    (is_ia_nobang ^ is_ia_bang ^ is_db_bang)
		  && !(is_ia_nobang & is_ia_bang & is_db_bang));

      /* We treat the cutting of the words in one pass for all
	 cases, then we emit the adjustments:

	 vldm rx, {...}
	 -> vldm rx!, {8_words_or_less} for each needed 8_word
	 -> sub rx, rx, #size (list)

	 vldm rx!, {...}
	 -> vldm rx!, {8_words_or_less} for each needed 8_word
	 This also handles vpop instruction (when rx is sp)

	 vldmd rx!, {...}
	 -> vldmb rx!, {8_words_or_less} for each needed 8_word.  */
      for (chunk = 0; chunk < chunks; ++chunk)
	{
	  bfd_vma new_insn = 0;

	  if (is_ia_nobang || is_ia_bang)
	    {
	      /* All chunks use write-back; only the last chunk may be
		 shorter than 8 words.  */
	      new_insn = create_instruction_vldmia
		(base_reg,
		 is_dp,
		 /*wback= */1,
		 chunks - (chunk + 1) ?
		 8 : num_words - chunk * 8,
		 first_reg + chunk * 8);
	    }
	  else if (is_db_bang)
	    {
	      new_insn = create_instruction_vldmdb
		(base_reg,
		 is_dp,
		 chunks - (chunk + 1) ?
		 8 : num_words - chunk * 8,
		 first_reg + chunk * 8);
	    }

	  if (new_insn)
	    current_stub_contents =
	      push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				  new_insn);
	}

      /* Only this case requires the base register compensation
	 subtract.  */
      if (is_ia_nobang)
	{
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_sub
				(base_reg, base_reg, 4*num_words));
	}

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
}
17424 stm32l4xx_create_replacing_stub (struct elf32_arm_link_hash_table
* htab
,
17426 const insn32 wrong_insn
,
17427 const bfd_byte
*const wrong_insn_addr
,
17428 bfd_byte
*const stub_contents
)
17430 if (is_thumb2_ldmia (wrong_insn
))
17431 stm32l4xx_create_replacing_stub_ldmia (htab
, output_bfd
,
17432 wrong_insn
, wrong_insn_addr
,
17434 else if (is_thumb2_ldmdb (wrong_insn
))
17435 stm32l4xx_create_replacing_stub_ldmdb (htab
, output_bfd
,
17436 wrong_insn
, wrong_insn_addr
,
17438 else if (is_thumb2_vldm (wrong_insn
))
17439 stm32l4xx_create_replacing_stub_vldm (htab
, output_bfd
,
17440 wrong_insn
, wrong_insn_addr
,
17444 /* End of stm32l4xx work-around. */
static void
elf32_arm_add_relocation (bfd *output_bfd, struct bfd_link_info *info,
			  asection *output_sec, Elf_Internal_Rela *rel)
{
  /* Append the relocation REL to the output relocation section
     (REL or RELA, whichever OUTPUT_SEC carries) attached to
     OUTPUT_SEC, swapping it out in the format selected by the hash
     table's use_rel flag (via SWAP_RELOC_OUT).  */
  BFD_ASSERT (output_sec && rel);
  struct bfd_elf_section_reloc_data *output_reldata;
  struct elf32_arm_link_hash_table *htab;
  struct bfd_elf_section_data *oesd = elf_section_data (output_sec);
  Elf_Internal_Shdr *rel_hdr;

  /* Pick whichever reloc section the output section actually has.  */
  if (oesd->rel.hdr)
    {
      rel_hdr = oesd->rel.hdr;
      output_reldata = &(oesd->rel);
    }
  else if (oesd->rela.hdr)
    {
      rel_hdr = oesd->rela.hdr;
      output_reldata = &(oesd->rela);
    }
  else
    {
      /* The section was sized without a reloc section: should not
	 happen.  */
      abort ();
    }

  /* Write at the next free slot and bump the count.  */
  bfd_byte *erel = rel_hdr->contents;
  erel += output_reldata->count * rel_hdr->sh_entsize;
  htab = elf32_arm_hash_table (info);
  SWAP_RELOC_OUT (htab) (output_bfd, rel, erel);
  output_reldata->count++;
}
17480 /* Do code byteswapping. Return FALSE afterwards so that the section is
17481 written out as normal. */
17484 elf32_arm_write_section (bfd
*output_bfd
,
17485 struct bfd_link_info
*link_info
,
17487 bfd_byte
*contents
)
17489 unsigned int mapcount
, errcount
;
17490 _arm_elf_section_data
*arm_data
;
17491 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (link_info
);
17492 elf32_arm_section_map
*map
;
17493 elf32_vfp11_erratum_list
*errnode
;
17494 elf32_stm32l4xx_erratum_list
*stm32l4xx_errnode
;
17497 bfd_vma offset
= sec
->output_section
->vma
+ sec
->output_offset
;
17501 if (globals
== NULL
)
17504 /* If this section has not been allocated an _arm_elf_section_data
17505 structure then we cannot record anything. */
17506 arm_data
= get_arm_elf_section_data (sec
);
17507 if (arm_data
== NULL
)
17510 mapcount
= arm_data
->mapcount
;
17511 map
= arm_data
->map
;
17512 errcount
= arm_data
->erratumcount
;
17516 unsigned int endianflip
= bfd_big_endian (output_bfd
) ? 3 : 0;
17518 for (errnode
= arm_data
->erratumlist
; errnode
!= 0;
17519 errnode
= errnode
->next
)
17521 bfd_vma target
= errnode
->vma
- offset
;
17523 switch (errnode
->type
)
17525 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER
:
17527 bfd_vma branch_to_veneer
;
17528 /* Original condition code of instruction, plus bit mask for
17529 ARM B instruction. */
17530 unsigned int insn
= (errnode
->u
.b
.vfp_insn
& 0xf0000000)
17533 /* The instruction is before the label. */
17536 /* Above offset included in -4 below. */
17537 branch_to_veneer
= errnode
->u
.b
.veneer
->vma
17538 - errnode
->vma
- 4;
17540 if ((signed) branch_to_veneer
< -(1 << 25)
17541 || (signed) branch_to_veneer
>= (1 << 25))
17542 (*_bfd_error_handler
) (_("%B: error: VFP11 veneer out of "
17543 "range"), output_bfd
);
17545 insn
|= (branch_to_veneer
>> 2) & 0xffffff;
17546 contents
[endianflip
^ target
] = insn
& 0xff;
17547 contents
[endianflip
^ (target
+ 1)] = (insn
>> 8) & 0xff;
17548 contents
[endianflip
^ (target
+ 2)] = (insn
>> 16) & 0xff;
17549 contents
[endianflip
^ (target
+ 3)] = (insn
>> 24) & 0xff;
17553 case VFP11_ERRATUM_ARM_VENEER
:
17555 bfd_vma branch_from_veneer
;
17558 /* Take size of veneer into account. */
17559 branch_from_veneer
= errnode
->u
.v
.branch
->vma
17560 - errnode
->vma
- 12;
17562 if ((signed) branch_from_veneer
< -(1 << 25)
17563 || (signed) branch_from_veneer
>= (1 << 25))
17564 (*_bfd_error_handler
) (_("%B: error: VFP11 veneer out of "
17565 "range"), output_bfd
);
17567 /* Original instruction. */
17568 insn
= errnode
->u
.v
.branch
->u
.b
.vfp_insn
;
17569 contents
[endianflip
^ target
] = insn
& 0xff;
17570 contents
[endianflip
^ (target
+ 1)] = (insn
>> 8) & 0xff;
17571 contents
[endianflip
^ (target
+ 2)] = (insn
>> 16) & 0xff;
17572 contents
[endianflip
^ (target
+ 3)] = (insn
>> 24) & 0xff;
17574 /* Branch back to insn after original insn. */
17575 insn
= 0xea000000 | ((branch_from_veneer
>> 2) & 0xffffff);
17576 contents
[endianflip
^ (target
+ 4)] = insn
& 0xff;
17577 contents
[endianflip
^ (target
+ 5)] = (insn
>> 8) & 0xff;
17578 contents
[endianflip
^ (target
+ 6)] = (insn
>> 16) & 0xff;
17579 contents
[endianflip
^ (target
+ 7)] = (insn
>> 24) & 0xff;
17589 if (arm_data
->stm32l4xx_erratumcount
!= 0)
17591 for (stm32l4xx_errnode
= arm_data
->stm32l4xx_erratumlist
;
17592 stm32l4xx_errnode
!= 0;
17593 stm32l4xx_errnode
= stm32l4xx_errnode
->next
)
17595 bfd_vma target
= stm32l4xx_errnode
->vma
- offset
;
17597 switch (stm32l4xx_errnode
->type
)
17599 case STM32L4XX_ERRATUM_BRANCH_TO_VENEER
:
17602 bfd_vma branch_to_veneer
=
17603 stm32l4xx_errnode
->u
.b
.veneer
->vma
- stm32l4xx_errnode
->vma
;
17605 if ((signed) branch_to_veneer
< -(1 << 24)
17606 || (signed) branch_to_veneer
>= (1 << 24))
17608 bfd_vma out_of_range
=
17609 ((signed) branch_to_veneer
< -(1 << 24)) ?
17610 - branch_to_veneer
- (1 << 24) :
17611 ((signed) branch_to_veneer
>= (1 << 24)) ?
17612 branch_to_veneer
- (1 << 24) : 0;
17614 (*_bfd_error_handler
)
17615 (_("%B(%#x): error: Cannot create STM32L4XX veneer. "
17616 "Jump out of range by %ld bytes. "
17617 "Cannot encode branch instruction. "),
17619 (long) (stm32l4xx_errnode
->vma
- 4),
17624 insn
= create_instruction_branch_absolute
17625 (stm32l4xx_errnode
->u
.b
.veneer
->vma
- stm32l4xx_errnode
->vma
);
17627 /* The instruction is before the label. */
17630 put_thumb2_insn (globals
, output_bfd
,
17631 (bfd_vma
) insn
, contents
+ target
);
17635 case STM32L4XX_ERRATUM_VENEER
:
17638 bfd_byte
* veneer_r
;
17641 veneer
= contents
+ target
;
17643 + stm32l4xx_errnode
->u
.b
.veneer
->vma
17644 - stm32l4xx_errnode
->vma
- 4;
17646 if ((signed) (veneer_r
- veneer
-
17647 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE
>
17648 STM32L4XX_ERRATUM_LDM_VENEER_SIZE
?
17649 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE
:
17650 STM32L4XX_ERRATUM_LDM_VENEER_SIZE
) < -(1 << 24)
17651 || (signed) (veneer_r
- veneer
) >= (1 << 24))
17653 (*_bfd_error_handler
) (_("%B: error: Cannot create STM32L4XX "
17654 "veneer."), output_bfd
);
17658 /* Original instruction. */
17659 insn
= stm32l4xx_errnode
->u
.v
.branch
->u
.b
.insn
;
17661 stm32l4xx_create_replacing_stub
17662 (globals
, output_bfd
, insn
, (void*)veneer_r
, (void*)veneer
);
17672 if (arm_data
->elf
.this_hdr
.sh_type
== SHT_ARM_EXIDX
)
17674 arm_unwind_table_edit
*edit_node
17675 = arm_data
->u
.exidx
.unwind_edit_list
;
17676 /* Now, sec->size is the size of the section we will write. The original
17677 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
17678 markers) was sec->rawsize. (This isn't the case if we perform no
17679 edits, then rawsize will be zero and we should use size). */
17680 bfd_byte
*edited_contents
= (bfd_byte
*) bfd_malloc (sec
->size
);
17681 unsigned int input_size
= sec
->rawsize
? sec
->rawsize
: sec
->size
;
17682 unsigned int in_index
, out_index
;
17683 bfd_vma add_to_offsets
= 0;
17685 for (in_index
= 0, out_index
= 0; in_index
* 8 < input_size
|| edit_node
;)
17689 unsigned int edit_index
= edit_node
->index
;
17691 if (in_index
< edit_index
&& in_index
* 8 < input_size
)
17693 copy_exidx_entry (output_bfd
, edited_contents
+ out_index
* 8,
17694 contents
+ in_index
* 8, add_to_offsets
);
17698 else if (in_index
== edit_index
17699 || (in_index
* 8 >= input_size
17700 && edit_index
== UINT_MAX
))
17702 switch (edit_node
->type
)
17704 case DELETE_EXIDX_ENTRY
:
17706 add_to_offsets
+= 8;
17709 case INSERT_EXIDX_CANTUNWIND_AT_END
:
17711 asection
*text_sec
= edit_node
->linked_section
;
17712 bfd_vma text_offset
= text_sec
->output_section
->vma
17713 + text_sec
->output_offset
17715 bfd_vma exidx_offset
= offset
+ out_index
* 8;
17716 unsigned long prel31_offset
;
17718 /* Note: this is meant to be equivalent to an
17719 R_ARM_PREL31 relocation. These synthetic
17720 EXIDX_CANTUNWIND markers are not relocated by the
17721 usual BFD method. */
17722 prel31_offset
= (text_offset
- exidx_offset
)
17724 if (bfd_link_relocatable (link_info
))
17726 /* Here relocation for new EXIDX_CANTUNWIND is
17727 created, so there is no need to
17728 adjust offset by hand. */
17729 prel31_offset
= text_sec
->output_offset
17732 /* New relocation entity. */
17733 asection
*text_out
= text_sec
->output_section
;
17734 Elf_Internal_Rela rel
;
17736 rel
.r_offset
= exidx_offset
;
17737 rel
.r_info
= ELF32_R_INFO (text_out
->target_index
,
17740 elf32_arm_add_relocation (output_bfd
, link_info
,
17741 sec
->output_section
,
17745 /* First address we can't unwind. */
17746 bfd_put_32 (output_bfd
, prel31_offset
,
17747 &edited_contents
[out_index
* 8]);
17749 /* Code for EXIDX_CANTUNWIND. */
17750 bfd_put_32 (output_bfd
, 0x1,
17751 &edited_contents
[out_index
* 8 + 4]);
17754 add_to_offsets
-= 8;
17759 edit_node
= edit_node
->next
;
17764 /* No more edits, copy remaining entries verbatim. */
17765 copy_exidx_entry (output_bfd
, edited_contents
+ out_index
* 8,
17766 contents
+ in_index
* 8, add_to_offsets
);
17772 if (!(sec
->flags
& SEC_EXCLUDE
) && !(sec
->flags
& SEC_NEVER_LOAD
))
17773 bfd_set_section_contents (output_bfd
, sec
->output_section
,
17775 (file_ptr
) sec
->output_offset
, sec
->size
);
17780 /* Fix code to point to Cortex-A8 erratum stubs. */
17781 if (globals
->fix_cortex_a8
)
17783 struct a8_branch_to_stub_data data
;
17785 data
.writing_section
= sec
;
17786 data
.contents
= contents
;
17788 bfd_hash_traverse (& globals
->stub_hash_table
, make_branch_to_a8_stub
,
17795 if (globals
->byteswap_code
)
17797 qsort (map
, mapcount
, sizeof (* map
), elf32_arm_compare_mapping
);
17800 for (i
= 0; i
< mapcount
; i
++)
17802 if (i
== mapcount
- 1)
17805 end
= map
[i
+ 1].vma
;
17807 switch (map
[i
].type
)
17810 /* Byte swap code words. */
17811 while (ptr
+ 3 < end
)
17813 tmp
= contents
[ptr
];
17814 contents
[ptr
] = contents
[ptr
+ 3];
17815 contents
[ptr
+ 3] = tmp
;
17816 tmp
= contents
[ptr
+ 1];
17817 contents
[ptr
+ 1] = contents
[ptr
+ 2];
17818 contents
[ptr
+ 2] = tmp
;
17824 /* Byte swap code halfwords. */
17825 while (ptr
+ 1 < end
)
17827 tmp
= contents
[ptr
];
17828 contents
[ptr
] = contents
[ptr
+ 1];
17829 contents
[ptr
+ 1] = tmp
;
17835 /* Leave data alone. */
17843 arm_data
->mapcount
= -1;
17844 arm_data
->mapsize
= 0;
17845 arm_data
->map
= NULL
;
17850 /* Mangle thumb function symbols as we read them in. */
17853 elf32_arm_swap_symbol_in (bfd
* abfd
,
17856 Elf_Internal_Sym
*dst
)
17858 if (!bfd_elf32_swap_symbol_in (abfd
, psrc
, pshn
, dst
))
17860 dst
->st_target_internal
= 0;
17862 /* New EABI objects mark thumb function symbols by setting the low bit of
17864 if (ELF_ST_TYPE (dst
->st_info
) == STT_FUNC
17865 || ELF_ST_TYPE (dst
->st_info
) == STT_GNU_IFUNC
)
17867 if (dst
->st_value
& 1)
17869 dst
->st_value
&= ~(bfd_vma
) 1;
17870 ARM_SET_SYM_BRANCH_TYPE (dst
->st_target_internal
,
17871 ST_BRANCH_TO_THUMB
);
17874 ARM_SET_SYM_BRANCH_TYPE (dst
->st_target_internal
, ST_BRANCH_TO_ARM
);
17876 else if (ELF_ST_TYPE (dst
->st_info
) == STT_ARM_TFUNC
)
17878 dst
->st_info
= ELF_ST_INFO (ELF_ST_BIND (dst
->st_info
), STT_FUNC
);
17879 ARM_SET_SYM_BRANCH_TYPE (dst
->st_target_internal
, ST_BRANCH_TO_THUMB
);
17881 else if (ELF_ST_TYPE (dst
->st_info
) == STT_SECTION
)
17882 ARM_SET_SYM_BRANCH_TYPE (dst
->st_target_internal
, ST_BRANCH_LONG
);
17884 ARM_SET_SYM_BRANCH_TYPE (dst
->st_target_internal
, ST_BRANCH_UNKNOWN
);
17890 /* Mangle thumb function symbols as we write them out. */
17893 elf32_arm_swap_symbol_out (bfd
*abfd
,
17894 const Elf_Internal_Sym
*src
,
17898 Elf_Internal_Sym newsym
;
17900 /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
17901 of the address set, as per the new EABI. We do this unconditionally
17902 because objcopy does not set the elf header flags until after
17903 it writes out the symbol table. */
17904 if (ARM_GET_SYM_BRANCH_TYPE (src
->st_target_internal
) == ST_BRANCH_TO_THUMB
)
17907 if (ELF_ST_TYPE (src
->st_info
) != STT_GNU_IFUNC
)
17908 newsym
.st_info
= ELF_ST_INFO (ELF_ST_BIND (src
->st_info
), STT_FUNC
);
17909 if (newsym
.st_shndx
!= SHN_UNDEF
)
17911 /* Do this only for defined symbols. At link type, the static
17912 linker will simulate the work of dynamic linker of resolving
17913 symbols and will carry over the thumbness of found symbols to
17914 the output symbol table. It's not clear how it happens, but
17915 the thumbness of undefined symbols can well be different at
17916 runtime, and writing '1' for them will be confusing for users
17917 and possibly for dynamic linker itself.
17919 newsym
.st_value
|= 1;
17924 bfd_elf32_swap_symbol_out (abfd
, src
, cdst
, shndx
);
17927 /* Add the PT_ARM_EXIDX program header. */
17930 elf32_arm_modify_segment_map (bfd
*abfd
,
17931 struct bfd_link_info
*info ATTRIBUTE_UNUSED
)
17933 struct elf_segment_map
*m
;
17936 sec
= bfd_get_section_by_name (abfd
, ".ARM.exidx");
17937 if (sec
!= NULL
&& (sec
->flags
& SEC_LOAD
) != 0)
17939 /* If there is already a PT_ARM_EXIDX header, then we do not
17940 want to add another one. This situation arises when running
17941 "strip"; the input binary already has the header. */
17942 m
= elf_seg_map (abfd
);
17943 while (m
&& m
->p_type
!= PT_ARM_EXIDX
)
17947 m
= (struct elf_segment_map
*)
17948 bfd_zalloc (abfd
, sizeof (struct elf_segment_map
));
17951 m
->p_type
= PT_ARM_EXIDX
;
17953 m
->sections
[0] = sec
;
17955 m
->next
= elf_seg_map (abfd
);
17956 elf_seg_map (abfd
) = m
;
17963 /* We may add a PT_ARM_EXIDX program header. */
17966 elf32_arm_additional_program_headers (bfd
*abfd
,
17967 struct bfd_link_info
*info ATTRIBUTE_UNUSED
)
17971 sec
= bfd_get_section_by_name (abfd
, ".ARM.exidx");
17972 if (sec
!= NULL
&& (sec
->flags
& SEC_LOAD
) != 0)
17978 /* Hook called by the linker routine which adds symbols from an object
17982 elf32_arm_add_symbol_hook (bfd
*abfd
, struct bfd_link_info
*info
,
17983 Elf_Internal_Sym
*sym
, const char **namep
,
17984 flagword
*flagsp
, asection
**secp
, bfd_vma
*valp
)
17986 if (ELF_ST_TYPE (sym
->st_info
) == STT_GNU_IFUNC
17987 && (abfd
->flags
& DYNAMIC
) == 0
17988 && bfd_get_flavour (info
->output_bfd
) == bfd_target_elf_flavour
)
17989 elf_tdata (info
->output_bfd
)->has_gnu_symbols
|= elf_gnu_symbol_ifunc
;
17991 if (elf32_arm_hash_table (info
) == NULL
)
17994 if (elf32_arm_hash_table (info
)->vxworks_p
17995 && !elf_vxworks_add_symbol_hook (abfd
, info
, sym
, namep
,
17996 flagsp
, secp
, valp
))
18002 /* We use this to override swap_symbol_in and swap_symbol_out. */
18003 const struct elf_size_info elf32_arm_size_info
=
18005 sizeof (Elf32_External_Ehdr
),
18006 sizeof (Elf32_External_Phdr
),
18007 sizeof (Elf32_External_Shdr
),
18008 sizeof (Elf32_External_Rel
),
18009 sizeof (Elf32_External_Rela
),
18010 sizeof (Elf32_External_Sym
),
18011 sizeof (Elf32_External_Dyn
),
18012 sizeof (Elf_External_Note
),
18016 ELFCLASS32
, EV_CURRENT
,
18017 bfd_elf32_write_out_phdrs
,
18018 bfd_elf32_write_shdrs_and_ehdr
,
18019 bfd_elf32_checksum_contents
,
18020 bfd_elf32_write_relocs
,
18021 elf32_arm_swap_symbol_in
,
18022 elf32_arm_swap_symbol_out
,
18023 bfd_elf32_slurp_reloc_table
,
18024 bfd_elf32_slurp_symbol_table
,
18025 bfd_elf32_swap_dyn_in
,
18026 bfd_elf32_swap_dyn_out
,
18027 bfd_elf32_swap_reloc_in
,
18028 bfd_elf32_swap_reloc_out
,
18029 bfd_elf32_swap_reloca_in
,
18030 bfd_elf32_swap_reloca_out
18034 read_code32 (const bfd
*abfd
, const bfd_byte
*addr
)
18036 /* V7 BE8 code is always little endian. */
18037 if ((elf_elfheader (abfd
)->e_flags
& EF_ARM_BE8
) != 0)
18038 return bfd_getl32 (addr
);
18040 return bfd_get_32 (abfd
, addr
);
18044 read_code16 (const bfd
*abfd
, const bfd_byte
*addr
)
18046 /* V7 BE8 code is always little endian. */
18047 if ((elf_elfheader (abfd
)->e_flags
& EF_ARM_BE8
) != 0)
18048 return bfd_getl16 (addr
);
18050 return bfd_get_16 (abfd
, addr
);
18053 /* Return size of plt0 entry starting at ADDR
18054 or (bfd_vma) -1 if size can not be determined. */
18057 elf32_arm_plt0_size (const bfd
*abfd
, const bfd_byte
*addr
)
18059 bfd_vma first_word
;
18062 first_word
= read_code32 (abfd
, addr
);
18064 if (first_word
== elf32_arm_plt0_entry
[0])
18065 plt0_size
= 4 * ARRAY_SIZE (elf32_arm_plt0_entry
);
18066 else if (first_word
== elf32_thumb2_plt0_entry
[0])
18067 plt0_size
= 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry
);
18069 /* We don't yet handle this PLT format. */
18070 return (bfd_vma
) -1;
18075 /* Return size of plt entry starting at offset OFFSET
18076 of plt section located at address START
18077 or (bfd_vma) -1 if size can not be determined. */
18080 elf32_arm_plt_size (const bfd
*abfd
, const bfd_byte
*start
, bfd_vma offset
)
18082 bfd_vma first_insn
;
18083 bfd_vma plt_size
= 0;
18084 const bfd_byte
*addr
= start
+ offset
;
18086 /* PLT entry size if fixed on Thumb-only platforms. */
18087 if (read_code32 (abfd
, start
) == elf32_thumb2_plt0_entry
[0])
18088 return 4 * ARRAY_SIZE (elf32_thumb2_plt_entry
);
18090 /* Respect Thumb stub if necessary. */
18091 if (read_code16 (abfd
, addr
) == elf32_arm_plt_thumb_stub
[0])
18093 plt_size
+= 2 * ARRAY_SIZE(elf32_arm_plt_thumb_stub
);
18096 /* Strip immediate from first add. */
18097 first_insn
= read_code32 (abfd
, addr
+ plt_size
) & 0xffffff00;
18099 #ifdef FOUR_WORD_PLT
18100 if (first_insn
== elf32_arm_plt_entry
[0])
18101 plt_size
+= 4 * ARRAY_SIZE (elf32_arm_plt_entry
);
18103 if (first_insn
== elf32_arm_plt_entry_long
[0])
18104 plt_size
+= 4 * ARRAY_SIZE (elf32_arm_plt_entry_long
);
18105 else if (first_insn
== elf32_arm_plt_entry_short
[0])
18106 plt_size
+= 4 * ARRAY_SIZE (elf32_arm_plt_entry_short
);
18109 /* We don't yet handle this PLT format. */
18110 return (bfd_vma
) -1;
18115 /* Implementation is shamelessly borrowed from _bfd_elf_get_synthetic_symtab. */
18118 elf32_arm_get_synthetic_symtab (bfd
*abfd
,
18119 long symcount ATTRIBUTE_UNUSED
,
18120 asymbol
**syms ATTRIBUTE_UNUSED
,
18130 Elf_Internal_Shdr
*hdr
;
18138 if ((abfd
->flags
& (DYNAMIC
| EXEC_P
)) == 0)
18141 if (dynsymcount
<= 0)
18144 relplt
= bfd_get_section_by_name (abfd
, ".rel.plt");
18145 if (relplt
== NULL
)
18148 hdr
= &elf_section_data (relplt
)->this_hdr
;
18149 if (hdr
->sh_link
!= elf_dynsymtab (abfd
)
18150 || (hdr
->sh_type
!= SHT_REL
&& hdr
->sh_type
!= SHT_RELA
))
18153 plt
= bfd_get_section_by_name (abfd
, ".plt");
18157 if (!elf32_arm_size_info
.slurp_reloc_table (abfd
, relplt
, dynsyms
, TRUE
))
18160 data
= plt
->contents
;
18163 if (!bfd_get_full_section_contents(abfd
, (asection
*) plt
, &data
) || data
== NULL
)
18165 bfd_cache_section_contents((asection
*) plt
, data
);
18168 count
= relplt
->size
/ hdr
->sh_entsize
;
18169 size
= count
* sizeof (asymbol
);
18170 p
= relplt
->relocation
;
18171 for (i
= 0; i
< count
; i
++, p
+= elf32_arm_size_info
.int_rels_per_ext_rel
)
18173 size
+= strlen ((*p
->sym_ptr_ptr
)->name
) + sizeof ("@plt");
18174 if (p
->addend
!= 0)
18175 size
+= sizeof ("+0x") - 1 + 8;
18178 s
= *ret
= (asymbol
*) bfd_malloc (size
);
18182 offset
= elf32_arm_plt0_size (abfd
, data
);
18183 if (offset
== (bfd_vma
) -1)
18186 names
= (char *) (s
+ count
);
18187 p
= relplt
->relocation
;
18189 for (i
= 0; i
< count
; i
++, p
+= elf32_arm_size_info
.int_rels_per_ext_rel
)
18193 bfd_vma plt_size
= elf32_arm_plt_size (abfd
, data
, offset
);
18194 if (plt_size
== (bfd_vma
) -1)
18197 *s
= **p
->sym_ptr_ptr
;
18198 /* Undefined syms won't have BSF_LOCAL or BSF_GLOBAL set. Since
18199 we are defining a symbol, ensure one of them is set. */
18200 if ((s
->flags
& BSF_LOCAL
) == 0)
18201 s
->flags
|= BSF_GLOBAL
;
18202 s
->flags
|= BSF_SYNTHETIC
;
18207 len
= strlen ((*p
->sym_ptr_ptr
)->name
);
18208 memcpy (names
, (*p
->sym_ptr_ptr
)->name
, len
);
18210 if (p
->addend
!= 0)
18214 memcpy (names
, "+0x", sizeof ("+0x") - 1);
18215 names
+= sizeof ("+0x") - 1;
18216 bfd_sprintf_vma (abfd
, buf
, p
->addend
);
18217 for (a
= buf
; *a
== '0'; ++a
)
18220 memcpy (names
, a
, len
);
18223 memcpy (names
, "@plt", sizeof ("@plt"));
18224 names
+= sizeof ("@plt");
18226 offset
+= plt_size
;
18233 elf32_arm_section_flags (flagword
*flags
, const Elf_Internal_Shdr
* hdr
)
18235 if (hdr
->sh_flags
& SHF_ARM_PURECODE
)
18236 *flags
|= SEC_ELF_PURECODE
;
18241 elf32_arm_lookup_section_flags (char *flag_name
)
18243 if (!strcmp (flag_name
, "SHF_ARM_PURECODE"))
18244 return SHF_ARM_PURECODE
;
18246 return SEC_NO_FLAGS
;
18249 static unsigned int
18250 elf32_arm_count_additional_relocs (asection
*sec
)
18252 struct _arm_elf_section_data
*arm_data
;
18253 arm_data
= get_arm_elf_section_data (sec
);
18254 return arm_data
->additional_reloc_count
;
18257 /* Called to set the sh_flags, sh_link and sh_info fields of OSECTION which
18258 has a type >= SHT_LOOS. Returns TRUE if these fields were initialised
18259 FALSE otherwise. ISECTION is the best guess matching section from the
18260 input bfd IBFD, but it might be NULL. */
18263 elf32_arm_copy_special_section_fields (const bfd
*ibfd ATTRIBUTE_UNUSED
,
18264 bfd
*obfd ATTRIBUTE_UNUSED
,
18265 const Elf_Internal_Shdr
*isection ATTRIBUTE_UNUSED
,
18266 Elf_Internal_Shdr
*osection
)
18268 switch (osection
->sh_type
)
18270 case SHT_ARM_EXIDX
:
18272 Elf_Internal_Shdr
**oheaders
= elf_elfsections (obfd
);
18273 Elf_Internal_Shdr
**iheaders
= elf_elfsections (ibfd
);
18276 osection
->sh_flags
= SHF_ALLOC
| SHF_LINK_ORDER
;
18277 osection
->sh_info
= 0;
18279 /* The sh_link field must be set to the text section associated with
18280 this index section. Unfortunately the ARM EHABI does not specify
18281 exactly how to determine this association. Our caller does try
18282 to match up OSECTION with its corresponding input section however
18283 so that is a good first guess. */
18284 if (isection
!= NULL
18285 && osection
->bfd_section
!= NULL
18286 && isection
->bfd_section
!= NULL
18287 && isection
->bfd_section
->output_section
!= NULL
18288 && isection
->bfd_section
->output_section
== osection
->bfd_section
18289 && iheaders
!= NULL
18290 && isection
->sh_link
> 0
18291 && isection
->sh_link
< elf_numsections (ibfd
)
18292 && iheaders
[isection
->sh_link
]->bfd_section
!= NULL
18293 && iheaders
[isection
->sh_link
]->bfd_section
->output_section
!= NULL
18296 for (i
= elf_numsections (obfd
); i
-- > 0;)
18297 if (oheaders
[i
]->bfd_section
18298 == iheaders
[isection
->sh_link
]->bfd_section
->output_section
)
18304 /* Failing that we have to find a matching section ourselves. If
18305 we had the output section name available we could compare that
18306 with input section names. Unfortunately we don't. So instead
18307 we use a simple heuristic and look for the nearest executable
18308 section before this one. */
18309 for (i
= elf_numsections (obfd
); i
-- > 0;)
18310 if (oheaders
[i
] == osection
)
18316 if (oheaders
[i
]->sh_type
== SHT_PROGBITS
18317 && (oheaders
[i
]->sh_flags
& (SHF_ALLOC
| SHF_EXECINSTR
))
18318 == (SHF_ALLOC
| SHF_EXECINSTR
))
18324 osection
->sh_link
= i
;
18325 /* If the text section was part of a group
18326 then the index section should be too. */
18327 if (oheaders
[i
]->sh_flags
& SHF_GROUP
)
18328 osection
->sh_flags
|= SHF_GROUP
;
18334 case SHT_ARM_PREEMPTMAP
:
18335 osection
->sh_flags
= SHF_ALLOC
;
18338 case SHT_ARM_ATTRIBUTES
:
18339 case SHT_ARM_DEBUGOVERLAY
:
18340 case SHT_ARM_OVERLAYSECTION
:
18348 /* Returns TRUE if NAME is an ARM mapping symbol.
18349 Traditionally the symbols $a, $d and $t have been used.
18350 The ARM ELF standard also defines $x (for A64 code). It also allows a
18351 period initiated suffix to be added to the symbol: "$[adtx]\.[:sym_char]+".
18352 Other tools might also produce $b (Thumb BL), $f, $p, $m and $v, but we do
18353 not support them here. $t.x indicates the start of ThumbEE instructions. */
18356 is_arm_mapping_symbol (const char * name
)
18358 return name
!= NULL
/* Paranoia. */
18359 && name
[0] == '$' /* Note: if objcopy --prefix-symbols has been used then
18360 the mapping symbols could have acquired a prefix.
18361 We do not support this here, since such symbols no
18362 longer conform to the ARM ELF ABI. */
18363 && (name
[1] == 'a' || name
[1] == 'd' || name
[1] == 't' || name
[1] == 'x')
18364 && (name
[2] == 0 || name
[2] == '.');
18365 /* FIXME: Strictly speaking the symbol is only a valid mapping symbol if
18366 any characters that follow the period are legal characters for the body
18367 of a symbol's name. For now we just assume that this is the case. */
18370 /* Make sure that mapping symbols in object files are not removed via the
18371 "strip --strip-unneeded" tool. These symbols are needed in order to
18372 correctly generate interworking veneers, and for byte swapping code
18373 regions. Once an object file has been linked, it is safe to remove the
18374 symbols as they will no longer be needed. */
18377 elf32_arm_backend_symbol_processing (bfd
*abfd
, asymbol
*sym
)
18379 if (((abfd
->flags
& (EXEC_P
| DYNAMIC
)) == 0)
18380 && sym
->section
!= bfd_abs_section_ptr
18381 && is_arm_mapping_symbol (sym
->name
))
18382 sym
->flags
|= BSF_KEEP
;
18385 #undef elf_backend_copy_special_section_fields
18386 #define elf_backend_copy_special_section_fields elf32_arm_copy_special_section_fields
18388 #define ELF_ARCH bfd_arch_arm
18389 #define ELF_TARGET_ID ARM_ELF_DATA
18390 #define ELF_MACHINE_CODE EM_ARM
18391 #ifdef __QNXTARGET__
18392 #define ELF_MAXPAGESIZE 0x1000
18394 #define ELF_MAXPAGESIZE 0x10000
18396 #define ELF_MINPAGESIZE 0x1000
18397 #define ELF_COMMONPAGESIZE 0x1000
18399 #define bfd_elf32_mkobject elf32_arm_mkobject
18401 #define bfd_elf32_bfd_copy_private_bfd_data elf32_arm_copy_private_bfd_data
18402 #define bfd_elf32_bfd_merge_private_bfd_data elf32_arm_merge_private_bfd_data
18403 #define bfd_elf32_bfd_set_private_flags elf32_arm_set_private_flags
18404 #define bfd_elf32_bfd_print_private_bfd_data elf32_arm_print_private_bfd_data
18405 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_link_hash_table_create
18406 #define bfd_elf32_bfd_reloc_type_lookup elf32_arm_reloc_type_lookup
18407 #define bfd_elf32_bfd_reloc_name_lookup elf32_arm_reloc_name_lookup
18408 #define bfd_elf32_find_nearest_line elf32_arm_find_nearest_line
18409 #define bfd_elf32_find_inliner_info elf32_arm_find_inliner_info
18410 #define bfd_elf32_new_section_hook elf32_arm_new_section_hook
18411 #define bfd_elf32_bfd_is_target_special_symbol elf32_arm_is_target_special_symbol
18412 #define bfd_elf32_bfd_final_link elf32_arm_final_link
18413 #define bfd_elf32_get_synthetic_symtab elf32_arm_get_synthetic_symtab
18415 #define elf_backend_get_symbol_type elf32_arm_get_symbol_type
18416 #define elf_backend_gc_mark_hook elf32_arm_gc_mark_hook
18417 #define elf_backend_gc_mark_extra_sections elf32_arm_gc_mark_extra_sections
18418 #define elf_backend_gc_sweep_hook elf32_arm_gc_sweep_hook
18419 #define elf_backend_check_relocs elf32_arm_check_relocs
18420 #define elf_backend_relocate_section elf32_arm_relocate_section
18421 #define elf_backend_write_section elf32_arm_write_section
18422 #define elf_backend_adjust_dynamic_symbol elf32_arm_adjust_dynamic_symbol
18423 #define elf_backend_create_dynamic_sections elf32_arm_create_dynamic_sections
18424 #define elf_backend_finish_dynamic_symbol elf32_arm_finish_dynamic_symbol
18425 #define elf_backend_finish_dynamic_sections elf32_arm_finish_dynamic_sections
18426 #define elf_backend_size_dynamic_sections elf32_arm_size_dynamic_sections
18427 #define elf_backend_always_size_sections elf32_arm_always_size_sections
18428 #define elf_backend_init_index_section _bfd_elf_init_2_index_sections
18429 #define elf_backend_post_process_headers elf32_arm_post_process_headers
18430 #define elf_backend_reloc_type_class elf32_arm_reloc_type_class
18431 #define elf_backend_object_p elf32_arm_object_p
18432 #define elf_backend_fake_sections elf32_arm_fake_sections
18433 #define elf_backend_section_from_shdr elf32_arm_section_from_shdr
18434 #define elf_backend_final_write_processing elf32_arm_final_write_processing
18435 #define elf_backend_copy_indirect_symbol elf32_arm_copy_indirect_symbol
18436 #define elf_backend_size_info elf32_arm_size_info
18437 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
18438 #define elf_backend_additional_program_headers elf32_arm_additional_program_headers
18439 #define elf_backend_output_arch_local_syms elf32_arm_output_arch_local_syms
18440 #define elf_backend_begin_write_processing elf32_arm_begin_write_processing
18441 #define elf_backend_add_symbol_hook elf32_arm_add_symbol_hook
18442 #define elf_backend_count_additional_relocs elf32_arm_count_additional_relocs
18443 #define elf_backend_symbol_processing elf32_arm_backend_symbol_processing
18445 #define elf_backend_can_refcount 1
18446 #define elf_backend_can_gc_sections 1
18447 #define elf_backend_plt_readonly 1
18448 #define elf_backend_want_got_plt 1
18449 #define elf_backend_want_plt_sym 0
18450 #define elf_backend_may_use_rel_p 1
18451 #define elf_backend_may_use_rela_p 0
18452 #define elf_backend_default_use_rela_p 0
18454 #define elf_backend_got_header_size 12
18455 #define elf_backend_extern_protected_data 1
18457 #undef elf_backend_obj_attrs_vendor
18458 #define elf_backend_obj_attrs_vendor "aeabi"
18459 #undef elf_backend_obj_attrs_section
18460 #define elf_backend_obj_attrs_section ".ARM.attributes"
18461 #undef elf_backend_obj_attrs_arg_type
18462 #define elf_backend_obj_attrs_arg_type elf32_arm_obj_attrs_arg_type
18463 #undef elf_backend_obj_attrs_section_type
18464 #define elf_backend_obj_attrs_section_type SHT_ARM_ATTRIBUTES
18465 #define elf_backend_obj_attrs_order elf32_arm_obj_attrs_order
18466 #define elf_backend_obj_attrs_handle_unknown elf32_arm_obj_attrs_handle_unknown
18468 #undef elf_backend_section_flags
18469 #define elf_backend_section_flags elf32_arm_section_flags
18470 #undef elf_backend_lookup_section_flags_hook
18471 #define elf_backend_lookup_section_flags_hook elf32_arm_lookup_section_flags
18473 #include "elf32-target.h"
18475 /* Native Client targets. */
18477 #undef TARGET_LITTLE_SYM
18478 #define TARGET_LITTLE_SYM arm_elf32_nacl_le_vec
18479 #undef TARGET_LITTLE_NAME
18480 #define TARGET_LITTLE_NAME "elf32-littlearm-nacl"
18481 #undef TARGET_BIG_SYM
18482 #define TARGET_BIG_SYM arm_elf32_nacl_be_vec
18483 #undef TARGET_BIG_NAME
18484 #define TARGET_BIG_NAME "elf32-bigarm-nacl"
18486 /* Like elf32_arm_link_hash_table_create -- but overrides
18487 appropriately for NaCl. */
18489 static struct bfd_link_hash_table
*
18490 elf32_arm_nacl_link_hash_table_create (bfd
*abfd
)
18492 struct bfd_link_hash_table
*ret
;
18494 ret
= elf32_arm_link_hash_table_create (abfd
);
18497 struct elf32_arm_link_hash_table
*htab
18498 = (struct elf32_arm_link_hash_table
*) ret
;
18502 htab
->plt_header_size
= 4 * ARRAY_SIZE (elf32_arm_nacl_plt0_entry
);
18503 htab
->plt_entry_size
= 4 * ARRAY_SIZE (elf32_arm_nacl_plt_entry
);
18508 /* Since NaCl doesn't use the ARM-specific unwind format, we don't
18509 really need to use elf32_arm_modify_segment_map. But we do it
18510 anyway just to reduce gratuitous differences with the stock ARM backend. */
18513 elf32_arm_nacl_modify_segment_map (bfd
*abfd
, struct bfd_link_info
*info
)
18515 return (elf32_arm_modify_segment_map (abfd
, info
)
18516 && nacl_modify_segment_map (abfd
, info
));
18520 elf32_arm_nacl_final_write_processing (bfd
*abfd
, bfd_boolean linker
)
18522 elf32_arm_final_write_processing (abfd
, linker
);
18523 nacl_final_write_processing (abfd
, linker
);
18527 elf32_arm_nacl_plt_sym_val (bfd_vma i
, const asection
*plt
,
18528 const arelent
*rel ATTRIBUTE_UNUSED
)
18531 + 4 * (ARRAY_SIZE (elf32_arm_nacl_plt0_entry
) +
18532 i
* ARRAY_SIZE (elf32_arm_nacl_plt_entry
));
18536 #define elf32_bed elf32_arm_nacl_bed
18537 #undef bfd_elf32_bfd_link_hash_table_create
18538 #define bfd_elf32_bfd_link_hash_table_create \
18539 elf32_arm_nacl_link_hash_table_create
18540 #undef elf_backend_plt_alignment
18541 #define elf_backend_plt_alignment 4
18542 #undef elf_backend_modify_segment_map
18543 #define elf_backend_modify_segment_map elf32_arm_nacl_modify_segment_map
18544 #undef elf_backend_modify_program_headers
18545 #define elf_backend_modify_program_headers nacl_modify_program_headers
18546 #undef elf_backend_final_write_processing
18547 #define elf_backend_final_write_processing elf32_arm_nacl_final_write_processing
18548 #undef bfd_elf32_get_synthetic_symtab
18549 #undef elf_backend_plt_sym_val
18550 #define elf_backend_plt_sym_val elf32_arm_nacl_plt_sym_val
18551 #undef elf_backend_copy_special_section_fields
18553 #undef ELF_MINPAGESIZE
18554 #undef ELF_COMMONPAGESIZE
18557 #include "elf32-target.h"
18559 /* Reset to defaults. */
18560 #undef elf_backend_plt_alignment
18561 #undef elf_backend_modify_segment_map
18562 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
18563 #undef elf_backend_modify_program_headers
18564 #undef elf_backend_final_write_processing
18565 #define elf_backend_final_write_processing elf32_arm_final_write_processing
18566 #undef ELF_MINPAGESIZE
18567 #define ELF_MINPAGESIZE 0x1000
18568 #undef ELF_COMMONPAGESIZE
18569 #define ELF_COMMONPAGESIZE 0x1000
18572 /* VxWorks Targets. */
18574 #undef TARGET_LITTLE_SYM
18575 #define TARGET_LITTLE_SYM arm_elf32_vxworks_le_vec
18576 #undef TARGET_LITTLE_NAME
18577 #define TARGET_LITTLE_NAME "elf32-littlearm-vxworks"
18578 #undef TARGET_BIG_SYM
18579 #define TARGET_BIG_SYM arm_elf32_vxworks_be_vec
18580 #undef TARGET_BIG_NAME
18581 #define TARGET_BIG_NAME "elf32-bigarm-vxworks"
18583 /* Like elf32_arm_link_hash_table_create -- but overrides
18584 appropriately for VxWorks. */
18586 static struct bfd_link_hash_table
*
18587 elf32_arm_vxworks_link_hash_table_create (bfd
*abfd
)
18589 struct bfd_link_hash_table
*ret
;
18591 ret
= elf32_arm_link_hash_table_create (abfd
);
18594 struct elf32_arm_link_hash_table
*htab
18595 = (struct elf32_arm_link_hash_table
*) ret
;
18597 htab
->vxworks_p
= 1;
18603 elf32_arm_vxworks_final_write_processing (bfd
*abfd
, bfd_boolean linker
)
18605 elf32_arm_final_write_processing (abfd
, linker
);
18606 elf_vxworks_final_write_processing (abfd
, linker
);
18610 #define elf32_bed elf32_arm_vxworks_bed
18612 #undef bfd_elf32_bfd_link_hash_table_create
18613 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_vxworks_link_hash_table_create
18614 #undef elf_backend_final_write_processing
18615 #define elf_backend_final_write_processing elf32_arm_vxworks_final_write_processing
18616 #undef elf_backend_emit_relocs
18617 #define elf_backend_emit_relocs elf_vxworks_emit_relocs
18619 #undef elf_backend_may_use_rel_p
18620 #define elf_backend_may_use_rel_p 0
18621 #undef elf_backend_may_use_rela_p
18622 #define elf_backend_may_use_rela_p 1
18623 #undef elf_backend_default_use_rela_p
18624 #define elf_backend_default_use_rela_p 1
18625 #undef elf_backend_want_plt_sym
18626 #define elf_backend_want_plt_sym 1
18627 #undef ELF_MAXPAGESIZE
18628 #define ELF_MAXPAGESIZE 0x1000
18630 #include "elf32-target.h"
18633 /* Merge backend specific data from an object file to the output
18634 object file when linking. */
18637 elf32_arm_merge_private_bfd_data (bfd
* ibfd
, bfd
* obfd
)
18639 flagword out_flags
;
18641 bfd_boolean flags_compatible
= TRUE
;
18644 /* Check if we have the same endianness. */
18645 if (! _bfd_generic_verify_endian_match (ibfd
, obfd
))
18648 if (! is_arm_elf (ibfd
) || ! is_arm_elf (obfd
))
18651 if (!elf32_arm_merge_eabi_attributes (ibfd
, obfd
))
18654 /* The input BFD must have had its flags initialised. */
18655 /* The following seems bogus to me -- The flags are initialized in
18656 the assembler but I don't think an elf_flags_init field is
18657 written into the object. */
18658 /* BFD_ASSERT (elf_flags_init (ibfd)); */
18660 in_flags
= elf_elfheader (ibfd
)->e_flags
;
18661 out_flags
= elf_elfheader (obfd
)->e_flags
;
18663 /* In theory there is no reason why we couldn't handle this. However
18664 in practice it isn't even close to working and there is no real
18665 reason to want it. */
18666 if (EF_ARM_EABI_VERSION (in_flags
) >= EF_ARM_EABI_VER4
18667 && !(ibfd
->flags
& DYNAMIC
)
18668 && (in_flags
& EF_ARM_BE8
))
18670 _bfd_error_handler (_("error: %B is already in final BE8 format"),
18675 if (!elf_flags_init (obfd
))
18677 /* If the input is the default architecture and had the default
18678 flags then do not bother setting the flags for the output
18679 architecture, instead allow future merges to do this. If no
18680 future merges ever set these flags then they will retain their
18681 uninitialised values, which surprise surprise, correspond
18682 to the default values. */
18683 if (bfd_get_arch_info (ibfd
)->the_default
18684 && elf_elfheader (ibfd
)->e_flags
== 0)
18687 elf_flags_init (obfd
) = TRUE
;
18688 elf_elfheader (obfd
)->e_flags
= in_flags
;
18690 if (bfd_get_arch (obfd
) == bfd_get_arch (ibfd
)
18691 && bfd_get_arch_info (obfd
)->the_default
)
18692 return bfd_set_arch_mach (obfd
, bfd_get_arch (ibfd
), bfd_get_mach (ibfd
));
18697 /* Determine what should happen if the input ARM architecture
18698 does not match the output ARM architecture. */
18699 if (! bfd_arm_merge_machines (ibfd
, obfd
))
18702 /* Identical flags must be compatible. */
18703 if (in_flags
== out_flags
)
18706 /* Check to see if the input BFD actually contains any sections. If
18707 not, its flags may not have been initialised either, but it
18708 cannot actually cause any incompatiblity. Do not short-circuit
18709 dynamic objects; their section list may be emptied by
18710 elf_link_add_object_symbols.
18712 Also check to see if there are no code sections in the input.
18713 In this case there is no need to check for code specific flags.
18714 XXX - do we need to worry about floating-point format compatability
18715 in data sections ? */
18716 if (!(ibfd
->flags
& DYNAMIC
))
18718 bfd_boolean null_input_bfd
= TRUE
;
18719 bfd_boolean only_data_sections
= TRUE
;
18721 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
18723 /* Ignore synthetic glue sections. */
18724 if (strcmp (sec
->name
, ".glue_7")
18725 && strcmp (sec
->name
, ".glue_7t"))
18727 if ((bfd_get_section_flags (ibfd
, sec
)
18728 & (SEC_LOAD
| SEC_CODE
| SEC_HAS_CONTENTS
))
18729 == (SEC_LOAD
| SEC_CODE
| SEC_HAS_CONTENTS
))
18730 only_data_sections
= FALSE
;
18732 null_input_bfd
= FALSE
;
18737 if (null_input_bfd
|| only_data_sections
)
18741 /* Complain about various flag mismatches. */
18742 if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags
),
18743 EF_ARM_EABI_VERSION (out_flags
)))
18746 (_("error: Source object %B has EABI version %d, but target %B has EABI version %d"),
18748 (in_flags
& EF_ARM_EABIMASK
) >> 24,
18749 (out_flags
& EF_ARM_EABIMASK
) >> 24);
18753 /* Not sure what needs to be checked for EABI versions >= 1. */
18754 /* VxWorks libraries do not use these flags. */
18755 if (get_elf_backend_data (obfd
) != &elf32_arm_vxworks_bed
18756 && get_elf_backend_data (ibfd
) != &elf32_arm_vxworks_bed
18757 && EF_ARM_EABI_VERSION (in_flags
) == EF_ARM_EABI_UNKNOWN
)
18759 if ((in_flags
& EF_ARM_APCS_26
) != (out_flags
& EF_ARM_APCS_26
))
18762 (_("error: %B is compiled for APCS-%d, whereas target %B uses APCS-%d"),
18764 in_flags
& EF_ARM_APCS_26
? 26 : 32,
18765 out_flags
& EF_ARM_APCS_26
? 26 : 32);
18766 flags_compatible
= FALSE
;
18769 if ((in_flags
& EF_ARM_APCS_FLOAT
) != (out_flags
& EF_ARM_APCS_FLOAT
))
18771 if (in_flags
& EF_ARM_APCS_FLOAT
)
18773 (_("error: %B passes floats in float registers, whereas %B passes them in integer registers"),
18777 (_("error: %B passes floats in integer registers, whereas %B passes them in float registers"),
18780 flags_compatible
= FALSE
;
18783 if ((in_flags
& EF_ARM_VFP_FLOAT
) != (out_flags
& EF_ARM_VFP_FLOAT
))
18785 if (in_flags
& EF_ARM_VFP_FLOAT
)
18787 (_("error: %B uses VFP instructions, whereas %B does not"),
18791 (_("error: %B uses FPA instructions, whereas %B does not"),
18794 flags_compatible
= FALSE
;
18797 if ((in_flags
& EF_ARM_MAVERICK_FLOAT
) != (out_flags
& EF_ARM_MAVERICK_FLOAT
))
18799 if (in_flags
& EF_ARM_MAVERICK_FLOAT
)
18801 (_("error: %B uses Maverick instructions, whereas %B does not"),
18805 (_("error: %B does not use Maverick instructions, whereas %B does"),
18808 flags_compatible
= FALSE
;
18811 #ifdef EF_ARM_SOFT_FLOAT
18812 if ((in_flags
& EF_ARM_SOFT_FLOAT
) != (out_flags
& EF_ARM_SOFT_FLOAT
))
18814 /* We can allow interworking between code that is VFP format
18815 layout, and uses either soft float or integer regs for
18816 passing floating point arguments and results. We already
18817 know that the APCS_FLOAT flags match; similarly for VFP
18819 if ((in_flags
& EF_ARM_APCS_FLOAT
) != 0
18820 || (in_flags
& EF_ARM_VFP_FLOAT
) == 0)
18822 if (in_flags
& EF_ARM_SOFT_FLOAT
)
18824 (_("error: %B uses software FP, whereas %B uses hardware FP"),
18828 (_("error: %B uses hardware FP, whereas %B uses software FP"),
18831 flags_compatible
= FALSE
;
18836 /* Interworking mismatch is only a warning. */
18837 if ((in_flags
& EF_ARM_INTERWORK
) != (out_flags
& EF_ARM_INTERWORK
))
18839 if (in_flags
& EF_ARM_INTERWORK
)
18842 (_("Warning: %B supports interworking, whereas %B does not"),
18848 (_("Warning: %B does not support interworking, whereas %B does"),
18854 return flags_compatible
;
18858 /* Symbian OS Targets. */
18860 #undef TARGET_LITTLE_SYM
18861 #define TARGET_LITTLE_SYM arm_elf32_symbian_le_vec
18862 #undef TARGET_LITTLE_NAME
18863 #define TARGET_LITTLE_NAME "elf32-littlearm-symbian"
18864 #undef TARGET_BIG_SYM
18865 #define TARGET_BIG_SYM arm_elf32_symbian_be_vec
18866 #undef TARGET_BIG_NAME
18867 #define TARGET_BIG_NAME "elf32-bigarm-symbian"
18869 /* Like elf32_arm_link_hash_table_create -- but overrides
18870 appropriately for Symbian OS. */
18872 static struct bfd_link_hash_table
*
18873 elf32_arm_symbian_link_hash_table_create (bfd
*abfd
)
18875 struct bfd_link_hash_table
*ret
;
18877 ret
= elf32_arm_link_hash_table_create (abfd
);
18880 struct elf32_arm_link_hash_table
*htab
18881 = (struct elf32_arm_link_hash_table
*)ret
;
18882 /* There is no PLT header for Symbian OS. */
18883 htab
->plt_header_size
= 0;
18884 /* The PLT entries are each one instruction and one word. */
18885 htab
->plt_entry_size
= 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry
);
18886 htab
->symbian_p
= 1;
18887 /* Symbian uses armv5t or above, so use_blx is always true. */
18889 htab
->root
.is_relocatable_executable
= 1;
18894 static const struct bfd_elf_special_section
18895 elf32_arm_symbian_special_sections
[] =
18897 /* In a BPABI executable, the dynamic linking sections do not go in
18898 the loadable read-only segment. The post-linker may wish to
18899 refer to these sections, but they are not part of the final
18901 { STRING_COMMA_LEN (".dynamic"), 0, SHT_DYNAMIC
, 0 },
18902 { STRING_COMMA_LEN (".dynstr"), 0, SHT_STRTAB
, 0 },
18903 { STRING_COMMA_LEN (".dynsym"), 0, SHT_DYNSYM
, 0 },
18904 { STRING_COMMA_LEN (".got"), 0, SHT_PROGBITS
, 0 },
18905 { STRING_COMMA_LEN (".hash"), 0, SHT_HASH
, 0 },
18906 /* These sections do not need to be writable as the SymbianOS
18907 postlinker will arrange things so that no dynamic relocation is
18909 { STRING_COMMA_LEN (".init_array"), 0, SHT_INIT_ARRAY
, SHF_ALLOC
},
18910 { STRING_COMMA_LEN (".fini_array"), 0, SHT_FINI_ARRAY
, SHF_ALLOC
},
18911 { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY
, SHF_ALLOC
},
18912 { NULL
, 0, 0, 0, 0 }
18916 elf32_arm_symbian_begin_write_processing (bfd
*abfd
,
18917 struct bfd_link_info
*link_info
)
18919 /* BPABI objects are never loaded directly by an OS kernel; they are
18920 processed by a postlinker first, into an OS-specific format. If
18921 the D_PAGED bit is set on the file, BFD will align segments on
18922 page boundaries, so that an OS can directly map the file. With
18923 BPABI objects, that just results in wasted space. In addition,
18924 because we clear the D_PAGED bit, map_sections_to_segments will
18925 recognize that the program headers should not be mapped into any
18926 loadable segment. */
18927 abfd
->flags
&= ~D_PAGED
;
18928 elf32_arm_begin_write_processing (abfd
, link_info
);
18932 elf32_arm_symbian_modify_segment_map (bfd
*abfd
,
18933 struct bfd_link_info
*info
)
18935 struct elf_segment_map
*m
;
18938 /* BPABI shared libraries and executables should have a PT_DYNAMIC
18939 segment. However, because the .dynamic section is not marked
18940 with SEC_LOAD, the generic ELF code will not create such a
18942 dynsec
= bfd_get_section_by_name (abfd
, ".dynamic");
18945 for (m
= elf_seg_map (abfd
); m
!= NULL
; m
= m
->next
)
18946 if (m
->p_type
== PT_DYNAMIC
)
18951 m
= _bfd_elf_make_dynamic_segment (abfd
, dynsec
);
18952 m
->next
= elf_seg_map (abfd
);
18953 elf_seg_map (abfd
) = m
;
18957 /* Also call the generic arm routine. */
18958 return elf32_arm_modify_segment_map (abfd
, info
);
18961 /* Return address for Ith PLT stub in section PLT, for relocation REL
18962 or (bfd_vma) -1 if it should not be included. */
18965 elf32_arm_symbian_plt_sym_val (bfd_vma i
, const asection
*plt
,
18966 const arelent
*rel ATTRIBUTE_UNUSED
)
18968 return plt
->vma
+ 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry
) * i
;
18972 #define elf32_bed elf32_arm_symbian_bed
18974 /* The dynamic sections are not allocated on SymbianOS; the postlinker
18975 will process them and then discard them. */
18976 #undef ELF_DYNAMIC_SEC_FLAGS
18977 #define ELF_DYNAMIC_SEC_FLAGS \
18978 (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED)
18980 #undef elf_backend_emit_relocs
18982 #undef bfd_elf32_bfd_link_hash_table_create
18983 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_symbian_link_hash_table_create
18984 #undef elf_backend_special_sections
18985 #define elf_backend_special_sections elf32_arm_symbian_special_sections
18986 #undef elf_backend_begin_write_processing
18987 #define elf_backend_begin_write_processing elf32_arm_symbian_begin_write_processing
18988 #undef elf_backend_final_write_processing
18989 #define elf_backend_final_write_processing elf32_arm_final_write_processing
18991 #undef elf_backend_modify_segment_map
18992 #define elf_backend_modify_segment_map elf32_arm_symbian_modify_segment_map
18994 /* There is no .got section for BPABI objects, and hence no header. */
18995 #undef elf_backend_got_header_size
18996 #define elf_backend_got_header_size 0
18998 /* Similarly, there is no .got.plt section. */
18999 #undef elf_backend_want_got_plt
19000 #define elf_backend_want_got_plt 0
19002 #undef elf_backend_plt_sym_val
19003 #define elf_backend_plt_sym_val elf32_arm_symbian_plt_sym_val
19005 #undef elf_backend_may_use_rel_p
19006 #define elf_backend_may_use_rel_p 1
19007 #undef elf_backend_may_use_rela_p
19008 #define elf_backend_may_use_rela_p 0
19009 #undef elf_backend_default_use_rela_p
19010 #define elf_backend_default_use_rela_p 0
19011 #undef elf_backend_want_plt_sym
19012 #define elf_backend_want_plt_sym 0
19013 #undef ELF_MAXPAGESIZE
19014 #define ELF_MAXPAGESIZE 0x8000
19016 #include "elf32-target.h"