/* 32-bit ELF support for ARM
   Copyright (C) 1998-2018 Free Software Foundation, Inc.

   This file is part of BFD, the Binary File Descriptor library.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
   MA 02110-1301, USA.  */
#include "bfd_stdint.h"
#include "libiberty.h"
#include "elf-vxworks.h"
/* Return the relocation section associated with NAME.  HTAB is the
   bfd's elf32_arm_link_hash_entry.  */
#define RELOC_SECTION(HTAB, NAME) \
  ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
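/* Example (illustrative, not from the original source): with a hash
   table whose use_rel field is non-zero, RELOC_SECTION (htab, ".text")
   expands to ".rel" ".text", i.e. ".rel.text" by C string-literal
   concatenation; otherwise it yields ".rela.text".  */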
/* Return size of a relocation entry.  HTAB is the bfd's
   elf32_arm_link_hash_entry.  */
#define RELOC_SIZE(HTAB) \
  ((HTAB)->use_rel \
   ? sizeof (Elf32_External_Rel) \
   : sizeof (Elf32_External_Rela))
/* Return function to swap relocations in.  HTAB is the bfd's
   elf32_arm_link_hash_entry.  */
#define SWAP_RELOC_IN(HTAB) \
  ((HTAB)->use_rel \
   ? bfd_elf32_swap_reloc_in \
   : bfd_elf32_swap_reloca_in)
/* Return function to swap relocations out.  HTAB is the bfd's
   elf32_arm_link_hash_entry.  */
#define SWAP_RELOC_OUT(HTAB) \
  ((HTAB)->use_rel \
   ? bfd_elf32_swap_reloc_out \
   : bfd_elf32_swap_reloca_out)
#define elf_info_to_howto		NULL
#define elf_info_to_howto_rel		elf32_arm_info_to_howto

#define ARM_ELF_ABI_VERSION		0
#define ARM_ELF_OS_ABI_VERSION		ELFOSABI_ARM
/* The Adjusted Place, as defined by AAELF.  */
#define Pa(X)  ((X) & 0xfffffffc)
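/* Example (illustrative, not from the original source): Pa (0x8006)
   is 0x8004 -- the two low bits are masked off, giving the place
   rounded down to a 32-bit word boundary.  */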
static bfd_boolean elf32_arm_write_section (bfd *output_bfd,
					    struct bfd_link_info *link_info,
					    asection *sec,
					    bfd_byte *contents);
/* Note: code such as elf32_arm_reloc_type_lookup expects to use e.g.
   R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO
   in that slot.  */

static reloc_howto_type elf32_arm_howto_table_1[] =
{
  HOWTO (R_ARM_NONE,		/* type */
	 3,			/* size (0 = byte, 1 = short, 2 = long) */
	 FALSE,			/* pc_relative */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_NONE",		/* name */
	 FALSE,			/* partial_inplace */
	 FALSE),		/* pcrel_offset */

  HOWTO (R_ARM_PC24,		/* type */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 TRUE,			/* pc_relative */
	 complain_overflow_signed,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_PC24",		/* name */
	 FALSE,			/* partial_inplace */
	 0x00ffffff,		/* src_mask */
	 0x00ffffff,		/* dst_mask */
	 TRUE),			/* pcrel_offset */

  /* 32 bit absolute */
  HOWTO (R_ARM_ABS32,		/* type */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 FALSE,			/* pc_relative */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_ABS32",		/* name */
	 FALSE,			/* partial_inplace */
	 0xffffffff,		/* src_mask */
	 0xffffffff,		/* dst_mask */
	 FALSE),		/* pcrel_offset */

  /* standard 32bit pc-relative reloc */
  HOWTO (R_ARM_REL32,		/* type */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 TRUE,			/* pc_relative */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_REL32",		/* name */
	 FALSE,			/* partial_inplace */
	 0xffffffff,		/* src_mask */
	 0xffffffff,		/* dst_mask */
	 TRUE),			/* pcrel_offset */

  /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */
  HOWTO (R_ARM_LDR_PC_G0,	/* type */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 TRUE,			/* pc_relative */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_LDR_PC_G0",	/* name */
	 FALSE,			/* partial_inplace */
	 0xffffffff,		/* src_mask */
	 0xffffffff,		/* dst_mask */
	 TRUE),			/* pcrel_offset */
153 /* 16 bit absolute */
154 HOWTO (R_ARM_ABS16
, /* type */
156 1, /* size (0 = byte, 1 = short, 2 = long) */
158 FALSE
, /* pc_relative */
160 complain_overflow_bitfield
,/* complain_on_overflow */
161 bfd_elf_generic_reloc
, /* special_function */
162 "R_ARM_ABS16", /* name */
163 FALSE
, /* partial_inplace */
164 0x0000ffff, /* src_mask */
165 0x0000ffff, /* dst_mask */
166 FALSE
), /* pcrel_offset */
168 /* 12 bit absolute */
169 HOWTO (R_ARM_ABS12
, /* type */
171 2, /* size (0 = byte, 1 = short, 2 = long) */
173 FALSE
, /* pc_relative */
175 complain_overflow_bitfield
,/* complain_on_overflow */
176 bfd_elf_generic_reloc
, /* special_function */
177 "R_ARM_ABS12", /* name */
178 FALSE
, /* partial_inplace */
179 0x00000fff, /* src_mask */
180 0x00000fff, /* dst_mask */
181 FALSE
), /* pcrel_offset */
183 HOWTO (R_ARM_THM_ABS5
, /* type */
185 1, /* size (0 = byte, 1 = short, 2 = long) */
187 FALSE
, /* pc_relative */
189 complain_overflow_bitfield
,/* complain_on_overflow */
190 bfd_elf_generic_reloc
, /* special_function */
191 "R_ARM_THM_ABS5", /* name */
192 FALSE
, /* partial_inplace */
193 0x000007e0, /* src_mask */
194 0x000007e0, /* dst_mask */
195 FALSE
), /* pcrel_offset */
198 HOWTO (R_ARM_ABS8
, /* type */
200 0, /* size (0 = byte, 1 = short, 2 = long) */
202 FALSE
, /* pc_relative */
204 complain_overflow_bitfield
,/* complain_on_overflow */
205 bfd_elf_generic_reloc
, /* special_function */
206 "R_ARM_ABS8", /* name */
207 FALSE
, /* partial_inplace */
208 0x000000ff, /* src_mask */
209 0x000000ff, /* dst_mask */
210 FALSE
), /* pcrel_offset */
212 HOWTO (R_ARM_SBREL32
, /* type */
214 2, /* size (0 = byte, 1 = short, 2 = long) */
216 FALSE
, /* pc_relative */
218 complain_overflow_dont
,/* complain_on_overflow */
219 bfd_elf_generic_reloc
, /* special_function */
220 "R_ARM_SBREL32", /* name */
221 FALSE
, /* partial_inplace */
222 0xffffffff, /* src_mask */
223 0xffffffff, /* dst_mask */
224 FALSE
), /* pcrel_offset */
226 HOWTO (R_ARM_THM_CALL
, /* type */
228 2, /* size (0 = byte, 1 = short, 2 = long) */
230 TRUE
, /* pc_relative */
232 complain_overflow_signed
,/* complain_on_overflow */
233 bfd_elf_generic_reloc
, /* special_function */
234 "R_ARM_THM_CALL", /* name */
235 FALSE
, /* partial_inplace */
236 0x07ff2fff, /* src_mask */
237 0x07ff2fff, /* dst_mask */
238 TRUE
), /* pcrel_offset */
240 HOWTO (R_ARM_THM_PC8
, /* type */
242 1, /* size (0 = byte, 1 = short, 2 = long) */
244 TRUE
, /* pc_relative */
246 complain_overflow_signed
,/* complain_on_overflow */
247 bfd_elf_generic_reloc
, /* special_function */
248 "R_ARM_THM_PC8", /* name */
249 FALSE
, /* partial_inplace */
250 0x000000ff, /* src_mask */
251 0x000000ff, /* dst_mask */
252 TRUE
), /* pcrel_offset */
254 HOWTO (R_ARM_BREL_ADJ
, /* type */
256 1, /* size (0 = byte, 1 = short, 2 = long) */
258 FALSE
, /* pc_relative */
260 complain_overflow_signed
,/* complain_on_overflow */
261 bfd_elf_generic_reloc
, /* special_function */
262 "R_ARM_BREL_ADJ", /* name */
263 FALSE
, /* partial_inplace */
264 0xffffffff, /* src_mask */
265 0xffffffff, /* dst_mask */
266 FALSE
), /* pcrel_offset */
268 HOWTO (R_ARM_TLS_DESC
, /* type */
270 2, /* size (0 = byte, 1 = short, 2 = long) */
272 FALSE
, /* pc_relative */
274 complain_overflow_bitfield
,/* complain_on_overflow */
275 bfd_elf_generic_reloc
, /* special_function */
276 "R_ARM_TLS_DESC", /* name */
277 FALSE
, /* partial_inplace */
278 0xffffffff, /* src_mask */
279 0xffffffff, /* dst_mask */
280 FALSE
), /* pcrel_offset */
282 HOWTO (R_ARM_THM_SWI8
, /* type */
284 0, /* size (0 = byte, 1 = short, 2 = long) */
286 FALSE
, /* pc_relative */
288 complain_overflow_signed
,/* complain_on_overflow */
289 bfd_elf_generic_reloc
, /* special_function */
290 "R_ARM_SWI8", /* name */
291 FALSE
, /* partial_inplace */
292 0x00000000, /* src_mask */
293 0x00000000, /* dst_mask */
294 FALSE
), /* pcrel_offset */
  /* BLX instruction for the ARM.  */
297 HOWTO (R_ARM_XPC25
, /* type */
299 2, /* size (0 = byte, 1 = short, 2 = long) */
301 TRUE
, /* pc_relative */
303 complain_overflow_signed
,/* complain_on_overflow */
304 bfd_elf_generic_reloc
, /* special_function */
305 "R_ARM_XPC25", /* name */
306 FALSE
, /* partial_inplace */
307 0x00ffffff, /* src_mask */
308 0x00ffffff, /* dst_mask */
309 TRUE
), /* pcrel_offset */
  /* BLX instruction for the Thumb.  */
312 HOWTO (R_ARM_THM_XPC22
, /* type */
314 2, /* size (0 = byte, 1 = short, 2 = long) */
316 TRUE
, /* pc_relative */
318 complain_overflow_signed
,/* complain_on_overflow */
319 bfd_elf_generic_reloc
, /* special_function */
320 "R_ARM_THM_XPC22", /* name */
321 FALSE
, /* partial_inplace */
322 0x07ff2fff, /* src_mask */
323 0x07ff2fff, /* dst_mask */
324 TRUE
), /* pcrel_offset */
  /* Dynamic TLS relocations.  */
328 HOWTO (R_ARM_TLS_DTPMOD32
, /* type */
330 2, /* size (0 = byte, 1 = short, 2 = long) */
332 FALSE
, /* pc_relative */
334 complain_overflow_bitfield
,/* complain_on_overflow */
335 bfd_elf_generic_reloc
, /* special_function */
336 "R_ARM_TLS_DTPMOD32", /* name */
337 TRUE
, /* partial_inplace */
338 0xffffffff, /* src_mask */
339 0xffffffff, /* dst_mask */
340 FALSE
), /* pcrel_offset */
342 HOWTO (R_ARM_TLS_DTPOFF32
, /* type */
344 2, /* size (0 = byte, 1 = short, 2 = long) */
346 FALSE
, /* pc_relative */
348 complain_overflow_bitfield
,/* complain_on_overflow */
349 bfd_elf_generic_reloc
, /* special_function */
350 "R_ARM_TLS_DTPOFF32", /* name */
351 TRUE
, /* partial_inplace */
352 0xffffffff, /* src_mask */
353 0xffffffff, /* dst_mask */
354 FALSE
), /* pcrel_offset */
356 HOWTO (R_ARM_TLS_TPOFF32
, /* type */
358 2, /* size (0 = byte, 1 = short, 2 = long) */
360 FALSE
, /* pc_relative */
362 complain_overflow_bitfield
,/* complain_on_overflow */
363 bfd_elf_generic_reloc
, /* special_function */
364 "R_ARM_TLS_TPOFF32", /* name */
365 TRUE
, /* partial_inplace */
366 0xffffffff, /* src_mask */
367 0xffffffff, /* dst_mask */
368 FALSE
), /* pcrel_offset */
  /* Relocs used in ARM Linux */
372 HOWTO (R_ARM_COPY
, /* type */
374 2, /* size (0 = byte, 1 = short, 2 = long) */
376 FALSE
, /* pc_relative */
378 complain_overflow_bitfield
,/* complain_on_overflow */
379 bfd_elf_generic_reloc
, /* special_function */
380 "R_ARM_COPY", /* name */
381 TRUE
, /* partial_inplace */
382 0xffffffff, /* src_mask */
383 0xffffffff, /* dst_mask */
384 FALSE
), /* pcrel_offset */
386 HOWTO (R_ARM_GLOB_DAT
, /* type */
388 2, /* size (0 = byte, 1 = short, 2 = long) */
390 FALSE
, /* pc_relative */
392 complain_overflow_bitfield
,/* complain_on_overflow */
393 bfd_elf_generic_reloc
, /* special_function */
394 "R_ARM_GLOB_DAT", /* name */
395 TRUE
, /* partial_inplace */
396 0xffffffff, /* src_mask */
397 0xffffffff, /* dst_mask */
398 FALSE
), /* pcrel_offset */
400 HOWTO (R_ARM_JUMP_SLOT
, /* type */
402 2, /* size (0 = byte, 1 = short, 2 = long) */
404 FALSE
, /* pc_relative */
406 complain_overflow_bitfield
,/* complain_on_overflow */
407 bfd_elf_generic_reloc
, /* special_function */
408 "R_ARM_JUMP_SLOT", /* name */
409 TRUE
, /* partial_inplace */
410 0xffffffff, /* src_mask */
411 0xffffffff, /* dst_mask */
412 FALSE
), /* pcrel_offset */
414 HOWTO (R_ARM_RELATIVE
, /* type */
416 2, /* size (0 = byte, 1 = short, 2 = long) */
418 FALSE
, /* pc_relative */
420 complain_overflow_bitfield
,/* complain_on_overflow */
421 bfd_elf_generic_reloc
, /* special_function */
422 "R_ARM_RELATIVE", /* name */
423 TRUE
, /* partial_inplace */
424 0xffffffff, /* src_mask */
425 0xffffffff, /* dst_mask */
426 FALSE
), /* pcrel_offset */
428 HOWTO (R_ARM_GOTOFF32
, /* type */
430 2, /* size (0 = byte, 1 = short, 2 = long) */
432 FALSE
, /* pc_relative */
434 complain_overflow_bitfield
,/* complain_on_overflow */
435 bfd_elf_generic_reloc
, /* special_function */
436 "R_ARM_GOTOFF32", /* name */
437 TRUE
, /* partial_inplace */
438 0xffffffff, /* src_mask */
439 0xffffffff, /* dst_mask */
440 FALSE
), /* pcrel_offset */
442 HOWTO (R_ARM_GOTPC
, /* type */
444 2, /* size (0 = byte, 1 = short, 2 = long) */
446 TRUE
, /* pc_relative */
448 complain_overflow_bitfield
,/* complain_on_overflow */
449 bfd_elf_generic_reloc
, /* special_function */
450 "R_ARM_GOTPC", /* name */
451 TRUE
, /* partial_inplace */
452 0xffffffff, /* src_mask */
453 0xffffffff, /* dst_mask */
454 TRUE
), /* pcrel_offset */
456 HOWTO (R_ARM_GOT32
, /* type */
458 2, /* size (0 = byte, 1 = short, 2 = long) */
460 FALSE
, /* pc_relative */
462 complain_overflow_bitfield
,/* complain_on_overflow */
463 bfd_elf_generic_reloc
, /* special_function */
464 "R_ARM_GOT32", /* name */
465 TRUE
, /* partial_inplace */
466 0xffffffff, /* src_mask */
467 0xffffffff, /* dst_mask */
468 FALSE
), /* pcrel_offset */
470 HOWTO (R_ARM_PLT32
, /* type */
472 2, /* size (0 = byte, 1 = short, 2 = long) */
474 TRUE
, /* pc_relative */
476 complain_overflow_bitfield
,/* complain_on_overflow */
477 bfd_elf_generic_reloc
, /* special_function */
478 "R_ARM_PLT32", /* name */
479 FALSE
, /* partial_inplace */
480 0x00ffffff, /* src_mask */
481 0x00ffffff, /* dst_mask */
482 TRUE
), /* pcrel_offset */
484 HOWTO (R_ARM_CALL
, /* type */
486 2, /* size (0 = byte, 1 = short, 2 = long) */
488 TRUE
, /* pc_relative */
490 complain_overflow_signed
,/* complain_on_overflow */
491 bfd_elf_generic_reloc
, /* special_function */
492 "R_ARM_CALL", /* name */
493 FALSE
, /* partial_inplace */
494 0x00ffffff, /* src_mask */
495 0x00ffffff, /* dst_mask */
496 TRUE
), /* pcrel_offset */
498 HOWTO (R_ARM_JUMP24
, /* type */
500 2, /* size (0 = byte, 1 = short, 2 = long) */
502 TRUE
, /* pc_relative */
504 complain_overflow_signed
,/* complain_on_overflow */
505 bfd_elf_generic_reloc
, /* special_function */
506 "R_ARM_JUMP24", /* name */
507 FALSE
, /* partial_inplace */
508 0x00ffffff, /* src_mask */
509 0x00ffffff, /* dst_mask */
510 TRUE
), /* pcrel_offset */
512 HOWTO (R_ARM_THM_JUMP24
, /* type */
514 2, /* size (0 = byte, 1 = short, 2 = long) */
516 TRUE
, /* pc_relative */
518 complain_overflow_signed
,/* complain_on_overflow */
519 bfd_elf_generic_reloc
, /* special_function */
520 "R_ARM_THM_JUMP24", /* name */
521 FALSE
, /* partial_inplace */
522 0x07ff2fff, /* src_mask */
523 0x07ff2fff, /* dst_mask */
524 TRUE
), /* pcrel_offset */
526 HOWTO (R_ARM_BASE_ABS
, /* type */
528 2, /* size (0 = byte, 1 = short, 2 = long) */
530 FALSE
, /* pc_relative */
532 complain_overflow_dont
,/* complain_on_overflow */
533 bfd_elf_generic_reloc
, /* special_function */
534 "R_ARM_BASE_ABS", /* name */
535 FALSE
, /* partial_inplace */
536 0xffffffff, /* src_mask */
537 0xffffffff, /* dst_mask */
538 FALSE
), /* pcrel_offset */
540 HOWTO (R_ARM_ALU_PCREL7_0
, /* type */
542 2, /* size (0 = byte, 1 = short, 2 = long) */
544 TRUE
, /* pc_relative */
546 complain_overflow_dont
,/* complain_on_overflow */
547 bfd_elf_generic_reloc
, /* special_function */
548 "R_ARM_ALU_PCREL_7_0", /* name */
549 FALSE
, /* partial_inplace */
550 0x00000fff, /* src_mask */
551 0x00000fff, /* dst_mask */
552 TRUE
), /* pcrel_offset */
554 HOWTO (R_ARM_ALU_PCREL15_8
, /* type */
556 2, /* size (0 = byte, 1 = short, 2 = long) */
558 TRUE
, /* pc_relative */
560 complain_overflow_dont
,/* complain_on_overflow */
561 bfd_elf_generic_reloc
, /* special_function */
562 "R_ARM_ALU_PCREL_15_8",/* name */
563 FALSE
, /* partial_inplace */
564 0x00000fff, /* src_mask */
565 0x00000fff, /* dst_mask */
566 TRUE
), /* pcrel_offset */
568 HOWTO (R_ARM_ALU_PCREL23_15
, /* type */
570 2, /* size (0 = byte, 1 = short, 2 = long) */
572 TRUE
, /* pc_relative */
574 complain_overflow_dont
,/* complain_on_overflow */
575 bfd_elf_generic_reloc
, /* special_function */
576 "R_ARM_ALU_PCREL_23_15",/* name */
577 FALSE
, /* partial_inplace */
578 0x00000fff, /* src_mask */
579 0x00000fff, /* dst_mask */
580 TRUE
), /* pcrel_offset */
582 HOWTO (R_ARM_LDR_SBREL_11_0
, /* type */
584 2, /* size (0 = byte, 1 = short, 2 = long) */
586 FALSE
, /* pc_relative */
588 complain_overflow_dont
,/* complain_on_overflow */
589 bfd_elf_generic_reloc
, /* special_function */
590 "R_ARM_LDR_SBREL_11_0",/* name */
591 FALSE
, /* partial_inplace */
592 0x00000fff, /* src_mask */
593 0x00000fff, /* dst_mask */
594 FALSE
), /* pcrel_offset */
596 HOWTO (R_ARM_ALU_SBREL_19_12
, /* type */
598 2, /* size (0 = byte, 1 = short, 2 = long) */
600 FALSE
, /* pc_relative */
602 complain_overflow_dont
,/* complain_on_overflow */
603 bfd_elf_generic_reloc
, /* special_function */
604 "R_ARM_ALU_SBREL_19_12",/* name */
605 FALSE
, /* partial_inplace */
606 0x000ff000, /* src_mask */
607 0x000ff000, /* dst_mask */
608 FALSE
), /* pcrel_offset */
610 HOWTO (R_ARM_ALU_SBREL_27_20
, /* type */
612 2, /* size (0 = byte, 1 = short, 2 = long) */
614 FALSE
, /* pc_relative */
616 complain_overflow_dont
,/* complain_on_overflow */
617 bfd_elf_generic_reloc
, /* special_function */
618 "R_ARM_ALU_SBREL_27_20",/* name */
619 FALSE
, /* partial_inplace */
620 0x0ff00000, /* src_mask */
621 0x0ff00000, /* dst_mask */
622 FALSE
), /* pcrel_offset */
624 HOWTO (R_ARM_TARGET1
, /* type */
626 2, /* size (0 = byte, 1 = short, 2 = long) */
628 FALSE
, /* pc_relative */
630 complain_overflow_dont
,/* complain_on_overflow */
631 bfd_elf_generic_reloc
, /* special_function */
632 "R_ARM_TARGET1", /* name */
633 FALSE
, /* partial_inplace */
634 0xffffffff, /* src_mask */
635 0xffffffff, /* dst_mask */
636 FALSE
), /* pcrel_offset */
638 HOWTO (R_ARM_ROSEGREL32
, /* type */
640 2, /* size (0 = byte, 1 = short, 2 = long) */
642 FALSE
, /* pc_relative */
644 complain_overflow_dont
,/* complain_on_overflow */
645 bfd_elf_generic_reloc
, /* special_function */
646 "R_ARM_ROSEGREL32", /* name */
647 FALSE
, /* partial_inplace */
648 0xffffffff, /* src_mask */
649 0xffffffff, /* dst_mask */
650 FALSE
), /* pcrel_offset */
652 HOWTO (R_ARM_V4BX
, /* type */
654 2, /* size (0 = byte, 1 = short, 2 = long) */
656 FALSE
, /* pc_relative */
658 complain_overflow_dont
,/* complain_on_overflow */
659 bfd_elf_generic_reloc
, /* special_function */
660 "R_ARM_V4BX", /* name */
661 FALSE
, /* partial_inplace */
662 0xffffffff, /* src_mask */
663 0xffffffff, /* dst_mask */
664 FALSE
), /* pcrel_offset */
666 HOWTO (R_ARM_TARGET2
, /* type */
668 2, /* size (0 = byte, 1 = short, 2 = long) */
670 FALSE
, /* pc_relative */
672 complain_overflow_signed
,/* complain_on_overflow */
673 bfd_elf_generic_reloc
, /* special_function */
674 "R_ARM_TARGET2", /* name */
675 FALSE
, /* partial_inplace */
676 0xffffffff, /* src_mask */
677 0xffffffff, /* dst_mask */
678 TRUE
), /* pcrel_offset */
680 HOWTO (R_ARM_PREL31
, /* type */
682 2, /* size (0 = byte, 1 = short, 2 = long) */
684 TRUE
, /* pc_relative */
686 complain_overflow_signed
,/* complain_on_overflow */
687 bfd_elf_generic_reloc
, /* special_function */
688 "R_ARM_PREL31", /* name */
689 FALSE
, /* partial_inplace */
690 0x7fffffff, /* src_mask */
691 0x7fffffff, /* dst_mask */
692 TRUE
), /* pcrel_offset */
694 HOWTO (R_ARM_MOVW_ABS_NC
, /* type */
696 2, /* size (0 = byte, 1 = short, 2 = long) */
698 FALSE
, /* pc_relative */
700 complain_overflow_dont
,/* complain_on_overflow */
701 bfd_elf_generic_reloc
, /* special_function */
702 "R_ARM_MOVW_ABS_NC", /* name */
703 FALSE
, /* partial_inplace */
704 0x000f0fff, /* src_mask */
705 0x000f0fff, /* dst_mask */
706 FALSE
), /* pcrel_offset */
708 HOWTO (R_ARM_MOVT_ABS
, /* type */
710 2, /* size (0 = byte, 1 = short, 2 = long) */
712 FALSE
, /* pc_relative */
714 complain_overflow_bitfield
,/* complain_on_overflow */
715 bfd_elf_generic_reloc
, /* special_function */
716 "R_ARM_MOVT_ABS", /* name */
717 FALSE
, /* partial_inplace */
718 0x000f0fff, /* src_mask */
719 0x000f0fff, /* dst_mask */
720 FALSE
), /* pcrel_offset */
722 HOWTO (R_ARM_MOVW_PREL_NC
, /* type */
724 2, /* size (0 = byte, 1 = short, 2 = long) */
726 TRUE
, /* pc_relative */
728 complain_overflow_dont
,/* complain_on_overflow */
729 bfd_elf_generic_reloc
, /* special_function */
730 "R_ARM_MOVW_PREL_NC", /* name */
731 FALSE
, /* partial_inplace */
732 0x000f0fff, /* src_mask */
733 0x000f0fff, /* dst_mask */
734 TRUE
), /* pcrel_offset */
736 HOWTO (R_ARM_MOVT_PREL
, /* type */
738 2, /* size (0 = byte, 1 = short, 2 = long) */
740 TRUE
, /* pc_relative */
742 complain_overflow_bitfield
,/* complain_on_overflow */
743 bfd_elf_generic_reloc
, /* special_function */
744 "R_ARM_MOVT_PREL", /* name */
745 FALSE
, /* partial_inplace */
746 0x000f0fff, /* src_mask */
747 0x000f0fff, /* dst_mask */
748 TRUE
), /* pcrel_offset */
750 HOWTO (R_ARM_THM_MOVW_ABS_NC
, /* type */
752 2, /* size (0 = byte, 1 = short, 2 = long) */
754 FALSE
, /* pc_relative */
756 complain_overflow_dont
,/* complain_on_overflow */
757 bfd_elf_generic_reloc
, /* special_function */
758 "R_ARM_THM_MOVW_ABS_NC",/* name */
759 FALSE
, /* partial_inplace */
760 0x040f70ff, /* src_mask */
761 0x040f70ff, /* dst_mask */
762 FALSE
), /* pcrel_offset */
764 HOWTO (R_ARM_THM_MOVT_ABS
, /* type */
766 2, /* size (0 = byte, 1 = short, 2 = long) */
768 FALSE
, /* pc_relative */
770 complain_overflow_bitfield
,/* complain_on_overflow */
771 bfd_elf_generic_reloc
, /* special_function */
772 "R_ARM_THM_MOVT_ABS", /* name */
773 FALSE
, /* partial_inplace */
774 0x040f70ff, /* src_mask */
775 0x040f70ff, /* dst_mask */
776 FALSE
), /* pcrel_offset */
778 HOWTO (R_ARM_THM_MOVW_PREL_NC
,/* type */
780 2, /* size (0 = byte, 1 = short, 2 = long) */
782 TRUE
, /* pc_relative */
784 complain_overflow_dont
,/* complain_on_overflow */
785 bfd_elf_generic_reloc
, /* special_function */
786 "R_ARM_THM_MOVW_PREL_NC",/* name */
787 FALSE
, /* partial_inplace */
788 0x040f70ff, /* src_mask */
789 0x040f70ff, /* dst_mask */
790 TRUE
), /* pcrel_offset */
792 HOWTO (R_ARM_THM_MOVT_PREL
, /* type */
794 2, /* size (0 = byte, 1 = short, 2 = long) */
796 TRUE
, /* pc_relative */
798 complain_overflow_bitfield
,/* complain_on_overflow */
799 bfd_elf_generic_reloc
, /* special_function */
800 "R_ARM_THM_MOVT_PREL", /* name */
801 FALSE
, /* partial_inplace */
802 0x040f70ff, /* src_mask */
803 0x040f70ff, /* dst_mask */
804 TRUE
), /* pcrel_offset */
806 HOWTO (R_ARM_THM_JUMP19
, /* type */
808 2, /* size (0 = byte, 1 = short, 2 = long) */
810 TRUE
, /* pc_relative */
812 complain_overflow_signed
,/* complain_on_overflow */
813 bfd_elf_generic_reloc
, /* special_function */
814 "R_ARM_THM_JUMP19", /* name */
815 FALSE
, /* partial_inplace */
816 0x043f2fff, /* src_mask */
817 0x043f2fff, /* dst_mask */
818 TRUE
), /* pcrel_offset */
820 HOWTO (R_ARM_THM_JUMP6
, /* type */
822 1, /* size (0 = byte, 1 = short, 2 = long) */
824 TRUE
, /* pc_relative */
826 complain_overflow_unsigned
,/* complain_on_overflow */
827 bfd_elf_generic_reloc
, /* special_function */
828 "R_ARM_THM_JUMP6", /* name */
829 FALSE
, /* partial_inplace */
830 0x02f8, /* src_mask */
831 0x02f8, /* dst_mask */
832 TRUE
), /* pcrel_offset */
  /* These are declared as 13-bit signed relocations because we can
     address -4095 .. 4095 (base) by altering ADDW to SUBW or vice
     versa.  */
837 HOWTO (R_ARM_THM_ALU_PREL_11_0
,/* type */
839 2, /* size (0 = byte, 1 = short, 2 = long) */
841 TRUE
, /* pc_relative */
843 complain_overflow_dont
,/* complain_on_overflow */
844 bfd_elf_generic_reloc
, /* special_function */
845 "R_ARM_THM_ALU_PREL_11_0",/* name */
846 FALSE
, /* partial_inplace */
847 0xffffffff, /* src_mask */
848 0xffffffff, /* dst_mask */
849 TRUE
), /* pcrel_offset */
851 HOWTO (R_ARM_THM_PC12
, /* type */
853 2, /* size (0 = byte, 1 = short, 2 = long) */
855 TRUE
, /* pc_relative */
857 complain_overflow_dont
,/* complain_on_overflow */
858 bfd_elf_generic_reloc
, /* special_function */
859 "R_ARM_THM_PC12", /* name */
860 FALSE
, /* partial_inplace */
861 0xffffffff, /* src_mask */
862 0xffffffff, /* dst_mask */
863 TRUE
), /* pcrel_offset */
865 HOWTO (R_ARM_ABS32_NOI
, /* type */
867 2, /* size (0 = byte, 1 = short, 2 = long) */
869 FALSE
, /* pc_relative */
871 complain_overflow_dont
,/* complain_on_overflow */
872 bfd_elf_generic_reloc
, /* special_function */
873 "R_ARM_ABS32_NOI", /* name */
874 FALSE
, /* partial_inplace */
875 0xffffffff, /* src_mask */
876 0xffffffff, /* dst_mask */
877 FALSE
), /* pcrel_offset */
879 HOWTO (R_ARM_REL32_NOI
, /* type */
881 2, /* size (0 = byte, 1 = short, 2 = long) */
883 TRUE
, /* pc_relative */
885 complain_overflow_dont
,/* complain_on_overflow */
886 bfd_elf_generic_reloc
, /* special_function */
887 "R_ARM_REL32_NOI", /* name */
888 FALSE
, /* partial_inplace */
889 0xffffffff, /* src_mask */
890 0xffffffff, /* dst_mask */
891 FALSE
), /* pcrel_offset */
  /* Group relocations.  */
895 HOWTO (R_ARM_ALU_PC_G0_NC
, /* type */
897 2, /* size (0 = byte, 1 = short, 2 = long) */
899 TRUE
, /* pc_relative */
901 complain_overflow_dont
,/* complain_on_overflow */
902 bfd_elf_generic_reloc
, /* special_function */
903 "R_ARM_ALU_PC_G0_NC", /* name */
904 FALSE
, /* partial_inplace */
905 0xffffffff, /* src_mask */
906 0xffffffff, /* dst_mask */
907 TRUE
), /* pcrel_offset */
909 HOWTO (R_ARM_ALU_PC_G0
, /* type */
911 2, /* size (0 = byte, 1 = short, 2 = long) */
913 TRUE
, /* pc_relative */
915 complain_overflow_dont
,/* complain_on_overflow */
916 bfd_elf_generic_reloc
, /* special_function */
917 "R_ARM_ALU_PC_G0", /* name */
918 FALSE
, /* partial_inplace */
919 0xffffffff, /* src_mask */
920 0xffffffff, /* dst_mask */
921 TRUE
), /* pcrel_offset */
923 HOWTO (R_ARM_ALU_PC_G1_NC
, /* type */
925 2, /* size (0 = byte, 1 = short, 2 = long) */
927 TRUE
, /* pc_relative */
929 complain_overflow_dont
,/* complain_on_overflow */
930 bfd_elf_generic_reloc
, /* special_function */
931 "R_ARM_ALU_PC_G1_NC", /* name */
932 FALSE
, /* partial_inplace */
933 0xffffffff, /* src_mask */
934 0xffffffff, /* dst_mask */
935 TRUE
), /* pcrel_offset */
937 HOWTO (R_ARM_ALU_PC_G1
, /* type */
939 2, /* size (0 = byte, 1 = short, 2 = long) */
941 TRUE
, /* pc_relative */
943 complain_overflow_dont
,/* complain_on_overflow */
944 bfd_elf_generic_reloc
, /* special_function */
945 "R_ARM_ALU_PC_G1", /* name */
946 FALSE
, /* partial_inplace */
947 0xffffffff, /* src_mask */
948 0xffffffff, /* dst_mask */
949 TRUE
), /* pcrel_offset */
951 HOWTO (R_ARM_ALU_PC_G2
, /* type */
953 2, /* size (0 = byte, 1 = short, 2 = long) */
955 TRUE
, /* pc_relative */
957 complain_overflow_dont
,/* complain_on_overflow */
958 bfd_elf_generic_reloc
, /* special_function */
959 "R_ARM_ALU_PC_G2", /* name */
960 FALSE
, /* partial_inplace */
961 0xffffffff, /* src_mask */
962 0xffffffff, /* dst_mask */
963 TRUE
), /* pcrel_offset */
965 HOWTO (R_ARM_LDR_PC_G1
, /* type */
967 2, /* size (0 = byte, 1 = short, 2 = long) */
969 TRUE
, /* pc_relative */
971 complain_overflow_dont
,/* complain_on_overflow */
972 bfd_elf_generic_reloc
, /* special_function */
973 "R_ARM_LDR_PC_G1", /* name */
974 FALSE
, /* partial_inplace */
975 0xffffffff, /* src_mask */
976 0xffffffff, /* dst_mask */
977 TRUE
), /* pcrel_offset */
979 HOWTO (R_ARM_LDR_PC_G2
, /* type */
981 2, /* size (0 = byte, 1 = short, 2 = long) */
983 TRUE
, /* pc_relative */
985 complain_overflow_dont
,/* complain_on_overflow */
986 bfd_elf_generic_reloc
, /* special_function */
987 "R_ARM_LDR_PC_G2", /* name */
988 FALSE
, /* partial_inplace */
989 0xffffffff, /* src_mask */
990 0xffffffff, /* dst_mask */
991 TRUE
), /* pcrel_offset */
993 HOWTO (R_ARM_LDRS_PC_G0
, /* type */
995 2, /* size (0 = byte, 1 = short, 2 = long) */
997 TRUE
, /* pc_relative */
999 complain_overflow_dont
,/* complain_on_overflow */
1000 bfd_elf_generic_reloc
, /* special_function */
1001 "R_ARM_LDRS_PC_G0", /* name */
1002 FALSE
, /* partial_inplace */
1003 0xffffffff, /* src_mask */
1004 0xffffffff, /* dst_mask */
1005 TRUE
), /* pcrel_offset */
1007 HOWTO (R_ARM_LDRS_PC_G1
, /* type */
1009 2, /* size (0 = byte, 1 = short, 2 = long) */
1011 TRUE
, /* pc_relative */
1013 complain_overflow_dont
,/* complain_on_overflow */
1014 bfd_elf_generic_reloc
, /* special_function */
1015 "R_ARM_LDRS_PC_G1", /* name */
1016 FALSE
, /* partial_inplace */
1017 0xffffffff, /* src_mask */
1018 0xffffffff, /* dst_mask */
1019 TRUE
), /* pcrel_offset */
1021 HOWTO (R_ARM_LDRS_PC_G2
, /* type */
1023 2, /* size (0 = byte, 1 = short, 2 = long) */
1025 TRUE
, /* pc_relative */
1027 complain_overflow_dont
,/* complain_on_overflow */
1028 bfd_elf_generic_reloc
, /* special_function */
1029 "R_ARM_LDRS_PC_G2", /* name */
1030 FALSE
, /* partial_inplace */
1031 0xffffffff, /* src_mask */
1032 0xffffffff, /* dst_mask */
1033 TRUE
), /* pcrel_offset */
1035 HOWTO (R_ARM_LDC_PC_G0
, /* type */
1037 2, /* size (0 = byte, 1 = short, 2 = long) */
1039 TRUE
, /* pc_relative */
1041 complain_overflow_dont
,/* complain_on_overflow */
1042 bfd_elf_generic_reloc
, /* special_function */
1043 "R_ARM_LDC_PC_G0", /* name */
1044 FALSE
, /* partial_inplace */
1045 0xffffffff, /* src_mask */
1046 0xffffffff, /* dst_mask */
1047 TRUE
), /* pcrel_offset */
1049 HOWTO (R_ARM_LDC_PC_G1
, /* type */
1051 2, /* size (0 = byte, 1 = short, 2 = long) */
1053 TRUE
, /* pc_relative */
1055 complain_overflow_dont
,/* complain_on_overflow */
1056 bfd_elf_generic_reloc
, /* special_function */
1057 "R_ARM_LDC_PC_G1", /* name */
1058 FALSE
, /* partial_inplace */
1059 0xffffffff, /* src_mask */
1060 0xffffffff, /* dst_mask */
1061 TRUE
), /* pcrel_offset */
1063 HOWTO (R_ARM_LDC_PC_G2
, /* type */
1065 2, /* size (0 = byte, 1 = short, 2 = long) */
1067 TRUE
, /* pc_relative */
1069 complain_overflow_dont
,/* complain_on_overflow */
1070 bfd_elf_generic_reloc
, /* special_function */
1071 "R_ARM_LDC_PC_G2", /* name */
1072 FALSE
, /* partial_inplace */
1073 0xffffffff, /* src_mask */
1074 0xffffffff, /* dst_mask */
1075 TRUE
), /* pcrel_offset */
1077 HOWTO (R_ARM_ALU_SB_G0_NC
, /* type */
1079 2, /* size (0 = byte, 1 = short, 2 = long) */
1081 TRUE
, /* pc_relative */
1083 complain_overflow_dont
,/* complain_on_overflow */
1084 bfd_elf_generic_reloc
, /* special_function */
1085 "R_ARM_ALU_SB_G0_NC", /* name */
1086 FALSE
, /* partial_inplace */
1087 0xffffffff, /* src_mask */
1088 0xffffffff, /* dst_mask */
1089 TRUE
), /* pcrel_offset */
1091 HOWTO (R_ARM_ALU_SB_G0
, /* type */
1093 2, /* size (0 = byte, 1 = short, 2 = long) */
1095 TRUE
, /* pc_relative */
1097 complain_overflow_dont
,/* complain_on_overflow */
1098 bfd_elf_generic_reloc
, /* special_function */
1099 "R_ARM_ALU_SB_G0", /* name */
1100 FALSE
, /* partial_inplace */
1101 0xffffffff, /* src_mask */
1102 0xffffffff, /* dst_mask */
1103 TRUE
), /* pcrel_offset */
1105 HOWTO (R_ARM_ALU_SB_G1_NC
, /* type */
1107 2, /* size (0 = byte, 1 = short, 2 = long) */
1109 TRUE
, /* pc_relative */
1111 complain_overflow_dont
,/* complain_on_overflow */
1112 bfd_elf_generic_reloc
, /* special_function */
1113 "R_ARM_ALU_SB_G1_NC", /* name */
1114 FALSE
, /* partial_inplace */
1115 0xffffffff, /* src_mask */
1116 0xffffffff, /* dst_mask */
1117 TRUE
), /* pcrel_offset */
1119 HOWTO (R_ARM_ALU_SB_G1
, /* type */
1121 2, /* size (0 = byte, 1 = short, 2 = long) */
1123 TRUE
, /* pc_relative */
1125 complain_overflow_dont
,/* complain_on_overflow */
1126 bfd_elf_generic_reloc
, /* special_function */
1127 "R_ARM_ALU_SB_G1", /* name */
1128 FALSE
, /* partial_inplace */
1129 0xffffffff, /* src_mask */
1130 0xffffffff, /* dst_mask */
1131 TRUE
), /* pcrel_offset */
1133 HOWTO (R_ARM_ALU_SB_G2
, /* type */
1135 2, /* size (0 = byte, 1 = short, 2 = long) */
1137 TRUE
, /* pc_relative */
1139 complain_overflow_dont
,/* complain_on_overflow */
1140 bfd_elf_generic_reloc
, /* special_function */
1141 "R_ARM_ALU_SB_G2", /* name */
1142 FALSE
, /* partial_inplace */
1143 0xffffffff, /* src_mask */
1144 0xffffffff, /* dst_mask */
1145 TRUE
), /* pcrel_offset */
1147 HOWTO (R_ARM_LDR_SB_G0
, /* type */
1149 2, /* size (0 = byte, 1 = short, 2 = long) */
1151 TRUE
, /* pc_relative */
1153 complain_overflow_dont
,/* complain_on_overflow */
1154 bfd_elf_generic_reloc
, /* special_function */
1155 "R_ARM_LDR_SB_G0", /* name */
1156 FALSE
, /* partial_inplace */
1157 0xffffffff, /* src_mask */
1158 0xffffffff, /* dst_mask */
1159 TRUE
), /* pcrel_offset */
1161 HOWTO (R_ARM_LDR_SB_G1
, /* type */
1163 2, /* size (0 = byte, 1 = short, 2 = long) */
1165 TRUE
, /* pc_relative */
1167 complain_overflow_dont
,/* complain_on_overflow */
1168 bfd_elf_generic_reloc
, /* special_function */
1169 "R_ARM_LDR_SB_G1", /* name */
1170 FALSE
, /* partial_inplace */
1171 0xffffffff, /* src_mask */
1172 0xffffffff, /* dst_mask */
1173 TRUE
), /* pcrel_offset */
1175 HOWTO (R_ARM_LDR_SB_G2
, /* type */
1177 2, /* size (0 = byte, 1 = short, 2 = long) */
1179 TRUE
, /* pc_relative */
1181 complain_overflow_dont
,/* complain_on_overflow */
1182 bfd_elf_generic_reloc
, /* special_function */
1183 "R_ARM_LDR_SB_G2", /* name */
1184 FALSE
, /* partial_inplace */
1185 0xffffffff, /* src_mask */
1186 0xffffffff, /* dst_mask */
1187 TRUE
), /* pcrel_offset */
1189 HOWTO (R_ARM_LDRS_SB_G0
, /* type */
1191 2, /* size (0 = byte, 1 = short, 2 = long) */
1193 TRUE
, /* pc_relative */
1195 complain_overflow_dont
,/* complain_on_overflow */
1196 bfd_elf_generic_reloc
, /* special_function */
1197 "R_ARM_LDRS_SB_G0", /* name */
1198 FALSE
, /* partial_inplace */
1199 0xffffffff, /* src_mask */
1200 0xffffffff, /* dst_mask */
1201 TRUE
), /* pcrel_offset */
1203 HOWTO (R_ARM_LDRS_SB_G1
, /* type */
1205 2, /* size (0 = byte, 1 = short, 2 = long) */
1207 TRUE
, /* pc_relative */
1209 complain_overflow_dont
,/* complain_on_overflow */
1210 bfd_elf_generic_reloc
, /* special_function */
1211 "R_ARM_LDRS_SB_G1", /* name */
1212 FALSE
, /* partial_inplace */
1213 0xffffffff, /* src_mask */
1214 0xffffffff, /* dst_mask */
1215 TRUE
), /* pcrel_offset */
1217 HOWTO (R_ARM_LDRS_SB_G2
, /* type */
1219 2, /* size (0 = byte, 1 = short, 2 = long) */
1221 TRUE
, /* pc_relative */
1223 complain_overflow_dont
,/* complain_on_overflow */
1224 bfd_elf_generic_reloc
, /* special_function */
1225 "R_ARM_LDRS_SB_G2", /* name */
1226 FALSE
, /* partial_inplace */
1227 0xffffffff, /* src_mask */
1228 0xffffffff, /* dst_mask */
1229 TRUE
), /* pcrel_offset */
1231 HOWTO (R_ARM_LDC_SB_G0
, /* type */
1233 2, /* size (0 = byte, 1 = short, 2 = long) */
1235 TRUE
, /* pc_relative */
1237 complain_overflow_dont
,/* complain_on_overflow */
1238 bfd_elf_generic_reloc
, /* special_function */
1239 "R_ARM_LDC_SB_G0", /* name */
1240 FALSE
, /* partial_inplace */
1241 0xffffffff, /* src_mask */
1242 0xffffffff, /* dst_mask */
1243 TRUE
), /* pcrel_offset */
1245 HOWTO (R_ARM_LDC_SB_G1
, /* type */
1247 2, /* size (0 = byte, 1 = short, 2 = long) */
1249 TRUE
, /* pc_relative */
1251 complain_overflow_dont
,/* complain_on_overflow */
1252 bfd_elf_generic_reloc
, /* special_function */
1253 "R_ARM_LDC_SB_G1", /* name */
1254 FALSE
, /* partial_inplace */
1255 0xffffffff, /* src_mask */
1256 0xffffffff, /* dst_mask */
1257 TRUE
), /* pcrel_offset */
1259 HOWTO (R_ARM_LDC_SB_G2
, /* type */
1261 2, /* size (0 = byte, 1 = short, 2 = long) */
1263 TRUE
, /* pc_relative */
1265 complain_overflow_dont
,/* complain_on_overflow */
1266 bfd_elf_generic_reloc
, /* special_function */
1267 "R_ARM_LDC_SB_G2", /* name */
1268 FALSE
, /* partial_inplace */
1269 0xffffffff, /* src_mask */
1270 0xffffffff, /* dst_mask */
1271 TRUE
), /* pcrel_offset */
  /* End of group relocations.  */
1275 HOWTO (R_ARM_MOVW_BREL_NC
, /* type */
1277 2, /* size (0 = byte, 1 = short, 2 = long) */
1279 FALSE
, /* pc_relative */
1281 complain_overflow_dont
,/* complain_on_overflow */
1282 bfd_elf_generic_reloc
, /* special_function */
1283 "R_ARM_MOVW_BREL_NC", /* name */
1284 FALSE
, /* partial_inplace */
1285 0x0000ffff, /* src_mask */
1286 0x0000ffff, /* dst_mask */
1287 FALSE
), /* pcrel_offset */
1289 HOWTO (R_ARM_MOVT_BREL
, /* type */
1291 2, /* size (0 = byte, 1 = short, 2 = long) */
1293 FALSE
, /* pc_relative */
1295 complain_overflow_bitfield
,/* complain_on_overflow */
1296 bfd_elf_generic_reloc
, /* special_function */
1297 "R_ARM_MOVT_BREL", /* name */
1298 FALSE
, /* partial_inplace */
1299 0x0000ffff, /* src_mask */
1300 0x0000ffff, /* dst_mask */
1301 FALSE
), /* pcrel_offset */
1303 HOWTO (R_ARM_MOVW_BREL
, /* type */
1305 2, /* size (0 = byte, 1 = short, 2 = long) */
1307 FALSE
, /* pc_relative */
1309 complain_overflow_dont
,/* complain_on_overflow */
1310 bfd_elf_generic_reloc
, /* special_function */
1311 "R_ARM_MOVW_BREL", /* name */
1312 FALSE
, /* partial_inplace */
1313 0x0000ffff, /* src_mask */
1314 0x0000ffff, /* dst_mask */
1315 FALSE
), /* pcrel_offset */
1317 HOWTO (R_ARM_THM_MOVW_BREL_NC
,/* type */
1319 2, /* size (0 = byte, 1 = short, 2 = long) */
1321 FALSE
, /* pc_relative */
1323 complain_overflow_dont
,/* complain_on_overflow */
1324 bfd_elf_generic_reloc
, /* special_function */
1325 "R_ARM_THM_MOVW_BREL_NC",/* name */
1326 FALSE
, /* partial_inplace */
1327 0x040f70ff, /* src_mask */
1328 0x040f70ff, /* dst_mask */
1329 FALSE
), /* pcrel_offset */
1331 HOWTO (R_ARM_THM_MOVT_BREL
, /* type */
1333 2, /* size (0 = byte, 1 = short, 2 = long) */
1335 FALSE
, /* pc_relative */
1337 complain_overflow_bitfield
,/* complain_on_overflow */
1338 bfd_elf_generic_reloc
, /* special_function */
1339 "R_ARM_THM_MOVT_BREL", /* name */
1340 FALSE
, /* partial_inplace */
1341 0x040f70ff, /* src_mask */
1342 0x040f70ff, /* dst_mask */
1343 FALSE
), /* pcrel_offset */
1345 HOWTO (R_ARM_THM_MOVW_BREL
, /* type */
1347 2, /* size (0 = byte, 1 = short, 2 = long) */
1349 FALSE
, /* pc_relative */
1351 complain_overflow_dont
,/* complain_on_overflow */
1352 bfd_elf_generic_reloc
, /* special_function */
1353 "R_ARM_THM_MOVW_BREL", /* name */
1354 FALSE
, /* partial_inplace */
1355 0x040f70ff, /* src_mask */
1356 0x040f70ff, /* dst_mask */
1357 FALSE
), /* pcrel_offset */
1359 HOWTO (R_ARM_TLS_GOTDESC
, /* type */
1361 2, /* size (0 = byte, 1 = short, 2 = long) */
1363 FALSE
, /* pc_relative */
1365 complain_overflow_bitfield
,/* complain_on_overflow */
1366 NULL
, /* special_function */
1367 "R_ARM_TLS_GOTDESC", /* name */
1368 TRUE
, /* partial_inplace */
1369 0xffffffff, /* src_mask */
1370 0xffffffff, /* dst_mask */
1371 FALSE
), /* pcrel_offset */
1373 HOWTO (R_ARM_TLS_CALL
, /* type */
1375 2, /* size (0 = byte, 1 = short, 2 = long) */
1377 FALSE
, /* pc_relative */
1379 complain_overflow_dont
,/* complain_on_overflow */
1380 bfd_elf_generic_reloc
, /* special_function */
1381 "R_ARM_TLS_CALL", /* name */
1382 FALSE
, /* partial_inplace */
1383 0x00ffffff, /* src_mask */
1384 0x00ffffff, /* dst_mask */
1385 FALSE
), /* pcrel_offset */
1387 HOWTO (R_ARM_TLS_DESCSEQ
, /* type */
1389 2, /* size (0 = byte, 1 = short, 2 = long) */
1391 FALSE
, /* pc_relative */
1393 complain_overflow_bitfield
,/* complain_on_overflow */
1394 bfd_elf_generic_reloc
, /* special_function */
1395 "R_ARM_TLS_DESCSEQ", /* name */
1396 FALSE
, /* partial_inplace */
1397 0x00000000, /* src_mask */
1398 0x00000000, /* dst_mask */
1399 FALSE
), /* pcrel_offset */
1401 HOWTO (R_ARM_THM_TLS_CALL
, /* type */
1403 2, /* size (0 = byte, 1 = short, 2 = long) */
1405 FALSE
, /* pc_relative */
1407 complain_overflow_dont
,/* complain_on_overflow */
1408 bfd_elf_generic_reloc
, /* special_function */
1409 "R_ARM_THM_TLS_CALL", /* name */
1410 FALSE
, /* partial_inplace */
1411 0x07ff07ff, /* src_mask */
1412 0x07ff07ff, /* dst_mask */
1413 FALSE
), /* pcrel_offset */
1415 HOWTO (R_ARM_PLT32_ABS
, /* type */
1417 2, /* size (0 = byte, 1 = short, 2 = long) */
1419 FALSE
, /* pc_relative */
1421 complain_overflow_dont
,/* complain_on_overflow */
1422 bfd_elf_generic_reloc
, /* special_function */
1423 "R_ARM_PLT32_ABS", /* name */
1424 FALSE
, /* partial_inplace */
1425 0xffffffff, /* src_mask */
1426 0xffffffff, /* dst_mask */
1427 FALSE
), /* pcrel_offset */
1429 HOWTO (R_ARM_GOT_ABS
, /* type */
1431 2, /* size (0 = byte, 1 = short, 2 = long) */
1433 FALSE
, /* pc_relative */
1435 complain_overflow_dont
,/* complain_on_overflow */
1436 bfd_elf_generic_reloc
, /* special_function */
1437 "R_ARM_GOT_ABS", /* name */
1438 FALSE
, /* partial_inplace */
1439 0xffffffff, /* src_mask */
1440 0xffffffff, /* dst_mask */
1441 FALSE
), /* pcrel_offset */
1443 HOWTO (R_ARM_GOT_PREL
, /* type */
1445 2, /* size (0 = byte, 1 = short, 2 = long) */
1447 TRUE
, /* pc_relative */
1449 complain_overflow_dont
, /* complain_on_overflow */
1450 bfd_elf_generic_reloc
, /* special_function */
1451 "R_ARM_GOT_PREL", /* name */
1452 FALSE
, /* partial_inplace */
1453 0xffffffff, /* src_mask */
1454 0xffffffff, /* dst_mask */
1455 TRUE
), /* pcrel_offset */
1457 HOWTO (R_ARM_GOT_BREL12
, /* type */
1459 2, /* size (0 = byte, 1 = short, 2 = long) */
1461 FALSE
, /* pc_relative */
1463 complain_overflow_bitfield
,/* complain_on_overflow */
1464 bfd_elf_generic_reloc
, /* special_function */
1465 "R_ARM_GOT_BREL12", /* name */
1466 FALSE
, /* partial_inplace */
1467 0x00000fff, /* src_mask */
1468 0x00000fff, /* dst_mask */
1469 FALSE
), /* pcrel_offset */
1471 HOWTO (R_ARM_GOTOFF12
, /* type */
1473 2, /* size (0 = byte, 1 = short, 2 = long) */
1475 FALSE
, /* pc_relative */
1477 complain_overflow_bitfield
,/* complain_on_overflow */
1478 bfd_elf_generic_reloc
, /* special_function */
1479 "R_ARM_GOTOFF12", /* name */
1480 FALSE
, /* partial_inplace */
1481 0x00000fff, /* src_mask */
1482 0x00000fff, /* dst_mask */
1483 FALSE
), /* pcrel_offset */
  EMPTY_HOWTO (R_ARM_GOTRELAX),	 /* reserved for future GOT-load optimizations */

  /* GNU extension to record C++ vtable member usage */
1488 HOWTO (R_ARM_GNU_VTENTRY
, /* type */
1490 2, /* size (0 = byte, 1 = short, 2 = long) */
1492 FALSE
, /* pc_relative */
1494 complain_overflow_dont
, /* complain_on_overflow */
1495 _bfd_elf_rel_vtable_reloc_fn
, /* special_function */
1496 "R_ARM_GNU_VTENTRY", /* name */
1497 FALSE
, /* partial_inplace */
1500 FALSE
), /* pcrel_offset */
  /* GNU extension to record C++ vtable hierarchy */
1503 HOWTO (R_ARM_GNU_VTINHERIT
, /* type */
1505 2, /* size (0 = byte, 1 = short, 2 = long) */
1507 FALSE
, /* pc_relative */
1509 complain_overflow_dont
, /* complain_on_overflow */
1510 NULL
, /* special_function */
1511 "R_ARM_GNU_VTINHERIT", /* name */
1512 FALSE
, /* partial_inplace */
1515 FALSE
), /* pcrel_offset */
1517 HOWTO (R_ARM_THM_JUMP11
, /* type */
1519 1, /* size (0 = byte, 1 = short, 2 = long) */
1521 TRUE
, /* pc_relative */
1523 complain_overflow_signed
, /* complain_on_overflow */
1524 bfd_elf_generic_reloc
, /* special_function */
1525 "R_ARM_THM_JUMP11", /* name */
1526 FALSE
, /* partial_inplace */
1527 0x000007ff, /* src_mask */
1528 0x000007ff, /* dst_mask */
1529 TRUE
), /* pcrel_offset */
1531 HOWTO (R_ARM_THM_JUMP8
, /* type */
1533 1, /* size (0 = byte, 1 = short, 2 = long) */
1535 TRUE
, /* pc_relative */
1537 complain_overflow_signed
, /* complain_on_overflow */
1538 bfd_elf_generic_reloc
, /* special_function */
1539 "R_ARM_THM_JUMP8", /* name */
1540 FALSE
, /* partial_inplace */
1541 0x000000ff, /* src_mask */
1542 0x000000ff, /* dst_mask */
1543 TRUE
), /* pcrel_offset */
  /* TLS relocations */
1546 HOWTO (R_ARM_TLS_GD32
, /* type */
1548 2, /* size (0 = byte, 1 = short, 2 = long) */
1550 FALSE
, /* pc_relative */
1552 complain_overflow_bitfield
,/* complain_on_overflow */
1553 NULL
, /* special_function */
1554 "R_ARM_TLS_GD32", /* name */
1555 TRUE
, /* partial_inplace */
1556 0xffffffff, /* src_mask */
1557 0xffffffff, /* dst_mask */
1558 FALSE
), /* pcrel_offset */
1560 HOWTO (R_ARM_TLS_LDM32
, /* type */
1562 2, /* size (0 = byte, 1 = short, 2 = long) */
1564 FALSE
, /* pc_relative */
1566 complain_overflow_bitfield
,/* complain_on_overflow */
1567 bfd_elf_generic_reloc
, /* special_function */
1568 "R_ARM_TLS_LDM32", /* name */
1569 TRUE
, /* partial_inplace */
1570 0xffffffff, /* src_mask */
1571 0xffffffff, /* dst_mask */
1572 FALSE
), /* pcrel_offset */
1574 HOWTO (R_ARM_TLS_LDO32
, /* type */
1576 2, /* size (0 = byte, 1 = short, 2 = long) */
1578 FALSE
, /* pc_relative */
1580 complain_overflow_bitfield
,/* complain_on_overflow */
1581 bfd_elf_generic_reloc
, /* special_function */
1582 "R_ARM_TLS_LDO32", /* name */
1583 TRUE
, /* partial_inplace */
1584 0xffffffff, /* src_mask */
1585 0xffffffff, /* dst_mask */
1586 FALSE
), /* pcrel_offset */
1588 HOWTO (R_ARM_TLS_IE32
, /* type */
1590 2, /* size (0 = byte, 1 = short, 2 = long) */
1592 FALSE
, /* pc_relative */
1594 complain_overflow_bitfield
,/* complain_on_overflow */
1595 NULL
, /* special_function */
1596 "R_ARM_TLS_IE32", /* name */
1597 TRUE
, /* partial_inplace */
1598 0xffffffff, /* src_mask */
1599 0xffffffff, /* dst_mask */
1600 FALSE
), /* pcrel_offset */
1602 HOWTO (R_ARM_TLS_LE32
, /* type */
1604 2, /* size (0 = byte, 1 = short, 2 = long) */
1606 FALSE
, /* pc_relative */
1608 complain_overflow_bitfield
,/* complain_on_overflow */
1609 NULL
, /* special_function */
1610 "R_ARM_TLS_LE32", /* name */
1611 TRUE
, /* partial_inplace */
1612 0xffffffff, /* src_mask */
1613 0xffffffff, /* dst_mask */
1614 FALSE
), /* pcrel_offset */
1616 HOWTO (R_ARM_TLS_LDO12
, /* type */
1618 2, /* size (0 = byte, 1 = short, 2 = long) */
1620 FALSE
, /* pc_relative */
1622 complain_overflow_bitfield
,/* complain_on_overflow */
1623 bfd_elf_generic_reloc
, /* special_function */
1624 "R_ARM_TLS_LDO12", /* name */
1625 FALSE
, /* partial_inplace */
1626 0x00000fff, /* src_mask */
1627 0x00000fff, /* dst_mask */
1628 FALSE
), /* pcrel_offset */
1630 HOWTO (R_ARM_TLS_LE12
, /* type */
1632 2, /* size (0 = byte, 1 = short, 2 = long) */
1634 FALSE
, /* pc_relative */
1636 complain_overflow_bitfield
,/* complain_on_overflow */
1637 bfd_elf_generic_reloc
, /* special_function */
1638 "R_ARM_TLS_LE12", /* name */
1639 FALSE
, /* partial_inplace */
1640 0x00000fff, /* src_mask */
1641 0x00000fff, /* dst_mask */
1642 FALSE
), /* pcrel_offset */
1644 HOWTO (R_ARM_TLS_IE12GP
, /* type */
1646 2, /* size (0 = byte, 1 = short, 2 = long) */
1648 FALSE
, /* pc_relative */
1650 complain_overflow_bitfield
,/* complain_on_overflow */
1651 bfd_elf_generic_reloc
, /* special_function */
1652 "R_ARM_TLS_IE12GP", /* name */
1653 FALSE
, /* partial_inplace */
1654 0x00000fff, /* src_mask */
1655 0x00000fff, /* dst_mask */
1656 FALSE
), /* pcrel_offset */
  /* 112-127 private relocations.  */

  /* R_ARM_ME_TOO, obsolete.  */
1679 HOWTO (R_ARM_THM_TLS_DESCSEQ
, /* type */
1681 1, /* size (0 = byte, 1 = short, 2 = long) */
1683 FALSE
, /* pc_relative */
1685 complain_overflow_bitfield
,/* complain_on_overflow */
1686 bfd_elf_generic_reloc
, /* special_function */
1687 "R_ARM_THM_TLS_DESCSEQ",/* name */
1688 FALSE
, /* partial_inplace */
1689 0x00000000, /* src_mask */
1690 0x00000000, /* dst_mask */
1691 FALSE
), /* pcrel_offset */
1694 HOWTO (R_ARM_THM_ALU_ABS_G0_NC
,/* type. */
1695 0, /* rightshift. */
1696 1, /* size (0 = byte, 1 = short, 2 = long). */
1698 FALSE
, /* pc_relative. */
1700 complain_overflow_bitfield
,/* complain_on_overflow. */
1701 bfd_elf_generic_reloc
, /* special_function. */
1702 "R_ARM_THM_ALU_ABS_G0_NC",/* name. */
1703 FALSE
, /* partial_inplace. */
1704 0x00000000, /* src_mask. */
1705 0x00000000, /* dst_mask. */
1706 FALSE
), /* pcrel_offset. */
1707 HOWTO (R_ARM_THM_ALU_ABS_G1_NC
,/* type. */
1708 0, /* rightshift. */
1709 1, /* size (0 = byte, 1 = short, 2 = long). */
1711 FALSE
, /* pc_relative. */
1713 complain_overflow_bitfield
,/* complain_on_overflow. */
1714 bfd_elf_generic_reloc
, /* special_function. */
1715 "R_ARM_THM_ALU_ABS_G1_NC",/* name. */
1716 FALSE
, /* partial_inplace. */
1717 0x00000000, /* src_mask. */
1718 0x00000000, /* dst_mask. */
1719 FALSE
), /* pcrel_offset. */
1720 HOWTO (R_ARM_THM_ALU_ABS_G2_NC
,/* type. */
1721 0, /* rightshift. */
1722 1, /* size (0 = byte, 1 = short, 2 = long). */
1724 FALSE
, /* pc_relative. */
1726 complain_overflow_bitfield
,/* complain_on_overflow. */
1727 bfd_elf_generic_reloc
, /* special_function. */
1728 "R_ARM_THM_ALU_ABS_G2_NC",/* name. */
1729 FALSE
, /* partial_inplace. */
1730 0x00000000, /* src_mask. */
1731 0x00000000, /* dst_mask. */
1732 FALSE
), /* pcrel_offset. */
1733 HOWTO (R_ARM_THM_ALU_ABS_G3_NC
,/* type. */
1734 0, /* rightshift. */
1735 1, /* size (0 = byte, 1 = short, 2 = long). */
1737 FALSE
, /* pc_relative. */
1739 complain_overflow_bitfield
,/* complain_on_overflow. */
1740 bfd_elf_generic_reloc
, /* special_function. */
1741 "R_ARM_THM_ALU_ABS_G3_NC",/* name. */
1742 FALSE
, /* partial_inplace. */
1743 0x00000000, /* src_mask. */
1744 0x00000000, /* dst_mask. */
1745 FALSE
), /* pcrel_offset. */
};

static reloc_howto_type elf32_arm_howto_table_2[8] =
{
1751 HOWTO (R_ARM_IRELATIVE
, /* type */
1753 2, /* size (0 = byte, 1 = short, 2 = long) */
1755 FALSE
, /* pc_relative */
1757 complain_overflow_bitfield
,/* complain_on_overflow */
1758 bfd_elf_generic_reloc
, /* special_function */
1759 "R_ARM_IRELATIVE", /* name */
1760 TRUE
, /* partial_inplace */
1761 0xffffffff, /* src_mask */
1762 0xffffffff, /* dst_mask */
1763 FALSE
), /* pcrel_offset */
1764 HOWTO (R_ARM_GOTFUNCDESC
, /* type */
1766 2, /* size (0 = byte, 1 = short, 2 = long) */
1768 FALSE
, /* pc_relative */
1770 complain_overflow_bitfield
,/* complain_on_overflow */
1771 bfd_elf_generic_reloc
, /* special_function */
1772 "R_ARM_GOTFUNCDESC", /* name */
1773 FALSE
, /* partial_inplace */
1775 0xffffffff, /* dst_mask */
1776 FALSE
), /* pcrel_offset */
1777 HOWTO (R_ARM_GOTOFFFUNCDESC
, /* type */
1779 2, /* size (0 = byte, 1 = short, 2 = long) */
1781 FALSE
, /* pc_relative */
1783 complain_overflow_bitfield
,/* complain_on_overflow */
1784 bfd_elf_generic_reloc
, /* special_function */
1785 "R_ARM_GOTOFFFUNCDESC",/* name */
1786 FALSE
, /* partial_inplace */
1788 0xffffffff, /* dst_mask */
1789 FALSE
), /* pcrel_offset */
1790 HOWTO (R_ARM_FUNCDESC
, /* type */
1792 2, /* size (0 = byte, 1 = short, 2 = long) */
1794 FALSE
, /* pc_relative */
1796 complain_overflow_bitfield
,/* complain_on_overflow */
1797 bfd_elf_generic_reloc
, /* special_function */
1798 "R_ARM_FUNCDESC", /* name */
1799 FALSE
, /* partial_inplace */
1801 0xffffffff, /* dst_mask */
1802 FALSE
), /* pcrel_offset */
1803 HOWTO (R_ARM_FUNCDESC_VALUE
, /* type */
1805 2, /* size (0 = byte, 1 = short, 2 = long) */
1807 FALSE
, /* pc_relative */
1809 complain_overflow_bitfield
,/* complain_on_overflow */
1810 bfd_elf_generic_reloc
, /* special_function */
1811 "R_ARM_FUNCDESC_VALUE",/* name */
1812 FALSE
, /* partial_inplace */
1814 0xffffffff, /* dst_mask */
1815 FALSE
), /* pcrel_offset */
1816 HOWTO (R_ARM_TLS_GD32_FDPIC
, /* type */
1818 2, /* size (0 = byte, 1 = short, 2 = long) */
1820 FALSE
, /* pc_relative */
1822 complain_overflow_bitfield
,/* complain_on_overflow */
1823 bfd_elf_generic_reloc
, /* special_function */
1824 "R_ARM_TLS_GD32_FDPIC",/* name */
1825 FALSE
, /* partial_inplace */
1827 0xffffffff, /* dst_mask */
1828 FALSE
), /* pcrel_offset */
1829 HOWTO (R_ARM_TLS_LDM32_FDPIC
, /* type */
1831 2, /* size (0 = byte, 1 = short, 2 = long) */
1833 FALSE
, /* pc_relative */
1835 complain_overflow_bitfield
,/* complain_on_overflow */
1836 bfd_elf_generic_reloc
, /* special_function */
1837 "R_ARM_TLS_LDM32_FDPIC",/* name */
1838 FALSE
, /* partial_inplace */
1840 0xffffffff, /* dst_mask */
1841 FALSE
), /* pcrel_offset */
1842 HOWTO (R_ARM_TLS_IE32_FDPIC
, /* type */
1844 2, /* size (0 = byte, 1 = short, 2 = long) */
1846 FALSE
, /* pc_relative */
1848 complain_overflow_bitfield
,/* complain_on_overflow */
1849 bfd_elf_generic_reloc
, /* special_function */
1850 "R_ARM_TLS_IE32_FDPIC",/* name */
1851 FALSE
, /* partial_inplace */
1853 0xffffffff, /* dst_mask */
1854 FALSE
), /* pcrel_offset */
};

/* 249-255 extended, currently unused, relocations:  */
static reloc_howto_type elf32_arm_howto_table_3[4] =
{
  HOWTO (R_ARM_RREL32,		/* type */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 FALSE,			/* pc_relative */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RREL32",	/* name */
	 FALSE,			/* partial_inplace */
	 FALSE),		/* pcrel_offset */

  HOWTO (R_ARM_RABS32,		/* type */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 FALSE,			/* pc_relative */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RABS32",	/* name */
	 FALSE,			/* partial_inplace */
	 FALSE),		/* pcrel_offset */

  HOWTO (R_ARM_RPC24,		/* type */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 FALSE,			/* pc_relative */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RPC24",		/* name */
	 FALSE,			/* partial_inplace */
	 FALSE),		/* pcrel_offset */

  HOWTO (R_ARM_RBASE,		/* type */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 FALSE,			/* pc_relative */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RBASE",		/* name */
	 FALSE,			/* partial_inplace */
	 FALSE)			/* pcrel_offset */
};
static reloc_howto_type *
elf32_arm_howto_from_type (unsigned int r_type)
{
  if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
    return &elf32_arm_howto_table_1[r_type];

  if (r_type >= R_ARM_IRELATIVE
      && r_type < R_ARM_IRELATIVE + ARRAY_SIZE (elf32_arm_howto_table_2))
    return &elf32_arm_howto_table_2[r_type - R_ARM_IRELATIVE];

  if (r_type >= R_ARM_RREL32
      && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_3))
    return &elf32_arm_howto_table_3[r_type - R_ARM_RREL32];

  return NULL;
}
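/* Example (illustrative, not from the original source): table 1 is
   indexed directly by relocation number, so
   elf32_arm_howto_from_type (R_ARM_ABS32) returns the entry whose name
   field is "R_ARM_ABS32"; types in the R_ARM_IRELATIVE and R_ARM_RREL32
   ranges are redirected into the second and third tables instead.  */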
static bfd_boolean
elf32_arm_info_to_howto (bfd * abfd, arelent * bfd_reloc,
			 Elf_Internal_Rela * elf_reloc)
{
  unsigned int r_type;

  r_type = ELF32_R_TYPE (elf_reloc->r_info);
  if ((bfd_reloc->howto = elf32_arm_howto_from_type (r_type)) == NULL)
    {
      /* xgettext:c-format */
      _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
			  abfd, r_type);
      bfd_set_error (bfd_error_bad_value);
      return FALSE;
    }
  return TRUE;
}
struct elf32_arm_reloc_map
  {
    bfd_reloc_code_real_type  bfd_reloc_val;
    unsigned char		elf_reloc_val;
  };

/* All entries in this list must also be present in elf32_arm_howto_table.  */
static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
  {
    {BFD_RELOC_NONE,		     R_ARM_NONE},
    {BFD_RELOC_ARM_PCREL_BRANCH,     R_ARM_PC24},
    {BFD_RELOC_ARM_PCREL_CALL,	     R_ARM_CALL},
    {BFD_RELOC_ARM_PCREL_JUMP,	     R_ARM_JUMP24},
    {BFD_RELOC_ARM_PCREL_BLX,	     R_ARM_XPC25},
    {BFD_RELOC_THUMB_PCREL_BLX,	     R_ARM_THM_XPC22},
    {BFD_RELOC_32,		     R_ARM_ABS32},
    {BFD_RELOC_32_PCREL,	     R_ARM_REL32},
    {BFD_RELOC_8,		     R_ARM_ABS8},
    {BFD_RELOC_16,		     R_ARM_ABS16},
    {BFD_RELOC_ARM_OFFSET_IMM,	     R_ARM_ABS12},
    {BFD_RELOC_ARM_THUMB_OFFSET,     R_ARM_THM_ABS5},
    {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
    {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
    {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
    {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
    {BFD_RELOC_THUMB_PCREL_BRANCH9,  R_ARM_THM_JUMP8},
    {BFD_RELOC_THUMB_PCREL_BRANCH7,  R_ARM_THM_JUMP6},
    {BFD_RELOC_ARM_GLOB_DAT,	     R_ARM_GLOB_DAT},
    {BFD_RELOC_ARM_JUMP_SLOT,	     R_ARM_JUMP_SLOT},
    {BFD_RELOC_ARM_RELATIVE,	     R_ARM_RELATIVE},
    {BFD_RELOC_ARM_GOTOFF,	     R_ARM_GOTOFF32},
    {BFD_RELOC_ARM_GOTPC,	     R_ARM_GOTPC},
    {BFD_RELOC_ARM_GOT_PREL,	     R_ARM_GOT_PREL},
    {BFD_RELOC_ARM_GOT32,	     R_ARM_GOT32},
    {BFD_RELOC_ARM_PLT32,	     R_ARM_PLT32},
    {BFD_RELOC_ARM_TARGET1,	     R_ARM_TARGET1},
    {BFD_RELOC_ARM_ROSEGREL32,	     R_ARM_ROSEGREL32},
    {BFD_RELOC_ARM_SBREL32,	     R_ARM_SBREL32},
    {BFD_RELOC_ARM_PREL31,	     R_ARM_PREL31},
    {BFD_RELOC_ARM_TARGET2,	     R_ARM_TARGET2},
    {BFD_RELOC_ARM_PLT32,	     R_ARM_PLT32},
    {BFD_RELOC_ARM_TLS_GOTDESC,	     R_ARM_TLS_GOTDESC},
    {BFD_RELOC_ARM_TLS_CALL,	     R_ARM_TLS_CALL},
    {BFD_RELOC_ARM_THM_TLS_CALL,     R_ARM_THM_TLS_CALL},
    {BFD_RELOC_ARM_TLS_DESCSEQ,	     R_ARM_TLS_DESCSEQ},
    {BFD_RELOC_ARM_THM_TLS_DESCSEQ,  R_ARM_THM_TLS_DESCSEQ},
    {BFD_RELOC_ARM_TLS_DESC,	     R_ARM_TLS_DESC},
    {BFD_RELOC_ARM_TLS_GD32,	     R_ARM_TLS_GD32},
    {BFD_RELOC_ARM_TLS_LDO32,	     R_ARM_TLS_LDO32},
    {BFD_RELOC_ARM_TLS_LDM32,	     R_ARM_TLS_LDM32},
    {BFD_RELOC_ARM_TLS_DTPMOD32,     R_ARM_TLS_DTPMOD32},
    {BFD_RELOC_ARM_TLS_DTPOFF32,     R_ARM_TLS_DTPOFF32},
    {BFD_RELOC_ARM_TLS_TPOFF32,	     R_ARM_TLS_TPOFF32},
    {BFD_RELOC_ARM_TLS_IE32,	     R_ARM_TLS_IE32},
    {BFD_RELOC_ARM_TLS_LE32,	     R_ARM_TLS_LE32},
    {BFD_RELOC_ARM_IRELATIVE,	     R_ARM_IRELATIVE},
    {BFD_RELOC_ARM_GOTFUNCDESC,	     R_ARM_GOTFUNCDESC},
    {BFD_RELOC_ARM_GOTOFFFUNCDESC,   R_ARM_GOTOFFFUNCDESC},
    {BFD_RELOC_ARM_FUNCDESC,	     R_ARM_FUNCDESC},
    {BFD_RELOC_ARM_FUNCDESC_VALUE,   R_ARM_FUNCDESC_VALUE},
    {BFD_RELOC_ARM_TLS_GD32_FDPIC,   R_ARM_TLS_GD32_FDPIC},
    {BFD_RELOC_ARM_TLS_LDM32_FDPIC,  R_ARM_TLS_LDM32_FDPIC},
    {BFD_RELOC_ARM_TLS_IE32_FDPIC,   R_ARM_TLS_IE32_FDPIC},
    {BFD_RELOC_VTABLE_INHERIT,	     R_ARM_GNU_VTINHERIT},
    {BFD_RELOC_VTABLE_ENTRY,	     R_ARM_GNU_VTENTRY},
    {BFD_RELOC_ARM_MOVW,	     R_ARM_MOVW_ABS_NC},
    {BFD_RELOC_ARM_MOVT,	     R_ARM_MOVT_ABS},
    {BFD_RELOC_ARM_MOVW_PCREL,	     R_ARM_MOVW_PREL_NC},
    {BFD_RELOC_ARM_MOVT_PCREL,	     R_ARM_MOVT_PREL},
    {BFD_RELOC_ARM_THUMB_MOVW,	     R_ARM_THM_MOVW_ABS_NC},
    {BFD_RELOC_ARM_THUMB_MOVT,	     R_ARM_THM_MOVT_ABS},
    {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
    {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
2025 {BFD_RELOC_ARM_ALU_PC_G0_NC
, R_ARM_ALU_PC_G0_NC
},
2026 {BFD_RELOC_ARM_ALU_PC_G0
, R_ARM_ALU_PC_G0
},
2027 {BFD_RELOC_ARM_ALU_PC_G1_NC
, R_ARM_ALU_PC_G1_NC
},
2028 {BFD_RELOC_ARM_ALU_PC_G1
, R_ARM_ALU_PC_G1
},
2029 {BFD_RELOC_ARM_ALU_PC_G2
, R_ARM_ALU_PC_G2
},
2030 {BFD_RELOC_ARM_LDR_PC_G0
, R_ARM_LDR_PC_G0
},
2031 {BFD_RELOC_ARM_LDR_PC_G1
, R_ARM_LDR_PC_G1
},
2032 {BFD_RELOC_ARM_LDR_PC_G2
, R_ARM_LDR_PC_G2
},
2033 {BFD_RELOC_ARM_LDRS_PC_G0
, R_ARM_LDRS_PC_G0
},
2034 {BFD_RELOC_ARM_LDRS_PC_G1
, R_ARM_LDRS_PC_G1
},
2035 {BFD_RELOC_ARM_LDRS_PC_G2
, R_ARM_LDRS_PC_G2
},
2036 {BFD_RELOC_ARM_LDC_PC_G0
, R_ARM_LDC_PC_G0
},
2037 {BFD_RELOC_ARM_LDC_PC_G1
, R_ARM_LDC_PC_G1
},
2038 {BFD_RELOC_ARM_LDC_PC_G2
, R_ARM_LDC_PC_G2
},
2039 {BFD_RELOC_ARM_ALU_SB_G0_NC
, R_ARM_ALU_SB_G0_NC
},
2040 {BFD_RELOC_ARM_ALU_SB_G0
, R_ARM_ALU_SB_G0
},
2041 {BFD_RELOC_ARM_ALU_SB_G1_NC
, R_ARM_ALU_SB_G1_NC
},
2042 {BFD_RELOC_ARM_ALU_SB_G1
, R_ARM_ALU_SB_G1
},
2043 {BFD_RELOC_ARM_ALU_SB_G2
, R_ARM_ALU_SB_G2
},
2044 {BFD_RELOC_ARM_LDR_SB_G0
, R_ARM_LDR_SB_G0
},
2045 {BFD_RELOC_ARM_LDR_SB_G1
, R_ARM_LDR_SB_G1
},
2046 {BFD_RELOC_ARM_LDR_SB_G2
, R_ARM_LDR_SB_G2
},
2047 {BFD_RELOC_ARM_LDRS_SB_G0
, R_ARM_LDRS_SB_G0
},
2048 {BFD_RELOC_ARM_LDRS_SB_G1
, R_ARM_LDRS_SB_G1
},
2049 {BFD_RELOC_ARM_LDRS_SB_G2
, R_ARM_LDRS_SB_G2
},
2050 {BFD_RELOC_ARM_LDC_SB_G0
, R_ARM_LDC_SB_G0
},
2051 {BFD_RELOC_ARM_LDC_SB_G1
, R_ARM_LDC_SB_G1
},
2052 {BFD_RELOC_ARM_LDC_SB_G2
, R_ARM_LDC_SB_G2
},
2053 {BFD_RELOC_ARM_V4BX
, R_ARM_V4BX
},
2054 {BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
, R_ARM_THM_ALU_ABS_G3_NC
},
2055 {BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
, R_ARM_THM_ALU_ABS_G2_NC
},
2056 {BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
, R_ARM_THM_ALU_ABS_G1_NC
},
2057 {BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
, R_ARM_THM_ALU_ABS_G0_NC
}
static reloc_howto_type *
elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
			     bfd_reloc_code_real_type code)
{
  unsigned int i;

  for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i++)
    if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
      return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);

  return NULL;
}

static reloc_howto_type *
elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
			     const char *r_name)
{
  unsigned int i;

  for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
    if (elf32_arm_howto_table_1[i].name != NULL
	&& strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
      return &elf32_arm_howto_table_1[i];

  for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
    if (elf32_arm_howto_table_2[i].name != NULL
	&& strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
      return &elf32_arm_howto_table_2[i];

  for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_3); i++)
    if (elf32_arm_howto_table_3[i].name != NULL
	&& strcasecmp (elf32_arm_howto_table_3[i].name, r_name) == 0)
      return &elf32_arm_howto_table_3[i];

  return NULL;
}
/* Support for core dump NOTE sections.  */

static bfd_boolean
elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
{
  int offset;
  size_t size;

  switch (note->descsz)
    {
      default:
	return FALSE;

      case 148:		/* Linux/ARM 32-bit.  */
	/* pr_cursig */
	elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);

	/* pr_pid */
	elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);

	/* pr_reg */
	offset = 72;
	size = 72;

	break;
    }

  /* Make a ".reg/999" section.  */
  return _bfd_elfcore_make_pseudosection (abfd, ".reg",
					  size, note->descpos + offset);
}

static bfd_boolean
elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
{
  switch (note->descsz)
    {
      default:
	return FALSE;

      case 124:		/* Linux/ARM elf_prpsinfo.  */
	elf_tdata (abfd)->core->pid
	  = bfd_get_32 (abfd, note->descdata + 12);
	elf_tdata (abfd)->core->program
	  = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
	elf_tdata (abfd)->core->command
	  = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
    }

  /* Note that for some reason, a spurious space is tacked
     onto the end of the args in some (at least one anyway)
     implementations, so strip it off if it exists.  */
  {
    char *command = elf_tdata (abfd)->core->command;
    int n = strlen (command);

    if (0 < n && command[n - 1] == ' ')
      command[n - 1] = '\0';
  }

  return TRUE;
}

static char *
elf32_arm_nabi_write_core_note (bfd *abfd, char *buf, int *bufsiz,
				int note_type, ...)
{
  switch (note_type)
    {
    default:
      return NULL;

    case NT_PRPSINFO:
      {
	char data[124] ATTRIBUTE_NONSTRING;
	va_list ap;

	va_start (ap, note_type);
	memset (data, 0, sizeof (data));
	strncpy (data + 28, va_arg (ap, const char *), 16);

	/* GCC 8.1 warns about 80 equals destination size with
	   -Wstringop-truncation:
	   https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85643  */
#if GCC_VERSION == 8001
	DIAGNOSTIC_IGNORE_STRINGOP_TRUNCATION;
#endif
	strncpy (data + 44, va_arg (ap, const char *), 80);
	va_end (ap);

	return elfcore_write_note (abfd, buf, bufsiz,
				   "CORE", note_type, data, sizeof (data));
      }

    case NT_PRSTATUS:
      {
	char data[148];
	va_list ap;
	long pid;
	int cursig;
	const void *greg;

	va_start (ap, note_type);
	memset (data, 0, sizeof (data));
	pid = va_arg (ap, long);
	bfd_put_32 (abfd, pid, data + 24);
	cursig = va_arg (ap, int);
	bfd_put_16 (abfd, cursig, data + 12);
	greg = va_arg (ap, const void *);
	memcpy (data + 72, greg, 72);
	va_end (ap);

	return elfcore_write_note (abfd, buf, bufsiz,
				   "CORE", note_type, data, sizeof (data));
      }
    }
}
#define TARGET_LITTLE_SYM		arm_elf32_le_vec
#define TARGET_LITTLE_NAME		"elf32-littlearm"
#define TARGET_BIG_SYM			arm_elf32_be_vec
#define TARGET_BIG_NAME			"elf32-bigarm"

#define elf_backend_grok_prstatus	elf32_arm_nabi_grok_prstatus
#define elf_backend_grok_psinfo		elf32_arm_nabi_grok_psinfo
#define elf_backend_write_core_note	elf32_arm_nabi_write_core_note

typedef unsigned long int insn32;
typedef unsigned short int insn16;

/* In lieu of proper flags, assume all EABIv4 or later objects are
   interworkable.  */
#define INTERWORK_FLAG(abfd)  \
  (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
  || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
  || ((abfd)->flags & BFD_LINKER_CREATED))
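/* Illustrative note (editorial, assuming the usual elf/arm.h flag values,
   which are not defined in this file): EF_ARM_EABI_VERSION extracts the
   EABI version byte from e_flags, so an EABI v5 object such as one with
   e_flags == 0x05000000 satisfies the first clause of INTERWORK_FLAG,
   while a legacy object that carries the EF_ARM_INTERWORK bit satisfies
   the second.  A typical use is simply

     if (INTERWORK_FLAG (abfd))
       ... treat ABFD as interworking-safe ...  */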
/* The linker script knows the section names for placement.
   The entry_names are used to do simple name mangling on the stubs.
   Given a function name, and its type, the stub can be found. The
   name can be changed. The only requirement is the %s be present.  */
#define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
#define THUMB2ARM_GLUE_ENTRY_NAME   "__%s_from_thumb"

#define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
#define ARM2THUMB_GLUE_ENTRY_NAME   "__%s_from_arm"

#define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
#define VFP11_ERRATUM_VENEER_ENTRY_NAME   "__vfp11_veneer_%x"

#define STM32L4XX_ERRATUM_VENEER_SECTION_NAME ".text.stm32l4xx_veneer"
#define STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "__stm32l4xx_veneer_%x"

#define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
#define ARM_BX_GLUE_ENTRY_NAME   "__bx_r%d"

#define STUB_ENTRY_NAME   "__%s_veneer"

#define CMSE_PREFIX "__acle_se_"

/* The name of the dynamic interpreter.  This is put in the .interp
   section.  */
#define ELF_DYNAMIC_INTERPRETER     "/usr/lib/ld.so.1"

/* FDPIC default stack size.  */
#define DEFAULT_STACK_SIZE 0x8000
static const unsigned long tls_trampoline [] =
{
  0xe08e0000,		/* add r0, lr, r0 */
  0xe5901004,		/* ldr r1, [r0,#4] */
  0xe12fff11,		/* bx  r1 */
};

static const unsigned long dl_tlsdesc_lazy_trampoline [] =
{
  0xe52d2004, /* push {r2} */
  0xe59f200c, /*      ldr r2, [pc, #3f - . - 8] */
  0xe59f100c, /*      ldr r1, [pc, #4f - . - 8] */
  0xe79f2002, /* 1:   ldr r2, [pc, r2] */
  0xe081100f, /* 2:   add r1, pc */
  0xe12fff12, /*      bx  r2 */
  0x00000014, /* 3:   .word  _GLOBAL_OFFSET_TABLE_ - 1b - 8
			       + dl_tlsdesc_lazy_resolver(GOT)   */
  0x00000018, /* 4:   .word  _GLOBAL_OFFSET_TABLE_ - 2b - 8 */
};

/* ARM FDPIC PLT entry.  */
/* The last 5 words contain PLT lazy fragment code and data.  */
static const bfd_vma elf32_arm_fdpic_plt_entry [] =
  {
    0xe59fc008,    /* ldr     r12, .L1 */
    0xe08cc009,    /* add     r12, r12, r9 */
    0xe59c9004,    /* ldr     r9, [r12, #4] */
    0xe59cf000,    /* ldr     pc, [r12] */
    0x00000000,    /* L1.     .word   foo(GOTOFFFUNCDESC) */
    0x00000000,    /* L1.     .word   foo(funcdesc_value_reloc_offset) */
    0xe51fc00c,    /* ldr     r12, [pc, #-12] */
    0xe92d1000,    /* push    {r12} */
    0xe599c004,    /* ldr     r12, [r9, #4] */
    0xe599f000,    /* ldr     pc, [r9] */
  };

/* Thumb FDPIC PLT entry.  */
/* The last 5 words contain PLT lazy fragment code and data.  */
static const bfd_vma elf32_arm_fdpic_thumb_plt_entry [] =
  {
    0xc00cf8df,    /* ldr.w   r12, .L1 */
    0x0c09eb0c,    /* add.w   r12, r12, r9 */
    0x9004f8dc,    /* ldr.w   r9, [r12, #4] */
    0xf000f8dc,    /* ldr.w   pc, [r12] */
    0x00000000,    /* .L1     .word   foo(GOTOFFFUNCDESC) */
    0x00000000,    /* .L2     .word   foo(funcdesc_value_reloc_offset) */
    0xc008f85f,    /* ldr.w   r12, .L2 */
    0xcd04f84d,    /* push    {r12} */
    0xc004f8d9,    /* ldr.w   r12, [r9, #4] */
    0xf000f8d9,    /* ldr.w   pc, [r9] */
  };
#ifdef FOUR_WORD_PLT

/* The first entry in a procedure linkage table looks like
   this.  It is set up so that any shared library function that is
   called before the relocation has been set up calls the dynamic
   linker first.  */
static const bfd_vma elf32_arm_plt0_entry [] =
{
  0xe52de004,		/* str   lr, [sp, #-4]! */
  0xe59fe010,		/* ldr   lr, [pc, #16]  */
  0xe08fe00e,		/* add   lr, pc, lr     */
  0xe5bef008,		/* ldr   pc, [lr, #8]!  */
};

/* Subsequent entries in a procedure linkage table look like
   this.  */
static const bfd_vma elf32_arm_plt_entry [] =
{
  0xe28fc600,		/* add   ip, pc, #NN    */
  0xe28cca00,		/* add   ip, ip, #NN    */
  0xe5bcf000,		/* ldr   pc, [ip, #NN]! */
  0x00000000,		/* unused               */
};

#else /* not FOUR_WORD_PLT */

/* The first entry in a procedure linkage table looks like
   this.  It is set up so that any shared library function that is
   called before the relocation has been set up calls the dynamic
   linker first.  */
static const bfd_vma elf32_arm_plt0_entry [] =
{
  0xe52de004,		/* str   lr, [sp, #-4]! */
  0xe59fe004,		/* ldr   lr, [pc, #4]   */
  0xe08fe00e,		/* add   lr, pc, lr     */
  0xe5bef008,		/* ldr   pc, [lr, #8]!  */
  0x00000000,		/* &GOT[0] - .          */
};

/* By default subsequent entries in a procedure linkage table look like
   this. Offsets that don't fit into 28 bits will cause link error.  */
static const bfd_vma elf32_arm_plt_entry_short [] =
{
  0xe28fc600,		/* add   ip, pc, #0xNN00000 */
  0xe28cca00,		/* add   ip, ip, #0xNN000   */
  0xe5bcf000,		/* ldr   pc, [ip, #0xNNN]!  */
};

/* When explicitly asked, we'll use this "long" entry format
   which can cope with arbitrary displacements.  */
static const bfd_vma elf32_arm_plt_entry_long [] =
{
  0xe28fc200,		/* add   ip, pc, #0xN0000000 */
  0xe28cc600,		/* add   ip, ip, #0xNN00000  */
  0xe28cca00,		/* add   ip, ip, #0xNN000    */
  0xe5bcf000,		/* ldr   pc, [ip, #0xNNN]!   */
};

static bfd_boolean elf32_arm_use_long_plt_entry = FALSE;

#endif /* not FOUR_WORD_PLT */

/* The first entry in a procedure linkage table looks like this.
   It is set up so that any shared library function that is called before the
   relocation has been set up calls the dynamic linker first.  */
static const bfd_vma elf32_thumb2_plt0_entry [] =
{
  /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
     an instruction maybe encoded to one or two array elements.  */
  0xf8dfb500,		/* push    {lr}          */
  0x44fee008,		/* ldr.w   lr, [pc, #8]  */
			/* add     lr, pc        */
  0xff08f85e,		/* ldr.w   pc, [lr, #8]! */
  0x00000000,		/* &GOT[0] - .           */
};

/* Subsequent entries in a procedure linkage table for thumb only target
   look like this.  */
static const bfd_vma elf32_thumb2_plt_entry [] =
{
  /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
     an instruction maybe encoded to one or two array elements.  */
  0x0c00f240,		/* movw    ip, #0xNNNN */
  0x0c00f2c0,		/* movt    ip, #0xNNNN */
  0xf8dc44fc,		/* add     ip, pc      */
			/* ldr.w   pc, [ip]    */
  0xbf00f000		/* nop                 */
};
/* The format of the first entry in the procedure linkage table
   for a VxWorks executable.  */
static const bfd_vma elf32_arm_vxworks_exec_plt0_entry [] =
{
  0xe52dc008,		/* str    ip,[sp,#-8]!                */
  0xe59fc000,		/* ldr    ip,[pc]                     */
  0xe59cf008,		/* ldr    pc,[ip,#8]                  */
  0x00000000,		/* .long  _GLOBAL_OFFSET_TABLE_       */
};

/* The format of subsequent entries in a VxWorks executable.  */
static const bfd_vma elf32_arm_vxworks_exec_plt_entry [] =
{
  0xe59fc000,		/* ldr    ip,[pc]                     */
  0xe59cf000,		/* ldr    pc,[ip]                     */
  0x00000000,		/* .long  @got                        */
  0xe59fc000,		/* ldr    ip,[pc]                     */
  0xea000000,		/* b      _PLT                        */
  0x00000000,		/* .long  @pltindex*sizeof(Elf32_Rela) */
};

/* The format of entries in a VxWorks shared library.  */
static const bfd_vma elf32_arm_vxworks_shared_plt_entry [] =
{
  0xe59fc000,		/* ldr    ip,[pc]                     */
  0xe79cf009,		/* ldr    pc,[ip,r9]                  */
  0x00000000,		/* .long  @got                        */
  0xe59fc000,		/* ldr    ip,[pc]                     */
  0xe599f008,		/* ldr    pc,[r9,#8]                  */
  0x00000000,		/* .long  @pltindex*sizeof(Elf32_Rela) */
};

/* An initial stub used if the PLT entry is referenced from Thumb code.  */
#define PLT_THUMB_STUB_SIZE 4
static const bfd_vma elf32_arm_plt_thumb_stub [] =
{
  0x4778,		/* bx pc */
  0x46c0		/* nop   */
};
/* The entries in a PLT when using a DLL-based target with multiple
   address spaces.  */
static const bfd_vma elf32_arm_symbian_plt_entry [] =
{
  0xe51ff004,		/* ldr   pc, [pc, #-4] */
  0x00000000,		/* dcd   R_ARM_GLOB_DAT(X) */
};

/* The first entry in a procedure linkage table looks like
   this.  It is set up so that any shared library function that is
   called before the relocation has been set up calls the dynamic
   linker first.  */
static const bfd_vma elf32_arm_nacl_plt0_entry [] =
{
  /* First bundle: */
  0xe300c000,		/* movw ip, #:lower16:&GOT[2]-.+8 */
  0xe340c000,		/* movt ip, #:upper16:&GOT[2]-.+8 */
  0xe08cc00f,		/* add  ip, ip, pc                */
  0xe52dc008,		/* str  ip, [sp, #-8]!            */
  /* Second bundle: */
  0xe3ccc103,		/* bic  ip, ip, #0xc0000000       */
  0xe59cc000,		/* ldr  ip, [ip]                  */
  0xe3ccc13f,		/* bic  ip, ip, #0xc000000f       */
  0xe12fff1c,		/* bx   ip                        */
  /* Third bundle: */
  0xe320f000,		/* nop                            */
  0xe320f000,		/* nop                            */
  0xe320f000,		/* nop                            */
  /* .Lplt_tail: */
  0xe50dc004,		/* str  ip, [sp, #-4]             */
  /* Fourth bundle: */
  0xe3ccc103,		/* bic  ip, ip, #0xc0000000       */
  0xe59cc000,		/* ldr  ip, [ip]                  */
  0xe3ccc13f,		/* bic  ip, ip, #0xc000000f       */
  0xe12fff1c,		/* bx   ip                        */
};
#define ARM_NACL_PLT_TAIL_OFFSET	(11 * 4)

/* Subsequent entries in a procedure linkage table look like this.  */
static const bfd_vma elf32_arm_nacl_plt_entry [] =
{
  0xe300c000,		/* movw ip, #:lower16:&GOT[n]-.+8 */
  0xe340c000,		/* movt ip, #:upper16:&GOT[n]-.+8 */
  0xe08cc00f,		/* add  ip, ip, pc                */
  0xea000000,		/* b    .Lplt_tail                */
};
#define ARM_MAX_FWD_BRANCH_OFFSET  ((((1 << 23) - 1) << 2) + 8)
#define ARM_MAX_BWD_BRANCH_OFFSET  ((-((1 << 23) << 2)) + 8)
#define THM_MAX_FWD_BRANCH_OFFSET  ((1 << 22) -2 + 4)
#define THM_MAX_BWD_BRANCH_OFFSET  (-(1 << 22) + 4)
#define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
#define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
#define THM2_MAX_FWD_COND_BRANCH_OFFSET (((1 << 20) -2) + 4)
#define THM2_MAX_BWD_COND_BRANCH_OFFSET (-(1 << 20) + 4)
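/* Worked example (editorial, not from the original sources): the ARM
   B/BL encoding holds a signed 24-bit word offset and the PC reads as
   the branch address + 8, hence

     ARM_MAX_FWD_BRANCH_OFFSET = ((1 << 23) - 1) * 4 + 8 = 0x2000004
     ARM_MAX_BWD_BRANCH_OFFSET = -(1 << 23) * 4 + 8      = -0x1fffff8

   A reachability test in the stub-sizing code can therefore be sketched
   as

     bfd_signed_vma branch_offset = destination - location;
     if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
	 || branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
       stub_needed = TRUE;

   with the Thumb and Thumb-2 limits playing the same role for 16-bit
   and 32-bit branches.  */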
enum stub_insn_type
{
  THUMB16_TYPE = 1,
  THUMB32_TYPE,
  ARM_TYPE,
  DATA_TYPE
};

#define THUMB16_INSN(X)		{(X), THUMB16_TYPE, R_ARM_NONE, 0}
/* A bit of a hack.  A Thumb conditional branch, in which the proper condition
   is inserted in arm_build_one_stub().  */
#define THUMB16_BCOND_INSN(X)	{(X), THUMB16_TYPE, R_ARM_NONE, 1}
#define THUMB32_INSN(X)		{(X), THUMB32_TYPE, R_ARM_NONE, 0}
#define THUMB32_MOVT(X)		{(X), THUMB32_TYPE, R_ARM_THM_MOVT_ABS, 0}
#define THUMB32_MOVW(X)		{(X), THUMB32_TYPE, R_ARM_THM_MOVW_ABS_NC, 0}
#define THUMB32_B_INSN(X, Z)	{(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
#define ARM_INSN(X)		{(X), ARM_TYPE, R_ARM_NONE, 0}
#define ARM_REL_INSN(X, Z)	{(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
#define DATA_WORD(X,Y,Z)	{(X), DATA_TYPE, (Y), (Z)}

typedef struct
{
  bfd_vma	       data;
  enum stub_insn_type  type;
  unsigned int	       r_type;
  int		       reloc_addend;
}  insn_sequence;
/* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
   to reach the stub if necessary.  */
static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
{
  ARM_INSN (0xe51ff004),	    /* ldr   pc, [pc, #-4] */
  DATA_WORD (0, R_ARM_ABS32, 0),    /* dcd   R_ARM_ABS32(X) */
};

/* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
   available.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
{
  ARM_INSN (0xe59fc000),	    /* ldr   ip, [pc, #0] */
  ARM_INSN (0xe12fff1c),	    /* bx    ip */
  DATA_WORD (0, R_ARM_ABS32, 0),    /* dcd   R_ARM_ABS32(X) */
};

/* Thumb -> Thumb long branch stub. Used on M-profile architectures.  */
static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
{
  THUMB16_INSN (0xb401),	    /* push {r0} */
  THUMB16_INSN (0x4802),	    /* ldr  r0, [pc, #8] */
  THUMB16_INSN (0x4684),	    /* mov  ip, r0 */
  THUMB16_INSN (0xbc01),	    /* pop  {r0} */
  THUMB16_INSN (0x4760),	    /* bx   ip */
  THUMB16_INSN (0xbf00),	    /* nop */
  DATA_WORD (0, R_ARM_ABS32, 0),    /* dcd  R_ARM_ABS32(X) */
};

/* Thumb -> Thumb long branch stub in thumb2 encoding.  Used on armv7.  */
static const insn_sequence elf32_arm_stub_long_branch_thumb2_only[] =
{
  THUMB32_INSN (0xf85ff000),	    /* ldr.w  pc, [pc, #-0] */
  DATA_WORD (0, R_ARM_ABS32, 0),    /* dcd  R_ARM_ABS32(x) */
};

/* Thumb -> Thumb long branch stub. Used for PureCode sections on Thumb2
   M-profile architectures.  */
static const insn_sequence elf32_arm_stub_long_branch_thumb2_only_pure[] =
{
  THUMB32_MOVW (0xf2400c00),	    /* mov.w ip, R_ARM_MOVW_ABS_NC */
  THUMB32_MOVT (0xf2c00c00),	    /* movt  ip, R_ARM_MOVT_ABS << 16 */
  THUMB16_INSN (0x4760),	    /* bx    ip */
};

/* V4T Thumb -> Thumb long branch stub. Using the stack is not
   allowed.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
{
  THUMB16_INSN (0x4778),	    /* bx   pc */
  THUMB16_INSN (0x46c0),	    /* nop */
  ARM_INSN (0xe59fc000),	    /* ldr  ip, [pc, #0] */
  ARM_INSN (0xe12fff1c),	    /* bx   ip */
  DATA_WORD (0, R_ARM_ABS32, 0),    /* dcd  R_ARM_ABS32(X) */
};

/* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
   available.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
{
  THUMB16_INSN (0x4778),	    /* bx   pc */
  THUMB16_INSN (0x46c0),	    /* nop */
  ARM_INSN (0xe51ff004),	    /* ldr  pc, [pc, #-4] */
  DATA_WORD (0, R_ARM_ABS32, 0),    /* dcd  R_ARM_ABS32(X) */
};

/* V4T Thumb -> ARM short branch stub. Shorter variant of the above
   one, when the destination is close enough.  */
static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
{
  THUMB16_INSN (0x4778),	    /* bx   pc */
  THUMB16_INSN (0x46c0),	    /* nop */
  ARM_REL_INSN (0xea000000, -8),    /* b    (X-8) */
};

/* ARM/Thumb -> ARM long branch stub, PIC.  On V5T and above, use
   blx to reach the stub if necessary.  */
static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
{
  ARM_INSN (0xe59fc000),	    /* ldr   ip, [pc] */
  ARM_INSN (0xe08ff00c),	    /* add   pc, pc, ip */
  DATA_WORD (0, R_ARM_REL32, -4),   /* dcd   R_ARM_REL32(X-4) */
};

/* ARM/Thumb -> Thumb long branch stub, PIC.  On V5T and above, use
   blx to reach the stub if necessary.  We can not add into pc;
   it is not guaranteed to mode switch (different in ARMv6 and
   ARMv7).  */
static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
{
  ARM_INSN (0xe59fc004),	    /* ldr   ip, [pc, #4] */
  ARM_INSN (0xe08fc00c),	    /* add   ip, pc, ip */
  ARM_INSN (0xe12fff1c),	    /* bx    ip */
  DATA_WORD (0, R_ARM_REL32, 0),    /* dcd   R_ARM_REL32(X) */
};

/* V4T ARM -> ARM long branch stub, PIC.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
{
  ARM_INSN (0xe59fc004),	    /* ldr   ip, [pc, #4] */
  ARM_INSN (0xe08fc00c),	    /* add   ip, pc, ip */
  ARM_INSN (0xe12fff1c),	    /* bx    ip */
  DATA_WORD (0, R_ARM_REL32, 0),    /* dcd   R_ARM_REL32(X) */
};

/* V4T Thumb -> ARM long branch stub, PIC.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
{
  THUMB16_INSN (0x4778),	    /* bx   pc */
  THUMB16_INSN (0x46c0),	    /* nop */
  ARM_INSN (0xe59fc000),	    /* ldr  ip, [pc, #0] */
  ARM_INSN (0xe08cf00f),	    /* add  pc, ip, pc */
  DATA_WORD (0, R_ARM_REL32, -4),   /* dcd  R_ARM_REL32(X) */
};

/* Thumb -> Thumb long branch stub, PIC. Used on M-profile
   architectures.  */
static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
{
  THUMB16_INSN (0xb401),	    /* push {r0} */
  THUMB16_INSN (0x4802),	    /* ldr  r0, [pc, #8] */
  THUMB16_INSN (0x46fc),	    /* mov  ip, pc */
  THUMB16_INSN (0x4484),	    /* add  ip, r0 */
  THUMB16_INSN (0xbc01),	    /* pop  {r0} */
  THUMB16_INSN (0x4760),	    /* bx   ip */
  DATA_WORD (0, R_ARM_REL32, 4),    /* dcd  R_ARM_REL32(X) */
};

/* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
   allowed.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
{
  THUMB16_INSN (0x4778),	    /* bx   pc */
  THUMB16_INSN (0x46c0),	    /* nop */
  ARM_INSN (0xe59fc004),	    /* ldr  ip, [pc, #4] */
  ARM_INSN (0xe08fc00c),	    /* add  ip, pc, ip */
  ARM_INSN (0xe12fff1c),	    /* bx   ip */
  DATA_WORD (0, R_ARM_REL32, 0),    /* dcd  R_ARM_REL32(X) */
};

/* Thumb2/ARM -> TLS trampoline.  Lowest common denominator, which is a
   long PIC stub.  We can use r1 as a scratch -- and cannot use ip.  */
static const insn_sequence elf32_arm_stub_long_branch_any_tls_pic[] =
{
  ARM_INSN (0xe59f1000),	    /* ldr   r1, [pc] */
  ARM_INSN (0xe08ff001),	    /* add   pc, pc, r1 */
  DATA_WORD (0, R_ARM_REL32, -4),   /* dcd   R_ARM_REL32(X-4) */
};

/* V4T Thumb -> TLS trampoline.  lowest common denominator, which is a
   long PIC stub.  We can use r1 as a scratch -- and cannot use ip.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_tls_pic[] =
{
  THUMB16_INSN (0x4778),	    /* bx   pc */
  THUMB16_INSN (0x46c0),	    /* nop */
  ARM_INSN (0xe59f1000),	    /* ldr  r1, [pc, #0] */
  ARM_INSN (0xe081f00f),	    /* add  pc, r1, pc */
  DATA_WORD (0, R_ARM_REL32, -4),   /* dcd  R_ARM_REL32(X) */
};

/* NaCl ARM -> ARM long branch stub.  */
static const insn_sequence elf32_arm_stub_long_branch_arm_nacl[] =
{
  ARM_INSN (0xe59fc00c),	    /* ldr  ip, [pc, #12] */
  ARM_INSN (0xe3ccc13f),	    /* bic  ip, ip, #0xc000000f */
  ARM_INSN (0xe12fff1c),	    /* bx   ip */
  ARM_INSN (0xe320f000),	    /* nop */
  ARM_INSN (0xe125be70),	    /* bkpt 0x5be0 */
  DATA_WORD (0, R_ARM_ABS32, 0),    /* dcd  R_ARM_ABS32(X) */
  DATA_WORD (0, R_ARM_NONE, 0),	    /* .word 0 */
  DATA_WORD (0, R_ARM_NONE, 0),	    /* .word 0 */
};

/* NaCl ARM -> ARM long branch stub, PIC.  */
static const insn_sequence elf32_arm_stub_long_branch_arm_nacl_pic[] =
{
  ARM_INSN (0xe59fc00c),	    /* ldr  ip, [pc, #12] */
  ARM_INSN (0xe08cc00f),	    /* add  ip, ip, pc */
  ARM_INSN (0xe3ccc13f),	    /* bic  ip, ip, #0xc000000f */
  ARM_INSN (0xe12fff1c),	    /* bx   ip */
  ARM_INSN (0xe125be70),	    /* bkpt 0x5be0 */
  DATA_WORD (0, R_ARM_REL32, 8),    /* dcd  R_ARM_REL32(X+8) */
  DATA_WORD (0, R_ARM_NONE, 0),	    /* .word 0 */
  DATA_WORD (0, R_ARM_NONE, 0),	    /* .word 0 */
};

/* Stub used for transition to secure state (aka SG veneer).  */
static const insn_sequence elf32_arm_stub_cmse_branch_thumb_only[] =
{
  THUMB32_INSN (0xe97fe97f),		/* sg.  */
  THUMB32_B_INSN (0xf000b800, -4),	/* b.w original_branch_dest.  */
};


/* Cortex-A8 erratum-workaround stubs.  */

/* Stub used for conditional branches (which may be beyond +/-1MB away, so we
   can't use a conditional branch to reach this stub).  */

static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
{
  THUMB16_BCOND_INSN (0xd001),		/* b<cond>.n true.  */
  THUMB32_B_INSN (0xf000b800, -4),	/* b.w insn_after_original_branch.  */
  THUMB32_B_INSN (0xf000b800, -4)	/* true: b.w original_branch_dest.  */
};

/* Stub used for b.w and bl.w instructions.  */

static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
{
  THUMB32_B_INSN (0xf000b800, -4)	/* b.w original_branch_dest.  */
};

static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
{
  THUMB32_B_INSN (0xf000b800, -4)	/* b.w original_branch_dest.  */
};

/* Stub used for Thumb-2 blx.w instructions.  We modified the original blx.w
   instruction (which switches to ARM mode) to point to this stub.  Jump to the
   real destination using an ARM-mode branch.  */

static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
{
  ARM_REL_INSN (0xea000000, -8)	/* b original_branch_dest.  */
};
/* For each section group there can be a specially created linker section
   to hold the stubs for that group.  The name of the stub section is based
   upon the name of another section within that group with the suffix below
   applied.

   PR 13049: STUB_SUFFIX used to be ".stub", but this allowed the user to
   create what appeared to be a linker stub section when it actually
   contained user code/data.  For example, consider this fragment:

     const char * stubborn_problems[] = { "np" };

   If this is compiled with "-fPIC -fdata-sections" then gcc produces a
   section called:

     .data.rel.local.stubborn_problems

   This then causes problems in arm32_arm_build_stubs() as it triggers:

      // Ignore non-stub sections.
      if (!strstr (stub_sec->name, STUB_SUFFIX))
	continue;

   And so the section would be ignored instead of being processed.  Hence
   the change in definition of STUB_SUFFIX to a name that cannot be a valid
   C identifier.  */

#define STUB_SUFFIX ".__stub"

/* One entry per long/short branch stub defined above.  */
#define DEF_STUBS \
  DEF_STUB(long_branch_any_any) \
  DEF_STUB(long_branch_v4t_arm_thumb) \
  DEF_STUB(long_branch_thumb_only) \
  DEF_STUB(long_branch_v4t_thumb_thumb) \
  DEF_STUB(long_branch_v4t_thumb_arm) \
  DEF_STUB(short_branch_v4t_thumb_arm) \
  DEF_STUB(long_branch_any_arm_pic) \
  DEF_STUB(long_branch_any_thumb_pic) \
  DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
  DEF_STUB(long_branch_v4t_arm_thumb_pic) \
  DEF_STUB(long_branch_v4t_thumb_arm_pic) \
  DEF_STUB(long_branch_thumb_only_pic) \
  DEF_STUB(long_branch_any_tls_pic) \
  DEF_STUB(long_branch_v4t_thumb_tls_pic) \
  DEF_STUB(long_branch_arm_nacl) \
  DEF_STUB(long_branch_arm_nacl_pic) \
  DEF_STUB(cmse_branch_thumb_only) \
  DEF_STUB(a8_veneer_b_cond) \
  DEF_STUB(a8_veneer_b) \
  DEF_STUB(a8_veneer_bl) \
  DEF_STUB(a8_veneer_blx) \
  DEF_STUB(long_branch_thumb2_only) \
  DEF_STUB(long_branch_thumb2_only_pure)

#define DEF_STUB(x) arm_stub_##x,
enum elf32_arm_stub_type
{
  arm_stub_none,
  DEF_STUBS
  max_stub_type
};
#undef DEF_STUB

/* Note the first a8_veneer type.  */
const unsigned arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond;

typedef struct
{
  const insn_sequence* template_sequence;
  int template_size;
} stub_def;

#define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
static const stub_def stub_definitions[] =
{
  {NULL, 0},
  DEF_STUBS
};
struct elf32_arm_stub_hash_entry
{
  /* Base hash table entry structure.  */
  struct bfd_hash_entry root;

  /* The stub section.  */
  asection *stub_sec;

  /* Offset within stub_sec of the beginning of this stub.  */
  bfd_vma stub_offset;

  /* Given the symbol's value and its section we can determine its final
     value when building the stubs (so the stub knows where to jump).  */
  bfd_vma target_value;
  asection *target_section;

  /* Same as above but for the source of the branch to the stub.  Used for
     Cortex-A8 erratum workaround to patch it to branch to the stub.  As
     such, source section does not need to be recorded since Cortex-A8 erratum
     workaround stubs are only generated when both source and target are in the
     same section.  */
  bfd_vma source_value;

  /* The instruction which caused this stub to be generated (only valid for
     Cortex-A8 erratum workaround stubs at present).  */
  unsigned long orig_insn;

  /* The stub type.  */
  enum elf32_arm_stub_type stub_type;
  /* Its encoding size in bytes.  */
  int stub_size;
  /* Its template.  */
  const insn_sequence *stub_template;
  /* The size of the template (number of entries).  */
  int stub_template_size;

  /* The symbol table entry, if any, that this was derived from.  */
  struct elf32_arm_link_hash_entry *h;

  /* Type of branch.  */
  enum arm_st_branch_type branch_type;

  /* Where this stub is being called from, or, in the case of combined
     stub sections, the first input section in the group.  */
  asection *id_sec;

  /* The name for the local symbol at the start of this stub.  The
     stub name in the hash table has to be unique; this does not, so
     it can be friendlier.  */
  char *output_name;
};

/* Used to build a map of a section.  This is required for mixed-endian
   code/data.  */

typedef struct elf32_elf_section_map
{
  bfd_vma vma;
  char type;
}
elf32_arm_section_map;
/* Information about a VFP11 erratum veneer, or a branch to such a veneer.  */

typedef enum
{
  VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
  VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
  VFP11_ERRATUM_ARM_VENEER,
  VFP11_ERRATUM_THUMB_VENEER
}
elf32_vfp11_erratum_type;

typedef struct elf32_vfp11_erratum_list
{
  struct elf32_vfp11_erratum_list *next;
  bfd_vma vma;
  union
  {
    struct
    {
      struct elf32_vfp11_erratum_list *veneer;
      unsigned int vfp_insn;
    } b;
    struct
    {
      struct elf32_vfp11_erratum_list *branch;
      unsigned int id;
    } v;
  } u;
  elf32_vfp11_erratum_type type;
}
elf32_vfp11_erratum_list;

/* Information about a STM32L4XX erratum veneer, or a branch to such a
   veneer.  */
typedef enum
{
  STM32L4XX_ERRATUM_BRANCH_TO_VENEER,
  STM32L4XX_ERRATUM_VENEER
}
elf32_stm32l4xx_erratum_type;

typedef struct elf32_stm32l4xx_erratum_list
{
  struct elf32_stm32l4xx_erratum_list *next;
  bfd_vma vma;
  union
  {
    struct
    {
      struct elf32_stm32l4xx_erratum_list *veneer;
      unsigned int insn;
    } b;
    struct
    {
      struct elf32_stm32l4xx_erratum_list *branch;
      unsigned int id;
    } v;
  } u;
  elf32_stm32l4xx_erratum_type type;
}
elf32_stm32l4xx_erratum_list;
typedef enum
{
  DELETE_EXIDX_ENTRY,
  INSERT_EXIDX_CANTUNWIND_AT_END
}
arm_unwind_edit_type;

/* A (sorted) list of edits to apply to an unwind table.  */
typedef struct arm_unwind_table_edit
{
  arm_unwind_edit_type type;
  /* Note: we sometimes want to insert an unwind entry corresponding to a
     section different from the one we're currently writing out, so record the
     (text) section this edit relates to here.  */
  asection *linked_section;
  unsigned int index;
  struct arm_unwind_table_edit *next;
}
arm_unwind_table_edit;

typedef struct _arm_elf_section_data
{
  /* Information about mapping symbols.  */
  struct bfd_elf_section_data elf;
  unsigned int mapcount;
  unsigned int mapsize;
  elf32_arm_section_map *map;
  /* Information about CPU errata.  */
  unsigned int erratumcount;
  elf32_vfp11_erratum_list *erratumlist;
  unsigned int stm32l4xx_erratumcount;
  elf32_stm32l4xx_erratum_list *stm32l4xx_erratumlist;
  unsigned int additional_reloc_count;
  /* Information about unwind tables.  */
  union
  {
    /* Unwind info attached to a text section.  */
    struct
    {
      asection *arm_exidx_sec;
    } text;

    /* Unwind info attached to an .ARM.exidx section.  */
    struct
    {
      arm_unwind_table_edit *unwind_edit_list;
      arm_unwind_table_edit *unwind_edit_tail;
    } exidx;
  } u;
}
_arm_elf_section_data;

#define elf32_arm_section_data(sec) \
  ((_arm_elf_section_data *) elf_section_data (sec))
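/* Usage sketch (editorial): given a section SEC whose bfd_elf_section_data
   was allocated as an _arm_elf_section_data (the backend's new-section
   hook normally arranges this), the ARM-specific fields are reached with

     _arm_elf_section_data *sdata = elf32_arm_section_data (sec);
     if (sdata != NULL && sdata->mapcount > 0)
       ... walk sdata->map[0 .. sdata->mapcount - 1] ...  */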
/* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
   These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
   so may be created multiple times: we use an array of these entries whilst
   relaxing which we can refresh easily, then create stubs for each potentially
   erratum-triggering instruction once we've settled on a solution.  */

struct a8_erratum_fix
{
  bfd *input_bfd;
  asection *section;
  bfd_vma offset;
  bfd_vma target_offset;
  unsigned long orig_insn;
  char *stub_name;
  enum elf32_arm_stub_type stub_type;
  enum arm_st_branch_type branch_type;
};

/* A table of relocs applied to branches which might trigger Cortex-A8
   erratum.  */

struct a8_erratum_reloc
{
  bfd_vma from;
  bfd_vma destination;
  struct elf32_arm_link_hash_entry *hash;
  const char *sym_name;
  unsigned int r_type;
  enum arm_st_branch_type branch_type;
  bfd_boolean non_a8_stub;
};
/* The size of the thread control block.  */
#define TCB_SIZE	8

/* ARM-specific information about a PLT entry, over and above the usual
   gotplt_union.  */
struct arm_plt_info
{
  /* We reference count Thumb references to a PLT entry separately,
     so that we can emit the Thumb trampoline only if needed.  */
  bfd_signed_vma thumb_refcount;

  /* Some references from Thumb code may be eliminated by BL->BLX
     conversion, so record them separately.  */
  bfd_signed_vma maybe_thumb_refcount;

  /* How many of the recorded PLT accesses were from non-call relocations.
     This information is useful when deciding whether anything takes the
     address of an STT_GNU_IFUNC PLT.  A value of 0 means that all
     non-call references to the function should resolve directly to the
     real runtime target.  */
  unsigned int noncall_refcount;

  /* Since PLT entries have variable size if the Thumb prologue is
     used, we need to record the index into .got.plt instead of
     recomputing it from the PLT offset.  */
  bfd_signed_vma got_offset;
};

/* Information about an .iplt entry for a local STT_GNU_IFUNC symbol.  */
struct arm_local_iplt_info
{
  /* The information that is usually found in the generic ELF part of
     the hash table entry.  */
  union gotplt_union root;

  /* The information that is usually found in the ARM-specific part of
     the hash table entry.  */
  struct arm_plt_info arm;

  /* A list of all potential dynamic relocations against this symbol.  */
  struct elf_dyn_relocs *dyn_relocs;
};
/* Structure to handle FDPIC support for local functions.  */
struct fdpic_local {
  unsigned int funcdesc_cnt;
  unsigned int gotofffuncdesc_cnt;
  int funcdesc_offset;
};

struct elf_arm_obj_tdata
{
  struct elf_obj_tdata root;

  /* tls_type for each local got entry.  */
  char *local_got_tls_type;

  /* GOTPLT entries for TLS descriptors.  */
  bfd_vma *local_tlsdesc_gotent;

  /* Information for local symbols that need entries in .iplt.  */
  struct arm_local_iplt_info **local_iplt;

  /* Zero to warn when linking objects with incompatible enum sizes.  */
  int no_enum_size_warning;

  /* Zero to warn when linking objects with incompatible wchar_t sizes.  */
  int no_wchar_size_warning;

  /* Maintains FDPIC counters and funcdesc info.  */
  struct fdpic_local *local_fdpic_cnts;
};

#define elf_arm_tdata(bfd) \
  ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)

#define elf32_arm_local_got_tls_type(bfd) \
  (elf_arm_tdata (bfd)->local_got_tls_type)

#define elf32_arm_local_tlsdesc_gotent(bfd) \
  (elf_arm_tdata (bfd)->local_tlsdesc_gotent)

#define elf32_arm_local_iplt(bfd) \
  (elf_arm_tdata (bfd)->local_iplt)

#define elf32_arm_local_fdpic_cnts(bfd) \
  (elf_arm_tdata (bfd)->local_fdpic_cnts)

#define is_arm_elf(bfd) \
  (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
   && elf_tdata (bfd) != NULL \
   && elf_object_id (bfd) == ARM_ELF_DATA)
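/* Usage sketch (editorial): the accessors above are only meaningful for
   BFDs that really carry an elf_arm_obj_tdata, so callers typically guard
   with is_arm_elf first, e.g.

     if (is_arm_elf (abfd)
	 && elf32_arm_allocate_local_sym_info (abfd))
       tls_type = elf32_arm_local_got_tls_type (abfd)[r_symndx];

   (elf32_arm_allocate_local_sym_info appears further down in this
   file.)  */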
static bfd_boolean
elf32_arm_mkobject (bfd *abfd)
{
  return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
				  ARM_ELF_DATA);
}

#define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))

/* Structure to handle FDPIC support for extern functions.  */
struct fdpic_global {
  unsigned int gotofffuncdesc_cnt;
  unsigned int gotfuncdesc_cnt;
  unsigned int funcdesc_cnt;
  int funcdesc_offset;
  int gotfuncdesc_offset;
};
/* Arm ELF linker hash entry.  */
struct elf32_arm_link_hash_entry
{
  struct elf_link_hash_entry root;

  /* Track dynamic relocs copied for this symbol.  */
  struct elf_dyn_relocs *dyn_relocs;

  /* ARM-specific PLT information.  */
  struct arm_plt_info plt;

#define GOT_UNKNOWN	0
#define GOT_NORMAL	1
#define GOT_TLS_GD	2
#define GOT_TLS_IE	4
#define GOT_TLS_GDESC	8
#define GOT_TLS_GD_ANY_P(type)	((type & GOT_TLS_GD) || (type & GOT_TLS_GDESC))
  unsigned int tls_type : 8;

  /* True if the symbol's PLT entry is in .iplt rather than .plt.  */
  unsigned int is_iplt : 1;

  unsigned int unused : 23;

  /* Offset of the GOTPLT entry reserved for the TLS descriptor,
     starting at the end of the jump table.  */
  bfd_vma tlsdesc_got;

  /* The symbol marking the real symbol location for exported thumb
     symbols with Arm stubs.  */
  struct elf_link_hash_entry *export_glue;

  /* A pointer to the most recently used stub hash entry against this
     symbol.  */
  struct elf32_arm_stub_hash_entry *stub_cache;

  /* Counter for FDPIC relocations against this symbol.  */
  struct fdpic_global fdpic_cnts;
};

/* Traverse an arm ELF linker hash table.  */
#define elf32_arm_link_hash_traverse(table, func, info)			\
  (elf_link_hash_traverse						\
   (&(table)->root,							\
    (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func),	\
    (info)))

/* Get the ARM elf linker hash table from a link_info structure.  */
#define elf32_arm_hash_table(info) \
  (elf_hash_table_id ((struct elf_link_hash_table *) ((info)->hash)) \
  == ARM_ELF_DATA ? ((struct elf32_arm_link_hash_table *) ((info)->hash)) : NULL)

#define arm_stub_hash_lookup(table, string, create, copy) \
  ((struct elf32_arm_stub_hash_entry *) \
   bfd_hash_lookup ((table), (string), (create), (copy)))

/* Array to keep track of which stub sections have been created, and
   information on stub grouping.  */
struct map_stub
{
  /* This is the section to which stubs in the group will be
     attached.  */
  asection *link_sec;
  /* The stub section.  */
  asection *stub_sec;
};

#define elf32_arm_compute_jump_table_size(htab) \
  ((htab)->next_tls_desc_index * 4)
/* ARM ELF linker hash table.  */
struct elf32_arm_link_hash_table
{
  /* The main hash table.  */
  struct elf_link_hash_table root;

  /* The size in bytes of the section containing the Thumb-to-ARM glue.  */
  bfd_size_type thumb_glue_size;

  /* The size in bytes of the section containing the ARM-to-Thumb glue.  */
  bfd_size_type arm_glue_size;

  /* The size in bytes of section containing the ARMv4 BX veneers.  */
  bfd_size_type bx_glue_size;

  /* Offsets of ARMv4 BX veneers.  Bit1 set if present, and Bit0 set when
     veneer has been populated.  */
  bfd_vma bx_glue_offset[15];

  /* The size in bytes of the section containing glue for VFP11 erratum
     veneers.  */
  bfd_size_type vfp11_erratum_glue_size;

  /* The size in bytes of the section containing glue for STM32L4XX erratum
     veneers.  */
  bfd_size_type stm32l4xx_erratum_glue_size;

  /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum.  This
     holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
     elf32_arm_write_section().  */
  struct a8_erratum_fix *a8_erratum_fixes;
  unsigned int num_a8_erratum_fixes;

  /* An arbitrary input BFD chosen to hold the glue sections.  */
  bfd * bfd_of_glue_owner;

  /* Nonzero to output a BE8 image.  */
  int byteswap_code;

  /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
     Nonzero if R_ARM_TARGET1 means R_ARM_REL32.  */
  int target1_is_rel;

  /* The relocation to use for R_ARM_TARGET2 relocations.  */
  int target2_reloc;

  /* 0 = Ignore R_ARM_V4BX.
     1 = Convert BX to MOV PC.
     2 = Generate v4 interworing stubs.  */
  int fix_v4bx;

  /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum.  */
  int fix_cortex_a8;

  /* Whether we should fix the ARM1176 BLX immediate issue.  */
  int fix_arm1176;

  /* Nonzero if the ARM/Thumb BLX instructions are available for use.  */
  int use_blx;

  /* What sort of code sequences we should look for which may trigger the
     VFP11 denorm erratum.  */
  bfd_arm_vfp11_fix vfp11_fix;

  /* Global counter for the number of fixes we have emitted.  */
  int num_vfp11_fixes;

  /* What sort of code sequences we should look for which may trigger the
     STM32L4XX erratum.  */
  bfd_arm_stm32l4xx_fix stm32l4xx_fix;

  /* Global counter for the number of fixes we have emitted.  */
  int num_stm32l4xx_fixes;

  /* Nonzero to force PIC branch veneers.  */
  int pic_veneer;

  /* The number of bytes in the initial entry in the PLT.  */
  bfd_size_type plt_header_size;

  /* The number of bytes in the subsequent PLT etries.  */
  bfd_size_type plt_entry_size;

  /* True if the target system is VxWorks.  */
  int vxworks_p;

  /* True if the target system is Symbian OS.  */
  int symbian_p;

  /* True if the target system is Native Client.  */
  int nacl_p;

  /* True if the target uses REL relocations.  */
  bfd_boolean use_rel;

  /* Nonzero if import library must be a secure gateway import library
     as per ARMv8-M Security Extensions.  */
  int cmse_implib;

  /* The import library whose symbols' address must remain stable in
     the import library generated.  */
  bfd *in_implib_bfd;

  /* The index of the next unused R_ARM_TLS_DESC slot in .rel.plt.  */
  bfd_vma next_tls_desc_index;

  /* How many R_ARM_TLS_DESC relocations were generated so far.  */
  bfd_vma num_tls_desc;

  /* The (unloaded but important) VxWorks .rela.plt.unloaded section.  */
  asection *srelplt2;

  /* The offset into splt of the PLT entry for the TLS descriptor
     resolver.  Special values are 0, if not necessary (or not found
     to be necessary yet), and -1 if needed but not determined
     yet.  */
  bfd_vma dt_tlsdesc_plt;

  /* The offset into sgot of the GOT entry used by the PLT entry
     above.  */
  bfd_vma dt_tlsdesc_got;

  /* Offset in .plt section of tls_arm_trampoline.  */
  bfd_vma tls_trampoline;

  /* Data for R_ARM_TLS_LDM32/R_ARM_TLS_LDM32_FDPIC relocations.  */
  union
  {
    bfd_signed_vma refcount;
    bfd_vma offset;
  } tls_ldm_got;

  /* Small local sym cache.  */
  struct sym_cache sym_cache;

  /* For convenience in allocate_dynrelocs.  */
  bfd * obfd;

  /* The amount of space used by the reserved portion of the sgotplt
     section, plus whatever space is used by the jump slots.  */
  bfd_vma sgotplt_jump_table_size;

  /* The stub hash table.  */
  struct bfd_hash_table stub_hash_table;

  /* Linker stub bfd.  */
  bfd *stub_bfd;

  /* Linker call-backs.  */
  asection * (*add_stub_section) (const char *, asection *, asection *,
				  unsigned int);
  void (*layout_sections_again) (void);

  /* Array to keep track of which stub sections have been created, and
     information on stub grouping.  */
  struct map_stub *stub_group;

  /* Input stub section holding secure gateway veneers.  */
  asection *cmse_stub_sec;

  /* Offset in cmse_stub_sec where new SG veneers (not in input import library)
     start to be allocated.  */
  bfd_vma new_cmse_stub_offset;

  /* Number of elements in stub_group.  */
  unsigned int top_id;

  /* Assorted information used by elf32_arm_size_stubs.  */
  unsigned int bfd_count;
  unsigned int top_index;
  asection **input_list;

  /* True if the target system uses FDPIC.  */
  int fdpic_p;

  /* Fixup section.  Used for FDPIC.  */
  asection *srofixup;
};
/* Add an FDPIC read-only fixup.  */
static void
arm_elf_add_rofixup (bfd *output_bfd, asection *srofixup, bfd_vma offset)
{
  bfd_vma fixup_offset;

  fixup_offset = srofixup->reloc_count++ * 4;
  BFD_ASSERT (fixup_offset < srofixup->size);
  bfd_put_32 (output_bfd, offset, srofixup->contents + fixup_offset);
}

static inline int
ctz (unsigned int mask)
{
#if GCC_VERSION >= 3004
  return __builtin_ctz (mask);
#else
  unsigned int i;

  for (i = 0; i < 8 * sizeof (mask); i++)
    {
      if (mask & 0x1)
	break;
      mask = (mask >> 1);
    }
  return i;
#endif
}

static inline int
elf32_arm_popcount (unsigned int mask)
{
#if GCC_VERSION >= 3004
  return __builtin_popcount (mask);
#else
  unsigned int i;
  int sum = 0;

  for (i = 0; i < 8 * sizeof (mask); i++)
    {
      if (mask & 0x1)
	sum++;
      mask = (mask >> 1);
    }
  return sum;
#endif
}
static void elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
				    asection *sreloc, Elf_Internal_Rela *rel);
static bfd_boolean
arm_elf_fill_funcdesc(bfd *output_bfd,
		      struct bfd_link_info *info,
		      int *funcdesc_offset,
		      int dynindx,
		      int offset,
		      bfd_vma addr,
		      bfd_vma dynreloc_value,
		      bfd_vma seg)
{
  if ((*funcdesc_offset & 1) == 0)
    {
      struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
      asection *sgot = globals->root.sgot;

      if (bfd_link_pic(info))
	{
	  asection *srelgot = globals->root.srelgot;
	  Elf_Internal_Rela outrel;

	  outrel.r_info = ELF32_R_INFO (dynindx, R_ARM_FUNCDESC_VALUE);
	  outrel.r_offset = sgot->output_section->vma + sgot->output_offset + offset;
	  outrel.r_addend = 0;

	  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
	  bfd_put_32 (output_bfd, addr, sgot->contents + offset);
	  bfd_put_32 (output_bfd, seg, sgot->contents + offset + 4);
	}
      else
	{
	  struct elf_link_hash_entry *hgot = globals->root.hgot;
	  bfd_vma got_value = hgot->root.u.def.value
	    + hgot->root.u.def.section->output_section->vma
	    + hgot->root.u.def.section->output_offset;

	  arm_elf_add_rofixup(output_bfd, globals->srofixup,
			      sgot->output_section->vma + sgot->output_offset
			      + offset);
	  arm_elf_add_rofixup(output_bfd, globals->srofixup,
			      sgot->output_section->vma + sgot->output_offset
			      + offset + 4);
	  bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + offset);
	  bfd_put_32 (output_bfd, got_value, sgot->contents + offset + 4);
	}
      *funcdesc_offset |= 1;
    }
  return TRUE;
}
/* Create an entry in an ARM ELF linker hash table.  */

static struct bfd_hash_entry *
elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
			     struct bfd_hash_table * table,
			     const char * string)
{
  struct elf32_arm_link_hash_entry * ret =
    (struct elf32_arm_link_hash_entry *) entry;

  /* Allocate the structure if it has not already been allocated by a
     subclass.  */
  if (ret == NULL)
    ret = (struct elf32_arm_link_hash_entry *)
	bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
  if (ret == NULL)
    return (struct bfd_hash_entry *) ret;

  /* Call the allocation method of the superclass.  */
  ret = ((struct elf32_arm_link_hash_entry *)
	 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
				     table, string));
  if (ret != NULL)
    {
      ret->dyn_relocs = NULL;
      ret->tls_type = GOT_UNKNOWN;
      ret->tlsdesc_got = (bfd_vma) -1;
      ret->plt.thumb_refcount = 0;
      ret->plt.maybe_thumb_refcount = 0;
      ret->plt.noncall_refcount = 0;
      ret->plt.got_offset = -1;
      ret->is_iplt = FALSE;
      ret->export_glue = NULL;

      ret->stub_cache = NULL;

      ret->fdpic_cnts.gotofffuncdesc_cnt = 0;
      ret->fdpic_cnts.gotfuncdesc_cnt = 0;
      ret->fdpic_cnts.funcdesc_cnt = 0;
      ret->fdpic_cnts.funcdesc_offset = -1;
      ret->fdpic_cnts.gotfuncdesc_offset = -1;
    }

  return (struct bfd_hash_entry *) ret;
}
/* Ensure that we have allocated bookkeeping structures for ABFD's local
   symbols.  */

static bfd_boolean
elf32_arm_allocate_local_sym_info (bfd *abfd)
{
  if (elf_local_got_refcounts (abfd) == NULL)
    {
      bfd_size_type num_syms;
      bfd_size_type size;
      char *data;

      num_syms = elf_tdata (abfd)->symtab_hdr.sh_info;
      size = num_syms * (sizeof (bfd_signed_vma)
			 + sizeof (struct arm_local_iplt_info *)
			 + sizeof (bfd_vma)
			 + sizeof (char)
			 + sizeof (struct fdpic_local));
      data = bfd_zalloc (abfd, size);
      if (data == NULL)
	return FALSE;

      elf32_arm_local_fdpic_cnts (abfd) = (struct fdpic_local *) data;
      data += num_syms * sizeof (struct fdpic_local);

      elf_local_got_refcounts (abfd) = (bfd_signed_vma *) data;
      data += num_syms * sizeof (bfd_signed_vma);

      elf32_arm_local_iplt (abfd) = (struct arm_local_iplt_info **) data;
      data += num_syms * sizeof (struct arm_local_iplt_info *);

      elf32_arm_local_tlsdesc_gotent (abfd) = (bfd_vma *) data;
      data += num_syms * sizeof (bfd_vma);

      elf32_arm_local_got_tls_type (abfd) = data;
    }
  return TRUE;
}
/* Return the .iplt information for local symbol R_SYMNDX, which belongs
   to input bfd ABFD.  Create the information if it doesn't already exist.
   Return null if an allocation fails.  */

static struct arm_local_iplt_info *
elf32_arm_create_local_iplt (bfd *abfd, unsigned long r_symndx)
{
  struct arm_local_iplt_info **ptr;

  if (!elf32_arm_allocate_local_sym_info (abfd))
    return NULL;

  BFD_ASSERT (r_symndx < elf_tdata (abfd)->symtab_hdr.sh_info);
  ptr = &elf32_arm_local_iplt (abfd)[r_symndx];
  if (*ptr == NULL)
    *ptr = bfd_zalloc (abfd, sizeof (**ptr));
  return *ptr;
}

/* Try to obtain PLT information for the symbol with index R_SYMNDX
   in ABFD's symbol table.  If the symbol is global, H points to its
   hash table entry, otherwise H is null.

   Return true if the symbol does have PLT information.  When returning
   true, point *ROOT_PLT at the target-independent reference count/offset
   union and *ARM_PLT at the ARM-specific information.  */

static bfd_boolean
elf32_arm_get_plt_info (bfd *abfd, struct elf32_arm_link_hash_table *globals,
			struct elf32_arm_link_hash_entry *h,
			unsigned long r_symndx, union gotplt_union **root_plt,
			struct arm_plt_info **arm_plt)
{
  struct arm_local_iplt_info *local_iplt;

  if (globals->root.splt == NULL && globals->root.iplt == NULL)
    return FALSE;

  if (h != NULL)
    {
      *root_plt = &h->root.plt;
      *arm_plt = &h->plt;
      return TRUE;
    }

  if (elf32_arm_local_iplt (abfd) == NULL)
    return FALSE;

  local_iplt = elf32_arm_local_iplt (abfd)[r_symndx];
  if (local_iplt == NULL)
    return FALSE;

  *root_plt = &local_iplt->root;
  *arm_plt = &local_iplt->arm;
  return TRUE;
}
static bfd_boolean using_thumb_only (struct elf32_arm_link_hash_table *globals);

/* Return true if the PLT described by ARM_PLT requires a Thumb stub
   before it.  */

static bfd_boolean
elf32_arm_plt_needs_thumb_stub_p (struct bfd_link_info *info,
				  struct arm_plt_info *arm_plt)
{
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);

  return (!using_thumb_only(htab) && (arm_plt->thumb_refcount != 0
	  || (!htab->use_blx && arm_plt->maybe_thumb_refcount != 0)));
}

/* Return a pointer to the head of the dynamic reloc list that should
   be used for local symbol ISYM, which is symbol number R_SYMNDX in
   ABFD's symbol table.  Return null if an error occurs.  */

static struct elf_dyn_relocs **
elf32_arm_get_local_dynreloc_list (bfd *abfd, unsigned long r_symndx,
				   Elf_Internal_Sym *isym)
{
  if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
    {
      struct arm_local_iplt_info *local_iplt;

      local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
      if (local_iplt == NULL)
	return NULL;
      return &local_iplt->dyn_relocs;
    }
  else
    {
      /* Track dynamic relocs needed for local syms too.
	 We really need local syms available to do this
	 easily.  Oh well.  */
      asection *s;
      void *vpp;

      s = bfd_section_from_elf_index (abfd, isym->st_shndx);
      if (s == NULL)
	abort ();

      vpp = &elf_section_data (s)->local_dynrel;
      return (struct elf_dyn_relocs **) vpp;
    }
}
/* Initialize an entry in the stub hash table.  */

static struct bfd_hash_entry *
stub_hash_newfunc (struct bfd_hash_entry *entry,
		   struct bfd_hash_table *table,
		   const char *string)
{
  /* Allocate the structure if it has not already been allocated by a
     subclass.  */
  if (entry == NULL)
    {
      entry = (struct bfd_hash_entry *)
	  bfd_hash_allocate (table, sizeof (struct elf32_arm_stub_hash_entry));
      if (entry == NULL)
	return entry;
    }

  /* Call the allocation method of the superclass.  */
  entry = bfd_hash_newfunc (entry, table, string);
  if (entry != NULL)
    {
      struct elf32_arm_stub_hash_entry *eh;

      /* Initialize the local fields.  */
      eh = (struct elf32_arm_stub_hash_entry *) entry;
      eh->stub_sec = NULL;
      eh->stub_offset = (bfd_vma) -1;
      eh->source_value = 0;
      eh->target_value = 0;
      eh->target_section = NULL;
      eh->orig_insn = 0;
      eh->stub_type = arm_stub_none;
      eh->stub_size = 0;
      eh->stub_template = NULL;
      eh->stub_template_size = -1;
      eh->h = NULL;
      eh->id_sec = NULL;
      eh->output_name = NULL;
    }

  return entry;
}
/* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
   shortcuts to them in our hash table.  */

static bfd_boolean
create_got_section (bfd *dynobj, struct bfd_link_info *info)
{
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  /* BPABI objects never have a GOT, or associated sections.  */
  if (htab->symbian_p)
    return TRUE;

  if (! _bfd_elf_create_got_section (dynobj, info))
    return FALSE;

  /* Also create .rofixup.  */
  if (htab->fdpic_p)
    {
      htab->srofixup = bfd_make_section_with_flags (dynobj, ".rofixup",
						    (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS
						     | SEC_IN_MEMORY | SEC_LINKER_CREATED | SEC_READONLY));
      if (htab->srofixup == NULL || ! bfd_set_section_alignment (dynobj, htab->srofixup, 2))
	return FALSE;
    }

  return TRUE;
}
/* Create the .iplt, .rel(a).iplt and .igot.plt sections.  */

static bfd_boolean
create_ifunc_sections (struct bfd_link_info *info)
{
  struct elf32_arm_link_hash_table *htab;
  const struct elf_backend_data *bed;
  bfd *dynobj;
  asection *s;
  flagword flags;

  htab = elf32_arm_hash_table (info);
  dynobj = htab->root.dynobj;
  bed = get_elf_backend_data (dynobj);
  flags = bed->dynamic_sec_flags;

  if (htab->root.iplt == NULL)
    {
      s = bfd_make_section_anyway_with_flags (dynobj, ".iplt",
					      flags | SEC_READONLY | SEC_CODE);
      if (s == NULL
	  || !bfd_set_section_alignment (dynobj, s, bed->plt_alignment))
	return FALSE;
      htab->root.iplt = s;
    }

  if (htab->root.irelplt == NULL)
    {
      s = bfd_make_section_anyway_with_flags (dynobj,
					      RELOC_SECTION (htab, ".iplt"),
					      flags | SEC_READONLY);
      if (s == NULL
	  || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align))
	return FALSE;
      htab->root.irelplt = s;
    }

  if (htab->root.igotplt == NULL)
    {
      s = bfd_make_section_anyway_with_flags (dynobj, ".igot.plt", flags);
      if (s == NULL
	  || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align))
	return FALSE;
      htab->root.igotplt = s;
    }
  return TRUE;
}
/* Determine if we're dealing with a Thumb only architecture.  */

static bfd_boolean
using_thumb_only (struct elf32_arm_link_hash_table *globals)
{
  int arch;
  int profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
					  Tag_CPU_arch_profile);

  if (profile)
    return profile == 'M';

  arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);

  /* Force return logic to be reviewed for each new architecture.  */
  BFD_ASSERT (arch <= TAG_CPU_ARCH_V8M_MAIN);

  if (arch == TAG_CPU_ARCH_V6_M
      || arch == TAG_CPU_ARCH_V6S_M
      || arch == TAG_CPU_ARCH_V7E_M
      || arch == TAG_CPU_ARCH_V8M_BASE
      || arch == TAG_CPU_ARCH_V8M_MAIN)
    return TRUE;

  return FALSE;
}

/* Determine if we're dealing with a Thumb-2 object.  */

static bfd_boolean
using_thumb2 (struct elf32_arm_link_hash_table *globals)
{
  int arch;
  int thumb_isa = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
					    Tag_THUMB_ISA_use);

  if (thumb_isa)
    return thumb_isa == 2;

  arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);

  /* Force return logic to be reviewed for each new architecture.  */
  BFD_ASSERT (arch <= TAG_CPU_ARCH_V8M_MAIN);

  return (arch == TAG_CPU_ARCH_V6T2
	  || arch == TAG_CPU_ARCH_V7
	  || arch == TAG_CPU_ARCH_V7E_M
	  || arch == TAG_CPU_ARCH_V8
	  || arch == TAG_CPU_ARCH_V8R
	  || arch == TAG_CPU_ARCH_V8M_MAIN);
}

/* Determine whether Thumb-2 BL instruction is available.  */

static bfd_boolean
using_thumb2_bl (struct elf32_arm_link_hash_table *globals)
{
  int arch =
    bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);

  /* Force return logic to be reviewed for each new architecture.  */
  BFD_ASSERT (arch <= TAG_CPU_ARCH_V8M_MAIN);

  /* Architecture was introduced after ARMv6T2 (eg. ARMv6-M).  */
  return (arch == TAG_CPU_ARCH_V6T2
	  || arch >= TAG_CPU_ARCH_V7);
}
/* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
   .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
   hash table.  */

static bfd_boolean
elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
{
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  if (!htab->root.sgot && !create_got_section (dynobj, info))
    return FALSE;

  if (!_bfd_elf_create_dynamic_sections (dynobj, info))
    return FALSE;

  if (htab->vxworks_p)
    {
      if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
	return FALSE;

      if (bfd_link_pic (info))
	{
	  htab->plt_header_size = 0;
	  htab->plt_entry_size
	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
	}
      else
	{
	  htab->plt_header_size
	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
	  htab->plt_entry_size
	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
	}

      if (elf_elfheader (dynobj))
	elf_elfheader (dynobj)->e_ident[EI_CLASS] = ELFCLASS32;
    }
  else
    {
      /* Test for thumb only architectures.  Note - we cannot just call
	 using_thumb_only() as the attributes in the output bfd have not been
	 initialised at this point, so instead we use the input bfd.  */
      bfd * saved_obfd = htab->obfd;

      htab->obfd = dynobj;
      if (using_thumb_only (htab))
	{
	  htab->plt_header_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
	  htab->plt_entry_size  = 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);
	}
      htab->obfd = saved_obfd;
    }

  if (htab->fdpic_p) {
    htab->plt_header_size = 0;
    if (info->flags & DF_BIND_NOW)
      htab->plt_entry_size = 4 * (ARRAY_SIZE(elf32_arm_fdpic_plt_entry) - 5);
    else
      htab->plt_entry_size = 4 * ARRAY_SIZE(elf32_arm_fdpic_plt_entry);
  }

  if (!htab->root.splt
      || !htab->root.srelplt
      || !htab->root.sdynbss
      || (!bfd_link_pic (info) && !htab->root.srelbss))
    abort ();

  return TRUE;
}
/* Copy the extra info we tack onto an elf_link_hash_entry.  */

static void
elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
				struct elf_link_hash_entry *dir,
				struct elf_link_hash_entry *ind)
{
  struct elf32_arm_link_hash_entry *edir, *eind;

  edir = (struct elf32_arm_link_hash_entry *) dir;
  eind = (struct elf32_arm_link_hash_entry *) ind;

  if (eind->dyn_relocs != NULL)
    {
      if (edir->dyn_relocs != NULL)
	{
	  struct elf_dyn_relocs **pp;
	  struct elf_dyn_relocs *p;

	  /* Add reloc counts against the indirect sym to the direct sym
	     list.  Merge any entries against the same section.  */
	  for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
	    {
	      struct elf_dyn_relocs *q;

	      for (q = edir->dyn_relocs; q != NULL; q = q->next)
		if (q->sec == p->sec)
		  {
		    q->pc_count += p->pc_count;
		    q->count += p->count;
		    *pp = p->next;
		    break;
		  }
	      if (q == NULL)
		pp = &p->next;
	    }
	  *pp = edir->dyn_relocs;
	}

      edir->dyn_relocs = eind->dyn_relocs;
      eind->dyn_relocs = NULL;
    }

  if (ind->root.type == bfd_link_hash_indirect)
    {
      /* Copy over PLT info.  */
      edir->plt.thumb_refcount += eind->plt.thumb_refcount;
      eind->plt.thumb_refcount = 0;
      edir->plt.maybe_thumb_refcount += eind->plt.maybe_thumb_refcount;
      eind->plt.maybe_thumb_refcount = 0;
      edir->plt.noncall_refcount += eind->plt.noncall_refcount;
      eind->plt.noncall_refcount = 0;

      /* Copy FDPIC counters.  */
      edir->fdpic_cnts.gotofffuncdesc_cnt += eind->fdpic_cnts.gotofffuncdesc_cnt;
      edir->fdpic_cnts.gotfuncdesc_cnt += eind->fdpic_cnts.gotfuncdesc_cnt;
      edir->fdpic_cnts.funcdesc_cnt += eind->fdpic_cnts.funcdesc_cnt;

      /* We should only allocate a function to .iplt once the final
	 symbol information is known.  */
      BFD_ASSERT (!eind->is_iplt);

      if (dir->got.refcount <= 0)
	{
	  edir->tls_type = eind->tls_type;
	  eind->tls_type = GOT_UNKNOWN;
	}
    }

  _bfd_elf_link_hash_copy_indirect (info, dir, ind);
}
/* Destroy an ARM elf linker hash table.  */

static void
elf32_arm_link_hash_table_free (bfd *obfd)
{
  struct elf32_arm_link_hash_table *ret
    = (struct elf32_arm_link_hash_table *) obfd->link.hash;

  bfd_hash_table_free (&ret->stub_hash_table);
  _bfd_elf_link_hash_table_free (obfd);
}
/* Create an ARM elf linker hash table.  */

static struct bfd_link_hash_table *
elf32_arm_link_hash_table_create (bfd *abfd)
{
  struct elf32_arm_link_hash_table *ret;
  bfd_size_type amt = sizeof (struct elf32_arm_link_hash_table);

  ret = (struct elf32_arm_link_hash_table *) bfd_zmalloc (amt);
  if (ret == NULL)
    return NULL;

  if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
				      elf32_arm_link_hash_newfunc,
				      sizeof (struct elf32_arm_link_hash_entry),
				      ARM_ELF_DATA))
    {
      free (ret);
      return NULL;
    }

  ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
  ret->stm32l4xx_fix = BFD_ARM_STM32L4XX_FIX_NONE;
#ifdef FOUR_WORD_PLT
  ret->plt_header_size = 16;
  ret->plt_entry_size = 16;
#else
  ret->plt_header_size = 20;
  ret->plt_entry_size = elf32_arm_use_long_plt_entry ? 16 : 12;
#endif
  ret->use_rel = TRUE;
  ret->obfd = abfd;

  if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
			    sizeof (struct elf32_arm_stub_hash_entry)))
    {
      _bfd_elf_link_hash_table_free (abfd);
      return NULL;
    }
  ret->root.root.hash_table_free = elf32_arm_link_hash_table_free;

  return &ret->root.root;
}
/* Determine what kind of NOPs are available.  */

static bfd_boolean
arch_has_arm_nop (struct elf32_arm_link_hash_table *globals)
{
  const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
					     Tag_CPU_arch);

  /* Force return logic to be reviewed for each new architecture.  */
  BFD_ASSERT (arch <= TAG_CPU_ARCH_V8M_MAIN);

  return (arch == TAG_CPU_ARCH_V6T2
	  || arch == TAG_CPU_ARCH_V6K
	  || arch == TAG_CPU_ARCH_V7
	  || arch == TAG_CPU_ARCH_V8
	  || arch == TAG_CPU_ARCH_V8R);
}
/* Return TRUE if the stub of type STUB_TYPE starts with Thumb code.  */

static bfd_boolean
arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
{
  switch (stub_type)
    {
    case arm_stub_long_branch_thumb_only:
    case arm_stub_long_branch_thumb2_only:
    case arm_stub_long_branch_thumb2_only_pure:
    case arm_stub_long_branch_v4t_thumb_arm:
    case arm_stub_short_branch_v4t_thumb_arm:
    case arm_stub_long_branch_v4t_thumb_arm_pic:
    case arm_stub_long_branch_v4t_thumb_tls_pic:
    case arm_stub_long_branch_thumb_only_pic:
    case arm_stub_cmse_branch_thumb_only:
      return TRUE;

    default:
      return FALSE;
    }
}
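/* Illustrative sketch, not built: arm_type_of_stub below decides whether a
   veneer is required mostly by comparing the branch offset with the reach of
   the branch encoding in use.  This toy helper shows just that range test for
   a Thumb BL/BLX, reusing the THM_MAX_* and THM2_MAX_* limits that the real
   code checks; the helper name is hypothetical.  */
#if 0
static int
demo_thumb_call_needs_stub (bfd_signed_vma branch_offset, int have_thumb2_bl)
{
  if (have_thumb2_bl)
    /* Thumb-2 BL/BLX reaches roughly +/-16MB.  */
    return (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
	    || branch_offset < THM2_MAX_BWD_BRANCH_OFFSET);

  /* Thumb-1 BL reaches roughly +/-4MB.  */
  return (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
	  || branch_offset < THM_MAX_BWD_BRANCH_OFFSET);
}
#endif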
4136 /* Determine the type of stub needed, if any, for a call. */
4138 static enum elf32_arm_stub_type
4139 arm_type_of_stub (struct bfd_link_info
*info
,
4140 asection
*input_sec
,
4141 const Elf_Internal_Rela
*rel
,
4142 unsigned char st_type
,
4143 enum arm_st_branch_type
*actual_branch_type
,
4144 struct elf32_arm_link_hash_entry
*hash
,
4145 bfd_vma destination
,
4151 bfd_signed_vma branch_offset
;
4152 unsigned int r_type
;
4153 struct elf32_arm_link_hash_table
* globals
;
4154 bfd_boolean thumb2
, thumb2_bl
, thumb_only
;
4155 enum elf32_arm_stub_type stub_type
= arm_stub_none
;
4157 enum arm_st_branch_type branch_type
= *actual_branch_type
;
4158 union gotplt_union
*root_plt
;
4159 struct arm_plt_info
*arm_plt
;
4163 if (branch_type
== ST_BRANCH_LONG
)
4166 globals
= elf32_arm_hash_table (info
);
4167 if (globals
== NULL
)
4170 thumb_only
= using_thumb_only (globals
);
4171 thumb2
= using_thumb2 (globals
);
4172 thumb2_bl
= using_thumb2_bl (globals
);
4174 arch
= bfd_elf_get_obj_attr_int (globals
->obfd
, OBJ_ATTR_PROC
, Tag_CPU_arch
);
4176 /* True for architectures that implement the thumb2 movw instruction. */
4177 thumb2_movw
= thumb2
|| (arch
== TAG_CPU_ARCH_V8M_BASE
);
4179 /* Determine where the call point is. */
4180 location
= (input_sec
->output_offset
4181 + input_sec
->output_section
->vma
4184 r_type
= ELF32_R_TYPE (rel
->r_info
);
4186 /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
4187 are considering a function call relocation. */
4188 if (thumb_only
&& (r_type
== R_ARM_THM_CALL
|| r_type
== R_ARM_THM_JUMP24
4189 || r_type
== R_ARM_THM_JUMP19
)
4190 && branch_type
== ST_BRANCH_TO_ARM
)
4191 branch_type
= ST_BRANCH_TO_THUMB
;
4193 /* For TLS call relocs, it is the caller's responsibility to provide
4194 the address of the appropriate trampoline. */
4195 if (r_type
!= R_ARM_TLS_CALL
4196 && r_type
!= R_ARM_THM_TLS_CALL
4197 && elf32_arm_get_plt_info (input_bfd
, globals
, hash
,
4198 ELF32_R_SYM (rel
->r_info
), &root_plt
,
4200 && root_plt
->offset
!= (bfd_vma
) -1)
4204 if (hash
== NULL
|| hash
->is_iplt
)
4205 splt
= globals
->root
.iplt
;
4207 splt
= globals
->root
.splt
;
4212 /* Note when dealing with PLT entries: the main PLT stub is in
4213 ARM mode, so if the branch is in Thumb mode, another
4214 Thumb->ARM stub will be inserted later just before the ARM
4215 PLT stub. If a long branch stub is needed, we'll add a
4216 Thumb->Arm one and branch directly to the ARM PLT entry.
4217 Here, we have to check if a pre-PLT Thumb->ARM stub
4218 is needed and if it will be close enough. */
4220 destination
= (splt
->output_section
->vma
4221 + splt
->output_offset
4222 + root_plt
->offset
);
4225 /* Thumb branch/call to PLT: it can become a branch to ARM
4226 or to Thumb. We must perform the same checks and
4227 corrections as in elf32_arm_final_link_relocate. */
4228 if ((r_type
== R_ARM_THM_CALL
)
4229 || (r_type
== R_ARM_THM_JUMP24
))
4231 if (globals
->use_blx
4232 && r_type
== R_ARM_THM_CALL
4235 /* If the Thumb BLX instruction is available, convert
4236 the BL to a BLX instruction to call the ARM-mode
4238 branch_type
= ST_BRANCH_TO_ARM
;
4243 /* Target the Thumb stub before the ARM PLT entry. */
4244 destination
-= PLT_THUMB_STUB_SIZE
;
4245 branch_type
= ST_BRANCH_TO_THUMB
;
4250 branch_type
= ST_BRANCH_TO_ARM
;
4254 /* Calls to STT_GNU_IFUNC symbols should go through a PLT. */
4255 BFD_ASSERT (st_type
!= STT_GNU_IFUNC
);
4257 branch_offset
= (bfd_signed_vma
)(destination
- location
);
4259 if (r_type
== R_ARM_THM_CALL
|| r_type
== R_ARM_THM_JUMP24
4260 || r_type
== R_ARM_THM_TLS_CALL
|| r_type
== R_ARM_THM_JUMP19
)
4262 /* Handle cases where:
4263 - this call goes too far (different Thumb/Thumb2 max
4265 - it's a Thumb->Arm call and blx is not available, or it's a
4266 Thumb->Arm branch (not bl). A stub is needed in this case,
4267 but only if this call is not through a PLT entry. Indeed,
4268 PLT stubs handle mode switching already. */
4270 && (branch_offset
> THM_MAX_FWD_BRANCH_OFFSET
4271 || (branch_offset
< THM_MAX_BWD_BRANCH_OFFSET
)))
4273 && (branch_offset
> THM2_MAX_FWD_BRANCH_OFFSET
4274 || (branch_offset
< THM2_MAX_BWD_BRANCH_OFFSET
)))
4276 && (branch_offset
> THM2_MAX_FWD_COND_BRANCH_OFFSET
4277 || (branch_offset
< THM2_MAX_BWD_COND_BRANCH_OFFSET
))
4278 && (r_type
== R_ARM_THM_JUMP19
))
4279 || (branch_type
== ST_BRANCH_TO_ARM
4280 && (((r_type
== R_ARM_THM_CALL
4281 || r_type
== R_ARM_THM_TLS_CALL
) && !globals
->use_blx
)
4282 || (r_type
== R_ARM_THM_JUMP24
)
4283 || (r_type
== R_ARM_THM_JUMP19
))
4286 /* If we need to insert a Thumb-Thumb long branch stub to a
4287 PLT, use one that branches directly to the ARM PLT
4288 stub. If we pretended we'd use the pre-PLT Thumb->ARM
4289 stub, undo this now. */
4290 if ((branch_type
== ST_BRANCH_TO_THUMB
) && use_plt
&& !thumb_only
)
4292 branch_type
= ST_BRANCH_TO_ARM
;
4293 branch_offset
+= PLT_THUMB_STUB_SIZE
;
4296 if (branch_type
== ST_BRANCH_TO_THUMB
)
4298 /* Thumb to thumb. */
4301 if (input_sec
->flags
& SEC_ELF_PURECODE
)
4303 (_("%pB(%pA): warning: long branch veneers used in"
4304 " section with SHF_ARM_PURECODE section"
4305 " attribute is only supported for M-profile"
4306 " targets that implement the movw instruction"),
4307 input_bfd
, input_sec
);
4309 stub_type
= (bfd_link_pic (info
) | globals
->pic_veneer
)
4311 ? ((globals
->use_blx
4312 && (r_type
== R_ARM_THM_CALL
))
4313 /* V5T and above. Stub starts with ARM code, so
4314 we must be able to switch mode before
4315 reaching it, which is only possible for 'bl'
4316 (ie R_ARM_THM_CALL relocation). */
4317 ? arm_stub_long_branch_any_thumb_pic
4318 /* On V4T, use Thumb code only. */
4319 : arm_stub_long_branch_v4t_thumb_thumb_pic
)
4321 /* non-PIC stubs. */
4322 : ((globals
->use_blx
4323 && (r_type
== R_ARM_THM_CALL
))
4324 /* V5T and above. */
4325 ? arm_stub_long_branch_any_any
4327 : arm_stub_long_branch_v4t_thumb_thumb
);
4331 if (thumb2_movw
&& (input_sec
->flags
& SEC_ELF_PURECODE
))
4332 stub_type
= arm_stub_long_branch_thumb2_only_pure
;
4335 if (input_sec
->flags
& SEC_ELF_PURECODE
)
4337 (_("%pB(%pA): warning: long branch veneers used in"
4338 " section with SHF_ARM_PURECODE section"
4339 " attribute is only supported for M-profile"
4340 " targets that implement the movw instruction"),
4341 input_bfd
, input_sec
);
4343 stub_type
= (bfd_link_pic (info
) | globals
->pic_veneer
)
4345 ? arm_stub_long_branch_thumb_only_pic
4347 : (thumb2
? arm_stub_long_branch_thumb2_only
4348 : arm_stub_long_branch_thumb_only
);
4354 if (input_sec
->flags
& SEC_ELF_PURECODE
)
4356 (_("%pB(%pA): warning: long branch veneers used in"
4357 " section with SHF_ARM_PURECODE section"
4358 " attribute is only supported" " for M-profile"
4359 " targets that implement the movw instruction"),
4360 input_bfd
, input_sec
);
4364 && sym_sec
->owner
!= NULL
4365 && !INTERWORK_FLAG (sym_sec
->owner
))
4368 (_("%pB(%s): warning: interworking not enabled;"
4369 " first occurrence: %pB: %s call to %s"),
4370 sym_sec
->owner
, name
, input_bfd
, "Thumb", "ARM");
4374 (bfd_link_pic (info
) | globals
->pic_veneer
)
4376 ? (r_type
== R_ARM_THM_TLS_CALL
4377 /* TLS PIC stubs. */
4378 ? (globals
->use_blx
? arm_stub_long_branch_any_tls_pic
4379 : arm_stub_long_branch_v4t_thumb_tls_pic
)
4380 : ((globals
->use_blx
&& r_type
== R_ARM_THM_CALL
)
4381 /* V5T PIC and above. */
4382 ? arm_stub_long_branch_any_arm_pic
4384 : arm_stub_long_branch_v4t_thumb_arm_pic
))
4386 /* non-PIC stubs. */
4387 : ((globals
->use_blx
&& r_type
== R_ARM_THM_CALL
)
4388 /* V5T and above. */
4389 ? arm_stub_long_branch_any_any
4391 : arm_stub_long_branch_v4t_thumb_arm
);
4393 /* Handle v4t short branches. */
4394 if ((stub_type
== arm_stub_long_branch_v4t_thumb_arm
)
4395 && (branch_offset
<= THM_MAX_FWD_BRANCH_OFFSET
)
4396 && (branch_offset
>= THM_MAX_BWD_BRANCH_OFFSET
))
4397 stub_type
= arm_stub_short_branch_v4t_thumb_arm
;
4401 else if (r_type
== R_ARM_CALL
4402 || r_type
== R_ARM_JUMP24
4403 || r_type
== R_ARM_PLT32
4404 || r_type
== R_ARM_TLS_CALL
)
4406 if (input_sec
->flags
& SEC_ELF_PURECODE
)
4408 (_("%pB(%pA): warning: long branch veneers used in"
4409 " section with SHF_ARM_PURECODE section"
4410 " attribute is only supported for M-profile"
4411 " targets that implement the movw instruction"),
4412 input_bfd
, input_sec
);
4413 if (branch_type
== ST_BRANCH_TO_THUMB
)
4418 && sym_sec
->owner
!= NULL
4419 && !INTERWORK_FLAG (sym_sec
->owner
))
4422 (_("%pB(%s): warning: interworking not enabled;"
4423 " first occurrence: %pB: %s call to %s"),
4424 sym_sec
->owner
, name
, input_bfd
, "ARM", "Thumb");
4427 /* We have an extra 2-bytes reach because of
4428 the mode change (bit 24 (H) of BLX encoding). */
4429 if (branch_offset
> (ARM_MAX_FWD_BRANCH_OFFSET
+ 2)
4430 || (branch_offset
< ARM_MAX_BWD_BRANCH_OFFSET
)
4431 || (r_type
== R_ARM_CALL
&& !globals
->use_blx
)
4432 || (r_type
== R_ARM_JUMP24
)
4433 || (r_type
== R_ARM_PLT32
))
4435 stub_type
= (bfd_link_pic (info
) | globals
->pic_veneer
)
4437 ? ((globals
->use_blx
)
4438 /* V5T and above. */
4439 ? arm_stub_long_branch_any_thumb_pic
4441 : arm_stub_long_branch_v4t_arm_thumb_pic
)
4443 /* non-PIC stubs. */
4444 : ((globals
->use_blx
)
4445 /* V5T and above. */
4446 ? arm_stub_long_branch_any_any
4448 : arm_stub_long_branch_v4t_arm_thumb
);
4454 if (branch_offset
> ARM_MAX_FWD_BRANCH_OFFSET
4455 || (branch_offset
< ARM_MAX_BWD_BRANCH_OFFSET
))
4458 (bfd_link_pic (info
) | globals
->pic_veneer
)
4460 ? (r_type
== R_ARM_TLS_CALL
4462 ? arm_stub_long_branch_any_tls_pic
4464 ? arm_stub_long_branch_arm_nacl_pic
4465 : arm_stub_long_branch_any_arm_pic
))
4466 /* non-PIC stubs. */
4468 ? arm_stub_long_branch_arm_nacl
4469 : arm_stub_long_branch_any_any
);
4474 /* If a stub is needed, record the actual destination type. */
4475 if (stub_type
!= arm_stub_none
)
4476 *actual_branch_type
= branch_type
;
/* Build a name for an entry in the stub hash table.  */

static char *
elf32_arm_stub_name (const asection *input_section,
		     const asection *sym_sec,
		     const struct elf32_arm_link_hash_entry *hash,
		     const Elf_Internal_Rela *rel,
		     enum elf32_arm_stub_type stub_type)
{
  char *stub_name;
  bfd_size_type len;

  if (hash)
    {
      len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1 + 2 + 1;
      stub_name = (char *) bfd_malloc (len);
      if (stub_name != NULL)
	sprintf (stub_name, "%08x_%s+%x_%d",
		 input_section->id & 0xffffffff,
		 hash->root.root.root.string,
		 (int) rel->r_addend & 0xffffffff,
		 stub_type);
    }
  else
    {
      len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 2 + 1;
      stub_name = (char *) bfd_malloc (len);
      if (stub_name != NULL)
	sprintf (stub_name, "%08x_%x:%x+%x_%d",
		 input_section->id & 0xffffffff,
		 sym_sec->id & 0xffffffff,
		 ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL
		 || ELF32_R_TYPE (rel->r_info) == R_ARM_THM_TLS_CALL
		 ? 0 : (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
		 (int) rel->r_addend & 0xffffffff,
		 stub_type);
    }

  return stub_name;
}
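/* Illustrative sketch, not built: the keys built above follow the
   "%08x_%s+%x_%d" (named symbol) and "%08x_%x:%x+%x_%d" (section:symbol)
   patterns, i.e. source section id, target, addend and stub type.  A made-up
   example, using snprintf only to show the shape of the resulting key.  */
#if 0
#include <stdio.h>

static void
demo_stub_name (void)
{
  char name[64];

  /* Hypothetical values: section id 0x2a, target symbol "printf",
     addend 0, stub type 3.  */
  snprintf (name, sizeof name, "%08x_%s+%x_%d", 0x2a, "printf", 0, 3);
  printf ("%s\n", name);	/* prints 0000002a_printf+0_3 */
}
#endif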
/* Look up an entry in the stub hash.  Stub entries are cached because
   creating the stub name takes a bit of time.  */

static struct elf32_arm_stub_hash_entry *
elf32_arm_get_stub_entry (const asection *input_section,
			  const asection *sym_sec,
			  struct elf_link_hash_entry *hash,
			  const Elf_Internal_Rela *rel,
			  struct elf32_arm_link_hash_table *htab,
			  enum elf32_arm_stub_type stub_type)
{
  struct elf32_arm_stub_hash_entry *stub_entry;
  struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
  const asection *id_sec;

  if ((input_section->flags & SEC_CODE) == 0)
    return NULL;

  /* If this input section is part of a group of sections sharing one
     stub section, then use the id of the first section in the group.
     Stub names need to include a section id, as there may well be
     more than one stub used to reach say, printf, and we need to
     distinguish between them.  */
  BFD_ASSERT (input_section->id <= htab->top_id);
  id_sec = htab->stub_group[input_section->id].link_sec;

  if (h != NULL && h->stub_cache != NULL
      && h->stub_cache->h == h
      && h->stub_cache->id_sec == id_sec
      && h->stub_cache->stub_type == stub_type)
    {
      stub_entry = h->stub_cache;
    }
  else
    {
      char *stub_name;

      stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel, stub_type);
      if (stub_name == NULL)
	return NULL;

      stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
					 stub_name, FALSE, FALSE);
      if (h != NULL)
	h->stub_cache = stub_entry;

      free (stub_name);
    }

  return stub_entry;
}
/* Whether veneers of type STUB_TYPE require to be in a dedicated output
   section.  */

static bfd_boolean
arm_dedicated_stub_output_section_required (enum elf32_arm_stub_type stub_type)
{
  if (stub_type >= max_stub_type)
    abort ();  /* Should be unreachable.  */

  switch (stub_type)
    {
    case arm_stub_cmse_branch_thumb_only:
      return TRUE;

    default:
      return FALSE;
    }

  abort ();  /* Should be unreachable.  */
}
/* Required alignment (as a power of 2) for the dedicated section holding
   veneers of type STUB_TYPE, or 0 if veneers of this type are interspersed
   with input sections.  */

static int
arm_dedicated_stub_output_section_required_alignment
  (enum elf32_arm_stub_type stub_type)
{
  if (stub_type >= max_stub_type)
    abort ();  /* Should be unreachable.  */

  switch (stub_type)
    {
    /* Vectors of Secure Gateway veneers must be aligned on 32byte
       boundary.  */
    case arm_stub_cmse_branch_thumb_only:
      return 5;

    default:
      BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
      return 0;
    }

  abort ();  /* Should be unreachable.  */
}
/* Name of the dedicated output section to put veneers of type STUB_TYPE, or
   NULL if veneers of this type are interspersed with input sections.  */

static const char *
arm_dedicated_stub_output_section_name (enum elf32_arm_stub_type stub_type)
{
  if (stub_type >= max_stub_type)
    abort ();  /* Should be unreachable.  */

  switch (stub_type)
    {
    case arm_stub_cmse_branch_thumb_only:
      return ".gnu.sgstubs";

    default:
      BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
      return NULL;
    }

  abort ();  /* Should be unreachable.  */
}
/* If veneers of type STUB_TYPE should go in a dedicated output section,
   returns the address of the hash table field in HTAB holding a pointer to the
   corresponding input section.  Otherwise, returns NULL.  */

static asection **
arm_dedicated_stub_input_section_ptr (struct elf32_arm_link_hash_table *htab,
				      enum elf32_arm_stub_type stub_type)
{
  if (stub_type >= max_stub_type)
    abort ();  /* Should be unreachable.  */

  switch (stub_type)
    {
    case arm_stub_cmse_branch_thumb_only:
      return &htab->cmse_stub_sec;

    default:
      BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
      return NULL;
    }

  abort ();  /* Should be unreachable.  */
}
/* Find or create a stub section to contain a stub of type STUB_TYPE.  SECTION
   is the section that branches into the veneer and can be NULL if the stub
   should go in a dedicated output section.  Returns a pointer to the stub
   section, and the section to which the stub section will be attached (in
   *LINK_SEC_P).  LINK_SEC_P may be NULL.  */

static asection *
elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
				   struct elf32_arm_link_hash_table *htab,
				   enum elf32_arm_stub_type stub_type)
{
  asection *link_sec, *out_sec, **stub_sec_p;
  const char *stub_sec_prefix;
  bfd_boolean dedicated_output_section =
    arm_dedicated_stub_output_section_required (stub_type);
  int align;

  if (dedicated_output_section)
    {
      bfd *output_bfd = htab->obfd;
      const char *out_sec_name =
	arm_dedicated_stub_output_section_name (stub_type);

      link_sec = NULL;
      stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
      stub_sec_prefix = out_sec_name;
      align = arm_dedicated_stub_output_section_required_alignment (stub_type);
      out_sec = bfd_get_section_by_name (output_bfd, out_sec_name);
      if (out_sec == NULL)
	{
	  _bfd_error_handler (_("no address assigned to the veneers output "
				"section %s"), out_sec_name);
	  return NULL;
	}
    }
  else
    {
      BFD_ASSERT (section->id <= htab->top_id);
      link_sec = htab->stub_group[section->id].link_sec;
      BFD_ASSERT (link_sec != NULL);
      stub_sec_p = &htab->stub_group[section->id].stub_sec;
      if (*stub_sec_p == NULL)
	stub_sec_p = &htab->stub_group[link_sec->id].stub_sec;
      stub_sec_prefix = link_sec->name;
      out_sec = link_sec->output_section;
      align = htab->nacl_p ? 4 : 3;
    }

  if (*stub_sec_p == NULL)
    {
      size_t namelen;
      bfd_size_type len;
      char *s_name;

      namelen = strlen (stub_sec_prefix);
      len = namelen + sizeof (STUB_SUFFIX);
      s_name = (char *) bfd_alloc (htab->stub_bfd, len);
      if (s_name == NULL)
	return NULL;

      memcpy (s_name, stub_sec_prefix, namelen);
      memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
      *stub_sec_p = (*htab->add_stub_section) (s_name, out_sec, link_sec,
					       align);
      if (*stub_sec_p == NULL)
	return NULL;

      out_sec->flags |= SEC_ALLOC | SEC_LOAD | SEC_READONLY | SEC_CODE
			| SEC_HAS_CONTENTS | SEC_RELOC | SEC_IN_MEMORY
			| SEC_KEEP;
    }

  if (!dedicated_output_section)
    htab->stub_group[section->id].stub_sec = *stub_sec_p;

  if (link_sec_p)
    *link_sec_p = link_sec;

  return *stub_sec_p;
}
/* Add a new stub entry to the stub hash.  Not all fields of the new
   stub entry are initialised.  */

static struct elf32_arm_stub_hash_entry *
elf32_arm_add_stub (const char *stub_name, asection *section,
		    struct elf32_arm_link_hash_table *htab,
		    enum elf32_arm_stub_type stub_type)
{
  asection *link_sec;
  asection *stub_sec;
  struct elf32_arm_stub_hash_entry *stub_entry;

  stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab,
						stub_type);
  if (stub_sec == NULL)
    return NULL;

  /* Enter this entry into the linker stub hash table.  */
  stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
				     TRUE, FALSE);
  if (stub_entry == NULL)
    {
      if (section == NULL)
	section = stub_sec;
      _bfd_error_handler (_("%pB: cannot create stub entry %s"),
			  section->owner, stub_name);
      return NULL;
    }

  stub_entry->stub_sec = stub_sec;
  stub_entry->stub_offset = (bfd_vma) -1;
  stub_entry->id_sec = link_sec;

  return stub_entry;
}
/* Store an Arm insn into an output section not processed by
   elf32_arm_write_section.  */

static void
put_arm_insn (struct elf32_arm_link_hash_table * htab,
	      bfd * output_bfd, bfd_vma val, void * ptr)
{
  if (htab->byteswap_code != bfd_little_endian (output_bfd))
    bfd_putl32 (val, ptr);
  else
    bfd_putb32 (val, ptr);
}
/* Store a 16-bit Thumb insn into an output section not processed by
   elf32_arm_write_section.  */

static void
put_thumb_insn (struct elf32_arm_link_hash_table * htab,
		bfd * output_bfd, bfd_vma val, void * ptr)
{
  if (htab->byteswap_code != bfd_little_endian (output_bfd))
    bfd_putl16 (val, ptr);
  else
    bfd_putb16 (val, ptr);
}
/* Store a Thumb2 insn into an output section not processed by
   elf32_arm_write_section.  */

static void
put_thumb2_insn (struct elf32_arm_link_hash_table * htab,
		 bfd * output_bfd, bfd_vma val, bfd_byte * ptr)
{
  /* T2 instructions are 16-bit streamed.  */
  if (htab->byteswap_code != bfd_little_endian (output_bfd))
    {
      bfd_putl16 ((val >> 16) & 0xffff, ptr);
      bfd_putl16 ((val & 0xffff), ptr + 2);
    }
  else
    {
      bfd_putb16 ((val >> 16) & 0xffff, ptr);
      bfd_putb16 ((val & 0xffff), ptr + 2);
    }
}
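/* Illustrative sketch, not built: a 32-bit Thumb-2 instruction is stored as
   two consecutive halfwords, most-significant halfword first, each halfword
   in the code endianness.  The demo below writes a value into a little-endian
   buffer the same way the little-endian branch of put_thumb2_insn does; the
   function name is made up.  */
#if 0
#include <stdint.h>

static void
demo_store_thumb2_le (uint8_t *buf, uint32_t insn)
{
  /* First halfword (bits 31:16), little-endian byte order.  */
  buf[0] = (insn >> 16) & 0xff;
  buf[1] = (insn >> 24) & 0xff;
  /* Second halfword (bits 15:0), little-endian byte order.  */
  buf[2] = insn & 0xff;
  buf[3] = (insn >> 8) & 0xff;
}
#endif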
/* If it's possible to change R_TYPE to a more efficient access
   model, return the new reloc type.  */

static int
elf32_arm_tls_transition (struct bfd_link_info *info, int r_type,
			  struct elf_link_hash_entry *h)
{
  int is_local = (h == NULL);

  if (bfd_link_pic (info)
      || (h && h->root.type == bfd_link_hash_undefweak))
    return r_type;

  /* We do not support relaxations for Old TLS models.  */
  switch (r_type)
    {
    case R_ARM_TLS_GOTDESC:
    case R_ARM_TLS_CALL:
    case R_ARM_THM_TLS_CALL:
    case R_ARM_TLS_DESCSEQ:
    case R_ARM_THM_TLS_DESCSEQ:
      return is_local ? R_ARM_TLS_LE32 : R_ARM_TLS_IE32;
    }

  return r_type;
}
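/* Illustrative sketch, not built: for a non-PIC link the TLS descriptor and
   TLS call relocations above relax to Local Exec when the symbol binds
   locally and to Initial Exec otherwise.  The toy function restates that
   choice; its name is made up, the reloc values are the ones used above.  */
#if 0
static int
demo_relaxed_tls_reloc (int symbol_is_local)
{
  return symbol_is_local ? R_ARM_TLS_LE32 : R_ARM_TLS_IE32;
}
#endif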
static bfd_reloc_status_type elf32_arm_final_link_relocate
  (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
   Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
   const char *, unsigned char, enum arm_st_branch_type,
   struct elf_link_hash_entry *, bfd_boolean *, char **);
/* Returns the alignment (in bytes) needed for the stubs of type STUB_TYPE.  */

static unsigned int
arm_stub_required_alignment (enum elf32_arm_stub_type stub_type)
{
  switch (stub_type)
    {
    case arm_stub_a8_veneer_b_cond:
    case arm_stub_a8_veneer_b:
    case arm_stub_a8_veneer_bl:
      return 2;

    case arm_stub_long_branch_any_any:
    case arm_stub_long_branch_v4t_arm_thumb:
    case arm_stub_long_branch_thumb_only:
    case arm_stub_long_branch_thumb2_only:
    case arm_stub_long_branch_thumb2_only_pure:
    case arm_stub_long_branch_v4t_thumb_thumb:
    case arm_stub_long_branch_v4t_thumb_arm:
    case arm_stub_short_branch_v4t_thumb_arm:
    case arm_stub_long_branch_any_arm_pic:
    case arm_stub_long_branch_any_thumb_pic:
    case arm_stub_long_branch_v4t_thumb_thumb_pic:
    case arm_stub_long_branch_v4t_arm_thumb_pic:
    case arm_stub_long_branch_v4t_thumb_arm_pic:
    case arm_stub_long_branch_thumb_only_pic:
    case arm_stub_long_branch_any_tls_pic:
    case arm_stub_long_branch_v4t_thumb_tls_pic:
    case arm_stub_cmse_branch_thumb_only:
    case arm_stub_a8_veneer_blx:
      return 4;

    case arm_stub_long_branch_arm_nacl:
    case arm_stub_long_branch_arm_nacl_pic:
      return 16;

    default:
      abort ();  /* Should be unreachable.  */
    }
}
/* Returns whether stubs of type STUB_TYPE take over the symbol they are
   veneering (TRUE) or have their own symbol (FALSE).  */

static bfd_boolean
arm_stub_sym_claimed (enum elf32_arm_stub_type stub_type)
{
  if (stub_type >= max_stub_type)
    abort ();  /* Should be unreachable.  */

  switch (stub_type)
    {
    case arm_stub_cmse_branch_thumb_only:
      return TRUE;

    default:
      return FALSE;
    }

  abort ();  /* Should be unreachable.  */
}
/* Returns the padding needed for the dedicated section used by stubs of
   type STUB_TYPE.  */

static int
arm_dedicated_stub_section_padding (enum elf32_arm_stub_type stub_type)
{
  if (stub_type >= max_stub_type)
    abort ();  /* Should be unreachable.  */

  switch (stub_type)
    {
    case arm_stub_cmse_branch_thumb_only:
      return 32;

    default:
      return 0;
    }

  abort ();  /* Should be unreachable.  */
}
/* If veneers of type STUB_TYPE should go in a dedicated output section,
   returns the address of the hash table field in HTAB holding the offset at
   which new veneers should be laid out in the stub section.  */

static bfd_vma *
arm_new_stubs_start_offset_ptr (struct elf32_arm_link_hash_table *htab,
				enum elf32_arm_stub_type stub_type)
{
  switch (stub_type)
    {
    case arm_stub_cmse_branch_thumb_only:
      return &htab->new_cmse_stub_offset;

    default:
      BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
      return NULL;
    }
}
4963 arm_build_one_stub (struct bfd_hash_entry
*gen_entry
,
4967 bfd_boolean removed_sg_veneer
;
4968 struct elf32_arm_stub_hash_entry
*stub_entry
;
4969 struct elf32_arm_link_hash_table
*globals
;
4970 struct bfd_link_info
*info
;
4977 const insn_sequence
*template_sequence
;
4979 int stub_reloc_idx
[MAXRELOCS
] = {-1, -1};
4980 int stub_reloc_offset
[MAXRELOCS
] = {0, 0};
4982 int just_allocated
= 0;
4984 /* Massage our args to the form they really have. */
4985 stub_entry
= (struct elf32_arm_stub_hash_entry
*) gen_entry
;
4986 info
= (struct bfd_link_info
*) in_arg
;
4988 globals
= elf32_arm_hash_table (info
);
4989 if (globals
== NULL
)
4992 stub_sec
= stub_entry
->stub_sec
;
4994 if ((globals
->fix_cortex_a8
< 0)
4995 != (arm_stub_required_alignment (stub_entry
->stub_type
) == 2))
4996 /* We have to do less-strictly-aligned fixes last. */
4999 /* Assign a slot at the end of section if none assigned yet. */
5000 if (stub_entry
->stub_offset
== (bfd_vma
) -1)
5002 stub_entry
->stub_offset
= stub_sec
->size
;
5005 loc
= stub_sec
->contents
+ stub_entry
->stub_offset
;
5007 stub_bfd
= stub_sec
->owner
;
5009 /* This is the address of the stub destination. */
5010 sym_value
= (stub_entry
->target_value
5011 + stub_entry
->target_section
->output_offset
5012 + stub_entry
->target_section
->output_section
->vma
);
5014 template_sequence
= stub_entry
->stub_template
;
5015 template_size
= stub_entry
->stub_template_size
;
5018 for (i
= 0; i
< template_size
; i
++)
5020 switch (template_sequence
[i
].type
)
5024 bfd_vma data
= (bfd_vma
) template_sequence
[i
].data
;
5025 if (template_sequence
[i
].reloc_addend
!= 0)
5027 /* We've borrowed the reloc_addend field to mean we should
5028 insert a condition code into this (Thumb-1 branch)
5029 instruction. See THUMB16_BCOND_INSN. */
5030 BFD_ASSERT ((data
& 0xff00) == 0xd000);
5031 data
|= ((stub_entry
->orig_insn
>> 22) & 0xf) << 8;
5033 bfd_put_16 (stub_bfd
, data
, loc
+ size
);
5039 bfd_put_16 (stub_bfd
,
5040 (template_sequence
[i
].data
>> 16) & 0xffff,
5042 bfd_put_16 (stub_bfd
, template_sequence
[i
].data
& 0xffff,
5044 if (template_sequence
[i
].r_type
!= R_ARM_NONE
)
5046 stub_reloc_idx
[nrelocs
] = i
;
5047 stub_reloc_offset
[nrelocs
++] = size
;
5053 bfd_put_32 (stub_bfd
, template_sequence
[i
].data
,
5055 /* Handle cases where the target is encoded within the
5057 if (template_sequence
[i
].r_type
== R_ARM_JUMP24
)
5059 stub_reloc_idx
[nrelocs
] = i
;
5060 stub_reloc_offset
[nrelocs
++] = size
;
5066 bfd_put_32 (stub_bfd
, template_sequence
[i
].data
, loc
+ size
);
5067 stub_reloc_idx
[nrelocs
] = i
;
5068 stub_reloc_offset
[nrelocs
++] = size
;
5079 stub_sec
->size
+= size
;
5081 /* Stub size has already been computed in arm_size_one_stub. Check
5083 BFD_ASSERT (size
== stub_entry
->stub_size
);
5085 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
5086 if (stub_entry
->branch_type
== ST_BRANCH_TO_THUMB
)
5089 /* Assume non empty slots have at least one and at most MAXRELOCS entries
5090 to relocate in each stub. */
5092 (size
== 0 && stub_entry
->stub_type
== arm_stub_cmse_branch_thumb_only
);
5093 BFD_ASSERT (removed_sg_veneer
|| (nrelocs
!= 0 && nrelocs
<= MAXRELOCS
));
5095 for (i
= 0; i
< nrelocs
; i
++)
5097 Elf_Internal_Rela rel
;
5098 bfd_boolean unresolved_reloc
;
5099 char *error_message
;
5101 sym_value
+ template_sequence
[stub_reloc_idx
[i
]].reloc_addend
;
5103 rel
.r_offset
= stub_entry
->stub_offset
+ stub_reloc_offset
[i
];
5104 rel
.r_info
= ELF32_R_INFO (0,
5105 template_sequence
[stub_reloc_idx
[i
]].r_type
);
5108 if (stub_entry
->stub_type
== arm_stub_a8_veneer_b_cond
&& i
== 0)
5109 /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
5110 template should refer back to the instruction after the original
5111 branch. We use target_section as Cortex-A8 erratum workaround stubs
5112 are only generated when both source and target are in the same
5114 points_to
= stub_entry
->target_section
->output_section
->vma
5115 + stub_entry
->target_section
->output_offset
5116 + stub_entry
->source_value
;
5118 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
5119 (template_sequence
[stub_reloc_idx
[i
]].r_type
),
5120 stub_bfd
, info
->output_bfd
, stub_sec
, stub_sec
->contents
, &rel
,
5121 points_to
, info
, stub_entry
->target_section
, "", STT_FUNC
,
5122 stub_entry
->branch_type
,
5123 (struct elf_link_hash_entry
*) stub_entry
->h
, &unresolved_reloc
,
/* Calculate the template, template size and instruction size for a stub.
   Return value is the instruction size.  */

static unsigned int
find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
			     const insn_sequence **stub_template,
			     int *stub_template_size)
{
  const insn_sequence *template_sequence = NULL;
  int template_size = 0, i;
  unsigned int size;

  template_sequence = stub_definitions[stub_type].template_sequence;
  if (stub_template)
    *stub_template = template_sequence;

  template_size = stub_definitions[stub_type].template_size;
  if (stub_template_size)
    *stub_template_size = template_size;

  size = 0;
  for (i = 0; i < template_size; i++)
    {
      switch (template_sequence[i].type)
	{
	case THUMB16_TYPE:
	  size += 2;
	  break;

	case ARM_TYPE:
	case THUMB32_TYPE:
	case DATA_TYPE:
	  size += 4;
	  break;

	default:
	  BFD_FAIL ();
	  return 0;
	}
    }

  return size;
}
/* As above, but don't actually build the stub.  Just bump offset so
   we know stub section sizes.  */

static bfd_boolean
arm_size_one_stub (struct bfd_hash_entry *gen_entry,
		   void *in_arg ATTRIBUTE_UNUSED)
{
  struct elf32_arm_stub_hash_entry *stub_entry;
  const insn_sequence *template_sequence;
  int template_size, size;

  /* Massage our args to the form they really have.  */
  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;

  BFD_ASSERT((stub_entry->stub_type > arm_stub_none)
	     && stub_entry->stub_type < ARRAY_SIZE(stub_definitions));

  size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence,
				      &template_size);

  /* Initialized to -1.  Null size indicates an empty slot full of zeros.  */
  if (stub_entry->stub_template_size)
    {
      stub_entry->stub_size = size;
      stub_entry->stub_template = template_sequence;
      stub_entry->stub_template_size = template_size;
    }

  /* Already accounted for.  */
  if (stub_entry->stub_offset != (bfd_vma) -1)
    return TRUE;

  size = (size + 7) & ~7;
  stub_entry->stub_sec->size += size;

  return TRUE;
}
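/* Illustrative sketch, not built: stub slots are rounded up to a multiple of
   8 bytes before being added to the stub section size, which is what the
   "(size + 7) & ~7" above does.  Worked examples with made-up stub sizes.  */
#if 0
#include <assert.h>

static void
demo_stub_size_rounding (void)
{
  assert (((12 + 7) & ~7) == 16);	/* 12-byte stub occupies a 16-byte slot.  */
  assert (((16 + 7) & ~7) == 16);	/* already a multiple of 8.  */
  assert ((( 2 + 7) & ~7) ==  8);	/* smallest non-empty slot is 8 bytes.  */
}
#endif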
/* External entry points for sizing and building linker stubs.  */

/* Set up various things so that we can make a list of input sections
   for each output section included in the link.  Returns -1 on error,
   0 when no stubs will be needed, and 1 on success.  */

int
elf32_arm_setup_section_lists (bfd *output_bfd,
			       struct bfd_link_info *info)
{
  bfd *input_bfd;
  unsigned int bfd_count;
  unsigned int top_id, top_index;
  asection *section;
  asection **input_list, **list;
  bfd_size_type amt;
  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);

  if (htab == NULL)
    return 0;
  if (! is_elf_hash_table (htab))
    return 0;

  /* Count the number of input BFDs and find the top input section id.  */
  for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
       input_bfd != NULL;
       input_bfd = input_bfd->link.next)
    {
      bfd_count += 1;
      for (section = input_bfd->sections;
	   section != NULL;
	   section = section->next)
	{
	  if (top_id < section->id)
	    top_id = section->id;
	}
    }
  htab->bfd_count = bfd_count;

  amt = sizeof (struct map_stub) * (top_id + 1);
  htab->stub_group = (struct map_stub *) bfd_zmalloc (amt);
  if (htab->stub_group == NULL)
    return -1;
  htab->top_id = top_id;

  /* We can't use output_bfd->section_count here to find the top output
     section index as some sections may have been removed, and
     _bfd_strip_section_from_output doesn't renumber the indices.  */
  for (section = output_bfd->sections, top_index = 0;
       section != NULL;
       section = section->next)
    {
      if (top_index < section->index)
	top_index = section->index;
    }

  htab->top_index = top_index;
  amt = sizeof (asection *) * (top_index + 1);
  input_list = (asection **) bfd_malloc (amt);
  htab->input_list = input_list;
  if (input_list == NULL)
    return -1;

  /* For sections we aren't interested in, mark their entries with a
     value we can check later.  */
  list = input_list + top_index;
  do
    *list = bfd_abs_section_ptr;
  while (list-- != input_list);

  for (section = output_bfd->sections;
       section != NULL;
       section = section->next)
    {
      if ((section->flags & SEC_CODE) != 0)
	input_list[section->index] = NULL;
    }

  return 1;
}
/* The linker repeatedly calls this function for each input section,
   in the order that input sections are linked into output sections.
   Build lists of input sections to determine groupings between which
   we may insert linker stubs.  */

void
elf32_arm_next_input_section (struct bfd_link_info *info,
			      asection *isec)
{
  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);

  if (htab == NULL)
    return;

  if (isec->output_section->index <= htab->top_index)
    {
      asection **list = htab->input_list + isec->output_section->index;

      if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0)
	{
	  /* Steal the link_sec pointer for our list.  */
#define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
	  /* This happens to make the list in reverse order,
	     which we reverse later.  */
	  PREV_SEC (isec) = *list;
	  *list = isec;
	}
    }
}
/* See whether we can group stub sections together.  Grouping stub
   sections may result in fewer stubs.  More importantly, we need to
   put all .init* and .fini* stubs at the end of the .init or
   .fini output sections respectively, because glibc splits the
   _init and _fini functions into multiple parts.  Putting a stub in
   the middle of a function is not a good idea.  */

static void
group_sections (struct elf32_arm_link_hash_table *htab,
		bfd_size_type stub_group_size,
		bfd_boolean stubs_always_after_branch)
{
  asection **list = htab->input_list;

  do
    {
      asection *tail = *list;
      asection *head;

      if (tail == bfd_abs_section_ptr)
	continue;

      /* Reverse the list: we must avoid placing stubs at the
	 beginning of the section because the beginning of the text
	 section may be required for an interrupt vector in bare metal
	 code.  */
#define NEXT_SEC PREV_SEC
      head = NULL;
      while (tail != NULL)
	{
	  /* Pop from tail.  */
	  asection *item = tail;
	  tail = PREV_SEC (item);

	  /* Push on head.  */
	  NEXT_SEC (item) = head;
	  head = item;
	}

      while (head != NULL)
	{
	  asection *curr;
	  asection *next;
	  bfd_vma stub_group_start = head->output_offset;
	  bfd_vma end_of_next;

	  curr = head;
	  while (NEXT_SEC (curr) != NULL)
	    {
	      next = NEXT_SEC (curr);
	      end_of_next = next->output_offset + next->size;
	      if (end_of_next - stub_group_start >= stub_group_size)
		/* End of NEXT is too far from start, so stop.  */
		break;
	      /* Add NEXT to the group.  */
	      curr = next;
	    }

	  /* OK, the size from the start to the start of CURR is less
	     than stub_group_size and thus can be handled by one stub
	     section.  (Or the head section is itself larger than
	     stub_group_size, in which case we may be toast.)
	     We should really be keeping track of the total size of
	     stubs added here, as stubs contribute to the final output
	     section size.  */
	  do
	    {
	      next = NEXT_SEC (head);
	      /* Set up this stub group.  */
	      htab->stub_group[head->id].link_sec = curr;
	    }
	  while (head != curr && (head = next) != NULL);

	  /* But wait, there's more!  Input sections up to stub_group_size
	     bytes after the stub section can be handled by it too.  */
	  if (!stubs_always_after_branch)
	    {
	      stub_group_start = curr->output_offset + curr->size;

	      while (next != NULL)
		{
		  end_of_next = next->output_offset + next->size;
		  if (end_of_next - stub_group_start >= stub_group_size)
		    /* End of NEXT is too far from stubs, so stop.  */
		    break;
		  /* Add NEXT to the stub group.  */
		  head = next;
		  next = NEXT_SEC (head);
		  htab->stub_group[head->id].link_sec = curr;
		}
	    }
	  head = next;
	}
    }
  while (list++ != htab->input_list + htab->top_index);

  free (htab->input_list);
#undef PREV_SEC
#undef NEXT_SEC
}
/* Comparison function for sorting/searching relocations relating to the
   Cortex-A8 erratum.  */

static int
a8_reloc_compare (const void *a, const void *b)
{
  const struct a8_erratum_reloc *ra = (const struct a8_erratum_reloc *) a;
  const struct a8_erratum_reloc *rb = (const struct a8_erratum_reloc *) b;

  if (ra->from < rb->from)
    return -1;
  else if (ra->from > rb->from)
    return 1;
  else
    return 0;
}
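/* Illustrative sketch, not built: comparators like the one above are meant
   for bsearch/qsort over an array of records sorted by their 'from' address.
   Self-contained miniature; the struct and names are made up.  */
#if 0
#include <stdlib.h>

struct demo_reloc { unsigned long from; };

static int
demo_compare (const void *a, const void *b)
{
  const struct demo_reloc *ra = (const struct demo_reloc *) a;
  const struct demo_reloc *rb = (const struct demo_reloc *) b;
  return (ra->from < rb->from) ? -1 : (ra->from > rb->from) ? 1 : 0;
}

static const struct demo_reloc *
demo_find (const struct demo_reloc *sorted, size_t n, unsigned long from)
{
  struct demo_reloc key = { from };
  return bsearch (&key, sorted, n, sizeof *sorted, demo_compare);
}
#endif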
static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
						    const char *, char **);
5445 /* Helper function to scan code for sequences which might trigger the Cortex-A8
5446 branch/TLB erratum. Fill in the table described by A8_FIXES_P,
5447 NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false
5451 cortex_a8_erratum_scan (bfd
*input_bfd
,
5452 struct bfd_link_info
*info
,
5453 struct a8_erratum_fix
**a8_fixes_p
,
5454 unsigned int *num_a8_fixes_p
,
5455 unsigned int *a8_fix_table_size_p
,
5456 struct a8_erratum_reloc
*a8_relocs
,
5457 unsigned int num_a8_relocs
,
5458 unsigned prev_num_a8_fixes
,
5459 bfd_boolean
*stub_changed_p
)
5462 struct elf32_arm_link_hash_table
*htab
= elf32_arm_hash_table (info
);
5463 struct a8_erratum_fix
*a8_fixes
= *a8_fixes_p
;
5464 unsigned int num_a8_fixes
= *num_a8_fixes_p
;
5465 unsigned int a8_fix_table_size
= *a8_fix_table_size_p
;
5470 for (section
= input_bfd
->sections
;
5472 section
= section
->next
)
5474 bfd_byte
*contents
= NULL
;
5475 struct _arm_elf_section_data
*sec_data
;
5479 if (elf_section_type (section
) != SHT_PROGBITS
5480 || (elf_section_flags (section
) & SHF_EXECINSTR
) == 0
5481 || (section
->flags
& SEC_EXCLUDE
) != 0
5482 || (section
->sec_info_type
== SEC_INFO_TYPE_JUST_SYMS
)
5483 || (section
->output_section
== bfd_abs_section_ptr
))
5486 base_vma
= section
->output_section
->vma
+ section
->output_offset
;
5488 if (elf_section_data (section
)->this_hdr
.contents
!= NULL
)
5489 contents
= elf_section_data (section
)->this_hdr
.contents
;
5490 else if (! bfd_malloc_and_get_section (input_bfd
, section
, &contents
))
5493 sec_data
= elf32_arm_section_data (section
);
5495 for (span
= 0; span
< sec_data
->mapcount
; span
++)
5497 unsigned int span_start
= sec_data
->map
[span
].vma
;
5498 unsigned int span_end
= (span
== sec_data
->mapcount
- 1)
5499 ? section
->size
: sec_data
->map
[span
+ 1].vma
;
5501 char span_type
= sec_data
->map
[span
].type
;
5502 bfd_boolean last_was_32bit
= FALSE
, last_was_branch
= FALSE
;
5504 if (span_type
!= 't')
5507 /* Span is entirely within a single 4KB region: skip scanning. */
5508 if (((base_vma
+ span_start
) & ~0xfff)
5509 == ((base_vma
+ span_end
) & ~0xfff))
5512 /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
5514 * The opcode is BLX.W, BL.W, B.W, Bcc.W
5515 * The branch target is in the same 4KB region as the
5516 first half of the branch.
5517 * The instruction before the branch is a 32-bit
5518 length non-branch instruction. */
5519 for (i
= span_start
; i
< span_end
;)
5521 unsigned int insn
= bfd_getl16 (&contents
[i
]);
5522 bfd_boolean insn_32bit
= FALSE
, is_blx
= FALSE
, is_b
= FALSE
;
5523 bfd_boolean is_bl
= FALSE
, is_bcc
= FALSE
, is_32bit_branch
;
5525 if ((insn
& 0xe000) == 0xe000 && (insn
& 0x1800) != 0x0000)
5530 /* Load the rest of the insn (in manual-friendly order). */
5531 insn
= (insn
<< 16) | bfd_getl16 (&contents
[i
+ 2]);
5533 /* Encoding T4: B<c>.W. */
5534 is_b
= (insn
& 0xf800d000) == 0xf0009000;
5535 /* Encoding T1: BL<c>.W. */
5536 is_bl
= (insn
& 0xf800d000) == 0xf000d000;
5537 /* Encoding T2: BLX<c>.W. */
5538 is_blx
= (insn
& 0xf800d000) == 0xf000c000;
5539 /* Encoding T3: B<c>.W (not permitted in IT block). */
5540 is_bcc
= (insn
& 0xf800d000) == 0xf0008000
5541 && (insn
& 0x07f00000) != 0x03800000;
5544 is_32bit_branch
= is_b
|| is_bl
|| is_blx
|| is_bcc
;
5546 if (((base_vma
+ i
) & 0xfff) == 0xffe
5550 && ! last_was_branch
)
5552 bfd_signed_vma offset
= 0;
5553 bfd_boolean force_target_arm
= FALSE
;
5554 bfd_boolean force_target_thumb
= FALSE
;
5556 enum elf32_arm_stub_type stub_type
= arm_stub_none
;
5557 struct a8_erratum_reloc key
, *found
;
5558 bfd_boolean use_plt
= FALSE
;
5560 key
.from
= base_vma
+ i
;
5561 found
= (struct a8_erratum_reloc
*)
5562 bsearch (&key
, a8_relocs
, num_a8_relocs
,
5563 sizeof (struct a8_erratum_reloc
),
5568 char *error_message
= NULL
;
5569 struct elf_link_hash_entry
*entry
;
5571 /* We don't care about the error returned from this
5572 function, only if there is glue or not. */
5573 entry
= find_thumb_glue (info
, found
->sym_name
,
5577 found
->non_a8_stub
= TRUE
;
5579 /* Keep a simpler condition, for the sake of clarity. */
5580 if (htab
->root
.splt
!= NULL
&& found
->hash
!= NULL
5581 && found
->hash
->root
.plt
.offset
!= (bfd_vma
) -1)
5584 if (found
->r_type
== R_ARM_THM_CALL
)
5586 if (found
->branch_type
== ST_BRANCH_TO_ARM
5588 force_target_arm
= TRUE
;
5590 force_target_thumb
= TRUE
;
5594 /* Check if we have an offending branch instruction. */
5596 if (found
&& found
->non_a8_stub
)
5597 /* We've already made a stub for this instruction, e.g.
5598 it's a long branch or a Thumb->ARM stub. Assume that
5599 stub will suffice to work around the A8 erratum (see
5600 setting of always_after_branch above). */
5604 offset
= (insn
& 0x7ff) << 1;
5605 offset
|= (insn
& 0x3f0000) >> 4;
5606 offset
|= (insn
& 0x2000) ? 0x40000 : 0;
5607 offset
|= (insn
& 0x800) ? 0x80000 : 0;
5608 offset
|= (insn
& 0x4000000) ? 0x100000 : 0;
5609 if (offset
& 0x100000)
5610 offset
|= ~ ((bfd_signed_vma
) 0xfffff);
5611 stub_type
= arm_stub_a8_veneer_b_cond
;
5613 else if (is_b
|| is_bl
|| is_blx
)
5615 int s
= (insn
& 0x4000000) != 0;
5616 int j1
= (insn
& 0x2000) != 0;
5617 int j2
= (insn
& 0x800) != 0;
5621 offset
= (insn
& 0x7ff) << 1;
5622 offset
|= (insn
& 0x3ff0000) >> 4;
5626 if (offset
& 0x1000000)
5627 offset
|= ~ ((bfd_signed_vma
) 0xffffff);
5630 offset
&= ~ ((bfd_signed_vma
) 3);
5632 stub_type
= is_blx
? arm_stub_a8_veneer_blx
:
5633 is_bl
? arm_stub_a8_veneer_bl
: arm_stub_a8_veneer_b
;
5636 if (stub_type
!= arm_stub_none
)
5638 bfd_vma pc_for_insn
= base_vma
+ i
+ 4;
5640 /* The original instruction is a BL, but the target is
5641 an ARM instruction. If we were not making a stub,
5642 the BL would have been converted to a BLX. Use the
5643 BLX stub instead in that case. */
5644 if (htab
->use_blx
&& force_target_arm
5645 && stub_type
== arm_stub_a8_veneer_bl
)
5647 stub_type
= arm_stub_a8_veneer_blx
;
5651 /* Conversely, if the original instruction was
5652 BLX but the target is Thumb mode, use the BL
5654 else if (force_target_thumb
5655 && stub_type
== arm_stub_a8_veneer_blx
)
5657 stub_type
= arm_stub_a8_veneer_bl
;
5663 pc_for_insn
&= ~ ((bfd_vma
) 3);
5665 /* If we found a relocation, use the proper destination,
5666 not the offset in the (unrelocated) instruction.
5667 Note this is always done if we switched the stub type
5671 (bfd_signed_vma
) (found
->destination
- pc_for_insn
);
5673 /* If the stub will use a Thumb-mode branch to a
5674 PLT target, redirect it to the preceding Thumb
5676 if (stub_type
!= arm_stub_a8_veneer_blx
&& use_plt
)
5677 offset
-= PLT_THUMB_STUB_SIZE
;
5679 target
= pc_for_insn
+ offset
;
5681 /* The BLX stub is ARM-mode code. Adjust the offset to
5682 take the different PC value (+8 instead of +4) into
5684 if (stub_type
== arm_stub_a8_veneer_blx
)
5687 if (((base_vma
+ i
) & ~0xfff) == (target
& ~0xfff))
5689 char *stub_name
= NULL
;
5691 if (num_a8_fixes
== a8_fix_table_size
)
5693 a8_fix_table_size
*= 2;
5694 a8_fixes
= (struct a8_erratum_fix
*)
5695 bfd_realloc (a8_fixes
,
5696 sizeof (struct a8_erratum_fix
)
5697 * a8_fix_table_size
);
5700 if (num_a8_fixes
< prev_num_a8_fixes
)
5702 /* If we're doing a subsequent scan,
5703 check if we've found the same fix as
5704 before, and try and reuse the stub
5706 stub_name
= a8_fixes
[num_a8_fixes
].stub_name
;
5707 if ((a8_fixes
[num_a8_fixes
].section
!= section
)
5708 || (a8_fixes
[num_a8_fixes
].offset
!= i
))
5712 *stub_changed_p
= TRUE
;
5718 stub_name
= (char *) bfd_malloc (8 + 1 + 8 + 1);
5719 if (stub_name
!= NULL
)
5720 sprintf (stub_name
, "%x:%x", section
->id
, i
);
5723 a8_fixes
[num_a8_fixes
].input_bfd
= input_bfd
;
5724 a8_fixes
[num_a8_fixes
].section
= section
;
5725 a8_fixes
[num_a8_fixes
].offset
= i
;
5726 a8_fixes
[num_a8_fixes
].target_offset
=
5728 a8_fixes
[num_a8_fixes
].orig_insn
= insn
;
5729 a8_fixes
[num_a8_fixes
].stub_name
= stub_name
;
5730 a8_fixes
[num_a8_fixes
].stub_type
= stub_type
;
5731 a8_fixes
[num_a8_fixes
].branch_type
=
5732 is_blx
? ST_BRANCH_TO_ARM
: ST_BRANCH_TO_THUMB
;
5739 i
+= insn_32bit
? 4 : 2;
5740 last_was_32bit
= insn_32bit
;
5741 last_was_branch
= is_32bit_branch
;
5745 if (elf_section_data (section
)->this_hdr
.contents
== NULL
)
5749 *a8_fixes_p
= a8_fixes
;
5750 *num_a8_fixes_p
= num_a8_fixes
;
5751 *a8_fix_table_size_p
= a8_fix_table_size
;
5756 /* Create or update a stub entry depending on whether the stub can already be
5757 found in HTAB. The stub is identified by:
5758 - its type STUB_TYPE
5759 - its source branch (note that several can share the same stub) whose
5760 section and relocation (if any) are given by SECTION and IRELA
5762 - its target symbol whose input section, hash, name, value and branch type
5763 are given in SYM_SEC, HASH, SYM_NAME, SYM_VALUE and BRANCH_TYPE
5766 If found, the value of the stub's target symbol is updated from SYM_VALUE
5767 and *NEW_STUB is set to FALSE. Otherwise, *NEW_STUB is set to
5768 TRUE and the stub entry is initialized.
5770 Returns the stub that was created or updated, or NULL if an error
5773 static struct elf32_arm_stub_hash_entry
*
5774 elf32_arm_create_stub (struct elf32_arm_link_hash_table
*htab
,
5775 enum elf32_arm_stub_type stub_type
, asection
*section
,
5776 Elf_Internal_Rela
*irela
, asection
*sym_sec
,
5777 struct elf32_arm_link_hash_entry
*hash
, char *sym_name
,
5778 bfd_vma sym_value
, enum arm_st_branch_type branch_type
,
5779 bfd_boolean
*new_stub
)
5781 const asection
*id_sec
;
5783 struct elf32_arm_stub_hash_entry
*stub_entry
;
5784 unsigned int r_type
;
5785 bfd_boolean sym_claimed
= arm_stub_sym_claimed (stub_type
);
5787 BFD_ASSERT (stub_type
!= arm_stub_none
);
5791 stub_name
= sym_name
;
5795 BFD_ASSERT (section
);
5796 BFD_ASSERT (section
->id
<= htab
->top_id
);
5798 /* Support for grouping stub sections. */
5799 id_sec
= htab
->stub_group
[section
->id
].link_sec
;
5801 /* Get the name of this stub. */
5802 stub_name
= elf32_arm_stub_name (id_sec
, sym_sec
, hash
, irela
,
5808 stub_entry
= arm_stub_hash_lookup (&htab
->stub_hash_table
, stub_name
, FALSE
,
5810 /* The proper stub has already been created, just update its value. */
5811 if (stub_entry
!= NULL
)
5815 stub_entry
->target_value
= sym_value
;
5819 stub_entry
= elf32_arm_add_stub (stub_name
, section
, htab
, stub_type
);
5820 if (stub_entry
== NULL
)
5827 stub_entry
->target_value
= sym_value
;
5828 stub_entry
->target_section
= sym_sec
;
5829 stub_entry
->stub_type
= stub_type
;
5830 stub_entry
->h
= hash
;
5831 stub_entry
->branch_type
= branch_type
;
5834 stub_entry
->output_name
= sym_name
;
5837 if (sym_name
== NULL
)
5838 sym_name
= "unnamed";
5839 stub_entry
->output_name
= (char *)
5840 bfd_alloc (htab
->stub_bfd
, sizeof (THUMB2ARM_GLUE_ENTRY_NAME
)
5841 + strlen (sym_name
));
5842 if (stub_entry
->output_name
== NULL
)
5848 /* For historical reasons, use the existing names for ARM-to-Thumb and
5849 Thumb-to-ARM stubs. */
5850 r_type
= ELF32_R_TYPE (irela
->r_info
);
5851 if ((r_type
== (unsigned int) R_ARM_THM_CALL
5852 || r_type
== (unsigned int) R_ARM_THM_JUMP24
5853 || r_type
== (unsigned int) R_ARM_THM_JUMP19
)
5854 && branch_type
== ST_BRANCH_TO_ARM
)
5855 sprintf (stub_entry
->output_name
, THUMB2ARM_GLUE_ENTRY_NAME
, sym_name
);
5856 else if ((r_type
== (unsigned int) R_ARM_CALL
5857 || r_type
== (unsigned int) R_ARM_JUMP24
)
5858 && branch_type
== ST_BRANCH_TO_THUMB
)
5859 sprintf (stub_entry
->output_name
, ARM2THUMB_GLUE_ENTRY_NAME
, sym_name
);
5861 sprintf (stub_entry
->output_name
, STUB_ENTRY_NAME
, sym_name
);
5868 /* Scan symbols in INPUT_BFD to identify secure entry functions needing a
5869 gateway veneer to transition from non secure to secure state and create them
5872 "ARMv8-M Security Extensions: Requirements on Development Tools" document
5873 defines the conditions that govern Secure Gateway veneer creation for a
5874 given symbol <SYM> as follows:
5875 - it has function type
5876 - it has non local binding
5877 - a symbol named __acle_se_<SYM> (called special symbol) exists with the
5878 same type, binding and value as <SYM> (called normal symbol).
5879 An entry function can handle secure state transition itself in which case
5880 its special symbol would have a different value from the normal symbol.
5882 OUT_ATTR gives the output attributes, SYM_HASHES the symbol index to hash
5883 entry mapping while HTAB gives the name to hash entry mapping.
5884 *CMSE_STUB_CREATED is increased by the number of secure gateway veneer
5887 The return value gives whether a stub failed to be allocated. */
5890 cmse_scan (bfd
*input_bfd
, struct elf32_arm_link_hash_table
*htab
,
5891 obj_attribute
*out_attr
, struct elf_link_hash_entry
**sym_hashes
,
5892 int *cmse_stub_created
)
5894 const struct elf_backend_data
*bed
;
5895 Elf_Internal_Shdr
*symtab_hdr
;
5896 unsigned i
, j
, sym_count
, ext_start
;
5897 Elf_Internal_Sym
*cmse_sym
, *local_syms
;
5898 struct elf32_arm_link_hash_entry
*hash
, *cmse_hash
= NULL
;
5899 enum arm_st_branch_type branch_type
;
5900 char *sym_name
, *lsym_name
;
5903 struct elf32_arm_stub_hash_entry
*stub_entry
;
5904 bfd_boolean is_v8m
, new_stub
, cmse_invalid
, ret
= TRUE
;
5906 bed
= get_elf_backend_data (input_bfd
);
5907 symtab_hdr
= &elf_tdata (input_bfd
)->symtab_hdr
;
5908 sym_count
= symtab_hdr
->sh_size
/ bed
->s
->sizeof_sym
;
5909 ext_start
= symtab_hdr
->sh_info
;
5910 is_v8m
= (out_attr
[Tag_CPU_arch
].i
>= TAG_CPU_ARCH_V8M_BASE
5911 && out_attr
[Tag_CPU_arch_profile
].i
== 'M');
5913 local_syms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
5914 if (local_syms
== NULL
)
5915 local_syms
= bfd_elf_get_elf_syms (input_bfd
, symtab_hdr
,
5916 symtab_hdr
->sh_info
, 0, NULL
, NULL
,
5918 if (symtab_hdr
->sh_info
&& local_syms
== NULL
)
5922 for (i
= 0; i
< sym_count
; i
++)
5924 cmse_invalid
= FALSE
;
5928 cmse_sym
= &local_syms
[i
];
5929 /* Not a special symbol. */
5930 if (!ARM_GET_SYM_CMSE_SPCL (cmse_sym
->st_target_internal
))
5932 sym_name
= bfd_elf_string_from_elf_section (input_bfd
,
5933 symtab_hdr
->sh_link
,
5935 /* Special symbol with local binding. */
5936 cmse_invalid
= TRUE
;
5940 cmse_hash
= elf32_arm_hash_entry (sym_hashes
[i
- ext_start
]);
5941 sym_name
= (char *) cmse_hash
->root
.root
.root
.string
;
5943 /* Not a special symbol. */
5944 if (!ARM_GET_SYM_CMSE_SPCL (cmse_hash
->root
.target_internal
))
5947 /* Special symbol has incorrect binding or type. */
5948 if ((cmse_hash
->root
.root
.type
!= bfd_link_hash_defined
5949 && cmse_hash
->root
.root
.type
!= bfd_link_hash_defweak
)
5950 || cmse_hash
->root
.type
!= STT_FUNC
)
5951 cmse_invalid
= TRUE
;
5956 _bfd_error_handler (_("%pB: special symbol `%s' only allowed for "
5957 "ARMv8-M architecture or later"),
5958 input_bfd
, sym_name
);
5959 is_v8m
= TRUE
; /* Avoid multiple warning. */
5965 _bfd_error_handler (_("%pB: invalid special symbol `%s'; it must be"
5966 " a global or weak function symbol"),
5967 input_bfd
, sym_name
);
5973 sym_name
+= strlen (CMSE_PREFIX
);
5974 hash
= (struct elf32_arm_link_hash_entry
*)
5975 elf_link_hash_lookup (&(htab
)->root
, sym_name
, FALSE
, FALSE
, TRUE
);
5977 /* No associated normal symbol or it is neither global nor weak. */
5979 || (hash
->root
.root
.type
!= bfd_link_hash_defined
5980 && hash
->root
.root
.type
!= bfd_link_hash_defweak
)
5981 || hash
->root
.type
!= STT_FUNC
)
5983 /* Initialize here to avoid warning about use of possibly
5984 uninitialized variable. */
5989 /* Searching for a normal symbol with local binding. */
5990 for (; j
< ext_start
; j
++)
5993 bfd_elf_string_from_elf_section (input_bfd
,
5994 symtab_hdr
->sh_link
,
5995 local_syms
[j
].st_name
);
5996 if (!strcmp (sym_name
, lsym_name
))
6001 if (hash
|| j
< ext_start
)
6004 (_("%pB: invalid standard symbol `%s'; it must be "
6005 "a global or weak function symbol"),
6006 input_bfd
, sym_name
);
6010 (_("%pB: absent standard symbol `%s'"), input_bfd
, sym_name
);
6016 sym_value
= hash
->root
.root
.u
.def
.value
;
6017 section
= hash
->root
.root
.u
.def
.section
;
6019 if (cmse_hash
->root
.root
.u
.def
.section
!= section
)
6022 (_("%pB: `%s' and its special symbol are in different sections"),
6023 input_bfd
, sym_name
);
6026 if (cmse_hash
->root
.root
.u
.def
.value
!= sym_value
)
6027 continue; /* Ignore: could be an entry function starting with SG. */
6029 /* If this section is a link-once section that will be discarded, then
6030 don't create any stubs. */
6031 if (section
->output_section
== NULL
)
6034 (_("%pB: entry function `%s' not output"), input_bfd
, sym_name
);
6038 if (hash
->root
.size
== 0)
6041 (_("%pB: entry function `%s' is empty"), input_bfd
, sym_name
);
6047 branch_type
= ARM_GET_SYM_BRANCH_TYPE (hash
->root
.target_internal
);
6049 = elf32_arm_create_stub (htab
, arm_stub_cmse_branch_thumb_only
,
6050 NULL
, NULL
, section
, hash
, sym_name
,
6051 sym_value
, branch_type
, &new_stub
);
6053 if (stub_entry
== NULL
)
6057 BFD_ASSERT (new_stub
);
6058 (*cmse_stub_created
)++;
6062 if (!symtab_hdr
->contents
)
/* Return TRUE iff a symbol identified by its linker HASH entry is a secure
   code entry function, ie can be called from non secure code without using a
   veneer.  */

static bfd_boolean
cmse_entry_fct_p (struct elf32_arm_link_hash_entry *hash)
{
  bfd_byte contents[4];
  uint32_t first_insn;
  asection *section;
  bfd_vma offset;
  bfd *abfd;

  /* Defined symbol of function type.  */
  if (hash->root.root.type != bfd_link_hash_defined
      && hash->root.root.type != bfd_link_hash_defweak)
    return FALSE;
  if (hash->root.type != STT_FUNC)
    return FALSE;

  /* Read first instruction.  */
  section = hash->root.root.u.def.section;
  abfd = section->owner;
  offset = hash->root.root.u.def.value - section->vma;
  if (!bfd_get_section_contents (abfd, section, contents, offset,
				 sizeof (contents)))
    return FALSE;

  first_insn = bfd_get_32 (abfd, contents);

  /* Starts by SG instruction.  */
  return first_insn == 0xe97fe97f;
}
== 0xe97fe97f;
/* Output the name (in the symbol table) of the veneer GEN_ENTRY if it is a
   new secure gateway veneer (ie the veneer was not in the input import
   library) and there is no output import library (GEN_INFO->out_implib_bfd
   is NULL).  */

static bfd_boolean
arm_list_new_cmse_stub (struct bfd_hash_entry *gen_entry, void *gen_info)
{
  struct elf32_arm_stub_hash_entry *stub_entry;
  struct bfd_link_info *info;

  /* Massage our args to the form they really have.  */
  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
  info = (struct bfd_link_info *) gen_info;

  if (info->out_implib_bfd)
    return TRUE;

  if (stub_entry->stub_type != arm_stub_cmse_branch_thumb_only)
    return TRUE;

  if (stub_entry->stub_offset == (bfd_vma) -1)
    _bfd_error_handler (" %s", stub_entry->output_name);

  return TRUE;
}
/* Set the offset of each secure gateway veneer so that its address remains
   identical to the one in the input import library referred to by
   HTAB->in_implib_bfd.  A warning is issued for veneers that disappeared
   (present in the input import library but absent from the executable being
   linked), or if new veneers appeared and there is no output import library
   (INFO->out_implib_bfd is NULL and *CMSE_STUB_CREATED is bigger than the
   number of secure gateway veneers found in the input import library).

   The function returns whether an error occurred.  If no error occurred,
   *CMSE_STUB_CREATED gives the number of SG veneers created by both cmse_scan
   and this function, and HTAB->new_cmse_stub_offset is set past the biggest
   veneer observed, so that new veneers are laid out after it.  */
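/* Worked example (illustrative numbers only): if the highest SG veneer found
   in the import library sits at offset 0x20 and the veneer size rounds up to
   8 bytes, the stub_offset + ((cmse_stub_size + 7) & ~7) computation further
   down leaves HTAB->new_cmse_stub_offset at 0x28, so newly created veneers
   are laid out from there.  */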
static bfd_boolean
set_cmse_veneer_addr_from_implib (struct bfd_link_info *info,
				  struct elf32_arm_link_hash_table *htab,
				  int *cmse_stub_created)
{
  asection *stub_out_sec;
  bfd_boolean ret = TRUE;
  Elf_Internal_Sym *intsym;
  const char *out_sec_name;
  bfd_size_type cmse_stub_size;
  asymbol **sympp = NULL, *sym;
  struct elf32_arm_link_hash_entry *hash;
  const insn_sequence *cmse_stub_template;
  struct elf32_arm_stub_hash_entry *stub_entry;
  int cmse_stub_template_size, new_cmse_stubs_created = *cmse_stub_created;
  bfd_vma veneer_value, stub_offset, next_cmse_stub_offset;
  bfd_vma cmse_stub_array_start = (bfd_vma) -1, cmse_stub_sec_vma = 0;

  /* No input secure gateway import library.  */
  if (!htab->in_implib_bfd)
    return TRUE;

  in_implib_bfd = htab->in_implib_bfd;
  if (!htab->cmse_implib)
    {
      _bfd_error_handler (_("%pB: --in-implib only supported for Secure "
			    "Gateway import libraries"), in_implib_bfd);
      return FALSE;
    }
  /* Get the symbol table size.  */
  symsize = bfd_get_symtab_upper_bound (in_implib_bfd);

  /* Read in the input secure gateway import library's symbol table.  */
  sympp = (asymbol **) xmalloc (symsize);
  symcount = bfd_canonicalize_symtab (in_implib_bfd, sympp);

  htab->new_cmse_stub_offset = 0;
  cmse_stub_size =
    find_stub_size_and_template (arm_stub_cmse_branch_thumb_only,
				 &cmse_stub_template,
				 &cmse_stub_template_size);
  out_sec_name =
    arm_dedicated_stub_output_section_name (arm_stub_cmse_branch_thumb_only);
  stub_out_sec =
    bfd_get_section_by_name (htab->obfd, out_sec_name);
  if (stub_out_sec != NULL)
    cmse_stub_sec_vma = stub_out_sec->vma;
  /* Set the addresses of the veneers mentioned in the input secure gateway
     import library's symbol table.  */
  for (i = 0; i < symcount; i++)
    {
      sym = sympp[i];
      flags = sym->flags;
      sym_name = (char *) bfd_asymbol_name (sym);
      intsym = &((elf_symbol_type *) sym)->internal_elf_sym;

      if (sym->section != bfd_abs_section_ptr
	  || !(flags & (BSF_GLOBAL | BSF_WEAK))
	  || (flags & BSF_FUNCTION) != BSF_FUNCTION
	  || (ARM_GET_SYM_BRANCH_TYPE (intsym->st_target_internal)
	      != ST_BRANCH_TO_THUMB))
	{
	  _bfd_error_handler (_("%pB: invalid import library entry: `%s'; "
				"symbol should be absolute, global and "
				"refer to Thumb functions"),
			      in_implib_bfd, sym_name);
	  continue;
	}
      veneer_value = bfd_asymbol_value (sym);
      stub_offset = veneer_value - cmse_stub_sec_vma;
      stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, sym_name,
					 FALSE, FALSE);
      hash = (struct elf32_arm_link_hash_entry *)
	elf_link_hash_lookup (&(htab)->root, sym_name, FALSE, FALSE, TRUE);

      /* A stub entry should have been created by cmse_scan, or the symbol
	 must be that of a secure function callable from non secure code.  */
      if (!stub_entry && !hash)
	{
	  bfd_boolean new_stub;

	  _bfd_error_handler
	    (_("entry function `%s' disappeared from secure code"), sym_name);
	  hash = (struct elf32_arm_link_hash_entry *)
	    elf_link_hash_lookup (&(htab)->root, sym_name, TRUE, TRUE, TRUE);
	  stub_entry
	    = elf32_arm_create_stub (htab, arm_stub_cmse_branch_thumb_only,
				     NULL, NULL, bfd_abs_section_ptr, hash,
				     sym_name, veneer_value,
				     ST_BRANCH_TO_THUMB, &new_stub);
	  if (stub_entry == NULL)
	    {
	      ret = FALSE;
	      continue;
	    }

	  BFD_ASSERT (new_stub);
	  new_cmse_stubs_created++;
	  (*cmse_stub_created)++;
	  stub_entry->stub_template_size = stub_entry->stub_size = 0;
	  stub_entry->stub_offset = stub_offset;
	}
      /* The symbol found is not callable from non secure code.  */
      else if (!stub_entry)
	{
	  if (!cmse_entry_fct_p (hash))
	    {
	      _bfd_error_handler (_("`%s' refers to a non entry function"),
				  sym_name);
	      ret = FALSE;
	    }
	  continue;
	}

      /* Only stubs for SG veneers should have been created.  */
      BFD_ASSERT (stub_entry->stub_type == arm_stub_cmse_branch_thumb_only);

      /* Check visibility hasn't changed.  */
      if (!!(flags & BSF_GLOBAL)
	  != (hash->root.root.type == bfd_link_hash_defined))
	_bfd_error_handler
	  (_("%pB: visibility of symbol `%s' has changed"), in_implib_bfd,
	   sym_name);

      stub_entry->stub_offset = stub_offset;
      /* Size should match that of a SG veneer.  */
      if (intsym->st_size != cmse_stub_size)
	_bfd_error_handler (_("%pB: incorrect size for symbol `%s'"),
			    in_implib_bfd, sym_name);

      /* Previous veneer address is before the current SG veneer section.  */
      if (veneer_value < cmse_stub_sec_vma)
	/* Avoid offset underflow.  */
	stub_entry->stub_offset = 0;

      /* Complain if the stub offset is not a multiple of the stub size.  */
      if (stub_offset % cmse_stub_size)
	_bfd_error_handler
	  (_("offset of veneer for entry function `%s' not a multiple of "
	     "its size"), sym_name);

      new_cmse_stubs_created--;
      if (veneer_value < cmse_stub_array_start)
	cmse_stub_array_start = veneer_value;
      next_cmse_stub_offset = stub_offset + ((cmse_stub_size + 7) & ~7);
      if (next_cmse_stub_offset > htab->new_cmse_stub_offset)
	htab->new_cmse_stub_offset = next_cmse_stub_offset;
    }
  if (!info->out_implib_bfd && new_cmse_stubs_created != 0)
    {
      BFD_ASSERT (new_cmse_stubs_created > 0);
      _bfd_error_handler
	(_("new entry function(s) introduced but no output import library "
	   "specified:"));
      bfd_hash_traverse (&htab->stub_hash_table, arm_list_new_cmse_stub, info);
    }

  if (cmse_stub_array_start != cmse_stub_sec_vma)
    _bfd_error_handler
      (_("start address of `%s' is different from previous link"),
       out_sec_name);
/* Determine and set the size of the stub section for a final link.

   The basic idea here is to examine all the relocations looking for
   PC-relative calls to a target that is unreachable with a "bl"
   instruction.  */

bfd_boolean
elf32_arm_size_stubs (bfd *output_bfd,
		      bfd *stub_bfd,
		      struct bfd_link_info *info,
		      bfd_signed_vma group_size,
		      asection * (*add_stub_section) (const char *, asection *,
						      asection *,
						      unsigned int),
		      void (*layout_sections_again) (void))
{
  bfd_boolean ret = TRUE;
  obj_attribute *out_attr;
  int cmse_stub_created = 0;
  bfd_size_type stub_group_size;
  bfd_boolean m_profile, stubs_always_after_branch, first_veneer_scan = TRUE;
  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
  struct a8_erratum_fix *a8_fixes = NULL;
  unsigned int num_a8_fixes = 0, a8_fix_table_size = 10;
  struct a8_erratum_reloc *a8_relocs = NULL;
  unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;
  if (htab->fix_cortex_a8)
    {
      a8_fixes = (struct a8_erratum_fix *)
	bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size);
      a8_relocs = (struct a8_erratum_reloc *)
	bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size);
    }

  /* Propagate mach to stub bfd, because it may not have been
     finalized when we created stub_bfd.  */
  bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
		     bfd_get_mach (output_bfd));

  /* Stash our params away.  */
  htab->stub_bfd = stub_bfd;
  htab->add_stub_section = add_stub_section;
  htab->layout_sections_again = layout_sections_again;
  stubs_always_after_branch = group_size < 0;

  out_attr = elf_known_obj_attributes_proc (output_bfd);
  m_profile = out_attr[Tag_CPU_arch_profile].i == 'M';
  /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
     as the first half of a 32-bit branch straddling two 4K pages.  This is a
     crude way of enforcing that.  (The problematic case the scan below looks
     for is a 32-bit Thumb branch whose first halfword occupies the last two
     bytes of a 4K page, i.e. an address ending in 0xffe.)  */
  if (htab->fix_cortex_a8)
    stubs_always_after_branch = 1;

  if (group_size < 0)
    stub_group_size = -group_size;
  else
    stub_group_size = group_size;

  if (stub_group_size == 1)
    {
      /* Default values.  */
      /* The Thumb branch range of +-4MB has to be used as the default
	 maximum stub group size (a given section can contain both ARM and
	 Thumb code, so the worst case has to be taken into account).

	 This value is 24K less than that, which allows for 2025
	 12-byte stubs.  If we exceed that, then we will fail to link.
	 The user will have to relink with an explicit group size
	 option.  */
      stub_group_size = 4170000;
    }

  group_sections (htab, stub_group_size, stubs_always_after_branch);
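/* Note (added for clarity): the "explicit group size option" mentioned above
   is normally supplied on the linker command line (GNU ld's
   --stub-group-size=N) and reaches this function as the GROUP_SIZE argument;
   a negative value additionally forces stubs to be placed after the branches
   that need them, per the stubs_always_after_branch assignment above.  */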
  /* If we're applying the cortex A8 fix, we need to determine the
     program header size now, because we cannot change it later --
     that could alter section placements.  Notice the A8 erratum fix
     ends up requiring the section addresses to remain unchanged
     modulo the page size.  That's something we cannot represent
     inside BFD, and we don't want to force the section alignment to
     be the page size.  */
  if (htab->fix_cortex_a8)
    (*htab->layout_sections_again) ();
6435 unsigned int bfd_indx
;
6437 enum elf32_arm_stub_type stub_type
;
6438 bfd_boolean stub_changed
= FALSE
;
6439 unsigned prev_num_a8_fixes
= num_a8_fixes
;
6442 for (input_bfd
= info
->input_bfds
, bfd_indx
= 0;
6444 input_bfd
= input_bfd
->link
.next
, bfd_indx
++)
6446 Elf_Internal_Shdr
*symtab_hdr
;
6448 Elf_Internal_Sym
*local_syms
= NULL
;
6450 if (!is_arm_elf (input_bfd
))
	  /* We'll need the symbol table in a second.  */
	  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
	  if (symtab_hdr->sh_info == 0)
	    continue;

	  /* Limit the scan of symbols to object files whose profile is
	     Microcontroller, so as not to hinder performance in the
	     general case.  */
6462 if (m_profile
&& first_veneer_scan
)
6464 struct elf_link_hash_entry
**sym_hashes
;
6466 sym_hashes
= elf_sym_hashes (input_bfd
);
6467 if (!cmse_scan (input_bfd
, htab
, out_attr
, sym_hashes
,
6468 &cmse_stub_created
))
6469 goto error_ret_free_local
;
6471 if (cmse_stub_created
!= 0)
6472 stub_changed
= TRUE
;
6475 /* Walk over each section attached to the input bfd. */
6476 for (section
= input_bfd
->sections
;
6478 section
= section
->next
)
6480 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
6482 /* If there aren't any relocs, then there's nothing more
6484 if ((section
->flags
& SEC_RELOC
) == 0
6485 || section
->reloc_count
== 0
6486 || (section
->flags
& SEC_CODE
) == 0)
6489 /* If this section is a link-once section that will be
6490 discarded, then don't create any stubs. */
6491 if (section
->output_section
== NULL
6492 || section
->output_section
->owner
!= output_bfd
)
6495 /* Get the relocs. */
6497 = _bfd_elf_link_read_relocs (input_bfd
, section
, NULL
,
6498 NULL
, info
->keep_memory
);
6499 if (internal_relocs
== NULL
)
6500 goto error_ret_free_local
;
6502 /* Now examine each relocation. */
6503 irela
= internal_relocs
;
6504 irelaend
= irela
+ section
->reloc_count
;
6505 for (; irela
< irelaend
; irela
++)
6507 unsigned int r_type
, r_indx
;
6510 bfd_vma destination
;
6511 struct elf32_arm_link_hash_entry
*hash
;
6512 const char *sym_name
;
6513 unsigned char st_type
;
6514 enum arm_st_branch_type branch_type
;
6515 bfd_boolean created_stub
= FALSE
;
6517 r_type
= ELF32_R_TYPE (irela
->r_info
);
6518 r_indx
= ELF32_R_SYM (irela
->r_info
);
6520 if (r_type
>= (unsigned int) R_ARM_max
)
6522 bfd_set_error (bfd_error_bad_value
);
6523 error_ret_free_internal
:
6524 if (elf_section_data (section
)->relocs
== NULL
)
6525 free (internal_relocs
);
6527 error_ret_free_local
:
6528 if (local_syms
!= NULL
6529 && (symtab_hdr
->contents
6530 != (unsigned char *) local_syms
))
6536 if (r_indx
>= symtab_hdr
->sh_info
)
6537 hash
= elf32_arm_hash_entry
6538 (elf_sym_hashes (input_bfd
)
6539 [r_indx
- symtab_hdr
->sh_info
]);
6541 /* Only look for stubs on branch instructions, or
6542 non-relaxed TLSCALL */
6543 if ((r_type
!= (unsigned int) R_ARM_CALL
)
6544 && (r_type
!= (unsigned int) R_ARM_THM_CALL
)
6545 && (r_type
!= (unsigned int) R_ARM_JUMP24
)
6546 && (r_type
!= (unsigned int) R_ARM_THM_JUMP19
)
6547 && (r_type
!= (unsigned int) R_ARM_THM_XPC22
)
6548 && (r_type
!= (unsigned int) R_ARM_THM_JUMP24
)
6549 && (r_type
!= (unsigned int) R_ARM_PLT32
)
6550 && !((r_type
== (unsigned int) R_ARM_TLS_CALL
6551 || r_type
== (unsigned int) R_ARM_THM_TLS_CALL
)
6552 && r_type
== elf32_arm_tls_transition
6553 (info
, r_type
, &hash
->root
)
6554 && ((hash
? hash
->tls_type
6555 : (elf32_arm_local_got_tls_type
6556 (input_bfd
)[r_indx
]))
6557 & GOT_TLS_GDESC
) != 0))
6560 /* Now determine the call target, its name, value,
6567 if (r_type
== (unsigned int) R_ARM_TLS_CALL
6568 || r_type
== (unsigned int) R_ARM_THM_TLS_CALL
)
6570 /* A non-relaxed TLS call. The target is the
6571 plt-resident trampoline and nothing to do
6573 BFD_ASSERT (htab
->tls_trampoline
> 0);
6574 sym_sec
= htab
->root
.splt
;
6575 sym_value
= htab
->tls_trampoline
;
6578 branch_type
= ST_BRANCH_TO_ARM
;
6582 /* It's a local symbol. */
6583 Elf_Internal_Sym
*sym
;
6585 if (local_syms
== NULL
)
6588 = (Elf_Internal_Sym
*) symtab_hdr
->contents
;
6589 if (local_syms
== NULL
)
6591 = bfd_elf_get_elf_syms (input_bfd
, symtab_hdr
,
6592 symtab_hdr
->sh_info
, 0,
6594 if (local_syms
== NULL
)
6595 goto error_ret_free_internal
;
6598 sym
= local_syms
+ r_indx
;
6599 if (sym
->st_shndx
== SHN_UNDEF
)
6600 sym_sec
= bfd_und_section_ptr
;
6601 else if (sym
->st_shndx
== SHN_ABS
)
6602 sym_sec
= bfd_abs_section_ptr
;
6603 else if (sym
->st_shndx
== SHN_COMMON
)
6604 sym_sec
= bfd_com_section_ptr
;
6607 bfd_section_from_elf_index (input_bfd
, sym
->st_shndx
);
6610 /* This is an undefined symbol. It can never
6614 if (ELF_ST_TYPE (sym
->st_info
) != STT_SECTION
)
6615 sym_value
= sym
->st_value
;
6616 destination
= (sym_value
+ irela
->r_addend
6617 + sym_sec
->output_offset
6618 + sym_sec
->output_section
->vma
);
6619 st_type
= ELF_ST_TYPE (sym
->st_info
);
6621 ARM_GET_SYM_BRANCH_TYPE (sym
->st_target_internal
);
6623 = bfd_elf_string_from_elf_section (input_bfd
,
6624 symtab_hdr
->sh_link
,
6629 /* It's an external symbol. */
6630 while (hash
->root
.root
.type
== bfd_link_hash_indirect
6631 || hash
->root
.root
.type
== bfd_link_hash_warning
)
6632 hash
= ((struct elf32_arm_link_hash_entry
*)
6633 hash
->root
.root
.u
.i
.link
);
6635 if (hash
->root
.root
.type
== bfd_link_hash_defined
6636 || hash
->root
.root
.type
== bfd_link_hash_defweak
)
6638 sym_sec
= hash
->root
.root
.u
.def
.section
;
6639 sym_value
= hash
->root
.root
.u
.def
.value
;
6641 struct elf32_arm_link_hash_table
*globals
=
6642 elf32_arm_hash_table (info
);
6644 /* For a destination in a shared library,
6645 use the PLT stub as target address to
6646 decide whether a branch stub is
6649 && globals
->root
.splt
!= NULL
6651 && hash
->root
.plt
.offset
!= (bfd_vma
) -1)
6653 sym_sec
= globals
->root
.splt
;
6654 sym_value
= hash
->root
.plt
.offset
;
6655 if (sym_sec
->output_section
!= NULL
)
6656 destination
= (sym_value
6657 + sym_sec
->output_offset
6658 + sym_sec
->output_section
->vma
);
6660 else if (sym_sec
->output_section
!= NULL
)
6661 destination
= (sym_value
+ irela
->r_addend
6662 + sym_sec
->output_offset
6663 + sym_sec
->output_section
->vma
);
6665 else if ((hash
->root
.root
.type
== bfd_link_hash_undefined
)
6666 || (hash
->root
.root
.type
== bfd_link_hash_undefweak
))
6668 /* For a shared library, use the PLT stub as
6669 target address to decide whether a long
6670 branch stub is needed.
6671 For absolute code, they cannot be handled. */
6672 struct elf32_arm_link_hash_table
*globals
=
6673 elf32_arm_hash_table (info
);
6676 && globals
->root
.splt
!= NULL
6678 && hash
->root
.plt
.offset
!= (bfd_vma
) -1)
6680 sym_sec
= globals
->root
.splt
;
6681 sym_value
= hash
->root
.plt
.offset
;
6682 if (sym_sec
->output_section
!= NULL
)
6683 destination
= (sym_value
6684 + sym_sec
->output_offset
6685 + sym_sec
->output_section
->vma
);
6692 bfd_set_error (bfd_error_bad_value
);
6693 goto error_ret_free_internal
;
6695 st_type
= hash
->root
.type
;
6697 ARM_GET_SYM_BRANCH_TYPE (hash
->root
.target_internal
);
6698 sym_name
= hash
->root
.root
.root
.string
;
6703 bfd_boolean new_stub
;
6704 struct elf32_arm_stub_hash_entry
*stub_entry
;
6706 /* Determine what (if any) linker stub is needed. */
6707 stub_type
= arm_type_of_stub (info
, section
, irela
,
6708 st_type
, &branch_type
,
6709 hash
, destination
, sym_sec
,
6710 input_bfd
, sym_name
);
6711 if (stub_type
== arm_stub_none
)
6714 /* We've either created a stub for this reloc already,
6715 or we are about to. */
6717 elf32_arm_create_stub (htab
, stub_type
, section
, irela
,
6719 (char *) sym_name
, sym_value
,
6720 branch_type
, &new_stub
);
6722 created_stub
= stub_entry
!= NULL
;
6724 goto error_ret_free_internal
;
6728 stub_changed
= TRUE
;
6732 /* Look for relocations which might trigger Cortex-A8
6734 if (htab
->fix_cortex_a8
6735 && (r_type
== (unsigned int) R_ARM_THM_JUMP24
6736 || r_type
== (unsigned int) R_ARM_THM_JUMP19
6737 || r_type
== (unsigned int) R_ARM_THM_CALL
6738 || r_type
== (unsigned int) R_ARM_THM_XPC22
))
6740 bfd_vma from
= section
->output_section
->vma
6741 + section
->output_offset
6744 if ((from
& 0xfff) == 0xffe)
6746 /* Found a candidate. Note we haven't checked the
6747 destination is within 4K here: if we do so (and
6748 don't create an entry in a8_relocs) we can't tell
6749 that a branch should have been relocated when
6751 if (num_a8_relocs
== a8_reloc_table_size
)
6753 a8_reloc_table_size
*= 2;
6754 a8_relocs
= (struct a8_erratum_reloc
*)
6755 bfd_realloc (a8_relocs
,
6756 sizeof (struct a8_erratum_reloc
)
6757 * a8_reloc_table_size
);
6760 a8_relocs
[num_a8_relocs
].from
= from
;
6761 a8_relocs
[num_a8_relocs
].destination
= destination
;
6762 a8_relocs
[num_a8_relocs
].r_type
= r_type
;
6763 a8_relocs
[num_a8_relocs
].branch_type
= branch_type
;
6764 a8_relocs
[num_a8_relocs
].sym_name
= sym_name
;
6765 a8_relocs
[num_a8_relocs
].non_a8_stub
= created_stub
;
6766 a8_relocs
[num_a8_relocs
].hash
= hash
;
6773 /* We're done with the internal relocs, free them. */
6774 if (elf_section_data (section
)->relocs
== NULL
)
6775 free (internal_relocs
);
6778 if (htab
->fix_cortex_a8
)
6780 /* Sort relocs which might apply to Cortex-A8 erratum. */
6781 qsort (a8_relocs
, num_a8_relocs
,
6782 sizeof (struct a8_erratum_reloc
),
6785 /* Scan for branches which might trigger Cortex-A8 erratum. */
6786 if (cortex_a8_erratum_scan (input_bfd
, info
, &a8_fixes
,
6787 &num_a8_fixes
, &a8_fix_table_size
,
6788 a8_relocs
, num_a8_relocs
,
6789 prev_num_a8_fixes
, &stub_changed
)
6791 goto error_ret_free_local
;
6794 if (local_syms
!= NULL
6795 && symtab_hdr
->contents
!= (unsigned char *) local_syms
)
6797 if (!info
->keep_memory
)
6800 symtab_hdr
->contents
= (unsigned char *) local_syms
;
6804 if (first_veneer_scan
6805 && !set_cmse_veneer_addr_from_implib (info
, htab
,
6806 &cmse_stub_created
))
6809 if (prev_num_a8_fixes
!= num_a8_fixes
)
6810 stub_changed
= TRUE
;
6815 /* OK, we've added some stubs. Find out the new size of the
6817 for (stub_sec
= htab
->stub_bfd
->sections
;
6819 stub_sec
= stub_sec
->next
)
6821 /* Ignore non-stub sections. */
6822 if (!strstr (stub_sec
->name
, STUB_SUFFIX
))
6828 /* Add new SG veneers after those already in the input import
6830 for (stub_type
= arm_stub_none
+ 1; stub_type
< max_stub_type
;
6833 bfd_vma
*start_offset_p
;
6834 asection
**stub_sec_p
;
6836 start_offset_p
= arm_new_stubs_start_offset_ptr (htab
, stub_type
);
6837 stub_sec_p
= arm_dedicated_stub_input_section_ptr (htab
, stub_type
);
6838 if (start_offset_p
== NULL
)
6841 BFD_ASSERT (stub_sec_p
!= NULL
);
6842 if (*stub_sec_p
!= NULL
)
6843 (*stub_sec_p
)->size
= *start_offset_p
;
6846 /* Compute stub section size, considering padding. */
6847 bfd_hash_traverse (&htab
->stub_hash_table
, arm_size_one_stub
, htab
);
6848 for (stub_type
= arm_stub_none
+ 1; stub_type
< max_stub_type
;
6852 asection
**stub_sec_p
;
6854 padding
= arm_dedicated_stub_section_padding (stub_type
);
6855 stub_sec_p
= arm_dedicated_stub_input_section_ptr (htab
, stub_type
);
6856 /* Skip if no stub input section or no stub section padding
6858 if ((stub_sec_p
!= NULL
&& *stub_sec_p
== NULL
) || padding
== 0)
6860 /* Stub section padding required but no dedicated section. */
6861 BFD_ASSERT (stub_sec_p
);
6863 size
= (*stub_sec_p
)->size
;
6864 size
= (size
+ padding
- 1) & ~(padding
- 1);
6865 (*stub_sec_p
)->size
= size
;
6868 /* Add Cortex-A8 erratum veneers to stub section sizes too. */
6869 if (htab
->fix_cortex_a8
)
6870 for (i
= 0; i
< num_a8_fixes
; i
++)
6872 stub_sec
= elf32_arm_create_or_find_stub_sec (NULL
,
6873 a8_fixes
[i
].section
, htab
, a8_fixes
[i
].stub_type
);
6875 if (stub_sec
== NULL
)
6879 += find_stub_size_and_template (a8_fixes
[i
].stub_type
, NULL
,
6884 /* Ask the linker to do its stuff. */
6885 (*htab
->layout_sections_again
) ();
6886 first_veneer_scan
= FALSE
;
6889 /* Add stubs for Cortex-A8 erratum fixes now. */
6890 if (htab
->fix_cortex_a8
)
6892 for (i
= 0; i
< num_a8_fixes
; i
++)
6894 struct elf32_arm_stub_hash_entry
*stub_entry
;
6895 char *stub_name
= a8_fixes
[i
].stub_name
;
6896 asection
*section
= a8_fixes
[i
].section
;
6897 unsigned int section_id
= a8_fixes
[i
].section
->id
;
6898 asection
*link_sec
= htab
->stub_group
[section_id
].link_sec
;
6899 asection
*stub_sec
= htab
->stub_group
[section_id
].stub_sec
;
6900 const insn_sequence
*template_sequence
;
6901 int template_size
, size
= 0;
6903 stub_entry
= arm_stub_hash_lookup (&htab
->stub_hash_table
, stub_name
,
6905 if (stub_entry
== NULL
)
6907 _bfd_error_handler (_("%pB: cannot create stub entry %s"),
6908 section
->owner
, stub_name
);
6912 stub_entry
->stub_sec
= stub_sec
;
6913 stub_entry
->stub_offset
= (bfd_vma
) -1;
6914 stub_entry
->id_sec
= link_sec
;
6915 stub_entry
->stub_type
= a8_fixes
[i
].stub_type
;
6916 stub_entry
->source_value
= a8_fixes
[i
].offset
;
6917 stub_entry
->target_section
= a8_fixes
[i
].section
;
6918 stub_entry
->target_value
= a8_fixes
[i
].target_offset
;
6919 stub_entry
->orig_insn
= a8_fixes
[i
].orig_insn
;
6920 stub_entry
->branch_type
= a8_fixes
[i
].branch_type
;
6922 size
= find_stub_size_and_template (a8_fixes
[i
].stub_type
,
6926 stub_entry
->stub_size
= size
;
6927 stub_entry
->stub_template
= template_sequence
;
6928 stub_entry
->stub_template_size
= template_size
;
6931 /* Stash the Cortex-A8 erratum fix array for use later in
6932 elf32_arm_write_section(). */
6933 htab
->a8_erratum_fixes
= a8_fixes
;
6934 htab
->num_a8_erratum_fixes
= num_a8_fixes
;
6938 htab
->a8_erratum_fixes
= NULL
;
6939 htab
->num_a8_erratum_fixes
= 0;
/* Build all the stubs associated with the current output file.  The
   stubs are kept in a hash table attached to the main linker hash
   table.  We also set up the .plt entries for statically linked PIC
   functions here.  This function is called via arm_elf_finish in the
   linker.  */

bfd_boolean
elf32_arm_build_stubs (struct bfd_link_info *info)
6954 struct bfd_hash_table
*table
;
6955 enum elf32_arm_stub_type stub_type
;
6956 struct elf32_arm_link_hash_table
*htab
;
6958 htab
= elf32_arm_hash_table (info
);
6962 for (stub_sec
= htab
->stub_bfd
->sections
;
6964 stub_sec
= stub_sec
->next
)
6968 /* Ignore non-stub sections. */
6969 if (!strstr (stub_sec
->name
, STUB_SUFFIX
))
6972 /* Allocate memory to hold the linker stubs. Zeroing the stub sections
6973 must at least be done for stub section requiring padding and for SG
6974 veneers to ensure that a non secure code branching to a removed SG
6975 veneer causes an error. */
6976 size
= stub_sec
->size
;
6977 stub_sec
->contents
= (unsigned char *) bfd_zalloc (htab
->stub_bfd
, size
);
6978 if (stub_sec
->contents
== NULL
&& size
!= 0)
6984 /* Add new SG veneers after those already in the input import library. */
6985 for (stub_type
= arm_stub_none
+ 1; stub_type
< max_stub_type
; stub_type
++)
6987 bfd_vma
*start_offset_p
;
6988 asection
**stub_sec_p
;
6990 start_offset_p
= arm_new_stubs_start_offset_ptr (htab
, stub_type
);
6991 stub_sec_p
= arm_dedicated_stub_input_section_ptr (htab
, stub_type
);
6992 if (start_offset_p
== NULL
)
6995 BFD_ASSERT (stub_sec_p
!= NULL
);
6996 if (*stub_sec_p
!= NULL
)
6997 (*stub_sec_p
)->size
= *start_offset_p
;
7000 /* Build the stubs as directed by the stub hash table. */
7001 table
= &htab
->stub_hash_table
;
7002 bfd_hash_traverse (table
, arm_build_one_stub
, info
);
7003 if (htab
->fix_cortex_a8
)
7005 /* Place the cortex a8 stubs last. */
7006 htab
->fix_cortex_a8
= -1;
7007 bfd_hash_traverse (table
, arm_build_one_stub
, info
);
7013 /* Locate the Thumb encoded calling stub for NAME. */
7015 static struct elf_link_hash_entry
*
7016 find_thumb_glue (struct bfd_link_info
*link_info
,
7018 char **error_message
)
7021 struct elf_link_hash_entry
*hash
;
7022 struct elf32_arm_link_hash_table
*hash_table
;
7024 /* We need a pointer to the armelf specific hash table. */
7025 hash_table
= elf32_arm_hash_table (link_info
);
7026 if (hash_table
== NULL
)
7029 tmp_name
= (char *) bfd_malloc ((bfd_size_type
) strlen (name
)
7030 + strlen (THUMB2ARM_GLUE_ENTRY_NAME
) + 1);
7032 BFD_ASSERT (tmp_name
);
7034 sprintf (tmp_name
, THUMB2ARM_GLUE_ENTRY_NAME
, name
);
7036 hash
= elf_link_hash_lookup
7037 (&(hash_table
)->root
, tmp_name
, FALSE
, FALSE
, TRUE
);
7040 && asprintf (error_message
, _("unable to find %s glue '%s' for '%s'"),
7041 "Thumb", tmp_name
, name
) == -1)
7042 *error_message
= (char *) bfd_errmsg (bfd_error_system_call
);
7049 /* Locate the ARM encoded calling stub for NAME. */
7051 static struct elf_link_hash_entry
*
7052 find_arm_glue (struct bfd_link_info
*link_info
,
7054 char **error_message
)
7057 struct elf_link_hash_entry
*myh
;
7058 struct elf32_arm_link_hash_table
*hash_table
;
7060 /* We need a pointer to the elfarm specific hash table. */
7061 hash_table
= elf32_arm_hash_table (link_info
);
7062 if (hash_table
== NULL
)
7065 tmp_name
= (char *) bfd_malloc ((bfd_size_type
) strlen (name
)
7066 + strlen (ARM2THUMB_GLUE_ENTRY_NAME
) + 1);
7068 BFD_ASSERT (tmp_name
);
7070 sprintf (tmp_name
, ARM2THUMB_GLUE_ENTRY_NAME
, name
);
7072 myh
= elf_link_hash_lookup
7073 (&(hash_table
)->root
, tmp_name
, FALSE
, FALSE
, TRUE
);
7076 && asprintf (error_message
, _("unable to find %s glue '%s' for '%s'"),
7077 "ARM", tmp_name
, name
) == -1)
7078 *error_message
= (char *) bfd_errmsg (bfd_error_system_call
);
/* ARM->Thumb glue (static images):

     __func_from_arm:
	ldr	r12, __func_addr
	bx	r12
     __func_addr:
	.word	func	@ behave as if you saw a ARM_32 reloc.

   (v5 static images, using the "ldr pc" form):

     __func_from_arm:
	ldr	pc, __func_addr
     __func_addr:
	.word	func	@ behave as if you saw a ARM_32 reloc.

   (relocatable images):

     __func_from_arm:
	ldr	r12, __func_offset
	add	r12, r12, pc
	bx	r12
     __func_offset:
	.word	func - .  */

#define ARM2THUMB_STATIC_GLUE_SIZE 12
static const insn32 a2t1_ldr_insn = 0xe59fc000;	/* ldr ip, [pc]  */
static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;	/* bx ip  */
static const insn32 a2t3_func_addr_insn = 0x00000001;	/* literal word; bit 0
							   set marks a Thumb
							   target.  */

#define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
static const insn32 a2t1v5_ldr_insn = 0xe51ff004;	/* ldr pc, [pc, #-4]  */
static const insn32 a2t2v5_func_addr_insn = 0x00000001;

#define ARM2THUMB_PIC_GLUE_SIZE 16
static const insn32 a2t1p_ldr_insn = 0xe59fc004;	/* ldr ip, [pc, #4]  */
static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;	/* add ip, ip, pc  */
static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;	/* bx ip  */

/* Thumb->ARM: Thumb->(non-interworking aware) ARM

     __func_from_thumb:
	bx	pc
	nop
	.arm
	b	func

   The non-interworking aware variant instead loads the target address into a
   register (ldr r6, __func_addr) before transferring control to it.  */

#define THUMB2ARM_GLUE_SIZE 8
static const insn16 t2a1_bx_pc_insn = 0x4778;		/* bx pc  */
static const insn16 t2a2_noop_insn = 0x46c0;		/* nop (mov r8, r8)  */
static const insn32 t2a3_b_insn = 0xea000000;		/* b <target>  */

#define VFP11_ERRATUM_VENEER_SIZE 8
#define STM32L4XX_ERRATUM_LDM_VENEER_SIZE 16
#define STM32L4XX_ERRATUM_VLDM_VENEER_SIZE 24

#define ARM_BX_VENEER_SIZE 12
static const insn32 armbx1_tst_insn = 0xe3100001;	/* tst rN, #1 (register
							   field patched in)  */
static const insn32 armbx2_moveq_insn = 0x01a0f000;	/* moveq pc, rN  */
static const insn32 armbx3_bx_insn = 0xe12fff10;	/* bx rN  */
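/* Illustrative sketch (added commentary, not part of BFD's glue emitters):
   roughly how the static ARM->Thumb template above could be written into a
   glue section using BFD's byte-order helpers.  The function name, section
   and offset are assumptions made up for the example.  */
#if 0
static void
example_emit_a2t_static_glue (bfd *obfd, asection *s, bfd_vma offset,
			      bfd_vma thumb_func_addr)
{
  bfd_byte *p = s->contents + offset;

  bfd_put_32 (obfd, a2t1_ldr_insn, p);		/* ldr ip, [pc]  */
  bfd_put_32 (obfd, a2t2_bx_r12_insn, p + 4);	/* bx  ip  */
  /* Literal word; bit 0 set so that the BX enters Thumb state.  */
  bfd_put_32 (obfd, thumb_func_addr | 1, p + 8);
}
#endif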
7154 #ifndef ELFARM_NABI_C_INCLUDED
7156 arm_allocate_glue_section_space (bfd
* abfd
, bfd_size_type size
, const char * name
)
7159 bfd_byte
* contents
;
7163 /* Do not include empty glue sections in the output. */
7166 s
= bfd_get_linker_section (abfd
, name
);
7168 s
->flags
|= SEC_EXCLUDE
;
7173 BFD_ASSERT (abfd
!= NULL
);
7175 s
= bfd_get_linker_section (abfd
, name
);
7176 BFD_ASSERT (s
!= NULL
);
7178 contents
= (bfd_byte
*) bfd_alloc (abfd
, size
);
7180 BFD_ASSERT (s
->size
== size
);
7181 s
->contents
= contents
;
7185 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info
* info
)
7187 struct elf32_arm_link_hash_table
* globals
;
7189 globals
= elf32_arm_hash_table (info
);
7190 BFD_ASSERT (globals
!= NULL
);
7192 arm_allocate_glue_section_space (globals
->bfd_of_glue_owner
,
7193 globals
->arm_glue_size
,
7194 ARM2THUMB_GLUE_SECTION_NAME
);
7196 arm_allocate_glue_section_space (globals
->bfd_of_glue_owner
,
7197 globals
->thumb_glue_size
,
7198 THUMB2ARM_GLUE_SECTION_NAME
);
7200 arm_allocate_glue_section_space (globals
->bfd_of_glue_owner
,
7201 globals
->vfp11_erratum_glue_size
,
7202 VFP11_ERRATUM_VENEER_SECTION_NAME
);
7204 arm_allocate_glue_section_space (globals
->bfd_of_glue_owner
,
7205 globals
->stm32l4xx_erratum_glue_size
,
7206 STM32L4XX_ERRATUM_VENEER_SECTION_NAME
);
7208 arm_allocate_glue_section_space (globals
->bfd_of_glue_owner
,
7209 globals
->bx_glue_size
,
7210 ARM_BX_GLUE_SECTION_NAME
);
7215 /* Allocate space and symbols for calling a Thumb function from Arm mode.
7216 returns the symbol identifying the stub. */
7218 static struct elf_link_hash_entry
*
7219 record_arm_to_thumb_glue (struct bfd_link_info
* link_info
,
7220 struct elf_link_hash_entry
* h
)
7222 const char * name
= h
->root
.root
.string
;
7225 struct elf_link_hash_entry
* myh
;
7226 struct bfd_link_hash_entry
* bh
;
7227 struct elf32_arm_link_hash_table
* globals
;
7231 globals
= elf32_arm_hash_table (link_info
);
7232 BFD_ASSERT (globals
!= NULL
);
7233 BFD_ASSERT (globals
->bfd_of_glue_owner
!= NULL
);
7235 s
= bfd_get_linker_section
7236 (globals
->bfd_of_glue_owner
, ARM2THUMB_GLUE_SECTION_NAME
);
7238 BFD_ASSERT (s
!= NULL
);
7240 tmp_name
= (char *) bfd_malloc ((bfd_size_type
) strlen (name
)
7241 + strlen (ARM2THUMB_GLUE_ENTRY_NAME
) + 1);
7243 BFD_ASSERT (tmp_name
);
7245 sprintf (tmp_name
, ARM2THUMB_GLUE_ENTRY_NAME
, name
);
7247 myh
= elf_link_hash_lookup
7248 (&(globals
)->root
, tmp_name
, FALSE
, FALSE
, TRUE
);
7252 /* We've already seen this guy. */
7257 /* The only trick here is using hash_table->arm_glue_size as the value.
7258 Even though the section isn't allocated yet, this is where we will be
7259 putting it. The +1 on the value marks that the stub has not been
7260 output yet - not that it is a Thumb function. */
7262 val
= globals
->arm_glue_size
+ 1;
7263 _bfd_generic_link_add_one_symbol (link_info
, globals
->bfd_of_glue_owner
,
7264 tmp_name
, BSF_GLOBAL
, s
, val
,
7265 NULL
, TRUE
, FALSE
, &bh
);
7267 myh
= (struct elf_link_hash_entry
*) bh
;
7268 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
7269 myh
->forced_local
= 1;
7273 if (bfd_link_pic (link_info
)
7274 || globals
->root
.is_relocatable_executable
7275 || globals
->pic_veneer
)
7276 size
= ARM2THUMB_PIC_GLUE_SIZE
;
7277 else if (globals
->use_blx
)
7278 size
= ARM2THUMB_V5_STATIC_GLUE_SIZE
;
7280 size
= ARM2THUMB_STATIC_GLUE_SIZE
;
7283 globals
->arm_glue_size
+= size
;
7288 /* Allocate space for ARMv4 BX veneers. */
7291 record_arm_bx_glue (struct bfd_link_info
* link_info
, int reg
)
7294 struct elf32_arm_link_hash_table
*globals
;
7296 struct elf_link_hash_entry
*myh
;
7297 struct bfd_link_hash_entry
*bh
;
7300 /* BX PC does not need a veneer. */
7304 globals
= elf32_arm_hash_table (link_info
);
7305 BFD_ASSERT (globals
!= NULL
);
7306 BFD_ASSERT (globals
->bfd_of_glue_owner
!= NULL
);
7308 /* Check if this veneer has already been allocated. */
7309 if (globals
->bx_glue_offset
[reg
])
7312 s
= bfd_get_linker_section
7313 (globals
->bfd_of_glue_owner
, ARM_BX_GLUE_SECTION_NAME
);
7315 BFD_ASSERT (s
!= NULL
);
7317 /* Add symbol for veneer. */
7319 bfd_malloc ((bfd_size_type
) strlen (ARM_BX_GLUE_ENTRY_NAME
) + 1);
7321 BFD_ASSERT (tmp_name
);
7323 sprintf (tmp_name
, ARM_BX_GLUE_ENTRY_NAME
, reg
);
7325 myh
= elf_link_hash_lookup
7326 (&(globals
)->root
, tmp_name
, FALSE
, FALSE
, FALSE
);
7328 BFD_ASSERT (myh
== NULL
);
7331 val
= globals
->bx_glue_size
;
7332 _bfd_generic_link_add_one_symbol (link_info
, globals
->bfd_of_glue_owner
,
7333 tmp_name
, BSF_FUNCTION
| BSF_LOCAL
, s
, val
,
7334 NULL
, TRUE
, FALSE
, &bh
);
7336 myh
= (struct elf_link_hash_entry
*) bh
;
7337 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
7338 myh
->forced_local
= 1;
7340 s
->size
+= ARM_BX_VENEER_SIZE
;
7341 globals
->bx_glue_offset
[reg
] = globals
->bx_glue_size
| 2;
7342 globals
->bx_glue_size
+= ARM_BX_VENEER_SIZE
;
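  /* Illustrative note (added commentary): for, say, register r2 the space
     reserved here is later filled with the sequence encoded by the armbx*
     constants above, with the register number patched in:

	tst	r2, #1
	moveq	pc, r2
	bx	r2

     This is the veneer that the R_ARM_V4BX handling (fix_v4bx >= 2, see
     bfd_elf32_arm_process_before_allocation) redirects a "bx r2" to.  */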
7346 /* Add an entry to the code/data map for section SEC. */
7349 elf32_arm_section_map_add (asection
*sec
, char type
, bfd_vma vma
)
7351 struct _arm_elf_section_data
*sec_data
= elf32_arm_section_data (sec
);
7352 unsigned int newidx
;
7354 if (sec_data
->map
== NULL
)
7356 sec_data
->map
= (elf32_arm_section_map
*)
7357 bfd_malloc (sizeof (elf32_arm_section_map
));
7358 sec_data
->mapcount
= 0;
7359 sec_data
->mapsize
= 1;
7362 newidx
= sec_data
->mapcount
++;
7364 if (sec_data
->mapcount
> sec_data
->mapsize
)
7366 sec_data
->mapsize
*= 2;
7367 sec_data
->map
= (elf32_arm_section_map
*)
7368 bfd_realloc_or_free (sec_data
->map
, sec_data
->mapsize
7369 * sizeof (elf32_arm_section_map
));
7374 sec_data
->map
[newidx
].vma
= vma
;
7375 sec_data
->map
[newidx
].type
= type
;
7380 /* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode
7381 veneers are handled for now. */
7384 record_vfp11_erratum_veneer (struct bfd_link_info
*link_info
,
7385 elf32_vfp11_erratum_list
*branch
,
7387 asection
*branch_sec
,
7388 unsigned int offset
)
7391 struct elf32_arm_link_hash_table
*hash_table
;
7393 struct elf_link_hash_entry
*myh
;
7394 struct bfd_link_hash_entry
*bh
;
7396 struct _arm_elf_section_data
*sec_data
;
7397 elf32_vfp11_erratum_list
*newerr
;
7399 hash_table
= elf32_arm_hash_table (link_info
);
7400 BFD_ASSERT (hash_table
!= NULL
);
7401 BFD_ASSERT (hash_table
->bfd_of_glue_owner
!= NULL
);
7403 s
= bfd_get_linker_section
7404 (hash_table
->bfd_of_glue_owner
, VFP11_ERRATUM_VENEER_SECTION_NAME
);
7406 sec_data
= elf32_arm_section_data (s
);
7408 BFD_ASSERT (s
!= NULL
);
7410 tmp_name
= (char *) bfd_malloc ((bfd_size_type
) strlen
7411 (VFP11_ERRATUM_VENEER_ENTRY_NAME
) + 10);
7413 BFD_ASSERT (tmp_name
);
7415 sprintf (tmp_name
, VFP11_ERRATUM_VENEER_ENTRY_NAME
,
7416 hash_table
->num_vfp11_fixes
);
7418 myh
= elf_link_hash_lookup
7419 (&(hash_table
)->root
, tmp_name
, FALSE
, FALSE
, FALSE
);
7421 BFD_ASSERT (myh
== NULL
);
7424 val
= hash_table
->vfp11_erratum_glue_size
;
7425 _bfd_generic_link_add_one_symbol (link_info
, hash_table
->bfd_of_glue_owner
,
7426 tmp_name
, BSF_FUNCTION
| BSF_LOCAL
, s
, val
,
7427 NULL
, TRUE
, FALSE
, &bh
);
7429 myh
= (struct elf_link_hash_entry
*) bh
;
7430 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
7431 myh
->forced_local
= 1;
7433 /* Link veneer back to calling location. */
7434 sec_data
->erratumcount
+= 1;
7435 newerr
= (elf32_vfp11_erratum_list
*)
7436 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list
));
7438 newerr
->type
= VFP11_ERRATUM_ARM_VENEER
;
7440 newerr
->u
.v
.branch
= branch
;
7441 newerr
->u
.v
.id
= hash_table
->num_vfp11_fixes
;
7442 branch
->u
.b
.veneer
= newerr
;
7444 newerr
->next
= sec_data
->erratumlist
;
7445 sec_data
->erratumlist
= newerr
;
7447 /* A symbol for the return from the veneer. */
7448 sprintf (tmp_name
, VFP11_ERRATUM_VENEER_ENTRY_NAME
"_r",
7449 hash_table
->num_vfp11_fixes
);
7451 myh
= elf_link_hash_lookup
7452 (&(hash_table
)->root
, tmp_name
, FALSE
, FALSE
, FALSE
);
7459 _bfd_generic_link_add_one_symbol (link_info
, branch_bfd
, tmp_name
, BSF_LOCAL
,
7460 branch_sec
, val
, NULL
, TRUE
, FALSE
, &bh
);
7462 myh
= (struct elf_link_hash_entry
*) bh
;
7463 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
7464 myh
->forced_local
= 1;
7468 /* Generate a mapping symbol for the veneer section, and explicitly add an
7469 entry for that symbol to the code/data map for the section. */
7470 if (hash_table
->vfp11_erratum_glue_size
== 0)
7473 /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it
7474 ever requires this erratum fix. */
7475 _bfd_generic_link_add_one_symbol (link_info
,
7476 hash_table
->bfd_of_glue_owner
, "$a",
7477 BSF_LOCAL
, s
, 0, NULL
,
7480 myh
= (struct elf_link_hash_entry
*) bh
;
7481 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_NOTYPE
);
7482 myh
->forced_local
= 1;
7484 /* The elf32_arm_init_maps function only cares about symbols from input
7485 BFDs. We must make a note of this generated mapping symbol
7486 ourselves so that code byteswapping works properly in
7487 elf32_arm_write_section. */
7488 elf32_arm_section_map_add (s
, 'a', 0);
7491 s
->size
+= VFP11_ERRATUM_VENEER_SIZE
;
7492 hash_table
->vfp11_erratum_glue_size
+= VFP11_ERRATUM_VENEER_SIZE
;
7493 hash_table
->num_vfp11_fixes
++;
7495 /* The offset of the veneer. */
/* Record information about a STM32L4XX STM erratum veneer.  Only THUMB-mode
   veneers need to be handled, because the erratum is only relevant to
   Cortex-M, which executes Thumb code only.  */
7503 record_stm32l4xx_erratum_veneer (struct bfd_link_info
*link_info
,
7504 elf32_stm32l4xx_erratum_list
*branch
,
7506 asection
*branch_sec
,
7507 unsigned int offset
,
7508 bfd_size_type veneer_size
)
7511 struct elf32_arm_link_hash_table
*hash_table
;
7513 struct elf_link_hash_entry
*myh
;
7514 struct bfd_link_hash_entry
*bh
;
7516 struct _arm_elf_section_data
*sec_data
;
7517 elf32_stm32l4xx_erratum_list
*newerr
;
7519 hash_table
= elf32_arm_hash_table (link_info
);
7520 BFD_ASSERT (hash_table
!= NULL
);
7521 BFD_ASSERT (hash_table
->bfd_of_glue_owner
!= NULL
);
7523 s
= bfd_get_linker_section
7524 (hash_table
->bfd_of_glue_owner
, STM32L4XX_ERRATUM_VENEER_SECTION_NAME
);
7526 BFD_ASSERT (s
!= NULL
);
7528 sec_data
= elf32_arm_section_data (s
);
7530 tmp_name
= (char *) bfd_malloc ((bfd_size_type
) strlen
7531 (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME
) + 10);
7533 BFD_ASSERT (tmp_name
);
7535 sprintf (tmp_name
, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME
,
7536 hash_table
->num_stm32l4xx_fixes
);
7538 myh
= elf_link_hash_lookup
7539 (&(hash_table
)->root
, tmp_name
, FALSE
, FALSE
, FALSE
);
7541 BFD_ASSERT (myh
== NULL
);
7544 val
= hash_table
->stm32l4xx_erratum_glue_size
;
7545 _bfd_generic_link_add_one_symbol (link_info
, hash_table
->bfd_of_glue_owner
,
7546 tmp_name
, BSF_FUNCTION
| BSF_LOCAL
, s
, val
,
7547 NULL
, TRUE
, FALSE
, &bh
);
7549 myh
= (struct elf_link_hash_entry
*) bh
;
7550 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
7551 myh
->forced_local
= 1;
7553 /* Link veneer back to calling location. */
7554 sec_data
->stm32l4xx_erratumcount
+= 1;
7555 newerr
= (elf32_stm32l4xx_erratum_list
*)
7556 bfd_zmalloc (sizeof (elf32_stm32l4xx_erratum_list
));
7558 newerr
->type
= STM32L4XX_ERRATUM_VENEER
;
7560 newerr
->u
.v
.branch
= branch
;
7561 newerr
->u
.v
.id
= hash_table
->num_stm32l4xx_fixes
;
7562 branch
->u
.b
.veneer
= newerr
;
7564 newerr
->next
= sec_data
->stm32l4xx_erratumlist
;
7565 sec_data
->stm32l4xx_erratumlist
= newerr
;
7567 /* A symbol for the return from the veneer. */
7568 sprintf (tmp_name
, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME
"_r",
7569 hash_table
->num_stm32l4xx_fixes
);
7571 myh
= elf_link_hash_lookup
7572 (&(hash_table
)->root
, tmp_name
, FALSE
, FALSE
, FALSE
);
7579 _bfd_generic_link_add_one_symbol (link_info
, branch_bfd
, tmp_name
, BSF_LOCAL
,
7580 branch_sec
, val
, NULL
, TRUE
, FALSE
, &bh
);
7582 myh
= (struct elf_link_hash_entry
*) bh
;
7583 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
7584 myh
->forced_local
= 1;
7588 /* Generate a mapping symbol for the veneer section, and explicitly add an
7589 entry for that symbol to the code/data map for the section. */
7590 if (hash_table
->stm32l4xx_erratum_glue_size
== 0)
7593 /* Creates a THUMB symbol since there is no other choice. */
7594 _bfd_generic_link_add_one_symbol (link_info
,
7595 hash_table
->bfd_of_glue_owner
, "$t",
7596 BSF_LOCAL
, s
, 0, NULL
,
7599 myh
= (struct elf_link_hash_entry
*) bh
;
7600 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_NOTYPE
);
7601 myh
->forced_local
= 1;
7603 /* The elf32_arm_init_maps function only cares about symbols from input
7604 BFDs. We must make a note of this generated mapping symbol
7605 ourselves so that code byteswapping works properly in
7606 elf32_arm_write_section. */
7607 elf32_arm_section_map_add (s
, 't', 0);
7610 s
->size
+= veneer_size
;
7611 hash_table
->stm32l4xx_erratum_glue_size
+= veneer_size
;
7612 hash_table
->num_stm32l4xx_fixes
++;
7614 /* The offset of the veneer. */
7618 #define ARM_GLUE_SECTION_FLAGS \
7619 (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
7620 | SEC_READONLY | SEC_LINKER_CREATED)
7622 /* Create a fake section for use by the ARM backend of the linker. */
7625 arm_make_glue_section (bfd
* abfd
, const char * name
)
7629 sec
= bfd_get_linker_section (abfd
, name
);
7634 sec
= bfd_make_section_anyway_with_flags (abfd
, name
, ARM_GLUE_SECTION_FLAGS
);
7637 || !bfd_set_section_alignment (abfd
, sec
, 2))
7640 /* Set the gc mark to prevent the section from being removed by garbage
7641 collection, despite the fact that no relocs refer to this section. */
7647 /* Set size of .plt entries. This function is called from the
7648 linker scripts in ld/emultempl/{armelf}.em. */
7651 bfd_elf32_arm_use_long_plt (void)
7653 elf32_arm_use_long_plt_entry
= TRUE
;
7656 /* Add the glue sections to ABFD. This function is called from the
7657 linker scripts in ld/emultempl/{armelf}.em. */
7660 bfd_elf32_arm_add_glue_sections_to_bfd (bfd
*abfd
,
7661 struct bfd_link_info
*info
)
7663 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (info
);
7664 bfd_boolean dostm32l4xx
= globals
7665 && globals
->stm32l4xx_fix
!= BFD_ARM_STM32L4XX_FIX_NONE
;
7666 bfd_boolean addglue
;
7668 /* If we are only performing a partial
7669 link do not bother adding the glue. */
7670 if (bfd_link_relocatable (info
))
7673 addglue
= arm_make_glue_section (abfd
, ARM2THUMB_GLUE_SECTION_NAME
)
7674 && arm_make_glue_section (abfd
, THUMB2ARM_GLUE_SECTION_NAME
)
7675 && arm_make_glue_section (abfd
, VFP11_ERRATUM_VENEER_SECTION_NAME
)
7676 && arm_make_glue_section (abfd
, ARM_BX_GLUE_SECTION_NAME
);
7682 && arm_make_glue_section (abfd
, STM32L4XX_ERRATUM_VENEER_SECTION_NAME
);
7685 /* Mark output sections of veneers needing a dedicated one with SEC_KEEP. This
7686 ensures they are not marked for deletion by
7687 strip_excluded_output_sections () when veneers are going to be created
7688 later. Not doing so would trigger assert on empty section size in
7689 lang_size_sections_1 (). */
7692 bfd_elf32_arm_keep_private_stub_output_sections (struct bfd_link_info
*info
)
7694 enum elf32_arm_stub_type stub_type
;
7696 /* If we are only performing a partial
7697 link do not bother adding the glue. */
7698 if (bfd_link_relocatable (info
))
7701 for (stub_type
= arm_stub_none
+ 1; stub_type
< max_stub_type
; stub_type
++)
7704 const char *out_sec_name
;
7706 if (!arm_dedicated_stub_output_section_required (stub_type
))
7709 out_sec_name
= arm_dedicated_stub_output_section_name (stub_type
);
7710 out_sec
= bfd_get_section_by_name (info
->output_bfd
, out_sec_name
);
7711 if (out_sec
!= NULL
)
7712 out_sec
->flags
|= SEC_KEEP
;
7716 /* Select a BFD to be used to hold the sections used by the glue code.
7717 This function is called from the linker scripts in ld/emultempl/
7721 bfd_elf32_arm_get_bfd_for_interworking (bfd
*abfd
, struct bfd_link_info
*info
)
7723 struct elf32_arm_link_hash_table
*globals
;
7725 /* If we are only performing a partial link
7726 do not bother getting a bfd to hold the glue. */
7727 if (bfd_link_relocatable (info
))
7730 /* Make sure we don't attach the glue sections to a dynamic object. */
7731 BFD_ASSERT (!(abfd
->flags
& DYNAMIC
));
7733 globals
= elf32_arm_hash_table (info
);
7734 BFD_ASSERT (globals
!= NULL
);
7736 if (globals
->bfd_of_glue_owner
!= NULL
)
7739 /* Save the bfd for later use. */
7740 globals
->bfd_of_glue_owner
= abfd
;
7746 check_use_blx (struct elf32_arm_link_hash_table
*globals
)
7750 cpu_arch
= bfd_elf_get_obj_attr_int (globals
->obfd
, OBJ_ATTR_PROC
,
7753 if (globals
->fix_arm1176
)
7755 if (cpu_arch
== TAG_CPU_ARCH_V6T2
|| cpu_arch
> TAG_CPU_ARCH_V6K
)
7756 globals
->use_blx
= 1;
7760 if (cpu_arch
> TAG_CPU_ARCH_V4T
)
7761 globals
->use_blx
= 1;
7766 bfd_elf32_arm_process_before_allocation (bfd
*abfd
,
7767 struct bfd_link_info
*link_info
)
7769 Elf_Internal_Shdr
*symtab_hdr
;
7770 Elf_Internal_Rela
*internal_relocs
= NULL
;
7771 Elf_Internal_Rela
*irel
, *irelend
;
7772 bfd_byte
*contents
= NULL
;
7775 struct elf32_arm_link_hash_table
*globals
;
7777 /* If we are only performing a partial link do not bother
7778 to construct any glue. */
7779 if (bfd_link_relocatable (link_info
))
7782 /* Here we have a bfd that is to be included on the link. We have a
7783 hook to do reloc rummaging, before section sizes are nailed down. */
7784 globals
= elf32_arm_hash_table (link_info
);
7785 BFD_ASSERT (globals
!= NULL
);
7787 check_use_blx (globals
);
7789 if (globals
->byteswap_code
&& !bfd_big_endian (abfd
))
7791 _bfd_error_handler (_("%pB: BE8 images only valid in big-endian mode"),
7796 /* PR 5398: If we have not decided to include any loadable sections in
7797 the output then we will not have a glue owner bfd. This is OK, it
7798 just means that there is nothing else for us to do here. */
7799 if (globals
->bfd_of_glue_owner
== NULL
)
7802 /* Rummage around all the relocs and map the glue vectors. */
7803 sec
= abfd
->sections
;
7808 for (; sec
!= NULL
; sec
= sec
->next
)
7810 if (sec
->reloc_count
== 0)
7813 if ((sec
->flags
& SEC_EXCLUDE
) != 0)
7816 symtab_hdr
= & elf_symtab_hdr (abfd
);
7818 /* Load the relocs. */
7820 = _bfd_elf_link_read_relocs (abfd
, sec
, NULL
, NULL
, FALSE
);
7822 if (internal_relocs
== NULL
)
7825 irelend
= internal_relocs
+ sec
->reloc_count
;
7826 for (irel
= internal_relocs
; irel
< irelend
; irel
++)
7829 unsigned long r_index
;
7831 struct elf_link_hash_entry
*h
;
7833 r_type
= ELF32_R_TYPE (irel
->r_info
);
7834 r_index
= ELF32_R_SYM (irel
->r_info
);
7836 /* These are the only relocation types we care about. */
7837 if ( r_type
!= R_ARM_PC24
7838 && (r_type
!= R_ARM_V4BX
|| globals
->fix_v4bx
< 2))
7841 /* Get the section contents if we haven't done so already. */
7842 if (contents
== NULL
)
7844 /* Get cached copy if it exists. */
7845 if (elf_section_data (sec
)->this_hdr
.contents
!= NULL
)
7846 contents
= elf_section_data (sec
)->this_hdr
.contents
;
7849 /* Go get them off disk. */
7850 if (! bfd_malloc_and_get_section (abfd
, sec
, &contents
))
7855 if (r_type
== R_ARM_V4BX
)
7859 reg
= bfd_get_32 (abfd
, contents
+ irel
->r_offset
) & 0xf;
7860 record_arm_bx_glue (link_info
, reg
);
7864 /* If the relocation is not against a symbol it cannot concern us. */
7867 /* We don't care about local symbols. */
7868 if (r_index
< symtab_hdr
->sh_info
)
7871 /* This is an external symbol. */
7872 r_index
-= symtab_hdr
->sh_info
;
7873 h
= (struct elf_link_hash_entry
*)
7874 elf_sym_hashes (abfd
)[r_index
];
7876 /* If the relocation is against a static symbol it must be within
7877 the current section and so cannot be a cross ARM/Thumb relocation. */
7881 /* If the call will go through a PLT entry then we do not need
7883 if (globals
->root
.splt
!= NULL
&& h
->plt
.offset
!= (bfd_vma
) -1)
7889 /* This one is a call from arm code. We need to look up
7890 the target of the call. If it is a thumb target, we
7892 if (ARM_GET_SYM_BRANCH_TYPE (h
->target_internal
)
7893 == ST_BRANCH_TO_THUMB
)
7894 record_arm_to_thumb_glue (link_info
, h
);
7902 if (contents
!= NULL
7903 && elf_section_data (sec
)->this_hdr
.contents
!= contents
)
7907 if (internal_relocs
!= NULL
7908 && elf_section_data (sec
)->relocs
!= internal_relocs
)
7909 free (internal_relocs
);
7910 internal_relocs
= NULL
;
7916 if (contents
!= NULL
7917 && elf_section_data (sec
)->this_hdr
.contents
!= contents
)
7919 if (internal_relocs
!= NULL
7920 && elf_section_data (sec
)->relocs
!= internal_relocs
)
7921 free (internal_relocs
);
7928 /* Initialise maps of ARM/Thumb/data for input BFDs. */
7931 bfd_elf32_arm_init_maps (bfd
*abfd
)
7933 Elf_Internal_Sym
*isymbuf
;
7934 Elf_Internal_Shdr
*hdr
;
7935 unsigned int i
, localsyms
;
7937 /* PR 7093: Make sure that we are dealing with an arm elf binary. */
7938 if (! is_arm_elf (abfd
))
7941 if ((abfd
->flags
& DYNAMIC
) != 0)
7944 hdr
= & elf_symtab_hdr (abfd
);
7945 localsyms
= hdr
->sh_info
;
7947 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
7948 should contain the number of local symbols, which should come before any
7949 global symbols. Mapping symbols are always local. */
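   /* Added note: the mapping symbols referred to above are the ARM ELF
      "$a", "$t" and "$d" local symbols, marking the start of ARM code,
      Thumb code and data within a section respectively; only their second
      character is stored in the section map (see the name[1] use below).  */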
7950 isymbuf
= bfd_elf_get_elf_syms (abfd
, hdr
, localsyms
, 0, NULL
, NULL
,
7953 /* No internal symbols read? Skip this BFD. */
7954 if (isymbuf
== NULL
)
7957 for (i
= 0; i
< localsyms
; i
++)
7959 Elf_Internal_Sym
*isym
= &isymbuf
[i
];
7960 asection
*sec
= bfd_section_from_elf_index (abfd
, isym
->st_shndx
);
7964 && ELF_ST_BIND (isym
->st_info
) == STB_LOCAL
)
7966 name
= bfd_elf_string_from_elf_section (abfd
,
7967 hdr
->sh_link
, isym
->st_name
);
7969 if (bfd_is_arm_special_symbol_name (name
,
7970 BFD_ARM_SPECIAL_SYM_TYPE_MAP
))
7971 elf32_arm_section_map_add (sec
, name
[1], isym
->st_value
);
7977 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
7978 say what they wanted. */
7981 bfd_elf32_arm_set_cortex_a8_fix (bfd
*obfd
, struct bfd_link_info
*link_info
)
7983 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (link_info
);
7984 obj_attribute
*out_attr
= elf_known_obj_attributes_proc (obfd
);
7986 if (globals
== NULL
)
7989 if (globals
->fix_cortex_a8
== -1)
7991 /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */
7992 if (out_attr
[Tag_CPU_arch
].i
== TAG_CPU_ARCH_V7
7993 && (out_attr
[Tag_CPU_arch_profile
].i
== 'A'
7994 || out_attr
[Tag_CPU_arch_profile
].i
== 0))
7995 globals
->fix_cortex_a8
= 1;
7997 globals
->fix_cortex_a8
= 0;
8003 bfd_elf32_arm_set_vfp11_fix (bfd
*obfd
, struct bfd_link_info
*link_info
)
8005 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (link_info
);
8006 obj_attribute
*out_attr
= elf_known_obj_attributes_proc (obfd
);
8008 if (globals
== NULL
)
8010 /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix. */
8011 if (out_attr
[Tag_CPU_arch
].i
>= TAG_CPU_ARCH_V7
)
8013 switch (globals
->vfp11_fix
)
8015 case BFD_ARM_VFP11_FIX_DEFAULT
:
8016 case BFD_ARM_VFP11_FIX_NONE
:
8017 globals
->vfp11_fix
= BFD_ARM_VFP11_FIX_NONE
;
8021 /* Give a warning, but do as the user requests anyway. */
8022 _bfd_error_handler (_("%pB: warning: selected VFP11 erratum "
8023 "workaround is not necessary for target architecture"), obfd
);
  else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
    /* For earlier architectures, we might need the workaround, but do not
       enable it by default.  If the user is running with broken hardware,
       the erratum fix must be enabled explicitly.  */
    globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
8034 bfd_elf32_arm_set_stm32l4xx_fix (bfd
*obfd
, struct bfd_link_info
*link_info
)
8036 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (link_info
);
8037 obj_attribute
*out_attr
= elf_known_obj_attributes_proc (obfd
);
8039 if (globals
== NULL
)
8042 /* We assume only Cortex-M4 may require the fix. */
8043 if (out_attr
[Tag_CPU_arch
].i
!= TAG_CPU_ARCH_V7E_M
8044 || out_attr
[Tag_CPU_arch_profile
].i
!= 'M')
8046 if (globals
->stm32l4xx_fix
!= BFD_ARM_STM32L4XX_FIX_NONE
)
8047 /* Give a warning, but do as the user requests anyway. */
8049 (_("%pB: warning: selected STM32L4XX erratum "
8050 "workaround is not necessary for target architecture"), obfd
);
8054 enum bfd_arm_vfp11_pipe
/* Return a VFP register number.  This is encoded as RX:X for single-precision
   registers, or X:RX for double-precision registers, where RX is the group of
   four bits in the instruction encoding and X is the single extension bit.
   RX and X fields are specified using their lowest (starting) bit.  The
   return value is:

     0...31: single-precision registers s0...s31
     32...63: double-precision registers d0...d31.

   Although X should be zero for VFP11 (encoding d0...d15 only), we might
   encounter VFP3 instructions, so we allow the full range for DP registers.  */

static unsigned int
bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx,
		     unsigned int x)
{
  if (is_double)
    return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
  else
    return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
}
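/* Worked example (added commentary): with IS_DOUBLE false, an RX field of
   0b0011 and an X bit of 1 the function returns (3 << 1) | 1 = 7, i.e. s7;
   with IS_DOUBLE true the same fields give (3 | (1 << 4)) + 32 = 51, i.e.
   d19.  */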
/* Set bits in *WMASK according to a register number REG as encoded by
   bfd_arm_vfp11_regno ().  Ignore d16-d31.  */

static void
bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
{
  if (reg < 32)
    *wmask |= 1 << reg;
  else if (reg < 48)
    *wmask |= 3 << ((reg - 32) * 2);
}
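/* Worked example (added commentary): a double-precision register dN, returned
   as 32 + N by bfd_arm_vfp11_regno, sets write-mask bits 2N and 2N + 1, which
   are exactly the two single-precision registers s(2N) and s(2N + 1) that dN
   overlaps.  */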
/* Return TRUE if WMASK overwrites anything in REGS.  */

static bfd_boolean
bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
{
  int i;

  for (i = 0; i < numregs; i++)
    {
      unsigned int reg = regs[i];

      if (reg < 32 && (wmask & (1 << reg)) != 0)
	return TRUE;

      reg -= 32;
      if (reg >= 16)
	continue;

      if ((wmask & (3 << (reg * 2))) != 0)
	return TRUE;
    }

  return FALSE;
}
/* In this function, we're interested in two things: finding input registers
   for VFP data-processing instructions, and finding the set of registers which
   arbitrary VFP instructions may write to.  We use a 32-bit unsigned int to
   hold the written set, so FLDM etc. are easy to deal with (we're only
   interested in 32 SP registers or 16 DP registers, due to the VFP version
   implemented by the chip in question).  DP registers are marked by setting
   both corresponding SP registers in the write mask.  */
8130 static enum bfd_arm_vfp11_pipe
8131 bfd_arm_vfp11_insn_decode (unsigned int insn
, unsigned int *destmask
, int *regs
,
8134 enum bfd_arm_vfp11_pipe vpipe
= VFP11_BAD
;
8135 bfd_boolean is_double
= ((insn
& 0xf00) == 0xb00) ? 1 : 0;
8137 if ((insn
& 0x0f000e10) == 0x0e000a00) /* A data-processing insn. */
8140 unsigned int fd
= bfd_arm_vfp11_regno (insn
, is_double
, 12, 22);
8141 unsigned int fm
= bfd_arm_vfp11_regno (insn
, is_double
, 0, 5);
8143 pqrs
= ((insn
& 0x00800000) >> 20)
8144 | ((insn
& 0x00300000) >> 19)
8145 | ((insn
& 0x00000040) >> 6);
8149 case 0: /* fmac[sd]. */
8150 case 1: /* fnmac[sd]. */
8151 case 2: /* fmsc[sd]. */
8152 case 3: /* fnmsc[sd]. */
8154 bfd_arm_vfp11_write_mask (destmask
, fd
);
8156 regs
[1] = bfd_arm_vfp11_regno (insn
, is_double
, 16, 7); /* Fn. */
8161 case 4: /* fmul[sd]. */
8162 case 5: /* fnmul[sd]. */
8163 case 6: /* fadd[sd]. */
8164 case 7: /* fsub[sd]. */
8168 case 8: /* fdiv[sd]. */
8171 bfd_arm_vfp11_write_mask (destmask
, fd
);
8172 regs
[0] = bfd_arm_vfp11_regno (insn
, is_double
, 16, 7); /* Fn. */
        case 15: /* extended opcode.  */
          {
            unsigned int extn = ((insn >> 15) & 0x1e)
                                | ((insn >> 7) & 1);

            switch (extn)
              {
              case 0: /* fcpy[sd].  */
              case 1: /* fabs[sd].  */
              case 2: /* fneg[sd].  */
              case 8: /* fcmp[sd].  */
              case 9: /* fcmpe[sd].  */
              case 10: /* fcmpz[sd].  */
              case 11: /* fcmpez[sd].  */
              case 16: /* fuito[sd].  */
              case 17: /* fsito[sd].  */
              case 24: /* ftoui[sd].  */
              case 25: /* ftouiz[sd].  */
              case 26: /* ftosi[sd].  */
              case 27: /* ftosiz[sd].  */
                /* These instructions will not bounce due to underflow.  */
                break;

              case 3: /* fsqrt[sd].  */
                /* fsqrt cannot underflow, but it can (perhaps) overwrite
                   registers to cause the erratum in previous instructions.  */
                bfd_arm_vfp11_write_mask (destmask, fd);
                break;

              case 15: /* fcvt{ds,sd}.  */
                bfd_arm_vfp11_write_mask (destmask, fd);

                /* Only FCVTSD can underflow.  */
                if ((insn & 0x100) != 0)
  /* Two-register transfer.  */
  else if ((insn & 0x0fe00ed0) == 0x0c400a10)
    {
      unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);

      if ((insn & 0x100000) == 0)
        {
          if (is_double)
            bfd_arm_vfp11_write_mask (destmask, fm);
          else
            {
              bfd_arm_vfp11_write_mask (destmask, fm);
              bfd_arm_vfp11_write_mask (destmask, fm + 1);
            }
        }

      vpipe = VFP11_LS;
    }
  else if ((insn & 0x0e100e00) == 0x0c100a00)  /* A load insn.  */
    {
      int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
      unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);

      switch (puw)
        {
        case 0: /* Two-reg transfer.  We should catch these above.  */
          abort ();

        case 2: /* fldm[sdx].  */
          {
            unsigned int i, offset = insn & 0xff;

            for (i = fd; i < fd + offset; i++)
              bfd_arm_vfp11_write_mask (destmask, i);
          }
          break;

        case 4: /* fld[sd].  */
          bfd_arm_vfp11_write_mask (destmask, fd);
          break;
        }

      vpipe = VFP11_LS;
    }
  /* Single-register transfer.  Note L==0.  */
  else if ((insn & 0x0f100e10) == 0x0e000a10)
    {
      unsigned int opcode = (insn >> 21) & 7;
      unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);

      switch (opcode)
        {
        case 0: /* fmsr/fmdlr.  */
        case 1: /* fmdhr.  */
          /* Mark fmdhr and fmdlr as writing to the whole of the DP
             destination register.  I don't know if this is exactly right,
             but it is the conservative choice.  */
          bfd_arm_vfp11_write_mask (destmask, fn);
          break;
        }

      vpipe = VFP11_LS;
    }

  return vpipe;
}

static int elf32_arm_compare_mapping (const void * a, const void * b);
/* Look for potentially-troublesome code sequences which might trigger the
   VFP11 denormal/antidependency erratum.  See, e.g., the ARM1136 errata sheet
   (available from ARM) for details of the erratum.  A short version is
   described in ld.texinfo.  */
bfd_boolean
bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
{
  asection *sec;
  bfd_byte *contents = NULL;
  int state = 0;
  int regs[3], numregs = 0;
  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
  int use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);

  if (globals == NULL)
    return FALSE;
  /* We use a simple FSM to match troublesome VFP11 instruction sequences.
     The states transition as follows:

     0 -> 1 (vector) or 0 -> 2 (scalar)
         A VFP FMAC-pipeline instruction has been seen.  Fill
         regs[0]..regs[numregs-1] with its input operands.  Remember this
         instruction in 'first_fmac'.

     1 -> 2
         Any instruction, except for a VFP instruction which overwrites
         regs[*].

     1 -> 3 or
     2 -> 3
         A VFP instruction has been seen which overwrites any of regs[*].
         We must make a veneer!  Reset state to 0 before examining next
         instruction.

     2 -> 0
         If we fail to match anything in state 2, reset to state 0 and reset
         the instruction pointer to the instruction after 'first_fmac'.

     If the VFP11 vector mode is in use, there must be at least two unrelated
     instructions between anti-dependent VFP11 instructions to properly avoid
     triggering the erratum, hence the use of the extra state 1.  */
  /* If we are only performing a partial link do not bother
     to construct any glue.  */
  if (bfd_link_relocatable (link_info))
    return TRUE;

  /* Skip if this bfd does not correspond to an ELF image.  */
  if (! is_arm_elf (abfd))
    return TRUE;

  /* We should have chosen a fix type by the time we get here.  */
  BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);

  if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
    return TRUE;

  /* Skip this BFD if it corresponds to an executable or dynamic object.  */
  if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
    return TRUE;
  for (sec = abfd->sections; sec != NULL; sec = sec->next)
    {
      unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
      struct _arm_elf_section_data *sec_data;

      /* If we don't have executable progbits, we're not interested in this
         section.  Also skip if section is to be excluded.  */
      if (elf_section_type (sec) != SHT_PROGBITS
          || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
          || (sec->flags & SEC_EXCLUDE) != 0
          || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
          || sec->output_section == bfd_abs_section_ptr
          || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
        continue;

      sec_data = elf32_arm_section_data (sec);

      if (sec_data->mapcount == 0)
        continue;

      if (elf_section_data (sec)->this_hdr.contents != NULL)
        contents = elf_section_data (sec)->this_hdr.contents;
      else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
        goto error_return;

      qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
             elf32_arm_compare_mapping);
      for (span = 0; span < sec_data->mapcount; span++)
        {
          unsigned int span_start = sec_data->map[span].vma;
          unsigned int span_end = (span == sec_data->mapcount - 1)
                                  ? sec->size : sec_data->map[span + 1].vma;
          char span_type = sec_data->map[span].type;

          /* FIXME: Only ARM mode is supported at present.  We may need to
             support Thumb-2 mode also at some point.  */
          if (span_type != 'a')
            continue;

          for (i = span_start; i < span_end;)
            {
              unsigned int next_i = i + 4;
              unsigned int insn = bfd_big_endian (abfd)
                ? (contents[i] << 24)
                  | (contents[i + 1] << 16)
                  | (contents[i + 2] << 8)
                  | contents[i + 3]
                : (contents[i + 3] << 24)
                  | (contents[i + 2] << 16)
                  | (contents[i + 1] << 8)
                  | contents[i];
              unsigned int writemask = 0;
              enum bfd_arm_vfp11_pipe vpipe;
8439 vpipe
= bfd_arm_vfp11_insn_decode (insn
, &writemask
, regs
,
8441 /* I'm assuming the VFP11 erratum can trigger with denorm
8442 operands on either the FMAC or the DS pipeline. This might
8443 lead to slightly overenthusiastic veneer insertion. */
8444 if (vpipe
== VFP11_FMAC
|| vpipe
== VFP11_DS
)
8446 state
= use_vector
? 1 : 2;
8448 veneer_of_insn
= insn
;
8454 int other_regs
[3], other_numregs
;
8455 vpipe
= bfd_arm_vfp11_insn_decode (insn
, &writemask
,
8458 if (vpipe
!= VFP11_BAD
8459 && bfd_arm_vfp11_antidependency (writemask
, regs
,
8469 int other_regs
[3], other_numregs
;
8470 vpipe
= bfd_arm_vfp11_insn_decode (insn
, &writemask
,
8473 if (vpipe
!= VFP11_BAD
8474 && bfd_arm_vfp11_antidependency (writemask
, regs
,
8480 next_i
= first_fmac
+ 4;
8486 abort (); /* Should be unreachable. */
8491 elf32_vfp11_erratum_list
*newerr
=(elf32_vfp11_erratum_list
*)
8492 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list
));
8494 elf32_arm_section_data (sec
)->erratumcount
+= 1;
8496 newerr
->u
.b
.vfp_insn
= veneer_of_insn
;
8501 newerr
->type
= VFP11_ERRATUM_BRANCH_TO_ARM_VENEER
;
8508 record_vfp11_erratum_veneer (link_info
, newerr
, abfd
, sec
,
8513 newerr
->next
= sec_data
->erratumlist
;
8514 sec_data
->erratumlist
= newerr
;
8523 if (contents
!= NULL
8524 && elf_section_data (sec
)->this_hdr
.contents
!= contents
)
8532 if (contents
!= NULL
8533 && elf_section_data (sec
)->this_hdr
.contents
!= contents
)
8539 /* Find virtual-memory addresses for VFP11 erratum veneers and return locations
8540 after sections have been laid out, using specially-named symbols. */
8543 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd
*abfd
,
8544 struct bfd_link_info
*link_info
)
8547 struct elf32_arm_link_hash_table
*globals
;
8550 if (bfd_link_relocatable (link_info
))
8553 /* Skip if this bfd does not correspond to an ELF image. */
8554 if (! is_arm_elf (abfd
))
8557 globals
= elf32_arm_hash_table (link_info
);
8558 if (globals
== NULL
)
8561 tmp_name
= (char *) bfd_malloc ((bfd_size_type
) strlen
8562 (VFP11_ERRATUM_VENEER_ENTRY_NAME
) + 10);
8564 for (sec
= abfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
8566 struct _arm_elf_section_data
*sec_data
= elf32_arm_section_data (sec
);
8567 elf32_vfp11_erratum_list
*errnode
= sec_data
->erratumlist
;
8569 for (; errnode
!= NULL
; errnode
= errnode
->next
)
8571 struct elf_link_hash_entry
*myh
;
8574 switch (errnode
->type
)
8576 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER
:
8577 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER
:
8578 /* Find veneer symbol. */
8579 sprintf (tmp_name
, VFP11_ERRATUM_VENEER_ENTRY_NAME
,
8580 errnode
->u
.b
.veneer
->u
.v
.id
);
8582 myh
= elf_link_hash_lookup
8583 (&(globals
)->root
, tmp_name
, FALSE
, FALSE
, TRUE
);
8586 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8587 abfd
, "VFP11", tmp_name
);
8589 vma
= myh
->root
.u
.def
.section
->output_section
->vma
8590 + myh
->root
.u
.def
.section
->output_offset
8591 + myh
->root
.u
.def
.value
;
8593 errnode
->u
.b
.veneer
->vma
= vma
;
8596 case VFP11_ERRATUM_ARM_VENEER
:
8597 case VFP11_ERRATUM_THUMB_VENEER
:
8598 /* Find return location. */
8599 sprintf (tmp_name
, VFP11_ERRATUM_VENEER_ENTRY_NAME
"_r",
8602 myh
= elf_link_hash_lookup
8603 (&(globals
)->root
, tmp_name
, FALSE
, FALSE
, TRUE
);
8606 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8607 abfd
, "VFP11", tmp_name
);
8609 vma
= myh
->root
.u
.def
.section
->output_section
->vma
8610 + myh
->root
.u
.def
.section
->output_offset
8611 + myh
->root
.u
.def
.value
;
8613 errnode
->u
.v
.branch
->vma
= vma
;
8625 /* Find virtual-memory addresses for STM32L4XX erratum veneers and
8626 return locations after sections have been laid out, using
8627 specially-named symbols. */
8630 bfd_elf32_arm_stm32l4xx_fix_veneer_locations (bfd
*abfd
,
8631 struct bfd_link_info
*link_info
)
8634 struct elf32_arm_link_hash_table
*globals
;
8637 if (bfd_link_relocatable (link_info
))
8640 /* Skip if this bfd does not correspond to an ELF image. */
8641 if (! is_arm_elf (abfd
))
8644 globals
= elf32_arm_hash_table (link_info
);
8645 if (globals
== NULL
)
8648 tmp_name
= (char *) bfd_malloc ((bfd_size_type
) strlen
8649 (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME
) + 10);
8651 for (sec
= abfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
8653 struct _arm_elf_section_data
*sec_data
= elf32_arm_section_data (sec
);
8654 elf32_stm32l4xx_erratum_list
*errnode
= sec_data
->stm32l4xx_erratumlist
;
8656 for (; errnode
!= NULL
; errnode
= errnode
->next
)
8658 struct elf_link_hash_entry
*myh
;
8661 switch (errnode
->type
)
8663 case STM32L4XX_ERRATUM_BRANCH_TO_VENEER
:
8664 /* Find veneer symbol. */
8665 sprintf (tmp_name
, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME
,
8666 errnode
->u
.b
.veneer
->u
.v
.id
);
8668 myh
= elf_link_hash_lookup
8669 (&(globals
)->root
, tmp_name
, FALSE
, FALSE
, TRUE
);
8672 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8673 abfd
, "STM32L4XX", tmp_name
);
8675 vma
= myh
->root
.u
.def
.section
->output_section
->vma
8676 + myh
->root
.u
.def
.section
->output_offset
8677 + myh
->root
.u
.def
.value
;
8679 errnode
->u
.b
.veneer
->vma
= vma
;
8682 case STM32L4XX_ERRATUM_VENEER
:
8683 /* Find return location. */
8684 sprintf (tmp_name
, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME
"_r",
8687 myh
= elf_link_hash_lookup
8688 (&(globals
)->root
, tmp_name
, FALSE
, FALSE
, TRUE
);
8691 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8692 abfd
, "STM32L4XX", tmp_name
);
8694 vma
= myh
->root
.u
.def
.section
->output_section
->vma
8695 + myh
->root
.u
.def
.section
->output_offset
8696 + myh
->root
.u
.def
.value
;
8698 errnode
->u
.v
.branch
->vma
= vma
;
static inline bfd_boolean
is_thumb2_ldmia (const insn32 insn)
{
  /* Encoding T2: LDM<c>.W <Rn>{!},<registers>
     1110 - 1000 - 10W1 - rrrr - PM (0) l - llll - llll - llll.  */
  return (insn & 0xffd02000) == 0xe8900000;
}

static inline bfd_boolean
is_thumb2_ldmdb (const insn32 insn)
{
  /* Encoding T1: LDMDB<c> <Rn>{!},<registers>
     1110 - 1001 - 00W1 - rrrr - PM (0) l - llll - llll - llll.  */
  return (insn & 0xffd02000) == 0xe9100000;
}

static inline bfd_boolean
is_thumb2_vldm (const insn32 insn)
{
  /* A6.5 Extension register load or store instruction.
     We look for SP 32-bit and DP 64-bit registers.
     Encoding T1 VLDM{mode}<c> <Rn>{!}, <list>
     <list> is consecutive 64-bit registers
     1110 - 110P - UDW1 - rrrr - vvvv - 1011 - iiii - iiii
     Encoding T2 VLDM{mode}<c> <Rn>{!}, <list>
     <list> is consecutive 32-bit registers
     1110 - 110P - UDW1 - rrrr - vvvv - 1010 - iiii - iiii
     if P==0 && U==1 && W==1 && Rn=1101 VPOP
     if PUW=010 || PUW=011 || PUW=101 VLDM.  */
  return
    (((insn & 0xfe100f00) == 0xec100b00) ||
     ((insn & 0xfe100f00) == 0xec100a00))
    && /* (IA without !).  */
    (((((insn << 7) >> 28) & 0xd) == 0x4)
     /* (IA with !), includes VPOP (when reg number is SP).  */
     || ((((insn << 7) >> 28) & 0xd) == 0x5)
     /* (DB with !).  */
     || ((((insn << 7) >> 28) & 0xd) == 0x9));
}
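/* Worked example (illustrative): VPOP {d8-d15} is encoded as 0xecbd8b10;
   (insn << 7) >> 28 extracts bits 24..21 (P, U, D, W), masking with 0xd
   drops D and leaves P=0, U=1, W=1, i.e. the value 0x5 accepted above as
   "IA with !", which is how VPOP (Rn = SP with writeback) is matched.  */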
/* STM STM32L4XX erratum: This function assumes that it receives an LDM or
   a VLDM instruction and:
   - computes the number and the mode of memory accesses
   - decides if the replacement should be done:
     . replaces only if > 8-word accesses
     . or (testing purposes only) replaces all accesses.  */

static bfd_boolean
stm32l4xx_need_create_replacing_stub (const insn32 insn,
                                      bfd_arm_stm32l4xx_fix stm32l4xx_fix)
{
  int nb_words = 0;

  /* The field encoding the register list is the same for both LDMIA
     and LDMDB encodings.  */
  if (is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn))
    nb_words = elf32_arm_popcount (insn & 0x0000ffff);
  else if (is_thumb2_vldm (insn))
    nb_words = (insn & 0xff);

  /* DEFAULT mode accounts for the real bug condition situation,
     ALL mode inserts stubs for each LDM/VLDM instruction (testing).  */
  return (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_DEFAULT) ? nb_words > 8 :
         (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_ALL) ? TRUE : FALSE;
}
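/* Worked example (illustrative): an LDMIA.W r0!, {r1-r9} lists nine
   registers, so elf32_arm_popcount (insn & 0xffff) == 9 > 8 and a stub is
   requested under BFD_ARM_STM32L4XX_FIX_DEFAULT; a load of eight or fewer
   words is left alone unless BFD_ARM_STM32L4XX_FIX_ALL is selected.  */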
/* Look for potentially-troublesome code sequences which might trigger
   the STM STM32L4XX erratum.  */
8782 bfd_elf32_arm_stm32l4xx_erratum_scan (bfd
*abfd
,
8783 struct bfd_link_info
*link_info
)
8786 bfd_byte
*contents
= NULL
;
8787 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (link_info
);
8789 if (globals
== NULL
)
8792 /* If we are only performing a partial link do not bother
8793 to construct any glue. */
8794 if (bfd_link_relocatable (link_info
))
8797 /* Skip if this bfd does not correspond to an ELF image. */
8798 if (! is_arm_elf (abfd
))
8801 if (globals
->stm32l4xx_fix
== BFD_ARM_STM32L4XX_FIX_NONE
)
8804 /* Skip this BFD if it corresponds to an executable or dynamic object. */
8805 if ((abfd
->flags
& (EXEC_P
| DYNAMIC
)) != 0)
8808 for (sec
= abfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
8810 unsigned int i
, span
;
8811 struct _arm_elf_section_data
*sec_data
;
8813 /* If we don't have executable progbits, we're not interested in this
8814 section. Also skip if section is to be excluded. */
8815 if (elf_section_type (sec
) != SHT_PROGBITS
8816 || (elf_section_flags (sec
) & SHF_EXECINSTR
) == 0
8817 || (sec
->flags
& SEC_EXCLUDE
) != 0
8818 || sec
->sec_info_type
== SEC_INFO_TYPE_JUST_SYMS
8819 || sec
->output_section
== bfd_abs_section_ptr
8820 || strcmp (sec
->name
, STM32L4XX_ERRATUM_VENEER_SECTION_NAME
) == 0)
8823 sec_data
= elf32_arm_section_data (sec
);
8825 if (sec_data
->mapcount
== 0)
8828 if (elf_section_data (sec
)->this_hdr
.contents
!= NULL
)
8829 contents
= elf_section_data (sec
)->this_hdr
.contents
;
8830 else if (! bfd_malloc_and_get_section (abfd
, sec
, &contents
))
8833 qsort (sec_data
->map
, sec_data
->mapcount
, sizeof (elf32_arm_section_map
),
8834 elf32_arm_compare_mapping
);
8836 for (span
= 0; span
< sec_data
->mapcount
; span
++)
8838 unsigned int span_start
= sec_data
->map
[span
].vma
;
8839 unsigned int span_end
= (span
== sec_data
->mapcount
- 1)
8840 ? sec
->size
: sec_data
->map
[span
+ 1].vma
;
8841 char span_type
= sec_data
->map
[span
].type
;
8842 int itblock_current_pos
= 0;
          /* Only Thumb-2 mode need be supported with this CM4-specific
             code; we should not encounter any ARM mode (span_type 'a')
             spans here.  */
          if (span_type != 't')
            continue;

          for (i = span_start; i < span_end;)
            {
8852 unsigned int insn
= bfd_get_16 (abfd
, &contents
[i
]);
8853 bfd_boolean insn_32bit
= FALSE
;
8854 bfd_boolean is_ldm
= FALSE
;
8855 bfd_boolean is_vldm
= FALSE
;
8856 bfd_boolean is_not_last_in_it_block
= FALSE
;
              /* The first 16 bits of all 32-bit Thumb-2 instructions start
                 with opcode[15..13]=0b111, and the encoded op1 in
                 opcode[12..11] can be anything except 0b00.
                 See the 32-bit Thumb instruction encoding.  */
              if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
                insn_32bit = TRUE;
              /* Compute the predicate that tells whether the instruction
                 is concerned by the IT block:
                 - create an error if there is an LDM that is not
                   last in the IT block and thus cannot be replaced;
                 - otherwise we can create a branch at the end of the
                   IT block; it will be controlled naturally by IT
                   with the proper pseudo-predicate;
                 - so the only interesting predicate is the one that
                   tells that we are not on the last item of an IT
                   block.  */
              if (itblock_current_pos != 0)
                is_not_last_in_it_block = !!--itblock_current_pos;
8880 /* Load the rest of the insn (in manual-friendly order). */
8881 insn
= (insn
<< 16) | bfd_get_16 (abfd
, &contents
[i
+ 2]);
8882 is_ldm
= is_thumb2_ldmia (insn
) || is_thumb2_ldmdb (insn
);
8883 is_vldm
= is_thumb2_vldm (insn
);
8885 /* Veneers are created for (v)ldm depending on
8886 option flags and memory accesses conditions; but
8887 if the instruction is not the last instruction of
8888 an IT block, we cannot create a jump there, so we
8890 if ((is_ldm
|| is_vldm
)
8891 && stm32l4xx_need_create_replacing_stub
8892 (insn
, globals
->stm32l4xx_fix
))
8894 if (is_not_last_in_it_block
)
8897 /* xgettext:c-format */
8898 (_("%pB(%pA+%#x): error: multiple load detected"
8899 " in non-last IT block instruction:"
8900 " STM32L4XX veneer cannot be generated; "
8901 "use gcc option -mrestrict-it to generate"
8902 " only one instruction per IT block"),
8907 elf32_stm32l4xx_erratum_list
*newerr
=
8908 (elf32_stm32l4xx_erratum_list
*)
8910 (sizeof (elf32_stm32l4xx_erratum_list
));
8912 elf32_arm_section_data (sec
)
8913 ->stm32l4xx_erratumcount
+= 1;
8914 newerr
->u
.b
.insn
= insn
;
8915 /* We create only thumb branches. */
8917 STM32L4XX_ERRATUM_BRANCH_TO_VENEER
;
8918 record_stm32l4xx_erratum_veneer
8919 (link_info
, newerr
, abfd
, sec
,
8922 STM32L4XX_ERRATUM_LDM_VENEER_SIZE
:
8923 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE
);
8925 newerr
->next
= sec_data
->stm32l4xx_erratumlist
;
8926 sec_data
->stm32l4xx_erratumlist
= newerr
;
                 IT blocks are only encoded in T1.
                 Encoding T1: IT{x{y{z}}} <firstcond>
                 1 0 1 1 - 1 1 1 1 - firstcond - mask
                 if mask = '0000' then see 'related encodings'.
                 We don't deal with UNPREDICTABLE, just ignore these.
                 There can be no nested IT blocks so an IT block
                 is naturally a new one for which it is worth
                 computing its size.  */
              bfd_boolean is_newitblock = ((insn & 0xff00) == 0xbf00)
                                          && ((insn & 0x000f) != 0x0000);
              /* If we have a new IT block we compute its size.  */
              if (is_newitblock)
                {
                  /* Compute the number of instructions controlled
                     by the IT block, it will be used to decide
                     whether we are inside an IT block or not.  */
                  unsigned int mask = insn & 0x000f;
                  itblock_current_pos = 4 - ctz (mask);
                }

              i += insn_32bit ? 4 : 2;
8958 if (contents
!= NULL
8959 && elf_section_data (sec
)->this_hdr
.contents
!= contents
)
8967 if (contents
!= NULL
8968 && elf_section_data (sec
)->this_hdr
.contents
!= contents
)
/* Set target relocation values needed during linking.  */

void
bfd_elf32_arm_set_target_params (struct bfd *output_bfd,
                                 struct bfd_link_info *link_info,
                                 struct elf32_arm_params *params)
{
  struct elf32_arm_link_hash_table *globals;

  globals = elf32_arm_hash_table (link_info);
  if (globals == NULL)
    return;

  globals->target1_is_rel = params->target1_is_rel;
  if (globals->fdpic_p)
    globals->target2_reloc = R_ARM_GOT32;
  else if (strcmp (params->target2_type, "rel") == 0)
    globals->target2_reloc = R_ARM_REL32;
  else if (strcmp (params->target2_type, "abs") == 0)
    globals->target2_reloc = R_ARM_ABS32;
  else if (strcmp (params->target2_type, "got-rel") == 0)
    globals->target2_reloc = R_ARM_GOT_PREL;
  else
    {
      _bfd_error_handler (_("invalid TARGET2 relocation type '%s'"),
                          params->target2_type);
    }
  globals->fix_v4bx = params->fix_v4bx;
  globals->use_blx |= params->use_blx;
  globals->vfp11_fix = params->vfp11_denorm_fix;
  globals->stm32l4xx_fix = params->stm32l4xx_fix;
  if (globals->fdpic_p)
    globals->pic_veneer = 1;
  else
    globals->pic_veneer = params->pic_veneer;
  globals->fix_cortex_a8 = params->fix_cortex_a8;
  globals->fix_arm1176 = params->fix_arm1176;
  globals->cmse_implib = params->cmse_implib;
  globals->in_implib_bfd = params->in_implib_bfd;

  BFD_ASSERT (is_arm_elf (output_bfd));
  elf_arm_tdata (output_bfd)->no_enum_size_warning
    = params->no_enum_size_warning;
  elf_arm_tdata (output_bfd)->no_wchar_size_warning
    = params->no_wchar_size_warning;
}
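/* Usage sketch (hypothetical caller, for illustration only): a linker front
   end fills an elf32_arm_params structure from its command-line options and
   hands it to this function before section sizing, along the lines of:

     struct elf32_arm_params params;
     memset (&params, 0, sizeof params);
     params.target2_type = "got-rel";
     params.fix_v4bx = 1;
     params.vfp11_denorm_fix = BFD_ARM_VFP11_FIX_DEFAULT;
     params.stm32l4xx_fix = BFD_ARM_STM32L4XX_FIX_NONE;
     bfd_elf32_arm_set_target_params (output_bfd, link_info, &params);

   Only the members read above are shown; real callers set the remaining
   fields as well.  */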
/* Replace the target offset of a Thumb bl or b.w instruction.  */

static void
insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
{
  bfd_vma upper, lower;
  bfd_vma reloc_sign;

  BFD_ASSERT ((offset & 1) == 0);

  upper = bfd_get_16 (abfd, insn);
  lower = bfd_get_16 (abfd, insn + 2);
  reloc_sign = (offset < 0) ? 1 : 0;
  upper = (upper & ~(bfd_vma) 0x7ff)
          | ((offset >> 12) & 0x3ff)
          | (reloc_sign << 10);
  lower = (lower & ~(bfd_vma) 0x2fff)
          | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
          | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
          | ((offset >> 1) & 0x7ff);
  bfd_put_16 (abfd, upper, insn);
  bfd_put_16 (abfd, lower, insn + 2);
}
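/* Worked example (illustrative): for a forward offset of 0x1000, the sign
   bit S is 0, imm10 = (0x1000 >> 12) & 0x3ff = 1 and imm11 =
   (0x1000 >> 1) & 0x7ff = 0, while offset bits 23 and 22 are 0 so both J
   bits become (!0) ^ 0 = 1, matching the Thumb-2 BL rule
   I1 = NOT(J1 EOR S), I2 = NOT(J2 EOR S).  */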
9046 /* Thumb code calling an ARM function. */
9049 elf32_thumb_to_arm_stub (struct bfd_link_info
* info
,
9053 asection
* input_section
,
9054 bfd_byte
* hit_data
,
9057 bfd_signed_vma addend
,
9059 char **error_message
)
9063 long int ret_offset
;
9064 struct elf_link_hash_entry
* myh
;
9065 struct elf32_arm_link_hash_table
* globals
;
9067 myh
= find_thumb_glue (info
, name
, error_message
);
9071 globals
= elf32_arm_hash_table (info
);
9072 BFD_ASSERT (globals
!= NULL
);
9073 BFD_ASSERT (globals
->bfd_of_glue_owner
!= NULL
);
9075 my_offset
= myh
->root
.u
.def
.value
;
9077 s
= bfd_get_linker_section (globals
->bfd_of_glue_owner
,
9078 THUMB2ARM_GLUE_SECTION_NAME
);
9080 BFD_ASSERT (s
!= NULL
);
9081 BFD_ASSERT (s
->contents
!= NULL
);
9082 BFD_ASSERT (s
->output_section
!= NULL
);
9084 if ((my_offset
& 0x01) == 0x01)
9087 && sym_sec
->owner
!= NULL
9088 && !INTERWORK_FLAG (sym_sec
->owner
))
9091 (_("%pB(%s): warning: interworking not enabled;"
9092 " first occurrence: %pB: %s call to %s"),
9093 sym_sec
->owner
, name
, input_bfd
, "Thumb", "ARM");
9099 myh
->root
.u
.def
.value
= my_offset
;
9101 put_thumb_insn (globals
, output_bfd
, (bfd_vma
) t2a1_bx_pc_insn
,
9102 s
->contents
+ my_offset
);
9104 put_thumb_insn (globals
, output_bfd
, (bfd_vma
) t2a2_noop_insn
,
9105 s
->contents
+ my_offset
+ 2);
9108 /* Address of destination of the stub. */
9109 ((bfd_signed_vma
) val
)
9111 /* Offset from the start of the current section
9112 to the start of the stubs. */
9114 /* Offset of the start of this stub from the start of the stubs. */
9116 /* Address of the start of the current section. */
9117 + s
->output_section
->vma
)
9118 /* The branch instruction is 4 bytes into the stub. */
9120 /* ARM branches work from the pc of the instruction + 8. */
9123 put_arm_insn (globals
, output_bfd
,
9124 (bfd_vma
) t2a3_b_insn
| ((ret_offset
>> 2) & 0x00FFFFFF),
9125 s
->contents
+ my_offset
+ 4);
9128 BFD_ASSERT (my_offset
<= globals
->thumb_glue_size
);
9130 /* Now go back and fix up the original BL insn to point to here. */
9132 /* Address of where the stub is located. */
9133 (s
->output_section
->vma
+ s
->output_offset
+ my_offset
)
9134 /* Address of where the BL is located. */
9135 - (input_section
->output_section
->vma
+ input_section
->output_offset
9137 /* Addend in the relocation. */
9139 /* Biassing for PC-relative addressing. */
9142 insert_thumb_branch (input_bfd
, ret_offset
, hit_data
- input_section
->vma
);
9147 /* Populate an Arm to Thumb stub. Returns the stub symbol. */
9149 static struct elf_link_hash_entry
*
9150 elf32_arm_create_thumb_stub (struct bfd_link_info
* info
,
9157 char ** error_message
)
9160 long int ret_offset
;
9161 struct elf_link_hash_entry
* myh
;
9162 struct elf32_arm_link_hash_table
* globals
;
9164 myh
= find_arm_glue (info
, name
, error_message
);
9168 globals
= elf32_arm_hash_table (info
);
9169 BFD_ASSERT (globals
!= NULL
);
9170 BFD_ASSERT (globals
->bfd_of_glue_owner
!= NULL
);
9172 my_offset
= myh
->root
.u
.def
.value
;
9174 if ((my_offset
& 0x01) == 0x01)
9177 && sym_sec
->owner
!= NULL
9178 && !INTERWORK_FLAG (sym_sec
->owner
))
9181 (_("%pB(%s): warning: interworking not enabled;"
9182 " first occurrence: %pB: %s call to %s"),
9183 sym_sec
->owner
, name
, input_bfd
, "ARM", "Thumb");
9187 myh
->root
.u
.def
.value
= my_offset
;
9189 if (bfd_link_pic (info
)
9190 || globals
->root
.is_relocatable_executable
9191 || globals
->pic_veneer
)
9193 /* For relocatable objects we can't use absolute addresses,
9194 so construct the address from a relative offset. */
9195 /* TODO: If the offset is small it's probably worth
9196 constructing the address with adds. */
9197 put_arm_insn (globals
, output_bfd
, (bfd_vma
) a2t1p_ldr_insn
,
9198 s
->contents
+ my_offset
);
9199 put_arm_insn (globals
, output_bfd
, (bfd_vma
) a2t2p_add_pc_insn
,
9200 s
->contents
+ my_offset
+ 4);
9201 put_arm_insn (globals
, output_bfd
, (bfd_vma
) a2t3p_bx_r12_insn
,
9202 s
->contents
+ my_offset
+ 8);
9203 /* Adjust the offset by 4 for the position of the add,
9204 and 8 for the pipeline offset. */
9205 ret_offset
= (val
- (s
->output_offset
9206 + s
->output_section
->vma
9209 bfd_put_32 (output_bfd
, ret_offset
,
9210 s
->contents
+ my_offset
+ 12);
9212 else if (globals
->use_blx
)
9214 put_arm_insn (globals
, output_bfd
, (bfd_vma
) a2t1v5_ldr_insn
,
9215 s
->contents
+ my_offset
);
9217 /* It's a thumb address. Add the low order bit. */
9218 bfd_put_32 (output_bfd
, val
| a2t2v5_func_addr_insn
,
9219 s
->contents
+ my_offset
+ 4);
9223 put_arm_insn (globals
, output_bfd
, (bfd_vma
) a2t1_ldr_insn
,
9224 s
->contents
+ my_offset
);
9226 put_arm_insn (globals
, output_bfd
, (bfd_vma
) a2t2_bx_r12_insn
,
9227 s
->contents
+ my_offset
+ 4);
9229 /* It's a thumb address. Add the low order bit. */
9230 bfd_put_32 (output_bfd
, val
| a2t3_func_addr_insn
,
9231 s
->contents
+ my_offset
+ 8);
9237 BFD_ASSERT (my_offset
<= globals
->arm_glue_size
);
9242 /* Arm code calling a Thumb function. */
9245 elf32_arm_to_thumb_stub (struct bfd_link_info
* info
,
9249 asection
* input_section
,
9250 bfd_byte
* hit_data
,
9253 bfd_signed_vma addend
,
9255 char **error_message
)
9257 unsigned long int tmp
;
9260 long int ret_offset
;
9261 struct elf_link_hash_entry
* myh
;
9262 struct elf32_arm_link_hash_table
* globals
;
9264 globals
= elf32_arm_hash_table (info
);
9265 BFD_ASSERT (globals
!= NULL
);
9266 BFD_ASSERT (globals
->bfd_of_glue_owner
!= NULL
);
9268 s
= bfd_get_linker_section (globals
->bfd_of_glue_owner
,
9269 ARM2THUMB_GLUE_SECTION_NAME
);
9270 BFD_ASSERT (s
!= NULL
);
9271 BFD_ASSERT (s
->contents
!= NULL
);
9272 BFD_ASSERT (s
->output_section
!= NULL
);
9274 myh
= elf32_arm_create_thumb_stub (info
, name
, input_bfd
, output_bfd
,
9275 sym_sec
, val
, s
, error_message
);
9279 my_offset
= myh
->root
.u
.def
.value
;
9280 tmp
= bfd_get_32 (input_bfd
, hit_data
);
9281 tmp
= tmp
& 0xFF000000;
9283 /* Somehow these are both 4 too far, so subtract 8. */
9284 ret_offset
= (s
->output_offset
9286 + s
->output_section
->vma
9287 - (input_section
->output_offset
9288 + input_section
->output_section
->vma
9292 tmp
= tmp
| ((ret_offset
>> 2) & 0x00FFFFFF);
9294 bfd_put_32 (output_bfd
, (bfd_vma
) tmp
, hit_data
- input_section
->vma
);
9299 /* Populate Arm stub for an exported Thumb function. */
9302 elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry
*h
, void * inf
)
9304 struct bfd_link_info
* info
= (struct bfd_link_info
*) inf
;
9306 struct elf_link_hash_entry
* myh
;
9307 struct elf32_arm_link_hash_entry
*eh
;
9308 struct elf32_arm_link_hash_table
* globals
;
9311 char *error_message
;
9313 eh
= elf32_arm_hash_entry (h
);
9314 /* Allocate stubs for exported Thumb functions on v4t. */
9315 if (eh
->export_glue
== NULL
)
9318 globals
= elf32_arm_hash_table (info
);
9319 BFD_ASSERT (globals
!= NULL
);
9320 BFD_ASSERT (globals
->bfd_of_glue_owner
!= NULL
);
9322 s
= bfd_get_linker_section (globals
->bfd_of_glue_owner
,
9323 ARM2THUMB_GLUE_SECTION_NAME
);
9324 BFD_ASSERT (s
!= NULL
);
9325 BFD_ASSERT (s
->contents
!= NULL
);
9326 BFD_ASSERT (s
->output_section
!= NULL
);
9328 sec
= eh
->export_glue
->root
.u
.def
.section
;
9330 BFD_ASSERT (sec
->output_section
!= NULL
);
9332 val
= eh
->export_glue
->root
.u
.def
.value
+ sec
->output_offset
9333 + sec
->output_section
->vma
;
9335 myh
= elf32_arm_create_thumb_stub (info
, h
->root
.root
.string
,
9336 h
->root
.u
.def
.section
->owner
,
9337 globals
->obfd
, sec
, val
, s
,
/* Populate ARMv4 BX veneers.  Returns the absolute address of the veneer.  */
9346 elf32_arm_bx_glue (struct bfd_link_info
* info
, int reg
)
9351 struct elf32_arm_link_hash_table
*globals
;
9353 globals
= elf32_arm_hash_table (info
);
9354 BFD_ASSERT (globals
!= NULL
);
9355 BFD_ASSERT (globals
->bfd_of_glue_owner
!= NULL
);
9357 s
= bfd_get_linker_section (globals
->bfd_of_glue_owner
,
9358 ARM_BX_GLUE_SECTION_NAME
);
9359 BFD_ASSERT (s
!= NULL
);
9360 BFD_ASSERT (s
->contents
!= NULL
);
9361 BFD_ASSERT (s
->output_section
!= NULL
);
9363 BFD_ASSERT (globals
->bx_glue_offset
[reg
] & 2);
9365 glue_addr
= globals
->bx_glue_offset
[reg
] & ~(bfd_vma
)3;
9367 if ((globals
->bx_glue_offset
[reg
] & 1) == 0)
9369 p
= s
->contents
+ glue_addr
;
9370 bfd_put_32 (globals
->obfd
, armbx1_tst_insn
+ (reg
<< 16), p
);
9371 bfd_put_32 (globals
->obfd
, armbx2_moveq_insn
+ reg
, p
+ 4);
9372 bfd_put_32 (globals
->obfd
, armbx3_bx_insn
+ reg
, p
+ 8);
9373 globals
->bx_glue_offset
[reg
] |= 1;
9376 return glue_addr
+ s
->output_section
->vma
+ s
->output_offset
;
9379 /* Generate Arm stubs for exported Thumb symbols. */
9381 elf32_arm_begin_write_processing (bfd
*abfd ATTRIBUTE_UNUSED
,
9382 struct bfd_link_info
*link_info
)
9384 struct elf32_arm_link_hash_table
* globals
;
9386 if (link_info
== NULL
)
9387 /* Ignore this if we are not called by the ELF backend linker. */
9390 globals
= elf32_arm_hash_table (link_info
);
9391 if (globals
== NULL
)
9394 /* If blx is available then exported Thumb symbols are OK and there is
9396 if (globals
->use_blx
)
9399 elf_link_hash_traverse (&globals
->root
, elf32_arm_to_thumb_export_stub
,
/* Reserve space for COUNT dynamic relocations in relocation section
   SRELOC.  */

static void
elf32_arm_allocate_dynrelocs (struct bfd_link_info *info, asection *sreloc,
                              bfd_size_type count)
{
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  BFD_ASSERT (htab->root.dynamic_sections_created);
  if (sreloc == NULL)
    abort ();
  sreloc->size += RELOC_SIZE (htab) * count;
}
/* Reserve space for COUNT R_ARM_IRELATIVE relocations.  If the link is
   dynamic, the relocations should go in SRELOC, otherwise they should
   go in the special .rel.iplt section.  */

static void
elf32_arm_allocate_irelocs (struct bfd_link_info *info, asection *sreloc,
                            bfd_size_type count)
{
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (!htab->root.dynamic_sections_created)
    htab->root.irelplt->size += RELOC_SIZE (htab) * count;
  else
    {
      BFD_ASSERT (sreloc != NULL);
      sreloc->size += RELOC_SIZE (htab) * count;
    }
}
/* Add relocation REL to the end of relocation section SRELOC.  */

static void
elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
                        asection *sreloc, Elf_Internal_Rela *rel)
{
  bfd_byte *loc;
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (!htab->root.dynamic_sections_created
      && ELF32_R_TYPE (rel->r_info) == R_ARM_IRELATIVE)
    sreloc = htab->root.irelplt;
  if (sreloc == NULL)
    abort ();

  loc = sreloc->contents;
  loc += sreloc->reloc_count++ * RELOC_SIZE (htab);
  if (sreloc->reloc_count * RELOC_SIZE (htab) > sreloc->size)
    abort ();
  SWAP_RELOC_OUT (htab) (output_bfd, rel, loc);
}
9461 /* Allocate room for a PLT entry described by ROOT_PLT and ARM_PLT.
9462 IS_IPLT_ENTRY says whether the entry belongs to .iplt rather than
9466 elf32_arm_allocate_plt_entry (struct bfd_link_info
*info
,
9467 bfd_boolean is_iplt_entry
,
9468 union gotplt_union
*root_plt
,
9469 struct arm_plt_info
*arm_plt
)
9471 struct elf32_arm_link_hash_table
*htab
;
9475 htab
= elf32_arm_hash_table (info
);
9479 splt
= htab
->root
.iplt
;
9480 sgotplt
= htab
->root
.igotplt
;
9482 /* NaCl uses a special first entry in .iplt too. */
9483 if (htab
->nacl_p
&& splt
->size
== 0)
9484 splt
->size
+= htab
->plt_header_size
;
9486 /* Allocate room for an R_ARM_IRELATIVE relocation in .rel.iplt. */
9487 elf32_arm_allocate_irelocs (info
, htab
->root
.irelplt
, 1);
9491 splt
= htab
->root
.splt
;
9492 sgotplt
= htab
->root
.sgotplt
;
9496 /* Allocate room for R_ARM_FUNCDESC_VALUE. */
9497 /* For lazy binding, relocations will be put into .rel.plt, in
9498 .rel.got otherwise. */
9499 /* FIXME: today we don't support lazy binding so put it in .rel.got */
9500 if (info
->flags
& DF_BIND_NOW
)
9501 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelgot
, 1);
9503 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelplt
, 1);
9507 /* Allocate room for an R_JUMP_SLOT relocation in .rel.plt. */
9508 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelplt
, 1);
9511 /* If this is the first .plt entry, make room for the special
9513 if (splt
->size
== 0)
9514 splt
->size
+= htab
->plt_header_size
;
9516 htab
->next_tls_desc_index
++;
9519 /* Allocate the PLT entry itself, including any leading Thumb stub. */
9520 if (elf32_arm_plt_needs_thumb_stub_p (info
, arm_plt
))
9521 splt
->size
+= PLT_THUMB_STUB_SIZE
;
9522 root_plt
->offset
= splt
->size
;
9523 splt
->size
+= htab
->plt_entry_size
;
9525 if (!htab
->symbian_p
)
9527 /* We also need to make an entry in the .got.plt section, which
9528 will be placed in the .got section by the linker script. */
9530 arm_plt
->got_offset
= sgotplt
->size
;
9532 arm_plt
->got_offset
= sgotplt
->size
- 8 * htab
->num_tls_desc
;
9534 /* Function descriptor takes 64 bits in GOT. */
static bfd_vma
arm_movw_immediate (bfd_vma value)
{
  return (value & 0x00000fff) | ((value & 0x0000f000) << 4);
}

static bfd_vma
arm_movt_immediate (bfd_vma value)
{
  return ((value & 0x0fff0000) >> 16) | ((value & 0xf0000000) >> 12);
}
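/* Worked example (illustrative): for value 0x12345678, arm_movw_immediate
   yields 0x00050678 (imm12 = 0x678 in bits 0-11, imm4 = 0x5 in bits 16-19,
   i.e. the low half 0x5678 split into the MOVW imm4:imm12 fields), and
   arm_movt_immediate yields 0x00010234, encoding the high half 0x1234 the
   same way for MOVT.  */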
/* Fill in a PLT entry and its associated GOT slot.  If DYNINDX == -1,
   the entry lives in .iplt and resolves to (*SYM_VALUE)().
   Otherwise, DYNINDX is the index of the symbol in the dynamic
   symbol table and SYM_VALUE is undefined.

   ROOT_PLT points to the offset of the PLT entry from the start of its
   section (.iplt or .plt).  ARM_PLT points to the symbol's ARM-specific
   bookkeeping information.

   Returns FALSE if there was a problem.  */
9565 elf32_arm_populate_plt_entry (bfd
*output_bfd
, struct bfd_link_info
*info
,
9566 union gotplt_union
*root_plt
,
9567 struct arm_plt_info
*arm_plt
,
9568 int dynindx
, bfd_vma sym_value
)
9570 struct elf32_arm_link_hash_table
*htab
;
9576 Elf_Internal_Rela rel
;
9577 bfd_vma plt_header_size
;
9578 bfd_vma got_header_size
;
9580 htab
= elf32_arm_hash_table (info
);
9582 /* Pick the appropriate sections and sizes. */
9585 splt
= htab
->root
.iplt
;
9586 sgot
= htab
->root
.igotplt
;
9587 srel
= htab
->root
.irelplt
;
9589 /* There are no reserved entries in .igot.plt, and no special
9590 first entry in .iplt. */
9591 got_header_size
= 0;
9592 plt_header_size
= 0;
9596 splt
= htab
->root
.splt
;
9597 sgot
= htab
->root
.sgotplt
;
9598 srel
= htab
->root
.srelplt
;
9600 got_header_size
= get_elf_backend_data (output_bfd
)->got_header_size
;
9601 plt_header_size
= htab
->plt_header_size
;
9603 BFD_ASSERT (splt
!= NULL
&& srel
!= NULL
);
9605 /* Fill in the entry in the procedure linkage table. */
9606 if (htab
->symbian_p
)
9608 BFD_ASSERT (dynindx
>= 0);
9609 put_arm_insn (htab
, output_bfd
,
9610 elf32_arm_symbian_plt_entry
[0],
9611 splt
->contents
+ root_plt
->offset
);
9612 bfd_put_32 (output_bfd
,
9613 elf32_arm_symbian_plt_entry
[1],
9614 splt
->contents
+ root_plt
->offset
+ 4);
9616 /* Fill in the entry in the .rel.plt section. */
9617 rel
.r_offset
= (splt
->output_section
->vma
9618 + splt
->output_offset
9619 + root_plt
->offset
+ 4);
9620 rel
.r_info
= ELF32_R_INFO (dynindx
, R_ARM_GLOB_DAT
);
9622 /* Get the index in the procedure linkage table which
9623 corresponds to this symbol. This is the index of this symbol
9624 in all the symbols for which we are making plt entries. The
9625 first entry in the procedure linkage table is reserved. */
9626 plt_index
= ((root_plt
->offset
- plt_header_size
)
9627 / htab
->plt_entry_size
);
9631 bfd_vma got_offset
, got_address
, plt_address
;
9632 bfd_vma got_displacement
, initial_got_entry
;
9635 BFD_ASSERT (sgot
!= NULL
);
9637 /* Get the offset into the .(i)got.plt table of the entry that
9638 corresponds to this function. */
9639 got_offset
= (arm_plt
->got_offset
& -2);
9641 /* Get the index in the procedure linkage table which
9642 corresponds to this symbol. This is the index of this symbol
9643 in all the symbols for which we are making plt entries.
9644 After the reserved .got.plt entries, all symbols appear in
9645 the same order as in .plt. */
9647 /* Function descriptor takes 8 bytes. */
9648 plt_index
= (got_offset
- got_header_size
) / 8;
9650 plt_index
= (got_offset
- got_header_size
) / 4;
9652 /* Calculate the address of the GOT entry. */
9653 got_address
= (sgot
->output_section
->vma
9654 + sgot
->output_offset
9657 /* ...and the address of the PLT entry. */
9658 plt_address
= (splt
->output_section
->vma
9659 + splt
->output_offset
9660 + root_plt
->offset
);
9662 ptr
= splt
->contents
+ root_plt
->offset
;
9663 if (htab
->vxworks_p
&& bfd_link_pic (info
))
9668 for (i
= 0; i
!= htab
->plt_entry_size
/ 4; i
++, ptr
+= 4)
9670 val
= elf32_arm_vxworks_shared_plt_entry
[i
];
9672 val
|= got_address
- sgot
->output_section
->vma
;
9674 val
|= plt_index
* RELOC_SIZE (htab
);
9675 if (i
== 2 || i
== 5)
9676 bfd_put_32 (output_bfd
, val
, ptr
);
9678 put_arm_insn (htab
, output_bfd
, val
, ptr
);
9681 else if (htab
->vxworks_p
)
9686 for (i
= 0; i
!= htab
->plt_entry_size
/ 4; i
++, ptr
+= 4)
9688 val
= elf32_arm_vxworks_exec_plt_entry
[i
];
9692 val
|= 0xffffff & -((root_plt
->offset
+ i
* 4 + 8) >> 2);
9694 val
|= plt_index
* RELOC_SIZE (htab
);
9695 if (i
== 2 || i
== 5)
9696 bfd_put_32 (output_bfd
, val
, ptr
);
9698 put_arm_insn (htab
, output_bfd
, val
, ptr
);
9701 loc
= (htab
->srelplt2
->contents
9702 + (plt_index
* 2 + 1) * RELOC_SIZE (htab
));
9704 /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
9705 referencing the GOT for this PLT entry. */
9706 rel
.r_offset
= plt_address
+ 8;
9707 rel
.r_info
= ELF32_R_INFO (htab
->root
.hgot
->indx
, R_ARM_ABS32
);
9708 rel
.r_addend
= got_offset
;
9709 SWAP_RELOC_OUT (htab
) (output_bfd
, &rel
, loc
);
9710 loc
+= RELOC_SIZE (htab
);
9712 /* Create the R_ARM_ABS32 relocation referencing the
9713 beginning of the PLT for this GOT entry. */
9714 rel
.r_offset
= got_address
;
9715 rel
.r_info
= ELF32_R_INFO (htab
->root
.hplt
->indx
, R_ARM_ABS32
);
9717 SWAP_RELOC_OUT (htab
) (output_bfd
, &rel
, loc
);
9719 else if (htab
->nacl_p
)
9721 /* Calculate the displacement between the PLT slot and the
9722 common tail that's part of the special initial PLT slot. */
9723 int32_t tail_displacement
9724 = ((splt
->output_section
->vma
+ splt
->output_offset
9725 + ARM_NACL_PLT_TAIL_OFFSET
)
9726 - (plt_address
+ htab
->plt_entry_size
+ 4));
9727 BFD_ASSERT ((tail_displacement
& 3) == 0);
9728 tail_displacement
>>= 2;
9730 BFD_ASSERT ((tail_displacement
& 0xff000000) == 0
9731 || (-tail_displacement
& 0xff000000) == 0);
9733 /* Calculate the displacement between the PLT slot and the entry
9734 in the GOT. The offset accounts for the value produced by
9735 adding to pc in the penultimate instruction of the PLT stub. */
9736 got_displacement
= (got_address
9737 - (plt_address
+ htab
->plt_entry_size
));
9739 /* NaCl does not support interworking at all. */
9740 BFD_ASSERT (!elf32_arm_plt_needs_thumb_stub_p (info
, arm_plt
));
9742 put_arm_insn (htab
, output_bfd
,
9743 elf32_arm_nacl_plt_entry
[0]
9744 | arm_movw_immediate (got_displacement
),
9746 put_arm_insn (htab
, output_bfd
,
9747 elf32_arm_nacl_plt_entry
[1]
9748 | arm_movt_immediate (got_displacement
),
9750 put_arm_insn (htab
, output_bfd
,
9751 elf32_arm_nacl_plt_entry
[2],
9753 put_arm_insn (htab
, output_bfd
,
9754 elf32_arm_nacl_plt_entry
[3]
9755 | (tail_displacement
& 0x00ffffff),
9758 else if (htab
->fdpic_p
)
9760 const bfd_vma
*plt_entry
= using_thumb_only(htab
)
9761 ? elf32_arm_fdpic_thumb_plt_entry
9762 : elf32_arm_fdpic_plt_entry
;
9764 /* Fill-up Thumb stub if needed. */
9765 if (elf32_arm_plt_needs_thumb_stub_p (info
, arm_plt
))
9767 put_thumb_insn (htab
, output_bfd
,
9768 elf32_arm_plt_thumb_stub
[0], ptr
- 4);
9769 put_thumb_insn (htab
, output_bfd
,
9770 elf32_arm_plt_thumb_stub
[1], ptr
- 2);
9772 /* As we are using 32 bit instructions even for the Thumb
9773 version, we have to use 'put_arm_insn' instead of
9774 'put_thumb_insn'. */
9775 put_arm_insn(htab
, output_bfd
, plt_entry
[0], ptr
+ 0);
9776 put_arm_insn(htab
, output_bfd
, plt_entry
[1], ptr
+ 4);
9777 put_arm_insn(htab
, output_bfd
, plt_entry
[2], ptr
+ 8);
9778 put_arm_insn(htab
, output_bfd
, plt_entry
[3], ptr
+ 12);
9779 bfd_put_32 (output_bfd
, got_offset
, ptr
+ 16);
9781 if (!(info
->flags
& DF_BIND_NOW
))
9783 /* funcdesc_value_reloc_offset. */
9784 bfd_put_32 (output_bfd
,
9785 htab
->root
.srelplt
->reloc_count
* RELOC_SIZE (htab
),
9787 put_arm_insn(htab
, output_bfd
, plt_entry
[6], ptr
+ 24);
9788 put_arm_insn(htab
, output_bfd
, plt_entry
[7], ptr
+ 28);
9789 put_arm_insn(htab
, output_bfd
, plt_entry
[8], ptr
+ 32);
9790 put_arm_insn(htab
, output_bfd
, plt_entry
[9], ptr
+ 36);
9793 else if (using_thumb_only (htab
))
9795 /* PR ld/16017: Generate thumb only PLT entries. */
9796 if (!using_thumb2 (htab
))
9798 /* FIXME: We ought to be able to generate thumb-1 PLT
9800 _bfd_error_handler (_("%pB: warning: thumb-1 mode PLT generation not currently supported"),
9805 /* Calculate the displacement between the PLT slot and the entry in
9806 the GOT. The 12-byte offset accounts for the value produced by
9807 adding to pc in the 3rd instruction of the PLT stub. */
9808 got_displacement
= got_address
- (plt_address
+ 12);
9810 /* As we are using 32 bit instructions we have to use 'put_arm_insn'
9811 instead of 'put_thumb_insn'. */
9812 put_arm_insn (htab
, output_bfd
,
9813 elf32_thumb2_plt_entry
[0]
9814 | ((got_displacement
& 0x000000ff) << 16)
9815 | ((got_displacement
& 0x00000700) << 20)
9816 | ((got_displacement
& 0x00000800) >> 1)
9817 | ((got_displacement
& 0x0000f000) >> 12),
9819 put_arm_insn (htab
, output_bfd
,
9820 elf32_thumb2_plt_entry
[1]
9821 | ((got_displacement
& 0x00ff0000) )
9822 | ((got_displacement
& 0x07000000) << 4)
9823 | ((got_displacement
& 0x08000000) >> 17)
9824 | ((got_displacement
& 0xf0000000) >> 28),
9826 put_arm_insn (htab
, output_bfd
,
9827 elf32_thumb2_plt_entry
[2],
9829 put_arm_insn (htab
, output_bfd
,
9830 elf32_thumb2_plt_entry
[3],
9835 /* Calculate the displacement between the PLT slot and the
9836 entry in the GOT. The eight-byte offset accounts for the
9837 value produced by adding to pc in the first instruction
9839 got_displacement
= got_address
- (plt_address
+ 8);
9841 if (elf32_arm_plt_needs_thumb_stub_p (info
, arm_plt
))
9843 put_thumb_insn (htab
, output_bfd
,
9844 elf32_arm_plt_thumb_stub
[0], ptr
- 4);
9845 put_thumb_insn (htab
, output_bfd
,
9846 elf32_arm_plt_thumb_stub
[1], ptr
- 2);
9849 if (!elf32_arm_use_long_plt_entry
)
9851 BFD_ASSERT ((got_displacement
& 0xf0000000) == 0);
9853 put_arm_insn (htab
, output_bfd
,
9854 elf32_arm_plt_entry_short
[0]
9855 | ((got_displacement
& 0x0ff00000) >> 20),
9857 put_arm_insn (htab
, output_bfd
,
9858 elf32_arm_plt_entry_short
[1]
9859 | ((got_displacement
& 0x000ff000) >> 12),
9861 put_arm_insn (htab
, output_bfd
,
9862 elf32_arm_plt_entry_short
[2]
9863 | (got_displacement
& 0x00000fff),
9865 #ifdef FOUR_WORD_PLT
9866 bfd_put_32 (output_bfd
, elf32_arm_plt_entry_short
[3], ptr
+ 12);
9871 put_arm_insn (htab
, output_bfd
,
9872 elf32_arm_plt_entry_long
[0]
9873 | ((got_displacement
& 0xf0000000) >> 28),
9875 put_arm_insn (htab
, output_bfd
,
9876 elf32_arm_plt_entry_long
[1]
9877 | ((got_displacement
& 0x0ff00000) >> 20),
9879 put_arm_insn (htab
, output_bfd
,
9880 elf32_arm_plt_entry_long
[2]
9881 | ((got_displacement
& 0x000ff000) >> 12),
9883 put_arm_insn (htab
, output_bfd
,
9884 elf32_arm_plt_entry_long
[3]
9885 | (got_displacement
& 0x00000fff),
9890 /* Fill in the entry in the .rel(a).(i)plt section. */
9891 rel
.r_offset
= got_address
;
9895 /* .igot.plt entries use IRELATIVE relocations against SYM_VALUE.
9896 The dynamic linker or static executable then calls SYM_VALUE
9897 to determine the correct run-time value of the .igot.plt entry. */
9898 rel
.r_info
= ELF32_R_INFO (0, R_ARM_IRELATIVE
);
9899 initial_got_entry
= sym_value
;
9903 /* For FDPIC we will have to resolve a R_ARM_FUNCDESC_VALUE
9904 used by PLT entry. */
9907 rel
.r_info
= ELF32_R_INFO (dynindx
, R_ARM_FUNCDESC_VALUE
);
9908 initial_got_entry
= 0;
9912 rel
.r_info
= ELF32_R_INFO (dynindx
, R_ARM_JUMP_SLOT
);
9913 initial_got_entry
= (splt
->output_section
->vma
9914 + splt
->output_offset
);
9918 /* Fill in the entry in the global offset table. */
9919 bfd_put_32 (output_bfd
, initial_got_entry
,
9920 sgot
->contents
+ got_offset
);
9922 if (htab
->fdpic_p
&& !(info
->flags
& DF_BIND_NOW
))
9924 /* Setup initial funcdesc value. */
9925 /* FIXME: we don't support lazy binding because there is a
9926 race condition between both words getting written and
9927 some other thread attempting to read them. The ARM
9928 architecture does not have an atomic 64 bit load/store
9929 instruction that could be used to prevent it; it is
9930 recommended that threaded FDPIC applications run with the
9931 LD_BIND_NOW environment variable set. */
9932 bfd_put_32(output_bfd
, plt_address
+ 0x18,
9933 sgot
->contents
+ got_offset
);
9934 bfd_put_32(output_bfd
, -1 /*TODO*/,
9935 sgot
->contents
+ got_offset
+ 4);
9940 elf32_arm_add_dynreloc (output_bfd
, info
, srel
, &rel
);
9945 /* For FDPIC we put PLT relocationss into .rel.got when not
9946 lazy binding otherwise we put them in .rel.plt. For now,
9947 we don't support lazy binding so put it in .rel.got. */
9948 if (info
->flags
& DF_BIND_NOW
)
9949 elf32_arm_add_dynreloc(output_bfd
, info
, htab
->root
.srelgot
, &rel
);
9951 elf32_arm_add_dynreloc(output_bfd
, info
, htab
->root
.srelplt
, &rel
);
9955 loc
= srel
->contents
+ plt_index
* RELOC_SIZE (htab
);
9956 SWAP_RELOC_OUT (htab
) (output_bfd
, &rel
, loc
);
9963 /* Some relocations map to different relocations depending on the
9964 target. Return the real relocation. */
9967 arm_real_reloc_type (struct elf32_arm_link_hash_table
* globals
,
9973 if (globals
->target1_is_rel
)
9979 return globals
->target2_reloc
;
/* Return the base VMA address which should be subtracted from real addresses
   when resolving @dtpoff relocation.
   This is PT_TLS segment p_vaddr.  */

static bfd_vma
dtpoff_base (struct bfd_link_info *info)
{
  /* If tls_sec is NULL, we should have signalled an error already.  */
  if (elf_hash_table (info)->tls_sec == NULL)
    return 0;
  return elf_hash_table (info)->tls_sec->vma;
}

/* Return the relocation value for @tpoff relocation
   if STT_TLS virtual address is ADDRESS.  */

static bfd_vma
tpoff (struct bfd_link_info *info, bfd_vma address)
{
  struct elf_link_hash_table *htab = elf_hash_table (info);
  bfd_vma base;

  /* If tls_sec is NULL, we should have signalled an error already.  */
  if (htab->tls_sec == NULL)
    return 0;
  base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
  return address - htab->tls_sec->vma + base;
}
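/* Worked example (illustrative): with the ARM EABI TCB_SIZE of 8 and a TLS
   segment aligned to 8 bytes, align_power (8, 3) == 8, so a thread-local
   variable at tls_sec->vma + 0x10 gets a TP-relative offset of 0x18; the
   base term keeps the first 8 bytes after TP reserved for the TCB.  */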
/* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
   VALUE is the relocation value.  */

static bfd_reloc_status_type
elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
{
  if (value > 0xfff)
    return bfd_reloc_overflow;

  value |= bfd_get_32 (abfd, data) & 0xfffff000;
  bfd_put_32 (abfd, value, data);
  return bfd_reloc_ok;
}
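/* Worked example (illustrative): a 12-bit value of 0x123 is merged into the
   low 12 bits of the instruction word already at DATA, preserving bits
   12-31; a value of 0x1000 or more is rejected with bfd_reloc_overflow.  */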
/* Handle TLS relaxations.  Relaxing is possible for symbols that use
   R_ARM_GOTDESC, R_ARM_{,THM_}TLS_CALL or
   R_ARM_{,THM_}TLS_DESCSEQ relocations, during a static link.

   Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
   is to then call final_link_relocate.  Return other values in the
   case of error.

   FIXME: When --emit-relocs is in effect, we'll emit relocs describing
   the pre-relaxed code.  It would be nice if the relocs were updated
   to match the optimization.  */
10041 static bfd_reloc_status_type
10042 elf32_arm_tls_relax (struct elf32_arm_link_hash_table
*globals
,
10043 bfd
*input_bfd
, asection
*input_sec
, bfd_byte
*contents
,
10044 Elf_Internal_Rela
*rel
, unsigned long is_local
)
10046 unsigned long insn
;
10048 switch (ELF32_R_TYPE (rel
->r_info
))
10051 return bfd_reloc_notsupported
;
10053 case R_ARM_TLS_GOTDESC
:
10058 insn
= bfd_get_32 (input_bfd
, contents
+ rel
->r_offset
);
10060 insn
-= 5; /* THUMB */
10062 insn
-= 8; /* ARM */
10064 bfd_put_32 (input_bfd
, insn
, contents
+ rel
->r_offset
);
10065 return bfd_reloc_continue
;
10067 case R_ARM_THM_TLS_DESCSEQ
:
10069 insn
= bfd_get_16 (input_bfd
, contents
+ rel
->r_offset
);
10070 if ((insn
& 0xff78) == 0x4478) /* add rx, pc */
10074 bfd_put_16 (input_bfd
, 0x46c0, contents
+ rel
->r_offset
);
10076 else if ((insn
& 0xffc0) == 0x6840) /* ldr rx,[ry,#4] */
10080 bfd_put_16 (input_bfd
, 0x46c0, contents
+ rel
->r_offset
);
10083 bfd_put_16 (input_bfd
, insn
& 0xf83f, contents
+ rel
->r_offset
);
10085 else if ((insn
& 0xff87) == 0x4780) /* blx rx */
10089 bfd_put_16 (input_bfd
, 0x46c0, contents
+ rel
->r_offset
);
10092 bfd_put_16 (input_bfd
, 0x4600 | (insn
& 0x78),
10093 contents
+ rel
->r_offset
);
10097 if ((insn
& 0xf000) == 0xf000 || (insn
& 0xf800) == 0xe800)
10098 /* It's a 32 bit instruction, fetch the rest of it for
10099 error generation. */
10100 insn
= (insn
<< 16)
10101 | bfd_get_16 (input_bfd
, contents
+ rel
->r_offset
+ 2);
10103 /* xgettext:c-format */
10104 (_("%pB(%pA+%#" PRIx64
"): "
10105 "unexpected %s instruction '%#lx' in TLS trampoline"),
10106 input_bfd
, input_sec
, (uint64_t) rel
->r_offset
,
10108 return bfd_reloc_notsupported
;
10112 case R_ARM_TLS_DESCSEQ
:
10114 insn
= bfd_get_32 (input_bfd
, contents
+ rel
->r_offset
);
10115 if ((insn
& 0xffff0ff0) == 0xe08f0000) /* add rx,pc,ry */
10119 bfd_put_32 (input_bfd
, 0xe1a00000 | (insn
& 0xffff),
10120 contents
+ rel
->r_offset
);
10122 else if ((insn
& 0xfff00fff) == 0xe5900004) /* ldr rx,[ry,#4]*/
10126 bfd_put_32 (input_bfd
, 0xe1a00000, contents
+ rel
->r_offset
);
10129 bfd_put_32 (input_bfd
, insn
& 0xfffff000,
10130 contents
+ rel
->r_offset
);
10132 else if ((insn
& 0xfffffff0) == 0xe12fff30) /* blx rx */
10136 bfd_put_32 (input_bfd
, 0xe1a00000, contents
+ rel
->r_offset
);
10139 bfd_put_32 (input_bfd
, 0xe1a00000 | (insn
& 0xf),
10140 contents
+ rel
->r_offset
);
10145 /* xgettext:c-format */
10146 (_("%pB(%pA+%#" PRIx64
"): "
10147 "unexpected %s instruction '%#lx' in TLS trampoline"),
10148 input_bfd
, input_sec
, (uint64_t) rel
->r_offset
,
10150 return bfd_reloc_notsupported
;
10154 case R_ARM_TLS_CALL
:
10155 /* GD->IE relaxation, turn the instruction into 'nop' or
10156 'ldr r0, [pc,r0]' */
10157 insn
= is_local
? 0xe1a00000 : 0xe79f0000;
10158 bfd_put_32 (input_bfd
, insn
, contents
+ rel
->r_offset
);
10161 case R_ARM_THM_TLS_CALL
:
10162 /* GD->IE relaxation. */
10164 /* add r0,pc; ldr r0, [r0] */
10166 else if (using_thumb2 (globals
))
10173 bfd_put_16 (input_bfd
, insn
>> 16, contents
+ rel
->r_offset
);
10174 bfd_put_16 (input_bfd
, insn
& 0xffff, contents
+ rel
->r_offset
+ 2);
10177 return bfd_reloc_ok
;
/* For a given value of n, calculate the value of G_n as required to
   deal with group relocations.  We return it in the form of an
   encoded constant-and-rotation, together with the final residual.  If n is
   specified as less than zero, then final_residual is filled with the
   input value and no further action is performed.  */
10187 calculate_group_reloc_mask (bfd_vma value
, int n
, bfd_vma
*final_residual
)
10191 bfd_vma encoded_g_n
= 0;
10192 bfd_vma residual
= value
; /* Also known as Y_n. */
10194 for (current_n
= 0; current_n
<= n
; current_n
++)
10198 /* Calculate which part of the value to mask. */
10205 /* Determine the most significant bit in the residual and
10206 align the resulting value to a 2-bit boundary. */
10207 for (msb
= 30; msb
>= 0; msb
-= 2)
10208 if (residual
& (3 << msb
))
10211 /* The desired shift is now (msb - 6), or zero, whichever
10218 /* Calculate g_n in 32-bit as well as encoded constant+rotation form. */
10219 g_n
= residual
& (0xff << shift
);
10220 encoded_g_n
= (g_n
>> shift
)
10221 | ((g_n
<= 0xff ? 0 : (32 - shift
) / 2) << 8);
10223 /* Calculate the residual for the next time around. */
10227 *final_residual
= residual
;
10229 return encoded_g_n
;
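/* Worked example (illustrative): for VALUE 0x12345678, G_0 masks the most
   significant 8 bits starting at an even bit position, giving 0x12000000;
   the residual passed to the next round is 0x00345678, so G_1 would be
   0x00344000, mirroring the AAELF group-relocation scheme the comment above
   describes.  */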
/* Given an ARM instruction, determine whether it is an ADD or a SUB.
   Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise.  */

static int
identify_add_or_sub (bfd_vma insn)
{
  int opcode = insn & 0x1e00000;

  if (opcode == 1 << 23) /* ADD */
    return 1;

  if (opcode == 1 << 22) /* SUB */
    return -1;

  return 0;
}
10249 /* Perform a relocation as part of a final link. */
10251 static bfd_reloc_status_type
10252 elf32_arm_final_link_relocate (reloc_howto_type
* howto
,
10255 asection
* input_section
,
10256 bfd_byte
* contents
,
10257 Elf_Internal_Rela
* rel
,
10259 struct bfd_link_info
* info
,
10260 asection
* sym_sec
,
10261 const char * sym_name
,
10262 unsigned char st_type
,
10263 enum arm_st_branch_type branch_type
,
10264 struct elf_link_hash_entry
* h
,
10265 bfd_boolean
* unresolved_reloc_p
,
10266 char ** error_message
)
10268 unsigned long r_type
= howto
->type
;
10269 unsigned long r_symndx
;
10270 bfd_byte
* hit_data
= contents
+ rel
->r_offset
;
10271 bfd_vma
* local_got_offsets
;
10272 bfd_vma
* local_tlsdesc_gotents
;
10275 asection
* sreloc
= NULL
;
10276 asection
* srelgot
;
10278 bfd_signed_vma signed_addend
;
10279 unsigned char dynreloc_st_type
;
10280 bfd_vma dynreloc_value
;
10281 struct elf32_arm_link_hash_table
* globals
;
10282 struct elf32_arm_link_hash_entry
*eh
;
10283 union gotplt_union
*root_plt
;
10284 struct arm_plt_info
*arm_plt
;
10285 bfd_vma plt_offset
;
10286 bfd_vma gotplt_offset
;
10287 bfd_boolean has_iplt_entry
;
10288 bfd_boolean resolved_to_zero
;
10290 globals
= elf32_arm_hash_table (info
);
10291 if (globals
== NULL
)
10292 return bfd_reloc_notsupported
;
10294 BFD_ASSERT (is_arm_elf (input_bfd
));
10295 BFD_ASSERT (howto
!= NULL
);
10297 /* Some relocation types map to different relocations depending on the
10298 target. We pick the right one here. */
10299 r_type
= arm_real_reloc_type (globals
, r_type
);
10301 /* It is possible to have linker relaxations on some TLS access
10302 models. Update our information here. */
10303 r_type
= elf32_arm_tls_transition (info
, r_type
, h
);
10305 if (r_type
!= howto
->type
)
10306 howto
= elf32_arm_howto_from_type (r_type
);
10308 eh
= (struct elf32_arm_link_hash_entry
*) h
;
10309 sgot
= globals
->root
.sgot
;
10310 local_got_offsets
= elf_local_got_offsets (input_bfd
);
10311 local_tlsdesc_gotents
= elf32_arm_local_tlsdesc_gotent (input_bfd
);
10313 if (globals
->root
.dynamic_sections_created
)
10314 srelgot
= globals
->root
.srelgot
;
10318 r_symndx
= ELF32_R_SYM (rel
->r_info
);
10320 if (globals
->use_rel
)
10322 addend
= bfd_get_32 (input_bfd
, hit_data
) & howto
->src_mask
;
10324 if (addend
& ((howto
->src_mask
+ 1) >> 1))
10326 signed_addend
= -1;
10327 signed_addend
&= ~ howto
->src_mask
;
10328 signed_addend
|= addend
;
10331 signed_addend
= addend
;
10334 addend
= signed_addend
= rel
->r_addend
;
10336 /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
10337 are resolving a function call relocation. */
10338 if (using_thumb_only (globals
)
10339 && (r_type
== R_ARM_THM_CALL
10340 || r_type
== R_ARM_THM_JUMP24
)
10341 && branch_type
== ST_BRANCH_TO_ARM
)
10342 branch_type
= ST_BRANCH_TO_THUMB
;
10344 /* Record the symbol information that should be used in dynamic
10346 dynreloc_st_type
= st_type
;
10347 dynreloc_value
= value
;
10348 if (branch_type
== ST_BRANCH_TO_THUMB
)
10349 dynreloc_value
|= 1;
  /* Find out whether the symbol has a PLT.  Set ST_VALUE, BRANCH_TYPE and
     VALUE appropriately for relocations that we resolve at link time.  */
  has_iplt_entry = FALSE;
  if (elf32_arm_get_plt_info (input_bfd, globals, eh, r_symndx, &root_plt,
                              &arm_plt)
      && root_plt->offset != (bfd_vma) -1)
    {
      plt_offset = root_plt->offset;
      gotplt_offset = arm_plt->got_offset;

      if (h == NULL || eh->is_iplt)
        {
          has_iplt_entry = TRUE;
          splt = globals->root.iplt;

          /* Populate .iplt entries here, because not all of them will
             be seen by finish_dynamic_symbol.  The lower bit is set if
             we have already populated the entry.  */
          if (plt_offset & 1)
            plt_offset--;
          else
            {
              if (elf32_arm_populate_plt_entry (output_bfd, info, root_plt, arm_plt,
                                                -1, dynreloc_value))
                root_plt->offset |= 1;
              else
                return bfd_reloc_notsupported;
            }

          /* Static relocations always resolve to the .iplt entry.  */
          st_type = STT_FUNC;
          value = (splt->output_section->vma
                   + splt->output_offset
                   + plt_offset);
          branch_type = ST_BRANCH_TO_ARM;

          /* If there are non-call relocations that resolve to the .iplt
             entry, then all dynamic ones must too.  */
          if (arm_plt->noncall_refcount != 0)
            {
              dynreloc_st_type = st_type;
              dynreloc_value = value;
            }
        }
      else
        /* We populate the .plt entry in finish_dynamic_symbol.  */
        splt = globals->root.splt;
    }
  else
    {
      splt = NULL;
      plt_offset = (bfd_vma) -1;
      gotplt_offset = (bfd_vma) -1;
    }
  resolved_to_zero = (h != NULL
                      && UNDEFWEAK_NO_DYNAMIC_RELOC (info, h));

  switch (r_type)
    {
    case R_ARM_NONE:
      /* We don't need to find a value for this symbol.  It's just a
         marker.  */
      *unresolved_reloc_p = FALSE;
      return bfd_reloc_ok;

    case R_ARM_ABS12:
      if (!globals->vxworks_p)
        return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
      /* Fall through.  */

    case R_ARM_PC24:
    case R_ARM_ABS32:
    case R_ARM_ABS32_NOI:
    case R_ARM_REL32:
    case R_ARM_REL32_NOI:
    case R_ARM_CALL:
    case R_ARM_JUMP24:
    case R_ARM_XPC25:
    case R_ARM_PREL31:
    case R_ARM_PLT32:
      /* Handle relocations which should use the PLT entry.  ABS32/REL32
         will use the symbol's value, which may point to a PLT entry, but we
         don't need to handle that here.  If we created a PLT entry, all
         branches in this object should go to it, except if the PLT is too
         far away, in which case a long branch stub should be inserted.  */
      if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
           && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
           && r_type != R_ARM_CALL
           && r_type != R_ARM_JUMP24
           && r_type != R_ARM_PLT32)
          && plt_offset != (bfd_vma) -1)
        {
          /* If we've created a .plt section, and assigned a PLT entry
             to this function, it must either be a STT_GNU_IFUNC reference
             or not be known to bind locally.  In other cases, we should
             have cleared the PLT entry by now.  */
          BFD_ASSERT (has_iplt_entry || !SYMBOL_CALLS_LOCAL (info, h));

          value = (splt->output_section->vma
                   + splt->output_offset
                   + plt_offset);
          *unresolved_reloc_p = FALSE;
          return _bfd_final_link_relocate (howto, input_bfd, input_section,
                                           contents, rel->r_offset, value,
                                           rel->r_addend);
        }
      /* When generating a shared object or relocatable executable, these
         relocations are copied into the output file to be resolved at
         run time.  */
      if ((bfd_link_pic (info)
           || globals->root.is_relocatable_executable
           || globals->fdpic_p)
          && (input_section->flags & SEC_ALLOC)
          && !(globals->vxworks_p
               && strcmp (input_section->output_section->name,
                          ".tls_vars") == 0)
          && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
              || !SYMBOL_CALLS_LOCAL (info, h))
          && !(input_bfd == globals->stub_bfd
               && strstr (input_section->name, STUB_SUFFIX))
          && (h == NULL
              || (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
                  && !resolved_to_zero)
              || h->root.type != bfd_link_hash_undefweak)
          && r_type != R_ARM_PC24
          && r_type != R_ARM_CALL
          && r_type != R_ARM_JUMP24
          && r_type != R_ARM_PREL31
          && r_type != R_ARM_PLT32)
        {
          Elf_Internal_Rela outrel;
          bfd_boolean skip, relocate;

          if ((r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
              && !h->def_regular)
            {
              char *v = _("shared object");

              if (bfd_link_executable (info))
                v = _("PIE executable");

              _bfd_error_handler
                (_("%pB: relocation %s against external or undefined symbol `%s'"
                   " can not be used when making a %s; recompile with -fPIC"), input_bfd,
                 elf32_arm_howto_table_1[r_type].name, h->root.root.string, v);
              return bfd_reloc_notsupported;
            }
          *unresolved_reloc_p = FALSE;

          if (sreloc == NULL && globals->root.dynamic_sections_created)
            {
              sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
                                                           ! globals->use_rel);

              if (sreloc == NULL)
                return bfd_reloc_notsupported;
            }

          skip = FALSE;
          relocate = FALSE;
          outrel.r_addend = addend;
          outrel.r_offset =
            _bfd_elf_section_offset (output_bfd, info, input_section,
                                     rel->r_offset);
          if (outrel.r_offset == (bfd_vma) -1)
            skip = TRUE;
          else if (outrel.r_offset == (bfd_vma) -2)
            skip = TRUE, relocate = TRUE;
          outrel.r_offset += (input_section->output_section->vma
                              + input_section->output_offset);

          if (skip)
            memset (&outrel, 0, sizeof outrel);
          else if (h != NULL
                   && h->dynindx != -1
                   && (!bfd_link_pic (info)
                       || !(bfd_link_pie (info)
                            || SYMBOLIC_BIND (info, h))
                       || !h->def_regular))
            outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
10540 /* This symbol is local, or marked to become local. */
10541 BFD_ASSERT (r_type
== R_ARM_ABS32
|| r_type
== R_ARM_ABS32_NOI
10542 || (globals
->fdpic_p
&& !bfd_link_pic(info
)));
10543 if (globals
->symbian_p
)
10547 /* On Symbian OS, the data segment and text segement
10548 can be relocated independently. Therefore, we
10549 must indicate the segment to which this
10550 relocation is relative. The BPABI allows us to
10551 use any symbol in the right segment; we just use
10552 the section symbol as it is convenient. (We
10553 cannot use the symbol given by "h" directly as it
10554 will not appear in the dynamic symbol table.)
10556 Note that the dynamic linker ignores the section
10557 symbol value, so we don't subtract osec->vma
10558 from the emitted reloc addend. */
10560 osec
= sym_sec
->output_section
;
10562 osec
= input_section
->output_section
;
10563 symbol
= elf_section_data (osec
)->dynindx
;
10566 struct elf_link_hash_table
*htab
= elf_hash_table (info
);
10568 if ((osec
->flags
& SEC_READONLY
) == 0
10569 && htab
->data_index_section
!= NULL
)
10570 osec
= htab
->data_index_section
;
10572 osec
= htab
->text_index_section
;
10573 symbol
= elf_section_data (osec
)->dynindx
;
10575 BFD_ASSERT (symbol
!= 0);
10578 /* On SVR4-ish systems, the dynamic loader cannot
10579 relocate the text and data segments independently,
10580 so the symbol does not matter. */
10582 if (dynreloc_st_type
== STT_GNU_IFUNC
)
10583 /* We have an STT_GNU_IFUNC symbol that doesn't resolve
10584 to the .iplt entry. Instead, every non-call reference
10585 must use an R_ARM_IRELATIVE relocation to obtain the
10586 correct run-time address. */
10587 outrel
.r_info
= ELF32_R_INFO (symbol
, R_ARM_IRELATIVE
);
10588 else if (globals
->fdpic_p
&& !bfd_link_pic(info
))
10591 outrel
.r_info
= ELF32_R_INFO (symbol
, R_ARM_RELATIVE
);
10592 if (globals
->use_rel
)
10595 outrel
.r_addend
+= dynreloc_value
;
10599 arm_elf_add_rofixup(output_bfd
, globals
->srofixup
, outrel
.r_offset
);
10601 elf32_arm_add_dynreloc (output_bfd
, info
, sreloc
, &outrel
);
10603 /* If this reloc is against an external symbol, we do not want to
10604 fiddle with the addend. Otherwise, we need to include the symbol
10605 value so that it becomes an addend for the dynamic reloc. */
10607 return bfd_reloc_ok
;
10609 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
10610 contents
, rel
->r_offset
,
10611 dynreloc_value
, (bfd_vma
) 0);
10613 else switch (r_type
)
10616 return elf32_arm_abs12_reloc (input_bfd
, hit_data
, value
+ addend
);
10618 case R_ARM_XPC25
: /* Arm BLX instruction. */
10621 case R_ARM_PC24
: /* Arm B/BL instruction. */
10624 struct elf32_arm_stub_hash_entry
*stub_entry
= NULL
;
10626 if (r_type
== R_ARM_XPC25
)
10628 /* Check for Arm calling Arm function. */
10629 /* FIXME: Should we translate the instruction into a BL
10630 instruction instead ? */
10631 if (branch_type
!= ST_BRANCH_TO_THUMB
)
10633 (_("\%pB: warning: %s BLX instruction targets"
10634 " %s function '%s'"),
10636 "ARM", h
? h
->root
.root
.string
: "(local)");
10638 else if (r_type
== R_ARM_PC24
)
10640 /* Check for Arm calling Thumb function. */
10641 if (branch_type
== ST_BRANCH_TO_THUMB
)
10643 if (elf32_arm_to_thumb_stub (info
, sym_name
, input_bfd
,
10644 output_bfd
, input_section
,
10645 hit_data
, sym_sec
, rel
->r_offset
,
10646 signed_addend
, value
,
10648 return bfd_reloc_ok
;
10650 return bfd_reloc_dangerous
;
10654 /* Check if a stub has to be inserted because the
10655 destination is too far or we are changing mode. */
10656 if ( r_type
== R_ARM_CALL
10657 || r_type
== R_ARM_JUMP24
10658 || r_type
== R_ARM_PLT32
)
10660 enum elf32_arm_stub_type stub_type
= arm_stub_none
;
10661 struct elf32_arm_link_hash_entry
*hash
;
10663 hash
= (struct elf32_arm_link_hash_entry
*) h
;
10664 stub_type
= arm_type_of_stub (info
, input_section
, rel
,
10665 st_type
, &branch_type
,
10666 hash
, value
, sym_sec
,
10667 input_bfd
, sym_name
);
10669 if (stub_type
!= arm_stub_none
)
10671 /* The target is out of reach, so redirect the
10672 branch to the local stub for this function. */
10673 stub_entry
= elf32_arm_get_stub_entry (input_section
,
10678 if (stub_entry
!= NULL
)
10679 value
= (stub_entry
->stub_offset
10680 + stub_entry
->stub_sec
->output_offset
10681 + stub_entry
->stub_sec
->output_section
->vma
);
10683 if (plt_offset
!= (bfd_vma
) -1)
10684 *unresolved_reloc_p
= FALSE
;
10689 /* If the call goes through a PLT entry, make sure to
10690 check distance to the right destination address. */
10691 if (plt_offset
!= (bfd_vma
) -1)
10693 value
= (splt
->output_section
->vma
10694 + splt
->output_offset
10696 *unresolved_reloc_p
= FALSE
;
10697 /* The PLT entry is in ARM mode, regardless of the
10698 target function. */
10699 branch_type
= ST_BRANCH_TO_ARM
;
            /* The ARM ELF ABI says that this reloc is computed as: S - P + A
               where:
                S is the address of the symbol in the relocation.
                P is address of the instruction being relocated.
                A is the addend (extracted from the instruction) in bytes.

               S is held in 'value'.
               P is the base address of the section containing the
                 instruction plus the offset of the reloc into that
                 section, ie:
                   (input_section->output_section->vma +
                    input_section->output_offset +
                    rel->r_offset).
               A is the addend, converted into bytes, ie:
                 (signed_addend * 4)

               Note: None of these operations have knowledge of the pipeline
               size of the processor, thus it is up to the assembler to
               encode this information into the addend.  */
            value -= (input_section->output_section->vma
                      + input_section->output_offset);
            value -= rel->r_offset;
            if (globals->use_rel)
              value += (signed_addend << howto->size);
            else
              /* RELA addends do not have to be adjusted by howto->size.  */
              value += signed_addend;

            signed_addend = value;
            signed_addend >>= howto->rightshift;
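            /* For instance, a REL-format B/BL whose 24-bit field holds -2
               contributes A = -8 bytes after the shift above (the usual
               pipeline bias encoded by the assembler); for a target 0x100
               bytes beyond the reloc, value then ends up as 0xf8 and
               signed_addend (the word offset that goes back into the
               instruction) as 0x3e.  */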
            /* A branch to an undefined weak symbol is turned into a jump to
               the next instruction unless a PLT entry will be created.
               Do the same for local undefined symbols (but not for STN_UNDEF).
               The jump to the next instruction is optimized as a NOP depending
               on the architecture.  */
            if (h ? (h->root.type == bfd_link_hash_undefweak
                     && plt_offset == (bfd_vma) -1)
                  : r_symndx != STN_UNDEF && bfd_is_und_section (sym_sec))
              {
                value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000);

                if (arch_has_arm_nop (globals))
                  value |= 0x0320f000;
                else
                  value |= 0x01a00000; /* Using pre-UAL nop: mov r0, r0.  */
              }
            else
              {
                /* Perform a signed range check.  */
                if (   signed_addend >   ((bfd_signed_vma)  (howto->dst_mask >> 1))
                    || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
                  return bfd_reloc_overflow;

                addend = (value & 2);

                value = (signed_addend & howto->dst_mask)
                  | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));

                if (r_type == R_ARM_CALL)
                  {
                    /* Set the H bit in the BLX instruction.  */
                    if (branch_type == ST_BRANCH_TO_THUMB)
                      {
                        if (addend)
                          value |= (1 << 24);
                        else
                          value &= ~(bfd_vma)(1 << 24);
                      }

                    /* Select the correct instruction (BL or BLX).  */
                    /* Only if we are not handling a BL to a stub.  In this
                       case, mode switching is performed by the stub.  */
                    if (branch_type == ST_BRANCH_TO_THUMB && !stub_entry)
                      value |= (1 << 28);
                    else if (stub_entry || branch_type != ST_BRANCH_UNKNOWN)
                      {
                        value &= ~(bfd_vma)(1 << 28);
                        value |= (1 << 24);
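                        /* Note: a BL uses the ordinary condition field, so for
                           an unconditional call bits 31-28 are 0b1110 and bit
                           24 is the L bit.  Setting bit 28 turns that field
                           into 0b1111, the BLX (immediate) encoding, where bit
                           24 becomes the H bit; clearing bit 28 and setting
                           bit 24 restores a plain BL.  */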
                      }
                  }
              }
          }
          break;

        case R_ARM_ABS32:
          value += addend;
          if (branch_type == ST_BRANCH_TO_THUMB)
            value |= 1;
          break;

        case R_ARM_ABS32_NOI:
          value += addend;
          break;

        case R_ARM_REL32:
          value += addend;
          if (branch_type == ST_BRANCH_TO_THUMB)
            value |= 1;
          value -= (input_section->output_section->vma
                    + input_section->output_offset + rel->r_offset);
          break;

        case R_ARM_REL32_NOI:
          value += addend;
          value -= (input_section->output_section->vma
                    + input_section->output_offset + rel->r_offset);
          break;

        case R_ARM_PREL31:
          value -= (input_section->output_section->vma
                    + input_section->output_offset + rel->r_offset);
          value += signed_addend;
          if (! h || h->root.type != bfd_link_hash_undefweak)
            {
              /* Check for overflow.  */
              if ((value ^ (value >> 1)) & (1 << 30))
                return bfd_reloc_overflow;
            }
          value &= 0x7fffffff;
          value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
          if (branch_type == ST_BRANCH_TO_THUMB)
            value |= 1;
          break;
        }

      bfd_put_32 (input_bfd, value, hit_data);
      return bfd_reloc_ok;
10834 /* PR 16202: Refectch the addend using the correct size. */
10835 if (globals
->use_rel
)
10836 addend
= bfd_get_8 (input_bfd
, hit_data
);
10839 /* There is no way to tell whether the user intended to use a signed or
10840 unsigned addend. When checking for overflow we accept either,
10841 as specified by the AAELF. */
10842 if ((long) value
> 0xff || (long) value
< -0x80)
10843 return bfd_reloc_overflow
;
10845 bfd_put_8 (input_bfd
, value
, hit_data
);
10846 return bfd_reloc_ok
;
10849 /* PR 16202: Refectch the addend using the correct size. */
10850 if (globals
->use_rel
)
10851 addend
= bfd_get_16 (input_bfd
, hit_data
);
10854 /* See comment for R_ARM_ABS8. */
10855 if ((long) value
> 0xffff || (long) value
< -0x8000)
10856 return bfd_reloc_overflow
;
10858 bfd_put_16 (input_bfd
, value
, hit_data
);
10859 return bfd_reloc_ok
;
10861 case R_ARM_THM_ABS5
:
10862 /* Support ldr and str instructions for the thumb. */
10863 if (globals
->use_rel
)
10865 /* Need to refetch addend. */
10866 addend
= bfd_get_16 (input_bfd
, hit_data
) & howto
->src_mask
;
10867 /* ??? Need to determine shift amount from operand size. */
10868 addend
>>= howto
->rightshift
;
10872 /* ??? Isn't value unsigned? */
10873 if ((long) value
> 0x1f || (long) value
< -0x10)
10874 return bfd_reloc_overflow
;
10876 /* ??? Value needs to be properly shifted into place first. */
10877 value
|= bfd_get_16 (input_bfd
, hit_data
) & 0xf83f;
10878 bfd_put_16 (input_bfd
, value
, hit_data
);
10879 return bfd_reloc_ok
;
10881 case R_ARM_THM_ALU_PREL_11_0
:
10882 /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw). */
10885 bfd_signed_vma relocation
;
10887 insn
= (bfd_get_16 (input_bfd
, hit_data
) << 16)
10888 | bfd_get_16 (input_bfd
, hit_data
+ 2);
10890 if (globals
->use_rel
)
10892 signed_addend
= (insn
& 0xff) | ((insn
& 0x7000) >> 4)
10893 | ((insn
& (1 << 26)) >> 15);
10894 if (insn
& 0xf00000)
10895 signed_addend
= -signed_addend
;
10898 relocation
= value
+ signed_addend
;
10899 relocation
-= Pa (input_section
->output_section
->vma
10900 + input_section
->output_offset
10903 /* PR 21523: Use an absolute value. The user of this reloc will
10904 have already selected an ADD or SUB insn appropriately. */
10905 value
= labs (relocation
);
10907 if (value
>= 0x1000)
10908 return bfd_reloc_overflow
;
10910 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
10911 if (branch_type
== ST_BRANCH_TO_THUMB
)
10914 insn
= (insn
& 0xfb0f8f00) | (value
& 0xff)
10915 | ((value
& 0x700) << 4)
10916 | ((value
& 0x800) << 15);
10917 if (relocation
< 0)
10920 bfd_put_16 (input_bfd
, insn
>> 16, hit_data
);
10921 bfd_put_16 (input_bfd
, insn
& 0xffff, hit_data
+ 2);
10923 return bfd_reloc_ok
;
    case R_ARM_THM_PC8:
      /* PR 10073: This reloc is not generated by the GNU toolchain,
         but it is supported for compatibility with third party libraries
         generated by other compilers, specifically the ARM/IAR.  */
      {
        bfd_vma insn;
        bfd_signed_vma relocation;

        insn = bfd_get_16 (input_bfd, hit_data);

        if (globals->use_rel)
          addend = ((((insn & 0x00ff) << 2) + 4) & 0x3ff) -4;
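        /* The low eight bits of the instruction hold an unsigned word
           offset (0-255 words) from the 4-byte-aligned PC; the << 2 above
           converts it to bytes before the Pa-relative calculation below,
           while the + 4 / & 0x3ff / - 4 sequence simply masks the
           intermediate sum to the 10-bit byte range the encoding can
           express.  */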
        relocation = value + addend;
        relocation -= Pa (input_section->output_section->vma
                          + input_section->output_offset
                          + rel->r_offset);

        value = relocation;

        /* We do not check for overflow of this reloc.  Although strictly
           speaking this is incorrect, it appears to be necessary in order
           to work with IAR generated relocs.  Since GCC and GAS do not
           generate R_ARM_THM_PC8 relocs, the lack of a check should not be
           a problem for them.  */

        insn = (insn & 0xff00) | (value >> 2);

        bfd_put_16 (input_bfd, insn, hit_data);

        return bfd_reloc_ok;
      }
10960 case R_ARM_THM_PC12
:
10961 /* Corresponds to: ldr.w reg, [pc, #offset]. */
10964 bfd_signed_vma relocation
;
10966 insn
= (bfd_get_16 (input_bfd
, hit_data
) << 16)
10967 | bfd_get_16 (input_bfd
, hit_data
+ 2);
10969 if (globals
->use_rel
)
10971 signed_addend
= insn
& 0xfff;
10972 if (!(insn
& (1 << 23)))
10973 signed_addend
= -signed_addend
;
10976 relocation
= value
+ signed_addend
;
10977 relocation
-= Pa (input_section
->output_section
->vma
10978 + input_section
->output_offset
10981 value
= relocation
;
10983 if (value
>= 0x1000)
10984 return bfd_reloc_overflow
;
10986 insn
= (insn
& 0xff7ff000) | value
;
10987 if (relocation
>= 0)
10990 bfd_put_16 (input_bfd
, insn
>> 16, hit_data
);
10991 bfd_put_16 (input_bfd
, insn
& 0xffff, hit_data
+ 2);
10993 return bfd_reloc_ok
;
10996 case R_ARM_THM_XPC22
:
10997 case R_ARM_THM_CALL
:
10998 case R_ARM_THM_JUMP24
:
10999 /* Thumb BL (branch long instruction). */
11001 bfd_vma relocation
;
11002 bfd_vma reloc_sign
;
11003 bfd_boolean overflow
= FALSE
;
11004 bfd_vma upper_insn
= bfd_get_16 (input_bfd
, hit_data
);
11005 bfd_vma lower_insn
= bfd_get_16 (input_bfd
, hit_data
+ 2);
11006 bfd_signed_vma reloc_signed_max
;
11007 bfd_signed_vma reloc_signed_min
;
11009 bfd_signed_vma signed_check
;
11011 const int thumb2
= using_thumb2 (globals
);
11012 const int thumb2_bl
= using_thumb2_bl (globals
);
11014 /* A branch to an undefined weak symbol is turned into a jump to
11015 the next instruction unless a PLT entry will be created.
11016 The jump to the next instruction is optimized as a NOP.W for
11017 Thumb-2 enabled architectures. */
11018 if (h
&& h
->root
.type
== bfd_link_hash_undefweak
11019 && plt_offset
== (bfd_vma
) -1)
11023 bfd_put_16 (input_bfd
, 0xf3af, hit_data
);
11024 bfd_put_16 (input_bfd
, 0x8000, hit_data
+ 2);
11028 bfd_put_16 (input_bfd
, 0xe000, hit_data
);
11029 bfd_put_16 (input_bfd
, 0xbf00, hit_data
+ 2);
11031 return bfd_reloc_ok
;
        /* Fetch the addend.  We use the Thumb-2 encoding (backwards compatible
           with Thumb-1) involving the J1 and J2 bits.  */
        if (globals->use_rel)
          {
            bfd_vma s = (upper_insn & (1 << 10)) >> 10;
            bfd_vma upper = upper_insn & 0x3ff;
            bfd_vma lower = lower_insn & 0x7ff;
            bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
            bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
            bfd_vma i1 = j1 ^ s ? 0 : 1;
            bfd_vma i2 = j2 ^ s ? 0 : 1;

            addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
            addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);

            signed_addend = addend;
          }
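        /* This reconstructs the 25-bit branch offset S:I1:I2:imm10:imm11:0
           of the Thumb-2 BL/BLX encoding, where I1 = NOT(J1 XOR S) and
           I2 = NOT(J2 XOR S); the final OR/subtract pair sign-extends it,
           so e.g. S = 1 always produces a negative addend.  */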
11053 if (r_type
== R_ARM_THM_XPC22
)
11055 /* Check for Thumb to Thumb call. */
11056 /* FIXME: Should we translate the instruction into a BL
11057 instruction instead ? */
11058 if (branch_type
== ST_BRANCH_TO_THUMB
)
11060 (_("%pB: warning: %s BLX instruction targets"
11061 " %s function '%s'"),
11062 input_bfd
, "Thumb",
11063 "Thumb", h
? h
->root
.root
.string
: "(local)");
11067 /* If it is not a call to Thumb, assume call to Arm.
11068 If it is a call relative to a section name, then it is not a
11069 function call at all, but rather a long jump. Calls through
11070 the PLT do not require stubs. */
11071 if (branch_type
== ST_BRANCH_TO_ARM
&& plt_offset
== (bfd_vma
) -1)
11073 if (globals
->use_blx
&& r_type
== R_ARM_THM_CALL
)
11075 /* Convert BL to BLX. */
11076 lower_insn
= (lower_insn
& ~0x1000) | 0x0800;
11078 else if (( r_type
!= R_ARM_THM_CALL
)
11079 && (r_type
!= R_ARM_THM_JUMP24
))
11081 if (elf32_thumb_to_arm_stub
11082 (info
, sym_name
, input_bfd
, output_bfd
, input_section
,
11083 hit_data
, sym_sec
, rel
->r_offset
, signed_addend
, value
,
11085 return bfd_reloc_ok
;
11087 return bfd_reloc_dangerous
;
11090 else if (branch_type
== ST_BRANCH_TO_THUMB
11091 && globals
->use_blx
11092 && r_type
== R_ARM_THM_CALL
)
11094 /* Make sure this is a BL. */
11095 lower_insn
|= 0x1800;
11099 enum elf32_arm_stub_type stub_type
= arm_stub_none
;
11100 if (r_type
== R_ARM_THM_CALL
|| r_type
== R_ARM_THM_JUMP24
)
11102 /* Check if a stub has to be inserted because the destination
11104 struct elf32_arm_stub_hash_entry
*stub_entry
;
11105 struct elf32_arm_link_hash_entry
*hash
;
11107 hash
= (struct elf32_arm_link_hash_entry
*) h
;
11109 stub_type
= arm_type_of_stub (info
, input_section
, rel
,
11110 st_type
, &branch_type
,
11111 hash
, value
, sym_sec
,
11112 input_bfd
, sym_name
);
11114 if (stub_type
!= arm_stub_none
)
11116 /* The target is out of reach or we are changing modes, so
11117 redirect the branch to the local stub for this
11119 stub_entry
= elf32_arm_get_stub_entry (input_section
,
11123 if (stub_entry
!= NULL
)
11125 value
= (stub_entry
->stub_offset
11126 + stub_entry
->stub_sec
->output_offset
11127 + stub_entry
->stub_sec
->output_section
->vma
);
11129 if (plt_offset
!= (bfd_vma
) -1)
11130 *unresolved_reloc_p
= FALSE
;
11133 /* If this call becomes a call to Arm, force BLX. */
11134 if (globals
->use_blx
&& (r_type
== R_ARM_THM_CALL
))
11137 && !arm_stub_is_thumb (stub_entry
->stub_type
))
11138 || branch_type
!= ST_BRANCH_TO_THUMB
)
11139 lower_insn
= (lower_insn
& ~0x1000) | 0x0800;
11144 /* Handle calls via the PLT. */
11145 if (stub_type
== arm_stub_none
&& plt_offset
!= (bfd_vma
) -1)
11147 value
= (splt
->output_section
->vma
11148 + splt
->output_offset
11151 if (globals
->use_blx
11152 && r_type
== R_ARM_THM_CALL
11153 && ! using_thumb_only (globals
))
11155 /* If the Thumb BLX instruction is available, convert
11156 the BL to a BLX instruction to call the ARM-mode
11158 lower_insn
= (lower_insn
& ~0x1000) | 0x0800;
11159 branch_type
= ST_BRANCH_TO_ARM
;
11163 if (! using_thumb_only (globals
))
11164 /* Target the Thumb stub before the ARM PLT entry. */
11165 value
-= PLT_THUMB_STUB_SIZE
;
11166 branch_type
= ST_BRANCH_TO_THUMB
;
11168 *unresolved_reloc_p
= FALSE
;
11171 relocation
= value
+ signed_addend
;
11173 relocation
-= (input_section
->output_section
->vma
11174 + input_section
->output_offset
11177 check
= relocation
>> howto
->rightshift
;
11179 /* If this is a signed value, the rightshift just dropped
11180 leading 1 bits (assuming twos complement). */
11181 if ((bfd_signed_vma
) relocation
>= 0)
11182 signed_check
= check
;
11184 signed_check
= check
| ~((bfd_vma
) -1 >> howto
->rightshift
);
11186 /* Calculate the permissable maximum and minimum values for
11187 this relocation according to whether we're relocating for
11189 bitsize
= howto
->bitsize
;
11192 reloc_signed_max
= (1 << (bitsize
- 1)) - 1;
11193 reloc_signed_min
= ~reloc_signed_max
;
11195 /* Assumes two's complement. */
11196 if (signed_check
> reloc_signed_max
|| signed_check
< reloc_signed_min
)
11199 if ((lower_insn
& 0x5000) == 0x4000)
11200 /* For a BLX instruction, make sure that the relocation is rounded up
11201 to a word boundary. This follows the semantics of the instruction
11202 which specifies that bit 1 of the target address will come from bit
11203 1 of the base address. */
11204 relocation
= (relocation
+ 2) & ~ 3;
11206 /* Put RELOCATION back into the insn. Assumes two's complement.
11207 We use the Thumb-2 encoding, which is safe even if dealing with
11208 a Thumb-1 instruction by virtue of our overflow check above. */
11209 reloc_sign
= (signed_check
< 0) ? 1 : 0;
11210 upper_insn
= (upper_insn
& ~(bfd_vma
) 0x7ff)
11211 | ((relocation
>> 12) & 0x3ff)
11212 | (reloc_sign
<< 10);
11213 lower_insn
= (lower_insn
& ~(bfd_vma
) 0x2fff)
11214 | (((!((relocation
>> 23) & 1)) ^ reloc_sign
) << 13)
11215 | (((!((relocation
>> 22) & 1)) ^ reloc_sign
) << 11)
11216 | ((relocation
>> 1) & 0x7ff);
11218 /* Put the relocated value back in the object file: */
11219 bfd_put_16 (input_bfd
, upper_insn
, hit_data
);
11220 bfd_put_16 (input_bfd
, lower_insn
, hit_data
+ 2);
11222 return (overflow
? bfd_reloc_overflow
: bfd_reloc_ok
);
    case R_ARM_THM_JUMP19:
      /* Thumb32 conditional branch instruction.  */
      {
        bfd_vma relocation;
        bfd_boolean overflow = FALSE;
        bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
        bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
        bfd_signed_vma reloc_signed_max = 0xffffe;
        bfd_signed_vma reloc_signed_min = -0x100000;
        bfd_signed_vma signed_check;
        enum elf32_arm_stub_type stub_type = arm_stub_none;
        struct elf32_arm_stub_hash_entry *stub_entry;
        struct elf32_arm_link_hash_entry *hash;

        /* Need to refetch the addend, reconstruct the top three bits,
           and squish the two 11 bit pieces together.  */
        if (globals->use_rel)
          {
            bfd_vma S = (upper_insn & 0x0400) >> 10;
            bfd_vma upper = (upper_insn & 0x003f);
            bfd_vma J1 = (lower_insn & 0x2000) >> 13;
            bfd_vma J2 = (lower_insn & 0x0800) >> 11;
            bfd_vma lower = (lower_insn & 0x07ff);

            upper |= J1 << 6;
            upper |= J2 << 7;
            upper |= (!S) << 8;
            upper -= 0x0100; /* Sign extend.  */

            addend = (upper << 12) | (lower << 1);
            signed_addend = addend;
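            /* For the conditional-branch (B<cond>.W) encoding the offset is
               S:J2:J1:imm6:imm11:0 sign-extended to 21 bits; unlike BL, J1
               and J2 are used directly rather than XORed with S, hence the
               simpler reconstruction above and the roughly +/-1 MB range
               enforced by reloc_signed_max/min.  */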
          }

        /* Handle calls via the PLT.  */
        if (plt_offset != (bfd_vma) -1)
          {
            value = (splt->output_section->vma
                     + splt->output_offset
                     + plt_offset);
            /* Target the Thumb stub before the ARM PLT entry.  */
            value -= PLT_THUMB_STUB_SIZE;
            *unresolved_reloc_p = FALSE;
          }

        hash = (struct elf32_arm_link_hash_entry *) h;
11272 stub_type
= arm_type_of_stub (info
, input_section
, rel
,
11273 st_type
, &branch_type
,
11274 hash
, value
, sym_sec
,
11275 input_bfd
, sym_name
);
11276 if (stub_type
!= arm_stub_none
)
11278 stub_entry
= elf32_arm_get_stub_entry (input_section
,
11282 if (stub_entry
!= NULL
)
11284 value
= (stub_entry
->stub_offset
11285 + stub_entry
->stub_sec
->output_offset
11286 + stub_entry
->stub_sec
->output_section
->vma
);
11290 relocation
= value
+ signed_addend
;
11291 relocation
-= (input_section
->output_section
->vma
11292 + input_section
->output_offset
11294 signed_check
= (bfd_signed_vma
) relocation
;
11296 if (signed_check
> reloc_signed_max
|| signed_check
< reloc_signed_min
)
11299 /* Put RELOCATION back into the insn. */
11301 bfd_vma S
= (relocation
& 0x00100000) >> 20;
11302 bfd_vma J2
= (relocation
& 0x00080000) >> 19;
11303 bfd_vma J1
= (relocation
& 0x00040000) >> 18;
11304 bfd_vma hi
= (relocation
& 0x0003f000) >> 12;
11305 bfd_vma lo
= (relocation
& 0x00000ffe) >> 1;
11307 upper_insn
= (upper_insn
& 0xfbc0) | (S
<< 10) | hi
;
11308 lower_insn
= (lower_insn
& 0xd000) | (J1
<< 13) | (J2
<< 11) | lo
;
11311 /* Put the relocated value back in the object file: */
11312 bfd_put_16 (input_bfd
, upper_insn
, hit_data
);
11313 bfd_put_16 (input_bfd
, lower_insn
, hit_data
+ 2);
11315 return (overflow
? bfd_reloc_overflow
: bfd_reloc_ok
);
    case R_ARM_THM_JUMP11:
    case R_ARM_THM_JUMP8:
    case R_ARM_THM_JUMP6:
      /* Thumb B (branch) instruction.  */
      {
        bfd_signed_vma relocation;
        bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
        bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
        bfd_signed_vma signed_check;

        /* CZB cannot jump backward.  */
        if (r_type == R_ARM_THM_JUMP6)
          reloc_signed_min = 0;

        if (globals->use_rel)
          {
            /* Need to refetch addend.  */
            addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
            if (addend & ((howto->src_mask + 1) >> 1))
              {
                signed_addend = -1;
                signed_addend &= ~ howto->src_mask;
                signed_addend |= addend;
              }
            else
              signed_addend = addend;
            /* The value in the insn has been right shifted.  We need to
               undo this, so that we can perform the address calculation
               in terms of bytes.  */
            signed_addend <<= howto->rightshift;
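            /* E.g. the Thumb B (T2) encoding stores a halfword offset, so a
               fetched field of 0x100 stands for 0x200 bytes; shifting left
               by howto->rightshift puts the addend back in bytes so it can
               be combined with 'value' below.  */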
11349 relocation
= value
+ signed_addend
;
11351 relocation
-= (input_section
->output_section
->vma
11352 + input_section
->output_offset
11355 relocation
>>= howto
->rightshift
;
11356 signed_check
= relocation
;
11358 if (r_type
== R_ARM_THM_JUMP6
)
11359 relocation
= ((relocation
& 0x0020) << 4) | ((relocation
& 0x001f) << 3);
11361 relocation
&= howto
->dst_mask
;
11362 relocation
|= (bfd_get_16 (input_bfd
, hit_data
) & (~ howto
->dst_mask
));
11364 bfd_put_16 (input_bfd
, relocation
, hit_data
);
11366 /* Assumes two's complement. */
11367 if (signed_check
> reloc_signed_max
|| signed_check
< reloc_signed_min
)
11368 return bfd_reloc_overflow
;
11370 return bfd_reloc_ok
;
11373 case R_ARM_ALU_PCREL7_0
:
11374 case R_ARM_ALU_PCREL15_8
:
11375 case R_ARM_ALU_PCREL23_15
:
11378 bfd_vma relocation
;
11380 insn
= bfd_get_32 (input_bfd
, hit_data
);
11381 if (globals
->use_rel
)
11383 /* Extract the addend. */
11384 addend
= (insn
& 0xff) << ((insn
& 0xf00) >> 7);
11385 signed_addend
= addend
;
11387 relocation
= value
+ signed_addend
;
11389 relocation
-= (input_section
->output_section
->vma
11390 + input_section
->output_offset
11392 insn
= (insn
& ~0xfff)
11393 | ((howto
->bitpos
<< 7) & 0xf00)
11394 | ((relocation
>> howto
->bitpos
) & 0xff);
11395 bfd_put_32 (input_bfd
, value
, hit_data
);
11397 return bfd_reloc_ok
;
11399 case R_ARM_GNU_VTINHERIT
:
11400 case R_ARM_GNU_VTENTRY
:
11401 return bfd_reloc_ok
;
11403 case R_ARM_GOTOFF32
:
11404 /* Relocation is relative to the start of the
11405 global offset table. */
11407 BFD_ASSERT (sgot
!= NULL
);
11409 return bfd_reloc_notsupported
;
11411 /* If we are addressing a Thumb function, we need to adjust the
11412 address by one, so that attempts to call the function pointer will
11413 correctly interpret it as Thumb code. */
11414 if (branch_type
== ST_BRANCH_TO_THUMB
)
11417 /* Note that sgot->output_offset is not involved in this
11418 calculation. We always want the start of .got. If we
11419 define _GLOBAL_OFFSET_TABLE in a different way, as is
11420 permitted by the ABI, we might have to change this
11422 value
-= sgot
->output_section
->vma
;
11423 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
11424 contents
, rel
->r_offset
, value
,
11428 /* Use global offset table as symbol value. */
11429 BFD_ASSERT (sgot
!= NULL
);
11432 return bfd_reloc_notsupported
;
11434 *unresolved_reloc_p
= FALSE
;
11435 value
= sgot
->output_section
->vma
;
11436 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
11437 contents
, rel
->r_offset
, value
,
11441 case R_ARM_GOT_PREL
:
11442 /* Relocation is to the entry for this symbol in the
11443 global offset table. */
11445 return bfd_reloc_notsupported
;
11447 if (dynreloc_st_type
== STT_GNU_IFUNC
11448 && plt_offset
!= (bfd_vma
) -1
11449 && (h
== NULL
|| SYMBOL_REFERENCES_LOCAL (info
, h
)))
11451 /* We have a relocation against a locally-binding STT_GNU_IFUNC
11452 symbol, and the relocation resolves directly to the runtime
11453 target rather than to the .iplt entry. This means that any
11454 .got entry would be the same value as the .igot.plt entry,
11455 so there's no point creating both. */
11456 sgot
= globals
->root
.igotplt
;
11457 value
= sgot
->output_offset
+ gotplt_offset
;
11459 else if (h
!= NULL
)
11463 off
= h
->got
.offset
;
11464 BFD_ASSERT (off
!= (bfd_vma
) -1);
11465 if ((off
& 1) != 0)
11467 /* We have already processsed one GOT relocation against
11470 if (globals
->root
.dynamic_sections_created
11471 && !SYMBOL_REFERENCES_LOCAL (info
, h
))
11472 *unresolved_reloc_p
= FALSE
;
11476 Elf_Internal_Rela outrel
;
11479 if (((h
->dynindx
!= -1) || globals
->fdpic_p
)
11480 && !SYMBOL_REFERENCES_LOCAL (info
, h
))
11482 /* If the symbol doesn't resolve locally in a static
11483 object, we have an undefined reference. If the
11484 symbol doesn't resolve locally in a dynamic object,
11485 it should be resolved by the dynamic linker. */
11486 if (globals
->root
.dynamic_sections_created
)
11488 outrel
.r_info
= ELF32_R_INFO (h
->dynindx
, R_ARM_GLOB_DAT
);
11489 *unresolved_reloc_p
= FALSE
;
11493 outrel
.r_addend
= 0;
11497 if (dynreloc_st_type
== STT_GNU_IFUNC
)
11498 outrel
.r_info
= ELF32_R_INFO (0, R_ARM_IRELATIVE
);
11499 else if (bfd_link_pic (info
)
11500 && (ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
11501 || h
->root
.type
!= bfd_link_hash_undefweak
))
11502 outrel
.r_info
= ELF32_R_INFO (0, R_ARM_RELATIVE
);
11503 else if (globals
->fdpic_p
)
11507 outrel
.r_addend
= dynreloc_value
;
11510 /* The GOT entry is initialized to zero by default.
11511 See if we should install a different value. */
11512 if (outrel
.r_addend
!= 0
11513 && (outrel
.r_info
== 0 || globals
->use_rel
|| isrofixup
))
11515 bfd_put_32 (output_bfd
, outrel
.r_addend
,
11516 sgot
->contents
+ off
);
11517 outrel
.r_addend
= 0;
11520 if (outrel
.r_info
!= 0 && !isrofixup
)
11522 outrel
.r_offset
= (sgot
->output_section
->vma
11523 + sgot
->output_offset
11525 elf32_arm_add_dynreloc (output_bfd
, info
, srelgot
, &outrel
);
11527 else if (isrofixup
)
11529 arm_elf_add_rofixup(output_bfd
,
11530 elf32_arm_hash_table(info
)->srofixup
,
11531 sgot
->output_section
->vma
11532 + sgot
->output_offset
+ off
);
11534 h
->got
.offset
|= 1;
11536 value
= sgot
->output_offset
+ off
;
11542 BFD_ASSERT (local_got_offsets
!= NULL
11543 && local_got_offsets
[r_symndx
] != (bfd_vma
) -1);
11545 off
= local_got_offsets
[r_symndx
];
11547 /* The offset must always be a multiple of 4. We use the
11548 least significant bit to record whether we have already
11549 generated the necessary reloc. */
11550 if ((off
& 1) != 0)
11554 if (globals
->use_rel
)
11555 bfd_put_32 (output_bfd
, dynreloc_value
, sgot
->contents
+ off
);
11557 if (bfd_link_pic (info
) || dynreloc_st_type
== STT_GNU_IFUNC
)
11559 Elf_Internal_Rela outrel
;
11561 outrel
.r_addend
= addend
+ dynreloc_value
;
11562 outrel
.r_offset
= (sgot
->output_section
->vma
11563 + sgot
->output_offset
11565 if (dynreloc_st_type
== STT_GNU_IFUNC
)
11566 outrel
.r_info
= ELF32_R_INFO (0, R_ARM_IRELATIVE
);
11568 outrel
.r_info
= ELF32_R_INFO (0, R_ARM_RELATIVE
);
11569 elf32_arm_add_dynreloc (output_bfd
, info
, srelgot
, &outrel
);
11571 else if (globals
->fdpic_p
)
11573 /* For FDPIC executables, we use rofixup to fix
11574 address at runtime. */
11575 arm_elf_add_rofixup(output_bfd
, globals
->srofixup
,
11576 sgot
->output_section
->vma
+ sgot
->output_offset
11580 local_got_offsets
[r_symndx
] |= 1;
11583 value
= sgot
->output_offset
+ off
;
11585 if (r_type
!= R_ARM_GOT32
)
11586 value
+= sgot
->output_section
->vma
;
11588 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
11589 contents
, rel
->r_offset
, value
,
11592 case R_ARM_TLS_LDO32
:
11593 value
= value
- dtpoff_base (info
);
11595 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
11596 contents
, rel
->r_offset
, value
,
11599 case R_ARM_TLS_LDM32
:
11600 case R_ARM_TLS_LDM32_FDPIC
:
11607 off
= globals
->tls_ldm_got
.offset
;
11609 if ((off
& 1) != 0)
11613 /* If we don't know the module number, create a relocation
11615 if (bfd_link_pic (info
))
11617 Elf_Internal_Rela outrel
;
11619 if (srelgot
== NULL
)
11622 outrel
.r_addend
= 0;
11623 outrel
.r_offset
= (sgot
->output_section
->vma
11624 + sgot
->output_offset
+ off
);
11625 outrel
.r_info
= ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32
);
11627 if (globals
->use_rel
)
11628 bfd_put_32 (output_bfd
, outrel
.r_addend
,
11629 sgot
->contents
+ off
);
11631 elf32_arm_add_dynreloc (output_bfd
, info
, srelgot
, &outrel
);
11634 bfd_put_32 (output_bfd
, 1, sgot
->contents
+ off
);
11636 globals
->tls_ldm_got
.offset
|= 1;
11639 if (r_type
== R_ARM_TLS_LDM32_FDPIC
)
11641 bfd_put_32(output_bfd
,
11642 globals
->root
.sgot
->output_offset
+ off
,
11643 contents
+ rel
->r_offset
);
11645 return bfd_reloc_ok
;
11649 value
= sgot
->output_section
->vma
+ sgot
->output_offset
+ off
11650 - (input_section
->output_section
->vma
11651 + input_section
->output_offset
+ rel
->r_offset
);
11653 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
11654 contents
, rel
->r_offset
, value
,
11659 case R_ARM_TLS_CALL
:
11660 case R_ARM_THM_TLS_CALL
:
11661 case R_ARM_TLS_GD32
:
11662 case R_ARM_TLS_GD32_FDPIC
:
11663 case R_ARM_TLS_IE32
:
11664 case R_ARM_TLS_IE32_FDPIC
:
11665 case R_ARM_TLS_GOTDESC
:
11666 case R_ARM_TLS_DESCSEQ
:
11667 case R_ARM_THM_TLS_DESCSEQ
:
11669 bfd_vma off
, offplt
;
11673 BFD_ASSERT (sgot
!= NULL
);
11678 dyn
= globals
->root
.dynamic_sections_created
;
11679 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn
,
11680 bfd_link_pic (info
),
11682 && (!bfd_link_pic (info
)
11683 || !SYMBOL_REFERENCES_LOCAL (info
, h
)))
11685 *unresolved_reloc_p
= FALSE
;
11688 off
= h
->got
.offset
;
11689 offplt
= elf32_arm_hash_entry (h
)->tlsdesc_got
;
11690 tls_type
= ((struct elf32_arm_link_hash_entry
*) h
)->tls_type
;
11694 BFD_ASSERT (local_got_offsets
!= NULL
);
11695 off
= local_got_offsets
[r_symndx
];
11696 offplt
= local_tlsdesc_gotents
[r_symndx
];
11697 tls_type
= elf32_arm_local_got_tls_type (input_bfd
)[r_symndx
];
11700 /* Linker relaxations happens from one of the
11701 R_ARM_{GOTDESC,CALL,DESCSEQ} relocations to IE or LE. */
11702 if (ELF32_R_TYPE(rel
->r_info
) != r_type
)
11703 tls_type
= GOT_TLS_IE
;
11705 BFD_ASSERT (tls_type
!= GOT_UNKNOWN
);
11707 if ((off
& 1) != 0)
11711 bfd_boolean need_relocs
= FALSE
;
11712 Elf_Internal_Rela outrel
;
11715 /* The GOT entries have not been initialized yet. Do it
11716 now, and emit any relocations. If both an IE GOT and a
11717 GD GOT are necessary, we emit the GD first. */
11719 if ((bfd_link_pic (info
) || indx
!= 0)
11721 || (ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
11722 && !resolved_to_zero
)
11723 || h
->root
.type
!= bfd_link_hash_undefweak
))
11725 need_relocs
= TRUE
;
11726 BFD_ASSERT (srelgot
!= NULL
);
11729 if (tls_type
& GOT_TLS_GDESC
)
11733 /* We should have relaxed, unless this is an undefined
11735 BFD_ASSERT ((h
&& (h
->root
.type
== bfd_link_hash_undefweak
))
11736 || bfd_link_pic (info
));
11737 BFD_ASSERT (globals
->sgotplt_jump_table_size
+ offplt
+ 8
11738 <= globals
->root
.sgotplt
->size
);
11740 outrel
.r_addend
= 0;
11741 outrel
.r_offset
= (globals
->root
.sgotplt
->output_section
->vma
11742 + globals
->root
.sgotplt
->output_offset
11744 + globals
->sgotplt_jump_table_size
);
11746 outrel
.r_info
= ELF32_R_INFO (indx
, R_ARM_TLS_DESC
);
11747 sreloc
= globals
->root
.srelplt
;
11748 loc
= sreloc
->contents
;
11749 loc
+= globals
->next_tls_desc_index
++ * RELOC_SIZE (globals
);
11750 BFD_ASSERT (loc
+ RELOC_SIZE (globals
)
11751 <= sreloc
->contents
+ sreloc
->size
);
11753 SWAP_RELOC_OUT (globals
) (output_bfd
, &outrel
, loc
);
11755 /* For globals, the first word in the relocation gets
11756 the relocation index and the top bit set, or zero,
11757 if we're binding now. For locals, it gets the
11758 symbol's offset in the tls section. */
11759 bfd_put_32 (output_bfd
,
11760 !h
? value
- elf_hash_table (info
)->tls_sec
->vma
11761 : info
->flags
& DF_BIND_NOW
? 0
11762 : 0x80000000 | ELF32_R_SYM (outrel
.r_info
),
11763 globals
->root
.sgotplt
->contents
+ offplt
11764 + globals
->sgotplt_jump_table_size
);
11766 /* Second word in the relocation is always zero. */
11767 bfd_put_32 (output_bfd
, 0,
11768 globals
->root
.sgotplt
->contents
+ offplt
11769 + globals
->sgotplt_jump_table_size
+ 4);
11771 if (tls_type
& GOT_TLS_GD
)
11775 outrel
.r_addend
= 0;
11776 outrel
.r_offset
= (sgot
->output_section
->vma
11777 + sgot
->output_offset
11779 outrel
.r_info
= ELF32_R_INFO (indx
, R_ARM_TLS_DTPMOD32
);
11781 if (globals
->use_rel
)
11782 bfd_put_32 (output_bfd
, outrel
.r_addend
,
11783 sgot
->contents
+ cur_off
);
11785 elf32_arm_add_dynreloc (output_bfd
, info
, srelgot
, &outrel
);
11788 bfd_put_32 (output_bfd
, value
- dtpoff_base (info
),
11789 sgot
->contents
+ cur_off
+ 4);
11792 outrel
.r_addend
= 0;
11793 outrel
.r_info
= ELF32_R_INFO (indx
,
11794 R_ARM_TLS_DTPOFF32
);
11795 outrel
.r_offset
+= 4;
11797 if (globals
->use_rel
)
11798 bfd_put_32 (output_bfd
, outrel
.r_addend
,
11799 sgot
->contents
+ cur_off
+ 4);
11801 elf32_arm_add_dynreloc (output_bfd
, info
,
11807 /* If we are not emitting relocations for a
11808 general dynamic reference, then we must be in a
11809 static link or an executable link with the
11810 symbol binding locally. Mark it as belonging
11811 to module 1, the executable. */
11812 bfd_put_32 (output_bfd
, 1,
11813 sgot
->contents
+ cur_off
);
11814 bfd_put_32 (output_bfd
, value
- dtpoff_base (info
),
11815 sgot
->contents
+ cur_off
+ 4);
11821 if (tls_type
& GOT_TLS_IE
)
11826 outrel
.r_addend
= value
- dtpoff_base (info
);
11828 outrel
.r_addend
= 0;
11829 outrel
.r_offset
= (sgot
->output_section
->vma
11830 + sgot
->output_offset
11832 outrel
.r_info
= ELF32_R_INFO (indx
, R_ARM_TLS_TPOFF32
);
11834 if (globals
->use_rel
)
11835 bfd_put_32 (output_bfd
, outrel
.r_addend
,
11836 sgot
->contents
+ cur_off
);
11838 elf32_arm_add_dynreloc (output_bfd
, info
, srelgot
, &outrel
);
11841 bfd_put_32 (output_bfd
, tpoff (info
, value
),
11842 sgot
->contents
+ cur_off
);
11847 h
->got
.offset
|= 1;
11849 local_got_offsets
[r_symndx
] |= 1;
11852 if ((tls_type
& GOT_TLS_GD
) && r_type
!= R_ARM_TLS_GD32
&& r_type
!= R_ARM_TLS_GD32_FDPIC
)
11854 else if (tls_type
& GOT_TLS_GDESC
)
11857 if (ELF32_R_TYPE(rel
->r_info
) == R_ARM_TLS_CALL
11858 || ELF32_R_TYPE(rel
->r_info
) == R_ARM_THM_TLS_CALL
)
11860 bfd_signed_vma offset
;
11861 /* TLS stubs are arm mode. The original symbol is a
11862 data object, so branch_type is bogus. */
11863 branch_type
= ST_BRANCH_TO_ARM
;
11864 enum elf32_arm_stub_type stub_type
11865 = arm_type_of_stub (info
, input_section
, rel
,
11866 st_type
, &branch_type
,
11867 (struct elf32_arm_link_hash_entry
*)h
,
11868 globals
->tls_trampoline
, globals
->root
.splt
,
11869 input_bfd
, sym_name
);
11871 if (stub_type
!= arm_stub_none
)
11873 struct elf32_arm_stub_hash_entry
*stub_entry
11874 = elf32_arm_get_stub_entry
11875 (input_section
, globals
->root
.splt
, 0, rel
,
11876 globals
, stub_type
);
11877 offset
= (stub_entry
->stub_offset
11878 + stub_entry
->stub_sec
->output_offset
11879 + stub_entry
->stub_sec
->output_section
->vma
);
11882 offset
= (globals
->root
.splt
->output_section
->vma
11883 + globals
->root
.splt
->output_offset
11884 + globals
->tls_trampoline
);
              if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL)
                {
                  unsigned long inst;

                  offset -= (input_section->output_section->vma
                             + input_section->output_offset
                             + rel->r_offset + 8);

                  inst = offset >> 2;
                  inst &= 0x00ffffff;
                  value = inst | (globals->use_blx ? 0xfa000000 : 0xeb000000);
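                  /* 0xeb000000 is the always-executed BL encoding and
                     0xfa000000 the immediate BLX encoding; either way the
                     low 24 bits carry the word offset computed above,
                     relative to the branch address + 8.  */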
11900 /* Thumb blx encodes the offset in a complicated
11902 unsigned upper_insn
, lower_insn
;
11905 offset
-= (input_section
->output_section
->vma
11906 + input_section
->output_offset
11907 + rel
->r_offset
+ 4);
11909 if (stub_type
!= arm_stub_none
11910 && arm_stub_is_thumb (stub_type
))
11912 lower_insn
= 0xd000;
11916 lower_insn
= 0xc000;
11917 /* Round up the offset to a word boundary. */
11918 offset
= (offset
+ 2) & ~2;
11922 upper_insn
= (0xf000
11923 | ((offset
>> 12) & 0x3ff)
11925 lower_insn
|= (((!((offset
>> 23) & 1)) ^ neg
) << 13)
11926 | (((!((offset
>> 22) & 1)) ^ neg
) << 11)
11927 | ((offset
>> 1) & 0x7ff);
11928 bfd_put_16 (input_bfd
, upper_insn
, hit_data
);
11929 bfd_put_16 (input_bfd
, lower_insn
, hit_data
+ 2);
11930 return bfd_reloc_ok
;
11933 /* These relocations needs special care, as besides the fact
11934 they point somewhere in .gotplt, the addend must be
11935 adjusted accordingly depending on the type of instruction
11937 else if ((r_type
== R_ARM_TLS_GOTDESC
) && (tls_type
& GOT_TLS_GDESC
))
11939 unsigned long data
, insn
;
11942 data
= bfd_get_32 (input_bfd
, hit_data
);
11948 insn
= bfd_get_16 (input_bfd
, contents
+ rel
->r_offset
- data
);
11949 if ((insn
& 0xf000) == 0xf000 || (insn
& 0xf800) == 0xe800)
11950 insn
= (insn
<< 16)
11951 | bfd_get_16 (input_bfd
,
11952 contents
+ rel
->r_offset
- data
+ 2);
11953 if ((insn
& 0xf800c000) == 0xf000c000)
11956 else if ((insn
& 0xffffff00) == 0x4400)
11962 /* xgettext:c-format */
11963 (_("%pB(%pA+%#" PRIx64
"): "
11964 "unexpected %s instruction '%#lx' "
11965 "referenced by TLS_GOTDESC"),
11966 input_bfd
, input_section
, (uint64_t) rel
->r_offset
,
11968 return bfd_reloc_notsupported
;
11973 insn
= bfd_get_32 (input_bfd
, contents
+ rel
->r_offset
- data
);
11975 switch (insn
>> 24)
11977 case 0xeb: /* bl */
11978 case 0xfa: /* blx */
11982 case 0xe0: /* add */
11988 /* xgettext:c-format */
11989 (_("%pB(%pA+%#" PRIx64
"): "
11990 "unexpected %s instruction '%#lx' "
11991 "referenced by TLS_GOTDESC"),
11992 input_bfd
, input_section
, (uint64_t) rel
->r_offset
,
11994 return bfd_reloc_notsupported
;
11998 value
+= ((globals
->root
.sgotplt
->output_section
->vma
11999 + globals
->root
.sgotplt
->output_offset
+ off
)
12000 - (input_section
->output_section
->vma
12001 + input_section
->output_offset
12003 + globals
->sgotplt_jump_table_size
);
12006 value
= ((globals
->root
.sgot
->output_section
->vma
12007 + globals
->root
.sgot
->output_offset
+ off
)
12008 - (input_section
->output_section
->vma
12009 + input_section
->output_offset
+ rel
->r_offset
));
12011 if (globals
->fdpic_p
&& (r_type
== R_ARM_TLS_GD32_FDPIC
||
12012 r_type
== R_ARM_TLS_IE32_FDPIC
))
12014 /* For FDPIC relocations, resolve to the offset of the GOT
12015 entry from the start of GOT. */
12016 bfd_put_32(output_bfd
,
12017 globals
->root
.sgot
->output_offset
+ off
,
12018 contents
+ rel
->r_offset
);
12020 return bfd_reloc_ok
;
12024 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
12025 contents
, rel
->r_offset
, value
,
12030 case R_ARM_TLS_LE32
:
12031 if (bfd_link_dll (info
))
12034 /* xgettext:c-format */
12035 (_("%pB(%pA+%#" PRIx64
"): %s relocation not permitted "
12036 "in shared object"),
12037 input_bfd
, input_section
, (uint64_t) rel
->r_offset
, howto
->name
);
12038 return bfd_reloc_notsupported
;
12041 value
= tpoff (info
, value
);
12043 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
12044 contents
, rel
->r_offset
, value
,
12048 if (globals
->fix_v4bx
)
12050 bfd_vma insn
= bfd_get_32 (input_bfd
, hit_data
);
12052 /* Ensure that we have a BX instruction. */
12053 BFD_ASSERT ((insn
& 0x0ffffff0) == 0x012fff10);
12055 if (globals
->fix_v4bx
== 2 && (insn
& 0xf) != 0xf)
12057 /* Branch to veneer. */
12059 glue_addr
= elf32_arm_bx_glue (info
, insn
& 0xf);
12060 glue_addr
-= input_section
->output_section
->vma
12061 + input_section
->output_offset
12062 + rel
->r_offset
+ 8;
12063 insn
= (insn
& 0xf0000000) | 0x0a000000
12064 | ((glue_addr
>> 2) & 0x00ffffff);
12068 /* Preserve Rm (lowest four bits) and the condition code
12069 (highest four bits). Other bits encode MOV PC,Rm. */
12070 insn
= (insn
& 0xf000000f) | 0x01a0f000;
12073 bfd_put_32 (input_bfd
, insn
, hit_data
);
12075 return bfd_reloc_ok
;
    case R_ARM_MOVW_ABS_NC:
    case R_ARM_MOVT_ABS:
    case R_ARM_MOVW_PREL_NC:
    case R_ARM_MOVT_PREL:
    /* Until we properly support segment-base-relative addressing then
       we assume the segment base to be zero, as for the group relocations.
       Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
       and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS.  */
    case R_ARM_MOVW_BREL_NC:
    case R_ARM_MOVW_BREL:
    case R_ARM_MOVT_BREL:
      {
        bfd_vma insn = bfd_get_32 (input_bfd, hit_data);

        if (globals->use_rel)
          {
            addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
            signed_addend = (addend ^ 0x8000) - 0x8000;
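            /* The MOVW/MOVT immediate is split across imm4 (bits 19-16) and
               imm12 (bits 11-0); the XOR/subtract pair sign-extends the
               reassembled 16-bit value, e.g. 0xfffc becomes -4.  */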
          }

        value += signed_addend;

        if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
          value -= (input_section->output_section->vma
                    + input_section->output_offset + rel->r_offset);

        if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
          return bfd_reloc_overflow;

        if (branch_type == ST_BRANCH_TO_THUMB)
          value |= 1;

        if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
            || r_type == R_ARM_MOVT_BREL)
          value >>= 16;

        insn &= 0xfff0f000;
        insn |= value & 0xfff;
        insn |= (value & 0xf000) << 4;
        bfd_put_32 (input_bfd, insn, hit_data);

        return bfd_reloc_ok;
      }
12120 case R_ARM_THM_MOVW_ABS_NC
:
12121 case R_ARM_THM_MOVT_ABS
:
12122 case R_ARM_THM_MOVW_PREL_NC
:
12123 case R_ARM_THM_MOVT_PREL
:
12124 /* Until we properly support segment-base-relative addressing then
12125 we assume the segment base to be zero, as for the above relocations.
12126 Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
12127 R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
12128 as R_ARM_THM_MOVT_ABS. */
12129 case R_ARM_THM_MOVW_BREL_NC
:
12130 case R_ARM_THM_MOVW_BREL
:
12131 case R_ARM_THM_MOVT_BREL
:
12135 insn
= bfd_get_16 (input_bfd
, hit_data
) << 16;
12136 insn
|= bfd_get_16 (input_bfd
, hit_data
+ 2);
12138 if (globals
->use_rel
)
12140 addend
= ((insn
>> 4) & 0xf000)
12141 | ((insn
>> 15) & 0x0800)
12142 | ((insn
>> 4) & 0x0700)
12144 signed_addend
= (addend
^ 0x8000) - 0x8000;
12147 value
+= signed_addend
;
12149 if (r_type
== R_ARM_THM_MOVW_PREL_NC
|| r_type
== R_ARM_THM_MOVT_PREL
)
12150 value
-= (input_section
->output_section
->vma
12151 + input_section
->output_offset
+ rel
->r_offset
);
12153 if (r_type
== R_ARM_THM_MOVW_BREL
&& value
>= 0x10000)
12154 return bfd_reloc_overflow
;
12156 if (branch_type
== ST_BRANCH_TO_THUMB
)
12159 if (r_type
== R_ARM_THM_MOVT_ABS
|| r_type
== R_ARM_THM_MOVT_PREL
12160 || r_type
== R_ARM_THM_MOVT_BREL
)
12163 insn
&= 0xfbf08f00;
12164 insn
|= (value
& 0xf000) << 4;
12165 insn
|= (value
& 0x0800) << 15;
12166 insn
|= (value
& 0x0700) << 4;
12167 insn
|= (value
& 0x00ff);
12169 bfd_put_16 (input_bfd
, insn
>> 16, hit_data
);
12170 bfd_put_16 (input_bfd
, insn
& 0xffff, hit_data
+ 2);
12172 return bfd_reloc_ok
;
12174 case R_ARM_ALU_PC_G0_NC
:
12175 case R_ARM_ALU_PC_G1_NC
:
12176 case R_ARM_ALU_PC_G0
:
12177 case R_ARM_ALU_PC_G1
:
12178 case R_ARM_ALU_PC_G2
:
12179 case R_ARM_ALU_SB_G0_NC
:
12180 case R_ARM_ALU_SB_G1_NC
:
12181 case R_ARM_ALU_SB_G0
:
12182 case R_ARM_ALU_SB_G1
:
12183 case R_ARM_ALU_SB_G2
:
12185 bfd_vma insn
= bfd_get_32 (input_bfd
, hit_data
);
12186 bfd_vma pc
= input_section
->output_section
->vma
12187 + input_section
->output_offset
+ rel
->r_offset
;
12188 /* sb is the origin of the *segment* containing the symbol. */
12189 bfd_vma sb
= sym_sec
? sym_sec
->output_section
->vma
: 0;
12192 bfd_signed_vma signed_value
;
12195 /* Determine which group of bits to select. */
12198 case R_ARM_ALU_PC_G0_NC
:
12199 case R_ARM_ALU_PC_G0
:
12200 case R_ARM_ALU_SB_G0_NC
:
12201 case R_ARM_ALU_SB_G0
:
12205 case R_ARM_ALU_PC_G1_NC
:
12206 case R_ARM_ALU_PC_G1
:
12207 case R_ARM_ALU_SB_G1_NC
:
12208 case R_ARM_ALU_SB_G1
:
12212 case R_ARM_ALU_PC_G2
:
12213 case R_ARM_ALU_SB_G2
:
        /* If REL, extract the addend from the insn.  If RELA, it will
           have already been fetched for us.  */
        if (globals->use_rel)
          {
            int negative;
            bfd_vma constant = insn & 0xff;
            bfd_vma rotation = (insn & 0xf00) >> 8;

            if (rotation == 0)
              signed_addend = constant;
            else
              {
                /* Compensate for the fact that in the instruction, the
                   rotation is stored in multiples of 2 bits.  */
                rotation *= 2;

                /* Rotate "constant" right by "rotation" bits.  */
                signed_addend = (constant >> rotation) |
                                (constant << (8 * sizeof (bfd_vma) - rotation));
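                /* This is the standard ARM data-processing immediate: an
                   8-bit constant rotated right by twice the 4-bit rotation
                   field.  E.g. constant 0xff with a rotation field of 4
                   (8 bits once doubled) decodes to 0xff000000.  */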
12242 /* Determine if the instruction is an ADD or a SUB.
12243 (For REL, this determines the sign of the addend.) */
12244 negative
= identify_add_or_sub (insn
);
12248 /* xgettext:c-format */
12249 (_("%pB(%pA+%#" PRIx64
"): only ADD or SUB instructions "
12250 "are allowed for ALU group relocations"),
12251 input_bfd
, input_section
, (uint64_t) rel
->r_offset
);
12252 return bfd_reloc_overflow
;
12255 signed_addend
*= negative
;
12258 /* Compute the value (X) to go in the place. */
12259 if (r_type
== R_ARM_ALU_PC_G0_NC
12260 || r_type
== R_ARM_ALU_PC_G1_NC
12261 || r_type
== R_ARM_ALU_PC_G0
12262 || r_type
== R_ARM_ALU_PC_G1
12263 || r_type
== R_ARM_ALU_PC_G2
)
12265 signed_value
= value
- pc
+ signed_addend
;
12267 /* Section base relative. */
12268 signed_value
= value
- sb
+ signed_addend
;
12270 /* If the target symbol is a Thumb function, then set the
12271 Thumb bit in the address. */
12272 if (branch_type
== ST_BRANCH_TO_THUMB
)
12275 /* Calculate the value of the relevant G_n, in encoded
12276 constant-with-rotation format. */
12277 g_n
= calculate_group_reloc_mask (signed_value
< 0 ? - signed_value
: signed_value
,
12280 /* Check for overflow if required. */
12281 if ((r_type
== R_ARM_ALU_PC_G0
12282 || r_type
== R_ARM_ALU_PC_G1
12283 || r_type
== R_ARM_ALU_PC_G2
12284 || r_type
== R_ARM_ALU_SB_G0
12285 || r_type
== R_ARM_ALU_SB_G1
12286 || r_type
== R_ARM_ALU_SB_G2
) && residual
!= 0)
12289 /* xgettext:c-format */
12290 (_("%pB(%pA+%#" PRIx64
"): overflow whilst "
12291 "splitting %#" PRIx64
" for group relocation %s"),
12292 input_bfd
, input_section
, (uint64_t) rel
->r_offset
,
12293 (uint64_t) (signed_value
< 0 ? -signed_value
: signed_value
),
12295 return bfd_reloc_overflow
;
12298 /* Mask out the value and the ADD/SUB part of the opcode; take care
12299 not to destroy the S bit. */
12300 insn
&= 0xff1ff000;
12302 /* Set the opcode according to whether the value to go in the
12303 place is negative. */
12304 if (signed_value
< 0)
12309 /* Encode the offset. */
12312 bfd_put_32 (input_bfd
, insn
, hit_data
);
12314 return bfd_reloc_ok
;
12316 case R_ARM_LDR_PC_G0
:
12317 case R_ARM_LDR_PC_G1
:
12318 case R_ARM_LDR_PC_G2
:
12319 case R_ARM_LDR_SB_G0
:
12320 case R_ARM_LDR_SB_G1
:
12321 case R_ARM_LDR_SB_G2
:
12323 bfd_vma insn
= bfd_get_32 (input_bfd
, hit_data
);
12324 bfd_vma pc
= input_section
->output_section
->vma
12325 + input_section
->output_offset
+ rel
->r_offset
;
12326 /* sb is the origin of the *segment* containing the symbol. */
12327 bfd_vma sb
= sym_sec
? sym_sec
->output_section
->vma
: 0;
12329 bfd_signed_vma signed_value
;
12332 /* Determine which groups of bits to calculate. */
12335 case R_ARM_LDR_PC_G0
:
12336 case R_ARM_LDR_SB_G0
:
12340 case R_ARM_LDR_PC_G1
:
12341 case R_ARM_LDR_SB_G1
:
12345 case R_ARM_LDR_PC_G2
:
12346 case R_ARM_LDR_SB_G2
:
12354 /* If REL, extract the addend from the insn. If RELA, it will
12355 have already been fetched for us. */
12356 if (globals
->use_rel
)
12358 int negative
= (insn
& (1 << 23)) ? 1 : -1;
12359 signed_addend
= negative
* (insn
& 0xfff);
12362 /* Compute the value (X) to go in the place. */
12363 if (r_type
== R_ARM_LDR_PC_G0
12364 || r_type
== R_ARM_LDR_PC_G1
12365 || r_type
== R_ARM_LDR_PC_G2
)
12367 signed_value
= value
- pc
+ signed_addend
;
12369 /* Section base relative. */
12370 signed_value
= value
- sb
+ signed_addend
;
12372 /* Calculate the value of the relevant G_{n-1} to obtain
12373 the residual at that stage. */
12374 calculate_group_reloc_mask (signed_value
< 0 ? - signed_value
: signed_value
,
12375 group
- 1, &residual
);
12377 /* Check for overflow. */
12378 if (residual
>= 0x1000)
12381 /* xgettext:c-format */
12382 (_("%pB(%pA+%#" PRIx64
"): overflow whilst "
12383 "splitting %#" PRIx64
" for group relocation %s"),
12384 input_bfd
, input_section
, (uint64_t) rel
->r_offset
,
12385 (uint64_t) (signed_value
< 0 ? -signed_value
: signed_value
),
12387 return bfd_reloc_overflow
;
12390 /* Mask out the value and U bit. */
12391 insn
&= 0xff7ff000;
12393 /* Set the U bit if the value to go in the place is non-negative. */
12394 if (signed_value
>= 0)
12397 /* Encode the offset. */
12400 bfd_put_32 (input_bfd
, insn
, hit_data
);
12402 return bfd_reloc_ok
;
12404 case R_ARM_LDRS_PC_G0
:
12405 case R_ARM_LDRS_PC_G1
:
12406 case R_ARM_LDRS_PC_G2
:
12407 case R_ARM_LDRS_SB_G0
:
12408 case R_ARM_LDRS_SB_G1
:
12409 case R_ARM_LDRS_SB_G2
:
12411 bfd_vma insn
= bfd_get_32 (input_bfd
, hit_data
);
12412 bfd_vma pc
= input_section
->output_section
->vma
12413 + input_section
->output_offset
+ rel
->r_offset
;
12414 /* sb is the origin of the *segment* containing the symbol. */
12415 bfd_vma sb
= sym_sec
? sym_sec
->output_section
->vma
: 0;
12417 bfd_signed_vma signed_value
;
12420 /* Determine which groups of bits to calculate. */
12423 case R_ARM_LDRS_PC_G0
:
12424 case R_ARM_LDRS_SB_G0
:
12428 case R_ARM_LDRS_PC_G1
:
12429 case R_ARM_LDRS_SB_G1
:
12433 case R_ARM_LDRS_PC_G2
:
12434 case R_ARM_LDRS_SB_G2
:
12442 /* If REL, extract the addend from the insn. If RELA, it will
12443 have already been fetched for us. */
12444 if (globals
->use_rel
)
12446 int negative
= (insn
& (1 << 23)) ? 1 : -1;
12447 signed_addend
= negative
* (((insn
& 0xf00) >> 4) + (insn
& 0xf));
12450 /* Compute the value (X) to go in the place. */
12451 if (r_type
== R_ARM_LDRS_PC_G0
12452 || r_type
== R_ARM_LDRS_PC_G1
12453 || r_type
== R_ARM_LDRS_PC_G2
)
12455 signed_value
= value
- pc
+ signed_addend
;
12457 /* Section base relative. */
12458 signed_value
= value
- sb
+ signed_addend
;
12460 /* Calculate the value of the relevant G_{n-1} to obtain
12461 the residual at that stage. */
12462 calculate_group_reloc_mask (signed_value
< 0 ? - signed_value
: signed_value
,
12463 group
- 1, &residual
);
12465 /* Check for overflow. */
12466 if (residual
>= 0x100)
12469 /* xgettext:c-format */
12470 (_("%pB(%pA+%#" PRIx64
"): overflow whilst "
12471 "splitting %#" PRIx64
" for group relocation %s"),
12472 input_bfd
, input_section
, (uint64_t) rel
->r_offset
,
12473 (uint64_t) (signed_value
< 0 ? -signed_value
: signed_value
),
12475 return bfd_reloc_overflow
;
12478 /* Mask out the value and U bit. */
12479 insn
&= 0xff7ff0f0;
12481 /* Set the U bit if the value to go in the place is non-negative. */
12482 if (signed_value
>= 0)
12485 /* Encode the offset. */
12486 insn
|= ((residual
& 0xf0) << 4) | (residual
& 0xf);
12488 bfd_put_32 (input_bfd
, insn
, hit_data
);
12490 return bfd_reloc_ok
;
12492 case R_ARM_LDC_PC_G0
:
12493 case R_ARM_LDC_PC_G1
:
12494 case R_ARM_LDC_PC_G2
:
12495 case R_ARM_LDC_SB_G0
:
12496 case R_ARM_LDC_SB_G1
:
12497 case R_ARM_LDC_SB_G2
:
12499 bfd_vma insn
= bfd_get_32 (input_bfd
, hit_data
);
12500 bfd_vma pc
= input_section
->output_section
->vma
12501 + input_section
->output_offset
+ rel
->r_offset
;
12502 /* sb is the origin of the *segment* containing the symbol. */
12503 bfd_vma sb
= sym_sec
? sym_sec
->output_section
->vma
: 0;
12505 bfd_signed_vma signed_value
;
12508 /* Determine which groups of bits to calculate. */
12511 case R_ARM_LDC_PC_G0
:
12512 case R_ARM_LDC_SB_G0
:
12516 case R_ARM_LDC_PC_G1
:
12517 case R_ARM_LDC_SB_G1
:
12521 case R_ARM_LDC_PC_G2
:
12522 case R_ARM_LDC_SB_G2
:
12530 /* If REL, extract the addend from the insn. If RELA, it will
12531 have already been fetched for us. */
12532 if (globals
->use_rel
)
12534 int negative
= (insn
& (1 << 23)) ? 1 : -1;
12535 signed_addend
= negative
* ((insn
& 0xff) << 2);
12538 /* Compute the value (X) to go in the place. */
12539 if (r_type
== R_ARM_LDC_PC_G0
12540 || r_type
== R_ARM_LDC_PC_G1
12541 || r_type
== R_ARM_LDC_PC_G2
)
12543 signed_value
= value
- pc
+ signed_addend
;
12545 /* Section base relative. */
12546 signed_value
= value
- sb
+ signed_addend
;
12548 /* Calculate the value of the relevant G_{n-1} to obtain
12549 the residual at that stage. */
12550 calculate_group_reloc_mask (signed_value
< 0 ? - signed_value
: signed_value
,
12551 group
- 1, &residual
);
12553 /* Check for overflow. (The absolute value to go in the place must be
12554 divisible by four and, after having been divided by four, must
12555 fit in eight bits.) */
12556 if ((residual
& 0x3) != 0 || residual
>= 0x400)
12559 /* xgettext:c-format */
12560 (_("%pB(%pA+%#" PRIx64
"): overflow whilst "
12561 "splitting %#" PRIx64
" for group relocation %s"),
12562 input_bfd
, input_section
, (uint64_t) rel
->r_offset
,
12563 (uint64_t) (signed_value
< 0 ? -signed_value
: signed_value
),
12565 return bfd_reloc_overflow
;
12568 /* Mask out the value and U bit. */
12569 insn
&= 0xff7fff00;
12571 /* Set the U bit if the value to go in the place is non-negative. */
12572 if (signed_value
>= 0)
12575 /* Encode the offset. */
12576 insn
|= residual
>> 2;
12578 bfd_put_32 (input_bfd
, insn
, hit_data
);
12580 return bfd_reloc_ok
;
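	    /* Illustrative example (not part of the original source): for an
	       R_ARM_LDC_PC_G1 relocation with X = 0x1234, the group-0 chunk is
	       the 8-bit-with-rotation constant 0x1200, leaving a residual of
	       0x34 for group 1.  0x34 is a multiple of four and 0x34 >> 2 fits
	       in eight bits, so it passes the overflow check above and is
	       encoded directly into the coprocessor load/store offset field.  */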
12582 case R_ARM_THM_ALU_ABS_G0_NC
:
12583 case R_ARM_THM_ALU_ABS_G1_NC
:
12584 case R_ARM_THM_ALU_ABS_G2_NC
:
12585 case R_ARM_THM_ALU_ABS_G3_NC
:
12587 const int shift_array
[4] = {0, 8, 16, 24};
12588 bfd_vma insn
= bfd_get_16 (input_bfd
, hit_data
);
12589 bfd_vma addr
= value
;
12590 int shift
= shift_array
[r_type
- R_ARM_THM_ALU_ABS_G0_NC
];
12592 /* Compute address. */
12593 if (globals
->use_rel
)
12594 signed_addend
= insn
& 0xff;
12595 addr
+= signed_addend
;
12596 if (branch_type
== ST_BRANCH_TO_THUMB
)
12598 /* Clean imm8 insn. */
12600 /* And update with correct part of address. */
12601 insn
|= (addr
>> shift
) & 0xff;
12603 bfd_put_16 (input_bfd
, insn
, hit_data
);
12606 *unresolved_reloc_p
= FALSE
;
12607 return bfd_reloc_ok
;
12609 case R_ARM_GOTOFFFUNCDESC
:
12613 struct fdpic_local
*local_fdpic_cnts
= elf32_arm_local_fdpic_cnts(input_bfd
);
12614 int dynindx
= elf_section_data (sym_sec
->output_section
)->dynindx
;
12615 int offset
= local_fdpic_cnts
[r_symndx
].funcdesc_offset
& ~1;
12616 bfd_vma addr
= dynreloc_value
- sym_sec
->output_section
->vma
;
12619 if (bfd_link_pic(info
) && dynindx
== 0)
12622 /* Resolve relocation. */
12623 bfd_put_32(output_bfd
, (offset
+ sgot
->output_offset
)
12624 , contents
+ rel
->r_offset
);
	  /* Emit R_ARM_FUNCDESC_VALUE or two fixups on funcdesc if
	     not done yet.  */
12627 arm_elf_fill_funcdesc(output_bfd
, info
,
12628 &local_fdpic_cnts
[r_symndx
].funcdesc_offset
,
12629 dynindx
, offset
, addr
, dynreloc_value
, seg
);
12634 int offset
= eh
->fdpic_cnts
.funcdesc_offset
& ~1;
12638 /* For static binaries, sym_sec can be null. */
12641 dynindx
= elf_section_data (sym_sec
->output_section
)->dynindx
;
12642 addr
= dynreloc_value
- sym_sec
->output_section
->vma
;
12650 if (bfd_link_pic(info
) && dynindx
== 0)
12653 /* This case cannot occur since funcdesc is allocated by
12654 the dynamic loader so we cannot resolve the relocation. */
12655 if (h
->dynindx
!= -1)
12658 /* Resolve relocation. */
12659 bfd_put_32(output_bfd
, (offset
+ sgot
->output_offset
),
12660 contents
+ rel
->r_offset
);
12661 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12662 arm_elf_fill_funcdesc(output_bfd
, info
,
12663 &eh
->fdpic_cnts
.funcdesc_offset
,
12664 dynindx
, offset
, addr
, dynreloc_value
, seg
);
12667 *unresolved_reloc_p
= FALSE
;
12668 return bfd_reloc_ok
;
12670 case R_ARM_GOTFUNCDESC
:
12674 Elf_Internal_Rela outrel
;
12676 /* Resolve relocation. */
12677 bfd_put_32(output_bfd
, ((eh
->fdpic_cnts
.gotfuncdesc_offset
& ~1)
12678 + sgot
->output_offset
),
12679 contents
+ rel
->r_offset
);
12680 /* Add funcdesc and associated R_ARM_FUNCDESC_VALUE. */
12681 if(h
->dynindx
== -1)
12684 int offset
= eh
->fdpic_cnts
.funcdesc_offset
& ~1;
12688 /* For static binaries sym_sec can be null. */
12691 dynindx
= elf_section_data (sym_sec
->output_section
)->dynindx
;
12692 addr
= dynreloc_value
- sym_sec
->output_section
->vma
;
12700 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12701 arm_elf_fill_funcdesc(output_bfd
, info
,
12702 &eh
->fdpic_cnts
.funcdesc_offset
,
12703 dynindx
, offset
, addr
, dynreloc_value
, seg
);
12706 /* Add a dynamic relocation on GOT entry if not already done. */
12707 if ((eh
->fdpic_cnts
.gotfuncdesc_offset
& 1) == 0)
12709 if (h
->dynindx
== -1)
12711 outrel
.r_info
= ELF32_R_INFO (0, R_ARM_RELATIVE
);
12712 if (h
->root
.type
== bfd_link_hash_undefweak
)
12713 bfd_put_32(output_bfd
, 0, sgot
->contents
12714 + (eh
->fdpic_cnts
.gotfuncdesc_offset
& ~1));
12716 bfd_put_32(output_bfd
, sgot
->output_section
->vma
12717 + sgot
->output_offset
12718 + (eh
->fdpic_cnts
.funcdesc_offset
& ~1),
12720 + (eh
->fdpic_cnts
.gotfuncdesc_offset
& ~1));
12724 outrel
.r_info
= ELF32_R_INFO (h
->dynindx
, R_ARM_FUNCDESC
);
12726 outrel
.r_offset
= sgot
->output_section
->vma
12727 + sgot
->output_offset
12728 + (eh
->fdpic_cnts
.gotfuncdesc_offset
& ~1);
12729 outrel
.r_addend
= 0;
12730 if (h
->dynindx
== -1 && !bfd_link_pic(info
))
12731 if (h
->root
.type
== bfd_link_hash_undefweak
)
12732 arm_elf_add_rofixup(output_bfd
, globals
->srofixup
, -1);
12734 arm_elf_add_rofixup(output_bfd
, globals
->srofixup
, outrel
.r_offset
);
12736 elf32_arm_add_dynreloc (output_bfd
, info
, srelgot
, &outrel
);
12737 eh
->fdpic_cnts
.gotfuncdesc_offset
|= 1;
12742 /* Such relocation on static function should not have been
12743 emitted by the compiler. */
12747 *unresolved_reloc_p
= FALSE
;
12748 return bfd_reloc_ok
;
12750 case R_ARM_FUNCDESC
:
12754 struct fdpic_local
*local_fdpic_cnts
= elf32_arm_local_fdpic_cnts(input_bfd
);
12755 Elf_Internal_Rela outrel
;
12756 int dynindx
= elf_section_data (sym_sec
->output_section
)->dynindx
;
12757 int offset
= local_fdpic_cnts
[r_symndx
].funcdesc_offset
& ~1;
12758 bfd_vma addr
= dynreloc_value
- sym_sec
->output_section
->vma
;
12761 if (bfd_link_pic(info
) && dynindx
== 0)
	      /* Replace static FUNCDESC relocation with a
		 R_ARM_RELATIVE dynamic relocation or with a rofixup for
		 an executable.  */
12767 outrel
.r_info
= ELF32_R_INFO (0, R_ARM_RELATIVE
);
12768 outrel
.r_offset
= input_section
->output_section
->vma
12769 + input_section
->output_offset
+ rel
->r_offset
;
12770 outrel
.r_addend
= 0;
12771 if (bfd_link_pic(info
))
12772 elf32_arm_add_dynreloc (output_bfd
, info
, srelgot
, &outrel
);
12774 arm_elf_add_rofixup(output_bfd
, globals
->srofixup
, outrel
.r_offset
);
12776 bfd_put_32 (input_bfd
, sgot
->output_section
->vma
12777 + sgot
->output_offset
+ offset
, hit_data
);
12779 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12780 arm_elf_fill_funcdesc(output_bfd
, info
,
12781 &local_fdpic_cnts
[r_symndx
].funcdesc_offset
,
12782 dynindx
, offset
, addr
, dynreloc_value
, seg
);
12786 if (h
->dynindx
== -1)
12789 int offset
= eh
->fdpic_cnts
.funcdesc_offset
& ~1;
12792 Elf_Internal_Rela outrel
;
12794 /* For static binaries sym_sec can be null. */
12797 dynindx
= elf_section_data (sym_sec
->output_section
)->dynindx
;
12798 addr
= dynreloc_value
- sym_sec
->output_section
->vma
;
12806 if (bfd_link_pic(info
) && dynindx
== 0)
12809 /* Replace static FUNCDESC relocation with a
12810 R_ARM_RELATIVE dynamic relocation. */
12811 outrel
.r_info
= ELF32_R_INFO (0, R_ARM_RELATIVE
);
12812 outrel
.r_offset
= input_section
->output_section
->vma
12813 + input_section
->output_offset
+ rel
->r_offset
;
12814 outrel
.r_addend
= 0;
12815 if (bfd_link_pic(info
))
12816 elf32_arm_add_dynreloc (output_bfd
, info
, srelgot
, &outrel
);
12818 arm_elf_add_rofixup(output_bfd
, globals
->srofixup
, outrel
.r_offset
);
12820 bfd_put_32 (input_bfd
, sgot
->output_section
->vma
12821 + sgot
->output_offset
+ offset
, hit_data
);
12823 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12824 arm_elf_fill_funcdesc(output_bfd
, info
,
12825 &eh
->fdpic_cnts
.funcdesc_offset
,
12826 dynindx
, offset
, addr
, dynreloc_value
, seg
);
12830 Elf_Internal_Rela outrel
;
12832 /* Add a dynamic relocation. */
12833 outrel
.r_info
= ELF32_R_INFO (h
->dynindx
, R_ARM_FUNCDESC
);
12834 outrel
.r_offset
= input_section
->output_section
->vma
12835 + input_section
->output_offset
+ rel
->r_offset
;
12836 outrel
.r_addend
= 0;
12837 elf32_arm_add_dynreloc (output_bfd
, info
, srelgot
, &outrel
);
12841 *unresolved_reloc_p
= FALSE
;
12842 return bfd_reloc_ok
;
12845 return bfd_reloc_notsupported
;
/* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS.  */

static void
arm_add_to_rel (bfd *              abfd,
		bfd_byte *         address,
		reloc_howto_type * howto,
		bfd_signed_vma     increment)
{
  bfd_signed_vma addend;

  if (howto->type == R_ARM_THM_CALL
      || howto->type == R_ARM_THM_JUMP24)
    {
      int upper_insn, lower_insn;
      int upper, lower;

      upper_insn = bfd_get_16 (abfd, address);
      lower_insn = bfd_get_16 (abfd, address + 2);
      upper = upper_insn & 0x7ff;
      lower = lower_insn & 0x7ff;

      addend = (upper << 12) | (lower << 1);
      addend += increment;
      addend >>= 1;

      upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
      lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);

      bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
      bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
    }
  else
    {
      bfd_vma contents;

      contents = bfd_get_32 (abfd, address);

      /* Get the (signed) value from the instruction.  */
      addend = contents & howto->src_mask;
      if (addend & ((howto->src_mask + 1) >> 1))
	{
	  bfd_signed_vma mask;

	  mask = -1;
	  mask &= ~ howto->src_mask;
	  addend |= mask;
	}

      /* Add in the increment, (which is a byte value).  */
      switch (howto->type)
	{
	default:
	  addend += increment;
	  break;

	case R_ARM_PC24:
	case R_ARM_PLT32:
	case R_ARM_CALL:
	case R_ARM_JUMP24:
	  addend <<= howto->size;
	  addend += increment;

	  /* Should we check for overflow here ?  */

	  /* Drop any undesired bits.  */
	  addend >>= howto->rightshift;
	  break;
	}

      contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);

      bfd_put_32 (abfd, contents, address);
    }
}
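/* Worked example (illustrative, not part of the original source): for a
   branch-style relocation whose field holds a word offset, a stored field
   value that sign-extends to -2 becomes -8 bytes after the shift left by
   howto->size, the byte INCREMENT is then added, and the shift right by
   howto->rightshift converts the result back to a word offset before it
   is re-inserted under dst_mask.  */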
#define IS_ARM_TLS_RELOC(R_TYPE)	\
  ((R_TYPE) == R_ARM_TLS_GD32		\
   || (R_TYPE) == R_ARM_TLS_GD32_FDPIC	\
   || (R_TYPE) == R_ARM_TLS_LDO32	\
   || (R_TYPE) == R_ARM_TLS_LDM32	\
   || (R_TYPE) == R_ARM_TLS_LDM32_FDPIC	\
   || (R_TYPE) == R_ARM_TLS_DTPOFF32	\
   || (R_TYPE) == R_ARM_TLS_DTPMOD32	\
   || (R_TYPE) == R_ARM_TLS_TPOFF32	\
   || (R_TYPE) == R_ARM_TLS_LE32	\
   || (R_TYPE) == R_ARM_TLS_IE32	\
   || (R_TYPE) == R_ARM_TLS_IE32_FDPIC	\
   || IS_ARM_TLS_GNU_RELOC (R_TYPE))

/* Specific set of relocations for the gnu tls dialect.  */
#define IS_ARM_TLS_GNU_RELOC(R_TYPE)	\
  ((R_TYPE) == R_ARM_TLS_GOTDESC	\
   || (R_TYPE) == R_ARM_TLS_CALL	\
   || (R_TYPE) == R_ARM_THM_TLS_CALL	\
   || (R_TYPE) == R_ARM_TLS_DESCSEQ	\
   || (R_TYPE) == R_ARM_THM_TLS_DESCSEQ)
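/* Usage sketch (illustrative, not part of the original source):

     if (IS_ARM_TLS_RELOC (r_type) && !IS_ARM_TLS_GNU_RELOC (r_type))
       ... handle a classic GD/IE/LE-style TLS relocation ...

   i.e. IS_ARM_TLS_GNU_RELOC carves the descriptor-based GNU dialect
   (TLS_GOTDESC, TLS_CALL, TLS_DESCSEQ and their Thumb variants) out of
   the broader TLS set matched by IS_ARM_TLS_RELOC.  */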
12945 /* Relocate an ARM ELF section. */
12948 elf32_arm_relocate_section (bfd
* output_bfd
,
12949 struct bfd_link_info
* info
,
12951 asection
* input_section
,
12952 bfd_byte
* contents
,
12953 Elf_Internal_Rela
* relocs
,
12954 Elf_Internal_Sym
* local_syms
,
12955 asection
** local_sections
)
12957 Elf_Internal_Shdr
*symtab_hdr
;
12958 struct elf_link_hash_entry
**sym_hashes
;
12959 Elf_Internal_Rela
*rel
;
12960 Elf_Internal_Rela
*relend
;
12962 struct elf32_arm_link_hash_table
* globals
;
12964 globals
= elf32_arm_hash_table (info
);
12965 if (globals
== NULL
)
12968 symtab_hdr
= & elf_symtab_hdr (input_bfd
);
12969 sym_hashes
= elf_sym_hashes (input_bfd
);
12972 relend
= relocs
+ input_section
->reloc_count
;
12973 for (; rel
< relend
; rel
++)
12976 reloc_howto_type
* howto
;
12977 unsigned long r_symndx
;
12978 Elf_Internal_Sym
* sym
;
12980 struct elf_link_hash_entry
* h
;
12981 bfd_vma relocation
;
12982 bfd_reloc_status_type r
;
12985 bfd_boolean unresolved_reloc
= FALSE
;
12986 char *error_message
= NULL
;
12988 r_symndx
= ELF32_R_SYM (rel
->r_info
);
12989 r_type
= ELF32_R_TYPE (rel
->r_info
);
12990 r_type
= arm_real_reloc_type (globals
, r_type
);
12992 if ( r_type
== R_ARM_GNU_VTENTRY
12993 || r_type
== R_ARM_GNU_VTINHERIT
)
12996 howto
= bfd_reloc
.howto
= elf32_arm_howto_from_type (r_type
);
12999 return _bfd_unrecognized_reloc (input_bfd
, input_section
, r_type
);
13005 if (r_symndx
< symtab_hdr
->sh_info
)
13007 sym
= local_syms
+ r_symndx
;
13008 sym_type
= ELF32_ST_TYPE (sym
->st_info
);
13009 sec
= local_sections
[r_symndx
];
13011 /* An object file might have a reference to a local
13012 undefined symbol. This is a daft object file, but we
13013 should at least do something about it. V4BX & NONE
13014 relocations do not use the symbol and are explicitly
13015 allowed to use the undefined symbol, so allow those.
13016 Likewise for relocations against STN_UNDEF. */
13017 if (r_type
!= R_ARM_V4BX
13018 && r_type
!= R_ARM_NONE
13019 && r_symndx
!= STN_UNDEF
13020 && bfd_is_und_section (sec
)
13021 && ELF_ST_BIND (sym
->st_info
) != STB_WEAK
)
13022 (*info
->callbacks
->undefined_symbol
)
13023 (info
, bfd_elf_string_from_elf_section
13024 (input_bfd
, symtab_hdr
->sh_link
, sym
->st_name
),
13025 input_bfd
, input_section
,
13026 rel
->r_offset
, TRUE
);
13028 if (globals
->use_rel
)
13030 relocation
= (sec
->output_section
->vma
13031 + sec
->output_offset
13033 if (!bfd_link_relocatable (info
)
13034 && (sec
->flags
& SEC_MERGE
)
13035 && ELF_ST_TYPE (sym
->st_info
) == STT_SECTION
)
13038 bfd_vma addend
, value
;
13042 case R_ARM_MOVW_ABS_NC
:
13043 case R_ARM_MOVT_ABS
:
13044 value
= bfd_get_32 (input_bfd
, contents
+ rel
->r_offset
);
13045 addend
= ((value
& 0xf0000) >> 4) | (value
& 0xfff);
13046 addend
= (addend
^ 0x8000) - 0x8000;
13049 case R_ARM_THM_MOVW_ABS_NC
:
13050 case R_ARM_THM_MOVT_ABS
:
13051 value
= bfd_get_16 (input_bfd
, contents
+ rel
->r_offset
)
13053 value
|= bfd_get_16 (input_bfd
,
13054 contents
+ rel
->r_offset
+ 2);
13055 addend
= ((value
& 0xf7000) >> 4) | (value
& 0xff)
13056 | ((value
& 0x04000000) >> 15);
13057 addend
= (addend
^ 0x8000) - 0x8000;
13061 if (howto
->rightshift
13062 || (howto
->src_mask
& (howto
->src_mask
+ 1)))
13065 /* xgettext:c-format */
13066 (_("%pB(%pA+%#" PRIx64
"): "
13067 "%s relocation against SEC_MERGE section"),
13068 input_bfd
, input_section
,
13069 (uint64_t) rel
->r_offset
, howto
->name
);
13073 value
= bfd_get_32 (input_bfd
, contents
+ rel
->r_offset
);
13075 /* Get the (signed) value from the instruction. */
13076 addend
= value
& howto
->src_mask
;
13077 if (addend
& ((howto
->src_mask
+ 1) >> 1))
13079 bfd_signed_vma mask
;
13082 mask
&= ~ howto
->src_mask
;
13090 _bfd_elf_rel_local_sym (output_bfd
, sym
, &msec
, addend
)
13092 addend
+= msec
->output_section
->vma
+ msec
->output_offset
;
13094 /* Cases here must match those in the preceding
13095 switch statement. */
13098 case R_ARM_MOVW_ABS_NC
:
13099 case R_ARM_MOVT_ABS
:
13100 value
= (value
& 0xfff0f000) | ((addend
& 0xf000) << 4)
13101 | (addend
& 0xfff);
13102 bfd_put_32 (input_bfd
, value
, contents
+ rel
->r_offset
);
13105 case R_ARM_THM_MOVW_ABS_NC
:
13106 case R_ARM_THM_MOVT_ABS
:
13107 value
= (value
& 0xfbf08f00) | ((addend
& 0xf700) << 4)
13108 | (addend
& 0xff) | ((addend
& 0x0800) << 15);
13109 bfd_put_16 (input_bfd
, value
>> 16,
13110 contents
+ rel
->r_offset
);
13111 bfd_put_16 (input_bfd
, value
,
13112 contents
+ rel
->r_offset
+ 2);
13116 value
= (value
& ~ howto
->dst_mask
)
13117 | (addend
& howto
->dst_mask
);
13118 bfd_put_32 (input_bfd
, value
, contents
+ rel
->r_offset
);
13124 relocation
= _bfd_elf_rela_local_sym (output_bfd
, sym
, &sec
, rel
);
13128 bfd_boolean warned
, ignored
;
13130 RELOC_FOR_GLOBAL_SYMBOL (info
, input_bfd
, input_section
, rel
,
13131 r_symndx
, symtab_hdr
, sym_hashes
,
13132 h
, sec
, relocation
,
13133 unresolved_reloc
, warned
, ignored
);
13135 sym_type
= h
->type
;
13138 if (sec
!= NULL
&& discarded_section (sec
))
13139 RELOC_AGAINST_DISCARDED_SECTION (info
, input_bfd
, input_section
,
13140 rel
, 1, relend
, howto
, 0, contents
);
13142 if (bfd_link_relocatable (info
))
13144 /* This is a relocatable link. We don't have to change
13145 anything, unless the reloc is against a section symbol,
13146 in which case we have to adjust according to where the
13147 section symbol winds up in the output section. */
13148 if (sym
!= NULL
&& ELF_ST_TYPE (sym
->st_info
) == STT_SECTION
)
13150 if (globals
->use_rel
)
13151 arm_add_to_rel (input_bfd
, contents
+ rel
->r_offset
,
13152 howto
, (bfd_signed_vma
) sec
->output_offset
);
13154 rel
->r_addend
+= sec
->output_offset
;
13160 name
= h
->root
.root
.string
;
13163 name
= (bfd_elf_string_from_elf_section
13164 (input_bfd
, symtab_hdr
->sh_link
, sym
->st_name
));
13165 if (name
== NULL
|| *name
== '\0')
13166 name
= bfd_section_name (input_bfd
, sec
);
13169 if (r_symndx
!= STN_UNDEF
13170 && r_type
!= R_ARM_NONE
13172 || h
->root
.type
== bfd_link_hash_defined
13173 || h
->root
.type
== bfd_link_hash_defweak
)
13174 && IS_ARM_TLS_RELOC (r_type
) != (sym_type
== STT_TLS
))
13177 ((sym_type
== STT_TLS
13178 /* xgettext:c-format */
13179 ? _("%pB(%pA+%#" PRIx64
"): %s used with TLS symbol %s")
13180 /* xgettext:c-format */
13181 : _("%pB(%pA+%#" PRIx64
"): %s used with non-TLS symbol %s")),
13184 (uint64_t) rel
->r_offset
,
13189 /* We call elf32_arm_final_link_relocate unless we're completely
13190 done, i.e., the relaxation produced the final output we want,
13191 and we won't let anybody mess with it. Also, we have to do
13192 addend adjustments in case of a R_ARM_TLS_GOTDESC relocation
13193 both in relaxed and non-relaxed cases. */
13194 if ((elf32_arm_tls_transition (info
, r_type
, h
) != (unsigned)r_type
)
13195 || (IS_ARM_TLS_GNU_RELOC (r_type
)
13196 && !((h
? elf32_arm_hash_entry (h
)->tls_type
:
13197 elf32_arm_local_got_tls_type (input_bfd
)[r_symndx
])
13200 r
= elf32_arm_tls_relax (globals
, input_bfd
, input_section
,
13201 contents
, rel
, h
== NULL
);
13202 /* This may have been marked unresolved because it came from
13203 a shared library. But we've just dealt with that. */
13204 unresolved_reloc
= 0;
13207 r
= bfd_reloc_continue
;
13209 if (r
== bfd_reloc_continue
)
13211 unsigned char branch_type
=
13212 h
? ARM_GET_SYM_BRANCH_TYPE (h
->target_internal
)
13213 : ARM_GET_SYM_BRANCH_TYPE (sym
->st_target_internal
);
13215 r
= elf32_arm_final_link_relocate (howto
, input_bfd
, output_bfd
,
13216 input_section
, contents
, rel
,
13217 relocation
, info
, sec
, name
,
13218 sym_type
, branch_type
, h
,
13223 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
13224 because such sections are not SEC_ALLOC and thus ld.so will
13225 not process them. */
13226 if (unresolved_reloc
13227 && !((input_section
->flags
& SEC_DEBUGGING
) != 0
13229 && _bfd_elf_section_offset (output_bfd
, info
, input_section
,
13230 rel
->r_offset
) != (bfd_vma
) -1)
13233 /* xgettext:c-format */
13234 (_("%pB(%pA+%#" PRIx64
"): "
13235 "unresolvable %s relocation against symbol `%s'"),
13238 (uint64_t) rel
->r_offset
,
13240 h
->root
.root
.string
);
13244 if (r
!= bfd_reloc_ok
)
13248 case bfd_reloc_overflow
:
13249 /* If the overflowing reloc was to an undefined symbol,
13250 we have already printed one error message and there
13251 is no point complaining again. */
13252 if (!h
|| h
->root
.type
!= bfd_link_hash_undefined
)
13253 (*info
->callbacks
->reloc_overflow
)
13254 (info
, (h
? &h
->root
: NULL
), name
, howto
->name
,
13255 (bfd_vma
) 0, input_bfd
, input_section
, rel
->r_offset
);
13258 case bfd_reloc_undefined
:
13259 (*info
->callbacks
->undefined_symbol
)
13260 (info
, name
, input_bfd
, input_section
, rel
->r_offset
, TRUE
);
13263 case bfd_reloc_outofrange
:
13264 error_message
= _("out of range");
13267 case bfd_reloc_notsupported
:
13268 error_message
= _("unsupported relocation");
13271 case bfd_reloc_dangerous
:
13272 /* error_message should already be set. */
13276 error_message
= _("unknown error");
13277 /* Fall through. */
13280 BFD_ASSERT (error_message
!= NULL
);
13281 (*info
->callbacks
->reloc_dangerous
)
13282 (info
, error_message
, input_bfd
, input_section
, rel
->r_offset
);
/* Add a new unwind edit to the list described by HEAD, TAIL.  If TINDEX is zero,
   adds the edit to the start of the list.  (The list must be built in order of
   ascending TINDEX: the function's callers are primarily responsible for
   maintaining that condition).  */

static void
add_unwind_table_edit (arm_unwind_table_edit **head,
		       arm_unwind_table_edit **tail,
		       arm_unwind_edit_type type,
		       asection *linked_section,
		       unsigned int tindex)
{
  arm_unwind_table_edit *new_edit = (arm_unwind_table_edit *)
      xmalloc (sizeof (arm_unwind_table_edit));

  new_edit->type = type;
  new_edit->linked_section = linked_section;
  new_edit->index = tindex;

  if (tindex > 0)
    {
      new_edit->next = NULL;

      if (*tail)
	(*tail)->next = new_edit;

      (*tail) = new_edit;

      if (!*head)
	(*head) = new_edit;
    }
  else
    {
      new_edit->next = *head;

      if (!*tail)
	*tail = new_edit;

      *head = new_edit;
    }
}
static _arm_elf_section_data *get_arm_elf_section_data (asection *);

/* Increase the size of EXIDX_SEC by ADJUST bytes.  ADJUST may be negative.  */

static void
adjust_exidx_size (asection *exidx_sec, int adjust)
{
  asection *out_sec;

  if (!exidx_sec->rawsize)
    exidx_sec->rawsize = exidx_sec->size;

  bfd_set_section_size (exidx_sec->owner, exidx_sec, exidx_sec->size + adjust);
  out_sec = exidx_sec->output_section;
  /* Adjust size of output section.  */
  bfd_set_section_size (out_sec->owner, out_sec, out_sec->size + adjust);
}

/* Insert an EXIDX_CANTUNWIND marker at the end of a section.  */

static void
insert_cantunwind_after (asection *text_sec, asection *exidx_sec)
{
  struct _arm_elf_section_data *exidx_arm_data;

  exidx_arm_data = get_arm_elf_section_data (exidx_sec);
  add_unwind_table_edit (
    &exidx_arm_data->u.exidx.unwind_edit_list,
    &exidx_arm_data->u.exidx.unwind_edit_tail,
    INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);

  exidx_arm_data->additional_reloc_count++;

  adjust_exidx_size (exidx_sec, 8);
}
/* Scan .ARM.exidx tables, and create a list describing edits which should be
   made to those tables, such that:

   1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
   2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
      codes which have been inlined into the index).

   If MERGE_EXIDX_ENTRIES is false, duplicate entries are not merged.

   The edits are applied when the tables are written
   (in elf32_arm_write_section).  */
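/* Illustrative sketch (not part of the original source) of the edit list
   this routine builds for a text section whose table carries a duplicated
   entry and whose coverage then ends:

     DELETE_EXIDX_ENTRY              (tindex of the duplicate)
     INSERT_EXIDX_CANTUNWIND_AT_END  (tindex UINT_MAX)

   Deletions shrink the table via adjust_exidx_size, and the pending
   edits are only materialised later, in elf32_arm_write_section.  */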
13380 elf32_arm_fix_exidx_coverage (asection
**text_section_order
,
13381 unsigned int num_text_sections
,
13382 struct bfd_link_info
*info
,
13383 bfd_boolean merge_exidx_entries
)
13386 unsigned int last_second_word
= 0, i
;
13387 asection
*last_exidx_sec
= NULL
;
13388 asection
*last_text_sec
= NULL
;
13389 int last_unwind_type
= -1;
  /* Walk over all EXIDX sections, and create backlinks from the corresponding
     text sections.  */
13393 for (inp
= info
->input_bfds
; inp
!= NULL
; inp
= inp
->link
.next
)
13397 for (sec
= inp
->sections
; sec
!= NULL
; sec
= sec
->next
)
13399 struct bfd_elf_section_data
*elf_sec
= elf_section_data (sec
);
13400 Elf_Internal_Shdr
*hdr
= &elf_sec
->this_hdr
;
13402 if (!hdr
|| hdr
->sh_type
!= SHT_ARM_EXIDX
)
13405 if (elf_sec
->linked_to
)
13407 Elf_Internal_Shdr
*linked_hdr
13408 = &elf_section_data (elf_sec
->linked_to
)->this_hdr
;
13409 struct _arm_elf_section_data
*linked_sec_arm_data
13410 = get_arm_elf_section_data (linked_hdr
->bfd_section
);
13412 if (linked_sec_arm_data
== NULL
)
	  /* Link this .ARM.exidx section back from the text section it
	     describes.  */
13417 linked_sec_arm_data
->u
.text
.arm_exidx_sec
= sec
;
  /* Walk all text sections in order of increasing VMA.  Eliminate duplicate
     index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
     and add EXIDX_CANTUNWIND entries for sections with no unwind table data.  */
13426 for (i
= 0; i
< num_text_sections
; i
++)
13428 asection
*sec
= text_section_order
[i
];
13429 asection
*exidx_sec
;
13430 struct _arm_elf_section_data
*arm_data
= get_arm_elf_section_data (sec
);
13431 struct _arm_elf_section_data
*exidx_arm_data
;
13432 bfd_byte
*contents
= NULL
;
13433 int deleted_exidx_bytes
= 0;
13435 arm_unwind_table_edit
*unwind_edit_head
= NULL
;
13436 arm_unwind_table_edit
*unwind_edit_tail
= NULL
;
13437 Elf_Internal_Shdr
*hdr
;
13440 if (arm_data
== NULL
)
13443 exidx_sec
= arm_data
->u
.text
.arm_exidx_sec
;
13444 if (exidx_sec
== NULL
)
13446 /* Section has no unwind data. */
13447 if (last_unwind_type
== 0 || !last_exidx_sec
)
13450 /* Ignore zero sized sections. */
13451 if (sec
->size
== 0)
13454 insert_cantunwind_after(last_text_sec
, last_exidx_sec
);
13455 last_unwind_type
= 0;
13459 /* Skip /DISCARD/ sections. */
13460 if (bfd_is_abs_section (exidx_sec
->output_section
))
13463 hdr
= &elf_section_data (exidx_sec
)->this_hdr
;
13464 if (hdr
->sh_type
!= SHT_ARM_EXIDX
)
13467 exidx_arm_data
= get_arm_elf_section_data (exidx_sec
);
13468 if (exidx_arm_data
== NULL
)
13471 ibfd
= exidx_sec
->owner
;
13473 if (hdr
->contents
!= NULL
)
13474 contents
= hdr
->contents
;
13475 else if (! bfd_malloc_and_get_section (ibfd
, exidx_sec
, &contents
))
13479 if (last_unwind_type
> 0)
13481 unsigned int first_word
= bfd_get_32 (ibfd
, contents
);
	  /* Add cantunwind if first unwind item does not match section
	     start.  */
13484 if (first_word
!= sec
->vma
)
13486 insert_cantunwind_after (last_text_sec
, last_exidx_sec
);
13487 last_unwind_type
= 0;
13491 for (j
= 0; j
< hdr
->sh_size
; j
+= 8)
13493 unsigned int second_word
= bfd_get_32 (ibfd
, contents
+ j
+ 4);
13497 /* An EXIDX_CANTUNWIND entry. */
13498 if (second_word
== 1)
13500 if (last_unwind_type
== 0)
13504 /* Inlined unwinding data. Merge if equal to previous. */
13505 else if ((second_word
& 0x80000000) != 0)
13507 if (merge_exidx_entries
13508 && last_second_word
== second_word
&& last_unwind_type
== 1)
13511 last_second_word
= second_word
;
13513 /* Normal table entry. In theory we could merge these too,
13514 but duplicate entries are likely to be much less common. */
13518 if (elide
&& !bfd_link_relocatable (info
))
13520 add_unwind_table_edit (&unwind_edit_head
, &unwind_edit_tail
,
13521 DELETE_EXIDX_ENTRY
, NULL
, j
/ 8);
13523 deleted_exidx_bytes
+= 8;
13526 last_unwind_type
= unwind_type
;
13529 /* Free contents if we allocated it ourselves. */
13530 if (contents
!= hdr
->contents
)
13533 /* Record edits to be applied later (in elf32_arm_write_section). */
13534 exidx_arm_data
->u
.exidx
.unwind_edit_list
= unwind_edit_head
;
13535 exidx_arm_data
->u
.exidx
.unwind_edit_tail
= unwind_edit_tail
;
13537 if (deleted_exidx_bytes
> 0)
13538 adjust_exidx_size(exidx_sec
, -deleted_exidx_bytes
);
13540 last_exidx_sec
= exidx_sec
;
13541 last_text_sec
= sec
;
13544 /* Add terminating CANTUNWIND entry. */
13545 if (!bfd_link_relocatable (info
) && last_exidx_sec
13546 && last_unwind_type
!= 0)
13547 insert_cantunwind_after(last_text_sec
, last_exidx_sec
);
13553 elf32_arm_output_glue_section (struct bfd_link_info
*info
, bfd
*obfd
,
13554 bfd
*ibfd
, const char *name
)
13556 asection
*sec
, *osec
;
13558 sec
= bfd_get_linker_section (ibfd
, name
);
13559 if (sec
== NULL
|| (sec
->flags
& SEC_EXCLUDE
) != 0)
13562 osec
= sec
->output_section
;
13563 if (elf32_arm_write_section (obfd
, info
, sec
, sec
->contents
))
13566 if (! bfd_set_section_contents (obfd
, osec
, sec
->contents
,
13567 sec
->output_offset
, sec
->size
))
13574 elf32_arm_final_link (bfd
*abfd
, struct bfd_link_info
*info
)
13576 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (info
);
13577 asection
*sec
, *osec
;
13579 if (globals
== NULL
)
13582 /* Invoke the regular ELF backend linker to do all the work. */
13583 if (!bfd_elf_final_link (abfd
, info
))
13586 /* Process stub sections (eg BE8 encoding, ...). */
13587 struct elf32_arm_link_hash_table
*htab
= elf32_arm_hash_table (info
);
13589 for (i
=0; i
<htab
->top_id
; i
++)
13591 sec
= htab
->stub_group
[i
].stub_sec
;
13592 /* Only process it once, in its link_sec slot. */
13593 if (sec
&& i
== htab
->stub_group
[i
].link_sec
->id
)
13595 osec
= sec
->output_section
;
13596 elf32_arm_write_section (abfd
, info
, sec
, sec
->contents
);
13597 if (! bfd_set_section_contents (abfd
, osec
, sec
->contents
,
13598 sec
->output_offset
, sec
->size
))
  /* Write out any glue sections now that we have created all the
     stubs.  */
13605 if (globals
->bfd_of_glue_owner
!= NULL
)
13607 if (! elf32_arm_output_glue_section (info
, abfd
,
13608 globals
->bfd_of_glue_owner
,
13609 ARM2THUMB_GLUE_SECTION_NAME
))
13612 if (! elf32_arm_output_glue_section (info
, abfd
,
13613 globals
->bfd_of_glue_owner
,
13614 THUMB2ARM_GLUE_SECTION_NAME
))
13617 if (! elf32_arm_output_glue_section (info
, abfd
,
13618 globals
->bfd_of_glue_owner
,
13619 VFP11_ERRATUM_VENEER_SECTION_NAME
))
13622 if (! elf32_arm_output_glue_section (info
, abfd
,
13623 globals
->bfd_of_glue_owner
,
13624 STM32L4XX_ERRATUM_VENEER_SECTION_NAME
))
13627 if (! elf32_arm_output_glue_section (info
, abfd
,
13628 globals
->bfd_of_glue_owner
,
13629 ARM_BX_GLUE_SECTION_NAME
))
13636 /* Return a best guess for the machine number based on the attributes. */
13638 static unsigned int
13639 bfd_arm_get_mach_from_attributes (bfd
* abfd
)
13641 int arch
= bfd_elf_get_obj_attr_int (abfd
, OBJ_ATTR_PROC
, Tag_CPU_arch
);
13645 case TAG_CPU_ARCH_PRE_V4
: return bfd_mach_arm_3M
;
13646 case TAG_CPU_ARCH_V4
: return bfd_mach_arm_4
;
13647 case TAG_CPU_ARCH_V4T
: return bfd_mach_arm_4T
;
13648 case TAG_CPU_ARCH_V5T
: return bfd_mach_arm_5T
;
13650 case TAG_CPU_ARCH_V5TE
:
13654 BFD_ASSERT (Tag_CPU_name
< NUM_KNOWN_OBJ_ATTRIBUTES
);
13655 name
= elf_known_obj_attributes (abfd
) [OBJ_ATTR_PROC
][Tag_CPU_name
].s
;
13659 if (strcmp (name
, "IWMMXT2") == 0)
13660 return bfd_mach_arm_iWMMXt2
;
13662 if (strcmp (name
, "IWMMXT") == 0)
13663 return bfd_mach_arm_iWMMXt
;
13665 if (strcmp (name
, "XSCALE") == 0)
13669 BFD_ASSERT (Tag_WMMX_arch
< NUM_KNOWN_OBJ_ATTRIBUTES
);
13670 wmmx
= elf_known_obj_attributes (abfd
) [OBJ_ATTR_PROC
][Tag_WMMX_arch
].i
;
13673 case 1: return bfd_mach_arm_iWMMXt
;
13674 case 2: return bfd_mach_arm_iWMMXt2
;
13675 default: return bfd_mach_arm_XScale
;
13680 return bfd_mach_arm_5TE
;
13683 case TAG_CPU_ARCH_V5TEJ
:
13684 return bfd_mach_arm_5TEJ
;
13685 case TAG_CPU_ARCH_V6
:
13686 return bfd_mach_arm_6
;
13687 case TAG_CPU_ARCH_V6KZ
:
13688 return bfd_mach_arm_6KZ
;
13689 case TAG_CPU_ARCH_V6T2
:
13690 return bfd_mach_arm_6T2
;
13691 case TAG_CPU_ARCH_V6K
:
13692 return bfd_mach_arm_6K
;
13693 case TAG_CPU_ARCH_V7
:
13694 return bfd_mach_arm_7
;
13695 case TAG_CPU_ARCH_V6_M
:
13696 return bfd_mach_arm_6M
;
13697 case TAG_CPU_ARCH_V6S_M
:
13698 return bfd_mach_arm_6SM
;
13699 case TAG_CPU_ARCH_V7E_M
:
13700 return bfd_mach_arm_7EM
;
13701 case TAG_CPU_ARCH_V8
:
13702 return bfd_mach_arm_8
;
13703 case TAG_CPU_ARCH_V8R
:
13704 return bfd_mach_arm_8R
;
13705 case TAG_CPU_ARCH_V8M_BASE
:
13706 return bfd_mach_arm_8M_BASE
;
13707 case TAG_CPU_ARCH_V8M_MAIN
:
13708 return bfd_mach_arm_8M_MAIN
;
13711 /* Force entry to be added for any new known Tag_CPU_arch value. */
13712 BFD_ASSERT (arch
> MAX_TAG_CPU_ARCH
);
13714 /* Unknown Tag_CPU_arch value. */
13715 return bfd_mach_arm_unknown
;
13719 /* Set the right machine number. */
13722 elf32_arm_object_p (bfd
*abfd
)
13726 mach
= bfd_arm_get_mach_from_notes (abfd
, ARM_NOTE_SECTION
);
13728 if (mach
== bfd_mach_arm_unknown
)
13730 if (elf_elfheader (abfd
)->e_flags
& EF_ARM_MAVERICK_FLOAT
)
13731 mach
= bfd_mach_arm_ep9312
;
13733 mach
= bfd_arm_get_mach_from_attributes (abfd
);
13736 bfd_default_set_arch_mach (abfd
, bfd_arch_arm
, mach
);
13740 /* Function to keep ARM specific flags in the ELF header. */
13743 elf32_arm_set_private_flags (bfd
*abfd
, flagword flags
)
13745 if (elf_flags_init (abfd
)
13746 && elf_elfheader (abfd
)->e_flags
!= flags
)
13748 if (EF_ARM_EABI_VERSION (flags
) == EF_ARM_EABI_UNKNOWN
)
13750 if (flags
& EF_ARM_INTERWORK
)
13752 (_("warning: not setting interworking flag of %pB since it has already been specified as non-interworking"),
13756 (_("warning: clearing the interworking flag of %pB due to outside request"),
13762 elf_elfheader (abfd
)->e_flags
= flags
;
13763 elf_flags_init (abfd
) = TRUE
;
13769 /* Copy backend specific data from one object module to another. */
13772 elf32_arm_copy_private_bfd_data (bfd
*ibfd
, bfd
*obfd
)
13775 flagword out_flags
;
13777 if (! is_arm_elf (ibfd
) || ! is_arm_elf (obfd
))
13780 in_flags
= elf_elfheader (ibfd
)->e_flags
;
13781 out_flags
= elf_elfheader (obfd
)->e_flags
;
13783 if (elf_flags_init (obfd
)
13784 && EF_ARM_EABI_VERSION (out_flags
) == EF_ARM_EABI_UNKNOWN
13785 && in_flags
!= out_flags
)
13787 /* Cannot mix APCS26 and APCS32 code. */
13788 if ((in_flags
& EF_ARM_APCS_26
) != (out_flags
& EF_ARM_APCS_26
))
13791 /* Cannot mix float APCS and non-float APCS code. */
13792 if ((in_flags
& EF_ARM_APCS_FLOAT
) != (out_flags
& EF_ARM_APCS_FLOAT
))
13795 /* If the src and dest have different interworking flags
13796 then turn off the interworking bit. */
13797 if ((in_flags
& EF_ARM_INTERWORK
) != (out_flags
& EF_ARM_INTERWORK
))
13799 if (out_flags
& EF_ARM_INTERWORK
)
13801 (_("warning: clearing the interworking flag of %pB because non-interworking code in %pB has been linked with it"),
13804 in_flags
&= ~EF_ARM_INTERWORK
;
13807 /* Likewise for PIC, though don't warn for this case. */
13808 if ((in_flags
& EF_ARM_PIC
) != (out_flags
& EF_ARM_PIC
))
13809 in_flags
&= ~EF_ARM_PIC
;
13812 elf_elfheader (obfd
)->e_flags
= in_flags
;
13813 elf_flags_init (obfd
) = TRUE
;
13815 return _bfd_elf_copy_private_bfd_data (ibfd
, obfd
);
/* Values for Tag_ABI_PCS_R9_use.  */
enum
{
  AEABI_R9_V6,
  AEABI_R9_SB,
  AEABI_R9_TLS,
  AEABI_R9_unused
};

/* Values for Tag_ABI_PCS_RW_data.  */
enum
{
  AEABI_PCS_RW_data_absolute,
  AEABI_PCS_RW_data_PCrel,
  AEABI_PCS_RW_data_SBrel,
  AEABI_PCS_RW_data_unused
};

/* Values for Tag_ABI_enum_size.  */
enum
{
  AEABI_enum_unused,
  AEABI_enum_short,
  AEABI_enum_wide,
  AEABI_enum_forced_wide
};
/* Determine whether an object attribute tag takes an integer, a
   string or both.  */

static int
elf32_arm_obj_attrs_arg_type (int tag)
{
  if (tag == Tag_compatibility)
    return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
  else if (tag == Tag_nodefaults)
    return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
  else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
    return ATTR_TYPE_FLAG_STR_VAL;
  else if (tag < 32)
    return ATTR_TYPE_FLAG_INT_VAL;
  else
    return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
}
/* The ABI defines that Tag_conformance should be emitted first, and that
   Tag_nodefaults should be second (if either is defined).  This sets those
   two positions, and bumps up the position of all the remaining tags to
   compensate.  */
static int
elf32_arm_obj_attrs_order (int num)
{
  if (num == LEAST_KNOWN_OBJ_ATTRIBUTE)
    return Tag_conformance;
  if (num == LEAST_KNOWN_OBJ_ATTRIBUTE + 1)
    return Tag_nodefaults;
  if ((num - 2) < Tag_nodefaults)
    return num - 2;
  if ((num - 1) < Tag_conformance)
    return num - 1;
  return num;
}
/* Attribute numbers >=64 (mod 128) can be safely ignored.  */

static bfd_boolean
elf32_arm_obj_attrs_handle_unknown (bfd *abfd, int tag)
{
  if ((tag & 127) < 64)
    {
      _bfd_error_handler
	(_("%pB: unknown mandatory EABI object attribute %d"),
	 abfd, tag);
      bfd_set_error (bfd_error_bad_value);
      return FALSE;
    }
  else
    {
      _bfd_error_handler
	(_("warning: %pB: unknown EABI object attribute %d"),
	 abfd, tag);
      return TRUE;
    }
}
/* Read the architecture from the Tag_also_compatible_with attribute, if any.
   Returns -1 if no architecture could be read.  */

static int
get_secondary_compatible_arch (bfd *abfd)
{
  obj_attribute *attr =
    &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];

  /* Note: the tag and its argument below are uleb128 values, though
     currently-defined values fit in one byte for each.  */
  if (attr->s
      && attr->s[0] == Tag_CPU_arch
      && (attr->s[1] & 128) != 128
      && attr->s[2] == 0)
    return attr->s[1];

  /* This tag is "safely ignorable", so don't complain if it looks funny.  */
  return -1;
}
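/* Encoding sketch (illustrative, not part of the original source): a
   Tag_also_compatible_with value naming ARMv4T is stored as the three
   bytes { Tag_CPU_arch, TAG_CPU_ARCH_V4T, 0 }, which is exactly what
   the test above looks for: the sub-tag, a one-byte ULEB128 argument
   with the high bit clear, and a terminating NUL.  */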
13923 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
13924 The tag is removed if ARCH is -1. */
13927 set_secondary_compatible_arch (bfd
*abfd
, int arch
)
13929 obj_attribute
*attr
=
13930 &elf_known_obj_attributes_proc (abfd
)[Tag_also_compatible_with
];
13938 /* Note: the tag and its argument below are uleb128 values, though
13939 currently-defined values fit in one byte for each. */
13941 attr
->s
= (char *) bfd_alloc (abfd
, 3);
13942 attr
->s
[0] = Tag_CPU_arch
;
13947 /* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
13951 tag_cpu_arch_combine (bfd
*ibfd
, int oldtag
, int *secondary_compat_out
,
13952 int newtag
, int secondary_compat
)
13954 #define T(X) TAG_CPU_ARCH_##X
13955 int tagl
, tagh
, result
;
13958 T(V6T2
), /* PRE_V4. */
13960 T(V6T2
), /* V4T. */
13961 T(V6T2
), /* V5T. */
13962 T(V6T2
), /* V5TE. */
13963 T(V6T2
), /* V5TEJ. */
13966 T(V6T2
) /* V6T2. */
13970 T(V6K
), /* PRE_V4. */
13974 T(V6K
), /* V5TE. */
13975 T(V6K
), /* V5TEJ. */
13977 T(V6KZ
), /* V6KZ. */
13983 T(V7
), /* PRE_V4. */
13988 T(V7
), /* V5TEJ. */
14001 T(V6K
), /* V5TE. */
14002 T(V6K
), /* V5TEJ. */
14004 T(V6KZ
), /* V6KZ. */
14008 T(V6_M
) /* V6_M. */
14010 const int v6s_m
[] =
14016 T(V6K
), /* V5TE. */
14017 T(V6K
), /* V5TEJ. */
14019 T(V6KZ
), /* V6KZ. */
14023 T(V6S_M
), /* V6_M. */
14024 T(V6S_M
) /* V6S_M. */
14026 const int v7e_m
[] =
14030 T(V7E_M
), /* V4T. */
14031 T(V7E_M
), /* V5T. */
14032 T(V7E_M
), /* V5TE. */
14033 T(V7E_M
), /* V5TEJ. */
14034 T(V7E_M
), /* V6. */
14035 T(V7E_M
), /* V6KZ. */
14036 T(V7E_M
), /* V6T2. */
14037 T(V7E_M
), /* V6K. */
14038 T(V7E_M
), /* V7. */
14039 T(V7E_M
), /* V6_M. */
14040 T(V7E_M
), /* V6S_M. */
14041 T(V7E_M
) /* V7E_M. */
14045 T(V8
), /* PRE_V4. */
14050 T(V8
), /* V5TEJ. */
14057 T(V8
), /* V6S_M. */
14058 T(V8
), /* V7E_M. */
14063 T(V8R
), /* PRE_V4. */
14067 T(V8R
), /* V5TE. */
14068 T(V8R
), /* V5TEJ. */
14070 T(V8R
), /* V6KZ. */
14071 T(V8R
), /* V6T2. */
14074 T(V8R
), /* V6_M. */
14075 T(V8R
), /* V6S_M. */
14076 T(V8R
), /* V7E_M. */
14080 const int v8m_baseline
[] =
14093 T(V8M_BASE
), /* V6_M. */
14094 T(V8M_BASE
), /* V6S_M. */
14098 T(V8M_BASE
) /* V8-M BASELINE. */
14100 const int v8m_mainline
[] =
14112 T(V8M_MAIN
), /* V7. */
14113 T(V8M_MAIN
), /* V6_M. */
14114 T(V8M_MAIN
), /* V6S_M. */
14115 T(V8M_MAIN
), /* V7E_M. */
14118 T(V8M_MAIN
), /* V8-M BASELINE. */
14119 T(V8M_MAIN
) /* V8-M MAINLINE. */
14121 const int v4t_plus_v6_m
[] =
14127 T(V5TE
), /* V5TE. */
14128 T(V5TEJ
), /* V5TEJ. */
14130 T(V6KZ
), /* V6KZ. */
14131 T(V6T2
), /* V6T2. */
14134 T(V6_M
), /* V6_M. */
14135 T(V6S_M
), /* V6S_M. */
14136 T(V7E_M
), /* V7E_M. */
14139 T(V8M_BASE
), /* V8-M BASELINE. */
14140 T(V8M_MAIN
), /* V8-M MAINLINE. */
14141 T(V4T_PLUS_V6_M
) /* V4T plus V6_M. */
14143 const int *comb
[] =
14155 /* Pseudo-architecture. */
14159 /* Check we've not got a higher architecture than we know about. */
14161 if (oldtag
> MAX_TAG_CPU_ARCH
|| newtag
> MAX_TAG_CPU_ARCH
)
14163 _bfd_error_handler (_("error: %pB: unknown CPU architecture"), ibfd
);
14167 /* Override old tag if we have a Tag_also_compatible_with on the output. */
14169 if ((oldtag
== T(V6_M
) && *secondary_compat_out
== T(V4T
))
14170 || (oldtag
== T(V4T
) && *secondary_compat_out
== T(V6_M
)))
14171 oldtag
= T(V4T_PLUS_V6_M
);
14173 /* And override the new tag if we have a Tag_also_compatible_with on the
14176 if ((newtag
== T(V6_M
) && secondary_compat
== T(V4T
))
14177 || (newtag
== T(V4T
) && secondary_compat
== T(V6_M
)))
14178 newtag
= T(V4T_PLUS_V6_M
);
14180 tagl
= (oldtag
< newtag
) ? oldtag
: newtag
;
14181 result
= tagh
= (oldtag
> newtag
) ? oldtag
: newtag
;
14183 /* Architectures before V6KZ add features monotonically. */
14184 if (tagh
<= TAG_CPU_ARCH_V6KZ
)
14187 result
= comb
[tagh
- T(V6T2
)] ? comb
[tagh
- T(V6T2
)][tagl
] : -1;
14189 /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
14190 as the canonical version. */
14191 if (result
== T(V4T_PLUS_V6_M
))
14194 *secondary_compat_out
= T(V6_M
);
14197 *secondary_compat_out
= -1;
14201 _bfd_error_handler (_("error: %pB: conflicting CPU architectures %d/%d"),
14202 ibfd
, oldtag
, newtag
);
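/* Worked example (illustrative, not part of the original source):
   merging an input marked Tag_CPU_arch = V6_M into an output currently
   marked V4T (or vice versa) yields the V4T_PLUS_V6_M pseudo-tag via
   the v4t_plus_v6_m table; the caller then writes it back in canonical
   form as Tag_CPU_arch = V4T plus Tag_also_compatible_with = V6_M.  */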
/* Query attributes object to see if integer divide instructions may be
   present in an object.  */
static bfd_boolean
elf32_arm_attributes_accept_div (const obj_attribute *attr)
{
  int arch = attr[Tag_CPU_arch].i;
  int profile = attr[Tag_CPU_arch_profile].i;

  switch (attr[Tag_DIV_use].i)
    {
    case 0:
      /* Integer divide allowed if instruction contained in
	 architecture.  */
      if (arch == TAG_CPU_ARCH_V7 && (profile == 'R' || profile == 'M'))
	return TRUE;
      else if (arch >= TAG_CPU_ARCH_V7E_M)
	return TRUE;
      else
	return FALSE;

    case 1:
      /* Integer divide explicitly prohibited.  */
      return FALSE;

    default:
      /* Unrecognised case - treat as allowing divide everywhere.  */
    case 2:
      /* Integer divide allowed in ARM state.  */
      return TRUE;
    }
}

/* Query attributes object to see if integer divide instructions are
   forbidden to be in the object.  This is not the inverse of
   elf32_arm_attributes_accept_div.  */
static bfd_boolean
elf32_arm_attributes_forbid_div (const obj_attribute *attr)
{
  return attr[Tag_DIV_use].i == 1;
}
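/* Summary sketch (illustrative, not part of the original source) of how
   the two predicates read Tag_DIV_use:

     value 0: divide permitted only if the base architecture provides it
	      (v7 R/M profiles, or v7E-M and later) -> accept_div decides;
     value 1: divide explicitly prohibited          -> forbid_div is true;
     value 2: divide explicitly permitted           -> accept_div is true.

   A value of 0 is neither always accepted nor forbidden, which is why
   forbid_div is not simply the negation of accept_div.  */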
14250 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
14251 are conflicting attributes. */
14254 elf32_arm_merge_eabi_attributes (bfd
*ibfd
, struct bfd_link_info
*info
)
14256 bfd
*obfd
= info
->output_bfd
;
14257 obj_attribute
*in_attr
;
14258 obj_attribute
*out_attr
;
14259 /* Some tags have 0 = don't care, 1 = strong requirement,
14260 2 = weak requirement. */
14261 static const int order_021
[3] = {0, 2, 1};
14263 bfd_boolean result
= TRUE
;
14264 const char *sec_name
= get_elf_backend_data (ibfd
)->obj_attrs_section
;
  /* Skip the linker stubs file.  This preserves previous behavior
     of accepting unknown attributes in the first input file - but
     is that a bug?  */
14269 if (ibfd
->flags
& BFD_LINKER_CREATED
)
  /* Skip any input that has no attribute section.
     This makes it possible to link object files without an attribute
     section with any others.  */
14275 if (bfd_get_section_by_name (ibfd
, sec_name
) == NULL
)
14278 if (!elf_known_obj_attributes_proc (obfd
)[0].i
)
14280 /* This is the first object. Copy the attributes. */
14281 _bfd_elf_copy_obj_attributes (ibfd
, obfd
);
14283 out_attr
= elf_known_obj_attributes_proc (obfd
);
      /* Use the Tag_null value to indicate that the attributes have been
	 initialized.  */
14289 /* We do not output objects with Tag_MPextension_use_legacy - we move
14290 the attribute's value to Tag_MPextension_use. */
14291 if (out_attr
[Tag_MPextension_use_legacy
].i
!= 0)
14293 if (out_attr
[Tag_MPextension_use
].i
!= 0
14294 && out_attr
[Tag_MPextension_use_legacy
].i
14295 != out_attr
[Tag_MPextension_use
].i
)
14298 (_("Error: %pB has both the current and legacy "
14299 "Tag_MPextension_use attributes"), ibfd
);
14303 out_attr
[Tag_MPextension_use
] =
14304 out_attr
[Tag_MPextension_use_legacy
];
14305 out_attr
[Tag_MPextension_use_legacy
].type
= 0;
14306 out_attr
[Tag_MPextension_use_legacy
].i
= 0;
14312 in_attr
= elf_known_obj_attributes_proc (ibfd
);
14313 out_attr
= elf_known_obj_attributes_proc (obfd
);
14314 /* This needs to happen before Tag_ABI_FP_number_model is merged. */
14315 if (in_attr
[Tag_ABI_VFP_args
].i
!= out_attr
[Tag_ABI_VFP_args
].i
)
14317 /* Ignore mismatches if the object doesn't use floating point or is
14318 floating point ABI independent. */
14319 if (out_attr
[Tag_ABI_FP_number_model
].i
== AEABI_FP_number_model_none
14320 || (in_attr
[Tag_ABI_FP_number_model
].i
!= AEABI_FP_number_model_none
14321 && out_attr
[Tag_ABI_VFP_args
].i
== AEABI_VFP_args_compatible
))
14322 out_attr
[Tag_ABI_VFP_args
].i
= in_attr
[Tag_ABI_VFP_args
].i
;
14323 else if (in_attr
[Tag_ABI_FP_number_model
].i
!= AEABI_FP_number_model_none
14324 && in_attr
[Tag_ABI_VFP_args
].i
!= AEABI_VFP_args_compatible
)
14327 (_("error: %pB uses VFP register arguments, %pB does not"),
14328 in_attr
[Tag_ABI_VFP_args
].i
? ibfd
: obfd
,
14329 in_attr
[Tag_ABI_VFP_args
].i
? obfd
: ibfd
);
14334 for (i
= LEAST_KNOWN_OBJ_ATTRIBUTE
; i
< NUM_KNOWN_OBJ_ATTRIBUTES
; i
++)
14336 /* Merge this attribute with existing attributes. */
14339 case Tag_CPU_raw_name
:
14341 /* These are merged after Tag_CPU_arch. */
14344 case Tag_ABI_optimization_goals
:
14345 case Tag_ABI_FP_optimization_goals
:
14346 /* Use the first value seen. */
14351 int secondary_compat
= -1, secondary_compat_out
= -1;
14352 unsigned int saved_out_attr
= out_attr
[i
].i
;
14354 static const char *name_table
[] =
14356 /* These aren't real CPU names, but we can't guess
14357 that from the architecture version alone. */
14373 "ARM v8-M.baseline",
14374 "ARM v8-M.mainline",
14377 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
14378 secondary_compat
= get_secondary_compatible_arch (ibfd
);
14379 secondary_compat_out
= get_secondary_compatible_arch (obfd
);
14380 arch_attr
= tag_cpu_arch_combine (ibfd
, out_attr
[i
].i
,
14381 &secondary_compat_out
,
14385 /* Return with error if failed to merge. */
14386 if (arch_attr
== -1)
14389 out_attr
[i
].i
= arch_attr
;
14391 set_secondary_compatible_arch (obfd
, secondary_compat_out
);
14393 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
14394 if (out_attr
[i
].i
== saved_out_attr
)
14395 ; /* Leave the names alone. */
14396 else if (out_attr
[i
].i
== in_attr
[i
].i
)
14398 /* The output architecture has been changed to match the
14399 input architecture. Use the input names. */
14400 out_attr
[Tag_CPU_name
].s
= in_attr
[Tag_CPU_name
].s
14401 ? _bfd_elf_attr_strdup (obfd
, in_attr
[Tag_CPU_name
].s
)
14403 out_attr
[Tag_CPU_raw_name
].s
= in_attr
[Tag_CPU_raw_name
].s
14404 ? _bfd_elf_attr_strdup (obfd
, in_attr
[Tag_CPU_raw_name
].s
)
14409 out_attr
[Tag_CPU_name
].s
= NULL
;
14410 out_attr
[Tag_CPU_raw_name
].s
= NULL
;
14413 /* If we still don't have a value for Tag_CPU_name,
14414 make one up now. Tag_CPU_raw_name remains blank. */
14415 if (out_attr
[Tag_CPU_name
].s
== NULL
14416 && out_attr
[i
].i
< ARRAY_SIZE (name_table
))
14417 out_attr
[Tag_CPU_name
].s
=
14418 _bfd_elf_attr_strdup (obfd
, name_table
[out_attr
[i
].i
]);
14422 case Tag_ARM_ISA_use
:
14423 case Tag_THUMB_ISA_use
:
14424 case Tag_WMMX_arch
:
14425 case Tag_Advanced_SIMD_arch
:
14426 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */
14427 case Tag_ABI_FP_rounding
:
14428 case Tag_ABI_FP_exceptions
:
14429 case Tag_ABI_FP_user_exceptions
:
14430 case Tag_ABI_FP_number_model
:
14431 case Tag_FP_HP_extension
:
14432 case Tag_CPU_unaligned_access
:
14434 case Tag_MPextension_use
:
14435 /* Use the largest value specified. */
14436 if (in_attr
[i
].i
> out_attr
[i
].i
)
14437 out_attr
[i
].i
= in_attr
[i
].i
;
14440 case Tag_ABI_align_preserved
:
14441 case Tag_ABI_PCS_RO_data
:
14442 /* Use the smallest value specified. */
14443 if (in_attr
[i
].i
< out_attr
[i
].i
)
14444 out_attr
[i
].i
= in_attr
[i
].i
;
14447 case Tag_ABI_align_needed
:
14448 if ((in_attr
[i
].i
> 0 || out_attr
[i
].i
> 0)
14449 && (in_attr
[Tag_ABI_align_preserved
].i
== 0
14450 || out_attr
[Tag_ABI_align_preserved
].i
== 0))
14452 /* This error message should be enabled once all non-conformant
14453 binaries in the toolchain have had the attributes set
14456 (_("error: %pB: 8-byte data alignment conflicts with %pB"),
14460 /* Fall through. */
14461 case Tag_ABI_FP_denormal
:
14462 case Tag_ABI_PCS_GOT_use
:
14463 /* Use the "greatest" from the sequence 0, 2, 1, or the largest
14464 value if greater than 2 (for future-proofing). */
14465 if ((in_attr
[i
].i
> 2 && in_attr
[i
].i
> out_attr
[i
].i
)
14466 || (in_attr
[i
].i
<= 2 && out_attr
[i
].i
<= 2
14467 && order_021
[in_attr
[i
].i
] > order_021
[out_attr
[i
].i
]))
14468 out_attr
[i
].i
= in_attr
[i
].i
;
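	  /* Worked example (illustrative, not part of the original source):
	     merging in_attr = 1 (strong requirement) with out_attr = 2 (weak
	     requirement) gives order_021[1] = 2 > order_021[2] = 1, so the
	     strong requirement wins and the output becomes 1.  Values above
	     2 simply take the numeric maximum, for future-proofing.  */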
14471 case Tag_Virtualization_use
:
14472 /* The virtualization tag effectively stores two bits of
14473 information: the intended use of TrustZone (in bit 0), and the
14474 intended use of Virtualization (in bit 1). */
14475 if (out_attr
[i
].i
== 0)
14476 out_attr
[i
].i
= in_attr
[i
].i
;
14477 else if (in_attr
[i
].i
!= 0
14478 && in_attr
[i
].i
!= out_attr
[i
].i
)
14480 if (in_attr
[i
].i
<= 3 && out_attr
[i
].i
<= 3)
14485 (_("error: %pB: unable to merge virtualization attributes "
14493 case Tag_CPU_arch_profile
:
14494 if (out_attr
[i
].i
!= in_attr
[i
].i
)
14496 /* 0 will merge with anything.
14497 'A' and 'S' merge to 'A'.
14498 'R' and 'S' merge to 'R'.
14499 'M' and 'A|R|S' is an error. */
14500 if (out_attr
[i
].i
== 0
14501 || (out_attr
[i
].i
== 'S'
14502 && (in_attr
[i
].i
== 'A' || in_attr
[i
].i
== 'R')))
14503 out_attr
[i
].i
= in_attr
[i
].i
;
14504 else if (in_attr
[i
].i
== 0
14505 || (in_attr
[i
].i
== 'S'
14506 && (out_attr
[i
].i
== 'A' || out_attr
[i
].i
== 'R')))
14507 ; /* Do nothing. */
14511 (_("error: %pB: conflicting architecture profiles %c/%c"),
14513 in_attr
[i
].i
? in_attr
[i
].i
: '0',
14514 out_attr
[i
].i
? out_attr
[i
].i
: '0');
14520 case Tag_DSP_extension
:
14521 /* No need to change output value if any of:
14522 - pre (<=) ARMv5T input architecture (do not have DSP)
14523 - M input profile not ARMv7E-M and do not have DSP. */
14524 if (in_attr
[Tag_CPU_arch
].i
<= 3
14525 || (in_attr
[Tag_CPU_arch_profile
].i
== 'M'
14526 && in_attr
[Tag_CPU_arch
].i
!= 13
14527 && in_attr
[i
].i
== 0))
14528 ; /* Do nothing. */
14529 /* Output value should be 0 if DSP part of architecture, ie.
14530 - post (>=) ARMv5te architecture output
14531 - A, R or S profile output or ARMv7E-M output architecture. */
14532 else if (out_attr
[Tag_CPU_arch
].i
>= 4
14533 && (out_attr
[Tag_CPU_arch_profile
].i
== 'A'
14534 || out_attr
[Tag_CPU_arch_profile
].i
== 'R'
14535 || out_attr
[Tag_CPU_arch_profile
].i
== 'S'
14536 || out_attr
[Tag_CPU_arch
].i
== 13))
	  /* Otherwise, DSP instructions are added and are not part of the
	     output architecture.  */
14546 /* Tag_ABI_HardFP_use is handled along with Tag_FP_arch since
14547 the meaning of Tag_ABI_HardFP_use depends on Tag_FP_arch
14548 when it's 0. It might mean absence of FP hardware if
14549 Tag_FP_arch is zero. */
14551 #define VFP_VERSION_COUNT 9
14552 static const struct
14556 } vfp_versions
[VFP_VERSION_COUNT
] =
14572 /* If the output has no requirement about FP hardware,
14573 follow the requirement of the input. */
14574 if (out_attr
[i
].i
== 0)
14576 /* This assert is still reasonable, we shouldn't
14577 produce the suspicious build attribute
14578 combination (See below for in_attr). */
14579 BFD_ASSERT (out_attr
[Tag_ABI_HardFP_use
].i
== 0);
14580 out_attr
[i
].i
= in_attr
[i
].i
;
14581 out_attr
[Tag_ABI_HardFP_use
].i
14582 = in_attr
[Tag_ABI_HardFP_use
].i
;
14585 /* If the input has no requirement about FP hardware, do
14587 else if (in_attr
[i
].i
== 0)
14589 /* We used to assert that Tag_ABI_HardFP_use was
14590 zero here, but we should never assert when
14591 consuming an object file that has suspicious
14592 build attributes. The single precision variant
14593 of 'no FP architecture' is still 'no FP
14594 architecture', so we just ignore the tag in this
14599 /* Both the input and the output have nonzero Tag_FP_arch.
14600 So Tag_ABI_HardFP_use is implied by Tag_FP_arch when it's zero. */
14602 /* If both the input and the output have zero Tag_ABI_HardFP_use,
14604 if (in_attr
[Tag_ABI_HardFP_use
].i
== 0
14605 && out_attr
[Tag_ABI_HardFP_use
].i
== 0)
14607 /* If the input and the output have different Tag_ABI_HardFP_use,
14608 the combination of them is 0 (implied by Tag_FP_arch). */
14609 else if (in_attr
[Tag_ABI_HardFP_use
].i
14610 != out_attr
[Tag_ABI_HardFP_use
].i
)
14611 out_attr
[Tag_ABI_HardFP_use
].i
= 0;
14613 /* Now we can handle Tag_FP_arch. */
14615 /* Values of VFP_VERSION_COUNT or more aren't defined, so just
14616 pick the biggest. */
14617 if (in_attr
[i
].i
>= VFP_VERSION_COUNT
14618 && in_attr
[i
].i
> out_attr
[i
].i
)
14620 out_attr
[i
] = in_attr
[i
];
14623 /* The output uses the superset of input features
14624 (ISA version) and registers. */
14625 ver
= vfp_versions
[in_attr
[i
].i
].ver
;
14626 if (ver
< vfp_versions
[out_attr
[i
].i
].ver
)
14627 ver
= vfp_versions
[out_attr
[i
].i
].ver
;
14628 regs
= vfp_versions
[in_attr
[i
].i
].regs
;
14629 if (regs
< vfp_versions
[out_attr
[i
].i
].regs
)
14630 regs
= vfp_versions
[out_attr
[i
].i
].regs
;
14631 /* This assumes all possible supersets are also a valid
14633 for (newval
= VFP_VERSION_COUNT
- 1; newval
> 0; newval
--)
14635 if (regs
== vfp_versions
[newval
].regs
14636 && ver
== vfp_versions
[newval
].ver
)
14639 out_attr
[i
].i
= newval
;
14642 case Tag_PCS_config
:
14643 if (out_attr
[i
].i
== 0)
14644 out_attr
[i
].i
= in_attr
[i
].i
;
14645 else if (in_attr
[i
].i
!= 0 && out_attr
[i
].i
!= in_attr
[i
].i
)
	      /* It's sometimes ok to mix different configs, so this is only
		 a warning.  */
14650 (_("warning: %pB: conflicting platform configuration"), ibfd
);
14653 case Tag_ABI_PCS_R9_use
:
14654 if (in_attr
[i
].i
!= out_attr
[i
].i
14655 && out_attr
[i
].i
!= AEABI_R9_unused
14656 && in_attr
[i
].i
!= AEABI_R9_unused
)
14659 (_("error: %pB: conflicting use of R9"), ibfd
);
14662 if (out_attr
[i
].i
== AEABI_R9_unused
)
14663 out_attr
[i
].i
= in_attr
[i
].i
;
14665 case Tag_ABI_PCS_RW_data
:
14666 if (in_attr
[i
].i
== AEABI_PCS_RW_data_SBrel
14667 && out_attr
[Tag_ABI_PCS_R9_use
].i
!= AEABI_R9_SB
14668 && out_attr
[Tag_ABI_PCS_R9_use
].i
!= AEABI_R9_unused
)
14671 (_("error: %pB: SB relative addressing conflicts with use of R9"),
14675 /* Use the smallest value specified. */
14676 if (in_attr
[i
].i
< out_attr
[i
].i
)
14677 out_attr
[i
].i
= in_attr
[i
].i
;
	case Tag_ABI_PCS_wchar_t:
	  if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
	      && !elf_arm_tdata (obfd)->no_wchar_size_warning)
	    {
	      _bfd_error_handler
		(_("warning: %pB uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
		 ibfd, in_attr[i].i, out_attr[i].i);
	    }
	  else if (in_attr[i].i && !out_attr[i].i)
	    out_attr[i].i = in_attr[i].i;
	  break;
	case Tag_ABI_enum_size:
	  if (in_attr[i].i != AEABI_enum_unused)
	    {
	      if (out_attr[i].i == AEABI_enum_unused
		  || out_attr[i].i == AEABI_enum_forced_wide)
		{
		  /* The existing object is compatible with anything.
		     Use whatever requirements the new object has.  */
		  out_attr[i].i = in_attr[i].i;
		}
	      else if (in_attr[i].i != AEABI_enum_forced_wide
		       && out_attr[i].i != in_attr[i].i
		       && !elf_arm_tdata (obfd)->no_enum_size_warning)
		{
		  static const char *aeabi_enum_names[] =
		    { "", "variable-size", "32-bit", "" };
		  const char *in_name =
		    in_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
		    ? aeabi_enum_names[in_attr[i].i]
		    : "<unknown>";
		  const char *out_name =
		    out_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
		    ? aeabi_enum_names[out_attr[i].i]
		    : "<unknown>";
		  _bfd_error_handler
		    (_("warning: %pB uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
		     ibfd, in_name, out_name);
		}
	    }
	  break;
	case Tag_ABI_VFP_args:
	  /* Already done.  */
	  break;
	case Tag_ABI_WMMX_args:
	  if (in_attr[i].i != out_attr[i].i)
	    {
	      _bfd_error_handler
		(_("error: %pB uses iWMMXt register arguments, %pB does not"),
		 ibfd, obfd);
	      result = FALSE;
	    }
	  break;
	case Tag_compatibility:
	  /* Merged in target-independent code.  */
	  break;
	case Tag_ABI_HardFP_use:
	  /* This is handled along with Tag_FP_arch.  */
	  break;
	case Tag_ABI_FP_16bit_format:
	  if (in_attr[i].i != 0 && out_attr[i].i != 0)
	    {
	      if (in_attr[i].i != out_attr[i].i)
		{
		  _bfd_error_handler
		    (_("error: fp16 format mismatch between %pB and %pB"),
		     ibfd, obfd);
		  result = FALSE;
		}
	    }
	  if (in_attr[i].i != 0)
	    out_attr[i].i = in_attr[i].i;
	  break;
	case Tag_DIV_use:
	  /* A value of zero on input means that the divide instruction may
	     be used if available in the base architecture as specified via
	     Tag_CPU_arch and Tag_CPU_arch_profile.  A value of 1 means that
	     the user did not want divide instructions.  A value of 2
	     explicitly means that divide instructions were allowed in ARM
	     and Thumb state.  */
	  if (in_attr[i].i == out_attr[i].i)
	    /* Do nothing.  */ ;
	  else if (elf32_arm_attributes_forbid_div (in_attr)
		   && !elf32_arm_attributes_accept_div (out_attr))
	    out_attr[i].i = 1;
	  else if (elf32_arm_attributes_forbid_div (out_attr)
		   && elf32_arm_attributes_accept_div (in_attr))
	    out_attr[i].i = in_attr[i].i;
	  else if (in_attr[i].i == 2)
	    out_attr[i].i = in_attr[i].i;
	  break;
	case Tag_MPextension_use_legacy:
	  /* We don't output objects with Tag_MPextension_use_legacy - we
	     move the value to Tag_MPextension_use.  */
	  if (in_attr[i].i != 0 && in_attr[Tag_MPextension_use].i != 0)
	    {
	      if (in_attr[Tag_MPextension_use].i != in_attr[i].i)
		{
		  _bfd_error_handler
		    (_("%pB has both the current and legacy "
		       "Tag_MPextension_use attributes"),
		     ibfd);
		  result = FALSE;
		}
	    }

	  if (in_attr[i].i > out_attr[Tag_MPextension_use].i)
	    out_attr[Tag_MPextension_use] = in_attr[i];
	  break;
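	  /* Background note (illustrative): Tag_MPextension_use was
	     originally allocated under a different tag number, and objects
	     written with the old number carry Tag_MPextension_use_legacy;
	     that is why the value is folded into the current tag here
	     instead of being copied to the output under the legacy tag.  */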
	case Tag_nodefaults:
	  /* This tag is set if it exists, but the value is unused (and is
	     typically zero).  We don't actually need to do anything here -
	     the merge happens automatically when the type flags are merged
	     below.  */
	  break;
	case Tag_also_compatible_with:
	  /* Already done in Tag_CPU_arch.  */
	  break;
	case Tag_conformance:
	  /* Keep the attribute if it matches.  Throw it away otherwise.
	     No attribute means no claim to conform.  */
	  if (!in_attr[i].s || !out_attr[i].s
	      || strcmp (in_attr[i].s, out_attr[i].s) != 0)
	    out_attr[i].s = NULL;
	  break;

	default:
	  result
	    = result && _bfd_elf_merge_unknown_attribute_low (ibfd, obfd, i);
	  break;
	}

      /* If out_attr was copied from in_attr then it won't have a type yet.  */
      if (in_attr[i].type && !out_attr[i].type)
	out_attr[i].type = in_attr[i].type;
    }

  /* Merge Tag_compatibility attributes and any common GNU ones.  */
  if (!_bfd_elf_merge_object_attributes (ibfd, info))
    return FALSE;

  /* Check for any attributes not known on ARM.  */
  result &= _bfd_elf_merge_unknown_attribute_list (ibfd, obfd);

  return result;
}
/* Return TRUE if the two EABI versions are compatible.  */

static bfd_boolean
elf32_arm_versions_compatible (unsigned iver, unsigned over)
{
  /* v4 and v5 are the same spec before and after it was released,
     so allow mixing them.  */
  if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
      || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
    return TRUE;

  return (iver == over);
}
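/* For example, an EABI version 4 object may be linked into an EABI
   version 5 output; any other version mismatch makes this return FALSE
   and the objects are then reported as incompatible.  (Illustrative
   note.)  */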
/* Merge backend specific data from an object file to the output
   object file when linking.  */

static bfd_boolean
elf32_arm_merge_private_bfd_data (bfd *, struct bfd_link_info *);

/* Display the flags field.  */

static bfd_boolean
elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
{
  FILE * file = (FILE *) ptr;
  unsigned long flags;

  BFD_ASSERT (abfd != NULL && ptr != NULL);

  /* Print normal ELF private data.  */
  _bfd_elf_print_private_bfd_data (abfd, ptr);

  flags = elf_elfheader (abfd)->e_flags;
  /* Ignore init flag - it may not be set, despite the flags field
     containing valid data.  */

  fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);

  switch (EF_ARM_EABI_VERSION (flags))
    {
    case EF_ARM_EABI_UNKNOWN:
      /* The following flag bits are GNU extensions and not part of the
	 official ARM ELF extended ABI.  Hence they are only decoded if
	 the EABI version is not set.  */
      if (flags & EF_ARM_INTERWORK)
	fprintf (file, _(" [interworking enabled]"));

      if (flags & EF_ARM_APCS_26)
	fprintf (file, " [APCS-26]");
      else
	fprintf (file, " [APCS-32]");

      if (flags & EF_ARM_VFP_FLOAT)
	fprintf (file, _(" [VFP float format]"));
      else if (flags & EF_ARM_MAVERICK_FLOAT)
	fprintf (file, _(" [Maverick float format]"));
      else
	fprintf (file, _(" [FPA float format]"));

      if (flags & EF_ARM_APCS_FLOAT)
	fprintf (file, _(" [floats passed in float registers]"));

      if (flags & EF_ARM_PIC)
	fprintf (file, _(" [position independent]"));

      if (flags & EF_ARM_NEW_ABI)
	fprintf (file, _(" [new ABI]"));

      if (flags & EF_ARM_OLD_ABI)
	fprintf (file, _(" [old ABI]"));

      if (flags & EF_ARM_SOFT_FLOAT)
	fprintf (file, _(" [software FP]"));

      flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
		 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
		 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
		 | EF_ARM_MAVERICK_FLOAT);
      break;

    case EF_ARM_EABI_VER1:
      fprintf (file, _(" [Version1 EABI]"));

      if (flags & EF_ARM_SYMSARESORTED)
	fprintf (file, _(" [sorted symbol table]"));
      else
	fprintf (file, _(" [unsorted symbol table]"));

      flags &= ~ EF_ARM_SYMSARESORTED;
      break;

    case EF_ARM_EABI_VER2:
      fprintf (file, _(" [Version2 EABI]"));

      if (flags & EF_ARM_SYMSARESORTED)
	fprintf (file, _(" [sorted symbol table]"));
      else
	fprintf (file, _(" [unsorted symbol table]"));

      if (flags & EF_ARM_DYNSYMSUSESEGIDX)
	fprintf (file, _(" [dynamic symbols use segment index]"));

      if (flags & EF_ARM_MAPSYMSFIRST)
	fprintf (file, _(" [mapping symbols precede others]"));

      flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
		 | EF_ARM_MAPSYMSFIRST);
      break;

    case EF_ARM_EABI_VER3:
      fprintf (file, _(" [Version3 EABI]"));
      break;

    case EF_ARM_EABI_VER4:
      fprintf (file, _(" [Version4 EABI]"));
      goto eabi;

    case EF_ARM_EABI_VER5:
      fprintf (file, _(" [Version5 EABI]"));

      if (flags & EF_ARM_ABI_FLOAT_SOFT)
	fprintf (file, _(" [soft-float ABI]"));

      if (flags & EF_ARM_ABI_FLOAT_HARD)
	fprintf (file, _(" [hard-float ABI]"));

      flags &= ~(EF_ARM_ABI_FLOAT_SOFT | EF_ARM_ABI_FLOAT_HARD);

    eabi:
      if (flags & EF_ARM_BE8)
	fprintf (file, _(" [BE8]"));

      if (flags & EF_ARM_LE8)
	fprintf (file, _(" [LE8]"));

      flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
      break;

    default:
      fprintf (file, _(" <EABI version unrecognised>"));
      break;
    }

  flags &= ~ EF_ARM_EABIMASK;

  if (flags & EF_ARM_RELEXEC)
    fprintf (file, _(" [relocatable executable]"));

  if (flags & EF_ARM_PIC)
    fprintf (file, _(" [position independent]"));

  if (elf_elfheader (abfd)->e_ident[EI_OSABI] == ELFOSABI_ARM_FDPIC)
    fprintf (file, _(" [FDPIC ABI supplement]"));

  flags &= ~ (EF_ARM_RELEXEC | EF_ARM_PIC);

  if (flags)
    fprintf (file, _("<Unrecognised flag bits set>"));

  fputc ('\n', file);

  return TRUE;
}
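/* As an illustration (not produced by the code above itself), a typical
   soft-float EABI v5 object with e_flags 0x05000200 would be reported by
   objdump -p as:

     private flags = 5000200: [Version5 EABI] [soft-float ABI]  */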
static int
elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
{
  switch (ELF_ST_TYPE (elf_sym->st_info))
    {
    case STT_ARM_TFUNC:
      return ELF_ST_TYPE (elf_sym->st_info);

    case STT_ARM_16BIT:
      /* If the symbol is not an object, return the STT_ARM_16BIT flag.
	 This allows us to distinguish between data used by Thumb instructions
	 and non-data (which is probably code) inside Thumb regions of an
	 executable.  */
      if (type != STT_OBJECT && type != STT_TLS)
	return ELF_ST_TYPE (elf_sym->st_info);
      break;

    default:
      break;
    }

  return type;
}
static asection *
elf32_arm_gc_mark_hook (asection *sec,
			struct bfd_link_info *info,
			Elf_Internal_Rela *rel,
			struct elf_link_hash_entry *h,
			Elf_Internal_Sym *sym)
{
  if (h != NULL)
    switch (ELF32_R_TYPE (rel->r_info))
      {
      case R_ARM_GNU_VTINHERIT:
      case R_ARM_GNU_VTENTRY:
	return NULL;
      }

  return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
}
15037 /* Look through the relocs for a section during the first phase. */
15040 elf32_arm_check_relocs (bfd
*abfd
, struct bfd_link_info
*info
,
15041 asection
*sec
, const Elf_Internal_Rela
*relocs
)
15043 Elf_Internal_Shdr
*symtab_hdr
;
15044 struct elf_link_hash_entry
**sym_hashes
;
15045 const Elf_Internal_Rela
*rel
;
15046 const Elf_Internal_Rela
*rel_end
;
15049 struct elf32_arm_link_hash_table
*htab
;
15050 bfd_boolean call_reloc_p
;
15051 bfd_boolean may_become_dynamic_p
;
15052 bfd_boolean may_need_local_target_p
;
15053 unsigned long nsyms
;
15055 if (bfd_link_relocatable (info
))
15058 BFD_ASSERT (is_arm_elf (abfd
));
15060 htab
= elf32_arm_hash_table (info
);
15066 /* Create dynamic sections for relocatable executables so that we can
15067 copy relocations. */
15068 if (htab
->root
.is_relocatable_executable
15069 && ! htab
->root
.dynamic_sections_created
)
15071 if (! _bfd_elf_link_create_dynamic_sections (abfd
, info
))
15075 if (htab
->root
.dynobj
== NULL
)
15076 htab
->root
.dynobj
= abfd
;
15077 if (!create_ifunc_sections (info
))
15080 dynobj
= htab
->root
.dynobj
;
15082 symtab_hdr
= & elf_symtab_hdr (abfd
);
15083 sym_hashes
= elf_sym_hashes (abfd
);
15084 nsyms
= NUM_SHDR_ENTRIES (symtab_hdr
);
15086 rel_end
= relocs
+ sec
->reloc_count
;
15087 for (rel
= relocs
; rel
< rel_end
; rel
++)
15089 Elf_Internal_Sym
*isym
;
15090 struct elf_link_hash_entry
*h
;
15091 struct elf32_arm_link_hash_entry
*eh
;
15092 unsigned int r_symndx
;
15095 r_symndx
= ELF32_R_SYM (rel
->r_info
);
15096 r_type
= ELF32_R_TYPE (rel
->r_info
);
15097 r_type
= arm_real_reloc_type (htab
, r_type
);
15099 if (r_symndx
>= nsyms
15100 /* PR 9934: It is possible to have relocations that do not
15101 refer to symbols, thus it is also possible to have an
15102 object file containing relocations but no symbol table. */
15103 && (r_symndx
> STN_UNDEF
|| nsyms
> 0))
15105 _bfd_error_handler (_("%pB: bad symbol index: %d"), abfd
,
15114 if (r_symndx
< symtab_hdr
->sh_info
)
15116 /* A local symbol. */
15117 isym
= bfd_sym_from_r_symndx (&htab
->sym_cache
,
15124 h
= sym_hashes
[r_symndx
- symtab_hdr
->sh_info
];
15125 while (h
->root
.type
== bfd_link_hash_indirect
15126 || h
->root
.type
== bfd_link_hash_warning
)
15127 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
15131 eh
= (struct elf32_arm_link_hash_entry
*) h
;
15133 call_reloc_p
= FALSE
;
15134 may_become_dynamic_p
= FALSE
;
15135 may_need_local_target_p
= FALSE
;
15137 /* Could be done earlier, if h were already available. */
15138 r_type
= elf32_arm_tls_transition (info
, r_type
, h
);
15141 case R_ARM_GOTOFFFUNCDESC
:
15145 if (!elf32_arm_allocate_local_sym_info (abfd
))
15147 elf32_arm_local_fdpic_cnts(abfd
)[r_symndx
].gotofffuncdesc_cnt
+= 1;
15148 elf32_arm_local_fdpic_cnts(abfd
)[r_symndx
].funcdesc_offset
= -1;
15152 eh
->fdpic_cnts
.gotofffuncdesc_cnt
++;
15157 case R_ARM_GOTFUNCDESC
:
15161 /* Such a relocation is not supposed to be generated
15162 by gcc on a static function. */
15163 /* Anyway if needed it could be handled. */
15168 eh
->fdpic_cnts
.gotfuncdesc_cnt
++;
15173 case R_ARM_FUNCDESC
:
15177 if (!elf32_arm_allocate_local_sym_info (abfd
))
15179 elf32_arm_local_fdpic_cnts(abfd
)[r_symndx
].funcdesc_cnt
+= 1;
15180 elf32_arm_local_fdpic_cnts(abfd
)[r_symndx
].funcdesc_offset
= -1;
15184 eh
->fdpic_cnts
.funcdesc_cnt
++;
15190 case R_ARM_GOT_PREL
:
15191 case R_ARM_TLS_GD32
:
15192 case R_ARM_TLS_GD32_FDPIC
:
15193 case R_ARM_TLS_IE32
:
15194 case R_ARM_TLS_IE32_FDPIC
:
15195 case R_ARM_TLS_GOTDESC
:
15196 case R_ARM_TLS_DESCSEQ
:
15197 case R_ARM_THM_TLS_DESCSEQ
:
15198 case R_ARM_TLS_CALL
:
15199 case R_ARM_THM_TLS_CALL
:
15200 /* This symbol requires a global offset table entry. */
15202 int tls_type
, old_tls_type
;
15206 case R_ARM_TLS_GD32
: tls_type
= GOT_TLS_GD
; break;
15207 case R_ARM_TLS_GD32_FDPIC
: tls_type
= GOT_TLS_GD
; break;
15209 case R_ARM_TLS_IE32
: tls_type
= GOT_TLS_IE
; break;
15210 case R_ARM_TLS_IE32_FDPIC
: tls_type
= GOT_TLS_IE
; break;
15212 case R_ARM_TLS_GOTDESC
:
15213 case R_ARM_TLS_CALL
: case R_ARM_THM_TLS_CALL
:
15214 case R_ARM_TLS_DESCSEQ
: case R_ARM_THM_TLS_DESCSEQ
:
15215 tls_type
= GOT_TLS_GDESC
; break;
15217 default: tls_type
= GOT_NORMAL
; break;
15220 if (!bfd_link_executable (info
) && (tls_type
& GOT_TLS_IE
))
15221 info
->flags
|= DF_STATIC_TLS
;
15226 old_tls_type
= elf32_arm_hash_entry (h
)->tls_type
;
15230 /* This is a global offset table entry for a local symbol. */
15231 if (!elf32_arm_allocate_local_sym_info (abfd
))
15233 elf_local_got_refcounts (abfd
)[r_symndx
] += 1;
15234 old_tls_type
= elf32_arm_local_got_tls_type (abfd
) [r_symndx
];
15237 /* If a variable is accessed with both tls methods, two
15238 slots may be created. */
15239 if (GOT_TLS_GD_ANY_P (old_tls_type
)
15240 && GOT_TLS_GD_ANY_P (tls_type
))
15241 tls_type
|= old_tls_type
;
15243 /* We will already have issued an error message if there
15244 is a TLS/non-TLS mismatch, based on the symbol
15245 type. So just combine any TLS types needed. */
15246 if (old_tls_type
!= GOT_UNKNOWN
&& old_tls_type
!= GOT_NORMAL
15247 && tls_type
!= GOT_NORMAL
)
15248 tls_type
|= old_tls_type
;
15250 /* If the symbol is accessed in both IE and GDESC
15251 method, we're able to relax. Turn off the GDESC flag,
15252 without messing up with any other kind of tls types
15253 that may be involved. */
15254 if ((tls_type
& GOT_TLS_IE
) && (tls_type
& GOT_TLS_GDESC
))
15255 tls_type
&= ~GOT_TLS_GDESC
;
15257 if (old_tls_type
!= tls_type
)
15260 elf32_arm_hash_entry (h
)->tls_type
= tls_type
;
15262 elf32_arm_local_got_tls_type (abfd
) [r_symndx
] = tls_type
;
15265 /* Fall through. */
15267 case R_ARM_TLS_LDM32
:
15268 case R_ARM_TLS_LDM32_FDPIC
:
15269 if (r_type
== R_ARM_TLS_LDM32
|| r_type
== R_ARM_TLS_LDM32_FDPIC
)
15270 htab
->tls_ldm_got
.refcount
++;
15271 /* Fall through. */
15273 case R_ARM_GOTOFF32
:
15275 if (htab
->root
.sgot
== NULL
15276 && !create_got_section (htab
->root
.dynobj
, info
))
15285 case R_ARM_THM_CALL
:
15286 case R_ARM_THM_JUMP24
:
15287 case R_ARM_THM_JUMP19
:
15288 call_reloc_p
= TRUE
;
15289 may_need_local_target_p
= TRUE
;
15293 /* VxWorks uses dynamic R_ARM_ABS12 relocations for
15294 ldr __GOTT_INDEX__ offsets. */
15295 if (!htab
->vxworks_p
)
15297 may_need_local_target_p
= TRUE
;
15300 else goto jump_over
;
15302 /* Fall through. */
15304 case R_ARM_MOVW_ABS_NC
:
15305 case R_ARM_MOVT_ABS
:
15306 case R_ARM_THM_MOVW_ABS_NC
:
15307 case R_ARM_THM_MOVT_ABS
:
15308 if (bfd_link_pic (info
))
15311 (_("%pB: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
15312 abfd
, elf32_arm_howto_table_1
[r_type
].name
,
15313 (h
) ? h
->root
.root
.string
: "a local symbol");
15314 bfd_set_error (bfd_error_bad_value
);
15318 /* Fall through. */
15320 case R_ARM_ABS32_NOI
:
15322 if (h
!= NULL
&& bfd_link_executable (info
))
15324 h
->pointer_equality_needed
= 1;
15326 /* Fall through. */
15328 case R_ARM_REL32_NOI
:
15329 case R_ARM_MOVW_PREL_NC
:
15330 case R_ARM_MOVT_PREL
:
15331 case R_ARM_THM_MOVW_PREL_NC
:
15332 case R_ARM_THM_MOVT_PREL
:
15334 /* Should the interworking branches be listed here? */
15335 if ((bfd_link_pic (info
) || htab
->root
.is_relocatable_executable
15337 && (sec
->flags
& SEC_ALLOC
) != 0)
15340 && elf32_arm_howto_from_type (r_type
)->pc_relative
)
15342 /* In shared libraries and relocatable executables,
15343 we treat local relative references as calls;
15344 see the related SYMBOL_CALLS_LOCAL code in
15345 allocate_dynrelocs. */
15346 call_reloc_p
= TRUE
;
15347 may_need_local_target_p
= TRUE
;
15350 /* We are creating a shared library or relocatable
15351 executable, and this is a reloc against a global symbol,
15352 or a non-PC-relative reloc against a local symbol.
15353 We may need to copy the reloc into the output. */
15354 may_become_dynamic_p
= TRUE
;
15357 may_need_local_target_p
= TRUE
;
15360 /* This relocation describes the C++ object vtable hierarchy.
15361 Reconstruct it for later use during GC. */
15362 case R_ARM_GNU_VTINHERIT
:
15363 if (!bfd_elf_gc_record_vtinherit (abfd
, sec
, h
, rel
->r_offset
))
15367 /* This relocation describes which C++ vtable entries are actually
15368 used. Record for later use during GC. */
15369 case R_ARM_GNU_VTENTRY
:
15370 BFD_ASSERT (h
!= NULL
);
15372 && !bfd_elf_gc_record_vtentry (abfd
, sec
, h
, rel
->r_offset
))
15380 /* We may need a .plt entry if the function this reloc
15381 refers to is in a different object, regardless of the
15382 symbol's type. We can't tell for sure yet, because
15383 something later might force the symbol local. */
15385 else if (may_need_local_target_p
)
15386 /* If this reloc is in a read-only section, we might
15387 need a copy reloc. We can't check reliably at this
15388 stage whether the section is read-only, as input
15389 sections have not yet been mapped to output sections.
15390 Tentatively set the flag for now, and correct in
15391 adjust_dynamic_symbol. */
15392 h
->non_got_ref
= 1;
15395 if (may_need_local_target_p
15396 && (h
!= NULL
|| ELF32_ST_TYPE (isym
->st_info
) == STT_GNU_IFUNC
))
15398 union gotplt_union
*root_plt
;
15399 struct arm_plt_info
*arm_plt
;
15400 struct arm_local_iplt_info
*local_iplt
;
15404 root_plt
= &h
->plt
;
15405 arm_plt
= &eh
->plt
;
15409 local_iplt
= elf32_arm_create_local_iplt (abfd
, r_symndx
);
15410 if (local_iplt
== NULL
)
15412 root_plt
= &local_iplt
->root
;
15413 arm_plt
= &local_iplt
->arm
;
15416 /* If the symbol is a function that doesn't bind locally,
15417 this relocation will need a PLT entry. */
15418 if (root_plt
->refcount
!= -1)
15419 root_plt
->refcount
+= 1;
15422 arm_plt
->noncall_refcount
++;
15424 /* It's too early to use htab->use_blx here, so we have to
15425 record possible blx references separately from
15426 relocs that definitely need a thumb stub. */
15428 if (r_type
== R_ARM_THM_CALL
)
15429 arm_plt
->maybe_thumb_refcount
+= 1;
15431 if (r_type
== R_ARM_THM_JUMP24
15432 || r_type
== R_ARM_THM_JUMP19
)
15433 arm_plt
->thumb_refcount
+= 1;
15436 if (may_become_dynamic_p
)
15438 struct elf_dyn_relocs
*p
, **head
;
15440 /* Create a reloc section in dynobj. */
15441 if (sreloc
== NULL
)
15443 sreloc
= _bfd_elf_make_dynamic_reloc_section
15444 (sec
, dynobj
, 2, abfd
, ! htab
->use_rel
);
15446 if (sreloc
== NULL
)
15449 /* BPABI objects never have dynamic relocations mapped. */
15450 if (htab
->symbian_p
)
15454 flags
= bfd_get_section_flags (dynobj
, sreloc
);
15455 flags
&= ~(SEC_LOAD
| SEC_ALLOC
);
15456 bfd_set_section_flags (dynobj
, sreloc
, flags
);
15460 /* If this is a global symbol, count the number of
15461 relocations we need for this symbol. */
15463 head
= &((struct elf32_arm_link_hash_entry
*) h
)->dyn_relocs
;
15466 head
= elf32_arm_get_local_dynreloc_list (abfd
, r_symndx
, isym
);
15472 if (p
== NULL
|| p
->sec
!= sec
)
15474 bfd_size_type amt
= sizeof *p
;
15476 p
= (struct elf_dyn_relocs
*) bfd_alloc (htab
->root
.dynobj
, amt
);
15486 if (elf32_arm_howto_from_type (r_type
)->pc_relative
)
15489 if (h
== NULL
&& htab
->fdpic_p
&& !bfd_link_pic(info
)
15490 && r_type
!= R_ARM_ABS32
&& r_type
!= R_ARM_ABS32_NOI
) {
15491 /* Here we only support R_ARM_ABS32 and R_ARM_ABS32_NOI
15492 that will become rofixup. */
15493 /* This is due to the fact that we suppose all will become rofixup. */
15494 fprintf(stderr
, "FDPIC does not yet support %d relocation to become dynamic for executable\n", r_type
);
15496 (_("FDPIC does not yet support %s relocation"
15497 " to become dynamic for executable"),
15498 elf32_arm_howto_table_1
[r_type
].name
);
15508 elf32_arm_update_relocs (asection
*o
,
15509 struct bfd_elf_section_reloc_data
*reldata
)
15511 void (*swap_in
) (bfd
*, const bfd_byte
*, Elf_Internal_Rela
*);
15512 void (*swap_out
) (bfd
*, const Elf_Internal_Rela
*, bfd_byte
*);
15513 const struct elf_backend_data
*bed
;
15514 _arm_elf_section_data
*eado
;
15515 struct bfd_link_order
*p
;
15516 bfd_byte
*erela_head
, *erela
;
15517 Elf_Internal_Rela
*irela_head
, *irela
;
15518 Elf_Internal_Shdr
*rel_hdr
;
15520 unsigned int count
;
15522 eado
= get_arm_elf_section_data (o
);
15524 if (!eado
|| eado
->elf
.this_hdr
.sh_type
!= SHT_ARM_EXIDX
)
15528 bed
= get_elf_backend_data (abfd
);
15529 rel_hdr
= reldata
->hdr
;
15531 if (rel_hdr
->sh_entsize
== bed
->s
->sizeof_rel
)
15533 swap_in
= bed
->s
->swap_reloc_in
;
15534 swap_out
= bed
->s
->swap_reloc_out
;
15536 else if (rel_hdr
->sh_entsize
== bed
->s
->sizeof_rela
)
15538 swap_in
= bed
->s
->swap_reloca_in
;
15539 swap_out
= bed
->s
->swap_reloca_out
;
15544 erela_head
= rel_hdr
->contents
;
15545 irela_head
= (Elf_Internal_Rela
*) bfd_zmalloc
15546 ((NUM_SHDR_ENTRIES (rel_hdr
) + 1) * sizeof (*irela_head
));
15548 erela
= erela_head
;
15549 irela
= irela_head
;
15552 for (p
= o
->map_head
.link_order
; p
; p
= p
->next
)
15554 if (p
->type
== bfd_section_reloc_link_order
15555 || p
->type
== bfd_symbol_reloc_link_order
)
15557 (*swap_in
) (abfd
, erela
, irela
);
15558 erela
+= rel_hdr
->sh_entsize
;
15562 else if (p
->type
== bfd_indirect_link_order
)
15564 struct bfd_elf_section_reloc_data
*input_reldata
;
15565 arm_unwind_table_edit
*edit_list
, *edit_tail
;
15566 _arm_elf_section_data
*eadi
;
15571 i
= p
->u
.indirect
.section
;
15573 eadi
= get_arm_elf_section_data (i
);
15574 edit_list
= eadi
->u
.exidx
.unwind_edit_list
;
15575 edit_tail
= eadi
->u
.exidx
.unwind_edit_tail
;
15576 offset
= o
->vma
+ i
->output_offset
;
15578 if (eadi
->elf
.rel
.hdr
&&
15579 eadi
->elf
.rel
.hdr
->sh_entsize
== rel_hdr
->sh_entsize
)
15580 input_reldata
= &eadi
->elf
.rel
;
15581 else if (eadi
->elf
.rela
.hdr
&&
15582 eadi
->elf
.rela
.hdr
->sh_entsize
== rel_hdr
->sh_entsize
)
15583 input_reldata
= &eadi
->elf
.rela
;
15589 for (j
= 0; j
< NUM_SHDR_ENTRIES (input_reldata
->hdr
); j
++)
15591 arm_unwind_table_edit
*edit_node
, *edit_next
;
15593 bfd_vma reloc_index
;
15595 (*swap_in
) (abfd
, erela
, irela
);
15596 reloc_index
= (irela
->r_offset
- offset
) / 8;
15599 edit_node
= edit_list
;
15600 for (edit_next
= edit_list
;
15601 edit_next
&& edit_next
->index
<= reloc_index
;
15602 edit_next
= edit_node
->next
)
15605 edit_node
= edit_next
;
15608 if (edit_node
->type
!= DELETE_EXIDX_ENTRY
15609 || edit_node
->index
!= reloc_index
)
15611 irela
->r_offset
-= bias
* 8;
15616 erela
+= rel_hdr
->sh_entsize
;
15619 if (edit_tail
->type
== INSERT_EXIDX_CANTUNWIND_AT_END
)
15621 /* New relocation entity. */
15622 asection
*text_sec
= edit_tail
->linked_section
;
15623 asection
*text_out
= text_sec
->output_section
;
15624 bfd_vma exidx_offset
= offset
+ i
->size
- 8;
15626 irela
->r_addend
= 0;
15627 irela
->r_offset
= exidx_offset
;
15628 irela
->r_info
= ELF32_R_INFO
15629 (text_out
->target_index
, R_ARM_PREL31
);
15636 for (j
= 0; j
< NUM_SHDR_ENTRIES (input_reldata
->hdr
); j
++)
15638 (*swap_in
) (abfd
, erela
, irela
);
15639 erela
+= rel_hdr
->sh_entsize
;
15643 count
+= NUM_SHDR_ENTRIES (input_reldata
->hdr
);
15648 reldata
->count
= count
;
15649 rel_hdr
->sh_size
= count
* rel_hdr
->sh_entsize
;
15651 erela
= erela_head
;
15652 irela
= irela_head
;
15655 (*swap_out
) (abfd
, irela
, erela
);
15656 erela
+= rel_hdr
->sh_entsize
;
15663 /* Hashes are no longer valid. */
15664 free (reldata
->hashes
);
15665 reldata
->hashes
= NULL
;
15668 /* Unwinding tables are not referenced directly. This pass marks them as
15669 required if the corresponding code section is marked. Similarly, ARMv8-M
15670 secure entry functions can only be referenced by SG veneers which are
15671 created after the GC process. They need to be marked in case they reside in
15672 their own section (as would be the case if code was compiled with
15673 -ffunction-sections). */
15676 elf32_arm_gc_mark_extra_sections (struct bfd_link_info
*info
,
15677 elf_gc_mark_hook_fn gc_mark_hook
)
15680 Elf_Internal_Shdr
**elf_shdrp
;
15681 asection
*cmse_sec
;
15682 obj_attribute
*out_attr
;
15683 Elf_Internal_Shdr
*symtab_hdr
;
15684 unsigned i
, sym_count
, ext_start
;
15685 const struct elf_backend_data
*bed
;
15686 struct elf_link_hash_entry
**sym_hashes
;
15687 struct elf32_arm_link_hash_entry
*cmse_hash
;
15688 bfd_boolean again
, is_v8m
, first_bfd_browse
= TRUE
;
15690 _bfd_elf_gc_mark_extra_sections (info
, gc_mark_hook
);
15692 out_attr
= elf_known_obj_attributes_proc (info
->output_bfd
);
15693 is_v8m
= out_attr
[Tag_CPU_arch
].i
>= TAG_CPU_ARCH_V8M_BASE
15694 && out_attr
[Tag_CPU_arch_profile
].i
== 'M';
15696 /* Marking EH data may cause additional code sections to be marked,
15697 requiring multiple passes. */
15702 for (sub
= info
->input_bfds
; sub
!= NULL
; sub
= sub
->link
.next
)
15706 if (! is_arm_elf (sub
))
15709 elf_shdrp
= elf_elfsections (sub
);
15710 for (o
= sub
->sections
; o
!= NULL
; o
= o
->next
)
15712 Elf_Internal_Shdr
*hdr
;
15714 hdr
= &elf_section_data (o
)->this_hdr
;
15715 if (hdr
->sh_type
== SHT_ARM_EXIDX
15717 && hdr
->sh_link
< elf_numsections (sub
)
15719 && elf_shdrp
[hdr
->sh_link
]->bfd_section
->gc_mark
)
15722 if (!_bfd_elf_gc_mark (info
, o
, gc_mark_hook
))
15727 /* Mark section holding ARMv8-M secure entry functions. We mark all
15728 of them so no need for a second browsing. */
15729 if (is_v8m
&& first_bfd_browse
)
15731 sym_hashes
= elf_sym_hashes (sub
);
15732 bed
= get_elf_backend_data (sub
);
15733 symtab_hdr
= &elf_tdata (sub
)->symtab_hdr
;
15734 sym_count
= symtab_hdr
->sh_size
/ bed
->s
->sizeof_sym
;
15735 ext_start
= symtab_hdr
->sh_info
;
15737 /* Scan symbols. */
15738 for (i
= ext_start
; i
< sym_count
; i
++)
15740 cmse_hash
= elf32_arm_hash_entry (sym_hashes
[i
- ext_start
]);
15742 /* Assume it is a special symbol. If not, cmse_scan will
15743 warn about it and user can do something about it. */
15744 if (ARM_GET_SYM_CMSE_SPCL (cmse_hash
->root
.target_internal
))
15746 cmse_sec
= cmse_hash
->root
.root
.u
.def
.section
;
15747 if (!cmse_sec
->gc_mark
15748 && !_bfd_elf_gc_mark (info
, cmse_sec
, gc_mark_hook
))
15754 first_bfd_browse
= FALSE
;
/* Treat mapping symbols as special target symbols.  */

static bfd_boolean
elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
{
  return bfd_is_arm_special_symbol_name (sym->name,
					 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
}
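/* The names matched here include the ARM mapping symbols: $a marks the
   start of a run of ARM code, $t a run of Thumb code and $d a run of
   literal data, each optionally followed by a '.' and a further suffix
   (for example "$d.realdata").  (Illustrative note.)  */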
/* This is a copy of elf_find_function() from elf.c except that
   ARM mapping symbols are ignored when looking for function names
   and STT_ARM_TFUNC is considered to be a function type.  */

static bfd_boolean
arm_elf_find_function (bfd *	     abfd ATTRIBUTE_UNUSED,
		       asymbol **    symbols,
		       asection *    section,
		       bfd_vma	     offset,
		       const char ** filename_ptr,
		       const char ** functionname_ptr)
{
  const char * filename = NULL;
  asymbol * func = NULL;
  bfd_vma low_func = 0;
  asymbol ** p;

  for (p = symbols; *p != NULL; p++)
    {
      elf_symbol_type *q;

      q = (elf_symbol_type *) *p;

      switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
	{
	default:
	  break;
	case STT_FILE:
	  filename = bfd_asymbol_name (&q->symbol);
	  break;
	case STT_FUNC:
	case STT_ARM_TFUNC:
	case STT_NOTYPE:
	  /* Skip mapping symbols.  */
	  if ((q->symbol.flags & BSF_LOCAL)
	      && bfd_is_arm_special_symbol_name (q->symbol.name,
		    BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    continue;
	  /* Fall through.  */
	  if (bfd_get_section (&q->symbol) == section
	      && q->symbol.value >= low_func
	      && q->symbol.value <= offset)
	    {
	      func = (asymbol *) q;
	      low_func = q->symbol.value;
	    }
	  break;
	}
    }

  if (func == NULL)
    return FALSE;

  if (filename_ptr)
    *filename_ptr = filename;
  if (functionname_ptr)
    *functionname_ptr = bfd_asymbol_name (func);

  return TRUE;
}
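/* For example (illustrative): when attributing an address inside a Thumb
   function, the $t mapping symbol that opens the region is a local
   STT_NOTYPE symbol at the same or a lower address, so without the skip
   above it could be chosen as the "function"; skipping mapping symbols
   lets the real function symbol win.  */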
/* Find the nearest line to a particular section and offset, for error
   reporting.   This code is a duplicate of the code in elf.c, except
   that it uses arm_elf_find_function.  */

static bfd_boolean
elf32_arm_find_nearest_line (bfd *	    abfd,
			     asymbol **	    symbols,
			     asection *	    section,
			     bfd_vma	    offset,
			     const char **  filename_ptr,
			     const char **  functionname_ptr,
			     unsigned int * line_ptr,
			     unsigned int * discriminator_ptr)
{
  bfd_boolean found = FALSE;

  if (_bfd_dwarf2_find_nearest_line (abfd, symbols, NULL, section, offset,
				     filename_ptr, functionname_ptr,
				     line_ptr, discriminator_ptr,
				     dwarf_debug_sections, 0,
				     & elf_tdata (abfd)->dwarf2_find_line_info))
    {
      if (!*functionname_ptr)
	arm_elf_find_function (abfd, symbols, section, offset,
			       *filename_ptr ? NULL : filename_ptr,
			       functionname_ptr);

      return TRUE;
    }

  /* Skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain
     uses DWARF1.  */

  if (! _bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
					     & found, filename_ptr,
					     functionname_ptr, line_ptr,
					     & elf_tdata (abfd)->line_info))
    return FALSE;

  if (found && (*functionname_ptr || *line_ptr))
    return TRUE;

  if (symbols == NULL)
    return FALSE;

  if (! arm_elf_find_function (abfd, symbols, section, offset,
			       filename_ptr, functionname_ptr))
    return FALSE;

  *line_ptr = 0;
  return TRUE;
}

static bfd_boolean
elf32_arm_find_inliner_info (bfd *	    abfd,
			     const char **  filename_ptr,
			     const char **  functionname_ptr,
			     unsigned int * line_ptr)
{
  bfd_boolean found;

  found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
					 functionname_ptr, line_ptr,
					 & elf_tdata (abfd)->dwarf2_find_line_info);
  return found;
}
/* Find dynamic relocs for H that apply to read-only sections.  */

static asection *
readonly_dynrelocs (struct elf_link_hash_entry *h)
{
  struct elf_dyn_relocs *p;

  for (p = elf32_arm_hash_entry (h)->dyn_relocs; p != NULL; p = p->next)
    {
      asection *s = p->sec->output_section;

      if (s != NULL && (s->flags & SEC_READONLY) != 0)
	return p->sec;
    }
  return NULL;
}
15914 /* Adjust a symbol defined by a dynamic object and referenced by a
15915 regular object. The current definition is in some section of the
15916 dynamic object, but we're not including those sections. We have to
15917 change the definition to something the rest of the link can
15921 elf32_arm_adjust_dynamic_symbol (struct bfd_link_info
* info
,
15922 struct elf_link_hash_entry
* h
)
15925 asection
*s
, *srel
;
15926 struct elf32_arm_link_hash_entry
* eh
;
15927 struct elf32_arm_link_hash_table
*globals
;
15929 globals
= elf32_arm_hash_table (info
);
15930 if (globals
== NULL
)
15933 dynobj
= elf_hash_table (info
)->dynobj
;
15935 /* Make sure we know what is going on here. */
15936 BFD_ASSERT (dynobj
!= NULL
15938 || h
->type
== STT_GNU_IFUNC
15942 && !h
->def_regular
)));
15944 eh
= (struct elf32_arm_link_hash_entry
*) h
;
15946 /* If this is a function, put it in the procedure linkage table. We
15947 will fill in the contents of the procedure linkage table later,
15948 when we know the address of the .got section. */
15949 if (h
->type
== STT_FUNC
|| h
->type
== STT_GNU_IFUNC
|| h
->needs_plt
)
15951 /* Calls to STT_GNU_IFUNC symbols always use a PLT, even if the
15952 symbol binds locally. */
15953 if (h
->plt
.refcount
<= 0
15954 || (h
->type
!= STT_GNU_IFUNC
15955 && (SYMBOL_CALLS_LOCAL (info
, h
)
15956 || (ELF_ST_VISIBILITY (h
->other
) != STV_DEFAULT
15957 && h
->root
.type
== bfd_link_hash_undefweak
))))
15959 /* This case can occur if we saw a PLT32 reloc in an input
15960 file, but the symbol was never referred to by a dynamic
15961 object, or if all references were garbage collected. In
15962 such a case, we don't actually need to build a procedure
15963 linkage table, and we can just do a PC24 reloc instead. */
15964 h
->plt
.offset
= (bfd_vma
) -1;
15965 eh
->plt
.thumb_refcount
= 0;
15966 eh
->plt
.maybe_thumb_refcount
= 0;
15967 eh
->plt
.noncall_refcount
= 0;
15975 /* It's possible that we incorrectly decided a .plt reloc was
15976 needed for an R_ARM_PC24 or similar reloc to a non-function sym
15977 in check_relocs. We can't decide accurately between function
15978 and non-function syms in check-relocs; Objects loaded later in
15979 the link may change h->type. So fix it now. */
15980 h
->plt
.offset
= (bfd_vma
) -1;
15981 eh
->plt
.thumb_refcount
= 0;
15982 eh
->plt
.maybe_thumb_refcount
= 0;
15983 eh
->plt
.noncall_refcount
= 0;
15986 /* If this is a weak symbol, and there is a real definition, the
15987 processor independent code will have arranged for us to see the
15988 real definition first, and we can just use the same value. */
15989 if (h
->is_weakalias
)
15991 struct elf_link_hash_entry
*def
= weakdef (h
);
15992 BFD_ASSERT (def
->root
.type
== bfd_link_hash_defined
);
15993 h
->root
.u
.def
.section
= def
->root
.u
.def
.section
;
15994 h
->root
.u
.def
.value
= def
->root
.u
.def
.value
;
15998 /* If there are no non-GOT references, we do not need a copy
16000 if (!h
->non_got_ref
)
16003 /* This is a reference to a symbol defined by a dynamic object which
16004 is not a function. */
16006 /* If we are creating a shared library, we must presume that the
16007 only references to the symbol are via the global offset table.
16008 For such cases we need not do anything here; the relocations will
16009 be handled correctly by relocate_section. Relocatable executables
16010 can reference data in shared objects directly, so we don't need to
16011 do anything here. */
16012 if (bfd_link_pic (info
) || globals
->root
.is_relocatable_executable
)
16015 /* We must allocate the symbol in our .dynbss section, which will
16016 become part of the .bss section of the executable. There will be
16017 an entry for this symbol in the .dynsym section. The dynamic
16018 object will contain position independent code, so all references
16019 from the dynamic object to this symbol will go through the global
16020 offset table. The dynamic linker will use the .dynsym entry to
16021 determine the address it must put in the global offset table, so
16022 both the dynamic object and the regular object will refer to the
16023 same memory location for the variable. */
16024 /* If allowed, we must generate a R_ARM_COPY reloc to tell the dynamic
16025 linker to copy the initial value out of the dynamic object and into
16026 the runtime process image. We need to remember the offset into the
16027 .rel(a).bss section we are going to use. */
16028 if ((h
->root
.u
.def
.section
->flags
& SEC_READONLY
) != 0)
16030 s
= globals
->root
.sdynrelro
;
16031 srel
= globals
->root
.sreldynrelro
;
16035 s
= globals
->root
.sdynbss
;
16036 srel
= globals
->root
.srelbss
;
16038 if (info
->nocopyreloc
== 0
16039 && (h
->root
.u
.def
.section
->flags
& SEC_ALLOC
) != 0
16042 elf32_arm_allocate_dynrelocs (info
, srel
, 1);
16046 return _bfd_elf_adjust_dynamic_copy (info
, h
, s
);
16049 /* Allocate space in .plt, .got and associated reloc sections for
16053 allocate_dynrelocs_for_symbol (struct elf_link_hash_entry
*h
, void * inf
)
16055 struct bfd_link_info
*info
;
16056 struct elf32_arm_link_hash_table
*htab
;
16057 struct elf32_arm_link_hash_entry
*eh
;
16058 struct elf_dyn_relocs
*p
;
16060 if (h
->root
.type
== bfd_link_hash_indirect
)
16063 eh
= (struct elf32_arm_link_hash_entry
*) h
;
16065 info
= (struct bfd_link_info
*) inf
;
16066 htab
= elf32_arm_hash_table (info
);
16070 if ((htab
->root
.dynamic_sections_created
|| h
->type
== STT_GNU_IFUNC
)
16071 && h
->plt
.refcount
> 0)
16073 /* Make sure this symbol is output as a dynamic symbol.
16074 Undefined weak syms won't yet be marked as dynamic. */
16075 if (h
->dynindx
== -1 && !h
->forced_local
16076 && h
->root
.type
== bfd_link_hash_undefweak
)
16078 if (! bfd_elf_link_record_dynamic_symbol (info
, h
))
16082 /* If the call in the PLT entry binds locally, the associated
16083 GOT entry should use an R_ARM_IRELATIVE relocation instead of
16084 the usual R_ARM_JUMP_SLOT. Put it in the .iplt section rather
16085 than the .plt section. */
16086 if (h
->type
== STT_GNU_IFUNC
&& SYMBOL_CALLS_LOCAL (info
, h
))
16089 if (eh
->plt
.noncall_refcount
== 0
16090 && SYMBOL_REFERENCES_LOCAL (info
, h
))
16091 /* All non-call references can be resolved directly.
16092 This means that they can (and in some cases, must)
16093 resolve directly to the run-time target, rather than
16094 to the PLT. That in turns means that any .got entry
16095 would be equal to the .igot.plt entry, so there's
16096 no point having both. */
16097 h
->got
.refcount
= 0;
16100 if (bfd_link_pic (info
)
16102 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h
))
16104 elf32_arm_allocate_plt_entry (info
, eh
->is_iplt
, &h
->plt
, &eh
->plt
);
16106 /* If this symbol is not defined in a regular file, and we are
16107 not generating a shared library, then set the symbol to this
16108 location in the .plt. This is required to make function
16109 pointers compare as equal between the normal executable and
16110 the shared library. */
16111 if (! bfd_link_pic (info
)
16112 && !h
->def_regular
)
16114 h
->root
.u
.def
.section
= htab
->root
.splt
;
16115 h
->root
.u
.def
.value
= h
->plt
.offset
;
16117 /* Make sure the function is not marked as Thumb, in case
16118 it is the target of an ABS32 relocation, which will
16119 point to the PLT entry. */
16120 ARM_SET_SYM_BRANCH_TYPE (h
->target_internal
, ST_BRANCH_TO_ARM
);
16123 /* VxWorks executables have a second set of relocations for
16124 each PLT entry. They go in a separate relocation section,
16125 which is processed by the kernel loader. */
16126 if (htab
->vxworks_p
&& !bfd_link_pic (info
))
16128 /* There is a relocation for the initial PLT entry:
16129 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_. */
16130 if (h
->plt
.offset
== htab
->plt_header_size
)
16131 elf32_arm_allocate_dynrelocs (info
, htab
->srelplt2
, 1);
16133 /* There are two extra relocations for each subsequent
16134 PLT entry: an R_ARM_32 relocation for the GOT entry,
16135 and an R_ARM_32 relocation for the PLT entry. */
16136 elf32_arm_allocate_dynrelocs (info
, htab
->srelplt2
, 2);
16141 h
->plt
.offset
= (bfd_vma
) -1;
16147 h
->plt
.offset
= (bfd_vma
) -1;
16151 eh
= (struct elf32_arm_link_hash_entry
*) h
;
16152 eh
->tlsdesc_got
= (bfd_vma
) -1;
16154 if (h
->got
.refcount
> 0)
16158 int tls_type
= elf32_arm_hash_entry (h
)->tls_type
;
16161 /* Make sure this symbol is output as a dynamic symbol.
16162 Undefined weak syms won't yet be marked as dynamic. */
16163 if (htab
->root
.dynamic_sections_created
&& h
->dynindx
== -1 && !h
->forced_local
16164 && h
->root
.type
== bfd_link_hash_undefweak
)
16166 if (! bfd_elf_link_record_dynamic_symbol (info
, h
))
16170 if (!htab
->symbian_p
)
16172 s
= htab
->root
.sgot
;
16173 h
->got
.offset
= s
->size
;
16175 if (tls_type
== GOT_UNKNOWN
)
16178 if (tls_type
== GOT_NORMAL
)
16179 /* Non-TLS symbols need one GOT slot. */
16183 if (tls_type
& GOT_TLS_GDESC
)
16185 /* R_ARM_TLS_DESC needs 2 GOT slots. */
16187 = (htab
->root
.sgotplt
->size
16188 - elf32_arm_compute_jump_table_size (htab
));
16189 htab
->root
.sgotplt
->size
+= 8;
16190 h
->got
.offset
= (bfd_vma
) -2;
16191 /* plt.got_offset needs to know there's a TLS_DESC
16192 reloc in the middle of .got.plt. */
16193 htab
->num_tls_desc
++;
16196 if (tls_type
& GOT_TLS_GD
)
16198 /* R_ARM_TLS_GD32 and R_ARM_TLS_GD32_FDPIC need two
16199 consecutive GOT slots. If the symbol is both GD
16200 and GDESC, got.offset may have been
16202 h
->got
.offset
= s
->size
;
16206 if (tls_type
& GOT_TLS_IE
)
16207 /* R_ARM_TLS_IE32/R_ARM_TLS_IE32_FDPIC need one GOT
16212 dyn
= htab
->root
.dynamic_sections_created
;
16215 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn
,
16216 bfd_link_pic (info
),
16218 && (!bfd_link_pic (info
)
16219 || !SYMBOL_REFERENCES_LOCAL (info
, h
)))
16222 if (tls_type
!= GOT_NORMAL
16223 && (bfd_link_pic (info
) || indx
!= 0)
16224 && (ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
16225 || h
->root
.type
!= bfd_link_hash_undefweak
))
16227 if (tls_type
& GOT_TLS_IE
)
16228 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelgot
, 1);
16230 if (tls_type
& GOT_TLS_GD
)
16231 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelgot
, 1);
16233 if (tls_type
& GOT_TLS_GDESC
)
16235 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelplt
, 1);
16236 /* GDESC needs a trampoline to jump to. */
16237 htab
->tls_trampoline
= -1;
16240 /* Only GD needs it. GDESC just emits one relocation per
16242 if ((tls_type
& GOT_TLS_GD
) && indx
!= 0)
16243 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelgot
, 1);
16245 else if (((indx
!= -1) || htab
->fdpic_p
)
16246 && !SYMBOL_REFERENCES_LOCAL (info
, h
))
16248 if (htab
->root
.dynamic_sections_created
)
16249 /* Reserve room for the GOT entry's R_ARM_GLOB_DAT relocation. */
16250 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelgot
, 1);
16252 else if (h
->type
== STT_GNU_IFUNC
16253 && eh
->plt
.noncall_refcount
== 0)
16254 /* No non-call references resolve the STT_GNU_IFUNC's PLT entry;
16255 they all resolve dynamically instead. Reserve room for the
16256 GOT entry's R_ARM_IRELATIVE relocation. */
16257 elf32_arm_allocate_irelocs (info
, htab
->root
.srelgot
, 1);
16258 else if (bfd_link_pic (info
)
16259 && (ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
16260 || h
->root
.type
!= bfd_link_hash_undefweak
))
16261 /* Reserve room for the GOT entry's R_ARM_RELATIVE relocation. */
16262 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelgot
, 1);
16263 else if (htab
->fdpic_p
&& tls_type
== GOT_NORMAL
)
16264 /* Reserve room for rofixup for FDPIC executable. */
16265 /* TLS relocs do not need space since they are completely
16267 htab
->srofixup
->size
+= 4;
16271 h
->got
.offset
= (bfd_vma
) -1;
16273 /* FDPIC support. */
16274 if (eh
->fdpic_cnts
.gotofffuncdesc_cnt
> 0)
16276 /* Symbol musn't be exported. */
16277 if (h
->dynindx
!= -1)
16280 /* We only allocate one function descriptor with its associated relocation. */
16281 if (eh
->fdpic_cnts
.funcdesc_offset
== -1)
16283 asection
*s
= htab
->root
.sgot
;
16285 eh
->fdpic_cnts
.funcdesc_offset
= s
->size
;
16287 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups. */
16288 if (bfd_link_pic(info
))
16289 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelgot
, 1);
16291 htab
->srofixup
->size
+= 8;
16295 if (eh
->fdpic_cnts
.gotfuncdesc_cnt
> 0)
16297 asection
*s
= htab
->root
.sgot
;
16299 if (htab
->root
.dynamic_sections_created
&& h
->dynindx
== -1
16300 && !h
->forced_local
)
16301 if (! bfd_elf_link_record_dynamic_symbol (info
, h
))
16304 if (h
->dynindx
== -1)
16306 /* We only allocate one function descriptor with its associated relocation. q */
16307 if (eh
->fdpic_cnts
.funcdesc_offset
== -1)
16310 eh
->fdpic_cnts
.funcdesc_offset
= s
->size
;
16312 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups. */
16313 if (bfd_link_pic(info
))
16314 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelgot
, 1);
16316 htab
->srofixup
->size
+= 8;
16320 /* Add one entry into the GOT and a R_ARM_FUNCDESC or
16321 R_ARM_RELATIVE/rofixup relocation on it. */
16322 eh
->fdpic_cnts
.gotfuncdesc_offset
= s
->size
;
16324 if (h
->dynindx
== -1 && !bfd_link_pic(info
))
16325 htab
->srofixup
->size
+= 4;
16327 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelgot
, 1);
16330 if (eh
->fdpic_cnts
.funcdesc_cnt
> 0)
16332 if (htab
->root
.dynamic_sections_created
&& h
->dynindx
== -1
16333 && !h
->forced_local
)
16334 if (! bfd_elf_link_record_dynamic_symbol (info
, h
))
16337 if (h
->dynindx
== -1)
16339 /* We only allocate one function descriptor with its associated relocation. */
16340 if (eh
->fdpic_cnts
.funcdesc_offset
== -1)
16342 asection
*s
= htab
->root
.sgot
;
16344 eh
->fdpic_cnts
.funcdesc_offset
= s
->size
;
16346 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups. */
16347 if (bfd_link_pic(info
))
16348 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelgot
, 1);
16350 htab
->srofixup
->size
+= 8;
16353 if (h
->dynindx
== -1 && !bfd_link_pic(info
))
16355 /* For FDPIC executable we replace R_ARM_RELATIVE with a rofixup. */
16356 htab
->srofixup
->size
+= 4 * eh
->fdpic_cnts
.funcdesc_cnt
;
16360 /* Will need one dynamic reloc per reference. will be either
16361 R_ARM_FUNCDESC or R_ARM_RELATIVE for hidden symbols. */
16362 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelgot
,
16363 eh
->fdpic_cnts
.funcdesc_cnt
);
16367 /* Allocate stubs for exported Thumb functions on v4t. */
16368 if (!htab
->use_blx
&& h
->dynindx
!= -1
16370 && ARM_GET_SYM_BRANCH_TYPE (h
->target_internal
) == ST_BRANCH_TO_THUMB
16371 && ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
)
16373 struct elf_link_hash_entry
* th
;
16374 struct bfd_link_hash_entry
* bh
;
16375 struct elf_link_hash_entry
* myh
;
16379 /* Create a new symbol to regist the real location of the function. */
16380 s
= h
->root
.u
.def
.section
;
16381 sprintf (name
, "__real_%s", h
->root
.root
.string
);
16382 _bfd_generic_link_add_one_symbol (info
, s
->owner
,
16383 name
, BSF_GLOBAL
, s
,
16384 h
->root
.u
.def
.value
,
16385 NULL
, TRUE
, FALSE
, &bh
);
16387 myh
= (struct elf_link_hash_entry
*) bh
;
16388 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
16389 myh
->forced_local
= 1;
16390 ARM_SET_SYM_BRANCH_TYPE (myh
->target_internal
, ST_BRANCH_TO_THUMB
);
16391 eh
->export_glue
= myh
;
16392 th
= record_arm_to_thumb_glue (info
, h
);
16393 /* Point the symbol at the stub. */
16394 h
->type
= ELF_ST_INFO (ELF_ST_BIND (h
->type
), STT_FUNC
);
16395 ARM_SET_SYM_BRANCH_TYPE (h
->target_internal
, ST_BRANCH_TO_ARM
);
16396 h
->root
.u
.def
.section
= th
->root
.u
.def
.section
;
16397 h
->root
.u
.def
.value
= th
->root
.u
.def
.value
& ~1;
16400 if (eh
->dyn_relocs
== NULL
)
16403 /* In the shared -Bsymbolic case, discard space allocated for
16404 dynamic pc-relative relocs against symbols which turn out to be
16405 defined in regular objects. For the normal shared case, discard
16406 space for pc-relative relocs that have become local due to symbol
16407 visibility changes. */
16409 if (bfd_link_pic (info
) || htab
->root
.is_relocatable_executable
|| htab
->fdpic_p
)
16411 /* Relocs that use pc_count are PC-relative forms, which will appear
16412 on something like ".long foo - ." or "movw REG, foo - .". We want
16413 calls to protected symbols to resolve directly to the function
16414 rather than going via the plt. If people want function pointer
16415 comparisons to work as expected then they should avoid writing
16416 assembly like ".long foo - .". */
16417 if (SYMBOL_CALLS_LOCAL (info
, h
))
16419 struct elf_dyn_relocs
**pp
;
16421 for (pp
= &eh
->dyn_relocs
; (p
= *pp
) != NULL
; )
16423 p
->count
-= p
->pc_count
;
16432 if (htab
->vxworks_p
)
16434 struct elf_dyn_relocs
**pp
;
16436 for (pp
= &eh
->dyn_relocs
; (p
= *pp
) != NULL
; )
16438 if (strcmp (p
->sec
->output_section
->name
, ".tls_vars") == 0)
16445 /* Also discard relocs on undefined weak syms with non-default
16447 if (eh
->dyn_relocs
!= NULL
16448 && h
->root
.type
== bfd_link_hash_undefweak
)
16450 if (ELF_ST_VISIBILITY (h
->other
) != STV_DEFAULT
16451 || UNDEFWEAK_NO_DYNAMIC_RELOC (info
, h
))
16452 eh
->dyn_relocs
= NULL
;
16454 /* Make sure undefined weak symbols are output as a dynamic
16456 else if (htab
->root
.dynamic_sections_created
&& h
->dynindx
== -1
16457 && !h
->forced_local
)
16459 if (! bfd_elf_link_record_dynamic_symbol (info
, h
))
16464 else if (htab
->root
.is_relocatable_executable
&& h
->dynindx
== -1
16465 && h
->root
.type
== bfd_link_hash_new
)
16467 /* Output absolute symbols so that we can create relocations
16468 against them. For normal symbols we output a relocation
16469 against the section that contains them. */
16470 if (! bfd_elf_link_record_dynamic_symbol (info
, h
))
16477 /* For the non-shared case, discard space for relocs against
16478 symbols which turn out to need copy relocs or are not
16481 if (!h
->non_got_ref
16482 && ((h
->def_dynamic
16483 && !h
->def_regular
)
16484 || (htab
->root
.dynamic_sections_created
16485 && (h
->root
.type
== bfd_link_hash_undefweak
16486 || h
->root
.type
== bfd_link_hash_undefined
))))
16488 /* Make sure this symbol is output as a dynamic symbol.
16489 Undefined weak syms won't yet be marked as dynamic. */
16490 if (h
->dynindx
== -1 && !h
->forced_local
16491 && h
->root
.type
== bfd_link_hash_undefweak
)
16493 if (! bfd_elf_link_record_dynamic_symbol (info
, h
))
16497 /* If that succeeded, we know we'll be keeping all the
16499 if (h
->dynindx
!= -1)
16503 eh
->dyn_relocs
= NULL
;
16508 /* Finally, allocate space. */
16509 for (p
= eh
->dyn_relocs
; p
!= NULL
; p
= p
->next
)
16511 asection
*sreloc
= elf_section_data (p
->sec
)->sreloc
;
16513 if (h
->type
== STT_GNU_IFUNC
16514 && eh
->plt
.noncall_refcount
== 0
16515 && SYMBOL_REFERENCES_LOCAL (info
, h
))
16516 elf32_arm_allocate_irelocs (info
, sreloc
, p
->count
);
16517 else if (h
->dynindx
!= -1 && (!bfd_link_pic(info
) || !info
->symbolic
|| !h
->def_regular
))
16518 elf32_arm_allocate_dynrelocs (info
, sreloc
, p
->count
);
16519 else if (htab
->fdpic_p
&& !bfd_link_pic(info
))
16520 htab
->srofixup
->size
+= 4 * p
->count
;
16522 elf32_arm_allocate_dynrelocs (info
, sreloc
, p
->count
);
/* Set DF_TEXTREL if we find any dynamic relocs that apply to
   read-only sections.  */

static bfd_boolean
maybe_set_textrel (struct elf_link_hash_entry *h, void *info_p)
{
  asection *sec;

  if (h->root.type == bfd_link_hash_indirect)
    return TRUE;

  sec = readonly_dynrelocs (h);
  if (sec != NULL)
    {
      struct bfd_link_info *info = (struct bfd_link_info *) info_p;

      info->flags |= DF_TEXTREL;
      info->callbacks->minfo
	(_("%pB: dynamic relocation against `%pT' in read-only section `%pA'\n"),
	 sec->owner, h->root.root.string, sec);

      /* Not an error, just cut short the traversal.  */
      return FALSE;
    }

  return TRUE;
}
void
bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
				 int byteswap_code)
{
  struct elf32_arm_link_hash_table *globals;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return;

  globals->byteswap_code = byteswap_code;
}
16569 /* Set the sizes of the dynamic sections. */
16572 elf32_arm_size_dynamic_sections (bfd
* output_bfd ATTRIBUTE_UNUSED
,
16573 struct bfd_link_info
* info
)
16578 bfd_boolean relocs
;
16580 struct elf32_arm_link_hash_table
*htab
;
16582 htab
= elf32_arm_hash_table (info
);
16586 dynobj
= elf_hash_table (info
)->dynobj
;
16587 BFD_ASSERT (dynobj
!= NULL
);
16588 check_use_blx (htab
);
16590 if (elf_hash_table (info
)->dynamic_sections_created
)
16592 /* Set the contents of the .interp section to the interpreter. */
16593 if (bfd_link_executable (info
) && !info
->nointerp
)
16595 s
= bfd_get_linker_section (dynobj
, ".interp");
16596 BFD_ASSERT (s
!= NULL
);
16597 s
->size
= sizeof ELF_DYNAMIC_INTERPRETER
;
16598 s
->contents
= (unsigned char *) ELF_DYNAMIC_INTERPRETER
;
16602 /* Set up .got offsets for local syms, and space for local dynamic
16604 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link
.next
)
16606 bfd_signed_vma
*local_got
;
16607 bfd_signed_vma
*end_local_got
;
16608 struct arm_local_iplt_info
**local_iplt_ptr
, *local_iplt
;
16609 char *local_tls_type
;
16610 bfd_vma
*local_tlsdesc_gotent
;
16611 bfd_size_type locsymcount
;
16612 Elf_Internal_Shdr
*symtab_hdr
;
16614 bfd_boolean is_vxworks
= htab
->vxworks_p
;
16615 unsigned int symndx
;
16616 struct fdpic_local
*local_fdpic_cnts
;
16618 if (! is_arm_elf (ibfd
))
16621 for (s
= ibfd
->sections
; s
!= NULL
; s
= s
->next
)
16623 struct elf_dyn_relocs
*p
;
16625 for (p
= (struct elf_dyn_relocs
*)
16626 elf_section_data (s
)->local_dynrel
; p
!= NULL
; p
= p
->next
)
16628 if (!bfd_is_abs_section (p
->sec
)
16629 && bfd_is_abs_section (p
->sec
->output_section
))
16631 /* Input section has been discarded, either because
16632 it is a copy of a linkonce section or due to
16633 linker script /DISCARD/, so we'll be discarding
16636 else if (is_vxworks
16637 && strcmp (p
->sec
->output_section
->name
,
16640 /* Relocations in vxworks .tls_vars sections are
16641 handled specially by the loader. */
16643 else if (p
->count
!= 0)
16645 srel
= elf_section_data (p
->sec
)->sreloc
;
16646 if (htab
->fdpic_p
&& !bfd_link_pic(info
))
16647 htab
->srofixup
->size
+= 4 * p
->count
;
16649 elf32_arm_allocate_dynrelocs (info
, srel
, p
->count
);
16650 if ((p
->sec
->output_section
->flags
& SEC_READONLY
) != 0)
16651 info
->flags
|= DF_TEXTREL
;
16656 local_got
= elf_local_got_refcounts (ibfd
);
16660 symtab_hdr
= & elf_symtab_hdr (ibfd
);
16661 locsymcount
= symtab_hdr
->sh_info
;
16662 end_local_got
= local_got
+ locsymcount
;
16663 local_iplt_ptr
= elf32_arm_local_iplt (ibfd
);
16664 local_tls_type
= elf32_arm_local_got_tls_type (ibfd
);
16665 local_tlsdesc_gotent
= elf32_arm_local_tlsdesc_gotent (ibfd
);
16666 local_fdpic_cnts
= elf32_arm_local_fdpic_cnts (ibfd
);
16668 s
= htab
->root
.sgot
;
16669 srel
= htab
->root
.srelgot
;
16670 for (; local_got
< end_local_got
;
16671 ++local_got
, ++local_iplt_ptr
, ++local_tls_type
,
16672 ++local_tlsdesc_gotent
, ++symndx
, ++local_fdpic_cnts
)
16674 *local_tlsdesc_gotent
= (bfd_vma
) -1;
16675 local_iplt
= *local_iplt_ptr
;
16677 /* FDPIC support. */
16678 if (local_fdpic_cnts
->gotofffuncdesc_cnt
> 0)
16680 if (local_fdpic_cnts
->funcdesc_offset
== -1)
16682 local_fdpic_cnts
->funcdesc_offset
= s
->size
;
16685 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups. */
16686 if (bfd_link_pic(info
))
16687 elf32_arm_allocate_dynrelocs (info
, srel
, 1);
16689 htab
->srofixup
->size
+= 8;
16693 if (local_fdpic_cnts
->funcdesc_cnt
> 0)
16695 if (local_fdpic_cnts
->funcdesc_offset
== -1)
16697 local_fdpic_cnts
->funcdesc_offset
= s
->size
;
16700 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups. */
16701 if (bfd_link_pic(info
))
16702 elf32_arm_allocate_dynrelocs (info
, srel
, 1);
16704 htab
->srofixup
->size
+= 8;
16707 /* We will add n R_ARM_RELATIVE relocations or n rofixups. */
16708 if (bfd_link_pic(info
))
16709 elf32_arm_allocate_dynrelocs (info
, srel
, local_fdpic_cnts
->funcdesc_cnt
);
16711 htab
->srofixup
->size
+= 4 * local_fdpic_cnts
->funcdesc_cnt
;
16714 if (local_iplt
!= NULL
)
16716 struct elf_dyn_relocs
*p
;
16718 if (local_iplt
->root
.refcount
> 0)
16720 elf32_arm_allocate_plt_entry (info
, TRUE
,
16723 if (local_iplt
->arm
.noncall_refcount
== 0)
16724 /* All references to the PLT are calls, so all
16725 non-call references can resolve directly to the
16726 run-time target. This means that the .got entry
16727 would be the same as the .igot.plt entry, so there's
16728 no point creating both. */
16733 BFD_ASSERT (local_iplt
->arm
.noncall_refcount
== 0);
16734 local_iplt
->root
.offset
= (bfd_vma
) -1;
16737 for (p
= local_iplt
->dyn_relocs
; p
!= NULL
; p
= p
->next
)
16741 psrel
= elf_section_data (p
->sec
)->sreloc
;
16742 if (local_iplt
->arm
.noncall_refcount
== 0)
16743 elf32_arm_allocate_irelocs (info
, psrel
, p
->count
);
16745 elf32_arm_allocate_dynrelocs (info
, psrel
, p
->count
);
16748 if (*local_got
> 0)
16750 Elf_Internal_Sym
*isym
;
16752 *local_got
= s
->size
;
16753 if (*local_tls_type
& GOT_TLS_GD
)
16754 /* TLS_GD relocs need an 8-byte structure in the GOT. */
16756 if (*local_tls_type
& GOT_TLS_GDESC
)
16758 *local_tlsdesc_gotent
= htab
->root
.sgotplt
->size
16759 - elf32_arm_compute_jump_table_size (htab
);
16760 htab
->root
.sgotplt
->size
+= 8;
16761 *local_got
= (bfd_vma
) -2;
16762 /* plt.got_offset needs to know there's a TLS_DESC
16763 reloc in the middle of .got.plt. */
16764 htab
->num_tls_desc
++;
16766 if (*local_tls_type
& GOT_TLS_IE
)
16769 if (*local_tls_type
& GOT_NORMAL
)
16771 /* If the symbol is both GD and GDESC, *local_got
16772 may have been overwritten. */
16773 *local_got
= s
->size
;
16777 isym
= bfd_sym_from_r_symndx (&htab
->sym_cache
, ibfd
, symndx
);
16781 /* If all references to an STT_GNU_IFUNC PLT are calls,
16782 then all non-call references, including this GOT entry,
16783 resolve directly to the run-time target. */
16784 if (ELF32_ST_TYPE (isym
->st_info
) == STT_GNU_IFUNC
16785 && (local_iplt
== NULL
16786 || local_iplt
->arm
.noncall_refcount
== 0))
16787 elf32_arm_allocate_irelocs (info
, srel
, 1);
16788 else if (bfd_link_pic (info
) || output_bfd
->flags
& DYNAMIC
|| htab
->fdpic_p
)
16790 if ((bfd_link_pic (info
) && !(*local_tls_type
& GOT_TLS_GDESC
)))
16791 elf32_arm_allocate_dynrelocs (info
, srel
, 1);
16792 else if (htab
->fdpic_p
&& *local_tls_type
& GOT_NORMAL
)
16793 htab
->srofixup
->size
+= 4;
16795 if ((bfd_link_pic (info
) || htab
->fdpic_p
)
16796 && *local_tls_type
& GOT_TLS_GDESC
)
16798 elf32_arm_allocate_dynrelocs (info
,
16799 htab
->root
.srelplt
, 1);
16800 htab
->tls_trampoline
= -1;
16805 *local_got
= (bfd_vma
) -1;
16809 if (htab
->tls_ldm_got
.refcount
> 0)
16811 /* Allocate two GOT entries and one dynamic relocation (if necessary)
16812 for R_ARM_TLS_LDM32/R_ARM_TLS_LDM32_FDPIC relocations. */
16813 htab
->tls_ldm_got
.offset
= htab
->root
.sgot
->size
;
16814 htab
->root
.sgot
->size
+= 8;
16815 if (bfd_link_pic (info
))
16816 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelgot
, 1);
16819 htab
->tls_ldm_got
.offset
= -1;
16821 /* At the very end of the .rofixup section is a pointer to the GOT,
16822 reserve space for it. */
16823 if (htab
->fdpic_p
&& htab
->srofixup
!= NULL
)
16824 htab
->srofixup
->size
+= 4;
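/* Illustrative tally (not from the original source): an FDPIC link that
   allocated three rofixups above would have grown .rofixup by 3 * 4 = 12
   bytes, and the 4 bytes reserved here for the trailing GOT pointer bring
   it to 16 bytes -- consistent with the BFD_ASSERT in
   elf32_arm_finish_dynamic_sections that reloc_count * 4 == size once the
   fixups have been emitted.  */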
16826 /* Allocate global sym .plt and .got entries, and space for global
16827 sym dynamic relocs. */
16828 elf_link_hash_traverse (& htab
->root
, allocate_dynrelocs_for_symbol
, info
);
16830 /* Here we rummage through the found bfds to collect glue information. */
16831 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link
.next
)
16833 if (! is_arm_elf (ibfd
))
16836 /* Initialise mapping tables for code/data. */
16837 bfd_elf32_arm_init_maps (ibfd
);
16839 if (!bfd_elf32_arm_process_before_allocation (ibfd
, info
)
16840 || !bfd_elf32_arm_vfp11_erratum_scan (ibfd
, info
)
16841 || !bfd_elf32_arm_stm32l4xx_erratum_scan (ibfd
, info
))
16842 _bfd_error_handler (_("errors encountered processing file %pB"), ibfd
);
16845 /* Allocate space for the glue sections now that we've sized them. */
16846 bfd_elf32_arm_allocate_interworking_sections (info
);
16848 /* For every jump slot reserved in the sgotplt, reloc_count is
16849 incremented. However, when we reserve space for TLS descriptors,
16850 it's not incremented, so in order to compute the space reserved
16851 for them, it suffices to multiply the reloc count by the jump slot size.  */
16853 if (htab
->root
.srelplt
)
16854 htab
->sgotplt_jump_table_size
= elf32_arm_compute_jump_table_size(htab
);
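/* A minimal sketch of the accounting described above, assuming 4-byte GOT
   jump slots (the names below are hypothetical, not the real
   elf32_arm_compute_jump_table_size):

     space_used_by_jump_slots = srelplt->reloc_count * 4;
     space_used_by_tls_descs  = sgotplt->size - space_used_by_jump_slots;

   i.e. everything in .got.plt not covered by a PLT relocation is taken to
   belong to TLS descriptors.  */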
16856 if (htab
->tls_trampoline
)
16858 if (htab
->root
.splt
->size
== 0)
16859 htab
->root
.splt
->size
+= htab
->plt_header_size
;
16861 htab
->tls_trampoline
= htab
->root
.splt
->size
;
16862 htab
->root
.splt
->size
+= htab
->plt_entry_size
;
16864 /* If we're not using lazy TLS relocations, don't generate the
16865 PLT and GOT entries they require. */
16866 if (!(info
->flags
& DF_BIND_NOW
))
16868 htab
->dt_tlsdesc_got
= htab
->root
.sgot
->size
;
16869 htab
->root
.sgot
->size
+= 4;
16871 htab
->dt_tlsdesc_plt
= htab
->root
.splt
->size
;
16872 htab
->root
.splt
->size
+= 4 * ARRAY_SIZE (dl_tlsdesc_lazy_trampoline
);
16876 /* The check_relocs and adjust_dynamic_symbol entry points have
16877 determined the sizes of the various dynamic sections. Allocate
16878 memory for them. */
16881 for (s
= dynobj
->sections
; s
!= NULL
; s
= s
->next
)
16885 if ((s
->flags
& SEC_LINKER_CREATED
) == 0)
16888 /* It's OK to base decisions on the section name, because none
16889 of the dynobj section names depend upon the input files. */
16890 name
= bfd_get_section_name (dynobj
, s
);
16892 if (s
== htab
->root
.splt
)
16894 /* Remember whether there is a PLT. */
16895 plt
= s
->size
!= 0;
16897 else if (CONST_STRNEQ (name
, ".rel"))
16901 /* Remember whether there are any reloc sections other
16902 than .rel(a).plt and .rela.plt.unloaded. */
16903 if (s
!= htab
->root
.srelplt
&& s
!= htab
->srelplt2
)
16906 /* We use the reloc_count field as a counter if we need
16907 to copy relocs into the output file. */
16908 s
->reloc_count
= 0;
16911 else if (s
!= htab
->root
.sgot
16912 && s
!= htab
->root
.sgotplt
16913 && s
!= htab
->root
.iplt
16914 && s
!= htab
->root
.igotplt
16915 && s
!= htab
->root
.sdynbss
16916 && s
!= htab
->root
.sdynrelro
16917 && s
!= htab
->srofixup
)
16919 /* It's not one of our sections, so don't allocate space. */
16925 /* If we don't need this section, strip it from the
16926 output file. This is mostly to handle .rel(a).bss and
16927 .rel(a).plt. We must create both sections in
16928 create_dynamic_sections, because they must be created
16929 before the linker maps input sections to output
16930 sections. The linker does that before
16931 adjust_dynamic_symbol is called, and it is that
16932 function which decides whether anything needs to go
16933 into these sections. */
16934 s
->flags
|= SEC_EXCLUDE
;
16938 if ((s
->flags
& SEC_HAS_CONTENTS
) == 0)
16941 /* Allocate memory for the section contents. */
16942 s
->contents
= (unsigned char *) bfd_zalloc (dynobj
, s
->size
);
16943 if (s
->contents
== NULL
)
16947 if (elf_hash_table (info
)->dynamic_sections_created
)
16949 /* Add some entries to the .dynamic section. We fill in the
16950 values later, in elf32_arm_finish_dynamic_sections, but we
16951 must add the entries now so that we get the correct size for
16952 the .dynamic section. The DT_DEBUG entry is filled in by the
16953 dynamic linker and used by the debugger. */
16954 #define add_dynamic_entry(TAG, VAL) \
16955 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
16957 if (bfd_link_executable (info
))
16959 if (!add_dynamic_entry (DT_DEBUG
, 0))
16965 if ( !add_dynamic_entry (DT_PLTGOT
, 0)
16966 || !add_dynamic_entry (DT_PLTRELSZ
, 0)
16967 || !add_dynamic_entry (DT_PLTREL
,
16968 htab
->use_rel
? DT_REL
: DT_RELA
)
16969 || !add_dynamic_entry (DT_JMPREL
, 0))
16972 if (htab
->dt_tlsdesc_plt
16973 && (!add_dynamic_entry (DT_TLSDESC_PLT
,0)
16974 || !add_dynamic_entry (DT_TLSDESC_GOT
,0)))
16982 if (!add_dynamic_entry (DT_REL
, 0)
16983 || !add_dynamic_entry (DT_RELSZ
, 0)
16984 || !add_dynamic_entry (DT_RELENT
, RELOC_SIZE (htab
)))
16989 if (!add_dynamic_entry (DT_RELA
, 0)
16990 || !add_dynamic_entry (DT_RELASZ
, 0)
16991 || !add_dynamic_entry (DT_RELAENT
, RELOC_SIZE (htab
)))
16996 /* If any dynamic relocs apply to a read-only section,
16997 then we need a DT_TEXTREL entry. */
16998 if ((info
->flags
& DF_TEXTREL
) == 0)
16999 elf_link_hash_traverse (&htab
->root
, maybe_set_textrel
, info
);
17001 if ((info
->flags
& DF_TEXTREL
) != 0)
17003 if (!add_dynamic_entry (DT_TEXTREL
, 0))
17006 if (htab
->vxworks_p
17007 && !elf_vxworks_add_dynamic_entries (output_bfd
, info
))
17010 #undef add_dynamic_entry
17015 /* Size sections even though they're not dynamic.  We use this to set up
17016 _TLS_MODULE_BASE_, if needed.  */
17019 elf32_arm_always_size_sections (bfd
*output_bfd
,
17020 struct bfd_link_info
*info
)
17023 struct elf32_arm_link_hash_table
*htab
;
17025 htab
= elf32_arm_hash_table (info
);
17027 if (bfd_link_relocatable (info
))
17030 tls_sec
= elf_hash_table (info
)->tls_sec
;
17034 struct elf_link_hash_entry
*tlsbase
;
17036 tlsbase
= elf_link_hash_lookup
17037 (elf_hash_table (info
), "_TLS_MODULE_BASE_", TRUE
, TRUE
, FALSE
);
17041 struct bfd_link_hash_entry
*bh
= NULL
;
17042 const struct elf_backend_data
*bed
17043 = get_elf_backend_data (output_bfd
);
17045 if (!(_bfd_generic_link_add_one_symbol
17046 (info
, output_bfd
, "_TLS_MODULE_BASE_", BSF_LOCAL
,
17047 tls_sec
, 0, NULL
, FALSE
,
17048 bed
->collect
, &bh
)))
17051 tlsbase
->type
= STT_TLS
;
17052 tlsbase
= (struct elf_link_hash_entry
*)bh
;
17053 tlsbase
->def_regular
= 1;
17054 tlsbase
->other
= STV_HIDDEN
;
17055 (*bed
->elf_backend_hide_symbol
) (info
, tlsbase
, TRUE
);
17059 if (htab
->fdpic_p
&& !bfd_link_relocatable (info
)
17060 && !bfd_elf_stack_segment_size (output_bfd
, info
,
17061 "__stacksize", DEFAULT_STACK_SIZE
))
17067 /* Finish up dynamic symbol handling. We set the contents of various
17068 dynamic sections here. */
17071 elf32_arm_finish_dynamic_symbol (bfd
* output_bfd
,
17072 struct bfd_link_info
* info
,
17073 struct elf_link_hash_entry
* h
,
17074 Elf_Internal_Sym
* sym
)
17076 struct elf32_arm_link_hash_table
*htab
;
17077 struct elf32_arm_link_hash_entry
*eh
;
17079 htab
= elf32_arm_hash_table (info
);
17083 eh
= (struct elf32_arm_link_hash_entry
*) h
;
17085 if (h
->plt
.offset
!= (bfd_vma
) -1)
17089 BFD_ASSERT (h
->dynindx
!= -1);
17090 if (! elf32_arm_populate_plt_entry (output_bfd
, info
, &h
->plt
, &eh
->plt
,
17095 if (!h
->def_regular
)
17097 /* Mark the symbol as undefined, rather than as defined in
17098 the .plt section. */
17099 sym
->st_shndx
= SHN_UNDEF
;
17100 /* If the symbol is weak we need to clear the value.
17101 Otherwise, the PLT entry would provide a definition for
17102 the symbol even if the symbol wasn't defined anywhere,
17103 and so the symbol would never be NULL. Leave the value if
17104 there were any relocations where pointer equality matters
17105 (this is a clue for the dynamic linker, to make function
17106 pointer comparisons work between an application and shared library).  */
17108 if (!h
->ref_regular_nonweak
|| !h
->pointer_equality_needed
)
17111 else if (eh
->is_iplt
&& eh
->plt
.noncall_refcount
!= 0)
17113 /* At least one non-call relocation references this .iplt entry,
17114 so the .iplt entry is the function's canonical address. */
17115 sym
->st_info
= ELF_ST_INFO (ELF_ST_BIND (sym
->st_info
), STT_FUNC
);
17116 ARM_SET_SYM_BRANCH_TYPE (sym
->st_target_internal
, ST_BRANCH_TO_ARM
);
17117 sym
->st_shndx
= (_bfd_elf_section_from_bfd_section
17118 (output_bfd
, htab
->root
.iplt
->output_section
));
17119 sym
->st_value
= (h
->plt
.offset
17120 + htab
->root
.iplt
->output_section
->vma
17121 + htab
->root
.iplt
->output_offset
);
17128 Elf_Internal_Rela rel
;
17130 /* This symbol needs a copy reloc. Set it up. */
17131 BFD_ASSERT (h
->dynindx
!= -1
17132 && (h
->root
.type
== bfd_link_hash_defined
17133 || h
->root
.type
== bfd_link_hash_defweak
));
17136 rel
.r_offset
= (h
->root
.u
.def
.value
17137 + h
->root
.u
.def
.section
->output_section
->vma
17138 + h
->root
.u
.def
.section
->output_offset
);
17139 rel
.r_info
= ELF32_R_INFO (h
->dynindx
, R_ARM_COPY
);
17140 if (h
->root
.u
.def
.section
== htab
->root
.sdynrelro
)
17141 s
= htab
->root
.sreldynrelro
;
17143 s
= htab
->root
.srelbss
;
17144 elf32_arm_add_dynreloc (output_bfd
, info
, s
, &rel
);
17147 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. On VxWorks,
17148 and for FDPIC, the _GLOBAL_OFFSET_TABLE_ symbol is not absolute:
17149 it is relative to the ".got" section. */
17150 if (h
== htab
->root
.hdynamic
17151 || (!htab
->fdpic_p
&& !htab
->vxworks_p
&& h
== htab
->root
.hgot
))
17152 sym
->st_shndx
= SHN_ABS
;
17158 arm_put_trampoline (struct elf32_arm_link_hash_table
*htab
, bfd
*output_bfd
,
17160 const unsigned long *template, unsigned count
)
17164 for (ix
= 0; ix
!= count
; ix
++)
17166 unsigned long insn
= template[ix
];
17168 /* Emit mov pc,rx if bx is not permitted. */
17169 if (htab
->fix_v4bx
== 1 && (insn
& 0x0ffffff0) == 0x012fff10)
17170 insn
= (insn
& 0xf000000f) | 0x01a0f000;
17171 put_arm_insn (htab
, output_bfd
, insn
, (char *)contents
+ ix
*4);
17175 /* Install the special first PLT entry for elf32-arm-nacl. Unlike
17176 other variants, NaCl needs this entry in a static executable's
17177 .iplt too. When we're handling that case, GOT_DISPLACEMENT is
17178 zero. For .iplt really only the last bundle is useful, and .iplt
17179 could have a shorter first entry, with each individual PLT entry's
17180 relative branch calculated differently so it targets the last
17181 bundle instead of the instruction before it (labelled .Lplt_tail
17182 above). But it's simpler to keep the size and layout of PLT0
17183 consistent with the dynamic case, at the cost of some dead code at
17184 the start of .iplt and the one dead store to the stack at the start
17187 arm_nacl_put_plt0 (struct elf32_arm_link_hash_table
*htab
, bfd
*output_bfd
,
17188 asection
*plt
, bfd_vma got_displacement
)
17192 put_arm_insn (htab
, output_bfd
,
17193 elf32_arm_nacl_plt0_entry
[0]
17194 | arm_movw_immediate (got_displacement
),
17195 plt
->contents
+ 0);
17196 put_arm_insn (htab
, output_bfd
,
17197 elf32_arm_nacl_plt0_entry
[1]
17198 | arm_movt_immediate (got_displacement
),
17199 plt
->contents
+ 4);
17201 for (i
= 2; i
< ARRAY_SIZE (elf32_arm_nacl_plt0_entry
); ++i
)
17202 put_arm_insn (htab
, output_bfd
,
17203 elf32_arm_nacl_plt0_entry
[i
],
17204 plt
->contents
+ (i
* 4));
17207 /* Finish up the dynamic sections. */
17210 elf32_arm_finish_dynamic_sections (bfd
* output_bfd
, struct bfd_link_info
* info
)
17215 struct elf32_arm_link_hash_table
*htab
;
17217 htab
= elf32_arm_hash_table (info
);
17221 dynobj
= elf_hash_table (info
)->dynobj
;
17223 sgot
= htab
->root
.sgotplt
;
17224 /* A broken linker script might have discarded the dynamic sections.
17225 Catch this here so that we do not seg-fault later on. */
17226 if (sgot
!= NULL
&& bfd_is_abs_section (sgot
->output_section
))
17228 sdyn
= bfd_get_linker_section (dynobj
, ".dynamic");
17230 if (elf_hash_table (info
)->dynamic_sections_created
)
17233 Elf32_External_Dyn
*dyncon
, *dynconend
;
17235 splt
= htab
->root
.splt
;
17236 BFD_ASSERT (splt
!= NULL
&& sdyn
!= NULL
);
17237 BFD_ASSERT (htab
->symbian_p
|| sgot
!= NULL
);
17239 dyncon
= (Elf32_External_Dyn
*) sdyn
->contents
;
17240 dynconend
= (Elf32_External_Dyn
*) (sdyn
->contents
+ sdyn
->size
);
17242 for (; dyncon
< dynconend
; dyncon
++)
17244 Elf_Internal_Dyn dyn
;
17248 bfd_elf32_swap_dyn_in (dynobj
, dyncon
, &dyn
);
17255 if (htab
->vxworks_p
17256 && elf_vxworks_finish_dynamic_entry (output_bfd
, &dyn
))
17257 bfd_elf32_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
17262 goto get_vma_if_bpabi
;
17265 goto get_vma_if_bpabi
;
17268 goto get_vma_if_bpabi
;
17270 name
= ".gnu.version";
17271 goto get_vma_if_bpabi
;
17273 name
= ".gnu.version_d";
17274 goto get_vma_if_bpabi
;
17276 name
= ".gnu.version_r";
17277 goto get_vma_if_bpabi
;
17280 name
= htab
->symbian_p
? ".got" : ".got.plt";
17283 name
= RELOC_SECTION (htab
, ".plt");
17285 s
= bfd_get_linker_section (dynobj
, name
);
17289 (_("could not find section %s"), name
);
17290 bfd_set_error (bfd_error_invalid_operation
);
17293 if (!htab
->symbian_p
)
17294 dyn
.d_un
.d_ptr
= s
->output_section
->vma
+ s
->output_offset
;
17296 /* In the BPABI, tags in the PT_DYNAMIC section point
17297 at the file offset, not the memory address, for the
17298 convenience of the post linker. */
17299 dyn
.d_un
.d_ptr
= s
->output_section
->filepos
+ s
->output_offset
;
17300 bfd_elf32_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
17304 if (htab
->symbian_p
)
17309 s
= htab
->root
.srelplt
;
17310 BFD_ASSERT (s
!= NULL
);
17311 dyn
.d_un
.d_val
= s
->size
;
17312 bfd_elf32_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
17319 /* In the BPABI, the DT_REL tag must point at the file
17320 offset, not the VMA, of the first relocation
17321 section. So, we use code similar to that in
17322 elflink.c, but do not check for SHF_ALLOC on the
17323 relocation section, since relocation sections are
17324 never allocated under the BPABI. PLT relocs are also
17326 if (htab
->symbian_p
)
17329 type
= ((dyn
.d_tag
== DT_REL
|| dyn
.d_tag
== DT_RELSZ
)
17330 ? SHT_REL
: SHT_RELA
);
17331 dyn
.d_un
.d_val
= 0;
17332 for (i
= 1; i
< elf_numsections (output_bfd
); i
++)
17334 Elf_Internal_Shdr
*hdr
17335 = elf_elfsections (output_bfd
)[i
];
17336 if (hdr
->sh_type
== type
)
17338 if (dyn
.d_tag
== DT_RELSZ
17339 || dyn
.d_tag
== DT_RELASZ
)
17340 dyn
.d_un
.d_val
+= hdr
->sh_size
;
17341 else if ((ufile_ptr
) hdr
->sh_offset
17342 <= dyn
.d_un
.d_val
- 1)
17343 dyn
.d_un
.d_val
= hdr
->sh_offset
;
17346 bfd_elf32_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
17350 case DT_TLSDESC_PLT
:
17351 s
= htab
->root
.splt
;
17352 dyn
.d_un
.d_ptr
= (s
->output_section
->vma
+ s
->output_offset
17353 + htab
->dt_tlsdesc_plt
);
17354 bfd_elf32_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
17357 case DT_TLSDESC_GOT
:
17358 s
= htab
->root
.sgot
;
17359 dyn
.d_un
.d_ptr
= (s
->output_section
->vma
+ s
->output_offset
17360 + htab
->dt_tlsdesc_got
);
17361 bfd_elf32_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
17364 /* Set the bottom bit of DT_INIT/FINI if the
17365 corresponding function is Thumb. */
17367 name
= info
->init_function
;
17370 name
= info
->fini_function
;
17372 /* If it wasn't set by elf_bfd_final_link
17373 then there is nothing to adjust. */
17374 if (dyn
.d_un
.d_val
!= 0)
17376 struct elf_link_hash_entry
* eh
;
17378 eh
= elf_link_hash_lookup (elf_hash_table (info
), name
,
17379 FALSE
, FALSE
, TRUE
);
17381 && ARM_GET_SYM_BRANCH_TYPE (eh
->target_internal
)
17382 == ST_BRANCH_TO_THUMB
)
17384 dyn
.d_un
.d_val
|= 1;
17385 bfd_elf32_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
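/* Illustrative example (values invented for exposition): if _init is a
   Thumb function that elf_bfd_final_link recorded at address 0x8230, the
   code above ORs in bit 0 so the DT_INIT entry becomes 0x8231 and the
   dynamic linker enters the function in Thumb state.  */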
17392 /* Fill in the first entry in the procedure linkage table. */
17393 if (splt
->size
> 0 && htab
->plt_header_size
)
17395 const bfd_vma
*plt0_entry
;
17396 bfd_vma got_address
, plt_address
, got_displacement
;
17398 /* Calculate the addresses of the GOT and PLT. */
17399 got_address
= sgot
->output_section
->vma
+ sgot
->output_offset
;
17400 plt_address
= splt
->output_section
->vma
+ splt
->output_offset
;
17402 if (htab
->vxworks_p
)
17404 /* The VxWorks GOT is relocated by the dynamic linker.
17405 Therefore, we must emit relocations rather than simply
17406 computing the values now. */
17407 Elf_Internal_Rela rel
;
17409 plt0_entry
= elf32_arm_vxworks_exec_plt0_entry
;
17410 put_arm_insn (htab
, output_bfd
, plt0_entry
[0],
17411 splt
->contents
+ 0);
17412 put_arm_insn (htab
, output_bfd
, plt0_entry
[1],
17413 splt
->contents
+ 4);
17414 put_arm_insn (htab
, output_bfd
, plt0_entry
[2],
17415 splt
->contents
+ 8);
17416 bfd_put_32 (output_bfd
, got_address
, splt
->contents
+ 12);
17418 /* Generate a relocation for _GLOBAL_OFFSET_TABLE_. */
17419 rel
.r_offset
= plt_address
+ 12;
17420 rel
.r_info
= ELF32_R_INFO (htab
->root
.hgot
->indx
, R_ARM_ABS32
);
17422 SWAP_RELOC_OUT (htab
) (output_bfd
, &rel
,
17423 htab
->srelplt2
->contents
);
17425 else if (htab
->nacl_p
)
17426 arm_nacl_put_plt0 (htab
, output_bfd
, splt
,
17427 got_address
+ 8 - (plt_address
+ 16));
17428 else if (using_thumb_only (htab
))
17430 got_displacement
= got_address
- (plt_address
+ 12);
17432 plt0_entry
= elf32_thumb2_plt0_entry
;
17433 put_arm_insn (htab
, output_bfd
, plt0_entry
[0],
17434 splt
->contents
+ 0);
17435 put_arm_insn (htab
, output_bfd
, plt0_entry
[1],
17436 splt
->contents
+ 4);
17437 put_arm_insn (htab
, output_bfd
, plt0_entry
[2],
17438 splt
->contents
+ 8);
17440 bfd_put_32 (output_bfd
, got_displacement
, splt
->contents
+ 12);
17444 got_displacement
= got_address
- (plt_address
+ 16);
17446 plt0_entry
= elf32_arm_plt0_entry
;
17447 put_arm_insn (htab
, output_bfd
, plt0_entry
[0],
17448 splt
->contents
+ 0);
17449 put_arm_insn (htab
, output_bfd
, plt0_entry
[1],
17450 splt
->contents
+ 4);
17451 put_arm_insn (htab
, output_bfd
, plt0_entry
[2],
17452 splt
->contents
+ 8);
17453 put_arm_insn (htab
, output_bfd
, plt0_entry
[3],
17454 splt
->contents
+ 12);
17456 #ifdef FOUR_WORD_PLT
17457 /* The displacement value goes in the otherwise-unused
17458 last word of the second entry. */
17459 bfd_put_32 (output_bfd
, got_displacement
, splt
->contents
+ 28);
17461 bfd_put_32 (output_bfd
, got_displacement
, splt
->contents
+ 16);
17466 /* UnixWare sets the entsize of .plt to 4, although that doesn't
17467 really seem like the right value. */
17468 if (splt
->output_section
->owner
== output_bfd
)
17469 elf_section_data (splt
->output_section
)->this_hdr
.sh_entsize
= 4;
17471 if (htab
->dt_tlsdesc_plt
)
17473 bfd_vma got_address
17474 = sgot
->output_section
->vma
+ sgot
->output_offset
;
17475 bfd_vma gotplt_address
= (htab
->root
.sgot
->output_section
->vma
17476 + htab
->root
.sgot
->output_offset
);
17477 bfd_vma plt_address
17478 = splt
->output_section
->vma
+ splt
->output_offset
;
17480 arm_put_trampoline (htab
, output_bfd
,
17481 splt
->contents
+ htab
->dt_tlsdesc_plt
,
17482 dl_tlsdesc_lazy_trampoline
, 6);
17484 bfd_put_32 (output_bfd
,
17485 gotplt_address
+ htab
->dt_tlsdesc_got
17486 - (plt_address
+ htab
->dt_tlsdesc_plt
)
17487 - dl_tlsdesc_lazy_trampoline
[6],
17488 splt
->contents
+ htab
->dt_tlsdesc_plt
+ 24);
17489 bfd_put_32 (output_bfd
,
17490 got_address
- (plt_address
+ htab
->dt_tlsdesc_plt
)
17491 - dl_tlsdesc_lazy_trampoline
[7],
17492 splt
->contents
+ htab
->dt_tlsdesc_plt
+ 24 + 4);
17495 if (htab
->tls_trampoline
)
17497 arm_put_trampoline (htab
, output_bfd
,
17498 splt
->contents
+ htab
->tls_trampoline
,
17499 tls_trampoline
, 3);
17500 #ifdef FOUR_WORD_PLT
17501 bfd_put_32 (output_bfd
, 0x00000000,
17502 splt
->contents
+ htab
->tls_trampoline
+ 12);
17506 if (htab
->vxworks_p
17507 && !bfd_link_pic (info
)
17508 && htab
->root
.splt
->size
> 0)
17510 /* Correct the .rel(a).plt.unloaded relocations. They will have
17511 incorrect symbol indexes. */
17515 num_plts
= ((htab
->root
.splt
->size
- htab
->plt_header_size
)
17516 / htab
->plt_entry_size
);
17517 p
= htab
->srelplt2
->contents
+ RELOC_SIZE (htab
);
17519 for (; num_plts
; num_plts
--)
17521 Elf_Internal_Rela rel
;
17523 SWAP_RELOC_IN (htab
) (output_bfd
, p
, &rel
);
17524 rel
.r_info
= ELF32_R_INFO (htab
->root
.hgot
->indx
, R_ARM_ABS32
);
17525 SWAP_RELOC_OUT (htab
) (output_bfd
, &rel
, p
);
17526 p
+= RELOC_SIZE (htab
);
17528 SWAP_RELOC_IN (htab
) (output_bfd
, p
, &rel
);
17529 rel
.r_info
= ELF32_R_INFO (htab
->root
.hplt
->indx
, R_ARM_ABS32
);
17530 SWAP_RELOC_OUT (htab
) (output_bfd
, &rel
, p
);
17531 p
+= RELOC_SIZE (htab
);
17536 if (htab
->nacl_p
&& htab
->root
.iplt
!= NULL
&& htab
->root
.iplt
->size
> 0)
17537 /* NaCl uses a special first entry in .iplt too. */
17538 arm_nacl_put_plt0 (htab
, output_bfd
, htab
->root
.iplt
, 0);
17540 /* Fill in the first three entries in the global offset table. */
17543 if (sgot
->size
> 0)
17546 bfd_put_32 (output_bfd
, (bfd_vma
) 0, sgot
->contents
);
17548 bfd_put_32 (output_bfd
,
17549 sdyn
->output_section
->vma
+ sdyn
->output_offset
,
17551 bfd_put_32 (output_bfd
, (bfd_vma
) 0, sgot
->contents
+ 4);
17552 bfd_put_32 (output_bfd
, (bfd_vma
) 0, sgot
->contents
+ 8);
17555 elf_section_data (sgot
->output_section
)->this_hdr
.sh_entsize
= 4;
17558 /* At the very end of the .rofixup section is a pointer to the GOT. */
17559 if (htab
->fdpic_p
&& htab
->srofixup
!= NULL
)
17561 struct elf_link_hash_entry
*hgot
= htab
->root
.hgot
;
17563 bfd_vma got_value
= hgot
->root
.u
.def
.value
17564 + hgot
->root
.u
.def
.section
->output_section
->vma
17565 + hgot
->root
.u
.def
.section
->output_offset
;
17567 arm_elf_add_rofixup(output_bfd
, htab
->srofixup
, got_value
);
17569 /* Make sure we allocated and generated the same number of fixups. */
17570 BFD_ASSERT (htab
->srofixup
->reloc_count
* 4 == htab
->srofixup
->size
);
17577 elf32_arm_post_process_headers (bfd
* abfd
, struct bfd_link_info
* link_info ATTRIBUTE_UNUSED
)
17579 Elf_Internal_Ehdr
* i_ehdrp
; /* ELF file header, internal form. */
17580 struct elf32_arm_link_hash_table
*globals
;
17581 struct elf_segment_map
*m
;
17583 i_ehdrp
= elf_elfheader (abfd
);
17585 if (EF_ARM_EABI_VERSION (i_ehdrp
->e_flags
) == EF_ARM_EABI_UNKNOWN
)
17586 i_ehdrp
->e_ident
[EI_OSABI
] = ELFOSABI_ARM
;
17588 _bfd_elf_post_process_headers (abfd
, link_info
);
17589 i_ehdrp
->e_ident
[EI_ABIVERSION
] = ARM_ELF_ABI_VERSION
;
17593 globals
= elf32_arm_hash_table (link_info
);
17594 if (globals
!= NULL
&& globals
->byteswap_code
)
17595 i_ehdrp
->e_flags
|= EF_ARM_BE8
;
17597 if (globals
->fdpic_p
)
17598 i_ehdrp
->e_ident
[EI_OSABI
] |= ELFOSABI_ARM_FDPIC
;
17601 if (EF_ARM_EABI_VERSION (i_ehdrp
->e_flags
) == EF_ARM_EABI_VER5
17602 && ((i_ehdrp
->e_type
== ET_DYN
) || (i_ehdrp
->e_type
== ET_EXEC
)))
17604 int abi
= bfd_elf_get_obj_attr_int (abfd
, OBJ_ATTR_PROC
, Tag_ABI_VFP_args
);
17605 if (abi
== AEABI_VFP_args_vfp
)
17606 i_ehdrp
->e_flags
|= EF_ARM_ABI_FLOAT_HARD
;
17608 i_ehdrp
->e_flags
|= EF_ARM_ABI_FLOAT_SOFT
;
17611 /* Scan segment to set p_flags attribute if it contains only sections with
17612 SHF_ARM_PURECODE flag. */
17613 for (m
= elf_seg_map (abfd
); m
!= NULL
; m
= m
->next
)
17619 for (j
= 0; j
< m
->count
; j
++)
17621 if (!(elf_section_flags (m
->sections
[j
]) & SHF_ARM_PURECODE
))
17627 m
->p_flags_valid
= 1;
17632 static enum elf_reloc_type_class
17633 elf32_arm_reloc_type_class (const struct bfd_link_info
*info ATTRIBUTE_UNUSED
,
17634 const asection
*rel_sec ATTRIBUTE_UNUSED
,
17635 const Elf_Internal_Rela
*rela
)
17637 switch ((int) ELF32_R_TYPE (rela
->r_info
))
17639 case R_ARM_RELATIVE
:
17640 return reloc_class_relative
;
17641 case R_ARM_JUMP_SLOT
:
17642 return reloc_class_plt
;
17644 return reloc_class_copy
;
17645 case R_ARM_IRELATIVE
:
17646 return reloc_class_ifunc
;
17648 return reloc_class_normal
;
17653 elf32_arm_final_write_processing (bfd
*abfd
, bfd_boolean linker ATTRIBUTE_UNUSED
)
17655 bfd_arm_update_notes (abfd
, ARM_NOTE_SECTION
);
17658 /* Return TRUE if this is an unwinding table entry. */
17661 is_arm_elf_unwind_section_name (bfd
* abfd ATTRIBUTE_UNUSED
, const char * name
)
17663 return (CONST_STRNEQ (name
, ELF_STRING_ARM_unwind
)
17664 || CONST_STRNEQ (name
, ELF_STRING_ARM_unwind_once
));
17668 /* Set the type and flags for an ARM section. We do this by
17669 the section name, which is a hack, but ought to work. */
17672 elf32_arm_fake_sections (bfd
* abfd
, Elf_Internal_Shdr
* hdr
, asection
* sec
)
17676 name
= bfd_get_section_name (abfd
, sec
);
17678 if (is_arm_elf_unwind_section_name (abfd
, name
))
17680 hdr
->sh_type
= SHT_ARM_EXIDX
;
17681 hdr
->sh_flags
|= SHF_LINK_ORDER
;
17684 if (sec
->flags
& SEC_ELF_PURECODE
)
17685 hdr
->sh_flags
|= SHF_ARM_PURECODE
;
17690 /* Handle an ARM specific section when reading an object file. This is
17691 called when bfd_section_from_shdr finds a section with an unknown
17695 elf32_arm_section_from_shdr (bfd
*abfd
,
17696 Elf_Internal_Shdr
* hdr
,
17700 /* There ought to be a place to keep ELF backend specific flags, but
17701 at the moment there isn't one. We just keep track of the
17702 sections by their name, instead. Fortunately, the ABI gives
17703 names for all the ARM specific sections, so we will probably get
17705 switch (hdr
->sh_type
)
17707 case SHT_ARM_EXIDX
:
17708 case SHT_ARM_PREEMPTMAP
:
17709 case SHT_ARM_ATTRIBUTES
:
17716 if (! _bfd_elf_make_section_from_shdr (abfd
, hdr
, name
, shindex
))
17722 static _arm_elf_section_data
*
17723 get_arm_elf_section_data (asection
* sec
)
17725 if (sec
&& sec
->owner
&& is_arm_elf (sec
->owner
))
17726 return elf32_arm_section_data (sec
);
17734 struct bfd_link_info
*info
;
17737 int (*func
) (void *, const char *, Elf_Internal_Sym
*,
17738 asection
*, struct elf_link_hash_entry
*);
17739 } output_arch_syminfo
;
17741 enum map_symbol_type
17749 /* Output a single mapping symbol. */
17752 elf32_arm_output_map_sym (output_arch_syminfo
*osi
,
17753 enum map_symbol_type type
,
17756 static const char *names
[3] = {"$a", "$t", "$d"};
17757 Elf_Internal_Sym sym
;
17759 sym
.st_value
= osi
->sec
->output_section
->vma
17760 + osi
->sec
->output_offset
17764 sym
.st_info
= ELF_ST_INFO (STB_LOCAL
, STT_NOTYPE
);
17765 sym
.st_shndx
= osi
->sec_shndx
;
17766 sym
.st_target_internal
= 0;
17767 elf32_arm_section_map_add (osi
->sec
, names
[type
][1], offset
);
17768 return osi
->func (osi
->flaginfo
, names
[type
], &sym
, osi
->sec
, NULL
) == 1;
17771 /* Output mapping symbols for the PLT entry described by ROOT_PLT and ARM_PLT.
17772 IS_IPLT_ENTRY_P says whether the PLT is in .iplt rather than .plt. */
17775 elf32_arm_output_plt_map_1 (output_arch_syminfo
*osi
,
17776 bfd_boolean is_iplt_entry_p
,
17777 union gotplt_union
*root_plt
,
17778 struct arm_plt_info
*arm_plt
)
17780 struct elf32_arm_link_hash_table
*htab
;
17781 bfd_vma addr
, plt_header_size
;
17783 if (root_plt
->offset
== (bfd_vma
) -1)
17786 htab
= elf32_arm_hash_table (osi
->info
);
17790 if (is_iplt_entry_p
)
17792 osi
->sec
= htab
->root
.iplt
;
17793 plt_header_size
= 0;
17797 osi
->sec
= htab
->root
.splt
;
17798 plt_header_size
= htab
->plt_header_size
;
17800 osi
->sec_shndx
= (_bfd_elf_section_from_bfd_section
17801 (osi
->info
->output_bfd
, osi
->sec
->output_section
));
17803 addr
= root_plt
->offset
& -2;
17804 if (htab
->symbian_p
)
17806 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_ARM
, addr
))
17808 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_DATA
, addr
+ 4))
17811 else if (htab
->vxworks_p
)
17813 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_ARM
, addr
))
17815 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_DATA
, addr
+ 8))
17817 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_ARM
, addr
+ 12))
17819 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_DATA
, addr
+ 20))
17822 else if (htab
->nacl_p
)
17824 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_ARM
, addr
))
17827 else if (htab
->fdpic_p
)
17829 enum map_symbol_type type
= using_thumb_only(htab
)
17833 if (elf32_arm_plt_needs_thumb_stub_p (osi
->info
, arm_plt
))
17834 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_THUMB
, addr
- 4))
17836 if (!elf32_arm_output_map_sym (osi
, type
, addr
))
17838 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_DATA
, addr
+ 16))
17840 if (htab
->plt_entry_size
== 4 * ARRAY_SIZE(elf32_arm_fdpic_plt_entry
))
17841 if (!elf32_arm_output_map_sym (osi
, type
, addr
+ 24))
17844 else if (using_thumb_only (htab
))
17846 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_THUMB
, addr
))
17851 bfd_boolean thumb_stub_p
;
17853 thumb_stub_p
= elf32_arm_plt_needs_thumb_stub_p (osi
->info
, arm_plt
);
17856 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_THUMB
, addr
- 4))
17859 #ifdef FOUR_WORD_PLT
17860 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_ARM
, addr
))
17862 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_DATA
, addr
+ 12))
17865 /* A three-word PLT with no Thumb thunk contains only Arm code, so we
17866 only need to output a mapping symbol for the first PLT entry and for
17867 entries with Thumb thunks.  */
17868 if (thumb_stub_p
|| addr
== plt_header_size
)
17870 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_ARM
, addr
))
17879 /* Output mapping symbols for PLT entries associated with H. */
17882 elf32_arm_output_plt_map (struct elf_link_hash_entry
*h
, void *inf
)
17884 output_arch_syminfo
*osi
= (output_arch_syminfo
*) inf
;
17885 struct elf32_arm_link_hash_entry
*eh
;
17887 if (h
->root
.type
== bfd_link_hash_indirect
)
17890 if (h
->root
.type
== bfd_link_hash_warning
)
17891 /* When warning symbols are created, they **replace** the "real"
17892 entry in the hash table, thus we never get to see the real
17893 symbol in a hash traversal. So look at it now. */
17894 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
17896 eh
= (struct elf32_arm_link_hash_entry
*) h
;
17897 return elf32_arm_output_plt_map_1 (osi
, SYMBOL_CALLS_LOCAL (osi
->info
, h
),
17898 &h
->plt
, &eh
->plt
);
17901 /* Bind a veneered symbol to its veneer, identified by its hash entry
17902 STUB_ENTRY.  The veneered location thus loses its symbol.  */
17905 arm_stub_claim_sym (struct elf32_arm_stub_hash_entry *stub_entry)
17907 struct elf32_arm_link_hash_entry *hash = stub_entry->h;
17910 hash->root.root.u.def.section = stub_entry->stub_sec;
17911 hash->root.root.u.def.value = stub_entry->stub_offset;
17912 hash->root.size = stub_entry->stub_size;
17915 /* Output a single local symbol for a generated stub. */
17918 elf32_arm_output_stub_sym (output_arch_syminfo
*osi
, const char *name
,
17919 bfd_vma offset
, bfd_vma size
)
17921 Elf_Internal_Sym sym
;
17923 sym
.st_value
= osi
->sec
->output_section
->vma
17924 + osi
->sec
->output_offset
17926 sym
.st_size
= size
;
17928 sym
.st_info
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
17929 sym
.st_shndx
= osi
->sec_shndx
;
17930 sym
.st_target_internal
= 0;
17931 return osi
->func (osi
->flaginfo
, name
, &sym
, osi
->sec
, NULL
) == 1;
17935 arm_map_one_stub (struct bfd_hash_entry
* gen_entry
,
17938 struct elf32_arm_stub_hash_entry
*stub_entry
;
17939 asection
*stub_sec
;
17942 output_arch_syminfo
*osi
;
17943 const insn_sequence
*template_sequence
;
17944 enum stub_insn_type prev_type
;
17947 enum map_symbol_type sym_type
;
17949 /* Massage our args to the form they really have. */
17950 stub_entry
= (struct elf32_arm_stub_hash_entry
*) gen_entry
;
17951 osi
= (output_arch_syminfo
*) in_arg
;
17953 stub_sec
= stub_entry
->stub_sec
;
17955 /* Ensure this stub is attached to the current section being
17957 if (stub_sec
!= osi
->sec
)
17960 addr
= (bfd_vma
) stub_entry
->stub_offset
;
17961 template_sequence
= stub_entry
->stub_template
;
17963 if (arm_stub_sym_claimed (stub_entry
->stub_type
))
17964 arm_stub_claim_sym (stub_entry
);
17967 stub_name
= stub_entry
->output_name
;
17968 switch (template_sequence
[0].type
)
17971 if (!elf32_arm_output_stub_sym (osi
, stub_name
, addr
,
17972 stub_entry
->stub_size
))
17977 if (!elf32_arm_output_stub_sym (osi
, stub_name
, addr
| 1,
17978 stub_entry
->stub_size
))
17987 prev_type
= DATA_TYPE
;
17989 for (i
= 0; i
< stub_entry
->stub_template_size
; i
++)
17991 switch (template_sequence
[i
].type
)
17994 sym_type
= ARM_MAP_ARM
;
17999 sym_type
= ARM_MAP_THUMB
;
18003 sym_type
= ARM_MAP_DATA
;
18011 if (template_sequence
[i
].type
!= prev_type
)
18013 prev_type
= template_sequence
[i
].type
;
18014 if (!elf32_arm_output_map_sym (osi
, sym_type
, addr
+ size
))
18018 switch (template_sequence
[i
].type
)
18042 /* Output mapping symbols for linker generated sections,
18043 and for those data-only sections that do not have a
18047 elf32_arm_output_arch_local_syms (bfd
*output_bfd
,
18048 struct bfd_link_info
*info
,
18050 int (*func
) (void *, const char *,
18051 Elf_Internal_Sym
*,
18053 struct elf_link_hash_entry
*))
18055 output_arch_syminfo osi
;
18056 struct elf32_arm_link_hash_table
*htab
;
18058 bfd_size_type size
;
18061 htab
= elf32_arm_hash_table (info
);
18065 check_use_blx (htab
);
18067 osi
.flaginfo
= flaginfo
;
18071 /* Add a $d mapping symbol to data-only sections that
18072 don't have any mapping symbol. This may result in (harmless) redundant
18073 mapping symbols. */
18074 for (input_bfd
= info
->input_bfds
;
18076 input_bfd
= input_bfd
->link
.next
)
18078 if ((input_bfd
->flags
& (BFD_LINKER_CREATED
| HAS_SYMS
)) == HAS_SYMS
)
18079 for (osi
.sec
= input_bfd
->sections
;
18081 osi
.sec
= osi
.sec
->next
)
18083 if (osi
.sec
->output_section
!= NULL
18084 && ((osi
.sec
->output_section
->flags
& (SEC_ALLOC
| SEC_CODE
))
18086 && (osi
.sec
->flags
& (SEC_HAS_CONTENTS
| SEC_LINKER_CREATED
))
18087 == SEC_HAS_CONTENTS
18088 && get_arm_elf_section_data (osi
.sec
) != NULL
18089 && get_arm_elf_section_data (osi
.sec
)->mapcount
== 0
18090 && osi
.sec
->size
> 0
18091 && (osi
.sec
->flags
& SEC_EXCLUDE
) == 0)
18093 osi
.sec_shndx
= _bfd_elf_section_from_bfd_section
18094 (output_bfd
, osi
.sec
->output_section
);
18095 if (osi
.sec_shndx
!= (int)SHN_BAD
)
18096 elf32_arm_output_map_sym (&osi
, ARM_MAP_DATA
, 0);
18101 /* ARM->Thumb glue. */
18102 if (htab
->arm_glue_size
> 0)
18104 osi
.sec
= bfd_get_linker_section (htab
->bfd_of_glue_owner
,
18105 ARM2THUMB_GLUE_SECTION_NAME
);
18107 osi
.sec_shndx
= _bfd_elf_section_from_bfd_section
18108 (output_bfd
, osi
.sec
->output_section
);
18109 if (bfd_link_pic (info
) || htab
->root
.is_relocatable_executable
18110 || htab
->pic_veneer
)
18111 size
= ARM2THUMB_PIC_GLUE_SIZE
;
18112 else if (htab
->use_blx
)
18113 size
= ARM2THUMB_V5_STATIC_GLUE_SIZE
;
18115 size
= ARM2THUMB_STATIC_GLUE_SIZE
;
18117 for (offset
= 0; offset
< htab
->arm_glue_size
; offset
+= size
)
18119 elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, offset
);
18120 elf32_arm_output_map_sym (&osi
, ARM_MAP_DATA
, offset
+ size
- 4);
18124 /* Thumb->ARM glue. */
18125 if (htab
->thumb_glue_size
> 0)
18127 osi
.sec
= bfd_get_linker_section (htab
->bfd_of_glue_owner
,
18128 THUMB2ARM_GLUE_SECTION_NAME
);
18130 osi
.sec_shndx
= _bfd_elf_section_from_bfd_section
18131 (output_bfd
, osi
.sec
->output_section
);
18132 size
= THUMB2ARM_GLUE_SIZE
;
18134 for (offset
= 0; offset
< htab
->thumb_glue_size
; offset
+= size
)
18136 elf32_arm_output_map_sym (&osi
, ARM_MAP_THUMB
, offset
);
18137 elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, offset
+ 4);
18141 /* ARMv4 BX veneers. */
18142 if (htab
->bx_glue_size
> 0)
18144 osi
.sec
= bfd_get_linker_section (htab
->bfd_of_glue_owner
,
18145 ARM_BX_GLUE_SECTION_NAME
);
18147 osi
.sec_shndx
= _bfd_elf_section_from_bfd_section
18148 (output_bfd
, osi
.sec
->output_section
);
18150 elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, 0);
18153 /* Long-call stubs.  */
18154 if (htab
->stub_bfd
&& htab
->stub_bfd
->sections
)
18156 asection
* stub_sec
;
18158 for (stub_sec
= htab
->stub_bfd
->sections
;
18160 stub_sec
= stub_sec
->next
)
18162 /* Ignore non-stub sections. */
18163 if (!strstr (stub_sec
->name
, STUB_SUFFIX
))
18166 osi
.sec
= stub_sec
;
18168 osi
.sec_shndx
= _bfd_elf_section_from_bfd_section
18169 (output_bfd
, osi
.sec
->output_section
);
18171 bfd_hash_traverse (&htab
->stub_hash_table
, arm_map_one_stub
, &osi
);
18175 /* Finally, output mapping symbols for the PLT. */
18176 if (htab
->root
.splt
&& htab
->root
.splt
->size
> 0)
18178 osi
.sec
= htab
->root
.splt
;
18179 osi
.sec_shndx
= (_bfd_elf_section_from_bfd_section
18180 (output_bfd
, osi
.sec
->output_section
));
18182 /* Output mapping symbols for the plt header. SymbianOS does not have a
18184 if (htab
->vxworks_p
)
18186 /* VxWorks shared libraries have no PLT header. */
18187 if (!bfd_link_pic (info
))
18189 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, 0))
18191 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_DATA
, 12))
18195 else if (htab
->nacl_p
)
18197 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, 0))
18200 else if (using_thumb_only (htab
) && !htab
->fdpic_p
)
18202 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_THUMB
, 0))
18204 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_DATA
, 12))
18206 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_THUMB
, 16))
18209 else if (!htab
->symbian_p
&& !htab
->fdpic_p
)
18211 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, 0))
18213 #ifndef FOUR_WORD_PLT
18214 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_DATA
, 16))
18219 if (htab
->nacl_p
&& htab
->root
.iplt
&& htab
->root
.iplt
->size
> 0)
18221 /* NaCl uses a special first entry in .iplt too. */
18222 osi
.sec
= htab
->root
.iplt
;
18223 osi
.sec_shndx
= (_bfd_elf_section_from_bfd_section
18224 (output_bfd
, osi
.sec
->output_section
));
18225 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, 0))
18228 if ((htab
->root
.splt
&& htab
->root
.splt
->size
> 0)
18229 || (htab
->root
.iplt
&& htab
->root
.iplt
->size
> 0))
18231 elf_link_hash_traverse (&htab
->root
, elf32_arm_output_plt_map
, &osi
);
18232 for (input_bfd
= info
->input_bfds
;
18234 input_bfd
= input_bfd
->link
.next
)
18236 struct arm_local_iplt_info
**local_iplt
;
18237 unsigned int i
, num_syms
;
18239 local_iplt
= elf32_arm_local_iplt (input_bfd
);
18240 if (local_iplt
!= NULL
)
18242 num_syms
= elf_symtab_hdr (input_bfd
).sh_info
;
18243 for (i
= 0; i
< num_syms
; i
++)
18244 if (local_iplt
[i
] != NULL
18245 && !elf32_arm_output_plt_map_1 (&osi
, TRUE
,
18246 &local_iplt
[i
]->root
,
18247 &local_iplt
[i
]->arm
))
18252 if (htab
->dt_tlsdesc_plt
!= 0)
18254 /* Mapping symbols for the lazy tls trampoline. */
18255 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, htab
->dt_tlsdesc_plt
))
18258 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_DATA
,
18259 htab
->dt_tlsdesc_plt
+ 24))
18262 if (htab
->tls_trampoline
!= 0)
18264 /* Mapping symbols for the tls trampoline. */
18265 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, htab
->tls_trampoline
))
18267 #ifdef FOUR_WORD_PLT
18268 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_DATA
,
18269 htab
->tls_trampoline
+ 12))
18277 /* Filter normal symbols of CMSE entry functions of ABFD to include in
18278 the import library.  All SYMCOUNT symbols of ABFD can be examined
18279 from their pointers in SYMS.  Pointers of symbols to keep should be
18280 stored contiguously at the beginning of that array.
18282 Returns the number of symbols to keep.  */
18284 static unsigned int
18285 elf32_arm_filter_cmse_symbols (bfd
*abfd ATTRIBUTE_UNUSED
,
18286 struct bfd_link_info
*info
,
18287 asymbol
**syms
, long symcount
)
18291 long src_count
, dst_count
= 0;
18292 struct elf32_arm_link_hash_table
*htab
;
18294 htab
= elf32_arm_hash_table (info
);
18295 if (!htab
->stub_bfd
|| !htab
->stub_bfd
->sections
)
18299 cmse_name
= (char *) bfd_malloc (maxnamelen
);
18300 for (src_count
= 0; src_count
< symcount
; src_count
++)
18302 struct elf32_arm_link_hash_entry
*cmse_hash
;
18308 sym
= syms
[src_count
];
18309 flags
= sym
->flags
;
18310 name
= (char *) bfd_asymbol_name (sym
);
18312 if ((flags
& BSF_FUNCTION
) != BSF_FUNCTION
)
18314 if (!(flags
& (BSF_GLOBAL
| BSF_WEAK
)))
18317 namelen
= strlen (name
) + sizeof (CMSE_PREFIX
) + 1;
18318 if (namelen
> maxnamelen
)
18320 cmse_name
= (char *)
18321 bfd_realloc (cmse_name
, namelen
);
18322 maxnamelen
= namelen
;
18324 snprintf (cmse_name
, maxnamelen
, "%s%s", CMSE_PREFIX
, name
);
18325 cmse_hash
= (struct elf32_arm_link_hash_entry
*)
18326 elf_link_hash_lookup (&(htab
)->root
, cmse_name
, FALSE
, FALSE
, TRUE
);
18329 || (cmse_hash
->root
.root
.type
!= bfd_link_hash_defined
18330 && cmse_hash
->root
.root
.type
!= bfd_link_hash_defweak
)
18331 || cmse_hash
->root
.type
!= STT_FUNC
)
18334 if (!ARM_GET_SYM_CMSE_SPCL (cmse_hash
->root
.target_internal
))
18337 syms
[dst_count
++] = sym
;
18341 syms
[dst_count
] = NULL
;
18346 /* Filter symbols of ABFD to include in the import library.  All
18347 SYMCOUNT symbols of ABFD can be examined from their pointers in
18348 SYMS.  Pointers of symbols to keep should be stored contiguously at
18349 the beginning of that array.
18351 Returns the number of symbols to keep.  */
18353 static unsigned int
18354 elf32_arm_filter_implib_symbols (bfd
*abfd ATTRIBUTE_UNUSED
,
18355 struct bfd_link_info
*info
,
18356 asymbol
**syms
, long symcount
)
18358 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (info
);
18360 /* Requirement 8 of "ARM v8-M Security Extensions: Requirements on
18361 Development Tools" (ARM-ECM-0359818) mandates Secure Gateway import
18362 library to be a relocatable object file. */
18363 BFD_ASSERT (!(bfd_get_file_flags (info
->out_implib_bfd
) & EXEC_P
));
18364 if (globals
->cmse_implib
)
18365 return elf32_arm_filter_cmse_symbols (abfd
, info
, syms
, symcount
);
18367 return _bfd_elf_filter_global_symbols (abfd
, info
, syms
, symcount
);
18370 /* Allocate target specific section data.  */
18373 elf32_arm_new_section_hook (bfd *abfd, asection *sec)
18375 if (!sec->used_by_bfd)
18377 _arm_elf_section_data *sdata;
18378 bfd_size_type amt = sizeof (*sdata);
18380 sdata = (_arm_elf_section_data *) bfd_zalloc (abfd, amt);
18383 sec->used_by_bfd = sdata;
18386 return _bfd_elf_new_section_hook (abfd, sec);
18390 /* Used to order a list of mapping symbols by address.  */
18393 elf32_arm_compare_mapping (const void * a, const void * b)
18395 const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
18396 const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;
18398 if (amap->vma > bmap->vma)
18400 else if (amap->vma < bmap->vma)
18402 else if (amap->type > bmap->type)
18403 /* Ensure results do not depend on the host qsort for objects with
18404 multiple mapping symbols at the same address by sorting on type after vma.  */
18407 else if (amap->type < bmap->type)
18413 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
18415 static unsigned long
18416 offset_prel31 (unsigned long addr, bfd_vma offset)
18418 return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
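/* Worked examples (hand-computed): offset_prel31 (0x80000010, 0x20) yields
   0x80000030 -- bit 31 is preserved while the low 31 bits are adjusted --
   and offset_prel31 (0x7ffffff0, 0x20) yields 0x00000010, showing that the
   addition wraps within the 31-bit field.  */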
18421 /* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31 relocations.  */
18425 copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
18427 unsigned long first_word = bfd_get_32 (output_bfd, from);
18428 unsigned long second_word = bfd_get_32 (output_bfd, from + 4);
18430 /* High bit of first word is supposed to be zero.  */
18431 if ((first_word & 0x80000000ul) == 0)
18432 first_word = offset_prel31 (first_word, offset);
18434 /* If the high bit of the second word is clear, and the bit pattern is not 0x1
18435 (EXIDX_CANTUNWIND), this is an offset to an .ARM.extab entry.  */
18436 if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
18437 second_word = offset_prel31 (second_word, offset);
18439 bfd_put_32 (output_bfd, first_word, to);
18440 bfd_put_32 (output_bfd, second_word, to + 4);
18443 /* Data for make_branch_to_a8_stub(). */
18445 struct a8_branch_to_stub_data
18447 asection
*writing_section
;
18448 bfd_byte
*contents
;
18452 /* Helper to insert branches to Cortex-A8 erratum stubs in the right
18453 places for a particular section. */
18456 make_branch_to_a8_stub (struct bfd_hash_entry
*gen_entry
,
18459 struct elf32_arm_stub_hash_entry
*stub_entry
;
18460 struct a8_branch_to_stub_data
*data
;
18461 bfd_byte
*contents
;
18462 unsigned long branch_insn
;
18463 bfd_vma veneered_insn_loc
, veneer_entry_loc
;
18464 bfd_signed_vma branch_offset
;
18468 stub_entry
= (struct elf32_arm_stub_hash_entry
*) gen_entry
;
18469 data
= (struct a8_branch_to_stub_data
*) in_arg
;
18471 if (stub_entry
->target_section
!= data
->writing_section
18472 || stub_entry
->stub_type
< arm_stub_a8_veneer_lwm
)
18475 contents
= data
->contents
;
18477 /* We use target_section as Cortex-A8 erratum workaround stubs are only
18478 generated when both source and target are in the same section. */
18479 veneered_insn_loc
= stub_entry
->target_section
->output_section
->vma
18480 + stub_entry
->target_section
->output_offset
18481 + stub_entry
->source_value
;
18483 veneer_entry_loc
= stub_entry
->stub_sec
->output_section
->vma
18484 + stub_entry
->stub_sec
->output_offset
18485 + stub_entry
->stub_offset
;
18487 if (stub_entry
->stub_type
== arm_stub_a8_veneer_blx
)
18488 veneered_insn_loc
&= ~3u;
18490 branch_offset
= veneer_entry_loc
- veneered_insn_loc
- 4;
18492 abfd
= stub_entry
->target_section
->owner
;
18493 loc
= stub_entry
->source_value
;
18495 /* We attempt to avoid this condition by setting stubs_always_after_branch
18496 in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
18497 This check is just to be on the safe side... */
18498 if ((veneered_insn_loc
& ~0xfff) == (veneer_entry_loc
& ~0xfff))
18500 _bfd_error_handler (_("%pB: error: Cortex-A8 erratum stub is "
18501 "allocated in unsafe location"), abfd
);
18505 switch (stub_entry
->stub_type
)
18507 case arm_stub_a8_veneer_b
:
18508 case arm_stub_a8_veneer_b_cond
:
18509 branch_insn
= 0xf0009000;
18512 case arm_stub_a8_veneer_blx
:
18513 branch_insn
= 0xf000e800;
18516 case arm_stub_a8_veneer_bl
:
18518 unsigned int i1
, j1
, i2
, j2
, s
;
18520 branch_insn
= 0xf000d000;
18523 if (branch_offset
< -16777216 || branch_offset
> 16777214)
18525 /* There's not much we can do apart from complain if this happens.  */
18527 _bfd_error_handler (_("%pB: error: Cortex-A8 erratum stub out "
18528 "of range (input file too large)"), abfd
);
18532 /* i1 = not(j1 eor s), so: not i1 = j1 eor s, and therefore
18534 j1 = (not i1) eor s.  */
18536 branch_insn
|= (branch_offset
>> 1) & 0x7ff;
18537 branch_insn
|= ((branch_offset
>> 12) & 0x3ff) << 16;
18538 i2
= (branch_offset
>> 22) & 1;
18539 i1
= (branch_offset
>> 23) & 1;
18540 s
= (branch_offset
>> 24) & 1;
18543 branch_insn
|= j2
<< 11;
18544 branch_insn
|= j1
<< 13;
18545 branch_insn
|= s
<< 26;
18554 bfd_put_16 (abfd
, (branch_insn
>> 16) & 0xffff, &contents
[loc
]);
18555 bfd_put_16 (abfd
, branch_insn
& 0xffff, &contents
[loc
+ 2]);
18560 /* Beginning of stm32l4xx work-around. */
18562 /* Functions encoding instructions necessary for the emission of the
18563 fix-stm32l4xx-629360.
18564 Encoding is extracted from the
18565 ARM (C) Architecture Reference Manual
18566 ARMv7-A and ARMv7-R edition
18567 ARM DDI 0406C.b (ID072512). */
18569 static inline bfd_vma
18570 create_instruction_branch_absolute (int branch_offset
)
18572 /* A8.8.18 B (A8-334)
18573 B target_address (Encoding T4). */
18574 /* 1111 - 0Sii - iiii - iiii - 10J1 - Jiii - iiii - iiii. */
18575 /* jump offset is: S:I1:I2:imm10:imm11:0. */
18576 /* with : I1 = NOT (J1 EOR S) I2 = NOT (J2 EOR S). */
18578 int s
= ((branch_offset
& 0x1000000) >> 24);
18579 int j1
= s
^ !((branch_offset
& 0x800000) >> 23);
18580 int j2
= s
^ !((branch_offset
& 0x400000) >> 22);
18582 if (branch_offset
< -(1 << 24) || branch_offset
>= (1 << 24))
18583 BFD_ASSERT (0 && "Error: branch out of range. Cannot create branch.");
18585 bfd_vma patched_inst
= 0xf0009000
18587 | (((unsigned long) (branch_offset
) >> 12) & 0x3ff) << 16 /* imm10. */
18588 | j1
<< 13 /* J1. */
18589 | j2
<< 11 /* J2. */
18590 | (((unsigned long) (branch_offset
) >> 1) & 0x7ff); /* imm11. */
18592 return patched_inst
;
18595 static inline bfd_vma
18596 create_instruction_ldmia (int base_reg
, int wback
, int reg_mask
)
18598 /* A8.8.57 LDM/LDMIA/LDMFD (A8-396)
18599 LDMIA Rn!, {Ra, Rb, Rc, ...} (Encoding T2). */
18600 bfd_vma patched_inst
= 0xe8900000
18601 | (/*W=*/wback
<< 21)
18603 | (reg_mask
& 0x0000ffff);
18605 return patched_inst
;
18608 static inline bfd_vma
18609 create_instruction_ldmdb (int base_reg
, int wback
, int reg_mask
)
18611 /* A8.8.60 LDMDB/LDMEA (A8-402)
18612 LDMDB Rn!, {Ra, Rb, Rc, ...} (Encoding T1). */
18613 bfd_vma patched_inst
= 0xe9100000
18614 | (/*W=*/wback
<< 21)
18616 | (reg_mask
& 0x0000ffff);
18618 return patched_inst
;
18621 static inline bfd_vma
18622 create_instruction_mov (int target_reg
, int source_reg
)
18624 /* A8.8.103 MOV (register) (A8-486)
18625 MOV Rd, Rm (Encoding T1). */
18626 bfd_vma patched_inst
= 0x4600
18627 | (target_reg
& 0x7)
18628 | ((target_reg
& 0x8) >> 3) << 7
18629 | (source_reg
<< 3);
18631 return patched_inst
;
18634 static inline bfd_vma
18635 create_instruction_sub (int target_reg
, int source_reg
, int value
)
18637 /* A8.8.221 SUB (immediate) (A8-708)
18638 SUB Rd, Rn, #value (Encoding T3). */
18639 bfd_vma patched_inst
= 0xf1a00000
18640 | (target_reg
<< 8)
18641 | (source_reg
<< 16)
18643 | ((value
& 0x800) >> 11) << 26
18644 | ((value
& 0x700) >> 8) << 12
18647 return patched_inst
;
18650 static inline bfd_vma
18651 create_instruction_vldmia (int base_reg
, int is_dp
, int wback
, int num_words
,
18654 /* A8.8.332 VLDM (A8-922)
18655 VLDM{MODE} Rn{!}, {list} (Encoding T1 or T2).  */
18656 bfd_vma patched_inst
= (is_dp
? 0xec900b00 : 0xec900a00)
18657 | (/*W=*/wback
<< 21)
18659 | (num_words
& 0x000000ff)
18660 | (((unsigned)first_reg
>> 1) & 0x0000000f) << 12
18661 | (first_reg
& 0x00000001) << 22;
18663 return patched_inst
;
18666 static inline bfd_vma
18667 create_instruction_vldmdb (int base_reg
, int is_dp
, int num_words
,
18670 /* A8.8.332 VLDM (A8-922)
18671 VLDM{MODE} Rn!, {list} (Encoding T1 or T2).  */
18672 bfd_vma patched_inst
= (is_dp
? 0xed300b00 : 0xed300a00)
18674 | (num_words
& 0x000000ff)
18675 | (((unsigned)first_reg
>>1 ) & 0x0000000f) << 12
18676 | (first_reg
& 0x00000001) << 22;
18678 return patched_inst
;
18681 static inline bfd_vma
18682 create_instruction_udf_w (int value
)
18684 /* A8.8.247 UDF (A8-758)
18685 Undefined (Encoding T2). */
18686 bfd_vma patched_inst
= 0xf7f0a000
18687 | (value
& 0x00000fff)
18688 | (value
& 0x000f0000) << 16;
18690 return patched_inst
;
18693 static inline bfd_vma
18694 create_instruction_udf (int value
)
18696 /* A8.8.247 UDF (A8-758)
18697 Undefined (Encoding T1). */
18698 bfd_vma patched_inst
= 0xde00
18701 return patched_inst
;
18704 /* Functions writing an instruction in memory, returning the next
18705 memory position to write to. */
18707 static inline bfd_byte
*
18708 push_thumb2_insn32 (struct elf32_arm_link_hash_table
* htab
,
18709 bfd
* output_bfd
, bfd_byte
*pt
, insn32 insn
)
18711 put_thumb2_insn (htab
, output_bfd
, insn
, pt
);
18715 static inline bfd_byte
*
18716 push_thumb2_insn16 (struct elf32_arm_link_hash_table
* htab
,
18717 bfd
* output_bfd
, bfd_byte
*pt
, insn32 insn
)
18719 put_thumb_insn (htab
, output_bfd
, insn
, pt
);
18723 /* Function filling up a region in memory with T1 and T2 UDFs taking
18724 care of alignment. */
18727 stm32l4xx_fill_stub_udf (struct elf32_arm_link_hash_table
* htab
,
18729 const bfd_byte
* const base_stub_contents
,
18730 bfd_byte
* const from_stub_contents
,
18731 const bfd_byte
* const end_stub_contents
)
18733 bfd_byte
*current_stub_contents
= from_stub_contents
;
18735 /* Fill the remainder of the stub with deterministic contents (UDF instructions).
18737 Check if realignment is needed on a modulo-4 frontier using T1, so that T2 can be used for the rest.  */
18739 if ((current_stub_contents
< end_stub_contents
)
18740 && !((current_stub_contents
- base_stub_contents
) % 2)
18741 && ((current_stub_contents
- base_stub_contents
) % 4))
18742 current_stub_contents
=
18743 push_thumb2_insn16 (htab
, output_bfd
, current_stub_contents
,
18744 create_instruction_udf (0));
18746 for (; current_stub_contents
< end_stub_contents
;)
18747 current_stub_contents
=
18748 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
18749 create_instruction_udf_w (0));
18751 return current_stub_contents
;
18754 /* Functions writing the stream of instructions equivalent to the
18755 derived sequence for ldmia, ldmdb, vldm respectively. */
18758 stm32l4xx_create_replacing_stub_ldmia (struct elf32_arm_link_hash_table
* htab
,
18760 const insn32 initial_insn
,
18761 const bfd_byte
*const initial_insn_addr
,
18762 bfd_byte
*const base_stub_contents
)
18764 int wback
= (initial_insn
& 0x00200000) >> 21;
18765 int ri
, rn
= (initial_insn
& 0x000F0000) >> 16;
18766 int insn_all_registers
= initial_insn
& 0x0000ffff;
18767 int insn_low_registers
, insn_high_registers
;
18768 int usable_register_mask
;
18769 int nb_registers
= elf32_arm_popcount (insn_all_registers
);
18770 int restore_pc
= (insn_all_registers
& (1 << 15)) ? 1 : 0;
18771 int restore_rn
= (insn_all_registers
& (1 << rn
)) ? 1 : 0;
18772 bfd_byte
*current_stub_contents
= base_stub_contents
;
18774 BFD_ASSERT (is_thumb2_ldmia (initial_insn
));
18776 /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
18777 load sequences of fewer than 8 registers that do not cause the hardware issue.  */
18779 if (nb_registers
<= 8)
18781 /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}. */
18782 current_stub_contents
=
18783 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
18786 /* B initial_insn_addr+4. */
18788 current_stub_contents
=
18789 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
18790 create_instruction_branch_absolute
18791 (initial_insn_addr
- current_stub_contents
));
18793 /* Fill the remainder of the stub with deterministic contents.  */
18794 current_stub_contents
=
18795 stm32l4xx_fill_stub_udf (htab
, output_bfd
,
18796 base_stub_contents
, current_stub_contents
,
18797 base_stub_contents
+
18798 STM32L4XX_ERRATUM_LDM_VENEER_SIZE
);
18803 /* - reg_list[13] == 0. */
18804 BFD_ASSERT ((insn_all_registers
& (1 << 13))==0);
18806 /* - reg_list[14] & reg_list[15] != 1. */
18807 BFD_ASSERT ((insn_all_registers
& 0xC000) != 0xC000);
18809 /* - if (wback==1) reg_list[rn] == 0. */
18810 BFD_ASSERT (!wback
|| !restore_rn
);
18812 /* - nb_registers > 8. */
18813 BFD_ASSERT (elf32_arm_popcount (insn_all_registers
) > 8);
18815 /* At this point, LDMxx initial insn loads between 9 and 14 registers. */
18817 /* In the following algorithm, we split this wide LDM using 2 LDM insns:
18818 - One with the 7 lowest registers (register mask 0x007F)
18819 This LDM will finally contain between 2 and 7 registers
18820 - One with the 7 highest registers (register mask 0xDF80)
18821 This LDM will finally contain between 2 and 7 registers.  */
18822 insn_low_registers
= insn_all_registers
& 0x007F;
18823 insn_high_registers
= insn_all_registers
& 0xDF80;
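/* Illustrative example (hand-computed): for "ldmia r0!, {r1-r10, r12, lr}"
   the register mask is 0x57fe, so insn_low_registers = 0x007e (r1-r6) and
   insn_high_registers = 0x5780 (r7-r10, r12, lr); the wide load is then
   replaced by two LDMs of six registers each.  */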
18825 /* A spare register may be needed during this veneer to temporarily
18826 handle the base register. This register will be restored with the
18827 last LDM operation.
18828 The usable register may be any general purpose register (that
18829 excludes PC, SP, LR : register mask is 0x1FFF). */
18830 usable_register_mask
= 0x1FFF;
18832 /* Generate the stub function. */
18835 /* LDMIA Rn!, {R-low-register-list} : (Encoding T2). */
18836 current_stub_contents
=
18837 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
18838 create_instruction_ldmia
18839 (rn
, /*wback=*/1, insn_low_registers
));
18841 /* LDMIA Rn!, {R-high-register-list} : (Encoding T2). */
18842 current_stub_contents
=
18843 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
18844 create_instruction_ldmia
18845 (rn
, /*wback=*/1, insn_high_registers
));
18848 /* B initial_insn_addr+4. */
18849 current_stub_contents
=
18850 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
18851 create_instruction_branch_absolute
18852 (initial_insn_addr
- current_stub_contents
));
18855 else /* if (!wback). */
18859 /* If Rn is not part of the high-register-list, move it there. */
18860 if (!(insn_high_registers
& (1 << rn
)))
18862 /* Choose a Ri in the high-register-list that will be restored. */
18863 ri
= ctz (insn_high_registers
& usable_register_mask
& ~(1 << rn
));
18866 current_stub_contents
=
18867 push_thumb2_insn16 (htab
, output_bfd
, current_stub_contents
,
18868 create_instruction_mov (ri
, rn
));
18871 /* LDMIA Ri!, {R-low-register-list} : (Encoding T2). */
18872 current_stub_contents
=
18873 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
18874 create_instruction_ldmia
18875 (ri
, /*wback=*/1, insn_low_registers
));
18877 /* LDMIA Ri, {R-high-register-list} : (Encoding T2). */
18878 current_stub_contents
=
18879 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
18880 create_instruction_ldmia
18881 (ri
, /*wback=*/0, insn_high_registers
));
18885 /* B initial_insn_addr+4. */
18886 current_stub_contents
=
18887 push_thumb2_insn32 (htab
, output_bfd
, current_stub_contents
,
18888 create_instruction_branch_absolute
18889 (initial_insn_addr
- current_stub_contents
));
18893 /* Fill the remaining of the stub with deterministic contents. */
18894 current_stub_contents
=
18895 stm32l4xx_fill_stub_udf (htab
, output_bfd
,
18896 base_stub_contents
, current_stub_contents
,
18897 base_stub_contents
+
18898 STM32L4XX_ERRATUM_LDM_VENEER_SIZE
);
static void
stm32l4xx_create_replacing_stub_ldmdb (struct elf32_arm_link_hash_table * htab,
				       bfd * output_bfd,
				       const insn32 initial_insn,
				       const bfd_byte *const initial_insn_addr,
				       bfd_byte *const base_stub_contents)
{
  int wback = (initial_insn & 0x00200000) >> 21;
  int ri, rn = (initial_insn & 0x000f0000) >> 16;
  int insn_all_registers = initial_insn & 0x0000ffff;
  int insn_low_registers, insn_high_registers;
  int usable_register_mask;
  int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
  int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
  int nb_registers = elf32_arm_popcount (insn_all_registers);
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_ldmdb (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 registers load sequences that do not cause the
     hardware issue.  */
  if (nb_registers <= 8)
    {
      /* UNTOUCHED : LDMDB Rn{!}, {R-all-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));

      /* Fill the remaining of the stub with deterministic contents.  */
      current_stub_contents =
	stm32l4xx_fill_stub_udf (htab, output_bfd,
				 base_stub_contents, current_stub_contents,
				 base_stub_contents +
				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

      return;
    }

  /* - reg_list[13] == 0.  */
  BFD_ASSERT ((insn_all_registers & (1 << 13)) == 0);

  /* - reg_list[14] & reg_list[15] != 1.  */
  BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);

  /* - if (wback==1) reg_list[rn] == 0.  */
  BFD_ASSERT (!wback || !restore_rn);

  /* - nb_registers > 8.  */
  BFD_ASSERT (elf32_arm_popcount (insn_all_registers) > 8);

  /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */

  /* In the following algorithm, we split this wide LDM using 2 LDM insns:
     - One with the 7 lowest registers (register mask 0x007F)
       This LDM will finally contain between 2 and 7 registers
     - One with the 7 highest registers (register mask 0xDF80)
       This ldm will finally contain between 2 and 7 registers.  */
  insn_low_registers = insn_all_registers & 0x007F;
  insn_high_registers = insn_all_registers & 0xDF80;

  /* A spare register may be needed during this veneer to temporarily
     handle the base register.  This register will be restored with
     the last LDM operation.
     The usable register may be any general purpose register (that excludes
     PC, SP, LR : register mask is 0x1FFF).  */
  usable_register_mask = 0x1FFF;

  /* Generate the stub function.  */
  if (!wback && !restore_pc && !restore_rn)
    {
      /* Choose a Ri in the low-register-list that will be restored.  */
      ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));

      /* MOV Ri, Rn.  */
      current_stub_contents =
	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
			    create_instruction_mov (ri, rn));

      /* LDMDB Ri!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/1, insn_high_registers));

      /* LDMDB Ri, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/0, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (wback && !restore_pc && !restore_rn)
    {
      /* LDMDB Rn!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (rn, /*wback=*/1, insn_high_registers));

      /* LDMDB Rn!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (rn, /*wback=*/1, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (!wback && restore_pc && !restore_rn)
    {
      /* Choose a Ri in the high-register-list that will be restored.  */
      ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

      /* SUB Ri, Rn, #(4*nb_registers).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (ri, rn, (4 * nb_registers)));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (wback && restore_pc && !restore_rn)
    {
      /* Choose a Ri in the high-register-list that will be restored.  */
      ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

      /* SUB Rn, Rn, #(4*nb_registers).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (rn, rn, (4 * nb_registers)));

      /* MOV Ri, Rn.  */
      current_stub_contents =
	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
			    create_instruction_mov (ri, rn));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (!wback && !restore_pc && restore_rn)
    {
      ri = rn;
      if (!(insn_low_registers & (1 << rn)))
	{
	  /* Choose a Ri in the low-register-list that will be restored.  */
	  ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));

	  /* MOV Ri, Rn.  */
	  current_stub_contents =
	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
				create_instruction_mov (ri, rn));
	}

      /* LDMDB Ri!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/1, insn_high_registers));

      /* LDMDB Ri, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/0, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (!wback && restore_pc && restore_rn)
    {
      ri = rn;
      if (!(insn_high_registers & (1 << rn)))
	{
	  /* Choose a Ri in the high-register-list that will be restored.  */
	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
	}

      /* SUB Ri, Rn, #(4*nb_registers).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (ri, rn, (4 * nb_registers)));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (wback && restore_rn)
    {
      /* The assembler should not have accepted to encode this.  */
      BFD_ASSERT (0 && "Cannot patch an instruction that has an "
		  "undefined behavior.\n");
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
}
static void
stm32l4xx_create_replacing_stub_vldm (struct elf32_arm_link_hash_table * htab,
				      bfd * output_bfd,
				      const insn32 initial_insn,
				      const bfd_byte *const initial_insn_addr,
				      bfd_byte *const base_stub_contents)
{
  int num_words = ((unsigned int) initial_insn << 24) >> 24;
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_vldm (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 words load sequences that do not cause the
     hardware issue.  */
  if (num_words <= 8)
    {
      /* Untouched instruction.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else
    {
      bfd_boolean is_dp = /* DP encoding.  */
	(initial_insn & 0xfe100f00) == 0xec100b00;
      bfd_boolean is_ia_nobang = /* (IA without !).  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x4;
      bfd_boolean is_ia_bang = /* (IA with !) - includes VPOP.  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x5;
      bfd_boolean is_db_bang = /* (DB with !).  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x9;
      int base_reg = ((unsigned int) initial_insn << 12) >> 28;
      /* d = UInt (Vd:D);.  */
      int first_reg = ((((unsigned int) initial_insn << 16) >> 28) << 1)
	| (((unsigned int) initial_insn << 9) >> 31);

      /* Compute the number of 8-words chunks needed to split.  */
      int chunks = (num_words % 8) ? (num_words / 8 + 1) : (num_words / 8);
      int chunk;

      /* The test coverage has been done assuming the following
	 hypothesis that exactly one of the previous is_ predicates is
	 true.  */
      BFD_ASSERT ((is_ia_nobang ^ is_ia_bang ^ is_db_bang)
		  && !(is_ia_nobang & is_ia_bang & is_db_bang));

      /* We treat the cutting of the words in one pass for all
	 cases, then we emit the adjustments:

	 vldm rx, {...}
	 -> vldm rx!, {8_words_or_less} for each needed 8_word
	 -> sub rx, rx, #size (list)

	 vldm rx!, {...}
	 -> vldm rx!, {8_words_or_less} for each needed 8_word
	 This also handles vpop instruction (when rx is sp)

	 vldmdb rx!, {...}
	 -> vldmdb rx!, {8_words_or_less} for each needed 8_word.  */
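      /* Illustration (not from the original sources): a hypothetical
	 VLDMIA r1!, {d0-d6} transfers num_words = 14 words, so
	 chunks = 2 and the veneer emits
	   VLDMIA r1!, {d0-d3}   (8 words)
	   VLDMIA r1!, {d4-d6}   (6 words)
	 For the no-writeback form a final SUB r1, r1, #56
	 (4 * num_words) would then restore the base register.  */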
      for (chunk = 0; chunk < chunks; ++chunk)
	{
	  bfd_vma new_insn = 0;

	  if (is_ia_nobang || is_ia_bang)
	    {
	      new_insn = create_instruction_vldmia
		(base_reg,
		 is_dp,
		 /*wback= */1,
		 chunks - (chunk + 1) ?
		 8 : num_words - chunk * 8,
		 first_reg + chunk * 8);
	    }
	  else if (is_db_bang)
	    {
	      new_insn = create_instruction_vldmdb
		(base_reg,
		 is_dp,
		 chunks - (chunk + 1) ?
		 8 : num_words - chunk * 8,
		 first_reg + chunk * 8);
	    }

	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				new_insn);
	}

      /* Only this case requires the base register compensation
	 subtract.  */
      if (is_ia_nobang)
	{
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_sub
				(base_reg, base_reg, 4*num_words));
	}

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
}
static void
stm32l4xx_create_replacing_stub (struct elf32_arm_link_hash_table * htab,
				 bfd * output_bfd,
				 const insn32 wrong_insn,
				 const bfd_byte *const wrong_insn_addr,
				 bfd_byte *const stub_contents)
{
  if (is_thumb2_ldmia (wrong_insn))
    stm32l4xx_create_replacing_stub_ldmia (htab, output_bfd,
					   wrong_insn, wrong_insn_addr,
					   stub_contents);
  else if (is_thumb2_ldmdb (wrong_insn))
    stm32l4xx_create_replacing_stub_ldmdb (htab, output_bfd,
					   wrong_insn, wrong_insn_addr,
					   stub_contents);
  else if (is_thumb2_vldm (wrong_insn))
    stm32l4xx_create_replacing_stub_vldm (htab, output_bfd,
					  wrong_insn, wrong_insn_addr,
					  stub_contents);
}

/* End of stm32l4xx work-around.  */
/* Do code byteswapping.  Return FALSE afterwards so that the section is
   written out as normal.  */

static bfd_boolean
elf32_arm_write_section (bfd *output_bfd,
			 struct bfd_link_info *link_info,
			 asection *sec,
			 bfd_byte *contents)
{
  unsigned int mapcount, errcount;
  _arm_elf_section_data *arm_data;
  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
  elf32_arm_section_map *map;
  elf32_vfp11_erratum_list *errnode;
  elf32_stm32l4xx_erratum_list *stm32l4xx_errnode;
  bfd_vma ptr;
  bfd_vma end;
  bfd_vma offset = sec->output_section->vma + sec->output_offset;
  bfd_byte tmp;
  unsigned int i;

  if (globals == NULL)
    return FALSE;

  /* If this section has not been allocated an _arm_elf_section_data
     structure then we cannot record anything.  */
  arm_data = get_arm_elf_section_data (sec);
  if (arm_data == NULL)
    return FALSE;

  mapcount = arm_data->mapcount;
  map = arm_data->map;
  errcount = arm_data->erratumcount;

  if (errcount != 0)
    {
      unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;

      for (errnode = arm_data->erratumlist; errnode != 0;
	   errnode = errnode->next)
	{
	  bfd_vma target = errnode->vma - offset;

	  switch (errnode->type)
	    {
	    case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
	      {
		bfd_vma branch_to_veneer;
		/* Original condition code of instruction, plus bit mask for
		   ARM B instruction.  */
		unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
				    | 0x0a000000;

		/* The instruction is before the label.  */
		target -= 4;

		/* Above offset included in -4 below.  */
		branch_to_veneer = errnode->u.b.veneer->vma
				   - errnode->vma - 4;

		if ((signed) branch_to_veneer < -(1 << 25)
		    || (signed) branch_to_veneer >= (1 << 25))
		  _bfd_error_handler (_("%pB: error: VFP11 veneer out of "
					"range"), output_bfd);

		insn |= (branch_to_veneer >> 2) & 0xffffff;
		contents[endianflip ^ target] = insn & 0xff;
		contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
		contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
		contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
	      }
	      break;

	    case VFP11_ERRATUM_ARM_VENEER:
	      {
		bfd_vma branch_from_veneer;
		unsigned int insn;

		/* Take size of veneer into account.  */
		branch_from_veneer = errnode->u.v.branch->vma
				     - errnode->vma - 12;

		if ((signed) branch_from_veneer < -(1 << 25)
		    || (signed) branch_from_veneer >= (1 << 25))
		  _bfd_error_handler (_("%pB: error: VFP11 veneer out of "
					"range"), output_bfd);

		/* Original instruction.  */
		insn = errnode->u.v.branch->u.b.vfp_insn;
		contents[endianflip ^ target] = insn & 0xff;
		contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
		contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
		contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;

		/* Branch back to insn after original insn.  */
		insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
		contents[endianflip ^ (target + 4)] = insn & 0xff;
		contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff;
		contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff;
		contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff;
	      }
	      break;
	    }
	}
    }

  if (arm_data->stm32l4xx_erratumcount != 0)
    {
      for (stm32l4xx_errnode = arm_data->stm32l4xx_erratumlist;
	   stm32l4xx_errnode != 0;
	   stm32l4xx_errnode = stm32l4xx_errnode->next)
	{
	  bfd_vma target = stm32l4xx_errnode->vma - offset;

	  switch (stm32l4xx_errnode->type)
	    {
	    case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
	      {
		unsigned int insn;
		bfd_vma branch_to_veneer =
		  stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma;

		if ((signed) branch_to_veneer < -(1 << 24)
		    || (signed) branch_to_veneer >= (1 << 24))
		  {
		    bfd_vma out_of_range =
		      ((signed) branch_to_veneer < -(1 << 24)) ?
		      - branch_to_veneer - (1 << 24) :
		      ((signed) branch_to_veneer >= (1 << 24)) ?
		      branch_to_veneer - (1 << 24) : 0;

		    _bfd_error_handler
		      (_("%pB(%#" PRIx64 "): error: "
			 "cannot create STM32L4XX veneer; "
			 "jump out of range by %" PRId64 " bytes; "
			 "cannot encode branch instruction"),
		       output_bfd,
		       (uint64_t) (stm32l4xx_errnode->vma - 4),
		       (int64_t) out_of_range);
		  }

		insn = create_instruction_branch_absolute
		  (stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma);

		/* The instruction is before the label.  */
		target -= 4;

		put_thumb2_insn (globals, output_bfd,
				 (bfd_vma) insn, contents + target);
	      }
	      break;

	    case STM32L4XX_ERRATUM_VENEER:
	      {
		bfd_byte * veneer;
		bfd_byte * veneer_r;
		unsigned int insn;

		veneer = contents + target;
		veneer_r = veneer
		  + stm32l4xx_errnode->u.b.veneer->vma
		  - stm32l4xx_errnode->vma - 4;

		if ((signed) (veneer_r - veneer -
			      STM32L4XX_ERRATUM_VLDM_VENEER_SIZE >
			      STM32L4XX_ERRATUM_LDM_VENEER_SIZE ?
			      STM32L4XX_ERRATUM_VLDM_VENEER_SIZE :
			      STM32L4XX_ERRATUM_LDM_VENEER_SIZE) < -(1 << 24)
		    || (signed) (veneer_r - veneer) >= (1 << 24))
		  _bfd_error_handler (_("%pB: error: cannot create STM32L4XX "
					"veneer"), output_bfd);

		/* Original instruction.  */
		insn = stm32l4xx_errnode->u.v.branch->u.b.insn;

		stm32l4xx_create_replacing_stub
		  (globals, output_bfd, insn, (void*)veneer_r, (void*)veneer);
	      }
	      break;
	    }
	}
    }

  if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
    {
      arm_unwind_table_edit *edit_node
	= arm_data->u.exidx.unwind_edit_list;
      /* Now, sec->size is the size of the section we will write.  The original
	 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
	 markers) was sec->rawsize.  (This isn't the case if we perform no
	 edits, then rawsize will be zero and we should use size).  */
      bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size);
      unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
      unsigned int in_index, out_index;
      bfd_vma add_to_offsets = 0;

      for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
	{
	  if (edit_node)
	    {
	      unsigned int edit_index = edit_node->index;

	      if (in_index < edit_index && in_index * 8 < input_size)
		{
		  copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
				    contents + in_index * 8, add_to_offsets);
		  out_index++;
		  in_index++;
		}
	      else if (in_index == edit_index
		       || (in_index * 8 >= input_size
			   && edit_index == UINT_MAX))
		{
		  switch (edit_node->type)
		    {
		    case DELETE_EXIDX_ENTRY:
		      in_index++;
		      add_to_offsets += 8;
		      break;

		    case INSERT_EXIDX_CANTUNWIND_AT_END:
		      {
			asection *text_sec = edit_node->linked_section;
			bfd_vma text_offset = text_sec->output_section->vma
					      + text_sec->output_offset
					      + text_sec->size;
			bfd_vma exidx_offset = offset + out_index * 8;
			unsigned long prel31_offset;

			/* Note: this is meant to be equivalent to an
			   R_ARM_PREL31 relocation.  These synthetic
			   EXIDX_CANTUNWIND markers are not relocated by the
			   usual BFD method.  */
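			/* Illustration (not from the original sources): with a
			   hypothetical text_offset of 0x8000 and exidx_offset
			   of 0x10000 the stored field would be
			   (0x8000 - 0x10000) & 0x7fffffff = 0x7fff8000,
			   i.e. the signed place-relative offset of the first
			   address that cannot be unwound, truncated to the
			   low 31 bits just as an R_ARM_PREL31 relocation
			   would produce.  */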
			prel31_offset = (text_offset - exidx_offset)
					& 0x7ffffffful;

			if (bfd_link_relocatable (link_info))
			  {
			    /* Here relocation for new EXIDX_CANTUNWIND is
			       created, so there is no need to
			       adjust offset by hand.  */
			    prel31_offset = text_sec->output_offset
					    + text_sec->size;
			  }

			/* First address we can't unwind.  */
			bfd_put_32 (output_bfd, prel31_offset,
				    &edited_contents[out_index * 8]);

			/* Code for EXIDX_CANTUNWIND.  */
			bfd_put_32 (output_bfd, 0x1,
				    &edited_contents[out_index * 8 + 4]);

			out_index++;
			add_to_offsets -= 8;
		      }
		      break;
		    }

		  edit_node = edit_node->next;
		}
	    }
	  else
	    {
	      /* No more edits, copy remaining entries verbatim.  */
	      copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
				contents + in_index * 8, add_to_offsets);
	      out_index++;
	      in_index++;
	    }
	}

      if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
	bfd_set_section_contents (output_bfd, sec->output_section,
				  edited_contents,
				  (file_ptr) sec->output_offset, sec->size);

      return TRUE;
    }

  /* Fix code to point to Cortex-A8 erratum stubs.  */
  if (globals->fix_cortex_a8)
    {
      struct a8_branch_to_stub_data data;

      data.writing_section = sec;
      data.contents = contents;

      bfd_hash_traverse (& globals->stub_hash_table, make_branch_to_a8_stub,
			 & data);
    }

  if (mapcount == 0)
    return FALSE;

  if (globals->byteswap_code)
    {
      qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);

      ptr = map[0].vma;
      for (i = 0; i < mapcount; i++)
	{
	  if (i == mapcount - 1)
	    end = sec->size;
	  else
	    end = map[i + 1].vma;

	  switch (map[i].type)
	    {
	    case 'a':
	      /* Byte swap code words.  */
	      while (ptr + 3 < end)
		{
		  tmp = contents[ptr];
		  contents[ptr] = contents[ptr + 3];
		  contents[ptr + 3] = tmp;
		  tmp = contents[ptr + 1];
		  contents[ptr + 1] = contents[ptr + 2];
		  contents[ptr + 2] = tmp;
		  ptr += 4;
		}
	      break;

	    case 't':
	      /* Byte swap code halfwords.  */
	      while (ptr + 1 < end)
		{
		  tmp = contents[ptr];
		  contents[ptr] = contents[ptr + 1];
		  contents[ptr + 1] = tmp;
		  ptr += 2;
		}
	      break;

	    case 'd':
	      /* Leave data alone.  */
	      break;
	    }
	  ptr = end;
	}
    }

  arm_data->mapcount = -1;
  arm_data->mapsize = 0;
  arm_data->map = NULL;

  return FALSE;
}
/* Mangle thumb function symbols as we read them in.  */
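/* Illustration (not from the original sources): for a hypothetical EABI
   object defining a Thumb function at address 0x8000, the symbol table
   records st_value = 0x8001.  The routine below strips the low bit, so BFD
   sees st_value = 0x8000, and remembers the Thumb-ness by setting
   ST_BRANCH_TO_THUMB in st_target_internal.  */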
static bfd_boolean
elf32_arm_swap_symbol_in (bfd * abfd,
			  const void *psrc,
			  const void *pshn,
			  Elf_Internal_Sym *dst)
{
  Elf_Internal_Shdr *symtab_hdr;
  const char *name = NULL;

  if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
    return FALSE;
  dst->st_target_internal = 0;

  /* New EABI objects mark thumb function symbols by setting the low bit of
     the address.  */
  if (ELF_ST_TYPE (dst->st_info) == STT_FUNC
      || ELF_ST_TYPE (dst->st_info) == STT_GNU_IFUNC)
    {
      if (dst->st_value & 1)
	{
	  dst->st_value &= ~(bfd_vma) 1;
	  ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal,
				   ST_BRANCH_TO_THUMB);
	}
      else
	ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_ARM);
    }
  else if (ELF_ST_TYPE (dst->st_info) == STT_ARM_TFUNC)
    {
      dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_FUNC);
      ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_THUMB);
    }
  else if (ELF_ST_TYPE (dst->st_info) == STT_SECTION)
    ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_LONG);
  else
    ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_UNKNOWN);

  /* Mark CMSE special symbols.  */
  symtab_hdr = & elf_symtab_hdr (abfd);
  if (symtab_hdr->sh_size)
    name = bfd_elf_sym_name (abfd, symtab_hdr, dst, NULL);
  if (name && CONST_STRNEQ (name, CMSE_PREFIX))
    ARM_SET_SYM_CMSE_SPCL (dst->st_target_internal);

  return TRUE;
}
/* Mangle thumb function symbols as we write them out.  */

static void
elf32_arm_swap_symbol_out (bfd *abfd,
			   const Elf_Internal_Sym *src,
			   void *cdst,
			   void *shndx)
{
  Elf_Internal_Sym newsym;

  /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
     of the address set, as per the new EABI.  We do this unconditionally
     because objcopy does not set the elf header flags until after
     it writes out the symbol table.  */
  if (ARM_GET_SYM_BRANCH_TYPE (src->st_target_internal) == ST_BRANCH_TO_THUMB)
    {
      newsym = *src;
      if (ELF_ST_TYPE (src->st_info) != STT_GNU_IFUNC)
	newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
      if (newsym.st_shndx != SHN_UNDEF)
	{
	  /* Do this only for defined symbols.  At link time, the static
	     linker will simulate the work of dynamic linker of resolving
	     symbols and will carry over the thumbness of found symbols to
	     the output symbol table.  It's not clear how it happens, but
	     the thumbness of undefined symbols can well be different at
	     runtime, and writing '1' for them will be confusing for users
	     and possibly for dynamic linker itself.  */
	  newsym.st_value |= 1;
	}

      src = &newsym;
    }
  bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
}
/* Add the PT_ARM_EXIDX program header.  */

static bfd_boolean
elf32_arm_modify_segment_map (bfd *abfd,
			      struct bfd_link_info *info ATTRIBUTE_UNUSED)
{
  struct elf_segment_map *m;
  asection *sec;

  sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
  if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
    {
      /* If there is already a PT_ARM_EXIDX header, then we do not
	 want to add another one.  This situation arises when running
	 "strip"; the input binary already has the header.  */
      m = elf_seg_map (abfd);
      while (m && m->p_type != PT_ARM_EXIDX)
	m = m->next;
      if (!m)
	{
	  m = (struct elf_segment_map *)
	      bfd_zalloc (abfd, sizeof (struct elf_segment_map));
	  if (m == NULL)
	    return FALSE;
	  m->p_type = PT_ARM_EXIDX;
	  m->count = 1;
	  m->sections[0] = sec;

	  m->next = elf_seg_map (abfd);
	  elf_seg_map (abfd) = m;
	}
    }

  return TRUE;
}
/* We may add a PT_ARM_EXIDX program header.  */

static int
elf32_arm_additional_program_headers (bfd *abfd,
				      struct bfd_link_info *info ATTRIBUTE_UNUSED)
{
  asection *sec;

  sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
  if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
    return 1;
  else
    return 0;
}
/* Hook called by the linker routine which adds symbols from an object
   file.  */

static bfd_boolean
elf32_arm_add_symbol_hook (bfd *abfd, struct bfd_link_info *info,
			   Elf_Internal_Sym *sym, const char **namep,
			   flagword *flagsp, asection **secp, bfd_vma *valp)
{
  if (elf32_arm_hash_table (info) == NULL)
    return FALSE;

  if (elf32_arm_hash_table (info)->vxworks_p
      && !elf_vxworks_add_symbol_hook (abfd, info, sym, namep,
				       flagsp, secp, valp))
    return FALSE;

  return TRUE;
}
/* We use this to override swap_symbol_in and swap_symbol_out.  */
const struct elf_size_info elf32_arm_size_info =
{
  sizeof (Elf32_External_Ehdr),
  sizeof (Elf32_External_Phdr),
  sizeof (Elf32_External_Shdr),
  sizeof (Elf32_External_Rel),
  sizeof (Elf32_External_Rela),
  sizeof (Elf32_External_Sym),
  sizeof (Elf32_External_Dyn),
  sizeof (Elf_External_Note),
  4,		/* Hash table entry size.  */
  1,		/* Internal relocs per external relocs.  */
  32,		/* Arch size.  */
  2,		/* Log_file_align.  */
  ELFCLASS32, EV_CURRENT,
  bfd_elf32_write_out_phdrs,
  bfd_elf32_write_shdrs_and_ehdr,
  bfd_elf32_checksum_contents,
  bfd_elf32_write_relocs,
  elf32_arm_swap_symbol_in,
  elf32_arm_swap_symbol_out,
  bfd_elf32_slurp_reloc_table,
  bfd_elf32_slurp_symbol_table,
  bfd_elf32_swap_dyn_in,
  bfd_elf32_swap_dyn_out,
  bfd_elf32_swap_reloc_in,
  bfd_elf32_swap_reloc_out,
  bfd_elf32_swap_reloca_in,
  bfd_elf32_swap_reloca_out
};
static bfd_vma
read_code32 (const bfd *abfd, const bfd_byte *addr)
{
  /* V7 BE8 code is always little endian.  */
  if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
    return bfd_getl32 (addr);

  return bfd_get_32 (abfd, addr);
}

static bfd_vma
read_code16 (const bfd *abfd, const bfd_byte *addr)
{
  /* V7 BE8 code is always little endian.  */
  if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
    return bfd_getl16 (addr);

  return bfd_get_16 (abfd, addr);
}
/* Return size of plt0 entry starting at ADDR
   or (bfd_vma) -1 if size can not be determined.  */

static bfd_vma
elf32_arm_plt0_size (const bfd *abfd, const bfd_byte *addr)
{
  bfd_vma first_word;
  bfd_vma plt0_size;

  first_word = read_code32 (abfd, addr);

  if (first_word == elf32_arm_plt0_entry[0])
    plt0_size = 4 * ARRAY_SIZE (elf32_arm_plt0_entry);
  else if (first_word == elf32_thumb2_plt0_entry[0])
    plt0_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
  else
    /* We don't yet handle this PLT format.  */
    return (bfd_vma) -1;

  return plt0_size;
}
/* Return size of plt entry starting at offset OFFSET
   of plt section located at address START
   or (bfd_vma) -1 if size can not be determined.  */

static bfd_vma
elf32_arm_plt_size (const bfd *abfd, const bfd_byte *start, bfd_vma offset)
{
  bfd_vma first_insn;
  bfd_vma plt_size = 0;
  const bfd_byte *addr = start + offset;

  /* PLT entry size is fixed on Thumb-only platforms.  */
  if (read_code32 (abfd, start) == elf32_thumb2_plt0_entry[0])
    return 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);

  /* Respect Thumb stub if necessary.  */
  if (read_code16 (abfd, addr) == elf32_arm_plt_thumb_stub[0])
    plt_size += 2 * ARRAY_SIZE (elf32_arm_plt_thumb_stub);

  /* Strip immediate from first add.  */
  first_insn = read_code32 (abfd, addr + plt_size) & 0xffffff00;

#ifdef FOUR_WORD_PLT
  if (first_insn == elf32_arm_plt_entry[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry);
#else
  if (first_insn == elf32_arm_plt_entry_long[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_long);
  else if (first_insn == elf32_arm_plt_entry_short[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_short);
#endif
  else
    /* We don't yet handle this PLT format.  */
    return (bfd_vma) -1;

  return plt_size;
}
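/* Illustration (not from the original sources): a caller can walk the PLT
   by combining the two helpers above, e.g.

     bfd_vma off = elf32_arm_plt0_size (abfd, plt_contents);
     while (off != (bfd_vma) -1 && off < plt_sec->size)
       {
	 bfd_vma sz = elf32_arm_plt_size (abfd, plt_contents, off);
	 if (sz == (bfd_vma) -1)
	   break;
	 /* OFF is the start of one PLT entry of SZ bytes.  */
	 off += sz;
       }

   where plt_contents and plt_sec are hypothetical names for the .plt
   section and its contents; this is essentially what
   elf32_arm_get_synthetic_symtab below does when it synthesizes the
   "foo@plt" symbols.  */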
19921 /* Implementation is shamelessly borrowed from _bfd_elf_get_synthetic_symtab. */
19924 elf32_arm_get_synthetic_symtab (bfd
*abfd
,
19925 long symcount ATTRIBUTE_UNUSED
,
19926 asymbol
**syms ATTRIBUTE_UNUSED
,
19936 Elf_Internal_Shdr
*hdr
;
19944 if ((abfd
->flags
& (DYNAMIC
| EXEC_P
)) == 0)
19947 if (dynsymcount
<= 0)
19950 relplt
= bfd_get_section_by_name (abfd
, ".rel.plt");
19951 if (relplt
== NULL
)
19954 hdr
= &elf_section_data (relplt
)->this_hdr
;
19955 if (hdr
->sh_link
!= elf_dynsymtab (abfd
)
19956 || (hdr
->sh_type
!= SHT_REL
&& hdr
->sh_type
!= SHT_RELA
))
19959 plt
= bfd_get_section_by_name (abfd
, ".plt");
19963 if (!elf32_arm_size_info
.slurp_reloc_table (abfd
, relplt
, dynsyms
, TRUE
))
19966 data
= plt
->contents
;
19969 if (!bfd_get_full_section_contents(abfd
, (asection
*) plt
, &data
) || data
== NULL
)
19971 bfd_cache_section_contents((asection
*) plt
, data
);
19974 count
= relplt
->size
/ hdr
->sh_entsize
;
19975 size
= count
* sizeof (asymbol
);
19976 p
= relplt
->relocation
;
19977 for (i
= 0; i
< count
; i
++, p
+= elf32_arm_size_info
.int_rels_per_ext_rel
)
19979 size
+= strlen ((*p
->sym_ptr_ptr
)->name
) + sizeof ("@plt");
19980 if (p
->addend
!= 0)
19981 size
+= sizeof ("+0x") - 1 + 8;
19984 s
= *ret
= (asymbol
*) bfd_malloc (size
);
19988 offset
= elf32_arm_plt0_size (abfd
, data
);
19989 if (offset
== (bfd_vma
) -1)
19992 names
= (char *) (s
+ count
);
19993 p
= relplt
->relocation
;
19995 for (i
= 0; i
< count
; i
++, p
+= elf32_arm_size_info
.int_rels_per_ext_rel
)
19999 bfd_vma plt_size
= elf32_arm_plt_size (abfd
, data
, offset
);
20000 if (plt_size
== (bfd_vma
) -1)
20003 *s
= **p
->sym_ptr_ptr
;
20004 /* Undefined syms won't have BSF_LOCAL or BSF_GLOBAL set. Since
20005 we are defining a symbol, ensure one of them is set. */
20006 if ((s
->flags
& BSF_LOCAL
) == 0)
20007 s
->flags
|= BSF_GLOBAL
;
20008 s
->flags
|= BSF_SYNTHETIC
;
20013 len
= strlen ((*p
->sym_ptr_ptr
)->name
);
20014 memcpy (names
, (*p
->sym_ptr_ptr
)->name
, len
);
20016 if (p
->addend
!= 0)
20020 memcpy (names
, "+0x", sizeof ("+0x") - 1);
20021 names
+= sizeof ("+0x") - 1;
20022 bfd_sprintf_vma (abfd
, buf
, p
->addend
);
20023 for (a
= buf
; *a
== '0'; ++a
)
20026 memcpy (names
, a
, len
);
20029 memcpy (names
, "@plt", sizeof ("@plt"));
20030 names
+= sizeof ("@plt");
20032 offset
+= plt_size
;
20039 elf32_arm_section_flags (flagword
*flags
, const Elf_Internal_Shdr
* hdr
)
20041 if (hdr
->sh_flags
& SHF_ARM_PURECODE
)
20042 *flags
|= SEC_ELF_PURECODE
;
20047 elf32_arm_lookup_section_flags (char *flag_name
)
20049 if (!strcmp (flag_name
, "SHF_ARM_PURECODE"))
20050 return SHF_ARM_PURECODE
;
20052 return SEC_NO_FLAGS
;
20055 static unsigned int
20056 elf32_arm_count_additional_relocs (asection
*sec
)
20058 struct _arm_elf_section_data
*arm_data
;
20059 arm_data
= get_arm_elf_section_data (sec
);
20061 return arm_data
== NULL
? 0 : arm_data
->additional_reloc_count
;
20064 /* Called to set the sh_flags, sh_link and sh_info fields of OSECTION which
20065 has a type >= SHT_LOOS. Returns TRUE if these fields were initialised
20066 FALSE otherwise. ISECTION is the best guess matching section from the
20067 input bfd IBFD, but it might be NULL. */
20070 elf32_arm_copy_special_section_fields (const bfd
*ibfd ATTRIBUTE_UNUSED
,
20071 bfd
*obfd ATTRIBUTE_UNUSED
,
20072 const Elf_Internal_Shdr
*isection ATTRIBUTE_UNUSED
,
20073 Elf_Internal_Shdr
*osection
)
20075 switch (osection
->sh_type
)
20077 case SHT_ARM_EXIDX
:
20079 Elf_Internal_Shdr
**oheaders
= elf_elfsections (obfd
);
20080 Elf_Internal_Shdr
**iheaders
= elf_elfsections (ibfd
);
20083 osection
->sh_flags
= SHF_ALLOC
| SHF_LINK_ORDER
;
20084 osection
->sh_info
= 0;
20086 /* The sh_link field must be set to the text section associated with
20087 this index section. Unfortunately the ARM EHABI does not specify
20088 exactly how to determine this association. Our caller does try
20089 to match up OSECTION with its corresponding input section however
20090 so that is a good first guess. */
20091 if (isection
!= NULL
20092 && osection
->bfd_section
!= NULL
20093 && isection
->bfd_section
!= NULL
20094 && isection
->bfd_section
->output_section
!= NULL
20095 && isection
->bfd_section
->output_section
== osection
->bfd_section
20096 && iheaders
!= NULL
20097 && isection
->sh_link
> 0
20098 && isection
->sh_link
< elf_numsections (ibfd
)
20099 && iheaders
[isection
->sh_link
]->bfd_section
!= NULL
20100 && iheaders
[isection
->sh_link
]->bfd_section
->output_section
!= NULL
20103 for (i
= elf_numsections (obfd
); i
-- > 0;)
20104 if (oheaders
[i
]->bfd_section
20105 == iheaders
[isection
->sh_link
]->bfd_section
->output_section
)
20111 /* Failing that we have to find a matching section ourselves. If
20112 we had the output section name available we could compare that
20113 with input section names. Unfortunately we don't. So instead
20114 we use a simple heuristic and look for the nearest executable
20115 section before this one. */
20116 for (i
= elf_numsections (obfd
); i
-- > 0;)
20117 if (oheaders
[i
] == osection
)
20123 if (oheaders
[i
]->sh_type
== SHT_PROGBITS
20124 && (oheaders
[i
]->sh_flags
& (SHF_ALLOC
| SHF_EXECINSTR
))
20125 == (SHF_ALLOC
| SHF_EXECINSTR
))
20131 osection
->sh_link
= i
;
20132 /* If the text section was part of a group
20133 then the index section should be too. */
20134 if (oheaders
[i
]->sh_flags
& SHF_GROUP
)
20135 osection
->sh_flags
|= SHF_GROUP
;
20141 case SHT_ARM_PREEMPTMAP
:
20142 osection
->sh_flags
= SHF_ALLOC
;
20145 case SHT_ARM_ATTRIBUTES
:
20146 case SHT_ARM_DEBUGOVERLAY
:
20147 case SHT_ARM_OVERLAYSECTION
:
/* Returns TRUE if NAME is an ARM mapping symbol.
   Traditionally the symbols $a, $d and $t have been used.
   The ARM ELF standard also defines $x (for A64 code).  It also allows a
   period initiated suffix to be added to the symbol: "$[adtx]\.[:sym_char]+".
   Other tools might also produce $b (Thumb BL), $f, $p, $m and $v, but we do
   not support them here.  $t.x indicates the start of ThumbEE instructions.  */
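/* Illustration (not from the original sources): assemblers typically emit
   "$a" at the start of a run of ARM instructions, "$t" at the start of
   Thumb instructions and "$d" at the start of literal data, e.g.

       $a          @ ARM code follows
       ...
       $d          @ literal pool follows
       .word 0x12345678

   is_arm_mapping_symbol below accepts these names as well as suffixed
   forms such as "$a.1".  */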
static bfd_boolean
is_arm_mapping_symbol (const char * name)
{
  return name != NULL /* Paranoia.  */
    && name[0] == '$' /* Note: if objcopy --prefix-symbols has been used then
			 the mapping symbols could have acquired a prefix.
			 We do not support this here, since such symbols no
			 longer conform to the ARM ELF ABI.  */
    && (name[1] == 'a' || name[1] == 'd' || name[1] == 't' || name[1] == 'x')
    && (name[2] == 0 || name[2] == '.');
  /* FIXME: Strictly speaking the symbol is only a valid mapping symbol if
     any characters that follow the period are legal characters for the body
     of a symbol's name.  For now we just assume that this is the case.  */
}

/* Make sure that mapping symbols in object files are not removed via the
   "strip --strip-unneeded" tool.  These symbols are needed in order to
   correctly generate interworking veneers, and for byte swapping code
   regions.  Once an object file has been linked, it is safe to remove the
   symbols as they will no longer be needed.  */

static void
elf32_arm_backend_symbol_processing (bfd *abfd, asymbol *sym)
{
  if (((abfd->flags & (EXEC_P | DYNAMIC)) == 0)
      && sym->section != bfd_abs_section_ptr
      && is_arm_mapping_symbol (sym->name))
    sym->flags |= BSF_KEEP;
}
20192 #undef elf_backend_copy_special_section_fields
20193 #define elf_backend_copy_special_section_fields elf32_arm_copy_special_section_fields
20195 #define ELF_ARCH bfd_arch_arm
20196 #define ELF_TARGET_ID ARM_ELF_DATA
20197 #define ELF_MACHINE_CODE EM_ARM
20198 #ifdef __QNXTARGET__
20199 #define ELF_MAXPAGESIZE 0x1000
20201 #define ELF_MAXPAGESIZE 0x10000
20203 #define ELF_MINPAGESIZE 0x1000
20204 #define ELF_COMMONPAGESIZE 0x1000
20206 #define bfd_elf32_mkobject elf32_arm_mkobject
20208 #define bfd_elf32_bfd_copy_private_bfd_data elf32_arm_copy_private_bfd_data
20209 #define bfd_elf32_bfd_merge_private_bfd_data elf32_arm_merge_private_bfd_data
20210 #define bfd_elf32_bfd_set_private_flags elf32_arm_set_private_flags
20211 #define bfd_elf32_bfd_print_private_bfd_data elf32_arm_print_private_bfd_data
20212 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_link_hash_table_create
20213 #define bfd_elf32_bfd_reloc_type_lookup elf32_arm_reloc_type_lookup
20214 #define bfd_elf32_bfd_reloc_name_lookup elf32_arm_reloc_name_lookup
20215 #define bfd_elf32_find_nearest_line elf32_arm_find_nearest_line
20216 #define bfd_elf32_find_inliner_info elf32_arm_find_inliner_info
20217 #define bfd_elf32_new_section_hook elf32_arm_new_section_hook
20218 #define bfd_elf32_bfd_is_target_special_symbol elf32_arm_is_target_special_symbol
20219 #define bfd_elf32_bfd_final_link elf32_arm_final_link
20220 #define bfd_elf32_get_synthetic_symtab elf32_arm_get_synthetic_symtab
20222 #define elf_backend_get_symbol_type elf32_arm_get_symbol_type
20223 #define elf_backend_gc_mark_hook elf32_arm_gc_mark_hook
20224 #define elf_backend_gc_mark_extra_sections elf32_arm_gc_mark_extra_sections
20225 #define elf_backend_check_relocs elf32_arm_check_relocs
20226 #define elf_backend_update_relocs elf32_arm_update_relocs
20227 #define elf_backend_relocate_section elf32_arm_relocate_section
20228 #define elf_backend_write_section elf32_arm_write_section
20229 #define elf_backend_adjust_dynamic_symbol elf32_arm_adjust_dynamic_symbol
20230 #define elf_backend_create_dynamic_sections elf32_arm_create_dynamic_sections
20231 #define elf_backend_finish_dynamic_symbol elf32_arm_finish_dynamic_symbol
20232 #define elf_backend_finish_dynamic_sections elf32_arm_finish_dynamic_sections
20233 #define elf_backend_size_dynamic_sections elf32_arm_size_dynamic_sections
20234 #define elf_backend_always_size_sections elf32_arm_always_size_sections
20235 #define elf_backend_init_index_section _bfd_elf_init_2_index_sections
20236 #define elf_backend_post_process_headers elf32_arm_post_process_headers
20237 #define elf_backend_reloc_type_class elf32_arm_reloc_type_class
20238 #define elf_backend_object_p elf32_arm_object_p
20239 #define elf_backend_fake_sections elf32_arm_fake_sections
20240 #define elf_backend_section_from_shdr elf32_arm_section_from_shdr
20241 #define elf_backend_final_write_processing elf32_arm_final_write_processing
20242 #define elf_backend_copy_indirect_symbol elf32_arm_copy_indirect_symbol
20243 #define elf_backend_size_info elf32_arm_size_info
20244 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
20245 #define elf_backend_additional_program_headers elf32_arm_additional_program_headers
20246 #define elf_backend_output_arch_local_syms elf32_arm_output_arch_local_syms
20247 #define elf_backend_filter_implib_symbols elf32_arm_filter_implib_symbols
20248 #define elf_backend_begin_write_processing elf32_arm_begin_write_processing
20249 #define elf_backend_add_symbol_hook elf32_arm_add_symbol_hook
20250 #define elf_backend_count_additional_relocs elf32_arm_count_additional_relocs
20251 #define elf_backend_symbol_processing elf32_arm_backend_symbol_processing
20253 #define elf_backend_can_refcount 1
20254 #define elf_backend_can_gc_sections 1
20255 #define elf_backend_plt_readonly 1
20256 #define elf_backend_want_got_plt 1
20257 #define elf_backend_want_plt_sym 0
20258 #define elf_backend_want_dynrelro 1
20259 #define elf_backend_may_use_rel_p 1
20260 #define elf_backend_may_use_rela_p 0
20261 #define elf_backend_default_use_rela_p 0
20262 #define elf_backend_dtrel_excludes_plt 1
20264 #define elf_backend_got_header_size 12
20265 #define elf_backend_extern_protected_data 1
20267 #undef elf_backend_obj_attrs_vendor
20268 #define elf_backend_obj_attrs_vendor "aeabi"
20269 #undef elf_backend_obj_attrs_section
20270 #define elf_backend_obj_attrs_section ".ARM.attributes"
20271 #undef elf_backend_obj_attrs_arg_type
20272 #define elf_backend_obj_attrs_arg_type elf32_arm_obj_attrs_arg_type
20273 #undef elf_backend_obj_attrs_section_type
20274 #define elf_backend_obj_attrs_section_type SHT_ARM_ATTRIBUTES
20275 #define elf_backend_obj_attrs_order elf32_arm_obj_attrs_order
20276 #define elf_backend_obj_attrs_handle_unknown elf32_arm_obj_attrs_handle_unknown
20278 #undef elf_backend_section_flags
20279 #define elf_backend_section_flags elf32_arm_section_flags
20280 #undef elf_backend_lookup_section_flags_hook
20281 #define elf_backend_lookup_section_flags_hook elf32_arm_lookup_section_flags
20283 #define elf_backend_linux_prpsinfo32_ugid16 TRUE
20285 #include "elf32-target.h"
20287 /* Native Client targets. */
20289 #undef TARGET_LITTLE_SYM
20290 #define TARGET_LITTLE_SYM arm_elf32_nacl_le_vec
20291 #undef TARGET_LITTLE_NAME
20292 #define TARGET_LITTLE_NAME "elf32-littlearm-nacl"
20293 #undef TARGET_BIG_SYM
20294 #define TARGET_BIG_SYM arm_elf32_nacl_be_vec
20295 #undef TARGET_BIG_NAME
20296 #define TARGET_BIG_NAME "elf32-bigarm-nacl"
20298 /* Like elf32_arm_link_hash_table_create -- but overrides
20299 appropriately for NaCl. */
20301 static struct bfd_link_hash_table
*
20302 elf32_arm_nacl_link_hash_table_create (bfd
*abfd
)
20304 struct bfd_link_hash_table
*ret
;
20306 ret
= elf32_arm_link_hash_table_create (abfd
);
20309 struct elf32_arm_link_hash_table
*htab
20310 = (struct elf32_arm_link_hash_table
*) ret
;
20314 htab
->plt_header_size
= 4 * ARRAY_SIZE (elf32_arm_nacl_plt0_entry
);
20315 htab
->plt_entry_size
= 4 * ARRAY_SIZE (elf32_arm_nacl_plt_entry
);
20320 /* Since NaCl doesn't use the ARM-specific unwind format, we don't
20321 really need to use elf32_arm_modify_segment_map. But we do it
20322 anyway just to reduce gratuitous differences with the stock ARM backend. */
20325 elf32_arm_nacl_modify_segment_map (bfd
*abfd
, struct bfd_link_info
*info
)
20327 return (elf32_arm_modify_segment_map (abfd
, info
)
20328 && nacl_modify_segment_map (abfd
, info
));
20332 elf32_arm_nacl_final_write_processing (bfd
*abfd
, bfd_boolean linker
)
20334 elf32_arm_final_write_processing (abfd
, linker
);
20335 nacl_final_write_processing (abfd
, linker
);
20339 elf32_arm_nacl_plt_sym_val (bfd_vma i
, const asection
*plt
,
20340 const arelent
*rel ATTRIBUTE_UNUSED
)
20343 + 4 * (ARRAY_SIZE (elf32_arm_nacl_plt0_entry
) +
20344 i
* ARRAY_SIZE (elf32_arm_nacl_plt_entry
));
20348 #define elf32_bed elf32_arm_nacl_bed
20349 #undef bfd_elf32_bfd_link_hash_table_create
20350 #define bfd_elf32_bfd_link_hash_table_create \
20351 elf32_arm_nacl_link_hash_table_create
20352 #undef elf_backend_plt_alignment
20353 #define elf_backend_plt_alignment 4
20354 #undef elf_backend_modify_segment_map
20355 #define elf_backend_modify_segment_map elf32_arm_nacl_modify_segment_map
20356 #undef elf_backend_modify_program_headers
20357 #define elf_backend_modify_program_headers nacl_modify_program_headers
20358 #undef elf_backend_final_write_processing
20359 #define elf_backend_final_write_processing elf32_arm_nacl_final_write_processing
20360 #undef bfd_elf32_get_synthetic_symtab
20361 #undef elf_backend_plt_sym_val
20362 #define elf_backend_plt_sym_val elf32_arm_nacl_plt_sym_val
20363 #undef elf_backend_copy_special_section_fields
20365 #undef ELF_MINPAGESIZE
20366 #undef ELF_COMMONPAGESIZE
20369 #include "elf32-target.h"
20371 /* Reset to defaults. */
20372 #undef elf_backend_plt_alignment
20373 #undef elf_backend_modify_segment_map
20374 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
20375 #undef elf_backend_modify_program_headers
20376 #undef elf_backend_final_write_processing
20377 #define elf_backend_final_write_processing elf32_arm_final_write_processing
20378 #undef ELF_MINPAGESIZE
20379 #define ELF_MINPAGESIZE 0x1000
20380 #undef ELF_COMMONPAGESIZE
20381 #define ELF_COMMONPAGESIZE 0x1000
20384 /* FDPIC Targets. */
20386 #undef TARGET_LITTLE_SYM
20387 #define TARGET_LITTLE_SYM arm_elf32_fdpic_le_vec
20388 #undef TARGET_LITTLE_NAME
20389 #define TARGET_LITTLE_NAME "elf32-littlearm-fdpic"
20390 #undef TARGET_BIG_SYM
20391 #define TARGET_BIG_SYM arm_elf32_fdpic_be_vec
20392 #undef TARGET_BIG_NAME
20393 #define TARGET_BIG_NAME "elf32-bigarm-fdpic"
20394 #undef elf_match_priority
20395 #define elf_match_priority 128
20397 #define ELF_OSABI ELFOSABI_ARM_FDPIC
20399 /* Like elf32_arm_link_hash_table_create -- but overrides
20400 appropriately for FDPIC. */
20402 static struct bfd_link_hash_table
*
20403 elf32_arm_fdpic_link_hash_table_create (bfd
*abfd
)
20405 struct bfd_link_hash_table
*ret
;
20407 ret
= elf32_arm_link_hash_table_create (abfd
);
20410 struct elf32_arm_link_hash_table
*htab
= (struct elf32_arm_link_hash_table
*) ret
;
20417 /* We need dynamic symbols for every section, since segments can
20418 relocate independently. */
20420 elf32_arm_fdpic_omit_section_dynsym (bfd
*output_bfd ATTRIBUTE_UNUSED
,
20421 struct bfd_link_info
*info
20423 asection
*p ATTRIBUTE_UNUSED
)
20425 switch (elf_section_data (p
)->this_hdr
.sh_type
)
20429 /* If sh_type is yet undecided, assume it could be
20430 SHT_PROGBITS/SHT_NOBITS. */
20434 /* There shouldn't be section relative relocations
20435 against any other section. */
20442 #define elf32_bed elf32_arm_fdpic_bed
20444 #undef bfd_elf32_bfd_link_hash_table_create
20445 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_fdpic_link_hash_table_create
20447 #undef elf_backend_omit_section_dynsym
20448 #define elf_backend_omit_section_dynsym elf32_arm_fdpic_omit_section_dynsym
20450 #include "elf32-target.h"
20452 #undef elf_match_priority
20454 #undef elf_backend_omit_section_dynsym
20456 /* VxWorks Targets. */
20458 #undef TARGET_LITTLE_SYM
20459 #define TARGET_LITTLE_SYM arm_elf32_vxworks_le_vec
20460 #undef TARGET_LITTLE_NAME
20461 #define TARGET_LITTLE_NAME "elf32-littlearm-vxworks"
20462 #undef TARGET_BIG_SYM
20463 #define TARGET_BIG_SYM arm_elf32_vxworks_be_vec
20464 #undef TARGET_BIG_NAME
20465 #define TARGET_BIG_NAME "elf32-bigarm-vxworks"
20467 /* Like elf32_arm_link_hash_table_create -- but overrides
20468 appropriately for VxWorks. */
20470 static struct bfd_link_hash_table
*
20471 elf32_arm_vxworks_link_hash_table_create (bfd
*abfd
)
20473 struct bfd_link_hash_table
*ret
;
20475 ret
= elf32_arm_link_hash_table_create (abfd
);
20478 struct elf32_arm_link_hash_table
*htab
20479 = (struct elf32_arm_link_hash_table
*) ret
;
20481 htab
->vxworks_p
= 1;
20487 elf32_arm_vxworks_final_write_processing (bfd
*abfd
, bfd_boolean linker
)
20489 elf32_arm_final_write_processing (abfd
, linker
);
20490 elf_vxworks_final_write_processing (abfd
, linker
);
20494 #define elf32_bed elf32_arm_vxworks_bed
20496 #undef bfd_elf32_bfd_link_hash_table_create
20497 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_vxworks_link_hash_table_create
20498 #undef elf_backend_final_write_processing
20499 #define elf_backend_final_write_processing elf32_arm_vxworks_final_write_processing
20500 #undef elf_backend_emit_relocs
20501 #define elf_backend_emit_relocs elf_vxworks_emit_relocs
20503 #undef elf_backend_may_use_rel_p
20504 #define elf_backend_may_use_rel_p 0
20505 #undef elf_backend_may_use_rela_p
20506 #define elf_backend_may_use_rela_p 1
20507 #undef elf_backend_default_use_rela_p
20508 #define elf_backend_default_use_rela_p 1
20509 #undef elf_backend_want_plt_sym
20510 #define elf_backend_want_plt_sym 1
20511 #undef ELF_MAXPAGESIZE
20512 #define ELF_MAXPAGESIZE 0x1000
20514 #include "elf32-target.h"
20517 /* Merge backend specific data from an object file to the output
20518 object file when linking. */
20521 elf32_arm_merge_private_bfd_data (bfd
*ibfd
, struct bfd_link_info
*info
)
20523 bfd
*obfd
= info
->output_bfd
;
20524 flagword out_flags
;
20526 bfd_boolean flags_compatible
= TRUE
;
20529 /* Check if we have the same endianness. */
20530 if (! _bfd_generic_verify_endian_match (ibfd
, info
))
20533 if (! is_arm_elf (ibfd
) || ! is_arm_elf (obfd
))
20536 if (!elf32_arm_merge_eabi_attributes (ibfd
, info
))
20539 /* The input BFD must have had its flags initialised. */
20540 /* The following seems bogus to me -- The flags are initialized in
20541 the assembler but I don't think an elf_flags_init field is
20542 written into the object. */
20543 /* BFD_ASSERT (elf_flags_init (ibfd)); */
20545 in_flags
= elf_elfheader (ibfd
)->e_flags
;
20546 out_flags
= elf_elfheader (obfd
)->e_flags
;
20548 /* In theory there is no reason why we couldn't handle this. However
20549 in practice it isn't even close to working and there is no real
20550 reason to want it. */
20551 if (EF_ARM_EABI_VERSION (in_flags
) >= EF_ARM_EABI_VER4
20552 && !(ibfd
->flags
& DYNAMIC
)
20553 && (in_flags
& EF_ARM_BE8
))
20555 _bfd_error_handler (_("error: %pB is already in final BE8 format"),
20560 if (!elf_flags_init (obfd
))
20562 /* If the input is the default architecture and had the default
20563 flags then do not bother setting the flags for the output
20564 architecture, instead allow future merges to do this. If no
20565 future merges ever set these flags then they will retain their
20566 uninitialised values, which surprise surprise, correspond
20567 to the default values. */
20568 if (bfd_get_arch_info (ibfd
)->the_default
20569 && elf_elfheader (ibfd
)->e_flags
== 0)
20572 elf_flags_init (obfd
) = TRUE
;
20573 elf_elfheader (obfd
)->e_flags
= in_flags
;
20575 if (bfd_get_arch (obfd
) == bfd_get_arch (ibfd
)
20576 && bfd_get_arch_info (obfd
)->the_default
)
20577 return bfd_set_arch_mach (obfd
, bfd_get_arch (ibfd
), bfd_get_mach (ibfd
));
20582 /* Determine what should happen if the input ARM architecture
20583 does not match the output ARM architecture. */
20584 if (! bfd_arm_merge_machines (ibfd
, obfd
))
20587 /* Identical flags must be compatible. */
20588 if (in_flags
== out_flags
)
20591 /* Check to see if the input BFD actually contains any sections. If
20592 not, its flags may not have been initialised either, but it
20593 cannot actually cause any incompatiblity. Do not short-circuit
20594 dynamic objects; their section list may be emptied by
20595 elf_link_add_object_symbols.
20597 Also check to see if there are no code sections in the input.
20598 In this case there is no need to check for code specific flags.
20599 XXX - do we need to worry about floating-point format compatability
20600 in data sections ? */
20601 if (!(ibfd
->flags
& DYNAMIC
))
20603 bfd_boolean null_input_bfd
= TRUE
;
20604 bfd_boolean only_data_sections
= TRUE
;
20606 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
20608 /* Ignore synthetic glue sections. */
20609 if (strcmp (sec
->name
, ".glue_7")
20610 && strcmp (sec
->name
, ".glue_7t"))
20612 if ((bfd_get_section_flags (ibfd
, sec
)
20613 & (SEC_LOAD
| SEC_CODE
| SEC_HAS_CONTENTS
))
20614 == (SEC_LOAD
| SEC_CODE
| SEC_HAS_CONTENTS
))
20615 only_data_sections
= FALSE
;
20617 null_input_bfd
= FALSE
;
20622 if (null_input_bfd
|| only_data_sections
)
20626 /* Complain about various flag mismatches. */
20627 if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags
),
20628 EF_ARM_EABI_VERSION (out_flags
)))
20631 (_("error: source object %pB has EABI version %d, but target %pB has EABI version %d"),
20632 ibfd
, (in_flags
& EF_ARM_EABIMASK
) >> 24,
20633 obfd
, (out_flags
& EF_ARM_EABIMASK
) >> 24);
20637 /* Not sure what needs to be checked for EABI versions >= 1. */
20638 /* VxWorks libraries do not use these flags. */
20639 if (get_elf_backend_data (obfd
) != &elf32_arm_vxworks_bed
20640 && get_elf_backend_data (ibfd
) != &elf32_arm_vxworks_bed
20641 && EF_ARM_EABI_VERSION (in_flags
) == EF_ARM_EABI_UNKNOWN
)
20643 if ((in_flags
& EF_ARM_APCS_26
) != (out_flags
& EF_ARM_APCS_26
))
20646 (_("error: %pB is compiled for APCS-%d, whereas target %pB uses APCS-%d"),
20647 ibfd
, in_flags
& EF_ARM_APCS_26
? 26 : 32,
20648 obfd
, out_flags
& EF_ARM_APCS_26
? 26 : 32);
20649 flags_compatible
= FALSE
;
20652 if ((in_flags
& EF_ARM_APCS_FLOAT
) != (out_flags
& EF_ARM_APCS_FLOAT
))
20654 if (in_flags
& EF_ARM_APCS_FLOAT
)
20656 (_("error: %pB passes floats in float registers, whereas %pB passes them in integer registers"),
20660 (_("error: %pB passes floats in integer registers, whereas %pB passes them in float registers"),
20663 flags_compatible
= FALSE
;
20666 if ((in_flags
& EF_ARM_VFP_FLOAT
) != (out_flags
& EF_ARM_VFP_FLOAT
))
20668 if (in_flags
& EF_ARM_VFP_FLOAT
)
20670 (_("error: %pB uses %s instructions, whereas %pB does not"),
20671 ibfd
, "VFP", obfd
);
20674 (_("error: %pB uses %s instructions, whereas %pB does not"),
20675 ibfd
, "FPA", obfd
);
20677 flags_compatible
= FALSE
;
20680 if ((in_flags
& EF_ARM_MAVERICK_FLOAT
) != (out_flags
& EF_ARM_MAVERICK_FLOAT
))
20682 if (in_flags
& EF_ARM_MAVERICK_FLOAT
)
20684 (_("error: %pB uses %s instructions, whereas %pB does not"),
20685 ibfd
, "Maverick", obfd
);
20688 (_("error: %pB does not use %s instructions, whereas %pB does"),
20689 ibfd
, "Maverick", obfd
);
20691 flags_compatible
= FALSE
;
#ifdef EF_ARM_SOFT_FLOAT
      if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
        {
          /* We can allow interworking between code that is VFP format
             layout, and uses either soft float or integer regs for
             passing floating point arguments and results.  We already
             know that the APCS_FLOAT flags match; similarly for VFP
             flags.  */
          if ((in_flags & EF_ARM_APCS_FLOAT) != 0
              || (in_flags & EF_ARM_VFP_FLOAT) == 0)
            {
              if (in_flags & EF_ARM_SOFT_FLOAT)
                _bfd_error_handler
                  (_("error: %pB uses software FP, whereas %pB uses hardware FP"),
                   ibfd, obfd);
              else
                _bfd_error_handler
                  (_("error: %pB uses hardware FP, whereas %pB uses software FP"),
                   ibfd, obfd);

              flags_compatible = FALSE;
            }
        }
#endif
      /* Interworking mismatch is only a warning.  */
      if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
        {
          if (in_flags & EF_ARM_INTERWORK)
            _bfd_error_handler
              (_("warning: %pB supports interworking, whereas %pB does not"),
               ibfd, obfd);
          else
            _bfd_error_handler
              (_("warning: %pB does not support interworking, whereas %pB does"),
               ibfd, obfd);
        }
    }

  return flags_compatible;
}
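
/* As an illustration of the diagnostics above (constants recalled from
   include/elf/arm.h, so treat this as a sketch rather than part of the
   merge logic): EF_ARM_EABIMASK is 0xff000000, so an e_flags word of
   0x05000000 encodes EABI version 5, and (flags & EF_ARM_EABIMASK) >> 24
   recovers the plain number 5 that is printed in the error message.  */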

/* Symbian OS Targets.  */

#undef  TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM               arm_elf32_symbian_le_vec
#undef  TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME              "elf32-littlearm-symbian"
#undef  TARGET_BIG_SYM
#define TARGET_BIG_SYM                  arm_elf32_symbian_be_vec
#undef  TARGET_BIG_NAME
#define TARGET_BIG_NAME                 "elf32-bigarm-symbian"

/* Like elf32_arm_link_hash_table_create -- but overrides
   appropriately for Symbian OS.  */

static struct bfd_link_hash_table *
elf32_arm_symbian_link_hash_table_create (bfd *abfd)
{
  struct bfd_link_hash_table *ret;

  ret = elf32_arm_link_hash_table_create (abfd);
  if (ret)
    {
      struct elf32_arm_link_hash_table *htab
        = (struct elf32_arm_link_hash_table *) ret;
      /* There is no PLT header for Symbian OS.  */
      htab->plt_header_size = 0;
      /* The PLT entries are each one instruction and one word.  */
      htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry);
      htab->symbian_p = 1;
      /* Symbian uses armv5t or above, so use_blx is always true.  */
      htab->use_blx = 1;
      htab->root.is_relocatable_executable = 1;
    }
  return ret;
}

static const struct bfd_elf_special_section
elf32_arm_symbian_special_sections[] =
{
  /* In a BPABI executable, the dynamic linking sections do not go in
     the loadable read-only segment.  The post-linker may wish to
     refer to these sections, but they are not part of the final
     program image.  */
  { STRING_COMMA_LEN (".dynamic"),       0, SHT_DYNAMIC,  0 },
  { STRING_COMMA_LEN (".dynstr"),        0, SHT_STRTAB,   0 },
  { STRING_COMMA_LEN (".dynsym"),        0, SHT_DYNSYM,   0 },
  { STRING_COMMA_LEN (".got"),           0, SHT_PROGBITS, 0 },
  { STRING_COMMA_LEN (".hash"),          0, SHT_HASH,     0 },
  /* These sections do not need to be writable as the SymbianOS
     postlinker will arrange things so that no dynamic relocation is
     applied to them.  */
  { STRING_COMMA_LEN (".init_array"),    0, SHT_INIT_ARRAY,    SHF_ALLOC },
  { STRING_COMMA_LEN (".fini_array"),    0, SHT_FINI_ARRAY,    SHF_ALLOC },
  { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC },
  { NULL,                                0, 0, 0,              0 }
};
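
/* A sketch of how each entry above reads (field names recalled from
   struct bfd_elf_special_section in elf-bfd.h, so this is illustrative
   rather than normative): STRING_COMMA_LEN (".got") expands to
   ".got", sizeof (".got") - 1, and the remaining fields are the suffix
   length, the section type (sh_type) and the extra section flags to
   apply.  The flag field of 0 on the dynamic-linking entries is what
   keeps them out of the loadable read-only segment, as described in the
   comment above.  */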

static void
elf32_arm_symbian_begin_write_processing (bfd *abfd,
                                          struct bfd_link_info *link_info)
{
  /* BPABI objects are never loaded directly by an OS kernel; they are
     processed by a postlinker first, into an OS-specific format.  If
     the D_PAGED bit is set on the file, BFD will align segments on
     page boundaries, so that an OS can directly map the file.  With
     BPABI objects, that just results in wasted space.  In addition,
     because we clear the D_PAGED bit, map_sections_to_segments will
     recognize that the program headers should not be mapped into any
     loadable segment.  */
  abfd->flags &= ~D_PAGED;
  elf32_arm_begin_write_processing (abfd, link_info);
}

static bfd_boolean
elf32_arm_symbian_modify_segment_map (bfd *abfd,
                                      struct bfd_link_info *info)
{
  struct elf_segment_map *m;
  asection *dynsec;

  /* BPABI shared libraries and executables should have a PT_DYNAMIC
     segment.  However, because the .dynamic section is not marked
     with SEC_LOAD, the generic ELF code will not create such a
     segment.  */
  dynsec = bfd_get_section_by_name (abfd, ".dynamic");
  if (dynsec)
    {
      for (m = elf_seg_map (abfd); m != NULL; m = m->next)
        if (m->p_type == PT_DYNAMIC)
          break;

      if (m == NULL)
        {
          m = _bfd_elf_make_dynamic_segment (abfd, dynsec);
          m->next = elf_seg_map (abfd);
          elf_seg_map (abfd) = m;
        }
    }

  /* Also call the generic arm routine.  */
  return elf32_arm_modify_segment_map (abfd, info);
}
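
/* Illustrative only: if the segment map list was previously
   LOAD -> LOAD -> ..., the code above turns it into
   DYNAMIC -> LOAD -> LOAD -> ..., i.e. the new PT_DYNAMIC entry is
   linked in at the head of elf_seg_map (abfd), the per-bfd list from
   which the program headers are later generated.  */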

/* Return address for Ith PLT stub in section PLT, for relocation REL
   or (bfd_vma) -1 if it should not be included.  */

static bfd_vma
elf32_arm_symbian_plt_sym_val (bfd_vma i, const asection *plt,
                               const arelent *rel ATTRIBUTE_UNUSED)
{
  return plt->vma + 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) * i;
}
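
/* Worked example (values assumed for illustration): each Symbian PLT
   stub is one instruction plus one data word, i.e.
   4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) = 8 bytes, so with a
   .plt at vma 0x8000 the third stub (i == 3) is reported at
   0x8000 + 8 * 3 = 0x8018.  */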

#undef  elf32_bed
#define elf32_bed elf32_arm_symbian_bed

/* The dynamic sections are not allocated on SymbianOS; the postlinker
   will process them and then discard them.  */
#undef  ELF_DYNAMIC_SEC_FLAGS
#define ELF_DYNAMIC_SEC_FLAGS \
  (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED)

#undef  elf_backend_emit_relocs

#undef  bfd_elf32_bfd_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_create    elf32_arm_symbian_link_hash_table_create
#undef  elf_backend_special_sections
#define elf_backend_special_sections            elf32_arm_symbian_special_sections
#undef  elf_backend_begin_write_processing
#define elf_backend_begin_write_processing      elf32_arm_symbian_begin_write_processing
#undef  elf_backend_final_write_processing
#define elf_backend_final_write_processing      elf32_arm_final_write_processing

#undef  elf_backend_modify_segment_map
#define elf_backend_modify_segment_map          elf32_arm_symbian_modify_segment_map

/* There is no .got section for BPABI objects, and hence no header.  */
#undef  elf_backend_got_header_size
#define elf_backend_got_header_size 0

/* Similarly, there is no .got.plt section.  */
#undef  elf_backend_want_got_plt
#define elf_backend_want_got_plt 0

#undef  elf_backend_plt_sym_val
#define elf_backend_plt_sym_val         elf32_arm_symbian_plt_sym_val

#undef  elf_backend_may_use_rel_p
#define elf_backend_may_use_rel_p       1
#undef  elf_backend_may_use_rela_p
#define elf_backend_may_use_rela_p      0
#undef  elf_backend_default_use_rela_p
#define elf_backend_default_use_rela_p  0
#undef  elf_backend_want_plt_sym
#define elf_backend_want_plt_sym        0
#undef  elf_backend_dtrel_excludes_plt
#define elf_backend_dtrel_excludes_plt  0
#undef  ELF_MAXPAGESIZE
#define ELF_MAXPAGESIZE                 0x8000

#include "elf32-target.h"
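
/* elf32-target.h consumes the TARGET_*_SYM/NAME and elf_backend_* macros
   defined above to instantiate the Symbian target vectors
   (arm_elf32_symbian_le_vec and arm_elf32_symbian_be_vec).  */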