/* 32-bit ELF support for ARM
   Copyright (C) 1998-2019 Free Software Foundation, Inc.

   This file is part of BFD, the Binary File Descriptor library.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
   MA 02110-1301, USA.  */
#include "libiberty.h"
#include "elf-vxworks.h"
#include "elf32-arm.h"
/* Return the relocation section associated with NAME.  HTAB is the
   bfd's elf32_arm_link_hash_entry.  */
#define RELOC_SECTION(HTAB, NAME) \
  ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
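/* For example, a REL-style link (use_rel set) maps ".text" to
   ".rel.text", while a RELA-style link maps it to ".rela.text".  */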
/* Return size of a relocation entry.  HTAB is the bfd's
   elf32_arm_link_hash_entry.  */
#define RELOC_SIZE(HTAB) \
  ((HTAB)->use_rel \
   ? sizeof (Elf32_External_Rel) \
   : sizeof (Elf32_External_Rela))
/* Return function to swap relocations in.  HTAB is the bfd's
   elf32_arm_link_hash_entry.  */
#define SWAP_RELOC_IN(HTAB) \
  ((HTAB)->use_rel \
   ? bfd_elf32_swap_reloc_in \
   : bfd_elf32_swap_reloca_in)
/* Return function to swap relocations out.  HTAB is the bfd's
   elf32_arm_link_hash_entry.  */
#define SWAP_RELOC_OUT(HTAB) \
  ((HTAB)->use_rel \
   ? bfd_elf32_swap_reloc_out \
   : bfd_elf32_swap_reloca_out)
#define elf_info_to_howto		NULL
#define elf_info_to_howto_rel		elf32_arm_info_to_howto

#define ARM_ELF_ABI_VERSION		0
#define ARM_ELF_OS_ABI_VERSION		ELFOSABI_ARM
/* The Adjusted Place, as defined by AAELF.  */
#define Pa(X) ((X) & 0xfffffffc)
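/* E.g. Pa (0x8006) == 0x8004: the two low bits are cleared so the
   place is treated as 32-bit aligned.  */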
static bfd_boolean elf32_arm_write_section (bfd *output_bfd,
					    struct bfd_link_info *link_info,
					    asection *sec,
					    bfd_byte *contents);
/* Note: code such as elf32_arm_reloc_type_lookup expects to use e.g.
   R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO
   in that slot.  */

static reloc_howto_type elf32_arm_howto_table_1[] =
{
  HOWTO (R_ARM_NONE,		/* type */
	 0,			/* rightshift */
	 3,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_NONE",		/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE),		/* pcrel_offset */
  HOWTO (R_ARM_PC24,		/* type */
	 2,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 24,			/* bitsize */
	 TRUE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_signed,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_PC24",		/* name */
	 FALSE,			/* partial_inplace */
	 0x00ffffff,		/* src_mask */
	 0x00ffffff,		/* dst_mask */
	 TRUE),			/* pcrel_offset */
  /* 32 bit absolute */
  HOWTO (R_ARM_ABS32,		/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_ABS32",		/* name */
	 FALSE,			/* partial_inplace */
	 0xffffffff,		/* src_mask */
	 0xffffffff,		/* dst_mask */
	 FALSE),		/* pcrel_offset */
  /* standard 32bit pc-relative reloc */
  HOWTO (R_ARM_REL32,		/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 TRUE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_REL32",		/* name */
	 FALSE,			/* partial_inplace */
	 0xffffffff,		/* src_mask */
	 0xffffffff,		/* dst_mask */
	 TRUE),			/* pcrel_offset */
139 /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */
140 HOWTO (R_ARM_LDR_PC_G0
, /* type */
142 0, /* size (0 = byte, 1 = short, 2 = long) */
144 TRUE
, /* pc_relative */
146 complain_overflow_dont
,/* complain_on_overflow */
147 bfd_elf_generic_reloc
, /* special_function */
148 "R_ARM_LDR_PC_G0", /* name */
149 FALSE
, /* partial_inplace */
150 0xffffffff, /* src_mask */
151 0xffffffff, /* dst_mask */
152 TRUE
), /* pcrel_offset */
154 /* 16 bit absolute */
155 HOWTO (R_ARM_ABS16
, /* type */
157 1, /* size (0 = byte, 1 = short, 2 = long) */
159 FALSE
, /* pc_relative */
161 complain_overflow_bitfield
,/* complain_on_overflow */
162 bfd_elf_generic_reloc
, /* special_function */
163 "R_ARM_ABS16", /* name */
164 FALSE
, /* partial_inplace */
165 0x0000ffff, /* src_mask */
166 0x0000ffff, /* dst_mask */
167 FALSE
), /* pcrel_offset */
169 /* 12 bit absolute */
170 HOWTO (R_ARM_ABS12
, /* type */
172 2, /* size (0 = byte, 1 = short, 2 = long) */
174 FALSE
, /* pc_relative */
176 complain_overflow_bitfield
,/* complain_on_overflow */
177 bfd_elf_generic_reloc
, /* special_function */
178 "R_ARM_ABS12", /* name */
179 FALSE
, /* partial_inplace */
180 0x00000fff, /* src_mask */
181 0x00000fff, /* dst_mask */
182 FALSE
), /* pcrel_offset */
184 HOWTO (R_ARM_THM_ABS5
, /* type */
186 1, /* size (0 = byte, 1 = short, 2 = long) */
188 FALSE
, /* pc_relative */
190 complain_overflow_bitfield
,/* complain_on_overflow */
191 bfd_elf_generic_reloc
, /* special_function */
192 "R_ARM_THM_ABS5", /* name */
193 FALSE
, /* partial_inplace */
194 0x000007e0, /* src_mask */
195 0x000007e0, /* dst_mask */
196 FALSE
), /* pcrel_offset */
199 HOWTO (R_ARM_ABS8
, /* type */
201 0, /* size (0 = byte, 1 = short, 2 = long) */
203 FALSE
, /* pc_relative */
205 complain_overflow_bitfield
,/* complain_on_overflow */
206 bfd_elf_generic_reloc
, /* special_function */
207 "R_ARM_ABS8", /* name */
208 FALSE
, /* partial_inplace */
209 0x000000ff, /* src_mask */
210 0x000000ff, /* dst_mask */
211 FALSE
), /* pcrel_offset */
213 HOWTO (R_ARM_SBREL32
, /* type */
215 2, /* size (0 = byte, 1 = short, 2 = long) */
217 FALSE
, /* pc_relative */
219 complain_overflow_dont
,/* complain_on_overflow */
220 bfd_elf_generic_reloc
, /* special_function */
221 "R_ARM_SBREL32", /* name */
222 FALSE
, /* partial_inplace */
223 0xffffffff, /* src_mask */
224 0xffffffff, /* dst_mask */
225 FALSE
), /* pcrel_offset */
227 HOWTO (R_ARM_THM_CALL
, /* type */
229 2, /* size (0 = byte, 1 = short, 2 = long) */
231 TRUE
, /* pc_relative */
233 complain_overflow_signed
,/* complain_on_overflow */
234 bfd_elf_generic_reloc
, /* special_function */
235 "R_ARM_THM_CALL", /* name */
236 FALSE
, /* partial_inplace */
237 0x07ff2fff, /* src_mask */
238 0x07ff2fff, /* dst_mask */
239 TRUE
), /* pcrel_offset */
241 HOWTO (R_ARM_THM_PC8
, /* type */
243 1, /* size (0 = byte, 1 = short, 2 = long) */
245 TRUE
, /* pc_relative */
247 complain_overflow_signed
,/* complain_on_overflow */
248 bfd_elf_generic_reloc
, /* special_function */
249 "R_ARM_THM_PC8", /* name */
250 FALSE
, /* partial_inplace */
251 0x000000ff, /* src_mask */
252 0x000000ff, /* dst_mask */
253 TRUE
), /* pcrel_offset */
255 HOWTO (R_ARM_BREL_ADJ
, /* type */
257 1, /* size (0 = byte, 1 = short, 2 = long) */
259 FALSE
, /* pc_relative */
261 complain_overflow_signed
,/* complain_on_overflow */
262 bfd_elf_generic_reloc
, /* special_function */
263 "R_ARM_BREL_ADJ", /* name */
264 FALSE
, /* partial_inplace */
265 0xffffffff, /* src_mask */
266 0xffffffff, /* dst_mask */
267 FALSE
), /* pcrel_offset */
269 HOWTO (R_ARM_TLS_DESC
, /* type */
271 2, /* size (0 = byte, 1 = short, 2 = long) */
273 FALSE
, /* pc_relative */
275 complain_overflow_bitfield
,/* complain_on_overflow */
276 bfd_elf_generic_reloc
, /* special_function */
277 "R_ARM_TLS_DESC", /* name */
278 FALSE
, /* partial_inplace */
279 0xffffffff, /* src_mask */
280 0xffffffff, /* dst_mask */
281 FALSE
), /* pcrel_offset */
283 HOWTO (R_ARM_THM_SWI8
, /* type */
285 0, /* size (0 = byte, 1 = short, 2 = long) */
287 FALSE
, /* pc_relative */
289 complain_overflow_signed
,/* complain_on_overflow */
290 bfd_elf_generic_reloc
, /* special_function */
291 "R_ARM_SWI8", /* name */
292 FALSE
, /* partial_inplace */
293 0x00000000, /* src_mask */
294 0x00000000, /* dst_mask */
295 FALSE
), /* pcrel_offset */
297 /* BLX instruction for the ARM. */
298 HOWTO (R_ARM_XPC25
, /* type */
300 2, /* size (0 = byte, 1 = short, 2 = long) */
302 TRUE
, /* pc_relative */
304 complain_overflow_signed
,/* complain_on_overflow */
305 bfd_elf_generic_reloc
, /* special_function */
306 "R_ARM_XPC25", /* name */
307 FALSE
, /* partial_inplace */
308 0x00ffffff, /* src_mask */
309 0x00ffffff, /* dst_mask */
310 TRUE
), /* pcrel_offset */
312 /* BLX instruction for the Thumb. */
313 HOWTO (R_ARM_THM_XPC22
, /* type */
315 2, /* size (0 = byte, 1 = short, 2 = long) */
317 TRUE
, /* pc_relative */
319 complain_overflow_signed
,/* complain_on_overflow */
320 bfd_elf_generic_reloc
, /* special_function */
321 "R_ARM_THM_XPC22", /* name */
322 FALSE
, /* partial_inplace */
323 0x07ff2fff, /* src_mask */
324 0x07ff2fff, /* dst_mask */
325 TRUE
), /* pcrel_offset */
327 /* Dynamic TLS relocations. */
329 HOWTO (R_ARM_TLS_DTPMOD32
, /* type */
331 2, /* size (0 = byte, 1 = short, 2 = long) */
333 FALSE
, /* pc_relative */
335 complain_overflow_bitfield
,/* complain_on_overflow */
336 bfd_elf_generic_reloc
, /* special_function */
337 "R_ARM_TLS_DTPMOD32", /* name */
338 TRUE
, /* partial_inplace */
339 0xffffffff, /* src_mask */
340 0xffffffff, /* dst_mask */
341 FALSE
), /* pcrel_offset */
343 HOWTO (R_ARM_TLS_DTPOFF32
, /* type */
345 2, /* size (0 = byte, 1 = short, 2 = long) */
347 FALSE
, /* pc_relative */
349 complain_overflow_bitfield
,/* complain_on_overflow */
350 bfd_elf_generic_reloc
, /* special_function */
351 "R_ARM_TLS_DTPOFF32", /* name */
352 TRUE
, /* partial_inplace */
353 0xffffffff, /* src_mask */
354 0xffffffff, /* dst_mask */
355 FALSE
), /* pcrel_offset */
357 HOWTO (R_ARM_TLS_TPOFF32
, /* type */
359 2, /* size (0 = byte, 1 = short, 2 = long) */
361 FALSE
, /* pc_relative */
363 complain_overflow_bitfield
,/* complain_on_overflow */
364 bfd_elf_generic_reloc
, /* special_function */
365 "R_ARM_TLS_TPOFF32", /* name */
366 TRUE
, /* partial_inplace */
367 0xffffffff, /* src_mask */
368 0xffffffff, /* dst_mask */
369 FALSE
), /* pcrel_offset */
371 /* Relocs used in ARM Linux */
373 HOWTO (R_ARM_COPY
, /* type */
375 2, /* size (0 = byte, 1 = short, 2 = long) */
377 FALSE
, /* pc_relative */
379 complain_overflow_bitfield
,/* complain_on_overflow */
380 bfd_elf_generic_reloc
, /* special_function */
381 "R_ARM_COPY", /* name */
382 TRUE
, /* partial_inplace */
383 0xffffffff, /* src_mask */
384 0xffffffff, /* dst_mask */
385 FALSE
), /* pcrel_offset */
387 HOWTO (R_ARM_GLOB_DAT
, /* type */
389 2, /* size (0 = byte, 1 = short, 2 = long) */
391 FALSE
, /* pc_relative */
393 complain_overflow_bitfield
,/* complain_on_overflow */
394 bfd_elf_generic_reloc
, /* special_function */
395 "R_ARM_GLOB_DAT", /* name */
396 TRUE
, /* partial_inplace */
397 0xffffffff, /* src_mask */
398 0xffffffff, /* dst_mask */
399 FALSE
), /* pcrel_offset */
401 HOWTO (R_ARM_JUMP_SLOT
, /* type */
403 2, /* size (0 = byte, 1 = short, 2 = long) */
405 FALSE
, /* pc_relative */
407 complain_overflow_bitfield
,/* complain_on_overflow */
408 bfd_elf_generic_reloc
, /* special_function */
409 "R_ARM_JUMP_SLOT", /* name */
410 TRUE
, /* partial_inplace */
411 0xffffffff, /* src_mask */
412 0xffffffff, /* dst_mask */
413 FALSE
), /* pcrel_offset */
415 HOWTO (R_ARM_RELATIVE
, /* type */
417 2, /* size (0 = byte, 1 = short, 2 = long) */
419 FALSE
, /* pc_relative */
421 complain_overflow_bitfield
,/* complain_on_overflow */
422 bfd_elf_generic_reloc
, /* special_function */
423 "R_ARM_RELATIVE", /* name */
424 TRUE
, /* partial_inplace */
425 0xffffffff, /* src_mask */
426 0xffffffff, /* dst_mask */
427 FALSE
), /* pcrel_offset */
429 HOWTO (R_ARM_GOTOFF32
, /* type */
431 2, /* size (0 = byte, 1 = short, 2 = long) */
433 FALSE
, /* pc_relative */
435 complain_overflow_bitfield
,/* complain_on_overflow */
436 bfd_elf_generic_reloc
, /* special_function */
437 "R_ARM_GOTOFF32", /* name */
438 TRUE
, /* partial_inplace */
439 0xffffffff, /* src_mask */
440 0xffffffff, /* dst_mask */
441 FALSE
), /* pcrel_offset */
443 HOWTO (R_ARM_GOTPC
, /* type */
445 2, /* size (0 = byte, 1 = short, 2 = long) */
447 TRUE
, /* pc_relative */
449 complain_overflow_bitfield
,/* complain_on_overflow */
450 bfd_elf_generic_reloc
, /* special_function */
451 "R_ARM_GOTPC", /* name */
452 TRUE
, /* partial_inplace */
453 0xffffffff, /* src_mask */
454 0xffffffff, /* dst_mask */
455 TRUE
), /* pcrel_offset */
457 HOWTO (R_ARM_GOT32
, /* type */
459 2, /* size (0 = byte, 1 = short, 2 = long) */
461 FALSE
, /* pc_relative */
463 complain_overflow_bitfield
,/* complain_on_overflow */
464 bfd_elf_generic_reloc
, /* special_function */
465 "R_ARM_GOT32", /* name */
466 TRUE
, /* partial_inplace */
467 0xffffffff, /* src_mask */
468 0xffffffff, /* dst_mask */
469 FALSE
), /* pcrel_offset */
471 HOWTO (R_ARM_PLT32
, /* type */
473 2, /* size (0 = byte, 1 = short, 2 = long) */
475 TRUE
, /* pc_relative */
477 complain_overflow_bitfield
,/* complain_on_overflow */
478 bfd_elf_generic_reloc
, /* special_function */
479 "R_ARM_PLT32", /* name */
480 FALSE
, /* partial_inplace */
481 0x00ffffff, /* src_mask */
482 0x00ffffff, /* dst_mask */
483 TRUE
), /* pcrel_offset */
485 HOWTO (R_ARM_CALL
, /* type */
487 2, /* size (0 = byte, 1 = short, 2 = long) */
489 TRUE
, /* pc_relative */
491 complain_overflow_signed
,/* complain_on_overflow */
492 bfd_elf_generic_reloc
, /* special_function */
493 "R_ARM_CALL", /* name */
494 FALSE
, /* partial_inplace */
495 0x00ffffff, /* src_mask */
496 0x00ffffff, /* dst_mask */
497 TRUE
), /* pcrel_offset */
499 HOWTO (R_ARM_JUMP24
, /* type */
501 2, /* size (0 = byte, 1 = short, 2 = long) */
503 TRUE
, /* pc_relative */
505 complain_overflow_signed
,/* complain_on_overflow */
506 bfd_elf_generic_reloc
, /* special_function */
507 "R_ARM_JUMP24", /* name */
508 FALSE
, /* partial_inplace */
509 0x00ffffff, /* src_mask */
510 0x00ffffff, /* dst_mask */
511 TRUE
), /* pcrel_offset */
513 HOWTO (R_ARM_THM_JUMP24
, /* type */
515 2, /* size (0 = byte, 1 = short, 2 = long) */
517 TRUE
, /* pc_relative */
519 complain_overflow_signed
,/* complain_on_overflow */
520 bfd_elf_generic_reloc
, /* special_function */
521 "R_ARM_THM_JUMP24", /* name */
522 FALSE
, /* partial_inplace */
523 0x07ff2fff, /* src_mask */
524 0x07ff2fff, /* dst_mask */
525 TRUE
), /* pcrel_offset */
527 HOWTO (R_ARM_BASE_ABS
, /* type */
529 2, /* size (0 = byte, 1 = short, 2 = long) */
531 FALSE
, /* pc_relative */
533 complain_overflow_dont
,/* complain_on_overflow */
534 bfd_elf_generic_reloc
, /* special_function */
535 "R_ARM_BASE_ABS", /* name */
536 FALSE
, /* partial_inplace */
537 0xffffffff, /* src_mask */
538 0xffffffff, /* dst_mask */
539 FALSE
), /* pcrel_offset */
541 HOWTO (R_ARM_ALU_PCREL7_0
, /* type */
543 2, /* size (0 = byte, 1 = short, 2 = long) */
545 TRUE
, /* pc_relative */
547 complain_overflow_dont
,/* complain_on_overflow */
548 bfd_elf_generic_reloc
, /* special_function */
549 "R_ARM_ALU_PCREL_7_0", /* name */
550 FALSE
, /* partial_inplace */
551 0x00000fff, /* src_mask */
552 0x00000fff, /* dst_mask */
553 TRUE
), /* pcrel_offset */
555 HOWTO (R_ARM_ALU_PCREL15_8
, /* type */
557 2, /* size (0 = byte, 1 = short, 2 = long) */
559 TRUE
, /* pc_relative */
561 complain_overflow_dont
,/* complain_on_overflow */
562 bfd_elf_generic_reloc
, /* special_function */
563 "R_ARM_ALU_PCREL_15_8",/* name */
564 FALSE
, /* partial_inplace */
565 0x00000fff, /* src_mask */
566 0x00000fff, /* dst_mask */
567 TRUE
), /* pcrel_offset */
569 HOWTO (R_ARM_ALU_PCREL23_15
, /* type */
571 2, /* size (0 = byte, 1 = short, 2 = long) */
573 TRUE
, /* pc_relative */
575 complain_overflow_dont
,/* complain_on_overflow */
576 bfd_elf_generic_reloc
, /* special_function */
577 "R_ARM_ALU_PCREL_23_15",/* name */
578 FALSE
, /* partial_inplace */
579 0x00000fff, /* src_mask */
580 0x00000fff, /* dst_mask */
581 TRUE
), /* pcrel_offset */
583 HOWTO (R_ARM_LDR_SBREL_11_0
, /* type */
585 2, /* size (0 = byte, 1 = short, 2 = long) */
587 FALSE
, /* pc_relative */
589 complain_overflow_dont
,/* complain_on_overflow */
590 bfd_elf_generic_reloc
, /* special_function */
591 "R_ARM_LDR_SBREL_11_0",/* name */
592 FALSE
, /* partial_inplace */
593 0x00000fff, /* src_mask */
594 0x00000fff, /* dst_mask */
595 FALSE
), /* pcrel_offset */
597 HOWTO (R_ARM_ALU_SBREL_19_12
, /* type */
599 2, /* size (0 = byte, 1 = short, 2 = long) */
601 FALSE
, /* pc_relative */
603 complain_overflow_dont
,/* complain_on_overflow */
604 bfd_elf_generic_reloc
, /* special_function */
605 "R_ARM_ALU_SBREL_19_12",/* name */
606 FALSE
, /* partial_inplace */
607 0x000ff000, /* src_mask */
608 0x000ff000, /* dst_mask */
609 FALSE
), /* pcrel_offset */
611 HOWTO (R_ARM_ALU_SBREL_27_20
, /* type */
613 2, /* size (0 = byte, 1 = short, 2 = long) */
615 FALSE
, /* pc_relative */
617 complain_overflow_dont
,/* complain_on_overflow */
618 bfd_elf_generic_reloc
, /* special_function */
619 "R_ARM_ALU_SBREL_27_20",/* name */
620 FALSE
, /* partial_inplace */
621 0x0ff00000, /* src_mask */
622 0x0ff00000, /* dst_mask */
623 FALSE
), /* pcrel_offset */
625 HOWTO (R_ARM_TARGET1
, /* type */
627 2, /* size (0 = byte, 1 = short, 2 = long) */
629 FALSE
, /* pc_relative */
631 complain_overflow_dont
,/* complain_on_overflow */
632 bfd_elf_generic_reloc
, /* special_function */
633 "R_ARM_TARGET1", /* name */
634 FALSE
, /* partial_inplace */
635 0xffffffff, /* src_mask */
636 0xffffffff, /* dst_mask */
637 FALSE
), /* pcrel_offset */
639 HOWTO (R_ARM_ROSEGREL32
, /* type */
641 2, /* size (0 = byte, 1 = short, 2 = long) */
643 FALSE
, /* pc_relative */
645 complain_overflow_dont
,/* complain_on_overflow */
646 bfd_elf_generic_reloc
, /* special_function */
647 "R_ARM_ROSEGREL32", /* name */
648 FALSE
, /* partial_inplace */
649 0xffffffff, /* src_mask */
650 0xffffffff, /* dst_mask */
651 FALSE
), /* pcrel_offset */
653 HOWTO (R_ARM_V4BX
, /* type */
655 2, /* size (0 = byte, 1 = short, 2 = long) */
657 FALSE
, /* pc_relative */
659 complain_overflow_dont
,/* complain_on_overflow */
660 bfd_elf_generic_reloc
, /* special_function */
661 "R_ARM_V4BX", /* name */
662 FALSE
, /* partial_inplace */
663 0xffffffff, /* src_mask */
664 0xffffffff, /* dst_mask */
665 FALSE
), /* pcrel_offset */
667 HOWTO (R_ARM_TARGET2
, /* type */
669 2, /* size (0 = byte, 1 = short, 2 = long) */
671 FALSE
, /* pc_relative */
673 complain_overflow_signed
,/* complain_on_overflow */
674 bfd_elf_generic_reloc
, /* special_function */
675 "R_ARM_TARGET2", /* name */
676 FALSE
, /* partial_inplace */
677 0xffffffff, /* src_mask */
678 0xffffffff, /* dst_mask */
679 TRUE
), /* pcrel_offset */
681 HOWTO (R_ARM_PREL31
, /* type */
683 2, /* size (0 = byte, 1 = short, 2 = long) */
685 TRUE
, /* pc_relative */
687 complain_overflow_signed
,/* complain_on_overflow */
688 bfd_elf_generic_reloc
, /* special_function */
689 "R_ARM_PREL31", /* name */
690 FALSE
, /* partial_inplace */
691 0x7fffffff, /* src_mask */
692 0x7fffffff, /* dst_mask */
693 TRUE
), /* pcrel_offset */
695 HOWTO (R_ARM_MOVW_ABS_NC
, /* type */
697 2, /* size (0 = byte, 1 = short, 2 = long) */
699 FALSE
, /* pc_relative */
701 complain_overflow_dont
,/* complain_on_overflow */
702 bfd_elf_generic_reloc
, /* special_function */
703 "R_ARM_MOVW_ABS_NC", /* name */
704 FALSE
, /* partial_inplace */
705 0x000f0fff, /* src_mask */
706 0x000f0fff, /* dst_mask */
707 FALSE
), /* pcrel_offset */
709 HOWTO (R_ARM_MOVT_ABS
, /* type */
711 2, /* size (0 = byte, 1 = short, 2 = long) */
713 FALSE
, /* pc_relative */
715 complain_overflow_bitfield
,/* complain_on_overflow */
716 bfd_elf_generic_reloc
, /* special_function */
717 "R_ARM_MOVT_ABS", /* name */
718 FALSE
, /* partial_inplace */
719 0x000f0fff, /* src_mask */
720 0x000f0fff, /* dst_mask */
721 FALSE
), /* pcrel_offset */
723 HOWTO (R_ARM_MOVW_PREL_NC
, /* type */
725 2, /* size (0 = byte, 1 = short, 2 = long) */
727 TRUE
, /* pc_relative */
729 complain_overflow_dont
,/* complain_on_overflow */
730 bfd_elf_generic_reloc
, /* special_function */
731 "R_ARM_MOVW_PREL_NC", /* name */
732 FALSE
, /* partial_inplace */
733 0x000f0fff, /* src_mask */
734 0x000f0fff, /* dst_mask */
735 TRUE
), /* pcrel_offset */
737 HOWTO (R_ARM_MOVT_PREL
, /* type */
739 2, /* size (0 = byte, 1 = short, 2 = long) */
741 TRUE
, /* pc_relative */
743 complain_overflow_bitfield
,/* complain_on_overflow */
744 bfd_elf_generic_reloc
, /* special_function */
745 "R_ARM_MOVT_PREL", /* name */
746 FALSE
, /* partial_inplace */
747 0x000f0fff, /* src_mask */
748 0x000f0fff, /* dst_mask */
749 TRUE
), /* pcrel_offset */
751 HOWTO (R_ARM_THM_MOVW_ABS_NC
, /* type */
753 2, /* size (0 = byte, 1 = short, 2 = long) */
755 FALSE
, /* pc_relative */
757 complain_overflow_dont
,/* complain_on_overflow */
758 bfd_elf_generic_reloc
, /* special_function */
759 "R_ARM_THM_MOVW_ABS_NC",/* name */
760 FALSE
, /* partial_inplace */
761 0x040f70ff, /* src_mask */
762 0x040f70ff, /* dst_mask */
763 FALSE
), /* pcrel_offset */
765 HOWTO (R_ARM_THM_MOVT_ABS
, /* type */
767 2, /* size (0 = byte, 1 = short, 2 = long) */
769 FALSE
, /* pc_relative */
771 complain_overflow_bitfield
,/* complain_on_overflow */
772 bfd_elf_generic_reloc
, /* special_function */
773 "R_ARM_THM_MOVT_ABS", /* name */
774 FALSE
, /* partial_inplace */
775 0x040f70ff, /* src_mask */
776 0x040f70ff, /* dst_mask */
777 FALSE
), /* pcrel_offset */
779 HOWTO (R_ARM_THM_MOVW_PREL_NC
,/* type */
781 2, /* size (0 = byte, 1 = short, 2 = long) */
783 TRUE
, /* pc_relative */
785 complain_overflow_dont
,/* complain_on_overflow */
786 bfd_elf_generic_reloc
, /* special_function */
787 "R_ARM_THM_MOVW_PREL_NC",/* name */
788 FALSE
, /* partial_inplace */
789 0x040f70ff, /* src_mask */
790 0x040f70ff, /* dst_mask */
791 TRUE
), /* pcrel_offset */
793 HOWTO (R_ARM_THM_MOVT_PREL
, /* type */
795 2, /* size (0 = byte, 1 = short, 2 = long) */
797 TRUE
, /* pc_relative */
799 complain_overflow_bitfield
,/* complain_on_overflow */
800 bfd_elf_generic_reloc
, /* special_function */
801 "R_ARM_THM_MOVT_PREL", /* name */
802 FALSE
, /* partial_inplace */
803 0x040f70ff, /* src_mask */
804 0x040f70ff, /* dst_mask */
805 TRUE
), /* pcrel_offset */
807 HOWTO (R_ARM_THM_JUMP19
, /* type */
809 2, /* size (0 = byte, 1 = short, 2 = long) */
811 TRUE
, /* pc_relative */
813 complain_overflow_signed
,/* complain_on_overflow */
814 bfd_elf_generic_reloc
, /* special_function */
815 "R_ARM_THM_JUMP19", /* name */
816 FALSE
, /* partial_inplace */
817 0x043f2fff, /* src_mask */
818 0x043f2fff, /* dst_mask */
819 TRUE
), /* pcrel_offset */
821 HOWTO (R_ARM_THM_JUMP6
, /* type */
823 1, /* size (0 = byte, 1 = short, 2 = long) */
825 TRUE
, /* pc_relative */
827 complain_overflow_unsigned
,/* complain_on_overflow */
828 bfd_elf_generic_reloc
, /* special_function */
829 "R_ARM_THM_JUMP6", /* name */
830 FALSE
, /* partial_inplace */
831 0x02f8, /* src_mask */
832 0x02f8, /* dst_mask */
833 TRUE
), /* pcrel_offset */
835 /* These are declared as 13-bit signed relocations because we can
836 address -4095 .. 4095(base) by altering ADDW to SUBW or vice
838 HOWTO (R_ARM_THM_ALU_PREL_11_0
,/* type */
840 2, /* size (0 = byte, 1 = short, 2 = long) */
842 TRUE
, /* pc_relative */
844 complain_overflow_dont
,/* complain_on_overflow */
845 bfd_elf_generic_reloc
, /* special_function */
846 "R_ARM_THM_ALU_PREL_11_0",/* name */
847 FALSE
, /* partial_inplace */
848 0xffffffff, /* src_mask */
849 0xffffffff, /* dst_mask */
850 TRUE
), /* pcrel_offset */
852 HOWTO (R_ARM_THM_PC12
, /* type */
854 2, /* size (0 = byte, 1 = short, 2 = long) */
856 TRUE
, /* pc_relative */
858 complain_overflow_dont
,/* complain_on_overflow */
859 bfd_elf_generic_reloc
, /* special_function */
860 "R_ARM_THM_PC12", /* name */
861 FALSE
, /* partial_inplace */
862 0xffffffff, /* src_mask */
863 0xffffffff, /* dst_mask */
864 TRUE
), /* pcrel_offset */
866 HOWTO (R_ARM_ABS32_NOI
, /* type */
868 2, /* size (0 = byte, 1 = short, 2 = long) */
870 FALSE
, /* pc_relative */
872 complain_overflow_dont
,/* complain_on_overflow */
873 bfd_elf_generic_reloc
, /* special_function */
874 "R_ARM_ABS32_NOI", /* name */
875 FALSE
, /* partial_inplace */
876 0xffffffff, /* src_mask */
877 0xffffffff, /* dst_mask */
878 FALSE
), /* pcrel_offset */
880 HOWTO (R_ARM_REL32_NOI
, /* type */
882 2, /* size (0 = byte, 1 = short, 2 = long) */
884 TRUE
, /* pc_relative */
886 complain_overflow_dont
,/* complain_on_overflow */
887 bfd_elf_generic_reloc
, /* special_function */
888 "R_ARM_REL32_NOI", /* name */
889 FALSE
, /* partial_inplace */
890 0xffffffff, /* src_mask */
891 0xffffffff, /* dst_mask */
892 FALSE
), /* pcrel_offset */
894 /* Group relocations. */
896 HOWTO (R_ARM_ALU_PC_G0_NC
, /* type */
898 2, /* size (0 = byte, 1 = short, 2 = long) */
900 TRUE
, /* pc_relative */
902 complain_overflow_dont
,/* complain_on_overflow */
903 bfd_elf_generic_reloc
, /* special_function */
904 "R_ARM_ALU_PC_G0_NC", /* name */
905 FALSE
, /* partial_inplace */
906 0xffffffff, /* src_mask */
907 0xffffffff, /* dst_mask */
908 TRUE
), /* pcrel_offset */
910 HOWTO (R_ARM_ALU_PC_G0
, /* type */
912 2, /* size (0 = byte, 1 = short, 2 = long) */
914 TRUE
, /* pc_relative */
916 complain_overflow_dont
,/* complain_on_overflow */
917 bfd_elf_generic_reloc
, /* special_function */
918 "R_ARM_ALU_PC_G0", /* name */
919 FALSE
, /* partial_inplace */
920 0xffffffff, /* src_mask */
921 0xffffffff, /* dst_mask */
922 TRUE
), /* pcrel_offset */
924 HOWTO (R_ARM_ALU_PC_G1_NC
, /* type */
926 2, /* size (0 = byte, 1 = short, 2 = long) */
928 TRUE
, /* pc_relative */
930 complain_overflow_dont
,/* complain_on_overflow */
931 bfd_elf_generic_reloc
, /* special_function */
932 "R_ARM_ALU_PC_G1_NC", /* name */
933 FALSE
, /* partial_inplace */
934 0xffffffff, /* src_mask */
935 0xffffffff, /* dst_mask */
936 TRUE
), /* pcrel_offset */
938 HOWTO (R_ARM_ALU_PC_G1
, /* type */
940 2, /* size (0 = byte, 1 = short, 2 = long) */
942 TRUE
, /* pc_relative */
944 complain_overflow_dont
,/* complain_on_overflow */
945 bfd_elf_generic_reloc
, /* special_function */
946 "R_ARM_ALU_PC_G1", /* name */
947 FALSE
, /* partial_inplace */
948 0xffffffff, /* src_mask */
949 0xffffffff, /* dst_mask */
950 TRUE
), /* pcrel_offset */
952 HOWTO (R_ARM_ALU_PC_G2
, /* type */
954 2, /* size (0 = byte, 1 = short, 2 = long) */
956 TRUE
, /* pc_relative */
958 complain_overflow_dont
,/* complain_on_overflow */
959 bfd_elf_generic_reloc
, /* special_function */
960 "R_ARM_ALU_PC_G2", /* name */
961 FALSE
, /* partial_inplace */
962 0xffffffff, /* src_mask */
963 0xffffffff, /* dst_mask */
964 TRUE
), /* pcrel_offset */
966 HOWTO (R_ARM_LDR_PC_G1
, /* type */
968 2, /* size (0 = byte, 1 = short, 2 = long) */
970 TRUE
, /* pc_relative */
972 complain_overflow_dont
,/* complain_on_overflow */
973 bfd_elf_generic_reloc
, /* special_function */
974 "R_ARM_LDR_PC_G1", /* name */
975 FALSE
, /* partial_inplace */
976 0xffffffff, /* src_mask */
977 0xffffffff, /* dst_mask */
978 TRUE
), /* pcrel_offset */
980 HOWTO (R_ARM_LDR_PC_G2
, /* type */
982 2, /* size (0 = byte, 1 = short, 2 = long) */
984 TRUE
, /* pc_relative */
986 complain_overflow_dont
,/* complain_on_overflow */
987 bfd_elf_generic_reloc
, /* special_function */
988 "R_ARM_LDR_PC_G2", /* name */
989 FALSE
, /* partial_inplace */
990 0xffffffff, /* src_mask */
991 0xffffffff, /* dst_mask */
992 TRUE
), /* pcrel_offset */
994 HOWTO (R_ARM_LDRS_PC_G0
, /* type */
996 2, /* size (0 = byte, 1 = short, 2 = long) */
998 TRUE
, /* pc_relative */
1000 complain_overflow_dont
,/* complain_on_overflow */
1001 bfd_elf_generic_reloc
, /* special_function */
1002 "R_ARM_LDRS_PC_G0", /* name */
1003 FALSE
, /* partial_inplace */
1004 0xffffffff, /* src_mask */
1005 0xffffffff, /* dst_mask */
1006 TRUE
), /* pcrel_offset */
1008 HOWTO (R_ARM_LDRS_PC_G1
, /* type */
1010 2, /* size (0 = byte, 1 = short, 2 = long) */
1012 TRUE
, /* pc_relative */
1014 complain_overflow_dont
,/* complain_on_overflow */
1015 bfd_elf_generic_reloc
, /* special_function */
1016 "R_ARM_LDRS_PC_G1", /* name */
1017 FALSE
, /* partial_inplace */
1018 0xffffffff, /* src_mask */
1019 0xffffffff, /* dst_mask */
1020 TRUE
), /* pcrel_offset */
1022 HOWTO (R_ARM_LDRS_PC_G2
, /* type */
1024 2, /* size (0 = byte, 1 = short, 2 = long) */
1026 TRUE
, /* pc_relative */
1028 complain_overflow_dont
,/* complain_on_overflow */
1029 bfd_elf_generic_reloc
, /* special_function */
1030 "R_ARM_LDRS_PC_G2", /* name */
1031 FALSE
, /* partial_inplace */
1032 0xffffffff, /* src_mask */
1033 0xffffffff, /* dst_mask */
1034 TRUE
), /* pcrel_offset */
1036 HOWTO (R_ARM_LDC_PC_G0
, /* type */
1038 2, /* size (0 = byte, 1 = short, 2 = long) */
1040 TRUE
, /* pc_relative */
1042 complain_overflow_dont
,/* complain_on_overflow */
1043 bfd_elf_generic_reloc
, /* special_function */
1044 "R_ARM_LDC_PC_G0", /* name */
1045 FALSE
, /* partial_inplace */
1046 0xffffffff, /* src_mask */
1047 0xffffffff, /* dst_mask */
1048 TRUE
), /* pcrel_offset */
1050 HOWTO (R_ARM_LDC_PC_G1
, /* type */
1052 2, /* size (0 = byte, 1 = short, 2 = long) */
1054 TRUE
, /* pc_relative */
1056 complain_overflow_dont
,/* complain_on_overflow */
1057 bfd_elf_generic_reloc
, /* special_function */
1058 "R_ARM_LDC_PC_G1", /* name */
1059 FALSE
, /* partial_inplace */
1060 0xffffffff, /* src_mask */
1061 0xffffffff, /* dst_mask */
1062 TRUE
), /* pcrel_offset */
1064 HOWTO (R_ARM_LDC_PC_G2
, /* type */
1066 2, /* size (0 = byte, 1 = short, 2 = long) */
1068 TRUE
, /* pc_relative */
1070 complain_overflow_dont
,/* complain_on_overflow */
1071 bfd_elf_generic_reloc
, /* special_function */
1072 "R_ARM_LDC_PC_G2", /* name */
1073 FALSE
, /* partial_inplace */
1074 0xffffffff, /* src_mask */
1075 0xffffffff, /* dst_mask */
1076 TRUE
), /* pcrel_offset */
1078 HOWTO (R_ARM_ALU_SB_G0_NC
, /* type */
1080 2, /* size (0 = byte, 1 = short, 2 = long) */
1082 TRUE
, /* pc_relative */
1084 complain_overflow_dont
,/* complain_on_overflow */
1085 bfd_elf_generic_reloc
, /* special_function */
1086 "R_ARM_ALU_SB_G0_NC", /* name */
1087 FALSE
, /* partial_inplace */
1088 0xffffffff, /* src_mask */
1089 0xffffffff, /* dst_mask */
1090 TRUE
), /* pcrel_offset */
1092 HOWTO (R_ARM_ALU_SB_G0
, /* type */
1094 2, /* size (0 = byte, 1 = short, 2 = long) */
1096 TRUE
, /* pc_relative */
1098 complain_overflow_dont
,/* complain_on_overflow */
1099 bfd_elf_generic_reloc
, /* special_function */
1100 "R_ARM_ALU_SB_G0", /* name */
1101 FALSE
, /* partial_inplace */
1102 0xffffffff, /* src_mask */
1103 0xffffffff, /* dst_mask */
1104 TRUE
), /* pcrel_offset */
1106 HOWTO (R_ARM_ALU_SB_G1_NC
, /* type */
1108 2, /* size (0 = byte, 1 = short, 2 = long) */
1110 TRUE
, /* pc_relative */
1112 complain_overflow_dont
,/* complain_on_overflow */
1113 bfd_elf_generic_reloc
, /* special_function */
1114 "R_ARM_ALU_SB_G1_NC", /* name */
1115 FALSE
, /* partial_inplace */
1116 0xffffffff, /* src_mask */
1117 0xffffffff, /* dst_mask */
1118 TRUE
), /* pcrel_offset */
1120 HOWTO (R_ARM_ALU_SB_G1
, /* type */
1122 2, /* size (0 = byte, 1 = short, 2 = long) */
1124 TRUE
, /* pc_relative */
1126 complain_overflow_dont
,/* complain_on_overflow */
1127 bfd_elf_generic_reloc
, /* special_function */
1128 "R_ARM_ALU_SB_G1", /* name */
1129 FALSE
, /* partial_inplace */
1130 0xffffffff, /* src_mask */
1131 0xffffffff, /* dst_mask */
1132 TRUE
), /* pcrel_offset */
1134 HOWTO (R_ARM_ALU_SB_G2
, /* type */
1136 2, /* size (0 = byte, 1 = short, 2 = long) */
1138 TRUE
, /* pc_relative */
1140 complain_overflow_dont
,/* complain_on_overflow */
1141 bfd_elf_generic_reloc
, /* special_function */
1142 "R_ARM_ALU_SB_G2", /* name */
1143 FALSE
, /* partial_inplace */
1144 0xffffffff, /* src_mask */
1145 0xffffffff, /* dst_mask */
1146 TRUE
), /* pcrel_offset */
1148 HOWTO (R_ARM_LDR_SB_G0
, /* type */
1150 2, /* size (0 = byte, 1 = short, 2 = long) */
1152 TRUE
, /* pc_relative */
1154 complain_overflow_dont
,/* complain_on_overflow */
1155 bfd_elf_generic_reloc
, /* special_function */
1156 "R_ARM_LDR_SB_G0", /* name */
1157 FALSE
, /* partial_inplace */
1158 0xffffffff, /* src_mask */
1159 0xffffffff, /* dst_mask */
1160 TRUE
), /* pcrel_offset */
1162 HOWTO (R_ARM_LDR_SB_G1
, /* type */
1164 2, /* size (0 = byte, 1 = short, 2 = long) */
1166 TRUE
, /* pc_relative */
1168 complain_overflow_dont
,/* complain_on_overflow */
1169 bfd_elf_generic_reloc
, /* special_function */
1170 "R_ARM_LDR_SB_G1", /* name */
1171 FALSE
, /* partial_inplace */
1172 0xffffffff, /* src_mask */
1173 0xffffffff, /* dst_mask */
1174 TRUE
), /* pcrel_offset */
1176 HOWTO (R_ARM_LDR_SB_G2
, /* type */
1178 2, /* size (0 = byte, 1 = short, 2 = long) */
1180 TRUE
, /* pc_relative */
1182 complain_overflow_dont
,/* complain_on_overflow */
1183 bfd_elf_generic_reloc
, /* special_function */
1184 "R_ARM_LDR_SB_G2", /* name */
1185 FALSE
, /* partial_inplace */
1186 0xffffffff, /* src_mask */
1187 0xffffffff, /* dst_mask */
1188 TRUE
), /* pcrel_offset */
1190 HOWTO (R_ARM_LDRS_SB_G0
, /* type */
1192 2, /* size (0 = byte, 1 = short, 2 = long) */
1194 TRUE
, /* pc_relative */
1196 complain_overflow_dont
,/* complain_on_overflow */
1197 bfd_elf_generic_reloc
, /* special_function */
1198 "R_ARM_LDRS_SB_G0", /* name */
1199 FALSE
, /* partial_inplace */
1200 0xffffffff, /* src_mask */
1201 0xffffffff, /* dst_mask */
1202 TRUE
), /* pcrel_offset */
1204 HOWTO (R_ARM_LDRS_SB_G1
, /* type */
1206 2, /* size (0 = byte, 1 = short, 2 = long) */
1208 TRUE
, /* pc_relative */
1210 complain_overflow_dont
,/* complain_on_overflow */
1211 bfd_elf_generic_reloc
, /* special_function */
1212 "R_ARM_LDRS_SB_G1", /* name */
1213 FALSE
, /* partial_inplace */
1214 0xffffffff, /* src_mask */
1215 0xffffffff, /* dst_mask */
1216 TRUE
), /* pcrel_offset */
1218 HOWTO (R_ARM_LDRS_SB_G2
, /* type */
1220 2, /* size (0 = byte, 1 = short, 2 = long) */
1222 TRUE
, /* pc_relative */
1224 complain_overflow_dont
,/* complain_on_overflow */
1225 bfd_elf_generic_reloc
, /* special_function */
1226 "R_ARM_LDRS_SB_G2", /* name */
1227 FALSE
, /* partial_inplace */
1228 0xffffffff, /* src_mask */
1229 0xffffffff, /* dst_mask */
1230 TRUE
), /* pcrel_offset */
1232 HOWTO (R_ARM_LDC_SB_G0
, /* type */
1234 2, /* size (0 = byte, 1 = short, 2 = long) */
1236 TRUE
, /* pc_relative */
1238 complain_overflow_dont
,/* complain_on_overflow */
1239 bfd_elf_generic_reloc
, /* special_function */
1240 "R_ARM_LDC_SB_G0", /* name */
1241 FALSE
, /* partial_inplace */
1242 0xffffffff, /* src_mask */
1243 0xffffffff, /* dst_mask */
1244 TRUE
), /* pcrel_offset */
1246 HOWTO (R_ARM_LDC_SB_G1
, /* type */
1248 2, /* size (0 = byte, 1 = short, 2 = long) */
1250 TRUE
, /* pc_relative */
1252 complain_overflow_dont
,/* complain_on_overflow */
1253 bfd_elf_generic_reloc
, /* special_function */
1254 "R_ARM_LDC_SB_G1", /* name */
1255 FALSE
, /* partial_inplace */
1256 0xffffffff, /* src_mask */
1257 0xffffffff, /* dst_mask */
1258 TRUE
), /* pcrel_offset */
1260 HOWTO (R_ARM_LDC_SB_G2
, /* type */
1262 2, /* size (0 = byte, 1 = short, 2 = long) */
1264 TRUE
, /* pc_relative */
1266 complain_overflow_dont
,/* complain_on_overflow */
1267 bfd_elf_generic_reloc
, /* special_function */
1268 "R_ARM_LDC_SB_G2", /* name */
1269 FALSE
, /* partial_inplace */
1270 0xffffffff, /* src_mask */
1271 0xffffffff, /* dst_mask */
1272 TRUE
), /* pcrel_offset */
1274 /* End of group relocations. */
1276 HOWTO (R_ARM_MOVW_BREL_NC
, /* type */
1278 2, /* size (0 = byte, 1 = short, 2 = long) */
1280 FALSE
, /* pc_relative */
1282 complain_overflow_dont
,/* complain_on_overflow */
1283 bfd_elf_generic_reloc
, /* special_function */
1284 "R_ARM_MOVW_BREL_NC", /* name */
1285 FALSE
, /* partial_inplace */
1286 0x0000ffff, /* src_mask */
1287 0x0000ffff, /* dst_mask */
1288 FALSE
), /* pcrel_offset */
1290 HOWTO (R_ARM_MOVT_BREL
, /* type */
1292 2, /* size (0 = byte, 1 = short, 2 = long) */
1294 FALSE
, /* pc_relative */
1296 complain_overflow_bitfield
,/* complain_on_overflow */
1297 bfd_elf_generic_reloc
, /* special_function */
1298 "R_ARM_MOVT_BREL", /* name */
1299 FALSE
, /* partial_inplace */
1300 0x0000ffff, /* src_mask */
1301 0x0000ffff, /* dst_mask */
1302 FALSE
), /* pcrel_offset */
1304 HOWTO (R_ARM_MOVW_BREL
, /* type */
1306 2, /* size (0 = byte, 1 = short, 2 = long) */
1308 FALSE
, /* pc_relative */
1310 complain_overflow_dont
,/* complain_on_overflow */
1311 bfd_elf_generic_reloc
, /* special_function */
1312 "R_ARM_MOVW_BREL", /* name */
1313 FALSE
, /* partial_inplace */
1314 0x0000ffff, /* src_mask */
1315 0x0000ffff, /* dst_mask */
1316 FALSE
), /* pcrel_offset */
1318 HOWTO (R_ARM_THM_MOVW_BREL_NC
,/* type */
1320 2, /* size (0 = byte, 1 = short, 2 = long) */
1322 FALSE
, /* pc_relative */
1324 complain_overflow_dont
,/* complain_on_overflow */
1325 bfd_elf_generic_reloc
, /* special_function */
1326 "R_ARM_THM_MOVW_BREL_NC",/* name */
1327 FALSE
, /* partial_inplace */
1328 0x040f70ff, /* src_mask */
1329 0x040f70ff, /* dst_mask */
1330 FALSE
), /* pcrel_offset */
1332 HOWTO (R_ARM_THM_MOVT_BREL
, /* type */
1334 2, /* size (0 = byte, 1 = short, 2 = long) */
1336 FALSE
, /* pc_relative */
1338 complain_overflow_bitfield
,/* complain_on_overflow */
1339 bfd_elf_generic_reloc
, /* special_function */
1340 "R_ARM_THM_MOVT_BREL", /* name */
1341 FALSE
, /* partial_inplace */
1342 0x040f70ff, /* src_mask */
1343 0x040f70ff, /* dst_mask */
1344 FALSE
), /* pcrel_offset */
1346 HOWTO (R_ARM_THM_MOVW_BREL
, /* type */
1348 2, /* size (0 = byte, 1 = short, 2 = long) */
1350 FALSE
, /* pc_relative */
1352 complain_overflow_dont
,/* complain_on_overflow */
1353 bfd_elf_generic_reloc
, /* special_function */
1354 "R_ARM_THM_MOVW_BREL", /* name */
1355 FALSE
, /* partial_inplace */
1356 0x040f70ff, /* src_mask */
1357 0x040f70ff, /* dst_mask */
1358 FALSE
), /* pcrel_offset */
1360 HOWTO (R_ARM_TLS_GOTDESC
, /* type */
1362 2, /* size (0 = byte, 1 = short, 2 = long) */
1364 FALSE
, /* pc_relative */
1366 complain_overflow_bitfield
,/* complain_on_overflow */
1367 NULL
, /* special_function */
1368 "R_ARM_TLS_GOTDESC", /* name */
1369 TRUE
, /* partial_inplace */
1370 0xffffffff, /* src_mask */
1371 0xffffffff, /* dst_mask */
1372 FALSE
), /* pcrel_offset */
1374 HOWTO (R_ARM_TLS_CALL
, /* type */
1376 2, /* size (0 = byte, 1 = short, 2 = long) */
1378 FALSE
, /* pc_relative */
1380 complain_overflow_dont
,/* complain_on_overflow */
1381 bfd_elf_generic_reloc
, /* special_function */
1382 "R_ARM_TLS_CALL", /* name */
1383 FALSE
, /* partial_inplace */
1384 0x00ffffff, /* src_mask */
1385 0x00ffffff, /* dst_mask */
1386 FALSE
), /* pcrel_offset */
1388 HOWTO (R_ARM_TLS_DESCSEQ
, /* type */
1390 2, /* size (0 = byte, 1 = short, 2 = long) */
1392 FALSE
, /* pc_relative */
1394 complain_overflow_bitfield
,/* complain_on_overflow */
1395 bfd_elf_generic_reloc
, /* special_function */
1396 "R_ARM_TLS_DESCSEQ", /* name */
1397 FALSE
, /* partial_inplace */
1398 0x00000000, /* src_mask */
1399 0x00000000, /* dst_mask */
1400 FALSE
), /* pcrel_offset */
1402 HOWTO (R_ARM_THM_TLS_CALL
, /* type */
1404 2, /* size (0 = byte, 1 = short, 2 = long) */
1406 FALSE
, /* pc_relative */
1408 complain_overflow_dont
,/* complain_on_overflow */
1409 bfd_elf_generic_reloc
, /* special_function */
1410 "R_ARM_THM_TLS_CALL", /* name */
1411 FALSE
, /* partial_inplace */
1412 0x07ff07ff, /* src_mask */
1413 0x07ff07ff, /* dst_mask */
1414 FALSE
), /* pcrel_offset */
1416 HOWTO (R_ARM_PLT32_ABS
, /* type */
1418 2, /* size (0 = byte, 1 = short, 2 = long) */
1420 FALSE
, /* pc_relative */
1422 complain_overflow_dont
,/* complain_on_overflow */
1423 bfd_elf_generic_reloc
, /* special_function */
1424 "R_ARM_PLT32_ABS", /* name */
1425 FALSE
, /* partial_inplace */
1426 0xffffffff, /* src_mask */
1427 0xffffffff, /* dst_mask */
1428 FALSE
), /* pcrel_offset */
1430 HOWTO (R_ARM_GOT_ABS
, /* type */
1432 2, /* size (0 = byte, 1 = short, 2 = long) */
1434 FALSE
, /* pc_relative */
1436 complain_overflow_dont
,/* complain_on_overflow */
1437 bfd_elf_generic_reloc
, /* special_function */
1438 "R_ARM_GOT_ABS", /* name */
1439 FALSE
, /* partial_inplace */
1440 0xffffffff, /* src_mask */
1441 0xffffffff, /* dst_mask */
1442 FALSE
), /* pcrel_offset */
1444 HOWTO (R_ARM_GOT_PREL
, /* type */
1446 2, /* size (0 = byte, 1 = short, 2 = long) */
1448 TRUE
, /* pc_relative */
1450 complain_overflow_dont
, /* complain_on_overflow */
1451 bfd_elf_generic_reloc
, /* special_function */
1452 "R_ARM_GOT_PREL", /* name */
1453 FALSE
, /* partial_inplace */
1454 0xffffffff, /* src_mask */
1455 0xffffffff, /* dst_mask */
1456 TRUE
), /* pcrel_offset */
1458 HOWTO (R_ARM_GOT_BREL12
, /* type */
1460 2, /* size (0 = byte, 1 = short, 2 = long) */
1462 FALSE
, /* pc_relative */
1464 complain_overflow_bitfield
,/* complain_on_overflow */
1465 bfd_elf_generic_reloc
, /* special_function */
1466 "R_ARM_GOT_BREL12", /* name */
1467 FALSE
, /* partial_inplace */
1468 0x00000fff, /* src_mask */
1469 0x00000fff, /* dst_mask */
1470 FALSE
), /* pcrel_offset */
1472 HOWTO (R_ARM_GOTOFF12
, /* type */
1474 2, /* size (0 = byte, 1 = short, 2 = long) */
1476 FALSE
, /* pc_relative */
1478 complain_overflow_bitfield
,/* complain_on_overflow */
1479 bfd_elf_generic_reloc
, /* special_function */
1480 "R_ARM_GOTOFF12", /* name */
1481 FALSE
, /* partial_inplace */
1482 0x00000fff, /* src_mask */
1483 0x00000fff, /* dst_mask */
1484 FALSE
), /* pcrel_offset */
1486 EMPTY_HOWTO (R_ARM_GOTRELAX
), /* reserved for future GOT-load optimizations */
1488 /* GNU extension to record C++ vtable member usage */
1489 HOWTO (R_ARM_GNU_VTENTRY
, /* type */
1491 2, /* size (0 = byte, 1 = short, 2 = long) */
1493 FALSE
, /* pc_relative */
1495 complain_overflow_dont
, /* complain_on_overflow */
1496 _bfd_elf_rel_vtable_reloc_fn
, /* special_function */
1497 "R_ARM_GNU_VTENTRY", /* name */
1498 FALSE
, /* partial_inplace */
1501 FALSE
), /* pcrel_offset */
1503 /* GNU extension to record C++ vtable hierarchy */
1504 HOWTO (R_ARM_GNU_VTINHERIT
, /* type */
1506 2, /* size (0 = byte, 1 = short, 2 = long) */
1508 FALSE
, /* pc_relative */
1510 complain_overflow_dont
, /* complain_on_overflow */
1511 NULL
, /* special_function */
1512 "R_ARM_GNU_VTINHERIT", /* name */
1513 FALSE
, /* partial_inplace */
1516 FALSE
), /* pcrel_offset */
1518 HOWTO (R_ARM_THM_JUMP11
, /* type */
1520 1, /* size (0 = byte, 1 = short, 2 = long) */
1522 TRUE
, /* pc_relative */
1524 complain_overflow_signed
, /* complain_on_overflow */
1525 bfd_elf_generic_reloc
, /* special_function */
1526 "R_ARM_THM_JUMP11", /* name */
1527 FALSE
, /* partial_inplace */
1528 0x000007ff, /* src_mask */
1529 0x000007ff, /* dst_mask */
1530 TRUE
), /* pcrel_offset */
1532 HOWTO (R_ARM_THM_JUMP8
, /* type */
1534 1, /* size (0 = byte, 1 = short, 2 = long) */
1536 TRUE
, /* pc_relative */
1538 complain_overflow_signed
, /* complain_on_overflow */
1539 bfd_elf_generic_reloc
, /* special_function */
1540 "R_ARM_THM_JUMP8", /* name */
1541 FALSE
, /* partial_inplace */
1542 0x000000ff, /* src_mask */
1543 0x000000ff, /* dst_mask */
1544 TRUE
), /* pcrel_offset */
1546 /* TLS relocations */
1547 HOWTO (R_ARM_TLS_GD32
, /* type */
1549 2, /* size (0 = byte, 1 = short, 2 = long) */
1551 FALSE
, /* pc_relative */
1553 complain_overflow_bitfield
,/* complain_on_overflow */
1554 NULL
, /* special_function */
1555 "R_ARM_TLS_GD32", /* name */
1556 TRUE
, /* partial_inplace */
1557 0xffffffff, /* src_mask */
1558 0xffffffff, /* dst_mask */
1559 FALSE
), /* pcrel_offset */
1561 HOWTO (R_ARM_TLS_LDM32
, /* type */
1563 2, /* size (0 = byte, 1 = short, 2 = long) */
1565 FALSE
, /* pc_relative */
1567 complain_overflow_bitfield
,/* complain_on_overflow */
1568 bfd_elf_generic_reloc
, /* special_function */
1569 "R_ARM_TLS_LDM32", /* name */
1570 TRUE
, /* partial_inplace */
1571 0xffffffff, /* src_mask */
1572 0xffffffff, /* dst_mask */
1573 FALSE
), /* pcrel_offset */
1575 HOWTO (R_ARM_TLS_LDO32
, /* type */
1577 2, /* size (0 = byte, 1 = short, 2 = long) */
1579 FALSE
, /* pc_relative */
1581 complain_overflow_bitfield
,/* complain_on_overflow */
1582 bfd_elf_generic_reloc
, /* special_function */
1583 "R_ARM_TLS_LDO32", /* name */
1584 TRUE
, /* partial_inplace */
1585 0xffffffff, /* src_mask */
1586 0xffffffff, /* dst_mask */
1587 FALSE
), /* pcrel_offset */
1589 HOWTO (R_ARM_TLS_IE32
, /* type */
1591 2, /* size (0 = byte, 1 = short, 2 = long) */
1593 FALSE
, /* pc_relative */
1595 complain_overflow_bitfield
,/* complain_on_overflow */
1596 NULL
, /* special_function */
1597 "R_ARM_TLS_IE32", /* name */
1598 TRUE
, /* partial_inplace */
1599 0xffffffff, /* src_mask */
1600 0xffffffff, /* dst_mask */
1601 FALSE
), /* pcrel_offset */
1603 HOWTO (R_ARM_TLS_LE32
, /* type */
1605 2, /* size (0 = byte, 1 = short, 2 = long) */
1607 FALSE
, /* pc_relative */
1609 complain_overflow_bitfield
,/* complain_on_overflow */
1610 NULL
, /* special_function */
1611 "R_ARM_TLS_LE32", /* name */
1612 TRUE
, /* partial_inplace */
1613 0xffffffff, /* src_mask */
1614 0xffffffff, /* dst_mask */
1615 FALSE
), /* pcrel_offset */
1617 HOWTO (R_ARM_TLS_LDO12
, /* type */
1619 2, /* size (0 = byte, 1 = short, 2 = long) */
1621 FALSE
, /* pc_relative */
1623 complain_overflow_bitfield
,/* complain_on_overflow */
1624 bfd_elf_generic_reloc
, /* special_function */
1625 "R_ARM_TLS_LDO12", /* name */
1626 FALSE
, /* partial_inplace */
1627 0x00000fff, /* src_mask */
1628 0x00000fff, /* dst_mask */
1629 FALSE
), /* pcrel_offset */
1631 HOWTO (R_ARM_TLS_LE12
, /* type */
1633 2, /* size (0 = byte, 1 = short, 2 = long) */
1635 FALSE
, /* pc_relative */
1637 complain_overflow_bitfield
,/* complain_on_overflow */
1638 bfd_elf_generic_reloc
, /* special_function */
1639 "R_ARM_TLS_LE12", /* name */
1640 FALSE
, /* partial_inplace */
1641 0x00000fff, /* src_mask */
1642 0x00000fff, /* dst_mask */
1643 FALSE
), /* pcrel_offset */
1645 HOWTO (R_ARM_TLS_IE12GP
, /* type */
1647 2, /* size (0 = byte, 1 = short, 2 = long) */
1649 FALSE
, /* pc_relative */
1651 complain_overflow_bitfield
,/* complain_on_overflow */
1652 bfd_elf_generic_reloc
, /* special_function */
1653 "R_ARM_TLS_IE12GP", /* name */
1654 FALSE
, /* partial_inplace */
1655 0x00000fff, /* src_mask */
1656 0x00000fff, /* dst_mask */
1657 FALSE
), /* pcrel_offset */
1659 /* 112-127 private relocations. */
1677 /* R_ARM_ME_TOO, obsolete. */
1680 HOWTO (R_ARM_THM_TLS_DESCSEQ
, /* type */
1682 1, /* size (0 = byte, 1 = short, 2 = long) */
1684 FALSE
, /* pc_relative */
1686 complain_overflow_bitfield
,/* complain_on_overflow */
1687 bfd_elf_generic_reloc
, /* special_function */
1688 "R_ARM_THM_TLS_DESCSEQ",/* name */
1689 FALSE
, /* partial_inplace */
1690 0x00000000, /* src_mask */
1691 0x00000000, /* dst_mask */
1692 FALSE
), /* pcrel_offset */
1695 HOWTO (R_ARM_THM_ALU_ABS_G0_NC
,/* type. */
1696 0, /* rightshift. */
1697 1, /* size (0 = byte, 1 = short, 2 = long). */
1699 FALSE
, /* pc_relative. */
1701 complain_overflow_bitfield
,/* complain_on_overflow. */
1702 bfd_elf_generic_reloc
, /* special_function. */
1703 "R_ARM_THM_ALU_ABS_G0_NC",/* name. */
1704 FALSE
, /* partial_inplace. */
1705 0x00000000, /* src_mask. */
1706 0x00000000, /* dst_mask. */
1707 FALSE
), /* pcrel_offset. */
1708 HOWTO (R_ARM_THM_ALU_ABS_G1_NC
,/* type. */
1709 0, /* rightshift. */
1710 1, /* size (0 = byte, 1 = short, 2 = long). */
1712 FALSE
, /* pc_relative. */
1714 complain_overflow_bitfield
,/* complain_on_overflow. */
1715 bfd_elf_generic_reloc
, /* special_function. */
1716 "R_ARM_THM_ALU_ABS_G1_NC",/* name. */
1717 FALSE
, /* partial_inplace. */
1718 0x00000000, /* src_mask. */
1719 0x00000000, /* dst_mask. */
1720 FALSE
), /* pcrel_offset. */
1721 HOWTO (R_ARM_THM_ALU_ABS_G2_NC
,/* type. */
1722 0, /* rightshift. */
1723 1, /* size (0 = byte, 1 = short, 2 = long). */
1725 FALSE
, /* pc_relative. */
1727 complain_overflow_bitfield
,/* complain_on_overflow. */
1728 bfd_elf_generic_reloc
, /* special_function. */
1729 "R_ARM_THM_ALU_ABS_G2_NC",/* name. */
1730 FALSE
, /* partial_inplace. */
1731 0x00000000, /* src_mask. */
1732 0x00000000, /* dst_mask. */
1733 FALSE
), /* pcrel_offset. */
1734 HOWTO (R_ARM_THM_ALU_ABS_G3_NC
,/* type. */
1735 0, /* rightshift. */
1736 1, /* size (0 = byte, 1 = short, 2 = long). */
1738 FALSE
, /* pc_relative. */
1740 complain_overflow_bitfield
,/* complain_on_overflow. */
1741 bfd_elf_generic_reloc
, /* special_function. */
1742 "R_ARM_THM_ALU_ABS_G3_NC",/* name. */
1743 FALSE
, /* partial_inplace. */
1744 0x00000000, /* src_mask. */
1745 0x00000000, /* dst_mask. */
1746 FALSE
), /* pcrel_offset. */
1747 /* Relocations for Armv8.1-M Mainline. */
1748 HOWTO (R_ARM_THM_BF16
, /* type. */
1749 0, /* rightshift. */
1750 1, /* size (0 = byte, 1 = short, 2 = long). */
1752 TRUE
, /* pc_relative. */
1754 complain_overflow_dont
,/* do not complain_on_overflow. */
1755 bfd_elf_generic_reloc
, /* special_function. */
1756 "R_ARM_THM_BF16", /* name. */
1757 FALSE
, /* partial_inplace. */
1758 0x001f0ffe, /* src_mask. */
1759 0x001f0ffe, /* dst_mask. */
1760 TRUE
), /* pcrel_offset. */
1761 HOWTO (R_ARM_THM_BF12
, /* type. */
1762 0, /* rightshift. */
1763 1, /* size (0 = byte, 1 = short, 2 = long). */
1765 TRUE
, /* pc_relative. */
1767 complain_overflow_dont
,/* do not complain_on_overflow. */
1768 bfd_elf_generic_reloc
, /* special_function. */
1769 "R_ARM_THM_BF12", /* name. */
1770 FALSE
, /* partial_inplace. */
1771 0x00010ffe, /* src_mask. */
1772 0x00010ffe, /* dst_mask. */
1773 TRUE
), /* pcrel_offset. */
1774 HOWTO (R_ARM_THM_BF18
, /* type. */
1775 0, /* rightshift. */
1776 1, /* size (0 = byte, 1 = short, 2 = long). */
1778 TRUE
, /* pc_relative. */
1780 complain_overflow_dont
,/* do not complain_on_overflow. */
1781 bfd_elf_generic_reloc
, /* special_function. */
1782 "R_ARM_THM_BF18", /* name. */
1783 FALSE
, /* partial_inplace. */
1784 0x007f0ffe, /* src_mask. */
1785 0x007f0ffe, /* dst_mask. */
1786 TRUE
), /* pcrel_offset. */
};

static reloc_howto_type elf32_arm_howto_table_2[8] =
{
  HOWTO (R_ARM_IRELATIVE,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_IRELATIVE",	/* name */
	 TRUE,			/* partial_inplace */
	 0xffffffff,		/* src_mask */
	 0xffffffff,		/* dst_mask */
	 FALSE),		/* pcrel_offset */
1805 HOWTO (R_ARM_GOTFUNCDESC
, /* type */
1807 2, /* size (0 = byte, 1 = short, 2 = long) */
1809 FALSE
, /* pc_relative */
1811 complain_overflow_bitfield
,/* complain_on_overflow */
1812 bfd_elf_generic_reloc
, /* special_function */
1813 "R_ARM_GOTFUNCDESC", /* name */
1814 FALSE
, /* partial_inplace */
1816 0xffffffff, /* dst_mask */
1817 FALSE
), /* pcrel_offset */
1818 HOWTO (R_ARM_GOTOFFFUNCDESC
, /* type */
1820 2, /* size (0 = byte, 1 = short, 2 = long) */
1822 FALSE
, /* pc_relative */
1824 complain_overflow_bitfield
,/* complain_on_overflow */
1825 bfd_elf_generic_reloc
, /* special_function */
1826 "R_ARM_GOTOFFFUNCDESC",/* name */
1827 FALSE
, /* partial_inplace */
1829 0xffffffff, /* dst_mask */
1830 FALSE
), /* pcrel_offset */
1831 HOWTO (R_ARM_FUNCDESC
, /* type */
1833 2, /* size (0 = byte, 1 = short, 2 = long) */
1835 FALSE
, /* pc_relative */
1837 complain_overflow_bitfield
,/* complain_on_overflow */
1838 bfd_elf_generic_reloc
, /* special_function */
1839 "R_ARM_FUNCDESC", /* name */
1840 FALSE
, /* partial_inplace */
1842 0xffffffff, /* dst_mask */
1843 FALSE
), /* pcrel_offset */
1844 HOWTO (R_ARM_FUNCDESC_VALUE
, /* type */
1846 2, /* size (0 = byte, 1 = short, 2 = long) */
1848 FALSE
, /* pc_relative */
1850 complain_overflow_bitfield
,/* complain_on_overflow */
1851 bfd_elf_generic_reloc
, /* special_function */
1852 "R_ARM_FUNCDESC_VALUE",/* name */
1853 FALSE
, /* partial_inplace */
1855 0xffffffff, /* dst_mask */
1856 FALSE
), /* pcrel_offset */
1857 HOWTO (R_ARM_TLS_GD32_FDPIC
, /* type */
1859 2, /* size (0 = byte, 1 = short, 2 = long) */
1861 FALSE
, /* pc_relative */
1863 complain_overflow_bitfield
,/* complain_on_overflow */
1864 bfd_elf_generic_reloc
, /* special_function */
1865 "R_ARM_TLS_GD32_FDPIC",/* name */
1866 FALSE
, /* partial_inplace */
1868 0xffffffff, /* dst_mask */
1869 FALSE
), /* pcrel_offset */
1870 HOWTO (R_ARM_TLS_LDM32_FDPIC
, /* type */
1872 2, /* size (0 = byte, 1 = short, 2 = long) */
1874 FALSE
, /* pc_relative */
1876 complain_overflow_bitfield
,/* complain_on_overflow */
1877 bfd_elf_generic_reloc
, /* special_function */
1878 "R_ARM_TLS_LDM32_FDPIC",/* name */
1879 FALSE
, /* partial_inplace */
1881 0xffffffff, /* dst_mask */
1882 FALSE
), /* pcrel_offset */
1883 HOWTO (R_ARM_TLS_IE32_FDPIC
, /* type */
1885 2, /* size (0 = byte, 1 = short, 2 = long) */
1887 FALSE
, /* pc_relative */
1889 complain_overflow_bitfield
,/* complain_on_overflow */
1890 bfd_elf_generic_reloc
, /* special_function */
1891 "R_ARM_TLS_IE32_FDPIC",/* name */
1892 FALSE
, /* partial_inplace */
1894 0xffffffff, /* dst_mask */
1895 FALSE
), /* pcrel_offset */
};

/* 249-255 extended, currently unused, relocations:  */
static reloc_howto_type elf32_arm_howto_table_3[4] =
{
1901 HOWTO (R_ARM_RREL32
, /* type */
1903 0, /* size (0 = byte, 1 = short, 2 = long) */
1905 FALSE
, /* pc_relative */
1907 complain_overflow_dont
,/* complain_on_overflow */
1908 bfd_elf_generic_reloc
, /* special_function */
1909 "R_ARM_RREL32", /* name */
1910 FALSE
, /* partial_inplace */
1913 FALSE
), /* pcrel_offset */
1915 HOWTO (R_ARM_RABS32
, /* type */
1917 0, /* size (0 = byte, 1 = short, 2 = long) */
1919 FALSE
, /* pc_relative */
1921 complain_overflow_dont
,/* complain_on_overflow */
1922 bfd_elf_generic_reloc
, /* special_function */
1923 "R_ARM_RABS32", /* name */
1924 FALSE
, /* partial_inplace */
1927 FALSE
), /* pcrel_offset */
1929 HOWTO (R_ARM_RPC24
, /* type */
1931 0, /* size (0 = byte, 1 = short, 2 = long) */
1933 FALSE
, /* pc_relative */
1935 complain_overflow_dont
,/* complain_on_overflow */
1936 bfd_elf_generic_reloc
, /* special_function */
1937 "R_ARM_RPC24", /* name */
1938 FALSE
, /* partial_inplace */
1941 FALSE
), /* pcrel_offset */
1943 HOWTO (R_ARM_RBASE
, /* type */
1945 0, /* size (0 = byte, 1 = short, 2 = long) */
1947 FALSE
, /* pc_relative */
1949 complain_overflow_dont
,/* complain_on_overflow */
1950 bfd_elf_generic_reloc
, /* special_function */
1951 "R_ARM_RBASE", /* name */
1952 FALSE
, /* partial_inplace */
1955 FALSE
) /* pcrel_offset */
};

static reloc_howto_type *
elf32_arm_howto_from_type (unsigned int r_type)
{
  if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
    return &elf32_arm_howto_table_1[r_type];

  if (r_type >= R_ARM_IRELATIVE
      && r_type < R_ARM_IRELATIVE + ARRAY_SIZE (elf32_arm_howto_table_2))
    return &elf32_arm_howto_table_2[r_type - R_ARM_IRELATIVE];

  if (r_type >= R_ARM_RREL32
      && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_3))
    return &elf32_arm_howto_table_3[r_type - R_ARM_RREL32];

  return NULL;
}
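/* Fill in the howto pointer of BFD_RELOC from the relocation type
   encoded in ELF_RELOC's r_info field, reporting an error for any
   type that has no howto entry.  */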
static bfd_boolean
elf32_arm_info_to_howto (bfd * abfd, arelent * bfd_reloc,
			 Elf_Internal_Rela * elf_reloc)
{
  unsigned int r_type;

  r_type = ELF32_R_TYPE (elf_reloc->r_info);
  if ((bfd_reloc->howto = elf32_arm_howto_from_type (r_type)) == NULL)
    {
      /* xgettext:c-format */
      _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
			  abfd, r_type);
      bfd_set_error (bfd_error_bad_value);
      return FALSE;
    }
  return TRUE;
}
struct elf32_arm_reloc_map
  {
    bfd_reloc_code_real_type  bfd_reloc_val;
    unsigned char	      elf_reloc_val;
  };
/* All entries in this list must also be present in elf32_arm_howto_table.  */
static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
  {
    {BFD_RELOC_NONE,		     R_ARM_NONE},
    {BFD_RELOC_ARM_PCREL_BRANCH,     R_ARM_PC24},
    {BFD_RELOC_ARM_PCREL_CALL,	     R_ARM_CALL},
    {BFD_RELOC_ARM_PCREL_JUMP,	     R_ARM_JUMP24},
    {BFD_RELOC_ARM_PCREL_BLX,	     R_ARM_XPC25},
    {BFD_RELOC_THUMB_PCREL_BLX,	     R_ARM_THM_XPC22},
    {BFD_RELOC_32,		     R_ARM_ABS32},
    {BFD_RELOC_32_PCREL,	     R_ARM_REL32},
    {BFD_RELOC_8,		     R_ARM_ABS8},
    {BFD_RELOC_16,		     R_ARM_ABS16},
    {BFD_RELOC_ARM_OFFSET_IMM,	     R_ARM_ABS12},
    {BFD_RELOC_ARM_THUMB_OFFSET,     R_ARM_THM_ABS5},
    {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
    {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
    {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
    {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
    {BFD_RELOC_THUMB_PCREL_BRANCH9,  R_ARM_THM_JUMP8},
    {BFD_RELOC_THUMB_PCREL_BRANCH7,  R_ARM_THM_JUMP6},
    {BFD_RELOC_ARM_GLOB_DAT,	     R_ARM_GLOB_DAT},
    {BFD_RELOC_ARM_JUMP_SLOT,	     R_ARM_JUMP_SLOT},
    {BFD_RELOC_ARM_RELATIVE,	     R_ARM_RELATIVE},
    {BFD_RELOC_ARM_GOTOFF,	     R_ARM_GOTOFF32},
    {BFD_RELOC_ARM_GOTPC,	     R_ARM_GOTPC},
    {BFD_RELOC_ARM_GOT_PREL,	     R_ARM_GOT_PREL},
    {BFD_RELOC_ARM_GOT32,	     R_ARM_GOT32},
    {BFD_RELOC_ARM_PLT32,	     R_ARM_PLT32},
    {BFD_RELOC_ARM_TARGET1,	     R_ARM_TARGET1},
    {BFD_RELOC_ARM_ROSEGREL32,	     R_ARM_ROSEGREL32},
    {BFD_RELOC_ARM_SBREL32,	     R_ARM_SBREL32},
    {BFD_RELOC_ARM_PREL31,	     R_ARM_PREL31},
    {BFD_RELOC_ARM_TARGET2,	     R_ARM_TARGET2},
    {BFD_RELOC_ARM_PLT32,	     R_ARM_PLT32},
    {BFD_RELOC_ARM_TLS_GOTDESC,	     R_ARM_TLS_GOTDESC},
    {BFD_RELOC_ARM_TLS_CALL,	     R_ARM_TLS_CALL},
    {BFD_RELOC_ARM_THM_TLS_CALL,     R_ARM_THM_TLS_CALL},
    {BFD_RELOC_ARM_TLS_DESCSEQ,	     R_ARM_TLS_DESCSEQ},
    {BFD_RELOC_ARM_THM_TLS_DESCSEQ,  R_ARM_THM_TLS_DESCSEQ},
    {BFD_RELOC_ARM_TLS_DESC,	     R_ARM_TLS_DESC},
    {BFD_RELOC_ARM_TLS_GD32,	     R_ARM_TLS_GD32},
    {BFD_RELOC_ARM_TLS_LDO32,	     R_ARM_TLS_LDO32},
    {BFD_RELOC_ARM_TLS_LDM32,	     R_ARM_TLS_LDM32},
    {BFD_RELOC_ARM_TLS_DTPMOD32,     R_ARM_TLS_DTPMOD32},
    {BFD_RELOC_ARM_TLS_DTPOFF32,     R_ARM_TLS_DTPOFF32},
    {BFD_RELOC_ARM_TLS_TPOFF32,	     R_ARM_TLS_TPOFF32},
    {BFD_RELOC_ARM_TLS_IE32,	     R_ARM_TLS_IE32},
    {BFD_RELOC_ARM_TLS_LE32,	     R_ARM_TLS_LE32},
    {BFD_RELOC_ARM_IRELATIVE,	     R_ARM_IRELATIVE},
    {BFD_RELOC_ARM_GOTFUNCDESC,	     R_ARM_GOTFUNCDESC},
    {BFD_RELOC_ARM_GOTOFFFUNCDESC,   R_ARM_GOTOFFFUNCDESC},
    {BFD_RELOC_ARM_FUNCDESC,	     R_ARM_FUNCDESC},
    {BFD_RELOC_ARM_FUNCDESC_VALUE,   R_ARM_FUNCDESC_VALUE},
    {BFD_RELOC_ARM_TLS_GD32_FDPIC,   R_ARM_TLS_GD32_FDPIC},
    {BFD_RELOC_ARM_TLS_LDM32_FDPIC,  R_ARM_TLS_LDM32_FDPIC},
    {BFD_RELOC_ARM_TLS_IE32_FDPIC,   R_ARM_TLS_IE32_FDPIC},
    {BFD_RELOC_VTABLE_INHERIT,	     R_ARM_GNU_VTINHERIT},
    {BFD_RELOC_VTABLE_ENTRY,	     R_ARM_GNU_VTENTRY},
    {BFD_RELOC_ARM_MOVW,	     R_ARM_MOVW_ABS_NC},
    {BFD_RELOC_ARM_MOVT,	     R_ARM_MOVT_ABS},
    {BFD_RELOC_ARM_MOVW_PCREL,	     R_ARM_MOVW_PREL_NC},
    {BFD_RELOC_ARM_MOVT_PCREL,	     R_ARM_MOVT_PREL},
    {BFD_RELOC_ARM_THUMB_MOVW,	     R_ARM_THM_MOVW_ABS_NC},
    {BFD_RELOC_ARM_THUMB_MOVT,	     R_ARM_THM_MOVT_ABS},
    {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
    {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
    {BFD_RELOC_ARM_ALU_PC_G0_NC,     R_ARM_ALU_PC_G0_NC},
    {BFD_RELOC_ARM_ALU_PC_G0,	     R_ARM_ALU_PC_G0},
    {BFD_RELOC_ARM_ALU_PC_G1_NC,     R_ARM_ALU_PC_G1_NC},
    {BFD_RELOC_ARM_ALU_PC_G1,	     R_ARM_ALU_PC_G1},
    {BFD_RELOC_ARM_ALU_PC_G2,	     R_ARM_ALU_PC_G2},
    {BFD_RELOC_ARM_LDR_PC_G0,	     R_ARM_LDR_PC_G0},
    {BFD_RELOC_ARM_LDR_PC_G1,	     R_ARM_LDR_PC_G1},
    {BFD_RELOC_ARM_LDR_PC_G2,	     R_ARM_LDR_PC_G2},
    {BFD_RELOC_ARM_LDRS_PC_G0,	     R_ARM_LDRS_PC_G0},
    {BFD_RELOC_ARM_LDRS_PC_G1,	     R_ARM_LDRS_PC_G1},
    {BFD_RELOC_ARM_LDRS_PC_G2,	     R_ARM_LDRS_PC_G2},
    {BFD_RELOC_ARM_LDC_PC_G0,	     R_ARM_LDC_PC_G0},
    {BFD_RELOC_ARM_LDC_PC_G1,	     R_ARM_LDC_PC_G1},
    {BFD_RELOC_ARM_LDC_PC_G2,	     R_ARM_LDC_PC_G2},
    {BFD_RELOC_ARM_ALU_SB_G0_NC,     R_ARM_ALU_SB_G0_NC},
    {BFD_RELOC_ARM_ALU_SB_G0,	     R_ARM_ALU_SB_G0},
    {BFD_RELOC_ARM_ALU_SB_G1_NC,     R_ARM_ALU_SB_G1_NC},
    {BFD_RELOC_ARM_ALU_SB_G1,	     R_ARM_ALU_SB_G1},
    {BFD_RELOC_ARM_ALU_SB_G2,	     R_ARM_ALU_SB_G2},
    {BFD_RELOC_ARM_LDR_SB_G0,	     R_ARM_LDR_SB_G0},
    {BFD_RELOC_ARM_LDR_SB_G1,	     R_ARM_LDR_SB_G1},
    {BFD_RELOC_ARM_LDR_SB_G2,	     R_ARM_LDR_SB_G2},
    {BFD_RELOC_ARM_LDRS_SB_G0,	     R_ARM_LDRS_SB_G0},
    {BFD_RELOC_ARM_LDRS_SB_G1,	     R_ARM_LDRS_SB_G1},
    {BFD_RELOC_ARM_LDRS_SB_G2,	     R_ARM_LDRS_SB_G2},
    {BFD_RELOC_ARM_LDC_SB_G0,	     R_ARM_LDC_SB_G0},
    {BFD_RELOC_ARM_LDC_SB_G1,	     R_ARM_LDC_SB_G1},
2093 {BFD_RELOC_ARM_LDC_SB_G2
, R_ARM_LDC_SB_G2
},
2094 {BFD_RELOC_ARM_V4BX
, R_ARM_V4BX
},
2095 {BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
, R_ARM_THM_ALU_ABS_G3_NC
},
2096 {BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
, R_ARM_THM_ALU_ABS_G2_NC
},
2097 {BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
, R_ARM_THM_ALU_ABS_G1_NC
},
2098 {BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
, R_ARM_THM_ALU_ABS_G0_NC
},
2099 {BFD_RELOC_ARM_THUMB_BF17
, R_ARM_THM_BF16
},
2100 {BFD_RELOC_ARM_THUMB_BF13
, R_ARM_THM_BF12
},
2101 {BFD_RELOC_ARM_THUMB_BF19
, R_ARM_THM_BF18
}
static reloc_howto_type *
elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
			     bfd_reloc_code_real_type code)
{
  unsigned int i;

  for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i++)
    if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
      return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);

  return NULL;
}
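
/* For example, a request for BFD_RELOC_32 arriving through the generic
   bfd_reloc_type_lookup interface matches the {BFD_RELOC_32, R_ARM_ABS32}
   entry in the map above and hands back the R_ARM_ABS32 howto via
   elf32_arm_howto_from_type.  The scan is a simple linear search over the
   whole table.  */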
static reloc_howto_type *
elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
			     const char *r_name)
{
  unsigned int i;

  for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
    if (elf32_arm_howto_table_1[i].name != NULL
	&& strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
      return &elf32_arm_howto_table_1[i];

  for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
    if (elf32_arm_howto_table_2[i].name != NULL
	&& strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
      return &elf32_arm_howto_table_2[i];

  for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_3); i++)
    if (elf32_arm_howto_table_3[i].name != NULL
	&& strcasecmp (elf32_arm_howto_table_3[i].name, r_name) == 0)
      return &elf32_arm_howto_table_3[i];

  return NULL;
}
/* Support for core dump NOTE sections.  */

static bfd_boolean
elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
{
  int offset;
  size_t size;

  switch (note->descsz)
    {
      default:
	return FALSE;

      case 148:		/* Linux/ARM 32-bit.  */
	/* pr_cursig */
	elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);

	/* pr_pid */
	elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);

	/* pr_reg */
	offset = 72;
	size = 72;

	break;
    }

  /* Make a ".reg/999" section.  */
  return _bfd_elfcore_make_pseudosection (abfd, ".reg",
					  size, note->descpos + offset);
}
static bfd_boolean
elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
{
  switch (note->descsz)
    {
      default:
	return FALSE;

      case 124:		/* Linux/ARM elf_prpsinfo.  */
	elf_tdata (abfd)->core->pid
	  = bfd_get_32 (abfd, note->descdata + 12);
	elf_tdata (abfd)->core->program
	  = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
	elf_tdata (abfd)->core->command
	  = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
    }

  /* Note that for some reason, a spurious space is tacked
     onto the end of the args in some (at least one anyway)
     implementations, so strip it off if it exists.  */
  {
    char *command = elf_tdata (abfd)->core->command;
    int n = strlen (command);

    if (0 < n && command[n - 1] == ' ')
      command[n - 1] = '\0';
  }

  return TRUE;
}
static char *
elf32_arm_nabi_write_core_note (bfd *abfd, char *buf, int *bufsiz,
				int note_type, ...)
{
  switch (note_type)
    {
    default:
      return NULL;

    case NT_PRPSINFO:
      {
	char data[124] ATTRIBUTE_NONSTRING;
	va_list ap;

	va_start (ap, note_type);
	memset (data, 0, sizeof (data));
	strncpy (data + 28, va_arg (ap, const char *), 16);
#if GCC_VERSION == 8000 || GCC_VERSION == 8001
	DIAGNOSTIC_PUSH;
	/* GCC 8.0 and 8.1 warn about 80 equals destination size with
	   -Wstringop-truncation:
	   https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85643  */
	DIAGNOSTIC_IGNORE_STRINGOP_TRUNCATION;
#endif
	strncpy (data + 44, va_arg (ap, const char *), 80);
#if GCC_VERSION == 8000 || GCC_VERSION == 8001
	DIAGNOSTIC_POP;
#endif
	va_end (ap);

	return elfcore_write_note (abfd, buf, bufsiz,
				   "CORE", note_type, data, sizeof (data));
      }

    case NT_PRSTATUS:
      {
	char data[148];
	va_list ap;
	long pid;
	int cursig;
	const void *greg;

	va_start (ap, note_type);
	memset (data, 0, sizeof (data));
	pid = va_arg (ap, long);
	bfd_put_32 (abfd, pid, data + 24);
	cursig = va_arg (ap, int);
	bfd_put_16 (abfd, cursig, data + 12);
	greg = va_arg (ap, const void *);
	memcpy (data + 72, greg, 72);
	va_end (ap);

	return elfcore_write_note (abfd, buf, bufsiz,
				   "CORE", note_type, data, sizeof (data));
      }
    }
}
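
/* The 124- and 148-byte descriptors handled here are the 32-bit Linux/ARM
   elf_prpsinfo and elf_prstatus layouts.  The offsets used when writing
   (12 = pr_pid, 28 = pr_fname, 44 = pr_psargs for prpsinfo; 12 = pr_cursig,
   24 = pr_pid, 72 = pr_reg for prstatus) are the same ones the grok
   functions above read back.  */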
#define TARGET_LITTLE_SYM		arm_elf32_le_vec
#define TARGET_LITTLE_NAME		"elf32-littlearm"
#define TARGET_BIG_SYM			arm_elf32_be_vec
#define TARGET_BIG_NAME			"elf32-bigarm"

#define elf_backend_grok_prstatus	elf32_arm_nabi_grok_prstatus
#define elf_backend_grok_psinfo		elf32_arm_nabi_grok_psinfo
#define elf_backend_write_core_note	elf32_arm_nabi_write_core_note

typedef unsigned long int insn32;
typedef unsigned short int insn16;

/* In lieu of proper flags, assume all EABIv4 or later objects are
   interworkable.  */
#define INTERWORK_FLAG(abfd)  \
  (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
  || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
  || ((abfd)->flags & BFD_LINKER_CREATED))

/* The linker script knows the section names for placement.
   The entry_names are used to do simple name mangling on the stubs.
   Given a function name, and its type, the stub can be found.  The
   name can be changed.  The only requirement is the %s be present.  */
#define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
#define THUMB2ARM_GLUE_ENTRY_NAME   "__%s_from_thumb"

#define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
#define ARM2THUMB_GLUE_ENTRY_NAME   "__%s_from_arm"

#define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
#define VFP11_ERRATUM_VENEER_ENTRY_NAME   "__vfp11_veneer_%x"

#define STM32L4XX_ERRATUM_VENEER_SECTION_NAME ".text.stm32l4xx_veneer"
#define STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "__stm32l4xx_veneer_%x"

#define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
#define ARM_BX_GLUE_ENTRY_NAME   "__bx_r%d"

#define STUB_ENTRY_NAME   "__%s_veneer"

#define CMSE_PREFIX "__acle_se_"

#define CMSE_STUB_NAME ".gnu.sgstubs"

/* The name of the dynamic interpreter.  This is put in the .interp
   section.  */
#define ELF_DYNAMIC_INTERPRETER     "/usr/lib/ld.so.1"

/* FDPIC default stack size.  */
#define DEFAULT_STACK_SIZE 0x8000
static const unsigned long tls_trampoline [] =
{
  0xe08e0000,		/* add r0, lr, r0 */
  0xe5901004,		/* ldr r1, [r0,#4] */
  0xe12fff11,		/* bx  r1 */
};

static const unsigned long dl_tlsdesc_lazy_trampoline [] =
{
  0xe52d2004, /* push {r2} */
  0xe59f200c, /* ldr r2, [pc, #3f - . - 8] */
  0xe59f100c, /* ldr r1, [pc, #4f - . - 8] */
  0xe79f2002, /* 1: ldr r2, [pc, r2] */
  0xe081100f, /* 2: add r1, pc */
  0xe12fff12, /* bx r2 */
  0x00000014, /* 3: .word  _GLOBAL_OFFSET_TABLE_ - 1b - 8
			   + dl_tlsdesc_lazy_resolver(GOT)   */
  0x00000018, /* 4: .word  _GLOBAL_OFFSET_TABLE_ - 2b - 8 */
};
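
/* The two trailing .word slots above are only placeholders at this point;
   they appear to be rewritten once the final PLT and GOT addresses are
   known, so that at run time the ldr/add pair picks up the lazy TLS
   descriptor resolver entry and the GOT base from the actual output
   image.  */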
2334 /* NOTE: [Thumb nop sequence]
2335 When adding code that transitions from Thumb to Arm the instruction that
2336 should be used for the alignment padding should be 0xe7fd (b .-2) instead of
2337 a nop for performance reasons. */
2339 /* ARM FDPIC PLT entry. */
2340 /* The last 5 words contain PLT lazy fragment code and data. */
2341 static const bfd_vma elf32_arm_fdpic_plt_entry
[] =
2343 0xe59fc008, /* ldr r12, .L1 */
2344 0xe08cc009, /* add r12, r12, r9 */
2345 0xe59c9004, /* ldr r9, [r12, #4] */
2346 0xe59cf000, /* ldr pc, [r12] */
2347 0x00000000, /* L1. .word foo(GOTOFFFUNCDESC) */
2348 0x00000000, /* L1. .word foo(funcdesc_value_reloc_offset) */
2349 0xe51fc00c, /* ldr r12, [pc, #-12] */
2350 0xe92d1000, /* push {r12} */
2351 0xe599c004, /* ldr r12, [r9, #4] */
2352 0xe599f000, /* ldr pc, [r9] */
2355 /* Thumb FDPIC PLT entry. */
2356 /* The last 5 words contain PLT lazy fragment code and data. */
2357 static const bfd_vma elf32_arm_fdpic_thumb_plt_entry
[] =
2359 0xc00cf8df, /* ldr.w r12, .L1 */
2360 0x0c09eb0c, /* add.w r12, r12, r9 */
2361 0x9004f8dc, /* ldr.w r9, [r12, #4] */
2362 0xf000f8dc, /* ldr.w pc, [r12] */
2363 0x00000000, /* .L1 .word foo(GOTOFFFUNCDESC) */
2364 0x00000000, /* .L2 .word foo(funcdesc_value_reloc_offset) */
2365 0xc008f85f, /* ldr.w r12, .L2 */
2366 0xcd04f84d, /* push {r12} */
2367 0xc004f8d9, /* ldr.w r12, [r9, #4] */
2368 0xf000f8d9, /* ldr.w pc, [r9] */
2371 #ifdef FOUR_WORD_PLT
2373 /* The first entry in a procedure linkage table looks like
2374 this. It is set up so that any shared library function that is
2375 called before the relocation has been set up calls the dynamic
2377 static const bfd_vma elf32_arm_plt0_entry
[] =
2379 0xe52de004, /* str lr, [sp, #-4]! */
2380 0xe59fe010, /* ldr lr, [pc, #16] */
2381 0xe08fe00e, /* add lr, pc, lr */
2382 0xe5bef008, /* ldr pc, [lr, #8]! */
2385 /* Subsequent entries in a procedure linkage table look like
2387 static const bfd_vma elf32_arm_plt_entry
[] =
2389 0xe28fc600, /* add ip, pc, #NN */
2390 0xe28cca00, /* add ip, ip, #NN */
2391 0xe5bcf000, /* ldr pc, [ip, #NN]! */
2392 0x00000000, /* unused */
2395 #else /* not FOUR_WORD_PLT */
2397 /* The first entry in a procedure linkage table looks like
2398 this. It is set up so that any shared library function that is
2399 called before the relocation has been set up calls the dynamic
2401 static const bfd_vma elf32_arm_plt0_entry
[] =
2403 0xe52de004, /* str lr, [sp, #-4]! */
2404 0xe59fe004, /* ldr lr, [pc, #4] */
2405 0xe08fe00e, /* add lr, pc, lr */
2406 0xe5bef008, /* ldr pc, [lr, #8]! */
2407 0x00000000, /* &GOT[0] - . */
/* By default subsequent entries in a procedure linkage table look like
   this.  Offsets that don't fit into 28 bits will cause link error.  */
static const bfd_vma elf32_arm_plt_entry_short [] =
{
  0xe28fc600,		/* add	 ip, pc, #0xNN00000 */
  0xe28cca00,		/* add	 ip, ip, #0xNN000 */
  0xe5bcf000,		/* ldr	 pc, [ip, #0xNNN]! */
};

/* When explicitly asked, we'll use this "long" entry format
   which can cope with arbitrary displacements.  */
static const bfd_vma elf32_arm_plt_entry_long [] =
{
  0xe28fc200,		/* add	 ip, pc, #0xN0000000 */
  0xe28cc600,		/* add	 ip, ip, #0xNN00000 */
  0xe28cca00,		/* add	 ip, ip, #0xNN000 */
  0xe5bcf000,		/* ldr	 pc, [ip, #0xNNN]! */
};

static bfd_boolean elf32_arm_use_long_plt_entry = FALSE;
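
/* In the short form the PLT-to-GOT displacement is split across the three
   instructions: the first add supplies bits [27:20] of the offset as a
   rotated 8-bit immediate, the second add supplies bits [19:12], and the
   writeback ldr supplies the final bits [11:0], hence the 28-bit limit
   mentioned above.  The long form adds a fourth group so that a full
   32-bit displacement can be encoded.  */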
2431 #endif /* not FOUR_WORD_PLT */
2433 /* The first entry in a procedure linkage table looks like this.
2434 It is set up so that any shared library function that is called before the
2435 relocation has been set up calls the dynamic linker first. */
2436 static const bfd_vma elf32_thumb2_plt0_entry
[] =
2438 /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
2439 an instruction maybe encoded to one or two array elements. */
2440 0xf8dfb500, /* push {lr} */
2441 0x44fee008, /* ldr.w lr, [pc, #8] */
2443 0xff08f85e, /* ldr.w pc, [lr, #8]! */
2444 0x00000000, /* &GOT[0] - . */
2447 /* Subsequent entries in a procedure linkage table for thumb only target
2449 static const bfd_vma elf32_thumb2_plt_entry
[] =
2451 /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
2452 an instruction maybe encoded to one or two array elements. */
2453 0x0c00f240, /* movw ip, #0xNNNN */
2454 0x0c00f2c0, /* movt ip, #0xNNNN */
2455 0xf8dc44fc, /* add ip, pc */
2456 0xe7fdf000 /* ldr.w pc, [ip] */
2460 /* The format of the first entry in the procedure linkage table
2461 for a VxWorks executable. */
2462 static const bfd_vma elf32_arm_vxworks_exec_plt0_entry
[] =
2464 0xe52dc008, /* str ip,[sp,#-8]! */
2465 0xe59fc000, /* ldr ip,[pc] */
2466 0xe59cf008, /* ldr pc,[ip,#8] */
2467 0x00000000, /* .long _GLOBAL_OFFSET_TABLE_ */
2470 /* The format of subsequent entries in a VxWorks executable. */
2471 static const bfd_vma elf32_arm_vxworks_exec_plt_entry
[] =
2473 0xe59fc000, /* ldr ip,[pc] */
2474 0xe59cf000, /* ldr pc,[ip] */
2475 0x00000000, /* .long @got */
2476 0xe59fc000, /* ldr ip,[pc] */
2477 0xea000000, /* b _PLT */
2478 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
2481 /* The format of entries in a VxWorks shared library. */
2482 static const bfd_vma elf32_arm_vxworks_shared_plt_entry
[] =
2484 0xe59fc000, /* ldr ip,[pc] */
2485 0xe79cf009, /* ldr pc,[ip,r9] */
2486 0x00000000, /* .long @got */
2487 0xe59fc000, /* ldr ip,[pc] */
2488 0xe599f008, /* ldr pc,[r9,#8] */
2489 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
2492 /* An initial stub used if the PLT entry is referenced from Thumb code. */
2493 #define PLT_THUMB_STUB_SIZE 4
static const bfd_vma elf32_arm_plt_thumb_stub [] =
{
  0x4778,		/* bx pc */
  0x46c0		/* nop   */
};
2500 /* The entries in a PLT when using a DLL-based target with multiple
2502 static const bfd_vma elf32_arm_symbian_plt_entry
[] =
2504 0xe51ff004, /* ldr pc, [pc, #-4] */
2505 0x00000000, /* dcd R_ARM_GLOB_DAT(X) */
2508 /* The first entry in a procedure linkage table looks like
2509 this. It is set up so that any shared library function that is
2510 called before the relocation has been set up calls the dynamic
2512 static const bfd_vma elf32_arm_nacl_plt0_entry
[] =
2515 0xe300c000, /* movw ip, #:lower16:&GOT[2]-.+8 */
2516 0xe340c000, /* movt ip, #:upper16:&GOT[2]-.+8 */
2517 0xe08cc00f, /* add ip, ip, pc */
2518 0xe52dc008, /* str ip, [sp, #-8]! */
2519 /* Second bundle: */
2520 0xe3ccc103, /* bic ip, ip, #0xc0000000 */
2521 0xe59cc000, /* ldr ip, [ip] */
2522 0xe3ccc13f, /* bic ip, ip, #0xc000000f */
2523 0xe12fff1c, /* bx ip */
2525 0xe320f000, /* nop */
2526 0xe320f000, /* nop */
2527 0xe320f000, /* nop */
2529 0xe50dc004, /* str ip, [sp, #-4] */
2530 /* Fourth bundle: */
2531 0xe3ccc103, /* bic ip, ip, #0xc0000000 */
2532 0xe59cc000, /* ldr ip, [ip] */
2533 0xe3ccc13f, /* bic ip, ip, #0xc000000f */
2534 0xe12fff1c, /* bx ip */
2536 #define ARM_NACL_PLT_TAIL_OFFSET (11 * 4)
2538 /* Subsequent entries in a procedure linkage table look like this. */
2539 static const bfd_vma elf32_arm_nacl_plt_entry
[] =
2541 0xe300c000, /* movw ip, #:lower16:&GOT[n]-.+8 */
2542 0xe340c000, /* movt ip, #:upper16:&GOT[n]-.+8 */
2543 0xe08cc00f, /* add ip, ip, pc */
2544 0xea000000, /* b .Lplt_tail */
#define ARM_MAX_FWD_BRANCH_OFFSET  ((((1 << 23) - 1) << 2) + 8)
#define ARM_MAX_BWD_BRANCH_OFFSET  ((-((1 << 23) << 2)) + 8)
#define THM_MAX_FWD_BRANCH_OFFSET  ((1 << 22) -2 + 4)
#define THM_MAX_BWD_BRANCH_OFFSET  (-(1 << 22) + 4)
#define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
#define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
#define THM2_MAX_FWD_COND_BRANCH_OFFSET (((1 << 20) -2) + 4)
#define THM2_MAX_BWD_COND_BRANCH_OFFSET (-(1 << 20) + 4)
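
/* These limits follow from the branch encodings: ARM B/BL carries a 24-bit
   signed word offset (about +/-32MB) applied to a PC that reads eight bytes
   ahead of the branch, the original Thumb BL pair gives roughly +/-4MB,
   Thumb-2 B.W/BL about +/-16MB and the conditional B<cond>.W form about
   +/-1MB, each relative to PC = instruction address + 4 in Thumb state.  */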
2564 #define THUMB16_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 0}
2565 /* A bit of a hack. A Thumb conditional branch, in which the proper condition
2566 is inserted in arm_build_one_stub(). */
2567 #define THUMB16_BCOND_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 1}
2568 #define THUMB32_INSN(X) {(X), THUMB32_TYPE, R_ARM_NONE, 0}
2569 #define THUMB32_MOVT(X) {(X), THUMB32_TYPE, R_ARM_THM_MOVT_ABS, 0}
2570 #define THUMB32_MOVW(X) {(X), THUMB32_TYPE, R_ARM_THM_MOVW_ABS_NC, 0}
2571 #define THUMB32_B_INSN(X, Z) {(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
2572 #define ARM_INSN(X) {(X), ARM_TYPE, R_ARM_NONE, 0}
2573 #define ARM_REL_INSN(X, Z) {(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
2574 #define DATA_WORD(X,Y,Z) {(X), DATA_TYPE, (Y), (Z)}
2579 enum stub_insn_type type
;
2580 unsigned int r_type
;
2584 /* See note [Thumb nop sequence] when adding a veneer. */
2586 /* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
2587 to reach the stub if necessary. */
2588 static const insn_sequence elf32_arm_stub_long_branch_any_any
[] =
2590 ARM_INSN (0xe51ff004), /* ldr pc, [pc, #-4] */
2591 DATA_WORD (0, R_ARM_ABS32
, 0), /* dcd R_ARM_ABS32(X) */
2594 /* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
2596 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb
[] =
2598 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2599 ARM_INSN (0xe12fff1c), /* bx ip */
2600 DATA_WORD (0, R_ARM_ABS32
, 0), /* dcd R_ARM_ABS32(X) */
2603 /* Thumb -> Thumb long branch stub. Used on M-profile architectures. */
2604 static const insn_sequence elf32_arm_stub_long_branch_thumb_only
[] =
2606 THUMB16_INSN (0xb401), /* push {r0} */
2607 THUMB16_INSN (0x4802), /* ldr r0, [pc, #8] */
2608 THUMB16_INSN (0x4684), /* mov ip, r0 */
2609 THUMB16_INSN (0xbc01), /* pop {r0} */
2610 THUMB16_INSN (0x4760), /* bx ip */
2611 THUMB16_INSN (0xbf00), /* nop */
2612 DATA_WORD (0, R_ARM_ABS32
, 0), /* dcd R_ARM_ABS32(X) */
2615 /* Thumb -> Thumb long branch stub in thumb2 encoding. Used on armv7. */
2616 static const insn_sequence elf32_arm_stub_long_branch_thumb2_only
[] =
2618 THUMB32_INSN (0xf85ff000), /* ldr.w pc, [pc, #-0] */
2619 DATA_WORD (0, R_ARM_ABS32
, 0), /* dcd R_ARM_ABS32(x) */
2622 /* Thumb -> Thumb long branch stub. Used for PureCode sections on Thumb2
2623 M-profile architectures. */
2624 static const insn_sequence elf32_arm_stub_long_branch_thumb2_only_pure
[] =
2626 THUMB32_MOVW (0xf2400c00), /* mov.w ip, R_ARM_MOVW_ABS_NC */
2627 THUMB32_MOVT (0xf2c00c00), /* movt ip, R_ARM_MOVT_ABS << 16 */
2628 THUMB16_INSN (0x4760), /* bx ip */
2631 /* V4T Thumb -> Thumb long branch stub. Using the stack is not
2633 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb
[] =
2635 THUMB16_INSN (0x4778), /* bx pc */
2636 THUMB16_INSN (0xe7fd), /* b .-2 */
2637 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2638 ARM_INSN (0xe12fff1c), /* bx ip */
2639 DATA_WORD (0, R_ARM_ABS32
, 0), /* dcd R_ARM_ABS32(X) */
2642 /* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
2644 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm
[] =
2646 THUMB16_INSN (0x4778), /* bx pc */
2647 THUMB16_INSN (0xe7fd), /* b .-2 */
2648 ARM_INSN (0xe51ff004), /* ldr pc, [pc, #-4] */
2649 DATA_WORD (0, R_ARM_ABS32
, 0), /* dcd R_ARM_ABS32(X) */
2652 /* V4T Thumb -> ARM short branch stub. Shorter variant of the above
2653 one, when the destination is close enough. */
2654 static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm
[] =
2656 THUMB16_INSN (0x4778), /* bx pc */
2657 THUMB16_INSN (0xe7fd), /* b .-2 */
2658 ARM_REL_INSN (0xea000000, -8), /* b (X-8) */
2661 /* ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use
2662 blx to reach the stub if necessary. */
2663 static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic
[] =
2665 ARM_INSN (0xe59fc000), /* ldr ip, [pc] */
2666 ARM_INSN (0xe08ff00c), /* add pc, pc, ip */
2667 DATA_WORD (0, R_ARM_REL32
, -4), /* dcd R_ARM_REL32(X-4) */
2670 /* ARM/Thumb -> Thumb long branch stub, PIC. On V5T and above, use
2671 blx to reach the stub if necessary. We can not add into pc;
2672 it is not guaranteed to mode switch (different in ARMv6 and
2674 static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic
[] =
2676 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2677 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2678 ARM_INSN (0xe12fff1c), /* bx ip */
2679 DATA_WORD (0, R_ARM_REL32
, 0), /* dcd R_ARM_REL32(X) */
2682 /* V4T ARM -> ARM long branch stub, PIC. */
2683 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic
[] =
2685 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2686 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2687 ARM_INSN (0xe12fff1c), /* bx ip */
2688 DATA_WORD (0, R_ARM_REL32
, 0), /* dcd R_ARM_REL32(X) */
2691 /* V4T Thumb -> ARM long branch stub, PIC. */
2692 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic
[] =
2694 THUMB16_INSN (0x4778), /* bx pc */
2695 THUMB16_INSN (0xe7fd), /* b .-2 */
2696 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2697 ARM_INSN (0xe08cf00f), /* add pc, ip, pc */
2698 DATA_WORD (0, R_ARM_REL32
, -4), /* dcd R_ARM_REL32(X) */
2701 /* Thumb -> Thumb long branch stub, PIC. Used on M-profile
2703 static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic
[] =
2705 THUMB16_INSN (0xb401), /* push {r0} */
2706 THUMB16_INSN (0x4802), /* ldr r0, [pc, #8] */
2707 THUMB16_INSN (0x46fc), /* mov ip, pc */
2708 THUMB16_INSN (0x4484), /* add ip, r0 */
2709 THUMB16_INSN (0xbc01), /* pop {r0} */
2710 THUMB16_INSN (0x4760), /* bx ip */
2711 DATA_WORD (0, R_ARM_REL32
, 4), /* dcd R_ARM_REL32(X) */
2714 /* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
2716 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic
[] =
2718 THUMB16_INSN (0x4778), /* bx pc */
2719 THUMB16_INSN (0xe7fd), /* b .-2 */
2720 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2721 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2722 ARM_INSN (0xe12fff1c), /* bx ip */
2723 DATA_WORD (0, R_ARM_REL32
, 0), /* dcd R_ARM_REL32(X) */
2726 /* Thumb2/ARM -> TLS trampoline. Lowest common denominator, which is a
2727 long PIC stub. We can use r1 as a scratch -- and cannot use ip. */
2728 static const insn_sequence elf32_arm_stub_long_branch_any_tls_pic
[] =
2730 ARM_INSN (0xe59f1000), /* ldr r1, [pc] */
2731 ARM_INSN (0xe08ff001), /* add pc, pc, r1 */
2732 DATA_WORD (0, R_ARM_REL32
, -4), /* dcd R_ARM_REL32(X-4) */
2735 /* V4T Thumb -> TLS trampoline. lowest common denominator, which is a
2736 long PIC stub. We can use r1 as a scratch -- and cannot use ip. */
2737 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_tls_pic
[] =
2739 THUMB16_INSN (0x4778), /* bx pc */
2740 THUMB16_INSN (0xe7fd), /* b .-2 */
2741 ARM_INSN (0xe59f1000), /* ldr r1, [pc, #0] */
2742 ARM_INSN (0xe081f00f), /* add pc, r1, pc */
2743 DATA_WORD (0, R_ARM_REL32
, -4), /* dcd R_ARM_REL32(X) */
2746 /* NaCl ARM -> ARM long branch stub. */
2747 static const insn_sequence elf32_arm_stub_long_branch_arm_nacl
[] =
2749 ARM_INSN (0xe59fc00c), /* ldr ip, [pc, #12] */
2750 ARM_INSN (0xe3ccc13f), /* bic ip, ip, #0xc000000f */
2751 ARM_INSN (0xe12fff1c), /* bx ip */
2752 ARM_INSN (0xe320f000), /* nop */
2753 ARM_INSN (0xe125be70), /* bkpt 0x5be0 */
2754 DATA_WORD (0, R_ARM_ABS32
, 0), /* dcd R_ARM_ABS32(X) */
2755 DATA_WORD (0, R_ARM_NONE
, 0), /* .word 0 */
2756 DATA_WORD (0, R_ARM_NONE
, 0), /* .word 0 */
2759 /* NaCl ARM -> ARM long branch stub, PIC. */
2760 static const insn_sequence elf32_arm_stub_long_branch_arm_nacl_pic
[] =
2762 ARM_INSN (0xe59fc00c), /* ldr ip, [pc, #12] */
2763 ARM_INSN (0xe08cc00f), /* add ip, ip, pc */
2764 ARM_INSN (0xe3ccc13f), /* bic ip, ip, #0xc000000f */
2765 ARM_INSN (0xe12fff1c), /* bx ip */
2766 ARM_INSN (0xe125be70), /* bkpt 0x5be0 */
2767 DATA_WORD (0, R_ARM_REL32
, 8), /* dcd R_ARM_REL32(X+8) */
2768 DATA_WORD (0, R_ARM_NONE
, 0), /* .word 0 */
2769 DATA_WORD (0, R_ARM_NONE
, 0), /* .word 0 */
2772 /* Stub used for transition to secure state (aka SG veneer). */
2773 static const insn_sequence elf32_arm_stub_cmse_branch_thumb_only
[] =
2775 THUMB32_INSN (0xe97fe97f), /* sg. */
2776 THUMB32_B_INSN (0xf000b800, -4), /* b.w original_branch_dest. */
2780 /* Cortex-A8 erratum-workaround stubs. */
2782 /* Stub used for conditional branches (which may be beyond +/-1MB away, so we
2783 can't use a conditional branch to reach this stub). */
2785 static const insn_sequence elf32_arm_stub_a8_veneer_b_cond
[] =
2787 THUMB16_BCOND_INSN (0xd001), /* b<cond>.n true. */
2788 THUMB32_B_INSN (0xf000b800, -4), /* b.w insn_after_original_branch. */
2789 THUMB32_B_INSN (0xf000b800, -4) /* true: b.w original_branch_dest. */
2792 /* Stub used for b.w and bl.w instructions. */
2794 static const insn_sequence elf32_arm_stub_a8_veneer_b
[] =
2796 THUMB32_B_INSN (0xf000b800, -4) /* b.w original_branch_dest. */
2799 static const insn_sequence elf32_arm_stub_a8_veneer_bl
[] =
2801 THUMB32_B_INSN (0xf000b800, -4) /* b.w original_branch_dest. */
2804 /* Stub used for Thumb-2 blx.w instructions. We modified the original blx.w
2805 instruction (which switches to ARM mode) to point to this stub. Jump to the
2806 real destination using an ARM-mode branch. */
2808 static const insn_sequence elf32_arm_stub_a8_veneer_blx
[] =
2810 ARM_REL_INSN (0xea000000, -8) /* b original_branch_dest. */
/* For each section group there can be a specially created linker section
   to hold the stubs for that group.  The name of the stub section is based
   upon the name of another section within that group with the suffix below
   applied.

   PR 13049: STUB_SUFFIX used to be ".stub", but this allowed the user to
   create what appeared to be a linker stub section when it actually
   contained user code/data.  For example, consider this fragment:

     const char * stubborn_problems[] = { "np" };

   If this is compiled with "-fPIC -fdata-sections" then gcc produces a
   .data.rel.local section called:

     .data.rel.local.stubborn_problems

   This then causes problems in arm32_arm_build_stubs() as it triggers:

      // Ignore non-stub sections.
      if (!strstr (stub_sec->name, STUB_SUFFIX))
	continue;

   And so the section would be ignored instead of being processed.  Hence
   the change in definition of STUB_SUFFIX to a name that cannot be a valid
   C identifier.  */

#define STUB_SUFFIX ".__stub"
/* One entry per long/short branch stub defined above.  */
#define DEF_STUBS \
  DEF_STUB(long_branch_any_any)	\
  DEF_STUB(long_branch_v4t_arm_thumb) \
  DEF_STUB(long_branch_thumb_only) \
  DEF_STUB(long_branch_v4t_thumb_thumb)	\
  DEF_STUB(long_branch_v4t_thumb_arm) \
  DEF_STUB(short_branch_v4t_thumb_arm) \
  DEF_STUB(long_branch_any_arm_pic) \
  DEF_STUB(long_branch_any_thumb_pic) \
  DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
  DEF_STUB(long_branch_v4t_arm_thumb_pic) \
  DEF_STUB(long_branch_v4t_thumb_arm_pic) \
  DEF_STUB(long_branch_thumb_only_pic) \
  DEF_STUB(long_branch_any_tls_pic) \
  DEF_STUB(long_branch_v4t_thumb_tls_pic) \
  DEF_STUB(long_branch_arm_nacl) \
  DEF_STUB(long_branch_arm_nacl_pic) \
  DEF_STUB(cmse_branch_thumb_only) \
  DEF_STUB(a8_veneer_b_cond) \
  DEF_STUB(a8_veneer_b) \
  DEF_STUB(a8_veneer_bl) \
  DEF_STUB(a8_veneer_blx) \
  DEF_STUB(long_branch_thumb2_only) \
  DEF_STUB(long_branch_thumb2_only_pure)

#define DEF_STUB(x) arm_stub_##x,
enum elf32_arm_stub_type
{
  arm_stub_none,
  DEF_STUBS
  max_stub_type
};
#undef DEF_STUB

/* Note the first a8_veneer type.  */
const unsigned arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond;

typedef struct
{
  const insn_sequence* template_sequence;
  int template_size;
} stub_def;

#define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
static const stub_def stub_definitions[] =
{
  {NULL, 0},
  DEF_STUBS
};
#undef DEF_STUB
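
/* The DEF_STUBS list is expanded twice with different definitions of
   DEF_STUB: once to populate the elf32_arm_stub_type enumeration and once
   to populate the parallel stub_definitions[] table, so each stub named
   above gets an enum value and a matching template/size entry that can be
   indexed by that value.  */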
2891 struct elf32_arm_stub_hash_entry
2893 /* Base hash table entry structure. */
2894 struct bfd_hash_entry root
;
2896 /* The stub section. */
2899 /* Offset within stub_sec of the beginning of this stub. */
2900 bfd_vma stub_offset
;
2902 /* Given the symbol's value and its section we can determine its final
2903 value when building the stubs (so the stub knows where to jump). */
2904 bfd_vma target_value
;
2905 asection
*target_section
;
2907 /* Same as above but for the source of the branch to the stub. Used for
2908 Cortex-A8 erratum workaround to patch it to branch to the stub. As
2909 such, source section does not need to be recorded since Cortex-A8 erratum
2910 workaround stubs are only generated when both source and target are in the
2912 bfd_vma source_value
;
2914 /* The instruction which caused this stub to be generated (only valid for
2915 Cortex-A8 erratum workaround stubs at present). */
2916 unsigned long orig_insn
;
2918 /* The stub type. */
2919 enum elf32_arm_stub_type stub_type
;
2920 /* Its encoding size in bytes. */
2923 const insn_sequence
*stub_template
;
2924 /* The size of the template (number of entries). */
2925 int stub_template_size
;
2927 /* The symbol table entry, if any, that this was derived from. */
2928 struct elf32_arm_link_hash_entry
*h
;
2930 /* Type of branch. */
2931 enum arm_st_branch_type branch_type
;
2933 /* Where this stub is being called from, or, in the case of combined
2934 stub sections, the first input section in the group. */
2937 /* The name for the local symbol at the start of this stub. The
2938 stub name in the hash table has to be unique; this does not, so
2939 it can be friendlier. */
2943 /* Used to build a map of a section. This is required for mixed-endian
2946 typedef struct elf32_elf_section_map
2951 elf32_arm_section_map
;
2953 /* Information about a VFP11 erratum veneer, or a branch to such a veneer. */
2957 VFP11_ERRATUM_BRANCH_TO_ARM_VENEER
,
2958 VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER
,
2959 VFP11_ERRATUM_ARM_VENEER
,
2960 VFP11_ERRATUM_THUMB_VENEER
2962 elf32_vfp11_erratum_type
;
2964 typedef struct elf32_vfp11_erratum_list
2966 struct elf32_vfp11_erratum_list
*next
;
2972 struct elf32_vfp11_erratum_list
*veneer
;
2973 unsigned int vfp_insn
;
2977 struct elf32_vfp11_erratum_list
*branch
;
2981 elf32_vfp11_erratum_type type
;
2983 elf32_vfp11_erratum_list
;
2985 /* Information about a STM32L4XX erratum veneer, or a branch to such a
2989 STM32L4XX_ERRATUM_BRANCH_TO_VENEER
,
2990 STM32L4XX_ERRATUM_VENEER
2992 elf32_stm32l4xx_erratum_type
;
2994 typedef struct elf32_stm32l4xx_erratum_list
2996 struct elf32_stm32l4xx_erratum_list
*next
;
3002 struct elf32_stm32l4xx_erratum_list
*veneer
;
3007 struct elf32_stm32l4xx_erratum_list
*branch
;
3011 elf32_stm32l4xx_erratum_type type
;
3013 elf32_stm32l4xx_erratum_list
;
3018 INSERT_EXIDX_CANTUNWIND_AT_END
3020 arm_unwind_edit_type
;
3022 /* A (sorted) list of edits to apply to an unwind table. */
3023 typedef struct arm_unwind_table_edit
3025 arm_unwind_edit_type type
;
3026 /* Note: we sometimes want to insert an unwind entry corresponding to a
3027 section different from the one we're currently writing out, so record the
3028 (text) section this edit relates to here. */
3029 asection
*linked_section
;
3031 struct arm_unwind_table_edit
*next
;
3033 arm_unwind_table_edit
;
3035 typedef struct _arm_elf_section_data
3037 /* Information about mapping symbols. */
3038 struct bfd_elf_section_data elf
;
3039 unsigned int mapcount
;
3040 unsigned int mapsize
;
3041 elf32_arm_section_map
*map
;
3042 /* Information about CPU errata. */
3043 unsigned int erratumcount
;
3044 elf32_vfp11_erratum_list
*erratumlist
;
3045 unsigned int stm32l4xx_erratumcount
;
3046 elf32_stm32l4xx_erratum_list
*stm32l4xx_erratumlist
;
3047 unsigned int additional_reloc_count
;
3048 /* Information about unwind tables. */
3051 /* Unwind info attached to a text section. */
3054 asection
*arm_exidx_sec
;
3057 /* Unwind info attached to an .ARM.exidx section. */
3060 arm_unwind_table_edit
*unwind_edit_list
;
3061 arm_unwind_table_edit
*unwind_edit_tail
;
3065 _arm_elf_section_data
;
3067 #define elf32_arm_section_data(sec) \
3068 ((_arm_elf_section_data *) elf_section_data (sec))
3070 /* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
3071 These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
3072 so may be created multiple times: we use an array of these entries whilst
3073 relaxing which we can refresh easily, then create stubs for each potentially
3074 erratum-triggering instruction once we've settled on a solution. */
3076 struct a8_erratum_fix
3081 bfd_vma target_offset
;
3082 unsigned long orig_insn
;
3084 enum elf32_arm_stub_type stub_type
;
3085 enum arm_st_branch_type branch_type
;
3088 /* A table of relocs applied to branches which might trigger Cortex-A8
3091 struct a8_erratum_reloc
3094 bfd_vma destination
;
3095 struct elf32_arm_link_hash_entry
*hash
;
3096 const char *sym_name
;
3097 unsigned int r_type
;
3098 enum arm_st_branch_type branch_type
;
3099 bfd_boolean non_a8_stub
;
3102 /* The size of the thread control block. */
3105 /* ARM-specific information about a PLT entry, over and above the usual
3109 /* We reference count Thumb references to a PLT entry separately,
3110 so that we can emit the Thumb trampoline only if needed. */
3111 bfd_signed_vma thumb_refcount
;
3113 /* Some references from Thumb code may be eliminated by BL->BLX
3114 conversion, so record them separately. */
3115 bfd_signed_vma maybe_thumb_refcount
;
3117 /* How many of the recorded PLT accesses were from non-call relocations.
3118 This information is useful when deciding whether anything takes the
3119 address of an STT_GNU_IFUNC PLT. A value of 0 means that all
3120 non-call references to the function should resolve directly to the
3121 real runtime target. */
3122 unsigned int noncall_refcount
;
3124 /* Since PLT entries have variable size if the Thumb prologue is
3125 used, we need to record the index into .got.plt instead of
3126 recomputing it from the PLT offset. */
3127 bfd_signed_vma got_offset
;
3130 /* Information about an .iplt entry for a local STT_GNU_IFUNC symbol. */
3131 struct arm_local_iplt_info
3133 /* The information that is usually found in the generic ELF part of
3134 the hash table entry. */
3135 union gotplt_union root
;
3137 /* The information that is usually found in the ARM-specific part of
3138 the hash table entry. */
3139 struct arm_plt_info arm
;
3141 /* A list of all potential dynamic relocations against this symbol. */
3142 struct elf_dyn_relocs
*dyn_relocs
;
3145 /* Structure to handle FDPIC support for local functions. */
3146 struct fdpic_local
{
3147 unsigned int funcdesc_cnt
;
3148 unsigned int gotofffuncdesc_cnt
;
3149 int funcdesc_offset
;
3152 struct elf_arm_obj_tdata
3154 struct elf_obj_tdata root
;
3156 /* tls_type for each local got entry. */
3157 char *local_got_tls_type
;
3159 /* GOTPLT entries for TLS descriptors. */
3160 bfd_vma
*local_tlsdesc_gotent
;
3162 /* Information for local symbols that need entries in .iplt. */
3163 struct arm_local_iplt_info
**local_iplt
;
3165 /* Zero to warn when linking objects with incompatible enum sizes. */
3166 int no_enum_size_warning
;
3168 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
3169 int no_wchar_size_warning
;
3171 /* Maintains FDPIC counters and funcdesc info. */
3172 struct fdpic_local
*local_fdpic_cnts
;
3175 #define elf_arm_tdata(bfd) \
3176 ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)
3178 #define elf32_arm_local_got_tls_type(bfd) \
3179 (elf_arm_tdata (bfd)->local_got_tls_type)
3181 #define elf32_arm_local_tlsdesc_gotent(bfd) \
3182 (elf_arm_tdata (bfd)->local_tlsdesc_gotent)
3184 #define elf32_arm_local_iplt(bfd) \
3185 (elf_arm_tdata (bfd)->local_iplt)
3187 #define elf32_arm_local_fdpic_cnts(bfd) \
3188 (elf_arm_tdata (bfd)->local_fdpic_cnts)
3190 #define is_arm_elf(bfd) \
3191 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
3192 && elf_tdata (bfd) != NULL \
3193 && elf_object_id (bfd) == ARM_ELF_DATA)
static bfd_boolean
elf32_arm_mkobject (bfd *abfd)
{
  return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
				  ARM_ELF_DATA);
}
3202 #define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))
3204 /* Structure to handle FDPIC support for extern functions. */
3205 struct fdpic_global
{
3206 unsigned int gotofffuncdesc_cnt
;
3207 unsigned int gotfuncdesc_cnt
;
3208 unsigned int funcdesc_cnt
;
3209 int funcdesc_offset
;
3210 int gotfuncdesc_offset
;
3213 /* Arm ELF linker hash entry. */
3214 struct elf32_arm_link_hash_entry
3216 struct elf_link_hash_entry root
;
3218 /* Track dynamic relocs copied for this symbol. */
3219 struct elf_dyn_relocs
*dyn_relocs
;
3221 /* ARM-specific PLT information. */
3222 struct arm_plt_info plt
;
3224 #define GOT_UNKNOWN 0
3225 #define GOT_NORMAL 1
3226 #define GOT_TLS_GD 2
3227 #define GOT_TLS_IE 4
3228 #define GOT_TLS_GDESC 8
3229 #define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLS_GDESC))
3230 unsigned int tls_type
: 8;
3232 /* True if the symbol's PLT entry is in .iplt rather than .plt. */
3233 unsigned int is_iplt
: 1;
3235 unsigned int unused
: 23;
3237 /* Offset of the GOTPLT entry reserved for the TLS descriptor,
3238 starting at the end of the jump table. */
3239 bfd_vma tlsdesc_got
;
3241 /* The symbol marking the real symbol location for exported thumb
3242 symbols with Arm stubs. */
3243 struct elf_link_hash_entry
*export_glue
;
3245 /* A pointer to the most recently used stub hash entry against this
3247 struct elf32_arm_stub_hash_entry
*stub_cache
;
3249 /* Counter for FDPIC relocations against this symbol. */
3250 struct fdpic_global fdpic_cnts
;
3253 /* Traverse an arm ELF linker hash table. */
3254 #define elf32_arm_link_hash_traverse(table, func, info) \
3255 (elf_link_hash_traverse \
3257 (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func), \
3260 /* Get the ARM elf linker hash table from a link_info structure. */
3261 #define elf32_arm_hash_table(info) \
3262 (elf_hash_table_id ((struct elf_link_hash_table *) ((info)->hash)) \
3263 == ARM_ELF_DATA ? ((struct elf32_arm_link_hash_table *) ((info)->hash)) : NULL)
3265 #define arm_stub_hash_lookup(table, string, create, copy) \
3266 ((struct elf32_arm_stub_hash_entry *) \
3267 bfd_hash_lookup ((table), (string), (create), (copy)))
3269 /* Array to keep track of which stub sections have been created, and
3270 information on stub grouping. */
3273 /* This is the section to which stubs in the group will be
3276 /* The stub section. */
3280 #define elf32_arm_compute_jump_table_size(htab) \
3281 ((htab)->next_tls_desc_index * 4)
3283 /* ARM ELF linker hash table. */
3284 struct elf32_arm_link_hash_table
3286 /* The main hash table. */
3287 struct elf_link_hash_table root
;
3289 /* The size in bytes of the section containing the Thumb-to-ARM glue. */
3290 bfd_size_type thumb_glue_size
;
3292 /* The size in bytes of the section containing the ARM-to-Thumb glue. */
3293 bfd_size_type arm_glue_size
;
3295 /* The size in bytes of section containing the ARMv4 BX veneers. */
3296 bfd_size_type bx_glue_size
;
3298 /* Offsets of ARMv4 BX veneers. Bit1 set if present, and Bit0 set when
3299 veneer has been populated. */
3300 bfd_vma bx_glue_offset
[15];
3302 /* The size in bytes of the section containing glue for VFP11 erratum
3304 bfd_size_type vfp11_erratum_glue_size
;
3306 /* The size in bytes of the section containing glue for STM32L4XX erratum
3308 bfd_size_type stm32l4xx_erratum_glue_size
;
3310 /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum. This
3311 holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
3312 elf32_arm_write_section(). */
3313 struct a8_erratum_fix
*a8_erratum_fixes
;
3314 unsigned int num_a8_erratum_fixes
;
3316 /* An arbitrary input BFD chosen to hold the glue sections. */
3317 bfd
* bfd_of_glue_owner
;
3319 /* Nonzero to output a BE8 image. */
3322 /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
3323 Nonzero if R_ARM_TARGET1 means R_ARM_REL32. */
3326 /* The relocation to use for R_ARM_TARGET2 relocations. */
3329 /* 0 = Ignore R_ARM_V4BX.
3330 1 = Convert BX to MOV PC.
3331 2 = Generate v4 interworing stubs. */
3334 /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum. */
3337 /* Whether we should fix the ARM1176 BLX immediate issue. */
3340 /* Nonzero if the ARM/Thumb BLX instructions are available for use. */
3343 /* What sort of code sequences we should look for which may trigger the
3344 VFP11 denorm erratum. */
3345 bfd_arm_vfp11_fix vfp11_fix
;
3347 /* Global counter for the number of fixes we have emitted. */
3348 int num_vfp11_fixes
;
3350 /* What sort of code sequences we should look for which may trigger the
3351 STM32L4XX erratum. */
3352 bfd_arm_stm32l4xx_fix stm32l4xx_fix
;
3354 /* Global counter for the number of fixes we have emitted. */
3355 int num_stm32l4xx_fixes
;
3357 /* Nonzero to force PIC branch veneers. */
3360 /* The number of bytes in the initial entry in the PLT. */
3361 bfd_size_type plt_header_size
;
3363 /* The number of bytes in the subsequent PLT etries. */
3364 bfd_size_type plt_entry_size
;
3366 /* True if the target system is VxWorks. */
3369 /* True if the target system is Symbian OS. */
3372 /* True if the target system is Native Client. */
3375 /* True if the target uses REL relocations. */
3376 bfd_boolean use_rel
;
3378 /* Nonzero if import library must be a secure gateway import library
3379 as per ARMv8-M Security Extensions. */
3382 /* The import library whose symbols' address must remain stable in
3383 the import library generated. */
3386 /* The index of the next unused R_ARM_TLS_DESC slot in .rel.plt. */
3387 bfd_vma next_tls_desc_index
;
3389 /* How many R_ARM_TLS_DESC relocations were generated so far. */
3390 bfd_vma num_tls_desc
;
3392 /* The (unloaded but important) VxWorks .rela.plt.unloaded section. */
3395 /* The offset into splt of the PLT entry for the TLS descriptor
3396 resolver. Special values are 0, if not necessary (or not found
3397 to be necessary yet), and -1 if needed but not determined
3399 bfd_vma dt_tlsdesc_plt
;
3401 /* The offset into sgot of the GOT entry used by the PLT entry
3403 bfd_vma dt_tlsdesc_got
;
3405 /* Offset in .plt section of tls_arm_trampoline. */
3406 bfd_vma tls_trampoline
;
3408 /* Data for R_ARM_TLS_LDM32/R_ARM_TLS_LDM32_FDPIC relocations. */
3411 bfd_signed_vma refcount
;
3415 /* Small local sym cache. */
3416 struct sym_cache sym_cache
;
3418 /* For convenience in allocate_dynrelocs. */
3421 /* The amount of space used by the reserved portion of the sgotplt
3422 section, plus whatever space is used by the jump slots. */
3423 bfd_vma sgotplt_jump_table_size
;
3425 /* The stub hash table. */
3426 struct bfd_hash_table stub_hash_table
;
3428 /* Linker stub bfd. */
3431 /* Linker call-backs. */
3432 asection
* (*add_stub_section
) (const char *, asection
*, asection
*,
3434 void (*layout_sections_again
) (void);
3436 /* Array to keep track of which stub sections have been created, and
3437 information on stub grouping. */
3438 struct map_stub
*stub_group
;
3440 /* Input stub section holding secure gateway veneers. */
3441 asection
*cmse_stub_sec
;
3443 /* Offset in cmse_stub_sec where new SG veneers (not in input import library)
3444 start to be allocated. */
3445 bfd_vma new_cmse_stub_offset
;
3447 /* Number of elements in stub_group. */
3448 unsigned int top_id
;
3450 /* Assorted information used by elf32_arm_size_stubs. */
3451 unsigned int bfd_count
;
3452 unsigned int top_index
;
3453 asection
**input_list
;
3455 /* True if the target system uses FDPIC. */
3458 /* Fixup section. Used for FDPIC. */
/* Add an FDPIC read-only fixup.  */
static void
arm_elf_add_rofixup (bfd *output_bfd, asection *srofixup, bfd_vma offset)
{
  bfd_vma fixup_offset;

  fixup_offset = srofixup->reloc_count++ * 4;
  BFD_ASSERT (fixup_offset < srofixup->size);
  bfd_put_32 (output_bfd, offset, srofixup->contents + fixup_offset);
}
static inline int
ctz (unsigned int mask)
{
#if GCC_VERSION >= 3004
  return __builtin_ctz (mask);
#else
  unsigned int i;

  for (i = 0; i < 8 * sizeof (mask); i++)
    {
      if (mask & 0x1)
	break;
      mask = (mask >> 1);
    }
  return i;
#endif
}

static inline int
elf32_arm_popcount (unsigned int mask)
{
#if GCC_VERSION >= 3004
  return __builtin_popcount (mask);
#else
  unsigned int i;
  int sum = 0;

  for (i = 0; i < 8 * sizeof (mask); i++)
    {
      if (mask & 0x1)
	sum++;
      mask = (mask >> 1);
    }
  return sum;
#endif
}
3510 static void elf32_arm_add_dynreloc (bfd
*output_bfd
, struct bfd_link_info
*info
,
3511 asection
*sreloc
, Elf_Internal_Rela
*rel
);
3514 arm_elf_fill_funcdesc(bfd
*output_bfd
,
3515 struct bfd_link_info
*info
,
3516 int *funcdesc_offset
,
3520 bfd_vma dynreloc_value
,
3523 if ((*funcdesc_offset
& 1) == 0)
3525 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (info
);
3526 asection
*sgot
= globals
->root
.sgot
;
3528 if (bfd_link_pic(info
))
3530 asection
*srelgot
= globals
->root
.srelgot
;
3531 Elf_Internal_Rela outrel
;
3533 outrel
.r_info
= ELF32_R_INFO (dynindx
, R_ARM_FUNCDESC_VALUE
);
3534 outrel
.r_offset
= sgot
->output_section
->vma
+ sgot
->output_offset
+ offset
;
3535 outrel
.r_addend
= 0;
3537 elf32_arm_add_dynreloc (output_bfd
, info
, srelgot
, &outrel
);
3538 bfd_put_32 (output_bfd
, addr
, sgot
->contents
+ offset
);
3539 bfd_put_32 (output_bfd
, seg
, sgot
->contents
+ offset
+ 4);
3543 struct elf_link_hash_entry
*hgot
= globals
->root
.hgot
;
3544 bfd_vma got_value
= hgot
->root
.u
.def
.value
3545 + hgot
->root
.u
.def
.section
->output_section
->vma
3546 + hgot
->root
.u
.def
.section
->output_offset
;
3548 arm_elf_add_rofixup(output_bfd
, globals
->srofixup
,
3549 sgot
->output_section
->vma
+ sgot
->output_offset
3551 arm_elf_add_rofixup(output_bfd
, globals
->srofixup
,
3552 sgot
->output_section
->vma
+ sgot
->output_offset
3554 bfd_put_32 (output_bfd
, dynreloc_value
, sgot
->contents
+ offset
);
3555 bfd_put_32 (output_bfd
, got_value
, sgot
->contents
+ offset
+ 4);
3557 *funcdesc_offset
|= 1;
3561 /* Create an entry in an ARM ELF linker hash table. */
3563 static struct bfd_hash_entry
*
3564 elf32_arm_link_hash_newfunc (struct bfd_hash_entry
* entry
,
3565 struct bfd_hash_table
* table
,
3566 const char * string
)
3568 struct elf32_arm_link_hash_entry
* ret
=
3569 (struct elf32_arm_link_hash_entry
*) entry
;
3571 /* Allocate the structure if it has not already been allocated by a
3574 ret
= (struct elf32_arm_link_hash_entry
*)
3575 bfd_hash_allocate (table
, sizeof (struct elf32_arm_link_hash_entry
));
3577 return (struct bfd_hash_entry
*) ret
;
3579 /* Call the allocation method of the superclass. */
3580 ret
= ((struct elf32_arm_link_hash_entry
*)
3581 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry
*) ret
,
3585 ret
->dyn_relocs
= NULL
;
3586 ret
->tls_type
= GOT_UNKNOWN
;
3587 ret
->tlsdesc_got
= (bfd_vma
) -1;
3588 ret
->plt
.thumb_refcount
= 0;
3589 ret
->plt
.maybe_thumb_refcount
= 0;
3590 ret
->plt
.noncall_refcount
= 0;
3591 ret
->plt
.got_offset
= -1;
3592 ret
->is_iplt
= FALSE
;
3593 ret
->export_glue
= NULL
;
3595 ret
->stub_cache
= NULL
;
3597 ret
->fdpic_cnts
.gotofffuncdesc_cnt
= 0;
3598 ret
->fdpic_cnts
.gotfuncdesc_cnt
= 0;
3599 ret
->fdpic_cnts
.funcdesc_cnt
= 0;
3600 ret
->fdpic_cnts
.funcdesc_offset
= -1;
3601 ret
->fdpic_cnts
.gotfuncdesc_offset
= -1;
3604 return (struct bfd_hash_entry
*) ret
;
3607 /* Ensure that we have allocated bookkeeping structures for ABFD's local
3611 elf32_arm_allocate_local_sym_info (bfd
*abfd
)
3613 if (elf_local_got_refcounts (abfd
) == NULL
)
3615 bfd_size_type num_syms
;
3619 num_syms
= elf_tdata (abfd
)->symtab_hdr
.sh_info
;
3620 size
= num_syms
* (sizeof (bfd_signed_vma
)
3621 + sizeof (struct arm_local_iplt_info
*)
3624 + sizeof (struct fdpic_local
));
3625 data
= bfd_zalloc (abfd
, size
);
3629 elf32_arm_local_fdpic_cnts (abfd
) = (struct fdpic_local
*) data
;
3630 data
+= num_syms
* sizeof (struct fdpic_local
);
3632 elf_local_got_refcounts (abfd
) = (bfd_signed_vma
*) data
;
3633 data
+= num_syms
* sizeof (bfd_signed_vma
);
3635 elf32_arm_local_iplt (abfd
) = (struct arm_local_iplt_info
**) data
;
3636 data
+= num_syms
* sizeof (struct arm_local_iplt_info
*);
3638 elf32_arm_local_tlsdesc_gotent (abfd
) = (bfd_vma
*) data
;
3639 data
+= num_syms
* sizeof (bfd_vma
);
3641 elf32_arm_local_got_tls_type (abfd
) = data
;
3646 /* Return the .iplt information for local symbol R_SYMNDX, which belongs
3647 to input bfd ABFD. Create the information if it doesn't already exist.
3648 Return null if an allocation fails. */
3650 static struct arm_local_iplt_info
*
3651 elf32_arm_create_local_iplt (bfd
*abfd
, unsigned long r_symndx
)
3653 struct arm_local_iplt_info
**ptr
;
3655 if (!elf32_arm_allocate_local_sym_info (abfd
))
3658 BFD_ASSERT (r_symndx
< elf_tdata (abfd
)->symtab_hdr
.sh_info
);
3659 ptr
= &elf32_arm_local_iplt (abfd
)[r_symndx
];
3661 *ptr
= bfd_zalloc (abfd
, sizeof (**ptr
));
3665 /* Try to obtain PLT information for the symbol with index R_SYMNDX
3666 in ABFD's symbol table. If the symbol is global, H points to its
3667 hash table entry, otherwise H is null.
3669 Return true if the symbol does have PLT information. When returning
3670 true, point *ROOT_PLT at the target-independent reference count/offset
3671 union and *ARM_PLT at the ARM-specific information. */
3674 elf32_arm_get_plt_info (bfd
*abfd
, struct elf32_arm_link_hash_table
*globals
,
3675 struct elf32_arm_link_hash_entry
*h
,
3676 unsigned long r_symndx
, union gotplt_union
**root_plt
,
3677 struct arm_plt_info
**arm_plt
)
3679 struct arm_local_iplt_info
*local_iplt
;
3681 if (globals
->root
.splt
== NULL
&& globals
->root
.iplt
== NULL
)
3686 *root_plt
= &h
->root
.plt
;
3691 if (elf32_arm_local_iplt (abfd
) == NULL
)
3694 local_iplt
= elf32_arm_local_iplt (abfd
)[r_symndx
];
3695 if (local_iplt
== NULL
)
3698 *root_plt
= &local_iplt
->root
;
3699 *arm_plt
= &local_iplt
->arm
;
3703 static bfd_boolean
using_thumb_only (struct elf32_arm_link_hash_table
*globals
);
3705 /* Return true if the PLT described by ARM_PLT requires a Thumb stub
3709 elf32_arm_plt_needs_thumb_stub_p (struct bfd_link_info
*info
,
3710 struct arm_plt_info
*arm_plt
)
3712 struct elf32_arm_link_hash_table
*htab
;
3714 htab
= elf32_arm_hash_table (info
);
3716 return (!using_thumb_only(htab
) && (arm_plt
->thumb_refcount
!= 0
3717 || (!htab
->use_blx
&& arm_plt
->maybe_thumb_refcount
!= 0)));
3720 /* Return a pointer to the head of the dynamic reloc list that should
3721 be used for local symbol ISYM, which is symbol number R_SYMNDX in
3722 ABFD's symbol table. Return null if an error occurs. */
3724 static struct elf_dyn_relocs
**
3725 elf32_arm_get_local_dynreloc_list (bfd
*abfd
, unsigned long r_symndx
,
3726 Elf_Internal_Sym
*isym
)
3728 if (ELF32_ST_TYPE (isym
->st_info
) == STT_GNU_IFUNC
)
3730 struct arm_local_iplt_info
*local_iplt
;
3732 local_iplt
= elf32_arm_create_local_iplt (abfd
, r_symndx
);
3733 if (local_iplt
== NULL
)
3735 return &local_iplt
->dyn_relocs
;
3739 /* Track dynamic relocs needed for local syms too.
3740 We really need local syms available to do this
3745 s
= bfd_section_from_elf_index (abfd
, isym
->st_shndx
);
3749 vpp
= &elf_section_data (s
)->local_dynrel
;
3750 return (struct elf_dyn_relocs
**) vpp
;
3754 /* Initialize an entry in the stub hash table. */
3756 static struct bfd_hash_entry
*
3757 stub_hash_newfunc (struct bfd_hash_entry
*entry
,
3758 struct bfd_hash_table
*table
,
3761 /* Allocate the structure if it has not already been allocated by a
3765 entry
= (struct bfd_hash_entry
*)
3766 bfd_hash_allocate (table
, sizeof (struct elf32_arm_stub_hash_entry
));
3771 /* Call the allocation method of the superclass. */
3772 entry
= bfd_hash_newfunc (entry
, table
, string
);
3775 struct elf32_arm_stub_hash_entry
*eh
;
3777 /* Initialize the local fields. */
3778 eh
= (struct elf32_arm_stub_hash_entry
*) entry
;
3779 eh
->stub_sec
= NULL
;
3780 eh
->stub_offset
= (bfd_vma
) -1;
3781 eh
->source_value
= 0;
3782 eh
->target_value
= 0;
3783 eh
->target_section
= NULL
;
3785 eh
->stub_type
= arm_stub_none
;
3787 eh
->stub_template
= NULL
;
3788 eh
->stub_template_size
= -1;
3791 eh
->output_name
= NULL
;
3797 /* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
3798 shortcuts to them in our hash table. */
3801 create_got_section (bfd
*dynobj
, struct bfd_link_info
*info
)
3803 struct elf32_arm_link_hash_table
*htab
;
3805 htab
= elf32_arm_hash_table (info
);
3809 /* BPABI objects never have a GOT, or associated sections. */
3810 if (htab
->symbian_p
)
3813 if (! _bfd_elf_create_got_section (dynobj
, info
))
3816 /* Also create .rofixup. */
3819 htab
->srofixup
= bfd_make_section_with_flags (dynobj
, ".rofixup",
3820 (SEC_ALLOC
| SEC_LOAD
| SEC_HAS_CONTENTS
3821 | SEC_IN_MEMORY
| SEC_LINKER_CREATED
| SEC_READONLY
));
3822 if (htab
->srofixup
== NULL
3823 || !bfd_set_section_alignment (htab
->srofixup
, 2))
3830 /* Create the .iplt, .rel(a).iplt and .igot.plt sections. */
3833 create_ifunc_sections (struct bfd_link_info
*info
)
3835 struct elf32_arm_link_hash_table
*htab
;
3836 const struct elf_backend_data
*bed
;
3841 htab
= elf32_arm_hash_table (info
);
3842 dynobj
= htab
->root
.dynobj
;
3843 bed
= get_elf_backend_data (dynobj
);
3844 flags
= bed
->dynamic_sec_flags
;
3846 if (htab
->root
.iplt
== NULL
)
3848 s
= bfd_make_section_anyway_with_flags (dynobj
, ".iplt",
3849 flags
| SEC_READONLY
| SEC_CODE
);
3851 || !bfd_set_section_alignment (s
, bed
->plt_alignment
))
3853 htab
->root
.iplt
= s
;
3856 if (htab
->root
.irelplt
== NULL
)
3858 s
= bfd_make_section_anyway_with_flags (dynobj
,
3859 RELOC_SECTION (htab
, ".iplt"),
3860 flags
| SEC_READONLY
);
3862 || !bfd_set_section_alignment (s
, bed
->s
->log_file_align
))
3864 htab
->root
.irelplt
= s
;
3867 if (htab
->root
.igotplt
== NULL
)
3869 s
= bfd_make_section_anyway_with_flags (dynobj
, ".igot.plt", flags
);
3871 || !bfd_set_section_alignment (s
, bed
->s
->log_file_align
))
3873 htab
->root
.igotplt
= s
;
3878 /* Determine if we're dealing with a Thumb only architecture. */
3881 using_thumb_only (struct elf32_arm_link_hash_table
*globals
)
3884 int profile
= bfd_elf_get_obj_attr_int (globals
->obfd
, OBJ_ATTR_PROC
,
3885 Tag_CPU_arch_profile
);
3888 return profile
== 'M';
3890 arch
= bfd_elf_get_obj_attr_int (globals
->obfd
, OBJ_ATTR_PROC
, Tag_CPU_arch
);
3892 /* Force return logic to be reviewed for each new architecture. */
3893 BFD_ASSERT (arch
<= TAG_CPU_ARCH_V8_1M_MAIN
);
3895 if (arch
== TAG_CPU_ARCH_V6_M
3896 || arch
== TAG_CPU_ARCH_V6S_M
3897 || arch
== TAG_CPU_ARCH_V7E_M
3898 || arch
== TAG_CPU_ARCH_V8M_BASE
3899 || arch
== TAG_CPU_ARCH_V8M_MAIN
3900 || arch
== TAG_CPU_ARCH_V8_1M_MAIN
)
3906 /* Determine if we're dealing with a Thumb-2 object. */
3909 using_thumb2 (struct elf32_arm_link_hash_table
*globals
)
3912 int thumb_isa
= bfd_elf_get_obj_attr_int (globals
->obfd
, OBJ_ATTR_PROC
,
3916 return thumb_isa
== 2;
3918 arch
= bfd_elf_get_obj_attr_int (globals
->obfd
, OBJ_ATTR_PROC
, Tag_CPU_arch
);
3920 /* Force return logic to be reviewed for each new architecture. */
3921 BFD_ASSERT (arch
<= TAG_CPU_ARCH_V8_1M_MAIN
);
3923 return (arch
== TAG_CPU_ARCH_V6T2
3924 || arch
== TAG_CPU_ARCH_V7
3925 || arch
== TAG_CPU_ARCH_V7E_M
3926 || arch
== TAG_CPU_ARCH_V8
3927 || arch
== TAG_CPU_ARCH_V8R
3928 || arch
== TAG_CPU_ARCH_V8M_MAIN
3929 || arch
== TAG_CPU_ARCH_V8_1M_MAIN
);
/* Determine whether Thumb-2 BL instruction is available.  */

static bfd_boolean
using_thumb2_bl (struct elf32_arm_link_hash_table *globals)
{
  int arch
    = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);

  /* Force return logic to be reviewed for each new architecture.  */
  BFD_ASSERT (arch <= TAG_CPU_ARCH_V8_1M_MAIN);

  /* Architecture was introduced after ARMv6T2 (eg. ARMv6-M).  */
  return (arch == TAG_CPU_ARCH_V6T2
	  || arch >= TAG_CPU_ARCH_V7);
}
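/* Note: the Thumb-2 BL/BLX encodings reach roughly +/-16MB while the original
   Thumb-1 encoding only reaches +/-4MB, which is why arm_type_of_stub below
   selects between the THM2_MAX_*_BRANCH_OFFSET and THM_MAX_*_BRANCH_OFFSET
   limits based on this predicate.  */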
/* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
   .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
   hash table.  */

static bfd_boolean
elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
{
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  if (!htab->root.sgot && !create_got_section (dynobj, info))
    return FALSE;

  if (!_bfd_elf_create_dynamic_sections (dynobj, info))
    return FALSE;

  if (htab->vxworks_p)
    {
      if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
	return FALSE;

      if (bfd_link_pic (info))
	{
	  htab->plt_header_size = 0;
	  htab->plt_entry_size
	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
	}
      else
	{
	  htab->plt_header_size
	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
	  htab->plt_entry_size
	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
	}

      if (elf_elfheader (dynobj))
	elf_elfheader (dynobj)->e_ident[EI_CLASS] = ELFCLASS32;
    }
  else
    {
      /* Test for thumb only architectures.  Note - we cannot just call
	 using_thumb_only() as the attributes in the output bfd have not been
	 initialised at this point, so instead we use the input bfd.  */
      bfd * saved_obfd = htab->obfd;

      htab->obfd = dynobj;
      if (using_thumb_only (htab))
	{
	  htab->plt_header_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
	  htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);
	}
      htab->obfd = saved_obfd;
    }

  if (htab->fdpic_p)
    {
      htab->plt_header_size = 0;
      if (info->flags & DF_BIND_NOW)
	htab->plt_entry_size = 4 * (ARRAY_SIZE (elf32_arm_fdpic_plt_entry) - 5);
      else
	htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_fdpic_plt_entry);
    }

  if (!htab->root.splt
      || !htab->root.srelplt
      || !htab->root.sdynbss
      || (!bfd_link_pic (info) && !htab->root.srelbss))
    abort ();

  return TRUE;
}
/* Copy the extra info we tack onto an elf_link_hash_entry.  */

static void
elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
				struct elf_link_hash_entry *dir,
				struct elf_link_hash_entry *ind)
{
  struct elf32_arm_link_hash_entry *edir, *eind;

  edir = (struct elf32_arm_link_hash_entry *) dir;
  eind = (struct elf32_arm_link_hash_entry *) ind;

  if (eind->dyn_relocs != NULL)
    {
      if (edir->dyn_relocs != NULL)
	{
	  struct elf_dyn_relocs **pp;
	  struct elf_dyn_relocs *p;

	  /* Add reloc counts against the indirect sym to the direct sym
	     list.  Merge any entries against the same section.  */
	  for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
	    {
	      struct elf_dyn_relocs *q;

	      for (q = edir->dyn_relocs; q != NULL; q = q->next)
		if (q->sec == p->sec)
		  {
		    q->pc_count += p->pc_count;
		    q->count += p->count;
		    *pp = p->next;
		    break;
		  }
	      if (q == NULL)
		pp = &p->next;
	    }
	  *pp = edir->dyn_relocs;
	}

      edir->dyn_relocs = eind->dyn_relocs;
      eind->dyn_relocs = NULL;
    }

  if (ind->root.type == bfd_link_hash_indirect)
    {
      /* Copy over PLT info.  */
      edir->plt.thumb_refcount += eind->plt.thumb_refcount;
      eind->plt.thumb_refcount = 0;
      edir->plt.maybe_thumb_refcount += eind->plt.maybe_thumb_refcount;
      eind->plt.maybe_thumb_refcount = 0;
      edir->plt.noncall_refcount += eind->plt.noncall_refcount;
      eind->plt.noncall_refcount = 0;

      /* Copy FDPIC counters.  */
      edir->fdpic_cnts.gotofffuncdesc_cnt += eind->fdpic_cnts.gotofffuncdesc_cnt;
      edir->fdpic_cnts.gotfuncdesc_cnt += eind->fdpic_cnts.gotfuncdesc_cnt;
      edir->fdpic_cnts.funcdesc_cnt += eind->fdpic_cnts.funcdesc_cnt;

      /* We should only allocate a function to .iplt once the final
	 symbol information is known.  */
      BFD_ASSERT (!eind->is_iplt);

      if (dir->got.refcount <= 0)
	{
	  edir->tls_type = eind->tls_type;
	  eind->tls_type = GOT_UNKNOWN;
	}
    }

  _bfd_elf_link_hash_copy_indirect (info, dir, ind);
}
/* Destroy an ARM elf linker hash table.  */

static void
elf32_arm_link_hash_table_free (bfd *obfd)
{
  struct elf32_arm_link_hash_table *ret
    = (struct elf32_arm_link_hash_table *) obfd->link.hash;

  bfd_hash_table_free (&ret->stub_hash_table);
  _bfd_elf_link_hash_table_free (obfd);
}
/* Create an ARM elf linker hash table.  */

static struct bfd_link_hash_table *
elf32_arm_link_hash_table_create (bfd *abfd)
{
  struct elf32_arm_link_hash_table *ret;
  bfd_size_type amt = sizeof (struct elf32_arm_link_hash_table);

  ret = (struct elf32_arm_link_hash_table *) bfd_zmalloc (amt);
  if (ret == NULL)
    return NULL;

  if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
				      elf32_arm_link_hash_newfunc,
				      sizeof (struct elf32_arm_link_hash_entry),
				      ARM_ELF_DATA))
    {
      free (ret);
      return NULL;
    }

  ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
  ret->stm32l4xx_fix = BFD_ARM_STM32L4XX_FIX_NONE;
#ifdef FOUR_WORD_PLT
  ret->plt_header_size = 16;
  ret->plt_entry_size = 16;
#else
  ret->plt_header_size = 20;
  ret->plt_entry_size = elf32_arm_use_long_plt_entry ? 16 : 12;
#endif
  ret->use_rel = TRUE;

  if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
			    sizeof (struct elf32_arm_stub_hash_entry)))
    {
      _bfd_elf_link_hash_table_free (abfd);
      return NULL;
    }
  ret->root.root.hash_table_free = elf32_arm_link_hash_table_free;

  return &ret->root.root;
}
/* Determine what kind of NOPs are available.  */

static bfd_boolean
arch_has_arm_nop (struct elf32_arm_link_hash_table *globals)
{
  const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
					     Tag_CPU_arch);

  /* Force return logic to be reviewed for each new architecture.  */
  BFD_ASSERT (arch <= TAG_CPU_ARCH_V8_1M_MAIN);

  return (arch == TAG_CPU_ARCH_V6T2
	  || arch == TAG_CPU_ARCH_V6K
	  || arch == TAG_CPU_ARCH_V7
	  || arch == TAG_CPU_ARCH_V8
	  || arch == TAG_CPU_ARCH_V8R);
}
static bfd_boolean
arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
{
  switch (stub_type)
    {
    case arm_stub_long_branch_thumb_only:
    case arm_stub_long_branch_thumb2_only:
    case arm_stub_long_branch_thumb2_only_pure:
    case arm_stub_long_branch_v4t_thumb_arm:
    case arm_stub_short_branch_v4t_thumb_arm:
    case arm_stub_long_branch_v4t_thumb_arm_pic:
    case arm_stub_long_branch_v4t_thumb_tls_pic:
    case arm_stub_long_branch_thumb_only_pic:
    case arm_stub_cmse_branch_thumb_only:
      return TRUE;

    default:
      return FALSE;
    }
}
4194 /* Determine the type of stub needed, if any, for a call. */
4196 static enum elf32_arm_stub_type
4197 arm_type_of_stub (struct bfd_link_info
*info
,
4198 asection
*input_sec
,
4199 const Elf_Internal_Rela
*rel
,
4200 unsigned char st_type
,
4201 enum arm_st_branch_type
*actual_branch_type
,
4202 struct elf32_arm_link_hash_entry
*hash
,
4203 bfd_vma destination
,
4209 bfd_signed_vma branch_offset
;
4210 unsigned int r_type
;
4211 struct elf32_arm_link_hash_table
* globals
;
4212 bfd_boolean thumb2
, thumb2_bl
, thumb_only
;
4213 enum elf32_arm_stub_type stub_type
= arm_stub_none
;
4215 enum arm_st_branch_type branch_type
= *actual_branch_type
;
4216 union gotplt_union
*root_plt
;
4217 struct arm_plt_info
*arm_plt
;
4221 if (branch_type
== ST_BRANCH_LONG
)
4224 globals
= elf32_arm_hash_table (info
);
4225 if (globals
== NULL
)
4228 thumb_only
= using_thumb_only (globals
);
4229 thumb2
= using_thumb2 (globals
);
4230 thumb2_bl
= using_thumb2_bl (globals
);
4232 arch
= bfd_elf_get_obj_attr_int (globals
->obfd
, OBJ_ATTR_PROC
, Tag_CPU_arch
);
4234 /* True for architectures that implement the thumb2 movw instruction. */
4235 thumb2_movw
= thumb2
|| (arch
== TAG_CPU_ARCH_V8M_BASE
);
4237 /* Determine where the call point is. */
4238 location
= (input_sec
->output_offset
4239 + input_sec
->output_section
->vma
4242 r_type
= ELF32_R_TYPE (rel
->r_info
);
4244 /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
4245 are considering a function call relocation. */
4246 if (thumb_only
&& (r_type
== R_ARM_THM_CALL
|| r_type
== R_ARM_THM_JUMP24
4247 || r_type
== R_ARM_THM_JUMP19
)
4248 && branch_type
== ST_BRANCH_TO_ARM
)
4249 branch_type
= ST_BRANCH_TO_THUMB
;
4251 /* For TLS call relocs, it is the caller's responsibility to provide
4252 the address of the appropriate trampoline. */
4253 if (r_type
!= R_ARM_TLS_CALL
4254 && r_type
!= R_ARM_THM_TLS_CALL
4255 && elf32_arm_get_plt_info (input_bfd
, globals
, hash
,
4256 ELF32_R_SYM (rel
->r_info
), &root_plt
,
4258 && root_plt
->offset
!= (bfd_vma
) -1)
4262 if (hash
== NULL
|| hash
->is_iplt
)
4263 splt
= globals
->root
.iplt
;
4265 splt
= globals
->root
.splt
;
4270 /* Note when dealing with PLT entries: the main PLT stub is in
4271 ARM mode, so if the branch is in Thumb mode, another
4272 Thumb->ARM stub will be inserted later just before the ARM
4273 PLT stub. If a long branch stub is needed, we'll add a
4274 Thumb->Arm one and branch directly to the ARM PLT entry.
4275 Here, we have to check if a pre-PLT Thumb->ARM stub
4276 is needed and if it will be close enough. */
4278 destination
= (splt
->output_section
->vma
4279 + splt
->output_offset
4280 + root_plt
->offset
);
4283 /* Thumb branch/call to PLT: it can become a branch to ARM
4284 or to Thumb. We must perform the same checks and
4285 corrections as in elf32_arm_final_link_relocate. */
4286 if ((r_type
== R_ARM_THM_CALL
)
4287 || (r_type
== R_ARM_THM_JUMP24
))
4289 if (globals
->use_blx
4290 && r_type
== R_ARM_THM_CALL
4293 /* If the Thumb BLX instruction is available, convert
4294 the BL to a BLX instruction to call the ARM-mode
4296 branch_type
= ST_BRANCH_TO_ARM
;
4301 /* Target the Thumb stub before the ARM PLT entry. */
4302 destination
-= PLT_THUMB_STUB_SIZE
;
4303 branch_type
= ST_BRANCH_TO_THUMB
;
4308 branch_type
= ST_BRANCH_TO_ARM
;
4312 /* Calls to STT_GNU_IFUNC symbols should go through a PLT. */
4313 BFD_ASSERT (st_type
!= STT_GNU_IFUNC
);
4315 branch_offset
= (bfd_signed_vma
)(destination
- location
);
4317 if (r_type
== R_ARM_THM_CALL
|| r_type
== R_ARM_THM_JUMP24
4318 || r_type
== R_ARM_THM_TLS_CALL
|| r_type
== R_ARM_THM_JUMP19
)
4320 /* Handle cases where:
4321 - this call goes too far (different Thumb/Thumb2 max
4323 - it's a Thumb->Arm call and blx is not available, or it's a
4324 Thumb->Arm branch (not bl). A stub is needed in this case,
4325 but only if this call is not through a PLT entry. Indeed,
4326 PLT stubs handle mode switching already. */
4328 && (branch_offset
> THM_MAX_FWD_BRANCH_OFFSET
4329 || (branch_offset
< THM_MAX_BWD_BRANCH_OFFSET
)))
4331 && (branch_offset
> THM2_MAX_FWD_BRANCH_OFFSET
4332 || (branch_offset
< THM2_MAX_BWD_BRANCH_OFFSET
)))
4334 && (branch_offset
> THM2_MAX_FWD_COND_BRANCH_OFFSET
4335 || (branch_offset
< THM2_MAX_BWD_COND_BRANCH_OFFSET
))
4336 && (r_type
== R_ARM_THM_JUMP19
))
4337 || (branch_type
== ST_BRANCH_TO_ARM
4338 && (((r_type
== R_ARM_THM_CALL
4339 || r_type
== R_ARM_THM_TLS_CALL
) && !globals
->use_blx
)
4340 || (r_type
== R_ARM_THM_JUMP24
)
4341 || (r_type
== R_ARM_THM_JUMP19
))
4344 /* If we need to insert a Thumb-Thumb long branch stub to a
4345 PLT, use one that branches directly to the ARM PLT
4346 stub. If we pretended we'd use the pre-PLT Thumb->ARM
4347 stub, undo this now. */
4348 if ((branch_type
== ST_BRANCH_TO_THUMB
) && use_plt
&& !thumb_only
)
4350 branch_type
= ST_BRANCH_TO_ARM
;
4351 branch_offset
+= PLT_THUMB_STUB_SIZE
;
4354 if (branch_type
== ST_BRANCH_TO_THUMB
)
4356 /* Thumb to thumb. */
4359 if (input_sec
->flags
& SEC_ELF_PURECODE
)
4361 (_("%pB(%pA): warning: long branch veneers used in"
4362 " section with SHF_ARM_PURECODE section"
4363 " attribute is only supported for M-profile"
4364 " targets that implement the movw instruction"),
4365 input_bfd
, input_sec
);
4367 stub_type
= (bfd_link_pic (info
) | globals
->pic_veneer
)
4369 ? ((globals
->use_blx
4370 && (r_type
== R_ARM_THM_CALL
))
4371 /* V5T and above. Stub starts with ARM code, so
4372 we must be able to switch mode before
4373 reaching it, which is only possible for 'bl'
4374 (ie R_ARM_THM_CALL relocation). */
4375 ? arm_stub_long_branch_any_thumb_pic
4376 /* On V4T, use Thumb code only. */
4377 : arm_stub_long_branch_v4t_thumb_thumb_pic
)
4379 /* non-PIC stubs. */
4380 : ((globals
->use_blx
4381 && (r_type
== R_ARM_THM_CALL
))
4382 /* V5T and above. */
4383 ? arm_stub_long_branch_any_any
4385 : arm_stub_long_branch_v4t_thumb_thumb
);
4389 if (thumb2_movw
&& (input_sec
->flags
& SEC_ELF_PURECODE
))
4390 stub_type
= arm_stub_long_branch_thumb2_only_pure
;
4393 if (input_sec
->flags
& SEC_ELF_PURECODE
)
4395 (_("%pB(%pA): warning: long branch veneers used in"
4396 " section with SHF_ARM_PURECODE section"
4397 " attribute is only supported for M-profile"
4398 " targets that implement the movw instruction"),
4399 input_bfd
, input_sec
);
4401 stub_type
= (bfd_link_pic (info
) | globals
->pic_veneer
)
4403 ? arm_stub_long_branch_thumb_only_pic
4405 : (thumb2
? arm_stub_long_branch_thumb2_only
4406 : arm_stub_long_branch_thumb_only
);
4412 if (input_sec
->flags
& SEC_ELF_PURECODE
)
4414 (_("%pB(%pA): warning: long branch veneers used in"
4415 " section with SHF_ARM_PURECODE section"
4416 " attribute is only supported" " for M-profile"
4417 " targets that implement the movw instruction"),
4418 input_bfd
, input_sec
);
4422 && sym_sec
->owner
!= NULL
4423 && !INTERWORK_FLAG (sym_sec
->owner
))
4426 (_("%pB(%s): warning: interworking not enabled;"
4427 " first occurrence: %pB: %s call to %s"),
4428 sym_sec
->owner
, name
, input_bfd
, "Thumb", "ARM");
4432 (bfd_link_pic (info
) | globals
->pic_veneer
)
4434 ? (r_type
== R_ARM_THM_TLS_CALL
4435 /* TLS PIC stubs. */
4436 ? (globals
->use_blx
? arm_stub_long_branch_any_tls_pic
4437 : arm_stub_long_branch_v4t_thumb_tls_pic
)
4438 : ((globals
->use_blx
&& r_type
== R_ARM_THM_CALL
)
4439 /* V5T PIC and above. */
4440 ? arm_stub_long_branch_any_arm_pic
4442 : arm_stub_long_branch_v4t_thumb_arm_pic
))
4444 /* non-PIC stubs. */
4445 : ((globals
->use_blx
&& r_type
== R_ARM_THM_CALL
)
4446 /* V5T and above. */
4447 ? arm_stub_long_branch_any_any
4449 : arm_stub_long_branch_v4t_thumb_arm
);
4451 /* Handle v4t short branches. */
4452 if ((stub_type
== arm_stub_long_branch_v4t_thumb_arm
)
4453 && (branch_offset
<= THM_MAX_FWD_BRANCH_OFFSET
)
4454 && (branch_offset
>= THM_MAX_BWD_BRANCH_OFFSET
))
4455 stub_type
= arm_stub_short_branch_v4t_thumb_arm
;
4459 else if (r_type
== R_ARM_CALL
4460 || r_type
== R_ARM_JUMP24
4461 || r_type
== R_ARM_PLT32
4462 || r_type
== R_ARM_TLS_CALL
)
4464 if (input_sec
->flags
& SEC_ELF_PURECODE
)
4466 (_("%pB(%pA): warning: long branch veneers used in"
4467 " section with SHF_ARM_PURECODE section"
4468 " attribute is only supported for M-profile"
4469 " targets that implement the movw instruction"),
4470 input_bfd
, input_sec
);
4471 if (branch_type
== ST_BRANCH_TO_THUMB
)
4476 && sym_sec
->owner
!= NULL
4477 && !INTERWORK_FLAG (sym_sec
->owner
))
4480 (_("%pB(%s): warning: interworking not enabled;"
4481 " first occurrence: %pB: %s call to %s"),
4482 sym_sec
->owner
, name
, input_bfd
, "ARM", "Thumb");
4485 /* We have an extra 2-bytes reach because of
4486 the mode change (bit 24 (H) of BLX encoding). */
4487 if (branch_offset
> (ARM_MAX_FWD_BRANCH_OFFSET
+ 2)
4488 || (branch_offset
< ARM_MAX_BWD_BRANCH_OFFSET
)
4489 || (r_type
== R_ARM_CALL
&& !globals
->use_blx
)
4490 || (r_type
== R_ARM_JUMP24
)
4491 || (r_type
== R_ARM_PLT32
))
4493 stub_type
= (bfd_link_pic (info
) | globals
->pic_veneer
)
4495 ? ((globals
->use_blx
)
4496 /* V5T and above. */
4497 ? arm_stub_long_branch_any_thumb_pic
4499 : arm_stub_long_branch_v4t_arm_thumb_pic
)
4501 /* non-PIC stubs. */
4502 : ((globals
->use_blx
)
4503 /* V5T and above. */
4504 ? arm_stub_long_branch_any_any
4506 : arm_stub_long_branch_v4t_arm_thumb
);
4512 if (branch_offset
> ARM_MAX_FWD_BRANCH_OFFSET
4513 || (branch_offset
< ARM_MAX_BWD_BRANCH_OFFSET
))
4516 (bfd_link_pic (info
) | globals
->pic_veneer
)
4518 ? (r_type
== R_ARM_TLS_CALL
4520 ? arm_stub_long_branch_any_tls_pic
4522 ? arm_stub_long_branch_arm_nacl_pic
4523 : arm_stub_long_branch_any_arm_pic
))
4524 /* non-PIC stubs. */
4526 ? arm_stub_long_branch_arm_nacl
4527 : arm_stub_long_branch_any_any
);
4532 /* If a stub is needed, record the actual destination type. */
4533 if (stub_type
!= arm_stub_none
)
4534 *actual_branch_type
= branch_type
;
/* Build a name for an entry in the stub hash table.  */

static char *
elf32_arm_stub_name (const asection *input_section,
		     const asection *sym_sec,
		     const struct elf32_arm_link_hash_entry *hash,
		     const Elf_Internal_Rela *rel,
		     enum elf32_arm_stub_type stub_type)
{
  char *stub_name;
  bfd_size_type len;

  if (hash)
    {
      len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1 + 2 + 1;
      stub_name = (char *) bfd_malloc (len);
      if (stub_name != NULL)
	sprintf (stub_name, "%08x_%s+%x_%d",
		 input_section->id & 0xffffffff,
		 hash->root.root.root.string,
		 (int) rel->r_addend & 0xffffffff,
		 (int) stub_type);
    }
  else
    {
      len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 2 + 1;
      stub_name = (char *) bfd_malloc (len);
      if (stub_name != NULL)
	sprintf (stub_name, "%08x_%x:%x+%x_%d",
		 input_section->id & 0xffffffff,
		 sym_sec->id & 0xffffffff,
		 ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL
		 || ELF32_R_TYPE (rel->r_info) == R_ARM_THM_TLS_CALL
		 ? 0 : (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
		 (int) rel->r_addend & 0xffffffff,
		 (int) stub_type);
    }

  return stub_name;
}
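/* For instance (illustrative values only): a call to "memcpy" from the input
   section with id 0x1c, addend 0 and stub type 8 would be keyed as
   "0000001c_memcpy+0_8"; the purely numeric form is used when the target has
   no hash entry, e.g. a local symbol.  */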
4580 /* Look up an entry in the stub hash. Stub entries are cached because
4581 creating the stub name takes a bit of time. */
4583 static struct elf32_arm_stub_hash_entry
*
4584 elf32_arm_get_stub_entry (const asection
*input_section
,
4585 const asection
*sym_sec
,
4586 struct elf_link_hash_entry
*hash
,
4587 const Elf_Internal_Rela
*rel
,
4588 struct elf32_arm_link_hash_table
*htab
,
4589 enum elf32_arm_stub_type stub_type
)
4591 struct elf32_arm_stub_hash_entry
*stub_entry
;
4592 struct elf32_arm_link_hash_entry
*h
= (struct elf32_arm_link_hash_entry
*) hash
;
4593 const asection
*id_sec
;
4595 if ((input_section
->flags
& SEC_CODE
) == 0)
4598 /* If the input section is the CMSE stubs one and it needs a long
4599 branch stub to reach it's final destination, give up with an
4600 error message: this is not supported. See PR ld/24709. */
4601 if (!strncmp (input_section
->name
, CMSE_STUB_NAME
, strlen(CMSE_STUB_NAME
)))
4603 bfd
*output_bfd
= htab
->obfd
;
4604 asection
*out_sec
= bfd_get_section_by_name (output_bfd
, CMSE_STUB_NAME
);
4606 _bfd_error_handler (_("ERROR: CMSE stub (%s section) too far "
4607 "(%#" PRIx64
") from destination (%#" PRIx64
")"),
4609 (uint64_t)out_sec
->output_section
->vma
4610 + out_sec
->output_offset
,
4611 (uint64_t)sym_sec
->output_section
->vma
4612 + sym_sec
->output_offset
4613 + h
->root
.root
.u
.def
.value
);
4614 /* Exit, rather than leave incompletely processed
4619 /* If this input section is part of a group of sections sharing one
4620 stub section, then use the id of the first section in the group.
4621 Stub names need to include a section id, as there may well be
4622 more than one stub used to reach say, printf, and we need to
4623 distinguish between them. */
4624 BFD_ASSERT (input_section
->id
<= htab
->top_id
);
4625 id_sec
= htab
->stub_group
[input_section
->id
].link_sec
;
4627 if (h
!= NULL
&& h
->stub_cache
!= NULL
4628 && h
->stub_cache
->h
== h
4629 && h
->stub_cache
->id_sec
== id_sec
4630 && h
->stub_cache
->stub_type
== stub_type
)
4632 stub_entry
= h
->stub_cache
;
4638 stub_name
= elf32_arm_stub_name (id_sec
, sym_sec
, h
, rel
, stub_type
);
4639 if (stub_name
== NULL
)
4642 stub_entry
= arm_stub_hash_lookup (&htab
->stub_hash_table
,
4643 stub_name
, FALSE
, FALSE
);
4645 h
->stub_cache
= stub_entry
;
4653 /* Whether veneers of type STUB_TYPE require to be in a dedicated output
4657 arm_dedicated_stub_output_section_required (enum elf32_arm_stub_type stub_type
)
4659 if (stub_type
>= max_stub_type
)
4660 abort (); /* Should be unreachable. */
4664 case arm_stub_cmse_branch_thumb_only
:
4671 abort (); /* Should be unreachable. */
4674 /* Required alignment (as a power of 2) for the dedicated section holding
4675 veneers of type STUB_TYPE, or 0 if veneers of this type are interspersed
4676 with input sections. */
4679 arm_dedicated_stub_output_section_required_alignment
4680 (enum elf32_arm_stub_type stub_type
)
4682 if (stub_type
>= max_stub_type
)
4683 abort (); /* Should be unreachable. */
4687 /* Vectors of Secure Gateway veneers must be aligned on 32byte
4689 case arm_stub_cmse_branch_thumb_only
:
4693 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type
));
4697 abort (); /* Should be unreachable. */
4700 /* Name of the dedicated output section to put veneers of type STUB_TYPE, or
4701 NULL if veneers of this type are interspersed with input sections. */
4704 arm_dedicated_stub_output_section_name (enum elf32_arm_stub_type stub_type
)
4706 if (stub_type
>= max_stub_type
)
4707 abort (); /* Should be unreachable. */
4711 case arm_stub_cmse_branch_thumb_only
:
4712 return CMSE_STUB_NAME
;
4715 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type
));
4719 abort (); /* Should be unreachable. */
4722 /* If veneers of type STUB_TYPE should go in a dedicated output section,
4723 returns the address of the hash table field in HTAB holding a pointer to the
4724 corresponding input section. Otherwise, returns NULL. */
4727 arm_dedicated_stub_input_section_ptr (struct elf32_arm_link_hash_table
*htab
,
4728 enum elf32_arm_stub_type stub_type
)
4730 if (stub_type
>= max_stub_type
)
4731 abort (); /* Should be unreachable. */
4735 case arm_stub_cmse_branch_thumb_only
:
4736 return &htab
->cmse_stub_sec
;
4739 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type
));
4743 abort (); /* Should be unreachable. */
4746 /* Find or create a stub section to contain a stub of type STUB_TYPE. SECTION
4747 is the section that branch into veneer and can be NULL if stub should go in
4748 a dedicated output section. Returns a pointer to the stub section, and the
4749 section to which the stub section will be attached (in *LINK_SEC_P).
4750 LINK_SEC_P may be NULL. */
4753 elf32_arm_create_or_find_stub_sec (asection
**link_sec_p
, asection
*section
,
4754 struct elf32_arm_link_hash_table
*htab
,
4755 enum elf32_arm_stub_type stub_type
)
4757 asection
*link_sec
, *out_sec
, **stub_sec_p
;
4758 const char *stub_sec_prefix
;
4759 bfd_boolean dedicated_output_section
=
4760 arm_dedicated_stub_output_section_required (stub_type
);
4763 if (dedicated_output_section
)
4765 bfd
*output_bfd
= htab
->obfd
;
4766 const char *out_sec_name
=
4767 arm_dedicated_stub_output_section_name (stub_type
);
4769 stub_sec_p
= arm_dedicated_stub_input_section_ptr (htab
, stub_type
);
4770 stub_sec_prefix
= out_sec_name
;
4771 align
= arm_dedicated_stub_output_section_required_alignment (stub_type
);
4772 out_sec
= bfd_get_section_by_name (output_bfd
, out_sec_name
);
4773 if (out_sec
== NULL
)
4775 _bfd_error_handler (_("no address assigned to the veneers output "
4776 "section %s"), out_sec_name
);
4782 BFD_ASSERT (section
->id
<= htab
->top_id
);
4783 link_sec
= htab
->stub_group
[section
->id
].link_sec
;
4784 BFD_ASSERT (link_sec
!= NULL
);
4785 stub_sec_p
= &htab
->stub_group
[section
->id
].stub_sec
;
4786 if (*stub_sec_p
== NULL
)
4787 stub_sec_p
= &htab
->stub_group
[link_sec
->id
].stub_sec
;
4788 stub_sec_prefix
= link_sec
->name
;
4789 out_sec
= link_sec
->output_section
;
4790 align
= htab
->nacl_p
? 4 : 3;
4793 if (*stub_sec_p
== NULL
)
4799 namelen
= strlen (stub_sec_prefix
);
4800 len
= namelen
+ sizeof (STUB_SUFFIX
);
4801 s_name
= (char *) bfd_alloc (htab
->stub_bfd
, len
);
4805 memcpy (s_name
, stub_sec_prefix
, namelen
);
4806 memcpy (s_name
+ namelen
, STUB_SUFFIX
, sizeof (STUB_SUFFIX
));
4807 *stub_sec_p
= (*htab
->add_stub_section
) (s_name
, out_sec
, link_sec
,
4809 if (*stub_sec_p
== NULL
)
4812 out_sec
->flags
|= SEC_ALLOC
| SEC_LOAD
| SEC_READONLY
| SEC_CODE
4813 | SEC_HAS_CONTENTS
| SEC_RELOC
| SEC_IN_MEMORY
4817 if (!dedicated_output_section
)
4818 htab
->stub_group
[section
->id
].stub_sec
= *stub_sec_p
;
4821 *link_sec_p
= link_sec
;
4826 /* Add a new stub entry to the stub hash. Not all fields of the new
4827 stub entry are initialised. */
4829 static struct elf32_arm_stub_hash_entry
*
4830 elf32_arm_add_stub (const char *stub_name
, asection
*section
,
4831 struct elf32_arm_link_hash_table
*htab
,
4832 enum elf32_arm_stub_type stub_type
)
4836 struct elf32_arm_stub_hash_entry
*stub_entry
;
4838 stub_sec
= elf32_arm_create_or_find_stub_sec (&link_sec
, section
, htab
,
4840 if (stub_sec
== NULL
)
4843 /* Enter this entry into the linker stub hash table. */
4844 stub_entry
= arm_stub_hash_lookup (&htab
->stub_hash_table
, stub_name
,
4846 if (stub_entry
== NULL
)
4848 if (section
== NULL
)
4850 _bfd_error_handler (_("%pB: cannot create stub entry %s"),
4851 section
->owner
, stub_name
);
4855 stub_entry
->stub_sec
= stub_sec
;
4856 stub_entry
->stub_offset
= (bfd_vma
) -1;
4857 stub_entry
->id_sec
= link_sec
;
/* Store an Arm insn into an output section not processed by
   elf32_arm_write_section.  */

static void
put_arm_insn (struct elf32_arm_link_hash_table * htab,
	      bfd * output_bfd, bfd_vma val, void * ptr)
{
  if (htab->byteswap_code != bfd_little_endian (output_bfd))
    bfd_putl32 (val, ptr);
  else
    bfd_putb32 (val, ptr);
}

/* Store a 16-bit Thumb insn into an output section not processed by
   elf32_arm_write_section.  */

static void
put_thumb_insn (struct elf32_arm_link_hash_table * htab,
		bfd * output_bfd, bfd_vma val, void * ptr)
{
  if (htab->byteswap_code != bfd_little_endian (output_bfd))
    bfd_putl16 (val, ptr);
  else
    bfd_putb16 (val, ptr);
}

/* Store a Thumb2 insn into an output section not processed by
   elf32_arm_write_section.  */

static void
put_thumb2_insn (struct elf32_arm_link_hash_table * htab,
		 bfd * output_bfd, bfd_vma val, bfd_byte * ptr)
{
  /* T2 instructions are 16-bit streamed.  */
  if (htab->byteswap_code != bfd_little_endian (output_bfd))
    {
      bfd_putl16 ((val >> 16) & 0xffff, ptr);
      bfd_putl16 ((val & 0xffff), ptr + 2);
    }
  else
    {
      bfd_putb16 ((val >> 16) & 0xffff, ptr);
      bfd_putb16 ((val & 0xffff), ptr + 2);
    }
}
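/* Note for the three helpers above: since these sections are not run through
   elf32_arm_write_section, the byte order must be fixed here; whether
   bfd_putl* or bfd_putb* is used depends on the BE8-style code byte-swapping
   setting (byteswap_code) relative to the output BFD's endianness.  */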
/* If it's possible to change R_TYPE to a more efficient access
   model, return the new reloc type.  */

static int
elf32_arm_tls_transition (struct bfd_link_info *info, int r_type,
			  struct elf_link_hash_entry *h)
{
  int is_local = (h == NULL);

  if (bfd_link_dll (info)
      || (h && h->root.type == bfd_link_hash_undefweak))
    return r_type;

  /* We do not support relaxations for Old TLS models.  */
  switch (r_type)
    {
    case R_ARM_TLS_GOTDESC:
    case R_ARM_TLS_CALL:
    case R_ARM_THM_TLS_CALL:
    case R_ARM_TLS_DESCSEQ:
    case R_ARM_THM_TLS_DESCSEQ:
      return is_local ? R_ARM_TLS_LE32 : R_ARM_TLS_IE32;
    }

  return r_type;
}
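/* For example, when linking an executable, a TLS descriptor sequence
   (R_ARM_TLS_GOTDESC / R_ARM_TLS_CALL / R_ARM_TLS_DESCSEQ and the Thumb
   variants) is relaxed to a local-exec access (R_ARM_TLS_LE32) if the symbol
   binds locally, and to an initial-exec access (R_ARM_TLS_IE32) otherwise.  */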
static bfd_reloc_status_type elf32_arm_final_link_relocate
  (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
   Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
   const char *, unsigned char, enum arm_st_branch_type,
   struct elf_link_hash_entry *, bfd_boolean *, char **);
static unsigned int
arm_stub_required_alignment (enum elf32_arm_stub_type stub_type)
{
  switch (stub_type)
    {
    case arm_stub_a8_veneer_b_cond:
    case arm_stub_a8_veneer_b:
    case arm_stub_a8_veneer_bl:
      return 2;

    case arm_stub_long_branch_any_any:
    case arm_stub_long_branch_v4t_arm_thumb:
    case arm_stub_long_branch_thumb_only:
    case arm_stub_long_branch_thumb2_only:
    case arm_stub_long_branch_thumb2_only_pure:
    case arm_stub_long_branch_v4t_thumb_thumb:
    case arm_stub_long_branch_v4t_thumb_arm:
    case arm_stub_short_branch_v4t_thumb_arm:
    case arm_stub_long_branch_any_arm_pic:
    case arm_stub_long_branch_any_thumb_pic:
    case arm_stub_long_branch_v4t_thumb_thumb_pic:
    case arm_stub_long_branch_v4t_arm_thumb_pic:
    case arm_stub_long_branch_v4t_thumb_arm_pic:
    case arm_stub_long_branch_thumb_only_pic:
    case arm_stub_long_branch_any_tls_pic:
    case arm_stub_long_branch_v4t_thumb_tls_pic:
    case arm_stub_cmse_branch_thumb_only:
    case arm_stub_a8_veneer_blx:
      return 4;

    case arm_stub_long_branch_arm_nacl:
    case arm_stub_long_branch_arm_nacl_pic:
      return 16;

    default:
      abort ();  /* Should be unreachable.  */
    }
}
/* Returns whether stubs of type STUB_TYPE take over the symbol they are
   veneering (TRUE) or have their own symbol (FALSE).  */

static bfd_boolean
arm_stub_sym_claimed (enum elf32_arm_stub_type stub_type)
{
  if (stub_type >= max_stub_type)
    abort ();  /* Should be unreachable.  */

  switch (stub_type)
    {
    case arm_stub_cmse_branch_thumb_only:
      return TRUE;

    default:
      return FALSE;
    }

  abort ();  /* Should be unreachable.  */
}
/* Returns the padding needed for the dedicated section used stubs of type
   STUB_TYPE.  */

static int
arm_dedicated_stub_section_padding (enum elf32_arm_stub_type stub_type)
{
  if (stub_type >= max_stub_type)
    abort ();  /* Should be unreachable.  */

  switch (stub_type)
    {
    case arm_stub_cmse_branch_thumb_only:
      return 32;

    default:
      return 0;
    }

  abort ();  /* Should be unreachable.  */
}
/* If veneers of type STUB_TYPE should go in a dedicated output section,
   returns the address of the hash table field in HTAB holding the offset at
   which new veneers should be layed out in the stub section.  */

static bfd_vma*
arm_new_stubs_start_offset_ptr (struct elf32_arm_link_hash_table *htab,
				enum elf32_arm_stub_type stub_type)
{
  switch (stub_type)
    {
    case arm_stub_cmse_branch_thumb_only:
      return &htab->new_cmse_stub_offset;

    default:
      BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
      return NULL;
    }
}
5042 arm_build_one_stub (struct bfd_hash_entry
*gen_entry
,
5046 bfd_boolean removed_sg_veneer
;
5047 struct elf32_arm_stub_hash_entry
*stub_entry
;
5048 struct elf32_arm_link_hash_table
*globals
;
5049 struct bfd_link_info
*info
;
5056 const insn_sequence
*template_sequence
;
5058 int stub_reloc_idx
[MAXRELOCS
] = {-1, -1};
5059 int stub_reloc_offset
[MAXRELOCS
] = {0, 0};
5061 int just_allocated
= 0;
5063 /* Massage our args to the form they really have. */
5064 stub_entry
= (struct elf32_arm_stub_hash_entry
*) gen_entry
;
5065 info
= (struct bfd_link_info
*) in_arg
;
5067 globals
= elf32_arm_hash_table (info
);
5068 if (globals
== NULL
)
5071 stub_sec
= stub_entry
->stub_sec
;
5073 if ((globals
->fix_cortex_a8
< 0)
5074 != (arm_stub_required_alignment (stub_entry
->stub_type
) == 2))
5075 /* We have to do less-strictly-aligned fixes last. */
5078 /* Assign a slot at the end of section if none assigned yet. */
5079 if (stub_entry
->stub_offset
== (bfd_vma
) -1)
5081 stub_entry
->stub_offset
= stub_sec
->size
;
5084 loc
= stub_sec
->contents
+ stub_entry
->stub_offset
;
5086 stub_bfd
= stub_sec
->owner
;
5088 /* This is the address of the stub destination. */
5089 sym_value
= (stub_entry
->target_value
5090 + stub_entry
->target_section
->output_offset
5091 + stub_entry
->target_section
->output_section
->vma
);
5093 template_sequence
= stub_entry
->stub_template
;
5094 template_size
= stub_entry
->stub_template_size
;
5097 for (i
= 0; i
< template_size
; i
++)
5099 switch (template_sequence
[i
].type
)
5103 bfd_vma data
= (bfd_vma
) template_sequence
[i
].data
;
5104 if (template_sequence
[i
].reloc_addend
!= 0)
5106 /* We've borrowed the reloc_addend field to mean we should
5107 insert a condition code into this (Thumb-1 branch)
5108 instruction. See THUMB16_BCOND_INSN. */
5109 BFD_ASSERT ((data
& 0xff00) == 0xd000);
5110 data
|= ((stub_entry
->orig_insn
>> 22) & 0xf) << 8;
5112 bfd_put_16 (stub_bfd
, data
, loc
+ size
);
5118 bfd_put_16 (stub_bfd
,
5119 (template_sequence
[i
].data
>> 16) & 0xffff,
5121 bfd_put_16 (stub_bfd
, template_sequence
[i
].data
& 0xffff,
5123 if (template_sequence
[i
].r_type
!= R_ARM_NONE
)
5125 stub_reloc_idx
[nrelocs
] = i
;
5126 stub_reloc_offset
[nrelocs
++] = size
;
5132 bfd_put_32 (stub_bfd
, template_sequence
[i
].data
,
5134 /* Handle cases where the target is encoded within the
5136 if (template_sequence
[i
].r_type
== R_ARM_JUMP24
)
5138 stub_reloc_idx
[nrelocs
] = i
;
5139 stub_reloc_offset
[nrelocs
++] = size
;
5145 bfd_put_32 (stub_bfd
, template_sequence
[i
].data
, loc
+ size
);
5146 stub_reloc_idx
[nrelocs
] = i
;
5147 stub_reloc_offset
[nrelocs
++] = size
;
5158 stub_sec
->size
+= size
;
5160 /* Stub size has already been computed in arm_size_one_stub. Check
5162 BFD_ASSERT (size
== stub_entry
->stub_size
);
5164 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
5165 if (stub_entry
->branch_type
== ST_BRANCH_TO_THUMB
)
5168 /* Assume non empty slots have at least one and at most MAXRELOCS entries
5169 to relocate in each stub. */
5171 (size
== 0 && stub_entry
->stub_type
== arm_stub_cmse_branch_thumb_only
);
5172 BFD_ASSERT (removed_sg_veneer
|| (nrelocs
!= 0 && nrelocs
<= MAXRELOCS
));
5174 for (i
= 0; i
< nrelocs
; i
++)
5176 Elf_Internal_Rela rel
;
5177 bfd_boolean unresolved_reloc
;
5178 char *error_message
;
5180 sym_value
+ template_sequence
[stub_reloc_idx
[i
]].reloc_addend
;
5182 rel
.r_offset
= stub_entry
->stub_offset
+ stub_reloc_offset
[i
];
5183 rel
.r_info
= ELF32_R_INFO (0,
5184 template_sequence
[stub_reloc_idx
[i
]].r_type
);
5187 if (stub_entry
->stub_type
== arm_stub_a8_veneer_b_cond
&& i
== 0)
5188 /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
5189 template should refer back to the instruction after the original
5190 branch. We use target_section as Cortex-A8 erratum workaround stubs
5191 are only generated when both source and target are in the same
5193 points_to
= stub_entry
->target_section
->output_section
->vma
5194 + stub_entry
->target_section
->output_offset
5195 + stub_entry
->source_value
;
5197 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
5198 (template_sequence
[stub_reloc_idx
[i
]].r_type
),
5199 stub_bfd
, info
->output_bfd
, stub_sec
, stub_sec
->contents
, &rel
,
5200 points_to
, info
, stub_entry
->target_section
, "", STT_FUNC
,
5201 stub_entry
->branch_type
,
5202 (struct elf_link_hash_entry
*) stub_entry
->h
, &unresolved_reloc
,
/* Calculate the template, template size and instruction size for a stub.
   Return value is the instruction size.  */

static unsigned int
find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
			     const insn_sequence **stub_template,
			     int *stub_template_size)
{
  const insn_sequence *template_sequence = NULL;
  int template_size = 0, i;
  unsigned int size;

  template_sequence = stub_definitions[stub_type].template_sequence;
  if (stub_template)
    *stub_template = template_sequence;

  template_size = stub_definitions[stub_type].template_size;
  if (stub_template_size)
    *stub_template_size = template_size;

  size = 0;
  for (i = 0; i < template_size; i++)
    {
      switch (template_sequence[i].type)
	{
	case THUMB16_TYPE:
	  size += 2;
	  break;

	case ARM_TYPE:
	case THUMB32_TYPE:
	case DATA_TYPE:
	  size += 4;
	  break;

	default:
	  BFD_FAIL ();
	  return 0;
	}
    }

  return size;
}
/* As above, but don't actually build the stub.  Just bump offset so
   we know stub section sizes.  */

static bfd_boolean
arm_size_one_stub (struct bfd_hash_entry *gen_entry,
		   void *in_arg ATTRIBUTE_UNUSED)
{
  struct elf32_arm_stub_hash_entry *stub_entry;
  const insn_sequence *template_sequence;
  int template_size, size;

  /* Massage our args to the form they really have.  */
  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;

  BFD_ASSERT((stub_entry->stub_type > arm_stub_none)
	     && stub_entry->stub_type < ARRAY_SIZE(stub_definitions));

  size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence,
				      &template_size);

  /* Initialized to -1.  Null size indicates an empty slot full of zeros.  */
  if (stub_entry->stub_template_size)
    {
      stub_entry->stub_size = size;
      stub_entry->stub_template = template_sequence;
      stub_entry->stub_template_size = template_size;
    }

  /* Already accounted for.  */
  if (stub_entry->stub_offset != (bfd_vma) -1)
    return TRUE;

  size = (size + 7) & ~7;
  stub_entry->stub_sec->size += size;

  return TRUE;
}
5292 /* External entry points for sizing and building linker stubs. */
5294 /* Set up various things so that we can make a list of input sections
5295 for each output section included in the link. Returns -1 on error,
5296 0 when no stubs will be needed, and 1 on success. */
5299 elf32_arm_setup_section_lists (bfd
*output_bfd
,
5300 struct bfd_link_info
*info
)
5303 unsigned int bfd_count
;
5304 unsigned int top_id
, top_index
;
5306 asection
**input_list
, **list
;
5308 struct elf32_arm_link_hash_table
*htab
= elf32_arm_hash_table (info
);
5312 if (! is_elf_hash_table (htab
))
5315 /* Count the number of input BFDs and find the top input section id. */
5316 for (input_bfd
= info
->input_bfds
, bfd_count
= 0, top_id
= 0;
5318 input_bfd
= input_bfd
->link
.next
)
5321 for (section
= input_bfd
->sections
;
5323 section
= section
->next
)
5325 if (top_id
< section
->id
)
5326 top_id
= section
->id
;
5329 htab
->bfd_count
= bfd_count
;
5331 amt
= sizeof (struct map_stub
) * (top_id
+ 1);
5332 htab
->stub_group
= (struct map_stub
*) bfd_zmalloc (amt
);
5333 if (htab
->stub_group
== NULL
)
5335 htab
->top_id
= top_id
;
5337 /* We can't use output_bfd->section_count here to find the top output
5338 section index as some sections may have been removed, and
5339 _bfd_strip_section_from_output doesn't renumber the indices. */
5340 for (section
= output_bfd
->sections
, top_index
= 0;
5342 section
= section
->next
)
5344 if (top_index
< section
->index
)
5345 top_index
= section
->index
;
5348 htab
->top_index
= top_index
;
5349 amt
= sizeof (asection
*) * (top_index
+ 1);
5350 input_list
= (asection
**) bfd_malloc (amt
);
5351 htab
->input_list
= input_list
;
5352 if (input_list
== NULL
)
5355 /* For sections we aren't interested in, mark their entries with a
5356 value we can check later. */
5357 list
= input_list
+ top_index
;
5359 *list
= bfd_abs_section_ptr
;
5360 while (list
-- != input_list
);
5362 for (section
= output_bfd
->sections
;
5364 section
= section
->next
)
5366 if ((section
->flags
& SEC_CODE
) != 0)
5367 input_list
[section
->index
] = NULL
;
5373 /* The linker repeatedly calls this function for each input section,
5374 in the order that input sections are linked into output sections.
5375 Build lists of input sections to determine groupings between which
5376 we may insert linker stubs. */
5379 elf32_arm_next_input_section (struct bfd_link_info
*info
,
5382 struct elf32_arm_link_hash_table
*htab
= elf32_arm_hash_table (info
);
5387 if (isec
->output_section
->index
<= htab
->top_index
)
5389 asection
**list
= htab
->input_list
+ isec
->output_section
->index
;
5391 if (*list
!= bfd_abs_section_ptr
&& (isec
->flags
& SEC_CODE
) != 0)
5393 /* Steal the link_sec pointer for our list. */
5394 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
5395 /* This happens to make the list in reverse order,
5396 which we reverse later. */
5397 PREV_SEC (isec
) = *list
;
5403 /* See whether we can group stub sections together. Grouping stub
5404 sections may result in fewer stubs. More importantly, we need to
5405 put all .init* and .fini* stubs at the end of the .init or
5406 .fini output sections respectively, because glibc splits the
5407 _init and _fini functions into multiple parts. Putting a stub in
5408 the middle of a function is not a good idea. */
5411 group_sections (struct elf32_arm_link_hash_table
*htab
,
5412 bfd_size_type stub_group_size
,
5413 bfd_boolean stubs_always_after_branch
)
5415 asection
**list
= htab
->input_list
;
5419 asection
*tail
= *list
;
5422 if (tail
== bfd_abs_section_ptr
)
5425 /* Reverse the list: we must avoid placing stubs at the
5426 beginning of the section because the beginning of the text
5427 section may be required for an interrupt vector in bare metal
5429 #define NEXT_SEC PREV_SEC
5431 while (tail
!= NULL
)
5433 /* Pop from tail. */
5434 asection
*item
= tail
;
5435 tail
= PREV_SEC (item
);
5438 NEXT_SEC (item
) = head
;
5442 while (head
!= NULL
)
5446 bfd_vma stub_group_start
= head
->output_offset
;
5447 bfd_vma end_of_next
;
5450 while (NEXT_SEC (curr
) != NULL
)
5452 next
= NEXT_SEC (curr
);
5453 end_of_next
= next
->output_offset
+ next
->size
;
5454 if (end_of_next
- stub_group_start
>= stub_group_size
)
5455 /* End of NEXT is too far from start, so stop. */
5457 /* Add NEXT to the group. */
5461 /* OK, the size from the start to the start of CURR is less
5462 than stub_group_size and thus can be handled by one stub
5463 section. (Or the head section is itself larger than
5464 stub_group_size, in which case we may be toast.)
5465 We should really be keeping track of the total size of
5466 stubs added here, as stubs contribute to the final output
5470 next
= NEXT_SEC (head
);
5471 /* Set up this stub group. */
5472 htab
->stub_group
[head
->id
].link_sec
= curr
;
5474 while (head
!= curr
&& (head
= next
) != NULL
);
5476 /* But wait, there's more! Input sections up to stub_group_size
5477 bytes after the stub section can be handled by it too. */
5478 if (!stubs_always_after_branch
)
5480 stub_group_start
= curr
->output_offset
+ curr
->size
;
5482 while (next
!= NULL
)
5484 end_of_next
= next
->output_offset
+ next
->size
;
5485 if (end_of_next
- stub_group_start
>= stub_group_size
)
5486 /* End of NEXT is too far from stubs, so stop. */
5488 /* Add NEXT to the stub group. */
5490 next
= NEXT_SEC (head
);
5491 htab
->stub_group
[head
->id
].link_sec
= curr
;
5497 while (list
++ != htab
->input_list
+ htab
->top_index
);
5499 free (htab
->input_list
);
/* Comparison function for sorting/searching relocations relating to Cortex-A8
   erratum fixes.  */

static int
a8_reloc_compare (const void *a, const void *b)
{
  const struct a8_erratum_reloc *ra = (const struct a8_erratum_reloc *) a;
  const struct a8_erratum_reloc *rb = (const struct a8_erratum_reloc *) b;

  if (ra->from < rb->from)
    return -1;
  else if (ra->from > rb->from)
    return 1;
  else
    return 0;
}
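/* Note: ordering strictly by the source address FROM is what allows
   cortex_a8_erratum_scan to bsearch this table with a key built from the
   branch's address (base_vma + i).  */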
static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
						    const char *, char **);
5524 /* Helper function to scan code for sequences which might trigger the Cortex-A8
5525 branch/TLB erratum. Fill in the table described by A8_FIXES_P,
5526 NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false
5530 cortex_a8_erratum_scan (bfd
*input_bfd
,
5531 struct bfd_link_info
*info
,
5532 struct a8_erratum_fix
**a8_fixes_p
,
5533 unsigned int *num_a8_fixes_p
,
5534 unsigned int *a8_fix_table_size_p
,
5535 struct a8_erratum_reloc
*a8_relocs
,
5536 unsigned int num_a8_relocs
,
5537 unsigned prev_num_a8_fixes
,
5538 bfd_boolean
*stub_changed_p
)
5541 struct elf32_arm_link_hash_table
*htab
= elf32_arm_hash_table (info
);
5542 struct a8_erratum_fix
*a8_fixes
= *a8_fixes_p
;
5543 unsigned int num_a8_fixes
= *num_a8_fixes_p
;
5544 unsigned int a8_fix_table_size
= *a8_fix_table_size_p
;
5549 for (section
= input_bfd
->sections
;
5551 section
= section
->next
)
5553 bfd_byte
*contents
= NULL
;
5554 struct _arm_elf_section_data
*sec_data
;
5558 if (elf_section_type (section
) != SHT_PROGBITS
5559 || (elf_section_flags (section
) & SHF_EXECINSTR
) == 0
5560 || (section
->flags
& SEC_EXCLUDE
) != 0
5561 || (section
->sec_info_type
== SEC_INFO_TYPE_JUST_SYMS
)
5562 || (section
->output_section
== bfd_abs_section_ptr
))
5565 base_vma
= section
->output_section
->vma
+ section
->output_offset
;
5567 if (elf_section_data (section
)->this_hdr
.contents
!= NULL
)
5568 contents
= elf_section_data (section
)->this_hdr
.contents
;
5569 else if (! bfd_malloc_and_get_section (input_bfd
, section
, &contents
))
5572 sec_data
= elf32_arm_section_data (section
);
5574 for (span
= 0; span
< sec_data
->mapcount
; span
++)
5576 unsigned int span_start
= sec_data
->map
[span
].vma
;
5577 unsigned int span_end
= (span
== sec_data
->mapcount
- 1)
5578 ? section
->size
: sec_data
->map
[span
+ 1].vma
;
5580 char span_type
= sec_data
->map
[span
].type
;
5581 bfd_boolean last_was_32bit
= FALSE
, last_was_branch
= FALSE
;
5583 if (span_type
!= 't')
5586 /* Span is entirely within a single 4KB region: skip scanning. */
5587 if (((base_vma
+ span_start
) & ~0xfff)
5588 == ((base_vma
+ span_end
) & ~0xfff))
5591 /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
5593 * The opcode is BLX.W, BL.W, B.W, Bcc.W
5594 * The branch target is in the same 4KB region as the
5595 first half of the branch.
5596 * The instruction before the branch is a 32-bit
5597 length non-branch instruction. */
5598 for (i
= span_start
; i
< span_end
;)
5600 unsigned int insn
= bfd_getl16 (&contents
[i
]);
5601 bfd_boolean insn_32bit
= FALSE
, is_blx
= FALSE
, is_b
= FALSE
;
5602 bfd_boolean is_bl
= FALSE
, is_bcc
= FALSE
, is_32bit_branch
;
5604 if ((insn
& 0xe000) == 0xe000 && (insn
& 0x1800) != 0x0000)
5609 /* Load the rest of the insn (in manual-friendly order). */
5610 insn
= (insn
<< 16) | bfd_getl16 (&contents
[i
+ 2]);
5612 /* Encoding T4: B<c>.W. */
5613 is_b
= (insn
& 0xf800d000) == 0xf0009000;
5614 /* Encoding T1: BL<c>.W. */
5615 is_bl
= (insn
& 0xf800d000) == 0xf000d000;
5616 /* Encoding T2: BLX<c>.W. */
5617 is_blx
= (insn
& 0xf800d000) == 0xf000c000;
5618 /* Encoding T3: B<c>.W (not permitted in IT block). */
5619 is_bcc
= (insn
& 0xf800d000) == 0xf0008000
5620 && (insn
& 0x07f00000) != 0x03800000;
5623 is_32bit_branch
= is_b
|| is_bl
|| is_blx
|| is_bcc
;
5625 if (((base_vma
+ i
) & 0xfff) == 0xffe
5629 && ! last_was_branch
)
5631 bfd_signed_vma offset
= 0;
5632 bfd_boolean force_target_arm
= FALSE
;
5633 bfd_boolean force_target_thumb
= FALSE
;
5635 enum elf32_arm_stub_type stub_type
= arm_stub_none
;
5636 struct a8_erratum_reloc key
, *found
;
5637 bfd_boolean use_plt
= FALSE
;
5639 key
.from
= base_vma
+ i
;
5640 found
= (struct a8_erratum_reloc
*)
5641 bsearch (&key
, a8_relocs
, num_a8_relocs
,
5642 sizeof (struct a8_erratum_reloc
),
5647 char *error_message
= NULL
;
5648 struct elf_link_hash_entry
*entry
;
5650 /* We don't care about the error returned from this
5651 function, only if there is glue or not. */
5652 entry
= find_thumb_glue (info
, found
->sym_name
,
5656 found
->non_a8_stub
= TRUE
;
5658 /* Keep a simpler condition, for the sake of clarity. */
5659 if (htab
->root
.splt
!= NULL
&& found
->hash
!= NULL
5660 && found
->hash
->root
.plt
.offset
!= (bfd_vma
) -1)
5663 if (found
->r_type
== R_ARM_THM_CALL
)
5665 if (found
->branch_type
== ST_BRANCH_TO_ARM
5667 force_target_arm
= TRUE
;
5669 force_target_thumb
= TRUE
;
5673 /* Check if we have an offending branch instruction. */
5675 if (found
&& found
->non_a8_stub
)
5676 /* We've already made a stub for this instruction, e.g.
5677 it's a long branch or a Thumb->ARM stub. Assume that
5678 stub will suffice to work around the A8 erratum (see
5679 setting of always_after_branch above). */
5683 offset
= (insn
& 0x7ff) << 1;
5684 offset
|= (insn
& 0x3f0000) >> 4;
5685 offset
|= (insn
& 0x2000) ? 0x40000 : 0;
5686 offset
|= (insn
& 0x800) ? 0x80000 : 0;
5687 offset
|= (insn
& 0x4000000) ? 0x100000 : 0;
5688 if (offset
& 0x100000)
5689 offset
|= ~ ((bfd_signed_vma
) 0xfffff);
5690 stub_type
= arm_stub_a8_veneer_b_cond
;
5692 else if (is_b
|| is_bl
|| is_blx
)
5694 int s
= (insn
& 0x4000000) != 0;
5695 int j1
= (insn
& 0x2000) != 0;
5696 int j2
= (insn
& 0x800) != 0;
5700 offset
= (insn
& 0x7ff) << 1;
5701 offset
|= (insn
& 0x3ff0000) >> 4;
5705 if (offset
& 0x1000000)
5706 offset
|= ~ ((bfd_signed_vma
) 0xffffff);
5709 offset
&= ~ ((bfd_signed_vma
) 3);
5711 stub_type
= is_blx
? arm_stub_a8_veneer_blx
:
5712 is_bl
? arm_stub_a8_veneer_bl
: arm_stub_a8_veneer_b
;
5715 if (stub_type
!= arm_stub_none
)
5717 bfd_vma pc_for_insn
= base_vma
+ i
+ 4;
5719 /* The original instruction is a BL, but the target is
5720 an ARM instruction. If we were not making a stub,
5721 the BL would have been converted to a BLX. Use the
5722 BLX stub instead in that case. */
5723 if (htab
->use_blx
&& force_target_arm
5724 && stub_type
== arm_stub_a8_veneer_bl
)
5726 stub_type
= arm_stub_a8_veneer_blx
;
5730 /* Conversely, if the original instruction was
5731 BLX but the target is Thumb mode, use the BL
5733 else if (force_target_thumb
5734 && stub_type
== arm_stub_a8_veneer_blx
)
5736 stub_type
= arm_stub_a8_veneer_bl
;
5742 pc_for_insn
&= ~ ((bfd_vma
) 3);
5744 /* If we found a relocation, use the proper destination,
5745 not the offset in the (unrelocated) instruction.
5746 Note this is always done if we switched the stub type
5750 (bfd_signed_vma
) (found
->destination
- pc_for_insn
);
5752 /* If the stub will use a Thumb-mode branch to a
5753 PLT target, redirect it to the preceding Thumb
5755 if (stub_type
!= arm_stub_a8_veneer_blx
&& use_plt
)
5756 offset
-= PLT_THUMB_STUB_SIZE
;
5758 target
= pc_for_insn
+ offset
;
5760 /* The BLX stub is ARM-mode code. Adjust the offset to
5761 take the different PC value (+8 instead of +4) into
5763 if (stub_type
== arm_stub_a8_veneer_blx
)
5766 if (((base_vma
+ i
) & ~0xfff) == (target
& ~0xfff))
5768 char *stub_name
= NULL
;
5770 if (num_a8_fixes
== a8_fix_table_size
)
5772 a8_fix_table_size
*= 2;
5773 a8_fixes
= (struct a8_erratum_fix
*)
5774 bfd_realloc (a8_fixes
,
5775 sizeof (struct a8_erratum_fix
)
5776 * a8_fix_table_size
);
5779 if (num_a8_fixes
< prev_num_a8_fixes
)
5781 /* If we're doing a subsequent scan,
5782 check if we've found the same fix as
5783 before, and try and reuse the stub
5785 stub_name
= a8_fixes
[num_a8_fixes
].stub_name
;
5786 if ((a8_fixes
[num_a8_fixes
].section
!= section
)
5787 || (a8_fixes
[num_a8_fixes
].offset
!= i
))
5791 *stub_changed_p
= TRUE
;
5797 stub_name
= (char *) bfd_malloc (8 + 1 + 8 + 1);
5798 if (stub_name
!= NULL
)
5799 sprintf (stub_name
, "%x:%x", section
->id
, i
);
5802 a8_fixes
[num_a8_fixes
].input_bfd
= input_bfd
;
5803 a8_fixes
[num_a8_fixes
].section
= section
;
5804 a8_fixes
[num_a8_fixes
].offset
= i
;
5805 a8_fixes
[num_a8_fixes
].target_offset
=
5807 a8_fixes
[num_a8_fixes
].orig_insn
= insn
;
5808 a8_fixes
[num_a8_fixes
].stub_name
= stub_name
;
5809 a8_fixes
[num_a8_fixes
].stub_type
= stub_type
;
5810 a8_fixes
[num_a8_fixes
].branch_type
=
5811 is_blx
? ST_BRANCH_TO_ARM
: ST_BRANCH_TO_THUMB
;
5818 i
+= insn_32bit
? 4 : 2;
5819 last_was_32bit
= insn_32bit
;
5820 last_was_branch
= is_32bit_branch
;
5824 if (elf_section_data (section
)->this_hdr
.contents
== NULL
)
5828 *a8_fixes_p
= a8_fixes
;
5829 *num_a8_fixes_p
= num_a8_fixes
;
5830 *a8_fix_table_size_p
= a8_fix_table_size
;
5835 /* Create or update a stub entry depending on whether the stub can already be
5836 found in HTAB. The stub is identified by:
5837 - its type STUB_TYPE
5838 - its source branch (note that several can share the same stub) whose
5839 section and relocation (if any) are given by SECTION and IRELA
5841 - its target symbol whose input section, hash, name, value and branch type
5842 are given in SYM_SEC, HASH, SYM_NAME, SYM_VALUE and BRANCH_TYPE
5845 If found, the value of the stub's target symbol is updated from SYM_VALUE
5846 and *NEW_STUB is set to FALSE. Otherwise, *NEW_STUB is set to
5847 TRUE and the stub entry is initialized.
5849 Returns the stub that was created or updated, or NULL if an error
5852 static struct elf32_arm_stub_hash_entry
*
5853 elf32_arm_create_stub (struct elf32_arm_link_hash_table
*htab
,
5854 enum elf32_arm_stub_type stub_type
, asection
*section
,
5855 Elf_Internal_Rela
*irela
, asection
*sym_sec
,
5856 struct elf32_arm_link_hash_entry
*hash
, char *sym_name
,
5857 bfd_vma sym_value
, enum arm_st_branch_type branch_type
,
5858 bfd_boolean
*new_stub
)
5860 const asection
*id_sec
;
5862 struct elf32_arm_stub_hash_entry
*stub_entry
;
5863 unsigned int r_type
;
5864 bfd_boolean sym_claimed
= arm_stub_sym_claimed (stub_type
);
5866 BFD_ASSERT (stub_type
!= arm_stub_none
);
5870 stub_name
= sym_name
;
5874 BFD_ASSERT (section
);
5875 BFD_ASSERT (section
->id
<= htab
->top_id
);
5877 /* Support for grouping stub sections. */
5878 id_sec
= htab
->stub_group
[section
->id
].link_sec
;
5880 /* Get the name of this stub. */
5881 stub_name
= elf32_arm_stub_name (id_sec
, sym_sec
, hash
, irela
,
5887 stub_entry
= arm_stub_hash_lookup (&htab
->stub_hash_table
, stub_name
, FALSE
,
5889 /* The proper stub has already been created, just update its value. */
5890 if (stub_entry
!= NULL
)
5894 stub_entry
->target_value
= sym_value
;
5898 stub_entry
= elf32_arm_add_stub (stub_name
, section
, htab
, stub_type
);
5899 if (stub_entry
== NULL
)
5906 stub_entry
->target_value
= sym_value
;
5907 stub_entry
->target_section
= sym_sec
;
5908 stub_entry
->stub_type
= stub_type
;
5909 stub_entry
->h
= hash
;
5910 stub_entry
->branch_type
= branch_type
;
5913 stub_entry
->output_name
= sym_name
;
5916 if (sym_name
== NULL
)
5917 sym_name
= "unnamed";
5918 stub_entry
->output_name
= (char *)
5919 bfd_alloc (htab
->stub_bfd
, sizeof (THUMB2ARM_GLUE_ENTRY_NAME
)
5920 + strlen (sym_name
));
5921 if (stub_entry
->output_name
== NULL
)
5927 /* For historical reasons, use the existing names for ARM-to-Thumb and
5928 Thumb-to-ARM stubs. */
5929 r_type
= ELF32_R_TYPE (irela
->r_info
);
5930 if ((r_type
== (unsigned int) R_ARM_THM_CALL
5931 || r_type
== (unsigned int) R_ARM_THM_JUMP24
5932 || r_type
== (unsigned int) R_ARM_THM_JUMP19
)
5933 && branch_type
== ST_BRANCH_TO_ARM
)
5934 sprintf (stub_entry
->output_name
, THUMB2ARM_GLUE_ENTRY_NAME
, sym_name
);
5935 else if ((r_type
== (unsigned int) R_ARM_CALL
5936 || r_type
== (unsigned int) R_ARM_JUMP24
)
5937 && branch_type
== ST_BRANCH_TO_THUMB
)
5938 sprintf (stub_entry
->output_name
, ARM2THUMB_GLUE_ENTRY_NAME
, sym_name
);
5940 sprintf (stub_entry
->output_name
, STUB_ENTRY_NAME
, sym_name
);
5947 /* Scan symbols in INPUT_BFD to identify secure entry functions needing a
5948 gateway veneer to transition from non secure to secure state and create them
5951 "ARMv8-M Security Extensions: Requirements on Development Tools" document
5952 defines the conditions that govern Secure Gateway veneer creation for a
5953 given symbol <SYM> as follows:
5954 - it has function type
5955 - it has non local binding
5956 - a symbol named __acle_se_<SYM> (called special symbol) exists with the
5957 same type, binding and value as <SYM> (called normal symbol).
5958 An entry function can handle secure state transition itself in which case
5959 its special symbol would have a different value from the normal symbol.
5961 OUT_ATTR gives the output attributes, SYM_HASHES the symbol index to hash
5962 entry mapping while HTAB gives the name to hash entry mapping.
5963 *CMSE_STUB_CREATED is increased by the number of secure gateway veneer
5966 The return value gives whether a stub failed to be allocated. */
5969 cmse_scan (bfd
*input_bfd
, struct elf32_arm_link_hash_table
*htab
,
5970 obj_attribute
*out_attr
, struct elf_link_hash_entry
**sym_hashes
,
5971 int *cmse_stub_created
)
5973 const struct elf_backend_data
*bed
;
5974 Elf_Internal_Shdr
*symtab_hdr
;
5975 unsigned i
, j
, sym_count
, ext_start
;
5976 Elf_Internal_Sym
*cmse_sym
, *local_syms
;
5977 struct elf32_arm_link_hash_entry
*hash
, *cmse_hash
= NULL
;
5978 enum arm_st_branch_type branch_type
;
5979 char *sym_name
, *lsym_name
;
5982 struct elf32_arm_stub_hash_entry
*stub_entry
;
5983 bfd_boolean is_v8m
, new_stub
, cmse_invalid
, ret
= TRUE
;
5985 bed
= get_elf_backend_data (input_bfd
);
5986 symtab_hdr
= &elf_tdata (input_bfd
)->symtab_hdr
;
5987 sym_count
= symtab_hdr
->sh_size
/ bed
->s
->sizeof_sym
;
5988 ext_start
= symtab_hdr
->sh_info
;
5989 is_v8m
= (out_attr
[Tag_CPU_arch
].i
>= TAG_CPU_ARCH_V8M_BASE
5990 && out_attr
[Tag_CPU_arch_profile
].i
== 'M');
5992 local_syms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
5993 if (local_syms
== NULL
)
5994 local_syms
= bfd_elf_get_elf_syms (input_bfd
, symtab_hdr
,
5995 symtab_hdr
->sh_info
, 0, NULL
, NULL
,
5997 if (symtab_hdr
->sh_info
&& local_syms
== NULL
)
6001 for (i
= 0; i
< sym_count
; i
++)
6003 cmse_invalid
= FALSE
;
6007 cmse_sym
= &local_syms
[i
];
6008 sym_name
= bfd_elf_string_from_elf_section (input_bfd
,
6009 symtab_hdr
->sh_link
,
6011 if (!sym_name
|| !CONST_STRNEQ (sym_name
, CMSE_PREFIX
))
6014 /* Special symbol with local binding. */
6015 cmse_invalid
= TRUE
;
6019 cmse_hash
= elf32_arm_hash_entry (sym_hashes
[i
- ext_start
]);
6020 sym_name
= (char *) cmse_hash
->root
.root
.root
.string
;
6021 if (!CONST_STRNEQ (sym_name
, CMSE_PREFIX
))
6024 /* Special symbol has incorrect binding or type. */
6025 if ((cmse_hash
->root
.root
.type
!= bfd_link_hash_defined
6026 && cmse_hash
->root
.root
.type
!= bfd_link_hash_defweak
)
6027 || cmse_hash
->root
.type
!= STT_FUNC
)
6028 cmse_invalid
= TRUE
;
6033 _bfd_error_handler (_("%pB: special symbol `%s' only allowed for "
6034 "ARMv8-M architecture or later"),
6035 input_bfd
, sym_name
);
6036 is_v8m
= TRUE
; /* Avoid multiple warning. */
6042 _bfd_error_handler (_("%pB: invalid special symbol `%s'; it must be"
6043 " a global or weak function symbol"),
6044 input_bfd
, sym_name
);
6050 sym_name
+= strlen (CMSE_PREFIX
);
6051 hash
= (struct elf32_arm_link_hash_entry
*)
6052 elf_link_hash_lookup (&(htab
)->root
, sym_name
, FALSE
, FALSE
, TRUE
);
      /* No associated normal symbol or it is neither global nor weak.  */
      if (!hash
	  || (hash->root.root.type != bfd_link_hash_defined
	      && hash->root.root.type != bfd_link_hash_defweak)
	  || hash->root.type != STT_FUNC)
	{
	  /* Initialize here to avoid warning about use of possibly
	     uninitialized variable.  */
	  j = 0;

	  if (!hash)
	    {
	      /* Searching for a normal symbol with local binding.  */
	      for (; j < ext_start; j++)
		{
		  lsym_name =
		    bfd_elf_string_from_elf_section (input_bfd,
						     symtab_hdr->sh_link,
						     local_syms[j].st_name);
		  if (!strcmp (sym_name, lsym_name))
		    break;
		}
	    }

	  if (hash || j < ext_start)
	    _bfd_error_handler
	      (_("%pB: invalid standard symbol `%s'; it must be "
		 "a global or weak function symbol"),
	       input_bfd, sym_name);
	  else
	    _bfd_error_handler
	      (_("%pB: absent standard symbol `%s'"), input_bfd, sym_name);
	  ret = FALSE;
	  if (!hash)
	    continue;
	}
      sym_value = hash->root.root.u.def.value;
      section = hash->root.root.u.def.section;

      if (cmse_hash->root.root.u.def.section != section)
	{
	  _bfd_error_handler
	    (_("%pB: `%s' and its special symbol are in different sections"),
	     input_bfd, sym_name);
	  ret = FALSE;
	}
      if (cmse_hash->root.root.u.def.value != sym_value)
	continue; /* Ignore: could be an entry function starting with SG.  */

      /* If this section is a link-once section that will be discarded, then
	 don't create any stubs.  */
      if (section->output_section == NULL)
	{
	  _bfd_error_handler
	    (_("%pB: entry function `%s' not output"), input_bfd, sym_name);
	  continue;
	}

      if (hash->root.size == 0)
	{
	  _bfd_error_handler
	    (_("%pB: entry function `%s' is empty"), input_bfd, sym_name);
	  ret = FALSE;
	}

      if (!ret)
	continue;
      branch_type = ARM_GET_SYM_BRANCH_TYPE (hash->root.target_internal);
      stub_entry
	= elf32_arm_create_stub (htab, arm_stub_cmse_branch_thumb_only,
				 NULL, NULL, section, hash, sym_name,
				 sym_value, branch_type, &new_stub);

      if (stub_entry == NULL)
	ret = FALSE;
      else
	{
	  BFD_ASSERT (new_stub);
	  (*cmse_stub_created)++;
	}
    }
  if (!symtab_hdr->contents)
    free (local_syms);

  return ret;
}
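/* Illustrative sketch, not part of BFD: the veneer-creation conditions listed
   above boil down to comparing a normal symbol <SYM> with its __acle_se_<SYM>
   special symbol.  The struct and helper name below are hypothetical; the
   real code works on ELF linker hash entries as in cmse_scan above.  */

struct example_cmse_sym
{
  int is_func;			/* Has function type.  */
  int is_global_or_weak;	/* Has non local binding.  */
  bfd_vma value;
};

static int
example_needs_sg_veneer (const struct example_cmse_sym *normal,
			 const struct example_cmse_sym *special)
{
  /* Both symbols must be global/weak functions; if their values differ, the
     entry function handles the secure state transition itself and no Secure
     Gateway veneer is created for it.  */
  return normal->is_func && normal->is_global_or_weak
	 && special->is_func && special->is_global_or_weak
	 && normal->value == special->value;
}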
/* Return TRUE iff a symbol identified by its linker HASH entry is a secure
   code entry function, ie can be called from non secure code without using a
   veneer.  */

static bfd_boolean
cmse_entry_fct_p (struct elf32_arm_link_hash_entry *hash)
{
  bfd_byte contents[4];
  uint32_t first_insn;
  asection *section;
  bfd_vma offset;
  bfd *abfd;

  /* Defined symbol of function type.  */
  if (hash->root.root.type != bfd_link_hash_defined
      && hash->root.root.type != bfd_link_hash_defweak)
    return FALSE;
  if (hash->root.type != STT_FUNC)
    return FALSE;

  /* Read first instruction.  */
  section = hash->root.root.u.def.section;
  abfd = section->owner;
  offset = hash->root.root.u.def.value - section->vma;
  if (!bfd_get_section_contents (abfd, section, contents, offset,
				 sizeof (contents)))
    return FALSE;

  first_insn = bfd_get_32 (abfd, contents);

  /* Starts by SG instruction.  */
  return first_insn == 0xe97fe97f;
}
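/* Illustrative sketch, not part of BFD: the SG (Secure Gateway) instruction is
   the 32-bit Thumb encoding 0xe97f 0xe97f, i.e. the word 0xe97fe97f tested
   above.  A stand-alone check over a raw little-endian byte buffer could look
   like this (the helper name is hypothetical).  */

static int
example_starts_with_sg (const unsigned char *code)
{
  unsigned int first_insn = code[0] | (code[1] << 8) | (code[2] << 16)
			    | ((unsigned int) code[3] << 24);
  return first_insn == 0xe97fe97f;
}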
/* Output the name (in symbol table) of the veneer GEN_ENTRY if it is a new
   secure gateway veneer (ie. the veneer was not in the input import library)
   and there is no output import library (GEN_INFO->out_implib_bfd is NULL).  */

static bfd_boolean
arm_list_new_cmse_stub (struct bfd_hash_entry *gen_entry, void *gen_info)
{
  struct elf32_arm_stub_hash_entry *stub_entry;
  struct bfd_link_info *info;

  /* Massage our args to the form they really have.  */
  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
  info = (struct bfd_link_info *) gen_info;

  if (info->out_implib_bfd)
    return TRUE;

  if (stub_entry->stub_type != arm_stub_cmse_branch_thumb_only)
    return TRUE;

  if (stub_entry->stub_offset == (bfd_vma) -1)
    _bfd_error_handler (" %s", stub_entry->output_name);

  return TRUE;
}
/* Set the offset of each secure gateway veneer so that its address remains
   identical to the one in the input import library referred to by
   HTAB->in_implib_bfd.  A warning is issued for veneers that disappeared
   (present in input import library but absent from the executable being
   linked) or if new veneers appeared and there is no output import library
   (INFO->out_implib_bfd is NULL and *CMSE_STUB_CREATED is bigger than the
   number of secure gateway veneers found in the input import library).

   The function returns whether an error occurred.  If no error occurred,
   *CMSE_STUB_CREATED gives the number of SG veneers created by both cmse_scan
   and this function and HTAB->new_cmse_stub_offset is set just past the
   biggest veneer observed, for new veneers to be laid out after.  */

static bfd_boolean
set_cmse_veneer_addr_from_implib (struct bfd_link_info *info,
				  struct elf32_arm_link_hash_table *htab,
				  int *cmse_stub_created)
6227 asection
*stub_out_sec
;
6228 bfd_boolean ret
= TRUE
;
6229 Elf_Internal_Sym
*intsym
;
6230 const char *out_sec_name
;
6231 bfd_size_type cmse_stub_size
;
6232 asymbol
**sympp
= NULL
, *sym
;
6233 struct elf32_arm_link_hash_entry
*hash
;
6234 const insn_sequence
*cmse_stub_template
;
6235 struct elf32_arm_stub_hash_entry
*stub_entry
;
6236 int cmse_stub_template_size
, new_cmse_stubs_created
= *cmse_stub_created
;
6237 bfd_vma veneer_value
, stub_offset
, next_cmse_stub_offset
;
6238 bfd_vma cmse_stub_array_start
= (bfd_vma
) -1, cmse_stub_sec_vma
= 0;
6240 /* No input secure gateway import library. */
6241 if (!htab
->in_implib_bfd
)
6244 in_implib_bfd
= htab
->in_implib_bfd
;
6245 if (!htab
->cmse_implib
)
6247 _bfd_error_handler (_("%pB: --in-implib only supported for Secure "
6248 "Gateway import libraries"), in_implib_bfd
);
6252 /* Get symbol table size. */
6253 symsize
= bfd_get_symtab_upper_bound (in_implib_bfd
);
6257 /* Read in the input secure gateway import library's symbol table. */
6258 sympp
= (asymbol
**) bfd_malloc (symsize
);
6262 symcount
= bfd_canonicalize_symtab (in_implib_bfd
, sympp
);
6269 htab
->new_cmse_stub_offset
= 0;
6271 find_stub_size_and_template (arm_stub_cmse_branch_thumb_only
,
6272 &cmse_stub_template
,
6273 &cmse_stub_template_size
);
6275 arm_dedicated_stub_output_section_name (arm_stub_cmse_branch_thumb_only
);
6277 bfd_get_section_by_name (htab
->obfd
, out_sec_name
);
6278 if (stub_out_sec
!= NULL
)
6279 cmse_stub_sec_vma
= stub_out_sec
->vma
;
6281 /* Set addresses of veneers mentionned in input secure gateway import
6282 library's symbol table. */
6283 for (i
= 0; i
< symcount
; i
++)
6287 sym_name
= (char *) bfd_asymbol_name (sym
);
6288 intsym
= &((elf_symbol_type
*) sym
)->internal_elf_sym
;
6290 if (sym
->section
!= bfd_abs_section_ptr
6291 || !(flags
& (BSF_GLOBAL
| BSF_WEAK
))
6292 || (flags
& BSF_FUNCTION
) != BSF_FUNCTION
6293 || (ARM_GET_SYM_BRANCH_TYPE (intsym
->st_target_internal
)
6294 != ST_BRANCH_TO_THUMB
))
6296 _bfd_error_handler (_("%pB: invalid import library entry: `%s'; "
6297 "symbol should be absolute, global and "
6298 "refer to Thumb functions"),
6299 in_implib_bfd
, sym_name
);
6304 veneer_value
= bfd_asymbol_value (sym
);
6305 stub_offset
= veneer_value
- cmse_stub_sec_vma
;
6306 stub_entry
= arm_stub_hash_lookup (&htab
->stub_hash_table
, sym_name
,
6308 hash
= (struct elf32_arm_link_hash_entry
*)
6309 elf_link_hash_lookup (&(htab
)->root
, sym_name
, FALSE
, FALSE
, TRUE
);
6311 /* Stub entry should have been created by cmse_scan or the symbol be of
6312 a secure function callable from non secure code. */
6313 if (!stub_entry
&& !hash
)
6315 bfd_boolean new_stub
;
6318 (_("entry function `%s' disappeared from secure code"), sym_name
);
6319 hash
= (struct elf32_arm_link_hash_entry
*)
6320 elf_link_hash_lookup (&(htab
)->root
, sym_name
, TRUE
, TRUE
, TRUE
);
6322 = elf32_arm_create_stub (htab
, arm_stub_cmse_branch_thumb_only
,
6323 NULL
, NULL
, bfd_abs_section_ptr
, hash
,
6324 sym_name
, veneer_value
,
6325 ST_BRANCH_TO_THUMB
, &new_stub
);
6326 if (stub_entry
== NULL
)
6330 BFD_ASSERT (new_stub
);
6331 new_cmse_stubs_created
++;
6332 (*cmse_stub_created
)++;
6334 stub_entry
->stub_template_size
= stub_entry
->stub_size
= 0;
6335 stub_entry
->stub_offset
= stub_offset
;
6337 /* Symbol found is not callable from non secure code. */
6338 else if (!stub_entry
)
6340 if (!cmse_entry_fct_p (hash
))
6342 _bfd_error_handler (_("`%s' refers to a non entry function"),
6350 /* Only stubs for SG veneers should have been created. */
6351 BFD_ASSERT (stub_entry
->stub_type
== arm_stub_cmse_branch_thumb_only
);
6353 /* Check visibility hasn't changed. */
6354 if (!!(flags
& BSF_GLOBAL
)
6355 != (hash
->root
.root
.type
== bfd_link_hash_defined
))
6357 (_("%pB: visibility of symbol `%s' has changed"), in_implib_bfd
,
6360 stub_entry
->stub_offset
= stub_offset
;
6363 /* Size should match that of a SG veneer. */
6364 if (intsym
->st_size
!= cmse_stub_size
)
6366 _bfd_error_handler (_("%pB: incorrect size for symbol `%s'"),
6367 in_implib_bfd
, sym_name
);
6371 /* Previous veneer address is before current SG veneer section. */
6372 if (veneer_value
< cmse_stub_sec_vma
)
6374 /* Avoid offset underflow. */
6376 stub_entry
->stub_offset
= 0;
6381 /* Complain if stub offset not a multiple of stub size. */
6382 if (stub_offset
% cmse_stub_size
)
6385 (_("offset of veneer for entry function `%s' not a multiple of "
6386 "its size"), sym_name
);
6393 new_cmse_stubs_created
--;
6394 if (veneer_value
< cmse_stub_array_start
)
6395 cmse_stub_array_start
= veneer_value
;
6396 next_cmse_stub_offset
= stub_offset
+ ((cmse_stub_size
+ 7) & ~7);
6397 if (next_cmse_stub_offset
> htab
->new_cmse_stub_offset
)
6398 htab
->new_cmse_stub_offset
= next_cmse_stub_offset
;
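	  /* Worked example (illustrative): with a veneer of 12 bytes,
	     (12 + 7) & ~7 == 16, so the next slot starts 16 bytes after
	     STUB_OFFSET; a size that is already a multiple of 8 is kept
	     unchanged.  */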
6401 if (!info
->out_implib_bfd
&& new_cmse_stubs_created
!= 0)
6403 BFD_ASSERT (new_cmse_stubs_created
> 0);
6405 (_("new entry function(s) introduced but no output import library "
6407 bfd_hash_traverse (&htab
->stub_hash_table
, arm_list_new_cmse_stub
, info
);
6410 if (cmse_stub_array_start
!= cmse_stub_sec_vma
)
6413 (_("start address of `%s' is different from previous link"),
6423 /* Determine and set the size of the stub section for a final link.
6425 The basic idea here is to examine all the relocations looking for
6426 PC-relative calls to a target that is unreachable with a "bl"
6430 elf32_arm_size_stubs (bfd
*output_bfd
,
6432 struct bfd_link_info
*info
,
6433 bfd_signed_vma group_size
,
6434 asection
* (*add_stub_section
) (const char *, asection
*,
6437 void (*layout_sections_again
) (void))
6439 bfd_boolean ret
= TRUE
;
6440 obj_attribute
*out_attr
;
6441 int cmse_stub_created
= 0;
6442 bfd_size_type stub_group_size
;
6443 bfd_boolean m_profile
, stubs_always_after_branch
, first_veneer_scan
= TRUE
;
6444 struct elf32_arm_link_hash_table
*htab
= elf32_arm_hash_table (info
);
6445 struct a8_erratum_fix
*a8_fixes
= NULL
;
6446 unsigned int num_a8_fixes
= 0, a8_fix_table_size
= 10;
6447 struct a8_erratum_reloc
*a8_relocs
= NULL
;
6448 unsigned int num_a8_relocs
= 0, a8_reloc_table_size
= 10, i
;
6453 if (htab
->fix_cortex_a8
)
6455 a8_fixes
= (struct a8_erratum_fix
*)
6456 bfd_zmalloc (sizeof (struct a8_erratum_fix
) * a8_fix_table_size
);
6457 a8_relocs
= (struct a8_erratum_reloc
*)
6458 bfd_zmalloc (sizeof (struct a8_erratum_reloc
) * a8_reloc_table_size
);
6461 /* Propagate mach to stub bfd, because it may not have been
6462 finalized when we created stub_bfd. */
6463 bfd_set_arch_mach (stub_bfd
, bfd_get_arch (output_bfd
),
6464 bfd_get_mach (output_bfd
));
6466 /* Stash our params away. */
6467 htab
->stub_bfd
= stub_bfd
;
6468 htab
->add_stub_section
= add_stub_section
;
6469 htab
->layout_sections_again
= layout_sections_again
;
6470 stubs_always_after_branch
= group_size
< 0;
6472 out_attr
= elf_known_obj_attributes_proc (output_bfd
);
6473 m_profile
= out_attr
[Tag_CPU_arch_profile
].i
== 'M';
6475 /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
6476 as the first half of a 32-bit branch straddling two 4K pages. This is a
6477 crude way of enforcing that. */
6478 if (htab
->fix_cortex_a8
)
6479 stubs_always_after_branch
= 1;
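  /* Illustrative note: a 32-bit Thumb-2 branch whose first halfword sits at
     page offset 0xffe has its second halfword in the next 4K page; the scan
     below flags exactly those candidates by testing
     (from & 0xfff) == 0xffe on the address of the branch.  */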
  if (group_size < 0)
    stub_group_size = -group_size;
  else
    stub_group_size = group_size;
6486 if (stub_group_size
== 1)
    {
      /* Default values.  */
      /* The Thumb branch range of +-4MB has to be used as the default
	 maximum stub group size (a given section can contain both ARM and
	 Thumb code, so the worst case has to be taken into account).

	 This value is 24K less than that, which allows for 2025
	 12-byte stubs.  If we exceed that, then we will fail to link.
	 The user will have to relink with an explicit group size
	 option.  */
      stub_group_size = 4170000;
    }
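      /* Worked numbers (illustrative): the Thumb branch range is +/-4MB,
	 i.e. 4194304 bytes; 4194304 - 4170000 = 24304, roughly the 24K of
	 headroom mentioned above, enough for 2025 stubs of 12 bytes each
	 (2025 * 12 = 24300).  */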
6500 group_sections (htab
, stub_group_size
, stubs_always_after_branch
);
6502 /* If we're applying the cortex A8 fix, we need to determine the
6503 program header size now, because we cannot change it later --
6504 that could alter section placements. Notice the A8 erratum fix
6505 ends up requiring the section addresses to remain unchanged
6506 modulo the page size. That's something we cannot represent
6507 inside BFD, and we don't want to force the section alignment to
6508 be the page size. */
6509 if (htab
->fix_cortex_a8
)
6510 (*htab
->layout_sections_again
) ();
6515 unsigned int bfd_indx
;
6517 enum elf32_arm_stub_type stub_type
;
6518 bfd_boolean stub_changed
= FALSE
;
6519 unsigned prev_num_a8_fixes
= num_a8_fixes
;
6522 for (input_bfd
= info
->input_bfds
, bfd_indx
= 0;
6524 input_bfd
= input_bfd
->link
.next
, bfd_indx
++)
6526 Elf_Internal_Shdr
*symtab_hdr
;
6528 Elf_Internal_Sym
*local_syms
= NULL
;
6530 if (!is_arm_elf (input_bfd
))
6532 if ((input_bfd
->flags
& DYNAMIC
) != 0
6533 && (elf_sym_hashes (input_bfd
) == NULL
6534 || (elf_dyn_lib_class (input_bfd
) & DYN_AS_NEEDED
) != 0))
6539 /* We'll need the symbol table in a second. */
6540 symtab_hdr
= &elf_tdata (input_bfd
)->symtab_hdr
;
6541 if (symtab_hdr
->sh_info
== 0)
6544 /* Limit scan of symbols to object file whose profile is
6545 Microcontroller to not hinder performance in the general case. */
6546 if (m_profile
&& first_veneer_scan
)
6548 struct elf_link_hash_entry
**sym_hashes
;
6550 sym_hashes
= elf_sym_hashes (input_bfd
);
6551 if (!cmse_scan (input_bfd
, htab
, out_attr
, sym_hashes
,
6552 &cmse_stub_created
))
6553 goto error_ret_free_local
;
6555 if (cmse_stub_created
!= 0)
6556 stub_changed
= TRUE
;
6559 /* Walk over each section attached to the input bfd. */
6560 for (section
= input_bfd
->sections
;
6562 section
= section
->next
)
6564 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
6566 /* If there aren't any relocs, then there's nothing more
6568 if ((section
->flags
& SEC_RELOC
) == 0
6569 || section
->reloc_count
== 0
6570 || (section
->flags
& SEC_CODE
) == 0)
6573 /* If this section is a link-once section that will be
6574 discarded, then don't create any stubs. */
6575 if (section
->output_section
== NULL
6576 || section
->output_section
->owner
!= output_bfd
)
6579 /* Get the relocs. */
6581 = _bfd_elf_link_read_relocs (input_bfd
, section
, NULL
,
6582 NULL
, info
->keep_memory
);
6583 if (internal_relocs
== NULL
)
6584 goto error_ret_free_local
;
6586 /* Now examine each relocation. */
6587 irela
= internal_relocs
;
6588 irelaend
= irela
+ section
->reloc_count
;
6589 for (; irela
< irelaend
; irela
++)
6591 unsigned int r_type
, r_indx
;
6594 bfd_vma destination
;
6595 struct elf32_arm_link_hash_entry
*hash
;
6596 const char *sym_name
;
6597 unsigned char st_type
;
6598 enum arm_st_branch_type branch_type
;
6599 bfd_boolean created_stub
= FALSE
;
6601 r_type
= ELF32_R_TYPE (irela
->r_info
);
6602 r_indx
= ELF32_R_SYM (irela
->r_info
);
6604 if (r_type
>= (unsigned int) R_ARM_max
)
6606 bfd_set_error (bfd_error_bad_value
);
6607 error_ret_free_internal
:
6608 if (elf_section_data (section
)->relocs
== NULL
)
6609 free (internal_relocs
);
6611 error_ret_free_local
:
6612 if (local_syms
!= NULL
6613 && (symtab_hdr
->contents
6614 != (unsigned char *) local_syms
))
6620 if (r_indx
>= symtab_hdr
->sh_info
)
6621 hash
= elf32_arm_hash_entry
6622 (elf_sym_hashes (input_bfd
)
6623 [r_indx
- symtab_hdr
->sh_info
]);
6625 /* Only look for stubs on branch instructions, or
6626 non-relaxed TLSCALL */
6627 if ((r_type
!= (unsigned int) R_ARM_CALL
)
6628 && (r_type
!= (unsigned int) R_ARM_THM_CALL
)
6629 && (r_type
!= (unsigned int) R_ARM_JUMP24
)
6630 && (r_type
!= (unsigned int) R_ARM_THM_JUMP19
)
6631 && (r_type
!= (unsigned int) R_ARM_THM_XPC22
)
6632 && (r_type
!= (unsigned int) R_ARM_THM_JUMP24
)
6633 && (r_type
!= (unsigned int) R_ARM_PLT32
)
6634 && !((r_type
== (unsigned int) R_ARM_TLS_CALL
6635 || r_type
== (unsigned int) R_ARM_THM_TLS_CALL
)
6636 && r_type
== elf32_arm_tls_transition
6637 (info
, r_type
, &hash
->root
)
6638 && ((hash
? hash
->tls_type
6639 : (elf32_arm_local_got_tls_type
6640 (input_bfd
)[r_indx
]))
6641 & GOT_TLS_GDESC
) != 0))
6644 /* Now determine the call target, its name, value,
6651 if (r_type
== (unsigned int) R_ARM_TLS_CALL
6652 || r_type
== (unsigned int) R_ARM_THM_TLS_CALL
)
6654 /* A non-relaxed TLS call. The target is the
6655 plt-resident trampoline and nothing to do
6657 BFD_ASSERT (htab
->tls_trampoline
> 0);
6658 sym_sec
= htab
->root
.splt
;
6659 sym_value
= htab
->tls_trampoline
;
6662 branch_type
= ST_BRANCH_TO_ARM
;
6666 /* It's a local symbol. */
6667 Elf_Internal_Sym
*sym
;
6669 if (local_syms
== NULL
)
6672 = (Elf_Internal_Sym
*) symtab_hdr
->contents
;
6673 if (local_syms
== NULL
)
6675 = bfd_elf_get_elf_syms (input_bfd
, symtab_hdr
,
6676 symtab_hdr
->sh_info
, 0,
6678 if (local_syms
== NULL
)
6679 goto error_ret_free_internal
;
6682 sym
= local_syms
+ r_indx
;
6683 if (sym
->st_shndx
== SHN_UNDEF
)
6684 sym_sec
= bfd_und_section_ptr
;
6685 else if (sym
->st_shndx
== SHN_ABS
)
6686 sym_sec
= bfd_abs_section_ptr
;
6687 else if (sym
->st_shndx
== SHN_COMMON
)
6688 sym_sec
= bfd_com_section_ptr
;
6691 bfd_section_from_elf_index (input_bfd
, sym
->st_shndx
);
6694 /* This is an undefined symbol. It can never
6698 if (ELF_ST_TYPE (sym
->st_info
) != STT_SECTION
)
6699 sym_value
= sym
->st_value
;
6700 destination
= (sym_value
+ irela
->r_addend
6701 + sym_sec
->output_offset
6702 + sym_sec
->output_section
->vma
);
6703 st_type
= ELF_ST_TYPE (sym
->st_info
);
6705 ARM_GET_SYM_BRANCH_TYPE (sym
->st_target_internal
);
6707 = bfd_elf_string_from_elf_section (input_bfd
,
6708 symtab_hdr
->sh_link
,
6713 /* It's an external symbol. */
6714 while (hash
->root
.root
.type
== bfd_link_hash_indirect
6715 || hash
->root
.root
.type
== bfd_link_hash_warning
)
6716 hash
= ((struct elf32_arm_link_hash_entry
*)
6717 hash
->root
.root
.u
.i
.link
);
6719 if (hash
->root
.root
.type
== bfd_link_hash_defined
6720 || hash
->root
.root
.type
== bfd_link_hash_defweak
)
6722 sym_sec
= hash
->root
.root
.u
.def
.section
;
6723 sym_value
= hash
->root
.root
.u
.def
.value
;
6725 struct elf32_arm_link_hash_table
*globals
=
6726 elf32_arm_hash_table (info
);
6728 /* For a destination in a shared library,
6729 use the PLT stub as target address to
6730 decide whether a branch stub is
6733 && globals
->root
.splt
!= NULL
6735 && hash
->root
.plt
.offset
!= (bfd_vma
) -1)
6737 sym_sec
= globals
->root
.splt
;
6738 sym_value
= hash
->root
.plt
.offset
;
6739 if (sym_sec
->output_section
!= NULL
)
6740 destination
= (sym_value
6741 + sym_sec
->output_offset
6742 + sym_sec
->output_section
->vma
);
6744 else if (sym_sec
->output_section
!= NULL
)
6745 destination
= (sym_value
+ irela
->r_addend
6746 + sym_sec
->output_offset
6747 + sym_sec
->output_section
->vma
);
6749 else if ((hash
->root
.root
.type
== bfd_link_hash_undefined
)
6750 || (hash
->root
.root
.type
== bfd_link_hash_undefweak
))
6752 /* For a shared library, use the PLT stub as
6753 target address to decide whether a long
6754 branch stub is needed.
6755 For absolute code, they cannot be handled. */
6756 struct elf32_arm_link_hash_table
*globals
=
6757 elf32_arm_hash_table (info
);
6760 && globals
->root
.splt
!= NULL
6762 && hash
->root
.plt
.offset
!= (bfd_vma
) -1)
6764 sym_sec
= globals
->root
.splt
;
6765 sym_value
= hash
->root
.plt
.offset
;
6766 if (sym_sec
->output_section
!= NULL
)
6767 destination
= (sym_value
6768 + sym_sec
->output_offset
6769 + sym_sec
->output_section
->vma
);
6776 bfd_set_error (bfd_error_bad_value
);
6777 goto error_ret_free_internal
;
6779 st_type
= hash
->root
.type
;
6781 ARM_GET_SYM_BRANCH_TYPE (hash
->root
.target_internal
);
6782 sym_name
= hash
->root
.root
.root
.string
;
6787 bfd_boolean new_stub
;
6788 struct elf32_arm_stub_hash_entry
*stub_entry
;
6790 /* Determine what (if any) linker stub is needed. */
6791 stub_type
= arm_type_of_stub (info
, section
, irela
,
6792 st_type
, &branch_type
,
6793 hash
, destination
, sym_sec
,
6794 input_bfd
, sym_name
);
6795 if (stub_type
== arm_stub_none
)
6798 /* We've either created a stub for this reloc already,
6799 or we are about to. */
6801 elf32_arm_create_stub (htab
, stub_type
, section
, irela
,
6803 (char *) sym_name
, sym_value
,
6804 branch_type
, &new_stub
);
6806 created_stub
= stub_entry
!= NULL
;
6808 goto error_ret_free_internal
;
6812 stub_changed
= TRUE
;
6816 /* Look for relocations which might trigger Cortex-A8
6818 if (htab
->fix_cortex_a8
6819 && (r_type
== (unsigned int) R_ARM_THM_JUMP24
6820 || r_type
== (unsigned int) R_ARM_THM_JUMP19
6821 || r_type
== (unsigned int) R_ARM_THM_CALL
6822 || r_type
== (unsigned int) R_ARM_THM_XPC22
))
6824 bfd_vma from
= section
->output_section
->vma
6825 + section
->output_offset
6828 if ((from
& 0xfff) == 0xffe)
6830 /* Found a candidate. Note we haven't checked the
6831 destination is within 4K here: if we do so (and
6832 don't create an entry in a8_relocs) we can't tell
6833 that a branch should have been relocated when
6835 if (num_a8_relocs
== a8_reloc_table_size
)
6837 a8_reloc_table_size
*= 2;
6838 a8_relocs
= (struct a8_erratum_reloc
*)
6839 bfd_realloc (a8_relocs
,
6840 sizeof (struct a8_erratum_reloc
)
6841 * a8_reloc_table_size
);
6844 a8_relocs
[num_a8_relocs
].from
= from
;
6845 a8_relocs
[num_a8_relocs
].destination
= destination
;
6846 a8_relocs
[num_a8_relocs
].r_type
= r_type
;
6847 a8_relocs
[num_a8_relocs
].branch_type
= branch_type
;
6848 a8_relocs
[num_a8_relocs
].sym_name
= sym_name
;
6849 a8_relocs
[num_a8_relocs
].non_a8_stub
= created_stub
;
6850 a8_relocs
[num_a8_relocs
].hash
= hash
;
6857 /* We're done with the internal relocs, free them. */
6858 if (elf_section_data (section
)->relocs
== NULL
)
6859 free (internal_relocs
);
6862 if (htab
->fix_cortex_a8
)
6864 /* Sort relocs which might apply to Cortex-A8 erratum. */
6865 qsort (a8_relocs
, num_a8_relocs
,
6866 sizeof (struct a8_erratum_reloc
),
6869 /* Scan for branches which might trigger Cortex-A8 erratum. */
6870 if (cortex_a8_erratum_scan (input_bfd
, info
, &a8_fixes
,
6871 &num_a8_fixes
, &a8_fix_table_size
,
6872 a8_relocs
, num_a8_relocs
,
6873 prev_num_a8_fixes
, &stub_changed
)
6875 goto error_ret_free_local
;
6878 if (local_syms
!= NULL
6879 && symtab_hdr
->contents
!= (unsigned char *) local_syms
)
6881 if (!info
->keep_memory
)
6884 symtab_hdr
->contents
= (unsigned char *) local_syms
;
6888 if (first_veneer_scan
6889 && !set_cmse_veneer_addr_from_implib (info
, htab
,
6890 &cmse_stub_created
))
6893 if (prev_num_a8_fixes
!= num_a8_fixes
)
6894 stub_changed
= TRUE
;
6899 /* OK, we've added some stubs. Find out the new size of the
6901 for (stub_sec
= htab
->stub_bfd
->sections
;
6903 stub_sec
= stub_sec
->next
)
6905 /* Ignore non-stub sections. */
6906 if (!strstr (stub_sec
->name
, STUB_SUFFIX
))
6912 /* Add new SG veneers after those already in the input import
6914 for (stub_type
= arm_stub_none
+ 1; stub_type
< max_stub_type
;
6917 bfd_vma
*start_offset_p
;
6918 asection
**stub_sec_p
;
6920 start_offset_p
= arm_new_stubs_start_offset_ptr (htab
, stub_type
);
6921 stub_sec_p
= arm_dedicated_stub_input_section_ptr (htab
, stub_type
);
6922 if (start_offset_p
== NULL
)
6925 BFD_ASSERT (stub_sec_p
!= NULL
);
6926 if (*stub_sec_p
!= NULL
)
6927 (*stub_sec_p
)->size
= *start_offset_p
;
6930 /* Compute stub section size, considering padding. */
6931 bfd_hash_traverse (&htab
->stub_hash_table
, arm_size_one_stub
, htab
);
6932 for (stub_type
= arm_stub_none
+ 1; stub_type
< max_stub_type
;
6936 asection
**stub_sec_p
;
6938 padding
= arm_dedicated_stub_section_padding (stub_type
);
6939 stub_sec_p
= arm_dedicated_stub_input_section_ptr (htab
, stub_type
);
6940 /* Skip if no stub input section or no stub section padding
6942 if ((stub_sec_p
!= NULL
&& *stub_sec_p
== NULL
) || padding
== 0)
6944 /* Stub section padding required but no dedicated section. */
6945 BFD_ASSERT (stub_sec_p
);
6947 size
= (*stub_sec_p
)->size
;
6948 size
= (size
+ padding
- 1) & ~(padding
- 1);
6949 (*stub_sec_p
)->size
= size
;
6952 /* Add Cortex-A8 erratum veneers to stub section sizes too. */
6953 if (htab
->fix_cortex_a8
)
6954 for (i
= 0; i
< num_a8_fixes
; i
++)
6956 stub_sec
= elf32_arm_create_or_find_stub_sec (NULL
,
6957 a8_fixes
[i
].section
, htab
, a8_fixes
[i
].stub_type
);
6959 if (stub_sec
== NULL
)
6963 += find_stub_size_and_template (a8_fixes
[i
].stub_type
, NULL
,
6968 /* Ask the linker to do its stuff. */
6969 (*htab
->layout_sections_again
) ();
6970 first_veneer_scan
= FALSE
;
6973 /* Add stubs for Cortex-A8 erratum fixes now. */
6974 if (htab
->fix_cortex_a8
)
6976 for (i
= 0; i
< num_a8_fixes
; i
++)
6978 struct elf32_arm_stub_hash_entry
*stub_entry
;
6979 char *stub_name
= a8_fixes
[i
].stub_name
;
6980 asection
*section
= a8_fixes
[i
].section
;
6981 unsigned int section_id
= a8_fixes
[i
].section
->id
;
6982 asection
*link_sec
= htab
->stub_group
[section_id
].link_sec
;
6983 asection
*stub_sec
= htab
->stub_group
[section_id
].stub_sec
;
6984 const insn_sequence
*template_sequence
;
6985 int template_size
, size
= 0;
6987 stub_entry
= arm_stub_hash_lookup (&htab
->stub_hash_table
, stub_name
,
6989 if (stub_entry
== NULL
)
6991 _bfd_error_handler (_("%pB: cannot create stub entry %s"),
6992 section
->owner
, stub_name
);
6996 stub_entry
->stub_sec
= stub_sec
;
6997 stub_entry
->stub_offset
= (bfd_vma
) -1;
6998 stub_entry
->id_sec
= link_sec
;
6999 stub_entry
->stub_type
= a8_fixes
[i
].stub_type
;
7000 stub_entry
->source_value
= a8_fixes
[i
].offset
;
7001 stub_entry
->target_section
= a8_fixes
[i
].section
;
7002 stub_entry
->target_value
= a8_fixes
[i
].target_offset
;
7003 stub_entry
->orig_insn
= a8_fixes
[i
].orig_insn
;
7004 stub_entry
->branch_type
= a8_fixes
[i
].branch_type
;
7006 size
= find_stub_size_and_template (a8_fixes
[i
].stub_type
,
7010 stub_entry
->stub_size
= size
;
7011 stub_entry
->stub_template
= template_sequence
;
7012 stub_entry
->stub_template_size
= template_size
;
7015 /* Stash the Cortex-A8 erratum fix array for use later in
7016 elf32_arm_write_section(). */
7017 htab
->a8_erratum_fixes
= a8_fixes
;
7018 htab
->num_a8_erratum_fixes
= num_a8_fixes
;
7022 htab
->a8_erratum_fixes
= NULL
;
7023 htab
->num_a8_erratum_fixes
= 0;
7028 /* Build all the stubs associated with the current output file. The
7029 stubs are kept in a hash table attached to the main linker hash
7030 table. We also set up the .plt entries for statically linked PIC
7031 functions here. This function is called via arm_elf_finish in the
7035 elf32_arm_build_stubs (struct bfd_link_info
*info
)
7038 struct bfd_hash_table
*table
;
7039 enum elf32_arm_stub_type stub_type
;
7040 struct elf32_arm_link_hash_table
*htab
;
7042 htab
= elf32_arm_hash_table (info
);
7046 for (stub_sec
= htab
->stub_bfd
->sections
;
7048 stub_sec
= stub_sec
->next
)
7052 /* Ignore non-stub sections. */
7053 if (!strstr (stub_sec
->name
, STUB_SUFFIX
))
7056 /* Allocate memory to hold the linker stubs. Zeroing the stub sections
7057 must at least be done for stub section requiring padding and for SG
7058 veneers to ensure that a non secure code branching to a removed SG
7059 veneer causes an error. */
7060 size
= stub_sec
->size
;
7061 stub_sec
->contents
= (unsigned char *) bfd_zalloc (htab
->stub_bfd
, size
);
7062 if (stub_sec
->contents
== NULL
&& size
!= 0)
7068 /* Add new SG veneers after those already in the input import library. */
7069 for (stub_type
= arm_stub_none
+ 1; stub_type
< max_stub_type
; stub_type
++)
7071 bfd_vma
*start_offset_p
;
7072 asection
**stub_sec_p
;
7074 start_offset_p
= arm_new_stubs_start_offset_ptr (htab
, stub_type
);
7075 stub_sec_p
= arm_dedicated_stub_input_section_ptr (htab
, stub_type
);
7076 if (start_offset_p
== NULL
)
7079 BFD_ASSERT (stub_sec_p
!= NULL
);
7080 if (*stub_sec_p
!= NULL
)
7081 (*stub_sec_p
)->size
= *start_offset_p
;
7084 /* Build the stubs as directed by the stub hash table. */
7085 table
= &htab
->stub_hash_table
;
7086 bfd_hash_traverse (table
, arm_build_one_stub
, info
);
7087 if (htab
->fix_cortex_a8
)
7089 /* Place the cortex a8 stubs last. */
7090 htab
->fix_cortex_a8
= -1;
7091 bfd_hash_traverse (table
, arm_build_one_stub
, info
);
/* Locate the Thumb encoded calling stub for NAME.  */

static struct elf_link_hash_entry *
find_thumb_glue (struct bfd_link_info *link_info,
		 const char *name,
		 char **error_message)
{
  char *tmp_name;
  struct elf_link_hash_entry *hash;
  struct elf32_arm_link_hash_table *hash_table;

  /* We need a pointer to the armelf specific hash table.  */
  hash_table = elf32_arm_hash_table (link_info);
  if (hash_table == NULL)
    return NULL;

  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
				  + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);

  BFD_ASSERT (tmp_name);

  sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);

  hash = elf_link_hash_lookup
    (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);

  if (hash == NULL
      && asprintf (error_message, _("unable to find %s glue '%s' for '%s'"),
		   "Thumb", tmp_name, name) == -1)
    *error_message = (char *) bfd_errmsg (bfd_error_system_call);

  free (tmp_name);

  return hash;
}
/* Locate the ARM encoded calling stub for NAME.  */

static struct elf_link_hash_entry *
find_arm_glue (struct bfd_link_info *link_info,
	       const char *name,
	       char **error_message)
{
  char *tmp_name;
  struct elf_link_hash_entry *myh;
  struct elf32_arm_link_hash_table *hash_table;

  /* We need a pointer to the elfarm specific hash table.  */
  hash_table = elf32_arm_hash_table (link_info);
  if (hash_table == NULL)
    return NULL;

  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
				  + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);

  BFD_ASSERT (tmp_name);

  sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);

  myh = elf_link_hash_lookup
    (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);

  if (myh == NULL
      && asprintf (error_message, _("unable to find %s glue '%s' for '%s'"),
		   "ARM", tmp_name, name) == -1)
    *error_message = (char *) bfd_errmsg (bfd_error_system_call);

  free (tmp_name);

  return myh;
}
/* ARM->Thumb glue (static images):

   .arm
   __func_from_arm:
   ldr r12, __func_addr
   bx  r12
   __func_addr:
   .word func    @ behave as if you saw a ARM_32 reloc.

   (v5t static images)
   .arm
   __func_from_arm:
   ldr pc, __func_addr
   __func_addr:
   .word func    @ behave as if you saw a ARM_32 reloc.

   (relocatable images)
   .arm
   __func_from_arm:
   ldr r12, __func_offset
   add r12, r12, pc
   bx  r12
   __func_offset:
   .word func - .   */

#define ARM2THUMB_STATIC_GLUE_SIZE 12
static const insn32 a2t1_ldr_insn = 0xe59fc000;
static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;
static const insn32 a2t3_func_addr_insn = 0x00000001;

#define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
static const insn32 a2t1v5_ldr_insn = 0xe51ff004;
static const insn32 a2t2v5_func_addr_insn = 0x00000001;

#define ARM2THUMB_PIC_GLUE_SIZE 16
static const insn32 a2t1p_ldr_insn = 0xe59fc004;
static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;
static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;

/* Thumb->ARM:                          Thumb->(non-interworking aware) ARM

     .thumb                             .thumb
     .align 2                           .align 2
 __func_from_thumb:                 __func_from_thumb:
     bx pc                              push {r6, lr}
     nop                                ldr  r6, __func_addr
     .arm                               mov  lr, pc
     b func                             bx   r6
                                        .arm
                                    ;; back_to_thumb:
                                        ldmia r13! {r6, lr}
                                        bx    lr
                                    __func_addr:
                                        .word  func  */

#define THUMB2ARM_GLUE_SIZE 8
static const insn16 t2a1_bx_pc_insn = 0x4778;
static const insn16 t2a2_noop_insn = 0x46c0;
static const insn32 t2a3_b_insn = 0xea000000;

#define VFP11_ERRATUM_VENEER_SIZE 8
#define STM32L4XX_ERRATUM_LDM_VENEER_SIZE 16
#define STM32L4XX_ERRATUM_VLDM_VENEER_SIZE 24

#define ARM_BX_VENEER_SIZE 12
static const insn32 armbx1_tst_insn = 0xe3100001;
static const insn32 armbx2_moveq_insn = 0x01a0f000;
static const insn32 armbx3_bx_insn = 0xe12fff10;
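/* Illustrative sketch, not part of BFD: laying out the 12-byte static
   ARM->Thumb glue from the constants above.  PUT_WORD stands in for the
   endian-aware put_arm_insn/bfd_put_32 calls used by the real emission code
   elsewhere in this file; the helper name and its signature are hypothetical.
   The low bit set on the literal word marks the destination as Thumb for the
   final BX, matching the addend of 1 in a2t3_func_addr_insn.  */

static void
example_emit_a2t_static_glue (unsigned char *buf,
			      void (*put_word) (unsigned int, unsigned char *),
			      unsigned int thumb_dest)
{
  put_word (a2t1_ldr_insn, buf + 0);	/* ldr r12, [pc]  (loads word at +8) */
  put_word (a2t2_bx_r12_insn, buf + 4);	/* bx  r12                           */
  put_word (thumb_dest | 1, buf + 8);	/* __func_addr: .word func + 1       */
}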
7238 #ifndef ELFARM_NABI_C_INCLUDED
7240 arm_allocate_glue_section_space (bfd
* abfd
, bfd_size_type size
, const char * name
)
7243 bfd_byte
* contents
;
7247 /* Do not include empty glue sections in the output. */
7250 s
= bfd_get_linker_section (abfd
, name
);
7252 s
->flags
|= SEC_EXCLUDE
;
7257 BFD_ASSERT (abfd
!= NULL
);
7259 s
= bfd_get_linker_section (abfd
, name
);
7260 BFD_ASSERT (s
!= NULL
);
7262 contents
= (bfd_byte
*) bfd_zalloc (abfd
, size
);
7264 BFD_ASSERT (s
->size
== size
);
7265 s
->contents
= contents
;
7269 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info
* info
)
7271 struct elf32_arm_link_hash_table
* globals
;
7273 globals
= elf32_arm_hash_table (info
);
7274 BFD_ASSERT (globals
!= NULL
);
7276 arm_allocate_glue_section_space (globals
->bfd_of_glue_owner
,
7277 globals
->arm_glue_size
,
7278 ARM2THUMB_GLUE_SECTION_NAME
);
7280 arm_allocate_glue_section_space (globals
->bfd_of_glue_owner
,
7281 globals
->thumb_glue_size
,
7282 THUMB2ARM_GLUE_SECTION_NAME
);
7284 arm_allocate_glue_section_space (globals
->bfd_of_glue_owner
,
7285 globals
->vfp11_erratum_glue_size
,
7286 VFP11_ERRATUM_VENEER_SECTION_NAME
);
7288 arm_allocate_glue_section_space (globals
->bfd_of_glue_owner
,
7289 globals
->stm32l4xx_erratum_glue_size
,
7290 STM32L4XX_ERRATUM_VENEER_SECTION_NAME
);
7292 arm_allocate_glue_section_space (globals
->bfd_of_glue_owner
,
7293 globals
->bx_glue_size
,
7294 ARM_BX_GLUE_SECTION_NAME
);
7299 /* Allocate space and symbols for calling a Thumb function from Arm mode.
7300 returns the symbol identifying the stub. */
7302 static struct elf_link_hash_entry
*
7303 record_arm_to_thumb_glue (struct bfd_link_info
* link_info
,
7304 struct elf_link_hash_entry
* h
)
7306 const char * name
= h
->root
.root
.string
;
7309 struct elf_link_hash_entry
* myh
;
7310 struct bfd_link_hash_entry
* bh
;
7311 struct elf32_arm_link_hash_table
* globals
;
7315 globals
= elf32_arm_hash_table (link_info
);
7316 BFD_ASSERT (globals
!= NULL
);
7317 BFD_ASSERT (globals
->bfd_of_glue_owner
!= NULL
);
7319 s
= bfd_get_linker_section
7320 (globals
->bfd_of_glue_owner
, ARM2THUMB_GLUE_SECTION_NAME
);
7322 BFD_ASSERT (s
!= NULL
);
7324 tmp_name
= (char *) bfd_malloc ((bfd_size_type
) strlen (name
)
7325 + strlen (ARM2THUMB_GLUE_ENTRY_NAME
) + 1);
7327 BFD_ASSERT (tmp_name
);
7329 sprintf (tmp_name
, ARM2THUMB_GLUE_ENTRY_NAME
, name
);
7331 myh
= elf_link_hash_lookup
7332 (&(globals
)->root
, tmp_name
, FALSE
, FALSE
, TRUE
);
7336 /* We've already seen this guy. */
7341 /* The only trick here is using hash_table->arm_glue_size as the value.
7342 Even though the section isn't allocated yet, this is where we will be
7343 putting it. The +1 on the value marks that the stub has not been
7344 output yet - not that it is a Thumb function. */
7346 val
= globals
->arm_glue_size
+ 1;
7347 _bfd_generic_link_add_one_symbol (link_info
, globals
->bfd_of_glue_owner
,
7348 tmp_name
, BSF_GLOBAL
, s
, val
,
7349 NULL
, TRUE
, FALSE
, &bh
);
7351 myh
= (struct elf_link_hash_entry
*) bh
;
7352 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
7353 myh
->forced_local
= 1;
7357 if (bfd_link_pic (link_info
)
7358 || globals
->root
.is_relocatable_executable
7359 || globals
->pic_veneer
)
7360 size
= ARM2THUMB_PIC_GLUE_SIZE
;
7361 else if (globals
->use_blx
)
7362 size
= ARM2THUMB_V5_STATIC_GLUE_SIZE
;
7364 size
= ARM2THUMB_STATIC_GLUE_SIZE
;
7367 globals
->arm_glue_size
+= size
;
7372 /* Allocate space for ARMv4 BX veneers. */
7375 record_arm_bx_glue (struct bfd_link_info
* link_info
, int reg
)
7378 struct elf32_arm_link_hash_table
*globals
;
7380 struct elf_link_hash_entry
*myh
;
7381 struct bfd_link_hash_entry
*bh
;
7384 /* BX PC does not need a veneer. */
7388 globals
= elf32_arm_hash_table (link_info
);
7389 BFD_ASSERT (globals
!= NULL
);
7390 BFD_ASSERT (globals
->bfd_of_glue_owner
!= NULL
);
7392 /* Check if this veneer has already been allocated. */
7393 if (globals
->bx_glue_offset
[reg
])
7396 s
= bfd_get_linker_section
7397 (globals
->bfd_of_glue_owner
, ARM_BX_GLUE_SECTION_NAME
);
7399 BFD_ASSERT (s
!= NULL
);
7401 /* Add symbol for veneer. */
7403 bfd_malloc ((bfd_size_type
) strlen (ARM_BX_GLUE_ENTRY_NAME
) + 1);
7405 BFD_ASSERT (tmp_name
);
7407 sprintf (tmp_name
, ARM_BX_GLUE_ENTRY_NAME
, reg
);
7409 myh
= elf_link_hash_lookup
7410 (&(globals
)->root
, tmp_name
, FALSE
, FALSE
, FALSE
);
7412 BFD_ASSERT (myh
== NULL
);
7415 val
= globals
->bx_glue_size
;
7416 _bfd_generic_link_add_one_symbol (link_info
, globals
->bfd_of_glue_owner
,
7417 tmp_name
, BSF_FUNCTION
| BSF_LOCAL
, s
, val
,
7418 NULL
, TRUE
, FALSE
, &bh
);
7420 myh
= (struct elf_link_hash_entry
*) bh
;
7421 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
7422 myh
->forced_local
= 1;
7424 s
->size
+= ARM_BX_VENEER_SIZE
;
7425 globals
->bx_glue_offset
[reg
] = globals
->bx_glue_size
| 2;
7426 globals
->bx_glue_size
+= ARM_BX_VENEER_SIZE
;
7430 /* Add an entry to the code/data map for section SEC. */
7433 elf32_arm_section_map_add (asection
*sec
, char type
, bfd_vma vma
)
7435 struct _arm_elf_section_data
*sec_data
= elf32_arm_section_data (sec
);
7436 unsigned int newidx
;
7438 if (sec_data
->map
== NULL
)
7440 sec_data
->map
= (elf32_arm_section_map
*)
7441 bfd_malloc (sizeof (elf32_arm_section_map
));
7442 sec_data
->mapcount
= 0;
7443 sec_data
->mapsize
= 1;
7446 newidx
= sec_data
->mapcount
++;
7448 if (sec_data
->mapcount
> sec_data
->mapsize
)
7450 sec_data
->mapsize
*= 2;
7451 sec_data
->map
= (elf32_arm_section_map
*)
7452 bfd_realloc_or_free (sec_data
->map
, sec_data
->mapsize
7453 * sizeof (elf32_arm_section_map
));
7458 sec_data
->map
[newidx
].vma
= vma
;
7459 sec_data
->map
[newidx
].type
= type
;
7464 /* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode
7465 veneers are handled for now. */
7468 record_vfp11_erratum_veneer (struct bfd_link_info
*link_info
,
7469 elf32_vfp11_erratum_list
*branch
,
7471 asection
*branch_sec
,
7472 unsigned int offset
)
7475 struct elf32_arm_link_hash_table
*hash_table
;
7477 struct elf_link_hash_entry
*myh
;
7478 struct bfd_link_hash_entry
*bh
;
7480 struct _arm_elf_section_data
*sec_data
;
7481 elf32_vfp11_erratum_list
*newerr
;
7483 hash_table
= elf32_arm_hash_table (link_info
);
7484 BFD_ASSERT (hash_table
!= NULL
);
7485 BFD_ASSERT (hash_table
->bfd_of_glue_owner
!= NULL
);
7487 s
= bfd_get_linker_section
7488 (hash_table
->bfd_of_glue_owner
, VFP11_ERRATUM_VENEER_SECTION_NAME
);
7490 sec_data
= elf32_arm_section_data (s
);
7492 BFD_ASSERT (s
!= NULL
);
7494 tmp_name
= (char *) bfd_malloc ((bfd_size_type
) strlen
7495 (VFP11_ERRATUM_VENEER_ENTRY_NAME
) + 10);
7497 BFD_ASSERT (tmp_name
);
7499 sprintf (tmp_name
, VFP11_ERRATUM_VENEER_ENTRY_NAME
,
7500 hash_table
->num_vfp11_fixes
);
7502 myh
= elf_link_hash_lookup
7503 (&(hash_table
)->root
, tmp_name
, FALSE
, FALSE
, FALSE
);
7505 BFD_ASSERT (myh
== NULL
);
7508 val
= hash_table
->vfp11_erratum_glue_size
;
7509 _bfd_generic_link_add_one_symbol (link_info
, hash_table
->bfd_of_glue_owner
,
7510 tmp_name
, BSF_FUNCTION
| BSF_LOCAL
, s
, val
,
7511 NULL
, TRUE
, FALSE
, &bh
);
7513 myh
= (struct elf_link_hash_entry
*) bh
;
7514 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
7515 myh
->forced_local
= 1;
7517 /* Link veneer back to calling location. */
7518 sec_data
->erratumcount
+= 1;
7519 newerr
= (elf32_vfp11_erratum_list
*)
7520 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list
));
7522 newerr
->type
= VFP11_ERRATUM_ARM_VENEER
;
7524 newerr
->u
.v
.branch
= branch
;
7525 newerr
->u
.v
.id
= hash_table
->num_vfp11_fixes
;
7526 branch
->u
.b
.veneer
= newerr
;
7528 newerr
->next
= sec_data
->erratumlist
;
7529 sec_data
->erratumlist
= newerr
;
7531 /* A symbol for the return from the veneer. */
7532 sprintf (tmp_name
, VFP11_ERRATUM_VENEER_ENTRY_NAME
"_r",
7533 hash_table
->num_vfp11_fixes
);
7535 myh
= elf_link_hash_lookup
7536 (&(hash_table
)->root
, tmp_name
, FALSE
, FALSE
, FALSE
);
7543 _bfd_generic_link_add_one_symbol (link_info
, branch_bfd
, tmp_name
, BSF_LOCAL
,
7544 branch_sec
, val
, NULL
, TRUE
, FALSE
, &bh
);
7546 myh
= (struct elf_link_hash_entry
*) bh
;
7547 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
7548 myh
->forced_local
= 1;
7552 /* Generate a mapping symbol for the veneer section, and explicitly add an
7553 entry for that symbol to the code/data map for the section. */
7554 if (hash_table
->vfp11_erratum_glue_size
== 0)
7557 /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it
7558 ever requires this erratum fix. */
7559 _bfd_generic_link_add_one_symbol (link_info
,
7560 hash_table
->bfd_of_glue_owner
, "$a",
7561 BSF_LOCAL
, s
, 0, NULL
,
7564 myh
= (struct elf_link_hash_entry
*) bh
;
7565 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_NOTYPE
);
7566 myh
->forced_local
= 1;
7568 /* The elf32_arm_init_maps function only cares about symbols from input
7569 BFDs. We must make a note of this generated mapping symbol
7570 ourselves so that code byteswapping works properly in
7571 elf32_arm_write_section. */
7572 elf32_arm_section_map_add (s
, 'a', 0);
7575 s
->size
+= VFP11_ERRATUM_VENEER_SIZE
;
7576 hash_table
->vfp11_erratum_glue_size
+= VFP11_ERRATUM_VENEER_SIZE
;
7577 hash_table
->num_vfp11_fixes
++;
7579 /* The offset of the veneer. */
7583 /* Record information about a STM32L4XX STM erratum veneer. Only THUMB-mode
7584 veneers need to be handled because used only in Cortex-M. */
7587 record_stm32l4xx_erratum_veneer (struct bfd_link_info
*link_info
,
7588 elf32_stm32l4xx_erratum_list
*branch
,
7590 asection
*branch_sec
,
7591 unsigned int offset
,
7592 bfd_size_type veneer_size
)
7595 struct elf32_arm_link_hash_table
*hash_table
;
7597 struct elf_link_hash_entry
*myh
;
7598 struct bfd_link_hash_entry
*bh
;
7600 struct _arm_elf_section_data
*sec_data
;
7601 elf32_stm32l4xx_erratum_list
*newerr
;
7603 hash_table
= elf32_arm_hash_table (link_info
);
7604 BFD_ASSERT (hash_table
!= NULL
);
7605 BFD_ASSERT (hash_table
->bfd_of_glue_owner
!= NULL
);
7607 s
= bfd_get_linker_section
7608 (hash_table
->bfd_of_glue_owner
, STM32L4XX_ERRATUM_VENEER_SECTION_NAME
);
7610 BFD_ASSERT (s
!= NULL
);
7612 sec_data
= elf32_arm_section_data (s
);
7614 tmp_name
= (char *) bfd_malloc ((bfd_size_type
) strlen
7615 (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME
) + 10);
7617 BFD_ASSERT (tmp_name
);
7619 sprintf (tmp_name
, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME
,
7620 hash_table
->num_stm32l4xx_fixes
);
7622 myh
= elf_link_hash_lookup
7623 (&(hash_table
)->root
, tmp_name
, FALSE
, FALSE
, FALSE
);
7625 BFD_ASSERT (myh
== NULL
);
7628 val
= hash_table
->stm32l4xx_erratum_glue_size
;
7629 _bfd_generic_link_add_one_symbol (link_info
, hash_table
->bfd_of_glue_owner
,
7630 tmp_name
, BSF_FUNCTION
| BSF_LOCAL
, s
, val
,
7631 NULL
, TRUE
, FALSE
, &bh
);
7633 myh
= (struct elf_link_hash_entry
*) bh
;
7634 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
7635 myh
->forced_local
= 1;
7637 /* Link veneer back to calling location. */
7638 sec_data
->stm32l4xx_erratumcount
+= 1;
7639 newerr
= (elf32_stm32l4xx_erratum_list
*)
7640 bfd_zmalloc (sizeof (elf32_stm32l4xx_erratum_list
));
7642 newerr
->type
= STM32L4XX_ERRATUM_VENEER
;
7644 newerr
->u
.v
.branch
= branch
;
7645 newerr
->u
.v
.id
= hash_table
->num_stm32l4xx_fixes
;
7646 branch
->u
.b
.veneer
= newerr
;
7648 newerr
->next
= sec_data
->stm32l4xx_erratumlist
;
7649 sec_data
->stm32l4xx_erratumlist
= newerr
;
7651 /* A symbol for the return from the veneer. */
7652 sprintf (tmp_name
, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME
"_r",
7653 hash_table
->num_stm32l4xx_fixes
);
7655 myh
= elf_link_hash_lookup
7656 (&(hash_table
)->root
, tmp_name
, FALSE
, FALSE
, FALSE
);
7663 _bfd_generic_link_add_one_symbol (link_info
, branch_bfd
, tmp_name
, BSF_LOCAL
,
7664 branch_sec
, val
, NULL
, TRUE
, FALSE
, &bh
);
7666 myh
= (struct elf_link_hash_entry
*) bh
;
7667 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
7668 myh
->forced_local
= 1;
7672 /* Generate a mapping symbol for the veneer section, and explicitly add an
7673 entry for that symbol to the code/data map for the section. */
7674 if (hash_table
->stm32l4xx_erratum_glue_size
== 0)
7677 /* Creates a THUMB symbol since there is no other choice. */
7678 _bfd_generic_link_add_one_symbol (link_info
,
7679 hash_table
->bfd_of_glue_owner
, "$t",
7680 BSF_LOCAL
, s
, 0, NULL
,
7683 myh
= (struct elf_link_hash_entry
*) bh
;
7684 myh
->type
= ELF_ST_INFO (STB_LOCAL
, STT_NOTYPE
);
7685 myh
->forced_local
= 1;
7687 /* The elf32_arm_init_maps function only cares about symbols from input
7688 BFDs. We must make a note of this generated mapping symbol
7689 ourselves so that code byteswapping works properly in
7690 elf32_arm_write_section. */
7691 elf32_arm_section_map_add (s
, 't', 0);
7694 s
->size
+= veneer_size
;
7695 hash_table
->stm32l4xx_erratum_glue_size
+= veneer_size
;
7696 hash_table
->num_stm32l4xx_fixes
++;
7698 /* The offset of the veneer. */
#define ARM_GLUE_SECTION_FLAGS \
  (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
   | SEC_READONLY | SEC_LINKER_CREATED)

/* Create a fake section for use by the ARM backend of the linker.  */
7709 arm_make_glue_section (bfd
* abfd
, const char * name
)
7713 sec
= bfd_get_linker_section (abfd
, name
);
7718 sec
= bfd_make_section_anyway_with_flags (abfd
, name
, ARM_GLUE_SECTION_FLAGS
);
7721 || !bfd_set_section_alignment (sec
, 2))
7724 /* Set the gc mark to prevent the section from being removed by garbage
7725 collection, despite the fact that no relocs refer to this section. */
/* Set size of .plt entries.  This function is called from the
   linker scripts in ld/emultempl/{armelf}.em.  */

void
bfd_elf32_arm_use_long_plt (void)
{
  elf32_arm_use_long_plt_entry = TRUE;
}
7740 /* Add the glue sections to ABFD. This function is called from the
7741 linker scripts in ld/emultempl/{armelf}.em. */
7744 bfd_elf32_arm_add_glue_sections_to_bfd (bfd
*abfd
,
7745 struct bfd_link_info
*info
)
7747 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (info
);
7748 bfd_boolean dostm32l4xx
= globals
7749 && globals
->stm32l4xx_fix
!= BFD_ARM_STM32L4XX_FIX_NONE
;
7750 bfd_boolean addglue
;
7752 /* If we are only performing a partial
7753 link do not bother adding the glue. */
7754 if (bfd_link_relocatable (info
))
7757 addglue
= arm_make_glue_section (abfd
, ARM2THUMB_GLUE_SECTION_NAME
)
7758 && arm_make_glue_section (abfd
, THUMB2ARM_GLUE_SECTION_NAME
)
7759 && arm_make_glue_section (abfd
, VFP11_ERRATUM_VENEER_SECTION_NAME
)
7760 && arm_make_glue_section (abfd
, ARM_BX_GLUE_SECTION_NAME
);
7766 && arm_make_glue_section (abfd
, STM32L4XX_ERRATUM_VENEER_SECTION_NAME
);
7769 /* Mark output sections of veneers needing a dedicated one with SEC_KEEP. This
7770 ensures they are not marked for deletion by
7771 strip_excluded_output_sections () when veneers are going to be created
7772 later. Not doing so would trigger assert on empty section size in
7773 lang_size_sections_1 (). */
7776 bfd_elf32_arm_keep_private_stub_output_sections (struct bfd_link_info
*info
)
7778 enum elf32_arm_stub_type stub_type
;
7780 /* If we are only performing a partial
7781 link do not bother adding the glue. */
7782 if (bfd_link_relocatable (info
))
7785 for (stub_type
= arm_stub_none
+ 1; stub_type
< max_stub_type
; stub_type
++)
7788 const char *out_sec_name
;
7790 if (!arm_dedicated_stub_output_section_required (stub_type
))
7793 out_sec_name
= arm_dedicated_stub_output_section_name (stub_type
);
7794 out_sec
= bfd_get_section_by_name (info
->output_bfd
, out_sec_name
);
7795 if (out_sec
!= NULL
)
7796 out_sec
->flags
|= SEC_KEEP
;
7800 /* Select a BFD to be used to hold the sections used by the glue code.
7801 This function is called from the linker scripts in ld/emultempl/
7805 bfd_elf32_arm_get_bfd_for_interworking (bfd
*abfd
, struct bfd_link_info
*info
)
7807 struct elf32_arm_link_hash_table
*globals
;
7809 /* If we are only performing a partial link
7810 do not bother getting a bfd to hold the glue. */
7811 if (bfd_link_relocatable (info
))
7814 /* Make sure we don't attach the glue sections to a dynamic object. */
7815 BFD_ASSERT (!(abfd
->flags
& DYNAMIC
));
7817 globals
= elf32_arm_hash_table (info
);
7818 BFD_ASSERT (globals
!= NULL
);
7820 if (globals
->bfd_of_glue_owner
!= NULL
)
7823 /* Save the bfd for later use. */
7824 globals
->bfd_of_glue_owner
= abfd
;
7830 check_use_blx (struct elf32_arm_link_hash_table
*globals
)
7834 cpu_arch
= bfd_elf_get_obj_attr_int (globals
->obfd
, OBJ_ATTR_PROC
,
7837 if (globals
->fix_arm1176
)
7839 if (cpu_arch
== TAG_CPU_ARCH_V6T2
|| cpu_arch
> TAG_CPU_ARCH_V6K
)
7840 globals
->use_blx
= 1;
7844 if (cpu_arch
> TAG_CPU_ARCH_V4T
)
7845 globals
->use_blx
= 1;
7850 bfd_elf32_arm_process_before_allocation (bfd
*abfd
,
7851 struct bfd_link_info
*link_info
)
7853 Elf_Internal_Shdr
*symtab_hdr
;
7854 Elf_Internal_Rela
*internal_relocs
= NULL
;
7855 Elf_Internal_Rela
*irel
, *irelend
;
7856 bfd_byte
*contents
= NULL
;
7859 struct elf32_arm_link_hash_table
*globals
;
7861 /* If we are only performing a partial link do not bother
7862 to construct any glue. */
7863 if (bfd_link_relocatable (link_info
))
7866 /* Here we have a bfd that is to be included on the link. We have a
7867 hook to do reloc rummaging, before section sizes are nailed down. */
7868 globals
= elf32_arm_hash_table (link_info
);
7869 BFD_ASSERT (globals
!= NULL
);
7871 check_use_blx (globals
);
7873 if (globals
->byteswap_code
&& !bfd_big_endian (abfd
))
7875 _bfd_error_handler (_("%pB: BE8 images only valid in big-endian mode"),
7880 /* PR 5398: If we have not decided to include any loadable sections in
7881 the output then we will not have a glue owner bfd. This is OK, it
7882 just means that there is nothing else for us to do here. */
7883 if (globals
->bfd_of_glue_owner
== NULL
)
7886 /* Rummage around all the relocs and map the glue vectors. */
7887 sec
= abfd
->sections
;
7892 for (; sec
!= NULL
; sec
= sec
->next
)
7894 if (sec
->reloc_count
== 0)
7897 if ((sec
->flags
& SEC_EXCLUDE
) != 0)
7900 symtab_hdr
= & elf_symtab_hdr (abfd
);
7902 /* Load the relocs. */
7904 = _bfd_elf_link_read_relocs (abfd
, sec
, NULL
, NULL
, FALSE
);
7906 if (internal_relocs
== NULL
)
7909 irelend
= internal_relocs
+ sec
->reloc_count
;
7910 for (irel
= internal_relocs
; irel
< irelend
; irel
++)
7913 unsigned long r_index
;
7915 struct elf_link_hash_entry
*h
;
7917 r_type
= ELF32_R_TYPE (irel
->r_info
);
7918 r_index
= ELF32_R_SYM (irel
->r_info
);
7920 /* These are the only relocation types we care about. */
7921 if ( r_type
!= R_ARM_PC24
7922 && (r_type
!= R_ARM_V4BX
|| globals
->fix_v4bx
< 2))
7925 /* Get the section contents if we haven't done so already. */
7926 if (contents
== NULL
)
7928 /* Get cached copy if it exists. */
7929 if (elf_section_data (sec
)->this_hdr
.contents
!= NULL
)
7930 contents
= elf_section_data (sec
)->this_hdr
.contents
;
7933 /* Go get them off disk. */
7934 if (! bfd_malloc_and_get_section (abfd
, sec
, &contents
))
7939 if (r_type
== R_ARM_V4BX
)
7943 reg
= bfd_get_32 (abfd
, contents
+ irel
->r_offset
) & 0xf;
7944 record_arm_bx_glue (link_info
, reg
);
7948 /* If the relocation is not against a symbol it cannot concern us. */
7951 /* We don't care about local symbols. */
7952 if (r_index
< symtab_hdr
->sh_info
)
7955 /* This is an external symbol. */
7956 r_index
-= symtab_hdr
->sh_info
;
7957 h
= (struct elf_link_hash_entry
*)
7958 elf_sym_hashes (abfd
)[r_index
];
7960 /* If the relocation is against a static symbol it must be within
7961 the current section and so cannot be a cross ARM/Thumb relocation. */
7965 /* If the call will go through a PLT entry then we do not need
7967 if (globals
->root
.splt
!= NULL
&& h
->plt
.offset
!= (bfd_vma
) -1)
7973 /* This one is a call from arm code. We need to look up
7974 the target of the call. If it is a thumb target, we
7976 if (ARM_GET_SYM_BRANCH_TYPE (h
->target_internal
)
7977 == ST_BRANCH_TO_THUMB
)
7978 record_arm_to_thumb_glue (link_info
, h
);
7986 if (contents
!= NULL
7987 && elf_section_data (sec
)->this_hdr
.contents
!= contents
)
7991 if (internal_relocs
!= NULL
7992 && elf_section_data (sec
)->relocs
!= internal_relocs
)
7993 free (internal_relocs
);
7994 internal_relocs
= NULL
;
8000 if (contents
!= NULL
8001 && elf_section_data (sec
)->this_hdr
.contents
!= contents
)
8003 if (internal_relocs
!= NULL
8004 && elf_section_data (sec
)->relocs
!= internal_relocs
)
8005 free (internal_relocs
);
8012 /* Initialise maps of ARM/Thumb/data for input BFDs. */
8015 bfd_elf32_arm_init_maps (bfd
*abfd
)
8017 Elf_Internal_Sym
*isymbuf
;
8018 Elf_Internal_Shdr
*hdr
;
8019 unsigned int i
, localsyms
;
8021 /* PR 7093: Make sure that we are dealing with an arm elf binary. */
8022 if (! is_arm_elf (abfd
))
8025 if ((abfd
->flags
& DYNAMIC
) != 0)
8028 hdr
= & elf_symtab_hdr (abfd
);
8029 localsyms
= hdr
->sh_info
;
8031 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
8032 should contain the number of local symbols, which should come before any
8033 global symbols. Mapping symbols are always local. */
8034 isymbuf
= bfd_elf_get_elf_syms (abfd
, hdr
, localsyms
, 0, NULL
, NULL
,
8037 /* No internal symbols read? Skip this BFD. */
8038 if (isymbuf
== NULL
)
8041 for (i
= 0; i
< localsyms
; i
++)
8043 Elf_Internal_Sym
*isym
= &isymbuf
[i
];
8044 asection
*sec
= bfd_section_from_elf_index (abfd
, isym
->st_shndx
);
8048 && ELF_ST_BIND (isym
->st_info
) == STB_LOCAL
)
8050 name
= bfd_elf_string_from_elf_section (abfd
,
8051 hdr
->sh_link
, isym
->st_name
);
8053 if (bfd_is_arm_special_symbol_name (name
,
8054 BFD_ARM_SPECIAL_SYM_TYPE_MAP
))
8055 elf32_arm_section_map_add (sec
, name
[1], isym
->st_value
);
8061 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
8062 say what they wanted. */
8065 bfd_elf32_arm_set_cortex_a8_fix (bfd
*obfd
, struct bfd_link_info
*link_info
)
8067 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (link_info
);
8068 obj_attribute
*out_attr
= elf_known_obj_attributes_proc (obfd
);
8070 if (globals
== NULL
)
8073 if (globals
->fix_cortex_a8
== -1)
8075 /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */
8076 if (out_attr
[Tag_CPU_arch
].i
== TAG_CPU_ARCH_V7
8077 && (out_attr
[Tag_CPU_arch_profile
].i
== 'A'
8078 || out_attr
[Tag_CPU_arch_profile
].i
== 0))
8079 globals
->fix_cortex_a8
= 1;
8081 globals
->fix_cortex_a8
= 0;
8087 bfd_elf32_arm_set_vfp11_fix (bfd
*obfd
, struct bfd_link_info
*link_info
)
8089 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (link_info
);
8090 obj_attribute
*out_attr
= elf_known_obj_attributes_proc (obfd
);
8092 if (globals
== NULL
)
8094 /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix. */
8095 if (out_attr
[Tag_CPU_arch
].i
>= TAG_CPU_ARCH_V7
)
8097 switch (globals
->vfp11_fix
)
8099 case BFD_ARM_VFP11_FIX_DEFAULT
:
8100 case BFD_ARM_VFP11_FIX_NONE
:
8101 globals
->vfp11_fix
= BFD_ARM_VFP11_FIX_NONE
;
8105 /* Give a warning, but do as the user requests anyway. */
8106 _bfd_error_handler (_("%pB: warning: selected VFP11 erratum "
8107 "workaround is not necessary for target architecture"), obfd
);
8110 else if (globals
->vfp11_fix
== BFD_ARM_VFP11_FIX_DEFAULT
)
8111 /* For earlier architectures, we might need the workaround, but do not
8112 enable it by default. If users is running with broken hardware, they
8113 must enable the erratum fix explicitly. */
8114 globals
->vfp11_fix
= BFD_ARM_VFP11_FIX_NONE
;
8118 bfd_elf32_arm_set_stm32l4xx_fix (bfd
*obfd
, struct bfd_link_info
*link_info
)
8120 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (link_info
);
8121 obj_attribute
*out_attr
= elf_known_obj_attributes_proc (obfd
);
8123 if (globals
== NULL
)
8126 /* We assume only Cortex-M4 may require the fix. */
8127 if (out_attr
[Tag_CPU_arch
].i
!= TAG_CPU_ARCH_V7E_M
8128 || out_attr
[Tag_CPU_arch_profile
].i
!= 'M')
8130 if (globals
->stm32l4xx_fix
!= BFD_ARM_STM32L4XX_FIX_NONE
)
8131 /* Give a warning, but do as the user requests anyway. */
8133 (_("%pB: warning: selected STM32L4XX erratum "
8134 "workaround is not necessary for target architecture"), obfd
);
8138 enum bfd_arm_vfp11_pipe
/* Return a VFP register number.  This is encoded as RX:X for single-precision
   registers, or X:RX for double-precision registers, where RX is the group of
   four bits in the instruction encoding and X is the single extension bit.
   RX and X fields are specified using their lowest (starting) bit.  The return
   value is:

     0...31: single-precision registers s0...s31
     32...63: double-precision registers d0...d31.

   Although X should be zero for VFP11 (encoding d0...d15 only), we might
   encounter VFP3 instructions, so we allow the full range for DP registers.  */

static unsigned int
bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx,
		     unsigned int x)
{
  if (is_double)
    return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
  else
    return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
}
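/* Worked example (illustrative): the callers below pass rx = 12, x = 22 for
   the Fd field of a data-processing instruction.  With RX = 0x3 and X = 1,
   a single-precision operand is s7 ((0x3 << 1) | 1), while the same fields
   in a double-precision instruction name d19 ((0x3 | (1 << 4)) + 32 == 51,
   i.e. d(51 - 32)).  */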
/* Set bits in *WMASK according to a register number REG as encoded by
   bfd_arm_vfp11_regno().  Ignore d16-d31.  */

static void
bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
{
  if (reg < 32)
    *wmask |= 1 << reg;
  else if (reg < 48)
    *wmask |= 3 << ((reg - 32) * 2);
}
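/* Example: bfd_arm_vfp11_write_mask maps s5 (register number 5) to bit 5 of
   *WMASK, and d1 (register number 33) to bits 2 and 3, i.e. the pair of SP
   halves s2/s3 that alias it; d16-d31 (48 and above) leave the mask alone.  */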
/* Return TRUE if WMASK overwrites anything in REGS.  */

static bfd_boolean
bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
{
  int i;

  for (i = 0; i < numregs; i++)
    {
      unsigned int reg = regs[i];

      if (reg < 32 && (wmask & (1 << reg)) != 0)
	return TRUE;

      reg -= 32;

      if (reg >= 16)
	continue;

      if ((wmask & (3 << (reg * 2))) != 0)
	return TRUE;
    }

  return FALSE;
}
/* In this function, we're interested in two things: finding input registers
   for VFP data-processing instructions, and finding the set of registers which
   arbitrary VFP instructions may write to.  We use a 32-bit unsigned int to
   hold the written set, so FLDM etc. are easy to deal with (we're only
   interested in 32 SP registers or 16 DP registers, due to the VFP version
   implemented by the chip in question).  DP registers are marked by setting
   both SP registers in the write mask.  */
8214 static enum bfd_arm_vfp11_pipe
8215 bfd_arm_vfp11_insn_decode (unsigned int insn
, unsigned int *destmask
, int *regs
,
8218 enum bfd_arm_vfp11_pipe vpipe
= VFP11_BAD
;
8219 bfd_boolean is_double
= ((insn
& 0xf00) == 0xb00) ? 1 : 0;
8221 if ((insn
& 0x0f000e10) == 0x0e000a00) /* A data-processing insn. */
8224 unsigned int fd
= bfd_arm_vfp11_regno (insn
, is_double
, 12, 22);
8225 unsigned int fm
= bfd_arm_vfp11_regno (insn
, is_double
, 0, 5);
8227 pqrs
= ((insn
& 0x00800000) >> 20)
8228 | ((insn
& 0x00300000) >> 19)
8229 | ((insn
& 0x00000040) >> 6);
8233 case 0: /* fmac[sd]. */
8234 case 1: /* fnmac[sd]. */
8235 case 2: /* fmsc[sd]. */
8236 case 3: /* fnmsc[sd]. */
8238 bfd_arm_vfp11_write_mask (destmask
, fd
);
8240 regs
[1] = bfd_arm_vfp11_regno (insn
, is_double
, 16, 7); /* Fn. */
8245 case 4: /* fmul[sd]. */
8246 case 5: /* fnmul[sd]. */
8247 case 6: /* fadd[sd]. */
8248 case 7: /* fsub[sd]. */
8252 case 8: /* fdiv[sd]. */
8255 bfd_arm_vfp11_write_mask (destmask
, fd
);
8256 regs
[0] = bfd_arm_vfp11_regno (insn
, is_double
, 16, 7); /* Fn. */
8261 case 15: /* extended opcode. */
8263 unsigned int extn
= ((insn
>> 15) & 0x1e)
8264 | ((insn
>> 7) & 1);
8268 case 0: /* fcpy[sd]. */
8269 case 1: /* fabs[sd]. */
8270 case 2: /* fneg[sd]. */
8271 case 8: /* fcmp[sd]. */
8272 case 9: /* fcmpe[sd]. */
8273 case 10: /* fcmpz[sd]. */
8274 case 11: /* fcmpez[sd]. */
8275 case 16: /* fuito[sd]. */
8276 case 17: /* fsito[sd]. */
8277 case 24: /* ftoui[sd]. */
8278 case 25: /* ftouiz[sd]. */
8279 case 26: /* ftosi[sd]. */
8280 case 27: /* ftosiz[sd]. */
8281 /* These instructions will not bounce due to underflow. */
8286 case 3: /* fsqrt[sd]. */
8287 /* fsqrt cannot underflow, but it can (perhaps) overwrite
8288 registers to cause the erratum in previous instructions. */
8289 bfd_arm_vfp11_write_mask (destmask
, fd
);
8293 case 15: /* fcvt{ds,sd}. */
8297 bfd_arm_vfp11_write_mask (destmask
, fd
);
8299 /* Only FCVTSD can underflow. */
8300 if ((insn
& 0x100) != 0)
8319 /* Two-register transfer. */
8320 else if ((insn
& 0x0fe00ed0) == 0x0c400a10)
8322 unsigned int fm
= bfd_arm_vfp11_regno (insn
, is_double
, 0, 5);
8324 if ((insn
& 0x100000) == 0)
8327 bfd_arm_vfp11_write_mask (destmask
, fm
);
8330 bfd_arm_vfp11_write_mask (destmask
, fm
);
8331 bfd_arm_vfp11_write_mask (destmask
, fm
+ 1);
8337 else if ((insn
& 0x0e100e00) == 0x0c100a00) /* A load insn. */
8339 int fd
= bfd_arm_vfp11_regno (insn
, is_double
, 12, 22);
8340 unsigned int puw
= ((insn
>> 21) & 0x1) | (((insn
>> 23) & 3) << 1);
8344 case 0: /* Two-reg transfer. We should catch these above. */
8347 case 2: /* fldm[sdx]. */
8351 unsigned int i
, offset
= insn
& 0xff;
8356 for (i
= fd
; i
< fd
+ offset
; i
++)
8357 bfd_arm_vfp11_write_mask (destmask
, i
);
8361 case 4: /* fld[sd]. */
8363 bfd_arm_vfp11_write_mask (destmask
, fd
);
8372 /* Single-register transfer. Note L==0. */
8373 else if ((insn
& 0x0f100e10) == 0x0e000a10)
8375 unsigned int opcode
= (insn
>> 21) & 7;
8376 unsigned int fn
= bfd_arm_vfp11_regno (insn
, is_double
, 16, 7);
8380 case 0: /* fmsr/fmdlr. */
8381 case 1: /* fmdhr. */
8382 /* Mark fmdhr and fmdlr as writing to the whole of the DP
8383 destination register. I don't know if this is exactly right,
8384 but it is the conservative choice. */
8385 bfd_arm_vfp11_write_mask (destmask
, fn
);
8399 static int elf32_arm_compare_mapping (const void * a
, const void * b
);
8402 /* Look for potentially-troublesome code sequences which might trigger the
8403 VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet
8404 (available from ARM) for details of the erratum. A short version is
8405 described in ld.texinfo. */
8408 bfd_elf32_arm_vfp11_erratum_scan (bfd
*abfd
, struct bfd_link_info
*link_info
)
8411 bfd_byte
*contents
= NULL
;
8413 int regs
[3], numregs
= 0;
8414 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (link_info
);
8415 int use_vector
= (globals
->vfp11_fix
== BFD_ARM_VFP11_FIX_VECTOR
);
8417 if (globals
== NULL
)
8420 /* We use a simple FSM to match troublesome VFP11 instruction sequences.
8421 The states transition as follows:
8423 0 -> 1 (vector) or 0 -> 2 (scalar)
8424 A VFP FMAC-pipeline instruction has been seen. Fill
8425 regs[0]..regs[numregs-1] with its input operands. Remember this
8426 instruction in 'first_fmac'.
8429 Any instruction, except for a VFP instruction which overwrites
8434 A VFP instruction has been seen which overwrites any of regs[*].
8435 We must make a veneer! Reset state to 0 before examining next
8439 If we fail to match anything in state 2, reset to state 0 and reset
8440 the instruction pointer to the instruction after 'first_fmac'.
8442 If the VFP11 vector mode is in use, there must be at least two unrelated
8443 instructions between anti-dependent VFP11 instructions to properly avoid
8444 triggering the erratum, hence the use of the extra state 1. */
8446 /* If we are only performing a partial link do not bother
8447 to construct any glue. */
8448 if (bfd_link_relocatable (link_info
))
8451 /* Skip if this bfd does not correspond to an ELF image. */
8452 if (! is_arm_elf (abfd
))
8455 /* We should have chosen a fix type by the time we get here. */
8456 BFD_ASSERT (globals
->vfp11_fix
!= BFD_ARM_VFP11_FIX_DEFAULT
);
8458 if (globals
->vfp11_fix
== BFD_ARM_VFP11_FIX_NONE
)
8461 /* Skip this BFD if it corresponds to an executable or dynamic object. */
8462 if ((abfd
->flags
& (EXEC_P
| DYNAMIC
)) != 0)
8465 for (sec
= abfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
8467 unsigned int i
, span
, first_fmac
= 0, veneer_of_insn
= 0;
8468 struct _arm_elf_section_data
*sec_data
;
8470 /* If we don't have executable progbits, we're not interested in this
8471 section. Also skip if section is to be excluded. */
8472 if (elf_section_type (sec
) != SHT_PROGBITS
8473 || (elf_section_flags (sec
) & SHF_EXECINSTR
) == 0
8474 || (sec
->flags
& SEC_EXCLUDE
) != 0
8475 || sec
->sec_info_type
== SEC_INFO_TYPE_JUST_SYMS
8476 || sec
->output_section
== bfd_abs_section_ptr
8477 || strcmp (sec
->name
, VFP11_ERRATUM_VENEER_SECTION_NAME
) == 0)
8480 sec_data
= elf32_arm_section_data (sec
);
8482 if (sec_data
->mapcount
== 0)
8485 if (elf_section_data (sec
)->this_hdr
.contents
!= NULL
)
8486 contents
= elf_section_data (sec
)->this_hdr
.contents
;
8487 else if (! bfd_malloc_and_get_section (abfd
, sec
, &contents
))
8490 qsort (sec_data
->map
, sec_data
->mapcount
, sizeof (elf32_arm_section_map
),
8491 elf32_arm_compare_mapping
);
8493 for (span
= 0; span
< sec_data
->mapcount
; span
++)
8495 unsigned int span_start
= sec_data
->map
[span
].vma
;
8496 unsigned int span_end
= (span
== sec_data
->mapcount
- 1)
8497 ? sec
->size
: sec_data
->map
[span
+ 1].vma
;
8498 char span_type
= sec_data
->map
[span
].type
;
8500 /* FIXME: Only ARM mode is supported at present. We may need to
8501 support Thumb-2 mode also at some point. */
8502 if (span_type
!= 'a')
8505 for (i
= span_start
; i
< span_end
;)
8507 unsigned int next_i
= i
+ 4;
8508 unsigned int insn
= bfd_big_endian (abfd
)
8509 ? (((unsigned) contents
[i
] << 24)
8510 | (contents
[i
+ 1] << 16)
8511 | (contents
[i
+ 2] << 8)
8513 : (((unsigned) contents
[i
+ 3] << 24)
8514 | (contents
[i
+ 2] << 16)
8515 | (contents
[i
+ 1] << 8)
8517 unsigned int writemask
= 0;
8518 enum bfd_arm_vfp11_pipe vpipe
;
8523 vpipe
= bfd_arm_vfp11_insn_decode (insn
, &writemask
, regs
,
8525 /* I'm assuming the VFP11 erratum can trigger with denorm
8526 operands on either the FMAC or the DS pipeline. This might
8527 lead to slightly overenthusiastic veneer insertion. */
8528 if (vpipe
== VFP11_FMAC
|| vpipe
== VFP11_DS
)
8530 state
= use_vector
? 1 : 2;
8532 veneer_of_insn
= insn
;
8538 int other_regs
[3], other_numregs
;
8539 vpipe
= bfd_arm_vfp11_insn_decode (insn
, &writemask
,
8542 if (vpipe
!= VFP11_BAD
8543 && bfd_arm_vfp11_antidependency (writemask
, regs
,
8553 int other_regs
[3], other_numregs
;
8554 vpipe
= bfd_arm_vfp11_insn_decode (insn
, &writemask
,
8557 if (vpipe
!= VFP11_BAD
8558 && bfd_arm_vfp11_antidependency (writemask
, regs
,
8564 next_i
= first_fmac
+ 4;
8570 abort (); /* Should be unreachable. */
8575 elf32_vfp11_erratum_list
*newerr
=(elf32_vfp11_erratum_list
*)
8576 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list
));
8578 elf32_arm_section_data (sec
)->erratumcount
+= 1;
8580 newerr
->u
.b
.vfp_insn
= veneer_of_insn
;
8585 newerr
->type
= VFP11_ERRATUM_BRANCH_TO_ARM_VENEER
;
8592 record_vfp11_erratum_veneer (link_info
, newerr
, abfd
, sec
,
8597 newerr
->next
= sec_data
->erratumlist
;
8598 sec_data
->erratumlist
= newerr
;
8607 if (contents
!= NULL
8608 && elf_section_data (sec
)->this_hdr
.contents
!= contents
)
8616 if (contents
!= NULL
8617 && elf_section_data (sec
)->this_hdr
.contents
!= contents
)
8623 /* Find virtual-memory addresses for VFP11 erratum veneers and return locations
8624 after sections have been laid out, using specially-named symbols. */
8627 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd
*abfd
,
8628 struct bfd_link_info
*link_info
)
8631 struct elf32_arm_link_hash_table
*globals
;
8634 if (bfd_link_relocatable (link_info
))
8637 /* Skip if this bfd does not correspond to an ELF image. */
8638 if (! is_arm_elf (abfd
))
8641 globals
= elf32_arm_hash_table (link_info
);
8642 if (globals
== NULL
)
8645 tmp_name
= (char *) bfd_malloc ((bfd_size_type
) strlen
8646 (VFP11_ERRATUM_VENEER_ENTRY_NAME
) + 10);
8648 for (sec
= abfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
8650 struct _arm_elf_section_data
*sec_data
= elf32_arm_section_data (sec
);
8651 elf32_vfp11_erratum_list
*errnode
= sec_data
->erratumlist
;
8653 for (; errnode
!= NULL
; errnode
= errnode
->next
)
8655 struct elf_link_hash_entry
*myh
;
8658 switch (errnode
->type
)
8660 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER
:
8661 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER
:
8662 /* Find veneer symbol. */
8663 sprintf (tmp_name
, VFP11_ERRATUM_VENEER_ENTRY_NAME
,
8664 errnode
->u
.b
.veneer
->u
.v
.id
);
8666 myh
= elf_link_hash_lookup
8667 (&(globals
)->root
, tmp_name
, FALSE
, FALSE
, TRUE
);
8670 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8671 abfd
, "VFP11", tmp_name
);
8673 vma
= myh
->root
.u
.def
.section
->output_section
->vma
8674 + myh
->root
.u
.def
.section
->output_offset
8675 + myh
->root
.u
.def
.value
;
8677 errnode
->u
.b
.veneer
->vma
= vma
;
8680 case VFP11_ERRATUM_ARM_VENEER
:
8681 case VFP11_ERRATUM_THUMB_VENEER
:
8682 /* Find return location. */
8683 sprintf (tmp_name
, VFP11_ERRATUM_VENEER_ENTRY_NAME
"_r",
8686 myh
= elf_link_hash_lookup
8687 (&(globals
)->root
, tmp_name
, FALSE
, FALSE
, TRUE
);
8690 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8691 abfd
, "VFP11", tmp_name
);
8693 vma
= myh
->root
.u
.def
.section
->output_section
->vma
8694 + myh
->root
.u
.def
.section
->output_offset
8695 + myh
->root
.u
.def
.value
;
8697 errnode
->u
.v
.branch
->vma
= vma
;
8709 /* Find virtual-memory addresses for STM32L4XX erratum veneers and
8710 return locations after sections have been laid out, using
8711 specially-named symbols. */
8714 bfd_elf32_arm_stm32l4xx_fix_veneer_locations (bfd
*abfd
,
8715 struct bfd_link_info
*link_info
)
8718 struct elf32_arm_link_hash_table
*globals
;
8721 if (bfd_link_relocatable (link_info
))
8724 /* Skip if this bfd does not correspond to an ELF image. */
8725 if (! is_arm_elf (abfd
))
8728 globals
= elf32_arm_hash_table (link_info
);
8729 if (globals
== NULL
)
8732 tmp_name
= (char *) bfd_malloc ((bfd_size_type
) strlen
8733 (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME
) + 10);
8735 for (sec
= abfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
8737 struct _arm_elf_section_data
*sec_data
= elf32_arm_section_data (sec
);
8738 elf32_stm32l4xx_erratum_list
*errnode
= sec_data
->stm32l4xx_erratumlist
;
8740 for (; errnode
!= NULL
; errnode
= errnode
->next
)
8742 struct elf_link_hash_entry
*myh
;
8745 switch (errnode
->type
)
8747 case STM32L4XX_ERRATUM_BRANCH_TO_VENEER
:
8748 /* Find veneer symbol. */
8749 sprintf (tmp_name
, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME
,
8750 errnode
->u
.b
.veneer
->u
.v
.id
);
8752 myh
= elf_link_hash_lookup
8753 (&(globals
)->root
, tmp_name
, FALSE
, FALSE
, TRUE
);
8756 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8757 abfd
, "STM32L4XX", tmp_name
);
8759 vma
= myh
->root
.u
.def
.section
->output_section
->vma
8760 + myh
->root
.u
.def
.section
->output_offset
8761 + myh
->root
.u
.def
.value
;
8763 errnode
->u
.b
.veneer
->vma
= vma
;
8766 case STM32L4XX_ERRATUM_VENEER
:
8767 /* Find return location. */
8768 sprintf (tmp_name
, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME
"_r",
8771 myh
= elf_link_hash_lookup
8772 (&(globals
)->root
, tmp_name
, FALSE
, FALSE
, TRUE
);
8775 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8776 abfd
, "STM32L4XX", tmp_name
);
8778 vma
= myh
->root
.u
.def
.section
->output_section
->vma
8779 + myh
->root
.u
.def
.section
->output_offset
8780 + myh
->root
.u
.def
.value
;
8782 errnode
->u
.v
.branch
->vma
= vma
;
static inline bfd_boolean
is_thumb2_ldmia (const insn32 insn)
{
  /* Encoding T2: LDM<c>.W <Rn>{!},<registers>
     1110 - 1000 - 10W1 - rrrr - PM (0) l - llll - llll - llll.  */
  return (insn & 0xffd02000) == 0xe8900000;
}

static inline bfd_boolean
is_thumb2_ldmdb (const insn32 insn)
{
  /* Encoding T1: LDMDB<c> <Rn>{!},<registers>
     1110 - 1001 - 00W1 - rrrr - PM (0) l - llll - llll - llll.  */
  return (insn & 0xffd02000) == 0xe9100000;
}

static inline bfd_boolean
is_thumb2_vldm (const insn32 insn)
{
  /* A6.5 Extension register load or store instruction
     We look for SP 32-bit and DP 64-bit registers.
     Encoding T1 VLDM{mode}<c> <Rn>{!}, <list>
     <list> is consecutive 64-bit registers
     1110 - 110P - UDW1 - rrrr - vvvv - 1011 - iiii - iiii
     Encoding T2 VLDM{mode}<c> <Rn>{!}, <list>
     <list> is consecutive 32-bit registers
     1110 - 110P - UDW1 - rrrr - vvvv - 1010 - iiii - iiii
     if P==0 && U==1 && W==1 && Rn=1101 VPOP
     if PUW=010 || PUW=011 || PUW=101 VLDM.  */
  return
    (((insn & 0xfe100f00) == 0xec100b00) ||
     ((insn & 0xfe100f00) == 0xec100a00))
    && /* (IA without !).  */
    (((((insn << 7) >> 28) & 0xd) == 0x4)
     /* (IA with !), includes VPOP (when reg number is SP).  */
     || ((((insn << 7) >> 28) & 0xd) == 0x5)
     /* (DB with !).  */
     || ((((insn << 7) >> 28) & 0xd) == 0x9));
}
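/* Example: is_thumb2_vldm accepts 0xec900b04 (VLDMIA r0, {d0-d1}): the opcode
   test gives 0xec900b04 & 0xfe100f00 == 0xec100b00, and the extracted PUW
   bits ((insn << 7) >> 28) & 0xd evaluate to 0x4, i.e. IA without
   writeback.  */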
/* STM STM32L4XX erratum : This function assumes that it receives an LDM or
   VLDM instruction and:
   - computes the number and the mode of memory accesses
   - decides if the replacement should be done:
     . replaces only if > 8-word accesses
     . or (testing purposes only) replaces all accesses.  */

static bfd_boolean
stm32l4xx_need_create_replacing_stub (const insn32 insn,
				      bfd_arm_stm32l4xx_fix stm32l4xx_fix)
{
  int nb_words = 0;

  /* The field encoding the register list is the same for both LDMIA
     and LDMDB encodings.  */
  if (is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn))
    nb_words = elf32_arm_popcount (insn & 0x0000ffff);
  else if (is_thumb2_vldm (insn))
    nb_words = (insn & 0xff);

  /* DEFAULT mode accounts for the real bug condition situation,
     ALL mode inserts stubs for each LDM/VLDM instruction (testing).  */
  return
    (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_DEFAULT) ? nb_words > 8 :
    (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_ALL) ? TRUE : FALSE;
}
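/* Example: LDMIA.W r0, {r1-r9} encodes as 0xe89003fe; it matches
   is_thumb2_ldmia, its register list 0x03fe has a popcount of 9, and since
   9 > 8 the DEFAULT mode asks stm32l4xx_need_create_replacing_stub for a
   replacing stub.  A VLDM of two double registers (imm8 == 4, i.e. four
   words) would not.  */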
8862 /* Look for potentially-troublesome code sequences which might trigger
8863 the STM STM32L4XX erratum. */
8866 bfd_elf32_arm_stm32l4xx_erratum_scan (bfd
*abfd
,
8867 struct bfd_link_info
*link_info
)
8870 bfd_byte
*contents
= NULL
;
8871 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (link_info
);
8873 if (globals
== NULL
)
8876 /* If we are only performing a partial link do not bother
8877 to construct any glue. */
8878 if (bfd_link_relocatable (link_info
))
8881 /* Skip if this bfd does not correspond to an ELF image. */
8882 if (! is_arm_elf (abfd
))
8885 if (globals
->stm32l4xx_fix
== BFD_ARM_STM32L4XX_FIX_NONE
)
8888 /* Skip this BFD if it corresponds to an executable or dynamic object. */
8889 if ((abfd
->flags
& (EXEC_P
| DYNAMIC
)) != 0)
8892 for (sec
= abfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
8894 unsigned int i
, span
;
8895 struct _arm_elf_section_data
*sec_data
;
8897 /* If we don't have executable progbits, we're not interested in this
8898 section. Also skip if section is to be excluded. */
8899 if (elf_section_type (sec
) != SHT_PROGBITS
8900 || (elf_section_flags (sec
) & SHF_EXECINSTR
) == 0
8901 || (sec
->flags
& SEC_EXCLUDE
) != 0
8902 || sec
->sec_info_type
== SEC_INFO_TYPE_JUST_SYMS
8903 || sec
->output_section
== bfd_abs_section_ptr
8904 || strcmp (sec
->name
, STM32L4XX_ERRATUM_VENEER_SECTION_NAME
) == 0)
8907 sec_data
= elf32_arm_section_data (sec
);
8909 if (sec_data
->mapcount
== 0)
8912 if (elf_section_data (sec
)->this_hdr
.contents
!= NULL
)
8913 contents
= elf_section_data (sec
)->this_hdr
.contents
;
8914 else if (! bfd_malloc_and_get_section (abfd
, sec
, &contents
))
8917 qsort (sec_data
->map
, sec_data
->mapcount
, sizeof (elf32_arm_section_map
),
8918 elf32_arm_compare_mapping
);
8920 for (span
= 0; span
< sec_data
->mapcount
; span
++)
8922 unsigned int span_start
= sec_data
->map
[span
].vma
;
8923 unsigned int span_end
= (span
== sec_data
->mapcount
- 1)
8924 ? sec
->size
: sec_data
->map
[span
+ 1].vma
;
8925 char span_type
= sec_data
->map
[span
].type
;
8926 int itblock_current_pos
= 0;
8928 /* Only Thumb2 mode need be supported with this CM4 specific
8929 code, we should not encounter any arm mode eg span_type
8931 if (span_type
!= 't')
8934 for (i
= span_start
; i
< span_end
;)
8936 unsigned int insn
= bfd_get_16 (abfd
, &contents
[i
]);
8937 bfd_boolean insn_32bit
= FALSE
;
8938 bfd_boolean is_ldm
= FALSE
;
8939 bfd_boolean is_vldm
= FALSE
;
8940 bfd_boolean is_not_last_in_it_block
= FALSE
;
8942 /* The first 16-bits of all 32-bit thumb2 instructions start
8943 with opcode[15..13]=0b111 and the encoded op1 can be anything
8944 except opcode[12..11]!=0b00.
8945 See 32-bit Thumb instruction encoding. */
8946 if ((insn
& 0xe000) == 0xe000 && (insn
& 0x1800) != 0x0000)
8949 /* Compute the predicate that tells if the instruction
8950 is concerned by the IT block
8951 - Creates an error if there is a ldm that is not
8952 last in the IT block thus cannot be replaced
8953 - Otherwise we can create a branch at the end of the
8954 IT block, it will be controlled naturally by IT
8955 with the proper pseudo-predicate
8956 - So the only interesting predicate is the one that
8957 tells that we are not on the last item of an IT
8959 if (itblock_current_pos
!= 0)
8960 is_not_last_in_it_block
= !!--itblock_current_pos
;
8964 /* Load the rest of the insn (in manual-friendly order). */
8965 insn
= (insn
<< 16) | bfd_get_16 (abfd
, &contents
[i
+ 2]);
8966 is_ldm
= is_thumb2_ldmia (insn
) || is_thumb2_ldmdb (insn
);
8967 is_vldm
= is_thumb2_vldm (insn
);
8969 /* Veneers are created for (v)ldm depending on
8970 option flags and memory accesses conditions; but
8971 if the instruction is not the last instruction of
8972 an IT block, we cannot create a jump there, so we
8974 if ((is_ldm
|| is_vldm
)
8975 && stm32l4xx_need_create_replacing_stub
8976 (insn
, globals
->stm32l4xx_fix
))
8978 if (is_not_last_in_it_block
)
8981 /* xgettext:c-format */
8982 (_("%pB(%pA+%#x): error: multiple load detected"
8983 " in non-last IT block instruction:"
8984 " STM32L4XX veneer cannot be generated; "
8985 "use gcc option -mrestrict-it to generate"
8986 " only one instruction per IT block"),
8991 elf32_stm32l4xx_erratum_list
*newerr
=
8992 (elf32_stm32l4xx_erratum_list
*)
8994 (sizeof (elf32_stm32l4xx_erratum_list
));
8996 elf32_arm_section_data (sec
)
8997 ->stm32l4xx_erratumcount
+= 1;
8998 newerr
->u
.b
.insn
= insn
;
8999 /* We create only thumb branches. */
9001 STM32L4XX_ERRATUM_BRANCH_TO_VENEER
;
9002 record_stm32l4xx_erratum_veneer
9003 (link_info
, newerr
, abfd
, sec
,
9006 STM32L4XX_ERRATUM_LDM_VENEER_SIZE
:
9007 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE
);
9009 newerr
->next
= sec_data
->stm32l4xx_erratumlist
;
9010 sec_data
->stm32l4xx_erratumlist
= newerr
;
9017 IT blocks are only encoded in T1
9018 Encoding T1: IT{x{y{z}}} <firstcond>
9019 1 0 1 1 - 1 1 1 1 - firstcond - mask
9020 if mask = '0000' then see 'related encodings'
9021 We don't deal with UNPREDICTABLE, just ignore these.
9022 There can be no nested IT blocks so an IT block
9023 is naturally a new one for which it is worth
9024 computing its size. */
9025 bfd_boolean is_newitblock
= ((insn
& 0xff00) == 0xbf00)
9026 && ((insn
& 0x000f) != 0x0000);
9027 /* If we have a new IT block we compute its size. */
9030 /* Compute the number of instructions controlled
9031 by the IT block, it will be used to decide
9032 whether we are inside an IT block or not. */
9033 unsigned int mask
= insn
& 0x000f;
9034 itblock_current_pos
= 4 - ctz (mask
);
9038 i
+= insn_32bit
? 4 : 2;
9042 if (contents
!= NULL
9043 && elf_section_data (sec
)->this_hdr
.contents
!= contents
)
9051 if (contents
!= NULL
9052 && elf_section_data (sec
)->this_hdr
.contents
!= contents
)
/* Set target relocation values needed during linking.  */

void
bfd_elf32_arm_set_target_params (struct bfd *output_bfd,
				 struct bfd_link_info *link_info,
				 struct elf32_arm_params *params)
{
  struct elf32_arm_link_hash_table *globals;

  globals = elf32_arm_hash_table (link_info);
  if (globals == NULL)
    return;

  globals->target1_is_rel = params->target1_is_rel;
  if (globals->fdpic_p)
    globals->target2_reloc = R_ARM_GOT32;
  else if (strcmp (params->target2_type, "rel") == 0)
    globals->target2_reloc = R_ARM_REL32;
  else if (strcmp (params->target2_type, "abs") == 0)
    globals->target2_reloc = R_ARM_ABS32;
  else if (strcmp (params->target2_type, "got-rel") == 0)
    globals->target2_reloc = R_ARM_GOT_PREL;
  else
    _bfd_error_handler (_("invalid TARGET2 relocation type '%s'"),
			params->target2_type);
  globals->fix_v4bx = params->fix_v4bx;
  globals->use_blx |= params->use_blx;
  globals->vfp11_fix = params->vfp11_denorm_fix;
  globals->stm32l4xx_fix = params->stm32l4xx_fix;
  if (globals->fdpic_p)
    globals->pic_veneer = 1;
  else
    globals->pic_veneer = params->pic_veneer;
  globals->fix_cortex_a8 = params->fix_cortex_a8;
  globals->fix_arm1176 = params->fix_arm1176;
  globals->cmse_implib = params->cmse_implib;
  globals->in_implib_bfd = params->in_implib_bfd;

  BFD_ASSERT (is_arm_elf (output_bfd));
  elf_arm_tdata (output_bfd)->no_enum_size_warning
    = params->no_enum_size_warning;
  elf_arm_tdata (output_bfd)->no_wchar_size_warning
    = params->no_wchar_size_warning;
}
/* Replace the target offset of a Thumb bl or b.w instruction.  */

static void
insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
{
  bfd_vma upper, lower;
  int reloc_sign;

  BFD_ASSERT ((offset & 1) == 0);

  upper = bfd_get_16 (abfd, insn);
  lower = bfd_get_16 (abfd, insn + 2);
  reloc_sign = (offset < 0) ? 1 : 0;
  upper = (upper & ~(bfd_vma) 0x7ff)
	  | ((offset >> 12) & 0x3ff)
	  | (reloc_sign << 10);
  lower = (lower & ~(bfd_vma) 0x2fff)
	  | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
	  | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
	  | ((offset >> 1) & 0x7ff);
  bfd_put_16 (abfd, upper, insn);
  bfd_put_16 (abfd, lower, insn + 2);
}
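/* The bit manipulation in insert_thumb_branch follows the Thumb-2 BL/B.W
   encoding: the upper halfword receives S (the sign of the offset) at bit 10
   and imm10 (offset[21:12]) in bits 9:0; the lower halfword receives J1 at
   bit 13 and J2 at bit 11, each computed as (~I) ^ S from offset bits 23 and
   22, plus imm11 (offset[11:1]) in bits 10:0.  */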
9130 /* Thumb code calling an ARM function. */
9133 elf32_thumb_to_arm_stub (struct bfd_link_info
* info
,
9137 asection
* input_section
,
9138 bfd_byte
* hit_data
,
9141 bfd_signed_vma addend
,
9143 char **error_message
)
9147 long int ret_offset
;
9148 struct elf_link_hash_entry
* myh
;
9149 struct elf32_arm_link_hash_table
* globals
;
9151 myh
= find_thumb_glue (info
, name
, error_message
);
9155 globals
= elf32_arm_hash_table (info
);
9156 BFD_ASSERT (globals
!= NULL
);
9157 BFD_ASSERT (globals
->bfd_of_glue_owner
!= NULL
);
9159 my_offset
= myh
->root
.u
.def
.value
;
9161 s
= bfd_get_linker_section (globals
->bfd_of_glue_owner
,
9162 THUMB2ARM_GLUE_SECTION_NAME
);
9164 BFD_ASSERT (s
!= NULL
);
9165 BFD_ASSERT (s
->contents
!= NULL
);
9166 BFD_ASSERT (s
->output_section
!= NULL
);
9168 if ((my_offset
& 0x01) == 0x01)
9171 && sym_sec
->owner
!= NULL
9172 && !INTERWORK_FLAG (sym_sec
->owner
))
9175 (_("%pB(%s): warning: interworking not enabled;"
9176 " first occurrence: %pB: %s call to %s"),
9177 sym_sec
->owner
, name
, input_bfd
, "Thumb", "ARM");
9183 myh
->root
.u
.def
.value
= my_offset
;
9185 put_thumb_insn (globals
, output_bfd
, (bfd_vma
) t2a1_bx_pc_insn
,
9186 s
->contents
+ my_offset
);
9188 put_thumb_insn (globals
, output_bfd
, (bfd_vma
) t2a2_noop_insn
,
9189 s
->contents
+ my_offset
+ 2);
9192 /* Address of destination of the stub. */
9193 ((bfd_signed_vma
) val
)
9195 /* Offset from the start of the current section
9196 to the start of the stubs. */
9198 /* Offset of the start of this stub from the start of the stubs. */
9200 /* Address of the start of the current section. */
9201 + s
->output_section
->vma
)
9202 /* The branch instruction is 4 bytes into the stub. */
9204 /* ARM branches work from the pc of the instruction + 8. */
9207 put_arm_insn (globals
, output_bfd
,
9208 (bfd_vma
) t2a3_b_insn
| ((ret_offset
>> 2) & 0x00FFFFFF),
9209 s
->contents
+ my_offset
+ 4);
9212 BFD_ASSERT (my_offset
<= globals
->thumb_glue_size
);
9214 /* Now go back and fix up the original BL insn to point to here. */
9216 /* Address of where the stub is located. */
9217 (s
->output_section
->vma
+ s
->output_offset
+ my_offset
)
9218 /* Address of where the BL is located. */
9219 - (input_section
->output_section
->vma
+ input_section
->output_offset
9221 /* Addend in the relocation. */
9223 /* Biassing for PC-relative addressing. */
9226 insert_thumb_branch (input_bfd
, ret_offset
, hit_data
- input_section
->vma
);
9231 /* Populate an Arm to Thumb stub. Returns the stub symbol. */
9233 static struct elf_link_hash_entry
*
9234 elf32_arm_create_thumb_stub (struct bfd_link_info
* info
,
9241 char ** error_message
)
9244 long int ret_offset
;
9245 struct elf_link_hash_entry
* myh
;
9246 struct elf32_arm_link_hash_table
* globals
;
9248 myh
= find_arm_glue (info
, name
, error_message
);
9252 globals
= elf32_arm_hash_table (info
);
9253 BFD_ASSERT (globals
!= NULL
);
9254 BFD_ASSERT (globals
->bfd_of_glue_owner
!= NULL
);
9256 my_offset
= myh
->root
.u
.def
.value
;
9258 if ((my_offset
& 0x01) == 0x01)
9261 && sym_sec
->owner
!= NULL
9262 && !INTERWORK_FLAG (sym_sec
->owner
))
9265 (_("%pB(%s): warning: interworking not enabled;"
9266 " first occurrence: %pB: %s call to %s"),
9267 sym_sec
->owner
, name
, input_bfd
, "ARM", "Thumb");
9271 myh
->root
.u
.def
.value
= my_offset
;
9273 if (bfd_link_pic (info
)
9274 || globals
->root
.is_relocatable_executable
9275 || globals
->pic_veneer
)
9277 /* For relocatable objects we can't use absolute addresses,
9278 so construct the address from a relative offset. */
9279 /* TODO: If the offset is small it's probably worth
9280 constructing the address with adds. */
9281 put_arm_insn (globals
, output_bfd
, (bfd_vma
) a2t1p_ldr_insn
,
9282 s
->contents
+ my_offset
);
9283 put_arm_insn (globals
, output_bfd
, (bfd_vma
) a2t2p_add_pc_insn
,
9284 s
->contents
+ my_offset
+ 4);
9285 put_arm_insn (globals
, output_bfd
, (bfd_vma
) a2t3p_bx_r12_insn
,
9286 s
->contents
+ my_offset
+ 8);
9287 /* Adjust the offset by 4 for the position of the add,
9288 and 8 for the pipeline offset. */
9289 ret_offset
= (val
- (s
->output_offset
9290 + s
->output_section
->vma
9293 bfd_put_32 (output_bfd
, ret_offset
,
9294 s
->contents
+ my_offset
+ 12);
9296 else if (globals
->use_blx
)
9298 put_arm_insn (globals
, output_bfd
, (bfd_vma
) a2t1v5_ldr_insn
,
9299 s
->contents
+ my_offset
);
9301 /* It's a thumb address. Add the low order bit. */
9302 bfd_put_32 (output_bfd
, val
| a2t2v5_func_addr_insn
,
9303 s
->contents
+ my_offset
+ 4);
9307 put_arm_insn (globals
, output_bfd
, (bfd_vma
) a2t1_ldr_insn
,
9308 s
->contents
+ my_offset
);
9310 put_arm_insn (globals
, output_bfd
, (bfd_vma
) a2t2_bx_r12_insn
,
9311 s
->contents
+ my_offset
+ 4);
9313 /* It's a thumb address. Add the low order bit. */
9314 bfd_put_32 (output_bfd
, val
| a2t3_func_addr_insn
,
9315 s
->contents
+ my_offset
+ 8);
9321 BFD_ASSERT (my_offset
<= globals
->arm_glue_size
);
9326 /* Arm code calling a Thumb function. */
9329 elf32_arm_to_thumb_stub (struct bfd_link_info
* info
,
9333 asection
* input_section
,
9334 bfd_byte
* hit_data
,
9337 bfd_signed_vma addend
,
9339 char **error_message
)
9341 unsigned long int tmp
;
9344 long int ret_offset
;
9345 struct elf_link_hash_entry
* myh
;
9346 struct elf32_arm_link_hash_table
* globals
;
9348 globals
= elf32_arm_hash_table (info
);
9349 BFD_ASSERT (globals
!= NULL
);
9350 BFD_ASSERT (globals
->bfd_of_glue_owner
!= NULL
);
9352 s
= bfd_get_linker_section (globals
->bfd_of_glue_owner
,
9353 ARM2THUMB_GLUE_SECTION_NAME
);
9354 BFD_ASSERT (s
!= NULL
);
9355 BFD_ASSERT (s
->contents
!= NULL
);
9356 BFD_ASSERT (s
->output_section
!= NULL
);
9358 myh
= elf32_arm_create_thumb_stub (info
, name
, input_bfd
, output_bfd
,
9359 sym_sec
, val
, s
, error_message
);
9363 my_offset
= myh
->root
.u
.def
.value
;
9364 tmp
= bfd_get_32 (input_bfd
, hit_data
);
9365 tmp
= tmp
& 0xFF000000;
9367 /* Somehow these are both 4 too far, so subtract 8. */
9368 ret_offset
= (s
->output_offset
9370 + s
->output_section
->vma
9371 - (input_section
->output_offset
9372 + input_section
->output_section
->vma
9376 tmp
= tmp
| ((ret_offset
>> 2) & 0x00FFFFFF);
9378 bfd_put_32 (output_bfd
, (bfd_vma
) tmp
, hit_data
- input_section
->vma
);
9383 /* Populate Arm stub for an exported Thumb function. */
9386 elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry
*h
, void * inf
)
9388 struct bfd_link_info
* info
= (struct bfd_link_info
*) inf
;
9390 struct elf_link_hash_entry
* myh
;
9391 struct elf32_arm_link_hash_entry
*eh
;
9392 struct elf32_arm_link_hash_table
* globals
;
9395 char *error_message
;
9397 eh
= elf32_arm_hash_entry (h
);
9398 /* Allocate stubs for exported Thumb functions on v4t. */
9399 if (eh
->export_glue
== NULL
)
9402 globals
= elf32_arm_hash_table (info
);
9403 BFD_ASSERT (globals
!= NULL
);
9404 BFD_ASSERT (globals
->bfd_of_glue_owner
!= NULL
);
9406 s
= bfd_get_linker_section (globals
->bfd_of_glue_owner
,
9407 ARM2THUMB_GLUE_SECTION_NAME
);
9408 BFD_ASSERT (s
!= NULL
);
9409 BFD_ASSERT (s
->contents
!= NULL
);
9410 BFD_ASSERT (s
->output_section
!= NULL
);
9412 sec
= eh
->export_glue
->root
.u
.def
.section
;
9414 BFD_ASSERT (sec
->output_section
!= NULL
);
9416 val
= eh
->export_glue
->root
.u
.def
.value
+ sec
->output_offset
9417 + sec
->output_section
->vma
;
9419 myh
= elf32_arm_create_thumb_stub (info
, h
->root
.root
.string
,
9420 h
->root
.u
.def
.section
->owner
,
9421 globals
->obfd
, sec
, val
, s
,
9427 /* Populate ARMv4 BX veneers. Returns the absolute adress of the veneer. */
9430 elf32_arm_bx_glue (struct bfd_link_info
* info
, int reg
)
9435 struct elf32_arm_link_hash_table
*globals
;
9437 globals
= elf32_arm_hash_table (info
);
9438 BFD_ASSERT (globals
!= NULL
);
9439 BFD_ASSERT (globals
->bfd_of_glue_owner
!= NULL
);
9441 s
= bfd_get_linker_section (globals
->bfd_of_glue_owner
,
9442 ARM_BX_GLUE_SECTION_NAME
);
9443 BFD_ASSERT (s
!= NULL
);
9444 BFD_ASSERT (s
->contents
!= NULL
);
9445 BFD_ASSERT (s
->output_section
!= NULL
);
9447 BFD_ASSERT (globals
->bx_glue_offset
[reg
] & 2);
9449 glue_addr
= globals
->bx_glue_offset
[reg
] & ~(bfd_vma
)3;
9451 if ((globals
->bx_glue_offset
[reg
] & 1) == 0)
9453 p
= s
->contents
+ glue_addr
;
9454 bfd_put_32 (globals
->obfd
, armbx1_tst_insn
+ (reg
<< 16), p
);
9455 bfd_put_32 (globals
->obfd
, armbx2_moveq_insn
+ reg
, p
+ 4);
9456 bfd_put_32 (globals
->obfd
, armbx3_bx_insn
+ reg
, p
+ 8);
9457 globals
->bx_glue_offset
[reg
] |= 1;
9460 return glue_addr
+ s
->output_section
->vma
+ s
->output_offset
;
9463 /* Generate Arm stubs for exported Thumb symbols. */
9465 elf32_arm_begin_write_processing (bfd
*abfd ATTRIBUTE_UNUSED
,
9466 struct bfd_link_info
*link_info
)
9468 struct elf32_arm_link_hash_table
* globals
;
9470 if (link_info
== NULL
)
9471 /* Ignore this if we are not called by the ELF backend linker. */
9474 globals
= elf32_arm_hash_table (link_info
);
9475 if (globals
== NULL
)
9478 /* If blx is available then exported Thumb symbols are OK and there is
9480 if (globals
->use_blx
)
9483 elf_link_hash_traverse (&globals
->root
, elf32_arm_to_thumb_export_stub
,
9487 /* Reserve space for COUNT dynamic relocations in relocation selection
9491 elf32_arm_allocate_dynrelocs (struct bfd_link_info
*info
, asection
*sreloc
,
9492 bfd_size_type count
)
9494 struct elf32_arm_link_hash_table
*htab
;
9496 htab
= elf32_arm_hash_table (info
);
9497 BFD_ASSERT (htab
->root
.dynamic_sections_created
);
9500 sreloc
->size
+= RELOC_SIZE (htab
) * count
;
9503 /* Reserve space for COUNT R_ARM_IRELATIVE relocations. If the link is
9504 dynamic, the relocations should go in SRELOC, otherwise they should
9505 go in the special .rel.iplt section. */
9508 elf32_arm_allocate_irelocs (struct bfd_link_info
*info
, asection
*sreloc
,
9509 bfd_size_type count
)
9511 struct elf32_arm_link_hash_table
*htab
;
9513 htab
= elf32_arm_hash_table (info
);
9514 if (!htab
->root
.dynamic_sections_created
)
9515 htab
->root
.irelplt
->size
+= RELOC_SIZE (htab
) * count
;
9518 BFD_ASSERT (sreloc
!= NULL
);
9519 sreloc
->size
+= RELOC_SIZE (htab
) * count
;
9523 /* Add relocation REL to the end of relocation section SRELOC. */
9526 elf32_arm_add_dynreloc (bfd
*output_bfd
, struct bfd_link_info
*info
,
9527 asection
*sreloc
, Elf_Internal_Rela
*rel
)
9530 struct elf32_arm_link_hash_table
*htab
;
9532 htab
= elf32_arm_hash_table (info
);
9533 if (!htab
->root
.dynamic_sections_created
9534 && ELF32_R_TYPE (rel
->r_info
) == R_ARM_IRELATIVE
)
9535 sreloc
= htab
->root
.irelplt
;
9538 loc
= sreloc
->contents
;
9539 loc
+= sreloc
->reloc_count
++ * RELOC_SIZE (htab
);
9540 if (sreloc
->reloc_count
* RELOC_SIZE (htab
) > sreloc
->size
)
9542 SWAP_RELOC_OUT (htab
) (output_bfd
, rel
, loc
);
9545 /* Allocate room for a PLT entry described by ROOT_PLT and ARM_PLT.
9546 IS_IPLT_ENTRY says whether the entry belongs to .iplt rather than
9550 elf32_arm_allocate_plt_entry (struct bfd_link_info
*info
,
9551 bfd_boolean is_iplt_entry
,
9552 union gotplt_union
*root_plt
,
9553 struct arm_plt_info
*arm_plt
)
9555 struct elf32_arm_link_hash_table
*htab
;
9559 htab
= elf32_arm_hash_table (info
);
9563 splt
= htab
->root
.iplt
;
9564 sgotplt
= htab
->root
.igotplt
;
9566 /* NaCl uses a special first entry in .iplt too. */
9567 if (htab
->nacl_p
&& splt
->size
== 0)
9568 splt
->size
+= htab
->plt_header_size
;
9570 /* Allocate room for an R_ARM_IRELATIVE relocation in .rel.iplt. */
9571 elf32_arm_allocate_irelocs (info
, htab
->root
.irelplt
, 1);
9575 splt
= htab
->root
.splt
;
9576 sgotplt
= htab
->root
.sgotplt
;
9580 /* Allocate room for R_ARM_FUNCDESC_VALUE. */
9581 /* For lazy binding, relocations will be put into .rel.plt, in
9582 .rel.got otherwise. */
9583 /* FIXME: today we don't support lazy binding so put it in .rel.got */
9584 if (info
->flags
& DF_BIND_NOW
)
9585 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelgot
, 1);
9587 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelplt
, 1);
9591 /* Allocate room for an R_JUMP_SLOT relocation in .rel.plt. */
9592 elf32_arm_allocate_dynrelocs (info
, htab
->root
.srelplt
, 1);
9595 /* If this is the first .plt entry, make room for the special
9597 if (splt
->size
== 0)
9598 splt
->size
+= htab
->plt_header_size
;
9600 htab
->next_tls_desc_index
++;
9603 /* Allocate the PLT entry itself, including any leading Thumb stub. */
9604 if (elf32_arm_plt_needs_thumb_stub_p (info
, arm_plt
))
9605 splt
->size
+= PLT_THUMB_STUB_SIZE
;
9606 root_plt
->offset
= splt
->size
;
9607 splt
->size
+= htab
->plt_entry_size
;
9609 if (!htab
->symbian_p
)
9611 /* We also need to make an entry in the .got.plt section, which
9612 will be placed in the .got section by the linker script. */
9614 arm_plt
->got_offset
= sgotplt
->size
;
9616 arm_plt
->got_offset
= sgotplt
->size
- 8 * htab
->num_tls_desc
;
9618 /* Function descriptor takes 64 bits in GOT. */
static bfd_vma
arm_movw_immediate (bfd_vma value)
{
  return (value & 0x00000fff) | ((value & 0x0000f000) << 4);
}

static bfd_vma
arm_movt_immediate (bfd_vma value)
{
  return ((value & 0x0fff0000) >> 16) | ((value & 0xf0000000) >> 12);
}
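/* Example: loading the constant 0x12345678 with a MOVW/MOVT pair:
   arm_movw_immediate yields 0x50678 (imm4 = 0x5 in bits 19:16, imm12 = 0x678
   in bits 11:0, i.e. the low halfword 0x5678) and arm_movt_immediate yields
   0x10234 (imm4 = 0x1, imm12 = 0x234, i.e. the high halfword 0x1234).  */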
9637 /* Fill in a PLT entry and its associated GOT slot. If DYNINDX == -1,
9638 the entry lives in .iplt and resolves to (*SYM_VALUE)().
9639 Otherwise, DYNINDX is the index of the symbol in the dynamic
9640 symbol table and SYM_VALUE is undefined.
9642 ROOT_PLT points to the offset of the PLT entry from the start of its
9643 section (.iplt or .plt). ARM_PLT points to the symbol's ARM-specific
9644 bookkeeping information.
9646 Returns FALSE if there was a problem. */
9649 elf32_arm_populate_plt_entry (bfd
*output_bfd
, struct bfd_link_info
*info
,
9650 union gotplt_union
*root_plt
,
9651 struct arm_plt_info
*arm_plt
,
9652 int dynindx
, bfd_vma sym_value
)
9654 struct elf32_arm_link_hash_table
*htab
;
9660 Elf_Internal_Rela rel
;
9661 bfd_vma plt_header_size
;
9662 bfd_vma got_header_size
;
9664 htab
= elf32_arm_hash_table (info
);
9666 /* Pick the appropriate sections and sizes. */
9669 splt
= htab
->root
.iplt
;
9670 sgot
= htab
->root
.igotplt
;
9671 srel
= htab
->root
.irelplt
;
9673 /* There are no reserved entries in .igot.plt, and no special
9674 first entry in .iplt. */
9675 got_header_size
= 0;
9676 plt_header_size
= 0;
9680 splt
= htab
->root
.splt
;
9681 sgot
= htab
->root
.sgotplt
;
9682 srel
= htab
->root
.srelplt
;
9684 got_header_size
= get_elf_backend_data (output_bfd
)->got_header_size
;
9685 plt_header_size
= htab
->plt_header_size
;
9687 BFD_ASSERT (splt
!= NULL
&& srel
!= NULL
);
9689 /* Fill in the entry in the procedure linkage table. */
9690 if (htab
->symbian_p
)
9692 BFD_ASSERT (dynindx
>= 0);
9693 put_arm_insn (htab
, output_bfd
,
9694 elf32_arm_symbian_plt_entry
[0],
9695 splt
->contents
+ root_plt
->offset
);
9696 bfd_put_32 (output_bfd
,
9697 elf32_arm_symbian_plt_entry
[1],
9698 splt
->contents
+ root_plt
->offset
+ 4);
9700 /* Fill in the entry in the .rel.plt section. */
9701 rel
.r_offset
= (splt
->output_section
->vma
9702 + splt
->output_offset
9703 + root_plt
->offset
+ 4);
9704 rel
.r_info
= ELF32_R_INFO (dynindx
, R_ARM_GLOB_DAT
);
9706 /* Get the index in the procedure linkage table which
9707 corresponds to this symbol. This is the index of this symbol
9708 in all the symbols for which we are making plt entries. The
9709 first entry in the procedure linkage table is reserved. */
9710 plt_index
= ((root_plt
->offset
- plt_header_size
)
9711 / htab
->plt_entry_size
);
9715 bfd_vma got_offset
, got_address
, plt_address
;
9716 bfd_vma got_displacement
, initial_got_entry
;
9719 BFD_ASSERT (sgot
!= NULL
);
9721 /* Get the offset into the .(i)got.plt table of the entry that
9722 corresponds to this function. */
9723 got_offset
= (arm_plt
->got_offset
& -2);
9725 /* Get the index in the procedure linkage table which
9726 corresponds to this symbol. This is the index of this symbol
9727 in all the symbols for which we are making plt entries.
9728 After the reserved .got.plt entries, all symbols appear in
9729 the same order as in .plt. */
9731 /* Function descriptor takes 8 bytes. */
9732 plt_index
= (got_offset
- got_header_size
) / 8;
9734 plt_index
= (got_offset
- got_header_size
) / 4;
9736 /* Calculate the address of the GOT entry. */
9737 got_address
= (sgot
->output_section
->vma
9738 + sgot
->output_offset
9741 /* ...and the address of the PLT entry. */
9742 plt_address
= (splt
->output_section
->vma
9743 + splt
->output_offset
9744 + root_plt
->offset
);
9746 ptr
= splt
->contents
+ root_plt
->offset
;
9747 if (htab
->vxworks_p
&& bfd_link_pic (info
))
9752 for (i
= 0; i
!= htab
->plt_entry_size
/ 4; i
++, ptr
+= 4)
9754 val
= elf32_arm_vxworks_shared_plt_entry
[i
];
9756 val
|= got_address
- sgot
->output_section
->vma
;
9758 val
|= plt_index
* RELOC_SIZE (htab
);
9759 if (i
== 2 || i
== 5)
9760 bfd_put_32 (output_bfd
, val
, ptr
);
9762 put_arm_insn (htab
, output_bfd
, val
, ptr
);
9765 else if (htab
->vxworks_p
)
9770 for (i
= 0; i
!= htab
->plt_entry_size
/ 4; i
++, ptr
+= 4)
9772 val
= elf32_arm_vxworks_exec_plt_entry
[i
];
9776 val
|= 0xffffff & -((root_plt
->offset
+ i
* 4 + 8) >> 2);
9778 val
|= plt_index
* RELOC_SIZE (htab
);
9779 if (i
== 2 || i
== 5)
9780 bfd_put_32 (output_bfd
, val
, ptr
);
9782 put_arm_insn (htab
, output_bfd
, val
, ptr
);
9785 loc
= (htab
->srelplt2
->contents
9786 + (plt_index
* 2 + 1) * RELOC_SIZE (htab
));
9788 /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
9789 referencing the GOT for this PLT entry. */
9790 rel
.r_offset
= plt_address
+ 8;
9791 rel
.r_info
= ELF32_R_INFO (htab
->root
.hgot
->indx
, R_ARM_ABS32
);
9792 rel
.r_addend
= got_offset
;
9793 SWAP_RELOC_OUT (htab
) (output_bfd
, &rel
, loc
);
9794 loc
+= RELOC_SIZE (htab
);
9796 /* Create the R_ARM_ABS32 relocation referencing the
9797 beginning of the PLT for this GOT entry. */
9798 rel
.r_offset
= got_address
;
9799 rel
.r_info
= ELF32_R_INFO (htab
->root
.hplt
->indx
, R_ARM_ABS32
);
9801 SWAP_RELOC_OUT (htab
) (output_bfd
, &rel
, loc
);
9803 else if (htab
->nacl_p
)
9805 /* Calculate the displacement between the PLT slot and the
9806 common tail that's part of the special initial PLT slot. */
9807 int32_t tail_displacement
9808 = ((splt
->output_section
->vma
+ splt
->output_offset
9809 + ARM_NACL_PLT_TAIL_OFFSET
)
9810 - (plt_address
+ htab
->plt_entry_size
+ 4));
9811 BFD_ASSERT ((tail_displacement
& 3) == 0);
9812 tail_displacement
>>= 2;
9814 BFD_ASSERT ((tail_displacement
& 0xff000000) == 0
9815 || (-tail_displacement
& 0xff000000) == 0);
9817 /* Calculate the displacement between the PLT slot and the entry
9818 in the GOT. The offset accounts for the value produced by
9819 adding to pc in the penultimate instruction of the PLT stub. */
9820 got_displacement
= (got_address
9821 - (plt_address
+ htab
->plt_entry_size
));
9823 /* NaCl does not support interworking at all. */
9824 BFD_ASSERT (!elf32_arm_plt_needs_thumb_stub_p (info
, arm_plt
));
9826 put_arm_insn (htab
, output_bfd
,
9827 elf32_arm_nacl_plt_entry
[0]
9828 | arm_movw_immediate (got_displacement
),
9830 put_arm_insn (htab
, output_bfd
,
9831 elf32_arm_nacl_plt_entry
[1]
9832 | arm_movt_immediate (got_displacement
),
9834 put_arm_insn (htab
, output_bfd
,
9835 elf32_arm_nacl_plt_entry
[2],
9837 put_arm_insn (htab
, output_bfd
,
9838 elf32_arm_nacl_plt_entry
[3]
9839 | (tail_displacement
& 0x00ffffff),
9842 else if (htab
->fdpic_p
)
9844 const bfd_vma
*plt_entry
= using_thumb_only(htab
)
9845 ? elf32_arm_fdpic_thumb_plt_entry
9846 : elf32_arm_fdpic_plt_entry
;
9848 /* Fill-up Thumb stub if needed. */
9849 if (elf32_arm_plt_needs_thumb_stub_p (info
, arm_plt
))
9851 put_thumb_insn (htab
, output_bfd
,
9852 elf32_arm_plt_thumb_stub
[0], ptr
- 4);
9853 put_thumb_insn (htab
, output_bfd
,
9854 elf32_arm_plt_thumb_stub
[1], ptr
- 2);
9856 /* As we are using 32 bit instructions even for the Thumb
9857 version, we have to use 'put_arm_insn' instead of
9858 'put_thumb_insn'. */
9859 put_arm_insn(htab
, output_bfd
, plt_entry
[0], ptr
+ 0);
9860 put_arm_insn(htab
, output_bfd
, plt_entry
[1], ptr
+ 4);
9861 put_arm_insn(htab
, output_bfd
, plt_entry
[2], ptr
+ 8);
9862 put_arm_insn(htab
, output_bfd
, plt_entry
[3], ptr
+ 12);
9863 bfd_put_32 (output_bfd
, got_offset
, ptr
+ 16);
9865 if (!(info
->flags
& DF_BIND_NOW
))
9867 /* funcdesc_value_reloc_offset. */
9868 bfd_put_32 (output_bfd
,
9869 htab
->root
.srelplt
->reloc_count
* RELOC_SIZE (htab
),
9871 put_arm_insn(htab
, output_bfd
, plt_entry
[6], ptr
+ 24);
9872 put_arm_insn(htab
, output_bfd
, plt_entry
[7], ptr
+ 28);
9873 put_arm_insn(htab
, output_bfd
, plt_entry
[8], ptr
+ 32);
9874 put_arm_insn(htab
, output_bfd
, plt_entry
[9], ptr
+ 36);
9877 else if (using_thumb_only (htab
))
9879 /* PR ld/16017: Generate thumb only PLT entries. */
9880 if (!using_thumb2 (htab
))
9882 /* FIXME: We ought to be able to generate thumb-1 PLT
9884 _bfd_error_handler (_("%pB: warning: thumb-1 mode PLT generation not currently supported"),
9889 /* Calculate the displacement between the PLT slot and the entry in
9890 the GOT. The 12-byte offset accounts for the value produced by
9891 adding to pc in the 3rd instruction of the PLT stub. */
9892 got_displacement
= got_address
- (plt_address
+ 12);
9894 /* As we are using 32 bit instructions we have to use 'put_arm_insn'
9895 instead of 'put_thumb_insn'. */
9896 put_arm_insn (htab
, output_bfd
,
9897 elf32_thumb2_plt_entry
[0]
9898 | ((got_displacement
& 0x000000ff) << 16)
9899 | ((got_displacement
& 0x00000700) << 20)
9900 | ((got_displacement
& 0x00000800) >> 1)
9901 | ((got_displacement
& 0x0000f000) >> 12),
9903 put_arm_insn (htab
, output_bfd
,
9904 elf32_thumb2_plt_entry
[1]
9905 | ((got_displacement
& 0x00ff0000) )
9906 | ((got_displacement
& 0x07000000) << 4)
9907 | ((got_displacement
& 0x08000000) >> 17)
9908 | ((got_displacement
& 0xf0000000) >> 28),
9910 put_arm_insn (htab
, output_bfd
,
9911 elf32_thumb2_plt_entry
[2],
9913 put_arm_insn (htab
, output_bfd
,
9914 elf32_thumb2_plt_entry
[3],
9919 /* Calculate the displacement between the PLT slot and the
9920 entry in the GOT. The eight-byte offset accounts for the
9921 value produced by adding to pc in the first instruction
9923 got_displacement
= got_address
- (plt_address
+ 8);
9925 if (elf32_arm_plt_needs_thumb_stub_p (info
, arm_plt
))
9927 put_thumb_insn (htab
, output_bfd
,
9928 elf32_arm_plt_thumb_stub
[0], ptr
- 4);
9929 put_thumb_insn (htab
, output_bfd
,
9930 elf32_arm_plt_thumb_stub
[1], ptr
- 2);
9933 if (!elf32_arm_use_long_plt_entry
)
9935 BFD_ASSERT ((got_displacement
& 0xf0000000) == 0);
9937 put_arm_insn (htab
, output_bfd
,
9938 elf32_arm_plt_entry_short
[0]
9939 | ((got_displacement
& 0x0ff00000) >> 20),
9941 put_arm_insn (htab
, output_bfd
,
9942 elf32_arm_plt_entry_short
[1]
9943 | ((got_displacement
& 0x000ff000) >> 12),
9945 put_arm_insn (htab
, output_bfd
,
9946 elf32_arm_plt_entry_short
[2]
9947 | (got_displacement
& 0x00000fff),
9949 #ifdef FOUR_WORD_PLT
9950 bfd_put_32 (output_bfd
, elf32_arm_plt_entry_short
[3], ptr
+ 12);
9955 put_arm_insn (htab
, output_bfd
,
9956 elf32_arm_plt_entry_long
[0]
9957 | ((got_displacement
& 0xf0000000) >> 28),
9959 put_arm_insn (htab
, output_bfd
,
9960 elf32_arm_plt_entry_long
[1]
9961 | ((got_displacement
& 0x0ff00000) >> 20),
9963 put_arm_insn (htab
, output_bfd
,
9964 elf32_arm_plt_entry_long
[2]
9965 | ((got_displacement
& 0x000ff000) >> 12),
9967 put_arm_insn (htab
, output_bfd
,
9968 elf32_arm_plt_entry_long
[3]
9969 | (got_displacement
& 0x00000fff),
9974 /* Fill in the entry in the .rel(a).(i)plt section. */
9975 rel
.r_offset
= got_address
;
9979 /* .igot.plt entries use IRELATIVE relocations against SYM_VALUE.
9980 The dynamic linker or static executable then calls SYM_VALUE
9981 to determine the correct run-time value of the .igot.plt entry. */
9982 rel
.r_info
= ELF32_R_INFO (0, R_ARM_IRELATIVE
);
9983 initial_got_entry
= sym_value
;
9987 /* For FDPIC we will have to resolve a R_ARM_FUNCDESC_VALUE
9988 used by PLT entry. */
9991 rel
.r_info
= ELF32_R_INFO (dynindx
, R_ARM_FUNCDESC_VALUE
);
9992 initial_got_entry
= 0;
9996 rel
.r_info
= ELF32_R_INFO (dynindx
, R_ARM_JUMP_SLOT
);
9997 initial_got_entry
= (splt
->output_section
->vma
9998 + splt
->output_offset
);
10002 /* Fill in the entry in the global offset table. */
10003 bfd_put_32 (output_bfd
, initial_got_entry
,
10004 sgot
->contents
+ got_offset
);
10006 if (htab
->fdpic_p
&& !(info
->flags
& DF_BIND_NOW
))
10008 /* Setup initial funcdesc value. */
10009 /* FIXME: we don't support lazy binding because there is a
10010 race condition between both words getting written and
10011 some other thread attempting to read them. The ARM
10012 architecture does not have an atomic 64 bit load/store
10013 instruction that could be used to prevent it; it is
10014 recommended that threaded FDPIC applications run with the
10015 LD_BIND_NOW environment variable set. */
10016 bfd_put_32(output_bfd
, plt_address
+ 0x18,
10017 sgot
->contents
+ got_offset
);
10018 bfd_put_32(output_bfd
, -1 /*TODO*/,
10019 sgot
->contents
+ got_offset
+ 4);
10024 elf32_arm_add_dynreloc (output_bfd
, info
, srel
, &rel
);
10029 /* For FDPIC we put PLT relocationss into .rel.got when not
10030 lazy binding otherwise we put them in .rel.plt. For now,
10031 we don't support lazy binding so put it in .rel.got. */
10032 if (info
->flags
& DF_BIND_NOW
)
10033 elf32_arm_add_dynreloc(output_bfd
, info
, htab
->root
.srelgot
, &rel
);
10035 elf32_arm_add_dynreloc(output_bfd
, info
, htab
->root
.srelplt
, &rel
);
10039 loc
= srel
->contents
+ plt_index
* RELOC_SIZE (htab
);
10040 SWAP_RELOC_OUT (htab
) (output_bfd
, &rel
, loc
);
/* Some relocations map to different relocations depending on the
   target.  Return the real relocation.  */

static int
arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
		     int r_type)
{
  switch (r_type)
    {
    case R_ARM_TARGET1:
      if (globals->target1_is_rel)
	return R_ARM_REL32;
      else
	return R_ARM_ABS32;

    case R_ARM_TARGET2:
      return globals->target2_reloc;

    default:
      return r_type;
    }
}
/* Return the base VMA address which should be subtracted from real addresses
   when resolving @dtpoff relocation.
   This is PT_TLS segment p_vaddr.  */

static bfd_vma
dtpoff_base (struct bfd_link_info *info)
{
  /* If tls_sec is NULL, we should have signalled an error already.  */
  if (elf_hash_table (info)->tls_sec == NULL)
    return 0;
  return elf_hash_table (info)->tls_sec->vma;
}
/* Return the relocation value for @tpoff relocation
   if STT_TLS virtual address is ADDRESS.  */

static bfd_vma
tpoff (struct bfd_link_info *info, bfd_vma address)
{
  struct elf_link_hash_table *htab = elf_hash_table (info);
  bfd_vma base;

  /* If tls_sec is NULL, we should have signalled an error already.  */
  if (htab->tls_sec == NULL)
    return 0;

  base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
  return address - htab->tls_sec->vma + base;
}
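/* Example: in tpoff, with an 8-byte TCB (TCB_SIZE == 8) and a TLS segment
   aligned to 8 bytes, BASE is 8, so a variable located 0x10 bytes into the
   TLS segment resolves to a thread-pointer offset of 0x18.  */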
/* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
   VALUE is the relocation value.  */

static bfd_reloc_status_type
elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
{
  if (value > 0xfff)
    return bfd_reloc_overflow;

  value |= bfd_get_32 (abfd, data) & 0xfffff000;
  bfd_put_32 (abfd, value, data);
  return bfd_reloc_ok;
}
10113 /* Handle TLS relaxations. Relaxing is possible for symbols that use
10114 R_ARM_GOTDESC, R_ARM_{,THM_}TLS_CALL or
10115 R_ARM_{,THM_}TLS_DESCSEQ relocations, during a static link.
10117 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
10118 is to then call final_link_relocate. Return other values in the
10121 FIXME:When --emit-relocs is in effect, we'll emit relocs describing
10122 the pre-relaxed code. It would be nice if the relocs were updated
10123 to match the optimization. */
10125 static bfd_reloc_status_type
10126 elf32_arm_tls_relax (struct elf32_arm_link_hash_table
*globals
,
10127 bfd
*input_bfd
, asection
*input_sec
, bfd_byte
*contents
,
10128 Elf_Internal_Rela
*rel
, unsigned long is_local
)
10130 unsigned long insn
;
10132 switch (ELF32_R_TYPE (rel
->r_info
))
10135 return bfd_reloc_notsupported
;
10137 case R_ARM_TLS_GOTDESC
:
10142 insn
= bfd_get_32 (input_bfd
, contents
+ rel
->r_offset
);
10144 insn
-= 5; /* THUMB */
10146 insn
-= 8; /* ARM */
10148 bfd_put_32 (input_bfd
, insn
, contents
+ rel
->r_offset
);
10149 return bfd_reloc_continue
;
10151 case R_ARM_THM_TLS_DESCSEQ
:
10153 insn
= bfd_get_16 (input_bfd
, contents
+ rel
->r_offset
);
10154 if ((insn
& 0xff78) == 0x4478) /* add rx, pc */
10158 bfd_put_16 (input_bfd
, 0x46c0, contents
+ rel
->r_offset
);
10160 else if ((insn
& 0xffc0) == 0x6840) /* ldr rx,[ry,#4] */
10164 bfd_put_16 (input_bfd
, 0x46c0, contents
+ rel
->r_offset
);
10167 bfd_put_16 (input_bfd
, insn
& 0xf83f, contents
+ rel
->r_offset
);
10169 else if ((insn
& 0xff87) == 0x4780) /* blx rx */
10173 bfd_put_16 (input_bfd
, 0x46c0, contents
+ rel
->r_offset
);
10176 bfd_put_16 (input_bfd
, 0x4600 | (insn
& 0x78),
10177 contents
+ rel
->r_offset
);
10181 if ((insn
& 0xf000) == 0xf000 || (insn
& 0xf800) == 0xe800)
10182 /* It's a 32 bit instruction, fetch the rest of it for
10183 error generation. */
10184 insn
= (insn
<< 16)
10185 | bfd_get_16 (input_bfd
, contents
+ rel
->r_offset
+ 2);
10187 /* xgettext:c-format */
10188 (_("%pB(%pA+%#" PRIx64
"): "
10189 "unexpected %s instruction '%#lx' in TLS trampoline"),
10190 input_bfd
, input_sec
, (uint64_t) rel
->r_offset
,
10192 return bfd_reloc_notsupported
;
10196 case R_ARM_TLS_DESCSEQ
:
10198 insn
= bfd_get_32 (input_bfd
, contents
+ rel
->r_offset
);
10199 if ((insn
& 0xffff0ff0) == 0xe08f0000) /* add rx,pc,ry */
10203 bfd_put_32 (input_bfd
, 0xe1a00000 | (insn
& 0xffff),
10204 contents
+ rel
->r_offset
);
10206 else if ((insn
& 0xfff00fff) == 0xe5900004) /* ldr rx,[ry,#4]*/
10210 bfd_put_32 (input_bfd
, 0xe1a00000, contents
+ rel
->r_offset
);
10213 bfd_put_32 (input_bfd
, insn
& 0xfffff000,
10214 contents
+ rel
->r_offset
);
10216 else if ((insn
& 0xfffffff0) == 0xe12fff30) /* blx rx */
10220 bfd_put_32 (input_bfd
, 0xe1a00000, contents
+ rel
->r_offset
);
10223 bfd_put_32 (input_bfd
, 0xe1a00000 | (insn
& 0xf),
10224 contents
+ rel
->r_offset
);
10229 /* xgettext:c-format */
10230 (_("%pB(%pA+%#" PRIx64
"): "
10231 "unexpected %s instruction '%#lx' in TLS trampoline"),
10232 input_bfd
, input_sec
, (uint64_t) rel
->r_offset
,
10234 return bfd_reloc_notsupported
;
10238 case R_ARM_TLS_CALL
:
10239 /* GD->IE relaxation, turn the instruction into 'nop' or
10240 'ldr r0, [pc,r0]' */
10241 insn
= is_local
? 0xe1a00000 : 0xe79f0000;
10242 bfd_put_32 (input_bfd
, insn
, contents
+ rel
->r_offset
);
10245 case R_ARM_THM_TLS_CALL
:
10246 /* GD->IE relaxation. */
10248 /* add r0,pc; ldr r0, [r0] */
10250 else if (using_thumb2 (globals
))
10257 bfd_put_16 (input_bfd
, insn
>> 16, contents
+ rel
->r_offset
);
10258 bfd_put_16 (input_bfd
, insn
& 0xffff, contents
+ rel
->r_offset
+ 2);
10261 return bfd_reloc_ok
;
10264 /* For a given value of n, calculate the value of G_n as required to
10265 deal with group relocations. We return it in the form of an
10266 encoded constant-and-rotation, together with the final residual. If n is
10267 specified as less than zero, then final_residual is filled with the
10268 input value and no further action is performed. */
10271 calculate_group_reloc_mask (bfd_vma value
, int n
, bfd_vma
*final_residual
)
10275 bfd_vma encoded_g_n
= 0;
10276 bfd_vma residual
= value
; /* Also known as Y_n. */
10278 for (current_n
= 0; current_n
<= n
; current_n
++)
10282 /* Calculate which part of the value to mask. */
10289 /* Determine the most significant bit in the residual and
10290 align the resulting value to a 2-bit boundary. */
10291 for (msb
= 30; msb
>= 0; msb
-= 2)
10292 if (residual
& (3 << msb
))
10295 /* The desired shift is now (msb - 6), or zero, whichever
10302 /* Calculate g_n in 32-bit as well as encoded constant+rotation form. */
10303 g_n
= residual
& (0xff << shift
);
10304 encoded_g_n
= (g_n
>> shift
)
10305 | ((g_n
<= 0xff ? 0 : (32 - shift
) / 2) << 8);
10307 /* Calculate the residual for the next time around. */
10311 *final_residual
= residual
;
10313 return encoded_g_n
;
/* Given an ARM instruction, determine whether it is an ADD or a SUB.
   Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise.  */

static int
identify_add_or_sub (bfd_vma insn)
{
  int opcode = insn & 0x1e00000;

  if (opcode == 1 << 23) /* ADD */
    return 1;

  if (opcode == 1 << 22) /* SUB */
    return -1;

  return 0;
}
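/* In identify_add_or_sub, the mask 0x1e00000 extracts the data-processing
   opcode field (bits 24:21): ADD is opcode 0b0100, which equals 1 << 23
   within that field, and SUB is opcode 0b0010, which equals 1 << 22.  */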
/* Perform a relocation as part of a final link.  */

static bfd_reloc_status_type
elf32_arm_final_link_relocate (reloc_howto_type * howto,
			       bfd * input_bfd,
			       bfd * output_bfd,
			       asection * input_section,
			       bfd_byte * contents,
			       Elf_Internal_Rela * rel,
			       bfd_vma value,
			       struct bfd_link_info * info,
			       asection * sym_sec,
			       const char * sym_name,
			       unsigned char st_type,
			       enum arm_st_branch_type branch_type,
			       struct elf_link_hash_entry * h,
			       bfd_boolean * unresolved_reloc_p,
			       char ** error_message)
{
  unsigned long r_type = howto->type;
  unsigned long r_symndx;
  bfd_byte * hit_data = contents + rel->r_offset;
  bfd_vma * local_got_offsets;
  bfd_vma * local_tlsdesc_gotents;
  asection * sgot;
  asection * splt;
  asection * sreloc = NULL;
  asection * srelgot;
  bfd_vma addend;
  bfd_signed_vma signed_addend;
  unsigned char dynreloc_st_type;
  bfd_vma dynreloc_value;
  struct elf32_arm_link_hash_table * globals;
  struct elf32_arm_link_hash_entry *eh;
  union gotplt_union *root_plt;
  struct arm_plt_info *arm_plt;
  bfd_vma plt_offset;
  bfd_vma gotplt_offset;
  bfd_boolean has_iplt_entry;
  bfd_boolean resolved_to_zero;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return bfd_reloc_notsupported;

  BFD_ASSERT (is_arm_elf (input_bfd));
  BFD_ASSERT (howto != NULL);

  /* Some relocation types map to different relocations depending on the
     target.  We pick the right one here.  */
  r_type = arm_real_reloc_type (globals, r_type);

  /* It is possible to have linker relaxations on some TLS access
     models.  Update our information here.  */
  r_type = elf32_arm_tls_transition (info, r_type, h);

  if (r_type != howto->type)
    howto = elf32_arm_howto_from_type (r_type);

  eh = (struct elf32_arm_link_hash_entry *) h;
  sgot = globals->root.sgot;
  local_got_offsets = elf_local_got_offsets (input_bfd);
  local_tlsdesc_gotents = elf32_arm_local_tlsdesc_gotent (input_bfd);

  if (globals->root.dynamic_sections_created)
    srelgot = globals->root.srelgot;
  else
    srelgot = NULL;

  r_symndx = ELF32_R_SYM (rel->r_info);

  if (globals->use_rel)
    {
      addend = bfd_get_32 (input_bfd, hit_data) & howto->src_mask;

      if (addend & ((howto->src_mask + 1) >> 1))
	{
	  signed_addend = -1;
	  signed_addend &= ~ howto->src_mask;
	  signed_addend |= addend;
	}
      else
	signed_addend = addend;
    }
  else
    addend = signed_addend = rel->r_addend;
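
  /* Illustrative note (added; not in the original source): for a REL-format
     R_ARM_PC24 word 0xebfffffe, src_mask is 0x00ffffff, so the extracted
     addend 0xfffffe has bit 23 set and the branch above sign-extends it to
     the bfd_signed_vma value -2 (still in instruction units here).  */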
  /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
     are resolving a function call relocation.  */
  if (using_thumb_only (globals)
      && (r_type == R_ARM_THM_CALL
	  || r_type == R_ARM_THM_JUMP24)
      && branch_type == ST_BRANCH_TO_ARM)
    branch_type = ST_BRANCH_TO_THUMB;

  /* Record the symbol information that should be used in dynamic
     relocations.  */
  dynreloc_st_type = st_type;
  dynreloc_value = value;
  if (branch_type == ST_BRANCH_TO_THUMB)
    dynreloc_value |= 1;

  /* Find out whether the symbol has a PLT.  Set ST_VALUE, BRANCH_TYPE and
     VALUE appropriately for relocations that we resolve at link time.  */
  has_iplt_entry = FALSE;
  if (elf32_arm_get_plt_info (input_bfd, globals, eh, r_symndx, &root_plt,
			      &arm_plt)
      && root_plt->offset != (bfd_vma) -1)
    {
      plt_offset = root_plt->offset;
      gotplt_offset = arm_plt->got_offset;

      if (h == NULL || eh->is_iplt)
	{
	  has_iplt_entry = TRUE;
	  splt = globals->root.iplt;

	  /* Populate .iplt entries here, because not all of them will
	     be seen by finish_dynamic_symbol.  The lower bit is set if
	     we have already populated the entry.  */
	  if (plt_offset & 1)
	    plt_offset--;
	  else
	    {
	      if (elf32_arm_populate_plt_entry (output_bfd, info, root_plt, arm_plt,
						-1, dynreloc_value))
		root_plt->offset |= 1;
	      else
		return bfd_reloc_notsupported;
	    }

	  /* Static relocations always resolve to the .iplt entry.  */
	  st_type = STT_FUNC;
	  value = (splt->output_section->vma
		   + splt->output_offset
		   + plt_offset);
	  branch_type = ST_BRANCH_TO_ARM;

	  /* If there are non-call relocations that resolve to the .iplt
	     entry, then all dynamic ones must too.  */
	  if (arm_plt->noncall_refcount != 0)
	    {
	      dynreloc_st_type = st_type;
	      dynreloc_value = value;
	    }
	}
      else
	/* We populate the .plt entry in finish_dynamic_symbol.  */
	splt = globals->root.splt;
    }
  else
    {
      splt = NULL;
      plt_offset = (bfd_vma) -1;
      gotplt_offset = (bfd_vma) -1;
    }

  resolved_to_zero = (h != NULL
		      && UNDEFWEAK_NO_DYNAMIC_RELOC (info, h));

  switch (r_type)
    {
    case R_ARM_NONE:
      /* We don't need to find a value for this symbol.  It's just a
	 marker.  */
      *unresolved_reloc_p = FALSE;
      return bfd_reloc_ok;
    case R_ARM_ABS12:
      if (!globals->vxworks_p)
	return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
      /* Fall through.  */

    case R_ARM_PC24:
    case R_ARM_ABS32:
    case R_ARM_ABS32_NOI:
    case R_ARM_REL32:
    case R_ARM_REL32_NOI:
    case R_ARM_CALL:
    case R_ARM_JUMP24:
    case R_ARM_XPC25:
    case R_ARM_PREL31:
    case R_ARM_PLT32:
      /* Handle relocations which should use the PLT entry.  ABS32/REL32
	 will use the symbol's value, which may point to a PLT entry, but we
	 don't need to handle that here.  If we created a PLT entry, all
	 branches in this object should go to it, except if the PLT is too
	 far away, in which case a long branch stub should be inserted.  */
      if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
	   && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
	   && r_type != R_ARM_CALL
	   && r_type != R_ARM_JUMP24
	   && r_type != R_ARM_PLT32)
	  && plt_offset != (bfd_vma) -1)
	{
	  /* If we've created a .plt section, and assigned a PLT entry
	     to this function, it must either be a STT_GNU_IFUNC reference
	     or not be known to bind locally.  In other cases, we should
	     have cleared the PLT entry by now.  */
	  BFD_ASSERT (has_iplt_entry || !SYMBOL_CALLS_LOCAL (info, h));

	  value = (splt->output_section->vma
		   + splt->output_offset
		   + plt_offset);
	  *unresolved_reloc_p = FALSE;
	  return _bfd_final_link_relocate (howto, input_bfd, input_section,
					   contents, rel->r_offset, value,
					   rel->r_addend);
	}

      /* When generating a shared object or relocatable executable, these
	 relocations are copied into the output file to be resolved at
	 run time.  */
      if ((bfd_link_pic (info)
	   || globals->root.is_relocatable_executable
	   || globals->fdpic_p)
	  && (input_section->flags & SEC_ALLOC)
	  && !(globals->vxworks_p
	       && strcmp (input_section->output_section->name,
			  ".tls_vars") == 0)
	  && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
	      || !SYMBOL_CALLS_LOCAL (info, h))
	  && !(input_bfd == globals->stub_bfd
	       && strstr (input_section->name, STUB_SUFFIX))
	  && (h == NULL
	      || (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
		  && !resolved_to_zero)
	      || h->root.type != bfd_link_hash_undefweak)
	  && r_type != R_ARM_PC24
	  && r_type != R_ARM_CALL
	  && r_type != R_ARM_JUMP24
	  && r_type != R_ARM_PREL31
	  && r_type != R_ARM_PLT32)
	{
	  Elf_Internal_Rela outrel;
	  bfd_boolean skip, relocate;
	  int isrofixup = 0;

	  if ((r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
	      && !h->def_regular)
	    {
	      char *v = _("shared object");

	      if (bfd_link_executable (info))
		v = _("PIE executable");

	      _bfd_error_handler
		(_("%pB: relocation %s against external or undefined symbol `%s'"
		   " can not be used when making a %s; recompile with -fPIC"), input_bfd,
		 elf32_arm_howto_table_1[r_type].name, h->root.root.string, v);
	      return bfd_reloc_notsupported;
	    }

	  *unresolved_reloc_p = FALSE;

	  if (sreloc == NULL && globals->root.dynamic_sections_created)
	    {
	      sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
							   ! globals->use_rel);

	      if (sreloc == NULL)
		return bfd_reloc_notsupported;
	    }

	  skip = FALSE;
	  relocate = FALSE;

	  outrel.r_addend = addend;
	  outrel.r_offset =
	    _bfd_elf_section_offset (output_bfd, info, input_section,
				     rel->r_offset);
	  if (outrel.r_offset == (bfd_vma) -1)
	    skip = TRUE;
	  else if (outrel.r_offset == (bfd_vma) -2)
	    skip = TRUE, relocate = TRUE;
	  outrel.r_offset += (input_section->output_section->vma
			      + input_section->output_offset);

	  if (skip)
	    memset (&outrel, 0, sizeof outrel);
	  else if (h != NULL
		   && h->dynindx != -1
		   && (!bfd_link_pic (info)
		       || !(bfd_link_pie (info)
			    || SYMBOLIC_BIND (info, h))
		       || !h->def_regular))
	    outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
	  else
	    {
	      int symbol;

	      /* This symbol is local, or marked to become local.  */
	      BFD_ASSERT (r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI
			  || (globals->fdpic_p && !bfd_link_pic(info)));
	      if (globals->symbian_p)
		{
		  asection *osec;

		  /* On Symbian OS, the data segment and text segement
		     can be relocated independently.  Therefore, we
		     must indicate the segment to which this
		     relocation is relative.  The BPABI allows us to
		     use any symbol in the right segment; we just use
		     the section symbol as it is convenient.  (We
		     cannot use the symbol given by "h" directly as it
		     will not appear in the dynamic symbol table.)

		     Note that the dynamic linker ignores the section
		     symbol value, so we don't subtract osec->vma
		     from the emitted reloc addend.  */
		  if (sym_sec)
		    osec = sym_sec->output_section;
		  else
		    osec = input_section->output_section;
		  symbol = elf_section_data (osec)->dynindx;
		  if (symbol == 0)
		    {
		      struct elf_link_hash_table *htab = elf_hash_table (info);

		      if ((osec->flags & SEC_READONLY) == 0
			  && htab->data_index_section != NULL)
			osec = htab->data_index_section;
		      else
			osec = htab->text_index_section;
		      symbol = elf_section_data (osec)->dynindx;
		    }
		  BFD_ASSERT (symbol != 0);
		}
	      else
		/* On SVR4-ish systems, the dynamic loader cannot
		   relocate the text and data segments independently,
		   so the symbol does not matter.  */
		symbol = 0;
	      if (dynreloc_st_type == STT_GNU_IFUNC)
		/* We have an STT_GNU_IFUNC symbol that doesn't resolve
		   to the .iplt entry.  Instead, every non-call reference
		   must use an R_ARM_IRELATIVE relocation to obtain the
		   correct run-time address.  */
		outrel.r_info = ELF32_R_INFO (symbol, R_ARM_IRELATIVE);
	      else if (globals->fdpic_p && !bfd_link_pic(info))
		isrofixup = 1;
	      else
		outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
	      if (globals->use_rel)
		relocate = TRUE;
	      else
		outrel.r_addend += dynreloc_value;
	    }

	  if (isrofixup)
	    arm_elf_add_rofixup(output_bfd, globals->srofixup, outrel.r_offset);
	  else
	    elf32_arm_add_dynreloc (output_bfd, info, sreloc, &outrel);

	  /* If this reloc is against an external symbol, we do not want to
	     fiddle with the addend.  Otherwise, we need to include the symbol
	     value so that it becomes an addend for the dynamic reloc.  */
	  if (! relocate)
	    return bfd_reloc_ok;

	  return _bfd_final_link_relocate (howto, input_bfd, input_section,
					   contents, rel->r_offset,
					   dynreloc_value, (bfd_vma) 0);
	}
      else switch (r_type)
	{
	case R_ARM_ABS12:
	  return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);

	case R_ARM_XPC25:	  /* Arm BLX instruction.  */
	case R_ARM_CALL:
	case R_ARM_JUMP24:
	case R_ARM_PC24:	  /* Arm B/BL instruction.  */
	case R_ARM_PLT32:
	  {
	    struct elf32_arm_stub_hash_entry *stub_entry = NULL;

	    if (r_type == R_ARM_XPC25)
	      {
		/* Check for Arm calling Arm function.  */
		/* FIXME: Should we translate the instruction into a BL
		   instruction instead ?  */
		if (branch_type != ST_BRANCH_TO_THUMB)
		  _bfd_error_handler
		    (_("\%pB: warning: %s BLX instruction targets"
		       " %s function '%s'"),
		     input_bfd, "ARM",
		     "ARM", h ? h->root.root.string : "(local)");
	      }
	    else if (r_type == R_ARM_PC24)
	      {
		/* Check for Arm calling Thumb function.  */
		if (branch_type == ST_BRANCH_TO_THUMB)
		  {
		    if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
						 output_bfd, input_section,
						 hit_data, sym_sec, rel->r_offset,
						 signed_addend, value,
						 error_message))
		      return bfd_reloc_ok;
		    else
		      return bfd_reloc_dangerous;
		  }
	      }

	    /* Check if a stub has to be inserted because the
	       destination is too far or we are changing mode.  */
	    if (   r_type == R_ARM_CALL
		|| r_type == R_ARM_JUMP24
		|| r_type == R_ARM_PLT32)
	      {
		enum elf32_arm_stub_type stub_type = arm_stub_none;
		struct elf32_arm_link_hash_entry *hash;

		hash = (struct elf32_arm_link_hash_entry *) h;
		stub_type = arm_type_of_stub (info, input_section, rel,
					      st_type, &branch_type,
					      hash, value, sym_sec,
					      input_bfd, sym_name);

		if (stub_type != arm_stub_none)
		  {
		    /* The target is out of reach, so redirect the
		       branch to the local stub for this function.  */
		    stub_entry = elf32_arm_get_stub_entry (input_section,
							   sym_sec, h,
							   rel, globals,
							   stub_type);
		    if (stub_entry != NULL)
		      value = (stub_entry->stub_offset
			       + stub_entry->stub_sec->output_offset
			       + stub_entry->stub_sec->output_section->vma);

		    if (plt_offset != (bfd_vma) -1)
		      *unresolved_reloc_p = FALSE;
		  }
		else
		  {
		    /* If the call goes through a PLT entry, make sure to
		       check distance to the right destination address.  */
		    if (plt_offset != (bfd_vma) -1)
		      {
			value = (splt->output_section->vma
				 + splt->output_offset
				 + plt_offset);
			*unresolved_reloc_p = FALSE;
			/* The PLT entry is in ARM mode, regardless of the
			   target function.  */
			branch_type = ST_BRANCH_TO_ARM;
		      }
		  }
	      }

	    /* The ARM ELF ABI says that this reloc is computed as: S - P + A
	       where:
		S is the address of the symbol in the relocation.
		P is address of the instruction being relocated.
		A is the addend (extracted from the instruction) in bytes.

	       S is held in 'value'.
	       P is the base address of the section containing the
		 instruction plus the offset of the reloc into that
		 section, ie:
		 (input_section->output_section->vma +
		  input_section->output_offset +
		  rel->r_offset).
	       A is the addend, converted into bytes, ie:
		 (signed_addend * 4)

	       Note: None of these operations have knowledge of the pipeline
	       size of the processor, thus it is up to the assembler to
	       encode this information into the addend.  */
	    value -= (input_section->output_section->vma
		      + input_section->output_offset);
	    value -= rel->r_offset;
	    if (globals->use_rel)
	      value += (signed_addend << howto->size);
	    else
	      /* RELA addends do not have to be adjusted by howto->size.  */
	      value += signed_addend;

	    signed_addend = value;
	    signed_addend >>= howto->rightshift;
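
	    /* Illustrative note (added; not in the original source): for a
	       REL-format R_ARM_CALL at P = 0x8000 targeting S = 0x9000 with
	       an encoded addend of -2 instructions (-8 bytes, the usual ARM
	       PC-pipeline bias), the steps above give
	       value = 0x9000 - 0x8000 - 8 = 0xff8 and
	       signed_addend = 0xff8 >> 2 = 0x3fe, the word offset that is
	       range-checked and inserted into the branch below.  */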
	    /* A branch to an undefined weak symbol is turned into a jump to
	       the next instruction unless a PLT entry will be created.
	       Do the same for local undefined symbols (but not for STN_UNDEF).
	       The jump to the next instruction is optimized as a NOP depending
	       on the architecture.  */
	    if (h ? (h->root.type == bfd_link_hash_undefweak
		     && plt_offset == (bfd_vma) -1)
		  : r_symndx != STN_UNDEF && bfd_is_und_section (sym_sec))
	      {
		value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000);

		if (arch_has_arm_nop (globals))
		  value |= 0x0320f000;
		else
		  value |= 0x01a00000; /* Using pre-UAL nop: mov r0, r0.  */
	      }
	    else
	      {
		/* Perform a signed range check.  */
		if (   signed_addend >   ((bfd_signed_vma) (howto->dst_mask >> 1))
		    || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
		  return bfd_reloc_overflow;

		addend = (value & 2);

		value = (signed_addend & howto->dst_mask)
		  | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));

		if (r_type == R_ARM_CALL)
		  {
		    /* Set the H bit in the BLX instruction.  */
		    if (branch_type == ST_BRANCH_TO_THUMB)
		      {
			if (addend)
			  value |= (1 << 24);
			else
			  value &= ~(bfd_vma)(1 << 24);
		      }

		    /* Select the correct instruction (BL or BLX).  */
		    /* Only if we are not handling a BL to a stub.  In this
		       case, mode switching is performed by the stub.  */
		    if (branch_type == ST_BRANCH_TO_THUMB && !stub_entry)
		      value |= (1 << 28);
		    else if (stub_entry || branch_type != ST_BRANCH_UNKNOWN)
		      {
			value &= ~(bfd_vma)(1 << 28);
			value |= (1 << 24);
		      }
		  }
	      }
	  }
	  break;

	case R_ARM_ABS32:
	  value += addend;
	  if (branch_type == ST_BRANCH_TO_THUMB)
	    value |= 1;
	  break;

	case R_ARM_ABS32_NOI:
	  value += addend;
	  break;

	case R_ARM_REL32:
	  value += addend;
	  if (branch_type == ST_BRANCH_TO_THUMB)
	    value |= 1;
	  value -= (input_section->output_section->vma
		    + input_section->output_offset + rel->r_offset);
	  break;

	case R_ARM_REL32_NOI:
	  value += addend;
	  value -= (input_section->output_section->vma
		    + input_section->output_offset + rel->r_offset);
	  break;

	case R_ARM_PREL31:
	  value -= (input_section->output_section->vma
		    + input_section->output_offset + rel->r_offset);
	  value += signed_addend;
	  if (! h || h->root.type != bfd_link_hash_undefweak)
	    {
	      /* Check for overflow.  */
	      if ((value ^ (value >> 1)) & (1 << 30))
		return bfd_reloc_overflow;
	    }
	  value &= 0x7fffffff;
	  value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
	  if (branch_type == ST_BRANCH_TO_THUMB)
	    value |= 1;
	  break;
	}

      bfd_put_32 (input_bfd, value, hit_data);
      return bfd_reloc_ok;

    case R_ARM_ABS8:
      /* PR 16202: Refetch the addend using the correct size.  */
      if (globals->use_rel)
	addend = bfd_get_8 (input_bfd, hit_data);
      value += addend;

      /* There is no way to tell whether the user intended to use a signed or
	 unsigned addend.  When checking for overflow we accept either,
	 as specified by the AAELF.  */
      if ((long) value > 0xff || (long) value < -0x80)
	return bfd_reloc_overflow;

      bfd_put_8 (input_bfd, value, hit_data);
      return bfd_reloc_ok;

    case R_ARM_ABS16:
      /* PR 16202: Refetch the addend using the correct size.  */
      if (globals->use_rel)
	addend = bfd_get_16 (input_bfd, hit_data);
      value += addend;

      /* See comment for R_ARM_ABS8.  */
      if ((long) value > 0xffff || (long) value < -0x8000)
	return bfd_reloc_overflow;

      bfd_put_16 (input_bfd, value, hit_data);
      return bfd_reloc_ok;

    case R_ARM_THM_ABS5:
      /* Support ldr and str instructions for the thumb.  */
      if (globals->use_rel)
	{
	  /* Need to refetch addend.  */
	  addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
	  /* ??? Need to determine shift amount from operand size.  */
	  addend >>= howto->rightshift;
	}
      value += addend;

      /* ??? Isn't value unsigned?  */
      if ((long) value > 0x1f || (long) value < -0x10)
	return bfd_reloc_overflow;

      /* ??? Value needs to be properly shifted into place first.  */
      value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
      bfd_put_16 (input_bfd, value, hit_data);
      return bfd_reloc_ok;

    case R_ARM_THM_ALU_PREL_11_0:
      /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw).  */
      {
	bfd_vma insn;
	bfd_signed_vma relocation;

	insn = (bfd_get_16 (input_bfd, hit_data) << 16)
	     | bfd_get_16 (input_bfd, hit_data + 2);

	if (globals->use_rel)
	  {
	    signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
			  | ((insn & (1 << 26)) >> 15);
	    if (insn & 0xf00000)
	      signed_addend = -signed_addend;
	  }

	relocation = value + signed_addend;
	relocation -= Pa (input_section->output_section->vma
			  + input_section->output_offset
			  + rel->r_offset);

	/* PR 21523: Use an absolute value.  The user of this reloc will
	   have already selected an ADD or SUB insn appropriately.  */
	value = llabs (relocation);

	if (value >= 0x1000)
	  return bfd_reloc_overflow;

	/* Destination is Thumb.  Force bit 0 to 1 to reflect this.  */
	if (branch_type == ST_BRANCH_TO_THUMB)
	  value |= 1;

	insn = (insn & 0xfb0f8f00) | (value & 0xff)
	     | ((value & 0x700) << 4)
	     | ((value & 0x800) << 15);
	if (relocation < 0)
	  insn |= 0xa00000;

	bfd_put_16 (input_bfd, insn >> 16, hit_data);
	bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);

	return bfd_reloc_ok;
      }
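
      /* Illustrative note (added; not in the original source): the Pa()
	 adjustment above measures the offset from the word-aligned address
	 of the addw/subw instruction.  For example, an instruction at
	 0x8002 addressing a symbol at 0x8100 gives relocation =
	 0x8100 - Pa(0x8002) = 0x100, which fits in the 12-bit field and
	 keeps the ADD form.  */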
    case R_ARM_THM_PC8:
      /* PR 10073:  This reloc is not generated by the GNU toolchain,
	 but it is supported for compatibility with third party libraries
	 generated by other compilers, specifically the ARM/IAR.  */
      {
	bfd_vma insn;
	bfd_signed_vma relocation;

	insn = bfd_get_16 (input_bfd, hit_data);

	if (globals->use_rel)
	  addend = ((((insn & 0x00ff) << 2) + 4) & 0x3ff) -4;

	relocation = value + addend;
	relocation -= Pa (input_section->output_section->vma
			  + input_section->output_offset
			  + rel->r_offset);

	value = relocation;

	/* We do not check for overflow of this reloc.  Although strictly
	   speaking this is incorrect, it appears to be necessary in order
	   to work with IAR generated relocs.  Since GCC and GAS do not
	   generate R_ARM_THM_PC8 relocs, the lack of a check should not be
	   a problem for them.  */
	value &= 0x3fc;

	insn = (insn & 0xff00) | (value >> 2);

	bfd_put_16 (input_bfd, insn, hit_data);

	return bfd_reloc_ok;
      }

    case R_ARM_THM_PC12:
      /* Corresponds to: ldr.w reg, [pc, #offset].  */
      {
	bfd_vma insn;
	bfd_signed_vma relocation;

	insn = (bfd_get_16 (input_bfd, hit_data) << 16)
	     | bfd_get_16 (input_bfd, hit_data + 2);

	if (globals->use_rel)
	  {
	    signed_addend = insn & 0xfff;
	    if (!(insn & (1 << 23)))
	      signed_addend = -signed_addend;
	  }

	relocation = value + signed_addend;
	relocation -= Pa (input_section->output_section->vma
			  + input_section->output_offset
			  + rel->r_offset);

	value = relocation;

	if (value >= 0x1000)
	  return bfd_reloc_overflow;

	insn = (insn & 0xff7ff000) | value;
	if (relocation >= 0)
	  insn |= (1 << 23);

	bfd_put_16 (input_bfd, insn >> 16, hit_data);
	bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);

	return bfd_reloc_ok;
      }

    case R_ARM_THM_XPC22:
    case R_ARM_THM_CALL:
    case R_ARM_THM_JUMP24:
      /* Thumb BL (branch long instruction).  */
      {
	bfd_vma relocation;
	bfd_vma reloc_sign;
	bfd_boolean overflow = FALSE;
	bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
	bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
	bfd_signed_vma reloc_signed_max;
	bfd_signed_vma reloc_signed_min;
	bfd_vma check;
	bfd_signed_vma signed_check;
	int bitsize;
	const int thumb2 = using_thumb2 (globals);
	const int thumb2_bl = using_thumb2_bl (globals);

	/* A branch to an undefined weak symbol is turned into a jump to
	   the next instruction unless a PLT entry will be created.
	   The jump to the next instruction is optimized as a NOP.W for
	   Thumb-2 enabled architectures.  */
	if (h && h->root.type == bfd_link_hash_undefweak
	    && plt_offset == (bfd_vma) -1)
	  {
	    if (thumb2)
	      {
		bfd_put_16 (input_bfd, 0xf3af, hit_data);
		bfd_put_16 (input_bfd, 0x8000, hit_data + 2);
	      }
	    else
	      {
		bfd_put_16 (input_bfd, 0xe000, hit_data);
		bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
	      }
	    return bfd_reloc_ok;
	  }

	/* Fetch the addend.  We use the Thumb-2 encoding (backwards compatible
	   with Thumb-1) involving the J1 and J2 bits.  */
	if (globals->use_rel)
	  {
	    bfd_vma s = (upper_insn & (1 << 10)) >> 10;
	    bfd_vma upper = upper_insn & 0x3ff;
	    bfd_vma lower = lower_insn & 0x7ff;
	    bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
	    bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
	    bfd_vma i1 = j1 ^ s ? 0 : 1;
	    bfd_vma i2 = j2 ^ s ? 0 : 1;

	    addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
	    /* Sign extend.  */
	    addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);

	    signed_addend = addend;
	  }
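
	/* Illustrative note (added; not in the original source): the REL
	   encoding upper_insn = 0xf7ff, lower_insn = 0xfffe (a "bl" to the
	   instruction itself) has S = J1 = J2 = 1, so I1 = I2 = 1 and the
	   computation above produces addend = 0xfffffc - 0x1000000 = -4,
	   the usual Thumb PC bias for a branch back to its own address.  */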
	if (r_type == R_ARM_THM_XPC22)
	  {
	    /* Check for Thumb to Thumb call.  */
	    /* FIXME: Should we translate the instruction into a BL
	       instruction instead ?  */
	    if (branch_type == ST_BRANCH_TO_THUMB)
	      _bfd_error_handler
		(_("%pB: warning: %s BLX instruction targets"
		   " %s function '%s'"),
		 input_bfd, "Thumb",
		 "Thumb", h ? h->root.root.string : "(local)");
	  }
	else
	  {
	    /* If it is not a call to Thumb, assume call to Arm.
	       If it is a call relative to a section name, then it is not a
	       function call at all, but rather a long jump.  Calls through
	       the PLT do not require stubs.  */
	    if (branch_type == ST_BRANCH_TO_ARM && plt_offset == (bfd_vma) -1)
	      {
		if (globals->use_blx && r_type == R_ARM_THM_CALL)
		  {
		    /* Convert BL to BLX.  */
		    lower_insn = (lower_insn & ~0x1000) | 0x0800;
		  }
		else if ((   r_type != R_ARM_THM_CALL)
			 && (r_type != R_ARM_THM_JUMP24))
		  {
		    if (elf32_thumb_to_arm_stub
			(info, sym_name, input_bfd, output_bfd, input_section,
			 hit_data, sym_sec, rel->r_offset, signed_addend, value,
			 error_message))
		      return bfd_reloc_ok;
		    else
		      return bfd_reloc_dangerous;
		  }
	      }
	    else if (branch_type == ST_BRANCH_TO_THUMB
		     && globals->use_blx
		     && r_type == R_ARM_THM_CALL)
	      {
		/* Make sure this is a BL.  */
		lower_insn |= 0x1800;
	      }
	  }

	enum elf32_arm_stub_type stub_type = arm_stub_none;
	if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
	  {
	    /* Check if a stub has to be inserted because the destination
	       is too far.  */
	    struct elf32_arm_stub_hash_entry *stub_entry;
	    struct elf32_arm_link_hash_entry *hash;

	    hash = (struct elf32_arm_link_hash_entry *) h;

	    stub_type = arm_type_of_stub (info, input_section, rel,
					  st_type, &branch_type,
					  hash, value, sym_sec,
					  input_bfd, sym_name);

	    if (stub_type != arm_stub_none)
	      {
		/* The target is out of reach or we are changing modes, so
		   redirect the branch to the local stub for this
		   function.  */
		stub_entry = elf32_arm_get_stub_entry (input_section,
						       sym_sec, h,
						       rel, globals,
						       stub_type);
		if (stub_entry != NULL)
		  {
		    value = (stub_entry->stub_offset
			     + stub_entry->stub_sec->output_offset
			     + stub_entry->stub_sec->output_section->vma);

		    if (plt_offset != (bfd_vma) -1)
		      *unresolved_reloc_p = FALSE;
		  }

		/* If this call becomes a call to Arm, force BLX.  */
		if (globals->use_blx && (r_type == R_ARM_THM_CALL))
		  {
		    if ((stub_entry
			 && !arm_stub_is_thumb (stub_entry->stub_type))
			|| branch_type != ST_BRANCH_TO_THUMB)
		      lower_insn = (lower_insn & ~0x1000) | 0x0800;
		  }
	      }
	  }

	/* Handle calls via the PLT.  */
	if (stub_type == arm_stub_none && plt_offset != (bfd_vma) -1)
	  {
	    value = (splt->output_section->vma
		     + splt->output_offset
		     + plt_offset);

	    if (globals->use_blx
		&& r_type == R_ARM_THM_CALL
		&& ! using_thumb_only (globals))
	      {
		/* If the Thumb BLX instruction is available, convert
		   the BL to a BLX instruction to call the ARM-mode
		   PLT entry.  */
		lower_insn = (lower_insn & ~0x1000) | 0x0800;
		branch_type = ST_BRANCH_TO_ARM;
	      }
	    else
	      {
		if (! using_thumb_only (globals))
		  /* Target the Thumb stub before the ARM PLT entry.  */
		  value -= PLT_THUMB_STUB_SIZE;
		branch_type = ST_BRANCH_TO_THUMB;
	      }
	    *unresolved_reloc_p = FALSE;
	  }

	relocation = value + signed_addend;

	relocation -= (input_section->output_section->vma
		       + input_section->output_offset
		       + rel->r_offset);

	check = relocation >> howto->rightshift;

	/* If this is a signed value, the rightshift just dropped
	   leading 1 bits (assuming twos complement).  */
	if ((bfd_signed_vma) relocation >= 0)
	  signed_check = check;
	else
	  signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);

	/* Calculate the permissable maximum and minimum values for
	   this relocation according to whether we're relocating for
	   Thumb-2 or not.  */
	bitsize = howto->bitsize;
	if (!thumb2_bl)
	  bitsize -= 2;
	reloc_signed_max = (1 << (bitsize - 1)) - 1;
	reloc_signed_min = ~reloc_signed_max;

	/* Assumes two's complement.  */
	if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
	  overflow = TRUE;

	if ((lower_insn & 0x5000) == 0x4000)
	  /* For a BLX instruction, make sure that the relocation is rounded up
	     to a word boundary.  This follows the semantics of the instruction
	     which specifies that bit 1 of the target address will come from bit
	     1 of the base address.  */
	  relocation = (relocation + 2) & ~ 3;

	/* Put RELOCATION back into the insn.  Assumes two's complement.
	   We use the Thumb-2 encoding, which is safe even if dealing with
	   a Thumb-1 instruction by virtue of our overflow check above.  */
	reloc_sign = (signed_check < 0) ? 1 : 0;
	upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
		     | ((relocation >> 12) & 0x3ff)
		     | (reloc_sign << 10);
	lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
		     | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
		     | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
		     | ((relocation >> 1) & 0x7ff);

	/* Put the relocated value back in the object file:  */
	bfd_put_16 (input_bfd, upper_insn, hit_data);
	bfd_put_16 (input_bfd, lower_insn, hit_data + 2);

	return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
      }
    case R_ARM_THM_JUMP19:
      /* Thumb32 conditional branch instruction.  */
      {
	bfd_vma relocation;
	bfd_boolean overflow = FALSE;
	bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
	bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
	bfd_signed_vma reloc_signed_max = 0xffffe;
	bfd_signed_vma reloc_signed_min = -0x100000;
	bfd_signed_vma signed_check;
	enum elf32_arm_stub_type stub_type = arm_stub_none;
	struct elf32_arm_stub_hash_entry *stub_entry;
	struct elf32_arm_link_hash_entry *hash;

	/* Need to refetch the addend, reconstruct the top three bits,
	   and squish the two 11 bit pieces together.  */
	if (globals->use_rel)
	  {
	    bfd_vma S     = (upper_insn & 0x0400) >> 10;
	    bfd_vma upper = (upper_insn & 0x003f);
	    bfd_vma J1    = (lower_insn & 0x2000) >> 13;
	    bfd_vma J2    = (lower_insn & 0x0800) >> 11;
	    bfd_vma lower = (lower_insn & 0x07ff);

	    upper |= J1 << 6;
	    upper |= J2 << 7;
	    upper |= (!S) << 8;
	    upper -= 0x0100; /* Sign extend.  */

	    addend = (upper << 12) | (lower << 1);
	    signed_addend = addend;
	  }

	/* Handle calls via the PLT.  */
	if (plt_offset != (bfd_vma) -1)
	  {
	    value = (splt->output_section->vma
		     + splt->output_offset
		     + plt_offset);
	    /* Target the Thumb stub before the ARM PLT entry.  */
	    value -= PLT_THUMB_STUB_SIZE;
	    *unresolved_reloc_p = FALSE;
	  }

	hash = (struct elf32_arm_link_hash_entry *)h;

	stub_type = arm_type_of_stub (info, input_section, rel,
				      st_type, &branch_type,
				      hash, value, sym_sec,
				      input_bfd, sym_name);
	if (stub_type != arm_stub_none)
	  {
	    stub_entry = elf32_arm_get_stub_entry (input_section,
						   sym_sec, h,
						   rel, globals,
						   stub_type);
	    if (stub_entry != NULL)
	      value = (stub_entry->stub_offset
		       + stub_entry->stub_sec->output_offset
		       + stub_entry->stub_sec->output_section->vma);
	  }

	relocation = value + signed_addend;
	relocation -= (input_section->output_section->vma
		       + input_section->output_offset
		       + rel->r_offset);
	signed_check = (bfd_signed_vma) relocation;

	if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
	  overflow = TRUE;

	/* Put RELOCATION back into the insn.  */
	{
	  bfd_vma S  = (relocation & 0x00100000) >> 20;
	  bfd_vma J2 = (relocation & 0x00080000) >> 19;
	  bfd_vma J1 = (relocation & 0x00040000) >> 18;
	  bfd_vma hi = (relocation & 0x0003f000) >> 12;
	  bfd_vma lo = (relocation & 0x00000ffe) >> 1;

	  upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
	  lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
	}

	/* Put the relocated value back in the object file:  */
	bfd_put_16 (input_bfd, upper_insn, hit_data);
	bfd_put_16 (input_bfd, lower_insn, hit_data + 2);

	return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
      }
    case R_ARM_THM_JUMP11:
    case R_ARM_THM_JUMP8:
    case R_ARM_THM_JUMP6:
      /* Thumb B (branch) instruction).  */
      {
	bfd_signed_vma relocation;
	bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
	bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
	bfd_signed_vma signed_check;

	/* CZB cannot jump backward.  */
	if (r_type == R_ARM_THM_JUMP6)
	  reloc_signed_min = 0;

	if (globals->use_rel)
	  {
	    /* Need to refetch addend.  */
	    addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
	    if (addend & ((howto->src_mask + 1) >> 1))
	      {
		signed_addend = -1;
		signed_addend &= ~ howto->src_mask;
		signed_addend |= addend;
	      }
	    else
	      signed_addend = addend;
	    /* The value in the insn has been right shifted.  We need to
	       undo this, so that we can perform the address calculation
	       in terms of bytes.  */
	    signed_addend <<= howto->rightshift;
	  }
	relocation = value + signed_addend;

	relocation -= (input_section->output_section->vma
		       + input_section->output_offset
		       + rel->r_offset);

	relocation >>= howto->rightshift;
	signed_check = relocation;

	if (r_type == R_ARM_THM_JUMP6)
	  relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
	else
	  relocation &= howto->dst_mask;
	relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));

	bfd_put_16 (input_bfd, relocation, hit_data);

	/* Assumes two's complement.  */
	if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
	  return bfd_reloc_overflow;

	return bfd_reloc_ok;
      }

    case R_ARM_ALU_PCREL7_0:
    case R_ARM_ALU_PCREL15_8:
    case R_ARM_ALU_PCREL23_15:
      {
	bfd_vma insn;
	bfd_vma relocation;

	insn = bfd_get_32 (input_bfd, hit_data);
	if (globals->use_rel)
	  {
	    /* Extract the addend.  */
	    addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
	    signed_addend = addend;
	  }
	relocation = value + signed_addend;

	relocation -= (input_section->output_section->vma
		       + input_section->output_offset
		       + rel->r_offset);
	insn = (insn & ~0xfff)
	       | ((howto->bitpos << 7) & 0xf00)
	       | ((relocation >> howto->bitpos) & 0xff);
	bfd_put_32 (input_bfd, value, hit_data);
      }
      return bfd_reloc_ok;

    case R_ARM_GNU_VTINHERIT:
    case R_ARM_GNU_VTENTRY:
      return bfd_reloc_ok;
    case R_ARM_GOTOFF32:
      /* Relocation is relative to the start of the
	 global offset table.  */

      BFD_ASSERT (sgot != NULL);
      if (sgot == NULL)
	return bfd_reloc_notsupported;

      /* If we are addressing a Thumb function, we need to adjust the
	 address by one, so that attempts to call the function pointer will
	 correctly interpret it as Thumb code.  */
      if (branch_type == ST_BRANCH_TO_THUMB)
	value += 1;

      /* Note that sgot->output_offset is not involved in this
	 calculation.  We always want the start of .got.  If we
	 define _GLOBAL_OFFSET_TABLE in a different way, as is
	 permitted by the ABI, we might have to change this
	 calculation.  */
      value -= sgot->output_section->vma;
      return _bfd_final_link_relocate (howto, input_bfd, input_section,
				       contents, rel->r_offset, value,
				       rel->r_addend);

    case R_ARM_GOTPC:
      /* Use global offset table as symbol value.  */
      BFD_ASSERT (sgot != NULL);

      if (sgot == NULL)
	return bfd_reloc_notsupported;

      *unresolved_reloc_p = FALSE;
      value = sgot->output_section->vma;
      return _bfd_final_link_relocate (howto, input_bfd, input_section,
				       contents, rel->r_offset, value,
				       rel->r_addend);

    case R_ARM_GOT32:
    case R_ARM_GOT_PREL:
      /* Relocation is to the entry for this symbol in the
	 global offset table.  */
      if (sgot == NULL)
	return bfd_reloc_notsupported;

      if (dynreloc_st_type == STT_GNU_IFUNC
	  && plt_offset != (bfd_vma) -1
	  && (h == NULL || SYMBOL_REFERENCES_LOCAL (info, h)))
	{
	  /* We have a relocation against a locally-binding STT_GNU_IFUNC
	     symbol, and the relocation resolves directly to the runtime
	     target rather than to the .iplt entry.  This means that any
	     .got entry would be the same value as the .igot.plt entry,
	     so there's no point creating both.  */
	  sgot = globals->root.igotplt;
	  value = sgot->output_offset + gotplt_offset;
	}
      else if (h != NULL)
	{
	  bfd_vma off;

	  off = h->got.offset;
	  BFD_ASSERT (off != (bfd_vma) -1);
	  if ((off & 1) != 0)
	    {
	      /* We have already processed one GOT relocation against
		 this symbol.  */
	      off &= ~1;
	      if (globals->root.dynamic_sections_created
		  && !SYMBOL_REFERENCES_LOCAL (info, h))
		*unresolved_reloc_p = FALSE;
	    }
	  else
	    {
	      Elf_Internal_Rela outrel;
	      int isrofixup = 0;

	      if (((h->dynindx != -1) || globals->fdpic_p)
		  && !SYMBOL_REFERENCES_LOCAL (info, h))
		{
		  /* If the symbol doesn't resolve locally in a static
		     object, we have an undefined reference.  If the
		     symbol doesn't resolve locally in a dynamic object,
		     it should be resolved by the dynamic linker.  */
		  if (globals->root.dynamic_sections_created)
		    {
		      outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
		      *unresolved_reloc_p = FALSE;
		    }
		  else
		    outrel.r_info = 0;
		  outrel.r_addend = 0;
		}
	      else
		{
		  if (dynreloc_st_type == STT_GNU_IFUNC)
		    outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
		  else if (bfd_link_pic (info)
			   && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
			       || h->root.type != bfd_link_hash_undefweak))
		    outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
		  else
		    {
		      outrel.r_info = 0;
		      if (globals->fdpic_p)
			isrofixup = 1;
		    }
		  outrel.r_addend = dynreloc_value;
		}

	      /* The GOT entry is initialized to zero by default.
		 See if we should install a different value.  */
	      if (outrel.r_addend != 0
		  && (globals->use_rel || outrel.r_info == 0))
		{
		  bfd_put_32 (output_bfd, outrel.r_addend,
			      sgot->contents + off);
		  outrel.r_addend = 0;
		}

	      if (isrofixup)
		arm_elf_add_rofixup (output_bfd,
				     elf32_arm_hash_table(info)->srofixup,
				     sgot->output_section->vma
				     + sgot->output_offset + off);

	      else if (outrel.r_info != 0)
		{
		  outrel.r_offset = (sgot->output_section->vma
				     + sgot->output_offset
				     + off);
		  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
		}

	      h->got.offset |= 1;
	    }
	  value = sgot->output_offset + off;
	}
      else
	{
	  bfd_vma off;

	  BFD_ASSERT (local_got_offsets != NULL
		      && local_got_offsets[r_symndx] != (bfd_vma) -1);

	  off = local_got_offsets[r_symndx];

	  /* The offset must always be a multiple of 4.  We use the
	     least significant bit to record whether we have already
	     generated the necessary reloc.  */
	  if ((off & 1) != 0)
	    off &= ~1;
	  else
	    {
	      Elf_Internal_Rela outrel;
	      int isrofixup = 0;

	      if (dynreloc_st_type == STT_GNU_IFUNC)
		outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
	      else if (bfd_link_pic (info))
		outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
	      else
		{
		  outrel.r_info = 0;
		  if (globals->fdpic_p)
		    isrofixup = 1;
		}

	      /* The GOT entry is initialized to zero by default.
		 See if we should install a different value.  */
	      if (globals->use_rel || outrel.r_info == 0)
		bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + off);

	      if (isrofixup)
		arm_elf_add_rofixup (output_bfd,
				     globals->srofixup,
				     sgot->output_section->vma
				     + sgot->output_offset + off);

	      else if (outrel.r_info != 0)
		{
		  outrel.r_addend = addend + dynreloc_value;
		  outrel.r_offset = (sgot->output_section->vma
				     + sgot->output_offset
				     + off);
		  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
		}

	      local_got_offsets[r_symndx] |= 1;
	    }

	  value = sgot->output_offset + off;
	}
      if (r_type != R_ARM_GOT32)
	value += sgot->output_section->vma;

      return _bfd_final_link_relocate (howto, input_bfd, input_section,
				       contents, rel->r_offset, value,
				       rel->r_addend);
    case R_ARM_TLS_LDO32:
      value = value - dtpoff_base (info);

      return _bfd_final_link_relocate (howto, input_bfd, input_section,
				       contents, rel->r_offset, value,
				       rel->r_addend);

    case R_ARM_TLS_LDM32:
    case R_ARM_TLS_LDM32_FDPIC:
      {
	bfd_vma off;

	if (sgot == NULL)
	  abort ();

	off = globals->tls_ldm_got.offset;

	if ((off & 1) != 0)
	  off &= ~1;
	else
	  {
	    /* If we don't know the module number, create a relocation
	       for it.  */
	    if (bfd_link_dll (info))
	      {
		Elf_Internal_Rela outrel;

		if (srelgot == NULL)
		  abort ();

		outrel.r_addend = 0;
		outrel.r_offset = (sgot->output_section->vma
				   + sgot->output_offset + off);
		outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);

		if (globals->use_rel)
		  bfd_put_32 (output_bfd, outrel.r_addend,
			      sgot->contents + off);

		elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
	      }
	    else
	      bfd_put_32 (output_bfd, 1, sgot->contents + off);

	    globals->tls_ldm_got.offset |= 1;
	  }

	if (r_type == R_ARM_TLS_LDM32_FDPIC)
	  {
	    bfd_put_32(output_bfd,
		       globals->root.sgot->output_offset + off,
		       contents + rel->r_offset);

	    return bfd_reloc_ok;
	  }
	else
	  {
	    value = sgot->output_section->vma + sgot->output_offset + off
	      - (input_section->output_section->vma
		 + input_section->output_offset + rel->r_offset);

	    return _bfd_final_link_relocate (howto, input_bfd, input_section,
					     contents, rel->r_offset, value,
					     rel->r_addend);
	  }
      }
    case R_ARM_TLS_CALL:
    case R_ARM_THM_TLS_CALL:
    case R_ARM_TLS_GD32:
    case R_ARM_TLS_GD32_FDPIC:
    case R_ARM_TLS_IE32:
    case R_ARM_TLS_IE32_FDPIC:
    case R_ARM_TLS_GOTDESC:
    case R_ARM_TLS_DESCSEQ:
    case R_ARM_THM_TLS_DESCSEQ:
      {
	bfd_vma off, offplt;
	int indx = 0;
	char tls_type;

	BFD_ASSERT (sgot != NULL);

	if (h != NULL)
	  {
	    bfd_boolean dyn;
	    dyn = globals->root.dynamic_sections_created;
	    if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn,
						 bfd_link_pic (info),
						 h)
		&& (!bfd_link_pic (info)
		    || !SYMBOL_REFERENCES_LOCAL (info, h)))
	      {
		*unresolved_reloc_p = FALSE;
		indx = h->dynindx;
	      }
	    off = h->got.offset;
	    offplt = elf32_arm_hash_entry (h)->tlsdesc_got;
	    tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
	  }
	else
	  {
	    BFD_ASSERT (local_got_offsets != NULL);
	    off = local_got_offsets[r_symndx];
	    offplt = local_tlsdesc_gotents[r_symndx];
	    tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
	  }

	/* Linker relaxations happens from one of the
	   R_ARM_{GOTDESC,CALL,DESCSEQ} relocations to IE or LE.  */
	if (ELF32_R_TYPE(rel->r_info) != r_type)
	  tls_type = GOT_TLS_IE;

	BFD_ASSERT (tls_type != GOT_UNKNOWN);

	if ((off & 1) != 0)
	  off &= ~1;
	else
	  {
	    bfd_boolean need_relocs = FALSE;
	    Elf_Internal_Rela outrel;
	    int cur_off = off;

	    /* The GOT entries have not been initialized yet.  Do it
	       now, and emit any relocations.  If both an IE GOT and a
	       GD GOT are necessary, we emit the GD first.  */

	    if ((bfd_link_dll (info) || indx != 0)
		&& (h == NULL
		    || (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
			&& !resolved_to_zero)
		    || h->root.type != bfd_link_hash_undefweak))
	      {
		need_relocs = TRUE;
		BFD_ASSERT (srelgot != NULL);
	      }

	    if (tls_type & GOT_TLS_GDESC)
	      {
		bfd_byte *loc;

		/* We should have relaxed, unless this is an undefined
		   weak symbol.  */
		BFD_ASSERT ((h && (h->root.type == bfd_link_hash_undefweak))
			    || bfd_link_dll (info));
		BFD_ASSERT (globals->sgotplt_jump_table_size + offplt + 8
			    <= globals->root.sgotplt->size);

		outrel.r_addend = 0;
		outrel.r_offset = (globals->root.sgotplt->output_section->vma
				   + globals->root.sgotplt->output_offset
				   + offplt
				   + globals->sgotplt_jump_table_size);

		outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DESC);
		sreloc = globals->root.srelplt;
		loc = sreloc->contents;
		loc += globals->next_tls_desc_index++ * RELOC_SIZE (globals);
		BFD_ASSERT (loc + RELOC_SIZE (globals)
			    <= sreloc->contents + sreloc->size);

		SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);

		/* For globals, the first word in the relocation gets
		   the relocation index and the top bit set, or zero,
		   if we're binding now.  For locals, it gets the
		   symbol's offset in the tls section.  */
		bfd_put_32 (output_bfd,
			    !h ? value - elf_hash_table (info)->tls_sec->vma
			    : info->flags & DF_BIND_NOW ? 0
			    : 0x80000000 | ELF32_R_SYM (outrel.r_info),
			    globals->root.sgotplt->contents + offplt
			    + globals->sgotplt_jump_table_size);

		/* Second word in the relocation is always zero.  */
		bfd_put_32 (output_bfd, 0,
			    globals->root.sgotplt->contents + offplt
			    + globals->sgotplt_jump_table_size + 4);
	      }
	    if (tls_type & GOT_TLS_GD)
	      {
		if (need_relocs)
		  {
		    outrel.r_addend = 0;
		    outrel.r_offset = (sgot->output_section->vma
				       + sgot->output_offset
				       + cur_off);
		    outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);

		    if (globals->use_rel)
		      bfd_put_32 (output_bfd, outrel.r_addend,
				  sgot->contents + cur_off);

		    elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);

		    if (indx == 0)
		      bfd_put_32 (output_bfd, value - dtpoff_base (info),
				  sgot->contents + cur_off + 4);
		    else
		      {
			outrel.r_addend = 0;
			outrel.r_info = ELF32_R_INFO (indx,
						      R_ARM_TLS_DTPOFF32);
			outrel.r_offset += 4;

			if (globals->use_rel)
			  bfd_put_32 (output_bfd, outrel.r_addend,
				      sgot->contents + cur_off + 4);

			elf32_arm_add_dynreloc (output_bfd, info,
						srelgot, &outrel);
		      }
		  }
		else
		  {
		    /* If we are not emitting relocations for a
		       general dynamic reference, then we must be in a
		       static link or an executable link with the
		       symbol binding locally.  Mark it as belonging
		       to module 1, the executable.  */
		    bfd_put_32 (output_bfd, 1,
				sgot->contents + cur_off);
		    bfd_put_32 (output_bfd, value - dtpoff_base (info),
				sgot->contents + cur_off + 4);
		  }

		cur_off += 8;
	      }

	    if (tls_type & GOT_TLS_IE)
	      {
		if (need_relocs)
		  {
		    if (indx == 0)
		      outrel.r_addend = value - dtpoff_base (info);
		    else
		      outrel.r_addend = 0;
		    outrel.r_offset = (sgot->output_section->vma
				       + sgot->output_offset
				       + cur_off);
		    outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);

		    if (globals->use_rel)
		      bfd_put_32 (output_bfd, outrel.r_addend,
				  sgot->contents + cur_off);

		    elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
		  }
		else
		  bfd_put_32 (output_bfd, tpoff (info, value),
			      sgot->contents + cur_off);
		cur_off += 4;
	      }

	    if (h != NULL)
	      h->got.offset |= 1;
	    else
	      local_got_offsets[r_symndx] |= 1;
	  }

	if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32 && r_type != R_ARM_TLS_GD32_FDPIC)
	  off += 8;
	else if (tls_type & GOT_TLS_GDESC)
	  off = offplt;

	if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL
	    || ELF32_R_TYPE(rel->r_info) == R_ARM_THM_TLS_CALL)
	  {
	    bfd_signed_vma offset;
	    /* TLS stubs are arm mode.  The original symbol is a
	       data object, so branch_type is bogus.  */
	    branch_type = ST_BRANCH_TO_ARM;
	    enum elf32_arm_stub_type stub_type
	      = arm_type_of_stub (info, input_section, rel,
				  st_type, &branch_type,
				  (struct elf32_arm_link_hash_entry *)h,
				  globals->tls_trampoline, globals->root.splt,
				  input_bfd, sym_name);

	    if (stub_type != arm_stub_none)
	      {
		struct elf32_arm_stub_hash_entry *stub_entry
		  = elf32_arm_get_stub_entry
		  (input_section, globals->root.splt, 0, rel,
		   globals, stub_type);
		offset = (stub_entry->stub_offset
			  + stub_entry->stub_sec->output_offset
			  + stub_entry->stub_sec->output_section->vma);
	      }
	    else
	      offset = (globals->root.splt->output_section->vma
			+ globals->root.splt->output_offset
			+ globals->tls_trampoline);

	    if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL)
	      {
		unsigned long inst;

		offset -= (input_section->output_section->vma
			   + input_section->output_offset
			   + rel->r_offset + 8);

		inst = offset >> 2;
		inst &= 0x00ffffff;
		value = inst | (globals->use_blx ? 0xfa000000 : 0xeb000000);
	      }
	    else
	      {
		/* Thumb blx encodes the offset in a complicated
		   fashion.  */
		unsigned upper_insn, lower_insn;
		unsigned neg;

		offset -= (input_section->output_section->vma
			   + input_section->output_offset
			   + rel->r_offset + 4);

		if (stub_type != arm_stub_none
		    && arm_stub_is_thumb (stub_type))
		  {
		    lower_insn = 0xd000;
		  }
		else
		  {
		    lower_insn = 0xc000;
		    /* Round up the offset to a word boundary.  */
		    offset = (offset + 2) & ~2;
		  }

		neg = offset < 0;
		upper_insn = (0xf000
			      | ((offset >> 12) & 0x3ff)
			      | (neg << 10));
		lower_insn |= (((!((offset >> 23) & 1)) ^ neg) << 13)
			      | (((!((offset >> 22) & 1)) ^ neg) << 11)
			      | ((offset >> 1) & 0x7ff);
		bfd_put_16 (input_bfd, upper_insn, hit_data);
		bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
		return bfd_reloc_ok;
	      }
	  }
	/* These relocations needs special care, as besides the fact
	   they point somewhere in .gotplt, the addend must be
	   adjusted accordingly depending on the type of instruction
	   we refer to.  */
	else if ((r_type == R_ARM_TLS_GOTDESC) && (tls_type & GOT_TLS_GDESC))
	  {
	    unsigned long data, insn;
	    unsigned thumb;

	    data = bfd_get_signed_32 (input_bfd, hit_data);
	    thumb = data & 1;
	    data &= ~1ul;

	    if (thumb)
	      {
		insn = bfd_get_16 (input_bfd, contents + rel->r_offset - data);
		if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
		  insn = (insn << 16)
		    | bfd_get_16 (input_bfd,
				  contents + rel->r_offset - data + 2);
		if ((insn & 0xf800c000) == 0xf000c000)
		  /* bl, blx */
		  value = -6;
		else if ((insn & 0xffffff00) == 0x4400)
		  /* add */
		  value = -5;
		else
		  {
		    _bfd_error_handler
		      /* xgettext:c-format */
		      (_("%pB(%pA+%#" PRIx64 "): "
			 "unexpected %s instruction '%#lx' "
			 "referenced by TLS_GOTDESC"),
		       input_bfd, input_section, (uint64_t) rel->r_offset,
		       "Thumb", insn);
		    return bfd_reloc_notsupported;
		  }
	      }
	    else
	      {
		insn = bfd_get_32 (input_bfd, contents + rel->r_offset - data);

		switch (insn >> 24)
		  {
		  case 0xeb:  /* bl */
		  case 0xfa:  /* blx */
		    value = -4;
		    break;

		  case 0xe0:	/* add */
		    value = -8;
		    break;

		  default:
		    _bfd_error_handler
		      /* xgettext:c-format */
		      (_("%pB(%pA+%#" PRIx64 "): "
			 "unexpected %s instruction '%#lx' "
			 "referenced by TLS_GOTDESC"),
		       input_bfd, input_section, (uint64_t) rel->r_offset,
		       "ARM", insn);
		    return bfd_reloc_notsupported;
		  }
	      }

	    value += ((globals->root.sgotplt->output_section->vma
		       + globals->root.sgotplt->output_offset + off)
		      - (input_section->output_section->vma
			 + input_section->output_offset
			 + rel->r_offset)
		      + globals->sgotplt_jump_table_size);
	  }
	else
	  value = ((globals->root.sgot->output_section->vma
		    + globals->root.sgot->output_offset + off)
		   - (input_section->output_section->vma
		      + input_section->output_offset + rel->r_offset));

	if (globals->fdpic_p && (r_type == R_ARM_TLS_GD32_FDPIC ||
				 r_type == R_ARM_TLS_IE32_FDPIC))
	  {
	    /* For FDPIC relocations, resolve to the offset of the GOT
	       entry from the start of GOT.  */
	    bfd_put_32(output_bfd,
		       globals->root.sgot->output_offset + off,
		       contents + rel->r_offset);

	    return bfd_reloc_ok;
	  }
	else
	  return _bfd_final_link_relocate (howto, input_bfd, input_section,
					   contents, rel->r_offset, value,
					   rel->r_addend);
      }
    case R_ARM_TLS_LE32:
      if (bfd_link_dll (info))
	{
	  _bfd_error_handler
	    /* xgettext:c-format */
	    (_("%pB(%pA+%#" PRIx64 "): %s relocation not permitted "
	       "in shared object"),
	     input_bfd, input_section, (uint64_t) rel->r_offset, howto->name);
	  return bfd_reloc_notsupported;
	}
      else
	value = tpoff (info, value);

      return _bfd_final_link_relocate (howto, input_bfd, input_section,
				       contents, rel->r_offset, value,
				       rel->r_addend);

    case R_ARM_V4BX:
      if (globals->fix_v4bx)
	{
	  bfd_vma insn = bfd_get_32 (input_bfd, hit_data);

	  /* Ensure that we have a BX instruction.  */
	  BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);

	  if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
	    {
	      /* Branch to veneer.  */
	      bfd_vma glue_addr;
	      glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
	      glue_addr -= input_section->output_section->vma
			   + input_section->output_offset
			   + rel->r_offset + 8;
	      insn = (insn & 0xf0000000) | 0x0a000000
		     | ((glue_addr >> 2) & 0x00ffffff);
	    }
	  else
	    {
	      /* Preserve Rm (lowest four bits) and the condition code
		 (highest four bits).  Other bits encode MOV PC,Rm.  */
	      insn = (insn & 0xf000000f) | 0x01a0f000;
	    }

	  bfd_put_32 (input_bfd, insn, hit_data);
	}
      return bfd_reloc_ok;
    case R_ARM_MOVW_ABS_NC:
    case R_ARM_MOVT_ABS:
    case R_ARM_MOVW_PREL_NC:
    case R_ARM_MOVT_PREL:
    /* Until we properly support segment-base-relative addressing then
       we assume the segment base to be zero, as for the group relocations.
       Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
       and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS.  */
    case R_ARM_MOVW_BREL_NC:
    case R_ARM_MOVW_BREL:
    case R_ARM_MOVT_BREL:
      {
	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);

	if (globals->use_rel)
	  {
	    addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
	    signed_addend = (addend ^ 0x8000) - 0x8000;
	  }

	value += signed_addend;

	if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
	  value -= (input_section->output_section->vma
		    + input_section->output_offset + rel->r_offset);

	if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
	  return bfd_reloc_overflow;

	if (branch_type == ST_BRANCH_TO_THUMB)
	  value |= 1;

	if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
	    || r_type == R_ARM_MOVT_BREL)
	  value >>= 16;

	insn &= 0xfff0f000;
	insn |= value & 0xfff;
	insn |= (value & 0xf000) << 4;
	bfd_put_32 (input_bfd, insn, hit_data);
      }
      return bfd_reloc_ok;
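
      /* Illustrative note (added; not in the original source): the ARM MOVW
	 encoding splits its 16-bit immediate into imm4:imm12 (bits 19-16 and
	 11-0), which is why the code above places (value & 0xf000) << 4 into
	 bits 19-16 and value & 0xfff into bits 11-0.  For value = 0x1234
	 that is imm4 = 0x1 and imm12 = 0x234.  */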
    case R_ARM_THM_MOVW_ABS_NC:
    case R_ARM_THM_MOVT_ABS:
    case R_ARM_THM_MOVW_PREL_NC:
    case R_ARM_THM_MOVT_PREL:
    /* Until we properly support segment-base-relative addressing then
       we assume the segment base to be zero, as for the above relocations.
       Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
       R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
       as R_ARM_THM_MOVT_ABS.  */
    case R_ARM_THM_MOVW_BREL_NC:
    case R_ARM_THM_MOVW_BREL:
    case R_ARM_THM_MOVT_BREL:
      {
	bfd_vma insn;

	insn = bfd_get_16 (input_bfd, hit_data) << 16;
	insn |= bfd_get_16 (input_bfd, hit_data + 2);

	if (globals->use_rel)
	  {
	    addend = ((insn >> 4)  & 0xf000)
		   | ((insn >> 15) & 0x0800)
		   | ((insn >> 4)  & 0x0700)
		   | (insn	   & 0x00ff);
	    signed_addend = (addend ^ 0x8000) - 0x8000;
	  }

	value += signed_addend;

	if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
	  value -= (input_section->output_section->vma
		    + input_section->output_offset + rel->r_offset);

	if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
	  return bfd_reloc_overflow;

	if (branch_type == ST_BRANCH_TO_THUMB)
	  value |= 1;

	if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
	    || r_type == R_ARM_THM_MOVT_BREL)
	  value >>= 16;

	insn &= 0xfbf08f00;
	insn |= (value & 0xf000) << 4;
	insn |= (value & 0x0800) << 15;
	insn |= (value & 0x0700) << 4;
	insn |= (value & 0x00ff);

	bfd_put_16 (input_bfd, insn >> 16, hit_data);
	bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
      }
      return bfd_reloc_ok;
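
      /* Illustrative note (added; not in the original source): the Thumb-2
	 MOVW immediate is split into imm4:i:imm3:imm8 (bits 19-16, 26, 14-12
	 and 7-0 of the combined 32-bit encoding), which is exactly what the
	 shifts above pack and the use_rel path unpacks.  For value = 0x1234:
	 imm4 = 0x1, i = 0, imm3 = 0x2, imm8 = 0x34.  */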
    case R_ARM_ALU_PC_G0_NC:
    case R_ARM_ALU_PC_G1_NC:
    case R_ARM_ALU_PC_G0:
    case R_ARM_ALU_PC_G1:
    case R_ARM_ALU_PC_G2:
    case R_ARM_ALU_SB_G0_NC:
    case R_ARM_ALU_SB_G1_NC:
    case R_ARM_ALU_SB_G0:
    case R_ARM_ALU_SB_G1:
    case R_ARM_ALU_SB_G2:
      {
	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
	bfd_vma pc = input_section->output_section->vma
		     + input_section->output_offset + rel->r_offset;
	/* sb is the origin of the *segment* containing the symbol.  */
	bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
	bfd_vma residual;
	bfd_vma g_n;
	bfd_signed_vma signed_value;
	int group = 0;

	/* Determine which group of bits to select.  */
	switch (r_type)
	  {
	  case R_ARM_ALU_PC_G0_NC:
	  case R_ARM_ALU_PC_G0:
	  case R_ARM_ALU_SB_G0_NC:
	  case R_ARM_ALU_SB_G0:
	    group = 0;
	    break;

	  case R_ARM_ALU_PC_G1_NC:
	  case R_ARM_ALU_PC_G1:
	  case R_ARM_ALU_SB_G1_NC:
	  case R_ARM_ALU_SB_G1:
	    group = 1;
	    break;

	  case R_ARM_ALU_PC_G2:
	  case R_ARM_ALU_SB_G2:
	    group = 2;
	    break;

	  default:
	    abort ();
	  }

	/* If REL, extract the addend from the insn.  If RELA, it will
	   have already been fetched for us.  */
	if (globals->use_rel)
	  {
	    int negative;
	    bfd_vma constant = insn & 0xff;
	    bfd_vma rotation = (insn & 0xf00) >> 8;

	    if (rotation == 0)
	      signed_addend = constant;
	    else
	      {
		/* Compensate for the fact that in the instruction, the
		   rotation is stored in multiples of 2 bits.  */
		rotation *= 2;

		/* Rotate "constant" right by "rotation" bits.  */
		signed_addend = (constant >> rotation) |
				(constant << (8 * sizeof (bfd_vma) - rotation));
	      }

	    /* Determine if the instruction is an ADD or a SUB.
	       (For REL, this determines the sign of the addend.)  */
	    negative = identify_add_or_sub (insn);
	    if (negative == 0)
	      {
		_bfd_error_handler
		  /* xgettext:c-format */
		  (_("%pB(%pA+%#" PRIx64 "): only ADD or SUB instructions "
		     "are allowed for ALU group relocations"),
		  input_bfd, input_section, (uint64_t) rel->r_offset);
		return bfd_reloc_overflow;
	      }

	    signed_addend *= negative;
	  }

	/* Compute the value (X) to go in the place.  */
	if (r_type == R_ARM_ALU_PC_G0_NC
	    || r_type == R_ARM_ALU_PC_G1_NC
	    || r_type == R_ARM_ALU_PC_G0
	    || r_type == R_ARM_ALU_PC_G1
	    || r_type == R_ARM_ALU_PC_G2)
	  /* PC relative.  */
	  signed_value = value - pc + signed_addend;
	else
	  /* Section base relative.  */
	  signed_value = value - sb + signed_addend;

	/* If the target symbol is a Thumb function, then set the
	   Thumb bit in the address.  */
	if (branch_type == ST_BRANCH_TO_THUMB)
	  signed_value |= 1;

	/* Calculate the value of the relevant G_n, in encoded
	   constant-with-rotation format.  */
	g_n = calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
					  group, &residual);

	/* Check for overflow if required.  */
	if ((r_type == R_ARM_ALU_PC_G0
	     || r_type == R_ARM_ALU_PC_G1
	     || r_type == R_ARM_ALU_PC_G2
	     || r_type == R_ARM_ALU_SB_G0
	     || r_type == R_ARM_ALU_SB_G1
	     || r_type == R_ARM_ALU_SB_G2) && residual != 0)
	  {
	    _bfd_error_handler
	      /* xgettext:c-format */
	      (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
		 "splitting %#" PRIx64 " for group relocation %s"),
	       input_bfd, input_section, (uint64_t) rel->r_offset,
	       (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
	       howto->name);
	    return bfd_reloc_overflow;
	  }

	/* Mask out the value and the ADD/SUB part of the opcode; take care
	   not to destroy the S bit.  */
	insn &= 0xff1ff000;

	/* Set the opcode according to whether the value to go in the
	   place is negative.  */
	if (signed_value < 0)
	  insn |= 1 << 22;
	else
	  insn |= 1 << 23;

	/* Encode the offset.  */
	insn |= g_n;

	bfd_put_32 (input_bfd, insn, hit_data);
      }
      return bfd_reloc_ok;
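    /* Worked example (illustrative): the group relocations split an offset
       into successive 8-bit chunks, each encodable as an ARM rotated
       constant, starting from the most significant end.  For an offset of
       0x12345, G0 selects 0x12000 (residual 0x345), G1 then selects 0x344
       (residual 0x1) and G2 selects 0x1 (residual 0).  The non-_NC
       *_G0/_G1/_G2 variants above reject a non-zero residual left over
       after their own group, since the offset would not fit the
       instruction sequence being relocated.  */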
    case R_ARM_LDR_PC_G0:
    case R_ARM_LDR_PC_G1:
    case R_ARM_LDR_PC_G2:
    case R_ARM_LDR_SB_G0:
    case R_ARM_LDR_SB_G1:
    case R_ARM_LDR_SB_G2:
      {
	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
	bfd_vma pc = input_section->output_section->vma
		     + input_section->output_offset + rel->r_offset;
	/* sb is the origin of the *segment* containing the symbol.  */
	bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
	bfd_vma residual;
	bfd_signed_vma signed_value;
	int group = 0;

	/* Determine which groups of bits to calculate.  */
	switch (r_type)
	  {
	  case R_ARM_LDR_PC_G0:
	  case R_ARM_LDR_SB_G0:
	    group = 0;
	    break;

	  case R_ARM_LDR_PC_G1:
	  case R_ARM_LDR_SB_G1:
	    group = 1;
	    break;

	  case R_ARM_LDR_PC_G2:
	  case R_ARM_LDR_SB_G2:
	    group = 2;
	    break;

	  default:
	    abort ();
	  }

	/* If REL, extract the addend from the insn.  If RELA, it will
	   have already been fetched for us.  */
	if (globals->use_rel)
	  {
	    int negative = (insn & (1 << 23)) ? 1 : -1;
	    signed_addend = negative * (insn & 0xfff);
	  }

	/* Compute the value (X) to go in the place.  */
	if (r_type == R_ARM_LDR_PC_G0
	    || r_type == R_ARM_LDR_PC_G1
	    || r_type == R_ARM_LDR_PC_G2)
	  /* PC relative.  */
	  signed_value = value - pc + signed_addend;
	else
	  /* Section base relative.  */
	  signed_value = value - sb + signed_addend;

	/* Calculate the value of the relevant G_{n-1} to obtain
	   the residual at that stage.  */
	calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
				    group - 1, &residual);

	/* Check for overflow.  */
	if (residual >= 0x1000)
	  {
	    _bfd_error_handler
	      /* xgettext:c-format */
	      (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
		 "splitting %#" PRIx64 " for group relocation %s"),
	       input_bfd, input_section, (uint64_t) rel->r_offset,
	       (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
	       howto->name);
	    return bfd_reloc_overflow;
	  }

	/* Mask out the value and U bit.  */
	insn &= 0xff7ff000;

	/* Set the U bit if the value to go in the place is non-negative.  */
	if (signed_value >= 0)
	  insn |= 1 << 23;

	/* Encode the offset.  */
	insn |= residual;

	bfd_put_32 (input_bfd, insn, hit_data);
      }
      return bfd_reloc_ok;

    case R_ARM_LDRS_PC_G0:
    case R_ARM_LDRS_PC_G1:
    case R_ARM_LDRS_PC_G2:
    case R_ARM_LDRS_SB_G0:
    case R_ARM_LDRS_SB_G1:
    case R_ARM_LDRS_SB_G2:
      {
	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
	bfd_vma pc = input_section->output_section->vma
		     + input_section->output_offset + rel->r_offset;
	/* sb is the origin of the *segment* containing the symbol.  */
	bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
	bfd_vma residual;
	bfd_signed_vma signed_value;
	int group = 0;

	/* Determine which groups of bits to calculate.  */
	switch (r_type)
	  {
	  case R_ARM_LDRS_PC_G0:
	  case R_ARM_LDRS_SB_G0:
	    group = 0;
	    break;

	  case R_ARM_LDRS_PC_G1:
	  case R_ARM_LDRS_SB_G1:
	    group = 1;
	    break;

	  case R_ARM_LDRS_PC_G2:
	  case R_ARM_LDRS_SB_G2:
	    group = 2;
	    break;

	  default:
	    abort ();
	  }

	/* If REL, extract the addend from the insn.  If RELA, it will
	   have already been fetched for us.  */
	if (globals->use_rel)
	  {
	    int negative = (insn & (1 << 23)) ? 1 : -1;
	    signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
	  }

	/* Compute the value (X) to go in the place.  */
	if (r_type == R_ARM_LDRS_PC_G0
	    || r_type == R_ARM_LDRS_PC_G1
	    || r_type == R_ARM_LDRS_PC_G2)
	  /* PC relative.  */
	  signed_value = value - pc + signed_addend;
	else
	  /* Section base relative.  */
	  signed_value = value - sb + signed_addend;

	/* Calculate the value of the relevant G_{n-1} to obtain
	   the residual at that stage.  */
	calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
				    group - 1, &residual);

	/* Check for overflow.  */
	if (residual >= 0x100)
	  {
	    _bfd_error_handler
	      /* xgettext:c-format */
	      (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
		 "splitting %#" PRIx64 " for group relocation %s"),
	       input_bfd, input_section, (uint64_t) rel->r_offset,
	       (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
	       howto->name);
	    return bfd_reloc_overflow;
	  }

	/* Mask out the value and U bit.  */
	insn &= 0xff7ff0f0;

	/* Set the U bit if the value to go in the place is non-negative.  */
	if (signed_value >= 0)
	  insn |= 1 << 23;

	/* Encode the offset.  */
	insn |= ((residual & 0xf0) << 4) | (residual & 0xf);

	bfd_put_32 (input_bfd, insn, hit_data);
      }
      return bfd_reloc_ok;

    case R_ARM_LDC_PC_G0:
    case R_ARM_LDC_PC_G1:
    case R_ARM_LDC_PC_G2:
    case R_ARM_LDC_SB_G0:
    case R_ARM_LDC_SB_G1:
    case R_ARM_LDC_SB_G2:
      {
	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
	bfd_vma pc = input_section->output_section->vma
		     + input_section->output_offset + rel->r_offset;
	/* sb is the origin of the *segment* containing the symbol.  */
	bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
	bfd_vma residual;
	bfd_signed_vma signed_value;
	int group = 0;

	/* Determine which groups of bits to calculate.  */
	switch (r_type)
	  {
	  case R_ARM_LDC_PC_G0:
	  case R_ARM_LDC_SB_G0:
	    group = 0;
	    break;

	  case R_ARM_LDC_PC_G1:
	  case R_ARM_LDC_SB_G1:
	    group = 1;
	    break;

	  case R_ARM_LDC_PC_G2:
	  case R_ARM_LDC_SB_G2:
	    group = 2;
	    break;

	  default:
	    abort ();
	  }

	/* If REL, extract the addend from the insn.  If RELA, it will
	   have already been fetched for us.  */
	if (globals->use_rel)
	  {
	    int negative = (insn & (1 << 23)) ? 1 : -1;
	    signed_addend = negative * ((insn & 0xff) << 2);
	  }

	/* Compute the value (X) to go in the place.  */
	if (r_type == R_ARM_LDC_PC_G0
	    || r_type == R_ARM_LDC_PC_G1
	    || r_type == R_ARM_LDC_PC_G2)
	  /* PC relative.  */
	  signed_value = value - pc + signed_addend;
	else
	  /* Section base relative.  */
	  signed_value = value - sb + signed_addend;

	/* Calculate the value of the relevant G_{n-1} to obtain
	   the residual at that stage.  */
	calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
				    group - 1, &residual);

	/* Check for overflow.  (The absolute value to go in the place must be
	   divisible by four and, after having been divided by four, must
	   fit in eight bits.)  */
	if ((residual & 0x3) != 0 || residual >= 0x400)
	  {
	    _bfd_error_handler
	      /* xgettext:c-format */
	      (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
		 "splitting %#" PRIx64 " for group relocation %s"),
	       input_bfd, input_section, (uint64_t) rel->r_offset,
	       (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
	       howto->name);
	    return bfd_reloc_overflow;
	  }

	/* Mask out the value and U bit.  */
	insn &= 0xff7fff00;

	/* Set the U bit if the value to go in the place is non-negative.  */
	if (signed_value >= 0)
	  insn |= 1 << 23;

	/* Encode the offset.  */
	insn |= residual >> 2;

	bfd_put_32 (input_bfd, insn, hit_data);
      }
      return bfd_reloc_ok;

    case R_ARM_THM_ALU_ABS_G0_NC:
    case R_ARM_THM_ALU_ABS_G1_NC:
    case R_ARM_THM_ALU_ABS_G2_NC:
    case R_ARM_THM_ALU_ABS_G3_NC:
      {
	const int shift_array[4] = {0, 8, 16, 24};
	bfd_vma insn = bfd_get_16 (input_bfd, hit_data);
	bfd_vma addr = value;
	int shift = shift_array[r_type - R_ARM_THM_ALU_ABS_G0_NC];

	/* Compute address.  */
	if (globals->use_rel)
	  signed_addend = insn & 0xff;
	addr += signed_addend;
	if (branch_type == ST_BRANCH_TO_THUMB)
	  addr |= 1;
	/* Clean imm8 insn.  */
	insn &= 0xff00;
	/* And update with correct part of address.  */
	insn |= (addr >> shift) & 0xff;
	/* Update insn.  */
	bfd_put_16 (input_bfd, insn, hit_data);
      }

      *unresolved_reloc_p = FALSE;
      return bfd_reloc_ok;
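    /* Illustrative note: the four R_ARM_THM_ALU_ABS_Gn_NC relocations each
       deposit one byte of the (possibly Thumb-bit-adjusted) address into
       the 8-bit immediate field of a 16-bit Thumb instruction: G0 takes
       bits 7:0, G1 bits 15:8, G2 bits 23:16 and G3 bits 31:24, hence the
       shift_array of {0, 8, 16, 24} above.  */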
    case R_ARM_GOTOFFFUNCDESC:
      {
	if (h == NULL)
	  {
	    struct fdpic_local *local_fdpic_cnts = elf32_arm_local_fdpic_cnts(input_bfd);
	    int dynindx = elf_section_data (sym_sec->output_section)->dynindx;
	    int offset = local_fdpic_cnts[r_symndx].funcdesc_offset & ~1;
	    bfd_vma addr = dynreloc_value - sym_sec->output_section->vma;
	    bfd_vma seg = -1;

	    if (bfd_link_pic(info) && dynindx == 0)
	      abort();

	    /* Resolve relocation.  */
	    bfd_put_32(output_bfd, (offset + sgot->output_offset)
		       , contents + rel->r_offset);
	    /* Emit R_ARM_FUNCDESC_VALUE or two fixups on funcdesc if
	       not done yet.  */
	    arm_elf_fill_funcdesc(output_bfd, info,
				  &local_fdpic_cnts[r_symndx].funcdesc_offset,
				  dynindx, offset, addr, dynreloc_value, seg);
	  }
	else
	  {
	    int dynindx;
	    int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
	    bfd_vma addr;
	    bfd_vma seg = -1;

	    /* For static binaries, sym_sec can be null.  */
	    if (sym_sec)
	      {
		dynindx = elf_section_data (sym_sec->output_section)->dynindx;
		addr = dynreloc_value - sym_sec->output_section->vma;
	      }
	    else
	      {
		dynindx = 0;
		addr = 0;
	      }

	    if (bfd_link_pic(info) && dynindx == 0)
	      abort();

	    /* This case cannot occur since funcdesc is allocated by
	       the dynamic loader so we cannot resolve the relocation.  */
	    if (h->dynindx != -1)
	      abort();

	    /* Resolve relocation.  */
	    bfd_put_32(output_bfd, (offset + sgot->output_offset),
		       contents + rel->r_offset);
	    /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet.  */
	    arm_elf_fill_funcdesc(output_bfd, info,
				  &eh->fdpic_cnts.funcdesc_offset,
				  dynindx, offset, addr, dynreloc_value, seg);
	  }
      }
      *unresolved_reloc_p = FALSE;
      return bfd_reloc_ok;

    case R_ARM_GOTFUNCDESC:
      {
	if (h != NULL)
	  {
	    Elf_Internal_Rela outrel;

	    /* Resolve relocation.  */
	    bfd_put_32(output_bfd, ((eh->fdpic_cnts.gotfuncdesc_offset & ~1)
				    + sgot->output_offset),
		       contents + rel->r_offset);
	    /* Add funcdesc and associated R_ARM_FUNCDESC_VALUE.  */
	    if(h->dynindx == -1)
	      {
		int dynindx;
		int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
		bfd_vma addr;
		bfd_vma seg = -1;

		/* For static binaries sym_sec can be null.  */
		if (sym_sec)
		  {
		    dynindx = elf_section_data (sym_sec->output_section)->dynindx;
		    addr = dynreloc_value - sym_sec->output_section->vma;
		  }
		else
		  {
		    dynindx = 0;
		    addr = 0;
		  }

		/* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet.  */
		arm_elf_fill_funcdesc(output_bfd, info,
				      &eh->fdpic_cnts.funcdesc_offset,
				      dynindx, offset, addr, dynreloc_value, seg);
	      }

	    /* Add a dynamic relocation on GOT entry if not already done.  */
	    if ((eh->fdpic_cnts.gotfuncdesc_offset & 1) == 0)
	      {
		if (h->dynindx == -1)
		  {
		    outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
		    if (h->root.type == bfd_link_hash_undefweak)
		      bfd_put_32(output_bfd, 0, sgot->contents
				 + (eh->fdpic_cnts.gotfuncdesc_offset & ~1));
		    else
		      bfd_put_32(output_bfd, sgot->output_section->vma
				 + sgot->output_offset
				 + (eh->fdpic_cnts.funcdesc_offset & ~1),
				 sgot->contents
				 + (eh->fdpic_cnts.gotfuncdesc_offset & ~1));
		  }
		else
		  {
		    outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_FUNCDESC);
		  }
		outrel.r_offset = sgot->output_section->vma
		  + sgot->output_offset
		  + (eh->fdpic_cnts.gotfuncdesc_offset & ~1);
		outrel.r_addend = 0;
		if (h->dynindx == -1 && !bfd_link_pic(info))
		  if (h->root.type == bfd_link_hash_undefweak)
		    arm_elf_add_rofixup(output_bfd, globals->srofixup, -1);
		  else
		    arm_elf_add_rofixup(output_bfd, globals->srofixup,
					outrel.r_offset);
		else
		  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
		eh->fdpic_cnts.gotfuncdesc_offset |= 1;
	      }
	  }
	else
	  {
	    /* Such relocation on static function should not have been
	       emitted by the compiler.  */
	    abort ();
	  }
      }
      *unresolved_reloc_p = FALSE;
      return bfd_reloc_ok;

    case R_ARM_FUNCDESC:
      {
	if (h == NULL)
	  {
	    struct fdpic_local *local_fdpic_cnts = elf32_arm_local_fdpic_cnts(input_bfd);
	    Elf_Internal_Rela outrel;
	    int dynindx = elf_section_data (sym_sec->output_section)->dynindx;
	    int offset = local_fdpic_cnts[r_symndx].funcdesc_offset & ~1;
	    bfd_vma addr = dynreloc_value - sym_sec->output_section->vma;
	    bfd_vma seg = -1;

	    if (bfd_link_pic(info) && dynindx == 0)
	      abort();

	    /* Replace static FUNCDESC relocation with a
	       R_ARM_RELATIVE dynamic relocation or with a rofixup for
	       executable.  */
	    outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
	    outrel.r_offset = input_section->output_section->vma
	      + input_section->output_offset + rel->r_offset;
	    outrel.r_addend = 0;
	    if (bfd_link_pic(info))
	      elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
	    else
	      arm_elf_add_rofixup(output_bfd, globals->srofixup, outrel.r_offset);

	    bfd_put_32 (input_bfd, sgot->output_section->vma
			+ sgot->output_offset + offset, hit_data);

	    /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet.  */
	    arm_elf_fill_funcdesc(output_bfd, info,
				  &local_fdpic_cnts[r_symndx].funcdesc_offset,
				  dynindx, offset, addr, dynreloc_value, seg);
	  }
	else
	  {
	    if (h->dynindx == -1)
	      {
		int dynindx;
		int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
		bfd_vma addr;
		bfd_vma seg = -1;
		Elf_Internal_Rela outrel;

		/* For static binaries sym_sec can be null.  */
		if (sym_sec)
		  {
		    dynindx = elf_section_data (sym_sec->output_section)->dynindx;
		    addr = dynreloc_value - sym_sec->output_section->vma;
		  }
		else
		  {
		    dynindx = 0;
		    addr = 0;
		  }

		if (bfd_link_pic(info) && dynindx == 0)
		  abort();

		/* Replace static FUNCDESC relocation with a
		   R_ARM_RELATIVE dynamic relocation.  */
		outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
		outrel.r_offset = input_section->output_section->vma
		  + input_section->output_offset + rel->r_offset;
		outrel.r_addend = 0;
		if (bfd_link_pic(info))
		  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
		else
		  arm_elf_add_rofixup(output_bfd, globals->srofixup, outrel.r_offset);

		bfd_put_32 (input_bfd, sgot->output_section->vma
			    + sgot->output_offset + offset, hit_data);

		/* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet.  */
		arm_elf_fill_funcdesc(output_bfd, info,
				      &eh->fdpic_cnts.funcdesc_offset,
				      dynindx, offset, addr, dynreloc_value, seg);
	      }
	    else
	      {
		Elf_Internal_Rela outrel;

		/* Add a dynamic relocation.  */
		outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_FUNCDESC);
		outrel.r_offset = input_section->output_section->vma
		  + input_section->output_offset + rel->r_offset;
		outrel.r_addend = 0;
		elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
	      }
	  }
      }
      *unresolved_reloc_p = FALSE;
      return bfd_reloc_ok;
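    /* Background note (descriptive): in the FDPIC ABI a function descriptor
       occupies two words in the GOT area, the function's entry address and
       the value to load into the FDPIC register for its load module.  The
       three cases above only allocate and reference that descriptor; its
       contents are either fixed up statically (rofixups) or left to the
       dynamic linker via R_ARM_FUNCDESC / R_ARM_FUNCDESC_VALUE relocations,
       as appropriate.  */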
    case R_ARM_THM_BF16:
      {
	bfd_vma relocation;
	bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
	bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);

	if (globals->use_rel)
	  {
	    bfd_vma immA = (upper_insn & 0x001f);
	    bfd_vma immB = (lower_insn & 0x07fe) >> 1;
	    bfd_vma immC = (lower_insn & 0x0800) >> 11;
	    addend  = (immA << 12);
	    addend |= (immB << 2);
	    addend |= (immC << 1);
	    /* Sign extend.  */
	    signed_addend = (addend & 0x10000) ? addend - (1 << 17) : addend;
	  }

	relocation  = value + signed_addend;
	relocation -= (input_section->output_section->vma
		       + input_section->output_offset
		       + rel->r_offset);

	/* Put RELOCATION back into the insn.  */
	{
	  bfd_vma immA = (relocation & 0x0001f000) >> 12;
	  bfd_vma immB = (relocation & 0x00000ffc) >> 2;
	  bfd_vma immC = (relocation & 0x00000002) >> 1;

	  upper_insn = (upper_insn & 0xffe0) | immA;
	  lower_insn = (lower_insn & 0xf001) | (immC << 11) | (immB << 1);
	}

	/* Put the relocated value back in the object file:  */
	bfd_put_16 (input_bfd, upper_insn, hit_data);
	bfd_put_16 (input_bfd, lower_insn, hit_data + 2);

	return bfd_reloc_ok;
      }
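    /* Illustrative note: the Armv8.1-M branch-future offset is scattered
       across the instruction pair as immA (upper insn) and immB/immC
       (lower insn); the code above reassembles it with immA at bits 16:12,
       immB at bits 11:2 and immC at bit 1, and the store path performs the
       inverse scattering.  The BF12 and BF18 cases below differ only in
       the width of the immA field, and hence the branch range.  */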
    case R_ARM_THM_BF12:
      {
	bfd_vma relocation;
	bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
	bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);

	if (globals->use_rel)
	  {
	    bfd_vma immA = (upper_insn & 0x0001);
	    bfd_vma immB = (lower_insn & 0x07fe) >> 1;
	    bfd_vma immC = (lower_insn & 0x0800) >> 11;
	    addend  = (immA << 12);
	    addend |= (immB << 2);
	    addend |= (immC << 1);
	    /* Sign extend.  */
	    addend = (addend & 0x1000) ? addend - (1 << 13) : addend;
	    signed_addend = addend;
	  }

	relocation  = value + signed_addend;
	relocation -= (input_section->output_section->vma
		       + input_section->output_offset
		       + rel->r_offset);

	/* Put RELOCATION back into the insn.  */
	{
	  bfd_vma immA = (relocation & 0x00001000) >> 12;
	  bfd_vma immB = (relocation & 0x00000ffc) >> 2;
	  bfd_vma immC = (relocation & 0x00000002) >> 1;

	  upper_insn = (upper_insn & 0xfffe) | immA;
	  lower_insn = (lower_insn & 0xf001) | (immC << 11) | (immB << 1);
	}

	/* Put the relocated value back in the object file:  */
	bfd_put_16 (input_bfd, upper_insn, hit_data);
	bfd_put_16 (input_bfd, lower_insn, hit_data + 2);

	return bfd_reloc_ok;
      }

    case R_ARM_THM_BF18:
      {
	bfd_vma relocation;
	bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
	bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);

	if (globals->use_rel)
	  {
	    bfd_vma immA = (upper_insn & 0x007f);
	    bfd_vma immB = (lower_insn & 0x07fe) >> 1;
	    bfd_vma immC = (lower_insn & 0x0800) >> 11;
	    addend  = (immA << 12);
	    addend |= (immB << 2);
	    addend |= (immC << 1);
	    /* Sign extend.  */
	    addend = (addend & 0x40000) ? addend - (1 << 19) : addend;
	    signed_addend = addend;
	  }

	relocation  = value + signed_addend;
	relocation -= (input_section->output_section->vma
		       + input_section->output_offset
		       + rel->r_offset);

	/* Put RELOCATION back into the insn.  */
	{
	  bfd_vma immA = (relocation & 0x0007f000) >> 12;
	  bfd_vma immB = (relocation & 0x00000ffc) >> 2;
	  bfd_vma immC = (relocation & 0x00000002) >> 1;

	  upper_insn = (upper_insn & 0xff80) | immA;
	  lower_insn = (lower_insn & 0xf001) | (immC << 11) | (immB << 1);
	}

	/* Put the relocated value back in the object file:  */
	bfd_put_16 (input_bfd, upper_insn, hit_data);
	bfd_put_16 (input_bfd, lower_insn, hit_data + 2);

	return bfd_reloc_ok;
      }

    default:
      return bfd_reloc_notsupported;
    }
}
/* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS.  */

static void
arm_add_to_rel (bfd *              abfd,
		bfd_byte *         address,
		reloc_howto_type * howto,
		bfd_signed_vma     increment)
{
  bfd_signed_vma addend;

  if (howto->type == R_ARM_THM_CALL
      || howto->type == R_ARM_THM_JUMP24)
    {
      int upper_insn, lower_insn;
      int upper, lower;

      upper_insn = bfd_get_16 (abfd, address);
      lower_insn = bfd_get_16 (abfd, address + 2);
      upper = upper_insn & 0x7ff;
      lower = lower_insn & 0x7ff;

      addend = (upper << 12) | (lower << 1);
      addend += increment;
      addend >>= 1;

      upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
      lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);

      bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
      bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
    }
  else
    {
      bfd_vma contents;

      contents = bfd_get_32 (abfd, address);

      /* Get the (signed) value from the instruction.  */
      addend = contents & howto->src_mask;
      if (addend & ((howto->src_mask + 1) >> 1))
	{
	  bfd_signed_vma mask;

	  mask = -1;
	  mask &= ~ howto->src_mask;
	  addend |= mask;
	}

      /* Add in the increment, (which is a byte value).  */
      switch (howto->type)
	{
	default:
	  addend += increment;
	  break;

	case R_ARM_PC24:
	case R_ARM_PLT32:
	case R_ARM_CALL:
	case R_ARM_JUMP24:
	  addend <<= howto->size;
	  addend += increment;

	  /* Should we check for overflow here ?  */

	  /* Drop any undesired bits.  */
	  addend >>= howto->rightshift;
	  break;
	}

      contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);

      bfd_put_32 (abfd, contents, address);
    }
}
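/* Descriptive note: arm_add_to_rel is only meaningful for REL-style
   (in-place addend) configurations; elf32_arm_relocate_section below uses
   it during relocatable links to bias the addend stored in the section
   contents by the section symbol's output offset, mirroring the
   rel->r_addend adjustment performed for RELA targets.  */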
#define IS_ARM_TLS_RELOC(R_TYPE)	\
  ((R_TYPE) == R_ARM_TLS_GD32		\
   || (R_TYPE) == R_ARM_TLS_GD32_FDPIC  \
   || (R_TYPE) == R_ARM_TLS_LDO32	\
   || (R_TYPE) == R_ARM_TLS_LDM32	\
   || (R_TYPE) == R_ARM_TLS_LDM32_FDPIC	\
   || (R_TYPE) == R_ARM_TLS_DTPOFF32	\
   || (R_TYPE) == R_ARM_TLS_DTPMOD32	\
   || (R_TYPE) == R_ARM_TLS_TPOFF32	\
   || (R_TYPE) == R_ARM_TLS_LE32	\
   || (R_TYPE) == R_ARM_TLS_IE32	\
   || (R_TYPE) == R_ARM_TLS_IE32_FDPIC	\
   || IS_ARM_TLS_GNU_RELOC (R_TYPE))

/* Specific set of relocations for the gnu tls dialect.  */
#define IS_ARM_TLS_GNU_RELOC(R_TYPE)	\
  ((R_TYPE) == R_ARM_TLS_GOTDESC	\
   || (R_TYPE) == R_ARM_TLS_CALL	\
   || (R_TYPE) == R_ARM_THM_TLS_CALL	\
   || (R_TYPE) == R_ARM_TLS_DESCSEQ	\
   || (R_TYPE) == R_ARM_THM_TLS_DESCSEQ)
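/* Descriptive note: the first group covers the classic TLS access models
   (GD32 = general dynamic, LDM32/LDO32 = local dynamic, IE32 = initial
   exec, LE32/TPOFF32 = local exec, plus the FDPIC variants), while the
   "gnu tls dialect" group covers the TLS-descriptor sequences
   (R_ARM_TLS_GOTDESC and the *_TLS_CALL / *_TLS_DESCSEQ markers) that
   elf32_arm_tls_relax may rewrite into cheaper sequences.  */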
/* Relocate an ARM ELF section.  */

static bfd_boolean
elf32_arm_relocate_section (bfd *                  output_bfd,
			    struct bfd_link_info * info,
			    bfd *                  input_bfd,
			    asection *             input_section,
			    bfd_byte *             contents,
			    Elf_Internal_Rela *    relocs,
			    Elf_Internal_Sym *     local_syms,
			    asection **            local_sections)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  Elf_Internal_Rela *rel;
  Elf_Internal_Rela *relend;
  const char *name;
  struct elf32_arm_link_hash_table * globals;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return FALSE;

  symtab_hdr = & elf_symtab_hdr (input_bfd);
  sym_hashes = elf_sym_hashes (input_bfd);

  rel = relocs;
  relend = relocs + input_section->reloc_count;
  for (; rel < relend; rel++)
    {
      int                          r_type;
      reloc_howto_type *           howto;
      unsigned long                r_symndx;
      Elf_Internal_Sym *           sym;
      asection *                   sec;
      struct elf_link_hash_entry * h;
      bfd_vma                      relocation;
      bfd_reloc_status_type        r;
      arelent                      bfd_reloc;
      char                         sym_type;
      bfd_boolean                  unresolved_reloc = FALSE;
      char *error_message = NULL;

      r_symndx = ELF32_R_SYM (rel->r_info);
      r_type   = ELF32_R_TYPE (rel->r_info);
      r_type   = arm_real_reloc_type (globals, r_type);

      if (   r_type == R_ARM_GNU_VTENTRY
	  || r_type == R_ARM_GNU_VTINHERIT)
	continue;

      howto = bfd_reloc.howto = elf32_arm_howto_from_type (r_type);

      if (howto == NULL)
	return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);

      h = NULL;
      sym = NULL;
      sec = NULL;

      if (r_symndx < symtab_hdr->sh_info)
	{
	  sym = local_syms + r_symndx;
	  sym_type = ELF32_ST_TYPE (sym->st_info);
	  sec = local_sections[r_symndx];

	  /* An object file might have a reference to a local
	     undefined symbol.  This is a daft object file, but we
	     should at least do something about it.  V4BX & NONE
	     relocations do not use the symbol and are explicitly
	     allowed to use the undefined symbol, so allow those.
	     Likewise for relocations against STN_UNDEF.  */
	  if (r_type != R_ARM_V4BX
	      && r_type != R_ARM_NONE
	      && r_symndx != STN_UNDEF
	      && bfd_is_und_section (sec)
	      && ELF_ST_BIND (sym->st_info) != STB_WEAK)
	    (*info->callbacks->undefined_symbol)
	      (info, bfd_elf_string_from_elf_section
	       (input_bfd, symtab_hdr->sh_link, sym->st_name),
	       input_bfd, input_section,
	       rel->r_offset, TRUE);

	  if (globals->use_rel)
	    {
	      relocation = (sec->output_section->vma
			    + sec->output_offset
			    + sym->st_value);
	      if (!bfd_link_relocatable (info)
		  && (sec->flags & SEC_MERGE)
		  && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
		{
		  asection *msec;
		  bfd_vma addend, value;

		  switch (r_type)
		    {
		    case R_ARM_MOVW_ABS_NC:
		    case R_ARM_MOVT_ABS:
		      value = bfd_get_32 (input_bfd, contents + rel->r_offset);
		      addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
		      addend = (addend ^ 0x8000) - 0x8000;
		      break;

		    case R_ARM_THM_MOVW_ABS_NC:
		    case R_ARM_THM_MOVT_ABS:
		      value = bfd_get_16 (input_bfd, contents + rel->r_offset)
			      << 16;
		      value |= bfd_get_16 (input_bfd,
					   contents + rel->r_offset + 2);
		      addend = ((value & 0xf7000) >> 4) | (value & 0xff)
			       | ((value & 0x04000000) >> 15);
		      addend = (addend ^ 0x8000) - 0x8000;
		      break;

		    default:
		      if (howto->rightshift
			  || (howto->src_mask & (howto->src_mask + 1)))
			{
			  _bfd_error_handler
			    /* xgettext:c-format */
			    (_("%pB(%pA+%#" PRIx64 "): "
			       "%s relocation against SEC_MERGE section"),
			     input_bfd, input_section,
			     (uint64_t) rel->r_offset, howto->name);
			  return FALSE;
			}

		      value = bfd_get_32 (input_bfd, contents + rel->r_offset);

		      /* Get the (signed) value from the instruction.  */
		      addend = value & howto->src_mask;
		      if (addend & ((howto->src_mask + 1) >> 1))
			{
			  bfd_signed_vma mask;

			  mask = -1;
			  mask &= ~ howto->src_mask;
			  addend |= mask;
			}
		      break;
		    }

		  msec = sec;
		  addend =
		    _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
		    - relocation;
		  addend += msec->output_section->vma + msec->output_offset;

		  /* Cases here must match those in the preceding
		     switch statement.  */
		  switch (r_type)
		    {
		    case R_ARM_MOVW_ABS_NC:
		    case R_ARM_MOVT_ABS:
		      value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
			      | (addend & 0xfff);
		      bfd_put_32 (input_bfd, value, contents + rel->r_offset);
		      break;

		    case R_ARM_THM_MOVW_ABS_NC:
		    case R_ARM_THM_MOVT_ABS:
		      value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
			      | (addend & 0xff) | ((addend & 0x0800) << 15);
		      bfd_put_16 (input_bfd, value >> 16,
				  contents + rel->r_offset);
		      bfd_put_16 (input_bfd, value,
				  contents + rel->r_offset + 2);
		      break;

		    default:
		      value = (value & ~ howto->dst_mask)
			      | (addend & howto->dst_mask);
		      bfd_put_32 (input_bfd, value, contents + rel->r_offset);
		      break;
		    }
		}
	    }
	  else
	    relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
	}
      else
	{
	  bfd_boolean warned, ignored;

	  RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
				   r_symndx, symtab_hdr, sym_hashes,
				   h, sec, relocation,
				   unresolved_reloc, warned, ignored);

	  sym_type = h->type;
	}

      if (sec != NULL && discarded_section (sec))
	RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
					 rel, 1, relend, howto, 0, contents);

      if (bfd_link_relocatable (info))
	{
	  /* This is a relocatable link.  We don't have to change
	     anything, unless the reloc is against a section symbol,
	     in which case we have to adjust according to where the
	     section symbol winds up in the output section.  */
	  if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
	    {
	      if (globals->use_rel)
		arm_add_to_rel (input_bfd, contents + rel->r_offset,
				howto, (bfd_signed_vma) sec->output_offset);
	      else
		rel->r_addend += sec->output_offset;
	    }
	  continue;
	}

      if (h != NULL)
	name = h->root.root.string;
      else
	{
	  name = (bfd_elf_string_from_elf_section
		  (input_bfd, symtab_hdr->sh_link, sym->st_name));
	  if (name == NULL || *name == '\0')
	    name = bfd_section_name (sec);
	}

      if (r_symndx != STN_UNDEF
	  && r_type != R_ARM_NONE
	  && (h == NULL
	      || h->root.type == bfd_link_hash_defined
	      || h->root.type == bfd_link_hash_defweak)
	  && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
	{
	  _bfd_error_handler
	    ((sym_type == STT_TLS
	      /* xgettext:c-format */
	      ? _("%pB(%pA+%#" PRIx64 "): %s used with TLS symbol %s")
	      /* xgettext:c-format */
	      : _("%pB(%pA+%#" PRIx64 "): %s used with non-TLS symbol %s")),
	     input_bfd,
	     input_section,
	     (uint64_t) rel->r_offset,
	     howto->name,
	     name);
	}

      /* We call elf32_arm_final_link_relocate unless we're completely
	 done, i.e., the relaxation produced the final output we want,
	 and we won't let anybody mess with it.  Also, we have to do
	 addend adjustments in case of a R_ARM_TLS_GOTDESC relocation
	 both in relaxed and non-relaxed cases.  */
      if ((elf32_arm_tls_transition (info, r_type, h) != (unsigned)r_type)
	  || (IS_ARM_TLS_GNU_RELOC (r_type)
	      && !((h ? elf32_arm_hash_entry (h)->tls_type :
		    elf32_arm_local_got_tls_type (input_bfd)[r_symndx])
		   & GOT_TLS_GDESC)))
	{
	  r = elf32_arm_tls_relax (globals, input_bfd, input_section,
				   contents, rel, h == NULL);
	  /* This may have been marked unresolved because it came from
	     a shared library.  But we've just dealt with that.  */
	  unresolved_reloc = 0;
	}
      else
	r = bfd_reloc_continue;

      if (r == bfd_reloc_continue)
	{
	  unsigned char branch_type =
	    h ? ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
	      : ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);

	  r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
					     input_section, contents, rel,
					     relocation, info, sec, name,
					     sym_type, branch_type, h,
					     &unresolved_reloc,
					     &error_message);
	}

      /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
	 because such sections are not SEC_ALLOC and thus ld.so will
	 not process them.  */
      if (unresolved_reloc
	  && !((input_section->flags & SEC_DEBUGGING) != 0
	       && h->def_dynamic)
	  && _bfd_elf_section_offset (output_bfd, info, input_section,
				      rel->r_offset) != (bfd_vma) -1)
	{
	  _bfd_error_handler
	    /* xgettext:c-format */
	    (_("%pB(%pA+%#" PRIx64 "): "
	       "unresolvable %s relocation against symbol `%s'"),
	     input_bfd,
	     input_section,
	     (uint64_t) rel->r_offset,
	     howto->name,
	     h->root.root.string);
	  return FALSE;
	}

      if (r != bfd_reloc_ok)
	{
	  switch (r)
	    {
	    case bfd_reloc_overflow:
	      /* If the overflowing reloc was to an undefined symbol,
		 we have already printed one error message and there
		 is no point complaining again.  */
	      if (!h || h->root.type != bfd_link_hash_undefined)
		(*info->callbacks->reloc_overflow)
		  (info, (h ? &h->root : NULL), name, howto->name,
		   (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
	      break;

	    case bfd_reloc_undefined:
	      (*info->callbacks->undefined_symbol)
		(info, name, input_bfd, input_section, rel->r_offset, TRUE);
	      break;

	    case bfd_reloc_outofrange:
	      error_message = _("out of range");
	      goto common_error;

	    case bfd_reloc_notsupported:
	      error_message = _("unsupported relocation");
	      goto common_error;

	    case bfd_reloc_dangerous:
	      /* error_message should already be set.  */
	      goto common_error;

	    default:
	      error_message = _("unknown error");
	      /* Fall through.  */

	    common_error:
	      BFD_ASSERT (error_message != NULL);
	      (*info->callbacks->reloc_dangerous)
		(info, error_message, input_bfd, input_section, rel->r_offset);
	      break;
	    }
	}
    }

  return TRUE;
}
/* Add a new unwind edit to the list described by HEAD, TAIL.  If TINDEX is zero,
   adds the edit to the start of the list.  (The list must be built in order of
   ascending TINDEX: the function's callers are primarily responsible for
   maintaining that condition).  */

static void
add_unwind_table_edit (arm_unwind_table_edit **head,
		       arm_unwind_table_edit **tail,
		       arm_unwind_edit_type type,
		       asection *linked_section,
		       unsigned int tindex)
{
  arm_unwind_table_edit *new_edit = (arm_unwind_table_edit *)
      xmalloc (sizeof (arm_unwind_table_edit));

  new_edit->type = type;
  new_edit->linked_section = linked_section;
  new_edit->index = tindex;

  if (tindex > 0)
    {
      new_edit->next = NULL;

      if (*tail)
	(*tail)->next = new_edit;

      (*tail) = new_edit;

      if (!*head)
	(*head) = new_edit;
    }
  else
    {
      new_edit->next = *head;

      if (!*tail)
	*tail = new_edit;

      *head = new_edit;
    }
}

static _arm_elf_section_data *get_arm_elf_section_data (asection *);

/* Increase the size of EXIDX_SEC by ADJUST bytes.  ADJUST may be negative.  */

static void
adjust_exidx_size(asection *exidx_sec, int adjust)
{
  asection *out_sec;

  if (!exidx_sec->rawsize)
    exidx_sec->rawsize = exidx_sec->size;

  bfd_set_section_size (exidx_sec, exidx_sec->size + adjust);
  out_sec = exidx_sec->output_section;
  /* Adjust size of output section.  */
  bfd_set_section_size (out_sec, out_sec->size + adjust);
}

/* Insert an EXIDX_CANTUNWIND marker at the end of a section.  */

static void
insert_cantunwind_after(asection *text_sec, asection *exidx_sec)
{
  struct _arm_elf_section_data *exidx_arm_data;

  exidx_arm_data = get_arm_elf_section_data (exidx_sec);
  add_unwind_table_edit (
    &exidx_arm_data->u.exidx.unwind_edit_list,
    &exidx_arm_data->u.exidx.unwind_edit_tail,
    INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);

  exidx_arm_data->additional_reloc_count++;

  adjust_exidx_size(exidx_sec, 8);
}
/* Scan .ARM.exidx tables, and create a list describing edits which should be
   made to those tables, such that:

     1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
     2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
	codes which have been inlined into the index).

   If MERGE_EXIDX_ENTRIES is false, duplicate entries are not merged.

   The edits are applied when the tables are written
   (in elf32_arm_write_section).  */

bfd_boolean
elf32_arm_fix_exidx_coverage (asection **text_section_order,
			      unsigned int num_text_sections,
			      struct bfd_link_info *info,
			      bfd_boolean merge_exidx_entries)
{
  bfd *inp;
  unsigned int last_second_word = 0, i;
  asection *last_exidx_sec = NULL;
  asection *last_text_sec = NULL;
  int last_unwind_type = -1;

  /* Walk over all EXIDX sections, and create backlinks from the corresponding
     text sections.  */
  for (inp = info->input_bfds; inp != NULL; inp = inp->link.next)
    {
      asection *sec;

      for (sec = inp->sections; sec != NULL; sec = sec->next)
	{
	  struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
	  Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;

	  if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
	    continue;

	  if (elf_sec->linked_to)
	    {
	      Elf_Internal_Shdr *linked_hdr
		= &elf_section_data (elf_sec->linked_to)->this_hdr;
	      struct _arm_elf_section_data *linked_sec_arm_data
		= get_arm_elf_section_data (linked_hdr->bfd_section);

	      if (linked_sec_arm_data == NULL)
		continue;

	      /* Link this .ARM.exidx section back from the text section it
		 describes.  */
	      linked_sec_arm_data->u.text.arm_exidx_sec = sec;
	    }
	}
    }

  /* Walk all text sections in order of increasing VMA.  Eliminate duplicate
     index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
     and add EXIDX_CANTUNWIND entries for sections with no unwind table data.  */

  for (i = 0; i < num_text_sections; i++)
    {
      asection *sec = text_section_order[i];
      asection *exidx_sec;
      struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
      struct _arm_elf_section_data *exidx_arm_data;
      bfd_byte *contents = NULL;
      int deleted_exidx_bytes = 0;
      bfd_vma j;
      arm_unwind_table_edit *unwind_edit_head = NULL;
      arm_unwind_table_edit *unwind_edit_tail = NULL;
      Elf_Internal_Shdr *hdr;
      bfd *ibfd;

      if (arm_data == NULL)
	continue;

      exidx_sec = arm_data->u.text.arm_exidx_sec;
      if (exidx_sec == NULL)
	{
	  /* Section has no unwind data.  */
	  if (last_unwind_type == 0 || !last_exidx_sec)
	    continue;

	  /* Ignore zero sized sections.  */
	  if (sec->size == 0)
	    continue;

	  insert_cantunwind_after(last_text_sec, last_exidx_sec);
	  last_unwind_type = 0;
	  continue;
	}

      /* Skip /DISCARD/ sections.  */
      if (bfd_is_abs_section (exidx_sec->output_section))
	continue;

      hdr = &elf_section_data (exidx_sec)->this_hdr;
      if (hdr->sh_type != SHT_ARM_EXIDX)
	continue;

      exidx_arm_data = get_arm_elf_section_data (exidx_sec);
      if (exidx_arm_data == NULL)
	continue;

      ibfd = exidx_sec->owner;

      if (hdr->contents != NULL)
	contents = hdr->contents;
      else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
	continue;

      if (last_unwind_type > 0)
	{
	  unsigned int first_word = bfd_get_32 (ibfd, contents);
	  /* Add cantunwind if first unwind item does not match section
	     start.  */
	  if (first_word != sec->vma)
	    {
	      insert_cantunwind_after (last_text_sec, last_exidx_sec);
	      last_unwind_type = 0;
	    }
	}

      for (j = 0; j < hdr->sh_size; j += 8)
	{
	  unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
	  int unwind_type;
	  int elide = 0;

	  /* An EXIDX_CANTUNWIND entry.  */
	  if (second_word == 1)
	    {
	      if (last_unwind_type == 0)
		elide = 1;
	      unwind_type = 0;
	    }
	  /* Inlined unwinding data.  Merge if equal to previous.  */
	  else if ((second_word & 0x80000000) != 0)
	    {
	      if (merge_exidx_entries
		  && last_second_word == second_word && last_unwind_type == 1)
		elide = 1;
	      unwind_type = 1;
	      last_second_word = second_word;
	    }
	  /* Normal table entry.  In theory we could merge these too,
	     but duplicate entries are likely to be much less common.  */
	  else
	    unwind_type = 2;

	  if (elide && !bfd_link_relocatable (info))
	    {
	      add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
				     DELETE_EXIDX_ENTRY, NULL, j / 8);

	      deleted_exidx_bytes += 8;
	    }

	  last_unwind_type = unwind_type;
	}

      /* Free contents if we allocated it ourselves.  */
      if (contents != hdr->contents)
	free (contents);

      /* Record edits to be applied later (in elf32_arm_write_section).  */
      exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
      exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;

      if (deleted_exidx_bytes > 0)
	adjust_exidx_size(exidx_sec, -deleted_exidx_bytes);

      last_exidx_sec = exidx_sec;
      last_text_sec = sec;
    }

  /* Add terminating CANTUNWIND entry.  */
  if (!bfd_link_relocatable (info) && last_exidx_sec
      && last_unwind_type != 0)
    insert_cantunwind_after(last_text_sec, last_exidx_sec);

  return TRUE;
}
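/* Background note (descriptive): each .ARM.exidx entry is a pair of words,
   a prel31 offset to the function it covers and either the special value 1
   (EXIDX_CANTUNWIND), an inlined unwind opcode sequence (bit 31 set), or a
   prel31 pointer into .ARM.extab.  The scan above only needs the second
   word to classify entries as cantunwind (0), inlined (1) or table (2)
   when deciding which duplicates can be elided.  */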
static bfd_boolean
elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
			       bfd *ibfd, const char *name)
{
  asection *sec, *osec;

  sec = bfd_get_linker_section (ibfd, name);
  if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
    return TRUE;

  osec = sec->output_section;
  if (elf32_arm_write_section (obfd, info, sec, sec->contents))
    return TRUE;

  if (! bfd_set_section_contents (obfd, osec, sec->contents,
				  sec->output_offset, sec->size))
    return FALSE;

  return TRUE;
}

static bfd_boolean
elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
{
  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
  asection *sec, *osec;

  if (globals == NULL)
    return FALSE;

  /* Invoke the regular ELF backend linker to do all the work.  */
  if (!bfd_elf_final_link (abfd, info))
    return FALSE;

  /* Process stub sections (eg BE8 encoding, ...).  */
  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
  unsigned int i;
  for (i=0; i<htab->top_id; i++)
    {
      sec = htab->stub_group[i].stub_sec;
      /* Only process it once, in its link_sec slot.  */
      if (sec && i == htab->stub_group[i].link_sec->id)
	{
	  osec = sec->output_section;
	  elf32_arm_write_section (abfd, info, sec, sec->contents);
	  if (! bfd_set_section_contents (abfd, osec, sec->contents,
					  sec->output_offset, sec->size))
	    return FALSE;
	}
    }

  /* Write out any glue sections now that we have created all the
     stubs.  */
  if (globals->bfd_of_glue_owner != NULL)
    {
      if (! elf32_arm_output_glue_section (info, abfd,
					   globals->bfd_of_glue_owner,
					   ARM2THUMB_GLUE_SECTION_NAME))
	return FALSE;

      if (! elf32_arm_output_glue_section (info, abfd,
					   globals->bfd_of_glue_owner,
					   THUMB2ARM_GLUE_SECTION_NAME))
	return FALSE;

      if (! elf32_arm_output_glue_section (info, abfd,
					   globals->bfd_of_glue_owner,
					   VFP11_ERRATUM_VENEER_SECTION_NAME))
	return FALSE;

      if (! elf32_arm_output_glue_section (info, abfd,
					   globals->bfd_of_glue_owner,
					   STM32L4XX_ERRATUM_VENEER_SECTION_NAME))
	return FALSE;

      if (! elf32_arm_output_glue_section (info, abfd,
					   globals->bfd_of_glue_owner,
					   ARM_BX_GLUE_SECTION_NAME))
	return FALSE;
    }

  return TRUE;
}
/* Return a best guess for the machine number based on the attributes.  */

static unsigned int
bfd_arm_get_mach_from_attributes (bfd * abfd)
{
  int arch = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_CPU_arch);

  switch (arch)
    {
    case TAG_CPU_ARCH_PRE_V4: return bfd_mach_arm_3M;
    case TAG_CPU_ARCH_V4: return bfd_mach_arm_4;
    case TAG_CPU_ARCH_V4T: return bfd_mach_arm_4T;
    case TAG_CPU_ARCH_V5T: return bfd_mach_arm_5T;

    case TAG_CPU_ARCH_V5TE:
      {
	char * name;

	BFD_ASSERT (Tag_CPU_name < NUM_KNOWN_OBJ_ATTRIBUTES);
	name = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_CPU_name].s;

	if (name)
	  {
	    if (strcmp (name, "IWMMXT2") == 0)
	      return bfd_mach_arm_iWMMXt2;

	    if (strcmp (name, "IWMMXT") == 0)
	      return bfd_mach_arm_iWMMXt;

	    if (strcmp (name, "XSCALE") == 0)
	      {
		int wmmx;

		BFD_ASSERT (Tag_WMMX_arch < NUM_KNOWN_OBJ_ATTRIBUTES);
		wmmx = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_WMMX_arch].i;
		switch (wmmx)
		  {
		  case 1: return bfd_mach_arm_iWMMXt;
		  case 2: return bfd_mach_arm_iWMMXt2;
		  default: return bfd_mach_arm_XScale;
		  }
	      }
	  }

	return bfd_mach_arm_5TE;
      }

    case TAG_CPU_ARCH_V5TEJ:
      return bfd_mach_arm_5TEJ;
    case TAG_CPU_ARCH_V6:
      return bfd_mach_arm_6;
    case TAG_CPU_ARCH_V6KZ:
      return bfd_mach_arm_6KZ;
    case TAG_CPU_ARCH_V6T2:
      return bfd_mach_arm_6T2;
    case TAG_CPU_ARCH_V6K:
      return bfd_mach_arm_6K;
    case TAG_CPU_ARCH_V7:
      return bfd_mach_arm_7;
    case TAG_CPU_ARCH_V6_M:
      return bfd_mach_arm_6M;
    case TAG_CPU_ARCH_V6S_M:
      return bfd_mach_arm_6SM;
    case TAG_CPU_ARCH_V7E_M:
      return bfd_mach_arm_7EM;
    case TAG_CPU_ARCH_V8:
      return bfd_mach_arm_8;
    case TAG_CPU_ARCH_V8R:
      return bfd_mach_arm_8R;
    case TAG_CPU_ARCH_V8M_BASE:
      return bfd_mach_arm_8M_BASE;
    case TAG_CPU_ARCH_V8M_MAIN:
      return bfd_mach_arm_8M_MAIN;
    case TAG_CPU_ARCH_V8_1M_MAIN:
      return bfd_mach_arm_8_1M_MAIN;

    default:
      /* Force entry to be added for any new known Tag_CPU_arch value.  */
      BFD_ASSERT (arch > MAX_TAG_CPU_ARCH);

      /* Unknown Tag_CPU_arch value.  */
      return bfd_mach_arm_unknown;
    }
}
/* Set the right machine number.  */

static bfd_boolean
elf32_arm_object_p (bfd *abfd)
{
  unsigned int mach;

  mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);

  if (mach == bfd_mach_arm_unknown)
    {
      if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT)
	mach = bfd_mach_arm_ep9312;
      else
	mach = bfd_arm_get_mach_from_attributes (abfd);
    }

  bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
  return TRUE;
}

/* Function to keep ARM specific flags in the ELF header.  */

static bfd_boolean
elf32_arm_set_private_flags (bfd *abfd, flagword flags)
{
  if (elf_flags_init (abfd)
      && elf_elfheader (abfd)->e_flags != flags)
    {
      if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
	{
	  if (flags & EF_ARM_INTERWORK)
	    _bfd_error_handler
	      (_("warning: not setting interworking flag of %pB since it has already been specified as non-interworking"),
	       abfd);
	  else
	    _bfd_error_handler
	      (_("warning: clearing the interworking flag of %pB due to outside request"),
	       abfd);
	}
    }
  else
    {
      elf_elfheader (abfd)->e_flags = flags;
      elf_flags_init (abfd) = TRUE;
    }

  return TRUE;
}

/* Copy backend specific data from one object module to another.  */

static bfd_boolean
elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
{
  flagword in_flags;
  flagword out_flags;

  if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
    return TRUE;

  in_flags  = elf_elfheader (ibfd)->e_flags;
  out_flags = elf_elfheader (obfd)->e_flags;

  if (elf_flags_init (obfd)
      && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
      && in_flags != out_flags)
    {
      /* Cannot mix APCS26 and APCS32 code.  */
      if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
	return FALSE;

      /* Cannot mix float APCS and non-float APCS code.  */
      if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
	return FALSE;

      /* If the src and dest have different interworking flags
	 then turn off the interworking bit.  */
      if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
	{
	  if (out_flags & EF_ARM_INTERWORK)
	    _bfd_error_handler
	      (_("warning: clearing the interworking flag of %pB because non-interworking code in %pB has been linked with it"),
	       obfd, ibfd);

	  in_flags &= ~EF_ARM_INTERWORK;
	}

      /* Likewise for PIC, though don't warn for this case.  */
      if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
	in_flags &= ~EF_ARM_PIC;
    }

  elf_elfheader (obfd)->e_flags = in_flags;
  elf_flags_init (obfd) = TRUE;

  return _bfd_elf_copy_private_bfd_data (ibfd, obfd);
}
/* Values for Tag_ABI_PCS_R9_use.  */
enum
{
  AEABI_R9_V6,
  AEABI_R9_SB,
  AEABI_R9_TLS,
  AEABI_R9_unused
};

/* Values for Tag_ABI_PCS_RW_data.  */
enum
{
  AEABI_PCS_RW_data_absolute,
  AEABI_PCS_RW_data_PCrel,
  AEABI_PCS_RW_data_SBrel,
  AEABI_PCS_RW_data_unused
};

/* Values for Tag_ABI_enum_size.  */
enum
{
  AEABI_enum_unused,
  AEABI_enum_short,
  AEABI_enum_wide,
  AEABI_enum_forced_wide
};

/* Determine whether an object attribute tag takes an integer, a
   string or both.  */

static int
elf32_arm_obj_attrs_arg_type (int tag)
{
  if (tag == Tag_compatibility)
    return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
  else if (tag == Tag_nodefaults)
    return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
  else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
    return ATTR_TYPE_FLAG_STR_VAL;
  else if (tag < 32)
    return ATTR_TYPE_FLAG_INT_VAL;
  else
    return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
}
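/* Example (descriptive): under this rule Tag_CPU_raw_name and Tag_CPU_name
   are strings, Tag_compatibility takes both an integer and a string, every
   other low-numbered tag is an integer, and for the remaining tags the
   parity convention applies: odd-numbered tags are strings and
   even-numbered tags are integers.  */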
/* The ABI defines that Tag_conformance should be emitted first, and that
   Tag_nodefaults should be second (if either is defined).  This sets those
   two positions, and bumps up the position of all the remaining tags to
   compensate.  */
static int
elf32_arm_obj_attrs_order (int num)
{
  if (num == LEAST_KNOWN_OBJ_ATTRIBUTE)
    return Tag_conformance;
  if (num == LEAST_KNOWN_OBJ_ATTRIBUTE + 1)
    return Tag_nodefaults;
  if ((num - 2) < Tag_nodefaults)
    return num - 2;
  if ((num - 1) < Tag_conformance)
    return num - 1;
  return num;
}

/* Attribute numbers >=64 (mod 128) can be safely ignored.  */
static bfd_boolean
elf32_arm_obj_attrs_handle_unknown (bfd *abfd, int tag)
{
  if ((tag & 127) < 64)
    {
      _bfd_error_handler
	(_("%pB: unknown mandatory EABI object attribute %d"),
	 abfd, tag);
      bfd_set_error (bfd_error_bad_value);
      return FALSE;
    }
  else
    {
      _bfd_error_handler
	(_("warning: %pB: unknown EABI object attribute %d"),
	 abfd, tag);
      return TRUE;
    }
}

/* Read the architecture from the Tag_also_compatible_with attribute, if any.
   Returns -1 if no architecture could be read.  */

static int
get_secondary_compatible_arch (bfd *abfd)
{
  obj_attribute *attr =
    &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];

  /* Note: the tag and its argument below are uleb128 values, though
     currently-defined values fit in one byte for each.  */
  if (attr->s
      && attr->s[0] == Tag_CPU_arch
      && (attr->s[1] & 128) != 128
      && attr->s[2] == 0)
    return attr->s[1];

  /* This tag is "safely ignorable", so don't complain if it looks funny.  */
  return -1;
}

/* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
   The tag is removed if ARCH is -1.  */

static void
set_secondary_compatible_arch (bfd *abfd, int arch)
{
  obj_attribute *attr =
    &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];

  if (arch == -1)
    {
      attr->s = NULL;
      return;
    }

  /* Note: the tag and its argument below are uleb128 values, though
     currently-defined values fit in one byte for each.  */
  attr->s = (char *) bfd_alloc (abfd, 3);
  attr->s[0] = Tag_CPU_arch;
  attr->s[1] = arch;
  attr->s[2] = '\0';
}
/* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
   into account.  */
static int
tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
		      int newtag, int secondary_compat)
{
#define T(X) TAG_CPU_ARCH_##X
  int tagl, tagh, result;
  const int v6t2[] =
    {
      T(V6T2),   /* PRE_V4.  */
      T(V6T2),   /* V4.  */
      T(V6T2),   /* V4T.  */
      T(V6T2),   /* V5T.  */
      T(V6T2),   /* V5TE.  */
      T(V6T2),   /* V5TEJ.  */
      T(V6T2),   /* V6.  */
      T(V7),     /* V6KZ.  */
      T(V6T2)    /* V6T2.  */
    };
  const int v6k[] =
    {
      T(V6K),    /* PRE_V4.  */
      T(V6K),    /* V4.  */
      T(V6K),    /* V4T.  */
      T(V6K),    /* V5T.  */
      T(V6K),    /* V5TE.  */
      T(V6K),    /* V5TEJ.  */
      T(V6K),    /* V6.  */
      T(V6KZ),   /* V6KZ.  */
      T(V7),     /* V6T2.  */
      T(V6K)     /* V6K.  */
    };
  const int v7[] =
    {
      T(V7),     /* PRE_V4.  */
      T(V7),     /* V4.  */
      T(V7),     /* V4T.  */
      T(V7),     /* V5T.  */
      T(V7),     /* V5TE.  */
      T(V7),     /* V5TEJ.  */
      T(V7),     /* V6.  */
      T(V7),     /* V6KZ.  */
      T(V7),     /* V6T2.  */
      T(V7),     /* V6K.  */
      T(V7)      /* V7.  */
    };
  const int v6_m[] =
    {
      -1,        /* PRE_V4.  */
      -1,        /* V4.  */
      T(V6K),    /* V4T.  */
      T(V6K),    /* V5T.  */
      T(V6K),    /* V5TE.  */
      T(V6K),    /* V5TEJ.  */
      T(V6K),    /* V6.  */
      T(V6KZ),   /* V6KZ.  */
      T(V7),     /* V6T2.  */
      T(V6K),    /* V6K.  */
      T(V7),     /* V7.  */
      T(V6_M)    /* V6_M.  */
    };
  const int v6s_m[] =
    {
      -1,        /* PRE_V4.  */
      -1,        /* V4.  */
      T(V6K),    /* V4T.  */
      T(V6K),    /* V5T.  */
      T(V6K),    /* V5TE.  */
      T(V6K),    /* V5TEJ.  */
      T(V6K),    /* V6.  */
      T(V6KZ),   /* V6KZ.  */
      T(V7),     /* V6T2.  */
      T(V6K),    /* V6K.  */
      T(V7),     /* V7.  */
      T(V6S_M),  /* V6_M.  */
      T(V6S_M)   /* V6S_M.  */
    };
  const int v7e_m[] =
    {
      -1,        /* PRE_V4.  */
      -1,        /* V4.  */
      T(V7E_M),  /* V4T.  */
      T(V7E_M),  /* V5T.  */
      T(V7E_M),  /* V5TE.  */
      T(V7E_M),  /* V5TEJ.  */
      T(V7E_M),  /* V6.  */
      T(V7E_M),  /* V6KZ.  */
      T(V7E_M),  /* V6T2.  */
      T(V7E_M),  /* V6K.  */
      T(V7E_M),  /* V7.  */
      T(V7E_M),  /* V6_M.  */
      T(V7E_M),  /* V6S_M.  */
      T(V7E_M)   /* V7E_M.  */
    };
  const int v8[] =
    {
      T(V8),     /* PRE_V4.  */
      T(V8),     /* V4.  */
      T(V8),     /* V4T.  */
      T(V8),     /* V5T.  */
      T(V8),     /* V5TE.  */
      T(V8),     /* V5TEJ.  */
      T(V8),     /* V6.  */
      T(V8),     /* V6KZ.  */
      T(V8),     /* V6T2.  */
      T(V8),     /* V6K.  */
      T(V8),     /* V7.  */
      T(V8),     /* V6_M.  */
      T(V8),     /* V6S_M.  */
      T(V8),     /* V7E_M.  */
      T(V8)      /* V8.  */
    };
  const int v8r[] =
    {
      T(V8R),    /* PRE_V4.  */
      T(V8R),    /* V4.  */
      T(V8R),    /* V4T.  */
      T(V8R),    /* V5T.  */
      T(V8R),    /* V5TE.  */
      T(V8R),    /* V5TEJ.  */
      T(V8R),    /* V6.  */
      T(V8R),    /* V6KZ.  */
      T(V8R),    /* V6T2.  */
      T(V8R),    /* V6K.  */
      T(V8R),    /* V7.  */
      T(V8R),    /* V6_M.  */
      T(V8R),    /* V6S_M.  */
      T(V8R),    /* V7E_M.  */
      T(V8),     /* V8.  */
      T(V8R)     /* V8R.  */
    };
  const int v8m_baseline[] =
    {
      -1,          /* PRE_V4.  */
      -1,          /* V4.  */
      -1,          /* V4T.  */
      -1,          /* V5T.  */
      -1,          /* V5TE.  */
      -1,          /* V5TEJ.  */
      -1,          /* V6.  */
      -1,          /* V6KZ.  */
      -1,          /* V6T2.  */
      -1,          /* V6K.  */
      -1,          /* V7.  */
      T(V8M_BASE), /* V6_M.  */
      T(V8M_BASE), /* V6S_M.  */
      -1,          /* V7E_M.  */
      -1,          /* V8.  */
      -1,          /* V8R.  */
      T(V8M_BASE)  /* V8-M BASELINE.  */
    };
  const int v8m_mainline[] =
    {
      -1,          /* PRE_V4.  */
      -1,          /* V4.  */
      -1,          /* V4T.  */
      -1,          /* V5T.  */
      -1,          /* V5TE.  */
      -1,          /* V5TEJ.  */
      -1,          /* V6.  */
      -1,          /* V6KZ.  */
      -1,          /* V6T2.  */
      -1,          /* V6K.  */
      T(V8M_MAIN), /* V7.  */
      T(V8M_MAIN), /* V6_M.  */
      T(V8M_MAIN), /* V6S_M.  */
      T(V8M_MAIN), /* V7E_M.  */
      -1,          /* V8.  */
      -1,          /* V8R.  */
      T(V8M_MAIN), /* V8-M BASELINE.  */
      T(V8M_MAIN)  /* V8-M MAINLINE.  */
    };
  const int v8_1m_mainline[] =
    {
      -1,            /* PRE_V4.  */
      -1,            /* V4.  */
      -1,            /* V4T.  */
      -1,            /* V5T.  */
      -1,            /* V5TE.  */
      -1,            /* V5TEJ.  */
      -1,            /* V6.  */
      -1,            /* V6KZ.  */
      -1,            /* V6T2.  */
      -1,            /* V6K.  */
      T(V8_1M_MAIN), /* V7.  */
      T(V8_1M_MAIN), /* V6_M.  */
      T(V8_1M_MAIN), /* V6S_M.  */
      T(V8_1M_MAIN), /* V7E_M.  */
      -1,            /* V8.  */
      -1,            /* V8R.  */
      T(V8_1M_MAIN), /* V8-M BASELINE.  */
      T(V8_1M_MAIN), /* V8-M MAINLINE.  */
      -1,            /* Unused (18).  */
      -1,            /* Unused (19).  */
      -1,            /* Unused (20).  */
      T(V8_1M_MAIN)  /* V8.1-M MAINLINE.  */
    };
  const int v4t_plus_v6_m[] =
    {
      -1,               /* PRE_V4.  */
      -1,               /* V4.  */
      T(V4T),           /* V4T.  */
      T(V5T),           /* V5T.  */
      T(V5TE),          /* V5TE.  */
      T(V5TEJ),         /* V5TEJ.  */
      T(V6),            /* V6.  */
      T(V6KZ),          /* V6KZ.  */
      T(V6T2),          /* V6T2.  */
      T(V6K),           /* V6K.  */
      T(V7),            /* V7.  */
      T(V6_M),          /* V6_M.  */
      T(V6S_M),         /* V6S_M.  */
      T(V7E_M),         /* V7E_M.  */
      T(V8),            /* V8.  */
      -1,               /* V8R.  */
      T(V8M_BASE),      /* V8-M BASELINE.  */
      T(V8M_MAIN),      /* V8-M MAINLINE.  */
      -1,               /* Unused (18).  */
      -1,               /* Unused (19).  */
      -1,               /* Unused (20).  */
      T(V8_1M_MAIN),    /* V8.1-M MAINLINE.  */
      T(V4T_PLUS_V6_M)  /* V4T plus V6_M.  */
    };
  const int *comb[] =
    {
      v6t2,
      v6k,
      v7,
      v6_m,
      v6s_m,
      v7e_m,
      v8,
      v8r,
      v8m_baseline,
      v8m_mainline,
      NULL,
      NULL,
      NULL,
      v8_1m_mainline,
      /* Pseudo-architecture.  */
      v4t_plus_v6_m
    };

  /* Check we've not got a higher architecture than we know about.  */

  if (oldtag > MAX_TAG_CPU_ARCH || newtag > MAX_TAG_CPU_ARCH)
    {
      _bfd_error_handler (_("error: %pB: unknown CPU architecture"), ibfd);
      return -1;
    }

  /* Override old tag if we have a Tag_also_compatible_with on the output.  */

  if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
      || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
    oldtag = T(V4T_PLUS_V6_M);

  /* And override the new tag if we have a Tag_also_compatible_with on the
     input.  */

  if ((newtag == T(V6_M) && secondary_compat == T(V4T))
      || (newtag == T(V4T) && secondary_compat == T(V6_M)))
    newtag = T(V4T_PLUS_V6_M);

  tagl = (oldtag < newtag) ? oldtag : newtag;
  result = tagh = (oldtag > newtag) ? oldtag : newtag;

  /* Architectures before V6KZ add features monotonically.  */
  if (tagh <= TAG_CPU_ARCH_V6KZ)
    return result;

  result = comb[tagh - T(V6T2)] ? comb[tagh - T(V6T2)][tagl] : -1;

  /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
     as the canonical version.  */
  if (result == T(V4T_PLUS_V6_M))
    {
      result = T(V4T);
      *secondary_compat_out = T(V6_M);
    }
  else
    *secondary_compat_out = -1;

  if (result == -1)
    {
      _bfd_error_handler (_("error: %pB: conflicting CPU architectures %d/%d"),
			  ibfd, oldtag, newtag);
      return -1;
    }

  return result;
#undef T
}
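/* Worked example (descriptive): linking a v5TE object with a v6-M object
   gives tagl = V5TE and tagh = V6_M, so the merge is looked up as
   comb[V6_M - V6T2][V5TE], i.e. v6_m[V5TE], which yields V6K: the lowest
   architecture that is a superset of both.  A -1 entry in these tables
   means the two architectures cannot be combined, and the error path
   above then reports conflicting CPU architectures.  */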
/* Query attributes object to see if integer divide instructions may be
   present in an object.  */
static bfd_boolean
elf32_arm_attributes_accept_div (const obj_attribute *attr)
{
  int arch = attr[Tag_CPU_arch].i;
  int profile = attr[Tag_CPU_arch_profile].i;

  switch (attr[Tag_DIV_use].i)
    {
    case 0:
      /* Integer divide allowed if instruction contained in
	 architecture.  */
      if (arch == TAG_CPU_ARCH_V7 && (profile == 'R' || profile == 'M'))
	return TRUE;
      else if (arch >= TAG_CPU_ARCH_V7E_M)
	return TRUE;
      else
	return FALSE;

    case 1:
      /* Integer divide explicitly prohibited.  */
      return FALSE;

    default:
      /* Unrecognised case - treat as allowing divide everywhere.  */
    case 2:
      /* Integer divide allowed in ARM state.  */
      return TRUE;
    }
}

/* Query attributes object to see if integer divide instructions are
   forbidden to be in the object.  This is not the inverse of
   elf32_arm_attributes_accept_div.  */
static bfd_boolean
elf32_arm_attributes_forbid_div (const obj_attribute *attr)
{
  return attr[Tag_DIV_use].i == 1;
}
14505 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
14506 are conflicting attributes. */
14509 elf32_arm_merge_eabi_attributes (bfd
*ibfd
, struct bfd_link_info
*info
)
14511 bfd
*obfd
= info
->output_bfd
;
14512 obj_attribute
*in_attr
;
14513 obj_attribute
*out_attr
;
14514 /* Some tags have 0 = don't care, 1 = strong requirement,
14515 2 = weak requirement. */
14516 static const int order_021
[3] = {0, 2, 1};
14518 bfd_boolean result
= TRUE
;
14519 const char *sec_name
= get_elf_backend_data (ibfd
)->obj_attrs_section
;
14521 /* Skip the linker stubs file. This preserves previous behavior
14522 of accepting unknown attributes in the first input file - but
14524 if (ibfd
->flags
& BFD_LINKER_CREATED
)
14527 /* Skip any input that hasn't attribute section.
14528 This enables to link object files without attribute section with
14530 if (bfd_get_section_by_name (ibfd
, sec_name
) == NULL
)
14533 if (!elf_known_obj_attributes_proc (obfd
)[0].i
)
14535 /* This is the first object. Copy the attributes. */
14536 _bfd_elf_copy_obj_attributes (ibfd
, obfd
);
14538 out_attr
= elf_known_obj_attributes_proc (obfd
);
14540 /* Use the Tag_null value to indicate the attributes have been
14544 /* We do not output objects with Tag_MPextension_use_legacy - we move
14545 the attribute's value to Tag_MPextension_use. */
14546 if (out_attr
[Tag_MPextension_use_legacy
].i
!= 0)
14548 if (out_attr
[Tag_MPextension_use
].i
!= 0
14549 && out_attr
[Tag_MPextension_use_legacy
].i
14550 != out_attr
[Tag_MPextension_use
].i
)
14553 (_("Error: %pB has both the current and legacy "
14554 "Tag_MPextension_use attributes"), ibfd
);
14558 out_attr
[Tag_MPextension_use
] =
14559 out_attr
[Tag_MPextension_use_legacy
];
14560 out_attr
[Tag_MPextension_use_legacy
].type
= 0;
14561 out_attr
[Tag_MPextension_use_legacy
].i
= 0;
14567 in_attr
= elf_known_obj_attributes_proc (ibfd
);
14568 out_attr
= elf_known_obj_attributes_proc (obfd
);
14569 /* This needs to happen before Tag_ABI_FP_number_model is merged. */
14570 if (in_attr
[Tag_ABI_VFP_args
].i
!= out_attr
[Tag_ABI_VFP_args
].i
)
14572 /* Ignore mismatches if the object doesn't use floating point or is
14573 floating point ABI independent. */
14574 if (out_attr
[Tag_ABI_FP_number_model
].i
== AEABI_FP_number_model_none
14575 || (in_attr
[Tag_ABI_FP_number_model
].i
!= AEABI_FP_number_model_none
14576 && out_attr
[Tag_ABI_VFP_args
].i
== AEABI_VFP_args_compatible
))
14577 out_attr
[Tag_ABI_VFP_args
].i
= in_attr
[Tag_ABI_VFP_args
].i
;
14578 else if (in_attr
[Tag_ABI_FP_number_model
].i
!= AEABI_FP_number_model_none
14579 && in_attr
[Tag_ABI_VFP_args
].i
!= AEABI_VFP_args_compatible
)
14582 (_("error: %pB uses VFP register arguments, %pB does not"),
14583 in_attr
[Tag_ABI_VFP_args
].i
? ibfd
: obfd
,
14584 in_attr
[Tag_ABI_VFP_args
].i
? obfd
: ibfd
);
14589 for (i
= LEAST_KNOWN_OBJ_ATTRIBUTE
; i
< NUM_KNOWN_OBJ_ATTRIBUTES
; i
++)
14591 /* Merge this attribute with existing attributes. */
14594 case Tag_CPU_raw_name
:
14596 /* These are merged after Tag_CPU_arch. */
14599 case Tag_ABI_optimization_goals
:
14600 case Tag_ABI_FP_optimization_goals
:
14601 /* Use the first value seen. */
14606 int secondary_compat
= -1, secondary_compat_out
= -1;
14607 unsigned int saved_out_attr
= out_attr
[i
].i
;
14609 static const char *name_table
[] =
14611 /* These aren't real CPU names, but we can't guess
14612 that from the architecture version alone. */
14628 "ARM v8-M.baseline",
14629 "ARM v8-M.mainline",
14632 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
14633 secondary_compat
= get_secondary_compatible_arch (ibfd
);
14634 secondary_compat_out
= get_secondary_compatible_arch (obfd
);
14635 arch_attr
= tag_cpu_arch_combine (ibfd
, out_attr
[i
].i
,
14636 &secondary_compat_out
,
14640 /* Return with error if failed to merge. */
14641 if (arch_attr
== -1)
14644 out_attr
[i
].i
= arch_attr
;
14646 set_secondary_compatible_arch (obfd
, secondary_compat_out
);
14648 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
14649 if (out_attr
[i
].i
== saved_out_attr
)
14650 ; /* Leave the names alone. */
14651 else if (out_attr
[i
].i
== in_attr
[i
].i
)
14653 /* The output architecture has been changed to match the
14654 input architecture. Use the input names. */
14655 out_attr
[Tag_CPU_name
].s
= in_attr
[Tag_CPU_name
].s
14656 ? _bfd_elf_attr_strdup (obfd
, in_attr
[Tag_CPU_name
].s
)
14658 out_attr
[Tag_CPU_raw_name
].s
= in_attr
[Tag_CPU_raw_name
].s
14659 ? _bfd_elf_attr_strdup (obfd
, in_attr
[Tag_CPU_raw_name
].s
)
14664 out_attr
[Tag_CPU_name
].s
= NULL
;
14665 out_attr
[Tag_CPU_raw_name
].s
= NULL
;
14668 /* If we still don't have a value for Tag_CPU_name,
14669 make one up now. Tag_CPU_raw_name remains blank. */
14670 if (out_attr
[Tag_CPU_name
].s
== NULL
14671 && out_attr
[i
].i
< ARRAY_SIZE (name_table
))
14672 out_attr
[Tag_CPU_name
].s
=
14673 _bfd_elf_attr_strdup (obfd
, name_table
[out_attr
[i
].i
]);
	case Tag_ARM_ISA_use:
	case Tag_THUMB_ISA_use:
	case Tag_WMMX_arch:
	case Tag_Advanced_SIMD_arch:
	  /* ??? Do Advanced_SIMD (NEON) and WMMX conflict?  */
	case Tag_ABI_FP_rounding:
	case Tag_ABI_FP_exceptions:
	case Tag_ABI_FP_user_exceptions:
	case Tag_ABI_FP_number_model:
	case Tag_FP_HP_extension:
	case Tag_CPU_unaligned_access:
	case Tag_MPextension_use:
	  /* Use the largest value specified.  */
	  if (in_attr[i].i > out_attr[i].i)
	    out_attr[i].i = in_attr[i].i;
	  break;

	case Tag_ABI_align_preserved:
	case Tag_ABI_PCS_RO_data:
	  /* Use the smallest value specified.  */
	  if (in_attr[i].i < out_attr[i].i)
	    out_attr[i].i = in_attr[i].i;
	  break;

	case Tag_ABI_align_needed:
	  if ((in_attr[i].i > 0 || out_attr[i].i > 0)
	      && (in_attr[Tag_ABI_align_preserved].i == 0
		  || out_attr[Tag_ABI_align_preserved].i == 0))
	    /* This error message should be enabled once all non-conformant
	       binaries in the toolchain have had the attributes set
	       properly.  */
	      (_("error: %pB: 8-byte data alignment conflicts with %pB"),
	  /* Fall through.  */
	case Tag_ABI_FP_denormal:
	case Tag_ABI_PCS_GOT_use:
	  /* Use the "greatest" from the sequence 0, 2, 1, or the largest
	     value if greater than 2 (for future-proofing).  */
	  if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
	      || (in_attr[i].i <= 2 && out_attr[i].i <= 2
		  && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
	    out_attr[i].i = in_attr[i].i;
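	  /* Editorial sketch (not in the original source): order_021[]
	     presumably maps each in-range value to its rank in the sequence
	     0, 2, 1, i.e. something like

	       static const int order_021[3] = { 0, 2, 1 };

	     so the precedence among in-range values is 1 > 2 > 0: merging an
	     input of 2 with an output of 1 keeps 1, while merging 2 with 0
	     raises the output to 2.  Values above 2 are simply compared
	     numerically, as the comment above says.  */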
	case Tag_Virtualization_use:
	  /* The virtualization tag effectively stores two bits of
	     information: the intended use of TrustZone (in bit 0), and the
	     intended use of Virtualization (in bit 1).  */
	  if (out_attr[i].i == 0)
	    out_attr[i].i = in_attr[i].i;
	  else if (in_attr[i].i != 0
		   && in_attr[i].i != out_attr[i].i)
	    {
	      if (in_attr[i].i <= 3 && out_attr[i].i <= 3)
		(_("error: %pB: unable to merge virtualization attributes "
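	  /* Editorial note (not in the original source): with bit 0 meaning
	     TrustZone and bit 1 meaning Virtualization, the defined values
	     are 1 (TrustZone only), 2 (Virtualization only) and 3 (both).
	     Differing in-range values are presumably combined, while a value
	     above 3 in either object takes the error path above.  */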
	case Tag_CPU_arch_profile:
	  if (out_attr[i].i != in_attr[i].i)
	    {
	      /* 0 will merge with anything.
		 'A' and 'S' merge to 'A'.
		 'R' and 'S' merge to 'R'.
		 'M' and 'A|R|S' is an error.  */
	      if (out_attr[i].i == 0
		  || (out_attr[i].i == 'S'
		      && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
		out_attr[i].i = in_attr[i].i;
	      else if (in_attr[i].i == 0
		       || (in_attr[i].i == 'S'
			   && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
		; /* Do nothing.  */
	      else
		_bfd_error_handler
		  (_("error: %pB: conflicting architecture profiles %c/%c"),
		   in_attr[i].i ? in_attr[i].i : '0',
		   out_attr[i].i ? out_attr[i].i : '0');
	    }
	case Tag_DSP_extension:
	  /* No need to change output value if any of:
	     - pre (<=) ARMv5T input architecture (do not have DSP)
	     - M input profile not ARMv7E-M and do not have DSP.  */
	  if (in_attr[Tag_CPU_arch].i <= 3
	      || (in_attr[Tag_CPU_arch_profile].i == 'M'
		  && in_attr[Tag_CPU_arch].i != 13
		  && in_attr[i].i == 0))
	    ; /* Do nothing.  */
	  /* Output value should be 0 if DSP part of architecture, ie.
	     - post (>=) ARMv5te architecture output
	     - A, R or S profile output or ARMv7E-M output architecture.  */
	  else if (out_attr[Tag_CPU_arch].i >= 4
		   && (out_attr[Tag_CPU_arch_profile].i == 'A'
		       || out_attr[Tag_CPU_arch_profile].i == 'R'
		       || out_attr[Tag_CPU_arch_profile].i == 'S'
		       || out_attr[Tag_CPU_arch].i == 13))
	  /* Otherwise, DSP instructions are added and not part of output  */
	  break;

	case Tag_FP_arch:
	  {
	    /* Tag_ABI_HardFP_use is handled along with Tag_FP_arch since
	       the meaning of Tag_ABI_HardFP_use depends on Tag_FP_arch
	       when it's 0.  It might mean absence of FP hardware if
	       Tag_FP_arch is zero.  */
#define VFP_VERSION_COUNT 9
	    static const struct
	      {
		int ver;
		int regs;
	      } vfp_versions[VFP_VERSION_COUNT] =

	    /* If the output has no requirement about FP hardware,
	       follow the requirement of the input.  */
	    if (out_attr[i].i == 0)
	      {
		/* This assert is still reasonable, we shouldn't
		   produce the suspicious build attribute
		   combination (See below for in_attr).  */
		BFD_ASSERT (out_attr[Tag_ABI_HardFP_use].i == 0);
		out_attr[i].i = in_attr[i].i;
		out_attr[Tag_ABI_HardFP_use].i
		  = in_attr[Tag_ABI_HardFP_use].i;
	      }
	    /* If the input has no requirement about FP hardware, do  */
	    else if (in_attr[i].i == 0)
	      {
		/* We used to assert that Tag_ABI_HardFP_use was
		   zero here, but we should never assert when
		   consuming an object file that has suspicious
		   build attributes.  The single precision variant
		   of 'no FP architecture' is still 'no FP
		   architecture', so we just ignore the tag in this  */
	      }

	    /* Both the input and the output have nonzero Tag_FP_arch.
	       So Tag_ABI_HardFP_use is implied by Tag_FP_arch when it's zero.  */

	    /* If both the input and the output have zero Tag_ABI_HardFP_use,  */
	    if (in_attr[Tag_ABI_HardFP_use].i == 0
		&& out_attr[Tag_ABI_HardFP_use].i == 0)
	      ;
	    /* If the input and the output have different Tag_ABI_HardFP_use,
	       the combination of them is 0 (implied by Tag_FP_arch).  */
	    else if (in_attr[Tag_ABI_HardFP_use].i
		     != out_attr[Tag_ABI_HardFP_use].i)
	      out_attr[Tag_ABI_HardFP_use].i = 0;

	    /* Now we can handle Tag_FP_arch.  */

	    /* Values of VFP_VERSION_COUNT or more aren't defined, so just
	       pick the biggest.  */
	    if (in_attr[i].i >= VFP_VERSION_COUNT
		&& in_attr[i].i > out_attr[i].i)
	      out_attr[i] = in_attr[i];

	    /* The output uses the superset of input features
	       (ISA version) and registers.  */
	    ver = vfp_versions[in_attr[i].i].ver;
	    if (ver < vfp_versions[out_attr[i].i].ver)
	      ver = vfp_versions[out_attr[i].i].ver;
	    regs = vfp_versions[in_attr[i].i].regs;
	    if (regs < vfp_versions[out_attr[i].i].regs)
	      regs = vfp_versions[out_attr[i].i].regs;
	    /* This assumes all possible supersets are also a valid  */
	    for (newval = VFP_VERSION_COUNT - 1; newval > 0; newval--)
	      if (regs == vfp_versions[newval].regs
		  && ver == vfp_versions[newval].ver)
	    out_attr[i].i = newval;
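	    /* Editorial sketch (not in the original source): the loop above
	       picks the smallest Tag_FP_arch value whose (ver, regs) pair
	       dominates both inputs.  For example, merging an object that
	       needs the 32-register bank with one that needs a newer VFP ISA
	       but only 16 registers should land on the entry that has both
	       the newer ISA version and 32 registers, assuming such a
	       superset entry exists in vfp_versions, which is what the
	       comment on the loop relies on.  */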
	case Tag_PCS_config:
	  if (out_attr[i].i == 0)
	    out_attr[i].i = in_attr[i].i;
	  else if (in_attr[i].i != 0 && out_attr[i].i != in_attr[i].i)
	    /* It's sometimes ok to mix different configs, so this is only
	       a warning.  */
	    _bfd_error_handler
	      (_("warning: %pB: conflicting platform configuration"), ibfd);
	  break;

	case Tag_ABI_PCS_R9_use:
	  if (in_attr[i].i != out_attr[i].i
	      && out_attr[i].i != AEABI_R9_unused
	      && in_attr[i].i != AEABI_R9_unused)
	    _bfd_error_handler
	      (_("error: %pB: conflicting use of R9"), ibfd);
	  if (out_attr[i].i == AEABI_R9_unused)
	    out_attr[i].i = in_attr[i].i;
	  break;
	case Tag_ABI_PCS_RW_data:
	  if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
	      && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
	      && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
	      (_("error: %pB: SB relative addressing conflicts with use of R9"),
	  /* Use the smallest value specified.  */
	  if (in_attr[i].i < out_attr[i].i)
	    out_attr[i].i = in_attr[i].i;
	  break;
	case Tag_ABI_PCS_wchar_t:
	  if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
	      && !elf_arm_tdata (obfd)->no_wchar_size_warning)
	    _bfd_error_handler
	      (_("warning: %pB uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
	       ibfd, in_attr[i].i, out_attr[i].i);
	  else if (in_attr[i].i && !out_attr[i].i)
	    out_attr[i].i = in_attr[i].i;
	  break;
	case Tag_ABI_enum_size:
	  if (in_attr[i].i != AEABI_enum_unused)
	    {
	      if (out_attr[i].i == AEABI_enum_unused
		  || out_attr[i].i == AEABI_enum_forced_wide)
		{
		  /* The existing object is compatible with anything.
		     Use whatever requirements the new object has.  */
		  out_attr[i].i = in_attr[i].i;
		}
	      else if (in_attr[i].i != AEABI_enum_forced_wide
		       && out_attr[i].i != in_attr[i].i
		       && !elf_arm_tdata (obfd)->no_enum_size_warning)
		{
		  static const char *aeabi_enum_names[] =
		    { "", "variable-size", "32-bit", "" };
		  const char *in_name =
		    in_attr[i].i < ARRAY_SIZE (aeabi_enum_names)
		    ? aeabi_enum_names[in_attr[i].i]
		  const char *out_name =
		    out_attr[i].i < ARRAY_SIZE (aeabi_enum_names)
		    ? aeabi_enum_names[out_attr[i].i]
		  _bfd_error_handler
		    (_("warning: %pB uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
		     ibfd, in_name, out_name);
		}
	    }
	  break;
	case Tag_ABI_VFP_args:
	  break;

	case Tag_ABI_WMMX_args:
	  if (in_attr[i].i != out_attr[i].i)
	      (_("error: %pB uses iWMMXt register arguments, %pB does not"),
	  break;

	case Tag_compatibility:
	  /* Merged in target-independent code.  */
	  break;

	case Tag_ABI_HardFP_use:
	  /* This is handled along with Tag_FP_arch.  */
	  break;

	case Tag_ABI_FP_16bit_format:
	  if (in_attr[i].i != 0 && out_attr[i].i != 0)
	    {
	      if (in_attr[i].i != out_attr[i].i)
		(_("error: fp16 format mismatch between %pB and %pB"),
	    }
	  if (in_attr[i].i != 0)
	    out_attr[i].i = in_attr[i].i;
	  break;

	case Tag_DIV_use:
	  /* A value of zero on input means that the divide instruction may
	     be used if available in the base architecture as specified via
	     Tag_CPU_arch and Tag_CPU_arch_profile.  A value of 1 means that
	     the user did not want divide instructions.  A value of 2
	     explicitly means that divide instructions were allowed in ARM
	     and Thumb state.  */
	  if (in_attr[i].i == out_attr[i].i)
	    /* Do nothing.  */ ;
	  else if (elf32_arm_attributes_forbid_div (in_attr)
		   && !elf32_arm_attributes_accept_div (out_attr))
	  else if (elf32_arm_attributes_forbid_div (out_attr)
		   && elf32_arm_attributes_accept_div (in_attr))
	    out_attr[i].i = in_attr[i].i;
	  else if (in_attr[i].i == 2)
	    out_attr[i].i = in_attr[i].i;
	  break;
	case Tag_MPextension_use_legacy:
	  /* We don't output objects with Tag_MPextension_use_legacy - we
	     move the value to Tag_MPextension_use.  */
	  if (in_attr[i].i != 0 && in_attr[Tag_MPextension_use].i != 0)
	    {
	      if (in_attr[Tag_MPextension_use].i != in_attr[i].i)
		(_("%pB has both the current and legacy "
		   "Tag_MPextension_use attributes"),
	    }

	  if (in_attr[i].i > out_attr[Tag_MPextension_use].i)
	    out_attr[Tag_MPextension_use] = in_attr[i];
	  break;

	case Tag_nodefaults:
	  /* This tag is set if it exists, but the value is unused (and is
	     typically zero).  We don't actually need to do anything here -
	     the merge happens automatically when the type flags are merged  */
	  break;

	case Tag_also_compatible_with:
	  /* Already done in Tag_CPU_arch.  */
	  break;

	case Tag_conformance:
	  /* Keep the attribute if it matches.  Throw it away otherwise.
	     No attribute means no claim to conform.  */
	  if (!in_attr[i].s || !out_attr[i].s
	      || strcmp (in_attr[i].s, out_attr[i].s) != 0)
	    out_attr[i].s = NULL;
	  break;
	  result
	    = result && _bfd_elf_merge_unknown_attribute_low (ibfd, obfd, i);
	}

      /* If out_attr was copied from in_attr then it won't have a type yet.  */
      if (in_attr[i].type && !out_attr[i].type)
	out_attr[i].type = in_attr[i].type;
    }

  /* Merge Tag_compatibility attributes and any common GNU ones.  */
  if (!_bfd_elf_merge_object_attributes (ibfd, info))
    return FALSE;

  /* Check for any attributes not known on ARM.  */
  result &= _bfd_elf_merge_unknown_attribute_list (ibfd, obfd);
/* Return TRUE if the two EABI versions are compatible.  */

static bfd_boolean
elf32_arm_versions_compatible (unsigned iver, unsigned over)
{
  /* v4 and v5 are the same spec before and after it was released,
     so allow mixing them.  */
  if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
      || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
    return TRUE;

  return (iver == over);
}
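/* Editorial sketch (not part of the original source): a caller merging
   e_flags would be expected to use this helper roughly as follows, with
   in_flags/out_flags standing for the input and output e_flags values:

     if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
					 EF_ARM_EABI_VERSION (out_flags)))
       return FALSE;   /- source and destination use different EABIs -/

   Only the v4/v5 pair is treated as interchangeable.  */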
/* Merge backend specific data from an object file to the output
   object file when linking.  */

static bfd_boolean
elf32_arm_merge_private_bfd_data (bfd *, struct bfd_link_info *);
/* Display the flags field.  */

static bfd_boolean
elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
{
  FILE * file = (FILE *) ptr;
  unsigned long flags;

  BFD_ASSERT (abfd != NULL && ptr != NULL);

  /* Print normal ELF private data.  */
  _bfd_elf_print_private_bfd_data (abfd, ptr);

  flags = elf_elfheader (abfd)->e_flags;
  /* Ignore init flag - it may not be set, despite the flags field
     containing valid data.  */

  fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);

  switch (EF_ARM_EABI_VERSION (flags))
    {
    case EF_ARM_EABI_UNKNOWN:
      /* The following flag bits are GNU extensions and not part of the
	 official ARM ELF extended ABI.  Hence they are only decoded if
	 the EABI version is not set.  */
      if (flags & EF_ARM_INTERWORK)
	fprintf (file, _(" [interworking enabled]"));

      if (flags & EF_ARM_APCS_26)
	fprintf (file, " [APCS-26]");
      else
	fprintf (file, " [APCS-32]");

      if (flags & EF_ARM_VFP_FLOAT)
	fprintf (file, _(" [VFP float format]"));
      else if (flags & EF_ARM_MAVERICK_FLOAT)
	fprintf (file, _(" [Maverick float format]"));
      else
	fprintf (file, _(" [FPA float format]"));

      if (flags & EF_ARM_APCS_FLOAT)
	fprintf (file, _(" [floats passed in float registers]"));

      if (flags & EF_ARM_PIC)
	fprintf (file, _(" [position independent]"));

      if (flags & EF_ARM_NEW_ABI)
	fprintf (file, _(" [new ABI]"));

      if (flags & EF_ARM_OLD_ABI)
	fprintf (file, _(" [old ABI]"));

      if (flags & EF_ARM_SOFT_FLOAT)
	fprintf (file, _(" [software FP]"));

      flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
		 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
		 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
		 | EF_ARM_MAVERICK_FLOAT);
      break;

    case EF_ARM_EABI_VER1:
      fprintf (file, _(" [Version1 EABI]"));

      if (flags & EF_ARM_SYMSARESORTED)
	fprintf (file, _(" [sorted symbol table]"));
      else
	fprintf (file, _(" [unsorted symbol table]"));

      flags &= ~ EF_ARM_SYMSARESORTED;
      break;

    case EF_ARM_EABI_VER2:
      fprintf (file, _(" [Version2 EABI]"));

      if (flags & EF_ARM_SYMSARESORTED)
	fprintf (file, _(" [sorted symbol table]"));
      else
	fprintf (file, _(" [unsorted symbol table]"));

      if (flags & EF_ARM_DYNSYMSUSESEGIDX)
	fprintf (file, _(" [dynamic symbols use segment index]"));

      if (flags & EF_ARM_MAPSYMSFIRST)
	fprintf (file, _(" [mapping symbols precede others]"));

      flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
		 | EF_ARM_MAPSYMSFIRST);
      break;

    case EF_ARM_EABI_VER3:
      fprintf (file, _(" [Version3 EABI]"));
      break;

    case EF_ARM_EABI_VER4:
      fprintf (file, _(" [Version4 EABI]"));
      break;

    case EF_ARM_EABI_VER5:
      fprintf (file, _(" [Version5 EABI]"));

      if (flags & EF_ARM_ABI_FLOAT_SOFT)
	fprintf (file, _(" [soft-float ABI]"));

      if (flags & EF_ARM_ABI_FLOAT_HARD)
	fprintf (file, _(" [hard-float ABI]"));

      flags &= ~(EF_ARM_ABI_FLOAT_SOFT | EF_ARM_ABI_FLOAT_HARD);

      if (flags & EF_ARM_BE8)
	fprintf (file, _(" [BE8]"));

      if (flags & EF_ARM_LE8)
	fprintf (file, _(" [LE8]"));

      flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
      break;

    default:
      fprintf (file, _(" <EABI version unrecognised>"));
      break;
    }

  flags &= ~ EF_ARM_EABIMASK;

  if (flags & EF_ARM_RELEXEC)
    fprintf (file, _(" [relocatable executable]"));

  if (flags & EF_ARM_PIC)
    fprintf (file, _(" [position independent]"));

  if (elf_elfheader (abfd)->e_ident[EI_OSABI] == ELFOSABI_ARM_FDPIC)
    fprintf (file, _(" [FDPIC ABI supplement]"));

  flags &= ~ (EF_ARM_RELEXEC | EF_ARM_PIC);

  if (flags)
    fprintf (file, _("<Unrecognised flag bits set>"));

  fputc ('\n', file);
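/* Editorial example (not in the original source): for an object whose
   e_flags value is 0x05000200 this routine would print something like

     private flags = 5000200: [Version5 EABI] [soft-float ABI]

   assuming EF_ARM_EABI_VER5 is 0x05000000 and EF_ARM_ABI_FLOAT_SOFT is
   0x200; the exact flag values are illustrative only.  */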
static int
elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
{
  switch (ELF_ST_TYPE (elf_sym->st_info))
    {
    case STT_ARM_TFUNC:
      return ELF_ST_TYPE (elf_sym->st_info);

    case STT_ARM_16BIT:
      /* If the symbol is not an object, return the STT_ARM_16BIT flag.
	 This allows us to distinguish between data used by Thumb instructions
	 and non-data (which is probably code) inside Thumb regions of an
	 executable.  */
      if (type != STT_OBJECT && type != STT_TLS)
	return ELF_ST_TYPE (elf_sym->st_info);
static asection *
elf32_arm_gc_mark_hook (asection *sec,
			struct bfd_link_info *info,
			Elf_Internal_Rela *rel,
			struct elf_link_hash_entry *h,
			Elf_Internal_Sym *sym)
{
  switch (ELF32_R_TYPE (rel->r_info))
    {
    case R_ARM_GNU_VTINHERIT:
    case R_ARM_GNU_VTENTRY:
      return NULL;
    }

  return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
/* Look through the relocs for a section during the first phase.  */

static bfd_boolean
elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
			asection *sec, const Elf_Internal_Rela *relocs)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  const Elf_Internal_Rela *rel;
  const Elf_Internal_Rela *rel_end;
  struct elf32_arm_link_hash_table *htab;
  bfd_boolean call_reloc_p;
  bfd_boolean may_become_dynamic_p;
  bfd_boolean may_need_local_target_p;
  unsigned long nsyms;

  if (bfd_link_relocatable (info))
    return TRUE;

  BFD_ASSERT (is_arm_elf (abfd));

  htab = elf32_arm_hash_table (info);

  /* Create dynamic sections for relocatable executables so that we can
     copy relocations.  */
  if (htab->root.is_relocatable_executable
      && ! htab->root.dynamic_sections_created)
    if (! _bfd_elf_link_create_dynamic_sections (abfd, info))
      return FALSE;

  if (htab->root.dynobj == NULL)
    htab->root.dynobj = abfd;
  if (!create_ifunc_sections (info))
    return FALSE;

  dynobj = htab->root.dynobj;

  symtab_hdr = & elf_symtab_hdr (abfd);
  sym_hashes = elf_sym_hashes (abfd);
  nsyms = NUM_SHDR_ENTRIES (symtab_hdr);

  rel_end = relocs + sec->reloc_count;
  for (rel = relocs; rel < rel_end; rel++)
    {
      Elf_Internal_Sym *isym;
      struct elf_link_hash_entry *h;
      struct elf32_arm_link_hash_entry *eh;
      unsigned int r_symndx;

      r_symndx = ELF32_R_SYM (rel->r_info);
      r_type = ELF32_R_TYPE (rel->r_info);
      r_type = arm_real_reloc_type (htab, r_type);

      if (r_symndx >= nsyms
	  /* PR 9934: It is possible to have relocations that do not
	     refer to symbols, thus it is also possible to have an
	     object file containing relocations but no symbol table.  */
	  && (r_symndx > STN_UNDEF || nsyms > 0))
	_bfd_error_handler (_("%pB: bad symbol index: %d"), abfd,

      if (r_symndx < symtab_hdr->sh_info)
	{
	  /* A local symbol.  */
	  isym = bfd_sym_from_r_symndx (&htab->sym_cache,
	}
      else
	{
	  h = sym_hashes[r_symndx - symtab_hdr->sh_info];
	  while (h->root.type == bfd_link_hash_indirect
		 || h->root.type == bfd_link_hash_warning)
	    h = (struct elf_link_hash_entry *) h->root.u.i.link;
	}

      eh = (struct elf32_arm_link_hash_entry *) h;

      call_reloc_p = FALSE;
      may_become_dynamic_p = FALSE;
      may_need_local_target_p = FALSE;

      /* Could be done earlier, if h were already available.  */
      r_type = elf32_arm_tls_transition (info, r_type, h);
      switch (r_type)
	{
	case R_ARM_GOTOFFFUNCDESC:
	  if (!elf32_arm_allocate_local_sym_info (abfd))
	    return FALSE;
	  elf32_arm_local_fdpic_cnts (abfd)[r_symndx].gotofffuncdesc_cnt += 1;
	  elf32_arm_local_fdpic_cnts (abfd)[r_symndx].funcdesc_offset = -1;
	  eh->fdpic_cnts.gotofffuncdesc_cnt++;
	  break;

	case R_ARM_GOTFUNCDESC:
	  /* Such a relocation is not supposed to be generated
	     by gcc on a static function.  */
	  /* Anyway if needed it could be handled.  */
	  eh->fdpic_cnts.gotfuncdesc_cnt++;
	  break;

	case R_ARM_FUNCDESC:
	  if (!elf32_arm_allocate_local_sym_info (abfd))
	    return FALSE;
	  elf32_arm_local_fdpic_cnts (abfd)[r_symndx].funcdesc_cnt += 1;
	  elf32_arm_local_fdpic_cnts (abfd)[r_symndx].funcdesc_offset = -1;
	  eh->fdpic_cnts.funcdesc_cnt++;
	  break;

	case R_ARM_GOT_PREL:
	case R_ARM_TLS_GD32:
	case R_ARM_TLS_GD32_FDPIC:
	case R_ARM_TLS_IE32:
	case R_ARM_TLS_IE32_FDPIC:
	case R_ARM_TLS_GOTDESC:
	case R_ARM_TLS_DESCSEQ:
	case R_ARM_THM_TLS_DESCSEQ:
	case R_ARM_TLS_CALL:
	case R_ARM_THM_TLS_CALL:
	  /* This symbol requires a global offset table entry.  */
	  {
	    int tls_type, old_tls_type;

	    switch (r_type)
	      {
	      case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;
	      case R_ARM_TLS_GD32_FDPIC: tls_type = GOT_TLS_GD; break;
	      case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;
	      case R_ARM_TLS_IE32_FDPIC: tls_type = GOT_TLS_IE; break;
	      case R_ARM_TLS_GOTDESC:
	      case R_ARM_TLS_CALL: case R_ARM_THM_TLS_CALL:
	      case R_ARM_TLS_DESCSEQ: case R_ARM_THM_TLS_DESCSEQ:
		tls_type = GOT_TLS_GDESC; break;
	      default: tls_type = GOT_NORMAL; break;
	      }

	    if (!bfd_link_executable (info) && (tls_type & GOT_TLS_IE))
	      info->flags |= DF_STATIC_TLS;

	    old_tls_type = elf32_arm_hash_entry (h)->tls_type;

	    /* This is a global offset table entry for a local symbol.  */
	    if (!elf32_arm_allocate_local_sym_info (abfd))
	      return FALSE;
	    elf_local_got_refcounts (abfd)[r_symndx] += 1;
	    old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];

	    /* If a variable is accessed with both tls methods, two
	       slots may be created.  */
	    if (GOT_TLS_GD_ANY_P (old_tls_type)
		&& GOT_TLS_GD_ANY_P (tls_type))
	      tls_type |= old_tls_type;

	    /* We will already have issued an error message if there
	       is a TLS/non-TLS mismatch, based on the symbol
	       type.  So just combine any TLS types needed.  */
	    if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
		&& tls_type != GOT_NORMAL)
	      tls_type |= old_tls_type;

	    /* If the symbol is accessed in both IE and GDESC
	       method, we're able to relax.  Turn off the GDESC flag,
	       without messing up with any other kind of tls types
	       that may be involved.  */
	    if ((tls_type & GOT_TLS_IE) && (tls_type & GOT_TLS_GDESC))
	      tls_type &= ~GOT_TLS_GDESC;

	    if (old_tls_type != tls_type)
	      {
		elf32_arm_hash_entry (h)->tls_type = tls_type;
		elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
	      }
	  }
	  /* Fall through.  */

	case R_ARM_TLS_LDM32:
	case R_ARM_TLS_LDM32_FDPIC:
	  if (r_type == R_ARM_TLS_LDM32 || r_type == R_ARM_TLS_LDM32_FDPIC)
	    htab->tls_ldm_got.refcount++;
	  /* Fall through.  */

	case R_ARM_GOTOFF32:
	  if (htab->root.sgot == NULL
	      && !create_got_section (htab->root.dynobj, info))
	    return FALSE;
	  break;

	case R_ARM_THM_CALL:
	case R_ARM_THM_JUMP24:
	case R_ARM_THM_JUMP19:
	  call_reloc_p = TRUE;
	  may_need_local_target_p = TRUE;
	  break;

	  /* VxWorks uses dynamic R_ARM_ABS12 relocations for
	     ldr __GOTT_INDEX__ offsets.  */
	  if (!htab->vxworks_p)
	    may_need_local_target_p = TRUE;
	  else goto jump_over;
	  /* Fall through.  */

	case R_ARM_MOVW_ABS_NC:
	case R_ARM_MOVT_ABS:
	case R_ARM_THM_MOVW_ABS_NC:
	case R_ARM_THM_MOVT_ABS:
	  if (bfd_link_pic (info))
	    {
	      _bfd_error_handler
		(_("%pB: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
		 abfd, elf32_arm_howto_table_1[r_type].name,
		 (h) ? h->root.root.string : "a local symbol");
	      bfd_set_error (bfd_error_bad_value);
	    }
	  /* Fall through.  */

	case R_ARM_ABS32_NOI:
	  if (h != NULL && bfd_link_executable (info))
	    h->pointer_equality_needed = 1;
	  /* Fall through.  */

	case R_ARM_REL32_NOI:
	case R_ARM_MOVW_PREL_NC:
	case R_ARM_MOVT_PREL:
	case R_ARM_THM_MOVW_PREL_NC:
	case R_ARM_THM_MOVT_PREL:

	  /* Should the interworking branches be listed here?  */
	  if ((bfd_link_pic (info) || htab->root.is_relocatable_executable
	       || htab->fdpic_p)
	      && (sec->flags & SEC_ALLOC) != 0)
	    {
	      if (h == NULL
		  && elf32_arm_howto_from_type (r_type)->pc_relative)
		{
		  /* In shared libraries and relocatable executables,
		     we treat local relative references as calls;
		     see the related SYMBOL_CALLS_LOCAL code in
		     allocate_dynrelocs.  */
		  call_reloc_p = TRUE;
		  may_need_local_target_p = TRUE;
		}
	      else
		/* We are creating a shared library or relocatable
		   executable, and this is a reloc against a global symbol,
		   or a non-PC-relative reloc against a local symbol.
		   We may need to copy the reloc into the output.  */
		may_become_dynamic_p = TRUE;
	    }
	  else
	    may_need_local_target_p = TRUE;
	  break;

	/* This relocation describes the C++ object vtable hierarchy.
	   Reconstruct it for later use during GC.  */
	case R_ARM_GNU_VTINHERIT:
	  if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
	    return FALSE;
	  break;

	/* This relocation describes which C++ vtable entries are actually
	   used.  Record for later use during GC.  */
	case R_ARM_GNU_VTENTRY:
	  if (!bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
	    return FALSE;
	  break;
	}

      if (h != NULL)
	{
	  if (call_reloc_p)
	    /* We may need a .plt entry if the function this reloc
	       refers to is in a different object, regardless of the
	       symbol's type.  We can't tell for sure yet, because
	       something later might force the symbol local.  */
	    h->needs_plt = 1;
	  else if (may_need_local_target_p)
	    /* If this reloc is in a read-only section, we might
	       need a copy reloc.  We can't check reliably at this
	       stage whether the section is read-only, as input
	       sections have not yet been mapped to output sections.
	       Tentatively set the flag for now, and correct in
	       adjust_dynamic_symbol.  */
	    h->non_got_ref = 1;
	}

      if (may_need_local_target_p
	  && (h != NULL || ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC))
	{
	  union gotplt_union *root_plt;
	  struct arm_plt_info *arm_plt;
	  struct arm_local_iplt_info *local_iplt;

	  root_plt = &h->plt;
	  arm_plt = &eh->plt;

	  local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
	  if (local_iplt == NULL)
	    return FALSE;
	  root_plt = &local_iplt->root;
	  arm_plt = &local_iplt->arm;

	  /* If the symbol is a function that doesn't bind locally,
	     this relocation will need a PLT entry.  */
	  if (root_plt->refcount != -1)
	    root_plt->refcount += 1;

	  arm_plt->noncall_refcount++;

	  /* It's too early to use htab->use_blx here, so we have to
	     record possible blx references separately from
	     relocs that definitely need a thumb stub.  */

	  if (r_type == R_ARM_THM_CALL)
	    arm_plt->maybe_thumb_refcount += 1;

	  if (r_type == R_ARM_THM_JUMP24
	      || r_type == R_ARM_THM_JUMP19)
	    arm_plt->thumb_refcount += 1;
	}

      if (may_become_dynamic_p)
	{
	  struct elf_dyn_relocs *p, **head;

	  /* Create a reloc section in dynobj.  */
	  if (sreloc == NULL)
	    {
	      sreloc = _bfd_elf_make_dynamic_reloc_section
		(sec, dynobj, 2, abfd, ! htab->use_rel);

	      if (sreloc == NULL)
		return FALSE;

	      /* BPABI objects never have dynamic relocations mapped.  */
	      if (htab->symbian_p)
		{
		  flags = bfd_section_flags (sreloc);
		  flags &= ~(SEC_LOAD | SEC_ALLOC);
		  bfd_set_section_flags (sreloc, flags);
		}
	    }

	  /* If this is a global symbol, count the number of
	     relocations we need for this symbol.  */
	  if (h != NULL)
	    head = &((struct elf32_arm_link_hash_entry *) h)->dyn_relocs;
	  else
	    head = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym);

	  if (p == NULL || p->sec != sec)
	    {
	      bfd_size_type amt = sizeof *p;

	      p = (struct elf_dyn_relocs *) bfd_alloc (htab->root.dynobj, amt);
	    }

	  if (elf32_arm_howto_from_type (r_type)->pc_relative)

	  if (h == NULL && htab->fdpic_p && !bfd_link_pic (info)
	      && r_type != R_ARM_ABS32 && r_type != R_ARM_ABS32_NOI)
	    {
	      /* Here we only support R_ARM_ABS32 and R_ARM_ABS32_NOI
		 that will become rofixup.  */
	      /* This is due to the fact that we suppose all will become rofixup.  */
	      fprintf (stderr, "FDPIC does not yet support %d relocation to become dynamic for executable\n", r_type);
	      _bfd_error_handler
		(_("FDPIC does not yet support %s relocation"
		   " to become dynamic for executable"),
		 elf32_arm_howto_table_1[r_type].name);
	    }
	}
    }
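/* Editorial note (not in the original source): within the reloc scan
   above, call_reloc_p marks branch-style relocations that may need a PLT
   entry or interworking stub, may_need_local_target_p marks relocations
   that must be able to reach a local copy of the target (via a stub,
   iplt entry or copy reloc), and may_become_dynamic_p marks relocations
   that may have to be emitted as dynamic relocations in the output.  The
   code after the switch turns these flags into PLT refcounts and
   elf_dyn_relocs entries for the later allocation passes.  */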
static void
elf32_arm_update_relocs (asection *o,
			 struct bfd_elf_section_reloc_data *reldata)
{
  void (*swap_in) (bfd *, const bfd_byte *, Elf_Internal_Rela *);
  void (*swap_out) (bfd *, const Elf_Internal_Rela *, bfd_byte *);
  const struct elf_backend_data *bed;
  _arm_elf_section_data *eado;
  struct bfd_link_order *p;
  bfd_byte *erela_head, *erela;
  Elf_Internal_Rela *irela_head, *irela;
  Elf_Internal_Shdr *rel_hdr;
  unsigned int count;

  eado = get_arm_elf_section_data (o);

  if (!eado || eado->elf.this_hdr.sh_type != SHT_ARM_EXIDX)
    return;

  bed = get_elf_backend_data (abfd);
  rel_hdr = reldata->hdr;

  if (rel_hdr->sh_entsize == bed->s->sizeof_rel)
    {
      swap_in = bed->s->swap_reloc_in;
      swap_out = bed->s->swap_reloc_out;
    }
  else if (rel_hdr->sh_entsize == bed->s->sizeof_rela)
    {
      swap_in = bed->s->swap_reloca_in;
      swap_out = bed->s->swap_reloca_out;
    }

  erela_head = rel_hdr->contents;
  irela_head = (Elf_Internal_Rela *) bfd_zmalloc
    ((NUM_SHDR_ENTRIES (rel_hdr) + 1) * sizeof (*irela_head));

  erela = erela_head;
  irela = irela_head;

  for (p = o->map_head.link_order; p; p = p->next)
    {
      if (p->type == bfd_section_reloc_link_order
	  || p->type == bfd_symbol_reloc_link_order)
	{
	  (*swap_in) (abfd, erela, irela);
	  erela += rel_hdr->sh_entsize;
	}
      else if (p->type == bfd_indirect_link_order)
	{
	  struct bfd_elf_section_reloc_data *input_reldata;
	  arm_unwind_table_edit *edit_list, *edit_tail;
	  _arm_elf_section_data *eadi;

	  i = p->u.indirect.section;

	  eadi = get_arm_elf_section_data (i);
	  edit_list = eadi->u.exidx.unwind_edit_list;
	  edit_tail = eadi->u.exidx.unwind_edit_tail;
	  offset = i->output_offset;

	  if (eadi->elf.rel.hdr &&
	      eadi->elf.rel.hdr->sh_entsize == rel_hdr->sh_entsize)
	    input_reldata = &eadi->elf.rel;
	  else if (eadi->elf.rela.hdr &&
		   eadi->elf.rela.hdr->sh_entsize == rel_hdr->sh_entsize)
	    input_reldata = &eadi->elf.rela;

	  for (j = 0; j < NUM_SHDR_ENTRIES (input_reldata->hdr); j++)
	    {
	      arm_unwind_table_edit *edit_node, *edit_next;
	      bfd_vma reloc_index;

	      (*swap_in) (abfd, erela, irela);
	      reloc_index = (irela->r_offset - offset) / 8;

	      edit_node = edit_list;
	      for (edit_next = edit_list;
		   edit_next && edit_next->index <= reloc_index;
		   edit_next = edit_node->next)
		edit_node = edit_next;

	      if (edit_node->type != DELETE_EXIDX_ENTRY
		  || edit_node->index != reloc_index)
		irela->r_offset -= bias * 8;

	      erela += rel_hdr->sh_entsize;
	    }

	  if (edit_tail->type == INSERT_EXIDX_CANTUNWIND_AT_END)
	    {
	      /* New relocation entity.  */
	      asection *text_sec = edit_tail->linked_section;
	      asection *text_out = text_sec->output_section;
	      bfd_vma exidx_offset = offset + i->size - 8;

	      irela->r_addend = 0;
	      irela->r_offset = exidx_offset;
	      irela->r_info = ELF32_R_INFO
		(text_out->target_index, R_ARM_PREL31);
	    }

	  for (j = 0; j < NUM_SHDR_ENTRIES (input_reldata->hdr); j++)
	    {
	      (*swap_in) (abfd, erela, irela);
	      erela += rel_hdr->sh_entsize;
	    }

	  count += NUM_SHDR_ENTRIES (input_reldata->hdr);
	}
    }

  reldata->count = count;
  rel_hdr->sh_size = count * rel_hdr->sh_entsize;

  erela = erela_head;
  irela = irela_head;

  (*swap_out) (abfd, irela, erela);
  erela += rel_hdr->sh_entsize;

  /* Hashes are no longer valid.  */
  free (reldata->hashes);
  reldata->hashes = NULL;
/* Unwinding tables are not referenced directly.  This pass marks them as
   required if the corresponding code section is marked.  Similarly, ARMv8-M
   secure entry functions can only be referenced by SG veneers which are
   created after the GC process.  They need to be marked in case they reside in
   their own section (as would be the case if code was compiled with
   -ffunction-sections).  */

static bfd_boolean
elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
				  elf_gc_mark_hook_fn gc_mark_hook)
{
  Elf_Internal_Shdr **elf_shdrp;
  asection *cmse_sec;
  obj_attribute *out_attr;
  Elf_Internal_Shdr *symtab_hdr;
  unsigned i, sym_count, ext_start;
  const struct elf_backend_data *bed;
  struct elf_link_hash_entry **sym_hashes;
  struct elf32_arm_link_hash_entry *cmse_hash;
  bfd_boolean again, is_v8m, first_bfd_browse = TRUE;
  bfd_boolean debug_sec_need_to_be_marked = FALSE;

  _bfd_elf_gc_mark_extra_sections (info, gc_mark_hook);

  out_attr = elf_known_obj_attributes_proc (info->output_bfd);
  is_v8m = out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V8M_BASE
	   && out_attr[Tag_CPU_arch_profile].i == 'M';

  /* Marking EH data may cause additional code sections to be marked,
     requiring multiple passes.  */
  for (sub = info->input_bfds; sub != NULL; sub = sub->link.next)
    {
      if (! is_arm_elf (sub))
	continue;

      elf_shdrp = elf_elfsections (sub);
      for (o = sub->sections; o != NULL; o = o->next)
	{
	  Elf_Internal_Shdr *hdr;

	  hdr = &elf_section_data (o)->this_hdr;
	  if (hdr->sh_type == SHT_ARM_EXIDX
	      && hdr->sh_link < elf_numsections (sub)
	      && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
	    if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
	      return FALSE;
	}

      /* Mark section holding ARMv8-M secure entry functions.  We mark all
	 of them so no need for a second browsing.  */
      if (is_v8m && first_bfd_browse)
	{
	  sym_hashes = elf_sym_hashes (sub);
	  bed = get_elf_backend_data (sub);
	  symtab_hdr = &elf_tdata (sub)->symtab_hdr;
	  sym_count = symtab_hdr->sh_size / bed->s->sizeof_sym;
	  ext_start = symtab_hdr->sh_info;

	  /* Scan symbols.  */
	  for (i = ext_start; i < sym_count; i++)
	    {
	      cmse_hash = elf32_arm_hash_entry (sym_hashes[i - ext_start]);

	      /* Assume it is a special symbol.  If not, cmse_scan will
		 warn about it and user can do something about it.  */
	      if (CONST_STRNEQ (cmse_hash->root.root.root.string,
		{
		  cmse_sec = cmse_hash->root.root.u.def.section;
		  if (!cmse_sec->gc_mark
		      && !_bfd_elf_gc_mark (info, cmse_sec, gc_mark_hook))
		    return FALSE;
		  /* The debug sections related to these secure entry
		     functions are marked on enabling below flag.  */
		  debug_sec_need_to_be_marked = TRUE;
		}
	    }

	  if (debug_sec_need_to_be_marked)
	    {
	      /* Looping over all the sections of the object file containing
		 Armv8-M secure entry functions and marking all the debug
		 sections.  */
	      for (isec = sub->sections; isec != NULL; isec = isec->next)
		{
		  /* If not a debug section, skip it.  */
		  if (!isec->gc_mark && (isec->flags & SEC_DEBUGGING))
		    isec->gc_mark = 1;
		}
	      debug_sec_need_to_be_marked = FALSE;
	    }
	}
    }
  first_bfd_browse = FALSE;
/* Treat mapping symbols as special target symbols.  */

static bfd_boolean
elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
{
  return bfd_is_arm_special_symbol_name (sym->name,
					 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
}
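/* Editorial note (not in the original source): the mapping symbols
   referred to here are the AAELF ones such as $a, $t and $d (plus their
   suffixed forms), which mark ARM code, Thumb code and data regions
   within a section.  */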
/* If the ELF symbol SYM might be a function in SEC, return the
   function size and set *CODE_OFF to the function's entry point,
   otherwise return zero.  */

static bfd_size_type
elf32_arm_maybe_function_sym (const asymbol *sym, asection *sec,
			      bfd_vma *code_off)
{
  bfd_size_type size;

  if ((sym->flags & (BSF_SECTION_SYM | BSF_FILE | BSF_OBJECT
		     | BSF_THREAD_LOCAL | BSF_RELC | BSF_SRELC)) != 0
      || sym->section != sec)
    return 0;

  if (!(sym->flags & BSF_SYNTHETIC))
    switch (ELF_ST_TYPE (((elf_symbol_type *) sym)->internal_elf_sym.st_info))
      case STT_ARM_TFUNC:

  if ((sym->flags & BSF_LOCAL)
      && bfd_is_arm_special_symbol_name (sym->name,
					 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
    return 0;

  *code_off = sym->value;

  if (!(sym->flags & BSF_SYNTHETIC))
    size = ((elf_symbol_type *) sym)->internal_elf_sym.st_size;
static bfd_boolean
elf32_arm_find_inliner_info (bfd * abfd,
			     const char ** filename_ptr,
			     const char ** functionname_ptr,
			     unsigned int * line_ptr)
{
  found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
					 functionname_ptr, line_ptr,
					 & elf_tdata (abfd)->dwarf2_find_line_info);
/* Find dynamic relocs for H that apply to read-only sections.  */

static asection *
readonly_dynrelocs (struct elf_link_hash_entry *h)
{
  struct elf_dyn_relocs *p;

  for (p = elf32_arm_hash_entry (h)->dyn_relocs; p != NULL; p = p->next)
    {
      asection *s = p->sec->output_section;

      if (s != NULL && (s->flags & SEC_READONLY) != 0)
	return p->sec;
    }
/* Adjust a symbol defined by a dynamic object and referenced by a
   regular object.  The current definition is in some section of the
   dynamic object, but we're not including those sections.  We have to
   change the definition to something the rest of the link can
   understand.  */

static bfd_boolean
elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
				 struct elf_link_hash_entry * h)
{
  asection *s, *srel;
  struct elf32_arm_link_hash_entry * eh;
  struct elf32_arm_link_hash_table *globals;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return FALSE;

  dynobj = elf_hash_table (info)->dynobj;

  /* Make sure we know what is going on here.  */
  BFD_ASSERT (dynobj != NULL
	      || h->type == STT_GNU_IFUNC
	      && !h->def_regular)));

  eh = (struct elf32_arm_link_hash_entry *) h;

  /* If this is a function, put it in the procedure linkage table.  We
     will fill in the contents of the procedure linkage table later,
     when we know the address of the .got section.  */
  if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
    {
      /* Calls to STT_GNU_IFUNC symbols always use a PLT, even if the
	 symbol binds locally.  */
      if (h->plt.refcount <= 0
	  || (h->type != STT_GNU_IFUNC
	      && (SYMBOL_CALLS_LOCAL (info, h)
		  || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
		      && h->root.type == bfd_link_hash_undefweak))))
	{
	  /* This case can occur if we saw a PLT32 reloc in an input
	     file, but the symbol was never referred to by a dynamic
	     object, or if all references were garbage collected.  In
	     such a case, we don't actually need to build a procedure
	     linkage table, and we can just do a PC24 reloc instead.  */
	  h->plt.offset = (bfd_vma) -1;
	  eh->plt.thumb_refcount = 0;
	  eh->plt.maybe_thumb_refcount = 0;
	  eh->plt.noncall_refcount = 0;
	}
    }
  else
    {
      /* It's possible that we incorrectly decided a .plt reloc was
	 needed for an R_ARM_PC24 or similar reloc to a non-function sym
	 in check_relocs.  We can't decide accurately between function
	 and non-function syms in check-relocs;  Objects loaded later in
	 the link may change h->type.  So fix it now.  */
      h->plt.offset = (bfd_vma) -1;
      eh->plt.thumb_refcount = 0;
      eh->plt.maybe_thumb_refcount = 0;
      eh->plt.noncall_refcount = 0;
    }

  /* If this is a weak symbol, and there is a real definition, the
     processor independent code will have arranged for us to see the
     real definition first, and we can just use the same value.  */
  if (h->is_weakalias)
    {
      struct elf_link_hash_entry *def = weakdef (h);
      BFD_ASSERT (def->root.type == bfd_link_hash_defined);
      h->root.u.def.section = def->root.u.def.section;
      h->root.u.def.value = def->root.u.def.value;
    }

  /* If there are no non-GOT references, we do not need a copy
     relocation.  */
  if (!h->non_got_ref)
    return TRUE;

  /* This is a reference to a symbol defined by a dynamic object which
     is not a function.  */

  /* If we are creating a shared library, we must presume that the
     only references to the symbol are via the global offset table.
     For such cases we need not do anything here; the relocations will
     be handled correctly by relocate_section.  Relocatable executables
     can reference data in shared objects directly, so we don't need to
     do anything here.  */
  if (bfd_link_pic (info) || globals->root.is_relocatable_executable)
    return TRUE;

  /* We must allocate the symbol in our .dynbss section, which will
     become part of the .bss section of the executable.  There will be
     an entry for this symbol in the .dynsym section.  The dynamic
     object will contain position independent code, so all references
     from the dynamic object to this symbol will go through the global
     offset table.  The dynamic linker will use the .dynsym entry to
     determine the address it must put in the global offset table, so
     both the dynamic object and the regular object will refer to the
     same memory location for the variable.  */
  /* If allowed, we must generate a R_ARM_COPY reloc to tell the dynamic
     linker to copy the initial value out of the dynamic object and into
     the runtime process image.  We need to remember the offset into the
     .rel(a).bss section we are going to use.  */
  if ((h->root.u.def.section->flags & SEC_READONLY) != 0)
    {
      s = globals->root.sdynrelro;
      srel = globals->root.sreldynrelro;
    }
  else
    {
      s = globals->root.sdynbss;
      srel = globals->root.srelbss;
    }
  if (info->nocopyreloc == 0
      && (h->root.u.def.section->flags & SEC_ALLOC) != 0
    elf32_arm_allocate_dynrelocs (info, srel, 1);

  return _bfd_elf_adjust_dynamic_copy (info, h, s);
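/* Editorial example (not in the original source): a global variable
   defined in a shared library but referenced directly (not via the GOT)
   from a non-PIC executable ends up here with space reserved in .dynbss,
   or in the read-only .data.rel.ro copy area when its defining section
   was read-only, together with one slot in the corresponding .rel(a).bss
   section for the R_ARM_COPY relocation.  */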
/* Allocate space in .plt, .got and associated reloc sections for
   dynamic relocs.  */

static bfd_boolean
allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf)
{
  struct bfd_link_info *info;
  struct elf32_arm_link_hash_table *htab;
  struct elf32_arm_link_hash_entry *eh;
  struct elf_dyn_relocs *p;

  if (h->root.type == bfd_link_hash_indirect)
    return TRUE;

  eh = (struct elf32_arm_link_hash_entry *) h;

  info = (struct bfd_link_info *) inf;
  htab = elf32_arm_hash_table (info);

  if ((htab->root.dynamic_sections_created || h->type == STT_GNU_IFUNC)
      && h->plt.refcount > 0)
    {
      /* Make sure this symbol is output as a dynamic symbol.
	 Undefined weak syms won't yet be marked as dynamic.  */
      if (h->dynindx == -1 && !h->forced_local
	  && h->root.type == bfd_link_hash_undefweak)
	{
	  if (! bfd_elf_link_record_dynamic_symbol (info, h))
	    return FALSE;
	}

      /* If the call in the PLT entry binds locally, the associated
	 GOT entry should use an R_ARM_IRELATIVE relocation instead of
	 the usual R_ARM_JUMP_SLOT.  Put it in the .iplt section rather
	 than the .plt section.  */
      if (h->type == STT_GNU_IFUNC && SYMBOL_CALLS_LOCAL (info, h))
	{
	  if (eh->plt.noncall_refcount == 0
	      && SYMBOL_REFERENCES_LOCAL (info, h))
	    /* All non-call references can be resolved directly.
	       This means that they can (and in some cases, must)
	       resolve directly to the run-time target, rather than
	       to the PLT.  That in turns means that any .got entry
	       would be equal to the .igot.plt entry, so there's
	       no point having both.  */
	    h->got.refcount = 0;
	}

      if (bfd_link_pic (info)
	  || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
	{
	  elf32_arm_allocate_plt_entry (info, eh->is_iplt, &h->plt, &eh->plt);

	  /* If this symbol is not defined in a regular file, and we are
	     not generating a shared library, then set the symbol to this
	     location in the .plt.  This is required to make function
	     pointers compare as equal between the normal executable and
	     the shared library.  */
	  if (! bfd_link_pic (info)
	      && !h->def_regular)
	    {
	      h->root.u.def.section = htab->root.splt;
	      h->root.u.def.value = h->plt.offset;

	      /* Make sure the function is not marked as Thumb, in case
		 it is the target of an ABS32 relocation, which will
		 point to the PLT entry.  */
	      ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
	    }

	  /* VxWorks executables have a second set of relocations for
	     each PLT entry.  They go in a separate relocation section,
	     which is processed by the kernel loader.  */
	  if (htab->vxworks_p && !bfd_link_pic (info))
	    {
	      /* There is a relocation for the initial PLT entry:
		 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_.  */
	      if (h->plt.offset == htab->plt_header_size)
		elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 1);

	      /* There are two extra relocations for each subsequent
		 PLT entry: an R_ARM_32 relocation for the GOT entry,
		 and an R_ARM_32 relocation for the PLT entry.  */
	      elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 2);
	    }
	}
      else
	h->plt.offset = (bfd_vma) -1;
    }
  else
    h->plt.offset = (bfd_vma) -1;

  eh = (struct elf32_arm_link_hash_entry *) h;
  eh->tlsdesc_got = (bfd_vma) -1;

  if (h->got.refcount > 0)
    {
      int tls_type = elf32_arm_hash_entry (h)->tls_type;

      /* Make sure this symbol is output as a dynamic symbol.
	 Undefined weak syms won't yet be marked as dynamic.  */
      if (htab->root.dynamic_sections_created && h->dynindx == -1 && !h->forced_local
	  && h->root.type == bfd_link_hash_undefweak)
	{
	  if (! bfd_elf_link_record_dynamic_symbol (info, h))
	    return FALSE;
	}

      if (!htab->symbian_p)
	{
	  s = htab->root.sgot;
	  h->got.offset = s->size;

	  if (tls_type == GOT_UNKNOWN)

	  if (tls_type == GOT_NORMAL)
	    /* Non-TLS symbols need one GOT slot.  */

	  if (tls_type & GOT_TLS_GDESC)
	    {
	      /* R_ARM_TLS_DESC needs 2 GOT slots.  */
	      eh->tlsdesc_got
		= (htab->root.sgotplt->size
		   - elf32_arm_compute_jump_table_size (htab));
	      htab->root.sgotplt->size += 8;
	      h->got.offset = (bfd_vma) -2;
	      /* plt.got_offset needs to know there's a TLS_DESC
		 reloc in the middle of .got.plt.  */
	      htab->num_tls_desc++;
	    }

	  if (tls_type & GOT_TLS_GD)
	    {
	      /* R_ARM_TLS_GD32 and R_ARM_TLS_GD32_FDPIC need two
		 consecutive GOT slots.  If the symbol is both GD
		 and GDESC, got.offset may have been  */
	      h->got.offset = s->size;
	    }

	  if (tls_type & GOT_TLS_IE)
	    /* R_ARM_TLS_IE32/R_ARM_TLS_IE32_FDPIC need one GOT  */

	  dyn = htab->root.dynamic_sections_created;

	  if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn,
					       bfd_link_pic (info),
					       h)
	      && (!bfd_link_pic (info)
		  || !SYMBOL_REFERENCES_LOCAL (info, h)))

	  if (tls_type != GOT_NORMAL
	      && (bfd_link_dll (info) || indx != 0)
	      && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
		  || h->root.type != bfd_link_hash_undefweak))
	    {
	      if (tls_type & GOT_TLS_IE)
		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);

	      if (tls_type & GOT_TLS_GD)
		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);

	      if (tls_type & GOT_TLS_GDESC)
		{
		  elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
		  /* GDESC needs a trampoline to jump to.  */
		  htab->tls_trampoline = -1;
		}

	      /* Only GD needs it.  GDESC just emits one relocation per  */
	      if ((tls_type & GOT_TLS_GD) && indx != 0)
		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
	    }
	  else if (((indx != -1) || htab->fdpic_p)
		   && !SYMBOL_REFERENCES_LOCAL (info, h))
	    {
	      if (htab->root.dynamic_sections_created)
		/* Reserve room for the GOT entry's R_ARM_GLOB_DAT relocation.  */
		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
	    }
	  else if (h->type == STT_GNU_IFUNC
		   && eh->plt.noncall_refcount == 0)
	    /* No non-call references resolve the STT_GNU_IFUNC's PLT entry;
	       they all resolve dynamically instead.  Reserve room for the
	       GOT entry's R_ARM_IRELATIVE relocation.  */
	    elf32_arm_allocate_irelocs (info, htab->root.srelgot, 1);
	  else if (bfd_link_pic (info)
		   && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
		       || h->root.type != bfd_link_hash_undefweak))
	    /* Reserve room for the GOT entry's R_ARM_RELATIVE relocation.  */
	    elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
	  else if (htab->fdpic_p && tls_type == GOT_NORMAL)
	    /* Reserve room for rofixup for FDPIC executable.  */
	    /* TLS relocs do not need space since they are completely  */
	    htab->srofixup->size += 4;
	}
    }
  else
    h->got.offset = (bfd_vma) -1;

  /* FDPIC support.  */
  if (eh->fdpic_cnts.gotofffuncdesc_cnt > 0)
    {
      /* Symbol mustn't be exported.  */
      if (h->dynindx != -1)

      /* We only allocate one function descriptor with its associated relocation.  */
      if (eh->fdpic_cnts.funcdesc_offset == -1)
	{
	  asection *s = htab->root.sgot;

	  eh->fdpic_cnts.funcdesc_offset = s->size;

	  /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups.  */
	  if (bfd_link_pic (info))
	    elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
	  else
	    htab->srofixup->size += 8;
	}
    }

  if (eh->fdpic_cnts.gotfuncdesc_cnt > 0)
    {
      asection *s = htab->root.sgot;

      if (htab->root.dynamic_sections_created && h->dynindx == -1
	  && !h->forced_local)
	if (! bfd_elf_link_record_dynamic_symbol (info, h))
	  return FALSE;

      if (h->dynindx == -1)
	{
	  /* We only allocate one function descriptor with its associated relocation.  */
	  if (eh->fdpic_cnts.funcdesc_offset == -1)
	    {
	      eh->fdpic_cnts.funcdesc_offset = s->size;

	      /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups.  */
	      if (bfd_link_pic (info))
		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
	      else
		htab->srofixup->size += 8;
	    }
	}

      /* Add one entry into the GOT and a R_ARM_FUNCDESC or
	 R_ARM_RELATIVE/rofixup relocation on it.  */
      eh->fdpic_cnts.gotfuncdesc_offset = s->size;

      if (h->dynindx == -1 && !bfd_link_pic (info))
	htab->srofixup->size += 4;
      else
	elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
    }

  if (eh->fdpic_cnts.funcdesc_cnt > 0)
    {
      if (htab->root.dynamic_sections_created && h->dynindx == -1
	  && !h->forced_local)
	if (! bfd_elf_link_record_dynamic_symbol (info, h))
	  return FALSE;

      if (h->dynindx == -1)
	{
	  /* We only allocate one function descriptor with its associated relocation.  */
	  if (eh->fdpic_cnts.funcdesc_offset == -1)
	    {
	      asection *s = htab->root.sgot;

	      eh->fdpic_cnts.funcdesc_offset = s->size;

	      /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups.  */
	      if (bfd_link_pic (info))
		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
	      else
		htab->srofixup->size += 8;
	    }
	}
      if (h->dynindx == -1 && !bfd_link_pic (info))
	{
	  /* For FDPIC executable we replace R_ARM_RELATIVE with a rofixup.  */
	  htab->srofixup->size += 4 * eh->fdpic_cnts.funcdesc_cnt;
	}
      else
	{
	  /* Will need one dynamic reloc per reference.  It will be either
	     R_ARM_FUNCDESC or R_ARM_RELATIVE for hidden symbols.  */
	  elf32_arm_allocate_dynrelocs (info, htab->root.srelgot,
					eh->fdpic_cnts.funcdesc_cnt);
	}
    }

  /* Allocate stubs for exported Thumb functions on v4t.  */
  if (!htab->use_blx && h->dynindx != -1
      && ARM_GET_SYM_BRANCH_TYPE (h->target_internal) == ST_BRANCH_TO_THUMB
      && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
    {
      struct elf_link_hash_entry * th;
      struct bfd_link_hash_entry * bh;
      struct elf_link_hash_entry * myh;

      /* Create a new symbol to register the real location of the function.  */
      s = h->root.u.def.section;
      sprintf (name, "__real_%s", h->root.root.string);
      _bfd_generic_link_add_one_symbol (info, s->owner,
					name, BSF_GLOBAL, s,
					h->root.u.def.value,
					NULL, TRUE, FALSE, &bh);

      myh = (struct elf_link_hash_entry *) bh;
      myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
      myh->forced_local = 1;
      ARM_SET_SYM_BRANCH_TYPE (myh->target_internal, ST_BRANCH_TO_THUMB);
      eh->export_glue = myh;
      th = record_arm_to_thumb_glue (info, h);
      /* Point the symbol at the stub.  */
      h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
      ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
      h->root.u.def.section = th->root.u.def.section;
      h->root.u.def.value = th->root.u.def.value & ~1;
    }

  if (eh->dyn_relocs == NULL)
    return TRUE;

  /* In the shared -Bsymbolic case, discard space allocated for
     dynamic pc-relative relocs against symbols which turn out to be
     defined in regular objects.  For the normal shared case, discard
     space for pc-relative relocs that have become local due to symbol
     visibility changes.  */

  if (bfd_link_pic (info) || htab->root.is_relocatable_executable || htab->fdpic_p)
    {
      /* Relocs that use pc_count are PC-relative forms, which will appear
	 on something like ".long foo - ." or "movw REG, foo - .".  We want
	 calls to protected symbols to resolve directly to the function
	 rather than going via the plt.  If people want function pointer
	 comparisons to work as expected then they should avoid writing
	 assembly like ".long foo - .".  */
      if (SYMBOL_CALLS_LOCAL (info, h))
	{
	  struct elf_dyn_relocs **pp;

	  for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
	    {
	      p->count -= p->pc_count;
	    }
	}

      if (htab->vxworks_p)
	{
	  struct elf_dyn_relocs **pp;

	  for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
	    {
	      if (strcmp (p->sec->output_section->name, ".tls_vars") == 0)
	    }
	}

      /* Also discard relocs on undefined weak syms with non-default
	 visibility.  */
      if (eh->dyn_relocs != NULL
	  && h->root.type == bfd_link_hash_undefweak)
	{
	  if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
	      || UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
	    eh->dyn_relocs = NULL;

	  /* Make sure undefined weak symbols are output as a dynamic
	     symbol.  */
	  else if (htab->root.dynamic_sections_created && h->dynindx == -1
		   && !h->forced_local)
	    {
	      if (! bfd_elf_link_record_dynamic_symbol (info, h))
		return FALSE;
	    }
	}
      else if (htab->root.is_relocatable_executable && h->dynindx == -1
	       && h->root.type == bfd_link_hash_new)
	{
	  /* Output absolute symbols so that we can create relocations
	     against them.  For normal symbols we output a relocation
	     against the section that contains them.  */
	  if (! bfd_elf_link_record_dynamic_symbol (info, h))
	    return FALSE;
	}
    }
  else
    {
      /* For the non-shared case, discard space for relocs against
	 symbols which turn out to need copy relocs or are not
	 dynamic.  */
      if (!h->non_got_ref
	  && ((h->def_dynamic
	       && !h->def_regular)
	      || (htab->root.dynamic_sections_created
		  && (h->root.type == bfd_link_hash_undefweak
		      || h->root.type == bfd_link_hash_undefined))))
	{
	  /* Make sure this symbol is output as a dynamic symbol.
	     Undefined weak syms won't yet be marked as dynamic.  */
	  if (h->dynindx == -1 && !h->forced_local
	      && h->root.type == bfd_link_hash_undefweak)
	    {
	      if (! bfd_elf_link_record_dynamic_symbol (info, h))
		return FALSE;
	    }

	  /* If that succeeded, we know we'll be keeping all the
	     relocs.  */
	  if (h->dynindx != -1)
	}

      eh->dyn_relocs = NULL;
    }

  /* Finally, allocate space.  */
  for (p = eh->dyn_relocs; p != NULL; p = p->next)
    {
      asection *sreloc = elf_section_data (p->sec)->sreloc;

      if (h->type == STT_GNU_IFUNC
	  && eh->plt.noncall_refcount == 0
	  && SYMBOL_REFERENCES_LOCAL (info, h))
	elf32_arm_allocate_irelocs (info, sreloc, p->count);
      else if (h->dynindx != -1 && (!bfd_link_pic (info) || !info->symbolic || !h->def_regular))
	elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
      else if (htab->fdpic_p && !bfd_link_pic (info))
	htab->srofixup->size += 4 * p->count;
      else
	elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
    }
/* Set DF_TEXTREL if we find any dynamic relocs that apply to
   read-only sections.  */

static bfd_boolean
maybe_set_textrel (struct elf_link_hash_entry *h, void *info_p)
{
  if (h->root.type == bfd_link_hash_indirect)
    return TRUE;

  sec = readonly_dynrelocs (h);
  if (sec != NULL)
    {
      struct bfd_link_info *info = (struct bfd_link_info *) info_p;

      info->flags |= DF_TEXTREL;
      info->callbacks->minfo
	(_("%pB: dynamic relocation against `%pT' in read-only section `%pA'\n"),
	 sec->owner, h->root.root.string, sec);

      /* Not an error, just cut short the traversal.  */
      return FALSE;
    }

void
bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
				 int byteswap_code)
{
  struct elf32_arm_link_hash_table *globals;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return;

  globals->byteswap_code = byteswap_code;
}
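
/* Illustrative usage note (an assumption about the caller, not taken from
   this file): the BE8 setting is normally driven from the linker front end,
   which would call something like

     bfd_elf32_arm_set_byteswap_code (&link_info, byteswap_code);

   before the dynamic sections are sized, so that elf32_arm_init_file_header
   can later set EF_ARM_BE8 in the output e_flags.  */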
16768 /* Set the sizes of the dynamic sections. */
16771 elf32_arm_size_dynamic_sections (bfd
* output_bfd ATTRIBUTE_UNUSED
,
16772 struct bfd_link_info
* info
)
16777 bfd_boolean relocs
;
16779 struct elf32_arm_link_hash_table
*htab
;
16781 htab
= elf32_arm_hash_table (info
);
16785 dynobj
= elf_hash_table (info
)->dynobj
;
16786 BFD_ASSERT (dynobj
!= NULL
);
16787 check_use_blx (htab
);
16789 if (elf_hash_table (info
)->dynamic_sections_created
)
16791 /* Set the contents of the .interp section to the interpreter. */
16792 if (bfd_link_executable (info
) && !info
->nointerp
)
16794 s
= bfd_get_linker_section (dynobj
, ".interp");
16795 BFD_ASSERT (s
!= NULL
);
16796 s
->size
= sizeof ELF_DYNAMIC_INTERPRETER
;
16797 s
->contents
= (unsigned char *) ELF_DYNAMIC_INTERPRETER
;
16801 /* Set up .got offsets for local syms, and space for local dynamic
16803 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link
.next
)
16805 bfd_signed_vma
*local_got
;
16806 bfd_signed_vma
*end_local_got
;
16807 struct arm_local_iplt_info
**local_iplt_ptr
, *local_iplt
;
16808 char *local_tls_type
;
16809 bfd_vma
*local_tlsdesc_gotent
;
16810 bfd_size_type locsymcount
;
16811 Elf_Internal_Shdr
*symtab_hdr
;
16813 bfd_boolean is_vxworks
= htab
->vxworks_p
;
16814 unsigned int symndx
;
16815 struct fdpic_local
*local_fdpic_cnts
;
16817 if (! is_arm_elf (ibfd
))
16820 for (s
= ibfd
->sections
; s
!= NULL
; s
= s
->next
)
16822 struct elf_dyn_relocs
*p
;
16824 for (p
= (struct elf_dyn_relocs
*)
16825 elf_section_data (s
)->local_dynrel
; p
!= NULL
; p
= p
->next
)
16827 if (!bfd_is_abs_section (p
->sec
)
16828 && bfd_is_abs_section (p
->sec
->output_section
))
16830 /* Input section has been discarded, either because
16831 it is a copy of a linkonce section or due to
16832 linker script /DISCARD/, so we'll be discarding
16835 else if (is_vxworks
16836 && strcmp (p
->sec
->output_section
->name
,
16839 /* Relocations in vxworks .tls_vars sections are
16840 handled specially by the loader. */
16842 else if (p
->count
!= 0)
16844 srel
= elf_section_data (p
->sec
)->sreloc
;
16845 if (htab
->fdpic_p
&& !bfd_link_pic(info
))
16846 htab
->srofixup
->size
+= 4 * p
->count
;
16848 elf32_arm_allocate_dynrelocs (info
, srel
, p
->count
);
16849 if ((p
->sec
->output_section
->flags
& SEC_READONLY
) != 0)
16850 info
->flags
|= DF_TEXTREL
;
16855 local_got
= elf_local_got_refcounts (ibfd
);
16859 symtab_hdr
= & elf_symtab_hdr (ibfd
);
16860 locsymcount
= symtab_hdr
->sh_info
;
16861 end_local_got
= local_got
+ locsymcount
;
16862 local_iplt_ptr
= elf32_arm_local_iplt (ibfd
);
16863 local_tls_type
= elf32_arm_local_got_tls_type (ibfd
);
16864 local_tlsdesc_gotent
= elf32_arm_local_tlsdesc_gotent (ibfd
);
16865 local_fdpic_cnts
= elf32_arm_local_fdpic_cnts (ibfd
);
16867 s
= htab
->root
.sgot
;
16868 srel
= htab
->root
.srelgot
;
16869 for (; local_got
< end_local_got
;
16870 ++local_got
, ++local_iplt_ptr
, ++local_tls_type
,
16871 ++local_tlsdesc_gotent
, ++symndx
, ++local_fdpic_cnts
)
16873 *local_tlsdesc_gotent
= (bfd_vma
) -1;
16874 local_iplt
= *local_iplt_ptr
;
16876 /* FDPIC support. */
16877 if (local_fdpic_cnts
->gotofffuncdesc_cnt
> 0)
16879 if (local_fdpic_cnts
->funcdesc_offset
== -1)
16881 local_fdpic_cnts
->funcdesc_offset
= s
->size
;
16884 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups. */
16885 if (bfd_link_pic(info
))
16886 elf32_arm_allocate_dynrelocs (info
, srel
, 1);
16888 htab
->srofixup
->size
+= 8;
16892 if (local_fdpic_cnts
->funcdesc_cnt
> 0)
16894 if (local_fdpic_cnts
->funcdesc_offset
== -1)
16896 local_fdpic_cnts
->funcdesc_offset
= s
->size
;
16899 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups. */
16900 if (bfd_link_pic(info
))
16901 elf32_arm_allocate_dynrelocs (info
, srel
, 1);
16903 htab
->srofixup
->size
+= 8;
16906 /* We will add n R_ARM_RELATIVE relocations or n rofixups. */
16907 if (bfd_link_pic(info
))
16908 elf32_arm_allocate_dynrelocs (info
, srel
, local_fdpic_cnts
->funcdesc_cnt
);
16910 htab
->srofixup
->size
+= 4 * local_fdpic_cnts
->funcdesc_cnt
;
16913 if (local_iplt
!= NULL
)
16915 struct elf_dyn_relocs
*p
;
16917 if (local_iplt
->root
.refcount
> 0)
16919 elf32_arm_allocate_plt_entry (info
, TRUE
,
16922 if (local_iplt
->arm
.noncall_refcount
== 0)
16923 /* All references to the PLT are calls, so all
16924 non-call references can resolve directly to the
16925 run-time target. This means that the .got entry
16926 would be the same as the .igot.plt entry, so there's
16927 no point creating both. */
16932 BFD_ASSERT (local_iplt
->arm
.noncall_refcount
== 0);
16933 local_iplt
->root
.offset
= (bfd_vma
) -1;
16936 for (p
= local_iplt
->dyn_relocs
; p
!= NULL
; p
= p
->next
)
16940 psrel
= elf_section_data (p
->sec
)->sreloc
;
16941 if (local_iplt
->arm
.noncall_refcount
== 0)
16942 elf32_arm_allocate_irelocs (info
, psrel
, p
->count
);
16944 elf32_arm_allocate_dynrelocs (info
, psrel
, p
->count
);
16947 if (*local_got
> 0)
16949 Elf_Internal_Sym
*isym
;
16951 *local_got
= s
->size
;
16952 if (*local_tls_type
& GOT_TLS_GD
)
16953 /* TLS_GD relocs need an 8-byte structure in the GOT. */
16955 if (*local_tls_type
& GOT_TLS_GDESC
)
16957 *local_tlsdesc_gotent
= htab
->root
.sgotplt
->size
16958 - elf32_arm_compute_jump_table_size (htab
);
16959 htab
->root
.sgotplt
->size
+= 8;
16960 *local_got
= (bfd_vma
) -2;
16961 /* plt.got_offset needs to know there's a TLS_DESC
16962 reloc in the middle of .got.plt. */
16963 htab
->num_tls_desc
++;
16965 if (*local_tls_type
& GOT_TLS_IE
)
16968 if (*local_tls_type
& GOT_NORMAL
)
16970 /* If the symbol is both GD and GDESC, *local_got
16971 may have been overwritten. */
16972 *local_got
= s
->size
;
16976 isym
= bfd_sym_from_r_symndx (&htab
->sym_cache
, ibfd
, symndx
);
16980 /* If all references to an STT_GNU_IFUNC PLT are calls,
16981 then all non-call references, including this GOT entry,
16982 resolve directly to the run-time target. */
16983 if (ELF32_ST_TYPE (isym
->st_info
) == STT_GNU_IFUNC
16984 && (local_iplt
== NULL
16985 || local_iplt
->arm
.noncall_refcount
== 0))
16986 elf32_arm_allocate_irelocs (info
, srel
, 1);
16987 else if (bfd_link_pic (info
) || output_bfd
->flags
& DYNAMIC
|| htab
->fdpic_p
)
16989 if ((bfd_link_pic (info
) && !(*local_tls_type
& GOT_TLS_GDESC
)))
16990 elf32_arm_allocate_dynrelocs (info
, srel
, 1);
16991 else if (htab
->fdpic_p
&& *local_tls_type
& GOT_NORMAL
)
16992 htab
->srofixup
->size
+= 4;
16994 if ((bfd_link_pic (info
) || htab
->fdpic_p
)
16995 && *local_tls_type
& GOT_TLS_GDESC
)
16997 elf32_arm_allocate_dynrelocs (info
,
16998 htab
->root
.srelplt
, 1);
16999 htab
->tls_trampoline
= -1;
17004 *local_got
= (bfd_vma
) -1;
  if (htab->tls_ldm_got.refcount > 0)
    {
      /* Allocate two GOT entries and one dynamic relocation (if necessary)
	 for R_ARM_TLS_LDM32/R_ARM_TLS_LDM32_FDPIC relocations.  */
      htab->tls_ldm_got.offset = htab->root.sgot->size;
      htab->root.sgot->size += 8;
      if (bfd_link_pic (info))
	elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
    }
  else
    htab->tls_ldm_got.offset = -1;

  /* At the very end of the .rofixup section is a pointer to the GOT,
     reserve space for it.  */
  if (htab->fdpic_p && htab->srofixup != NULL)
    htab->srofixup->size += 4;

  /* Allocate global sym .plt and .got entries, and space for global
     sym dynamic relocs.  */
  elf_link_hash_traverse (& htab->root, allocate_dynrelocs_for_symbol, info);

  /* Here we rummage through the found bfds to collect glue information.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    {
      if (! is_arm_elf (ibfd))
	continue;

      /* Initialise mapping tables for code/data.  */
      bfd_elf32_arm_init_maps (ibfd);

      if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
	  || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info)
	  || !bfd_elf32_arm_stm32l4xx_erratum_scan (ibfd, info))
	_bfd_error_handler (_("errors encountered processing file %pB"), ibfd);
    }

  /* Allocate space for the glue sections now that we've sized them.  */
  bfd_elf32_arm_allocate_interworking_sections (info);

  /* For every jump slot reserved in the sgotplt, reloc_count is
     incremented.  However, when we reserve space for TLS descriptors,
     it's not incremented, so in order to compute the space reserved
     for them, it suffices to multiply the reloc count by the jump
     slot size.  */
  if (htab->root.srelplt)
    htab->sgotplt_jump_table_size = elf32_arm_compute_jump_table_size (htab);

  if (htab->tls_trampoline)
    {
      if (htab->root.splt->size == 0)
	htab->root.splt->size += htab->plt_header_size;

      htab->tls_trampoline = htab->root.splt->size;
      htab->root.splt->size += htab->plt_entry_size;

      /* If we're not using lazy TLS relocations, don't generate the
	 PLT and GOT entries they require.  */
      if (!(info->flags & DF_BIND_NOW))
	{
	  htab->dt_tlsdesc_got = htab->root.sgot->size;
	  htab->root.sgot->size += 4;

	  htab->dt_tlsdesc_plt = htab->root.splt->size;
	  htab->root.splt->size += 4 * ARRAY_SIZE (dl_tlsdesc_lazy_trampoline);
	}
    }
17075 /* The check_relocs and adjust_dynamic_symbol entry points have
17076 determined the sizes of the various dynamic sections. Allocate
17077 memory for them. */
17080 for (s
= dynobj
->sections
; s
!= NULL
; s
= s
->next
)
17084 if ((s
->flags
& SEC_LINKER_CREATED
) == 0)
17087 /* It's OK to base decisions on the section name, because none
17088 of the dynobj section names depend upon the input files. */
17089 name
= bfd_section_name (s
);
17091 if (s
== htab
->root
.splt
)
17093 /* Remember whether there is a PLT. */
17094 plt
= s
->size
!= 0;
17096 else if (CONST_STRNEQ (name
, ".rel"))
17100 /* Remember whether there are any reloc sections other
17101 than .rel(a).plt and .rela.plt.unloaded. */
17102 if (s
!= htab
->root
.srelplt
&& s
!= htab
->srelplt2
)
17105 /* We use the reloc_count field as a counter if we need
17106 to copy relocs into the output file. */
17107 s
->reloc_count
= 0;
17110 else if (s
!= htab
->root
.sgot
17111 && s
!= htab
->root
.sgotplt
17112 && s
!= htab
->root
.iplt
17113 && s
!= htab
->root
.igotplt
17114 && s
!= htab
->root
.sdynbss
17115 && s
!= htab
->root
.sdynrelro
17116 && s
!= htab
->srofixup
)
17118 /* It's not one of our sections, so don't allocate space. */
17124 /* If we don't need this section, strip it from the
17125 output file. This is mostly to handle .rel(a).bss and
17126 .rel(a).plt. We must create both sections in
17127 create_dynamic_sections, because they must be created
17128 before the linker maps input sections to output
17129 sections. The linker does that before
17130 adjust_dynamic_symbol is called, and it is that
17131 function which decides whether anything needs to go
17132 into these sections. */
17133 s
->flags
|= SEC_EXCLUDE
;
17137 if ((s
->flags
& SEC_HAS_CONTENTS
) == 0)
17140 /* Allocate memory for the section contents. */
17141 s
->contents
= (unsigned char *) bfd_zalloc (dynobj
, s
->size
);
17142 if (s
->contents
== NULL
)
17146 if (elf_hash_table (info
)->dynamic_sections_created
)
17148 /* Add some entries to the .dynamic section. We fill in the
17149 values later, in elf32_arm_finish_dynamic_sections, but we
17150 must add the entries now so that we get the correct size for
17151 the .dynamic section. The DT_DEBUG entry is filled in by the
17152 dynamic linker and used by the debugger. */
17153 #define add_dynamic_entry(TAG, VAL) \
17154 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
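
/* Worked example (illustrative only): with the macro above in scope,
   add_dynamic_entry (DT_PLTGOT, 0) simply expands to
   _bfd_elf_add_dynamic_entry (info, DT_PLTGOT, 0), reserving one .dynamic
   slot whose value is filled in later by elf32_arm_finish_dynamic_sections.  */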
17156 if (bfd_link_executable (info
))
17158 if (!add_dynamic_entry (DT_DEBUG
, 0))
17164 if ( !add_dynamic_entry (DT_PLTGOT
, 0)
17165 || !add_dynamic_entry (DT_PLTRELSZ
, 0)
17166 || !add_dynamic_entry (DT_PLTREL
,
17167 htab
->use_rel
? DT_REL
: DT_RELA
)
17168 || !add_dynamic_entry (DT_JMPREL
, 0))
17171 if (htab
->dt_tlsdesc_plt
17172 && (!add_dynamic_entry (DT_TLSDESC_PLT
,0)
17173 || !add_dynamic_entry (DT_TLSDESC_GOT
,0)))
17181 if (!add_dynamic_entry (DT_REL
, 0)
17182 || !add_dynamic_entry (DT_RELSZ
, 0)
17183 || !add_dynamic_entry (DT_RELENT
, RELOC_SIZE (htab
)))
17188 if (!add_dynamic_entry (DT_RELA
, 0)
17189 || !add_dynamic_entry (DT_RELASZ
, 0)
17190 || !add_dynamic_entry (DT_RELAENT
, RELOC_SIZE (htab
)))
17195 /* If any dynamic relocs apply to a read-only section,
17196 then we need a DT_TEXTREL entry. */
17197 if ((info
->flags
& DF_TEXTREL
) == 0)
17198 elf_link_hash_traverse (&htab
->root
, maybe_set_textrel
, info
);
17200 if ((info
->flags
& DF_TEXTREL
) != 0)
17202 if (!add_dynamic_entry (DT_TEXTREL
, 0))
17205 if (htab
->vxworks_p
17206 && !elf_vxworks_add_dynamic_entries (output_bfd
, info
))
17209 #undef add_dynamic_entry
17214 /* Size sections even though they're not dynamic. We use it to setup
17215 _TLS_MODULE_BASE_, if needed. */
17218 elf32_arm_always_size_sections (bfd
*output_bfd
,
17219 struct bfd_link_info
*info
)
17222 struct elf32_arm_link_hash_table
*htab
;
17224 htab
= elf32_arm_hash_table (info
);
17226 if (bfd_link_relocatable (info
))
17229 tls_sec
= elf_hash_table (info
)->tls_sec
;
17233 struct elf_link_hash_entry
*tlsbase
;
17235 tlsbase
= elf_link_hash_lookup
17236 (elf_hash_table (info
), "_TLS_MODULE_BASE_", TRUE
, TRUE
, FALSE
);
17240 struct bfd_link_hash_entry
*bh
= NULL
;
17241 const struct elf_backend_data
*bed
17242 = get_elf_backend_data (output_bfd
);
17244 if (!(_bfd_generic_link_add_one_symbol
17245 (info
, output_bfd
, "_TLS_MODULE_BASE_", BSF_LOCAL
,
17246 tls_sec
, 0, NULL
, FALSE
,
17247 bed
->collect
, &bh
)))
17250 tlsbase
->type
= STT_TLS
;
17251 tlsbase
= (struct elf_link_hash_entry
*)bh
;
17252 tlsbase
->def_regular
= 1;
17253 tlsbase
->other
= STV_HIDDEN
;
17254 (*bed
->elf_backend_hide_symbol
) (info
, tlsbase
, TRUE
);
17258 if (htab
->fdpic_p
&& !bfd_link_relocatable (info
)
17259 && !bfd_elf_stack_segment_size (output_bfd
, info
,
17260 "__stacksize", DEFAULT_STACK_SIZE
))
17266 /* Finish up dynamic symbol handling. We set the contents of various
17267 dynamic sections here. */
17270 elf32_arm_finish_dynamic_symbol (bfd
* output_bfd
,
17271 struct bfd_link_info
* info
,
17272 struct elf_link_hash_entry
* h
,
17273 Elf_Internal_Sym
* sym
)
17275 struct elf32_arm_link_hash_table
*htab
;
17276 struct elf32_arm_link_hash_entry
*eh
;
17278 htab
= elf32_arm_hash_table (info
);
17282 eh
= (struct elf32_arm_link_hash_entry
*) h
;
17284 if (h
->plt
.offset
!= (bfd_vma
) -1)
17288 BFD_ASSERT (h
->dynindx
!= -1);
17289 if (! elf32_arm_populate_plt_entry (output_bfd
, info
, &h
->plt
, &eh
->plt
,
17294 if (!h
->def_regular
)
17296 /* Mark the symbol as undefined, rather than as defined in
17297 the .plt section. */
17298 sym
->st_shndx
= SHN_UNDEF
;
17299 /* If the symbol is weak we need to clear the value.
17300 Otherwise, the PLT entry would provide a definition for
17301 the symbol even if the symbol wasn't defined anywhere,
17302 and so the symbol would never be NULL. Leave the value if
17303 there were any relocations where pointer equality matters
17304 (this is a clue for the dynamic linker, to make function
17305 pointer comparisons work between an application and shared
17307 if (!h
->ref_regular_nonweak
|| !h
->pointer_equality_needed
)
17310 else if (eh
->is_iplt
&& eh
->plt
.noncall_refcount
!= 0)
17312 /* At least one non-call relocation references this .iplt entry,
17313 so the .iplt entry is the function's canonical address. */
17314 sym
->st_info
= ELF_ST_INFO (ELF_ST_BIND (sym
->st_info
), STT_FUNC
);
17315 ARM_SET_SYM_BRANCH_TYPE (sym
->st_target_internal
, ST_BRANCH_TO_ARM
);
17316 sym
->st_shndx
= (_bfd_elf_section_from_bfd_section
17317 (output_bfd
, htab
->root
.iplt
->output_section
));
17318 sym
->st_value
= (h
->plt
.offset
17319 + htab
->root
.iplt
->output_section
->vma
17320 + htab
->root
.iplt
->output_offset
);
17327 Elf_Internal_Rela rel
;
17329 /* This symbol needs a copy reloc. Set it up. */
17330 BFD_ASSERT (h
->dynindx
!= -1
17331 && (h
->root
.type
== bfd_link_hash_defined
17332 || h
->root
.type
== bfd_link_hash_defweak
));
17335 rel
.r_offset
= (h
->root
.u
.def
.value
17336 + h
->root
.u
.def
.section
->output_section
->vma
17337 + h
->root
.u
.def
.section
->output_offset
);
17338 rel
.r_info
= ELF32_R_INFO (h
->dynindx
, R_ARM_COPY
);
17339 if (h
->root
.u
.def
.section
== htab
->root
.sdynrelro
)
17340 s
= htab
->root
.sreldynrelro
;
17342 s
= htab
->root
.srelbss
;
17343 elf32_arm_add_dynreloc (output_bfd
, info
, s
, &rel
);
17346 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. On VxWorks,
17347 and for FDPIC, the _GLOBAL_OFFSET_TABLE_ symbol is not absolute:
17348 it is relative to the ".got" section. */
17349 if (h
== htab
->root
.hdynamic
17350 || (!htab
->fdpic_p
&& !htab
->vxworks_p
&& h
== htab
->root
.hgot
))
17351 sym
->st_shndx
= SHN_ABS
;
17357 arm_put_trampoline (struct elf32_arm_link_hash_table
*htab
, bfd
*output_bfd
,
17359 const unsigned long *template, unsigned count
)
17363 for (ix
= 0; ix
!= count
; ix
++)
17365 unsigned long insn
= template[ix
];
17367 /* Emit mov pc,rx if bx is not permitted. */
17368 if (htab
->fix_v4bx
== 1 && (insn
& 0x0ffffff0) == 0x012fff10)
17369 insn
= (insn
& 0xf000000f) | 0x01a0f000;
17370 put_arm_insn (htab
, output_bfd
, insn
, (char *)contents
+ ix
*4);
17374 /* Install the special first PLT entry for elf32-arm-nacl. Unlike
17375 other variants, NaCl needs this entry in a static executable's
17376 .iplt too. When we're handling that case, GOT_DISPLACEMENT is
17377 zero. For .iplt really only the last bundle is useful, and .iplt
17378 could have a shorter first entry, with each individual PLT entry's
17379 relative branch calculated differently so it targets the last
17380 bundle instead of the instruction before it (labelled .Lplt_tail
17381 above). But it's simpler to keep the size and layout of PLT0
17382 consistent with the dynamic case, at the cost of some dead code at
17383 the start of .iplt and the one dead store to the stack at the start
17386 arm_nacl_put_plt0 (struct elf32_arm_link_hash_table
*htab
, bfd
*output_bfd
,
17387 asection
*plt
, bfd_vma got_displacement
)
17391 put_arm_insn (htab
, output_bfd
,
17392 elf32_arm_nacl_plt0_entry
[0]
17393 | arm_movw_immediate (got_displacement
),
17394 plt
->contents
+ 0);
17395 put_arm_insn (htab
, output_bfd
,
17396 elf32_arm_nacl_plt0_entry
[1]
17397 | arm_movt_immediate (got_displacement
),
17398 plt
->contents
+ 4);
17400 for (i
= 2; i
< ARRAY_SIZE (elf32_arm_nacl_plt0_entry
); ++i
)
17401 put_arm_insn (htab
, output_bfd
,
17402 elf32_arm_nacl_plt0_entry
[i
],
17403 plt
->contents
+ (i
* 4));
17406 /* Finish up the dynamic sections. */
17409 elf32_arm_finish_dynamic_sections (bfd
* output_bfd
, struct bfd_link_info
* info
)
17414 struct elf32_arm_link_hash_table
*htab
;
17416 htab
= elf32_arm_hash_table (info
);
17420 dynobj
= elf_hash_table (info
)->dynobj
;
17422 sgot
= htab
->root
.sgotplt
;
17423 /* A broken linker script might have discarded the dynamic sections.
17424 Catch this here so that we do not seg-fault later on. */
17425 if (sgot
!= NULL
&& bfd_is_abs_section (sgot
->output_section
))
17427 sdyn
= bfd_get_linker_section (dynobj
, ".dynamic");
17429 if (elf_hash_table (info
)->dynamic_sections_created
)
17432 Elf32_External_Dyn
*dyncon
, *dynconend
;
17434 splt
= htab
->root
.splt
;
17435 BFD_ASSERT (splt
!= NULL
&& sdyn
!= NULL
);
17436 BFD_ASSERT (htab
->symbian_p
|| sgot
!= NULL
);
17438 dyncon
= (Elf32_External_Dyn
*) sdyn
->contents
;
17439 dynconend
= (Elf32_External_Dyn
*) (sdyn
->contents
+ sdyn
->size
);
17441 for (; dyncon
< dynconend
; dyncon
++)
17443 Elf_Internal_Dyn dyn
;
17447 bfd_elf32_swap_dyn_in (dynobj
, dyncon
, &dyn
);
17454 if (htab
->vxworks_p
17455 && elf_vxworks_finish_dynamic_entry (output_bfd
, &dyn
))
17456 bfd_elf32_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
17461 goto get_vma_if_bpabi
;
17464 goto get_vma_if_bpabi
;
17467 goto get_vma_if_bpabi
;
17469 name
= ".gnu.version";
17470 goto get_vma_if_bpabi
;
17472 name
= ".gnu.version_d";
17473 goto get_vma_if_bpabi
;
17475 name
= ".gnu.version_r";
17476 goto get_vma_if_bpabi
;
17479 name
= htab
->symbian_p
? ".got" : ".got.plt";
17482 name
= RELOC_SECTION (htab
, ".plt");
17484 s
= bfd_get_linker_section (dynobj
, name
);
17488 (_("could not find section %s"), name
);
17489 bfd_set_error (bfd_error_invalid_operation
);
17492 if (!htab
->symbian_p
)
17493 dyn
.d_un
.d_ptr
= s
->output_section
->vma
+ s
->output_offset
;
17495 /* In the BPABI, tags in the PT_DYNAMIC section point
17496 at the file offset, not the memory address, for the
17497 convenience of the post linker. */
17498 dyn
.d_un
.d_ptr
= s
->output_section
->filepos
+ s
->output_offset
;
17499 bfd_elf32_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
17503 if (htab
->symbian_p
)
17508 s
= htab
->root
.srelplt
;
17509 BFD_ASSERT (s
!= NULL
);
17510 dyn
.d_un
.d_val
= s
->size
;
17511 bfd_elf32_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
17518 /* In the BPABI, the DT_REL tag must point at the file
17519 offset, not the VMA, of the first relocation
17520 section. So, we use code similar to that in
17521 elflink.c, but do not check for SHF_ALLOC on the
17522 relocation section, since relocation sections are
17523 never allocated under the BPABI. PLT relocs are also
17525 if (htab
->symbian_p
)
17528 type
= ((dyn
.d_tag
== DT_REL
|| dyn
.d_tag
== DT_RELSZ
)
17529 ? SHT_REL
: SHT_RELA
);
17530 dyn
.d_un
.d_val
= 0;
17531 for (i
= 1; i
< elf_numsections (output_bfd
); i
++)
17533 Elf_Internal_Shdr
*hdr
17534 = elf_elfsections (output_bfd
)[i
];
17535 if (hdr
->sh_type
== type
)
17537 if (dyn
.d_tag
== DT_RELSZ
17538 || dyn
.d_tag
== DT_RELASZ
)
17539 dyn
.d_un
.d_val
+= hdr
->sh_size
;
17540 else if ((ufile_ptr
) hdr
->sh_offset
17541 <= dyn
.d_un
.d_val
- 1)
17542 dyn
.d_un
.d_val
= hdr
->sh_offset
;
17545 bfd_elf32_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
17549 case DT_TLSDESC_PLT
:
17550 s
= htab
->root
.splt
;
17551 dyn
.d_un
.d_ptr
= (s
->output_section
->vma
+ s
->output_offset
17552 + htab
->dt_tlsdesc_plt
);
17553 bfd_elf32_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
17556 case DT_TLSDESC_GOT
:
17557 s
= htab
->root
.sgot
;
17558 dyn
.d_un
.d_ptr
= (s
->output_section
->vma
+ s
->output_offset
17559 + htab
->dt_tlsdesc_got
);
17560 bfd_elf32_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
17563 /* Set the bottom bit of DT_INIT/FINI if the
17564 corresponding function is Thumb. */
17566 name
= info
->init_function
;
17569 name
= info
->fini_function
;
17571 /* If it wasn't set by elf_bfd_final_link
17572 then there is nothing to adjust. */
17573 if (dyn
.d_un
.d_val
!= 0)
17575 struct elf_link_hash_entry
* eh
;
17577 eh
= elf_link_hash_lookup (elf_hash_table (info
), name
,
17578 FALSE
, FALSE
, TRUE
);
17580 && ARM_GET_SYM_BRANCH_TYPE (eh
->target_internal
)
17581 == ST_BRANCH_TO_THUMB
)
17583 dyn
.d_un
.d_val
|= 1;
17584 bfd_elf32_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
17591 /* Fill in the first entry in the procedure linkage table. */
17592 if (splt
->size
> 0 && htab
->plt_header_size
)
17594 const bfd_vma
*plt0_entry
;
17595 bfd_vma got_address
, plt_address
, got_displacement
;
17597 /* Calculate the addresses of the GOT and PLT. */
17598 got_address
= sgot
->output_section
->vma
+ sgot
->output_offset
;
17599 plt_address
= splt
->output_section
->vma
+ splt
->output_offset
;
17601 if (htab
->vxworks_p
)
17603 /* The VxWorks GOT is relocated by the dynamic linker.
17604 Therefore, we must emit relocations rather than simply
17605 computing the values now. */
17606 Elf_Internal_Rela rel
;
17608 plt0_entry
= elf32_arm_vxworks_exec_plt0_entry
;
17609 put_arm_insn (htab
, output_bfd
, plt0_entry
[0],
17610 splt
->contents
+ 0);
17611 put_arm_insn (htab
, output_bfd
, plt0_entry
[1],
17612 splt
->contents
+ 4);
17613 put_arm_insn (htab
, output_bfd
, plt0_entry
[2],
17614 splt
->contents
+ 8);
17615 bfd_put_32 (output_bfd
, got_address
, splt
->contents
+ 12);
17617 /* Generate a relocation for _GLOBAL_OFFSET_TABLE_. */
17618 rel
.r_offset
= plt_address
+ 12;
17619 rel
.r_info
= ELF32_R_INFO (htab
->root
.hgot
->indx
, R_ARM_ABS32
);
17621 SWAP_RELOC_OUT (htab
) (output_bfd
, &rel
,
17622 htab
->srelplt2
->contents
);
17624 else if (htab
->nacl_p
)
17625 arm_nacl_put_plt0 (htab
, output_bfd
, splt
,
17626 got_address
+ 8 - (plt_address
+ 16));
17627 else if (using_thumb_only (htab
))
17629 got_displacement
= got_address
- (plt_address
+ 12);
17631 plt0_entry
= elf32_thumb2_plt0_entry
;
17632 put_arm_insn (htab
, output_bfd
, plt0_entry
[0],
17633 splt
->contents
+ 0);
17634 put_arm_insn (htab
, output_bfd
, plt0_entry
[1],
17635 splt
->contents
+ 4);
17636 put_arm_insn (htab
, output_bfd
, plt0_entry
[2],
17637 splt
->contents
+ 8);
17639 bfd_put_32 (output_bfd
, got_displacement
, splt
->contents
+ 12);
17643 got_displacement
= got_address
- (plt_address
+ 16);
17645 plt0_entry
= elf32_arm_plt0_entry
;
17646 put_arm_insn (htab
, output_bfd
, plt0_entry
[0],
17647 splt
->contents
+ 0);
17648 put_arm_insn (htab
, output_bfd
, plt0_entry
[1],
17649 splt
->contents
+ 4);
17650 put_arm_insn (htab
, output_bfd
, plt0_entry
[2],
17651 splt
->contents
+ 8);
17652 put_arm_insn (htab
, output_bfd
, plt0_entry
[3],
17653 splt
->contents
+ 12);
17655 #ifdef FOUR_WORD_PLT
17656 /* The displacement value goes in the otherwise-unused
17657 last word of the second entry. */
17658 bfd_put_32 (output_bfd
, got_displacement
, splt
->contents
+ 28);
17660 bfd_put_32 (output_bfd
, got_displacement
, splt
->contents
+ 16);
17665 /* UnixWare sets the entsize of .plt to 4, although that doesn't
17666 really seem like the right value. */
17667 if (splt
->output_section
->owner
== output_bfd
)
17668 elf_section_data (splt
->output_section
)->this_hdr
.sh_entsize
= 4;
17670 if (htab
->dt_tlsdesc_plt
)
17672 bfd_vma got_address
17673 = sgot
->output_section
->vma
+ sgot
->output_offset
;
17674 bfd_vma gotplt_address
= (htab
->root
.sgot
->output_section
->vma
17675 + htab
->root
.sgot
->output_offset
);
17676 bfd_vma plt_address
17677 = splt
->output_section
->vma
+ splt
->output_offset
;
17679 arm_put_trampoline (htab
, output_bfd
,
17680 splt
->contents
+ htab
->dt_tlsdesc_plt
,
17681 dl_tlsdesc_lazy_trampoline
, 6);
17683 bfd_put_32 (output_bfd
,
17684 gotplt_address
+ htab
->dt_tlsdesc_got
17685 - (plt_address
+ htab
->dt_tlsdesc_plt
)
17686 - dl_tlsdesc_lazy_trampoline
[6],
17687 splt
->contents
+ htab
->dt_tlsdesc_plt
+ 24);
17688 bfd_put_32 (output_bfd
,
17689 got_address
- (plt_address
+ htab
->dt_tlsdesc_plt
)
17690 - dl_tlsdesc_lazy_trampoline
[7],
17691 splt
->contents
+ htab
->dt_tlsdesc_plt
+ 24 + 4);
17694 if (htab
->tls_trampoline
)
17696 arm_put_trampoline (htab
, output_bfd
,
17697 splt
->contents
+ htab
->tls_trampoline
,
17698 tls_trampoline
, 3);
17699 #ifdef FOUR_WORD_PLT
17700 bfd_put_32 (output_bfd
, 0x00000000,
17701 splt
->contents
+ htab
->tls_trampoline
+ 12);
17705 if (htab
->vxworks_p
17706 && !bfd_link_pic (info
)
17707 && htab
->root
.splt
->size
> 0)
17709 /* Correct the .rel(a).plt.unloaded relocations. They will have
17710 incorrect symbol indexes. */
17714 num_plts
= ((htab
->root
.splt
->size
- htab
->plt_header_size
)
17715 / htab
->plt_entry_size
);
17716 p
= htab
->srelplt2
->contents
+ RELOC_SIZE (htab
);
17718 for (; num_plts
; num_plts
--)
17720 Elf_Internal_Rela rel
;
17722 SWAP_RELOC_IN (htab
) (output_bfd
, p
, &rel
);
17723 rel
.r_info
= ELF32_R_INFO (htab
->root
.hgot
->indx
, R_ARM_ABS32
);
17724 SWAP_RELOC_OUT (htab
) (output_bfd
, &rel
, p
);
17725 p
+= RELOC_SIZE (htab
);
17727 SWAP_RELOC_IN (htab
) (output_bfd
, p
, &rel
);
17728 rel
.r_info
= ELF32_R_INFO (htab
->root
.hplt
->indx
, R_ARM_ABS32
);
17729 SWAP_RELOC_OUT (htab
) (output_bfd
, &rel
, p
);
17730 p
+= RELOC_SIZE (htab
);
17735 if (htab
->nacl_p
&& htab
->root
.iplt
!= NULL
&& htab
->root
.iplt
->size
> 0)
17736 /* NaCl uses a special first entry in .iplt too. */
17737 arm_nacl_put_plt0 (htab
, output_bfd
, htab
->root
.iplt
, 0);
17739 /* Fill in the first three entries in the global offset table. */
17742 if (sgot
->size
> 0)
17745 bfd_put_32 (output_bfd
, (bfd_vma
) 0, sgot
->contents
);
17747 bfd_put_32 (output_bfd
,
17748 sdyn
->output_section
->vma
+ sdyn
->output_offset
,
17750 bfd_put_32 (output_bfd
, (bfd_vma
) 0, sgot
->contents
+ 4);
17751 bfd_put_32 (output_bfd
, (bfd_vma
) 0, sgot
->contents
+ 8);
17754 elf_section_data (sgot
->output_section
)->this_hdr
.sh_entsize
= 4;
17757 /* At the very end of the .rofixup section is a pointer to the GOT. */
17758 if (htab
->fdpic_p
&& htab
->srofixup
!= NULL
)
17760 struct elf_link_hash_entry
*hgot
= htab
->root
.hgot
;
17762 bfd_vma got_value
= hgot
->root
.u
.def
.value
17763 + hgot
->root
.u
.def
.section
->output_section
->vma
17764 + hgot
->root
.u
.def
.section
->output_offset
;
17766 arm_elf_add_rofixup(output_bfd
, htab
->srofixup
, got_value
);
17768 /* Make sure we allocated and generated the same number of fixups. */
17769 BFD_ASSERT (htab
->srofixup
->reloc_count
* 4 == htab
->srofixup
->size
);
17776 elf32_arm_init_file_header (bfd
*abfd
, struct bfd_link_info
*link_info
)
17778 Elf_Internal_Ehdr
* i_ehdrp
; /* ELF file header, internal form. */
17779 struct elf32_arm_link_hash_table
*globals
;
17780 struct elf_segment_map
*m
;
17782 if (!_bfd_elf_init_file_header (abfd
, link_info
))
17785 i_ehdrp
= elf_elfheader (abfd
);
17787 if (EF_ARM_EABI_VERSION (i_ehdrp
->e_flags
) == EF_ARM_EABI_UNKNOWN
)
17788 i_ehdrp
->e_ident
[EI_OSABI
] = ELFOSABI_ARM
;
17789 i_ehdrp
->e_ident
[EI_ABIVERSION
] = ARM_ELF_ABI_VERSION
;
17793 globals
= elf32_arm_hash_table (link_info
);
17794 if (globals
!= NULL
&& globals
->byteswap_code
)
17795 i_ehdrp
->e_flags
|= EF_ARM_BE8
;
17797 if (globals
->fdpic_p
)
17798 i_ehdrp
->e_ident
[EI_OSABI
] |= ELFOSABI_ARM_FDPIC
;
17801 if (EF_ARM_EABI_VERSION (i_ehdrp
->e_flags
) == EF_ARM_EABI_VER5
17802 && ((i_ehdrp
->e_type
== ET_DYN
) || (i_ehdrp
->e_type
== ET_EXEC
)))
17804 int abi
= bfd_elf_get_obj_attr_int (abfd
, OBJ_ATTR_PROC
, Tag_ABI_VFP_args
);
17805 if (abi
== AEABI_VFP_args_vfp
)
17806 i_ehdrp
->e_flags
|= EF_ARM_ABI_FLOAT_HARD
;
17808 i_ehdrp
->e_flags
|= EF_ARM_ABI_FLOAT_SOFT
;
17811 /* Scan segment to set p_flags attribute if it contains only sections with
17812 SHF_ARM_PURECODE flag. */
17813 for (m
= elf_seg_map (abfd
); m
!= NULL
; m
= m
->next
)
17819 for (j
= 0; j
< m
->count
; j
++)
17821 if (!(elf_section_flags (m
->sections
[j
]) & SHF_ARM_PURECODE
))
17827 m
->p_flags_valid
= 1;
static enum elf_reloc_type_class
elf32_arm_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
			    const asection *rel_sec ATTRIBUTE_UNUSED,
			    const Elf_Internal_Rela *rela)
{
  switch ((int) ELF32_R_TYPE (rela->r_info))
    {
    case R_ARM_RELATIVE:
      return reloc_class_relative;
    case R_ARM_JUMP_SLOT:
      return reloc_class_plt;
    case R_ARM_COPY:
      return reloc_class_copy;
    case R_ARM_IRELATIVE:
      return reloc_class_ifunc;
    default:
      return reloc_class_normal;
    }
}

static void
arm_final_write_processing (bfd *abfd)
{
  bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
}

static bfd_boolean
elf32_arm_final_write_processing (bfd *abfd)
{
  arm_final_write_processing (abfd);
  return _bfd_elf_final_write_processing (abfd);
}

/* Return TRUE if this is an unwinding table entry.  */

static bfd_boolean
is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
{
  return (CONST_STRNEQ (name, ELF_STRING_ARM_unwind)
	  || CONST_STRNEQ (name, ELF_STRING_ARM_unwind_once));
}


/* Set the type and flags for an ARM section.  We do this by
   the section name, which is a hack, but ought to work.  */

static bfd_boolean
elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
{
  const char * name;

  name = bfd_section_name (sec);

  if (is_arm_elf_unwind_section_name (abfd, name))
    {
      hdr->sh_type = SHT_ARM_EXIDX;
      hdr->sh_flags |= SHF_LINK_ORDER;
    }

  if (sec->flags & SEC_ELF_PURECODE)
    hdr->sh_flags |= SHF_ARM_PURECODE;

  return TRUE;
}

/* Handle an ARM specific section when reading an object file.  This is
   called when bfd_section_from_shdr finds a section with an unknown
   name.  */

static bfd_boolean
elf32_arm_section_from_shdr (bfd *abfd,
			     Elf_Internal_Shdr * hdr,
			     const char *name,
			     int shindex)
{
  /* There ought to be a place to keep ELF backend specific flags, but
     at the moment there isn't one.  We just keep track of the
     sections by their name, instead.  Fortunately, the ABI gives
     names for all the ARM specific sections, so we will probably get
     away with this.  */
  switch (hdr->sh_type)
    {
    case SHT_ARM_EXIDX:
    case SHT_ARM_PREEMPTMAP:
    case SHT_ARM_ATTRIBUTES:
      break;

    default:
      return FALSE;
    }

  if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
    return FALSE;

  return TRUE;
}

static _arm_elf_section_data *
get_arm_elf_section_data (asection * sec)
{
  if (sec && sec->owner && is_arm_elf (sec->owner))
    return elf32_arm_section_data (sec);
  else
    return NULL;
}

typedef struct
{
  void *flaginfo;
  struct bfd_link_info *info;
  asection *sec;
  int sec_shndx;
  int (*func) (void *, const char *, Elf_Internal_Sym *,
	       asection *, struct elf_link_hash_entry *);
} output_arch_syminfo;

enum map_symbol_type
{
  ARM_MAP_ARM,
  ARM_MAP_THUMB,
  ARM_MAP_DATA
};

/* Output a single mapping symbol.  */

static bfd_boolean
elf32_arm_output_map_sym (output_arch_syminfo *osi,
			  enum map_symbol_type type,
			  bfd_vma offset)
{
  static const char *names[3] = {"$a", "$t", "$d"};
  Elf_Internal_Sym sym;

  sym.st_value = osi->sec->output_section->vma
		 + osi->sec->output_offset
		 + offset;
  sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
  sym.st_shndx = osi->sec_shndx;
  sym.st_target_internal = 0;
  elf32_arm_section_map_add (osi->sec, names[type][1], offset);
  return osi->func (osi->flaginfo, names[type], &sym, osi->sec, NULL) == 1;
}
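
/* Illustrative note (derived from the code above, not additional source):
   for type == ARM_MAP_THUMB this emits a local "$t" mapping symbol at
   OFFSET within the section and records the single character 't'
   (names[type][1]) in the section's mapping-symbol table via
   elf32_arm_section_map_add.  */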
17979 /* Output mapping symbols for the PLT entry described by ROOT_PLT and ARM_PLT.
17980 IS_IPLT_ENTRY_P says whether the PLT is in .iplt rather than .plt. */
17983 elf32_arm_output_plt_map_1 (output_arch_syminfo
*osi
,
17984 bfd_boolean is_iplt_entry_p
,
17985 union gotplt_union
*root_plt
,
17986 struct arm_plt_info
*arm_plt
)
17988 struct elf32_arm_link_hash_table
*htab
;
17989 bfd_vma addr
, plt_header_size
;
17991 if (root_plt
->offset
== (bfd_vma
) -1)
17994 htab
= elf32_arm_hash_table (osi
->info
);
17998 if (is_iplt_entry_p
)
18000 osi
->sec
= htab
->root
.iplt
;
18001 plt_header_size
= 0;
18005 osi
->sec
= htab
->root
.splt
;
18006 plt_header_size
= htab
->plt_header_size
;
18008 osi
->sec_shndx
= (_bfd_elf_section_from_bfd_section
18009 (osi
->info
->output_bfd
, osi
->sec
->output_section
));
18011 addr
= root_plt
->offset
& -2;
18012 if (htab
->symbian_p
)
18014 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_ARM
, addr
))
18016 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_DATA
, addr
+ 4))
18019 else if (htab
->vxworks_p
)
18021 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_ARM
, addr
))
18023 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_DATA
, addr
+ 8))
18025 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_ARM
, addr
+ 12))
18027 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_DATA
, addr
+ 20))
18030 else if (htab
->nacl_p
)
18032 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_ARM
, addr
))
18035 else if (htab
->fdpic_p
)
18037 enum map_symbol_type type
= using_thumb_only(htab
)
18041 if (elf32_arm_plt_needs_thumb_stub_p (osi
->info
, arm_plt
))
18042 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_THUMB
, addr
- 4))
18044 if (!elf32_arm_output_map_sym (osi
, type
, addr
))
18046 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_DATA
, addr
+ 16))
18048 if (htab
->plt_entry_size
== 4 * ARRAY_SIZE(elf32_arm_fdpic_plt_entry
))
18049 if (!elf32_arm_output_map_sym (osi
, type
, addr
+ 24))
18052 else if (using_thumb_only (htab
))
18054 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_THUMB
, addr
))
18059 bfd_boolean thumb_stub_p
;
18061 thumb_stub_p
= elf32_arm_plt_needs_thumb_stub_p (osi
->info
, arm_plt
);
18064 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_THUMB
, addr
- 4))
18067 #ifdef FOUR_WORD_PLT
18068 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_ARM
, addr
))
18070 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_DATA
, addr
+ 12))
18073 /* A three-word PLT with no Thumb thunk contains only Arm code,
18074 so only need to output a mapping symbol for the first PLT entry and
18075 entries with thumb thunks. */
18076 if (thumb_stub_p
|| addr
== plt_header_size
)
18078 if (!elf32_arm_output_map_sym (osi
, ARM_MAP_ARM
, addr
))
18087 /* Output mapping symbols for PLT entries associated with H. */
18090 elf32_arm_output_plt_map (struct elf_link_hash_entry
*h
, void *inf
)
18092 output_arch_syminfo
*osi
= (output_arch_syminfo
*) inf
;
18093 struct elf32_arm_link_hash_entry
*eh
;
18095 if (h
->root
.type
== bfd_link_hash_indirect
)
18098 if (h
->root
.type
== bfd_link_hash_warning
)
18099 /* When warning symbols are created, they **replace** the "real"
18100 entry in the hash table, thus we never get to see the real
18101 symbol in a hash traversal. So look at it now. */
18102 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
18104 eh
= (struct elf32_arm_link_hash_entry
*) h
;
18105 return elf32_arm_output_plt_map_1 (osi
, SYMBOL_CALLS_LOCAL (osi
->info
, h
),
18106 &h
->plt
, &eh
->plt
);
/* Bind a veneered symbol to its veneer identified by its hash entry
   STUB_ENTRY.  The veneered location thus loses its symbol.  */

static void
arm_stub_claim_sym (struct elf32_arm_stub_hash_entry *stub_entry)
{
  struct elf32_arm_link_hash_entry *hash = stub_entry->h;

  hash->root.root.u.def.section = stub_entry->stub_sec;
  hash->root.root.u.def.value = stub_entry->stub_offset;
  hash->root.size = stub_entry->stub_size;
}

/* Output a single local symbol for a generated stub.  */

static bfd_boolean
elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
			   bfd_vma offset, bfd_vma size)
{
  Elf_Internal_Sym sym;

  sym.st_value = osi->sec->output_section->vma
		 + osi->sec->output_offset
		 + offset;
  sym.st_size = size;
  sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  sym.st_shndx = osi->sec_shndx;
  sym.st_target_internal = 0;
  return osi->func (osi->flaginfo, name, &sym, osi->sec, NULL) == 1;
}
18143 arm_map_one_stub (struct bfd_hash_entry
* gen_entry
,
18146 struct elf32_arm_stub_hash_entry
*stub_entry
;
18147 asection
*stub_sec
;
18150 output_arch_syminfo
*osi
;
18151 const insn_sequence
*template_sequence
;
18152 enum stub_insn_type prev_type
;
18155 enum map_symbol_type sym_type
;
18157 /* Massage our args to the form they really have. */
18158 stub_entry
= (struct elf32_arm_stub_hash_entry
*) gen_entry
;
18159 osi
= (output_arch_syminfo
*) in_arg
;
18161 stub_sec
= stub_entry
->stub_sec
;
18163 /* Ensure this stub is attached to the current section being
18165 if (stub_sec
!= osi
->sec
)
18168 addr
= (bfd_vma
) stub_entry
->stub_offset
;
18169 template_sequence
= stub_entry
->stub_template
;
18171 if (arm_stub_sym_claimed (stub_entry
->stub_type
))
18172 arm_stub_claim_sym (stub_entry
);
18175 stub_name
= stub_entry
->output_name
;
18176 switch (template_sequence
[0].type
)
18179 if (!elf32_arm_output_stub_sym (osi
, stub_name
, addr
,
18180 stub_entry
->stub_size
))
18185 if (!elf32_arm_output_stub_sym (osi
, stub_name
, addr
| 1,
18186 stub_entry
->stub_size
))
18195 prev_type
= DATA_TYPE
;
18197 for (i
= 0; i
< stub_entry
->stub_template_size
; i
++)
18199 switch (template_sequence
[i
].type
)
18202 sym_type
= ARM_MAP_ARM
;
18207 sym_type
= ARM_MAP_THUMB
;
18211 sym_type
= ARM_MAP_DATA
;
18219 if (template_sequence
[i
].type
!= prev_type
)
18221 prev_type
= template_sequence
[i
].type
;
18222 if (!elf32_arm_output_map_sym (osi
, sym_type
, addr
+ size
))
18226 switch (template_sequence
[i
].type
)
18250 /* Output mapping symbols for linker generated sections,
18251 and for those data-only sections that do not have a
18255 elf32_arm_output_arch_local_syms (bfd
*output_bfd
,
18256 struct bfd_link_info
*info
,
18258 int (*func
) (void *, const char *,
18259 Elf_Internal_Sym
*,
18261 struct elf_link_hash_entry
*))
18263 output_arch_syminfo osi
;
18264 struct elf32_arm_link_hash_table
*htab
;
18266 bfd_size_type size
;
18269 htab
= elf32_arm_hash_table (info
);
18273 check_use_blx (htab
);
18275 osi
.flaginfo
= flaginfo
;
18279 /* Add a $d mapping symbol to data-only sections that
18280 don't have any mapping symbol. This may result in (harmless) redundant
18281 mapping symbols. */
18282 for (input_bfd
= info
->input_bfds
;
18284 input_bfd
= input_bfd
->link
.next
)
18286 if ((input_bfd
->flags
& (BFD_LINKER_CREATED
| HAS_SYMS
)) == HAS_SYMS
)
18287 for (osi
.sec
= input_bfd
->sections
;
18289 osi
.sec
= osi
.sec
->next
)
18291 if (osi
.sec
->output_section
!= NULL
18292 && ((osi
.sec
->output_section
->flags
& (SEC_ALLOC
| SEC_CODE
))
18294 && (osi
.sec
->flags
& (SEC_HAS_CONTENTS
| SEC_LINKER_CREATED
))
18295 == SEC_HAS_CONTENTS
18296 && get_arm_elf_section_data (osi
.sec
) != NULL
18297 && get_arm_elf_section_data (osi
.sec
)->mapcount
== 0
18298 && osi
.sec
->size
> 0
18299 && (osi
.sec
->flags
& SEC_EXCLUDE
) == 0)
18301 osi
.sec_shndx
= _bfd_elf_section_from_bfd_section
18302 (output_bfd
, osi
.sec
->output_section
);
18303 if (osi
.sec_shndx
!= (int)SHN_BAD
)
18304 elf32_arm_output_map_sym (&osi
, ARM_MAP_DATA
, 0);
18309 /* ARM->Thumb glue. */
18310 if (htab
->arm_glue_size
> 0)
18312 osi
.sec
= bfd_get_linker_section (htab
->bfd_of_glue_owner
,
18313 ARM2THUMB_GLUE_SECTION_NAME
);
18315 osi
.sec_shndx
= _bfd_elf_section_from_bfd_section
18316 (output_bfd
, osi
.sec
->output_section
);
18317 if (bfd_link_pic (info
) || htab
->root
.is_relocatable_executable
18318 || htab
->pic_veneer
)
18319 size
= ARM2THUMB_PIC_GLUE_SIZE
;
18320 else if (htab
->use_blx
)
18321 size
= ARM2THUMB_V5_STATIC_GLUE_SIZE
;
18323 size
= ARM2THUMB_STATIC_GLUE_SIZE
;
18325 for (offset
= 0; offset
< htab
->arm_glue_size
; offset
+= size
)
18327 elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, offset
);
18328 elf32_arm_output_map_sym (&osi
, ARM_MAP_DATA
, offset
+ size
- 4);
18332 /* Thumb->ARM glue. */
18333 if (htab
->thumb_glue_size
> 0)
18335 osi
.sec
= bfd_get_linker_section (htab
->bfd_of_glue_owner
,
18336 THUMB2ARM_GLUE_SECTION_NAME
);
18338 osi
.sec_shndx
= _bfd_elf_section_from_bfd_section
18339 (output_bfd
, osi
.sec
->output_section
);
18340 size
= THUMB2ARM_GLUE_SIZE
;
18342 for (offset
= 0; offset
< htab
->thumb_glue_size
; offset
+= size
)
18344 elf32_arm_output_map_sym (&osi
, ARM_MAP_THUMB
, offset
);
18345 elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, offset
+ 4);
18349 /* ARMv4 BX veneers. */
18350 if (htab
->bx_glue_size
> 0)
18352 osi
.sec
= bfd_get_linker_section (htab
->bfd_of_glue_owner
,
18353 ARM_BX_GLUE_SECTION_NAME
);
18355 osi
.sec_shndx
= _bfd_elf_section_from_bfd_section
18356 (output_bfd
, osi
.sec
->output_section
);
18358 elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, 0);
18361 /* Long calls stubs. */
18362 if (htab
->stub_bfd
&& htab
->stub_bfd
->sections
)
18364 asection
* stub_sec
;
18366 for (stub_sec
= htab
->stub_bfd
->sections
;
18368 stub_sec
= stub_sec
->next
)
18370 /* Ignore non-stub sections. */
18371 if (!strstr (stub_sec
->name
, STUB_SUFFIX
))
18374 osi
.sec
= stub_sec
;
18376 osi
.sec_shndx
= _bfd_elf_section_from_bfd_section
18377 (output_bfd
, osi
.sec
->output_section
);
18379 bfd_hash_traverse (&htab
->stub_hash_table
, arm_map_one_stub
, &osi
);
18383 /* Finally, output mapping symbols for the PLT. */
18384 if (htab
->root
.splt
&& htab
->root
.splt
->size
> 0)
18386 osi
.sec
= htab
->root
.splt
;
18387 osi
.sec_shndx
= (_bfd_elf_section_from_bfd_section
18388 (output_bfd
, osi
.sec
->output_section
));
18390 /* Output mapping symbols for the plt header. SymbianOS does not have a
18392 if (htab
->vxworks_p
)
18394 /* VxWorks shared libraries have no PLT header. */
18395 if (!bfd_link_pic (info
))
18397 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, 0))
18399 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_DATA
, 12))
18403 else if (htab
->nacl_p
)
18405 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, 0))
18408 else if (using_thumb_only (htab
) && !htab
->fdpic_p
)
18410 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_THUMB
, 0))
18412 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_DATA
, 12))
18414 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_THUMB
, 16))
18417 else if (!htab
->symbian_p
&& !htab
->fdpic_p
)
18419 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, 0))
18421 #ifndef FOUR_WORD_PLT
18422 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_DATA
, 16))
18427 if (htab
->nacl_p
&& htab
->root
.iplt
&& htab
->root
.iplt
->size
> 0)
18429 /* NaCl uses a special first entry in .iplt too. */
18430 osi
.sec
= htab
->root
.iplt
;
18431 osi
.sec_shndx
= (_bfd_elf_section_from_bfd_section
18432 (output_bfd
, osi
.sec
->output_section
));
18433 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, 0))
18436 if ((htab
->root
.splt
&& htab
->root
.splt
->size
> 0)
18437 || (htab
->root
.iplt
&& htab
->root
.iplt
->size
> 0))
18439 elf_link_hash_traverse (&htab
->root
, elf32_arm_output_plt_map
, &osi
);
18440 for (input_bfd
= info
->input_bfds
;
18442 input_bfd
= input_bfd
->link
.next
)
18444 struct arm_local_iplt_info
**local_iplt
;
18445 unsigned int i
, num_syms
;
18447 local_iplt
= elf32_arm_local_iplt (input_bfd
);
18448 if (local_iplt
!= NULL
)
18450 num_syms
= elf_symtab_hdr (input_bfd
).sh_info
;
18451 for (i
= 0; i
< num_syms
; i
++)
18452 if (local_iplt
[i
] != NULL
18453 && !elf32_arm_output_plt_map_1 (&osi
, TRUE
,
18454 &local_iplt
[i
]->root
,
18455 &local_iplt
[i
]->arm
))
18460 if (htab
->dt_tlsdesc_plt
!= 0)
18462 /* Mapping symbols for the lazy tls trampoline. */
18463 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, htab
->dt_tlsdesc_plt
))
18466 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_DATA
,
18467 htab
->dt_tlsdesc_plt
+ 24))
18470 if (htab
->tls_trampoline
!= 0)
18472 /* Mapping symbols for the tls trampoline. */
18473 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_ARM
, htab
->tls_trampoline
))
18475 #ifdef FOUR_WORD_PLT
18476 if (!elf32_arm_output_map_sym (&osi
, ARM_MAP_DATA
,
18477 htab
->tls_trampoline
+ 12))
18485 /* Filter normal symbols of CMSE entry functions of ABFD to include in
18486 the import library. All SYMCOUNT symbols of ABFD can be examined
18487 from their pointers in SYMS. Pointers of symbols to keep should be
18488 stored continuously at the beginning of that array.
18490 Returns the number of symbols to keep. */
18492 static unsigned int
18493 elf32_arm_filter_cmse_symbols (bfd
*abfd ATTRIBUTE_UNUSED
,
18494 struct bfd_link_info
*info
,
18495 asymbol
**syms
, long symcount
)
18499 long src_count
, dst_count
= 0;
18500 struct elf32_arm_link_hash_table
*htab
;
18502 htab
= elf32_arm_hash_table (info
);
18503 if (!htab
->stub_bfd
|| !htab
->stub_bfd
->sections
)
18507 cmse_name
= (char *) bfd_malloc (maxnamelen
);
18508 for (src_count
= 0; src_count
< symcount
; src_count
++)
18510 struct elf32_arm_link_hash_entry
*cmse_hash
;
18516 sym
= syms
[src_count
];
18517 flags
= sym
->flags
;
18518 name
= (char *) bfd_asymbol_name (sym
);
18520 if ((flags
& BSF_FUNCTION
) != BSF_FUNCTION
)
18522 if (!(flags
& (BSF_GLOBAL
| BSF_WEAK
)))
18525 namelen
= strlen (name
) + sizeof (CMSE_PREFIX
) + 1;
18526 if (namelen
> maxnamelen
)
18528 cmse_name
= (char *)
18529 bfd_realloc (cmse_name
, namelen
);
18530 maxnamelen
= namelen
;
18532 snprintf (cmse_name
, maxnamelen
, "%s%s", CMSE_PREFIX
, name
);
18533 cmse_hash
= (struct elf32_arm_link_hash_entry
*)
18534 elf_link_hash_lookup (&(htab
)->root
, cmse_name
, FALSE
, FALSE
, TRUE
);
18537 || (cmse_hash
->root
.root
.type
!= bfd_link_hash_defined
18538 && cmse_hash
->root
.root
.type
!= bfd_link_hash_defweak
)
18539 || cmse_hash
->root
.type
!= STT_FUNC
)
18542 syms
[dst_count
++] = sym
;
18546 syms
[dst_count
] = NULL
;

/* Filter symbols of ABFD to include in the import library.  All
   SYMCOUNT symbols of ABFD can be examined from their pointers in
   SYMS.  Pointers of symbols to keep should be stored continuously at
   the beginning of that array.

   Returns the number of symbols to keep.  */

static unsigned int
elf32_arm_filter_implib_symbols (bfd *abfd ATTRIBUTE_UNUSED,
				 struct bfd_link_info *info,
				 asymbol **syms, long symcount)
{
  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);

  /* Requirement 8 of "ARM v8-M Security Extensions: Requirements on
     Development Tools" (ARM-ECM-0359818) mandates Secure Gateway import
     library to be a relocatable object file.  */
  BFD_ASSERT (!(bfd_get_file_flags (info->out_implib_bfd) & EXEC_P));
  if (globals->cmse_implib)
    return elf32_arm_filter_cmse_symbols (abfd, info, syms, symcount);
  else
    return _bfd_elf_filter_global_symbols (abfd, info, syms, symcount);
}

/* Allocate target specific section data.  */

static bfd_boolean
elf32_arm_new_section_hook (bfd *abfd, asection *sec)
{
  if (!sec->used_by_bfd)
    {
      _arm_elf_section_data *sdata;
      bfd_size_type amt = sizeof (*sdata);

      sdata = (_arm_elf_section_data *) bfd_zalloc (abfd, amt);
      if (sdata == NULL)
	return FALSE;
      sec->used_by_bfd = sdata;
    }

  return _bfd_elf_new_section_hook (abfd, sec);
}

/* Used to order a list of mapping symbols by address.  */

static int
elf32_arm_compare_mapping (const void * a, const void * b)
{
  const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
  const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;

  if (amap->vma > bmap->vma)
    return 1;
  else if (amap->vma < bmap->vma)
    return -1;
  else if (amap->type > bmap->type)
    /* Ensure results do not depend on the host qsort for objects with
       multiple mapping symbols at the same address by sorting on type
       after vma.  */
    return 1;
  else if (amap->type < bmap->type)
    return -1;
  else
    return 0;
}
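
/* Illustrative usage sketch (an assumption about the caller, not part of
   this function): the per-section mapping-symbol array is expected to be
   sorted with something like

     qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);

   so that later passes can walk the mapping symbols in address order.  */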

/* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified.  */

static unsigned long
offset_prel31 (unsigned long addr, bfd_vma offset)
{
  return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
}
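
/* Worked example (illustrative): a prel31 value of 0x7ffffffc encodes -4.
   offset_prel31 (0x7ffffffc, 8) computes (0x7ffffffc + 8) & 0x7fffffff
   == 0x00000004 and keeps bit 31 from ADDR (here 0), i.e. -4 + 8 = +4 in
   prel31 arithmetic.  */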

/* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
   relocations.  */

static void
copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
{
  unsigned long first_word = bfd_get_32 (output_bfd, from);
  unsigned long second_word = bfd_get_32 (output_bfd, from + 4);

  /* High bit of first word is supposed to be zero.  */
  if ((first_word & 0x80000000ul) == 0)
    first_word = offset_prel31 (first_word, offset);

  /* If the high bit of the second word is clear, and the bit pattern is not
     0x1 (EXIDX_CANTUNWIND), this is an offset to an .ARM.extab entry.  */
  if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
    second_word = offset_prel31 (second_word, offset);

  bfd_put_32 (output_bfd, first_word, to);
  bfd_put_32 (output_bfd, second_word, to + 4);
}
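
/* Worked example (illustrative): if an .ARM.exidx entry is moved 8 bytes
   towards higher addresses, its first word (a prel31 reference back to the
   code it covers) is adjusted by +8 via offset_prel31, while a second word
   of exactly 0x1 (EXIDX_CANTUNWIND) or an inline entry with bit 31 set is
   left untouched.  */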

/* Data for make_branch_to_a8_stub().  */

struct a8_branch_to_stub_data
{
  asection *writing_section;
  bfd_byte *contents;
};
18657 /* Helper to insert branches to Cortex-A8 erratum stubs in the right
18658 places for a particular section. */
18661 make_branch_to_a8_stub (struct bfd_hash_entry
*gen_entry
,
18664 struct elf32_arm_stub_hash_entry
*stub_entry
;
18665 struct a8_branch_to_stub_data
*data
;
18666 bfd_byte
*contents
;
18667 unsigned long branch_insn
;
18668 bfd_vma veneered_insn_loc
, veneer_entry_loc
;
18669 bfd_signed_vma branch_offset
;
18673 stub_entry
= (struct elf32_arm_stub_hash_entry
*) gen_entry
;
18674 data
= (struct a8_branch_to_stub_data
*) in_arg
;
18676 if (stub_entry
->target_section
!= data
->writing_section
18677 || stub_entry
->stub_type
< arm_stub_a8_veneer_lwm
)
18680 contents
= data
->contents
;
18682 /* We use target_section as Cortex-A8 erratum workaround stubs are only
18683 generated when both source and target are in the same section. */
18684 veneered_insn_loc
= stub_entry
->target_section
->output_section
->vma
18685 + stub_entry
->target_section
->output_offset
18686 + stub_entry
->source_value
;
18688 veneer_entry_loc
= stub_entry
->stub_sec
->output_section
->vma
18689 + stub_entry
->stub_sec
->output_offset
18690 + stub_entry
->stub_offset
;
18692 if (stub_entry
->stub_type
== arm_stub_a8_veneer_blx
)
18693 veneered_insn_loc
&= ~3u;
18695 branch_offset
= veneer_entry_loc
- veneered_insn_loc
- 4;
18697 abfd
= stub_entry
->target_section
->owner
;
18698 loc
= stub_entry
->source_value
;
18700 /* We attempt to avoid this condition by setting stubs_always_after_branch
18701 in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
18702 This check is just to be on the safe side... */
18703 if ((veneered_insn_loc
& ~0xfff) == (veneer_entry_loc
& ~0xfff))
18705 _bfd_error_handler (_("%pB: error: Cortex-A8 erratum stub is "
18706 "allocated in unsafe location"), abfd
);
18710 switch (stub_entry
->stub_type
)
18712 case arm_stub_a8_veneer_b
:
18713 case arm_stub_a8_veneer_b_cond
:
18714 branch_insn
= 0xf0009000;
18717 case arm_stub_a8_veneer_blx
:
18718 branch_insn
= 0xf000e800;
18721 case arm_stub_a8_veneer_bl
:
18723 unsigned int i1
, j1
, i2
, j2
, s
;
18725 branch_insn
= 0xf000d000;
18728 if (branch_offset
< -16777216 || branch_offset
> 16777214)
18730 /* There's not much we can do apart from complain if this
18732 _bfd_error_handler (_("%pB: error: Cortex-A8 erratum stub out "
18733 "of range (input file too large)"), abfd
);
18737 /* i1 = not(j1 eor s), so:
18739 j1 = (not i1) eor s. */
18741 branch_insn
|= (branch_offset
>> 1) & 0x7ff;
18742 branch_insn
|= ((branch_offset
>> 12) & 0x3ff) << 16;
18743 i2
= (branch_offset
>> 22) & 1;
18744 i1
= (branch_offset
>> 23) & 1;
18745 s
= (branch_offset
>> 24) & 1;
18748 branch_insn
|= j2
<< 11;
18749 branch_insn
|= j1
<< 13;
18750 branch_insn
|= s
<< 26;
18759 bfd_put_16 (abfd
, (branch_insn
>> 16) & 0xffff, &contents
[loc
]);
18760 bfd_put_16 (abfd
, branch_insn
& 0xffff, &contents
[loc
+ 2]);

/* Beginning of stm32l4xx work-around.  */

/* Functions encoding instructions necessary for the emission of the
   fix-stm32l4xx-629360.
   Encoding is extracted from the
   ARM (C) Architecture Reference Manual
   ARMv7-A and ARMv7-R edition
   ARM DDI 0406C.b (ID072512).  */

static inline bfd_vma
create_instruction_branch_absolute (int branch_offset)
{
  /* A8.8.18 B (A8-334)
     B target_address (Encoding T4).  */
  /* 1111 - 0Sii - iiii - iiii - 10J1 - Jiii - iiii - iiii.  */
  /* jump offset is:  S:I1:I2:imm10:imm11:0.  */
  /* with : I1 = NOT (J1 EOR S) I2 = NOT (J2 EOR S).  */

  int s = ((branch_offset & 0x1000000) >> 24);
  int j1 = s ^ !((branch_offset & 0x800000) >> 23);
  int j2 = s ^ !((branch_offset & 0x400000) >> 22);

  if (branch_offset < -(1 << 24) || branch_offset >= (1 << 24))
    BFD_ASSERT (0 && "Error: branch out of range.  Cannot create branch.");

  bfd_vma patched_inst = 0xf0009000
    | s << 26						      /* S.  */
    | (((unsigned long) (branch_offset) >> 12) & 0x3ff) << 16 /* imm10.  */
    | j1 << 13						      /* J1.  */
    | j2 << 11						      /* J2.  */
    | (((unsigned long) (branch_offset) >> 1) & 0x7ff);      /* imm11.  */

  return patched_inst;
}
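
/* Worked example (illustrative, derived from the arithmetic above):
   create_instruction_branch_absolute (0x100) gives s = 0, j1 = j2 = 1,
   imm10 = 0 and imm11 = 0x80, so the returned encoding is
   0xf0009000 | (1 << 13) | (1 << 11) | 0x80 == 0xf000b880,
   a Thumb-2 B.W with a forward offset of 0x100 bytes.  */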

static inline bfd_vma
create_instruction_ldmia (int base_reg, int wback, int reg_mask)
{
  /* A8.8.57 LDM/LDMIA/LDMFD (A8-396)
     LDMIA Rn!, {Ra, Rb, Rc, ...} (Encoding T2).  */
  bfd_vma patched_inst = 0xe8900000
    | (/*W=*/wback << 21)
    | (base_reg << 16)
    | (reg_mask & 0x0000ffff);

  return patched_inst;
}

static inline bfd_vma
create_instruction_ldmdb (int base_reg, int wback, int reg_mask)
{
  /* A8.8.60 LDMDB/LDMEA (A8-402)
     LDMDB Rn!, {Ra, Rb, Rc, ...} (Encoding T1).  */
  bfd_vma patched_inst = 0xe9100000
    | (/*W=*/wback << 21)
    | (base_reg << 16)
    | (reg_mask & 0x0000ffff);

  return patched_inst;
}

static inline bfd_vma
create_instruction_mov (int target_reg, int source_reg)
{
  /* A8.8.103 MOV (register) (A8-486)
     MOV Rd, Rm (Encoding T1).  */
  bfd_vma patched_inst = 0x4600
    | (target_reg & 0x7)
    | ((target_reg & 0x8) >> 3) << 7
    | (source_reg << 3);

  return patched_inst;
}
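
/* Worked example (illustrative): create_instruction_mov (9, 0) returns
   0x4600 | 0x1 | (1 << 7) | (0 << 3) == 0x4681, the 16-bit Thumb encoding
   of "mov r9, r0" (D:Rd = 0b1001, Rm = 0b0000).  */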

static inline bfd_vma
create_instruction_sub (int target_reg, int source_reg, int value)
{
  /* A8.8.221 SUB (immediate) (A8-708)
     SUB Rd, Rn, #value (Encoding T3).  */
  bfd_vma patched_inst = 0xf1a00000
    | (target_reg << 8)
    | (source_reg << 16)
    | (/*S=*/0 << 20)
    | ((value & 0x800) >> 11) << 26
    | ((value & 0x700) >> 8) << 12
    | (value & 0x0ff);

  return patched_inst;
}

static inline bfd_vma
create_instruction_vldmia (int base_reg, int is_dp, int wback, int num_words,
			   int first_reg)
{
  /* A8.8.332 VLDM (A8-922)
     VLMD{MODE} Rn{!}, {list} (Encoding T1 or T2).  */
  bfd_vma patched_inst = (is_dp ? 0xec900b00 : 0xec900a00)
    | (/*W=*/wback << 21)
    | (base_reg << 16)
    | (num_words & 0x000000ff)
    | (((unsigned)first_reg >> 1) & 0x0000000f) << 12
    | (first_reg & 0x00000001) << 22;

  return patched_inst;
}
18871 static inline bfd_vma
18872 create_instruction_vldmdb (int base_reg
, int is_dp
, int num_words
,
18875 /* A8.8.332 VLDM (A8-922)
18876 VLMD{MODE} Rn!, {} (Encoding T1 or T2). */
18877 bfd_vma patched_inst
= (is_dp
? 0xed300b00 : 0xed300a00)
18879 | (num_words
& 0x000000ff)
18880 | (((unsigned)first_reg
>>1 ) & 0x0000000f) << 12
18881 | (first_reg
& 0x00000001) << 22;
18883 return patched_inst
;
18886 static inline bfd_vma
18887 create_instruction_udf_w (int value
)
18889 /* A8.8.247 UDF (A8-758)
18890 Undefined (Encoding T2). */
18891 bfd_vma patched_inst
= 0xf7f0a000
18892 | (value
& 0x00000fff)
18893 | (value
& 0x000f0000) << 16;
18895 return patched_inst
;
18898 static inline bfd_vma
18899 create_instruction_udf (int value
)
18901 /* A8.8.247 UDF (A8-758)
18902 Undefined (Encoding T1). */
18903 bfd_vma patched_inst
= 0xde00
18906 return patched_inst
;
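
/* Illustrative sketch, not part of the original sources: a standalone
   restatement of the B.W (encoding T4) packing performed by
   create_instruction_branch_absolute above, kept under "#if 0" so that
   nothing here is ever compiled into BFD.  The encode_b_t4 helper and
   main() below are hypothetical test scaffolding only.  */
#if 0
#include <assert.h>

static unsigned int
encode_b_t4 (int off)
{
  /* S:I1:I2:imm10:imm11:0, with J1 = NOT (I1) EOR S and J2 = NOT (I2) EOR S,
     exactly as in create_instruction_branch_absolute.  */
  int s = (off & 0x1000000) >> 24;
  int j1 = s ^ !((off & 0x800000) >> 23);
  int j2 = s ^ !((off & 0x400000) >> 22);
  return 0xf0009000
    | s << 26
    | (((unsigned int) off >> 12) & 0x3ff) << 16
    | j1 << 13
    | j2 << 11
    | (((unsigned int) off >> 1) & 0x7ff);
}

int
main (void)
{
  /* A forward branch of 8 bytes: imm11 = 4, J1 = J2 = 1, S = 0.  */
  assert (encode_b_t4 (8) == 0xf000b804);
  return 0;
}
#endif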
/* Functions writing an instruction in memory, returning the next
   memory position to write to.  */

static inline bfd_byte *
push_thumb2_insn32 (struct elf32_arm_link_hash_table * htab,
		    bfd * output_bfd, bfd_byte *pt, insn32 insn)
{
  put_thumb2_insn (htab, output_bfd, insn, pt);
  return pt + 4;
}

static inline bfd_byte *
push_thumb2_insn16 (struct elf32_arm_link_hash_table * htab,
		    bfd * output_bfd, bfd_byte *pt, insn32 insn)
{
  put_thumb_insn (htab, output_bfd, insn, pt);
  return pt + 2;
}

/* Function filling up a region in memory with T1 and T2 UDFs taking
   care of alignment.  */

static bfd_byte *
stm32l4xx_fill_stub_udf (struct elf32_arm_link_hash_table * htab,
			 bfd * output_bfd,
			 const bfd_byte * const base_stub_contents,
			 bfd_byte * const from_stub_contents,
			 const bfd_byte * const end_stub_contents)
{
  bfd_byte *current_stub_contents = from_stub_contents;

  /* Fill the remaining of the stub with deterministic contents : UDF
     instructions.
     Check if realignment is needed on modulo 4 frontier using T1, to
     further use T2.  */
  if ((current_stub_contents < end_stub_contents)
      && !((current_stub_contents - base_stub_contents) % 2)
      && ((current_stub_contents - base_stub_contents) % 4))
    current_stub_contents =
      push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
			  create_instruction_udf (0));

  for (; current_stub_contents < end_stub_contents;)
    current_stub_contents =
      push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			  create_instruction_udf_w (0));

  return current_stub_contents;
}
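
/* Illustrative note, not part of the original sources: if the code emitted
   so far ends two bytes into a word (offset % 4 == 2), the filler above
   first emits one 16-bit UDF (create_instruction_udf (0), i.e. 0xde00) to
   realign, then pads the rest of the veneer with 32-bit UDF.W words.  */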
/* Functions writing the stream of instructions equivalent to the
   derived sequence for ldmia, ldmdb, vldm respectively.  */

static void
stm32l4xx_create_replacing_stub_ldmia (struct elf32_arm_link_hash_table * htab,
				       bfd * output_bfd,
				       const insn32 initial_insn,
				       const bfd_byte *const initial_insn_addr,
				       bfd_byte *const base_stub_contents)
{
  int wback = (initial_insn & 0x00200000) >> 21;
  int ri, rn = (initial_insn & 0x000F0000) >> 16;
  int insn_all_registers = initial_insn & 0x0000ffff;
  int insn_low_registers, insn_high_registers;
  int usable_register_mask;
  int nb_registers = elf32_arm_popcount (insn_all_registers);
  int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
  int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_ldmia (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 registers load sequences that do not cause the
     hardware issue.  */
  if (nb_registers <= 8)
    {
      /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));

      /* Fill the remaining of the stub with deterministic contents.  */
      current_stub_contents =
	stm32l4xx_fill_stub_udf (htab, output_bfd,
				 base_stub_contents, current_stub_contents,
				 base_stub_contents +
				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

      return;
    }

  /* - reg_list[13] == 0.  */
  BFD_ASSERT ((insn_all_registers & (1 << 13)) == 0);

  /* - reg_list[14] & reg_list[15] != 1.  */
  BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);

  /* - if (wback==1) reg_list[rn] == 0.  */
  BFD_ASSERT (!wback || !restore_rn);

  /* - nb_registers > 8.  */
  BFD_ASSERT (elf32_arm_popcount (insn_all_registers) > 8);

  /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */

  /* In the following algorithm, we split this wide LDM using 2 LDM insns:
    - One with the 7 lowest registers (register mask 0x007F)
      This LDM will finally contain between 2 and 7 registers
    - One with the 7 highest registers (register mask 0xDF80)
      This ldm will finally contain between 2 and 7 registers.  */
  insn_low_registers = insn_all_registers & 0x007F;
  insn_high_registers = insn_all_registers & 0xDF80;

  /* A spare register may be needed during this veneer to temporarily
     handle the base register.  This register will be restored with the
     last LDM operation.
     The usable register may be any general purpose register (that
     excludes PC, SP, LR : register mask is 0x1FFF).  */
  usable_register_mask = 0x1FFF;

  /* Generate the stub function.  */
  if (wback)
    {
      /* LDMIA Rn!, {R-low-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (rn, /*wback=*/1, insn_low_registers));

      /* LDMIA Rn!, {R-high-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (rn, /*wback=*/1, insn_high_registers));
      if (!restore_pc)
	{
	  /* B initial_insn_addr+4.  */
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_branch_absolute
				(initial_insn_addr - current_stub_contents));
	}
    }
  else /* if (!wback).  */
    {
      ri = rn;

      /* If Rn is not part of the high-register-list, move it there.  */
      if (!(insn_high_registers & (1 << rn)))
	{
	  /* Choose a Ri in the high-register-list that will be restored.  */
	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

	  /* MOV Ri, Rn.  */
	  current_stub_contents =
	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
				create_instruction_mov (ri, rn));
	}

      /* LDMIA Ri!, {R-low-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));

      if (!restore_pc)
	{
	  /* B initial_insn_addr+4.  */
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_branch_absolute
				(initial_insn_addr - current_stub_contents));
	}
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
}
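
/* Illustrative sketch, not part of the original sources: the register-list
   split performed above, restated as a tiny standalone check kept under
   "#if 0" (never compiled into BFD).  The mask 0x5ffe stands for
   "LDMIA r0!, {r1-r12, r14}", a 13-register load.  */
#if 0
#include <assert.h>

int
main (void)
{
  unsigned int all = 0x5ffe;		/* r1-r12, r14.  */
  unsigned int low = all & 0x007f;	/* r1-r6.  */
  unsigned int high = all & 0xdf80;	/* r7-r12, r14.  */

  /* Each half of the replacement sequence loads at most 8 registers,
     which is what avoids the erratum.  */
  assert (__builtin_popcount (low) <= 8 && __builtin_popcount (high) <= 8);
  assert ((low | high) == all);
  return 0;
}
#endif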
static void
stm32l4xx_create_replacing_stub_ldmdb (struct elf32_arm_link_hash_table * htab,
				       bfd * output_bfd,
				       const insn32 initial_insn,
				       const bfd_byte *const initial_insn_addr,
				       bfd_byte *const base_stub_contents)
{
  int wback = (initial_insn & 0x00200000) >> 21;
  int ri, rn = (initial_insn & 0x000f0000) >> 16;
  int insn_all_registers = initial_insn & 0x0000ffff;
  int insn_low_registers, insn_high_registers;
  int usable_register_mask;
  int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
  int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
  int nb_registers = elf32_arm_popcount (insn_all_registers);
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_ldmdb (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 registers load sequences that do not cause the
     hardware issue.  */
  if (nb_registers <= 8)
    {
      /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));

      /* Fill the remaining of the stub with deterministic contents.  */
      current_stub_contents =
	stm32l4xx_fill_stub_udf (htab, output_bfd,
				 base_stub_contents, current_stub_contents,
				 base_stub_contents +
				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

      return;
    }

  /* - reg_list[13] == 0.  */
  BFD_ASSERT ((insn_all_registers & (1 << 13)) == 0);

  /* - reg_list[14] & reg_list[15] != 1.  */
  BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);

  /* - if (wback==1) reg_list[rn] == 0.  */
  BFD_ASSERT (!wback || !restore_rn);

  /* - nb_registers > 8.  */
  BFD_ASSERT (elf32_arm_popcount (insn_all_registers) > 8);

  /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */

  /* In the following algorithm, we split this wide LDM using 2 LDM insn:
    - One with the 7 lowest registers (register mask 0x007F)
      This LDM will finally contain between 2 and 7 registers
    - One with the 7 highest registers (register mask 0xDF80)
      This ldm will finally contain between 2 and 7 registers.  */
  insn_low_registers = insn_all_registers & 0x007F;
  insn_high_registers = insn_all_registers & 0xDF80;

  /* A spare register may be needed during this veneer to temporarily
     handle the base register.  This register will be restored with
     the last LDM operation.
     The usable register may be any general purpose register (that excludes
     PC, SP, LR : register mask is 0x1FFF).  */
  usable_register_mask = 0x1FFF;

  /* Generate the stub function.  */
  if (!wback && !restore_pc && !restore_rn)
    {
      /* Choose a Ri in the low-register-list that will be restored.  */
      ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));

      /* MOV Ri, Rn.  */
      current_stub_contents =
	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
			    create_instruction_mov (ri, rn));

      /* LDMDB Ri!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/1, insn_high_registers));

      /* LDMDB Ri, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/0, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (wback && !restore_pc && !restore_rn)
    {
      /* LDMDB Rn!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (rn, /*wback=*/1, insn_high_registers));

      /* LDMDB Rn!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (rn, /*wback=*/1, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (!wback && restore_pc && !restore_rn)
    {
      /* Choose a Ri in the high-register-list that will be restored.  */
      ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

      /* SUB Ri, Rn, #(4*nb_registers).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (ri, rn, (4 * nb_registers)));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (wback && restore_pc && !restore_rn)
    {
      /* Choose a Ri in the high-register-list that will be restored.  */
      ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

      /* SUB Rn, Rn, #(4*nb_registers).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (rn, rn, (4 * nb_registers)));

      /* MOV Ri, Rn.  */
      current_stub_contents =
	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
			    create_instruction_mov (ri, rn));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (!wback && !restore_pc && restore_rn)
    {
      ri = rn;
      if (!(insn_low_registers & (1 << rn)))
	{
	  /* Choose a Ri in the low-register-list that will be restored.  */
	  ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));

	  /* MOV Ri, Rn.  */
	  current_stub_contents =
	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
				create_instruction_mov (ri, rn));
	}

      /* LDMDB Ri!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/1, insn_high_registers));

      /* LDMDB Ri, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/0, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (!wback && restore_pc && restore_rn)
    {
      ri = rn;
      if (!(insn_high_registers & (1 << rn)))
	{
	  /* Choose a Ri in the high-register-list that will be restored.  */
	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
	}

      /* SUB Ri, Rn, #(4*nb_registers).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (ri, rn, (4 * nb_registers)));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (wback && restore_rn)
    {
      /* The assembler should not have accepted to encode this.  */
      BFD_ASSERT (0 && "Cannot patch an instruction that has an "
		  "undefined behavior.\n");
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
}
static void
stm32l4xx_create_replacing_stub_vldm (struct elf32_arm_link_hash_table * htab,
				      bfd * output_bfd,
				      const insn32 initial_insn,
				      const bfd_byte *const initial_insn_addr,
				      bfd_byte *const base_stub_contents)
{
  int num_words = initial_insn & 0xff;
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_vldm (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 words load sequences that do not cause the
     hardware issue.  */
  if (num_words <= 8)
    {
      /* Untouched instruction.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else
    {
      bfd_boolean is_dp = /* DP encoding.  */
	(initial_insn & 0xfe100f00) == 0xec100b00;
      bfd_boolean is_ia_nobang = /* (IA without !).  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x4;
      bfd_boolean is_ia_bang = /* (IA with !) - includes VPOP.  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x5;
      bfd_boolean is_db_bang = /* (DB with !).  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x9;
      int base_reg = ((unsigned int) initial_insn << 12) >> 28;
      /* d = UInt (Vd:D);.  */
      int first_reg = ((((unsigned int) initial_insn << 16) >> 28) << 1)
	| (((unsigned int) initial_insn << 9) >> 31);

      /* Compute the number of 8-words chunks needed to split.  */
      int chunks = (num_words % 8) ? (num_words / 8 + 1) : (num_words / 8);
      int chunk;

      /* The test coverage has been done assuming the following
	 hypothesis that exactly one of the previous is_ predicates is
	 true.  */
      BFD_ASSERT (    (is_ia_nobang ^ is_ia_bang ^ is_db_bang)
		  && !(is_ia_nobang & is_ia_bang & is_db_bang));

      /* We treat the cutting of the words in one pass for all
	 cases, then we emit the adjustments:

	 vldm rx, {...}
	 -> vldm rx!, {8_words_or_less} for each needed 8_word
	 -> sub rx, rx, #size (list)

	 vldm rx!, {...}
	 -> vldm rx!, {8_words_or_less} for each needed 8_word
	 This also handles vpop instruction (when rx is sp)

	 vldmd rx!, {...}
	 -> vldmb rx!, {8_words_or_less} for each needed 8_word.  */
      for (chunk = 0; chunk < chunks; ++chunk)
	{
	  bfd_vma new_insn = 0;

	  if (is_ia_nobang || is_ia_bang)
	    {
	      new_insn = create_instruction_vldmia
		(base_reg,
		 is_dp,
		 /*wback=*/1,
		 chunks - (chunk + 1) ?
		 8 : num_words - chunk * 8,
		 first_reg + chunk * 8);
	    }
	  else if (is_db_bang)
	    {
	      new_insn = create_instruction_vldmdb
		(base_reg,
		 is_dp,
		 chunks - (chunk + 1) ?
		 8 : num_words - chunk * 8,
		 first_reg + chunk * 8);
	    }

	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				new_insn);
	}

      /* Only this case requires the base register compensation
	 subtract.  */
      if (is_ia_nobang)
	{
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_sub
				(base_reg, base_reg, 4*num_words));
	}

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
}
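
/* Illustrative note, not part of the original sources: for a 13-word VLDM
   the arithmetic above gives chunks = 13 / 8 + 1 = 2, so the veneer emits
   "vldm rx!, {8 words}" followed by "vldm rx!, {5 words}"; for the
   IA-without-! form it finishes with "sub rx, rx, #52" (4 * num_words) to
   restore the base register.  */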
static void
stm32l4xx_create_replacing_stub (struct elf32_arm_link_hash_table * htab,
				 bfd * output_bfd,
				 const insn32 wrong_insn,
				 const bfd_byte *const wrong_insn_addr,
				 bfd_byte *const stub_contents)
{
  if (is_thumb2_ldmia (wrong_insn))
    stm32l4xx_create_replacing_stub_ldmia (htab, output_bfd,
					   wrong_insn, wrong_insn_addr,
					   stub_contents);
  else if (is_thumb2_ldmdb (wrong_insn))
    stm32l4xx_create_replacing_stub_ldmdb (htab, output_bfd,
					   wrong_insn, wrong_insn_addr,
					   stub_contents);
  else if (is_thumb2_vldm (wrong_insn))
    stm32l4xx_create_replacing_stub_vldm (htab, output_bfd,
					  wrong_insn, wrong_insn_addr,
					  stub_contents);
}

/* End of stm32l4xx work-around.  */
19497 /* Do code byteswapping. Return FALSE afterwards so that the section is
19498 written out as normal. */
19501 elf32_arm_write_section (bfd
*output_bfd
,
19502 struct bfd_link_info
*link_info
,
19504 bfd_byte
*contents
)
19506 unsigned int mapcount
, errcount
;
19507 _arm_elf_section_data
*arm_data
;
19508 struct elf32_arm_link_hash_table
*globals
= elf32_arm_hash_table (link_info
);
19509 elf32_arm_section_map
*map
;
19510 elf32_vfp11_erratum_list
*errnode
;
19511 elf32_stm32l4xx_erratum_list
*stm32l4xx_errnode
;
19514 bfd_vma offset
= sec
->output_section
->vma
+ sec
->output_offset
;
19518 if (globals
== NULL
)
19521 /* If this section has not been allocated an _arm_elf_section_data
19522 structure then we cannot record anything. */
19523 arm_data
= get_arm_elf_section_data (sec
);
19524 if (arm_data
== NULL
)
19527 mapcount
= arm_data
->mapcount
;
19528 map
= arm_data
->map
;
19529 errcount
= arm_data
->erratumcount
;
19533 unsigned int endianflip
= bfd_big_endian (output_bfd
) ? 3 : 0;
19535 for (errnode
= arm_data
->erratumlist
; errnode
!= 0;
19536 errnode
= errnode
->next
)
19538 bfd_vma target
= errnode
->vma
- offset
;
19540 switch (errnode
->type
)
19542 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER
:
19544 bfd_vma branch_to_veneer
;
19545 /* Original condition code of instruction, plus bit mask for
19546 ARM B instruction. */
19547 unsigned int insn
= (errnode
->u
.b
.vfp_insn
& 0xf0000000)
19550 /* The instruction is before the label. */
19553 /* Above offset included in -4 below. */
19554 branch_to_veneer
= errnode
->u
.b
.veneer
->vma
19555 - errnode
->vma
- 4;
19557 if ((signed) branch_to_veneer
< -(1 << 25)
19558 || (signed) branch_to_veneer
>= (1 << 25))
19559 _bfd_error_handler (_("%pB: error: VFP11 veneer out of "
19560 "range"), output_bfd
);
19562 insn
|= (branch_to_veneer
>> 2) & 0xffffff;
19563 contents
[endianflip
^ target
] = insn
& 0xff;
19564 contents
[endianflip
^ (target
+ 1)] = (insn
>> 8) & 0xff;
19565 contents
[endianflip
^ (target
+ 2)] = (insn
>> 16) & 0xff;
19566 contents
[endianflip
^ (target
+ 3)] = (insn
>> 24) & 0xff;
19570 case VFP11_ERRATUM_ARM_VENEER
:
19572 bfd_vma branch_from_veneer
;
19575 /* Take size of veneer into account. */
19576 branch_from_veneer
= errnode
->u
.v
.branch
->vma
19577 - errnode
->vma
- 12;
19579 if ((signed) branch_from_veneer
< -(1 << 25)
19580 || (signed) branch_from_veneer
>= (1 << 25))
19581 _bfd_error_handler (_("%pB: error: VFP11 veneer out of "
19582 "range"), output_bfd
);
19584 /* Original instruction. */
19585 insn
= errnode
->u
.v
.branch
->u
.b
.vfp_insn
;
19586 contents
[endianflip
^ target
] = insn
& 0xff;
19587 contents
[endianflip
^ (target
+ 1)] = (insn
>> 8) & 0xff;
19588 contents
[endianflip
^ (target
+ 2)] = (insn
>> 16) & 0xff;
19589 contents
[endianflip
^ (target
+ 3)] = (insn
>> 24) & 0xff;
19591 /* Branch back to insn after original insn. */
19592 insn
= 0xea000000 | ((branch_from_veneer
>> 2) & 0xffffff);
19593 contents
[endianflip
^ (target
+ 4)] = insn
& 0xff;
19594 contents
[endianflip
^ (target
+ 5)] = (insn
>> 8) & 0xff;
19595 contents
[endianflip
^ (target
+ 6)] = (insn
>> 16) & 0xff;
19596 contents
[endianflip
^ (target
+ 7)] = (insn
>> 24) & 0xff;
19606 if (arm_data
->stm32l4xx_erratumcount
!= 0)
19608 for (stm32l4xx_errnode
= arm_data
->stm32l4xx_erratumlist
;
19609 stm32l4xx_errnode
!= 0;
19610 stm32l4xx_errnode
= stm32l4xx_errnode
->next
)
19612 bfd_vma target
= stm32l4xx_errnode
->vma
- offset
;
19614 switch (stm32l4xx_errnode
->type
)
19616 case STM32L4XX_ERRATUM_BRANCH_TO_VENEER
:
19619 bfd_vma branch_to_veneer
=
19620 stm32l4xx_errnode
->u
.b
.veneer
->vma
- stm32l4xx_errnode
->vma
;
19622 if ((signed) branch_to_veneer
< -(1 << 24)
19623 || (signed) branch_to_veneer
>= (1 << 24))
19625 bfd_vma out_of_range
=
19626 ((signed) branch_to_veneer
< -(1 << 24)) ?
19627 - branch_to_veneer
- (1 << 24) :
19628 ((signed) branch_to_veneer
>= (1 << 24)) ?
19629 branch_to_veneer
- (1 << 24) : 0;
19632 (_("%pB(%#" PRIx64
"): error: "
19633 "cannot create STM32L4XX veneer; "
19634 "jump out of range by %" PRId64
" bytes; "
19635 "cannot encode branch instruction"),
19637 (uint64_t) (stm32l4xx_errnode
->vma
- 4),
19638 (int64_t) out_of_range
);
19642 insn
= create_instruction_branch_absolute
19643 (stm32l4xx_errnode
->u
.b
.veneer
->vma
- stm32l4xx_errnode
->vma
);
19645 /* The instruction is before the label. */
19648 put_thumb2_insn (globals
, output_bfd
,
19649 (bfd_vma
) insn
, contents
+ target
);
19653 case STM32L4XX_ERRATUM_VENEER
:
19656 bfd_byte
* veneer_r
;
19659 veneer
= contents
+ target
;
19661 + stm32l4xx_errnode
->u
.b
.veneer
->vma
19662 - stm32l4xx_errnode
->vma
- 4;
19664 if ((signed) (veneer_r
- veneer
-
19665 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE
>
19666 STM32L4XX_ERRATUM_LDM_VENEER_SIZE
?
19667 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE
:
19668 STM32L4XX_ERRATUM_LDM_VENEER_SIZE
) < -(1 << 24)
19669 || (signed) (veneer_r
- veneer
) >= (1 << 24))
19671 _bfd_error_handler (_("%pB: error: cannot create STM32L4XX "
19672 "veneer"), output_bfd
);
19676 /* Original instruction. */
19677 insn
= stm32l4xx_errnode
->u
.v
.branch
->u
.b
.insn
;
19679 stm32l4xx_create_replacing_stub
19680 (globals
, output_bfd
, insn
, (void*)veneer_r
, (void*)veneer
);
19690 if (arm_data
->elf
.this_hdr
.sh_type
== SHT_ARM_EXIDX
)
19692 arm_unwind_table_edit
*edit_node
19693 = arm_data
->u
.exidx
.unwind_edit_list
;
19694 /* Now, sec->size is the size of the section we will write. The original
19695 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
19696 markers) was sec->rawsize. (This isn't the case if we perform no
19697 edits, then rawsize will be zero and we should use size). */
19698 bfd_byte
*edited_contents
= (bfd_byte
*) bfd_malloc (sec
->size
);
19699 unsigned int input_size
= sec
->rawsize
? sec
->rawsize
: sec
->size
;
19700 unsigned int in_index
, out_index
;
19701 bfd_vma add_to_offsets
= 0;
19703 for (in_index
= 0, out_index
= 0; in_index
* 8 < input_size
|| edit_node
;)
19707 unsigned int edit_index
= edit_node
->index
;
19709 if (in_index
< edit_index
&& in_index
* 8 < input_size
)
19711 copy_exidx_entry (output_bfd
, edited_contents
+ out_index
* 8,
19712 contents
+ in_index
* 8, add_to_offsets
);
19716 else if (in_index
== edit_index
19717 || (in_index
* 8 >= input_size
19718 && edit_index
== UINT_MAX
))
19720 switch (edit_node
->type
)
19722 case DELETE_EXIDX_ENTRY
:
19724 add_to_offsets
+= 8;
19727 case INSERT_EXIDX_CANTUNWIND_AT_END
:
19729 asection
*text_sec
= edit_node
->linked_section
;
19730 bfd_vma text_offset
= text_sec
->output_section
->vma
19731 + text_sec
->output_offset
19733 bfd_vma exidx_offset
= offset
+ out_index
* 8;
19734 unsigned long prel31_offset
;
19736 /* Note: this is meant to be equivalent to an
19737 R_ARM_PREL31 relocation. These synthetic
19738 EXIDX_CANTUNWIND markers are not relocated by the
19739 usual BFD method. */
19740 prel31_offset
= (text_offset
- exidx_offset
)
19742 if (bfd_link_relocatable (link_info
))
19744 /* Here relocation for new EXIDX_CANTUNWIND is
19745 created, so there is no need to
19746 adjust offset by hand. */
19747 prel31_offset
= text_sec
->output_offset
19751 /* First address we can't unwind. */
19752 bfd_put_32 (output_bfd
, prel31_offset
,
19753 &edited_contents
[out_index
* 8]);
19755 /* Code for EXIDX_CANTUNWIND. */
19756 bfd_put_32 (output_bfd
, 0x1,
19757 &edited_contents
[out_index
* 8 + 4]);
19760 add_to_offsets
-= 8;
19765 edit_node
= edit_node
->next
;
19770 /* No more edits, copy remaining entries verbatim. */
19771 copy_exidx_entry (output_bfd
, edited_contents
+ out_index
* 8,
19772 contents
+ in_index
* 8, add_to_offsets
);
19778 if (!(sec
->flags
& SEC_EXCLUDE
) && !(sec
->flags
& SEC_NEVER_LOAD
))
19779 bfd_set_section_contents (output_bfd
, sec
->output_section
,
19781 (file_ptr
) sec
->output_offset
, sec
->size
);
19786 /* Fix code to point to Cortex-A8 erratum stubs. */
19787 if (globals
->fix_cortex_a8
)
19789 struct a8_branch_to_stub_data data
;
19791 data
.writing_section
= sec
;
19792 data
.contents
= contents
;
19794 bfd_hash_traverse (& globals
->stub_hash_table
, make_branch_to_a8_stub
,
19801 if (globals
->byteswap_code
)
19803 qsort (map
, mapcount
, sizeof (* map
), elf32_arm_compare_mapping
);
19806 for (i
= 0; i
< mapcount
; i
++)
19808 if (i
== mapcount
- 1)
19811 end
= map
[i
+ 1].vma
;
19813 switch (map
[i
].type
)
19816 /* Byte swap code words. */
19817 while (ptr
+ 3 < end
)
19819 tmp
= contents
[ptr
];
19820 contents
[ptr
] = contents
[ptr
+ 3];
19821 contents
[ptr
+ 3] = tmp
;
19822 tmp
= contents
[ptr
+ 1];
19823 contents
[ptr
+ 1] = contents
[ptr
+ 2];
19824 contents
[ptr
+ 2] = tmp
;
19830 /* Byte swap code halfwords. */
19831 while (ptr
+ 1 < end
)
19833 tmp
= contents
[ptr
];
19834 contents
[ptr
] = contents
[ptr
+ 1];
19835 contents
[ptr
+ 1] = tmp
;
19841 /* Leave data alone. */
19849 arm_data
->mapcount
= -1;
19850 arm_data
->mapsize
= 0;
19851 arm_data
->map
= NULL
;
/* Mangle thumb function symbols as we read them in.  */

static bfd_boolean
elf32_arm_swap_symbol_in (bfd * abfd,
			  const void *psrc,
			  const void *pshn,
			  Elf_Internal_Sym *dst)
{
  if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
    return FALSE;
  dst->st_target_internal = 0;

  /* New EABI objects mark thumb function symbols by setting the low bit of
     the address.  */
  if (ELF_ST_TYPE (dst->st_info) == STT_FUNC
      || ELF_ST_TYPE (dst->st_info) == STT_GNU_IFUNC)
    {
      if (dst->st_value & 1)
	{
	  dst->st_value &= ~(bfd_vma) 1;
	  ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal,
				   ST_BRANCH_TO_THUMB);
	}
      else
	ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_ARM);
    }
  else if (ELF_ST_TYPE (dst->st_info) == STT_ARM_TFUNC)
    {
      dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_FUNC);
      ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_THUMB);
    }
  else if (ELF_ST_TYPE (dst->st_info) == STT_SECTION)
    ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_LONG);
  else
    ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_UNKNOWN);

  return TRUE;
}
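
/* Illustrative note, not part of the original sources: with the convention
   handled above, a Thumb STT_FUNC symbol read in with st_value 0x8001 comes
   out as st_value 0x8000 and branch type ST_BRANCH_TO_THUMB, while an ARM
   function at 0x8000 keeps its value and gets ST_BRANCH_TO_ARM.  */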
/* Mangle thumb function symbols as we write them out.  */

static void
elf32_arm_swap_symbol_out (bfd *abfd,
			   const Elf_Internal_Sym *src,
			   void *cdst,
			   void *shndx)
{
  Elf_Internal_Sym newsym;

  /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
     of the address set, as per the new EABI.  We do this unconditionally
     because objcopy does not set the elf header flags until after
     it writes out the symbol table.  */
  if (ARM_GET_SYM_BRANCH_TYPE (src->st_target_internal) == ST_BRANCH_TO_THUMB)
    {
      newsym = *src;
      if (ELF_ST_TYPE (src->st_info) != STT_GNU_IFUNC)
	newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
      if (newsym.st_shndx != SHN_UNDEF)
	{
	  /* Do this only for defined symbols.  At link time, the static
	     linker will simulate the work of the dynamic linker of resolving
	     symbols and will carry over the thumbness of found symbols to
	     the output symbol table.  It's not clear how it happens, but
	     the thumbness of undefined symbols can well be different at
	     runtime, and writing '1' for them will be confusing for users
	     and possibly for the dynamic linker itself.  */
	  newsym.st_value |= 1;
	}

      src = &newsym;
    }
  bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
}
19933 /* Add the PT_ARM_EXIDX program header. */
19936 elf32_arm_modify_segment_map (bfd
*abfd
,
19937 struct bfd_link_info
*info ATTRIBUTE_UNUSED
)
19939 struct elf_segment_map
*m
;
19942 sec
= bfd_get_section_by_name (abfd
, ".ARM.exidx");
19943 if (sec
!= NULL
&& (sec
->flags
& SEC_LOAD
) != 0)
19945 /* If there is already a PT_ARM_EXIDX header, then we do not
19946 want to add another one. This situation arises when running
19947 "strip"; the input binary already has the header. */
19948 m
= elf_seg_map (abfd
);
19949 while (m
&& m
->p_type
!= PT_ARM_EXIDX
)
19953 m
= (struct elf_segment_map
*)
19954 bfd_zalloc (abfd
, sizeof (struct elf_segment_map
));
19957 m
->p_type
= PT_ARM_EXIDX
;
19959 m
->sections
[0] = sec
;
19961 m
->next
= elf_seg_map (abfd
);
19962 elf_seg_map (abfd
) = m
;
19969 /* We may add a PT_ARM_EXIDX program header. */
19972 elf32_arm_additional_program_headers (bfd
*abfd
,
19973 struct bfd_link_info
*info ATTRIBUTE_UNUSED
)
19977 sec
= bfd_get_section_by_name (abfd
, ".ARM.exidx");
19978 if (sec
!= NULL
&& (sec
->flags
& SEC_LOAD
) != 0)
19984 /* Hook called by the linker routine which adds symbols from an object
19988 elf32_arm_add_symbol_hook (bfd
*abfd
, struct bfd_link_info
*info
,
19989 Elf_Internal_Sym
*sym
, const char **namep
,
19990 flagword
*flagsp
, asection
**secp
, bfd_vma
*valp
)
19992 if (elf32_arm_hash_table (info
) == NULL
)
19995 if (elf32_arm_hash_table (info
)->vxworks_p
19996 && !elf_vxworks_add_symbol_hook (abfd
, info
, sym
, namep
,
19997 flagsp
, secp
, valp
))
20003 /* We use this to override swap_symbol_in and swap_symbol_out. */
20004 const struct elf_size_info elf32_arm_size_info
=
20006 sizeof (Elf32_External_Ehdr
),
20007 sizeof (Elf32_External_Phdr
),
20008 sizeof (Elf32_External_Shdr
),
20009 sizeof (Elf32_External_Rel
),
20010 sizeof (Elf32_External_Rela
),
20011 sizeof (Elf32_External_Sym
),
20012 sizeof (Elf32_External_Dyn
),
20013 sizeof (Elf_External_Note
),
20017 ELFCLASS32
, EV_CURRENT
,
20018 bfd_elf32_write_out_phdrs
,
20019 bfd_elf32_write_shdrs_and_ehdr
,
20020 bfd_elf32_checksum_contents
,
20021 bfd_elf32_write_relocs
,
20022 elf32_arm_swap_symbol_in
,
20023 elf32_arm_swap_symbol_out
,
20024 bfd_elf32_slurp_reloc_table
,
20025 bfd_elf32_slurp_symbol_table
,
20026 bfd_elf32_swap_dyn_in
,
20027 bfd_elf32_swap_dyn_out
,
20028 bfd_elf32_swap_reloc_in
,
20029 bfd_elf32_swap_reloc_out
,
20030 bfd_elf32_swap_reloca_in
,
20031 bfd_elf32_swap_reloca_out
};

static bfd_vma
read_code32 (const bfd *abfd, const bfd_byte *addr)
{
  /* V7 BE8 code is always little endian.  */
  if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
    return bfd_getl32 (addr);

  return bfd_get_32 (abfd, addr);
}

static bfd_vma
read_code16 (const bfd *abfd, const bfd_byte *addr)
{
  /* V7 BE8 code is always little endian.  */
  if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
    return bfd_getl16 (addr);

  return bfd_get_16 (abfd, addr);
}
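
/* Illustrative note, not part of the original sources: in a BE8 image the
   instructions are stored little-endian even though the file is nominally
   big-endian, which is why the readers above switch to bfd_getl32/bfd_getl16
   whenever EF_ARM_BE8 is set.  */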
20054 /* Return size of plt0 entry starting at ADDR
20055 or (bfd_vma) -1 if size can not be determined. */
20058 elf32_arm_plt0_size (const bfd
*abfd
, const bfd_byte
*addr
)
20060 bfd_vma first_word
;
20063 first_word
= read_code32 (abfd
, addr
);
20065 if (first_word
== elf32_arm_plt0_entry
[0])
20066 plt0_size
= 4 * ARRAY_SIZE (elf32_arm_plt0_entry
);
20067 else if (first_word
== elf32_thumb2_plt0_entry
[0])
20068 plt0_size
= 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry
);
20070 /* We don't yet handle this PLT format. */
20071 return (bfd_vma
) -1;
20076 /* Return size of plt entry starting at offset OFFSET
20077 of plt section located at address START
20078 or (bfd_vma) -1 if size can not be determined. */
20081 elf32_arm_plt_size (const bfd
*abfd
, const bfd_byte
*start
, bfd_vma offset
)
20083 bfd_vma first_insn
;
20084 bfd_vma plt_size
= 0;
20085 const bfd_byte
*addr
= start
+ offset
;
20087 /* PLT entry size if fixed on Thumb-only platforms. */
20088 if (read_code32 (abfd
, start
) == elf32_thumb2_plt0_entry
[0])
20089 return 4 * ARRAY_SIZE (elf32_thumb2_plt_entry
);
20091 /* Respect Thumb stub if necessary. */
20092 if (read_code16 (abfd
, addr
) == elf32_arm_plt_thumb_stub
[0])
20094 plt_size
+= 2 * ARRAY_SIZE(elf32_arm_plt_thumb_stub
);
20097 /* Strip immediate from first add. */
20098 first_insn
= read_code32 (abfd
, addr
+ plt_size
) & 0xffffff00;
20100 #ifdef FOUR_WORD_PLT
20101 if (first_insn
== elf32_arm_plt_entry
[0])
20102 plt_size
+= 4 * ARRAY_SIZE (elf32_arm_plt_entry
);
20104 if (first_insn
== elf32_arm_plt_entry_long
[0])
20105 plt_size
+= 4 * ARRAY_SIZE (elf32_arm_plt_entry_long
);
20106 else if (first_insn
== elf32_arm_plt_entry_short
[0])
20107 plt_size
+= 4 * ARRAY_SIZE (elf32_arm_plt_entry_short
);
20110 /* We don't yet handle this PLT format. */
20111 return (bfd_vma
) -1;
20116 /* Implementation is shamelessly borrowed from _bfd_elf_get_synthetic_symtab. */
20119 elf32_arm_get_synthetic_symtab (bfd
*abfd
,
20120 long symcount ATTRIBUTE_UNUSED
,
20121 asymbol
**syms ATTRIBUTE_UNUSED
,
20131 Elf_Internal_Shdr
*hdr
;
20139 if ((abfd
->flags
& (DYNAMIC
| EXEC_P
)) == 0)
20142 if (dynsymcount
<= 0)
20145 relplt
= bfd_get_section_by_name (abfd
, ".rel.plt");
20146 if (relplt
== NULL
)
20149 hdr
= &elf_section_data (relplt
)->this_hdr
;
20150 if (hdr
->sh_link
!= elf_dynsymtab (abfd
)
20151 || (hdr
->sh_type
!= SHT_REL
&& hdr
->sh_type
!= SHT_RELA
))
20154 plt
= bfd_get_section_by_name (abfd
, ".plt");
20158 if (!elf32_arm_size_info
.slurp_reloc_table (abfd
, relplt
, dynsyms
, TRUE
))
20161 data
= plt
->contents
;
20164 if (!bfd_get_full_section_contents(abfd
, (asection
*) plt
, &data
) || data
== NULL
)
20166 bfd_cache_section_contents((asection
*) plt
, data
);
20169 count
= relplt
->size
/ hdr
->sh_entsize
;
20170 size
= count
* sizeof (asymbol
);
20171 p
= relplt
->relocation
;
20172 for (i
= 0; i
< count
; i
++, p
+= elf32_arm_size_info
.int_rels_per_ext_rel
)
20174 size
+= strlen ((*p
->sym_ptr_ptr
)->name
) + sizeof ("@plt");
20175 if (p
->addend
!= 0)
20176 size
+= sizeof ("+0x") - 1 + 8;
20179 s
= *ret
= (asymbol
*) bfd_malloc (size
);
20183 offset
= elf32_arm_plt0_size (abfd
, data
);
20184 if (offset
== (bfd_vma
) -1)
20187 names
= (char *) (s
+ count
);
20188 p
= relplt
->relocation
;
20190 for (i
= 0; i
< count
; i
++, p
+= elf32_arm_size_info
.int_rels_per_ext_rel
)
20194 bfd_vma plt_size
= elf32_arm_plt_size (abfd
, data
, offset
);
20195 if (plt_size
== (bfd_vma
) -1)
20198 *s
= **p
->sym_ptr_ptr
;
20199 /* Undefined syms won't have BSF_LOCAL or BSF_GLOBAL set. Since
20200 we are defining a symbol, ensure one of them is set. */
20201 if ((s
->flags
& BSF_LOCAL
) == 0)
20202 s
->flags
|= BSF_GLOBAL
;
20203 s
->flags
|= BSF_SYNTHETIC
;
20208 len
= strlen ((*p
->sym_ptr_ptr
)->name
);
20209 memcpy (names
, (*p
->sym_ptr_ptr
)->name
, len
);
20211 if (p
->addend
!= 0)
20215 memcpy (names
, "+0x", sizeof ("+0x") - 1);
20216 names
+= sizeof ("+0x") - 1;
20217 bfd_sprintf_vma (abfd
, buf
, p
->addend
);
20218 for (a
= buf
; *a
== '0'; ++a
)
20221 memcpy (names
, a
, len
);
20224 memcpy (names
, "@plt", sizeof ("@plt"));
20225 names
+= sizeof ("@plt");
20227 offset
+= plt_size
;
20234 elf32_arm_section_flags (flagword
*flags
, const Elf_Internal_Shdr
* hdr
)
20236 if (hdr
->sh_flags
& SHF_ARM_PURECODE
)
20237 *flags
|= SEC_ELF_PURECODE
;
20242 elf32_arm_lookup_section_flags (char *flag_name
)
20244 if (!strcmp (flag_name
, "SHF_ARM_PURECODE"))
20245 return SHF_ARM_PURECODE
;
20247 return SEC_NO_FLAGS
;
20250 static unsigned int
20251 elf32_arm_count_additional_relocs (asection
*sec
)
20253 struct _arm_elf_section_data
*arm_data
;
20254 arm_data
= get_arm_elf_section_data (sec
);
20256 return arm_data
== NULL
? 0 : arm_data
->additional_reloc_count
;
20259 /* Called to set the sh_flags, sh_link and sh_info fields of OSECTION which
20260 has a type >= SHT_LOOS. Returns TRUE if these fields were initialised
20261 FALSE otherwise. ISECTION is the best guess matching section from the
20262 input bfd IBFD, but it might be NULL. */
20265 elf32_arm_copy_special_section_fields (const bfd
*ibfd ATTRIBUTE_UNUSED
,
20266 bfd
*obfd ATTRIBUTE_UNUSED
,
20267 const Elf_Internal_Shdr
*isection ATTRIBUTE_UNUSED
,
20268 Elf_Internal_Shdr
*osection
)
20270 switch (osection
->sh_type
)
20272 case SHT_ARM_EXIDX
:
20274 Elf_Internal_Shdr
**oheaders
= elf_elfsections (obfd
);
20275 Elf_Internal_Shdr
**iheaders
= elf_elfsections (ibfd
);
20278 osection
->sh_flags
= SHF_ALLOC
| SHF_LINK_ORDER
;
20279 osection
->sh_info
= 0;
20281 /* The sh_link field must be set to the text section associated with
20282 this index section. Unfortunately the ARM EHABI does not specify
20283 exactly how to determine this association. Our caller does try
20284 to match up OSECTION with its corresponding input section however
20285 so that is a good first guess. */
20286 if (isection
!= NULL
20287 && osection
->bfd_section
!= NULL
20288 && isection
->bfd_section
!= NULL
20289 && isection
->bfd_section
->output_section
!= NULL
20290 && isection
->bfd_section
->output_section
== osection
->bfd_section
20291 && iheaders
!= NULL
20292 && isection
->sh_link
> 0
20293 && isection
->sh_link
< elf_numsections (ibfd
)
20294 && iheaders
[isection
->sh_link
]->bfd_section
!= NULL
20295 && iheaders
[isection
->sh_link
]->bfd_section
->output_section
!= NULL
20298 for (i
= elf_numsections (obfd
); i
-- > 0;)
20299 if (oheaders
[i
]->bfd_section
20300 == iheaders
[isection
->sh_link
]->bfd_section
->output_section
)
20306 /* Failing that we have to find a matching section ourselves. If
20307 we had the output section name available we could compare that
20308 with input section names. Unfortunately we don't. So instead
20309 we use a simple heuristic and look for the nearest executable
20310 section before this one. */
20311 for (i
= elf_numsections (obfd
); i
-- > 0;)
20312 if (oheaders
[i
] == osection
)
20318 if (oheaders
[i
]->sh_type
== SHT_PROGBITS
20319 && (oheaders
[i
]->sh_flags
& (SHF_ALLOC
| SHF_EXECINSTR
))
20320 == (SHF_ALLOC
| SHF_EXECINSTR
))
20326 osection
->sh_link
= i
;
20327 /* If the text section was part of a group
20328 then the index section should be too. */
20329 if (oheaders
[i
]->sh_flags
& SHF_GROUP
)
20330 osection
->sh_flags
|= SHF_GROUP
;
20336 case SHT_ARM_PREEMPTMAP
:
20337 osection
->sh_flags
= SHF_ALLOC
;
20340 case SHT_ARM_ATTRIBUTES
:
20341 case SHT_ARM_DEBUGOVERLAY
:
20342 case SHT_ARM_OVERLAYSECTION
:
/* Returns TRUE if NAME is an ARM mapping symbol.
   Traditionally the symbols $a, $d and $t have been used.
   The ARM ELF standard also defines $x (for A64 code).  It also allows a
   period initiated suffix to be added to the symbol: "$[adtx]\.[:sym_char]+".
   Other tools might also produce $b (Thumb BL), $f, $p, $m and $v, but we do
   not support them here.  $t.x indicates the start of ThumbEE instructions.  */

static bfd_boolean
is_arm_mapping_symbol (const char * name)
{
  return name != NULL /* Paranoia.  */
    && name[0] == '$' /* Note: if objcopy --prefix-symbols has been used then
			 the mapping symbols could have acquired a prefix.
			 We do not support this here, since such symbols no
			 longer conform to the ARM ELF ABI.  */
    && (name[1] == 'a' || name[1] == 'd' || name[1] == 't' || name[1] == 'x')
    && (name[2] == 0 || name[2] == '.');
  /* FIXME: Strictly speaking the symbol is only a valid mapping symbol if
     any characters that follow the period are legal characters for the body
     of a symbol's name.  For now we just assume that this is the case.  */
}

/* Make sure that mapping symbols in object files are not removed via the
   "strip --strip-unneeded" tool.  These symbols are needed in order to
   correctly generate interworking veneers, and for byte swapping code
   regions.  Once an object file has been linked, it is safe to remove the
   symbols as they will no longer be needed.  */

static void
elf32_arm_backend_symbol_processing (bfd *abfd, asymbol *sym)
{
  if (((abfd->flags & (EXEC_P | DYNAMIC)) == 0)
      && sym->section != bfd_abs_section_ptr
      && is_arm_mapping_symbol (sym->name))
    sym->flags |= BSF_KEEP;
}
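
/* Illustrative note, not part of the original sources: with the test above,
   "$a", "$d", "$t" and "$t.x" are recognised as mapping symbols (and so kept
   by elf32_arm_backend_symbol_processing in relocatable objects), whereas
   "$b" or "foo$a" are not.  */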
20387 #undef elf_backend_copy_special_section_fields
20388 #define elf_backend_copy_special_section_fields elf32_arm_copy_special_section_fields
20390 #define ELF_ARCH bfd_arch_arm
20391 #define ELF_TARGET_ID ARM_ELF_DATA
20392 #define ELF_MACHINE_CODE EM_ARM
20393 #ifdef __QNXTARGET__
20394 #define ELF_MAXPAGESIZE 0x1000
20396 #define ELF_MAXPAGESIZE 0x10000
20398 #define ELF_MINPAGESIZE 0x1000
20399 #define ELF_COMMONPAGESIZE 0x1000
20401 #define bfd_elf32_mkobject elf32_arm_mkobject
20403 #define bfd_elf32_bfd_copy_private_bfd_data elf32_arm_copy_private_bfd_data
20404 #define bfd_elf32_bfd_merge_private_bfd_data elf32_arm_merge_private_bfd_data
20405 #define bfd_elf32_bfd_set_private_flags elf32_arm_set_private_flags
20406 #define bfd_elf32_bfd_print_private_bfd_data elf32_arm_print_private_bfd_data
20407 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_link_hash_table_create
20408 #define bfd_elf32_bfd_reloc_type_lookup elf32_arm_reloc_type_lookup
20409 #define bfd_elf32_bfd_reloc_name_lookup elf32_arm_reloc_name_lookup
20410 #define bfd_elf32_find_inliner_info elf32_arm_find_inliner_info
20411 #define bfd_elf32_new_section_hook elf32_arm_new_section_hook
20412 #define bfd_elf32_bfd_is_target_special_symbol elf32_arm_is_target_special_symbol
20413 #define bfd_elf32_bfd_final_link elf32_arm_final_link
20414 #define bfd_elf32_get_synthetic_symtab elf32_arm_get_synthetic_symtab
20416 #define elf_backend_get_symbol_type elf32_arm_get_symbol_type
20417 #define elf_backend_maybe_function_sym elf32_arm_maybe_function_sym
20418 #define elf_backend_gc_mark_hook elf32_arm_gc_mark_hook
20419 #define elf_backend_gc_mark_extra_sections elf32_arm_gc_mark_extra_sections
20420 #define elf_backend_check_relocs elf32_arm_check_relocs
20421 #define elf_backend_update_relocs elf32_arm_update_relocs
20422 #define elf_backend_relocate_section elf32_arm_relocate_section
20423 #define elf_backend_write_section elf32_arm_write_section
20424 #define elf_backend_adjust_dynamic_symbol elf32_arm_adjust_dynamic_symbol
20425 #define elf_backend_create_dynamic_sections elf32_arm_create_dynamic_sections
20426 #define elf_backend_finish_dynamic_symbol elf32_arm_finish_dynamic_symbol
20427 #define elf_backend_finish_dynamic_sections elf32_arm_finish_dynamic_sections
20428 #define elf_backend_size_dynamic_sections elf32_arm_size_dynamic_sections
20429 #define elf_backend_always_size_sections elf32_arm_always_size_sections
20430 #define elf_backend_init_index_section _bfd_elf_init_2_index_sections
20431 #define elf_backend_init_file_header elf32_arm_init_file_header
20432 #define elf_backend_reloc_type_class elf32_arm_reloc_type_class
20433 #define elf_backend_object_p elf32_arm_object_p
20434 #define elf_backend_fake_sections elf32_arm_fake_sections
20435 #define elf_backend_section_from_shdr elf32_arm_section_from_shdr
20436 #define elf_backend_final_write_processing elf32_arm_final_write_processing
20437 #define elf_backend_copy_indirect_symbol elf32_arm_copy_indirect_symbol
20438 #define elf_backend_size_info elf32_arm_size_info
20439 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
20440 #define elf_backend_additional_program_headers elf32_arm_additional_program_headers
20441 #define elf_backend_output_arch_local_syms elf32_arm_output_arch_local_syms
20442 #define elf_backend_filter_implib_symbols elf32_arm_filter_implib_symbols
20443 #define elf_backend_begin_write_processing elf32_arm_begin_write_processing
20444 #define elf_backend_add_symbol_hook elf32_arm_add_symbol_hook
20445 #define elf_backend_count_additional_relocs elf32_arm_count_additional_relocs
20446 #define elf_backend_symbol_processing elf32_arm_backend_symbol_processing
20448 #define elf_backend_can_refcount 1
20449 #define elf_backend_can_gc_sections 1
20450 #define elf_backend_plt_readonly 1
20451 #define elf_backend_want_got_plt 1
20452 #define elf_backend_want_plt_sym 0
20453 #define elf_backend_want_dynrelro 1
20454 #define elf_backend_may_use_rel_p 1
20455 #define elf_backend_may_use_rela_p 0
20456 #define elf_backend_default_use_rela_p 0
20457 #define elf_backend_dtrel_excludes_plt 1
20459 #define elf_backend_got_header_size 12
20460 #define elf_backend_extern_protected_data 1
20462 #undef elf_backend_obj_attrs_vendor
20463 #define elf_backend_obj_attrs_vendor "aeabi"
20464 #undef elf_backend_obj_attrs_section
20465 #define elf_backend_obj_attrs_section ".ARM.attributes"
20466 #undef elf_backend_obj_attrs_arg_type
20467 #define elf_backend_obj_attrs_arg_type elf32_arm_obj_attrs_arg_type
20468 #undef elf_backend_obj_attrs_section_type
20469 #define elf_backend_obj_attrs_section_type SHT_ARM_ATTRIBUTES
20470 #define elf_backend_obj_attrs_order elf32_arm_obj_attrs_order
20471 #define elf_backend_obj_attrs_handle_unknown elf32_arm_obj_attrs_handle_unknown
20473 #undef elf_backend_section_flags
20474 #define elf_backend_section_flags elf32_arm_section_flags
20475 #undef elf_backend_lookup_section_flags_hook
20476 #define elf_backend_lookup_section_flags_hook elf32_arm_lookup_section_flags
20478 #define elf_backend_linux_prpsinfo32_ugid16 TRUE
20480 #include "elf32-target.h"
20482 /* Native Client targets. */
20484 #undef TARGET_LITTLE_SYM
20485 #define TARGET_LITTLE_SYM arm_elf32_nacl_le_vec
20486 #undef TARGET_LITTLE_NAME
20487 #define TARGET_LITTLE_NAME "elf32-littlearm-nacl"
20488 #undef TARGET_BIG_SYM
20489 #define TARGET_BIG_SYM arm_elf32_nacl_be_vec
20490 #undef TARGET_BIG_NAME
20491 #define TARGET_BIG_NAME "elf32-bigarm-nacl"
20493 /* Like elf32_arm_link_hash_table_create -- but overrides
20494 appropriately for NaCl. */
20496 static struct bfd_link_hash_table
*
20497 elf32_arm_nacl_link_hash_table_create (bfd
*abfd
)
20499 struct bfd_link_hash_table
*ret
;
20501 ret
= elf32_arm_link_hash_table_create (abfd
);
20504 struct elf32_arm_link_hash_table
*htab
20505 = (struct elf32_arm_link_hash_table
*) ret
;
20509 htab
->plt_header_size
= 4 * ARRAY_SIZE (elf32_arm_nacl_plt0_entry
);
20510 htab
->plt_entry_size
= 4 * ARRAY_SIZE (elf32_arm_nacl_plt_entry
);
20515 /* Since NaCl doesn't use the ARM-specific unwind format, we don't
20516 really need to use elf32_arm_modify_segment_map. But we do it
20517 anyway just to reduce gratuitous differences with the stock ARM backend. */
20520 elf32_arm_nacl_modify_segment_map (bfd
*abfd
, struct bfd_link_info
*info
)
20522 return (elf32_arm_modify_segment_map (abfd
, info
)
20523 && nacl_modify_segment_map (abfd
, info
));
20527 elf32_arm_nacl_final_write_processing (bfd
*abfd
)
20529 arm_final_write_processing (abfd
);
20530 return nacl_final_write_processing (abfd
);
20534 elf32_arm_nacl_plt_sym_val (bfd_vma i
, const asection
*plt
,
20535 const arelent
*rel ATTRIBUTE_UNUSED
)
20538 + 4 * (ARRAY_SIZE (elf32_arm_nacl_plt0_entry
) +
20539 i
* ARRAY_SIZE (elf32_arm_nacl_plt_entry
));
20543 #define elf32_bed elf32_arm_nacl_bed
20544 #undef bfd_elf32_bfd_link_hash_table_create
20545 #define bfd_elf32_bfd_link_hash_table_create \
20546 elf32_arm_nacl_link_hash_table_create
20547 #undef elf_backend_plt_alignment
20548 #define elf_backend_plt_alignment 4
20549 #undef elf_backend_modify_segment_map
20550 #define elf_backend_modify_segment_map elf32_arm_nacl_modify_segment_map
20551 #undef elf_backend_modify_headers
20552 #define elf_backend_modify_headers nacl_modify_headers
20553 #undef elf_backend_final_write_processing
20554 #define elf_backend_final_write_processing elf32_arm_nacl_final_write_processing
20555 #undef bfd_elf32_get_synthetic_symtab
20556 #undef elf_backend_plt_sym_val
20557 #define elf_backend_plt_sym_val elf32_arm_nacl_plt_sym_val
20558 #undef elf_backend_copy_special_section_fields
20560 #undef ELF_MINPAGESIZE
20561 #undef ELF_COMMONPAGESIZE
20564 #include "elf32-target.h"
20566 /* Reset to defaults. */
20567 #undef elf_backend_plt_alignment
20568 #undef elf_backend_modify_segment_map
20569 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
20570 #undef elf_backend_modify_headers
20571 #undef elf_backend_final_write_processing
20572 #define elf_backend_final_write_processing elf32_arm_final_write_processing
20573 #undef ELF_MINPAGESIZE
20574 #define ELF_MINPAGESIZE 0x1000
20575 #undef ELF_COMMONPAGESIZE
20576 #define ELF_COMMONPAGESIZE 0x1000
20579 /* FDPIC Targets. */
20581 #undef TARGET_LITTLE_SYM
20582 #define TARGET_LITTLE_SYM arm_elf32_fdpic_le_vec
20583 #undef TARGET_LITTLE_NAME
20584 #define TARGET_LITTLE_NAME "elf32-littlearm-fdpic"
20585 #undef TARGET_BIG_SYM
20586 #define TARGET_BIG_SYM arm_elf32_fdpic_be_vec
20587 #undef TARGET_BIG_NAME
20588 #define TARGET_BIG_NAME "elf32-bigarm-fdpic"
20589 #undef elf_match_priority
20590 #define elf_match_priority 128
20592 #define ELF_OSABI ELFOSABI_ARM_FDPIC
20594 /* Like elf32_arm_link_hash_table_create -- but overrides
20595 appropriately for FDPIC. */
20597 static struct bfd_link_hash_table
*
20598 elf32_arm_fdpic_link_hash_table_create (bfd
*abfd
)
20600 struct bfd_link_hash_table
*ret
;
20602 ret
= elf32_arm_link_hash_table_create (abfd
);
20605 struct elf32_arm_link_hash_table
*htab
= (struct elf32_arm_link_hash_table
*) ret
;
20612 /* We need dynamic symbols for every section, since segments can
20613 relocate independently. */
20615 elf32_arm_fdpic_omit_section_dynsym (bfd
*output_bfd ATTRIBUTE_UNUSED
,
20616 struct bfd_link_info
*info
20618 asection
*p ATTRIBUTE_UNUSED
)
20620 switch (elf_section_data (p
)->this_hdr
.sh_type
)
20624 /* If sh_type is yet undecided, assume it could be
20625 SHT_PROGBITS/SHT_NOBITS. */
20629 /* There shouldn't be section relative relocations
20630 against any other section. */
20637 #define elf32_bed elf32_arm_fdpic_bed
20639 #undef bfd_elf32_bfd_link_hash_table_create
20640 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_fdpic_link_hash_table_create
20642 #undef elf_backend_omit_section_dynsym
20643 #define elf_backend_omit_section_dynsym elf32_arm_fdpic_omit_section_dynsym
20645 #include "elf32-target.h"
20647 #undef elf_match_priority
20649 #undef elf_backend_omit_section_dynsym
20651 /* VxWorks Targets. */
20653 #undef TARGET_LITTLE_SYM
20654 #define TARGET_LITTLE_SYM arm_elf32_vxworks_le_vec
20655 #undef TARGET_LITTLE_NAME
20656 #define TARGET_LITTLE_NAME "elf32-littlearm-vxworks"
20657 #undef TARGET_BIG_SYM
20658 #define TARGET_BIG_SYM arm_elf32_vxworks_be_vec
20659 #undef TARGET_BIG_NAME
20660 #define TARGET_BIG_NAME "elf32-bigarm-vxworks"
20662 /* Like elf32_arm_link_hash_table_create -- but overrides
20663 appropriately for VxWorks. */
20665 static struct bfd_link_hash_table
*
20666 elf32_arm_vxworks_link_hash_table_create (bfd
*abfd
)
20668 struct bfd_link_hash_table
*ret
;
20670 ret
= elf32_arm_link_hash_table_create (abfd
);
20673 struct elf32_arm_link_hash_table
*htab
20674 = (struct elf32_arm_link_hash_table
*) ret
;
20676 htab
->vxworks_p
= 1;
20682 elf32_arm_vxworks_final_write_processing (bfd
*abfd
)
20684 arm_final_write_processing (abfd
);
20685 return elf_vxworks_final_write_processing (abfd
);
20689 #define elf32_bed elf32_arm_vxworks_bed
20691 #undef bfd_elf32_bfd_link_hash_table_create
20692 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_vxworks_link_hash_table_create
20693 #undef elf_backend_final_write_processing
20694 #define elf_backend_final_write_processing elf32_arm_vxworks_final_write_processing
20695 #undef elf_backend_emit_relocs
20696 #define elf_backend_emit_relocs elf_vxworks_emit_relocs
20698 #undef elf_backend_may_use_rel_p
20699 #define elf_backend_may_use_rel_p 0
20700 #undef elf_backend_may_use_rela_p
20701 #define elf_backend_may_use_rela_p 1
20702 #undef elf_backend_default_use_rela_p
20703 #define elf_backend_default_use_rela_p 1
20704 #undef elf_backend_want_plt_sym
20705 #define elf_backend_want_plt_sym 1
20706 #undef ELF_MAXPAGESIZE
20707 #define ELF_MAXPAGESIZE 0x1000
20709 #include "elf32-target.h"
20712 /* Merge backend specific data from an object file to the output
20713 object file when linking. */
20716 elf32_arm_merge_private_bfd_data (bfd
*ibfd
, struct bfd_link_info
*info
)
20718 bfd
*obfd
= info
->output_bfd
;
20719 flagword out_flags
;
20721 bfd_boolean flags_compatible
= TRUE
;
  /* Check if we have the same endianness.  */
  if (! _bfd_generic_verify_endian_match (ibfd, info))
    return FALSE;

  if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
    return TRUE;

  if (!elf32_arm_merge_eabi_attributes (ibfd, info))
    return FALSE;

  /* The input BFD must have had its flags initialised.  */
  /* The following seems bogus to me -- The flags are initialized in
     the assembler but I don't think an elf_flags_init field is
     written into the object.  */
  /* BFD_ASSERT (elf_flags_init (ibfd)); */
  in_flags  = elf_elfheader (ibfd)->e_flags;
  out_flags = elf_elfheader (obfd)->e_flags;

  /* In theory there is no reason why we couldn't handle this.  However
     in practice it isn't even close to working and there is no real
     reason to want it.  */
  if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
      && !(ibfd->flags & DYNAMIC)
      && (in_flags & EF_ARM_BE8))
    {
      _bfd_error_handler (_("error: %pB is already in final BE8 format"),
			  ibfd);
      return FALSE;
    }
  if (!elf_flags_init (obfd))
    {
      /* If the input is the default architecture and had the default
	 flags then do not bother setting the flags for the output
	 architecture, instead allow future merges to do this.  If no
	 future merges ever set these flags then they will retain their
	 uninitialised values, which, surprise surprise, correspond
	 to the default values.  */
      if (bfd_get_arch_info (ibfd)->the_default
	  && elf_elfheader (ibfd)->e_flags == 0)
	return TRUE;

      elf_flags_init (obfd) = TRUE;
      elf_elfheader (obfd)->e_flags = in_flags;

      if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
	  && bfd_get_arch_info (obfd)->the_default)
	return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));

      return TRUE;
    }
  /* Determine what should happen if the input ARM architecture
     does not match the output ARM architecture.  */
  if (! bfd_arm_merge_machines (ibfd, obfd))
    return FALSE;

  /* Identical flags must be compatible.  */
  if (in_flags == out_flags)
    return TRUE;

  /* Check to see if the input BFD actually contains any sections.  If
     not, its flags may not have been initialised either, but it
     cannot actually cause any incompatibility.  Do not short-circuit
     dynamic objects; their section list may be emptied by
     elf_link_add_object_symbols.

     Also check to see if there are no code sections in the input.
     In this case there is no need to check for code specific flags.
     XXX - do we need to worry about floating-point format compatibility
     in data sections?  */
  if (!(ibfd->flags & DYNAMIC))
    {
      bfd_boolean null_input_bfd = TRUE;
      bfd_boolean only_data_sections = TRUE;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  /* Ignore synthetic glue sections.  */
	  if (strcmp (sec->name, ".glue_7")
	      && strcmp (sec->name, ".glue_7t"))
	    {
	      if ((bfd_section_flags (sec)
		   & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
		  == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
		only_data_sections = FALSE;

	      null_input_bfd = FALSE;
	      break;
	    }
	}

      if (null_input_bfd || only_data_sections)
	return TRUE;
    }
  /* Complain about various flag mismatches.  */
  if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
				      EF_ARM_EABI_VERSION (out_flags)))
    {
      _bfd_error_handler
	(_("error: source object %pB has EABI version %d, but target %pB has EABI version %d"),
	 ibfd, (in_flags & EF_ARM_EABIMASK) >> 24,
	 obfd, (out_flags & EF_ARM_EABIMASK) >> 24);
      return FALSE;
    }
  /* Not sure what needs to be checked for EABI versions >= 1.  */
  /* VxWorks libraries do not use these flags.  */
  if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
      && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
      && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
    {
      if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
	{
	  _bfd_error_handler
	    (_("error: %pB is compiled for APCS-%d, whereas target %pB uses APCS-%d"),
	     ibfd, in_flags & EF_ARM_APCS_26 ? 26 : 32,
	     obfd, out_flags & EF_ARM_APCS_26 ? 26 : 32);
	  flags_compatible = FALSE;
	}
      if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
	{
	  if (in_flags & EF_ARM_APCS_FLOAT)
	    _bfd_error_handler
	      (_("error: %pB passes floats in float registers, whereas %pB passes them in integer registers"),
	       ibfd, obfd);
	  else
	    _bfd_error_handler
	      (_("error: %pB passes floats in integer registers, whereas %pB passes them in float registers"),
	       ibfd, obfd);

	  flags_compatible = FALSE;
	}
      if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
	{
	  if (in_flags & EF_ARM_VFP_FLOAT)
	    _bfd_error_handler
	      (_("error: %pB uses %s instructions, whereas %pB does not"),
	       ibfd, "VFP", obfd);
	  else
	    _bfd_error_handler
	      (_("error: %pB uses %s instructions, whereas %pB does not"),
	       ibfd, "FPA", obfd);

	  flags_compatible = FALSE;
	}
      if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
	{
	  if (in_flags & EF_ARM_MAVERICK_FLOAT)
	    _bfd_error_handler
	      (_("error: %pB uses %s instructions, whereas %pB does not"),
	       ibfd, "Maverick", obfd);
	  else
	    _bfd_error_handler
	      (_("error: %pB does not use %s instructions, whereas %pB does"),
	       ibfd, "Maverick", obfd);

	  flags_compatible = FALSE;
	}
#ifdef EF_ARM_SOFT_FLOAT
      if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
	{
	  /* We can allow interworking between code that is VFP format
	     layout, and uses either soft float or integer regs for
	     passing floating point arguments and results.  We already
	     know that the APCS_FLOAT flags match; similarly for VFP
	     flags.  */
	  if ((in_flags & EF_ARM_APCS_FLOAT) != 0
	      || (in_flags & EF_ARM_VFP_FLOAT) == 0)
	    {
	      if (in_flags & EF_ARM_SOFT_FLOAT)
		_bfd_error_handler
		  (_("error: %pB uses software FP, whereas %pB uses hardware FP"),
		   ibfd, obfd);
	      else
		_bfd_error_handler
		  (_("error: %pB uses hardware FP, whereas %pB uses software FP"),
		   ibfd, obfd);

	      flags_compatible = FALSE;
	    }
	}
#endif
      /* Interworking mismatch is only a warning.  */
      if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
	{
	  if (in_flags & EF_ARM_INTERWORK)
	    {
	      _bfd_error_handler
		(_("warning: %pB supports interworking, whereas %pB does not"),
		 ibfd, obfd);
	    }
	  else
	    {
	      _bfd_error_handler
		(_("warning: %pB does not support interworking, whereas %pB does"),
		 ibfd, obfd);
	    }
	}
    }

  return flags_compatible;
}
/* Symbian OS Targets.  */

#undef  TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM		arm_elf32_symbian_le_vec
#undef  TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME		"elf32-littlearm-symbian"
#undef  TARGET_BIG_SYM
#define TARGET_BIG_SYM			arm_elf32_symbian_be_vec
#undef  TARGET_BIG_NAME
#define TARGET_BIG_NAME			"elf32-bigarm-symbian"
/* Like elf32_arm_link_hash_table_create -- but overrides
   appropriately for Symbian OS.  */

static struct bfd_link_hash_table *
elf32_arm_symbian_link_hash_table_create (bfd *abfd)
{
  struct bfd_link_hash_table *ret;

  ret = elf32_arm_link_hash_table_create (abfd);
  if (ret)
    {
      struct elf32_arm_link_hash_table *htab
	= (struct elf32_arm_link_hash_table *) ret;
      /* There is no PLT header for Symbian OS.  */
      htab->plt_header_size = 0;
      /* The PLT entries are each one instruction and one word.  */
      htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry);
      htab->symbian_p = 1;
      /* Symbian uses armv5t or above, so use_blx is always true.  */
      htab->use_blx = 1;
      htab->root.is_relocatable_executable = 1;
    }
  return ret;
}
static const struct bfd_elf_special_section
elf32_arm_symbian_special_sections[] =
{
  /* In a BPABI executable, the dynamic linking sections do not go in
     the loadable read-only segment.  The post-linker may wish to
     refer to these sections, but they are not part of the final
     program image.  */
  { STRING_COMMA_LEN (".dynamic"),	 0, SHT_DYNAMIC,  0 },
  { STRING_COMMA_LEN (".dynstr"),	 0, SHT_STRTAB,	  0 },
  { STRING_COMMA_LEN (".dynsym"),	 0, SHT_DYNSYM,	  0 },
  { STRING_COMMA_LEN (".got"),		 0, SHT_PROGBITS, 0 },
  { STRING_COMMA_LEN (".hash"),		 0, SHT_HASH,	  0 },
  /* These sections do not need to be writable as the SymbianOS
     postlinker will arrange things so that no dynamic relocation is
     required.  */
  { STRING_COMMA_LEN (".init_array"),	 0, SHT_INIT_ARRAY,    SHF_ALLOC },
  { STRING_COMMA_LEN (".fini_array"),	 0, SHT_FINI_ARRAY,    SHF_ALLOC },
  { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC },
  { NULL,				 0, 0, 0,	       0 }
};
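/* Reading the table above: STRING_COMMA_LEN supplies the section name
   and its length; the following 0 is the suffix length (the name must
   match exactly); then come the ELF section type and the extra SHF_*
   flags -- SHF_ALLOC for the array sections, none for the dynamic
   linking sections.  */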
static void
elf32_arm_symbian_begin_write_processing (bfd *abfd,
					  struct bfd_link_info *link_info)
{
  /* BPABI objects are never loaded directly by an OS kernel; they are
     processed by a postlinker first, into an OS-specific format.  If
     the D_PAGED bit is set on the file, BFD will align segments on
     page boundaries, so that an OS can directly map the file.  With
     BPABI objects, that just results in wasted space.  In addition,
     because we clear the D_PAGED bit, map_sections_to_segments will
     recognize that the program headers should not be mapped into any
     loadable segment.  */
  abfd->flags &= ~D_PAGED;
  elf32_arm_begin_write_processing (abfd, link_info);
}
static bfd_boolean
elf32_arm_symbian_modify_segment_map (bfd *abfd,
				      struct bfd_link_info *info)
{
  struct elf_segment_map *m;
  asection *dynsec;

  /* BPABI shared libraries and executables should have a PT_DYNAMIC
     segment.  However, because the .dynamic section is not marked
     with SEC_LOAD, the generic ELF code will not create such a
     segment.  */
  dynsec = bfd_get_section_by_name (abfd, ".dynamic");
  if (dynsec)
    {
      for (m = elf_seg_map (abfd); m != NULL; m = m->next)
	if (m->p_type == PT_DYNAMIC)
	  break;

      if (m == NULL)
	{
	  m = _bfd_elf_make_dynamic_segment (abfd, dynsec);
	  m->next = elf_seg_map (abfd);
	  elf_seg_map (abfd) = m;
	}
    }

  /* Also call the generic arm routine.  */
  return elf32_arm_modify_segment_map (abfd, info);
}
/* Return address for Ith PLT stub in section PLT, for relocation REL
   or (bfd_vma) -1 if it should not be included.  */

static bfd_vma
elf32_arm_symbian_plt_sym_val (bfd_vma i, const asection *plt,
			       const arelent *rel ATTRIBUTE_UNUSED)
{
  return plt->vma + 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) * i;
}
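/* Worked example: with PLT entries of one instruction plus one data
   word, 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) comes to 8 bytes,
   so stub 0 is reported at plt->vma, stub 1 at plt->vma + 8, stub 2 at
   plt->vma + 16, and so on.  */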
#undef  elf32_bed
#define elf32_bed				elf32_arm_symbian_bed

/* The dynamic sections are not allocated on SymbianOS; the postlinker
   will process them and then discard them.  */
#undef  ELF_DYNAMIC_SEC_FLAGS
#define ELF_DYNAMIC_SEC_FLAGS \
  (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED)

#undef  elf_backend_emit_relocs

#undef  bfd_elf32_bfd_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_create	elf32_arm_symbian_link_hash_table_create
#undef  elf_backend_special_sections
#define elf_backend_special_sections		elf32_arm_symbian_special_sections
#undef  elf_backend_begin_write_processing
#define elf_backend_begin_write_processing	elf32_arm_symbian_begin_write_processing
#undef  elf_backend_final_write_processing
#define elf_backend_final_write_processing	elf32_arm_final_write_processing

#undef  elf_backend_modify_segment_map
#define elf_backend_modify_segment_map		elf32_arm_symbian_modify_segment_map

/* There is no .got section for BPABI objects, and hence no header.  */
#undef  elf_backend_got_header_size
#define elf_backend_got_header_size		0

/* Similarly, there is no .got.plt section.  */
#undef  elf_backend_want_got_plt
#define elf_backend_want_got_plt		0

#undef  elf_backend_plt_sym_val
#define elf_backend_plt_sym_val			elf32_arm_symbian_plt_sym_val

#undef  elf_backend_may_use_rel_p
#define elf_backend_may_use_rel_p	1
#undef  elf_backend_may_use_rela_p
#define elf_backend_may_use_rela_p	0
#undef  elf_backend_default_use_rela_p
#define elf_backend_default_use_rela_p	0
#undef  elf_backend_want_plt_sym
#define elf_backend_want_plt_sym	0
#undef  elf_backend_dtrel_excludes_plt
#define elf_backend_dtrel_excludes_plt	0
#undef  ELF_MAXPAGESIZE
#define ELF_MAXPAGESIZE			0x8000

#include "elf32-target.h"