1 /* ELF support for AArch64.
2 Copyright 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of BFD, the Binary File Descriptor library.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
21 /* Notes on implementation:
23 Thread Local Store (TLS)
27 The implementation currently supports both traditional TLS and TLS
28 descriptors, but only general dynamic (GD).
30 For traditional TLS the assembler will present us with code
31 fragments of the form:
34 R_AARCH64_TLSGD_ADR_PAGE21(foo)
35 add x0, :tlsgd_lo12:foo
36 R_AARCH64_TLSGD_ADD_LO12_NC(foo)
40 For TLS descriptors the assembler will present us with code
41 fragments of the form:
43 adrp x0, :tlsdesc:foo R_AARCH64_TLSDESC_ADR_PAGE(foo)
44 ldr x1, [x0, #:tlsdesc_lo12:foo] R_AARCH64_TLSDESC_LD64_LO12(foo)
45 add x0, x0, #:tlsdesc_lo12:foo R_AARCH64_TLSDESC_ADD_LO12(foo)
47 blr x1 R_AARCH64_TLSDESC_CALL(foo)
49 The relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} against foo
50 indicate that foo is thread local and should be accessed via the
51 traditional TLS mechanism.
53 The relocations R_AARCH64_TLSDESC_{ADR_PAGE,LD64_LO12_NC,ADD_LO12_NC}
54 against foo indicate that 'foo' is thread local and should be accessed
55 via a TLS descriptor mechanism.
57 The precise instruction sequence is only relevant from the
58 perspective of linker relaxation which is currently not implemented.
60 The static linker must detect that 'foo' is a TLS object and
61 allocate a double GOT entry. The GOT entry must be created for both
62 global and local TLS symbols. Note that this is different to non-TLS
63 local objects which do not need a GOT entry.
65 In the traditional TLS mechanism, the double GOT entry is used to
66 provide the tls_index structure, containing module and offset
67 entries. The static linker places the relocation R_AARCH64_TLS_DTPMOD64
68 on the module entry. The loader will subsequently fixup this
69 relocation with the module identity.
71 For global traditional TLS symbols the static linker places an
72 R_AARCH64_TLS_DTPREL64 relocation on the offset entry. The loader
73 will subsequently fixup the offset. For local TLS symbols the static
74 linker fixes up the offset.
76 In the TLS descriptor mechanism the double GOT entry is used to
77 provide the descriptor. The static linker places the relocation
78 R_AARCH64_TLSDESC on the first GOT slot. The loader will
79 subsequently fix this up.
83 The handling of TLS symbols is implemented across a number of
84 different backend functions. The following is a top level view of
85 what processing is performed where.
87 The TLS implementation maintains state information for each TLS
88 symbol. The state information for local and global symbols is kept
89 in different places. Global symbols use generic BFD structures while
90 local symbols use backend specific structures that are allocated and
91 maintained entirely by the backend.
95 aarch64_check_relocs()
97 This function is invoked for each relocation.
99 The TLS relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} and
100 R_AARCH64_TLSDESC_{ADR_PAGE,LD64_LO12_NC,ADD_LO12_NC} are
101 spotted. One time creation of local symbol data structures are
102 created when the first local symbol is seen.
104 The reference count for a symbol is incremented. The GOT type for
105 each symbol is marked as general dynamic.
107 elf64_aarch64_allocate_dynrelocs ()
109 For each global with positive reference count we allocate a double
110 GOT slot. For a traditional TLS symbol we allocate space for two
111 relocation entries on the GOT, for a TLS descriptor symbol we
112 allocate space for one relocation on the slot. Record the GOT offset allocated for this symbol.
115 elf64_aarch64_size_dynamic_sections ()
117 Iterate all input BFDS, look for in the local symbol data structure
118 constructed earlier for local TLS symbols and allocate them double
119 GOT slots along with space for a single GOT relocation. Update the
120 local symbol structure to record the GOT offset allocated.
122 elf64_aarch64_relocate_section ()
124 Calls elf64_aarch64_final_link_relocate ()
126 Emit the relevant TLS relocations against the GOT for each TLS
127 symbol. For local TLS symbols emit the GOT offset directly. The GOT
128 relocations are emitted once the first time a TLS symbol is
129 encountered. The implementation uses the LSB of the GOT offset to
130 flag that the relevant GOT relocations for a symbol have been
131 emitted. All of the TLS code that uses the GOT offset needs to take
132 care to mask out this flag bit before using the offset.
134 elf64_aarch64_final_link_relocate ()
136 Fixup the R_AARCH64_TLSGD_{ADR_PREL21, ADD_LO12_NC} relocations. */
140 #include "libiberty.h"
142 #include "bfd_stdint.h"
145 #include "elf/aarch64.h"
/* Forward declaration: install ADDEND into the relocation field described
   by HOWTO at a location within ABFD.  NOTE(review): compared with the
   upstream elf64-aarch64.c this prototype appears to have lost its
   "bfd_byte *address" parameter line in extraction -- verify before use.  */
147 static bfd_reloc_status_type
148 bfd_elf_aarch64_put_addend (bfd
*abfd
,
150 reloc_howto_type
*howto
, bfd_signed_vma addend
);
/* Nonzero when R_TYPE is any AArch64 TLS relocation: general dynamic (GD),
   initial exec (IE), local exec (LE), the dynamic TLS relocations, or any
   TLS descriptor relocation.  This expands IS_AARCH64_TLSDESC_RELOC, which
   is defined below; that is fine for macros, which are only expanded at
   their point of use.  */
152 #define IS_AARCH64_TLS_RELOC(R_TYPE) \
153 ((R_TYPE) == R_AARCH64_TLSGD_ADR_PAGE21 \
154 || (R_TYPE) == R_AARCH64_TLSGD_ADD_LO12_NC \
155 || (R_TYPE) == R_AARCH64_TLSIE_MOVW_GOTTPREL_G1 \
156 || (R_TYPE) == R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC \
157 || (R_TYPE) == R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 \
158 || (R_TYPE) == R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC \
159 || (R_TYPE) == R_AARCH64_TLSIE_LD_GOTTPREL_PREL19 \
160 || (R_TYPE) == R_AARCH64_TLSLE_ADD_TPREL_LO12 \
161 || (R_TYPE) == R_AARCH64_TLSLE_ADD_TPREL_HI12 \
162 || (R_TYPE) == R_AARCH64_TLSLE_ADD_TPREL_LO12_NC \
163 || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G2 \
164 || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G1 \
165 || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G1_NC \
166 || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G0 \
167 || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G0_NC \
168 || (R_TYPE) == R_AARCH64_TLS_DTPMOD64 \
169 || (R_TYPE) == R_AARCH64_TLS_DTPREL64 \
170 || (R_TYPE) == R_AARCH64_TLS_TPREL64 \
171 || IS_AARCH64_TLSDESC_RELOC ((R_TYPE)))

/* Nonzero when R_TYPE belongs to the TLS descriptor mechanism, including
   the dynamic R_AARCH64_TLSDESC relocation itself.  */
173 #define IS_AARCH64_TLSDESC_RELOC(R_TYPE) \
174 ((R_TYPE) == R_AARCH64_TLSDESC_LD64_PREL19 \
175 || (R_TYPE) == R_AARCH64_TLSDESC_ADR_PREL21 \
176 || (R_TYPE) == R_AARCH64_TLSDESC_ADR_PAGE \
177 || (R_TYPE) == R_AARCH64_TLSDESC_ADD_LO12_NC \
178 || (R_TYPE) == R_AARCH64_TLSDESC_LD64_LO12_NC \
179 || (R_TYPE) == R_AARCH64_TLSDESC_OFF_G1 \
180 || (R_TYPE) == R_AARCH64_TLSDESC_OFF_G0_NC \
181 || (R_TYPE) == R_AARCH64_TLSDESC_LDR \
182 || (R_TYPE) == R_AARCH64_TLSDESC_ADD \
183 || (R_TYPE) == R_AARCH64_TLSDESC_CALL \
184 || (R_TYPE) == R_AARCH64_TLSDESC)
/* This backend never converts dynamic relocations against globals into
   copy relocations.  */
186 #define ELIMINATE_COPY_RELOCS 0

188 /* Return the relocation section associated with NAME. HTAB is the
189 bfd's elf64_aarch64_link_hash_entry. */
190 #define RELOC_SECTION(HTAB, NAME) \
191 ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)

193 /* Return size of a relocation entry. HTAB is the bfd's
194 elf64_aarch64_link_hash_entry. */
195 #define RELOC_SIZE(HTAB) (sizeof (Elf64_External_Rela))

197 /* Return function to swap relocations in. HTAB is the bfd's
198 elf64_aarch64_link_hash_entry. */
199 #define SWAP_RELOC_IN(HTAB) (bfd_elf64_swap_reloca_in)

201 /* Return function to swap relocations out. HTAB is the bfd's
202 elf64_aarch64_link_hash_entry. */
203 #define SWAP_RELOC_OUT(HTAB) (bfd_elf64_swap_reloca_out)

205 /* GOT Entry size - 8 bytes. */
206 #define GOT_ENTRY_SIZE (8)
/* Sizes in bytes of the PLT entry flavours defined below.  */
207 #define PLT_ENTRY_SIZE (32)
208 #define PLT_SMALL_ENTRY_SIZE (16)
209 #define PLT_TLSDESC_ENTRY_SIZE (32)

211 /* Take the PAGE component of an address or offset. */
212 #define PG(x) ((x) & ~ 0xfff)
/* Take the low 12 bits (the within-page offset) of an address.  */
213 #define PG_OFFSET(x) ((x) & 0xfff)

215 /* Encoding of the nop instruction */
216 #define INSN_NOP 0xd503201f

/* Bytes of GOT consumed by the PLT's relocation slots: one
   GOT_ENTRY_SIZE slot per .rela.plt relocation, or zero when there is
   no .rela.plt section.  */
218 #define aarch64_compute_jump_table_size(htab) \
219 (((htab)->root.srelplt == NULL) ? 0 \
220 : (htab)->root.srelplt->reloc_count * GOT_ENTRY_SIZE)
222 /* The first entry in a procedure linkage table looks like this
223 if the distance between the PLTGOT and the PLT is < 4GB use
224 these PLT entries. Note that the dynamic linker gets &PLTGOT[2]
225 in x16 and needs to work out PLTGOT[1] by using an address of
227 static const bfd_byte elf64_aarch64_small_plt0_entry
[PLT_ENTRY_SIZE
] =
229 0xf0, 0x7b, 0xbf, 0xa9, /* stp x16, x30, [sp, #-16]! */
230 0x10, 0x00, 0x00, 0x90, /* adrp x16, (GOT+16) */
231 0x11, 0x0A, 0x40, 0xf9, /* ldr x17, [x16, #PLT_GOT+0x10] */
232 0x10, 0x42, 0x00, 0x91, /* add x16, x16,#PLT_GOT+0x10 */
233 0x20, 0x02, 0x1f, 0xd6, /* br x17 */
234 0x1f, 0x20, 0x03, 0xd5, /* nop */
235 0x1f, 0x20, 0x03, 0xd5, /* nop */
236 0x1f, 0x20, 0x03, 0xd5, /* nop */
239 /* Per function entry in a procedure linkage table looks like this
240 if the distance between the PLTGOT and the PLT is < 4GB use
241 these PLT entries. */
242 static const bfd_byte elf64_aarch64_small_plt_entry
[PLT_SMALL_ENTRY_SIZE
] =
244 0x10, 0x00, 0x00, 0x90, /* adrp x16, PLTGOT + n * 8 */
245 0x11, 0x02, 0x40, 0xf9, /* ldr x17, [x16, PLTGOT + n * 8] */
246 0x10, 0x02, 0x00, 0x91, /* add x16, x16, :lo12:PLTGOT + n * 8 */
247 0x20, 0x02, 0x1f, 0xd6, /* br x17. */
250 static const bfd_byte
251 elf64_aarch64_tlsdesc_small_plt_entry
[PLT_TLSDESC_ENTRY_SIZE
] =
253 0xe2, 0x0f, 0xbf, 0xa9, /* stp x2, x3, [sp, #-16]! */
254 0x02, 0x00, 0x00, 0x90, /* adrp x2, 0 */
255 0x03, 0x00, 0x00, 0x90, /* adrp x3, 0 */
256 0x42, 0x08, 0x40, 0xF9, /* ldr x2, [x2, #0] */
257 0x63, 0x00, 0x00, 0x91, /* add x3, x3, 0 */
258 0x40, 0x00, 0x1F, 0xD6, /* br x2 */
259 0x1f, 0x20, 0x03, 0xd5, /* nop */
260 0x1f, 0x20, 0x03, 0xd5, /* nop */
/* Route the generic ELF info_to_howto hooks to this backend's
   implementation; the same routine serves both REL and RELA forms.  */
263 #define elf_info_to_howto elf64_aarch64_info_to_howto
264 #define elf_info_to_howto_rel elf64_aarch64_info_to_howto

266 #define AARCH64_ELF_ABI_VERSION 0
267 #define AARCH64_ELF_OS_ABI_VERSION 0

269 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
270 #define ALL_ONES (~ (bfd_vma) 0)
272 static reloc_howto_type elf64_aarch64_howto_none
=
273 HOWTO (R_AARCH64_NONE
, /* type */
275 0, /* size (0 = byte, 1 = short, 2 = long) */
277 FALSE
, /* pc_relative */
279 complain_overflow_dont
,/* complain_on_overflow */
280 bfd_elf_generic_reloc
, /* special_function */
281 "R_AARCH64_NONE", /* name */
282 FALSE
, /* partial_inplace */
285 FALSE
); /* pcrel_offset */
287 static reloc_howto_type elf64_aarch64_howto_dynrelocs
[] =
289 HOWTO (R_AARCH64_COPY
, /* type */
291 2, /* size (0 = byte, 1 = short, 2 = long) */
293 FALSE
, /* pc_relative */
295 complain_overflow_bitfield
, /* complain_on_overflow */
296 bfd_elf_generic_reloc
, /* special_function */
297 "R_AARCH64_COPY", /* name */
298 TRUE
, /* partial_inplace */
299 0xffffffff, /* src_mask */
300 0xffffffff, /* dst_mask */
301 FALSE
), /* pcrel_offset */
303 HOWTO (R_AARCH64_GLOB_DAT
, /* type */
305 2, /* size (0 = byte, 1 = short, 2 = long) */
307 FALSE
, /* pc_relative */
309 complain_overflow_bitfield
, /* complain_on_overflow */
310 bfd_elf_generic_reloc
, /* special_function */
311 "R_AARCH64_GLOB_DAT", /* name */
312 TRUE
, /* partial_inplace */
313 0xffffffff, /* src_mask */
314 0xffffffff, /* dst_mask */
315 FALSE
), /* pcrel_offset */
317 HOWTO (R_AARCH64_JUMP_SLOT
, /* type */
319 2, /* size (0 = byte, 1 = short, 2 = long) */
321 FALSE
, /* pc_relative */
323 complain_overflow_bitfield
, /* complain_on_overflow */
324 bfd_elf_generic_reloc
, /* special_function */
325 "R_AARCH64_JUMP_SLOT", /* name */
326 TRUE
, /* partial_inplace */
327 0xffffffff, /* src_mask */
328 0xffffffff, /* dst_mask */
329 FALSE
), /* pcrel_offset */
331 HOWTO (R_AARCH64_RELATIVE
, /* type */
333 2, /* size (0 = byte, 1 = short, 2 = long) */
335 FALSE
, /* pc_relative */
337 complain_overflow_bitfield
, /* complain_on_overflow */
338 bfd_elf_generic_reloc
, /* special_function */
339 "R_AARCH64_RELATIVE", /* name */
340 TRUE
, /* partial_inplace */
341 ALL_ONES
, /* src_mask */
342 ALL_ONES
, /* dst_mask */
343 FALSE
), /* pcrel_offset */
345 HOWTO (R_AARCH64_TLS_DTPMOD64
, /* type */
347 2, /* size (0 = byte, 1 = short, 2 = long) */
349 FALSE
, /* pc_relative */
351 complain_overflow_dont
, /* complain_on_overflow */
352 bfd_elf_generic_reloc
, /* special_function */
353 "R_AARCH64_TLS_DTPMOD64", /* name */
354 FALSE
, /* partial_inplace */
356 ALL_ONES
, /* dst_mask */
357 FALSE
), /* pc_reloffset */
359 HOWTO (R_AARCH64_TLS_DTPREL64
, /* type */
361 2, /* size (0 = byte, 1 = short, 2 = long) */
363 FALSE
, /* pc_relative */
365 complain_overflow_dont
, /* complain_on_overflow */
366 bfd_elf_generic_reloc
, /* special_function */
367 "R_AARCH64_TLS_DTPREL64", /* name */
368 FALSE
, /* partial_inplace */
370 ALL_ONES
, /* dst_mask */
371 FALSE
), /* pcrel_offset */
373 HOWTO (R_AARCH64_TLS_TPREL64
, /* type */
375 2, /* size (0 = byte, 1 = short, 2 = long) */
377 FALSE
, /* pc_relative */
379 complain_overflow_dont
, /* complain_on_overflow */
380 bfd_elf_generic_reloc
, /* special_function */
381 "R_AARCH64_TLS_TPREL64", /* name */
382 FALSE
, /* partial_inplace */
384 ALL_ONES
, /* dst_mask */
385 FALSE
), /* pcrel_offset */
387 HOWTO (R_AARCH64_TLSDESC
, /* type */
389 2, /* size (0 = byte, 1 = short, 2 = long) */
391 FALSE
, /* pc_relative */
393 complain_overflow_dont
, /* complain_on_overflow */
394 bfd_elf_generic_reloc
, /* special_function */
395 "R_AARCH64_TLSDESC", /* name */
396 FALSE
, /* partial_inplace */
398 ALL_ONES
, /* dst_mask */
399 FALSE
), /* pcrel_offset */
403 /* Note: code such as elf64_aarch64_reloc_type_lookup expect to use e.g.
404 R_AARCH64_PREL64 as an index into this, and find the R_AARCH64_PREL64 HOWTO
in that slot.  */
407 static reloc_howto_type elf64_aarch64_howto_table
[] =
409 /* Basic data relocations. */
411 HOWTO (R_AARCH64_NULL
, /* type */
413 0, /* size (0 = byte, 1 = short, 2 = long) */
415 FALSE
, /* pc_relative */
417 complain_overflow_dont
, /* complain_on_overflow */
418 bfd_elf_generic_reloc
, /* special_function */
419 "R_AARCH64_NULL", /* name */
420 FALSE
, /* partial_inplace */
423 FALSE
), /* pcrel_offset */
426 HOWTO (R_AARCH64_ABS64
, /* type */
428 4, /* size (4 = long long) */
430 FALSE
, /* pc_relative */
432 complain_overflow_unsigned
, /* complain_on_overflow */
433 bfd_elf_generic_reloc
, /* special_function */
434 "R_AARCH64_ABS64", /* name */
435 FALSE
, /* partial_inplace */
436 ALL_ONES
, /* src_mask */
437 ALL_ONES
, /* dst_mask */
438 FALSE
), /* pcrel_offset */
441 HOWTO (R_AARCH64_ABS32
, /* type */
443 2, /* size (0 = byte, 1 = short, 2 = long) */
445 FALSE
, /* pc_relative */
447 complain_overflow_unsigned
, /* complain_on_overflow */
448 bfd_elf_generic_reloc
, /* special_function */
449 "R_AARCH64_ABS32", /* name */
450 FALSE
, /* partial_inplace */
451 0xffffffff, /* src_mask */
452 0xffffffff, /* dst_mask */
453 FALSE
), /* pcrel_offset */
456 HOWTO (R_AARCH64_ABS16
, /* type */
458 1, /* size (0 = byte, 1 = short, 2 = long) */
460 FALSE
, /* pc_relative */
462 complain_overflow_unsigned
, /* complain_on_overflow */
463 bfd_elf_generic_reloc
, /* special_function */
464 "R_AARCH64_ABS16", /* name */
465 FALSE
, /* partial_inplace */
466 0xffff, /* src_mask */
467 0xffff, /* dst_mask */
468 FALSE
), /* pcrel_offset */
470 /* .xword: (S+A-P) */
471 HOWTO (R_AARCH64_PREL64
, /* type */
473 4, /* size (4 = long long) */
475 TRUE
, /* pc_relative */
477 complain_overflow_signed
, /* complain_on_overflow */
478 bfd_elf_generic_reloc
, /* special_function */
479 "R_AARCH64_PREL64", /* name */
480 FALSE
, /* partial_inplace */
481 ALL_ONES
, /* src_mask */
482 ALL_ONES
, /* dst_mask */
483 TRUE
), /* pcrel_offset */
486 HOWTO (R_AARCH64_PREL32
, /* type */
488 2, /* size (0 = byte, 1 = short, 2 = long) */
490 TRUE
, /* pc_relative */
492 complain_overflow_signed
, /* complain_on_overflow */
493 bfd_elf_generic_reloc
, /* special_function */
494 "R_AARCH64_PREL32", /* name */
495 FALSE
, /* partial_inplace */
496 0xffffffff, /* src_mask */
497 0xffffffff, /* dst_mask */
498 TRUE
), /* pcrel_offset */
501 HOWTO (R_AARCH64_PREL16
, /* type */
503 1, /* size (0 = byte, 1 = short, 2 = long) */
505 TRUE
, /* pc_relative */
507 complain_overflow_signed
, /* complain_on_overflow */
508 bfd_elf_generic_reloc
, /* special_function */
509 "R_AARCH64_PREL16", /* name */
510 FALSE
, /* partial_inplace */
511 0xffff, /* src_mask */
512 0xffff, /* dst_mask */
513 TRUE
), /* pcrel_offset */
515 /* Group relocations to create a 16, 32, 48 or 64 bit
516 unsigned data or abs address inline. */
518 /* MOVZ: ((S+A) >> 0) & 0xffff */
519 HOWTO (R_AARCH64_MOVW_UABS_G0
, /* type */
521 2, /* size (0 = byte, 1 = short, 2 = long) */
523 FALSE
, /* pc_relative */
525 complain_overflow_unsigned
, /* complain_on_overflow */
526 bfd_elf_generic_reloc
, /* special_function */
527 "R_AARCH64_MOVW_UABS_G0", /* name */
528 FALSE
, /* partial_inplace */
529 0xffff, /* src_mask */
530 0xffff, /* dst_mask */
531 FALSE
), /* pcrel_offset */
533 /* MOVK: ((S+A) >> 0) & 0xffff [no overflow check] */
534 HOWTO (R_AARCH64_MOVW_UABS_G0_NC
, /* type */
536 2, /* size (0 = byte, 1 = short, 2 = long) */
538 FALSE
, /* pc_relative */
540 complain_overflow_dont
, /* complain_on_overflow */
541 bfd_elf_generic_reloc
, /* special_function */
542 "R_AARCH64_MOVW_UABS_G0_NC", /* name */
543 FALSE
, /* partial_inplace */
544 0xffff, /* src_mask */
545 0xffff, /* dst_mask */
546 FALSE
), /* pcrel_offset */
548 /* MOVZ: ((S+A) >> 16) & 0xffff */
549 HOWTO (R_AARCH64_MOVW_UABS_G1
, /* type */
551 2, /* size (0 = byte, 1 = short, 2 = long) */
553 FALSE
, /* pc_relative */
555 complain_overflow_unsigned
, /* complain_on_overflow */
556 bfd_elf_generic_reloc
, /* special_function */
557 "R_AARCH64_MOVW_UABS_G1", /* name */
558 FALSE
, /* partial_inplace */
559 0xffff, /* src_mask */
560 0xffff, /* dst_mask */
561 FALSE
), /* pcrel_offset */
563 /* MOVK: ((S+A) >> 16) & 0xffff [no overflow check] */
564 HOWTO (R_AARCH64_MOVW_UABS_G1_NC
, /* type */
566 2, /* size (0 = byte, 1 = short, 2 = long) */
568 FALSE
, /* pc_relative */
570 complain_overflow_dont
, /* complain_on_overflow */
571 bfd_elf_generic_reloc
, /* special_function */
572 "R_AARCH64_MOVW_UABS_G1_NC", /* name */
573 FALSE
, /* partial_inplace */
574 0xffff, /* src_mask */
575 0xffff, /* dst_mask */
576 FALSE
), /* pcrel_offset */
578 /* MOVZ: ((S+A) >> 32) & 0xffff */
579 HOWTO (R_AARCH64_MOVW_UABS_G2
, /* type */
581 2, /* size (0 = byte, 1 = short, 2 = long) */
583 FALSE
, /* pc_relative */
585 complain_overflow_unsigned
, /* complain_on_overflow */
586 bfd_elf_generic_reloc
, /* special_function */
587 "R_AARCH64_MOVW_UABS_G2", /* name */
588 FALSE
, /* partial_inplace */
589 0xffff, /* src_mask */
590 0xffff, /* dst_mask */
591 FALSE
), /* pcrel_offset */
593 /* MOVK: ((S+A) >> 32) & 0xffff [no overflow check] */
594 HOWTO (R_AARCH64_MOVW_UABS_G2_NC
, /* type */
596 2, /* size (0 = byte, 1 = short, 2 = long) */
598 FALSE
, /* pc_relative */
600 complain_overflow_dont
, /* complain_on_overflow */
601 bfd_elf_generic_reloc
, /* special_function */
602 "R_AARCH64_MOVW_UABS_G2_NC", /* name */
603 FALSE
, /* partial_inplace */
604 0xffff, /* src_mask */
605 0xffff, /* dst_mask */
606 FALSE
), /* pcrel_offset */
608 /* MOVZ: ((S+A) >> 48) & 0xffff */
609 HOWTO (R_AARCH64_MOVW_UABS_G3
, /* type */
611 2, /* size (0 = byte, 1 = short, 2 = long) */
613 FALSE
, /* pc_relative */
615 complain_overflow_unsigned
, /* complain_on_overflow */
616 bfd_elf_generic_reloc
, /* special_function */
617 "R_AARCH64_MOVW_UABS_G3", /* name */
618 FALSE
, /* partial_inplace */
619 0xffff, /* src_mask */
620 0xffff, /* dst_mask */
621 FALSE
), /* pcrel_offset */
623 /* Group relocations to create high part of a 16, 32, 48 or 64 bit
624 signed data or abs address inline. Will change instruction
625 to MOVN or MOVZ depending on sign of calculated value. */
627 /* MOV[ZN]: ((S+A) >> 0) & 0xffff */
628 HOWTO (R_AARCH64_MOVW_SABS_G0
, /* type */
630 2, /* size (0 = byte, 1 = short, 2 = long) */
632 FALSE
, /* pc_relative */
634 complain_overflow_signed
, /* complain_on_overflow */
635 bfd_elf_generic_reloc
, /* special_function */
636 "R_AARCH64_MOVW_SABS_G0", /* name */
637 FALSE
, /* partial_inplace */
638 0xffff, /* src_mask */
639 0xffff, /* dst_mask */
640 FALSE
), /* pcrel_offset */
642 /* MOV[ZN]: ((S+A) >> 16) & 0xffff */
643 HOWTO (R_AARCH64_MOVW_SABS_G1
, /* type */
645 2, /* size (0 = byte, 1 = short, 2 = long) */
647 FALSE
, /* pc_relative */
649 complain_overflow_signed
, /* complain_on_overflow */
650 bfd_elf_generic_reloc
, /* special_function */
651 "R_AARCH64_MOVW_SABS_G1", /* name */
652 FALSE
, /* partial_inplace */
653 0xffff, /* src_mask */
654 0xffff, /* dst_mask */
655 FALSE
), /* pcrel_offset */
657 /* MOV[ZN]: ((S+A) >> 32) & 0xffff */
658 HOWTO (R_AARCH64_MOVW_SABS_G2
, /* type */
660 2, /* size (0 = byte, 1 = short, 2 = long) */
662 FALSE
, /* pc_relative */
664 complain_overflow_signed
, /* complain_on_overflow */
665 bfd_elf_generic_reloc
, /* special_function */
666 "R_AARCH64_MOVW_SABS_G2", /* name */
667 FALSE
, /* partial_inplace */
668 0xffff, /* src_mask */
669 0xffff, /* dst_mask */
670 FALSE
), /* pcrel_offset */
672 /* Relocations to generate 19, 21 and 33 bit PC-relative load/store
673 addresses: PG(x) is (x & ~0xfff). */
675 /* LD-lit: ((S+A-P) >> 2) & 0x7ffff */
676 HOWTO (R_AARCH64_LD_PREL_LO19
, /* type */
678 2, /* size (0 = byte, 1 = short, 2 = long) */
680 TRUE
, /* pc_relative */
682 complain_overflow_signed
, /* complain_on_overflow */
683 bfd_elf_generic_reloc
, /* special_function */
684 "R_AARCH64_LD_PREL_LO19", /* name */
685 FALSE
, /* partial_inplace */
686 0x7ffff, /* src_mask */
687 0x7ffff, /* dst_mask */
688 TRUE
), /* pcrel_offset */
690 /* ADR: (S+A-P) & 0x1fffff */
691 HOWTO (R_AARCH64_ADR_PREL_LO21
, /* type */
693 2, /* size (0 = byte, 1 = short, 2 = long) */
695 TRUE
, /* pc_relative */
697 complain_overflow_signed
, /* complain_on_overflow */
698 bfd_elf_generic_reloc
, /* special_function */
699 "R_AARCH64_ADR_PREL_LO21", /* name */
700 FALSE
, /* partial_inplace */
701 0x1fffff, /* src_mask */
702 0x1fffff, /* dst_mask */
703 TRUE
), /* pcrel_offset */
705 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
706 HOWTO (R_AARCH64_ADR_PREL_PG_HI21
, /* type */
708 2, /* size (0 = byte, 1 = short, 2 = long) */
710 TRUE
, /* pc_relative */
712 complain_overflow_signed
, /* complain_on_overflow */
713 bfd_elf_generic_reloc
, /* special_function */
714 "R_AARCH64_ADR_PREL_PG_HI21", /* name */
715 FALSE
, /* partial_inplace */
716 0x1fffff, /* src_mask */
717 0x1fffff, /* dst_mask */
718 TRUE
), /* pcrel_offset */
720 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff [no overflow check] */
721 HOWTO (R_AARCH64_ADR_PREL_PG_HI21_NC
, /* type */
723 2, /* size (0 = byte, 1 = short, 2 = long) */
725 TRUE
, /* pc_relative */
727 complain_overflow_dont
, /* complain_on_overflow */
728 bfd_elf_generic_reloc
, /* special_function */
729 "R_AARCH64_ADR_PREL_PG_HI21_NC", /* name */
730 FALSE
, /* partial_inplace */
731 0x1fffff, /* src_mask */
732 0x1fffff, /* dst_mask */
733 TRUE
), /* pcrel_offset */
735 /* ADD: (S+A) & 0xfff [no overflow check] */
736 HOWTO (R_AARCH64_ADD_ABS_LO12_NC
, /* type */
738 2, /* size (0 = byte, 1 = short, 2 = long) */
740 FALSE
, /* pc_relative */
742 complain_overflow_dont
, /* complain_on_overflow */
743 bfd_elf_generic_reloc
, /* special_function */
744 "R_AARCH64_ADD_ABS_LO12_NC", /* name */
745 FALSE
, /* partial_inplace */
746 0x3ffc00, /* src_mask */
747 0x3ffc00, /* dst_mask */
748 FALSE
), /* pcrel_offset */
750 /* LD/ST8: (S+A) & 0xfff */
751 HOWTO (R_AARCH64_LDST8_ABS_LO12_NC
, /* type */
753 2, /* size (0 = byte, 1 = short, 2 = long) */
755 FALSE
, /* pc_relative */
757 complain_overflow_dont
, /* complain_on_overflow */
758 bfd_elf_generic_reloc
, /* special_function */
759 "R_AARCH64_LDST8_ABS_LO12_NC", /* name */
760 FALSE
, /* partial_inplace */
761 0xfff, /* src_mask */
762 0xfff, /* dst_mask */
763 FALSE
), /* pcrel_offset */
765 /* Relocations for control-flow instructions. */
767 /* TBZ/NZ: ((S+A-P) >> 2) & 0x3fff */
768 HOWTO (R_AARCH64_TSTBR14
, /* type */
770 2, /* size (0 = byte, 1 = short, 2 = long) */
772 TRUE
, /* pc_relative */
774 complain_overflow_signed
, /* complain_on_overflow */
775 bfd_elf_generic_reloc
, /* special_function */
776 "R_AARCH64_TSTBR14", /* name */
777 FALSE
, /* partial_inplace */
778 0x3fff, /* src_mask */
779 0x3fff, /* dst_mask */
780 TRUE
), /* pcrel_offset */
782 /* B.cond: ((S+A-P) >> 2) & 0x7ffff */
783 HOWTO (R_AARCH64_CONDBR19
, /* type */
785 2, /* size (0 = byte, 1 = short, 2 = long) */
787 TRUE
, /* pc_relative */
789 complain_overflow_signed
, /* complain_on_overflow */
790 bfd_elf_generic_reloc
, /* special_function */
791 "R_AARCH64_CONDBR19", /* name */
792 FALSE
, /* partial_inplace */
793 0x7ffff, /* src_mask */
794 0x7ffff, /* dst_mask */
795 TRUE
), /* pcrel_offset */
799 /* B: ((S+A-P) >> 2) & 0x3ffffff */
800 HOWTO (R_AARCH64_JUMP26
, /* type */
802 2, /* size (0 = byte, 1 = short, 2 = long) */
804 TRUE
, /* pc_relative */
806 complain_overflow_signed
, /* complain_on_overflow */
807 bfd_elf_generic_reloc
, /* special_function */
808 "R_AARCH64_JUMP26", /* name */
809 FALSE
, /* partial_inplace */
810 0x3ffffff, /* src_mask */
811 0x3ffffff, /* dst_mask */
812 TRUE
), /* pcrel_offset */
814 /* BL: ((S+A-P) >> 2) & 0x3ffffff */
815 HOWTO (R_AARCH64_CALL26
, /* type */
817 2, /* size (0 = byte, 1 = short, 2 = long) */
819 TRUE
, /* pc_relative */
821 complain_overflow_signed
, /* complain_on_overflow */
822 bfd_elf_generic_reloc
, /* special_function */
823 "R_AARCH64_CALL26", /* name */
824 FALSE
, /* partial_inplace */
825 0x3ffffff, /* src_mask */
826 0x3ffffff, /* dst_mask */
827 TRUE
), /* pcrel_offset */
829 /* LD/ST16: (S+A) & 0xffe */
830 HOWTO (R_AARCH64_LDST16_ABS_LO12_NC
, /* type */
832 2, /* size (0 = byte, 1 = short, 2 = long) */
834 FALSE
, /* pc_relative */
836 complain_overflow_dont
, /* complain_on_overflow */
837 bfd_elf_generic_reloc
, /* special_function */
838 "R_AARCH64_LDST16_ABS_LO12_NC", /* name */
839 FALSE
, /* partial_inplace */
840 0xffe, /* src_mask */
841 0xffe, /* dst_mask */
842 FALSE
), /* pcrel_offset */
844 /* LD/ST32: (S+A) & 0xffc */
845 HOWTO (R_AARCH64_LDST32_ABS_LO12_NC
, /* type */
847 2, /* size (0 = byte, 1 = short, 2 = long) */
849 FALSE
, /* pc_relative */
851 complain_overflow_dont
, /* complain_on_overflow */
852 bfd_elf_generic_reloc
, /* special_function */
853 "R_AARCH64_LDST32_ABS_LO12_NC", /* name */
854 FALSE
, /* partial_inplace */
855 0xffc, /* src_mask */
856 0xffc, /* dst_mask */
857 FALSE
), /* pcrel_offset */
859 /* LD/ST64: (S+A) & 0xff8 */
860 HOWTO (R_AARCH64_LDST64_ABS_LO12_NC
, /* type */
862 2, /* size (0 = byte, 1 = short, 2 = long) */
864 FALSE
, /* pc_relative */
866 complain_overflow_dont
, /* complain_on_overflow */
867 bfd_elf_generic_reloc
, /* special_function */
868 "R_AARCH64_LDST64_ABS_LO12_NC", /* name */
869 FALSE
, /* partial_inplace */
870 0xff8, /* src_mask */
871 0xff8, /* dst_mask */
872 FALSE
), /* pcrel_offset */
887 /* LD/ST128: (S+A) & 0xff0 */
888 HOWTO (R_AARCH64_LDST128_ABS_LO12_NC
, /* type */
890 2, /* size (0 = byte, 1 = short, 2 = long) */
892 FALSE
, /* pc_relative */
894 complain_overflow_dont
, /* complain_on_overflow */
895 bfd_elf_generic_reloc
, /* special_function */
896 "R_AARCH64_LDST128_ABS_LO12_NC", /* name */
897 FALSE
, /* partial_inplace */
898 0xff0, /* src_mask */
899 0xff0, /* dst_mask */
900 FALSE
), /* pcrel_offset */
914 /* Get to the page for the GOT entry for the symbol
915 (G(S) - P) using an ADRP instruction. */
916 HOWTO (R_AARCH64_ADR_GOT_PAGE
, /* type */
918 2, /* size (0 = byte, 1 = short, 2 = long) */
920 TRUE
, /* pc_relative */
922 complain_overflow_dont
, /* complain_on_overflow */
923 bfd_elf_generic_reloc
, /* special_function */
924 "R_AARCH64_ADR_GOT_PAGE", /* name */
925 FALSE
, /* partial_inplace */
926 0x1fffff, /* src_mask */
927 0x1fffff, /* dst_mask */
928 TRUE
), /* pcrel_offset */
930 /* LD64: GOT offset G(S) & 0xff8 */
931 HOWTO (R_AARCH64_LD64_GOT_LO12_NC
, /* type */
933 2, /* size (0 = byte, 1 = short, 2 = long) */
935 FALSE
, /* pc_relative */
937 complain_overflow_dont
, /* complain_on_overflow */
938 bfd_elf_generic_reloc
, /* special_function */
939 "R_AARCH64_LD64_GOT_LO12_NC", /* name */
940 FALSE
, /* partial_inplace */
941 0xff8, /* src_mask */
942 0xff8, /* dst_mask */
943 FALSE
) /* pcrel_offset */
946 static reloc_howto_type elf64_aarch64_tls_howto_table
[] =
950 /* Get to the page for the GOT entry for the symbol
951 (G(S) - P) using an ADRP instruction. */
952 HOWTO (R_AARCH64_TLSGD_ADR_PAGE21
, /* type */
954 2, /* size (0 = byte, 1 = short, 2 = long) */
956 TRUE
, /* pc_relative */
958 complain_overflow_dont
, /* complain_on_overflow */
959 bfd_elf_generic_reloc
, /* special_function */
960 "R_AARCH64_TLSGD_ADR_PAGE21", /* name */
961 FALSE
, /* partial_inplace */
962 0x1fffff, /* src_mask */
963 0x1fffff, /* dst_mask */
964 TRUE
), /* pcrel_offset */
966 /* ADD: GOT offset G(S) & 0xff8 [no overflow check] */
967 HOWTO (R_AARCH64_TLSGD_ADD_LO12_NC
, /* type */
969 2, /* size (0 = byte, 1 = short, 2 = long) */
971 FALSE
, /* pc_relative */
973 complain_overflow_dont
, /* complain_on_overflow */
974 bfd_elf_generic_reloc
, /* special_function */
975 "R_AARCH64_TLSGD_ADD_LO12_NC", /* name */
976 FALSE
, /* partial_inplace */
977 0xfff, /* src_mask */
978 0xfff, /* dst_mask */
979 FALSE
), /* pcrel_offset */
1006 HOWTO (R_AARCH64_TLSIE_MOVW_GOTTPREL_G1
, /* type */
1007 16, /* rightshift */
1008 2, /* size (0 = byte, 1 = short, 2 = long) */
1010 FALSE
, /* pc_relative */
1012 complain_overflow_dont
, /* complain_on_overflow */
1013 bfd_elf_generic_reloc
, /* special_function */
1014 "R_AARCH64_TLSIE_MOVW_GOTTPREL_G1", /* name */
1015 FALSE
, /* partial_inplace */
1016 0xffff, /* src_mask */
1017 0xffff, /* dst_mask */
1018 FALSE
), /* pcrel_offset */
1020 HOWTO (R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC
, /* type */
1022 2, /* size (0 = byte, 1 = short, 2 = long) */
1024 FALSE
, /* pc_relative */
1026 complain_overflow_dont
, /* complain_on_overflow */
1027 bfd_elf_generic_reloc
, /* special_function */
1028 "R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC", /* name */
1029 FALSE
, /* partial_inplace */
1030 0xffff, /* src_mask */
1031 0xffff, /* dst_mask */
1032 FALSE
), /* pcrel_offset */
1034 HOWTO (R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21
, /* type */
1035 12, /* rightshift */
1036 2, /* size (0 = byte, 1 = short, 2 = long) */
1038 FALSE
, /* pc_relative */
1040 complain_overflow_dont
, /* complain_on_overflow */
1041 bfd_elf_generic_reloc
, /* special_function */
1042 "R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21", /* name */
1043 FALSE
, /* partial_inplace */
1044 0x1fffff, /* src_mask */
1045 0x1fffff, /* dst_mask */
1046 FALSE
), /* pcrel_offset */
1048 HOWTO (R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC
, /* type */
1050 2, /* size (0 = byte, 1 = short, 2 = long) */
1052 FALSE
, /* pc_relative */
1054 complain_overflow_dont
, /* complain_on_overflow */
1055 bfd_elf_generic_reloc
, /* special_function */
1056 "R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC", /* name */
1057 FALSE
, /* partial_inplace */
1058 0xff8, /* src_mask */
1059 0xff8, /* dst_mask */
1060 FALSE
), /* pcrel_offset */
1062 HOWTO (R_AARCH64_TLSIE_LD_GOTTPREL_PREL19
, /* type */
1064 2, /* size (0 = byte, 1 = short, 2 = long) */
1066 FALSE
, /* pc_relative */
1068 complain_overflow_dont
, /* complain_on_overflow */
1069 bfd_elf_generic_reloc
, /* special_function */
1070 "R_AARCH64_TLSIE_LD_GOTTPREL_PREL19", /* name */
1071 FALSE
, /* partial_inplace */
1072 0x1ffffc, /* src_mask */
1073 0x1ffffc, /* dst_mask */
1074 FALSE
), /* pcrel_offset */
1076 HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G2
, /* type */
1078 2, /* size (0 = byte, 1 = short, 2 = long) */
1080 FALSE
, /* pc_relative */
1082 complain_overflow_dont
, /* complain_on_overflow */
1083 bfd_elf_generic_reloc
, /* special_function */
1084 "R_AARCH64_TLSLE_MOVW_TPREL_G2", /* name */
1085 FALSE
, /* partial_inplace */
1086 0xffff, /* src_mask */
1087 0xffff, /* dst_mask */
1088 FALSE
), /* pcrel_offset */
1090 HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G1
, /* type */
1092 2, /* size (0 = byte, 1 = short, 2 = long) */
1094 FALSE
, /* pc_relative */
1096 complain_overflow_dont
, /* complain_on_overflow */
1097 bfd_elf_generic_reloc
, /* special_function */
1098 "R_AARCH64_TLSLE_MOVW_TPREL_G1", /* name */
1099 FALSE
, /* partial_inplace */
1100 0xffff, /* src_mask */
1101 0xffff, /* dst_mask */
1102 FALSE
), /* pcrel_offset */
1104 HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G1_NC
, /* type */
1106 2, /* size (0 = byte, 1 = short, 2 = long) */
1108 FALSE
, /* pc_relative */
1110 complain_overflow_dont
, /* complain_on_overflow */
1111 bfd_elf_generic_reloc
, /* special_function */
1112 "R_AARCH64_TLSLE_MOVW_TPREL_G1_NC", /* name */
1113 FALSE
, /* partial_inplace */
1114 0xffff, /* src_mask */
1115 0xffff, /* dst_mask */
1116 FALSE
), /* pcrel_offset */
1118 HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G0
, /* type */
1120 2, /* size (0 = byte, 1 = short, 2 = long) */
1122 FALSE
, /* pc_relative */
1124 complain_overflow_dont
, /* complain_on_overflow */
1125 bfd_elf_generic_reloc
, /* special_function */
1126 "R_AARCH64_TLSLE_MOVW_TPREL_G0", /* name */
1127 FALSE
, /* partial_inplace */
1128 0xffff, /* src_mask */
1129 0xffff, /* dst_mask */
1130 FALSE
), /* pcrel_offset */
1132 HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G0_NC
, /* type */
1134 2, /* size (0 = byte, 1 = short, 2 = long) */
1136 FALSE
, /* pc_relative */
1138 complain_overflow_dont
, /* complain_on_overflow */
1139 bfd_elf_generic_reloc
, /* special_function */
1140 "R_AARCH64_TLSLE_MOVW_TPREL_G0_NC", /* name */
1141 FALSE
, /* partial_inplace */
1142 0xffff, /* src_mask */
1143 0xffff, /* dst_mask */
1144 FALSE
), /* pcrel_offset */
1146 HOWTO (R_AARCH64_TLSLE_ADD_TPREL_HI12
, /* type */
1148 2, /* size (0 = byte, 1 = short, 2 = long) */
1150 FALSE
, /* pc_relative */
1152 complain_overflow_dont
, /* complain_on_overflow */
1153 bfd_elf_generic_reloc
, /* special_function */
1154 "R_AARCH64_TLSLE_ADD_TPREL_HI12", /* name */
1155 FALSE
, /* partial_inplace */
1156 0xfff, /* src_mask */
1157 0xfff, /* dst_mask */
1158 FALSE
), /* pcrel_offset */
1160 HOWTO (R_AARCH64_TLSLE_ADD_TPREL_LO12
, /* type */
1162 2, /* size (0 = byte, 1 = short, 2 = long) */
1164 FALSE
, /* pc_relative */
1166 complain_overflow_dont
, /* complain_on_overflow */
1167 bfd_elf_generic_reloc
, /* special_function */
1168 "R_AARCH64_TLSLE_ADD_TPREL_LO12", /* name */
1169 FALSE
, /* partial_inplace */
1170 0xfff, /* src_mask */
1171 0xfff, /* dst_mask */
1172 FALSE
), /* pcrel_offset */
1174 HOWTO (R_AARCH64_TLSLE_ADD_TPREL_LO12_NC
, /* type */
1176 2, /* size (0 = byte, 1 = short, 2 = long) */
1178 FALSE
, /* pc_relative */
1180 complain_overflow_dont
, /* complain_on_overflow */
1181 bfd_elf_generic_reloc
, /* special_function */
1182 "R_AARCH64_TLSLE_ADD_TPREL_LO12_NC", /* name */
1183 FALSE
, /* partial_inplace */
1184 0xfff, /* src_mask */
1185 0xfff, /* dst_mask */
1186 FALSE
), /* pcrel_offset */
1189 static reloc_howto_type elf64_aarch64_tlsdesc_howto_table
[] =
1191 HOWTO (R_AARCH64_TLSDESC_LD64_PREL19
, /* type */
1193 2, /* size (0 = byte, 1 = short, 2 = long) */
1195 TRUE
, /* pc_relative */
1197 complain_overflow_dont
, /* complain_on_overflow */
1198 bfd_elf_generic_reloc
, /* special_function */
1199 "R_AARCH64_TLSDESC_LD64_PREL19", /* name */
1200 FALSE
, /* partial_inplace */
1201 0x1ffffc, /* src_mask */
1202 0x1ffffc, /* dst_mask */
1203 TRUE
), /* pcrel_offset */
1205 HOWTO (R_AARCH64_TLSDESC_ADR_PREL21
, /* type */
1207 2, /* size (0 = byte, 1 = short, 2 = long) */
1209 TRUE
, /* pc_relative */
1211 complain_overflow_dont
, /* complain_on_overflow */
1212 bfd_elf_generic_reloc
, /* special_function */
1213 "R_AARCH64_TLSDESC_ADR_PREL21", /* name */
1214 FALSE
, /* partial_inplace */
1215 0x1fffff, /* src_mask */
1216 0x1fffff, /* dst_mask */
1217 TRUE
), /* pcrel_offset */
1219 /* Get to the page for the GOT entry for the symbol
1220 (G(S) - P) using an ADRP instruction. */
1221 HOWTO (R_AARCH64_TLSDESC_ADR_PAGE
, /* type */
1222 12, /* rightshift */
1223 2, /* size (0 = byte, 1 = short, 2 = long) */
1225 TRUE
, /* pc_relative */
1227 complain_overflow_dont
, /* complain_on_overflow */
1228 bfd_elf_generic_reloc
, /* special_function */
1229 "R_AARCH64_TLSDESC_ADR_PAGE", /* name */
1230 FALSE
, /* partial_inplace */
1231 0x1fffff, /* src_mask */
1232 0x1fffff, /* dst_mask */
1233 TRUE
), /* pcrel_offset */
1235 /* LD64: GOT offset G(S) & 0xfff. */
1236 HOWTO (R_AARCH64_TLSDESC_LD64_LO12_NC
, /* type */
1238 2, /* size (0 = byte, 1 = short, 2 = long) */
1240 FALSE
, /* pc_relative */
1242 complain_overflow_dont
, /* complain_on_overflow */
1243 bfd_elf_generic_reloc
, /* special_function */
1244 "R_AARCH64_TLSDESC_LD64_LO12_NC", /* name */
1245 FALSE
, /* partial_inplace */
1246 0xfff, /* src_mask */
1247 0xfff, /* dst_mask */
1248 FALSE
), /* pcrel_offset */
1250 /* ADD: GOT offset G(S) & 0xfff. */
1251 HOWTO (R_AARCH64_TLSDESC_ADD_LO12_NC
, /* type */
1253 2, /* size (0 = byte, 1 = short, 2 = long) */
1255 FALSE
, /* pc_relative */
1257 complain_overflow_dont
, /* complain_on_overflow */
1258 bfd_elf_generic_reloc
, /* special_function */
1259 "R_AARCH64_TLSDESC_ADD_LO12_NC", /* name */
1260 FALSE
, /* partial_inplace */
1261 0xfff, /* src_mask */
1262 0xfff, /* dst_mask */
1263 FALSE
), /* pcrel_offset */
1265 HOWTO (R_AARCH64_TLSDESC_OFF_G1
, /* type */
1267 2, /* size (0 = byte, 1 = short, 2 = long) */
1269 FALSE
, /* pc_relative */
1271 complain_overflow_dont
, /* complain_on_overflow */
1272 bfd_elf_generic_reloc
, /* special_function */
1273 "R_AARCH64_TLSDESC_OFF_G1", /* name */
1274 FALSE
, /* partial_inplace */
1275 0xffff, /* src_mask */
1276 0xffff, /* dst_mask */
1277 FALSE
), /* pcrel_offset */
1279 HOWTO (R_AARCH64_TLSDESC_OFF_G0_NC
, /* type */
1281 2, /* size (0 = byte, 1 = short, 2 = long) */
1283 FALSE
, /* pc_relative */
1285 complain_overflow_dont
, /* complain_on_overflow */
1286 bfd_elf_generic_reloc
, /* special_function */
1287 "R_AARCH64_TLSDESC_OFF_G0_NC", /* name */
1288 FALSE
, /* partial_inplace */
1289 0xffff, /* src_mask */
1290 0xffff, /* dst_mask */
1291 FALSE
), /* pcrel_offset */
1293 HOWTO (R_AARCH64_TLSDESC_LDR
, /* type */
1295 2, /* size (0 = byte, 1 = short, 2 = long) */
1297 FALSE
, /* pc_relative */
1299 complain_overflow_dont
, /* complain_on_overflow */
1300 bfd_elf_generic_reloc
, /* special_function */
1301 "R_AARCH64_TLSDESC_LDR", /* name */
1302 FALSE
, /* partial_inplace */
1305 FALSE
), /* pcrel_offset */
1307 HOWTO (R_AARCH64_TLSDESC_ADD
, /* type */
1309 2, /* size (0 = byte, 1 = short, 2 = long) */
1311 FALSE
, /* pc_relative */
1313 complain_overflow_dont
, /* complain_on_overflow */
1314 bfd_elf_generic_reloc
, /* special_function */
1315 "R_AARCH64_TLSDESC_ADD", /* name */
1316 FALSE
, /* partial_inplace */
1319 FALSE
), /* pcrel_offset */
1321 HOWTO (R_AARCH64_TLSDESC_CALL
, /* type */
1323 2, /* size (0 = byte, 1 = short, 2 = long) */
1325 FALSE
, /* pc_relative */
1327 complain_overflow_dont
, /* complain_on_overflow */
1328 bfd_elf_generic_reloc
, /* special_function */
1329 "R_AARCH64_TLSDESC_CALL", /* name */
1330 FALSE
, /* partial_inplace */
1333 FALSE
), /* pcrel_offset */
1336 static reloc_howto_type
*
1337 elf64_aarch64_howto_from_type (unsigned int r_type
)
1339 if (r_type
>= R_AARCH64_static_min
&& r_type
< R_AARCH64_static_max
)
1340 return &elf64_aarch64_howto_table
[r_type
- R_AARCH64_static_min
];
1342 if (r_type
>= R_AARCH64_tls_min
&& r_type
< R_AARCH64_tls_max
)
1343 return &elf64_aarch64_tls_howto_table
[r_type
- R_AARCH64_tls_min
];
1345 if (r_type
>= R_AARCH64_tlsdesc_min
&& r_type
< R_AARCH64_tlsdesc_max
)
1346 return &elf64_aarch64_tlsdesc_howto_table
[r_type
- R_AARCH64_tlsdesc_min
];
1348 if (r_type
>= R_AARCH64_dyn_min
&& r_type
< R_AARCH64_dyn_max
)
1349 return &elf64_aarch64_howto_dynrelocs
[r_type
- R_AARCH64_dyn_min
];
1353 case R_AARCH64_NONE
:
1354 return &elf64_aarch64_howto_none
;
1357 bfd_set_error (bfd_error_bad_value
);
1362 elf64_aarch64_info_to_howto (bfd
*abfd ATTRIBUTE_UNUSED
, arelent
*bfd_reloc
,
1363 Elf_Internal_Rela
*elf_reloc
)
1365 unsigned int r_type
;
1367 r_type
= ELF64_R_TYPE (elf_reloc
->r_info
);
1368 bfd_reloc
->howto
= elf64_aarch64_howto_from_type (r_type
);
1371 struct elf64_aarch64_reloc_map
1373 bfd_reloc_code_real_type bfd_reloc_val
;
1374 unsigned int elf_reloc_val
;
1377 /* All entries in this list must also be present in
1378 elf64_aarch64_howto_table. */
1379 static const struct elf64_aarch64_reloc_map elf64_aarch64_reloc_map
[] =
1381 {BFD_RELOC_NONE
, R_AARCH64_NONE
},
1383 /* Basic data relocations. */
1384 {BFD_RELOC_CTOR
, R_AARCH64_ABS64
},
1385 {BFD_RELOC_64
, R_AARCH64_ABS64
},
1386 {BFD_RELOC_32
, R_AARCH64_ABS32
},
1387 {BFD_RELOC_16
, R_AARCH64_ABS16
},
1388 {BFD_RELOC_64_PCREL
, R_AARCH64_PREL64
},
1389 {BFD_RELOC_32_PCREL
, R_AARCH64_PREL32
},
1390 {BFD_RELOC_16_PCREL
, R_AARCH64_PREL16
},
1392 /* Group relocations to low order bits of a 16, 32, 48 or 64 bit
1394 {BFD_RELOC_AARCH64_MOVW_G0_NC
, R_AARCH64_MOVW_UABS_G0_NC
},
1395 {BFD_RELOC_AARCH64_MOVW_G1_NC
, R_AARCH64_MOVW_UABS_G1_NC
},
1396 {BFD_RELOC_AARCH64_MOVW_G2_NC
, R_AARCH64_MOVW_UABS_G2_NC
},
1398 /* Group relocations to create high bits of a 16, 32, 48 or 64 bit
1399 signed value inline. */
1400 {BFD_RELOC_AARCH64_MOVW_G0_S
, R_AARCH64_MOVW_SABS_G0
},
1401 {BFD_RELOC_AARCH64_MOVW_G1_S
, R_AARCH64_MOVW_SABS_G1
},
1402 {BFD_RELOC_AARCH64_MOVW_G2_S
, R_AARCH64_MOVW_SABS_G2
},
1404 /* Group relocations to create high bits of a 16, 32, 48 or 64 bit
1405 unsigned value inline. */
1406 {BFD_RELOC_AARCH64_MOVW_G0
, R_AARCH64_MOVW_UABS_G0
},
1407 {BFD_RELOC_AARCH64_MOVW_G1
, R_AARCH64_MOVW_UABS_G1
},
1408 {BFD_RELOC_AARCH64_MOVW_G2
, R_AARCH64_MOVW_UABS_G2
},
1409 {BFD_RELOC_AARCH64_MOVW_G3
, R_AARCH64_MOVW_UABS_G3
},
1411 /* Relocations to generate 19, 21 and 33 bit PC-relative load/store. */
1412 {BFD_RELOC_AARCH64_LD_LO19_PCREL
, R_AARCH64_LD_PREL_LO19
},
1413 {BFD_RELOC_AARCH64_ADR_LO21_PCREL
, R_AARCH64_ADR_PREL_LO21
},
1414 {BFD_RELOC_AARCH64_ADR_HI21_PCREL
, R_AARCH64_ADR_PREL_PG_HI21
},
1415 {BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL
, R_AARCH64_ADR_PREL_PG_HI21_NC
},
1416 {BFD_RELOC_AARCH64_ADD_LO12
, R_AARCH64_ADD_ABS_LO12_NC
},
1417 {BFD_RELOC_AARCH64_LDST8_LO12
, R_AARCH64_LDST8_ABS_LO12_NC
},
1418 {BFD_RELOC_AARCH64_LDST16_LO12
, R_AARCH64_LDST16_ABS_LO12_NC
},
1419 {BFD_RELOC_AARCH64_LDST32_LO12
, R_AARCH64_LDST32_ABS_LO12_NC
},
1420 {BFD_RELOC_AARCH64_LDST64_LO12
, R_AARCH64_LDST64_ABS_LO12_NC
},
1421 {BFD_RELOC_AARCH64_LDST128_LO12
, R_AARCH64_LDST128_ABS_LO12_NC
},
1423 /* Relocations for control-flow instructions. */
1424 {BFD_RELOC_AARCH64_TSTBR14
, R_AARCH64_TSTBR14
},
1425 {BFD_RELOC_AARCH64_BRANCH19
, R_AARCH64_CONDBR19
},
1426 {BFD_RELOC_AARCH64_JUMP26
, R_AARCH64_JUMP26
},
1427 {BFD_RELOC_AARCH64_CALL26
, R_AARCH64_CALL26
},
1429 /* Relocations for PIC. */
1430 {BFD_RELOC_AARCH64_ADR_GOT_PAGE
, R_AARCH64_ADR_GOT_PAGE
},
1431 {BFD_RELOC_AARCH64_LD64_GOT_LO12_NC
, R_AARCH64_LD64_GOT_LO12_NC
},
1433 /* Relocations for TLS. */
1434 {BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21
, R_AARCH64_TLSGD_ADR_PAGE21
},
1435 {BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC
, R_AARCH64_TLSGD_ADD_LO12_NC
},
1436 {BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1
,
1437 R_AARCH64_TLSIE_MOVW_GOTTPREL_G1
},
1438 {BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC
,
1439 R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC
},
1440 {BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21
,
1441 R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21
},
1442 {BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC
,
1443 R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC
},
1444 {BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19
,
1445 R_AARCH64_TLSIE_LD_GOTTPREL_PREL19
},
1446 {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2
, R_AARCH64_TLSLE_MOVW_TPREL_G2
},
1447 {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1
, R_AARCH64_TLSLE_MOVW_TPREL_G1
},
1448 {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC
,
1449 R_AARCH64_TLSLE_MOVW_TPREL_G1_NC
},
1450 {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0
, R_AARCH64_TLSLE_MOVW_TPREL_G0
},
1451 {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
,
1452 R_AARCH64_TLSLE_MOVW_TPREL_G0_NC
},
1453 {BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12
, R_AARCH64_TLSLE_ADD_TPREL_LO12
},
1454 {BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12
, R_AARCH64_TLSLE_ADD_TPREL_HI12
},
1455 {BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC
,
1456 R_AARCH64_TLSLE_ADD_TPREL_LO12_NC
},
1457 {BFD_RELOC_AARCH64_TLSDESC_LD64_PREL19
, R_AARCH64_TLSDESC_LD64_PREL19
},
1458 {BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21
, R_AARCH64_TLSDESC_ADR_PREL21
},
1459 {BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE
, R_AARCH64_TLSDESC_ADR_PAGE
},
1460 {BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC
, R_AARCH64_TLSDESC_ADD_LO12_NC
},
1461 {BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC
, R_AARCH64_TLSDESC_LD64_LO12_NC
},
1462 {BFD_RELOC_AARCH64_TLSDESC_OFF_G1
, R_AARCH64_TLSDESC_OFF_G1
},
1463 {BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC
, R_AARCH64_TLSDESC_OFF_G0_NC
},
1464 {BFD_RELOC_AARCH64_TLSDESC_LDR
, R_AARCH64_TLSDESC_LDR
},
1465 {BFD_RELOC_AARCH64_TLSDESC_ADD
, R_AARCH64_TLSDESC_ADD
},
1466 {BFD_RELOC_AARCH64_TLSDESC_CALL
, R_AARCH64_TLSDESC_CALL
},
1467 {BFD_RELOC_AARCH64_TLS_DTPMOD64
, R_AARCH64_TLS_DTPMOD64
},
1468 {BFD_RELOC_AARCH64_TLS_DTPREL64
, R_AARCH64_TLS_DTPREL64
},
1469 {BFD_RELOC_AARCH64_TLS_TPREL64
, R_AARCH64_TLS_TPREL64
},
1470 {BFD_RELOC_AARCH64_TLSDESC
, R_AARCH64_TLSDESC
},
1473 static reloc_howto_type
*
1474 elf64_aarch64_reloc_type_lookup (bfd
*abfd ATTRIBUTE_UNUSED
,
1475 bfd_reloc_code_real_type code
)
1479 for (i
= 0; i
< ARRAY_SIZE (elf64_aarch64_reloc_map
); i
++)
1480 if (elf64_aarch64_reloc_map
[i
].bfd_reloc_val
== code
)
1481 return elf64_aarch64_howto_from_type
1482 (elf64_aarch64_reloc_map
[i
].elf_reloc_val
);
1484 bfd_set_error (bfd_error_bad_value
);
1488 static reloc_howto_type
*
1489 elf64_aarch64_reloc_name_lookup (bfd
*abfd ATTRIBUTE_UNUSED
,
1494 for (i
= 0; i
< ARRAY_SIZE (elf64_aarch64_howto_table
); i
++)
1495 if (elf64_aarch64_howto_table
[i
].name
!= NULL
1496 && strcasecmp (elf64_aarch64_howto_table
[i
].name
, r_name
) == 0)
1497 return &elf64_aarch64_howto_table
[i
];
1502 #define TARGET_LITTLE_SYM bfd_elf64_littleaarch64_vec
1503 #define TARGET_LITTLE_NAME "elf64-littleaarch64"
1504 #define TARGET_BIG_SYM bfd_elf64_bigaarch64_vec
1505 #define TARGET_BIG_NAME "elf64-bigaarch64"
1507 typedef unsigned long int insn32
;
1509 /* The linker script knows the section names for placement.
1510 The entry_names are used to do simple name mangling on the stubs.
1511 Given a function name, and its type, the stub can be found. The
1512 name can be changed. The only requirement is the %s be present. */
1513 #define STUB_ENTRY_NAME "__%s_veneer"
1515 /* The name of the dynamic interpreter. This is put in the .interp
1517 #define ELF_DYNAMIC_INTERPRETER "/lib/ld.so.1"
1519 #define AARCH64_MAX_FWD_BRANCH_OFFSET \
1520 (((1 << 25) - 1) << 2)
1521 #define AARCH64_MAX_BWD_BRANCH_OFFSET \
1524 #define AARCH64_MAX_ADRP_IMM ((1 << 20) - 1)
1525 #define AARCH64_MIN_ADRP_IMM (-(1 << 20))
1528 aarch64_valid_for_adrp_p (bfd_vma value
, bfd_vma place
)
1530 bfd_signed_vma offset
= (bfd_signed_vma
) (PG (value
) - PG (place
)) >> 12;
1531 return offset
<= AARCH64_MAX_ADRP_IMM
&& offset
>= AARCH64_MIN_ADRP_IMM
;
1535 aarch64_valid_branch_p (bfd_vma value
, bfd_vma place
)
1537 bfd_signed_vma offset
= (bfd_signed_vma
) (value
- place
);
1538 return (offset
<= AARCH64_MAX_FWD_BRANCH_OFFSET
1539 && offset
>= AARCH64_MAX_BWD_BRANCH_OFFSET
);
/* Instruction template for a +/-4GiB-range branch stub: compute the
   destination page with ADRP, add the low 12 bits, branch.  The two
   relocations named in the comments are applied when the stub is
   instantiated.  */
static const uint32_t aarch64_adrp_branch_stub[] =
{
  0x90000010,			/*	adrp	ip0, X */
				/*		R_AARCH64_ADR_HI21_PCREL(X) */
  0x91000210,			/*	add	ip0, ip0, :lo12:X */
				/*		R_AARCH64_ADD_ABS_LO12_NC(X) */
  0xd61f0200,			/*	br	ip0 */
};
/* Instruction template for a full-range branch stub: load a 64-bit
   PC-relative offset from the literal pool at label 1, add the current
   PC, branch.  NOTE(review): the trailing pair of zero words forms the
   64-bit literal patched by R_AARCH64_PREL64 — confirm the second zero
   word against upstream.  */
static const uint32_t aarch64_long_branch_stub[] =
{
  0x58000090,			/*	ldr   ip0, 1f */
  0x10000011,			/*	adr   ip1, #0 */
  0x8b110210,			/*	add   ip0, ip0, ip1 */
  0xd61f0200,			/*	br	ip0 */
  0x00000000,			/* 1:	.xword
				   R_AARCH64_PREL64(X) + 12
				 */
  0x00000000,
};
1563 /* Section name for stubs is the associated section name plus this
1565 #define STUB_SUFFIX ".stub"
/* Kinds of branch veneer (stub) this backend can emit.
   aarch64_stub_none means "no stub required" and is the initial value
   of a stub hash entry.  */
enum elf64_aarch64_stub_type
{
  aarch64_stub_none,		/* No stub needed.  */
  aarch64_stub_adrp_branch,	/* adrp/add/br sequence, +/-4GiB range.  */
  aarch64_stub_long_branch,	/* Literal-pool based, full 64-bit range.  */
};
1574 struct elf64_aarch64_stub_hash_entry
1576 /* Base hash table entry structure. */
1577 struct bfd_hash_entry root
;
1579 /* The stub section. */
1582 /* Offset within stub_sec of the beginning of this stub. */
1583 bfd_vma stub_offset
;
1585 /* Given the symbol's value and its section we can determine its final
1586 value when building the stubs (so the stub knows where to jump). */
1587 bfd_vma target_value
;
1588 asection
*target_section
;
1590 enum elf64_aarch64_stub_type stub_type
;
1592 /* The symbol table entry, if any, that this was derived from. */
1593 struct elf64_aarch64_link_hash_entry
*h
;
1595 /* Destination symbol type */
1596 unsigned char st_type
;
1598 /* Where this stub is being called from, or, in the case of combined
1599 stub sections, the first input section in the group. */
1602 /* The name for the local symbol at the start of this stub. The
1603 stub name in the hash table has to be unique; this does not, so
1604 it can be friendlier. */
1608 /* Used to build a map of a section. This is required for mixed-endian
1611 typedef struct elf64_elf_section_map
1616 elf64_aarch64_section_map
;
1619 typedef struct _aarch64_elf_section_data
1621 struct bfd_elf_section_data elf
;
1622 unsigned int mapcount
;
1623 unsigned int mapsize
;
1624 elf64_aarch64_section_map
*map
;
1626 _aarch64_elf_section_data
;
1628 #define elf64_aarch64_section_data(sec) \
1629 ((_aarch64_elf_section_data *) elf_section_data (sec))
1631 /* The size of the thread control block. */
1634 struct elf_aarch64_local_symbol
1636 unsigned int got_type
;
1637 bfd_signed_vma got_refcount
;
1640 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The
1641 offset is from the end of the jump table and reserved entries
1644 The magic value (bfd_vma) -1 indicates that an offset has not be
1646 bfd_vma tlsdesc_got_jump_table_offset
;
1649 struct elf_aarch64_obj_tdata
1651 struct elf_obj_tdata root
;
1653 /* local symbol descriptors */
1654 struct elf_aarch64_local_symbol
*locals
;
1656 /* Zero to warn when linking objects with incompatible enum sizes. */
1657 int no_enum_size_warning
;
1659 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
1660 int no_wchar_size_warning
;
1663 #define elf_aarch64_tdata(bfd) \
1664 ((struct elf_aarch64_obj_tdata *) (bfd)->tdata.any)
1666 #define elf64_aarch64_locals(bfd) (elf_aarch64_tdata (bfd)->locals)
1668 #define is_aarch64_elf(bfd) \
1669 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
1670 && elf_tdata (bfd) != NULL \
1671 && elf_object_id (bfd) == AARCH64_ELF_DATA)
1674 elf64_aarch64_mkobject (bfd
*abfd
)
1676 return bfd_elf_allocate_object (abfd
, sizeof (struct elf_aarch64_obj_tdata
),
1680 /* The AArch64 linker needs to keep track of the number of relocs that it
1681 decides to copy in check_relocs for each symbol. This is so that
1682 it can discard PC relative relocs if it doesn't need them when
1683 linking with -Bsymbolic. We store the information in a field
1684 extending the regular ELF linker hash table. */
1686 /* This structure keeps track of the number of relocs we have copied
1687 for a given symbol. */
1688 struct elf64_aarch64_relocs_copied
1691 struct elf64_aarch64_relocs_copied
*next
;
1692 /* A section in dynobj. */
1694 /* Number of relocs copied in this section. */
1695 bfd_size_type count
;
1696 /* Number of PC-relative relocs copied in this section. */
1697 bfd_size_type pc_count
;
1700 #define elf64_aarch64_hash_entry(ent) \
1701 ((struct elf64_aarch64_link_hash_entry *)(ent))
1703 #define GOT_UNKNOWN 0
1704 #define GOT_NORMAL 1
1705 #define GOT_TLS_GD 2
1706 #define GOT_TLS_IE 4
1707 #define GOT_TLSDESC_GD 8
1709 #define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLSDESC_GD))
1711 /* AArch64 ELF linker hash entry. */
1712 struct elf64_aarch64_link_hash_entry
1714 struct elf_link_hash_entry root
;
1716 /* Track dynamic relocs copied for this symbol. */
1717 struct elf_dyn_relocs
*dyn_relocs
;
1719 /* Number of PC relative relocs copied for this symbol. */
1720 struct elf64_aarch64_relocs_copied
*relocs_copied
;
1722 /* Since PLT entries have variable size, we need to record the
1723 index into .got.plt instead of recomputing it from the PLT
1725 bfd_signed_vma plt_got_offset
;
1727 /* Bit mask representing the type of GOT entry(s) if any required by
1729 unsigned int got_type
;
1731 /* A pointer to the most recently used stub hash entry against this
1733 struct elf64_aarch64_stub_hash_entry
*stub_cache
;
1735 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The offset
1736 is from the end of the jump table and reserved entries within the PLTGOT.
1738 The magic value (bfd_vma) -1 indicates that an offset has not
1740 bfd_vma tlsdesc_got_jump_table_offset
;
1744 elf64_aarch64_symbol_got_type (struct elf_link_hash_entry
*h
,
1746 unsigned long r_symndx
)
1749 return elf64_aarch64_hash_entry (h
)->got_type
;
1751 if (! elf64_aarch64_locals (abfd
))
1754 return elf64_aarch64_locals (abfd
)[r_symndx
].got_type
;
1757 /* Traverse an AArch64 ELF linker hash table. */
1758 #define elf64_aarch64_link_hash_traverse(table, func, info) \
1759 (elf_link_hash_traverse \
1761 (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func), \
1764 /* Get the AArch64 elf linker hash table from a link_info structure. */
1765 #define elf64_aarch64_hash_table(info) \
1766 ((struct elf64_aarch64_link_hash_table *) ((info)->hash))
1768 #define aarch64_stub_hash_lookup(table, string, create, copy) \
1769 ((struct elf64_aarch64_stub_hash_entry *) \
1770 bfd_hash_lookup ((table), (string), (create), (copy)))
1772 /* AArch64 ELF linker hash table. */
1773 struct elf64_aarch64_link_hash_table
1775 /* The main hash table. */
1776 struct elf_link_hash_table root
;
1778 /* Nonzero to force PIC branch veneers. */
1781 /* The number of bytes in the initial entry in the PLT. */
1782 bfd_size_type plt_header_size
;
1784 /* The number of bytes in the subsequent PLT etries. */
1785 bfd_size_type plt_entry_size
;
1787 /* Short-cuts to get to dynamic linker sections. */
1791 /* Small local sym cache. */
1792 struct sym_cache sym_cache
;
1794 /* For convenience in allocate_dynrelocs. */
1797 /* The amount of space used by the reserved portion of the sgotplt
1798 section, plus whatever space is used by the jump slots. */
1799 bfd_vma sgotplt_jump_table_size
;
1801 /* The stub hash table. */
1802 struct bfd_hash_table stub_hash_table
;
1804 /* Linker stub bfd. */
1807 /* Linker call-backs. */
1808 asection
*(*add_stub_section
) (const char *, asection
*);
1809 void (*layout_sections_again
) (void);
1811 /* Array to keep track of which stub sections have been created, and
1812 information on stub grouping. */
1815 /* This is the section to which stubs in the group will be
1818 /* The stub section. */
1822 /* Assorted information used by elf64_aarch64_size_stubs. */
1823 unsigned int bfd_count
;
1825 asection
**input_list
;
1827 /* The offset into splt of the PLT entry for the TLS descriptor
1828 resolver. Special values are 0, if not necessary (or not found
1829 to be necessary yet), and -1 if needed but not determined
1831 bfd_vma tlsdesc_plt
;
1833 /* The GOT offset for the lazy trampoline. Communicated to the
1834 loader via DT_TLSDESC_GOT. The magic value (bfd_vma) -1
1835 indicates an offset is not allocated. */
1836 bfd_vma dt_tlsdesc_got
;
1840 /* Return non-zero if the indicated VALUE has overflowed the maximum
1841 range expressible by a unsigned number with the indicated number of
1844 static bfd_reloc_status_type
1845 aarch64_unsigned_overflow (bfd_vma value
, unsigned int bits
)
1848 if (bits
>= sizeof (bfd_vma
) * 8)
1849 return bfd_reloc_ok
;
1850 lim
= (bfd_vma
) 1 << bits
;
1852 return bfd_reloc_overflow
;
1853 return bfd_reloc_ok
;
1857 /* Return non-zero if the indicated VALUE has overflowed the maximum
1858 range expressible by an signed number with the indicated number of
1861 static bfd_reloc_status_type
1862 aarch64_signed_overflow (bfd_vma value
, unsigned int bits
)
1864 bfd_signed_vma svalue
= (bfd_signed_vma
) value
;
1867 if (bits
>= sizeof (bfd_vma
) * 8)
1868 return bfd_reloc_ok
;
1869 lim
= (bfd_signed_vma
) 1 << (bits
- 1);
1870 if (svalue
< -lim
|| svalue
>= lim
)
1871 return bfd_reloc_overflow
;
1872 return bfd_reloc_ok
;
1875 /* Create an entry in an AArch64 ELF linker hash table. */
1877 static struct bfd_hash_entry
*
1878 elf64_aarch64_link_hash_newfunc (struct bfd_hash_entry
*entry
,
1879 struct bfd_hash_table
*table
,
1882 struct elf64_aarch64_link_hash_entry
*ret
=
1883 (struct elf64_aarch64_link_hash_entry
*) entry
;
1885 /* Allocate the structure if it has not already been allocated by a
1888 ret
= bfd_hash_allocate (table
,
1889 sizeof (struct elf64_aarch64_link_hash_entry
));
1891 return (struct bfd_hash_entry
*) ret
;
1893 /* Call the allocation method of the superclass. */
1894 ret
= ((struct elf64_aarch64_link_hash_entry
*)
1895 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry
*) ret
,
1899 ret
->dyn_relocs
= NULL
;
1900 ret
->relocs_copied
= NULL
;
1901 ret
->got_type
= GOT_UNKNOWN
;
1902 ret
->plt_got_offset
= (bfd_vma
) - 1;
1903 ret
->stub_cache
= NULL
;
1904 ret
->tlsdesc_got_jump_table_offset
= (bfd_vma
) - 1;
1907 return (struct bfd_hash_entry
*) ret
;
1910 /* Initialize an entry in the stub hash table. */
1912 static struct bfd_hash_entry
*
1913 stub_hash_newfunc (struct bfd_hash_entry
*entry
,
1914 struct bfd_hash_table
*table
, const char *string
)
1916 /* Allocate the structure if it has not already been allocated by a
1920 entry
= bfd_hash_allocate (table
,
1922 elf64_aarch64_stub_hash_entry
));
1927 /* Call the allocation method of the superclass. */
1928 entry
= bfd_hash_newfunc (entry
, table
, string
);
1931 struct elf64_aarch64_stub_hash_entry
*eh
;
1933 /* Initialize the local fields. */
1934 eh
= (struct elf64_aarch64_stub_hash_entry
*) entry
;
1935 eh
->stub_sec
= NULL
;
1936 eh
->stub_offset
= 0;
1937 eh
->target_value
= 0;
1938 eh
->target_section
= NULL
;
1939 eh
->stub_type
= aarch64_stub_none
;
1948 /* Copy the extra info we tack onto an elf_link_hash_entry. */
1951 elf64_aarch64_copy_indirect_symbol (struct bfd_link_info
*info
,
1952 struct elf_link_hash_entry
*dir
,
1953 struct elf_link_hash_entry
*ind
)
1955 struct elf64_aarch64_link_hash_entry
*edir
, *eind
;
1957 edir
= (struct elf64_aarch64_link_hash_entry
*) dir
;
1958 eind
= (struct elf64_aarch64_link_hash_entry
*) ind
;
1960 if (eind
->dyn_relocs
!= NULL
)
1962 if (edir
->dyn_relocs
!= NULL
)
1964 struct elf_dyn_relocs
**pp
;
1965 struct elf_dyn_relocs
*p
;
1967 /* Add reloc counts against the indirect sym to the direct sym
1968 list. Merge any entries against the same section. */
1969 for (pp
= &eind
->dyn_relocs
; (p
= *pp
) != NULL
;)
1971 struct elf_dyn_relocs
*q
;
1973 for (q
= edir
->dyn_relocs
; q
!= NULL
; q
= q
->next
)
1974 if (q
->sec
== p
->sec
)
1976 q
->pc_count
+= p
->pc_count
;
1977 q
->count
+= p
->count
;
1984 *pp
= edir
->dyn_relocs
;
1987 edir
->dyn_relocs
= eind
->dyn_relocs
;
1988 eind
->dyn_relocs
= NULL
;
1991 if (eind
->relocs_copied
!= NULL
)
1993 if (edir
->relocs_copied
!= NULL
)
1995 struct elf64_aarch64_relocs_copied
**pp
;
1996 struct elf64_aarch64_relocs_copied
*p
;
1998 /* Add reloc counts against the indirect sym to the direct sym
1999 list. Merge any entries against the same section. */
2000 for (pp
= &eind
->relocs_copied
; (p
= *pp
) != NULL
;)
2002 struct elf64_aarch64_relocs_copied
*q
;
2004 for (q
= edir
->relocs_copied
; q
!= NULL
; q
= q
->next
)
2005 if (q
->section
== p
->section
)
2007 q
->pc_count
+= p
->pc_count
;
2008 q
->count
+= p
->count
;
2015 *pp
= edir
->relocs_copied
;
2018 edir
->relocs_copied
= eind
->relocs_copied
;
2019 eind
->relocs_copied
= NULL
;
2022 if (ind
->root
.type
== bfd_link_hash_indirect
)
2024 /* Copy over PLT info. */
2025 if (dir
->got
.refcount
<= 0)
2027 edir
->got_type
= eind
->got_type
;
2028 eind
->got_type
= GOT_UNKNOWN
;
2032 _bfd_elf_link_hash_copy_indirect (info
, dir
, ind
);
2035 /* Create an AArch64 elf linker hash table. */
2037 static struct bfd_link_hash_table
*
2038 elf64_aarch64_link_hash_table_create (bfd
*abfd
)
2040 struct elf64_aarch64_link_hash_table
*ret
;
2041 bfd_size_type amt
= sizeof (struct elf64_aarch64_link_hash_table
);
2043 ret
= bfd_malloc (amt
);
2047 if (!_bfd_elf_link_hash_table_init
2048 (&ret
->root
, abfd
, elf64_aarch64_link_hash_newfunc
,
2049 sizeof (struct elf64_aarch64_link_hash_entry
), AARCH64_ELF_DATA
))
2055 ret
->sdynbss
= NULL
;
2056 ret
->srelbss
= NULL
;
2058 ret
->plt_header_size
= PLT_ENTRY_SIZE
;
2059 ret
->plt_entry_size
= PLT_SMALL_ENTRY_SIZE
;
2061 ret
->sym_cache
.abfd
= NULL
;
2064 ret
->stub_bfd
= NULL
;
2065 ret
->add_stub_section
= NULL
;
2066 ret
->layout_sections_again
= NULL
;
2067 ret
->stub_group
= NULL
;
2070 ret
->input_list
= NULL
;
2071 ret
->tlsdesc_plt
= 0;
2072 ret
->dt_tlsdesc_got
= (bfd_vma
) - 1;
2074 if (!bfd_hash_table_init (&ret
->stub_hash_table
, stub_hash_newfunc
,
2075 sizeof (struct elf64_aarch64_stub_hash_entry
)))
2081 return &ret
->root
.root
;
2084 /* Free the derived linker hash table. */
2087 elf64_aarch64_hash_table_free (struct bfd_link_hash_table
*hash
)
2089 struct elf64_aarch64_link_hash_table
*ret
2090 = (struct elf64_aarch64_link_hash_table
*) hash
;
2092 bfd_hash_table_free (&ret
->stub_hash_table
);
2093 _bfd_generic_link_hash_table_free (hash
);
2097 aarch64_resolve_relocation (unsigned int r_type
, bfd_vma place
, bfd_vma value
,
2098 bfd_vma addend
, bfd_boolean weak_undef_p
)
2102 case R_AARCH64_TLSDESC_CALL
:
2103 case R_AARCH64_NONE
:
2104 case R_AARCH64_NULL
:
2107 case R_AARCH64_ADR_PREL_LO21
:
2108 case R_AARCH64_CONDBR19
:
2109 case R_AARCH64_LD_PREL_LO19
:
2110 case R_AARCH64_PREL16
:
2111 case R_AARCH64_PREL32
:
2112 case R_AARCH64_PREL64
:
2113 case R_AARCH64_TSTBR14
:
2116 value
= value
+ addend
- place
;
2119 case R_AARCH64_CALL26
:
2120 case R_AARCH64_JUMP26
:
2121 value
= value
+ addend
- place
;
2124 case R_AARCH64_ABS16
:
2125 case R_AARCH64_ABS32
:
2126 case R_AARCH64_MOVW_SABS_G0
:
2127 case R_AARCH64_MOVW_SABS_G1
:
2128 case R_AARCH64_MOVW_SABS_G2
:
2129 case R_AARCH64_MOVW_UABS_G0
:
2130 case R_AARCH64_MOVW_UABS_G0_NC
:
2131 case R_AARCH64_MOVW_UABS_G1
:
2132 case R_AARCH64_MOVW_UABS_G1_NC
:
2133 case R_AARCH64_MOVW_UABS_G2
:
2134 case R_AARCH64_MOVW_UABS_G2_NC
:
2135 case R_AARCH64_MOVW_UABS_G3
:
2136 value
= value
+ addend
;
2139 case R_AARCH64_ADR_PREL_PG_HI21
:
2140 case R_AARCH64_ADR_PREL_PG_HI21_NC
:
2143 value
= PG (value
+ addend
) - PG (place
);
2146 case R_AARCH64_ADR_GOT_PAGE
:
2147 case R_AARCH64_TLSDESC_ADR_PAGE
:
2148 case R_AARCH64_TLSGD_ADR_PAGE21
:
2149 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21
:
2150 value
= PG (value
+ addend
) - PG (place
);
2153 case R_AARCH64_ADD_ABS_LO12_NC
:
2154 case R_AARCH64_LD64_GOT_LO12_NC
:
2155 case R_AARCH64_LDST8_ABS_LO12_NC
:
2156 case R_AARCH64_LDST16_ABS_LO12_NC
:
2157 case R_AARCH64_LDST32_ABS_LO12_NC
:
2158 case R_AARCH64_LDST64_ABS_LO12_NC
:
2159 case R_AARCH64_LDST128_ABS_LO12_NC
:
2160 case R_AARCH64_TLSDESC_ADD_LO12_NC
:
2161 case R_AARCH64_TLSDESC_ADD
:
2162 case R_AARCH64_TLSDESC_LD64_LO12_NC
:
2163 case R_AARCH64_TLSDESC_LDR
:
2164 case R_AARCH64_TLSGD_ADD_LO12_NC
:
2165 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC
:
2166 case R_AARCH64_TLSLE_ADD_TPREL_LO12
:
2167 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC
:
2168 value
= PG_OFFSET (value
+ addend
);
2171 case R_AARCH64_TLSLE_MOVW_TPREL_G1
:
2172 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC
:
2173 value
= (value
+ addend
) & (bfd_vma
) 0xffff0000;
2175 case R_AARCH64_TLSLE_ADD_TPREL_HI12
:
2176 value
= (value
+ addend
) & (bfd_vma
) 0xfff000;
2179 case R_AARCH64_TLSLE_MOVW_TPREL_G0
:
2180 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC
:
2181 value
= (value
+ addend
) & (bfd_vma
) 0xffff;
2184 case R_AARCH64_TLSLE_MOVW_TPREL_G2
:
2185 value
= (value
+ addend
) & ~(bfd_vma
) 0xffffffff;
2186 value
-= place
& ~(bfd_vma
) 0xffffffff;
2193 aarch64_relocate (unsigned int r_type
, bfd
*input_bfd
, asection
*input_section
,
2194 bfd_vma offset
, bfd_vma value
)
2196 reloc_howto_type
*howto
;
2199 howto
= elf64_aarch64_howto_from_type (r_type
);
2200 place
= (input_section
->output_section
->vma
+ input_section
->output_offset
2202 value
= aarch64_resolve_relocation (r_type
, place
, value
, 0, FALSE
);
2203 return bfd_elf_aarch64_put_addend (input_bfd
,
2204 input_section
->contents
+ offset
,
2208 static enum elf64_aarch64_stub_type
2209 aarch64_select_branch_stub (bfd_vma value
, bfd_vma place
)
2211 if (aarch64_valid_for_adrp_p (value
, place
))
2212 return aarch64_stub_adrp_branch
;
2213 return aarch64_stub_long_branch
;
2216 /* Determine the type of stub needed, if any, for a call. */
2218 static enum elf64_aarch64_stub_type
2219 aarch64_type_of_stub (struct bfd_link_info
*info
,
2220 asection
*input_sec
,
2221 const Elf_Internal_Rela
*rel
,
2222 unsigned char st_type
,
2223 struct elf64_aarch64_link_hash_entry
*hash
,
2224 bfd_vma destination
)
2227 bfd_signed_vma branch_offset
;
2228 unsigned int r_type
;
2229 struct elf64_aarch64_link_hash_table
*globals
;
2230 enum elf64_aarch64_stub_type stub_type
= aarch64_stub_none
;
2231 bfd_boolean via_plt_p
;
2233 if (st_type
!= STT_FUNC
)
2236 globals
= elf64_aarch64_hash_table (info
);
2237 via_plt_p
= (globals
->root
.splt
!= NULL
&& hash
!= NULL
2238 && hash
->root
.plt
.offset
!= (bfd_vma
) - 1);
2243 /* Determine where the call point is. */
2244 location
= (input_sec
->output_offset
2245 + input_sec
->output_section
->vma
+ rel
->r_offset
);
2247 branch_offset
= (bfd_signed_vma
) (destination
- location
);
2249 r_type
= ELF64_R_TYPE (rel
->r_info
);
2251 /* We don't want to redirect any old unconditional jump in this way,
2252 only one which is being used for a sibcall, where it is
2253 acceptable for the IP0 and IP1 registers to be clobbered. */
2254 if ((r_type
== R_AARCH64_CALL26
|| r_type
== R_AARCH64_JUMP26
)
2255 && (branch_offset
> AARCH64_MAX_FWD_BRANCH_OFFSET
2256 || branch_offset
< AARCH64_MAX_BWD_BRANCH_OFFSET
))
2258 stub_type
= aarch64_stub_long_branch
;
2264 /* Build a name for an entry in the stub hash table. */
2267 elf64_aarch64_stub_name (const asection
*input_section
,
2268 const asection
*sym_sec
,
2269 const struct elf64_aarch64_link_hash_entry
*hash
,
2270 const Elf_Internal_Rela
*rel
)
2277 len
= 8 + 1 + strlen (hash
->root
.root
.root
.string
) + 1 + 16 + 1;
2278 stub_name
= bfd_malloc (len
);
2279 if (stub_name
!= NULL
)
2280 snprintf (stub_name
, len
, "%08x_%s+%" BFD_VMA_FMT
"x",
2281 (unsigned int) input_section
->id
,
2282 hash
->root
.root
.root
.string
,
2287 len
= 8 + 1 + 8 + 1 + 8 + 1 + 16 + 1;
2288 stub_name
= bfd_malloc (len
);
2289 if (stub_name
!= NULL
)
2290 snprintf (stub_name
, len
, "%08x_%x:%x+%" BFD_VMA_FMT
"x",
2291 (unsigned int) input_section
->id
,
2292 (unsigned int) sym_sec
->id
,
2293 (unsigned int) ELF64_R_SYM (rel
->r_info
),
2300 /* Look up an entry in the stub hash. Stub entries are cached because
2301 creating the stub name takes a bit of time. */
2303 static struct elf64_aarch64_stub_hash_entry
*
2304 elf64_aarch64_get_stub_entry (const asection
*input_section
,
2305 const asection
*sym_sec
,
2306 struct elf_link_hash_entry
*hash
,
2307 const Elf_Internal_Rela
*rel
,
2308 struct elf64_aarch64_link_hash_table
*htab
)
2310 struct elf64_aarch64_stub_hash_entry
*stub_entry
;
2311 struct elf64_aarch64_link_hash_entry
*h
=
2312 (struct elf64_aarch64_link_hash_entry
*) hash
;
2313 const asection
*id_sec
;
2315 if ((input_section
->flags
& SEC_CODE
) == 0)
2318 /* If this input section is part of a group of sections sharing one
2319 stub section, then use the id of the first section in the group.
2320 Stub names need to include a section id, as there may well be
2321 more than one stub used to reach say, printf, and we need to
2322 distinguish between them. */
2323 id_sec
= htab
->stub_group
[input_section
->id
].link_sec
;
2325 if (h
!= NULL
&& h
->stub_cache
!= NULL
2326 && h
->stub_cache
->h
== h
&& h
->stub_cache
->id_sec
== id_sec
)
2328 stub_entry
= h
->stub_cache
;
2334 stub_name
= elf64_aarch64_stub_name (id_sec
, sym_sec
, h
, rel
);
2335 if (stub_name
== NULL
)
2338 stub_entry
= aarch64_stub_hash_lookup (&htab
->stub_hash_table
,
2339 stub_name
, FALSE
, FALSE
);
2341 h
->stub_cache
= stub_entry
;
2349 /* Add a new stub entry to the stub hash. Not all fields of the new
2350 stub entry are initialised. */
2352 static struct elf64_aarch64_stub_hash_entry
*
2353 elf64_aarch64_add_stub (const char *stub_name
,
2355 struct elf64_aarch64_link_hash_table
*htab
)
2359 struct elf64_aarch64_stub_hash_entry
*stub_entry
;
2361 link_sec
= htab
->stub_group
[section
->id
].link_sec
;
2362 stub_sec
= htab
->stub_group
[section
->id
].stub_sec
;
2363 if (stub_sec
== NULL
)
2365 stub_sec
= htab
->stub_group
[link_sec
->id
].stub_sec
;
2366 if (stub_sec
== NULL
)
2372 namelen
= strlen (link_sec
->name
);
2373 len
= namelen
+ sizeof (STUB_SUFFIX
);
2374 s_name
= bfd_alloc (htab
->stub_bfd
, len
);
2378 memcpy (s_name
, link_sec
->name
, namelen
);
2379 memcpy (s_name
+ namelen
, STUB_SUFFIX
, sizeof (STUB_SUFFIX
));
2380 stub_sec
= (*htab
->add_stub_section
) (s_name
, link_sec
);
2381 if (stub_sec
== NULL
)
2383 htab
->stub_group
[link_sec
->id
].stub_sec
= stub_sec
;
2385 htab
->stub_group
[section
->id
].stub_sec
= stub_sec
;
2388 /* Enter this entry into the linker stub hash table. */
2389 stub_entry
= aarch64_stub_hash_lookup (&htab
->stub_hash_table
, stub_name
,
2391 if (stub_entry
== NULL
)
2393 (*_bfd_error_handler
) (_("%s: cannot create stub entry %s"),
2394 section
->owner
, stub_name
);
2398 stub_entry
->stub_sec
= stub_sec
;
2399 stub_entry
->stub_offset
= 0;
2400 stub_entry
->id_sec
= link_sec
;
2406 aarch64_build_one_stub (struct bfd_hash_entry
*gen_entry
,
2407 void *in_arg ATTRIBUTE_UNUSED
)
2409 struct elf64_aarch64_stub_hash_entry
*stub_entry
;
2414 unsigned int template_size
;
2415 const uint32_t *template;
2418 /* Massage our args to the form they really have. */
2419 stub_entry
= (struct elf64_aarch64_stub_hash_entry
*) gen_entry
;
2421 stub_sec
= stub_entry
->stub_sec
;
2423 /* Make a note of the offset within the stubs for this entry. */
2424 stub_entry
->stub_offset
= stub_sec
->size
;
2425 loc
= stub_sec
->contents
+ stub_entry
->stub_offset
;
2427 stub_bfd
= stub_sec
->owner
;
2429 /* This is the address of the stub destination. */
2430 sym_value
= (stub_entry
->target_value
2431 + stub_entry
->target_section
->output_offset
2432 + stub_entry
->target_section
->output_section
->vma
);
2434 if (stub_entry
->stub_type
== aarch64_stub_long_branch
)
2436 bfd_vma place
= (stub_entry
->stub_offset
+ stub_sec
->output_section
->vma
2437 + stub_sec
->output_offset
);
2439 /* See if we can relax the stub. */
2440 if (aarch64_valid_for_adrp_p (sym_value
, place
))
2441 stub_entry
->stub_type
= aarch64_select_branch_stub (sym_value
, place
);
2444 switch (stub_entry
->stub_type
)
2446 case aarch64_stub_adrp_branch
:
2447 template = aarch64_adrp_branch_stub
;
2448 template_size
= sizeof (aarch64_adrp_branch_stub
);
2450 case aarch64_stub_long_branch
:
2451 template = aarch64_long_branch_stub
;
2452 template_size
= sizeof (aarch64_long_branch_stub
);
2459 for (i
= 0; i
< (template_size
/ sizeof template[0]); i
++)
2461 bfd_putl32 (template[i
], loc
);
2465 template_size
= (template_size
+ 7) & ~7;
2466 stub_sec
->size
+= template_size
;
2468 switch (stub_entry
->stub_type
)
2470 case aarch64_stub_adrp_branch
:
2471 if (aarch64_relocate (R_AARCH64_ADR_PREL_PG_HI21
, stub_bfd
, stub_sec
,
2472 stub_entry
->stub_offset
, sym_value
))
2473 /* The stub would not have been relaxed if the offset was out
2477 _bfd_final_link_relocate
2478 (elf64_aarch64_howto_from_type (R_AARCH64_ADD_ABS_LO12_NC
),
2482 stub_entry
->stub_offset
+ 4,
2487 case aarch64_stub_long_branch
:
2488 /* We want the value relative to the address 12 bytes back from the
2490 _bfd_final_link_relocate (elf64_aarch64_howto_from_type
2491 (R_AARCH64_PREL64
), stub_bfd
, stub_sec
,
2493 stub_entry
->stub_offset
+ 16,
2503 /* As above, but don't actually build the stub. Just bump offset so
2504 we know stub section sizes. */
2507 aarch64_size_one_stub (struct bfd_hash_entry
*gen_entry
,
2508 void *in_arg ATTRIBUTE_UNUSED
)
2510 struct elf64_aarch64_stub_hash_entry
*stub_entry
;
2513 /* Massage our args to the form they really have. */
2514 stub_entry
= (struct elf64_aarch64_stub_hash_entry
*) gen_entry
;
2516 switch (stub_entry
->stub_type
)
2518 case aarch64_stub_adrp_branch
:
2519 size
= sizeof (aarch64_adrp_branch_stub
);
2521 case aarch64_stub_long_branch
:
2522 size
= sizeof (aarch64_long_branch_stub
);
2530 size
= (size
+ 7) & ~7;
2531 stub_entry
->stub_sec
->size
+= size
;
2535 /* External entry points for sizing and building linker stubs. */
2537 /* Set up various things so that we can make a list of input sections
2538 for each output section included in the link. Returns -1 on error,
2539 0 when no stubs will be needed, and 1 on success. */
2542 elf64_aarch64_setup_section_lists (bfd
*output_bfd
,
2543 struct bfd_link_info
*info
)
2546 unsigned int bfd_count
;
2547 int top_id
, top_index
;
2549 asection
**input_list
, **list
;
2551 struct elf64_aarch64_link_hash_table
*htab
=
2552 elf64_aarch64_hash_table (info
);
2554 if (!is_elf_hash_table (htab
))
2557 /* Count the number of input BFDs and find the top input section id. */
2558 for (input_bfd
= info
->input_bfds
, bfd_count
= 0, top_id
= 0;
2559 input_bfd
!= NULL
; input_bfd
= input_bfd
->link_next
)
2562 for (section
= input_bfd
->sections
;
2563 section
!= NULL
; section
= section
->next
)
2565 if (top_id
< section
->id
)
2566 top_id
= section
->id
;
2569 htab
->bfd_count
= bfd_count
;
2571 amt
= sizeof (struct map_stub
) * (top_id
+ 1);
2572 htab
->stub_group
= bfd_zmalloc (amt
);
2573 if (htab
->stub_group
== NULL
)
2576 /* We can't use output_bfd->section_count here to find the top output
2577 section index as some sections may have been removed, and
2578 _bfd_strip_section_from_output doesn't renumber the indices. */
2579 for (section
= output_bfd
->sections
, top_index
= 0;
2580 section
!= NULL
; section
= section
->next
)
2582 if (top_index
< section
->index
)
2583 top_index
= section
->index
;
2586 htab
->top_index
= top_index
;
2587 amt
= sizeof (asection
*) * (top_index
+ 1);
2588 input_list
= bfd_malloc (amt
);
2589 htab
->input_list
= input_list
;
2590 if (input_list
== NULL
)
2593 /* For sections we aren't interested in, mark their entries with a
2594 value we can check later. */
2595 list
= input_list
+ top_index
;
2597 *list
= bfd_abs_section_ptr
;
2598 while (list
-- != input_list
);
2600 for (section
= output_bfd
->sections
;
2601 section
!= NULL
; section
= section
->next
)
2603 if ((section
->flags
& SEC_CODE
) != 0)
2604 input_list
[section
->index
] = NULL
;
2610 /* Used by elf64_aarch64_next_input_section and group_sections. */
2611 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
2613 /* The linker repeatedly calls this function for each input section,
2614 in the order that input sections are linked into output sections.
2615 Build lists of input sections to determine groupings between which
2616 we may insert linker stubs. */
2619 elf64_aarch64_next_input_section (struct bfd_link_info
*info
, asection
*isec
)
2621 struct elf64_aarch64_link_hash_table
*htab
=
2622 elf64_aarch64_hash_table (info
);
2624 if (isec
->output_section
->index
<= htab
->top_index
)
2626 asection
**list
= htab
->input_list
+ isec
->output_section
->index
;
2628 if (*list
!= bfd_abs_section_ptr
)
2630 /* Steal the link_sec pointer for our list. */
2631 /* This happens to make the list in reverse order,
2632 which is what we want. */
2633 PREV_SEC (isec
) = *list
;
2639 /* See whether we can group stub sections together. Grouping stub
2640 sections may result in fewer stubs. More importantly, we need to
2641 put all .init* and .fini* stubs at the beginning of the .init or
2642 .fini output sections respectively, because glibc splits the
2643 _init and _fini functions into multiple parts. Putting a stub in
2644 the middle of a function is not a good idea. */
2647 group_sections (struct elf64_aarch64_link_hash_table
*htab
,
2648 bfd_size_type stub_group_size
,
2649 bfd_boolean stubs_always_before_branch
)
2651 asection
**list
= htab
->input_list
+ htab
->top_index
;
2655 asection
*tail
= *list
;
2657 if (tail
== bfd_abs_section_ptr
)
2660 while (tail
!= NULL
)
2664 bfd_size_type total
;
2668 while ((prev
= PREV_SEC (curr
)) != NULL
2669 && ((total
+= curr
->output_offset
- prev
->output_offset
)
2673 /* OK, the size from the start of CURR to the end is less
2674 than stub_group_size and thus can be handled by one stub
2675 section. (Or the tail section is itself larger than
2676 stub_group_size, in which case we may be toast.)
2677 We should really be keeping track of the total size of
2678 stubs added here, as stubs contribute to the final output
2682 prev
= PREV_SEC (tail
);
2683 /* Set up this stub group. */
2684 htab
->stub_group
[tail
->id
].link_sec
= curr
;
2686 while (tail
!= curr
&& (tail
= prev
) != NULL
);
2688 /* But wait, there's more! Input sections up to stub_group_size
2689 bytes before the stub section can be handled by it too. */
2690 if (!stubs_always_before_branch
)
2694 && ((total
+= tail
->output_offset
- prev
->output_offset
)
2698 prev
= PREV_SEC (tail
);
2699 htab
->stub_group
[tail
->id
].link_sec
= curr
;
2705 while (list
-- != htab
->input_list
);
2707 free (htab
->input_list
);
2712 /* Determine and set the size of the stub section for a final link.
2714 The basic idea here is to examine all the relocations looking for
2715 PC-relative calls to a target that is unreachable with a "bl"
2719 elf64_aarch64_size_stubs (bfd
*output_bfd
,
2721 struct bfd_link_info
*info
,
2722 bfd_signed_vma group_size
,
2723 asection
* (*add_stub_section
) (const char *,
2725 void (*layout_sections_again
) (void))
2727 bfd_size_type stub_group_size
;
2728 bfd_boolean stubs_always_before_branch
;
2729 bfd_boolean stub_changed
= 0;
2730 struct elf64_aarch64_link_hash_table
*htab
= elf64_aarch64_hash_table (info
);
2732 /* Propagate mach to stub bfd, because it may not have been
2733 finalized when we created stub_bfd. */
2734 bfd_set_arch_mach (stub_bfd
, bfd_get_arch (output_bfd
),
2735 bfd_get_mach (output_bfd
));
2737 /* Stash our params away. */
2738 htab
->stub_bfd
= stub_bfd
;
2739 htab
->add_stub_section
= add_stub_section
;
2740 htab
->layout_sections_again
= layout_sections_again
;
2741 stubs_always_before_branch
= group_size
< 0;
2743 stub_group_size
= -group_size
;
2745 stub_group_size
= group_size
;
2747 if (stub_group_size
== 1)
2749 /* Default values. */
2750 /* Aarch64 branch range is +-128MB. The value used is 1MB less. */
2751 stub_group_size
= 127 * 1024 * 1024;
2754 group_sections (htab
, stub_group_size
, stubs_always_before_branch
);
2759 unsigned int bfd_indx
;
2762 for (input_bfd
= info
->input_bfds
, bfd_indx
= 0;
2763 input_bfd
!= NULL
; input_bfd
= input_bfd
->link_next
, bfd_indx
++)
2765 Elf_Internal_Shdr
*symtab_hdr
;
2767 Elf_Internal_Sym
*local_syms
= NULL
;
2769 /* We'll need the symbol table in a second. */
2770 symtab_hdr
= &elf_tdata (input_bfd
)->symtab_hdr
;
2771 if (symtab_hdr
->sh_info
== 0)
2774 /* Walk over each section attached to the input bfd. */
2775 for (section
= input_bfd
->sections
;
2776 section
!= NULL
; section
= section
->next
)
2778 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
2780 /* If there aren't any relocs, then there's nothing more
2782 if ((section
->flags
& SEC_RELOC
) == 0
2783 || section
->reloc_count
== 0
2784 || (section
->flags
& SEC_CODE
) == 0)
2787 /* If this section is a link-once section that will be
2788 discarded, then don't create any stubs. */
2789 if (section
->output_section
== NULL
2790 || section
->output_section
->owner
!= output_bfd
)
2793 /* Get the relocs. */
2795 = _bfd_elf_link_read_relocs (input_bfd
, section
, NULL
,
2796 NULL
, info
->keep_memory
);
2797 if (internal_relocs
== NULL
)
2798 goto error_ret_free_local
;
2800 /* Now examine each relocation. */
2801 irela
= internal_relocs
;
2802 irelaend
= irela
+ section
->reloc_count
;
2803 for (; irela
< irelaend
; irela
++)
2805 unsigned int r_type
, r_indx
;
2806 enum elf64_aarch64_stub_type stub_type
;
2807 struct elf64_aarch64_stub_hash_entry
*stub_entry
;
2810 bfd_vma destination
;
2811 struct elf64_aarch64_link_hash_entry
*hash
;
2812 const char *sym_name
;
2814 const asection
*id_sec
;
2815 unsigned char st_type
;
2818 r_type
= ELF64_R_TYPE (irela
->r_info
);
2819 r_indx
= ELF64_R_SYM (irela
->r_info
);
2821 if (r_type
>= (unsigned int) R_AARCH64_end
)
2823 bfd_set_error (bfd_error_bad_value
);
2824 error_ret_free_internal
:
2825 if (elf_section_data (section
)->relocs
== NULL
)
2826 free (internal_relocs
);
2827 goto error_ret_free_local
;
2830 /* Only look for stubs on unconditional branch and
2831 branch and link instructions. */
2832 if (r_type
!= (unsigned int) R_AARCH64_CALL26
2833 && r_type
!= (unsigned int) R_AARCH64_JUMP26
)
2836 /* Now determine the call target, its name, value,
2843 if (r_indx
< symtab_hdr
->sh_info
)
2845 /* It's a local symbol. */
2846 Elf_Internal_Sym
*sym
;
2847 Elf_Internal_Shdr
*hdr
;
2849 if (local_syms
== NULL
)
2852 = (Elf_Internal_Sym
*) symtab_hdr
->contents
;
2853 if (local_syms
== NULL
)
2855 = bfd_elf_get_elf_syms (input_bfd
, symtab_hdr
,
2856 symtab_hdr
->sh_info
, 0,
2858 if (local_syms
== NULL
)
2859 goto error_ret_free_internal
;
2862 sym
= local_syms
+ r_indx
;
2863 hdr
= elf_elfsections (input_bfd
)[sym
->st_shndx
];
2864 sym_sec
= hdr
->bfd_section
;
2866 /* This is an undefined symbol. It can never
2870 if (ELF_ST_TYPE (sym
->st_info
) != STT_SECTION
)
2871 sym_value
= sym
->st_value
;
2872 destination
= (sym_value
+ irela
->r_addend
2873 + sym_sec
->output_offset
2874 + sym_sec
->output_section
->vma
);
2875 st_type
= ELF_ST_TYPE (sym
->st_info
);
2877 = bfd_elf_string_from_elf_section (input_bfd
,
2878 symtab_hdr
->sh_link
,
2885 e_indx
= r_indx
- symtab_hdr
->sh_info
;
2886 hash
= ((struct elf64_aarch64_link_hash_entry
*)
2887 elf_sym_hashes (input_bfd
)[e_indx
]);
2889 while (hash
->root
.root
.type
== bfd_link_hash_indirect
2890 || hash
->root
.root
.type
== bfd_link_hash_warning
)
2891 hash
= ((struct elf64_aarch64_link_hash_entry
*)
2892 hash
->root
.root
.u
.i
.link
);
2894 if (hash
->root
.root
.type
== bfd_link_hash_defined
2895 || hash
->root
.root
.type
== bfd_link_hash_defweak
)
2897 struct elf64_aarch64_link_hash_table
*globals
=
2898 elf64_aarch64_hash_table (info
);
2899 sym_sec
= hash
->root
.root
.u
.def
.section
;
2900 sym_value
= hash
->root
.root
.u
.def
.value
;
2901 /* For a destination in a shared library,
2902 use the PLT stub as target address to
2903 decide whether a branch stub is
2905 if (globals
->root
.splt
!= NULL
&& hash
!= NULL
2906 && hash
->root
.plt
.offset
!= (bfd_vma
) - 1)
2908 sym_sec
= globals
->root
.splt
;
2909 sym_value
= hash
->root
.plt
.offset
;
2910 if (sym_sec
->output_section
!= NULL
)
2911 destination
= (sym_value
2912 + sym_sec
->output_offset
2914 sym_sec
->output_section
->vma
);
2916 else if (sym_sec
->output_section
!= NULL
)
2917 destination
= (sym_value
+ irela
->r_addend
2918 + sym_sec
->output_offset
2919 + sym_sec
->output_section
->vma
);
2921 else if (hash
->root
.root
.type
== bfd_link_hash_undefined
2922 || (hash
->root
.root
.type
2923 == bfd_link_hash_undefweak
))
2925 /* For a shared library, use the PLT stub as
2926 target address to decide whether a long
2927 branch stub is needed.
2928 For absolute code, they cannot be handled. */
2929 struct elf64_aarch64_link_hash_table
*globals
=
2930 elf64_aarch64_hash_table (info
);
2932 if (globals
->root
.splt
!= NULL
&& hash
!= NULL
2933 && hash
->root
.plt
.offset
!= (bfd_vma
) - 1)
2935 sym_sec
= globals
->root
.splt
;
2936 sym_value
= hash
->root
.plt
.offset
;
2937 if (sym_sec
->output_section
!= NULL
)
2938 destination
= (sym_value
2939 + sym_sec
->output_offset
2941 sym_sec
->output_section
->vma
);
2948 bfd_set_error (bfd_error_bad_value
);
2949 goto error_ret_free_internal
;
2951 st_type
= ELF_ST_TYPE (hash
->root
.type
);
2952 sym_name
= hash
->root
.root
.root
.string
;
2955 /* Determine what (if any) linker stub is needed. */
2956 stub_type
= aarch64_type_of_stub
2957 (info
, section
, irela
, st_type
, hash
, destination
);
2958 if (stub_type
== aarch64_stub_none
)
2961 /* Support for grouping stub sections. */
2962 id_sec
= htab
->stub_group
[section
->id
].link_sec
;
2964 /* Get the name of this stub. */
2965 stub_name
= elf64_aarch64_stub_name (id_sec
, sym_sec
, hash
,
2968 goto error_ret_free_internal
;
2971 aarch64_stub_hash_lookup (&htab
->stub_hash_table
,
2972 stub_name
, FALSE
, FALSE
);
2973 if (stub_entry
!= NULL
)
2975 /* The proper stub has already been created. */
2980 stub_entry
= elf64_aarch64_add_stub (stub_name
, section
,
2982 if (stub_entry
== NULL
)
2985 goto error_ret_free_internal
;
2988 stub_entry
->target_value
= sym_value
;
2989 stub_entry
->target_section
= sym_sec
;
2990 stub_entry
->stub_type
= stub_type
;
2991 stub_entry
->h
= hash
;
2992 stub_entry
->st_type
= st_type
;
2994 if (sym_name
== NULL
)
2995 sym_name
= "unnamed";
2996 len
= sizeof (STUB_ENTRY_NAME
) + strlen (sym_name
);
2997 stub_entry
->output_name
= bfd_alloc (htab
->stub_bfd
, len
);
2998 if (stub_entry
->output_name
== NULL
)
3001 goto error_ret_free_internal
;
3004 snprintf (stub_entry
->output_name
, len
, STUB_ENTRY_NAME
,
3007 stub_changed
= TRUE
;
3010 /* We're done with the internal relocs, free them. */
3011 if (elf_section_data (section
)->relocs
== NULL
)
3012 free (internal_relocs
);
3019 /* OK, we've added some stubs. Find out the new size of the
3021 for (stub_sec
= htab
->stub_bfd
->sections
;
3022 stub_sec
!= NULL
; stub_sec
= stub_sec
->next
)
3025 bfd_hash_traverse (&htab
->stub_hash_table
, aarch64_size_one_stub
, htab
);
3027 /* Ask the linker to do its stuff. */
3028 (*htab
->layout_sections_again
) ();
3029 stub_changed
= FALSE
;
3034 error_ret_free_local
:
3038 /* Build all the stubs associated with the current output file. The
3039 stubs are kept in a hash table attached to the main linker hash
3040 table. We also set up the .plt entries for statically linked PIC
3041 functions here. This function is called via aarch64_elf_finish in the
3045 elf64_aarch64_build_stubs (struct bfd_link_info
*info
)
3048 struct bfd_hash_table
*table
;
3049 struct elf64_aarch64_link_hash_table
*htab
;
3051 htab
= elf64_aarch64_hash_table (info
);
3053 for (stub_sec
= htab
->stub_bfd
->sections
;
3054 stub_sec
!= NULL
; stub_sec
= stub_sec
->next
)
3058 /* Ignore non-stub sections. */
3059 if (!strstr (stub_sec
->name
, STUB_SUFFIX
))
3062 /* Allocate memory to hold the linker stubs. */
3063 size
= stub_sec
->size
;
3064 stub_sec
->contents
= bfd_zalloc (htab
->stub_bfd
, size
);
3065 if (stub_sec
->contents
== NULL
&& size
!= 0)
3070 /* Build the stubs as directed by the stub hash table. */
3071 table
= &htab
->stub_hash_table
;
3072 bfd_hash_traverse (table
, aarch64_build_one_stub
, info
);
3078 /* Add an entry to the code/data map for section SEC. */
3081 elf64_aarch64_section_map_add (asection
*sec
, char type
, bfd_vma vma
)
3083 struct _aarch64_elf_section_data
*sec_data
=
3084 elf64_aarch64_section_data (sec
);
3085 unsigned int newidx
;
3087 if (sec_data
->map
== NULL
)
3089 sec_data
->map
= bfd_malloc (sizeof (elf64_aarch64_section_map
));
3090 sec_data
->mapcount
= 0;
3091 sec_data
->mapsize
= 1;
3094 newidx
= sec_data
->mapcount
++;
3096 if (sec_data
->mapcount
> sec_data
->mapsize
)
3098 sec_data
->mapsize
*= 2;
3099 sec_data
->map
= bfd_realloc_or_free
3100 (sec_data
->map
, sec_data
->mapsize
* sizeof (elf64_aarch64_section_map
));
3105 sec_data
->map
[newidx
].vma
= vma
;
3106 sec_data
->map
[newidx
].type
= type
;
3111 /* Initialise maps of insn/data for input BFDs. */
3113 bfd_elf64_aarch64_init_maps (bfd
*abfd
)
3115 Elf_Internal_Sym
*isymbuf
;
3116 Elf_Internal_Shdr
*hdr
;
3117 unsigned int i
, localsyms
;
3119 /* Make sure that we are dealing with an AArch64 elf binary. */
3120 if (!is_aarch64_elf (abfd
))
3123 if ((abfd
->flags
& DYNAMIC
) != 0)
3126 hdr
= &elf_symtab_hdr (abfd
);
3127 localsyms
= hdr
->sh_info
;
3129 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
3130 should contain the number of local symbols, which should come before any
3131 global symbols. Mapping symbols are always local. */
3132 isymbuf
= bfd_elf_get_elf_syms (abfd
, hdr
, localsyms
, 0, NULL
, NULL
, NULL
);
3134 /* No internal symbols read? Skip this BFD. */
3135 if (isymbuf
== NULL
)
3138 for (i
= 0; i
< localsyms
; i
++)
3140 Elf_Internal_Sym
*isym
= &isymbuf
[i
];
3141 asection
*sec
= bfd_section_from_elf_index (abfd
, isym
->st_shndx
);
3144 if (sec
!= NULL
&& ELF_ST_BIND (isym
->st_info
) == STB_LOCAL
)
3146 name
= bfd_elf_string_from_elf_section (abfd
,
3150 if (bfd_is_aarch64_special_symbol_name
3151 (name
, BFD_AARCH64_SPECIAL_SYM_TYPE_MAP
))
3152 elf64_aarch64_section_map_add (sec
, name
[1], isym
->st_value
);
3157 /* Set option values needed during linking. */
3159 bfd_elf64_aarch64_set_options (struct bfd
*output_bfd
,
3160 struct bfd_link_info
*link_info
,
3162 int no_wchar_warn
, int pic_veneer
)
3164 struct elf64_aarch64_link_hash_table
*globals
;
3166 globals
= elf64_aarch64_hash_table (link_info
);
3167 globals
->pic_veneer
= pic_veneer
;
3169 BFD_ASSERT (is_aarch64_elf (output_bfd
));
3170 elf_aarch64_tdata (output_bfd
)->no_enum_size_warning
= no_enum_warn
;
3171 elf_aarch64_tdata (output_bfd
)->no_wchar_size_warning
= no_wchar_warn
;
/* Helpers for extracting and inserting immediate/offset fields of A64
   instruction words.  MASK (n) is an n-bit mask of one bits.  */
#define MASK(n) ((1u << (n)) - 1)

/* Decode the 26-bit offset of unconditional branch (B/BL, bits 0-25).  */
static inline uint32_t
decode_branch_ofs_26 (uint32_t insn)
{
  return insn & MASK (26);
}

/* Decode the 19-bit offset of conditional branch and compare & branch
   (bits 5-23).  */
static inline uint32_t
decode_cond_branch_ofs_19 (uint32_t insn)
{
  return (insn >> 5) & MASK (19);
}

/* Decode the 19-bit offset of load literal (bits 5-23).  */
static inline uint32_t
decode_ld_lit_ofs_19 (uint32_t insn)
{
  return (insn >> 5) & MASK (19);
}

/* Decode the 14-bit offset of test & branch (TBZ/TBNZ, bits 5-18).  */
static inline uint32_t
decode_tst_branch_ofs_14 (uint32_t insn)
{
  return (insn >> 5) & MASK (14);
}

/* Decode the 16-bit imm of move wide (MOVZ/MOVN/MOVK, bits 5-20).  */
static inline uint32_t
decode_movw_imm (uint32_t insn)
{
  return (insn >> 5) & MASK (16);
}

/* Decode the 21-bit imm of adr: immlo in bits 29-30, immhi in
   bits 5-23.  */
static inline uint32_t
decode_adr_imm (uint32_t insn)
{
  return ((insn >> 29) & MASK (2)) | ((insn >> 3) & (MASK (19) << 2));
}

/* Decode the 12-bit imm of add immediate (bits 10-21).  */
static inline uint32_t
decode_add_imm (uint32_t insn)
{
  return (insn >> 10) & MASK (12);
}

/* Encode the 26-bit offset of unconditional branch.  */
static inline uint32_t
reencode_branch_ofs_26 (uint32_t insn, uint32_t ofs)
{
  return (insn & ~MASK (26)) | (ofs & MASK (26));
}

/* Encode the 19-bit offset of conditional branch and compare & branch.  */
static inline uint32_t
reencode_cond_branch_ofs_19 (uint32_t insn, uint32_t ofs)
{
  return (insn & ~(MASK (19) << 5)) | ((ofs & MASK (19)) << 5);
}

/* Encode the 19-bit offset of load literal.  */
static inline uint32_t
reencode_ld_lit_ofs_19 (uint32_t insn, uint32_t ofs)
{
  return (insn & ~(MASK (19) << 5)) | ((ofs & MASK (19)) << 5);
}

/* Encode the 14-bit offset of test & branch.  */
static inline uint32_t
reencode_tst_branch_ofs_14 (uint32_t insn, uint32_t ofs)
{
  return (insn & ~(MASK (14) << 5)) | ((ofs & MASK (14)) << 5);
}

/* Reencode the imm field of move wide.  */
static inline uint32_t
reencode_movw_imm (uint32_t insn, uint32_t imm)
{
  return (insn & ~(MASK (16) << 5)) | ((imm & MASK (16)) << 5);
}

/* Reencode the imm field of adr: low two bits go to bits 29-30, the
   remaining 19 bits to bits 5-23.  */
static inline uint32_t
reencode_adr_imm (uint32_t insn, uint32_t imm)
{
  return (insn & ~((MASK (2) << 29) | (MASK (19) << 5)))
    | ((imm & MASK (2)) << 29) | ((imm & (MASK (19) << 2)) << 3);
}

/* Reencode the imm field of ld/st pos immediate (bits 10-21).  */
static inline uint32_t
reencode_ldst_pos_imm (uint32_t insn, uint32_t imm)
{
  return (insn & ~(MASK (12) << 10)) | ((imm & MASK (12)) << 10);
}

/* Reencode the imm field of add immediate (bits 10-21).  */
static inline uint32_t
reencode_add_imm (uint32_t insn, uint32_t imm)
{
  return (insn & ~(MASK (12) << 10)) | ((imm & MASK (12)) << 10);
}

/* Reencode mov[zn] to movz (set opcode bit 30).  */
static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  return opcode | (1 << 30);
}

/* Reencode mov[zn] to movn (clear opcode bit 30).  */
static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  return opcode & ~(1 << 30);
}
3297 /* Insert the addend/value into the instruction or data object being
3299 static bfd_reloc_status_type
3300 bfd_elf_aarch64_put_addend (bfd
*abfd
,
3302 reloc_howto_type
*howto
, bfd_signed_vma addend
)
3304 bfd_reloc_status_type status
= bfd_reloc_ok
;
3305 bfd_signed_vma old_addend
= addend
;
3309 size
= bfd_get_reloc_size (howto
);
3313 contents
= bfd_get_16 (abfd
, address
);
3316 if (howto
->src_mask
!= 0xffffffff)
3317 /* Must be 32-bit instruction, always little-endian. */
3318 contents
= bfd_getl32 (address
);
3320 /* Must be 32-bit data (endianness dependent). */
3321 contents
= bfd_get_32 (abfd
, address
);
3324 contents
= bfd_get_64 (abfd
, address
);
3330 switch (howto
->complain_on_overflow
)
3332 case complain_overflow_dont
:
3334 case complain_overflow_signed
:
3335 status
= aarch64_signed_overflow (addend
,
3336 howto
->bitsize
+ howto
->rightshift
);
3338 case complain_overflow_unsigned
:
3339 status
= aarch64_unsigned_overflow (addend
,
3340 howto
->bitsize
+ howto
->rightshift
);
3342 case complain_overflow_bitfield
:
3347 addend
>>= howto
->rightshift
;
3349 switch (howto
->type
)
3351 case R_AARCH64_JUMP26
:
3352 case R_AARCH64_CALL26
:
3353 contents
= reencode_branch_ofs_26 (contents
, addend
);
3356 case R_AARCH64_CONDBR19
:
3357 contents
= reencode_cond_branch_ofs_19 (contents
, addend
);
3360 case R_AARCH64_TSTBR14
:
3361 contents
= reencode_tst_branch_ofs_14 (contents
, addend
);
3364 case R_AARCH64_LD_PREL_LO19
:
3365 if (old_addend
& ((1 << howto
->rightshift
) - 1))
3366 return bfd_reloc_overflow
;
3367 contents
= reencode_ld_lit_ofs_19 (contents
, addend
);
3370 case R_AARCH64_TLSDESC_CALL
:
3373 case R_AARCH64_TLSGD_ADR_PAGE21
:
3374 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21
:
3375 case R_AARCH64_TLSDESC_ADR_PAGE
:
3376 case R_AARCH64_ADR_GOT_PAGE
:
3377 case R_AARCH64_ADR_PREL_LO21
:
3378 case R_AARCH64_ADR_PREL_PG_HI21
:
3379 case R_AARCH64_ADR_PREL_PG_HI21_NC
:
3380 contents
= reencode_adr_imm (contents
, addend
);
3383 case R_AARCH64_TLSGD_ADD_LO12_NC
:
3384 case R_AARCH64_TLSLE_ADD_TPREL_LO12
:
3385 case R_AARCH64_TLSLE_ADD_TPREL_HI12
:
3386 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC
:
3387 case R_AARCH64_TLSDESC_ADD_LO12_NC
:
3388 case R_AARCH64_ADD_ABS_LO12_NC
:
3389 /* Corresponds to: add rd, rn, #uimm12 to provide the low order
3390 12 bits of the page offset following
3391 R_AARCH64_ADR_PREL_PG_HI21 which computes the
3392 (pc-relative) page base. */
3393 contents
= reencode_add_imm (contents
, addend
);
3396 case R_AARCH64_LDST8_ABS_LO12_NC
:
3397 case R_AARCH64_LDST16_ABS_LO12_NC
:
3398 case R_AARCH64_LDST32_ABS_LO12_NC
:
3399 case R_AARCH64_LDST64_ABS_LO12_NC
:
3400 case R_AARCH64_LDST128_ABS_LO12_NC
:
3401 case R_AARCH64_TLSDESC_LD64_LO12_NC
:
3402 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC
:
3403 case R_AARCH64_LD64_GOT_LO12_NC
:
3404 if (old_addend
& ((1 << howto
->rightshift
) - 1))
3405 return bfd_reloc_overflow
;
3406 /* Used for ldr*|str* rt, [rn, #uimm12] to provide the low order
3407 12 bits of the page offset following R_AARCH64_ADR_PREL_PG_HI21
3408 which computes the (pc-relative) page base. */
3409 contents
= reencode_ldst_pos_imm (contents
, addend
);
3412 /* Group relocations to create high bits of a 16, 32, 48 or 64
3413 bit signed data or abs address inline. Will change
3414 instruction to MOVN or MOVZ depending on sign of calculated
3417 case R_AARCH64_TLSLE_MOVW_TPREL_G2
:
3418 case R_AARCH64_TLSLE_MOVW_TPREL_G1
:
3419 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC
:
3420 case R_AARCH64_TLSLE_MOVW_TPREL_G0
:
3421 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC
:
3422 case R_AARCH64_MOVW_SABS_G0
:
3423 case R_AARCH64_MOVW_SABS_G1
:
3424 case R_AARCH64_MOVW_SABS_G2
:
3425 /* NOTE: We can only come here with movz or movn. */
3428 /* Force use of MOVN. */
3430 contents
= reencode_movzn_to_movn (contents
);
3434 /* Force use of MOVZ. */
3435 contents
= reencode_movzn_to_movz (contents
);
3439 /* Group relocations to create a 16, 32, 48 or 64 bit unsigned
3440 data or abs address inline. */
3442 case R_AARCH64_MOVW_UABS_G0
:
3443 case R_AARCH64_MOVW_UABS_G0_NC
:
3444 case R_AARCH64_MOVW_UABS_G1
:
3445 case R_AARCH64_MOVW_UABS_G1_NC
:
3446 case R_AARCH64_MOVW_UABS_G2
:
3447 case R_AARCH64_MOVW_UABS_G2_NC
:
3448 case R_AARCH64_MOVW_UABS_G3
:
3449 contents
= reencode_movw_imm (contents
, addend
);
3453 /* Repack simple data */
3454 if (howto
->dst_mask
& (howto
->dst_mask
+ 1))
3455 return bfd_reloc_notsupported
;
3457 contents
= ((contents
& ~howto
->dst_mask
) | (addend
& howto
->dst_mask
));
3464 bfd_put_16 (abfd
, contents
, address
);
3467 if (howto
->dst_mask
!= 0xffffffff)
3468 /* must be 32-bit instruction, always little-endian */
3469 bfd_putl32 (contents
, address
);
3471 /* must be 32-bit data (endianness dependent) */
3472 bfd_put_32 (abfd
, contents
, address
);
3475 bfd_put_64 (abfd
, contents
, address
);
3485 aarch64_calculate_got_entry_vma (struct elf_link_hash_entry
*h
,
3486 struct elf64_aarch64_link_hash_table
3487 *globals
, struct bfd_link_info
*info
,
3488 bfd_vma value
, bfd
*output_bfd
,
3489 bfd_boolean
*unresolved_reloc_p
)
3491 bfd_vma off
= (bfd_vma
) - 1;
3492 asection
*basegot
= globals
->root
.sgot
;
3493 bfd_boolean dyn
= globals
->root
.dynamic_sections_created
;
3497 off
= h
->got
.offset
;
3498 BFD_ASSERT (off
!= (bfd_vma
) - 1);
3499 if (!WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn
, info
->shared
, h
)
3501 && SYMBOL_REFERENCES_LOCAL (info
, h
))
3502 || (ELF_ST_VISIBILITY (h
->other
)
3503 && h
->root
.type
== bfd_link_hash_undefweak
))
3505 /* This is actually a static link, or it is a -Bsymbolic link
3506 and the symbol is defined locally. We must initialize this
3507 entry in the global offset table. Since the offset must
3508 always be a multiple of 8, we use the least significant bit
3509 to record whether we have initialized it already.
3510 When doing a dynamic link, we create a .rel(a).got relocation
3511 entry to initialize the value. This is done in the
3512 finish_dynamic_symbol routine. */
3517 bfd_put_64 (output_bfd
, value
, basegot
->contents
+ off
);
3522 *unresolved_reloc_p
= FALSE
;
3524 off
= off
+ basegot
->output_section
->vma
+ basegot
->output_offset
;
3530 /* Change R_TYPE to a more efficient access model where possible,
3531 return the new reloc type. */
3534 aarch64_tls_transition_without_check (unsigned int r_type
,
3535 struct elf_link_hash_entry
*h
)
3537 bfd_boolean is_local
= h
== NULL
;
3540 case R_AARCH64_TLSGD_ADR_PAGE21
:
3541 case R_AARCH64_TLSDESC_ADR_PAGE
:
3543 ? R_AARCH64_TLSLE_MOVW_TPREL_G1
: R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21
;
3545 case R_AARCH64_TLSGD_ADD_LO12_NC
:
3546 case R_AARCH64_TLSDESC_LD64_LO12_NC
:
3548 ? R_AARCH64_TLSLE_MOVW_TPREL_G0_NC
3549 : R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC
;
3551 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21
:
3552 return is_local
? R_AARCH64_TLSLE_MOVW_TPREL_G1
: r_type
;
3554 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC
:
3555 return is_local
? R_AARCH64_TLSLE_MOVW_TPREL_G0_NC
: r_type
;
3557 case R_AARCH64_TLSDESC_ADD_LO12_NC
:
3558 case R_AARCH64_TLSDESC_CALL
:
3559 /* Instructions with these relocations will become NOPs. */
3560 return R_AARCH64_NONE
;
3567 aarch64_reloc_got_type (unsigned int r_type
)
3571 case R_AARCH64_LD64_GOT_LO12_NC
:
3572 case R_AARCH64_ADR_GOT_PAGE
:
3575 case R_AARCH64_TLSGD_ADR_PAGE21
:
3576 case R_AARCH64_TLSGD_ADD_LO12_NC
:
3579 case R_AARCH64_TLSDESC_ADD_LO12_NC
:
3580 case R_AARCH64_TLSDESC_ADR_PAGE
:
3581 case R_AARCH64_TLSDESC_CALL
:
3582 case R_AARCH64_TLSDESC_LD64_LO12_NC
:
3583 return GOT_TLSDESC_GD
;
3585 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21
:
3586 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC
:
3589 case R_AARCH64_TLSLE_ADD_TPREL_HI12
:
3590 case R_AARCH64_TLSLE_ADD_TPREL_LO12
:
3591 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC
:
3592 case R_AARCH64_TLSLE_MOVW_TPREL_G0
:
3593 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC
:
3594 case R_AARCH64_TLSLE_MOVW_TPREL_G1
:
3595 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC
:
3596 case R_AARCH64_TLSLE_MOVW_TPREL_G2
:
3603 aarch64_can_relax_tls (bfd
*input_bfd
,
3604 struct bfd_link_info
*info
,
3605 unsigned int r_type
,
3606 struct elf_link_hash_entry
*h
,
3607 unsigned long r_symndx
)
3609 unsigned int symbol_got_type
;
3610 unsigned int reloc_got_type
;
3612 if (! IS_AARCH64_TLS_RELOC (r_type
))
3615 symbol_got_type
= elf64_aarch64_symbol_got_type (h
, input_bfd
, r_symndx
);
3616 reloc_got_type
= aarch64_reloc_got_type (r_type
);
3618 if (symbol_got_type
== GOT_TLS_IE
&& GOT_TLS_GD_ANY_P (reloc_got_type
))
3624 if (h
&& h
->root
.type
== bfd_link_hash_undefweak
)
3631 aarch64_tls_transition (bfd
*input_bfd
,
3632 struct bfd_link_info
*info
,
3633 unsigned int r_type
,
3634 struct elf_link_hash_entry
*h
,
3635 unsigned long r_symndx
)
3637 if (! aarch64_can_relax_tls (input_bfd
, info
, r_type
, h
, r_symndx
))
3640 return aarch64_tls_transition_without_check (r_type
, h
);
3643 /* Return the base VMA address which should be subtracted from real addresses
3644 when resolving R_AARCH64_TLS_DTPREL64 relocation. */
3647 dtpoff_base (struct bfd_link_info
*info
)
3649 /* If tls_sec is NULL, we should have signalled an error already. */
3650 BFD_ASSERT (elf_hash_table (info
)->tls_sec
!= NULL
);
3651 return elf_hash_table (info
)->tls_sec
->vma
;
3655 /* Return the base VMA address which should be subtracted from real addresses
3656 when resolving R_AARCH64_TLS_GOTTPREL64 relocations. */
3659 tpoff_base (struct bfd_link_info
*info
)
3661 struct elf_link_hash_table
*htab
= elf_hash_table (info
);
3663 /* If tls_sec is NULL, we should have signalled an error already. */
3664 if (htab
->tls_sec
== NULL
)
3667 bfd_vma base
= align_power ((bfd_vma
) TCB_SIZE
,
3668 htab
->tls_sec
->alignment_power
);
3669 return htab
->tls_sec
->vma
- base
;
3673 symbol_got_offset_ref (bfd
*input_bfd
, struct elf_link_hash_entry
*h
,
3674 unsigned long r_symndx
)
3676 /* Calculate the address of the GOT entry for symbol
3677 referred to in h. */
3679 return &h
->got
.offset
;
3683 struct elf_aarch64_local_symbol
*l
;
3685 l
= elf64_aarch64_locals (input_bfd
);
3686 return &l
[r_symndx
].got_offset
;
3691 symbol_got_offset_mark (bfd
*input_bfd
, struct elf_link_hash_entry
*h
,
3692 unsigned long r_symndx
)
3695 p
= symbol_got_offset_ref (input_bfd
, h
, r_symndx
);
3700 symbol_got_offset_mark_p (bfd
*input_bfd
, struct elf_link_hash_entry
*h
,
3701 unsigned long r_symndx
)
3704 value
= * symbol_got_offset_ref (input_bfd
, h
, r_symndx
);
3709 symbol_got_offset (bfd
*input_bfd
, struct elf_link_hash_entry
*h
,
3710 unsigned long r_symndx
)
3713 value
= * symbol_got_offset_ref (input_bfd
, h
, r_symndx
);
3719 symbol_tlsdesc_got_offset_ref (bfd
*input_bfd
, struct elf_link_hash_entry
*h
,
3720 unsigned long r_symndx
)
3722 /* Calculate the address of the GOT entry for symbol
3723 referred to in h. */
3726 struct elf64_aarch64_link_hash_entry
*eh
;
3727 eh
= (struct elf64_aarch64_link_hash_entry
*) h
;
3728 return &eh
->tlsdesc_got_jump_table_offset
;
3733 struct elf_aarch64_local_symbol
*l
;
3735 l
= elf64_aarch64_locals (input_bfd
);
3736 return &l
[r_symndx
].tlsdesc_got_jump_table_offset
;
3741 symbol_tlsdesc_got_offset_mark (bfd
*input_bfd
, struct elf_link_hash_entry
*h
,
3742 unsigned long r_symndx
)
3745 p
= symbol_tlsdesc_got_offset_ref (input_bfd
, h
, r_symndx
);
3750 symbol_tlsdesc_got_offset_mark_p (bfd
*input_bfd
,
3751 struct elf_link_hash_entry
*h
,
3752 unsigned long r_symndx
)
3755 value
= * symbol_tlsdesc_got_offset_ref (input_bfd
, h
, r_symndx
);
3760 symbol_tlsdesc_got_offset (bfd
*input_bfd
, struct elf_link_hash_entry
*h
,
3761 unsigned long r_symndx
)
3764 value
= * symbol_tlsdesc_got_offset_ref (input_bfd
, h
, r_symndx
);
3769 /* Perform a relocation as part of a final link. */
3770 static bfd_reloc_status_type
3771 elf64_aarch64_final_link_relocate (reloc_howto_type
*howto
,
3774 asection
*input_section
,
3776 Elf_Internal_Rela
*rel
,
3778 struct bfd_link_info
*info
,
3780 struct elf_link_hash_entry
*h
,
3781 bfd_boolean
*unresolved_reloc_p
,
3782 bfd_boolean save_addend
,
3783 bfd_vma
*saved_addend
)
3785 unsigned int r_type
= howto
->type
;
3786 unsigned long r_symndx
;
3787 bfd_byte
*hit_data
= contents
+ rel
->r_offset
;
3789 bfd_signed_vma signed_addend
;
3790 struct elf64_aarch64_link_hash_table
*globals
;
3791 bfd_boolean weak_undef_p
;
3793 globals
= elf64_aarch64_hash_table (info
);
3795 BFD_ASSERT (is_aarch64_elf (input_bfd
));
3797 r_symndx
= ELF64_R_SYM (rel
->r_info
);
3799 /* It is possible to have linker relaxations on some TLS access
3800 models. Update our information here. */
3801 r_type
= aarch64_tls_transition (input_bfd
, info
, r_type
, h
, r_symndx
);
3803 if (r_type
!= howto
->type
)
3804 howto
= elf64_aarch64_howto_from_type (r_type
);
3806 place
= input_section
->output_section
->vma
3807 + input_section
->output_offset
+ rel
->r_offset
;
3809 /* Get addend, accumulating the addend for consecutive relocs
3810 which refer to the same offset. */
3811 signed_addend
= saved_addend
? *saved_addend
: 0;
3812 signed_addend
+= rel
->r_addend
;
3814 weak_undef_p
= (h
? h
->root
.type
== bfd_link_hash_undefweak
3815 : bfd_is_und_section (sym_sec
));
3818 case R_AARCH64_NONE
:
3819 case R_AARCH64_NULL
:
3820 case R_AARCH64_TLSDESC_CALL
:
3821 *unresolved_reloc_p
= FALSE
;
3822 return bfd_reloc_ok
;
3824 case R_AARCH64_ABS64
:
3826 /* When generating a shared object or relocatable executable, these
3827 relocations are copied into the output file to be resolved at
3829 if (((info
->shared
== TRUE
) || globals
->root
.is_relocatable_executable
)
3830 && (input_section
->flags
& SEC_ALLOC
)
3832 || ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
3833 || h
->root
.type
!= bfd_link_hash_undefweak
))
3835 Elf_Internal_Rela outrel
;
3837 bfd_boolean skip
, relocate
;
3840 *unresolved_reloc_p
= FALSE
;
3842 sreloc
= _bfd_elf_get_dynamic_reloc_section (input_bfd
,
3845 return bfd_reloc_notsupported
;
3850 outrel
.r_addend
= signed_addend
;
3852 _bfd_elf_section_offset (output_bfd
, info
, input_section
,
3854 if (outrel
.r_offset
== (bfd_vma
) - 1)
3856 else if (outrel
.r_offset
== (bfd_vma
) - 2)
3862 outrel
.r_offset
+= (input_section
->output_section
->vma
3863 + input_section
->output_offset
);
3866 memset (&outrel
, 0, sizeof outrel
);
3869 && (!info
->shared
|| !info
->symbolic
|| !h
->def_regular
))
3870 outrel
.r_info
= ELF64_R_INFO (h
->dynindx
, r_type
);
3875 /* On SVR4-ish systems, the dynamic loader cannot
3876 relocate the text and data segments independently,
3877 so the symbol does not matter. */
3879 outrel
.r_info
= ELF64_R_INFO (symbol
, R_AARCH64_RELATIVE
);
3880 outrel
.r_addend
+= value
;
3883 loc
= sreloc
->contents
+ sreloc
->reloc_count
++ * RELOC_SIZE (htab
);
3884 bfd_elf64_swap_reloca_out (output_bfd
, &outrel
, loc
);
3886 if (sreloc
->reloc_count
* RELOC_SIZE (htab
) > sreloc
->size
)
3888 /* Sanity to check that we have previously allocated
3889 sufficient space in the relocation section for the
3890 number of relocations we actually want to emit. */
3894 /* If this reloc is against an external symbol, we do not want to
3895 fiddle with the addend. Otherwise, we need to include the symbol
3896 value so that it becomes an addend for the dynamic reloc. */
3898 return bfd_reloc_ok
;
3900 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
3901 contents
, rel
->r_offset
, value
,
3905 value
+= signed_addend
;
3908 case R_AARCH64_JUMP26
:
3909 case R_AARCH64_CALL26
:
3911 asection
*splt
= globals
->root
.splt
;
3912 bfd_boolean via_plt_p
=
3913 splt
!= NULL
&& h
!= NULL
&& h
->plt
.offset
!= (bfd_vma
) - 1;
3915 /* A call to an undefined weak symbol is converted to a jump to
3916 the next instruction unless a PLT entry will be created.
3917 The jump to the next instruction is optimized as a NOP.
3918 Do the same for local undefined symbols. */
3919 if (weak_undef_p
&& ! via_plt_p
)
3921 bfd_putl32 (INSN_NOP
, hit_data
);
3922 return bfd_reloc_ok
;
3925 /* If the call goes through a PLT entry, make sure to
3926 check distance to the right destination address. */
3929 value
= (splt
->output_section
->vma
3930 + splt
->output_offset
+ h
->plt
.offset
);
3931 *unresolved_reloc_p
= FALSE
;
3934 /* If the target symbol is global and marked as a function the
3935 relocation applies a function call or a tail call. In this
3936 situation we can veneer out of range branches. The veneers
3937 use IP0 and IP1 hence cannot be used arbitrary out of range
3938 branches that occur within the body of a function. */
3939 if (h
&& h
->type
== STT_FUNC
)
3941 /* Check if a stub has to be inserted because the destination
3943 if (! aarch64_valid_branch_p (value
, place
))
3945 /* The target is out of reach, so redirect the branch to
3946 the local stub for this function. */
3947 struct elf64_aarch64_stub_hash_entry
*stub_entry
;
3948 stub_entry
= elf64_aarch64_get_stub_entry (input_section
,
3951 if (stub_entry
!= NULL
)
3952 value
= (stub_entry
->stub_offset
3953 + stub_entry
->stub_sec
->output_offset
3954 + stub_entry
->stub_sec
->output_section
->vma
);
3958 value
= aarch64_resolve_relocation (r_type
, place
, value
,
3959 signed_addend
, weak_undef_p
);
3962 case R_AARCH64_ABS16
:
3963 case R_AARCH64_ABS32
:
3964 case R_AARCH64_ADD_ABS_LO12_NC
:
3965 case R_AARCH64_ADR_PREL_LO21
:
3966 case R_AARCH64_ADR_PREL_PG_HI21
:
3967 case R_AARCH64_ADR_PREL_PG_HI21_NC
:
3968 case R_AARCH64_CONDBR19
:
3969 case R_AARCH64_LD_PREL_LO19
:
3970 case R_AARCH64_LDST8_ABS_LO12_NC
:
3971 case R_AARCH64_LDST16_ABS_LO12_NC
:
3972 case R_AARCH64_LDST32_ABS_LO12_NC
:
3973 case R_AARCH64_LDST64_ABS_LO12_NC
:
3974 case R_AARCH64_LDST128_ABS_LO12_NC
:
3975 case R_AARCH64_MOVW_SABS_G0
:
3976 case R_AARCH64_MOVW_SABS_G1
:
3977 case R_AARCH64_MOVW_SABS_G2
:
3978 case R_AARCH64_MOVW_UABS_G0
:
3979 case R_AARCH64_MOVW_UABS_G0_NC
:
3980 case R_AARCH64_MOVW_UABS_G1
:
3981 case R_AARCH64_MOVW_UABS_G1_NC
:
3982 case R_AARCH64_MOVW_UABS_G2
:
3983 case R_AARCH64_MOVW_UABS_G2_NC
:
3984 case R_AARCH64_MOVW_UABS_G3
:
3985 case R_AARCH64_PREL16
:
3986 case R_AARCH64_PREL32
:
3987 case R_AARCH64_PREL64
:
3988 case R_AARCH64_TSTBR14
:
3989 value
= aarch64_resolve_relocation (r_type
, place
, value
,
3990 signed_addend
, weak_undef_p
);
3993 case R_AARCH64_LD64_GOT_LO12_NC
:
3994 case R_AARCH64_ADR_GOT_PAGE
:
3995 if (globals
->root
.sgot
== NULL
)
3996 BFD_ASSERT (h
!= NULL
);
4000 value
= aarch64_calculate_got_entry_vma (h
, globals
, info
, value
,
4002 unresolved_reloc_p
);
4003 value
= aarch64_resolve_relocation (r_type
, place
, value
,
4008 case R_AARCH64_TLSGD_ADR_PAGE21
:
4009 case R_AARCH64_TLSGD_ADD_LO12_NC
:
4010 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21
:
4011 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC
:
4012 if (globals
->root
.sgot
== NULL
)
4013 return bfd_reloc_notsupported
;
4015 value
= (symbol_got_offset (input_bfd
, h
, r_symndx
)
4016 + globals
->root
.sgot
->output_section
->vma
4017 + globals
->root
.sgot
->output_section
->output_offset
);
4019 value
= aarch64_resolve_relocation (r_type
, place
, value
,
4021 *unresolved_reloc_p
= FALSE
;
4024 case R_AARCH64_TLSLE_ADD_TPREL_HI12
:
4025 case R_AARCH64_TLSLE_ADD_TPREL_LO12
:
4026 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC
:
4027 case R_AARCH64_TLSLE_MOVW_TPREL_G0
:
4028 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC
:
4029 case R_AARCH64_TLSLE_MOVW_TPREL_G1
:
4030 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC
:
4031 case R_AARCH64_TLSLE_MOVW_TPREL_G2
:
4032 value
= aarch64_resolve_relocation (r_type
, place
, value
,
4033 - tpoff_base (info
), weak_undef_p
);
4034 *unresolved_reloc_p
= FALSE
;
4037 case R_AARCH64_TLSDESC_ADR_PAGE
:
4038 case R_AARCH64_TLSDESC_LD64_LO12_NC
:
4039 case R_AARCH64_TLSDESC_ADD_LO12_NC
:
4040 case R_AARCH64_TLSDESC_ADD
:
4041 case R_AARCH64_TLSDESC_LDR
:
4042 if (globals
->root
.sgot
== NULL
)
4043 return bfd_reloc_notsupported
;
4045 value
= (symbol_tlsdesc_got_offset (input_bfd
, h
, r_symndx
)
4046 + globals
->root
.sgotplt
->output_section
->vma
4047 + globals
->root
.sgotplt
->output_section
->output_offset
4048 + globals
->sgotplt_jump_table_size
);
4050 value
= aarch64_resolve_relocation (r_type
, place
, value
,
4052 *unresolved_reloc_p
= FALSE
;
4056 return bfd_reloc_notsupported
;
4060 *saved_addend
= value
;
4062 /* Only apply the final relocation in a sequence. */
4064 return bfd_reloc_continue
;
4066 return bfd_elf_aarch64_put_addend (input_bfd
, hit_data
, howto
, value
);
4069 /* Handle TLS relaxations. Relaxing is possible for symbols that use
4070 R_AARCH64_TLSDESC_ADR_{PAGE, LD64_LO12_NC, ADD_LO12_NC} during a static
4073 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
4074 is to then call final_link_relocate. Return other values in the
4077 static bfd_reloc_status_type
4078 elf64_aarch64_tls_relax (struct elf64_aarch64_link_hash_table
*globals
,
4079 bfd
*input_bfd
, bfd_byte
*contents
,
4080 Elf_Internal_Rela
*rel
, struct elf_link_hash_entry
*h
)
4082 bfd_boolean is_local
= h
== NULL
;
4083 unsigned int r_type
= ELF64_R_TYPE (rel
->r_info
);
4086 BFD_ASSERT (globals
&& input_bfd
&& contents
&& rel
);
4090 case R_AARCH64_TLSGD_ADR_PAGE21
:
4091 case R_AARCH64_TLSDESC_ADR_PAGE
:
4094 /* GD->LE relaxation:
4095 adrp x0, :tlsgd:var => movz x0, :tprel_g1:var
4097 adrp x0, :tlsdesc:var => movz x0, :tprel_g1:var
4099 bfd_putl32 (0xd2a00000, contents
+ rel
->r_offset
);
4100 return bfd_reloc_continue
;
4104 /* GD->IE relaxation:
4105 adrp x0, :tlsgd:var => adrp x0, :gottprel:var
4107 adrp x0, :tlsdesc:var => adrp x0, :gottprel:var
4109 insn
= bfd_getl32 (contents
+ rel
->r_offset
);
4110 return bfd_reloc_continue
;
4113 case R_AARCH64_TLSDESC_LD64_LO12_NC
:
4116 /* GD->LE relaxation:
4117 ldr xd, [x0, #:tlsdesc_lo12:var] => movk x0, :tprel_g0_nc:var
4119 bfd_putl32 (0xf2800000, contents
+ rel
->r_offset
);
4120 return bfd_reloc_continue
;
4124 /* GD->IE relaxation:
4125 ldr xd, [x0, #:tlsdesc_lo12:var] => ldr x0, [x0, #:gottprel_lo12:var]
4127 insn
= bfd_getl32 (contents
+ rel
->r_offset
);
4129 bfd_putl32 (insn
, contents
+ rel
->r_offset
);
4130 return bfd_reloc_continue
;
4133 case R_AARCH64_TLSGD_ADD_LO12_NC
:
4136 /* GD->LE relaxation
4137 add x0, #:tlsgd_lo12:var => movk x0, :tprel_g0_nc:var
4138 bl __tls_get_addr => mrs x1, tpidr_el0
4139 nop => add x0, x1, x0
4142 /* First kill the tls_get_addr reloc on the bl instruction. */
4143 BFD_ASSERT (rel
->r_offset
+ 4 == rel
[1].r_offset
);
4144 rel
[1].r_info
= ELF64_R_INFO (STN_UNDEF
, R_AARCH64_NONE
);
4146 bfd_putl32 (0xf2800000, contents
+ rel
->r_offset
);
4147 bfd_putl32 (0xd53bd041, contents
+ rel
->r_offset
+ 4);
4148 bfd_putl32 (0x8b000020, contents
+ rel
->r_offset
+ 8);
4149 return bfd_reloc_continue
;
4153 /* GD->IE relaxation
4154 ADD x0, #:tlsgd_lo12:var => ldr x0, [x0, #:gottprel_lo12:var]
4155 BL __tls_get_addr => mrs x1, tpidr_el0
4157 NOP => add x0, x1, x0
4160 BFD_ASSERT (ELF64_R_TYPE (rel
[1].r_info
) == R_AARCH64_CALL26
);
4162 /* Remove the relocation on the BL instruction. */
4163 rel
[1].r_info
= ELF64_R_INFO (STN_UNDEF
, R_AARCH64_NONE
);
4165 bfd_putl32 (0xf9400000, contents
+ rel
->r_offset
);
4167 /* We choose to fixup the BL and NOP instructions using the
4168 offset from the second relocation to allow flexibility in
4169 scheduling instructions between the ADD and BL. */
4170 bfd_putl32 (0xd53bd041, contents
+ rel
[1].r_offset
);
4171 bfd_putl32 (0x8b000020, contents
+ rel
[1].r_offset
+ 4);
4172 return bfd_reloc_continue
;
4175 case R_AARCH64_TLSDESC_ADD_LO12_NC
:
4176 case R_AARCH64_TLSDESC_CALL
:
4177 /* GD->IE/LE relaxation:
4178 add x0, x0, #:tlsdesc_lo12:var => nop
4181 bfd_putl32 (INSN_NOP
, contents
+ rel
->r_offset
);
4182 return bfd_reloc_ok
;
4184 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21
:
4185 /* IE->LE relaxation:
4186 adrp xd, :gottprel:var => movz xd, :tprel_g1:var
4190 insn
= bfd_getl32 (contents
+ rel
->r_offset
);
4191 bfd_putl32 (0xd2a00000 | (insn
& 0x1f), contents
+ rel
->r_offset
);
4193 return bfd_reloc_continue
;
4195 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC
:
4196 /* IE->LE relaxation:
4197 ldr xd, [xm, #:gottprel_lo12:var] => movk xd, :tprel_g0_nc:var
4201 insn
= bfd_getl32 (contents
+ rel
->r_offset
);
4202 bfd_putl32 (0xf2800000 | (insn
& 0x1f), contents
+ rel
->r_offset
);
4204 return bfd_reloc_continue
;
4207 return bfd_reloc_continue
;
4210 return bfd_reloc_ok
;
4213 /* Relocate an AArch64 ELF section. */
4216 elf64_aarch64_relocate_section (bfd
*output_bfd
,
4217 struct bfd_link_info
*info
,
4219 asection
*input_section
,
4221 Elf_Internal_Rela
*relocs
,
4222 Elf_Internal_Sym
*local_syms
,
4223 asection
**local_sections
)
4225 Elf_Internal_Shdr
*symtab_hdr
;
4226 struct elf_link_hash_entry
**sym_hashes
;
4227 Elf_Internal_Rela
*rel
;
4228 Elf_Internal_Rela
*relend
;
4230 struct elf64_aarch64_link_hash_table
*globals
;
4231 bfd_boolean save_addend
= FALSE
;
4234 globals
= elf64_aarch64_hash_table (info
);
4236 symtab_hdr
= &elf_symtab_hdr (input_bfd
);
4237 sym_hashes
= elf_sym_hashes (input_bfd
);
4240 relend
= relocs
+ input_section
->reloc_count
;
4241 for (; rel
< relend
; rel
++)
4243 unsigned int r_type
;
4244 unsigned int relaxed_r_type
;
4245 reloc_howto_type
*howto
;
4246 unsigned long r_symndx
;
4247 Elf_Internal_Sym
*sym
;
4249 struct elf_link_hash_entry
*h
;
4251 bfd_reloc_status_type r
;
4254 bfd_boolean unresolved_reloc
= FALSE
;
4255 char *error_message
= NULL
;
4257 r_symndx
= ELF64_R_SYM (rel
->r_info
);
4258 r_type
= ELF64_R_TYPE (rel
->r_info
);
4260 bfd_reloc
.howto
= elf64_aarch64_howto_from_type (r_type
);
4261 howto
= bfd_reloc
.howto
;
4267 if (r_symndx
< symtab_hdr
->sh_info
)
4269 sym
= local_syms
+ r_symndx
;
4270 sym_type
= ELF64_ST_TYPE (sym
->st_info
);
4271 sec
= local_sections
[r_symndx
];
4273 /* An object file might have a reference to a local
4274 undefined symbol. This is a daft object file, but we
4275 should at least do something about it. */
4276 if (r_type
!= R_AARCH64_NONE
&& r_type
!= R_AARCH64_NULL
4277 && bfd_is_und_section (sec
)
4278 && ELF_ST_BIND (sym
->st_info
) != STB_WEAK
)
4280 if (!info
->callbacks
->undefined_symbol
4281 (info
, bfd_elf_string_from_elf_section
4282 (input_bfd
, symtab_hdr
->sh_link
, sym
->st_name
),
4283 input_bfd
, input_section
, rel
->r_offset
, TRUE
))
4287 if (r_type
>= R_AARCH64_dyn_max
)
4289 bfd_set_error (bfd_error_bad_value
);
4293 relocation
= _bfd_elf_rela_local_sym (output_bfd
, sym
, &sec
, rel
);
4299 RELOC_FOR_GLOBAL_SYMBOL (info
, input_bfd
, input_section
, rel
,
4300 r_symndx
, symtab_hdr
, sym_hashes
,
4302 unresolved_reloc
, warned
);
4307 if (sec
!= NULL
&& discarded_section (sec
))
4308 RELOC_AGAINST_DISCARDED_SECTION (info
, input_bfd
, input_section
,
4309 rel
, 1, relend
, howto
, 0, contents
);
4311 if (info
->relocatable
)
4313 /* This is a relocatable link. We don't have to change
4314 anything, unless the reloc is against a section symbol,
4315 in which case we have to adjust according to where the
4316 section symbol winds up in the output section. */
4317 if (sym
!= NULL
&& ELF_ST_TYPE (sym
->st_info
) == STT_SECTION
)
4318 rel
->r_addend
+= sec
->output_offset
;
4323 name
= h
->root
.root
.string
;
4326 name
= (bfd_elf_string_from_elf_section
4327 (input_bfd
, symtab_hdr
->sh_link
, sym
->st_name
));
4328 if (name
== NULL
|| *name
== '\0')
4329 name
= bfd_section_name (input_bfd
, sec
);
4333 && r_type
!= R_AARCH64_NONE
4334 && r_type
!= R_AARCH64_NULL
4336 || h
->root
.type
== bfd_link_hash_defined
4337 || h
->root
.type
== bfd_link_hash_defweak
)
4338 && IS_AARCH64_TLS_RELOC (r_type
) != (sym_type
== STT_TLS
))
4340 (*_bfd_error_handler
)
4341 ((sym_type
== STT_TLS
4342 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
4343 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
4345 input_section
, (long) rel
->r_offset
, howto
->name
, name
);
4349 /* We relax only if we can see that there can be a valid transition
4350 from a reloc type to another.
4351 We call elf64_aarch64_final_link_relocate unless we're completely
4352 done, i.e., the relaxation produced the final output we want. */
4354 relaxed_r_type
= aarch64_tls_transition (input_bfd
, info
, r_type
,
4356 if (relaxed_r_type
!= r_type
)
4358 r_type
= relaxed_r_type
;
4359 howto
= elf64_aarch64_howto_from_type (r_type
);
4361 r
= elf64_aarch64_tls_relax (globals
, input_bfd
, contents
, rel
, h
);
4362 unresolved_reloc
= 0;
4365 r
= bfd_reloc_continue
;
4367 /* There may be multiple consecutive relocations for the
4368 same offset. In that case we are supposed to treat the
4369 output of each relocation as the addend for the next. */
4370 if (rel
+ 1 < relend
4371 && rel
->r_offset
== rel
[1].r_offset
4372 && ELF64_R_TYPE (rel
[1].r_info
) != R_AARCH64_NONE
4373 && ELF64_R_TYPE (rel
[1].r_info
) != R_AARCH64_NULL
)
4376 save_addend
= FALSE
;
4378 if (r
== bfd_reloc_continue
)
4379 r
= elf64_aarch64_final_link_relocate (howto
, input_bfd
, output_bfd
,
4380 input_section
, contents
, rel
,
4381 relocation
, info
, sec
,
4382 h
, &unresolved_reloc
,
4383 save_addend
, &addend
);
4387 case R_AARCH64_TLSGD_ADR_PAGE21
:
4388 case R_AARCH64_TLSGD_ADD_LO12_NC
:
4389 if (! symbol_got_offset_mark_p (input_bfd
, h
, r_symndx
))
4391 bfd_boolean need_relocs
= FALSE
;
4396 off
= symbol_got_offset (input_bfd
, h
, r_symndx
);
4397 indx
= h
&& h
->dynindx
!= -1 ? h
->dynindx
: 0;
4400 (info
->shared
|| indx
!= 0) &&
4402 || ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
4403 || h
->root
.type
!= bfd_link_hash_undefweak
);
4405 BFD_ASSERT (globals
->root
.srelgot
!= NULL
);
4409 Elf_Internal_Rela rela
;
4410 rela
.r_info
= ELF64_R_INFO (indx
, R_AARCH64_TLS_DTPMOD64
);
4412 rela
.r_offset
= globals
->root
.sgot
->output_section
->vma
+
4413 globals
->root
.sgot
->output_offset
+ off
;
4416 loc
= globals
->root
.srelgot
->contents
;
4417 loc
+= globals
->root
.srelgot
->reloc_count
++
4418 * RELOC_SIZE (htab
);
4419 bfd_elf64_swap_reloca_out (output_bfd
, &rela
, loc
);
4423 bfd_put_64 (output_bfd
,
4424 relocation
- dtpoff_base (info
),
4425 globals
->root
.sgot
->contents
+ off
4430 /* This TLS symbol is global. We emit a
4431 relocation to fixup the tls offset at load
4434 ELF64_R_INFO (indx
, R_AARCH64_TLS_DTPREL64
);
4437 (globals
->root
.sgot
->output_section
->vma
4438 + globals
->root
.sgot
->output_offset
+ off
4441 loc
= globals
->root
.srelgot
->contents
;
4442 loc
+= globals
->root
.srelgot
->reloc_count
++
4443 * RELOC_SIZE (globals
);
4444 bfd_elf64_swap_reloca_out (output_bfd
, &rela
, loc
);
4445 bfd_put_64 (output_bfd
, (bfd_vma
) 0,
4446 globals
->root
.sgot
->contents
+ off
4452 bfd_put_64 (output_bfd
, (bfd_vma
) 1,
4453 globals
->root
.sgot
->contents
+ off
);
4454 bfd_put_64 (output_bfd
,
4455 relocation
- dtpoff_base (info
),
4456 globals
->root
.sgot
->contents
+ off
4460 symbol_got_offset_mark (input_bfd
, h
, r_symndx
);
4464 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21
:
4465 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC
:
4466 if (! symbol_got_offset_mark_p (input_bfd
, h
, r_symndx
))
4468 bfd_boolean need_relocs
= FALSE
;
4473 off
= symbol_got_offset (input_bfd
, h
, r_symndx
);
4475 indx
= h
&& h
->dynindx
!= -1 ? h
->dynindx
: 0;
4478 (info
->shared
|| indx
!= 0) &&
4480 || ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
4481 || h
->root
.type
!= bfd_link_hash_undefweak
);
4483 BFD_ASSERT (globals
->root
.srelgot
!= NULL
);
4487 Elf_Internal_Rela rela
;
4490 rela
.r_addend
= relocation
- dtpoff_base (info
);
4494 rela
.r_info
= ELF64_R_INFO (indx
, R_AARCH64_TLS_TPREL64
);
4495 rela
.r_offset
= globals
->root
.sgot
->output_section
->vma
+
4496 globals
->root
.sgot
->output_offset
+ off
;
4498 loc
= globals
->root
.srelgot
->contents
;
4499 loc
+= globals
->root
.srelgot
->reloc_count
++
4500 * RELOC_SIZE (htab
);
4502 bfd_elf64_swap_reloca_out (output_bfd
, &rela
, loc
);
4504 bfd_put_64 (output_bfd
, rela
.r_addend
,
4505 globals
->root
.sgot
->contents
+ off
);
4508 bfd_put_64 (output_bfd
, relocation
- tpoff_base (info
),
4509 globals
->root
.sgot
->contents
+ off
);
4511 symbol_got_offset_mark (input_bfd
, h
, r_symndx
);
4515 case R_AARCH64_TLSLE_ADD_TPREL_LO12
:
4516 case R_AARCH64_TLSLE_ADD_TPREL_HI12
:
4517 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC
:
4518 case R_AARCH64_TLSLE_MOVW_TPREL_G2
:
4519 case R_AARCH64_TLSLE_MOVW_TPREL_G1
:
4520 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC
:
4521 case R_AARCH64_TLSLE_MOVW_TPREL_G0
:
4522 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC
:
4525 case R_AARCH64_TLSDESC_ADR_PAGE
:
4526 case R_AARCH64_TLSDESC_LD64_LO12_NC
:
4527 case R_AARCH64_TLSDESC_ADD_LO12_NC
:
4528 if (! symbol_tlsdesc_got_offset_mark_p (input_bfd
, h
, r_symndx
))
4530 bfd_boolean need_relocs
= FALSE
;
4531 int indx
= h
&& h
->dynindx
!= -1 ? h
->dynindx
: 0;
4532 bfd_vma off
= symbol_tlsdesc_got_offset (input_bfd
, h
, r_symndx
);
4534 need_relocs
= (h
== NULL
4535 || ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
4536 || h
->root
.type
!= bfd_link_hash_undefweak
);
4538 BFD_ASSERT (globals
->root
.srelgot
!= NULL
);
4539 BFD_ASSERT (globals
->root
.sgot
!= NULL
);
4544 Elf_Internal_Rela rela
;
4545 rela
.r_info
= ELF64_R_INFO (indx
, R_AARCH64_TLSDESC
);
4547 rela
.r_offset
= (globals
->root
.sgotplt
->output_section
->vma
4548 + globals
->root
.sgotplt
->output_offset
4549 + off
+ globals
->sgotplt_jump_table_size
);
4552 rela
.r_addend
= relocation
- dtpoff_base (info
);
4554 /* Allocate the next available slot in the PLT reloc
4555 section to hold our R_AARCH64_TLSDESC, the next
4556 available slot is determined from reloc_count,
4557 which we step. But note, reloc_count was
4558 artifically moved down while allocating slots for
4559 real PLT relocs such that all of the PLT relocs
4560 will fit above the initial reloc_count and the
4561 extra stuff will fit below. */
4562 loc
= globals
->root
.srelplt
->contents
;
4563 loc
+= globals
->root
.srelplt
->reloc_count
++
4564 * RELOC_SIZE (globals
);
4566 bfd_elf64_swap_reloca_out (output_bfd
, &rela
, loc
);
4568 bfd_put_64 (output_bfd
, (bfd_vma
) 0,
4569 globals
->root
.sgotplt
->contents
+ off
+
4570 globals
->sgotplt_jump_table_size
);
4571 bfd_put_64 (output_bfd
, (bfd_vma
) 0,
4572 globals
->root
.sgotplt
->contents
+ off
+
4573 globals
->sgotplt_jump_table_size
+
4577 symbol_tlsdesc_got_offset_mark (input_bfd
, h
, r_symndx
);
4586 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
4587 because such sections are not SEC_ALLOC and thus ld.so will
4588 not process them. */
4589 if (unresolved_reloc
4590 && !((input_section
->flags
& SEC_DEBUGGING
) != 0
4592 && _bfd_elf_section_offset (output_bfd
, info
, input_section
,
4593 +rel
->r_offset
) != (bfd_vma
) - 1)
4595 (*_bfd_error_handler
)
4597 ("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
4598 input_bfd
, input_section
, (long) rel
->r_offset
, howto
->name
,
4599 h
->root
.root
.string
);
4603 if (r
!= bfd_reloc_ok
&& r
!= bfd_reloc_continue
)
4607 case bfd_reloc_overflow
:
4608 /* If the overflowing reloc was to an undefined symbol,
4609 we have already printed one error message and there
4610 is no point complaining again. */
4612 h
->root
.type
!= bfd_link_hash_undefined
)
4613 && (!((*info
->callbacks
->reloc_overflow
)
4614 (info
, (h
? &h
->root
: NULL
), name
, howto
->name
,
4615 (bfd_vma
) 0, input_bfd
, input_section
,
4620 case bfd_reloc_undefined
:
4621 if (!((*info
->callbacks
->undefined_symbol
)
4622 (info
, name
, input_bfd
, input_section
,
4623 rel
->r_offset
, TRUE
)))
4627 case bfd_reloc_outofrange
:
4628 error_message
= _("out of range");
4631 case bfd_reloc_notsupported
:
4632 error_message
= _("unsupported relocation");
4635 case bfd_reloc_dangerous
:
4636 /* error_message should already be set. */
4640 error_message
= _("unknown error");
4644 BFD_ASSERT (error_message
!= NULL
);
4645 if (!((*info
->callbacks
->reloc_dangerous
)
4646 (info
, error_message
, input_bfd
, input_section
,
4657 /* Set the right machine number. */
4660 elf64_aarch64_object_p (bfd
*abfd
)
4662 bfd_default_set_arch_mach (abfd
, bfd_arch_aarch64
, bfd_mach_aarch64
);
4666 /* Function to keep AArch64 specific flags in the ELF header. */
4669 elf64_aarch64_set_private_flags (bfd
*abfd
, flagword flags
)
4671 if (elf_flags_init (abfd
) && elf_elfheader (abfd
)->e_flags
!= flags
)
4676 elf_elfheader (abfd
)->e_flags
= flags
;
4677 elf_flags_init (abfd
) = TRUE
;
4683 /* Copy backend specific data from one object module to another. */
4686 elf64_aarch64_copy_private_bfd_data (bfd
*ibfd
, bfd
*obfd
)
4690 if (!is_aarch64_elf (ibfd
) || !is_aarch64_elf (obfd
))
4693 in_flags
= elf_elfheader (ibfd
)->e_flags
;
4695 elf_elfheader (obfd
)->e_flags
= in_flags
;
4696 elf_flags_init (obfd
) = TRUE
;
4698 /* Also copy the EI_OSABI field. */
4699 elf_elfheader (obfd
)->e_ident
[EI_OSABI
] =
4700 elf_elfheader (ibfd
)->e_ident
[EI_OSABI
];
4702 /* Copy object attributes. */
4703 _bfd_elf_copy_obj_attributes (ibfd
, obfd
);
4708 /* Merge backend specific data from an object file to the output
4709 object file when linking. */
4712 elf64_aarch64_merge_private_bfd_data (bfd
*ibfd
, bfd
*obfd
)
4716 bfd_boolean flags_compatible
= TRUE
;
4719 /* Check if we have the same endianess. */
4720 if (!_bfd_generic_verify_endian_match (ibfd
, obfd
))
4723 if (!is_aarch64_elf (ibfd
) || !is_aarch64_elf (obfd
))
4726 /* The input BFD must have had its flags initialised. */
4727 /* The following seems bogus to me -- The flags are initialized in
4728 the assembler but I don't think an elf_flags_init field is
4729 written into the object. */
4730 /* BFD_ASSERT (elf_flags_init (ibfd)); */
4732 in_flags
= elf_elfheader (ibfd
)->e_flags
;
4733 out_flags
= elf_elfheader (obfd
)->e_flags
;
4735 if (!elf_flags_init (obfd
))
4737 /* If the input is the default architecture and had the default
4738 flags then do not bother setting the flags for the output
4739 architecture, instead allow future merges to do this. If no
4740 future merges ever set these flags then they will retain their
4741 uninitialised values, which surprise surprise, correspond
4742 to the default values. */
4743 if (bfd_get_arch_info (ibfd
)->the_default
4744 && elf_elfheader (ibfd
)->e_flags
== 0)
4747 elf_flags_init (obfd
) = TRUE
;
4748 elf_elfheader (obfd
)->e_flags
= in_flags
;
4750 if (bfd_get_arch (obfd
) == bfd_get_arch (ibfd
)
4751 && bfd_get_arch_info (obfd
)->the_default
)
4752 return bfd_set_arch_mach (obfd
, bfd_get_arch (ibfd
),
4753 bfd_get_mach (ibfd
));
4758 /* Identical flags must be compatible. */
4759 if (in_flags
== out_flags
)
4762 /* Check to see if the input BFD actually contains any sections. If
4763 not, its flags may not have been initialised either, but it
4764 cannot actually cause any incompatiblity. Do not short-circuit
4765 dynamic objects; their section list may be emptied by
4766 elf_link_add_object_symbols.
4768 Also check to see if there are no code sections in the input.
4769 In this case there is no need to check for code specific flags.
4770 XXX - do we need to worry about floating-point format compatability
4771 in data sections ? */
4772 if (!(ibfd
->flags
& DYNAMIC
))
4774 bfd_boolean null_input_bfd
= TRUE
;
4775 bfd_boolean only_data_sections
= TRUE
;
4777 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
4779 if ((bfd_get_section_flags (ibfd
, sec
)
4780 & (SEC_LOAD
| SEC_CODE
| SEC_HAS_CONTENTS
))
4781 == (SEC_LOAD
| SEC_CODE
| SEC_HAS_CONTENTS
))
4782 only_data_sections
= FALSE
;
4784 null_input_bfd
= FALSE
;
4788 if (null_input_bfd
|| only_data_sections
)
4792 return flags_compatible
;
4795 /* Display the flags field. */
4798 elf64_aarch64_print_private_bfd_data (bfd
*abfd
, void *ptr
)
4800 FILE *file
= (FILE *) ptr
;
4801 unsigned long flags
;
4803 BFD_ASSERT (abfd
!= NULL
&& ptr
!= NULL
);
4805 /* Print normal ELF private data. */
4806 _bfd_elf_print_private_bfd_data (abfd
, ptr
);
4808 flags
= elf_elfheader (abfd
)->e_flags
;
4809 /* Ignore init flag - it may not be set, despite the flags field
4810 containing valid data. */
4812 /* xgettext:c-format */
4813 fprintf (file
, _("private flags = %lx:"), elf_elfheader (abfd
)->e_flags
);
4816 fprintf (file
, _("<Unrecognised flag bits set>"));
4823 /* Update the got entry reference counts for the section being removed. */
4826 elf64_aarch64_gc_sweep_hook (bfd
*abfd ATTRIBUTE_UNUSED
,
4827 struct bfd_link_info
*info ATTRIBUTE_UNUSED
,
4828 asection
*sec ATTRIBUTE_UNUSED
,
4829 const Elf_Internal_Rela
*
4830 relocs ATTRIBUTE_UNUSED
)
4835 /* Adjust a symbol defined by a dynamic object and referenced by a
4836 regular object. The current definition is in some section of the
4837 dynamic object, but we're not including those sections. We have to
4838 change the definition to something the rest of the link can
4842 elf64_aarch64_adjust_dynamic_symbol (struct bfd_link_info
*info
,
4843 struct elf_link_hash_entry
*h
)
4845 struct elf64_aarch64_link_hash_table
*htab
;
4848 /* If this is a function, put it in the procedure linkage table. We
4849 will fill in the contents of the procedure linkage table later,
4850 when we know the address of the .got section. */
4851 if (h
->type
== STT_FUNC
|| h
->needs_plt
)
4853 if (h
->plt
.refcount
<= 0
4854 || SYMBOL_CALLS_LOCAL (info
, h
)
4855 || (ELF_ST_VISIBILITY (h
->other
) != STV_DEFAULT
4856 && h
->root
.type
== bfd_link_hash_undefweak
))
4858 /* This case can occur if we saw a CALL26 reloc in
4859 an input file, but the symbol wasn't referred to
4860 by a dynamic object or all references were
4861 garbage collected. In which case we can end up
4863 h
->plt
.offset
= (bfd_vma
) - 1;
4870 /* It's possible that we incorrectly decided a .plt reloc was
4871 needed for an R_X86_64_PC32 reloc to a non-function sym in
4872 check_relocs. We can't decide accurately between function and
4873 non-function syms in check-relocs; Objects loaded later in
4874 the link may change h->type. So fix it now. */
4875 h
->plt
.offset
= (bfd_vma
) - 1;
4878 /* If this is a weak symbol, and there is a real definition, the
4879 processor independent code will have arranged for us to see the
4880 real definition first, and we can just use the same value. */
4881 if (h
->u
.weakdef
!= NULL
)
4883 BFD_ASSERT (h
->u
.weakdef
->root
.type
== bfd_link_hash_defined
4884 || h
->u
.weakdef
->root
.type
== bfd_link_hash_defweak
);
4885 h
->root
.u
.def
.section
= h
->u
.weakdef
->root
.u
.def
.section
;
4886 h
->root
.u
.def
.value
= h
->u
.weakdef
->root
.u
.def
.value
;
4887 if (ELIMINATE_COPY_RELOCS
|| info
->nocopyreloc
)
4888 h
->non_got_ref
= h
->u
.weakdef
->non_got_ref
;
4892 /* If we are creating a shared library, we must presume that the
4893 only references to the symbol are via the global offset table.
4894 For such cases we need not do anything here; the relocations will
4895 be handled correctly by relocate_section. */
4899 /* If there are no references to this symbol that do not use the
4900 GOT, we don't need to generate a copy reloc. */
4901 if (!h
->non_got_ref
)
4904 /* If -z nocopyreloc was given, we won't generate them either. */
4905 if (info
->nocopyreloc
)
4911 /* We must allocate the symbol in our .dynbss section, which will
4912 become part of the .bss section of the executable. There will be
4913 an entry for this symbol in the .dynsym section. The dynamic
4914 object will contain position independent code, so all references
4915 from the dynamic object to this symbol will go through the global
4916 offset table. The dynamic linker will use the .dynsym entry to
4917 determine the address it must put in the global offset table, so
4918 both the dynamic object and the regular object will refer to the
4919 same memory location for the variable. */
4921 htab
= elf64_aarch64_hash_table (info
);
4923 /* We must generate a R_AARCH64_COPY reloc to tell the dynamic linker
4924 to copy the initial value out of the dynamic object and into the
4925 runtime process image. */
4926 if ((h
->root
.u
.def
.section
->flags
& SEC_ALLOC
) != 0 && h
->size
!= 0)
4928 htab
->srelbss
->size
+= RELOC_SIZE (htab
);
4934 return _bfd_elf_adjust_dynamic_copy (h
, s
);
4939 elf64_aarch64_allocate_local_symbols (bfd
*abfd
, unsigned number
)
4941 struct elf_aarch64_local_symbol
*locals
;
4942 locals
= elf64_aarch64_locals (abfd
);
4945 locals
= (struct elf_aarch64_local_symbol
*)
4946 bfd_zalloc (abfd
, number
* sizeof (struct elf_aarch64_local_symbol
));
4949 elf64_aarch64_locals (abfd
) = locals
;
4954 /* Look through the relocs for a section during the first phase. */
4957 elf64_aarch64_check_relocs (bfd
*abfd
, struct bfd_link_info
*info
,
4958 asection
*sec
, const Elf_Internal_Rela
*relocs
)
4960 Elf_Internal_Shdr
*symtab_hdr
;
4961 struct elf_link_hash_entry
**sym_hashes
;
4962 const Elf_Internal_Rela
*rel
;
4963 const Elf_Internal_Rela
*rel_end
;
4966 struct elf64_aarch64_link_hash_table
*htab
;
4968 unsigned long nsyms
;
4970 if (info
->relocatable
)
4973 BFD_ASSERT (is_aarch64_elf (abfd
));
4975 htab
= elf64_aarch64_hash_table (info
);
4978 symtab_hdr
= &elf_symtab_hdr (abfd
);
4979 sym_hashes
= elf_sym_hashes (abfd
);
4980 nsyms
= NUM_SHDR_ENTRIES (symtab_hdr
);
4982 rel_end
= relocs
+ sec
->reloc_count
;
4983 for (rel
= relocs
; rel
< rel_end
; rel
++)
4985 struct elf_link_hash_entry
*h
;
4986 unsigned long r_symndx
;
4987 unsigned int r_type
;
4989 r_symndx
= ELF64_R_SYM (rel
->r_info
);
4990 r_type
= ELF64_R_TYPE (rel
->r_info
);
4992 if (r_symndx
>= NUM_SHDR_ENTRIES (symtab_hdr
))
4994 (*_bfd_error_handler
) (_("%B: bad symbol index: %d"), abfd
,
4999 if (r_symndx
>= nsyms
5000 /* PR 9934: It is possible to have relocations that do not
5001 refer to symbols, thus it is also possible to have an
5002 object file containing relocations but no symbol table. */
5003 && (r_symndx
> 0 || nsyms
> 0))
5005 (*_bfd_error_handler
) (_("%B: bad symbol index: %d"), abfd
,
5010 if (nsyms
== 0 || r_symndx
< symtab_hdr
->sh_info
)
5014 h
= sym_hashes
[r_symndx
- symtab_hdr
->sh_info
];
5015 while (h
->root
.type
== bfd_link_hash_indirect
5016 || h
->root
.type
== bfd_link_hash_warning
)
5017 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
5020 /* Could be done earlier, if h were already available. */
5021 r_type
= aarch64_tls_transition (abfd
, info
, r_type
, h
, r_symndx
);
5025 case R_AARCH64_ABS64
:
5027 /* We don't need to handle relocs into sections not going into
5028 the "real" output. */
5029 if ((sec
->flags
& SEC_ALLOC
) == 0)
5037 h
->plt
.refcount
+= 1;
5038 h
->pointer_equality_needed
= 1;
5041 /* No need to do anything if we're not creating a shared
5047 struct elf_dyn_relocs
*p
;
5048 struct elf_dyn_relocs
**head
;
5050 /* We must copy these reloc types into the output file.
5051 Create a reloc section in dynobj and make room for
5055 if (htab
->root
.dynobj
== NULL
)
5056 htab
->root
.dynobj
= abfd
;
5058 sreloc
= _bfd_elf_make_dynamic_reloc_section
5059 (sec
, htab
->root
.dynobj
, 3, abfd
, /*rela? */ TRUE
);
5065 /* If this is a global symbol, we count the number of
5066 relocations we need for this symbol. */
5069 struct elf64_aarch64_link_hash_entry
*eh
;
5070 eh
= (struct elf64_aarch64_link_hash_entry
*) h
;
5071 head
= &eh
->dyn_relocs
;
5075 /* Track dynamic relocs needed for local syms too.
5076 We really need local syms available to do this
5081 Elf_Internal_Sym
*isym
;
5083 isym
= bfd_sym_from_r_symndx (&htab
->sym_cache
,
5088 s
= bfd_section_from_elf_index (abfd
, isym
->st_shndx
);
5092 /* Beware of type punned pointers vs strict aliasing
5094 vpp
= &(elf_section_data (s
)->local_dynrel
);
5095 head
= (struct elf_dyn_relocs
**) vpp
;
5099 if (p
== NULL
|| p
->sec
!= sec
)
5101 bfd_size_type amt
= sizeof *p
;
5102 p
= ((struct elf_dyn_relocs
*)
5103 bfd_zalloc (htab
->root
.dynobj
, amt
));
5116 /* RR: We probably want to keep a consistency check that
5117 there are no dangling GOT_PAGE relocs. */
5118 case R_AARCH64_LD64_GOT_LO12_NC
:
5119 case R_AARCH64_ADR_GOT_PAGE
:
5120 case R_AARCH64_TLSGD_ADR_PAGE21
:
5121 case R_AARCH64_TLSGD_ADD_LO12_NC
:
5122 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21
:
5123 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC
:
5124 case R_AARCH64_TLSLE_ADD_TPREL_LO12
:
5125 case R_AARCH64_TLSLE_ADD_TPREL_HI12
:
5126 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC
:
5127 case R_AARCH64_TLSLE_MOVW_TPREL_G2
:
5128 case R_AARCH64_TLSLE_MOVW_TPREL_G1
:
5129 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC
:
5130 case R_AARCH64_TLSLE_MOVW_TPREL_G0
:
5131 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC
:
5132 case R_AARCH64_TLSDESC_ADR_PAGE
:
5133 case R_AARCH64_TLSDESC_ADD_LO12_NC
:
5134 case R_AARCH64_TLSDESC_LD64_LO12_NC
:
5137 unsigned old_got_type
;
5139 got_type
= aarch64_reloc_got_type (r_type
);
5143 h
->got
.refcount
+= 1;
5144 old_got_type
= elf64_aarch64_hash_entry (h
)->got_type
;
5148 struct elf_aarch64_local_symbol
*locals
;
5150 if (!elf64_aarch64_allocate_local_symbols
5151 (abfd
, symtab_hdr
->sh_info
))
5154 locals
= elf64_aarch64_locals (abfd
);
5155 BFD_ASSERT (r_symndx
< symtab_hdr
->sh_info
);
5156 locals
[r_symndx
].got_refcount
+= 1;
5157 old_got_type
= locals
[r_symndx
].got_type
;
5160 /* If a variable is accessed with both general dynamic TLS
5161 methods, two slots may be created. */
5162 if (GOT_TLS_GD_ANY_P (old_got_type
) && GOT_TLS_GD_ANY_P (got_type
))
5163 got_type
|= old_got_type
;
5165 /* We will already have issued an error message if there
5166 is a TLS/non-TLS mismatch, based on the symbol type.
5167 So just combine any TLS types needed. */
5168 if (old_got_type
!= GOT_UNKNOWN
&& old_got_type
!= GOT_NORMAL
5169 && got_type
!= GOT_NORMAL
)
5170 got_type
|= old_got_type
;
5172 /* If the symbol is accessed by both IE and GD methods, we
5173 are able to relax. Turn off the GD flag, without
5174 messing up with any other kind of TLS types that may be
5176 if ((got_type
& GOT_TLS_IE
) && GOT_TLS_GD_ANY_P (got_type
))
5177 got_type
&= ~ (GOT_TLSDESC_GD
| GOT_TLS_GD
);
5179 if (old_got_type
!= got_type
)
5182 elf64_aarch64_hash_entry (h
)->got_type
= got_type
;
5185 struct elf_aarch64_local_symbol
*locals
;
5186 locals
= elf64_aarch64_locals (abfd
);
5187 BFD_ASSERT (r_symndx
< symtab_hdr
->sh_info
);
5188 locals
[r_symndx
].got_type
= got_type
;
5192 if (htab
->root
.sgot
== NULL
)
5194 if (htab
->root
.dynobj
== NULL
)
5195 htab
->root
.dynobj
= abfd
;
5196 if (!_bfd_elf_create_got_section (htab
->root
.dynobj
, info
))
5202 case R_AARCH64_ADR_PREL_PG_HI21_NC
:
5203 case R_AARCH64_ADR_PREL_PG_HI21
:
5204 if (h
!= NULL
&& info
->executable
)
5206 /* If this reloc is in a read-only section, we might
5207 need a copy reloc. We can't check reliably at this
5208 stage whether the section is read-only, as input
5209 sections have not yet been mapped to output sections.
5210 Tentatively set the flag for now, and correct in
5211 adjust_dynamic_symbol. */
5213 h
->plt
.refcount
+= 1;
5214 h
->pointer_equality_needed
= 1;
5216 /* FIXME:: RR need to handle these in shared libraries
5217 and essentially bomb out as these being non-PIC
5218 relocations in shared libraries. */
5221 case R_AARCH64_CALL26
:
5222 case R_AARCH64_JUMP26
:
5223 /* If this is a local symbol then we resolve it
5224 directly without creating a PLT entry. */
5229 h
->plt
.refcount
+= 1;
5236 /* Treat mapping symbols as special target symbols. */
5239 elf64_aarch64_is_target_special_symbol (bfd
*abfd ATTRIBUTE_UNUSED
,
5242 return bfd_is_aarch64_special_symbol_name (sym
->name
,
5243 BFD_AARCH64_SPECIAL_SYM_TYPE_ANY
);
5246 /* This is a copy of elf_find_function () from elf.c except that
5247 AArch64 mapping symbols are ignored when looking for function names. */
5250 aarch64_elf_find_function (bfd
*abfd ATTRIBUTE_UNUSED
,
5254 const char **filename_ptr
,
5255 const char **functionname_ptr
)
5257 const char *filename
= NULL
;
5258 asymbol
*func
= NULL
;
5259 bfd_vma low_func
= 0;
5262 for (p
= symbols
; *p
!= NULL
; p
++)
5266 q
= (elf_symbol_type
*) * p
;
5268 switch (ELF_ST_TYPE (q
->internal_elf_sym
.st_info
))
5273 filename
= bfd_asymbol_name (&q
->symbol
);
5277 /* Skip mapping symbols. */
5278 if ((q
->symbol
.flags
& BSF_LOCAL
)
5279 && (bfd_is_aarch64_special_symbol_name
5280 (q
->symbol
.name
, BFD_AARCH64_SPECIAL_SYM_TYPE_ANY
)))
5283 if (bfd_get_section (&q
->symbol
) == section
5284 && q
->symbol
.value
>= low_func
&& q
->symbol
.value
<= offset
)
5286 func
= (asymbol
*) q
;
5287 low_func
= q
->symbol
.value
;
5297 *filename_ptr
= filename
;
5298 if (functionname_ptr
)
5299 *functionname_ptr
= bfd_asymbol_name (func
);
5305 /* Find the nearest line to a particular section and offset, for error
5306 reporting. This code is a duplicate of the code in elf.c, except
5307 that it uses aarch64_elf_find_function. */
5310 elf64_aarch64_find_nearest_line (bfd
*abfd
,
5314 const char **filename_ptr
,
5315 const char **functionname_ptr
,
5316 unsigned int *line_ptr
)
5318 bfd_boolean found
= FALSE
;
5320 /* We skip _bfd_dwarf1_find_nearest_line since no known AArch64
5321 toolchain uses it. */
5323 if (_bfd_dwarf2_find_nearest_line (abfd
, dwarf_debug_sections
,
5324 section
, symbols
, offset
,
5325 filename_ptr
, functionname_ptr
,
5327 &elf_tdata (abfd
)->dwarf2_find_line_info
))
5329 if (!*functionname_ptr
)
5330 aarch64_elf_find_function (abfd
, section
, symbols
, offset
,
5331 *filename_ptr
? NULL
: filename_ptr
,
5337 if (!_bfd_stab_section_find_nearest_line (abfd
, symbols
, section
, offset
,
5338 &found
, filename_ptr
,
5339 functionname_ptr
, line_ptr
,
5340 &elf_tdata (abfd
)->line_info
))
5343 if (found
&& (*functionname_ptr
|| *line_ptr
))
5346 if (symbols
== NULL
)
5349 if (!aarch64_elf_find_function (abfd
, section
, symbols
, offset
,
5350 filename_ptr
, functionname_ptr
))
5358 elf64_aarch64_find_inliner_info (bfd
*abfd
,
5359 const char **filename_ptr
,
5360 const char **functionname_ptr
,
5361 unsigned int *line_ptr
)
5364 found
= _bfd_dwarf2_find_inliner_info
5365 (abfd
, filename_ptr
,
5366 functionname_ptr
, line_ptr
, &elf_tdata (abfd
)->dwarf2_find_line_info
);
5372 elf64_aarch64_post_process_headers (bfd
*abfd
,
5373 struct bfd_link_info
*link_info
5376 Elf_Internal_Ehdr
*i_ehdrp
; /* ELF file header, internal form. */
5378 i_ehdrp
= elf_elfheader (abfd
);
5379 i_ehdrp
->e_ident
[EI_OSABI
] = 0;
5380 i_ehdrp
->e_ident
[EI_ABIVERSION
] = AARCH64_ELF_ABI_VERSION
;
5383 static enum elf_reloc_type_class
5384 elf64_aarch64_reloc_type_class (const Elf_Internal_Rela
*rela
)
5386 switch ((int) ELF64_R_TYPE (rela
->r_info
))
5388 case R_AARCH64_RELATIVE
:
5389 return reloc_class_relative
;
5390 case R_AARCH64_JUMP_SLOT
:
5391 return reloc_class_plt
;
5392 case R_AARCH64_COPY
:
5393 return reloc_class_copy
;
5395 return reloc_class_normal
;
5399 /* Set the right machine number for an AArch64 ELF file. */
5402 elf64_aarch64_section_flags (flagword
*flags
, const Elf_Internal_Shdr
*hdr
)
5404 if (hdr
->sh_type
== SHT_NOTE
)
5405 *flags
|= SEC_LINK_ONCE
| SEC_LINK_DUPLICATES_SAME_CONTENTS
;
5410 /* Handle an AArch64 specific section when reading an object file. This is
5411 called when bfd_section_from_shdr finds a section with an unknown
5415 elf64_aarch64_section_from_shdr (bfd
*abfd
,
5416 Elf_Internal_Shdr
*hdr
,
5417 const char *name
, int shindex
)
5419 /* There ought to be a place to keep ELF backend specific flags, but
5420 at the moment there isn't one. We just keep track of the
5421 sections by their name, instead. Fortunately, the ABI gives
5422 names for all the AArch64 specific sections, so we will probably get
5424 switch (hdr
->sh_type
)
5426 case SHT_AARCH64_ATTRIBUTES
:
5433 if (!_bfd_elf_make_section_from_shdr (abfd
, hdr
, name
, shindex
))
5439 /* A structure used to record a list of sections, independently
5440 of the next and prev fields in the asection structure. */
5441 typedef struct section_list
5444 struct section_list
*next
;
5445 struct section_list
*prev
;
5449 /* Unfortunately we need to keep a list of sections for which
5450 an _aarch64_elf_section_data structure has been allocated. This
5451 is because it is possible for functions like elf64_aarch64_write_section
5452 to be called on a section which has had an elf_data_structure
5453 allocated for it (and so the used_by_bfd field is valid) but
5454 for which the AArch64 extended version of this structure - the
5455 _aarch64_elf_section_data structure - has not been allocated. */
5456 static section_list
*sections_with_aarch64_elf_section_data
= NULL
;
5459 record_section_with_aarch64_elf_section_data (asection
*sec
)
5461 struct section_list
*entry
;
5463 entry
= bfd_malloc (sizeof (*entry
));
5467 entry
->next
= sections_with_aarch64_elf_section_data
;
5469 if (entry
->next
!= NULL
)
5470 entry
->next
->prev
= entry
;
5471 sections_with_aarch64_elf_section_data
= entry
;
5474 static struct section_list
*
5475 find_aarch64_elf_section_entry (asection
*sec
)
5477 struct section_list
*entry
;
5478 static struct section_list
*last_entry
= NULL
;
5480 /* This is a short cut for the typical case where the sections are added
5481 to the sections_with_aarch64_elf_section_data list in forward order and
5482 then looked up here in backwards order. This makes a real difference
5483 to the ld-srec/sec64k.exp linker test. */
5484 entry
= sections_with_aarch64_elf_section_data
;
5485 if (last_entry
!= NULL
)
5487 if (last_entry
->sec
== sec
)
5489 else if (last_entry
->next
!= NULL
&& last_entry
->next
->sec
== sec
)
5490 entry
= last_entry
->next
;
5493 for (; entry
; entry
= entry
->next
)
5494 if (entry
->sec
== sec
)
5498 /* Record the entry prior to this one - it is the entry we are
5499 most likely to want to locate next time. Also this way if we
5500 have been called from
5501 unrecord_section_with_aarch64_elf_section_data () we will not
5502 be caching a pointer that is about to be freed. */
5503 last_entry
= entry
->prev
;
5509 unrecord_section_with_aarch64_elf_section_data (asection
*sec
)
5511 struct section_list
*entry
;
5513 entry
= find_aarch64_elf_section_entry (sec
);
5517 if (entry
->prev
!= NULL
)
5518 entry
->prev
->next
= entry
->next
;
5519 if (entry
->next
!= NULL
)
5520 entry
->next
->prev
= entry
->prev
;
5521 if (entry
== sections_with_aarch64_elf_section_data
)
5522 sections_with_aarch64_elf_section_data
= entry
->next
;
5531 struct bfd_link_info
*info
;
5534 int (*func
) (void *, const char *, Elf_Internal_Sym
*,
5535 asection
*, struct elf_link_hash_entry
*);
5536 } output_arch_syminfo
;
5538 enum map_symbol_type
5545 /* Output a single mapping symbol. */
5548 elf64_aarch64_output_map_sym (output_arch_syminfo
*osi
,
5549 enum map_symbol_type type
, bfd_vma offset
)
5551 static const char *names
[2] = { "$x", "$d" };
5552 Elf_Internal_Sym sym
;
5554 sym
.st_value
= (osi
->sec
->output_section
->vma
5555 + osi
->sec
->output_offset
+ offset
);
5558 sym
.st_info
= ELF_ST_INFO (STB_LOCAL
, STT_NOTYPE
);
5559 sym
.st_shndx
= osi
->sec_shndx
;
5560 return osi
->func (osi
->finfo
, names
[type
], &sym
, osi
->sec
, NULL
) == 1;
5565 /* Output mapping symbols for PLT entries associated with H. */
5568 elf64_aarch64_output_plt_map (struct elf_link_hash_entry
*h
, void *inf
)
5570 output_arch_syminfo
*osi
= (output_arch_syminfo
*) inf
;
5573 if (h
->root
.type
== bfd_link_hash_indirect
)
5576 if (h
->root
.type
== bfd_link_hash_warning
)
5577 /* When warning symbols are created, they **replace** the "real"
5578 entry in the hash table, thus we never get to see the real
5579 symbol in a hash traversal. So look at it now. */
5580 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
5582 if (h
->plt
.offset
== (bfd_vma
) - 1)
5585 addr
= h
->plt
.offset
;
5588 if (!elf64_aarch64_output_map_sym (osi
, AARCH64_MAP_INSN
, addr
))
5595 /* Output a single local symbol for a generated stub. */
5598 elf64_aarch64_output_stub_sym (output_arch_syminfo
*osi
, const char *name
,
5599 bfd_vma offset
, bfd_vma size
)
5601 Elf_Internal_Sym sym
;
5603 sym
.st_value
= (osi
->sec
->output_section
->vma
5604 + osi
->sec
->output_offset
+ offset
);
5607 sym
.st_info
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
5608 sym
.st_shndx
= osi
->sec_shndx
;
5609 return osi
->func (osi
->finfo
, name
, &sym
, osi
->sec
, NULL
) == 1;
5613 aarch64_map_one_stub (struct bfd_hash_entry
*gen_entry
, void *in_arg
)
5615 struct elf64_aarch64_stub_hash_entry
*stub_entry
;
5619 output_arch_syminfo
*osi
;
5621 /* Massage our args to the form they really have. */
5622 stub_entry
= (struct elf64_aarch64_stub_hash_entry
*) gen_entry
;
5623 osi
= (output_arch_syminfo
*) in_arg
;
5625 stub_sec
= stub_entry
->stub_sec
;
5627 /* Ensure this stub is attached to the current section being
5629 if (stub_sec
!= osi
->sec
)
5632 addr
= (bfd_vma
) stub_entry
->stub_offset
;
5634 stub_name
= stub_entry
->output_name
;
5636 switch (stub_entry
->stub_type
)
5638 case aarch64_stub_adrp_branch
:
5639 if (!elf64_aarch64_output_stub_sym (osi
, stub_name
, addr
,
5640 sizeof (aarch64_adrp_branch_stub
)))
5642 if (!elf64_aarch64_output_map_sym (osi
, AARCH64_MAP_INSN
, addr
))
5645 case aarch64_stub_long_branch
:
5646 if (!elf64_aarch64_output_stub_sym
5647 (osi
, stub_name
, addr
, sizeof (aarch64_long_branch_stub
)))
5649 if (!elf64_aarch64_output_map_sym (osi
, AARCH64_MAP_INSN
, addr
))
5651 if (!elf64_aarch64_output_map_sym (osi
, AARCH64_MAP_DATA
, addr
+ 16))
5661 /* Output mapping symbols for linker generated sections. */
5664 elf64_aarch64_output_arch_local_syms (bfd
*output_bfd
,
5665 struct bfd_link_info
*info
,
5667 int (*func
) (void *, const char *,
5670 struct elf_link_hash_entry
5673 output_arch_syminfo osi
;
5674 struct elf64_aarch64_link_hash_table
*htab
;
5676 htab
= elf64_aarch64_hash_table (info
);
5682 /* Long calls stubs. */
5683 if (htab
->stub_bfd
&& htab
->stub_bfd
->sections
)
5687 for (stub_sec
= htab
->stub_bfd
->sections
;
5688 stub_sec
!= NULL
; stub_sec
= stub_sec
->next
)
5690 /* Ignore non-stub sections. */
5691 if (!strstr (stub_sec
->name
, STUB_SUFFIX
))
5696 osi
.sec_shndx
= _bfd_elf_section_from_bfd_section
5697 (output_bfd
, osi
.sec
->output_section
);
5699 bfd_hash_traverse (&htab
->stub_hash_table
, aarch64_map_one_stub
,
5704 /* Finally, output mapping symbols for the PLT. */
5705 if (!htab
->root
.splt
|| htab
->root
.splt
->size
== 0)
5708 /* For now live without mapping symbols for the plt. */
5709 osi
.sec_shndx
= _bfd_elf_section_from_bfd_section
5710 (output_bfd
, htab
->root
.splt
->output_section
);
5711 osi
.sec
= htab
->root
.splt
;
5713 elf_link_hash_traverse (&htab
->root
, elf64_aarch64_output_plt_map
,
5720 /* Allocate target specific section data. */
5723 elf64_aarch64_new_section_hook (bfd
*abfd
, asection
*sec
)
5725 if (!sec
->used_by_bfd
)
5727 _aarch64_elf_section_data
*sdata
;
5728 bfd_size_type amt
= sizeof (*sdata
);
5730 sdata
= bfd_zalloc (abfd
, amt
);
5733 sec
->used_by_bfd
= sdata
;
5736 record_section_with_aarch64_elf_section_data (sec
);
5738 return _bfd_elf_new_section_hook (abfd
, sec
);
5743 unrecord_section_via_map_over_sections (bfd
*abfd ATTRIBUTE_UNUSED
,
5745 void *ignore ATTRIBUTE_UNUSED
)
5747 unrecord_section_with_aarch64_elf_section_data (sec
);
5751 elf64_aarch64_close_and_cleanup (bfd
*abfd
)
5754 bfd_map_over_sections (abfd
,
5755 unrecord_section_via_map_over_sections
, NULL
);
5757 return _bfd_elf_close_and_cleanup (abfd
);
5761 elf64_aarch64_bfd_free_cached_info (bfd
*abfd
)
5764 bfd_map_over_sections (abfd
,
5765 unrecord_section_via_map_over_sections
, NULL
);
5767 return _bfd_free_cached_info (abfd
);
5771 elf64_aarch64_is_function_type (unsigned int type
)
5773 return type
== STT_FUNC
;
5776 /* Create dynamic sections. This is different from the ARM backend in that
5777 the got, plt, gotplt and their relocation sections are all created in the
5778 standard part of the bfd elf backend. */
5781 elf64_aarch64_create_dynamic_sections (bfd
*dynobj
,
5782 struct bfd_link_info
*info
)
5784 struct elf64_aarch64_link_hash_table
*htab
;
5785 struct elf_link_hash_entry
*h
;
5787 if (!_bfd_elf_create_dynamic_sections (dynobj
, info
))
5790 htab
= elf64_aarch64_hash_table (info
);
5791 htab
->sdynbss
= bfd_get_linker_section (dynobj
, ".dynbss");
5793 htab
->srelbss
= bfd_get_linker_section (dynobj
, ".rela.bss");
5795 if (!htab
->sdynbss
|| (!info
->shared
&& !htab
->srelbss
))
5798 /* Define the symbol _GLOBAL_OFFSET_TABLE_ at the start of the
5799 dynobj's .got section. We don't do this in the linker script
5800 because we don't want to define the symbol if we are not creating
5801 a global offset table. */
5802 h
= _bfd_elf_define_linkage_sym (dynobj
, info
,
5803 htab
->root
.sgot
, "_GLOBAL_OFFSET_TABLE_");
5804 elf_hash_table (info
)->hgot
= h
;
5812 /* Allocate space in .plt, .got and associated reloc sections for
5816 elf64_aarch64_allocate_dynrelocs (struct elf_link_hash_entry
*h
, void *inf
)
5818 struct bfd_link_info
*info
;
5819 struct elf64_aarch64_link_hash_table
*htab
;
5820 struct elf64_aarch64_link_hash_entry
*eh
;
5821 struct elf_dyn_relocs
*p
;
5823 /* An example of a bfd_link_hash_indirect symbol is versioned
5824 symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
5825 -> __gxx_personality_v0(bfd_link_hash_defined)
5827 There is no need to process bfd_link_hash_indirect symbols here
5828 because we will also be presented with the concrete instance of
5829 the symbol and elf64_aarch64_copy_indirect_symbol () will have been
5830 called to copy all relevant data from the generic to the concrete
5833 if (h
->root
.type
== bfd_link_hash_indirect
)
5836 if (h
->root
.type
== bfd_link_hash_warning
)
5837 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
5839 info
= (struct bfd_link_info
*) inf
;
5840 htab
= elf64_aarch64_hash_table (info
);
5842 if (htab
->root
.dynamic_sections_created
&& h
->plt
.refcount
> 0)
5844 /* Make sure this symbol is output as a dynamic symbol.
5845 Undefined weak syms won't yet be marked as dynamic. */
5846 if (h
->dynindx
== -1 && !h
->forced_local
)
5848 if (!bfd_elf_link_record_dynamic_symbol (info
, h
))
5852 if (info
->shared
|| WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h
))
5854 asection
*s
= htab
->root
.splt
;
5856 /* If this is the first .plt entry, make room for the special
5859 s
->size
+= htab
->plt_header_size
;
5861 h
->plt
.offset
= s
->size
;
5863 /* If this symbol is not defined in a regular file, and we are
5864 not generating a shared library, then set the symbol to this
5865 location in the .plt. This is required to make function
5866 pointers compare as equal between the normal executable and
5867 the shared library. */
5868 if (!info
->shared
&& !h
->def_regular
)
5870 h
->root
.u
.def
.section
= s
;
5871 h
->root
.u
.def
.value
= h
->plt
.offset
;
5874 /* Make room for this entry. For now we only create the
5875 small model PLT entries. We later need to find a way
5876 of relaxing into these from the large model PLT entries. */
5877 s
->size
+= PLT_SMALL_ENTRY_SIZE
;
5879 /* We also need to make an entry in the .got.plt section, which
5880 will be placed in the .got section by the linker script. */
5881 htab
->root
.sgotplt
->size
+= GOT_ENTRY_SIZE
;
5883 /* We also need to make an entry in the .rela.plt section. */
5884 htab
->root
.srelplt
->size
+= RELOC_SIZE (htab
);
5886 /* We need to ensure that all GOT entries that serve the PLT
5887 are consecutive with the special GOT slots [0] [1] and
5888 [2]. Any addtional relocations, such as
5889 R_AARCH64_TLSDESC, must be placed after the PLT related
5890 entries. We abuse the reloc_count such that during
5891 sizing we adjust reloc_count to indicate the number of
5892 PLT related reserved entries. In subsequent phases when
5893 filling in the contents of the reloc entries, PLT related
5894 entries are placed by computing their PLT index (0
5895 .. reloc_count). While other none PLT relocs are placed
5896 at the slot indicated by reloc_count and reloc_count is
5899 htab
->root
.srelplt
->reloc_count
++;
5903 h
->plt
.offset
= (bfd_vma
) - 1;
5909 h
->plt
.offset
= (bfd_vma
) - 1;
5913 eh
= (struct elf64_aarch64_link_hash_entry
*) h
;
5914 eh
->tlsdesc_got_jump_table_offset
= (bfd_vma
) - 1;
5916 if (h
->got
.refcount
> 0)
5919 unsigned got_type
= elf64_aarch64_hash_entry (h
)->got_type
;
5921 h
->got
.offset
= (bfd_vma
) - 1;
5923 dyn
= htab
->root
.dynamic_sections_created
;
5925 /* Make sure this symbol is output as a dynamic symbol.
5926 Undefined weak syms won't yet be marked as dynamic. */
5927 if (dyn
&& h
->dynindx
== -1 && !h
->forced_local
)
5929 if (!bfd_elf_link_record_dynamic_symbol (info
, h
))
5933 if (got_type
== GOT_UNKNOWN
)
5936 else if (got_type
== GOT_NORMAL
)
5938 h
->got
.offset
= htab
->root
.sgot
->size
;
5939 htab
->root
.sgot
->size
+= GOT_ENTRY_SIZE
;
5940 if ((ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
5941 || h
->root
.type
!= bfd_link_hash_undefweak
)
5943 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn
, 0, h
)))
5945 htab
->root
.srelgot
->size
+= RELOC_SIZE (htab
);
5951 if (got_type
& GOT_TLSDESC_GD
)
5953 eh
->tlsdesc_got_jump_table_offset
=
5954 (htab
->root
.sgotplt
->size
5955 - aarch64_compute_jump_table_size (htab
));
5956 htab
->root
.sgotplt
->size
+= GOT_ENTRY_SIZE
* 2;
5957 h
->got
.offset
= (bfd_vma
) - 2;
5960 if (got_type
& GOT_TLS_GD
)
5962 h
->got
.offset
= htab
->root
.sgot
->size
;
5963 htab
->root
.sgot
->size
+= GOT_ENTRY_SIZE
* 2;
5966 if (got_type
& GOT_TLS_IE
)
5968 h
->got
.offset
= htab
->root
.sgot
->size
;
5969 htab
->root
.sgot
->size
+= GOT_ENTRY_SIZE
;
5972 indx
= h
&& h
->dynindx
!= -1 ? h
->dynindx
: 0;
5973 if ((ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
5974 || h
->root
.type
!= bfd_link_hash_undefweak
)
5977 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn
, 0, h
)))
5979 if (got_type
& GOT_TLSDESC_GD
)
5981 htab
->root
.srelplt
->size
+= RELOC_SIZE (htab
);
5982 /* Note reloc_count not incremented here! We have
5983 already adjusted reloc_count for this relocation
5986 /* TLSDESC PLT is now needed, but not yet determined. */
5987 htab
->tlsdesc_plt
= (bfd_vma
) - 1;
5990 if (got_type
& GOT_TLS_GD
)
5991 htab
->root
.srelgot
->size
+= RELOC_SIZE (htab
) * 2;
5993 if (got_type
& GOT_TLS_IE
)
5994 htab
->root
.srelgot
->size
+= RELOC_SIZE (htab
);
6000 h
->got
.offset
= (bfd_vma
) - 1;
6003 if (eh
->dyn_relocs
== NULL
)
6006 /* In the shared -Bsymbolic case, discard space allocated for
6007 dynamic pc-relative relocs against symbols which turn out to be
6008 defined in regular objects. For the normal shared case, discard
6009 space for pc-relative relocs that have become local due to symbol
6010 visibility changes. */
6014 /* Relocs that use pc_count are those that appear on a call
6015 insn, or certain REL relocs that can generated via assembly.
6016 We want calls to protected symbols to resolve directly to the
6017 function rather than going via the plt. If people want
6018 function pointer comparisons to work as expected then they
6019 should avoid writing weird assembly. */
6020 if (SYMBOL_CALLS_LOCAL (info
, h
))
6022 struct elf_dyn_relocs
**pp
;
6024 for (pp
= &eh
->dyn_relocs
; (p
= *pp
) != NULL
;)
6026 p
->count
-= p
->pc_count
;
6035 /* Also discard relocs on undefined weak syms with non-default
6037 if (eh
->dyn_relocs
!= NULL
&& h
->root
.type
== bfd_link_hash_undefweak
)
6039 if (ELF_ST_VISIBILITY (h
->other
) != STV_DEFAULT
)
6040 eh
->dyn_relocs
= NULL
;
6042 /* Make sure undefined weak symbols are output as a dynamic
6044 else if (h
->dynindx
== -1
6046 && !bfd_elf_link_record_dynamic_symbol (info
, h
))
6051 else if (ELIMINATE_COPY_RELOCS
)
6053 /* For the non-shared case, discard space for relocs against
6054 symbols which turn out to need copy relocs or are not
6060 || (htab
->root
.dynamic_sections_created
6061 && (h
->root
.type
== bfd_link_hash_undefweak
6062 || h
->root
.type
== bfd_link_hash_undefined
))))
6064 /* Make sure this symbol is output as a dynamic symbol.
6065 Undefined weak syms won't yet be marked as dynamic. */
6066 if (h
->dynindx
== -1
6068 && !bfd_elf_link_record_dynamic_symbol (info
, h
))
6071 /* If that succeeded, we know we'll be keeping all the
6073 if (h
->dynindx
!= -1)
6077 eh
->dyn_relocs
= NULL
;
6082 /* Finally, allocate space. */
6083 for (p
= eh
->dyn_relocs
; p
!= NULL
; p
= p
->next
)
6087 sreloc
= elf_section_data (p
->sec
)->sreloc
;
6089 BFD_ASSERT (sreloc
!= NULL
);
6091 sreloc
->size
+= p
->count
* RELOC_SIZE (htab
);
6100 /* This is the most important function of all . Innocuosly named
6103 elf64_aarch64_size_dynamic_sections (bfd
*output_bfd ATTRIBUTE_UNUSED
,
6104 struct bfd_link_info
*info
)
6106 struct elf64_aarch64_link_hash_table
*htab
;
6112 htab
= elf64_aarch64_hash_table ((info
));
6113 dynobj
= htab
->root
.dynobj
;
6115 BFD_ASSERT (dynobj
!= NULL
);
6117 if (htab
->root
.dynamic_sections_created
)
6119 if (info
->executable
)
6121 s
= bfd_get_linker_section (dynobj
, ".interp");
6124 s
->size
= sizeof ELF_DYNAMIC_INTERPRETER
;
6125 s
->contents
= (unsigned char *) ELF_DYNAMIC_INTERPRETER
;
6129 /* Set up .got offsets for local syms, and space for local dynamic
6131 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
6133 struct elf_aarch64_local_symbol
*locals
= NULL
;
6134 Elf_Internal_Shdr
*symtab_hdr
;
6138 if (!is_aarch64_elf (ibfd
))
6141 for (s
= ibfd
->sections
; s
!= NULL
; s
= s
->next
)
6143 struct elf_dyn_relocs
*p
;
6145 for (p
= (struct elf_dyn_relocs
*)
6146 (elf_section_data (s
)->local_dynrel
); p
!= NULL
; p
= p
->next
)
6148 if (!bfd_is_abs_section (p
->sec
)
6149 && bfd_is_abs_section (p
->sec
->output_section
))
6151 /* Input section has been discarded, either because
6152 it is a copy of a linkonce section or due to
6153 linker script /DISCARD/, so we'll be discarding
6156 else if (p
->count
!= 0)
6158 srel
= elf_section_data (p
->sec
)->sreloc
;
6159 srel
->size
+= p
->count
* RELOC_SIZE (htab
);
6160 if ((p
->sec
->output_section
->flags
& SEC_READONLY
) != 0)
6161 info
->flags
|= DF_TEXTREL
;
6166 locals
= elf64_aarch64_locals (ibfd
);
6170 symtab_hdr
= &elf_symtab_hdr (ibfd
);
6171 srel
= htab
->root
.srelgot
;
6172 for (i
= 0; i
< symtab_hdr
->sh_info
; i
++)
6174 locals
[i
].got_offset
= (bfd_vma
) - 1;
6175 locals
[i
].tlsdesc_got_jump_table_offset
= (bfd_vma
) - 1;
6176 if (locals
[i
].got_refcount
> 0)
6178 unsigned got_type
= locals
[i
].got_type
;
6179 if (got_type
& GOT_TLSDESC_GD
)
6181 locals
[i
].tlsdesc_got_jump_table_offset
=
6182 (htab
->root
.sgotplt
->size
6183 - aarch64_compute_jump_table_size (htab
));
6184 htab
->root
.sgotplt
->size
+= GOT_ENTRY_SIZE
* 2;
6185 locals
[i
].got_offset
= (bfd_vma
) - 2;
6188 if (got_type
& GOT_TLS_GD
)
6190 locals
[i
].got_offset
= htab
->root
.sgot
->size
;
6191 htab
->root
.sgot
->size
+= GOT_ENTRY_SIZE
* 2;
6194 if (got_type
& GOT_TLS_IE
)
6196 locals
[i
].got_offset
= htab
->root
.sgot
->size
;
6197 htab
->root
.sgot
->size
+= GOT_ENTRY_SIZE
;
6200 if (got_type
== GOT_UNKNOWN
)
6204 if (got_type
== GOT_NORMAL
)
6210 if (got_type
& GOT_TLSDESC_GD
)
6212 htab
->root
.srelplt
->size
+= RELOC_SIZE (htab
);
6213 /* Note RELOC_COUNT not incremented here! */
6214 htab
->tlsdesc_plt
= (bfd_vma
) - 1;
6217 if (got_type
& GOT_TLS_GD
)
6218 htab
->root
.srelgot
->size
+= RELOC_SIZE (htab
) * 2;
6220 if (got_type
& GOT_TLS_IE
)
6221 htab
->root
.srelgot
->size
+= RELOC_SIZE (htab
);
6226 locals
[i
].got_refcount
= (bfd_vma
) - 1;
6232 /* Allocate global sym .plt and .got entries, and space for global
6233 sym dynamic relocs. */
6234 elf_link_hash_traverse (&htab
->root
, elf64_aarch64_allocate_dynrelocs
,
6238 /* For every jump slot reserved in the sgotplt, reloc_count is
6239 incremented. However, when we reserve space for TLS descriptors,
6240 it's not incremented, so in order to compute the space reserved
6241 for them, it suffices to multiply the reloc count by the jump
6244 if (htab
->root
.srelplt
)
6245 htab
->sgotplt_jump_table_size
= aarch64_compute_jump_table_size (htab
);
6247 if (htab
->tlsdesc_plt
)
6249 if (htab
->root
.splt
->size
== 0)
6250 htab
->root
.splt
->size
+= PLT_ENTRY_SIZE
;
6252 htab
->tlsdesc_plt
= htab
->root
.splt
->size
;
6253 htab
->root
.splt
->size
+= PLT_TLSDESC_ENTRY_SIZE
;
6255 /* If we're not using lazy TLS relocations, don't generate the
6256 GOT entry required. */
6257 if (!(info
->flags
& DF_BIND_NOW
))
6259 htab
->dt_tlsdesc_got
= htab
->root
.sgot
->size
;
6260 htab
->root
.sgot
->size
+= GOT_ENTRY_SIZE
;
6264 /* We now have determined the sizes of the various dynamic sections.
6265 Allocate memory for them. */
6267 for (s
= dynobj
->sections
; s
!= NULL
; s
= s
->next
)
6269 if ((s
->flags
& SEC_LINKER_CREATED
) == 0)
6272 if (s
== htab
->root
.splt
6273 || s
== htab
->root
.sgot
6274 || s
== htab
->root
.sgotplt
6275 || s
== htab
->root
.iplt
6276 || s
== htab
->root
.igotplt
|| s
== htab
->sdynbss
)
6278 /* Strip this section if we don't need it; see the
6281 else if (CONST_STRNEQ (bfd_get_section_name (dynobj
, s
), ".rela"))
6283 if (s
->size
!= 0 && s
!= htab
->root
.srelplt
)
6286 /* We use the reloc_count field as a counter if we need
6287 to copy relocs into the output file. */
6288 if (s
!= htab
->root
.srelplt
)
6293 /* It's not one of our sections, so don't allocate space. */
6299 /* If we don't need this section, strip it from the
6300 output file. This is mostly to handle .rela.bss and
6301 .rela.plt. We must create both sections in
6302 create_dynamic_sections, because they must be created
6303 before the linker maps input sections to output
6304 sections. The linker does that before
6305 adjust_dynamic_symbol is called, and it is that
6306 function which decides whether anything needs to go
6307 into these sections. */
6309 s
->flags
|= SEC_EXCLUDE
;
6313 if ((s
->flags
& SEC_HAS_CONTENTS
) == 0)
6316 /* Allocate memory for the section contents. We use bfd_zalloc
6317 here in case unused entries are not reclaimed before the
6318 section's contents are written out. This should not happen,
6319 but this way if it does, we get a R_AARCH64_NONE reloc instead
6321 s
->contents
= (bfd_byte
*) bfd_zalloc (dynobj
, s
->size
);
6322 if (s
->contents
== NULL
)
6326 if (htab
->root
.dynamic_sections_created
)
6328 /* Add some entries to the .dynamic section. We fill in the
6329 values later, in elf64_aarch64_finish_dynamic_sections, but we
6330 must add the entries now so that we get the correct size for
6331 the .dynamic section. The DT_DEBUG entry is filled in by the
6332 dynamic linker and used by the debugger. */
6333 #define add_dynamic_entry(TAG, VAL) \
6334 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
6336 if (info
->executable
)
6338 if (!add_dynamic_entry (DT_DEBUG
, 0))
6342 if (htab
->root
.splt
->size
!= 0)
6344 if (!add_dynamic_entry (DT_PLTGOT
, 0)
6345 || !add_dynamic_entry (DT_PLTRELSZ
, 0)
6346 || !add_dynamic_entry (DT_PLTREL
, DT_RELA
)
6347 || !add_dynamic_entry (DT_JMPREL
, 0))
6350 if (htab
->tlsdesc_plt
6351 && (!add_dynamic_entry (DT_TLSDESC_PLT
, 0)
6352 || !add_dynamic_entry (DT_TLSDESC_GOT
, 0)))
6358 if (!add_dynamic_entry (DT_RELA
, 0)
6359 || !add_dynamic_entry (DT_RELASZ
, 0)
6360 || !add_dynamic_entry (DT_RELAENT
, RELOC_SIZE (htab
)))
6363 /* If any dynamic relocs apply to a read-only section,
6364 then we need a DT_TEXTREL entry. */
6365 if ((info
->flags
& DF_TEXTREL
) != 0)
6367 if (!add_dynamic_entry (DT_TEXTREL
, 0))
6372 #undef add_dynamic_entry
6380 elf64_aarch64_update_plt_entry (bfd
*output_bfd
,
6381 unsigned int r_type
,
6382 bfd_byte
*plt_entry
, bfd_vma value
)
6384 reloc_howto_type
*howto
;
6385 howto
= elf64_aarch64_howto_from_type (r_type
);
6386 bfd_elf_aarch64_put_addend (output_bfd
, plt_entry
, howto
, value
);
6390 elf64_aarch64_create_small_pltn_entry (struct elf_link_hash_entry
*h
,
6391 struct elf64_aarch64_link_hash_table
6392 *htab
, bfd
*output_bfd
)
6394 bfd_byte
*plt_entry
;
6397 bfd_vma gotplt_entry_address
;
6398 bfd_vma plt_entry_address
;
6399 Elf_Internal_Rela rela
;
6402 plt_index
= (h
->plt
.offset
- htab
->plt_header_size
) / htab
->plt_entry_size
;
6404 /* Offset in the GOT is PLT index plus got GOT headers(3)
6406 got_offset
= (plt_index
+ 3) * GOT_ENTRY_SIZE
;
6407 plt_entry
= htab
->root
.splt
->contents
+ h
->plt
.offset
;
6408 plt_entry_address
= htab
->root
.splt
->output_section
->vma
6409 + htab
->root
.splt
->output_section
->output_offset
+ h
->plt
.offset
;
6410 gotplt_entry_address
= htab
->root
.sgotplt
->output_section
->vma
+
6411 htab
->root
.sgotplt
->output_offset
+ got_offset
;
6413 /* Copy in the boiler-plate for the PLTn entry. */
6414 memcpy (plt_entry
, elf64_aarch64_small_plt_entry
, PLT_SMALL_ENTRY_SIZE
);
6416 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
6417 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
6418 elf64_aarch64_update_plt_entry (output_bfd
, R_AARCH64_ADR_PREL_PG_HI21
,
6420 PG (gotplt_entry_address
) -
6421 PG (plt_entry_address
));
6423 /* Fill in the lo12 bits for the load from the pltgot. */
6424 elf64_aarch64_update_plt_entry (output_bfd
, R_AARCH64_LDST64_ABS_LO12_NC
,
6426 PG_OFFSET (gotplt_entry_address
));
6428 /* Fill in the the lo12 bits for the add from the pltgot entry. */
6429 elf64_aarch64_update_plt_entry (output_bfd
, R_AARCH64_ADD_ABS_LO12_NC
,
6431 PG_OFFSET (gotplt_entry_address
));
6433 /* All the GOTPLT Entries are essentially initialized to PLT0. */
6434 bfd_put_64 (output_bfd
,
6435 (htab
->root
.splt
->output_section
->vma
6436 + htab
->root
.splt
->output_offset
),
6437 htab
->root
.sgotplt
->contents
+ got_offset
);
6439 /* Fill in the entry in the .rela.plt section. */
6440 rela
.r_offset
= gotplt_entry_address
;
6441 rela
.r_info
= ELF64_R_INFO (h
->dynindx
, R_AARCH64_JUMP_SLOT
);
6444 /* Compute the relocation entry to used based on PLT index and do
6445 not adjust reloc_count. The reloc_count has already been adjusted
6446 to account for this entry. */
6447 loc
= htab
->root
.srelplt
->contents
+ plt_index
* RELOC_SIZE (htab
);
6448 bfd_elf64_swap_reloca_out (output_bfd
, &rela
, loc
);
6451 /* Size sections even though they're not dynamic. We use it to setup
6452 _TLS_MODULE_BASE_, if needed. */
6455 elf64_aarch64_always_size_sections (bfd
*output_bfd
,
6456 struct bfd_link_info
*info
)
6460 if (info
->relocatable
)
6463 tls_sec
= elf_hash_table (info
)->tls_sec
;
6467 struct elf_link_hash_entry
*tlsbase
;
6469 tlsbase
= elf_link_hash_lookup (elf_hash_table (info
),
6470 "_TLS_MODULE_BASE_", TRUE
, TRUE
, FALSE
);
6474 struct bfd_link_hash_entry
*h
= NULL
;
6475 const struct elf_backend_data
*bed
=
6476 get_elf_backend_data (output_bfd
);
6478 if (!(_bfd_generic_link_add_one_symbol
6479 (info
, output_bfd
, "_TLS_MODULE_BASE_", BSF_LOCAL
,
6480 tls_sec
, 0, NULL
, FALSE
, bed
->collect
, &h
)))
6483 tlsbase
->type
= STT_TLS
;
6484 tlsbase
= (struct elf_link_hash_entry
*) h
;
6485 tlsbase
->def_regular
= 1;
6486 tlsbase
->other
= STV_HIDDEN
;
6487 (*bed
->elf_backend_hide_symbol
) (info
, tlsbase
, TRUE
);
6494 /* Finish up dynamic symbol handling. We set the contents of various
6495 dynamic sections here. */
6497 elf64_aarch64_finish_dynamic_symbol (bfd
*output_bfd
,
6498 struct bfd_link_info
*info
,
6499 struct elf_link_hash_entry
*h
,
6500 Elf_Internal_Sym
*sym
)
6502 struct elf64_aarch64_link_hash_table
*htab
;
6503 htab
= elf64_aarch64_hash_table (info
);
6505 if (h
->plt
.offset
!= (bfd_vma
) - 1)
6507 /* This symbol has an entry in the procedure linkage table. Set
6510 if (h
->dynindx
== -1
6511 || htab
->root
.splt
== NULL
6512 || htab
->root
.sgotplt
== NULL
|| htab
->root
.srelplt
== NULL
)
6515 elf64_aarch64_create_small_pltn_entry (h
, htab
, output_bfd
);
6516 if (!h
->def_regular
)
6518 /* Mark the symbol as undefined, rather than as defined in
6519 the .plt section. Leave the value alone. This is a clue
6520 for the dynamic linker, to make function pointer
6521 comparisons work between an application and shared
6523 sym
->st_shndx
= SHN_UNDEF
;
6527 if (h
->got
.offset
!= (bfd_vma
) - 1
6528 && elf64_aarch64_hash_entry (h
)->got_type
== GOT_NORMAL
)
6530 Elf_Internal_Rela rela
;
6533 /* This symbol has an entry in the global offset table. Set it
6535 if (htab
->root
.sgot
== NULL
|| htab
->root
.srelgot
== NULL
)
6538 rela
.r_offset
= (htab
->root
.sgot
->output_section
->vma
6539 + htab
->root
.sgot
->output_offset
6540 + (h
->got
.offset
& ~(bfd_vma
) 1));
6542 if (info
->shared
&& SYMBOL_REFERENCES_LOCAL (info
, h
))
6544 if (!h
->def_regular
)
6547 BFD_ASSERT ((h
->got
.offset
& 1) != 0);
6548 rela
.r_info
= ELF64_R_INFO (0, R_AARCH64_RELATIVE
);
6549 rela
.r_addend
= (h
->root
.u
.def
.value
6550 + h
->root
.u
.def
.section
->output_section
->vma
6551 + h
->root
.u
.def
.section
->output_offset
);
6555 BFD_ASSERT ((h
->got
.offset
& 1) == 0);
6556 bfd_put_64 (output_bfd
, (bfd_vma
) 0,
6557 htab
->root
.sgot
->contents
+ h
->got
.offset
);
6558 rela
.r_info
= ELF64_R_INFO (h
->dynindx
, R_AARCH64_GLOB_DAT
);
6562 loc
= htab
->root
.srelgot
->contents
;
6563 loc
+= htab
->root
.srelgot
->reloc_count
++ * RELOC_SIZE (htab
);
6564 bfd_elf64_swap_reloca_out (output_bfd
, &rela
, loc
);
6569 Elf_Internal_Rela rela
;
6572 /* This symbol needs a copy reloc. Set it up. */
6574 if (h
->dynindx
== -1
6575 || (h
->root
.type
!= bfd_link_hash_defined
6576 && h
->root
.type
!= bfd_link_hash_defweak
)
6577 || htab
->srelbss
== NULL
)
6580 rela
.r_offset
= (h
->root
.u
.def
.value
6581 + h
->root
.u
.def
.section
->output_section
->vma
6582 + h
->root
.u
.def
.section
->output_offset
);
6583 rela
.r_info
= ELF64_R_INFO (h
->dynindx
, R_AARCH64_COPY
);
6585 loc
= htab
->srelbss
->contents
;
6586 loc
+= htab
->srelbss
->reloc_count
++ * RELOC_SIZE (htab
);
6587 bfd_elf64_swap_reloca_out (output_bfd
, &rela
, loc
);
6590 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. SYM may
6591 be NULL for local symbols. */
6593 && (strcmp (h
->root
.root
.string
, "_DYNAMIC") == 0
6594 || h
== elf_hash_table (info
)->hgot
))
6595 sym
->st_shndx
= SHN_ABS
;
6601 elf64_aarch64_init_small_plt0_entry (bfd
*output_bfd ATTRIBUTE_UNUSED
,
6602 struct elf64_aarch64_link_hash_table
6605 /* Fill in PLT0. Fixme:RR Note this doesn't distinguish between
6606 small and large plts and at the minute just generates
6609 /* PLT0 of the small PLT looks like this -
6610 stp x16, x30, [sp, #-16]! // Save the reloc and lr on stack.
6611 adrp x16, PLT_GOT + 16 // Get the page base of the GOTPLT
6612 ldr x17, [x16, #:lo12:PLT_GOT+16] // Load the address of the
6614 add x16, x16, #:lo12:PLT_GOT+16 // Load the lo12 bits of the
6615 // GOTPLT entry for this.
6618 bfd_vma plt_got_base
;
6622 memcpy (htab
->root
.splt
->contents
, elf64_aarch64_small_plt0_entry
,
6624 elf_section_data (htab
->root
.splt
->output_section
)->this_hdr
.sh_entsize
=
6627 plt_got_base
= (htab
->root
.sgotplt
->output_section
->vma
6628 + htab
->root
.sgotplt
->output_offset
);
6630 plt_base
= htab
->root
.splt
->output_section
->vma
+
6631 htab
->root
.splt
->output_section
->output_offset
;
6633 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
6634 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
6635 elf64_aarch64_update_plt_entry (output_bfd
, R_AARCH64_ADR_PREL_PG_HI21
,
6636 htab
->root
.splt
->contents
+ 4,
6637 PG (plt_got_base
+ 16) - PG (plt_base
+ 4));
6639 elf64_aarch64_update_plt_entry (output_bfd
, R_AARCH64_LDST64_ABS_LO12_NC
,
6640 htab
->root
.splt
->contents
+ 8,
6641 PG_OFFSET (plt_got_base
+ 16));
6643 elf64_aarch64_update_plt_entry (output_bfd
, R_AARCH64_ADD_ABS_LO12_NC
,
6644 htab
->root
.splt
->contents
+ 12,
6645 PG_OFFSET (plt_got_base
+ 16));
6649 elf64_aarch64_finish_dynamic_sections (bfd
*output_bfd
,
6650 struct bfd_link_info
*info
)
6652 struct elf64_aarch64_link_hash_table
*htab
;
6656 htab
= elf64_aarch64_hash_table (info
);
6657 dynobj
= htab
->root
.dynobj
;
6658 sdyn
= bfd_get_linker_section (dynobj
, ".dynamic");
6660 if (htab
->root
.dynamic_sections_created
)
6662 Elf64_External_Dyn
*dyncon
, *dynconend
;
6664 if (sdyn
== NULL
|| htab
->root
.sgot
== NULL
)
6667 dyncon
= (Elf64_External_Dyn
*) sdyn
->contents
;
6668 dynconend
= (Elf64_External_Dyn
*) (sdyn
->contents
+ sdyn
->size
);
6669 for (; dyncon
< dynconend
; dyncon
++)
6671 Elf_Internal_Dyn dyn
;
6674 bfd_elf64_swap_dyn_in (dynobj
, dyncon
, &dyn
);
6682 s
= htab
->root
.sgotplt
;
6683 dyn
.d_un
.d_ptr
= s
->output_section
->vma
+ s
->output_offset
;
6687 dyn
.d_un
.d_ptr
= htab
->root
.srelplt
->output_section
->vma
;
6691 s
= htab
->root
.srelplt
->output_section
;
6692 dyn
.d_un
.d_val
= s
->size
;
6696 /* The procedure linkage table relocs (DT_JMPREL) should
6697 not be included in the overall relocs (DT_RELA).
6698 Therefore, we override the DT_RELASZ entry here to
6699 make it not include the JMPREL relocs. Since the
6700 linker script arranges for .rela.plt to follow all
6701 other relocation sections, we don't have to worry
6702 about changing the DT_RELA entry. */
6703 if (htab
->root
.srelplt
!= NULL
)
6705 s
= htab
->root
.srelplt
->output_section
;
6706 dyn
.d_un
.d_val
-= s
->size
;
6710 case DT_TLSDESC_PLT
:
6711 s
= htab
->root
.splt
;
6712 dyn
.d_un
.d_ptr
= s
->output_section
->vma
+ s
->output_offset
6713 + htab
->tlsdesc_plt
;
6716 case DT_TLSDESC_GOT
:
6717 s
= htab
->root
.sgot
;
6718 dyn
.d_un
.d_ptr
= s
->output_section
->vma
+ s
->output_offset
6719 + htab
->dt_tlsdesc_got
;
6723 bfd_elf64_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
6728 /* Fill in the special first entry in the procedure linkage table. */
6729 if (htab
->root
.splt
&& htab
->root
.splt
->size
> 0)
6731 elf64_aarch64_init_small_plt0_entry (output_bfd
, htab
);
6733 elf_section_data (htab
->root
.splt
->output_section
)->
6734 this_hdr
.sh_entsize
= htab
->plt_entry_size
;
6737 if (htab
->tlsdesc_plt
)
6739 bfd_put_64 (output_bfd
, (bfd_vma
) 0,
6740 htab
->root
.sgot
->contents
+ htab
->dt_tlsdesc_got
);
6742 memcpy (htab
->root
.splt
->contents
+ htab
->tlsdesc_plt
,
6743 elf64_aarch64_tlsdesc_small_plt_entry
,
6744 sizeof (elf64_aarch64_tlsdesc_small_plt_entry
));
6747 bfd_vma adrp1_addr
=
6748 htab
->root
.splt
->output_section
->vma
6749 + htab
->root
.splt
->output_offset
+ htab
->tlsdesc_plt
+ 4;
6751 bfd_vma adrp2_addr
=
6752 htab
->root
.splt
->output_section
->vma
6753 + htab
->root
.splt
->output_offset
+ htab
->tlsdesc_plt
+ 8;
6756 htab
->root
.sgot
->output_section
->vma
6757 + htab
->root
.sgot
->output_offset
;
6759 bfd_vma pltgot_addr
=
6760 htab
->root
.sgotplt
->output_section
->vma
6761 + htab
->root
.sgotplt
->output_offset
;
6763 bfd_vma dt_tlsdesc_got
= got_addr
+ htab
->dt_tlsdesc_got
;
6766 /* adrp x2, DT_TLSDESC_GOT */
6767 opcode
= bfd_get_32 (output_bfd
,
6768 htab
->root
.splt
->contents
6769 + htab
->tlsdesc_plt
+ 4);
6770 opcode
= reencode_adr_imm
6771 (opcode
, (PG (dt_tlsdesc_got
) - PG (adrp1_addr
)) >> 12);
6772 bfd_put_32 (output_bfd
, opcode
,
6773 htab
->root
.splt
->contents
+ htab
->tlsdesc_plt
+ 4);
6776 opcode
= bfd_get_32 (output_bfd
,
6777 htab
->root
.splt
->contents
6778 + htab
->tlsdesc_plt
+ 8);
6779 opcode
= reencode_adr_imm
6780 (opcode
, (PG (pltgot_addr
) - PG (adrp2_addr
)) >> 12);
6781 bfd_put_32 (output_bfd
, opcode
,
6782 htab
->root
.splt
->contents
+ htab
->tlsdesc_plt
+ 8);
6784 /* ldr x2, [x2, #0] */
6785 opcode
= bfd_get_32 (output_bfd
,
6786 htab
->root
.splt
->contents
6787 + htab
->tlsdesc_plt
+ 12);
6788 opcode
= reencode_ldst_pos_imm (opcode
,
6789 PG_OFFSET (dt_tlsdesc_got
) >> 3);
6790 bfd_put_32 (output_bfd
, opcode
,
6791 htab
->root
.splt
->contents
+ htab
->tlsdesc_plt
+ 12);
6794 opcode
= bfd_get_32 (output_bfd
,
6795 htab
->root
.splt
->contents
6796 + htab
->tlsdesc_plt
+ 16);
6797 opcode
= reencode_add_imm (opcode
, PG_OFFSET (pltgot_addr
));
6798 bfd_put_32 (output_bfd
, opcode
,
6799 htab
->root
.splt
->contents
+ htab
->tlsdesc_plt
+ 16);
6804 if (htab
->root
.sgotplt
)
6806 if (bfd_is_abs_section (htab
->root
.sgotplt
->output_section
))
6808 (*_bfd_error_handler
)
6809 (_("discarded output section: `%A'"), htab
->root
.sgotplt
);
6813 /* Fill in the first three entries in the global offset table. */
6814 if (htab
->root
.sgotplt
->size
> 0)
6816 /* Set the first entry in the global offset table to the address of
6817 the dynamic section. */
6819 bfd_put_64 (output_bfd
, (bfd_vma
) 0,
6820 htab
->root
.sgotplt
->contents
);
6822 bfd_put_64 (output_bfd
,
6823 sdyn
->output_section
->vma
+ sdyn
->output_offset
,
6824 htab
->root
.sgotplt
->contents
);
6825 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
6826 bfd_put_64 (output_bfd
,
6828 htab
->root
.sgotplt
->contents
+ GOT_ENTRY_SIZE
);
6829 bfd_put_64 (output_bfd
,
6831 htab
->root
.sgotplt
->contents
+ GOT_ENTRY_SIZE
* 2);
6834 elf_section_data (htab
->root
.sgotplt
->output_section
)->
6835 this_hdr
.sh_entsize
= GOT_ENTRY_SIZE
;
6838 if (htab
->root
.sgot
&& htab
->root
.sgot
->size
> 0)
6839 elf_section_data (htab
->root
.sgot
->output_section
)->this_hdr
.sh_entsize
6845 /* Return address for Ith PLT stub in section PLT, for relocation REL
6846 or (bfd_vma) -1 if it should not be included. */
6849 elf64_aarch64_plt_sym_val (bfd_vma i
, const asection
*plt
,
6850 const arelent
*rel ATTRIBUTE_UNUSED
)
6852 return plt
->vma
+ PLT_ENTRY_SIZE
+ i
* PLT_SMALL_ENTRY_SIZE
;
/* We use this so we can override certain functions
   (though currently we don't).  */

/* Size/layout descriptor for 64-bit ELF objects handled by this
   backend: external structure sizes followed by the standard
   bfd_elf64_* swap and slurp routines.  */

const struct elf_size_info elf64_aarch64_size_info =
{
  sizeof (Elf64_External_Ehdr),
  sizeof (Elf64_External_Phdr),
  sizeof (Elf64_External_Shdr),
  sizeof (Elf64_External_Rel),
  sizeof (Elf64_External_Rela),
  sizeof (Elf64_External_Sym),
  sizeof (Elf64_External_Dyn),
  sizeof (Elf_External_Note),
  4,				/* Hash table entry size.  */
  1,				/* Internal relocs per external relocs.  */
  64,				/* Arch size.  */
  3,				/* Log_file_align.  */
  ELFCLASS64, EV_CURRENT,
  bfd_elf64_write_out_phdrs,
  bfd_elf64_write_shdrs_and_ehdr,
  bfd_elf64_checksum_contents,
  bfd_elf64_write_relocs,
  bfd_elf64_swap_symbol_in,
  bfd_elf64_swap_symbol_out,
  bfd_elf64_slurp_reloc_table,
  bfd_elf64_slurp_symbol_table,
  bfd_elf64_swap_dyn_in,
  bfd_elf64_swap_dyn_out,
  bfd_elf64_swap_reloc_in,
  bfd_elf64_swap_reloc_out,
  bfd_elf64_swap_reloca_in,
  bfd_elf64_swap_reloca_out
};
/* Target identification and page-size parameters.  */

#define ELF_ARCH			bfd_arch_aarch64
#define ELF_MACHINE_CODE		EM_AARCH64
#define ELF_MAXPAGESIZE			0x10000
#define ELF_MINPAGESIZE			0x1000
#define ELF_COMMONPAGESIZE		0x1000

/* Generic BFD entry points overridden by this backend.  */

#define bfd_elf64_close_and_cleanup		\
  elf64_aarch64_close_and_cleanup

#define bfd_elf64_bfd_copy_private_bfd_data	\
  elf64_aarch64_copy_private_bfd_data

#define bfd_elf64_bfd_free_cached_info		\
  elf64_aarch64_bfd_free_cached_info

#define bfd_elf64_bfd_is_target_special_symbol	\
  elf64_aarch64_is_target_special_symbol

#define bfd_elf64_bfd_link_hash_table_create	\
  elf64_aarch64_link_hash_table_create

#define bfd_elf64_bfd_link_hash_table_free	\
  elf64_aarch64_hash_table_free

#define bfd_elf64_bfd_merge_private_bfd_data	\
  elf64_aarch64_merge_private_bfd_data

#define bfd_elf64_bfd_print_private_bfd_data	\
  elf64_aarch64_print_private_bfd_data

#define bfd_elf64_bfd_reloc_type_lookup		\
  elf64_aarch64_reloc_type_lookup

#define bfd_elf64_bfd_reloc_name_lookup		\
  elf64_aarch64_reloc_name_lookup

#define bfd_elf64_bfd_set_private_flags		\
  elf64_aarch64_set_private_flags

#define bfd_elf64_find_inliner_info		\
  elf64_aarch64_find_inliner_info

#define bfd_elf64_find_nearest_line		\
  elf64_aarch64_find_nearest_line

#define bfd_elf64_mkobject			\
  elf64_aarch64_mkobject

#define bfd_elf64_new_section_hook		\
  elf64_aarch64_new_section_hook

/* ELF linker backend hooks.  */

#define elf_backend_adjust_dynamic_symbol	\
  elf64_aarch64_adjust_dynamic_symbol

#define elf_backend_always_size_sections	\
  elf64_aarch64_always_size_sections

#define elf_backend_check_relocs		\
  elf64_aarch64_check_relocs

#define elf_backend_copy_indirect_symbol	\
  elf64_aarch64_copy_indirect_symbol

/* Create .dynbss, and .rela.bss sections in DYNOBJ, and set up shortcuts
   to them in our hash.  */
#define elf_backend_create_dynamic_sections	\
  elf64_aarch64_create_dynamic_sections

#define elf_backend_init_index_section		\
  _bfd_elf_init_2_index_sections

#define elf_backend_is_function_type		\
  elf64_aarch64_is_function_type

#define elf_backend_finish_dynamic_sections	\
  elf64_aarch64_finish_dynamic_sections

#define elf_backend_finish_dynamic_symbol	\
  elf64_aarch64_finish_dynamic_symbol

#define elf_backend_gc_sweep_hook		\
  elf64_aarch64_gc_sweep_hook

#define elf_backend_object_p			\
  elf64_aarch64_object_p

#define elf_backend_output_arch_local_syms	\
  elf64_aarch64_output_arch_local_syms

#define elf_backend_plt_sym_val			\
  elf64_aarch64_plt_sym_val

#define elf_backend_post_process_headers	\
  elf64_aarch64_post_process_headers

#define elf_backend_relocate_section		\
  elf64_aarch64_relocate_section

#define elf_backend_reloc_type_class		\
  elf64_aarch64_reloc_type_class

#define elf_backend_section_flags		\
  elf64_aarch64_section_flags

#define elf_backend_section_from_shdr		\
  elf64_aarch64_section_from_shdr

#define elf_backend_size_dynamic_sections	\
  elf64_aarch64_size_dynamic_sections

#define elf_backend_size_info			\
  elf64_aarch64_size_info

/* Backend capability/behavior flags: RELA-only relocations, read-only
   PLT, separate .got.plt, no GC of sections.  */

#define elf_backend_can_refcount       1
#define elf_backend_can_gc_sections    0
#define elf_backend_plt_readonly       1
#define elf_backend_want_got_plt       1
#define elf_backend_want_plt_sym       0
#define elf_backend_may_use_rel_p      0
#define elf_backend_may_use_rela_p     1
#define elf_backend_default_use_rela_p 1
/* Three reserved GOT slots: _DYNAMIC address plus two for ld.so.  */
#define elf_backend_got_header_size (GOT_ENTRY_SIZE * 3)

#undef	elf_backend_obj_attrs_section
#define elf_backend_obj_attrs_section		".ARM.attributes"

#include "elf64-target.h"