1 /* ELF support for AArch64.
2 Copyright 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of BFD, the Binary File Descriptor library.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
21 /* Notes on implementation:
23 Thread Local Store (TLS)
27 The implementation currently supports both traditional TLS and TLS
28 descriptors, but only general dynamic (GD).
30 For traditional TLS the assembler will present us with code
31 fragments of the form:
34 R_AARCH64_TLSGD_ADR_PAGE21(foo)
35 add x0, :tlsgd_lo12:foo
36 R_AARCH64_TLSGD_ADD_LO12_NC(foo)
40 For TLS descriptors the assembler will present us with code
41 fragments of the form:
43 adrp x0, :tlsdesc:foo R_AARCH64_TLSDESC_ADR_PAGE(foo)
44 ldr x1, [x0, #:tlsdesc_lo12:foo] R_AARCH64_TLSDESC_LD64_LO12(foo)
45 add x0, x0, #:tlsdesc_lo12:foo R_AARCH64_TLSDESC_ADD_LO12(foo)
47 blr x1 R_AARCH64_TLSDESC_CALL(foo)
49 The relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} against foo
50 indicate that foo is thread local and should be accessed via the
51 traditional TLS mechanisms.
53 The relocations R_AARCH64_TLSDESC_{ADR_PAGE,LD64_LO12_NC,ADD_LO12_NC}
54 against foo indicate that 'foo' is thread local and should be accessed
55 via a TLS descriptor mechanism.
57 The precise instruction sequence is only relevant from the
58 perspective of linker relaxation which is currently not implemented.
60 The static linker must detect that 'foo' is a TLS object and
61 allocate a double GOT entry. The GOT entry must be created for both
62 global and local TLS symbols. Note that this is different to non-TLS
63 local objects which do not need a GOT entry.
65 In the traditional TLS mechanism, the double GOT entry is used to
66 provide the tls_index structure, containing module and offset
67 entries. The static linker places the relocation R_AARCH64_TLS_DTPMOD64
68 on the module entry. The loader will subsequently fixup this
69 relocation with the module identity.
71 For global traditional TLS symbols the static linker places an
72 R_AARCH64_TLS_DTPREL64 relocation on the offset entry. The loader
73 will subsequently fixup the offset. For local TLS symbols the static
74 linker fixes up offset.
76 In the TLS descriptor mechanism the double GOT entry is used to
77 provide the descriptor. The static linker places the relocation
78 R_AARCH64_TLSDESC on the first GOT slot. The loader will
79 subsequently fix this up.
83 The handling of TLS symbols is implemented across a number of
84 different backend functions. The following is a top level view of
85 what processing is performed where.
87 The TLS implementation maintains state information for each TLS
88 symbol. The state information for local and global symbols is kept
89 in different places. Global symbols use generic BFD structures while
90 local symbols use backend specific structures that are allocated and
91 maintained entirely by the backend.
95 aarch64_check_relocs()
97 This function is invoked for each relocation.
99 The TLS relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} and
100 R_AARCH64_TLSDESC_{ADR_PAGE,LD64_LO12_NC,ADD_LO12_NC} are
101 spotted. One time creation of local symbol data structures are
102 created when the first local symbol is seen.
104 The reference count for a symbol is incremented. The GOT type for
105 each symbol is marked as general dynamic.
107 elf64_aarch64_allocate_dynrelocs ()
109 For each global with positive reference count we allocate a double
110 GOT slot. For a traditional TLS symbol we allocate space for two
111 relocation entries on the GOT, for a TLS descriptor symbol we
112 allocate space for one relocation on the slot. Record the GOT offset
115 elf64_aarch64_size_dynamic_sections ()
117 Iterate over all input BFDs, looking in the local symbol data structures
118 constructed earlier for local TLS symbols and allocate them double
119 GOT slots along with space for a single GOT relocation. Update the
120 local symbol structure to record the GOT offset allocated.
122 elf64_aarch64_relocate_section ()
124 Calls elf64_aarch64_final_link_relocate ()
126 Emit the relevant TLS relocations against the GOT for each TLS
127 symbol. For local TLS symbols emit the GOT offset directly. The GOT
128 relocations are emitted once the first time a TLS symbol is
129 encountered. The implementation uses the LSB of the GOT offset to
130 flag that the relevant GOT relocations for a symbol have been
131 emitted. All of the TLS code that uses the GOT offset needs to take
132 care to mask out this flag bit before using the offset.
134 elf64_aarch64_final_link_relocate ()
136 Fixup the R_AARCH64_TLSGD_{ADR_PREL21, ADD_LO12_NC} relocations. */
140 #include "libiberty.h"
142 #include "bfd_stdint.h"
145 #include "elf/aarch64.h"
147 static bfd_reloc_status_type
148 bfd_elf_aarch64_put_addend (bfd
*abfd
,
150 reloc_howto_type
*howto
, bfd_signed_vma addend
);
/* Return non-zero if relocation number R_TYPE is any of the AArch64 TLS
   relocations handled by this backend.  This includes the TLS descriptor
   family matched by IS_AARCH64_TLSDESC_RELOC (defined below — legal,
   since a macro is only expanded at its point of use).  */
#define IS_AARCH64_TLS_RELOC(R_TYPE)				\
  ((R_TYPE) == R_AARCH64_TLSGD_ADR_PAGE21			\
   || (R_TYPE) == R_AARCH64_TLSGD_ADD_LO12_NC			\
   || (R_TYPE) == R_AARCH64_TLSIE_MOVW_GOTTPREL_G1		\
   || (R_TYPE) == R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC		\
   || (R_TYPE) == R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21		\
   || (R_TYPE) == R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC		\
   || (R_TYPE) == R_AARCH64_TLSIE_LD_GOTTPREL_PREL19		\
   || (R_TYPE) == R_AARCH64_TLSLE_ADD_TPREL_LO12		\
   || (R_TYPE) == R_AARCH64_TLSLE_ADD_TPREL_HI12		\
   || (R_TYPE) == R_AARCH64_TLSLE_ADD_TPREL_LO12_NC		\
   || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G2			\
   || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G1			\
   || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G1_NC		\
   || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G0			\
   || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G0_NC		\
   || (R_TYPE) == R_AARCH64_TLS_DTPMOD64			\
   || (R_TYPE) == R_AARCH64_TLS_DTPREL64			\
   || (R_TYPE) == R_AARCH64_TLS_TPREL64				\
   || IS_AARCH64_TLSDESC_RELOC ((R_TYPE)))
/* Return non-zero if relocation number R_TYPE belongs to the TLS
   descriptor (TLSDESC) family.  */
#define IS_AARCH64_TLSDESC_RELOC(R_TYPE)		\
  ((R_TYPE) == R_AARCH64_TLSDESC_LD64_PREL19		\
   || (R_TYPE) == R_AARCH64_TLSDESC_ADR_PREL21		\
   || (R_TYPE) == R_AARCH64_TLSDESC_ADR_PAGE		\
   || (R_TYPE) == R_AARCH64_TLSDESC_ADD_LO12_NC		\
   || (R_TYPE) == R_AARCH64_TLSDESC_LD64_LO12_NC	\
   || (R_TYPE) == R_AARCH64_TLSDESC_OFF_G1		\
   || (R_TYPE) == R_AARCH64_TLSDESC_OFF_G0_NC		\
   || (R_TYPE) == R_AARCH64_TLSDESC_LDR			\
   || (R_TYPE) == R_AARCH64_TLSDESC_ADD			\
   || (R_TYPE) == R_AARCH64_TLSDESC_CALL		\
   || (R_TYPE) == R_AARCH64_TLSDESC)
/* This backend never converts dynamic relocations into copy relocs.  */
#define ELIMINATE_COPY_RELOCS 0

/* Return the relocation section associated with NAME.  HTAB is the
   bfd's elf64_aarch64_link_hash_entry.  */
#define RELOC_SECTION(HTAB, NAME) \
  ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)

/* Return size of a relocation entry.  HTAB is the bfd's
   elf64_aarch64_link_hash_entry.  */
#define RELOC_SIZE(HTAB) (sizeof (Elf64_External_Rela))

/* Return function to swap relocations in.  HTAB is the bfd's
   elf64_aarch64_link_hash_entry.  */
#define SWAP_RELOC_IN(HTAB) (bfd_elf64_swap_reloca_in)

/* Return function to swap relocations out.  HTAB is the bfd's
   elf64_aarch64_link_hash_entry.  */
#define SWAP_RELOC_OUT(HTAB) (bfd_elf64_swap_reloca_out)

/* GOT Entry size - 8 bytes.  */
#define GOT_ENTRY_SIZE (8)

/* PLT stub sizes, in bytes.  */
#define PLT_ENTRY_SIZE (32)
#define PLT_SMALL_ENTRY_SIZE (16)
#define PLT_TLSDESC_ENTRY_SIZE (32)

/* Take the PAGE component of an address or offset (the 4K page base),
   and the offset within that page.  */
#define PG(x) ((x) & ~ 0xfff)
#define PG_OFFSET(x) ((x) & 0xfff)

/* Encoding of the A64 nop instruction.  */
#define INSN_NOP 0xd503201f

/* Size in bytes of the PLT-indexed part of the GOT: one GOT entry per
   .rela.plt relocation; zero when there is no PLT relocation section.  */
#define aarch64_compute_jump_table_size(htab)		\
  (((htab)->root.srelplt == NULL) ? 0			\
   : (htab)->root.srelplt->reloc_count * GOT_ENTRY_SIZE)
222 /* The first entry in a procedure linkage table looks like this
223 if the distance between the PLTGOT and the PLT is < 4GB use
224 these PLT entries. Note that the dynamic linker gets &PLTGOT[2]
225 in x16 and needs to work out PLTGOT[1] by using an address of
227 static const bfd_byte elf64_aarch64_small_plt0_entry
[PLT_ENTRY_SIZE
] =
229 0xf0, 0x7b, 0xbf, 0xa9, /* stp x16, x30, [sp, #-16]! */
230 0x10, 0x00, 0x00, 0x90, /* adrp x16, (GOT+16) */
231 0x11, 0x0A, 0x40, 0xf9, /* ldr x17, [x16, #PLT_GOT+0x10] */
232 0x10, 0x42, 0x00, 0x91, /* add x16, x16,#PLT_GOT+0x10 */
233 0x20, 0x02, 0x1f, 0xd6, /* br x17 */
234 0x1f, 0x20, 0x03, 0xd5, /* nop */
235 0x1f, 0x20, 0x03, 0xd5, /* nop */
236 0x1f, 0x20, 0x03, 0xd5, /* nop */
239 /* Per function entry in a procedure linkage table looks like this
240 if the distance between the PLTGOT and the PLT is < 4GB use
241 these PLT entries. */
242 static const bfd_byte elf64_aarch64_small_plt_entry
[PLT_SMALL_ENTRY_SIZE
] =
244 0x10, 0x00, 0x00, 0x90, /* adrp x16, PLTGOT + n * 8 */
245 0x11, 0x02, 0x40, 0xf9, /* ldr x17, [x16, PLTGOT + n * 8] */
246 0x10, 0x02, 0x00, 0x91, /* add x16, x16, :lo12:PLTGOT + n * 8 */
247 0x20, 0x02, 0x1f, 0xd6, /* br x17. */
250 static const bfd_byte
251 elf64_aarch64_tlsdesc_small_plt_entry
[PLT_TLSDESC_ENTRY_SIZE
] =
253 0xe2, 0x0f, 0xbf, 0xa9, /* stp x2, x3, [sp, #-16]! */
254 0x02, 0x00, 0x00, 0x90, /* adrp x2, 0 */
255 0x03, 0x00, 0x00, 0x90, /* adrp x3, 0 */
256 0x42, 0x08, 0x40, 0xF9, /* ldr x2, [x2, #0] */
257 0x63, 0x00, 0x00, 0x91, /* add x3, x3, 0 */
258 0x40, 0x00, 0x1F, 0xD6, /* br x2 */
259 0x1f, 0x20, 0x03, 0xd5, /* nop */
260 0x1f, 0x20, 0x03, 0xd5, /* nop */
/* Hook this backend's reloc-info-to-howto conversion into the generic
   ELF code (same routine for REL and RELA forms).  */
#define elf_info_to_howto		elf64_aarch64_info_to_howto
#define elf_info_to_howto_rel		elf64_aarch64_info_to_howto

#define AARCH64_ELF_ABI_VERSION		0
#define AARCH64_ELF_OS_ABI_VERSION	0

/* In case we're on a 32-bit machine, construct a 64-bit "-1" value.  */
#define ALL_ONES (~ (bfd_vma) 0)
272 static reloc_howto_type elf64_aarch64_howto_none
=
273 HOWTO (R_AARCH64_NONE
, /* type */
275 0, /* size (0 = byte, 1 = short, 2 = long) */
277 FALSE
, /* pc_relative */
279 complain_overflow_dont
,/* complain_on_overflow */
280 bfd_elf_generic_reloc
, /* special_function */
281 "R_AARCH64_NONE", /* name */
282 FALSE
, /* partial_inplace */
285 FALSE
); /* pcrel_offset */
287 static reloc_howto_type elf64_aarch64_howto_dynrelocs
[] =
289 HOWTO (R_AARCH64_COPY
, /* type */
291 2, /* size (0 = byte, 1 = short, 2 = long) */
293 FALSE
, /* pc_relative */
295 complain_overflow_bitfield
, /* complain_on_overflow */
296 bfd_elf_generic_reloc
, /* special_function */
297 "R_AARCH64_COPY", /* name */
298 TRUE
, /* partial_inplace */
299 0xffffffff, /* src_mask */
300 0xffffffff, /* dst_mask */
301 FALSE
), /* pcrel_offset */
303 HOWTO (R_AARCH64_GLOB_DAT
, /* type */
305 2, /* size (0 = byte, 1 = short, 2 = long) */
307 FALSE
, /* pc_relative */
309 complain_overflow_bitfield
, /* complain_on_overflow */
310 bfd_elf_generic_reloc
, /* special_function */
311 "R_AARCH64_GLOB_DAT", /* name */
312 TRUE
, /* partial_inplace */
313 0xffffffff, /* src_mask */
314 0xffffffff, /* dst_mask */
315 FALSE
), /* pcrel_offset */
317 HOWTO (R_AARCH64_JUMP_SLOT
, /* type */
319 2, /* size (0 = byte, 1 = short, 2 = long) */
321 FALSE
, /* pc_relative */
323 complain_overflow_bitfield
, /* complain_on_overflow */
324 bfd_elf_generic_reloc
, /* special_function */
325 "R_AARCH64_JUMP_SLOT", /* name */
326 TRUE
, /* partial_inplace */
327 0xffffffff, /* src_mask */
328 0xffffffff, /* dst_mask */
329 FALSE
), /* pcrel_offset */
331 HOWTO (R_AARCH64_RELATIVE
, /* type */
333 2, /* size (0 = byte, 1 = short, 2 = long) */
335 FALSE
, /* pc_relative */
337 complain_overflow_bitfield
, /* complain_on_overflow */
338 bfd_elf_generic_reloc
, /* special_function */
339 "R_AARCH64_RELATIVE", /* name */
340 TRUE
, /* partial_inplace */
341 ALL_ONES
, /* src_mask */
342 ALL_ONES
, /* dst_mask */
343 FALSE
), /* pcrel_offset */
345 HOWTO (R_AARCH64_TLS_DTPMOD64
, /* type */
347 2, /* size (0 = byte, 1 = short, 2 = long) */
349 FALSE
, /* pc_relative */
351 complain_overflow_dont
, /* complain_on_overflow */
352 bfd_elf_generic_reloc
, /* special_function */
353 "R_AARCH64_TLS_DTPMOD64", /* name */
354 FALSE
, /* partial_inplace */
356 ALL_ONES
, /* dst_mask */
357 FALSE
), /* pc_reloffset */
359 HOWTO (R_AARCH64_TLS_DTPREL64
, /* type */
361 2, /* size (0 = byte, 1 = short, 2 = long) */
363 FALSE
, /* pc_relative */
365 complain_overflow_dont
, /* complain_on_overflow */
366 bfd_elf_generic_reloc
, /* special_function */
367 "R_AARCH64_TLS_DTPREL64", /* name */
368 FALSE
, /* partial_inplace */
370 ALL_ONES
, /* dst_mask */
371 FALSE
), /* pcrel_offset */
373 HOWTO (R_AARCH64_TLS_TPREL64
, /* type */
375 2, /* size (0 = byte, 1 = short, 2 = long) */
377 FALSE
, /* pc_relative */
379 complain_overflow_dont
, /* complain_on_overflow */
380 bfd_elf_generic_reloc
, /* special_function */
381 "R_AARCH64_TLS_TPREL64", /* name */
382 FALSE
, /* partial_inplace */
384 ALL_ONES
, /* dst_mask */
385 FALSE
), /* pcrel_offset */
387 HOWTO (R_AARCH64_TLSDESC
, /* type */
389 2, /* size (0 = byte, 1 = short, 2 = long) */
391 FALSE
, /* pc_relative */
393 complain_overflow_dont
, /* complain_on_overflow */
394 bfd_elf_generic_reloc
, /* special_function */
395 "R_AARCH64_TLSDESC", /* name */
396 FALSE
, /* partial_inplace */
398 ALL_ONES
, /* dst_mask */
399 FALSE
), /* pcrel_offset */
403 /* Note: code such as elf64_aarch64_reloc_type_lookup expect to use e.g.
404 R_AARCH64_PREL64 as an index into this, and find the R_AARCH64_PREL64 HOWTO
407 static reloc_howto_type elf64_aarch64_howto_table
[] =
409 /* Basic data relocations. */
411 HOWTO (R_AARCH64_NULL
, /* type */
413 0, /* size (0 = byte, 1 = short, 2 = long) */
415 FALSE
, /* pc_relative */
417 complain_overflow_dont
, /* complain_on_overflow */
418 bfd_elf_generic_reloc
, /* special_function */
419 "R_AARCH64_NULL", /* name */
420 FALSE
, /* partial_inplace */
423 FALSE
), /* pcrel_offset */
426 HOWTO (R_AARCH64_ABS64
, /* type */
428 4, /* size (4 = long long) */
430 FALSE
, /* pc_relative */
432 complain_overflow_unsigned
, /* complain_on_overflow */
433 bfd_elf_generic_reloc
, /* special_function */
434 "R_AARCH64_ABS64", /* name */
435 FALSE
, /* partial_inplace */
436 ALL_ONES
, /* src_mask */
437 ALL_ONES
, /* dst_mask */
438 FALSE
), /* pcrel_offset */
441 HOWTO (R_AARCH64_ABS32
, /* type */
443 2, /* size (0 = byte, 1 = short, 2 = long) */
445 FALSE
, /* pc_relative */
447 complain_overflow_unsigned
, /* complain_on_overflow */
448 bfd_elf_generic_reloc
, /* special_function */
449 "R_AARCH64_ABS32", /* name */
450 FALSE
, /* partial_inplace */
451 0xffffffff, /* src_mask */
452 0xffffffff, /* dst_mask */
453 FALSE
), /* pcrel_offset */
456 HOWTO (R_AARCH64_ABS16
, /* type */
458 1, /* size (0 = byte, 1 = short, 2 = long) */
460 FALSE
, /* pc_relative */
462 complain_overflow_unsigned
, /* complain_on_overflow */
463 bfd_elf_generic_reloc
, /* special_function */
464 "R_AARCH64_ABS16", /* name */
465 FALSE
, /* partial_inplace */
466 0xffff, /* src_mask */
467 0xffff, /* dst_mask */
468 FALSE
), /* pcrel_offset */
470 /* .xword: (S+A-P) */
471 HOWTO (R_AARCH64_PREL64
, /* type */
473 4, /* size (4 = long long) */
475 TRUE
, /* pc_relative */
477 complain_overflow_signed
, /* complain_on_overflow */
478 bfd_elf_generic_reloc
, /* special_function */
479 "R_AARCH64_PREL64", /* name */
480 FALSE
, /* partial_inplace */
481 ALL_ONES
, /* src_mask */
482 ALL_ONES
, /* dst_mask */
483 TRUE
), /* pcrel_offset */
486 HOWTO (R_AARCH64_PREL32
, /* type */
488 2, /* size (0 = byte, 1 = short, 2 = long) */
490 TRUE
, /* pc_relative */
492 complain_overflow_signed
, /* complain_on_overflow */
493 bfd_elf_generic_reloc
, /* special_function */
494 "R_AARCH64_PREL32", /* name */
495 FALSE
, /* partial_inplace */
496 0xffffffff, /* src_mask */
497 0xffffffff, /* dst_mask */
498 TRUE
), /* pcrel_offset */
501 HOWTO (R_AARCH64_PREL16
, /* type */
503 1, /* size (0 = byte, 1 = short, 2 = long) */
505 TRUE
, /* pc_relative */
507 complain_overflow_signed
, /* complain_on_overflow */
508 bfd_elf_generic_reloc
, /* special_function */
509 "R_AARCH64_PREL16", /* name */
510 FALSE
, /* partial_inplace */
511 0xffff, /* src_mask */
512 0xffff, /* dst_mask */
513 TRUE
), /* pcrel_offset */
515 /* Group relocations to create a 16, 32, 48 or 64 bit
516 unsigned data or abs address inline. */
518 /* MOVZ: ((S+A) >> 0) & 0xffff */
519 HOWTO (R_AARCH64_MOVW_UABS_G0
, /* type */
521 2, /* size (0 = byte, 1 = short, 2 = long) */
523 FALSE
, /* pc_relative */
525 complain_overflow_unsigned
, /* complain_on_overflow */
526 bfd_elf_generic_reloc
, /* special_function */
527 "R_AARCH64_MOVW_UABS_G0", /* name */
528 FALSE
, /* partial_inplace */
529 0xffff, /* src_mask */
530 0xffff, /* dst_mask */
531 FALSE
), /* pcrel_offset */
533 /* MOVK: ((S+A) >> 0) & 0xffff [no overflow check] */
534 HOWTO (R_AARCH64_MOVW_UABS_G0_NC
, /* type */
536 2, /* size (0 = byte, 1 = short, 2 = long) */
538 FALSE
, /* pc_relative */
540 complain_overflow_dont
, /* complain_on_overflow */
541 bfd_elf_generic_reloc
, /* special_function */
542 "R_AARCH64_MOVW_UABS_G0_NC", /* name */
543 FALSE
, /* partial_inplace */
544 0xffff, /* src_mask */
545 0xffff, /* dst_mask */
546 FALSE
), /* pcrel_offset */
548 /* MOVZ: ((S+A) >> 16) & 0xffff */
549 HOWTO (R_AARCH64_MOVW_UABS_G1
, /* type */
551 2, /* size (0 = byte, 1 = short, 2 = long) */
553 FALSE
, /* pc_relative */
555 complain_overflow_unsigned
, /* complain_on_overflow */
556 bfd_elf_generic_reloc
, /* special_function */
557 "R_AARCH64_MOVW_UABS_G1", /* name */
558 FALSE
, /* partial_inplace */
559 0xffff, /* src_mask */
560 0xffff, /* dst_mask */
561 FALSE
), /* pcrel_offset */
563 /* MOVK: ((S+A) >> 16) & 0xffff [no overflow check] */
564 HOWTO (R_AARCH64_MOVW_UABS_G1_NC
, /* type */
566 2, /* size (0 = byte, 1 = short, 2 = long) */
568 FALSE
, /* pc_relative */
570 complain_overflow_dont
, /* complain_on_overflow */
571 bfd_elf_generic_reloc
, /* special_function */
572 "R_AARCH64_MOVW_UABS_G1_NC", /* name */
573 FALSE
, /* partial_inplace */
574 0xffff, /* src_mask */
575 0xffff, /* dst_mask */
576 FALSE
), /* pcrel_offset */
578 /* MOVZ: ((S+A) >> 32) & 0xffff */
579 HOWTO (R_AARCH64_MOVW_UABS_G2
, /* type */
581 2, /* size (0 = byte, 1 = short, 2 = long) */
583 FALSE
, /* pc_relative */
585 complain_overflow_unsigned
, /* complain_on_overflow */
586 bfd_elf_generic_reloc
, /* special_function */
587 "R_AARCH64_MOVW_UABS_G2", /* name */
588 FALSE
, /* partial_inplace */
589 0xffff, /* src_mask */
590 0xffff, /* dst_mask */
591 FALSE
), /* pcrel_offset */
593 /* MOVK: ((S+A) >> 32) & 0xffff [no overflow check] */
594 HOWTO (R_AARCH64_MOVW_UABS_G2_NC
, /* type */
596 2, /* size (0 = byte, 1 = short, 2 = long) */
598 FALSE
, /* pc_relative */
600 complain_overflow_dont
, /* complain_on_overflow */
601 bfd_elf_generic_reloc
, /* special_function */
602 "R_AARCH64_MOVW_UABS_G2_NC", /* name */
603 FALSE
, /* partial_inplace */
604 0xffff, /* src_mask */
605 0xffff, /* dst_mask */
606 FALSE
), /* pcrel_offset */
608 /* MOVZ: ((S+A) >> 48) & 0xffff */
609 HOWTO (R_AARCH64_MOVW_UABS_G3
, /* type */
611 2, /* size (0 = byte, 1 = short, 2 = long) */
613 FALSE
, /* pc_relative */
615 complain_overflow_unsigned
, /* complain_on_overflow */
616 bfd_elf_generic_reloc
, /* special_function */
617 "R_AARCH64_MOVW_UABS_G3", /* name */
618 FALSE
, /* partial_inplace */
619 0xffff, /* src_mask */
620 0xffff, /* dst_mask */
621 FALSE
), /* pcrel_offset */
623 /* Group relocations to create high part of a 16, 32, 48 or 64 bit
624 signed data or abs address inline. Will change instruction
625 to MOVN or MOVZ depending on sign of calculated value. */
627 /* MOV[ZN]: ((S+A) >> 0) & 0xffff */
628 HOWTO (R_AARCH64_MOVW_SABS_G0
, /* type */
630 2, /* size (0 = byte, 1 = short, 2 = long) */
632 FALSE
, /* pc_relative */
634 complain_overflow_signed
, /* complain_on_overflow */
635 bfd_elf_generic_reloc
, /* special_function */
636 "R_AARCH64_MOVW_SABS_G0", /* name */
637 FALSE
, /* partial_inplace */
638 0xffff, /* src_mask */
639 0xffff, /* dst_mask */
640 FALSE
), /* pcrel_offset */
642 /* MOV[ZN]: ((S+A) >> 16) & 0xffff */
643 HOWTO (R_AARCH64_MOVW_SABS_G1
, /* type */
645 2, /* size (0 = byte, 1 = short, 2 = long) */
647 FALSE
, /* pc_relative */
649 complain_overflow_signed
, /* complain_on_overflow */
650 bfd_elf_generic_reloc
, /* special_function */
651 "R_AARCH64_MOVW_SABS_G1", /* name */
652 FALSE
, /* partial_inplace */
653 0xffff, /* src_mask */
654 0xffff, /* dst_mask */
655 FALSE
), /* pcrel_offset */
657 /* MOV[ZN]: ((S+A) >> 32) & 0xffff */
658 HOWTO (R_AARCH64_MOVW_SABS_G2
, /* type */
660 2, /* size (0 = byte, 1 = short, 2 = long) */
662 FALSE
, /* pc_relative */
664 complain_overflow_signed
, /* complain_on_overflow */
665 bfd_elf_generic_reloc
, /* special_function */
666 "R_AARCH64_MOVW_SABS_G2", /* name */
667 FALSE
, /* partial_inplace */
668 0xffff, /* src_mask */
669 0xffff, /* dst_mask */
670 FALSE
), /* pcrel_offset */
672 /* Relocations to generate 19, 21 and 33 bit PC-relative load/store
673 addresses: PG(x) is (x & ~0xfff). */
675 /* LD-lit: ((S+A-P) >> 2) & 0x7ffff */
676 HOWTO (R_AARCH64_LD_PREL_LO19
, /* type */
678 2, /* size (0 = byte, 1 = short, 2 = long) */
680 TRUE
, /* pc_relative */
682 complain_overflow_signed
, /* complain_on_overflow */
683 bfd_elf_generic_reloc
, /* special_function */
684 "R_AARCH64_LD_PREL_LO19", /* name */
685 FALSE
, /* partial_inplace */
686 0x7ffff, /* src_mask */
687 0x7ffff, /* dst_mask */
688 TRUE
), /* pcrel_offset */
690 /* ADR: (S+A-P) & 0x1fffff */
691 HOWTO (R_AARCH64_ADR_PREL_LO21
, /* type */
693 2, /* size (0 = byte, 1 = short, 2 = long) */
695 TRUE
, /* pc_relative */
697 complain_overflow_signed
, /* complain_on_overflow */
698 bfd_elf_generic_reloc
, /* special_function */
699 "R_AARCH64_ADR_PREL_LO21", /* name */
700 FALSE
, /* partial_inplace */
701 0x1fffff, /* src_mask */
702 0x1fffff, /* dst_mask */
703 TRUE
), /* pcrel_offset */
705 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
706 HOWTO (R_AARCH64_ADR_PREL_PG_HI21
, /* type */
708 2, /* size (0 = byte, 1 = short, 2 = long) */
710 TRUE
, /* pc_relative */
712 complain_overflow_signed
, /* complain_on_overflow */
713 bfd_elf_generic_reloc
, /* special_function */
714 "R_AARCH64_ADR_PREL_PG_HI21", /* name */
715 FALSE
, /* partial_inplace */
716 0x1fffff, /* src_mask */
717 0x1fffff, /* dst_mask */
718 TRUE
), /* pcrel_offset */
720 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff [no overflow check] */
721 HOWTO (R_AARCH64_ADR_PREL_PG_HI21_NC
, /* type */
723 2, /* size (0 = byte, 1 = short, 2 = long) */
725 TRUE
, /* pc_relative */
727 complain_overflow_dont
, /* complain_on_overflow */
728 bfd_elf_generic_reloc
, /* special_function */
729 "R_AARCH64_ADR_PREL_PG_HI21_NC", /* name */
730 FALSE
, /* partial_inplace */
731 0x1fffff, /* src_mask */
732 0x1fffff, /* dst_mask */
733 TRUE
), /* pcrel_offset */
735 /* ADD: (S+A) & 0xfff [no overflow check] */
736 HOWTO (R_AARCH64_ADD_ABS_LO12_NC
, /* type */
738 2, /* size (0 = byte, 1 = short, 2 = long) */
740 FALSE
, /* pc_relative */
742 complain_overflow_dont
, /* complain_on_overflow */
743 bfd_elf_generic_reloc
, /* special_function */
744 "R_AARCH64_ADD_ABS_LO12_NC", /* name */
745 FALSE
, /* partial_inplace */
746 0x3ffc00, /* src_mask */
747 0x3ffc00, /* dst_mask */
748 FALSE
), /* pcrel_offset */
750 /* LD/ST8: (S+A) & 0xfff */
751 HOWTO (R_AARCH64_LDST8_ABS_LO12_NC
, /* type */
753 2, /* size (0 = byte, 1 = short, 2 = long) */
755 FALSE
, /* pc_relative */
757 complain_overflow_dont
, /* complain_on_overflow */
758 bfd_elf_generic_reloc
, /* special_function */
759 "R_AARCH64_LDST8_ABS_LO12_NC", /* name */
760 FALSE
, /* partial_inplace */
761 0xfff, /* src_mask */
762 0xfff, /* dst_mask */
763 FALSE
), /* pcrel_offset */
765 /* Relocations for control-flow instructions. */
767 /* TBZ/NZ: ((S+A-P) >> 2) & 0x3fff */
768 HOWTO (R_AARCH64_TSTBR14
, /* type */
770 2, /* size (0 = byte, 1 = short, 2 = long) */
772 TRUE
, /* pc_relative */
774 complain_overflow_signed
, /* complain_on_overflow */
775 bfd_elf_generic_reloc
, /* special_function */
776 "R_AARCH64_TSTBR14", /* name */
777 FALSE
, /* partial_inplace */
778 0x3fff, /* src_mask */
779 0x3fff, /* dst_mask */
780 TRUE
), /* pcrel_offset */
782 /* B.cond: ((S+A-P) >> 2) & 0x7ffff */
783 HOWTO (R_AARCH64_CONDBR19
, /* type */
785 2, /* size (0 = byte, 1 = short, 2 = long) */
787 TRUE
, /* pc_relative */
789 complain_overflow_signed
, /* complain_on_overflow */
790 bfd_elf_generic_reloc
, /* special_function */
791 "R_AARCH64_CONDBR19", /* name */
792 FALSE
, /* partial_inplace */
793 0x7ffff, /* src_mask */
794 0x7ffff, /* dst_mask */
795 TRUE
), /* pcrel_offset */
799 /* B: ((S+A-P) >> 2) & 0x3ffffff */
800 HOWTO (R_AARCH64_JUMP26
, /* type */
802 2, /* size (0 = byte, 1 = short, 2 = long) */
804 TRUE
, /* pc_relative */
806 complain_overflow_signed
, /* complain_on_overflow */
807 bfd_elf_generic_reloc
, /* special_function */
808 "R_AARCH64_JUMP26", /* name */
809 FALSE
, /* partial_inplace */
810 0x3ffffff, /* src_mask */
811 0x3ffffff, /* dst_mask */
812 TRUE
), /* pcrel_offset */
814 /* BL: ((S+A-P) >> 2) & 0x3ffffff */
815 HOWTO (R_AARCH64_CALL26
, /* type */
817 2, /* size (0 = byte, 1 = short, 2 = long) */
819 TRUE
, /* pc_relative */
821 complain_overflow_signed
, /* complain_on_overflow */
822 bfd_elf_generic_reloc
, /* special_function */
823 "R_AARCH64_CALL26", /* name */
824 FALSE
, /* partial_inplace */
825 0x3ffffff, /* src_mask */
826 0x3ffffff, /* dst_mask */
827 TRUE
), /* pcrel_offset */
829 /* LD/ST16: (S+A) & 0xffe */
830 HOWTO (R_AARCH64_LDST16_ABS_LO12_NC
, /* type */
832 2, /* size (0 = byte, 1 = short, 2 = long) */
834 FALSE
, /* pc_relative */
836 complain_overflow_dont
, /* complain_on_overflow */
837 bfd_elf_generic_reloc
, /* special_function */
838 "R_AARCH64_LDST16_ABS_LO12_NC", /* name */
839 FALSE
, /* partial_inplace */
840 0xffe, /* src_mask */
841 0xffe, /* dst_mask */
842 FALSE
), /* pcrel_offset */
844 /* LD/ST32: (S+A) & 0xffc */
845 HOWTO (R_AARCH64_LDST32_ABS_LO12_NC
, /* type */
847 2, /* size (0 = byte, 1 = short, 2 = long) */
849 FALSE
, /* pc_relative */
851 complain_overflow_dont
, /* complain_on_overflow */
852 bfd_elf_generic_reloc
, /* special_function */
853 "R_AARCH64_LDST32_ABS_LO12_NC", /* name */
854 FALSE
, /* partial_inplace */
855 0xffc, /* src_mask */
856 0xffc, /* dst_mask */
857 FALSE
), /* pcrel_offset */
859 /* LD/ST64: (S+A) & 0xff8 */
860 HOWTO (R_AARCH64_LDST64_ABS_LO12_NC
, /* type */
862 2, /* size (0 = byte, 1 = short, 2 = long) */
864 FALSE
, /* pc_relative */
866 complain_overflow_dont
, /* complain_on_overflow */
867 bfd_elf_generic_reloc
, /* special_function */
868 "R_AARCH64_LDST64_ABS_LO12_NC", /* name */
869 FALSE
, /* partial_inplace */
870 0xff8, /* src_mask */
871 0xff8, /* dst_mask */
872 FALSE
), /* pcrel_offset */
887 /* LD/ST128: (S+A) & 0xff0 */
888 HOWTO (R_AARCH64_LDST128_ABS_LO12_NC
, /* type */
890 2, /* size (0 = byte, 1 = short, 2 = long) */
892 FALSE
, /* pc_relative */
894 complain_overflow_dont
, /* complain_on_overflow */
895 bfd_elf_generic_reloc
, /* special_function */
896 "R_AARCH64_LDST128_ABS_LO12_NC", /* name */
897 FALSE
, /* partial_inplace */
898 0xff0, /* src_mask */
899 0xff0, /* dst_mask */
900 FALSE
), /* pcrel_offset */
912 /* Set a load-literal immediate field to bits
913 0x1FFFFC of G(S)-P */
914 HOWTO (R_AARCH64_GOT_LD_PREL19
, /* type */
916 2, /* size (0 = byte,1 = short,2 = long) */
918 TRUE
, /* pc_relative */
920 complain_overflow_signed
, /* complain_on_overflow */
921 bfd_elf_generic_reloc
, /* special_function */
922 "R_AARCH64_GOT_LD_PREL19", /* name */
923 FALSE
, /* partial_inplace */
924 0xffffe0, /* src_mask */
925 0xffffe0, /* dst_mask */
926 TRUE
), /* pcrel_offset */
930 /* Get to the page for the GOT entry for the symbol
931 (G(S) - P) using an ADRP instruction. */
932 HOWTO (R_AARCH64_ADR_GOT_PAGE
, /* type */
934 2, /* size (0 = byte, 1 = short, 2 = long) */
936 TRUE
, /* pc_relative */
938 complain_overflow_dont
, /* complain_on_overflow */
939 bfd_elf_generic_reloc
, /* special_function */
940 "R_AARCH64_ADR_GOT_PAGE", /* name */
941 FALSE
, /* partial_inplace */
942 0x1fffff, /* src_mask */
943 0x1fffff, /* dst_mask */
944 TRUE
), /* pcrel_offset */
946 /* LD64: GOT offset G(S) & 0xff8 */
947 HOWTO (R_AARCH64_LD64_GOT_LO12_NC
, /* type */
949 2, /* size (0 = byte, 1 = short, 2 = long) */
951 FALSE
, /* pc_relative */
953 complain_overflow_dont
, /* complain_on_overflow */
954 bfd_elf_generic_reloc
, /* special_function */
955 "R_AARCH64_LD64_GOT_LO12_NC", /* name */
956 FALSE
, /* partial_inplace */
957 0xff8, /* src_mask */
958 0xff8, /* dst_mask */
959 FALSE
) /* pcrel_offset */
962 static reloc_howto_type elf64_aarch64_tls_howto_table
[] =
966 /* Get to the page for the GOT entry for the symbol
967 (G(S) - P) using an ADRP instruction. */
968 HOWTO (R_AARCH64_TLSGD_ADR_PAGE21
, /* type */
970 2, /* size (0 = byte, 1 = short, 2 = long) */
972 TRUE
, /* pc_relative */
974 complain_overflow_dont
, /* complain_on_overflow */
975 bfd_elf_generic_reloc
, /* special_function */
976 "R_AARCH64_TLSGD_ADR_PAGE21", /* name */
977 FALSE
, /* partial_inplace */
978 0x1fffff, /* src_mask */
979 0x1fffff, /* dst_mask */
980 TRUE
), /* pcrel_offset */
982 /* ADD: GOT offset G(S) & 0xff8 [no overflow check] */
983 HOWTO (R_AARCH64_TLSGD_ADD_LO12_NC
, /* type */
985 2, /* size (0 = byte, 1 = short, 2 = long) */
987 FALSE
, /* pc_relative */
989 complain_overflow_dont
, /* complain_on_overflow */
990 bfd_elf_generic_reloc
, /* special_function */
991 "R_AARCH64_TLSGD_ADD_LO12_NC", /* name */
992 FALSE
, /* partial_inplace */
993 0xfff, /* src_mask */
994 0xfff, /* dst_mask */
995 FALSE
), /* pcrel_offset */
1022 HOWTO (R_AARCH64_TLSIE_MOVW_GOTTPREL_G1
, /* type */
1023 16, /* rightshift */
1024 2, /* size (0 = byte, 1 = short, 2 = long) */
1026 FALSE
, /* pc_relative */
1028 complain_overflow_dont
, /* complain_on_overflow */
1029 bfd_elf_generic_reloc
, /* special_function */
1030 "R_AARCH64_TLSIE_MOVW_GOTTPREL_G1", /* name */
1031 FALSE
, /* partial_inplace */
1032 0xffff, /* src_mask */
1033 0xffff, /* dst_mask */
1034 FALSE
), /* pcrel_offset */
1036 HOWTO (R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC
, /* type */
1038 2, /* size (0 = byte, 1 = short, 2 = long) */
1040 FALSE
, /* pc_relative */
1042 complain_overflow_dont
, /* complain_on_overflow */
1043 bfd_elf_generic_reloc
, /* special_function */
1044 "R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC", /* name */
1045 FALSE
, /* partial_inplace */
1046 0xffff, /* src_mask */
1047 0xffff, /* dst_mask */
1048 FALSE
), /* pcrel_offset */
1050 HOWTO (R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21
, /* type */
1051 12, /* rightshift */
1052 2, /* size (0 = byte, 1 = short, 2 = long) */
1054 FALSE
, /* pc_relative */
1056 complain_overflow_dont
, /* complain_on_overflow */
1057 bfd_elf_generic_reloc
, /* special_function */
1058 "R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21", /* name */
1059 FALSE
, /* partial_inplace */
1060 0x1fffff, /* src_mask */
1061 0x1fffff, /* dst_mask */
1062 FALSE
), /* pcrel_offset */
1064 HOWTO (R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC
, /* type */
1066 2, /* size (0 = byte, 1 = short, 2 = long) */
1068 FALSE
, /* pc_relative */
1070 complain_overflow_dont
, /* complain_on_overflow */
1071 bfd_elf_generic_reloc
, /* special_function */
1072 "R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC", /* name */
1073 FALSE
, /* partial_inplace */
1074 0xff8, /* src_mask */
1075 0xff8, /* dst_mask */
1076 FALSE
), /* pcrel_offset */
1078 HOWTO (R_AARCH64_TLSIE_LD_GOTTPREL_PREL19
, /* type */
1080 2, /* size (0 = byte, 1 = short, 2 = long) */
1082 FALSE
, /* pc_relative */
1084 complain_overflow_dont
, /* complain_on_overflow */
1085 bfd_elf_generic_reloc
, /* special_function */
1086 "R_AARCH64_TLSIE_LD_GOTTPREL_PREL19", /* name */
1087 FALSE
, /* partial_inplace */
1088 0x1ffffc, /* src_mask */
1089 0x1ffffc, /* dst_mask */
1090 FALSE
), /* pcrel_offset */
1092 HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G2
, /* type */
1093 32, /* rightshift */
1094 2, /* size (0 = byte, 1 = short, 2 = long) */
1096 FALSE
, /* pc_relative */
1098 complain_overflow_dont
, /* complain_on_overflow */
1099 bfd_elf_generic_reloc
, /* special_function */
1100 "R_AARCH64_TLSLE_MOVW_TPREL_G2", /* name */
1101 FALSE
, /* partial_inplace */
1102 0xffff, /* src_mask */
1103 0xffff, /* dst_mask */
1104 FALSE
), /* pcrel_offset */
1106 HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G1
, /* type */
1107 16, /* rightshift */
1108 2, /* size (0 = byte, 1 = short, 2 = long) */
1110 FALSE
, /* pc_relative */
1112 complain_overflow_dont
, /* complain_on_overflow */
1113 bfd_elf_generic_reloc
, /* special_function */
1114 "R_AARCH64_TLSLE_MOVW_TPREL_G1", /* name */
1115 FALSE
, /* partial_inplace */
1116 0xffff, /* src_mask */
1117 0xffff, /* dst_mask */
1118 FALSE
), /* pcrel_offset */
1120 HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G1_NC
, /* type */
1121 16, /* rightshift */
1122 2, /* size (0 = byte, 1 = short, 2 = long) */
1124 FALSE
, /* pc_relative */
1126 complain_overflow_dont
, /* complain_on_overflow */
1127 bfd_elf_generic_reloc
, /* special_function */
1128 "R_AARCH64_TLSLE_MOVW_TPREL_G1_NC", /* name */
1129 FALSE
, /* partial_inplace */
1130 0xffff, /* src_mask */
1131 0xffff, /* dst_mask */
1132 FALSE
), /* pcrel_offset */
1134 HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G0
, /* type */
1136 2, /* size (0 = byte, 1 = short, 2 = long) */
1138 FALSE
, /* pc_relative */
1140 complain_overflow_dont
, /* complain_on_overflow */
1141 bfd_elf_generic_reloc
, /* special_function */
1142 "R_AARCH64_TLSLE_MOVW_TPREL_G0", /* name */
1143 FALSE
, /* partial_inplace */
1144 0xffff, /* src_mask */
1145 0xffff, /* dst_mask */
1146 FALSE
), /* pcrel_offset */
1148 HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G0_NC
, /* type */
1150 2, /* size (0 = byte, 1 = short, 2 = long) */
1152 FALSE
, /* pc_relative */
1154 complain_overflow_dont
, /* complain_on_overflow */
1155 bfd_elf_generic_reloc
, /* special_function */
1156 "R_AARCH64_TLSLE_MOVW_TPREL_G0_NC", /* name */
1157 FALSE
, /* partial_inplace */
1158 0xffff, /* src_mask */
1159 0xffff, /* dst_mask */
1160 FALSE
), /* pcrel_offset */
1162 HOWTO (R_AARCH64_TLSLE_ADD_TPREL_HI12
, /* type */
1163 12, /* rightshift */
1164 2, /* size (0 = byte, 1 = short, 2 = long) */
1166 FALSE
, /* pc_relative */
1168 complain_overflow_dont
, /* complain_on_overflow */
1169 bfd_elf_generic_reloc
, /* special_function */
1170 "R_AARCH64_TLSLE_ADD_TPREL_HI12", /* name */
1171 FALSE
, /* partial_inplace */
1172 0xfff, /* src_mask */
1173 0xfff, /* dst_mask */
1174 FALSE
), /* pcrel_offset */
1176 HOWTO (R_AARCH64_TLSLE_ADD_TPREL_LO12
, /* type */
1178 2, /* size (0 = byte, 1 = short, 2 = long) */
1180 FALSE
, /* pc_relative */
1182 complain_overflow_dont
, /* complain_on_overflow */
1183 bfd_elf_generic_reloc
, /* special_function */
1184 "R_AARCH64_TLSLE_ADD_TPREL_LO12", /* name */
1185 FALSE
, /* partial_inplace */
1186 0xfff, /* src_mask */
1187 0xfff, /* dst_mask */
1188 FALSE
), /* pcrel_offset */
1190 HOWTO (R_AARCH64_TLSLE_ADD_TPREL_LO12_NC
, /* type */
1192 2, /* size (0 = byte, 1 = short, 2 = long) */
1194 FALSE
, /* pc_relative */
1196 complain_overflow_dont
, /* complain_on_overflow */
1197 bfd_elf_generic_reloc
, /* special_function */
1198 "R_AARCH64_TLSLE_ADD_TPREL_LO12_NC", /* name */
1199 FALSE
, /* partial_inplace */
1200 0xfff, /* src_mask */
1201 0xfff, /* dst_mask */
1202 FALSE
), /* pcrel_offset */
1205 static reloc_howto_type elf64_aarch64_tlsdesc_howto_table
[] =
1207 HOWTO (R_AARCH64_TLSDESC_LD64_PREL19
, /* type */
1209 2, /* size (0 = byte, 1 = short, 2 = long) */
1211 TRUE
, /* pc_relative */
1213 complain_overflow_dont
, /* complain_on_overflow */
1214 bfd_elf_generic_reloc
, /* special_function */
1215 "R_AARCH64_TLSDESC_LD64_PREL19", /* name */
1216 FALSE
, /* partial_inplace */
1217 0x1ffffc, /* src_mask */
1218 0x1ffffc, /* dst_mask */
1219 TRUE
), /* pcrel_offset */
1221 HOWTO (R_AARCH64_TLSDESC_ADR_PREL21
, /* type */
1223 2, /* size (0 = byte, 1 = short, 2 = long) */
1225 TRUE
, /* pc_relative */
1227 complain_overflow_dont
, /* complain_on_overflow */
1228 bfd_elf_generic_reloc
, /* special_function */
1229 "R_AARCH64_TLSDESC_ADR_PREL21", /* name */
1230 FALSE
, /* partial_inplace */
1231 0x1fffff, /* src_mask */
1232 0x1fffff, /* dst_mask */
1233 TRUE
), /* pcrel_offset */
1235 /* Get to the page for the GOT entry for the symbol
1236 (G(S) - P) using an ADRP instruction. */
1237 HOWTO (R_AARCH64_TLSDESC_ADR_PAGE
, /* type */
1238 12, /* rightshift */
1239 2, /* size (0 = byte, 1 = short, 2 = long) */
1241 TRUE
, /* pc_relative */
1243 complain_overflow_dont
, /* complain_on_overflow */
1244 bfd_elf_generic_reloc
, /* special_function */
1245 "R_AARCH64_TLSDESC_ADR_PAGE", /* name */
1246 FALSE
, /* partial_inplace */
1247 0x1fffff, /* src_mask */
1248 0x1fffff, /* dst_mask */
1249 TRUE
), /* pcrel_offset */
1251 /* LD64: GOT offset G(S) & 0xfff. */
1252 HOWTO (R_AARCH64_TLSDESC_LD64_LO12_NC
, /* type */
1254 2, /* size (0 = byte, 1 = short, 2 = long) */
1256 FALSE
, /* pc_relative */
1258 complain_overflow_dont
, /* complain_on_overflow */
1259 bfd_elf_generic_reloc
, /* special_function */
1260 "R_AARCH64_TLSDESC_LD64_LO12_NC", /* name */
1261 FALSE
, /* partial_inplace */
1262 0xfff, /* src_mask */
1263 0xfff, /* dst_mask */
1264 FALSE
), /* pcrel_offset */
1266 /* ADD: GOT offset G(S) & 0xfff. */
1267 HOWTO (R_AARCH64_TLSDESC_ADD_LO12_NC
, /* type */
1269 2, /* size (0 = byte, 1 = short, 2 = long) */
1271 FALSE
, /* pc_relative */
1273 complain_overflow_dont
, /* complain_on_overflow */
1274 bfd_elf_generic_reloc
, /* special_function */
1275 "R_AARCH64_TLSDESC_ADD_LO12_NC", /* name */
1276 FALSE
, /* partial_inplace */
1277 0xfff, /* src_mask */
1278 0xfff, /* dst_mask */
1279 FALSE
), /* pcrel_offset */
1281 HOWTO (R_AARCH64_TLSDESC_OFF_G1
, /* type */
1282 16, /* rightshift */
1283 2, /* size (0 = byte, 1 = short, 2 = long) */
1285 FALSE
, /* pc_relative */
1287 complain_overflow_dont
, /* complain_on_overflow */
1288 bfd_elf_generic_reloc
, /* special_function */
1289 "R_AARCH64_TLSDESC_OFF_G1", /* name */
1290 FALSE
, /* partial_inplace */
1291 0xffff, /* src_mask */
1292 0xffff, /* dst_mask */
1293 FALSE
), /* pcrel_offset */
1295 HOWTO (R_AARCH64_TLSDESC_OFF_G0_NC
, /* type */
1297 2, /* size (0 = byte, 1 = short, 2 = long) */
1299 FALSE
, /* pc_relative */
1301 complain_overflow_dont
, /* complain_on_overflow */
1302 bfd_elf_generic_reloc
, /* special_function */
1303 "R_AARCH64_TLSDESC_OFF_G0_NC", /* name */
1304 FALSE
, /* partial_inplace */
1305 0xffff, /* src_mask */
1306 0xffff, /* dst_mask */
1307 FALSE
), /* pcrel_offset */
1309 HOWTO (R_AARCH64_TLSDESC_LDR
, /* type */
1311 2, /* size (0 = byte, 1 = short, 2 = long) */
1313 FALSE
, /* pc_relative */
1315 complain_overflow_dont
, /* complain_on_overflow */
1316 bfd_elf_generic_reloc
, /* special_function */
1317 "R_AARCH64_TLSDESC_LDR", /* name */
1318 FALSE
, /* partial_inplace */
1321 FALSE
), /* pcrel_offset */
1323 HOWTO (R_AARCH64_TLSDESC_ADD
, /* type */
1325 2, /* size (0 = byte, 1 = short, 2 = long) */
1327 FALSE
, /* pc_relative */
1329 complain_overflow_dont
, /* complain_on_overflow */
1330 bfd_elf_generic_reloc
, /* special_function */
1331 "R_AARCH64_TLSDESC_ADD", /* name */
1332 FALSE
, /* partial_inplace */
1335 FALSE
), /* pcrel_offset */
1337 HOWTO (R_AARCH64_TLSDESC_CALL
, /* type */
1339 2, /* size (0 = byte, 1 = short, 2 = long) */
1341 FALSE
, /* pc_relative */
1343 complain_overflow_dont
, /* complain_on_overflow */
1344 bfd_elf_generic_reloc
, /* special_function */
1345 "R_AARCH64_TLSDESC_CALL", /* name */
1346 FALSE
, /* partial_inplace */
1349 FALSE
), /* pcrel_offset */
1352 static reloc_howto_type
*
1353 elf64_aarch64_howto_from_type (unsigned int r_type
)
1355 if (r_type
>= R_AARCH64_static_min
&& r_type
< R_AARCH64_static_max
)
1356 return &elf64_aarch64_howto_table
[r_type
- R_AARCH64_static_min
];
1358 if (r_type
>= R_AARCH64_tls_min
&& r_type
< R_AARCH64_tls_max
)
1359 return &elf64_aarch64_tls_howto_table
[r_type
- R_AARCH64_tls_min
];
1361 if (r_type
>= R_AARCH64_tlsdesc_min
&& r_type
< R_AARCH64_tlsdesc_max
)
1362 return &elf64_aarch64_tlsdesc_howto_table
[r_type
- R_AARCH64_tlsdesc_min
];
1364 if (r_type
>= R_AARCH64_dyn_min
&& r_type
< R_AARCH64_dyn_max
)
1365 return &elf64_aarch64_howto_dynrelocs
[r_type
- R_AARCH64_dyn_min
];
1369 case R_AARCH64_NONE
:
1370 return &elf64_aarch64_howto_none
;
1373 bfd_set_error (bfd_error_bad_value
);
1378 elf64_aarch64_info_to_howto (bfd
*abfd ATTRIBUTE_UNUSED
, arelent
*bfd_reloc
,
1379 Elf_Internal_Rela
*elf_reloc
)
1381 unsigned int r_type
;
1383 r_type
= ELF64_R_TYPE (elf_reloc
->r_info
);
1384 bfd_reloc
->howto
= elf64_aarch64_howto_from_type (r_type
);
1387 struct elf64_aarch64_reloc_map
1389 bfd_reloc_code_real_type bfd_reloc_val
;
1390 unsigned int elf_reloc_val
;
1393 /* All entries in this list must also be present in
1394 elf64_aarch64_howto_table. */
1395 static const struct elf64_aarch64_reloc_map elf64_aarch64_reloc_map
[] =
1397 {BFD_RELOC_NONE
, R_AARCH64_NONE
},
1399 /* Basic data relocations. */
1400 {BFD_RELOC_CTOR
, R_AARCH64_ABS64
},
1401 {BFD_RELOC_64
, R_AARCH64_ABS64
},
1402 {BFD_RELOC_32
, R_AARCH64_ABS32
},
1403 {BFD_RELOC_16
, R_AARCH64_ABS16
},
1404 {BFD_RELOC_64_PCREL
, R_AARCH64_PREL64
},
1405 {BFD_RELOC_32_PCREL
, R_AARCH64_PREL32
},
1406 {BFD_RELOC_16_PCREL
, R_AARCH64_PREL16
},
1408 /* Group relocations to low order bits of a 16, 32, 48 or 64 bit
1410 {BFD_RELOC_AARCH64_MOVW_G0_NC
, R_AARCH64_MOVW_UABS_G0_NC
},
1411 {BFD_RELOC_AARCH64_MOVW_G1_NC
, R_AARCH64_MOVW_UABS_G1_NC
},
1412 {BFD_RELOC_AARCH64_MOVW_G2_NC
, R_AARCH64_MOVW_UABS_G2_NC
},
1414 /* Group relocations to create high bits of a 16, 32, 48 or 64 bit
1415 signed value inline. */
1416 {BFD_RELOC_AARCH64_MOVW_G0_S
, R_AARCH64_MOVW_SABS_G0
},
1417 {BFD_RELOC_AARCH64_MOVW_G1_S
, R_AARCH64_MOVW_SABS_G1
},
1418 {BFD_RELOC_AARCH64_MOVW_G2_S
, R_AARCH64_MOVW_SABS_G2
},
1420 /* Group relocations to create high bits of a 16, 32, 48 or 64 bit
1421 unsigned value inline. */
1422 {BFD_RELOC_AARCH64_MOVW_G0
, R_AARCH64_MOVW_UABS_G0
},
1423 {BFD_RELOC_AARCH64_MOVW_G1
, R_AARCH64_MOVW_UABS_G1
},
1424 {BFD_RELOC_AARCH64_MOVW_G2
, R_AARCH64_MOVW_UABS_G2
},
1425 {BFD_RELOC_AARCH64_MOVW_G3
, R_AARCH64_MOVW_UABS_G3
},
1427 /* Relocations to generate 19, 21 and 33 bit PC-relative load/store. */
1428 {BFD_RELOC_AARCH64_LD_LO19_PCREL
, R_AARCH64_LD_PREL_LO19
},
1429 {BFD_RELOC_AARCH64_ADR_LO21_PCREL
, R_AARCH64_ADR_PREL_LO21
},
1430 {BFD_RELOC_AARCH64_ADR_HI21_PCREL
, R_AARCH64_ADR_PREL_PG_HI21
},
1431 {BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL
, R_AARCH64_ADR_PREL_PG_HI21_NC
},
1432 {BFD_RELOC_AARCH64_ADD_LO12
, R_AARCH64_ADD_ABS_LO12_NC
},
1433 {BFD_RELOC_AARCH64_LDST8_LO12
, R_AARCH64_LDST8_ABS_LO12_NC
},
1434 {BFD_RELOC_AARCH64_LDST16_LO12
, R_AARCH64_LDST16_ABS_LO12_NC
},
1435 {BFD_RELOC_AARCH64_LDST32_LO12
, R_AARCH64_LDST32_ABS_LO12_NC
},
1436 {BFD_RELOC_AARCH64_LDST64_LO12
, R_AARCH64_LDST64_ABS_LO12_NC
},
1437 {BFD_RELOC_AARCH64_LDST128_LO12
, R_AARCH64_LDST128_ABS_LO12_NC
},
1439 /* Relocations for control-flow instructions. */
1440 {BFD_RELOC_AARCH64_TSTBR14
, R_AARCH64_TSTBR14
},
1441 {BFD_RELOC_AARCH64_BRANCH19
, R_AARCH64_CONDBR19
},
1442 {BFD_RELOC_AARCH64_JUMP26
, R_AARCH64_JUMP26
},
1443 {BFD_RELOC_AARCH64_CALL26
, R_AARCH64_CALL26
},
1445 /* Relocations for PIC. */
1446 {BFD_RELOC_AARCH64_GOT_LD_PREL19
, R_AARCH64_GOT_LD_PREL19
},
1447 {BFD_RELOC_AARCH64_ADR_GOT_PAGE
, R_AARCH64_ADR_GOT_PAGE
},
1448 {BFD_RELOC_AARCH64_LD64_GOT_LO12_NC
, R_AARCH64_LD64_GOT_LO12_NC
},
1450 /* Relocations for TLS. */
1451 {BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21
, R_AARCH64_TLSGD_ADR_PAGE21
},
1452 {BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC
, R_AARCH64_TLSGD_ADD_LO12_NC
},
1453 {BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1
,
1454 R_AARCH64_TLSIE_MOVW_GOTTPREL_G1
},
1455 {BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC
,
1456 R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC
},
1457 {BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21
,
1458 R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21
},
1459 {BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC
,
1460 R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC
},
1461 {BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19
,
1462 R_AARCH64_TLSIE_LD_GOTTPREL_PREL19
},
1463 {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2
, R_AARCH64_TLSLE_MOVW_TPREL_G2
},
1464 {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1
, R_AARCH64_TLSLE_MOVW_TPREL_G1
},
1465 {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC
,
1466 R_AARCH64_TLSLE_MOVW_TPREL_G1_NC
},
1467 {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0
, R_AARCH64_TLSLE_MOVW_TPREL_G0
},
1468 {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
,
1469 R_AARCH64_TLSLE_MOVW_TPREL_G0_NC
},
1470 {BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12
, R_AARCH64_TLSLE_ADD_TPREL_LO12
},
1471 {BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12
, R_AARCH64_TLSLE_ADD_TPREL_HI12
},
1472 {BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC
,
1473 R_AARCH64_TLSLE_ADD_TPREL_LO12_NC
},
1474 {BFD_RELOC_AARCH64_TLSDESC_LD64_PREL19
, R_AARCH64_TLSDESC_LD64_PREL19
},
1475 {BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21
, R_AARCH64_TLSDESC_ADR_PREL21
},
1476 {BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE
, R_AARCH64_TLSDESC_ADR_PAGE
},
1477 {BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC
, R_AARCH64_TLSDESC_ADD_LO12_NC
},
1478 {BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC
, R_AARCH64_TLSDESC_LD64_LO12_NC
},
1479 {BFD_RELOC_AARCH64_TLSDESC_OFF_G1
, R_AARCH64_TLSDESC_OFF_G1
},
1480 {BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC
, R_AARCH64_TLSDESC_OFF_G0_NC
},
1481 {BFD_RELOC_AARCH64_TLSDESC_LDR
, R_AARCH64_TLSDESC_LDR
},
1482 {BFD_RELOC_AARCH64_TLSDESC_ADD
, R_AARCH64_TLSDESC_ADD
},
1483 {BFD_RELOC_AARCH64_TLSDESC_CALL
, R_AARCH64_TLSDESC_CALL
},
1484 {BFD_RELOC_AARCH64_TLS_DTPMOD64
, R_AARCH64_TLS_DTPMOD64
},
1485 {BFD_RELOC_AARCH64_TLS_DTPREL64
, R_AARCH64_TLS_DTPREL64
},
1486 {BFD_RELOC_AARCH64_TLS_TPREL64
, R_AARCH64_TLS_TPREL64
},
1487 {BFD_RELOC_AARCH64_TLSDESC
, R_AARCH64_TLSDESC
},
1490 static reloc_howto_type
*
1491 elf64_aarch64_reloc_type_lookup (bfd
*abfd ATTRIBUTE_UNUSED
,
1492 bfd_reloc_code_real_type code
)
1496 for (i
= 0; i
< ARRAY_SIZE (elf64_aarch64_reloc_map
); i
++)
1497 if (elf64_aarch64_reloc_map
[i
].bfd_reloc_val
== code
)
1498 return elf64_aarch64_howto_from_type
1499 (elf64_aarch64_reloc_map
[i
].elf_reloc_val
);
1501 bfd_set_error (bfd_error_bad_value
);
1505 static reloc_howto_type
*
1506 elf64_aarch64_reloc_name_lookup (bfd
*abfd ATTRIBUTE_UNUSED
,
1511 for (i
= 0; i
< ARRAY_SIZE (elf64_aarch64_howto_table
); i
++)
1512 if (elf64_aarch64_howto_table
[i
].name
!= NULL
1513 && strcasecmp (elf64_aarch64_howto_table
[i
].name
, r_name
) == 0)
1514 return &elf64_aarch64_howto_table
[i
];
1519 /* Support for core dump NOTE sections. */
1522 elf64_aarch64_grok_prstatus (bfd
*abfd
, Elf_Internal_Note
*note
)
1527 switch (note
->descsz
)
1532 case 408: /* sizeof(struct elf_prstatus) on Linux/arm64. */
1534 elf_tdata (abfd
)->core_signal
1535 = bfd_get_16 (abfd
, note
->descdata
+ 12);
1538 elf_tdata (abfd
)->core_lwpid
1539 = bfd_get_32 (abfd
, note
->descdata
+ 32);
1548 /* Make a ".reg/999" section. */
1549 return _bfd_elfcore_make_pseudosection (abfd
, ".reg",
1550 size
, note
->descpos
+ offset
);
/* Target vector names and backend hook wiring.  */
#define TARGET_LITTLE_SYM               bfd_elf64_littleaarch64_vec
#define TARGET_LITTLE_NAME              "elf64-littleaarch64"
#define TARGET_BIG_SYM                  bfd_elf64_bigaarch64_vec
#define TARGET_BIG_NAME                 "elf64-bigaarch64"

#define elf_backend_grok_prstatus	elf64_aarch64_grok_prstatus

/* A 32-bit AArch64 instruction word.  */
typedef unsigned long int insn32;

/* The linker script knows the section names for placement.
   The entry_names are used to do simple name mangling on the stubs.
   Given a function name, and its type, the stub can be found. The
   name can be changed. The only requirement is the %s be present.  */
#define STUB_ENTRY_NAME   "__%s_veneer"

/* The name of the dynamic interpreter.  This is put in the .interp
   section.  */
#define ELF_DYNAMIC_INTERPRETER     "/lib/ld.so.1"

/* B/BL have a signed 26-bit immediate scaled by 4.  */
#define AARCH64_MAX_FWD_BRANCH_OFFSET \
  (((1 << 25) - 1) << 2)
#define AARCH64_MAX_BWD_BRANCH_OFFSET \
  (-((1 << 25) << 2))

/* ADRP has a signed 21-bit page immediate.  */
#define AARCH64_MAX_ADRP_IMM ((1 << 20) - 1)
#define AARCH64_MIN_ADRP_IMM (-(1 << 20))
1581 aarch64_valid_for_adrp_p (bfd_vma value
, bfd_vma place
)
1583 bfd_signed_vma offset
= (bfd_signed_vma
) (PG (value
) - PG (place
)) >> 12;
1584 return offset
<= AARCH64_MAX_ADRP_IMM
&& offset
>= AARCH64_MIN_ADRP_IMM
;
1588 aarch64_valid_branch_p (bfd_vma value
, bfd_vma place
)
1590 bfd_signed_vma offset
= (bfd_signed_vma
) (value
- place
);
1591 return (offset
<= AARCH64_MAX_FWD_BRANCH_OFFSET
1592 && offset
>= AARCH64_MAX_BWD_BRANCH_OFFSET
);
/* Veneer templates.  The adrp form reaches +/-4GB; the long form
   carries a full 64-bit PC-relative target after the code.  ip0/ip1
   are the x16/x17 intra-procedure-call scratch registers.  */
static const uint32_t aarch64_adrp_branch_stub[] =
{
  0x90000010,			/*	adrp	ip0, X
				   R_AARCH64_ADR_HI21_PCREL(X)  */
  0x91000210,			/*	add	ip0, ip0, :lo12:X
				   R_AARCH64_ADD_ABS_LO12_NC(X)  */
  0xd61f0200,			/*	br	ip0  */
};

static const uint32_t aarch64_long_branch_stub[] =
{
  0x58000090,			/*	ldr	ip0, 1f  */
  0x10000011,			/*	adr	ip1, #0  */
  0x8b110210,			/*	add	ip0, ip0, ip1  */
  0xd61f0200,			/*	br	ip0  */
  0x00000000,			/* 1:	.xword
				   R_AARCH64_PREL64(X) + 12
				 */
  0x00000000,
};
/* Section name for stubs is the associated section name plus this
   string.  */
#define STUB_SUFFIX ".stub"

/* Kind of veneer a stub hash entry describes.  aarch64_stub_none is
   the initial state set by stub_hash_newfunc.  */
enum elf64_aarch64_stub_type
{
  aarch64_stub_none,
  aarch64_stub_adrp_branch,
  aarch64_stub_long_branch,
};
1627 struct elf64_aarch64_stub_hash_entry
1629 /* Base hash table entry structure. */
1630 struct bfd_hash_entry root
;
1632 /* The stub section. */
1635 /* Offset within stub_sec of the beginning of this stub. */
1636 bfd_vma stub_offset
;
1638 /* Given the symbol's value and its section we can determine its final
1639 value when building the stubs (so the stub knows where to jump). */
1640 bfd_vma target_value
;
1641 asection
*target_section
;
1643 enum elf64_aarch64_stub_type stub_type
;
1645 /* The symbol table entry, if any, that this was derived from. */
1646 struct elf64_aarch64_link_hash_entry
*h
;
1648 /* Destination symbol type */
1649 unsigned char st_type
;
1651 /* Where this stub is being called from, or, in the case of combined
1652 stub sections, the first input section in the group. */
1655 /* The name for the local symbol at the start of this stub. The
1656 stub name in the hash table has to be unique; this does not, so
1657 it can be friendlier. */
1661 /* Used to build a map of a section. This is required for mixed-endian
1664 typedef struct elf64_elf_section_map
1669 elf64_aarch64_section_map
;
1672 typedef struct _aarch64_elf_section_data
1674 struct bfd_elf_section_data elf
;
1675 unsigned int mapcount
;
1676 unsigned int mapsize
;
1677 elf64_aarch64_section_map
*map
;
1679 _aarch64_elf_section_data
;
1681 #define elf64_aarch64_section_data(sec) \
1682 ((_aarch64_elf_section_data *) elf_section_data (sec))
1684 /* The size of the thread control block. */
1687 struct elf_aarch64_local_symbol
1689 unsigned int got_type
;
1690 bfd_signed_vma got_refcount
;
1693 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The
1694 offset is from the end of the jump table and reserved entries
1697 The magic value (bfd_vma) -1 indicates that an offset has not be
1699 bfd_vma tlsdesc_got_jump_table_offset
;
1702 struct elf_aarch64_obj_tdata
1704 struct elf_obj_tdata root
;
1706 /* local symbol descriptors */
1707 struct elf_aarch64_local_symbol
*locals
;
1709 /* Zero to warn when linking objects with incompatible enum sizes. */
1710 int no_enum_size_warning
;
1712 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
1713 int no_wchar_size_warning
;
1716 #define elf_aarch64_tdata(bfd) \
1717 ((struct elf_aarch64_obj_tdata *) (bfd)->tdata.any)
1719 #define elf64_aarch64_locals(bfd) (elf_aarch64_tdata (bfd)->locals)
1721 #define is_aarch64_elf(bfd) \
1722 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
1723 && elf_tdata (bfd) != NULL \
1724 && elf_object_id (bfd) == AARCH64_ELF_DATA)
1727 elf64_aarch64_mkobject (bfd
*abfd
)
1729 return bfd_elf_allocate_object (abfd
, sizeof (struct elf_aarch64_obj_tdata
),
1733 /* The AArch64 linker needs to keep track of the number of relocs that it
1734 decides to copy in check_relocs for each symbol. This is so that
1735 it can discard PC relative relocs if it doesn't need them when
1736 linking with -Bsymbolic. We store the information in a field
1737 extending the regular ELF linker hash table. */
1739 /* This structure keeps track of the number of relocs we have copied
1740 for a given symbol. */
1741 struct elf64_aarch64_relocs_copied
1744 struct elf64_aarch64_relocs_copied
*next
;
1745 /* A section in dynobj. */
1747 /* Number of relocs copied in this section. */
1748 bfd_size_type count
;
1749 /* Number of PC-relative relocs copied in this section. */
1750 bfd_size_type pc_count
;
1753 #define elf64_aarch64_hash_entry(ent) \
1754 ((struct elf64_aarch64_link_hash_entry *)(ent))
1756 #define GOT_UNKNOWN 0
1757 #define GOT_NORMAL 1
1758 #define GOT_TLS_GD 2
1759 #define GOT_TLS_IE 4
1760 #define GOT_TLSDESC_GD 8
1762 #define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLSDESC_GD))
1764 /* AArch64 ELF linker hash entry. */
1765 struct elf64_aarch64_link_hash_entry
1767 struct elf_link_hash_entry root
;
1769 /* Track dynamic relocs copied for this symbol. */
1770 struct elf_dyn_relocs
*dyn_relocs
;
1772 /* Number of PC relative relocs copied for this symbol. */
1773 struct elf64_aarch64_relocs_copied
*relocs_copied
;
1775 /* Since PLT entries have variable size, we need to record the
1776 index into .got.plt instead of recomputing it from the PLT
1778 bfd_signed_vma plt_got_offset
;
1780 /* Bit mask representing the type of GOT entry(s) if any required by
1782 unsigned int got_type
;
1784 /* A pointer to the most recently used stub hash entry against this
1786 struct elf64_aarch64_stub_hash_entry
*stub_cache
;
1788 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The offset
1789 is from the end of the jump table and reserved entries within the PLTGOT.
1791 The magic value (bfd_vma) -1 indicates that an offset has not
1793 bfd_vma tlsdesc_got_jump_table_offset
;
1797 elf64_aarch64_symbol_got_type (struct elf_link_hash_entry
*h
,
1799 unsigned long r_symndx
)
1802 return elf64_aarch64_hash_entry (h
)->got_type
;
1804 if (! elf64_aarch64_locals (abfd
))
1807 return elf64_aarch64_locals (abfd
)[r_symndx
].got_type
;
/* Traverse an AArch64 ELF linker hash table.  */
#define elf64_aarch64_link_hash_traverse(table, func, info)	\
  (elf_link_hash_traverse					\
   (&(table)->root,						\
    (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func), \
    (info)))

/* Get the AArch64 elf linker hash table from a link_info structure.  */
#define elf64_aarch64_hash_table(info)					\
  ((struct elf64_aarch64_link_hash_table *) ((info)->hash))

#define aarch64_stub_hash_lookup(table, string, create, copy)		\
  ((struct elf64_aarch64_stub_hash_entry *)				\
   bfd_hash_lookup ((table), (string), (create), (copy)))
1825 /* AArch64 ELF linker hash table. */
1826 struct elf64_aarch64_link_hash_table
1828 /* The main hash table. */
1829 struct elf_link_hash_table root
;
1831 /* Nonzero to force PIC branch veneers. */
1834 /* The number of bytes in the initial entry in the PLT. */
1835 bfd_size_type plt_header_size
;
1837 /* The number of bytes in the subsequent PLT etries. */
1838 bfd_size_type plt_entry_size
;
1840 /* Short-cuts to get to dynamic linker sections. */
1844 /* Small local sym cache. */
1845 struct sym_cache sym_cache
;
1847 /* For convenience in allocate_dynrelocs. */
1850 /* The amount of space used by the reserved portion of the sgotplt
1851 section, plus whatever space is used by the jump slots. */
1852 bfd_vma sgotplt_jump_table_size
;
1854 /* The stub hash table. */
1855 struct bfd_hash_table stub_hash_table
;
1857 /* Linker stub bfd. */
1860 /* Linker call-backs. */
1861 asection
*(*add_stub_section
) (const char *, asection
*);
1862 void (*layout_sections_again
) (void);
1864 /* Array to keep track of which stub sections have been created, and
1865 information on stub grouping. */
1868 /* This is the section to which stubs in the group will be
1871 /* The stub section. */
1875 /* Assorted information used by elf64_aarch64_size_stubs. */
1876 unsigned int bfd_count
;
1878 asection
**input_list
;
1880 /* The offset into splt of the PLT entry for the TLS descriptor
1881 resolver. Special values are 0, if not necessary (or not found
1882 to be necessary yet), and -1 if needed but not determined
1884 bfd_vma tlsdesc_plt
;
1886 /* The GOT offset for the lazy trampoline. Communicated to the
1887 loader via DT_TLSDESC_GOT. The magic value (bfd_vma) -1
1888 indicates an offset is not allocated. */
1889 bfd_vma dt_tlsdesc_got
;
1893 /* Return non-zero if the indicated VALUE has overflowed the maximum
1894 range expressible by a unsigned number with the indicated number of
1897 static bfd_reloc_status_type
1898 aarch64_unsigned_overflow (bfd_vma value
, unsigned int bits
)
1901 if (bits
>= sizeof (bfd_vma
) * 8)
1902 return bfd_reloc_ok
;
1903 lim
= (bfd_vma
) 1 << bits
;
1905 return bfd_reloc_overflow
;
1906 return bfd_reloc_ok
;
1910 /* Return non-zero if the indicated VALUE has overflowed the maximum
1911 range expressible by an signed number with the indicated number of
1914 static bfd_reloc_status_type
1915 aarch64_signed_overflow (bfd_vma value
, unsigned int bits
)
1917 bfd_signed_vma svalue
= (bfd_signed_vma
) value
;
1920 if (bits
>= sizeof (bfd_vma
) * 8)
1921 return bfd_reloc_ok
;
1922 lim
= (bfd_signed_vma
) 1 << (bits
- 1);
1923 if (svalue
< -lim
|| svalue
>= lim
)
1924 return bfd_reloc_overflow
;
1925 return bfd_reloc_ok
;
1928 /* Create an entry in an AArch64 ELF linker hash table. */
1930 static struct bfd_hash_entry
*
1931 elf64_aarch64_link_hash_newfunc (struct bfd_hash_entry
*entry
,
1932 struct bfd_hash_table
*table
,
1935 struct elf64_aarch64_link_hash_entry
*ret
=
1936 (struct elf64_aarch64_link_hash_entry
*) entry
;
1938 /* Allocate the structure if it has not already been allocated by a
1941 ret
= bfd_hash_allocate (table
,
1942 sizeof (struct elf64_aarch64_link_hash_entry
));
1944 return (struct bfd_hash_entry
*) ret
;
1946 /* Call the allocation method of the superclass. */
1947 ret
= ((struct elf64_aarch64_link_hash_entry
*)
1948 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry
*) ret
,
1952 ret
->dyn_relocs
= NULL
;
1953 ret
->relocs_copied
= NULL
;
1954 ret
->got_type
= GOT_UNKNOWN
;
1955 ret
->plt_got_offset
= (bfd_vma
) - 1;
1956 ret
->stub_cache
= NULL
;
1957 ret
->tlsdesc_got_jump_table_offset
= (bfd_vma
) - 1;
1960 return (struct bfd_hash_entry
*) ret
;
1963 /* Initialize an entry in the stub hash table. */
1965 static struct bfd_hash_entry
*
1966 stub_hash_newfunc (struct bfd_hash_entry
*entry
,
1967 struct bfd_hash_table
*table
, const char *string
)
1969 /* Allocate the structure if it has not already been allocated by a
1973 entry
= bfd_hash_allocate (table
,
1975 elf64_aarch64_stub_hash_entry
));
1980 /* Call the allocation method of the superclass. */
1981 entry
= bfd_hash_newfunc (entry
, table
, string
);
1984 struct elf64_aarch64_stub_hash_entry
*eh
;
1986 /* Initialize the local fields. */
1987 eh
= (struct elf64_aarch64_stub_hash_entry
*) entry
;
1988 eh
->stub_sec
= NULL
;
1989 eh
->stub_offset
= 0;
1990 eh
->target_value
= 0;
1991 eh
->target_section
= NULL
;
1992 eh
->stub_type
= aarch64_stub_none
;
2001 /* Copy the extra info we tack onto an elf_link_hash_entry. */
2004 elf64_aarch64_copy_indirect_symbol (struct bfd_link_info
*info
,
2005 struct elf_link_hash_entry
*dir
,
2006 struct elf_link_hash_entry
*ind
)
2008 struct elf64_aarch64_link_hash_entry
*edir
, *eind
;
2010 edir
= (struct elf64_aarch64_link_hash_entry
*) dir
;
2011 eind
= (struct elf64_aarch64_link_hash_entry
*) ind
;
2013 if (eind
->dyn_relocs
!= NULL
)
2015 if (edir
->dyn_relocs
!= NULL
)
2017 struct elf_dyn_relocs
**pp
;
2018 struct elf_dyn_relocs
*p
;
2020 /* Add reloc counts against the indirect sym to the direct sym
2021 list. Merge any entries against the same section. */
2022 for (pp
= &eind
->dyn_relocs
; (p
= *pp
) != NULL
;)
2024 struct elf_dyn_relocs
*q
;
2026 for (q
= edir
->dyn_relocs
; q
!= NULL
; q
= q
->next
)
2027 if (q
->sec
== p
->sec
)
2029 q
->pc_count
+= p
->pc_count
;
2030 q
->count
+= p
->count
;
2037 *pp
= edir
->dyn_relocs
;
2040 edir
->dyn_relocs
= eind
->dyn_relocs
;
2041 eind
->dyn_relocs
= NULL
;
2044 if (eind
->relocs_copied
!= NULL
)
2046 if (edir
->relocs_copied
!= NULL
)
2048 struct elf64_aarch64_relocs_copied
**pp
;
2049 struct elf64_aarch64_relocs_copied
*p
;
2051 /* Add reloc counts against the indirect sym to the direct sym
2052 list. Merge any entries against the same section. */
2053 for (pp
= &eind
->relocs_copied
; (p
= *pp
) != NULL
;)
2055 struct elf64_aarch64_relocs_copied
*q
;
2057 for (q
= edir
->relocs_copied
; q
!= NULL
; q
= q
->next
)
2058 if (q
->section
== p
->section
)
2060 q
->pc_count
+= p
->pc_count
;
2061 q
->count
+= p
->count
;
2068 *pp
= edir
->relocs_copied
;
2071 edir
->relocs_copied
= eind
->relocs_copied
;
2072 eind
->relocs_copied
= NULL
;
2075 if (ind
->root
.type
== bfd_link_hash_indirect
)
2077 /* Copy over PLT info. */
2078 if (dir
->got
.refcount
<= 0)
2080 edir
->got_type
= eind
->got_type
;
2081 eind
->got_type
= GOT_UNKNOWN
;
2085 _bfd_elf_link_hash_copy_indirect (info
, dir
, ind
);
2088 /* Create an AArch64 elf linker hash table. */
2090 static struct bfd_link_hash_table
*
2091 elf64_aarch64_link_hash_table_create (bfd
*abfd
)
2093 struct elf64_aarch64_link_hash_table
*ret
;
2094 bfd_size_type amt
= sizeof (struct elf64_aarch64_link_hash_table
);
2096 ret
= bfd_malloc (amt
);
2100 if (!_bfd_elf_link_hash_table_init
2101 (&ret
->root
, abfd
, elf64_aarch64_link_hash_newfunc
,
2102 sizeof (struct elf64_aarch64_link_hash_entry
), AARCH64_ELF_DATA
))
2108 ret
->sdynbss
= NULL
;
2109 ret
->srelbss
= NULL
;
2111 ret
->plt_header_size
= PLT_ENTRY_SIZE
;
2112 ret
->plt_entry_size
= PLT_SMALL_ENTRY_SIZE
;
2114 ret
->sym_cache
.abfd
= NULL
;
2117 ret
->stub_bfd
= NULL
;
2118 ret
->add_stub_section
= NULL
;
2119 ret
->layout_sections_again
= NULL
;
2120 ret
->stub_group
= NULL
;
2123 ret
->input_list
= NULL
;
2124 ret
->tlsdesc_plt
= 0;
2125 ret
->dt_tlsdesc_got
= (bfd_vma
) - 1;
2127 if (!bfd_hash_table_init (&ret
->stub_hash_table
, stub_hash_newfunc
,
2128 sizeof (struct elf64_aarch64_stub_hash_entry
)))
2134 return &ret
->root
.root
;
2137 /* Free the derived linker hash table. */
2140 elf64_aarch64_hash_table_free (struct bfd_link_hash_table
*hash
)
2142 struct elf64_aarch64_link_hash_table
*ret
2143 = (struct elf64_aarch64_link_hash_table
*) hash
;
2145 bfd_hash_table_free (&ret
->stub_hash_table
);
2146 _bfd_generic_link_hash_table_free (hash
);
2150 aarch64_resolve_relocation (unsigned int r_type
, bfd_vma place
, bfd_vma value
,
2151 bfd_vma addend
, bfd_boolean weak_undef_p
)
2155 case R_AARCH64_TLSDESC_CALL
:
2156 case R_AARCH64_NONE
:
2157 case R_AARCH64_NULL
:
2160 case R_AARCH64_ADR_PREL_LO21
:
2161 case R_AARCH64_CONDBR19
:
2162 case R_AARCH64_LD_PREL_LO19
:
2163 case R_AARCH64_PREL16
:
2164 case R_AARCH64_PREL32
:
2165 case R_AARCH64_PREL64
:
2166 case R_AARCH64_TSTBR14
:
2169 value
= value
+ addend
- place
;
2172 case R_AARCH64_CALL26
:
2173 case R_AARCH64_JUMP26
:
2174 value
= value
+ addend
- place
;
2177 case R_AARCH64_ABS16
:
2178 case R_AARCH64_ABS32
:
2179 case R_AARCH64_MOVW_SABS_G0
:
2180 case R_AARCH64_MOVW_SABS_G1
:
2181 case R_AARCH64_MOVW_SABS_G2
:
2182 case R_AARCH64_MOVW_UABS_G0
:
2183 case R_AARCH64_MOVW_UABS_G0_NC
:
2184 case R_AARCH64_MOVW_UABS_G1
:
2185 case R_AARCH64_MOVW_UABS_G1_NC
:
2186 case R_AARCH64_MOVW_UABS_G2
:
2187 case R_AARCH64_MOVW_UABS_G2_NC
:
2188 case R_AARCH64_MOVW_UABS_G3
:
2189 value
= value
+ addend
;
2192 case R_AARCH64_ADR_PREL_PG_HI21
:
2193 case R_AARCH64_ADR_PREL_PG_HI21_NC
:
2196 value
= PG (value
+ addend
) - PG (place
);
2199 case R_AARCH64_GOT_LD_PREL19
:
2200 value
= value
+ addend
- place
;
2203 case R_AARCH64_ADR_GOT_PAGE
:
2204 case R_AARCH64_TLSDESC_ADR_PAGE
:
2205 case R_AARCH64_TLSGD_ADR_PAGE21
:
2206 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21
:
2207 value
= PG (value
+ addend
) - PG (place
);
2210 case R_AARCH64_ADD_ABS_LO12_NC
:
2211 case R_AARCH64_LD64_GOT_LO12_NC
:
2212 case R_AARCH64_LDST8_ABS_LO12_NC
:
2213 case R_AARCH64_LDST16_ABS_LO12_NC
:
2214 case R_AARCH64_LDST32_ABS_LO12_NC
:
2215 case R_AARCH64_LDST64_ABS_LO12_NC
:
2216 case R_AARCH64_LDST128_ABS_LO12_NC
:
2217 case R_AARCH64_TLSDESC_ADD_LO12_NC
:
2218 case R_AARCH64_TLSDESC_ADD
:
2219 case R_AARCH64_TLSDESC_LD64_LO12_NC
:
2220 case R_AARCH64_TLSDESC_LDR
:
2221 case R_AARCH64_TLSGD_ADD_LO12_NC
:
2222 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC
:
2223 case R_AARCH64_TLSLE_ADD_TPREL_LO12
:
2224 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC
:
2225 value
= PG_OFFSET (value
+ addend
);
2228 case R_AARCH64_TLSLE_MOVW_TPREL_G1
:
2229 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC
:
2230 value
= (value
+ addend
) & (bfd_vma
) 0xffff0000;
2232 case R_AARCH64_TLSLE_ADD_TPREL_HI12
:
2233 value
= (value
+ addend
) & (bfd_vma
) 0xfff000;
2236 case R_AARCH64_TLSLE_MOVW_TPREL_G0
:
2237 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC
:
2238 value
= (value
+ addend
) & (bfd_vma
) 0xffff;
2241 case R_AARCH64_TLSLE_MOVW_TPREL_G2
:
2242 value
= (value
+ addend
) & ~(bfd_vma
) 0xffffffff;
2243 value
-= place
& ~(bfd_vma
) 0xffffffff;
2250 aarch64_relocate (unsigned int r_type
, bfd
*input_bfd
, asection
*input_section
,
2251 bfd_vma offset
, bfd_vma value
)
2253 reloc_howto_type
*howto
;
2256 howto
= elf64_aarch64_howto_from_type (r_type
);
2257 place
= (input_section
->output_section
->vma
+ input_section
->output_offset
2259 value
= aarch64_resolve_relocation (r_type
, place
, value
, 0, FALSE
);
2260 return bfd_elf_aarch64_put_addend (input_bfd
,
2261 input_section
->contents
+ offset
,
2265 static enum elf64_aarch64_stub_type
2266 aarch64_select_branch_stub (bfd_vma value
, bfd_vma place
)
2268 if (aarch64_valid_for_adrp_p (value
, place
))
2269 return aarch64_stub_adrp_branch
;
2270 return aarch64_stub_long_branch
;
2273 /* Determine the type of stub needed, if any, for a call. */
2275 static enum elf64_aarch64_stub_type
2276 aarch64_type_of_stub (struct bfd_link_info
*info
,
2277 asection
*input_sec
,
2278 const Elf_Internal_Rela
*rel
,
2279 unsigned char st_type
,
2280 struct elf64_aarch64_link_hash_entry
*hash
,
2281 bfd_vma destination
)
2284 bfd_signed_vma branch_offset
;
2285 unsigned int r_type
;
2286 struct elf64_aarch64_link_hash_table
*globals
;
2287 enum elf64_aarch64_stub_type stub_type
= aarch64_stub_none
;
2288 bfd_boolean via_plt_p
;
2290 if (st_type
!= STT_FUNC
)
2293 globals
= elf64_aarch64_hash_table (info
);
2294 via_plt_p
= (globals
->root
.splt
!= NULL
&& hash
!= NULL
2295 && hash
->root
.plt
.offset
!= (bfd_vma
) - 1);
2300 /* Determine where the call point is. */
2301 location
= (input_sec
->output_offset
2302 + input_sec
->output_section
->vma
+ rel
->r_offset
);
2304 branch_offset
= (bfd_signed_vma
) (destination
- location
);
2306 r_type
= ELF64_R_TYPE (rel
->r_info
);
2308 /* We don't want to redirect any old unconditional jump in this way,
2309 only one which is being used for a sibcall, where it is
2310 acceptable for the IP0 and IP1 registers to be clobbered. */
2311 if ((r_type
== R_AARCH64_CALL26
|| r_type
== R_AARCH64_JUMP26
)
2312 && (branch_offset
> AARCH64_MAX_FWD_BRANCH_OFFSET
2313 || branch_offset
< AARCH64_MAX_BWD_BRANCH_OFFSET
))
2315 stub_type
= aarch64_stub_long_branch
;
2321 /* Build a name for an entry in the stub hash table. */
2324 elf64_aarch64_stub_name (const asection
*input_section
,
2325 const asection
*sym_sec
,
2326 const struct elf64_aarch64_link_hash_entry
*hash
,
2327 const Elf_Internal_Rela
*rel
)
2334 len
= 8 + 1 + strlen (hash
->root
.root
.root
.string
) + 1 + 16 + 1;
2335 stub_name
= bfd_malloc (len
);
2336 if (stub_name
!= NULL
)
2337 snprintf (stub_name
, len
, "%08x_%s+%" BFD_VMA_FMT
"x",
2338 (unsigned int) input_section
->id
,
2339 hash
->root
.root
.root
.string
,
2344 len
= 8 + 1 + 8 + 1 + 8 + 1 + 16 + 1;
2345 stub_name
= bfd_malloc (len
);
2346 if (stub_name
!= NULL
)
2347 snprintf (stub_name
, len
, "%08x_%x:%x+%" BFD_VMA_FMT
"x",
2348 (unsigned int) input_section
->id
,
2349 (unsigned int) sym_sec
->id
,
2350 (unsigned int) ELF64_R_SYM (rel
->r_info
),
2357 /* Look up an entry in the stub hash. Stub entries are cached because
2358 creating the stub name takes a bit of time. */
2360 static struct elf64_aarch64_stub_hash_entry
*
2361 elf64_aarch64_get_stub_entry (const asection
*input_section
,
2362 const asection
*sym_sec
,
2363 struct elf_link_hash_entry
*hash
,
2364 const Elf_Internal_Rela
*rel
,
2365 struct elf64_aarch64_link_hash_table
*htab
)
2367 struct elf64_aarch64_stub_hash_entry
*stub_entry
;
2368 struct elf64_aarch64_link_hash_entry
*h
=
2369 (struct elf64_aarch64_link_hash_entry
*) hash
;
2370 const asection
*id_sec
;
2372 if ((input_section
->flags
& SEC_CODE
) == 0)
2375 /* If this input section is part of a group of sections sharing one
2376 stub section, then use the id of the first section in the group.
2377 Stub names need to include a section id, as there may well be
2378 more than one stub used to reach say, printf, and we need to
2379 distinguish between them. */
2380 id_sec
= htab
->stub_group
[input_section
->id
].link_sec
;
2382 if (h
!= NULL
&& h
->stub_cache
!= NULL
2383 && h
->stub_cache
->h
== h
&& h
->stub_cache
->id_sec
== id_sec
)
2385 stub_entry
= h
->stub_cache
;
2391 stub_name
= elf64_aarch64_stub_name (id_sec
, sym_sec
, h
, rel
);
2392 if (stub_name
== NULL
)
2395 stub_entry
= aarch64_stub_hash_lookup (&htab
->stub_hash_table
,
2396 stub_name
, FALSE
, FALSE
);
2398 h
->stub_cache
= stub_entry
;
2406 /* Add a new stub entry to the stub hash. Not all fields of the new
2407 stub entry are initialised. */
2409 static struct elf64_aarch64_stub_hash_entry
*
2410 elf64_aarch64_add_stub (const char *stub_name
,
2412 struct elf64_aarch64_link_hash_table
*htab
)
2416 struct elf64_aarch64_stub_hash_entry
*stub_entry
;
2418 link_sec
= htab
->stub_group
[section
->id
].link_sec
;
2419 stub_sec
= htab
->stub_group
[section
->id
].stub_sec
;
2420 if (stub_sec
== NULL
)
2422 stub_sec
= htab
->stub_group
[link_sec
->id
].stub_sec
;
2423 if (stub_sec
== NULL
)
2429 namelen
= strlen (link_sec
->name
);
2430 len
= namelen
+ sizeof (STUB_SUFFIX
);
2431 s_name
= bfd_alloc (htab
->stub_bfd
, len
);
2435 memcpy (s_name
, link_sec
->name
, namelen
);
2436 memcpy (s_name
+ namelen
, STUB_SUFFIX
, sizeof (STUB_SUFFIX
));
2437 stub_sec
= (*htab
->add_stub_section
) (s_name
, link_sec
);
2438 if (stub_sec
== NULL
)
2440 htab
->stub_group
[link_sec
->id
].stub_sec
= stub_sec
;
2442 htab
->stub_group
[section
->id
].stub_sec
= stub_sec
;
2445 /* Enter this entry into the linker stub hash table. */
2446 stub_entry
= aarch64_stub_hash_lookup (&htab
->stub_hash_table
, stub_name
,
2448 if (stub_entry
== NULL
)
2450 (*_bfd_error_handler
) (_("%s: cannot create stub entry %s"),
2451 section
->owner
, stub_name
);
2455 stub_entry
->stub_sec
= stub_sec
;
2456 stub_entry
->stub_offset
= 0;
2457 stub_entry
->id_sec
= link_sec
;
2463 aarch64_build_one_stub (struct bfd_hash_entry
*gen_entry
,
2464 void *in_arg ATTRIBUTE_UNUSED
)
2466 struct elf64_aarch64_stub_hash_entry
*stub_entry
;
2471 unsigned int template_size
;
2472 const uint32_t *template;
2475 /* Massage our args to the form they really have. */
2476 stub_entry
= (struct elf64_aarch64_stub_hash_entry
*) gen_entry
;
2478 stub_sec
= stub_entry
->stub_sec
;
2480 /* Make a note of the offset within the stubs for this entry. */
2481 stub_entry
->stub_offset
= stub_sec
->size
;
2482 loc
= stub_sec
->contents
+ stub_entry
->stub_offset
;
2484 stub_bfd
= stub_sec
->owner
;
2486 /* This is the address of the stub destination. */
2487 sym_value
= (stub_entry
->target_value
2488 + stub_entry
->target_section
->output_offset
2489 + stub_entry
->target_section
->output_section
->vma
);
2491 if (stub_entry
->stub_type
== aarch64_stub_long_branch
)
2493 bfd_vma place
= (stub_entry
->stub_offset
+ stub_sec
->output_section
->vma
2494 + stub_sec
->output_offset
);
2496 /* See if we can relax the stub. */
2497 if (aarch64_valid_for_adrp_p (sym_value
, place
))
2498 stub_entry
->stub_type
= aarch64_select_branch_stub (sym_value
, place
);
2501 switch (stub_entry
->stub_type
)
2503 case aarch64_stub_adrp_branch
:
2504 template = aarch64_adrp_branch_stub
;
2505 template_size
= sizeof (aarch64_adrp_branch_stub
);
2507 case aarch64_stub_long_branch
:
2508 template = aarch64_long_branch_stub
;
2509 template_size
= sizeof (aarch64_long_branch_stub
);
2516 for (i
= 0; i
< (template_size
/ sizeof template[0]); i
++)
2518 bfd_putl32 (template[i
], loc
);
2522 template_size
= (template_size
+ 7) & ~7;
2523 stub_sec
->size
+= template_size
;
2525 switch (stub_entry
->stub_type
)
2527 case aarch64_stub_adrp_branch
:
2528 if (aarch64_relocate (R_AARCH64_ADR_PREL_PG_HI21
, stub_bfd
, stub_sec
,
2529 stub_entry
->stub_offset
, sym_value
))
2530 /* The stub would not have been relaxed if the offset was out
2534 _bfd_final_link_relocate
2535 (elf64_aarch64_howto_from_type (R_AARCH64_ADD_ABS_LO12_NC
),
2539 stub_entry
->stub_offset
+ 4,
2544 case aarch64_stub_long_branch
:
2545 /* We want the value relative to the address 12 bytes back from the
2547 _bfd_final_link_relocate (elf64_aarch64_howto_from_type
2548 (R_AARCH64_PREL64
), stub_bfd
, stub_sec
,
2550 stub_entry
->stub_offset
+ 16,
2560 /* As above, but don't actually build the stub. Just bump offset so
2561 we know stub section sizes. */
2564 aarch64_size_one_stub (struct bfd_hash_entry
*gen_entry
,
2565 void *in_arg ATTRIBUTE_UNUSED
)
2567 struct elf64_aarch64_stub_hash_entry
*stub_entry
;
2570 /* Massage our args to the form they really have. */
2571 stub_entry
= (struct elf64_aarch64_stub_hash_entry
*) gen_entry
;
2573 switch (stub_entry
->stub_type
)
2575 case aarch64_stub_adrp_branch
:
2576 size
= sizeof (aarch64_adrp_branch_stub
);
2578 case aarch64_stub_long_branch
:
2579 size
= sizeof (aarch64_long_branch_stub
);
2587 size
= (size
+ 7) & ~7;
2588 stub_entry
->stub_sec
->size
+= size
;
2592 /* External entry points for sizing and building linker stubs. */
2594 /* Set up various things so that we can make a list of input sections
2595 for each output section included in the link. Returns -1 on error,
2596 0 when no stubs will be needed, and 1 on success. */
2599 elf64_aarch64_setup_section_lists (bfd
*output_bfd
,
2600 struct bfd_link_info
*info
)
2603 unsigned int bfd_count
;
2604 int top_id
, top_index
;
2606 asection
**input_list
, **list
;
2608 struct elf64_aarch64_link_hash_table
*htab
=
2609 elf64_aarch64_hash_table (info
);
2611 if (!is_elf_hash_table (htab
))
2614 /* Count the number of input BFDs and find the top input section id. */
2615 for (input_bfd
= info
->input_bfds
, bfd_count
= 0, top_id
= 0;
2616 input_bfd
!= NULL
; input_bfd
= input_bfd
->link_next
)
2619 for (section
= input_bfd
->sections
;
2620 section
!= NULL
; section
= section
->next
)
2622 if (top_id
< section
->id
)
2623 top_id
= section
->id
;
2626 htab
->bfd_count
= bfd_count
;
2628 amt
= sizeof (struct map_stub
) * (top_id
+ 1);
2629 htab
->stub_group
= bfd_zmalloc (amt
);
2630 if (htab
->stub_group
== NULL
)
2633 /* We can't use output_bfd->section_count here to find the top output
2634 section index as some sections may have been removed, and
2635 _bfd_strip_section_from_output doesn't renumber the indices. */
2636 for (section
= output_bfd
->sections
, top_index
= 0;
2637 section
!= NULL
; section
= section
->next
)
2639 if (top_index
< section
->index
)
2640 top_index
= section
->index
;
2643 htab
->top_index
= top_index
;
2644 amt
= sizeof (asection
*) * (top_index
+ 1);
2645 input_list
= bfd_malloc (amt
);
2646 htab
->input_list
= input_list
;
2647 if (input_list
== NULL
)
2650 /* For sections we aren't interested in, mark their entries with a
2651 value we can check later. */
2652 list
= input_list
+ top_index
;
2654 *list
= bfd_abs_section_ptr
;
2655 while (list
-- != input_list
);
2657 for (section
= output_bfd
->sections
;
2658 section
!= NULL
; section
= section
->next
)
2660 if ((section
->flags
& SEC_CODE
) != 0)
2661 input_list
[section
->index
] = NULL
;
2667 /* Used by elf64_aarch64_next_input_section and group_sections. */
/* NOTE(review): expands to a reference to a local variable `htab';
   only valid in scopes where such a hash-table pointer is live.  */
2668 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
2670 /* The linker repeatedly calls this function for each input section,
2671 in the order that input sections are linked into output sections.
2672 Build lists of input sections to determine groupings between which
2673 we may insert linker stubs. */
2676 elf64_aarch64_next_input_section (struct bfd_link_info
*info
, asection
*isec
)
2678 struct elf64_aarch64_link_hash_table
*htab
=
2679 elf64_aarch64_hash_table (info
);
2681 if (isec
->output_section
->index
<= htab
->top_index
)
2683 asection
**list
= htab
->input_list
+ isec
->output_section
->index
;
2685 if (*list
!= bfd_abs_section_ptr
)
2687 /* Steal the link_sec pointer for our list. */
2688 /* This happens to make the list in reverse order,
2689 which is what we want. */
2690 PREV_SEC (isec
) = *list
;
2696 /* See whether we can group stub sections together. Grouping stub
2697 sections may result in fewer stubs. More importantly, we need to
2698 put all .init* and .fini* stubs at the beginning of the .init or
2699 .fini output sections respectively, because glibc splits the
2700 _init and _fini functions into multiple parts. Putting a stub in
2701 the middle of a function is not a good idea. */
2704 group_sections (struct elf64_aarch64_link_hash_table
*htab
,
2705 bfd_size_type stub_group_size
,
2706 bfd_boolean stubs_always_before_branch
)
2708 asection
**list
= htab
->input_list
+ htab
->top_index
;
2712 asection
*tail
= *list
;
2714 if (tail
== bfd_abs_section_ptr
)
2717 while (tail
!= NULL
)
2721 bfd_size_type total
;
2725 while ((prev
= PREV_SEC (curr
)) != NULL
2726 && ((total
+= curr
->output_offset
- prev
->output_offset
)
2730 /* OK, the size from the start of CURR to the end is less
2731 than stub_group_size and thus can be handled by one stub
2732 section. (Or the tail section is itself larger than
2733 stub_group_size, in which case we may be toast.)
2734 We should really be keeping track of the total size of
2735 stubs added here, as stubs contribute to the final output
2739 prev
= PREV_SEC (tail
);
2740 /* Set up this stub group. */
2741 htab
->stub_group
[tail
->id
].link_sec
= curr
;
2743 while (tail
!= curr
&& (tail
= prev
) != NULL
);
2745 /* But wait, there's more! Input sections up to stub_group_size
2746 bytes before the stub section can be handled by it too. */
2747 if (!stubs_always_before_branch
)
2751 && ((total
+= tail
->output_offset
- prev
->output_offset
)
2755 prev
= PREV_SEC (tail
);
2756 htab
->stub_group
[tail
->id
].link_sec
= curr
;
2762 while (list
-- != htab
->input_list
);
2764 free (htab
->input_list
);
2769 /* Determine and set the size of the stub section for a final link.
2771 The basic idea here is to examine all the relocations looking for
2772 PC-relative calls to a target that is unreachable with a "bl"
2776 elf64_aarch64_size_stubs (bfd
*output_bfd
,
2778 struct bfd_link_info
*info
,
2779 bfd_signed_vma group_size
,
2780 asection
* (*add_stub_section
) (const char *,
2782 void (*layout_sections_again
) (void))
2784 bfd_size_type stub_group_size
;
2785 bfd_boolean stubs_always_before_branch
;
2786 bfd_boolean stub_changed
= 0;
2787 struct elf64_aarch64_link_hash_table
*htab
= elf64_aarch64_hash_table (info
);
2789 /* Propagate mach to stub bfd, because it may not have been
2790 finalized when we created stub_bfd. */
2791 bfd_set_arch_mach (stub_bfd
, bfd_get_arch (output_bfd
),
2792 bfd_get_mach (output_bfd
));
2794 /* Stash our params away. */
2795 htab
->stub_bfd
= stub_bfd
;
2796 htab
->add_stub_section
= add_stub_section
;
2797 htab
->layout_sections_again
= layout_sections_again
;
2798 stubs_always_before_branch
= group_size
< 0;
2800 stub_group_size
= -group_size
;
2802 stub_group_size
= group_size
;
2804 if (stub_group_size
== 1)
2806 /* Default values. */
2807 /* Aarch64 branch range is +-128MB. The value used is 1MB less. */
2808 stub_group_size
= 127 * 1024 * 1024;
2811 group_sections (htab
, stub_group_size
, stubs_always_before_branch
);
2816 unsigned int bfd_indx
;
2819 for (input_bfd
= info
->input_bfds
, bfd_indx
= 0;
2820 input_bfd
!= NULL
; input_bfd
= input_bfd
->link_next
, bfd_indx
++)
2822 Elf_Internal_Shdr
*symtab_hdr
;
2824 Elf_Internal_Sym
*local_syms
= NULL
;
2826 /* We'll need the symbol table in a second. */
2827 symtab_hdr
= &elf_tdata (input_bfd
)->symtab_hdr
;
2828 if (symtab_hdr
->sh_info
== 0)
2831 /* Walk over each section attached to the input bfd. */
2832 for (section
= input_bfd
->sections
;
2833 section
!= NULL
; section
= section
->next
)
2835 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
2837 /* If there aren't any relocs, then there's nothing more
2839 if ((section
->flags
& SEC_RELOC
) == 0
2840 || section
->reloc_count
== 0
2841 || (section
->flags
& SEC_CODE
) == 0)
2844 /* If this section is a link-once section that will be
2845 discarded, then don't create any stubs. */
2846 if (section
->output_section
== NULL
2847 || section
->output_section
->owner
!= output_bfd
)
2850 /* Get the relocs. */
2852 = _bfd_elf_link_read_relocs (input_bfd
, section
, NULL
,
2853 NULL
, info
->keep_memory
);
2854 if (internal_relocs
== NULL
)
2855 goto error_ret_free_local
;
2857 /* Now examine each relocation. */
2858 irela
= internal_relocs
;
2859 irelaend
= irela
+ section
->reloc_count
;
2860 for (; irela
< irelaend
; irela
++)
2862 unsigned int r_type
, r_indx
;
2863 enum elf64_aarch64_stub_type stub_type
;
2864 struct elf64_aarch64_stub_hash_entry
*stub_entry
;
2867 bfd_vma destination
;
2868 struct elf64_aarch64_link_hash_entry
*hash
;
2869 const char *sym_name
;
2871 const asection
*id_sec
;
2872 unsigned char st_type
;
2875 r_type
= ELF64_R_TYPE (irela
->r_info
);
2876 r_indx
= ELF64_R_SYM (irela
->r_info
);
2878 if (r_type
>= (unsigned int) R_AARCH64_end
)
2880 bfd_set_error (bfd_error_bad_value
);
2881 error_ret_free_internal
:
2882 if (elf_section_data (section
)->relocs
== NULL
)
2883 free (internal_relocs
);
2884 goto error_ret_free_local
;
2887 /* Only look for stubs on unconditional branch and
2888 branch and link instructions. */
2889 if (r_type
!= (unsigned int) R_AARCH64_CALL26
2890 && r_type
!= (unsigned int) R_AARCH64_JUMP26
)
2893 /* Now determine the call target, its name, value,
2900 if (r_indx
< symtab_hdr
->sh_info
)
2902 /* It's a local symbol. */
2903 Elf_Internal_Sym
*sym
;
2904 Elf_Internal_Shdr
*hdr
;
2906 if (local_syms
== NULL
)
2909 = (Elf_Internal_Sym
*) symtab_hdr
->contents
;
2910 if (local_syms
== NULL
)
2912 = bfd_elf_get_elf_syms (input_bfd
, symtab_hdr
,
2913 symtab_hdr
->sh_info
, 0,
2915 if (local_syms
== NULL
)
2916 goto error_ret_free_internal
;
2919 sym
= local_syms
+ r_indx
;
2920 hdr
= elf_elfsections (input_bfd
)[sym
->st_shndx
];
2921 sym_sec
= hdr
->bfd_section
;
2923 /* This is an undefined symbol. It can never
2927 if (ELF_ST_TYPE (sym
->st_info
) != STT_SECTION
)
2928 sym_value
= sym
->st_value
;
2929 destination
= (sym_value
+ irela
->r_addend
2930 + sym_sec
->output_offset
2931 + sym_sec
->output_section
->vma
);
2932 st_type
= ELF_ST_TYPE (sym
->st_info
);
2934 = bfd_elf_string_from_elf_section (input_bfd
,
2935 symtab_hdr
->sh_link
,
2942 e_indx
= r_indx
- symtab_hdr
->sh_info
;
2943 hash
= ((struct elf64_aarch64_link_hash_entry
*)
2944 elf_sym_hashes (input_bfd
)[e_indx
]);
2946 while (hash
->root
.root
.type
== bfd_link_hash_indirect
2947 || hash
->root
.root
.type
== bfd_link_hash_warning
)
2948 hash
= ((struct elf64_aarch64_link_hash_entry
*)
2949 hash
->root
.root
.u
.i
.link
);
2951 if (hash
->root
.root
.type
== bfd_link_hash_defined
2952 || hash
->root
.root
.type
== bfd_link_hash_defweak
)
2954 struct elf64_aarch64_link_hash_table
*globals
=
2955 elf64_aarch64_hash_table (info
);
2956 sym_sec
= hash
->root
.root
.u
.def
.section
;
2957 sym_value
= hash
->root
.root
.u
.def
.value
;
2958 /* For a destination in a shared library,
2959 use the PLT stub as target address to
2960 decide whether a branch stub is
2962 if (globals
->root
.splt
!= NULL
&& hash
!= NULL
2963 && hash
->root
.plt
.offset
!= (bfd_vma
) - 1)
2965 sym_sec
= globals
->root
.splt
;
2966 sym_value
= hash
->root
.plt
.offset
;
2967 if (sym_sec
->output_section
!= NULL
)
2968 destination
= (sym_value
2969 + sym_sec
->output_offset
2971 sym_sec
->output_section
->vma
);
2973 else if (sym_sec
->output_section
!= NULL
)
2974 destination
= (sym_value
+ irela
->r_addend
2975 + sym_sec
->output_offset
2976 + sym_sec
->output_section
->vma
);
2978 else if (hash
->root
.root
.type
== bfd_link_hash_undefined
2979 || (hash
->root
.root
.type
2980 == bfd_link_hash_undefweak
))
2982 /* For a shared library, use the PLT stub as
2983 target address to decide whether a long
2984 branch stub is needed.
2985 For absolute code, they cannot be handled. */
2986 struct elf64_aarch64_link_hash_table
*globals
=
2987 elf64_aarch64_hash_table (info
);
2989 if (globals
->root
.splt
!= NULL
&& hash
!= NULL
2990 && hash
->root
.plt
.offset
!= (bfd_vma
) - 1)
2992 sym_sec
= globals
->root
.splt
;
2993 sym_value
= hash
->root
.plt
.offset
;
2994 if (sym_sec
->output_section
!= NULL
)
2995 destination
= (sym_value
2996 + sym_sec
->output_offset
2998 sym_sec
->output_section
->vma
);
3005 bfd_set_error (bfd_error_bad_value
);
3006 goto error_ret_free_internal
;
3008 st_type
= ELF_ST_TYPE (hash
->root
.type
);
3009 sym_name
= hash
->root
.root
.root
.string
;
3012 /* Determine what (if any) linker stub is needed. */
3013 stub_type
= aarch64_type_of_stub
3014 (info
, section
, irela
, st_type
, hash
, destination
);
3015 if (stub_type
== aarch64_stub_none
)
3018 /* Support for grouping stub sections. */
3019 id_sec
= htab
->stub_group
[section
->id
].link_sec
;
3021 /* Get the name of this stub. */
3022 stub_name
= elf64_aarch64_stub_name (id_sec
, sym_sec
, hash
,
3025 goto error_ret_free_internal
;
3028 aarch64_stub_hash_lookup (&htab
->stub_hash_table
,
3029 stub_name
, FALSE
, FALSE
);
3030 if (stub_entry
!= NULL
)
3032 /* The proper stub has already been created. */
3037 stub_entry
= elf64_aarch64_add_stub (stub_name
, section
,
3039 if (stub_entry
== NULL
)
3042 goto error_ret_free_internal
;
3045 stub_entry
->target_value
= sym_value
;
3046 stub_entry
->target_section
= sym_sec
;
3047 stub_entry
->stub_type
= stub_type
;
3048 stub_entry
->h
= hash
;
3049 stub_entry
->st_type
= st_type
;
3051 if (sym_name
== NULL
)
3052 sym_name
= "unnamed";
3053 len
= sizeof (STUB_ENTRY_NAME
) + strlen (sym_name
);
3054 stub_entry
->output_name
= bfd_alloc (htab
->stub_bfd
, len
);
3055 if (stub_entry
->output_name
== NULL
)
3058 goto error_ret_free_internal
;
3061 snprintf (stub_entry
->output_name
, len
, STUB_ENTRY_NAME
,
3064 stub_changed
= TRUE
;
3067 /* We're done with the internal relocs, free them. */
3068 if (elf_section_data (section
)->relocs
== NULL
)
3069 free (internal_relocs
);
3076 /* OK, we've added some stubs. Find out the new size of the
3078 for (stub_sec
= htab
->stub_bfd
->sections
;
3079 stub_sec
!= NULL
; stub_sec
= stub_sec
->next
)
3082 bfd_hash_traverse (&htab
->stub_hash_table
, aarch64_size_one_stub
, htab
);
3084 /* Ask the linker to do its stuff. */
3085 (*htab
->layout_sections_again
) ();
3086 stub_changed
= FALSE
;
3091 error_ret_free_local
:
3095 /* Build all the stubs associated with the current output file. The
3096 stubs are kept in a hash table attached to the main linker hash
3097 table. We also set up the .plt entries for statically linked PIC
3098 functions here. This function is called via aarch64_elf_finish in the
3102 elf64_aarch64_build_stubs (struct bfd_link_info
*info
)
3105 struct bfd_hash_table
*table
;
3106 struct elf64_aarch64_link_hash_table
*htab
;
3108 htab
= elf64_aarch64_hash_table (info
);
3110 for (stub_sec
= htab
->stub_bfd
->sections
;
3111 stub_sec
!= NULL
; stub_sec
= stub_sec
->next
)
3115 /* Ignore non-stub sections. */
3116 if (!strstr (stub_sec
->name
, STUB_SUFFIX
))
3119 /* Allocate memory to hold the linker stubs. */
3120 size
= stub_sec
->size
;
3121 stub_sec
->contents
= bfd_zalloc (htab
->stub_bfd
, size
);
3122 if (stub_sec
->contents
== NULL
&& size
!= 0)
3127 /* Build the stubs as directed by the stub hash table. */
3128 table
= &htab
->stub_hash_table
;
3129 bfd_hash_traverse (table
, aarch64_build_one_stub
, info
);
3135 /* Add an entry to the code/data map for section SEC. */
3138 elf64_aarch64_section_map_add (asection
*sec
, char type
, bfd_vma vma
)
3140 struct _aarch64_elf_section_data
*sec_data
=
3141 elf64_aarch64_section_data (sec
);
3142 unsigned int newidx
;
3144 if (sec_data
->map
== NULL
)
3146 sec_data
->map
= bfd_malloc (sizeof (elf64_aarch64_section_map
));
3147 sec_data
->mapcount
= 0;
3148 sec_data
->mapsize
= 1;
3151 newidx
= sec_data
->mapcount
++;
3153 if (sec_data
->mapcount
> sec_data
->mapsize
)
3155 sec_data
->mapsize
*= 2;
3156 sec_data
->map
= bfd_realloc_or_free
3157 (sec_data
->map
, sec_data
->mapsize
* sizeof (elf64_aarch64_section_map
));
3162 sec_data
->map
[newidx
].vma
= vma
;
3163 sec_data
->map
[newidx
].type
= type
;
3168 /* Initialise maps of insn/data for input BFDs. */
3170 bfd_elf64_aarch64_init_maps (bfd
*abfd
)
3172 Elf_Internal_Sym
*isymbuf
;
3173 Elf_Internal_Shdr
*hdr
;
3174 unsigned int i
, localsyms
;
3176 /* Make sure that we are dealing with an AArch64 elf binary. */
3177 if (!is_aarch64_elf (abfd
))
3180 if ((abfd
->flags
& DYNAMIC
) != 0)
3183 hdr
= &elf_symtab_hdr (abfd
);
3184 localsyms
= hdr
->sh_info
;
3186 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
3187 should contain the number of local symbols, which should come before any
3188 global symbols. Mapping symbols are always local. */
3189 isymbuf
= bfd_elf_get_elf_syms (abfd
, hdr
, localsyms
, 0, NULL
, NULL
, NULL
);
3191 /* No internal symbols read? Skip this BFD. */
3192 if (isymbuf
== NULL
)
3195 for (i
= 0; i
< localsyms
; i
++)
3197 Elf_Internal_Sym
*isym
= &isymbuf
[i
];
3198 asection
*sec
= bfd_section_from_elf_index (abfd
, isym
->st_shndx
);
3201 if (sec
!= NULL
&& ELF_ST_BIND (isym
->st_info
) == STB_LOCAL
)
3203 name
= bfd_elf_string_from_elf_section (abfd
,
3207 if (bfd_is_aarch64_special_symbol_name
3208 (name
, BFD_AARCH64_SPECIAL_SYM_TYPE_MAP
))
3209 elf64_aarch64_section_map_add (sec
, name
[1], isym
->st_value
);
3214 /* Set option values needed during linking. */
3216 bfd_elf64_aarch64_set_options (struct bfd
*output_bfd
,
3217 struct bfd_link_info
*link_info
,
3219 int no_wchar_warn
, int pic_veneer
)
3221 struct elf64_aarch64_link_hash_table
*globals
;
3223 globals
= elf64_aarch64_hash_table (link_info
);
3224 globals
->pic_veneer
= pic_veneer
;
3226 BFD_ASSERT (is_aarch64_elf (output_bfd
));
3227 elf_aarch64_tdata (output_bfd
)->no_enum_size_warning
= no_enum_warn
;
3228 elf_aarch64_tdata (output_bfd
)->no_wchar_size_warning
= no_wchar_warn
;
3231 #define MASK(n) ((1u << (n)) - 1)
/* Extract the 26-bit branch offset field (bits 0-25) from an
   unconditional branch instruction INSN.  */

static inline uint32_t
decode_branch_ofs_26 (uint32_t insn)
{
  return insn & MASK (26);
}
/* Extract the 19-bit offset field (bits 5-23) from a conditional
   branch or compare & branch instruction INSN.  */

static inline uint32_t
decode_cond_branch_ofs_19 (uint32_t insn)
{
  return (insn >> 5) & MASK (19);
}
/* Extract the 19-bit literal offset field (bits 5-23) from a load
   literal instruction INSN.  */

static inline uint32_t
decode_ld_lit_ofs_19 (uint32_t insn)
{
  return (insn >> 5) & MASK (19);
}
/* Extract the 14-bit offset field (bits 5-18) from a test & branch
   instruction INSN.  */

static inline uint32_t
decode_tst_branch_ofs_14 (uint32_t insn)
{
  return (insn >> 5) & MASK (14);
}
/* Extract the 16-bit immediate field (bits 5-20) from a move wide
   instruction INSN.  */

static inline uint32_t
decode_movw_imm (uint32_t insn)
{
  return (insn >> 5) & MASK (16);
}
/* Extract the 21-bit immediate of an ADR instruction INSN: immlo is
   held in bits 29-30 and immhi in bits 5-23; recombine them into a
   single value with immlo in the low two bits.  */

static inline uint32_t
decode_adr_imm (uint32_t insn)
{
  return ((insn >> 29) & MASK (2)) | ((insn >> 3) & (MASK (19) << 2));
}
/* Extract the 12-bit unsigned immediate field (bits 10-21) from an
   add immediate instruction INSN.  */

static inline uint32_t
decode_add_imm (uint32_t insn)
{
  return (insn >> 10) & MASK (12);
}
/* Return INSN with its 26-bit unconditional branch offset field
   (bits 0-25) replaced by the low 26 bits of OFS.  */

static inline uint32_t
reencode_branch_ofs_26 (uint32_t insn, uint32_t ofs)
{
  return (insn & ~MASK (26)) | (ofs & MASK (26));
}
/* Return INSN with its 19-bit conditional branch / compare & branch
   offset field (bits 5-23) replaced by the low 19 bits of OFS.  */

static inline uint32_t
reencode_cond_branch_ofs_19 (uint32_t insn, uint32_t ofs)
{
  return (insn & ~(MASK (19) << 5)) | ((ofs & MASK (19)) << 5);
}
/* Reencode the 19-bit offset of load literal: return INSN with its
   literal offset field (bits 5-23) replaced by the low 19 bits of
   OFS.  (The old header comment said "Decode" — copy-paste typo.)  */

static inline uint32_t
reencode_ld_lit_ofs_19 (uint32_t insn, uint32_t ofs)
{
  return (insn & ~(MASK (19) << 5)) | ((ofs & MASK (19)) << 5);
}
/* Return INSN with its 14-bit test & branch offset field (bits 5-18)
   replaced by the low 14 bits of OFS.  */

static inline uint32_t
reencode_tst_branch_ofs_14 (uint32_t insn, uint32_t ofs)
{
  return (insn & ~(MASK (14) << 5)) | ((ofs & MASK (14)) << 5);
}
/* Return INSN with its move-wide 16-bit immediate field (bits 5-20)
   replaced by the low 16 bits of IMM.  */

static inline uint32_t
reencode_movw_imm (uint32_t insn, uint32_t imm)
{
  return (insn & ~(MASK (16) << 5)) | ((imm & MASK (16)) << 5);
}
/* Return INSN with its ADR immediate fields replaced from IMM:
   the low two bits of IMM go to immlo (bits 29-30) and the next
   nineteen bits to immhi (bits 5-23).  */

static inline uint32_t
reencode_adr_imm (uint32_t insn, uint32_t imm)
{
  return (insn & ~((MASK (2) << 29) | (MASK (19) << 5)))
    | ((imm & MASK (2)) << 29) | ((imm & (MASK (19) << 2)) << 3);
}
/* Return INSN with its ld/st positive-immediate field (bits 10-21)
   replaced by the low 12 bits of IMM.  */

static inline uint32_t
reencode_ldst_pos_imm (uint32_t insn, uint32_t imm)
{
  return (insn & ~(MASK (12) << 10)) | ((imm & MASK (12)) << 10);
}
/* Return INSN with its add-immediate field (bits 10-21) replaced by
   the low 12 bits of IMM.  */

static inline uint32_t
reencode_add_imm (uint32_t insn, uint32_t imm)
{
  return (insn & ~(MASK (12) << 10)) | ((imm & MASK (12)) << 10);
}
/* Turn a MOV[ZN] opcode into MOVZ by setting the opc bit (bit 30).  */

static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  return opcode | (1 << 30);
}
/* Turn a MOV[ZN] opcode into MOVN by clearing the opc bit (bit 30).  */

static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  return opcode & ~(1 << 30);
}
3354 /* Insert the addend/value into the instruction or data object being
3356 static bfd_reloc_status_type
3357 bfd_elf_aarch64_put_addend (bfd
*abfd
,
3359 reloc_howto_type
*howto
, bfd_signed_vma addend
)
3361 bfd_reloc_status_type status
= bfd_reloc_ok
;
3362 bfd_signed_vma old_addend
= addend
;
3366 size
= bfd_get_reloc_size (howto
);
3370 contents
= bfd_get_16 (abfd
, address
);
3373 if (howto
->src_mask
!= 0xffffffff)
3374 /* Must be 32-bit instruction, always little-endian. */
3375 contents
= bfd_getl32 (address
);
3377 /* Must be 32-bit data (endianness dependent). */
3378 contents
= bfd_get_32 (abfd
, address
);
3381 contents
= bfd_get_64 (abfd
, address
);
3387 switch (howto
->complain_on_overflow
)
3389 case complain_overflow_dont
:
3391 case complain_overflow_signed
:
3392 status
= aarch64_signed_overflow (addend
,
3393 howto
->bitsize
+ howto
->rightshift
);
3395 case complain_overflow_unsigned
:
3396 status
= aarch64_unsigned_overflow (addend
,
3397 howto
->bitsize
+ howto
->rightshift
);
3399 case complain_overflow_bitfield
:
3404 addend
>>= howto
->rightshift
;
3406 switch (howto
->type
)
3408 case R_AARCH64_JUMP26
:
3409 case R_AARCH64_CALL26
:
3410 contents
= reencode_branch_ofs_26 (contents
, addend
);
3413 case R_AARCH64_CONDBR19
:
3414 contents
= reencode_cond_branch_ofs_19 (contents
, addend
);
3417 case R_AARCH64_TSTBR14
:
3418 contents
= reencode_tst_branch_ofs_14 (contents
, addend
);
3421 case R_AARCH64_LD_PREL_LO19
:
3422 case R_AARCH64_GOT_LD_PREL19
:
3423 if (old_addend
& ((1 << howto
->rightshift
) - 1))
3424 return bfd_reloc_overflow
;
3425 contents
= reencode_ld_lit_ofs_19 (contents
, addend
);
3428 case R_AARCH64_TLSDESC_CALL
:
3431 case R_AARCH64_TLSGD_ADR_PAGE21
:
3432 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21
:
3433 case R_AARCH64_TLSDESC_ADR_PAGE
:
3434 case R_AARCH64_ADR_GOT_PAGE
:
3435 case R_AARCH64_ADR_PREL_LO21
:
3436 case R_AARCH64_ADR_PREL_PG_HI21
:
3437 case R_AARCH64_ADR_PREL_PG_HI21_NC
:
3438 contents
= reencode_adr_imm (contents
, addend
);
3441 case R_AARCH64_TLSGD_ADD_LO12_NC
:
3442 case R_AARCH64_TLSLE_ADD_TPREL_LO12
:
3443 case R_AARCH64_TLSLE_ADD_TPREL_HI12
:
3444 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC
:
3445 case R_AARCH64_TLSDESC_ADD_LO12_NC
:
3446 case R_AARCH64_ADD_ABS_LO12_NC
:
3447 /* Corresponds to: add rd, rn, #uimm12 to provide the low order
3448 12 bits of the page offset following
3449 R_AARCH64_ADR_PREL_PG_HI21 which computes the
3450 (pc-relative) page base. */
3451 contents
= reencode_add_imm (contents
, addend
);
3454 case R_AARCH64_LDST8_ABS_LO12_NC
:
3455 case R_AARCH64_LDST16_ABS_LO12_NC
:
3456 case R_AARCH64_LDST32_ABS_LO12_NC
:
3457 case R_AARCH64_LDST64_ABS_LO12_NC
:
3458 case R_AARCH64_LDST128_ABS_LO12_NC
:
3459 case R_AARCH64_TLSDESC_LD64_LO12_NC
:
3460 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC
:
3461 case R_AARCH64_LD64_GOT_LO12_NC
:
3462 if (old_addend
& ((1 << howto
->rightshift
) - 1))
3463 return bfd_reloc_overflow
;
3464 /* Used for ldr*|str* rt, [rn, #uimm12] to provide the low order
3465 12 bits of the page offset following R_AARCH64_ADR_PREL_PG_HI21
3466 which computes the (pc-relative) page base. */
3467 contents
= reencode_ldst_pos_imm (contents
, addend
);
3470 /* Group relocations to create high bits of a 16, 32, 48 or 64
3471 bit signed data or abs address inline. Will change
3472 instruction to MOVN or MOVZ depending on sign of calculated
3475 case R_AARCH64_TLSLE_MOVW_TPREL_G2
:
3476 case R_AARCH64_TLSLE_MOVW_TPREL_G1
:
3477 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC
:
3478 case R_AARCH64_TLSLE_MOVW_TPREL_G0
:
3479 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC
:
3480 case R_AARCH64_MOVW_SABS_G0
:
3481 case R_AARCH64_MOVW_SABS_G1
:
3482 case R_AARCH64_MOVW_SABS_G2
:
3483 /* NOTE: We can only come here with movz or movn. */
3486 /* Force use of MOVN. */
3488 contents
= reencode_movzn_to_movn (contents
);
3492 /* Force use of MOVZ. */
3493 contents
= reencode_movzn_to_movz (contents
);
3497 /* Group relocations to create a 16, 32, 48 or 64 bit unsigned
3498 data or abs address inline. */
3500 case R_AARCH64_MOVW_UABS_G0
:
3501 case R_AARCH64_MOVW_UABS_G0_NC
:
3502 case R_AARCH64_MOVW_UABS_G1
:
3503 case R_AARCH64_MOVW_UABS_G1_NC
:
3504 case R_AARCH64_MOVW_UABS_G2
:
3505 case R_AARCH64_MOVW_UABS_G2_NC
:
3506 case R_AARCH64_MOVW_UABS_G3
:
3507 contents
= reencode_movw_imm (contents
, addend
);
3511 /* Repack simple data */
3512 if (howto
->dst_mask
& (howto
->dst_mask
+ 1))
3513 return bfd_reloc_notsupported
;
3515 contents
= ((contents
& ~howto
->dst_mask
) | (addend
& howto
->dst_mask
));
3522 bfd_put_16 (abfd
, contents
, address
);
3525 if (howto
->dst_mask
!= 0xffffffff)
3526 /* must be 32-bit instruction, always little-endian */
3527 bfd_putl32 (contents
, address
);
3529 /* must be 32-bit data (endianness dependent) */
3530 bfd_put_32 (abfd
, contents
, address
);
3533 bfd_put_64 (abfd
, contents
, address
);
3543 aarch64_calculate_got_entry_vma (struct elf_link_hash_entry
*h
,
3544 struct elf64_aarch64_link_hash_table
3545 *globals
, struct bfd_link_info
*info
,
3546 bfd_vma value
, bfd
*output_bfd
,
3547 bfd_boolean
*unresolved_reloc_p
)
3549 bfd_vma off
= (bfd_vma
) - 1;
3550 asection
*basegot
= globals
->root
.sgot
;
3551 bfd_boolean dyn
= globals
->root
.dynamic_sections_created
;
3555 off
= h
->got
.offset
;
3556 BFD_ASSERT (off
!= (bfd_vma
) - 1);
3557 if (!WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn
, info
->shared
, h
)
3559 && SYMBOL_REFERENCES_LOCAL (info
, h
))
3560 || (ELF_ST_VISIBILITY (h
->other
)
3561 && h
->root
.type
== bfd_link_hash_undefweak
))
3563 /* This is actually a static link, or it is a -Bsymbolic link
3564 and the symbol is defined locally. We must initialize this
3565 entry in the global offset table. Since the offset must
3566 always be a multiple of 8, we use the least significant bit
3567 to record whether we have initialized it already.
3568 When doing a dynamic link, we create a .rel(a).got relocation
3569 entry to initialize the value. This is done in the
3570 finish_dynamic_symbol routine. */
3575 bfd_put_64 (output_bfd
, value
, basegot
->contents
+ off
);
3580 *unresolved_reloc_p
= FALSE
;
3582 off
= off
+ basegot
->output_section
->vma
+ basegot
->output_offset
;
3588 /* Change R_TYPE to a more efficient access model where possible,
3589 return the new reloc type. */
3592 aarch64_tls_transition_without_check (unsigned int r_type
,
3593 struct elf_link_hash_entry
*h
)
3595 bfd_boolean is_local
= h
== NULL
;
3598 case R_AARCH64_TLSGD_ADR_PAGE21
:
3599 case R_AARCH64_TLSDESC_ADR_PAGE
:
3601 ? R_AARCH64_TLSLE_MOVW_TPREL_G1
: R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21
;
3603 case R_AARCH64_TLSGD_ADD_LO12_NC
:
3604 case R_AARCH64_TLSDESC_LD64_LO12_NC
:
3606 ? R_AARCH64_TLSLE_MOVW_TPREL_G0_NC
3607 : R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC
;
3609 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21
:
3610 return is_local
? R_AARCH64_TLSLE_MOVW_TPREL_G1
: r_type
;
3612 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC
:
3613 return is_local
? R_AARCH64_TLSLE_MOVW_TPREL_G0_NC
: r_type
;
3615 case R_AARCH64_TLSDESC_ADD_LO12_NC
:
3616 case R_AARCH64_TLSDESC_CALL
:
3617 /* Instructions with these relocations will become NOPs. */
3618 return R_AARCH64_NONE
;
3625 aarch64_reloc_got_type (unsigned int r_type
)
3629 case R_AARCH64_LD64_GOT_LO12_NC
:
3630 case R_AARCH64_ADR_GOT_PAGE
:
3631 case R_AARCH64_GOT_LD_PREL19
:
3634 case R_AARCH64_TLSGD_ADR_PAGE21
:
3635 case R_AARCH64_TLSGD_ADD_LO12_NC
:
3638 case R_AARCH64_TLSDESC_ADD_LO12_NC
:
3639 case R_AARCH64_TLSDESC_ADR_PAGE
:
3640 case R_AARCH64_TLSDESC_CALL
:
3641 case R_AARCH64_TLSDESC_LD64_LO12_NC
:
3642 return GOT_TLSDESC_GD
;
3644 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21
:
3645 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC
:
3648 case R_AARCH64_TLSLE_ADD_TPREL_HI12
:
3649 case R_AARCH64_TLSLE_ADD_TPREL_LO12
:
3650 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC
:
3651 case R_AARCH64_TLSLE_MOVW_TPREL_G0
:
3652 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC
:
3653 case R_AARCH64_TLSLE_MOVW_TPREL_G1
:
3654 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC
:
3655 case R_AARCH64_TLSLE_MOVW_TPREL_G2
:
3662 aarch64_can_relax_tls (bfd
*input_bfd
,
3663 struct bfd_link_info
*info
,
3664 unsigned int r_type
,
3665 struct elf_link_hash_entry
*h
,
3666 unsigned long r_symndx
)
3668 unsigned int symbol_got_type
;
3669 unsigned int reloc_got_type
;
3671 if (! IS_AARCH64_TLS_RELOC (r_type
))
3674 symbol_got_type
= elf64_aarch64_symbol_got_type (h
, input_bfd
, r_symndx
);
3675 reloc_got_type
= aarch64_reloc_got_type (r_type
);
3677 if (symbol_got_type
== GOT_TLS_IE
&& GOT_TLS_GD_ANY_P (reloc_got_type
))
3683 if (h
&& h
->root
.type
== bfd_link_hash_undefweak
)
3690 aarch64_tls_transition (bfd
*input_bfd
,
3691 struct bfd_link_info
*info
,
3692 unsigned int r_type
,
3693 struct elf_link_hash_entry
*h
,
3694 unsigned long r_symndx
)
3696 if (! aarch64_can_relax_tls (input_bfd
, info
, r_type
, h
, r_symndx
))
3699 return aarch64_tls_transition_without_check (r_type
, h
);
3702 /* Return the base VMA address which should be subtracted from real addresses
3703 when resolving R_AARCH64_TLS_DTPREL64 relocation. */
3706 dtpoff_base (struct bfd_link_info
*info
)
3708 /* If tls_sec is NULL, we should have signalled an error already. */
3709 BFD_ASSERT (elf_hash_table (info
)->tls_sec
!= NULL
);
3710 return elf_hash_table (info
)->tls_sec
->vma
;
3714 /* Return the base VMA address which should be subtracted from real addresses
3715 when resolving R_AARCH64_TLS_GOTTPREL64 relocations. */
3718 tpoff_base (struct bfd_link_info
*info
)
3720 struct elf_link_hash_table
*htab
= elf_hash_table (info
);
3722 /* If tls_sec is NULL, we should have signalled an error already. */
3723 if (htab
->tls_sec
== NULL
)
3726 bfd_vma base
= align_power ((bfd_vma
) TCB_SIZE
,
3727 htab
->tls_sec
->alignment_power
);
3728 return htab
->tls_sec
->vma
- base
;
3732 symbol_got_offset_ref (bfd
*input_bfd
, struct elf_link_hash_entry
*h
,
3733 unsigned long r_symndx
)
3735 /* Calculate the address of the GOT entry for symbol
3736 referred to in h. */
3738 return &h
->got
.offset
;
3742 struct elf_aarch64_local_symbol
*l
;
3744 l
= elf64_aarch64_locals (input_bfd
);
3745 return &l
[r_symndx
].got_offset
;
3750 symbol_got_offset_mark (bfd
*input_bfd
, struct elf_link_hash_entry
*h
,
3751 unsigned long r_symndx
)
3754 p
= symbol_got_offset_ref (input_bfd
, h
, r_symndx
);
3759 symbol_got_offset_mark_p (bfd
*input_bfd
, struct elf_link_hash_entry
*h
,
3760 unsigned long r_symndx
)
3763 value
= * symbol_got_offset_ref (input_bfd
, h
, r_symndx
);
3768 symbol_got_offset (bfd
*input_bfd
, struct elf_link_hash_entry
*h
,
3769 unsigned long r_symndx
)
3772 value
= * symbol_got_offset_ref (input_bfd
, h
, r_symndx
);
3778 symbol_tlsdesc_got_offset_ref (bfd
*input_bfd
, struct elf_link_hash_entry
*h
,
3779 unsigned long r_symndx
)
3781 /* Calculate the address of the GOT entry for symbol
3782 referred to in h. */
3785 struct elf64_aarch64_link_hash_entry
*eh
;
3786 eh
= (struct elf64_aarch64_link_hash_entry
*) h
;
3787 return &eh
->tlsdesc_got_jump_table_offset
;
3792 struct elf_aarch64_local_symbol
*l
;
3794 l
= elf64_aarch64_locals (input_bfd
);
3795 return &l
[r_symndx
].tlsdesc_got_jump_table_offset
;
3800 symbol_tlsdesc_got_offset_mark (bfd
*input_bfd
, struct elf_link_hash_entry
*h
,
3801 unsigned long r_symndx
)
3804 p
= symbol_tlsdesc_got_offset_ref (input_bfd
, h
, r_symndx
);
3809 symbol_tlsdesc_got_offset_mark_p (bfd
*input_bfd
,
3810 struct elf_link_hash_entry
*h
,
3811 unsigned long r_symndx
)
3814 value
= * symbol_tlsdesc_got_offset_ref (input_bfd
, h
, r_symndx
);
3819 symbol_tlsdesc_got_offset (bfd
*input_bfd
, struct elf_link_hash_entry
*h
,
3820 unsigned long r_symndx
)
3823 value
= * symbol_tlsdesc_got_offset_ref (input_bfd
, h
, r_symndx
);
3828 /* Perform a relocation as part of a final link. */
3829 static bfd_reloc_status_type
3830 elf64_aarch64_final_link_relocate (reloc_howto_type
*howto
,
3833 asection
*input_section
,
3835 Elf_Internal_Rela
*rel
,
3837 struct bfd_link_info
*info
,
3839 struct elf_link_hash_entry
*h
,
3840 bfd_boolean
*unresolved_reloc_p
,
3841 bfd_boolean save_addend
,
3842 bfd_vma
*saved_addend
)
3844 unsigned int r_type
= howto
->type
;
3845 unsigned long r_symndx
;
3846 bfd_byte
*hit_data
= contents
+ rel
->r_offset
;
3848 bfd_signed_vma signed_addend
;
3849 struct elf64_aarch64_link_hash_table
*globals
;
3850 bfd_boolean weak_undef_p
;
3852 globals
= elf64_aarch64_hash_table (info
);
3854 BFD_ASSERT (is_aarch64_elf (input_bfd
));
3856 r_symndx
= ELF64_R_SYM (rel
->r_info
);
3858 /* It is possible to have linker relaxations on some TLS access
3859 models. Update our information here. */
3860 r_type
= aarch64_tls_transition (input_bfd
, info
, r_type
, h
, r_symndx
);
3862 if (r_type
!= howto
->type
)
3863 howto
= elf64_aarch64_howto_from_type (r_type
);
3865 place
= input_section
->output_section
->vma
3866 + input_section
->output_offset
+ rel
->r_offset
;
3868 /* Get addend, accumulating the addend for consecutive relocs
3869 which refer to the same offset. */
3870 signed_addend
= saved_addend
? *saved_addend
: 0;
3871 signed_addend
+= rel
->r_addend
;
3873 weak_undef_p
= (h
? h
->root
.type
== bfd_link_hash_undefweak
3874 : bfd_is_und_section (sym_sec
));
3877 case R_AARCH64_NONE
:
3878 case R_AARCH64_NULL
:
3879 case R_AARCH64_TLSDESC_CALL
:
3880 *unresolved_reloc_p
= FALSE
;
3881 return bfd_reloc_ok
;
3883 case R_AARCH64_ABS64
:
3885 /* When generating a shared object or relocatable executable, these
3886 relocations are copied into the output file to be resolved at
3888 if (((info
->shared
== TRUE
) || globals
->root
.is_relocatable_executable
)
3889 && (input_section
->flags
& SEC_ALLOC
)
3891 || ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
3892 || h
->root
.type
!= bfd_link_hash_undefweak
))
3894 Elf_Internal_Rela outrel
;
3896 bfd_boolean skip
, relocate
;
3899 *unresolved_reloc_p
= FALSE
;
3901 sreloc
= _bfd_elf_get_dynamic_reloc_section (input_bfd
,
3904 return bfd_reloc_notsupported
;
3909 outrel
.r_addend
= signed_addend
;
3911 _bfd_elf_section_offset (output_bfd
, info
, input_section
,
3913 if (outrel
.r_offset
== (bfd_vma
) - 1)
3915 else if (outrel
.r_offset
== (bfd_vma
) - 2)
3921 outrel
.r_offset
+= (input_section
->output_section
->vma
3922 + input_section
->output_offset
);
3925 memset (&outrel
, 0, sizeof outrel
);
3928 && (!info
->shared
|| !info
->symbolic
|| !h
->def_regular
))
3929 outrel
.r_info
= ELF64_R_INFO (h
->dynindx
, r_type
);
3934 /* On SVR4-ish systems, the dynamic loader cannot
3935 relocate the text and data segments independently,
3936 so the symbol does not matter. */
3938 outrel
.r_info
= ELF64_R_INFO (symbol
, R_AARCH64_RELATIVE
);
3939 outrel
.r_addend
+= value
;
3942 loc
= sreloc
->contents
+ sreloc
->reloc_count
++ * RELOC_SIZE (htab
);
3943 bfd_elf64_swap_reloca_out (output_bfd
, &outrel
, loc
);
3945 if (sreloc
->reloc_count
* RELOC_SIZE (htab
) > sreloc
->size
)
3947 /* Sanity to check that we have previously allocated
3948 sufficient space in the relocation section for the
3949 number of relocations we actually want to emit. */
3953 /* If this reloc is against an external symbol, we do not want to
3954 fiddle with the addend. Otherwise, we need to include the symbol
3955 value so that it becomes an addend for the dynamic reloc. */
3957 return bfd_reloc_ok
;
3959 return _bfd_final_link_relocate (howto
, input_bfd
, input_section
,
3960 contents
, rel
->r_offset
, value
,
3964 value
+= signed_addend
;
3967 case R_AARCH64_JUMP26
:
3968 case R_AARCH64_CALL26
:
3970 asection
*splt
= globals
->root
.splt
;
3971 bfd_boolean via_plt_p
=
3972 splt
!= NULL
&& h
!= NULL
&& h
->plt
.offset
!= (bfd_vma
) - 1;
3974 /* A call to an undefined weak symbol is converted to a jump to
3975 the next instruction unless a PLT entry will be created.
3976 The jump to the next instruction is optimized as a NOP.
3977 Do the same for local undefined symbols. */
3978 if (weak_undef_p
&& ! via_plt_p
)
3980 bfd_putl32 (INSN_NOP
, hit_data
);
3981 return bfd_reloc_ok
;
3984 /* If the call goes through a PLT entry, make sure to
3985 check distance to the right destination address. */
3988 value
= (splt
->output_section
->vma
3989 + splt
->output_offset
+ h
->plt
.offset
);
3990 *unresolved_reloc_p
= FALSE
;
3993 /* If the target symbol is global and marked as a function the
3994 relocation applies a function call or a tail call. In this
3995 situation we can veneer out of range branches. The veneers
3996 use IP0 and IP1 hence cannot be used arbitrary out of range
3997 branches that occur within the body of a function. */
3998 if (h
&& h
->type
== STT_FUNC
)
4000 /* Check if a stub has to be inserted because the destination
4002 if (! aarch64_valid_branch_p (value
, place
))
4004 /* The target is out of reach, so redirect the branch to
4005 the local stub for this function. */
4006 struct elf64_aarch64_stub_hash_entry
*stub_entry
;
4007 stub_entry
= elf64_aarch64_get_stub_entry (input_section
,
4010 if (stub_entry
!= NULL
)
4011 value
= (stub_entry
->stub_offset
4012 + stub_entry
->stub_sec
->output_offset
4013 + stub_entry
->stub_sec
->output_section
->vma
);
4017 value
= aarch64_resolve_relocation (r_type
, place
, value
,
4018 signed_addend
, weak_undef_p
);
4021 case R_AARCH64_ABS16
:
4022 case R_AARCH64_ABS32
:
4023 case R_AARCH64_ADD_ABS_LO12_NC
:
4024 case R_AARCH64_ADR_PREL_LO21
:
4025 case R_AARCH64_ADR_PREL_PG_HI21
:
4026 case R_AARCH64_ADR_PREL_PG_HI21_NC
:
4027 case R_AARCH64_CONDBR19
:
4028 case R_AARCH64_LD_PREL_LO19
:
4029 case R_AARCH64_LDST8_ABS_LO12_NC
:
4030 case R_AARCH64_LDST16_ABS_LO12_NC
:
4031 case R_AARCH64_LDST32_ABS_LO12_NC
:
4032 case R_AARCH64_LDST64_ABS_LO12_NC
:
4033 case R_AARCH64_LDST128_ABS_LO12_NC
:
4034 case R_AARCH64_MOVW_SABS_G0
:
4035 case R_AARCH64_MOVW_SABS_G1
:
4036 case R_AARCH64_MOVW_SABS_G2
:
4037 case R_AARCH64_MOVW_UABS_G0
:
4038 case R_AARCH64_MOVW_UABS_G0_NC
:
4039 case R_AARCH64_MOVW_UABS_G1
:
4040 case R_AARCH64_MOVW_UABS_G1_NC
:
4041 case R_AARCH64_MOVW_UABS_G2
:
4042 case R_AARCH64_MOVW_UABS_G2_NC
:
4043 case R_AARCH64_MOVW_UABS_G3
:
4044 case R_AARCH64_PREL16
:
4045 case R_AARCH64_PREL32
:
4046 case R_AARCH64_PREL64
:
4047 case R_AARCH64_TSTBR14
:
4048 value
= aarch64_resolve_relocation (r_type
, place
, value
,
4049 signed_addend
, weak_undef_p
);
4052 case R_AARCH64_LD64_GOT_LO12_NC
:
4053 case R_AARCH64_ADR_GOT_PAGE
:
4054 case R_AARCH64_GOT_LD_PREL19
:
4055 if (globals
->root
.sgot
== NULL
)
4056 BFD_ASSERT (h
!= NULL
);
4060 value
= aarch64_calculate_got_entry_vma (h
, globals
, info
, value
,
4062 unresolved_reloc_p
);
4063 value
= aarch64_resolve_relocation (r_type
, place
, value
,
4068 case R_AARCH64_TLSGD_ADR_PAGE21
:
4069 case R_AARCH64_TLSGD_ADD_LO12_NC
:
4070 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21
:
4071 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC
:
4072 if (globals
->root
.sgot
== NULL
)
4073 return bfd_reloc_notsupported
;
4075 value
= (symbol_got_offset (input_bfd
, h
, r_symndx
)
4076 + globals
->root
.sgot
->output_section
->vma
4077 + globals
->root
.sgot
->output_section
->output_offset
);
4079 value
= aarch64_resolve_relocation (r_type
, place
, value
,
4081 *unresolved_reloc_p
= FALSE
;
4084 case R_AARCH64_TLSLE_ADD_TPREL_HI12
:
4085 case R_AARCH64_TLSLE_ADD_TPREL_LO12
:
4086 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC
:
4087 case R_AARCH64_TLSLE_MOVW_TPREL_G0
:
4088 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC
:
4089 case R_AARCH64_TLSLE_MOVW_TPREL_G1
:
4090 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC
:
4091 case R_AARCH64_TLSLE_MOVW_TPREL_G2
:
4092 value
= aarch64_resolve_relocation (r_type
, place
, value
,
4093 signed_addend
- tpoff_base (info
), weak_undef_p
);
4094 *unresolved_reloc_p
= FALSE
;
4097 case R_AARCH64_TLSDESC_ADR_PAGE
:
4098 case R_AARCH64_TLSDESC_LD64_LO12_NC
:
4099 case R_AARCH64_TLSDESC_ADD_LO12_NC
:
4100 case R_AARCH64_TLSDESC_ADD
:
4101 case R_AARCH64_TLSDESC_LDR
:
4102 if (globals
->root
.sgot
== NULL
)
4103 return bfd_reloc_notsupported
;
4105 value
= (symbol_tlsdesc_got_offset (input_bfd
, h
, r_symndx
)
4106 + globals
->root
.sgotplt
->output_section
->vma
4107 + globals
->root
.sgotplt
->output_section
->output_offset
4108 + globals
->sgotplt_jump_table_size
);
4110 value
= aarch64_resolve_relocation (r_type
, place
, value
,
4112 *unresolved_reloc_p
= FALSE
;
4116 return bfd_reloc_notsupported
;
4120 *saved_addend
= value
;
4122 /* Only apply the final relocation in a sequence. */
4124 return bfd_reloc_continue
;
4126 return bfd_elf_aarch64_put_addend (input_bfd
, hit_data
, howto
, value
);
4129 /* Handle TLS relaxations. Relaxing is possible for symbols that use
4130 R_AARCH64_TLSDESC_ADR_{PAGE, LD64_LO12_NC, ADD_LO12_NC} during a static
4133 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
4134 is to then call final_link_relocate. Return other values in the
4137 static bfd_reloc_status_type
4138 elf64_aarch64_tls_relax (struct elf64_aarch64_link_hash_table
*globals
,
4139 bfd
*input_bfd
, bfd_byte
*contents
,
4140 Elf_Internal_Rela
*rel
, struct elf_link_hash_entry
*h
)
4142 bfd_boolean is_local
= h
== NULL
;
4143 unsigned int r_type
= ELF64_R_TYPE (rel
->r_info
);
4146 BFD_ASSERT (globals
&& input_bfd
&& contents
&& rel
);
4150 case R_AARCH64_TLSGD_ADR_PAGE21
:
4151 case R_AARCH64_TLSDESC_ADR_PAGE
:
4154 /* GD->LE relaxation:
4155 adrp x0, :tlsgd:var => movz x0, :tprel_g1:var
4157 adrp x0, :tlsdesc:var => movz x0, :tprel_g1:var
4159 bfd_putl32 (0xd2a00000, contents
+ rel
->r_offset
);
4160 return bfd_reloc_continue
;
4164 /* GD->IE relaxation:
4165 adrp x0, :tlsgd:var => adrp x0, :gottprel:var
4167 adrp x0, :tlsdesc:var => adrp x0, :gottprel:var
4169 insn
= bfd_getl32 (contents
+ rel
->r_offset
);
4170 return bfd_reloc_continue
;
4173 case R_AARCH64_TLSDESC_LD64_LO12_NC
:
4176 /* GD->LE relaxation:
4177 ldr xd, [x0, #:tlsdesc_lo12:var] => movk x0, :tprel_g0_nc:var
4179 bfd_putl32 (0xf2800000, contents
+ rel
->r_offset
);
4180 return bfd_reloc_continue
;
4184 /* GD->IE relaxation:
4185 ldr xd, [x0, #:tlsdesc_lo12:var] => ldr x0, [x0, #:gottprel_lo12:var]
4187 insn
= bfd_getl32 (contents
+ rel
->r_offset
);
4189 bfd_putl32 (insn
, contents
+ rel
->r_offset
);
4190 return bfd_reloc_continue
;
4193 case R_AARCH64_TLSGD_ADD_LO12_NC
:
4196 /* GD->LE relaxation
4197 add x0, #:tlsgd_lo12:var => movk x0, :tprel_g0_nc:var
4198 bl __tls_get_addr => mrs x1, tpidr_el0
4199 nop => add x0, x1, x0
4202 /* First kill the tls_get_addr reloc on the bl instruction. */
4203 BFD_ASSERT (rel
->r_offset
+ 4 == rel
[1].r_offset
);
4204 rel
[1].r_info
= ELF64_R_INFO (STN_UNDEF
, R_AARCH64_NONE
);
4206 bfd_putl32 (0xf2800000, contents
+ rel
->r_offset
);
4207 bfd_putl32 (0xd53bd041, contents
+ rel
->r_offset
+ 4);
4208 bfd_putl32 (0x8b000020, contents
+ rel
->r_offset
+ 8);
4209 return bfd_reloc_continue
;
4213 /* GD->IE relaxation
4214 ADD x0, #:tlsgd_lo12:var => ldr x0, [x0, #:gottprel_lo12:var]
4215 BL __tls_get_addr => mrs x1, tpidr_el0
4217 NOP => add x0, x1, x0
4220 BFD_ASSERT (ELF64_R_TYPE (rel
[1].r_info
) == R_AARCH64_CALL26
);
4222 /* Remove the relocation on the BL instruction. */
4223 rel
[1].r_info
= ELF64_R_INFO (STN_UNDEF
, R_AARCH64_NONE
);
4225 bfd_putl32 (0xf9400000, contents
+ rel
->r_offset
);
4227 /* We choose to fixup the BL and NOP instructions using the
4228 offset from the second relocation to allow flexibility in
4229 scheduling instructions between the ADD and BL. */
4230 bfd_putl32 (0xd53bd041, contents
+ rel
[1].r_offset
);
4231 bfd_putl32 (0x8b000020, contents
+ rel
[1].r_offset
+ 4);
4232 return bfd_reloc_continue
;
4235 case R_AARCH64_TLSDESC_ADD_LO12_NC
:
4236 case R_AARCH64_TLSDESC_CALL
:
4237 /* GD->IE/LE relaxation:
4238 add x0, x0, #:tlsdesc_lo12:var => nop
4241 bfd_putl32 (INSN_NOP
, contents
+ rel
->r_offset
);
4242 return bfd_reloc_ok
;
4244 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21
:
4245 /* IE->LE relaxation:
4246 adrp xd, :gottprel:var => movz xd, :tprel_g1:var
4250 insn
= bfd_getl32 (contents
+ rel
->r_offset
);
4251 bfd_putl32 (0xd2a00000 | (insn
& 0x1f), contents
+ rel
->r_offset
);
4253 return bfd_reloc_continue
;
4255 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC
:
4256 /* IE->LE relaxation:
4257 ldr xd, [xm, #:gottprel_lo12:var] => movk xd, :tprel_g0_nc:var
4261 insn
= bfd_getl32 (contents
+ rel
->r_offset
);
4262 bfd_putl32 (0xf2800000 | (insn
& 0x1f), contents
+ rel
->r_offset
);
4264 return bfd_reloc_continue
;
4267 return bfd_reloc_continue
;
4270 return bfd_reloc_ok
;
4273 /* Relocate an AArch64 ELF section. */
4276 elf64_aarch64_relocate_section (bfd
*output_bfd
,
4277 struct bfd_link_info
*info
,
4279 asection
*input_section
,
4281 Elf_Internal_Rela
*relocs
,
4282 Elf_Internal_Sym
*local_syms
,
4283 asection
**local_sections
)
4285 Elf_Internal_Shdr
*symtab_hdr
;
4286 struct elf_link_hash_entry
**sym_hashes
;
4287 Elf_Internal_Rela
*rel
;
4288 Elf_Internal_Rela
*relend
;
4290 struct elf64_aarch64_link_hash_table
*globals
;
4291 bfd_boolean save_addend
= FALSE
;
4294 globals
= elf64_aarch64_hash_table (info
);
4296 symtab_hdr
= &elf_symtab_hdr (input_bfd
);
4297 sym_hashes
= elf_sym_hashes (input_bfd
);
4300 relend
= relocs
+ input_section
->reloc_count
;
4301 for (; rel
< relend
; rel
++)
4303 unsigned int r_type
;
4304 unsigned int relaxed_r_type
;
4305 reloc_howto_type
*howto
;
4306 unsigned long r_symndx
;
4307 Elf_Internal_Sym
*sym
;
4309 struct elf_link_hash_entry
*h
;
4311 bfd_reloc_status_type r
;
4314 bfd_boolean unresolved_reloc
= FALSE
;
4315 char *error_message
= NULL
;
4317 r_symndx
= ELF64_R_SYM (rel
->r_info
);
4318 r_type
= ELF64_R_TYPE (rel
->r_info
);
4320 bfd_reloc
.howto
= elf64_aarch64_howto_from_type (r_type
);
4321 howto
= bfd_reloc
.howto
;
4327 if (r_symndx
< symtab_hdr
->sh_info
)
4329 sym
= local_syms
+ r_symndx
;
4330 sym_type
= ELF64_ST_TYPE (sym
->st_info
);
4331 sec
= local_sections
[r_symndx
];
4333 /* An object file might have a reference to a local
4334 undefined symbol. This is a daft object file, but we
4335 should at least do something about it. */
4336 if (r_type
!= R_AARCH64_NONE
&& r_type
!= R_AARCH64_NULL
4337 && bfd_is_und_section (sec
)
4338 && ELF_ST_BIND (sym
->st_info
) != STB_WEAK
)
4340 if (!info
->callbacks
->undefined_symbol
4341 (info
, bfd_elf_string_from_elf_section
4342 (input_bfd
, symtab_hdr
->sh_link
, sym
->st_name
),
4343 input_bfd
, input_section
, rel
->r_offset
, TRUE
))
4347 if (r_type
>= R_AARCH64_dyn_max
)
4349 bfd_set_error (bfd_error_bad_value
);
4353 relocation
= _bfd_elf_rela_local_sym (output_bfd
, sym
, &sec
, rel
);
4359 RELOC_FOR_GLOBAL_SYMBOL (info
, input_bfd
, input_section
, rel
,
4360 r_symndx
, symtab_hdr
, sym_hashes
,
4362 unresolved_reloc
, warned
);
4367 if (sec
!= NULL
&& discarded_section (sec
))
4368 RELOC_AGAINST_DISCARDED_SECTION (info
, input_bfd
, input_section
,
4369 rel
, 1, relend
, howto
, 0, contents
);
4371 if (info
->relocatable
)
4373 /* This is a relocatable link. We don't have to change
4374 anything, unless the reloc is against a section symbol,
4375 in which case we have to adjust according to where the
4376 section symbol winds up in the output section. */
4377 if (sym
!= NULL
&& ELF_ST_TYPE (sym
->st_info
) == STT_SECTION
)
4378 rel
->r_addend
+= sec
->output_offset
;
4383 name
= h
->root
.root
.string
;
4386 name
= (bfd_elf_string_from_elf_section
4387 (input_bfd
, symtab_hdr
->sh_link
, sym
->st_name
));
4388 if (name
== NULL
|| *name
== '\0')
4389 name
= bfd_section_name (input_bfd
, sec
);
4393 && r_type
!= R_AARCH64_NONE
4394 && r_type
!= R_AARCH64_NULL
4396 || h
->root
.type
== bfd_link_hash_defined
4397 || h
->root
.type
== bfd_link_hash_defweak
)
4398 && IS_AARCH64_TLS_RELOC (r_type
) != (sym_type
== STT_TLS
))
4400 (*_bfd_error_handler
)
4401 ((sym_type
== STT_TLS
4402 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
4403 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
4405 input_section
, (long) rel
->r_offset
, howto
->name
, name
);
4409 /* We relax only if we can see that there can be a valid transition
4410 from a reloc type to another.
4411 We call elf64_aarch64_final_link_relocate unless we're completely
4412 done, i.e., the relaxation produced the final output we want. */
4414 relaxed_r_type
= aarch64_tls_transition (input_bfd
, info
, r_type
,
4416 if (relaxed_r_type
!= r_type
)
4418 r_type
= relaxed_r_type
;
4419 howto
= elf64_aarch64_howto_from_type (r_type
);
4421 r
= elf64_aarch64_tls_relax (globals
, input_bfd
, contents
, rel
, h
);
4422 unresolved_reloc
= 0;
4425 r
= bfd_reloc_continue
;
4427 /* There may be multiple consecutive relocations for the
4428 same offset. In that case we are supposed to treat the
4429 output of each relocation as the addend for the next. */
4430 if (rel
+ 1 < relend
4431 && rel
->r_offset
== rel
[1].r_offset
4432 && ELF64_R_TYPE (rel
[1].r_info
) != R_AARCH64_NONE
4433 && ELF64_R_TYPE (rel
[1].r_info
) != R_AARCH64_NULL
)
4436 save_addend
= FALSE
;
4438 if (r
== bfd_reloc_continue
)
4439 r
= elf64_aarch64_final_link_relocate (howto
, input_bfd
, output_bfd
,
4440 input_section
, contents
, rel
,
4441 relocation
, info
, sec
,
4442 h
, &unresolved_reloc
,
4443 save_addend
, &addend
);
4447 case R_AARCH64_TLSGD_ADR_PAGE21
:
4448 case R_AARCH64_TLSGD_ADD_LO12_NC
:
4449 if (! symbol_got_offset_mark_p (input_bfd
, h
, r_symndx
))
4451 bfd_boolean need_relocs
= FALSE
;
4456 off
= symbol_got_offset (input_bfd
, h
, r_symndx
);
4457 indx
= h
&& h
->dynindx
!= -1 ? h
->dynindx
: 0;
4460 (info
->shared
|| indx
!= 0) &&
4462 || ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
4463 || h
->root
.type
!= bfd_link_hash_undefweak
);
4465 BFD_ASSERT (globals
->root
.srelgot
!= NULL
);
4469 Elf_Internal_Rela rela
;
4470 rela
.r_info
= ELF64_R_INFO (indx
, R_AARCH64_TLS_DTPMOD64
);
4472 rela
.r_offset
= globals
->root
.sgot
->output_section
->vma
+
4473 globals
->root
.sgot
->output_offset
+ off
;
4476 loc
= globals
->root
.srelgot
->contents
;
4477 loc
+= globals
->root
.srelgot
->reloc_count
++
4478 * RELOC_SIZE (htab
);
4479 bfd_elf64_swap_reloca_out (output_bfd
, &rela
, loc
);
4483 bfd_put_64 (output_bfd
,
4484 relocation
- dtpoff_base (info
),
4485 globals
->root
.sgot
->contents
+ off
4490 /* This TLS symbol is global. We emit a
4491 relocation to fixup the tls offset at load
4494 ELF64_R_INFO (indx
, R_AARCH64_TLS_DTPREL64
);
4497 (globals
->root
.sgot
->output_section
->vma
4498 + globals
->root
.sgot
->output_offset
+ off
4501 loc
= globals
->root
.srelgot
->contents
;
4502 loc
+= globals
->root
.srelgot
->reloc_count
++
4503 * RELOC_SIZE (globals
);
4504 bfd_elf64_swap_reloca_out (output_bfd
, &rela
, loc
);
4505 bfd_put_64 (output_bfd
, (bfd_vma
) 0,
4506 globals
->root
.sgot
->contents
+ off
4512 bfd_put_64 (output_bfd
, (bfd_vma
) 1,
4513 globals
->root
.sgot
->contents
+ off
);
4514 bfd_put_64 (output_bfd
,
4515 relocation
- dtpoff_base (info
),
4516 globals
->root
.sgot
->contents
+ off
4520 symbol_got_offset_mark (input_bfd
, h
, r_symndx
);
4524 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21
:
4525 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC
:
4526 if (! symbol_got_offset_mark_p (input_bfd
, h
, r_symndx
))
4528 bfd_boolean need_relocs
= FALSE
;
4533 off
= symbol_got_offset (input_bfd
, h
, r_symndx
);
4535 indx
= h
&& h
->dynindx
!= -1 ? h
->dynindx
: 0;
4538 (info
->shared
|| indx
!= 0) &&
4540 || ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
4541 || h
->root
.type
!= bfd_link_hash_undefweak
);
4543 BFD_ASSERT (globals
->root
.srelgot
!= NULL
);
4547 Elf_Internal_Rela rela
;
4550 rela
.r_addend
= relocation
- dtpoff_base (info
);
4554 rela
.r_info
= ELF64_R_INFO (indx
, R_AARCH64_TLS_TPREL64
);
4555 rela
.r_offset
= globals
->root
.sgot
->output_section
->vma
+
4556 globals
->root
.sgot
->output_offset
+ off
;
4558 loc
= globals
->root
.srelgot
->contents
;
4559 loc
+= globals
->root
.srelgot
->reloc_count
++
4560 * RELOC_SIZE (htab
);
4562 bfd_elf64_swap_reloca_out (output_bfd
, &rela
, loc
);
4564 bfd_put_64 (output_bfd
, rela
.r_addend
,
4565 globals
->root
.sgot
->contents
+ off
);
4568 bfd_put_64 (output_bfd
, relocation
- tpoff_base (info
),
4569 globals
->root
.sgot
->contents
+ off
);
4571 symbol_got_offset_mark (input_bfd
, h
, r_symndx
);
4575 case R_AARCH64_TLSLE_ADD_TPREL_LO12
:
4576 case R_AARCH64_TLSLE_ADD_TPREL_HI12
:
4577 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC
:
4578 case R_AARCH64_TLSLE_MOVW_TPREL_G2
:
4579 case R_AARCH64_TLSLE_MOVW_TPREL_G1
:
4580 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC
:
4581 case R_AARCH64_TLSLE_MOVW_TPREL_G0
:
4582 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC
:
4585 case R_AARCH64_TLSDESC_ADR_PAGE
:
4586 case R_AARCH64_TLSDESC_LD64_LO12_NC
:
4587 case R_AARCH64_TLSDESC_ADD_LO12_NC
:
4588 if (! symbol_tlsdesc_got_offset_mark_p (input_bfd
, h
, r_symndx
))
4590 bfd_boolean need_relocs
= FALSE
;
4591 int indx
= h
&& h
->dynindx
!= -1 ? h
->dynindx
: 0;
4592 bfd_vma off
= symbol_tlsdesc_got_offset (input_bfd
, h
, r_symndx
);
4594 need_relocs
= (h
== NULL
4595 || ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
4596 || h
->root
.type
!= bfd_link_hash_undefweak
);
4598 BFD_ASSERT (globals
->root
.srelgot
!= NULL
);
4599 BFD_ASSERT (globals
->root
.sgot
!= NULL
);
4604 Elf_Internal_Rela rela
;
4605 rela
.r_info
= ELF64_R_INFO (indx
, R_AARCH64_TLSDESC
);
4607 rela
.r_offset
= (globals
->root
.sgotplt
->output_section
->vma
4608 + globals
->root
.sgotplt
->output_offset
4609 + off
+ globals
->sgotplt_jump_table_size
);
4612 rela
.r_addend
= relocation
- dtpoff_base (info
);
4614 /* Allocate the next available slot in the PLT reloc
4615 section to hold our R_AARCH64_TLSDESC, the next
4616 available slot is determined from reloc_count,
4617 which we step. But note, reloc_count was
4618 artifically moved down while allocating slots for
4619 real PLT relocs such that all of the PLT relocs
4620 will fit above the initial reloc_count and the
4621 extra stuff will fit below. */
4622 loc
= globals
->root
.srelplt
->contents
;
4623 loc
+= globals
->root
.srelplt
->reloc_count
++
4624 * RELOC_SIZE (globals
);
4626 bfd_elf64_swap_reloca_out (output_bfd
, &rela
, loc
);
4628 bfd_put_64 (output_bfd
, (bfd_vma
) 0,
4629 globals
->root
.sgotplt
->contents
+ off
+
4630 globals
->sgotplt_jump_table_size
);
4631 bfd_put_64 (output_bfd
, (bfd_vma
) 0,
4632 globals
->root
.sgotplt
->contents
+ off
+
4633 globals
->sgotplt_jump_table_size
+
4637 symbol_tlsdesc_got_offset_mark (input_bfd
, h
, r_symndx
);
4646 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
4647 because such sections are not SEC_ALLOC and thus ld.so will
4648 not process them. */
4649 if (unresolved_reloc
4650 && !((input_section
->flags
& SEC_DEBUGGING
) != 0
4652 && _bfd_elf_section_offset (output_bfd
, info
, input_section
,
4653 +rel
->r_offset
) != (bfd_vma
) - 1)
4655 (*_bfd_error_handler
)
4657 ("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
4658 input_bfd
, input_section
, (long) rel
->r_offset
, howto
->name
,
4659 h
->root
.root
.string
);
4663 if (r
!= bfd_reloc_ok
&& r
!= bfd_reloc_continue
)
4667 case bfd_reloc_overflow
:
4668 /* If the overflowing reloc was to an undefined symbol,
4669 we have already printed one error message and there
4670 is no point complaining again. */
4672 h
->root
.type
!= bfd_link_hash_undefined
)
4673 && (!((*info
->callbacks
->reloc_overflow
)
4674 (info
, (h
? &h
->root
: NULL
), name
, howto
->name
,
4675 (bfd_vma
) 0, input_bfd
, input_section
,
4680 case bfd_reloc_undefined
:
4681 if (!((*info
->callbacks
->undefined_symbol
)
4682 (info
, name
, input_bfd
, input_section
,
4683 rel
->r_offset
, TRUE
)))
4687 case bfd_reloc_outofrange
:
4688 error_message
= _("out of range");
4691 case bfd_reloc_notsupported
:
4692 error_message
= _("unsupported relocation");
4695 case bfd_reloc_dangerous
:
4696 /* error_message should already be set. */
4700 error_message
= _("unknown error");
4704 BFD_ASSERT (error_message
!= NULL
);
4705 if (!((*info
->callbacks
->reloc_dangerous
)
4706 (info
, error_message
, input_bfd
, input_section
,
4717 /* Set the right machine number. */
4720 elf64_aarch64_object_p (bfd
*abfd
)
4722 bfd_default_set_arch_mach (abfd
, bfd_arch_aarch64
, bfd_mach_aarch64
);
4726 /* Function to keep AArch64 specific flags in the ELF header. */
4729 elf64_aarch64_set_private_flags (bfd
*abfd
, flagword flags
)
4731 if (elf_flags_init (abfd
) && elf_elfheader (abfd
)->e_flags
!= flags
)
4736 elf_elfheader (abfd
)->e_flags
= flags
;
4737 elf_flags_init (abfd
) = TRUE
;
4743 /* Copy backend specific data from one object module to another. */
4746 elf64_aarch64_copy_private_bfd_data (bfd
*ibfd
, bfd
*obfd
)
4750 if (!is_aarch64_elf (ibfd
) || !is_aarch64_elf (obfd
))
4753 in_flags
= elf_elfheader (ibfd
)->e_flags
;
4755 elf_elfheader (obfd
)->e_flags
= in_flags
;
4756 elf_flags_init (obfd
) = TRUE
;
4758 /* Also copy the EI_OSABI field. */
4759 elf_elfheader (obfd
)->e_ident
[EI_OSABI
] =
4760 elf_elfheader (ibfd
)->e_ident
[EI_OSABI
];
4762 /* Copy object attributes. */
4763 _bfd_elf_copy_obj_attributes (ibfd
, obfd
);
4768 /* Merge backend specific data from an object file to the output
4769 object file when linking. */
4772 elf64_aarch64_merge_private_bfd_data (bfd
*ibfd
, bfd
*obfd
)
4776 bfd_boolean flags_compatible
= TRUE
;
4779 /* Check if we have the same endianess. */
4780 if (!_bfd_generic_verify_endian_match (ibfd
, obfd
))
4783 if (!is_aarch64_elf (ibfd
) || !is_aarch64_elf (obfd
))
4786 /* The input BFD must have had its flags initialised. */
4787 /* The following seems bogus to me -- The flags are initialized in
4788 the assembler but I don't think an elf_flags_init field is
4789 written into the object. */
4790 /* BFD_ASSERT (elf_flags_init (ibfd)); */
4792 in_flags
= elf_elfheader (ibfd
)->e_flags
;
4793 out_flags
= elf_elfheader (obfd
)->e_flags
;
4795 if (!elf_flags_init (obfd
))
4797 /* If the input is the default architecture and had the default
4798 flags then do not bother setting the flags for the output
4799 architecture, instead allow future merges to do this. If no
4800 future merges ever set these flags then they will retain their
4801 uninitialised values, which surprise surprise, correspond
4802 to the default values. */
4803 if (bfd_get_arch_info (ibfd
)->the_default
4804 && elf_elfheader (ibfd
)->e_flags
== 0)
4807 elf_flags_init (obfd
) = TRUE
;
4808 elf_elfheader (obfd
)->e_flags
= in_flags
;
4810 if (bfd_get_arch (obfd
) == bfd_get_arch (ibfd
)
4811 && bfd_get_arch_info (obfd
)->the_default
)
4812 return bfd_set_arch_mach (obfd
, bfd_get_arch (ibfd
),
4813 bfd_get_mach (ibfd
));
4818 /* Identical flags must be compatible. */
4819 if (in_flags
== out_flags
)
4822 /* Check to see if the input BFD actually contains any sections. If
4823 not, its flags may not have been initialised either, but it
4824 cannot actually cause any incompatiblity. Do not short-circuit
4825 dynamic objects; their section list may be emptied by
4826 elf_link_add_object_symbols.
4828 Also check to see if there are no code sections in the input.
4829 In this case there is no need to check for code specific flags.
4830 XXX - do we need to worry about floating-point format compatability
4831 in data sections ? */
4832 if (!(ibfd
->flags
& DYNAMIC
))
4834 bfd_boolean null_input_bfd
= TRUE
;
4835 bfd_boolean only_data_sections
= TRUE
;
4837 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
4839 if ((bfd_get_section_flags (ibfd
, sec
)
4840 & (SEC_LOAD
| SEC_CODE
| SEC_HAS_CONTENTS
))
4841 == (SEC_LOAD
| SEC_CODE
| SEC_HAS_CONTENTS
))
4842 only_data_sections
= FALSE
;
4844 null_input_bfd
= FALSE
;
4848 if (null_input_bfd
|| only_data_sections
)
4852 return flags_compatible
;
4855 /* Display the flags field. */
4858 elf64_aarch64_print_private_bfd_data (bfd
*abfd
, void *ptr
)
4860 FILE *file
= (FILE *) ptr
;
4861 unsigned long flags
;
4863 BFD_ASSERT (abfd
!= NULL
&& ptr
!= NULL
);
4865 /* Print normal ELF private data. */
4866 _bfd_elf_print_private_bfd_data (abfd
, ptr
);
4868 flags
= elf_elfheader (abfd
)->e_flags
;
4869 /* Ignore init flag - it may not be set, despite the flags field
4870 containing valid data. */
4872 /* xgettext:c-format */
4873 fprintf (file
, _("private flags = %lx:"), elf_elfheader (abfd
)->e_flags
);
4876 fprintf (file
, _("<Unrecognised flag bits set>"));
4883 /* Update the got entry reference counts for the section being removed. */
4886 elf64_aarch64_gc_sweep_hook (bfd
*abfd ATTRIBUTE_UNUSED
,
4887 struct bfd_link_info
*info ATTRIBUTE_UNUSED
,
4888 asection
*sec ATTRIBUTE_UNUSED
,
4889 const Elf_Internal_Rela
*
4890 relocs ATTRIBUTE_UNUSED
)
4895 /* Adjust a symbol defined by a dynamic object and referenced by a
4896 regular object. The current definition is in some section of the
4897 dynamic object, but we're not including those sections. We have to
4898 change the definition to something the rest of the link can
4902 elf64_aarch64_adjust_dynamic_symbol (struct bfd_link_info
*info
,
4903 struct elf_link_hash_entry
*h
)
4905 struct elf64_aarch64_link_hash_table
*htab
;
4908 /* If this is a function, put it in the procedure linkage table. We
4909 will fill in the contents of the procedure linkage table later,
4910 when we know the address of the .got section. */
4911 if (h
->type
== STT_FUNC
|| h
->needs_plt
)
4913 if (h
->plt
.refcount
<= 0
4914 || SYMBOL_CALLS_LOCAL (info
, h
)
4915 || (ELF_ST_VISIBILITY (h
->other
) != STV_DEFAULT
4916 && h
->root
.type
== bfd_link_hash_undefweak
))
4918 /* This case can occur if we saw a CALL26 reloc in
4919 an input file, but the symbol wasn't referred to
4920 by a dynamic object or all references were
4921 garbage collected. In which case we can end up
4923 h
->plt
.offset
= (bfd_vma
) - 1;
4930 /* It's possible that we incorrectly decided a .plt reloc was
4931 needed for an R_X86_64_PC32 reloc to a non-function sym in
4932 check_relocs. We can't decide accurately between function and
4933 non-function syms in check-relocs; Objects loaded later in
4934 the link may change h->type. So fix it now. */
4935 h
->plt
.offset
= (bfd_vma
) - 1;
4938 /* If this is a weak symbol, and there is a real definition, the
4939 processor independent code will have arranged for us to see the
4940 real definition first, and we can just use the same value. */
4941 if (h
->u
.weakdef
!= NULL
)
4943 BFD_ASSERT (h
->u
.weakdef
->root
.type
== bfd_link_hash_defined
4944 || h
->u
.weakdef
->root
.type
== bfd_link_hash_defweak
);
4945 h
->root
.u
.def
.section
= h
->u
.weakdef
->root
.u
.def
.section
;
4946 h
->root
.u
.def
.value
= h
->u
.weakdef
->root
.u
.def
.value
;
4947 if (ELIMINATE_COPY_RELOCS
|| info
->nocopyreloc
)
4948 h
->non_got_ref
= h
->u
.weakdef
->non_got_ref
;
4952 /* If we are creating a shared library, we must presume that the
4953 only references to the symbol are via the global offset table.
4954 For such cases we need not do anything here; the relocations will
4955 be handled correctly by relocate_section. */
4959 /* If there are no references to this symbol that do not use the
4960 GOT, we don't need to generate a copy reloc. */
4961 if (!h
->non_got_ref
)
4964 /* If -z nocopyreloc was given, we won't generate them either. */
4965 if (info
->nocopyreloc
)
4971 /* We must allocate the symbol in our .dynbss section, which will
4972 become part of the .bss section of the executable. There will be
4973 an entry for this symbol in the .dynsym section. The dynamic
4974 object will contain position independent code, so all references
4975 from the dynamic object to this symbol will go through the global
4976 offset table. The dynamic linker will use the .dynsym entry to
4977 determine the address it must put in the global offset table, so
4978 both the dynamic object and the regular object will refer to the
4979 same memory location for the variable. */
4981 htab
= elf64_aarch64_hash_table (info
);
4983 /* We must generate a R_AARCH64_COPY reloc to tell the dynamic linker
4984 to copy the initial value out of the dynamic object and into the
4985 runtime process image. */
4986 if ((h
->root
.u
.def
.section
->flags
& SEC_ALLOC
) != 0 && h
->size
!= 0)
4988 htab
->srelbss
->size
+= RELOC_SIZE (htab
);
4994 return _bfd_elf_adjust_dynamic_copy (h
, s
);
4999 elf64_aarch64_allocate_local_symbols (bfd
*abfd
, unsigned number
)
5001 struct elf_aarch64_local_symbol
*locals
;
5002 locals
= elf64_aarch64_locals (abfd
);
5005 locals
= (struct elf_aarch64_local_symbol
*)
5006 bfd_zalloc (abfd
, number
* sizeof (struct elf_aarch64_local_symbol
));
5009 elf64_aarch64_locals (abfd
) = locals
;
5014 /* Look through the relocs for a section during the first phase. */
5017 elf64_aarch64_check_relocs (bfd
*abfd
, struct bfd_link_info
*info
,
5018 asection
*sec
, const Elf_Internal_Rela
*relocs
)
5020 Elf_Internal_Shdr
*symtab_hdr
;
5021 struct elf_link_hash_entry
**sym_hashes
;
5022 const Elf_Internal_Rela
*rel
;
5023 const Elf_Internal_Rela
*rel_end
;
5026 struct elf64_aarch64_link_hash_table
*htab
;
5028 unsigned long nsyms
;
5030 if (info
->relocatable
)
5033 BFD_ASSERT (is_aarch64_elf (abfd
));
5035 htab
= elf64_aarch64_hash_table (info
);
5038 symtab_hdr
= &elf_symtab_hdr (abfd
);
5039 sym_hashes
= elf_sym_hashes (abfd
);
5040 nsyms
= NUM_SHDR_ENTRIES (symtab_hdr
);
5042 rel_end
= relocs
+ sec
->reloc_count
;
5043 for (rel
= relocs
; rel
< rel_end
; rel
++)
5045 struct elf_link_hash_entry
*h
;
5046 unsigned long r_symndx
;
5047 unsigned int r_type
;
5049 r_symndx
= ELF64_R_SYM (rel
->r_info
);
5050 r_type
= ELF64_R_TYPE (rel
->r_info
);
5052 if (r_symndx
>= NUM_SHDR_ENTRIES (symtab_hdr
))
5054 (*_bfd_error_handler
) (_("%B: bad symbol index: %d"), abfd
,
5059 if (r_symndx
>= nsyms
5060 /* PR 9934: It is possible to have relocations that do not
5061 refer to symbols, thus it is also possible to have an
5062 object file containing relocations but no symbol table. */
5063 && (r_symndx
> 0 || nsyms
> 0))
5065 (*_bfd_error_handler
) (_("%B: bad symbol index: %d"), abfd
,
5070 if (nsyms
== 0 || r_symndx
< symtab_hdr
->sh_info
)
5074 h
= sym_hashes
[r_symndx
- symtab_hdr
->sh_info
];
5075 while (h
->root
.type
== bfd_link_hash_indirect
5076 || h
->root
.type
== bfd_link_hash_warning
)
5077 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
5080 /* Could be done earlier, if h were already available. */
5081 r_type
= aarch64_tls_transition (abfd
, info
, r_type
, h
, r_symndx
);
5085 case R_AARCH64_ABS64
:
5087 /* We don't need to handle relocs into sections not going into
5088 the "real" output. */
5089 if ((sec
->flags
& SEC_ALLOC
) == 0)
5097 h
->plt
.refcount
+= 1;
5098 h
->pointer_equality_needed
= 1;
5101 /* No need to do anything if we're not creating a shared
5107 struct elf_dyn_relocs
*p
;
5108 struct elf_dyn_relocs
**head
;
5110 /* We must copy these reloc types into the output file.
5111 Create a reloc section in dynobj and make room for
5115 if (htab
->root
.dynobj
== NULL
)
5116 htab
->root
.dynobj
= abfd
;
5118 sreloc
= _bfd_elf_make_dynamic_reloc_section
5119 (sec
, htab
->root
.dynobj
, 3, abfd
, /*rela? */ TRUE
);
5125 /* If this is a global symbol, we count the number of
5126 relocations we need for this symbol. */
5129 struct elf64_aarch64_link_hash_entry
*eh
;
5130 eh
= (struct elf64_aarch64_link_hash_entry
*) h
;
5131 head
= &eh
->dyn_relocs
;
5135 /* Track dynamic relocs needed for local syms too.
5136 We really need local syms available to do this
5141 Elf_Internal_Sym
*isym
;
5143 isym
= bfd_sym_from_r_symndx (&htab
->sym_cache
,
5148 s
= bfd_section_from_elf_index (abfd
, isym
->st_shndx
);
5152 /* Beware of type punned pointers vs strict aliasing
5154 vpp
= &(elf_section_data (s
)->local_dynrel
);
5155 head
= (struct elf_dyn_relocs
**) vpp
;
5159 if (p
== NULL
|| p
->sec
!= sec
)
5161 bfd_size_type amt
= sizeof *p
;
5162 p
= ((struct elf_dyn_relocs
*)
5163 bfd_zalloc (htab
->root
.dynobj
, amt
));
5176 /* RR: We probably want to keep a consistency check that
5177 there are no dangling GOT_PAGE relocs. */
5178 case R_AARCH64_LD64_GOT_LO12_NC
:
5179 case R_AARCH64_GOT_LD_PREL19
:
5180 case R_AARCH64_ADR_GOT_PAGE
:
5181 case R_AARCH64_TLSGD_ADR_PAGE21
:
5182 case R_AARCH64_TLSGD_ADD_LO12_NC
:
5183 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21
:
5184 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC
:
5185 case R_AARCH64_TLSLE_ADD_TPREL_LO12
:
5186 case R_AARCH64_TLSLE_ADD_TPREL_HI12
:
5187 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC
:
5188 case R_AARCH64_TLSLE_MOVW_TPREL_G2
:
5189 case R_AARCH64_TLSLE_MOVW_TPREL_G1
:
5190 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC
:
5191 case R_AARCH64_TLSLE_MOVW_TPREL_G0
:
5192 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC
:
5193 case R_AARCH64_TLSDESC_ADR_PAGE
:
5194 case R_AARCH64_TLSDESC_ADD_LO12_NC
:
5195 case R_AARCH64_TLSDESC_LD64_LO12_NC
:
5198 unsigned old_got_type
;
5200 got_type
= aarch64_reloc_got_type (r_type
);
5204 h
->got
.refcount
+= 1;
5205 old_got_type
= elf64_aarch64_hash_entry (h
)->got_type
;
5209 struct elf_aarch64_local_symbol
*locals
;
5211 if (!elf64_aarch64_allocate_local_symbols
5212 (abfd
, symtab_hdr
->sh_info
))
5215 locals
= elf64_aarch64_locals (abfd
);
5216 BFD_ASSERT (r_symndx
< symtab_hdr
->sh_info
);
5217 locals
[r_symndx
].got_refcount
+= 1;
5218 old_got_type
= locals
[r_symndx
].got_type
;
5221 /* If a variable is accessed with both general dynamic TLS
5222 methods, two slots may be created. */
5223 if (GOT_TLS_GD_ANY_P (old_got_type
) && GOT_TLS_GD_ANY_P (got_type
))
5224 got_type
|= old_got_type
;
5226 /* We will already have issued an error message if there
5227 is a TLS/non-TLS mismatch, based on the symbol type.
5228 So just combine any TLS types needed. */
5229 if (old_got_type
!= GOT_UNKNOWN
&& old_got_type
!= GOT_NORMAL
5230 && got_type
!= GOT_NORMAL
)
5231 got_type
|= old_got_type
;
5233 /* If the symbol is accessed by both IE and GD methods, we
5234 are able to relax. Turn off the GD flag, without
5235 messing up with any other kind of TLS types that may be
5237 if ((got_type
& GOT_TLS_IE
) && GOT_TLS_GD_ANY_P (got_type
))
5238 got_type
&= ~ (GOT_TLSDESC_GD
| GOT_TLS_GD
);
5240 if (old_got_type
!= got_type
)
5243 elf64_aarch64_hash_entry (h
)->got_type
= got_type
;
5246 struct elf_aarch64_local_symbol
*locals
;
5247 locals
= elf64_aarch64_locals (abfd
);
5248 BFD_ASSERT (r_symndx
< symtab_hdr
->sh_info
);
5249 locals
[r_symndx
].got_type
= got_type
;
5253 if (htab
->root
.sgot
== NULL
)
5255 if (htab
->root
.dynobj
== NULL
)
5256 htab
->root
.dynobj
= abfd
;
5257 if (!_bfd_elf_create_got_section (htab
->root
.dynobj
, info
))
5263 case R_AARCH64_ADR_PREL_PG_HI21_NC
:
5264 case R_AARCH64_ADR_PREL_PG_HI21
:
5265 case R_AARCH64_ADR_PREL_LO21
:
5266 if (h
!= NULL
&& info
->executable
)
5268 /* If this reloc is in a read-only section, we might
5269 need a copy reloc. We can't check reliably at this
5270 stage whether the section is read-only, as input
5271 sections have not yet been mapped to output sections.
5272 Tentatively set the flag for now, and correct in
5273 adjust_dynamic_symbol. */
5275 h
->plt
.refcount
+= 1;
5276 h
->pointer_equality_needed
= 1;
5278 /* FIXME:: RR need to handle these in shared libraries
5279 and essentially bomb out as these being non-PIC
5280 relocations in shared libraries. */
5283 case R_AARCH64_CALL26
:
5284 case R_AARCH64_JUMP26
:
5285 /* If this is a local symbol then we resolve it
5286 directly without creating a PLT entry. */
5291 h
->plt
.refcount
+= 1;
5298 /* Treat mapping symbols as special target symbols. */
5301 elf64_aarch64_is_target_special_symbol (bfd
*abfd ATTRIBUTE_UNUSED
,
5304 return bfd_is_aarch64_special_symbol_name (sym
->name
,
5305 BFD_AARCH64_SPECIAL_SYM_TYPE_ANY
);
5308 /* This is a copy of elf_find_function () from elf.c except that
5309 AArch64 mapping symbols are ignored when looking for function names. */
5312 aarch64_elf_find_function (bfd
*abfd ATTRIBUTE_UNUSED
,
5316 const char **filename_ptr
,
5317 const char **functionname_ptr
)
5319 const char *filename
= NULL
;
5320 asymbol
*func
= NULL
;
5321 bfd_vma low_func
= 0;
5324 for (p
= symbols
; *p
!= NULL
; p
++)
5328 q
= (elf_symbol_type
*) * p
;
5330 switch (ELF_ST_TYPE (q
->internal_elf_sym
.st_info
))
5335 filename
= bfd_asymbol_name (&q
->symbol
);
5339 /* Skip mapping symbols. */
5340 if ((q
->symbol
.flags
& BSF_LOCAL
)
5341 && (bfd_is_aarch64_special_symbol_name
5342 (q
->symbol
.name
, BFD_AARCH64_SPECIAL_SYM_TYPE_ANY
)))
5345 if (bfd_get_section (&q
->symbol
) == section
5346 && q
->symbol
.value
>= low_func
&& q
->symbol
.value
<= offset
)
5348 func
= (asymbol
*) q
;
5349 low_func
= q
->symbol
.value
;
5359 *filename_ptr
= filename
;
5360 if (functionname_ptr
)
5361 *functionname_ptr
= bfd_asymbol_name (func
);
5367 /* Find the nearest line to a particular section and offset, for error
5368 reporting. This code is a duplicate of the code in elf.c, except
5369 that it uses aarch64_elf_find_function. */
5372 elf64_aarch64_find_nearest_line (bfd
*abfd
,
5376 const char **filename_ptr
,
5377 const char **functionname_ptr
,
5378 unsigned int *line_ptr
)
5380 bfd_boolean found
= FALSE
;
5382 /* We skip _bfd_dwarf1_find_nearest_line since no known AArch64
5383 toolchain uses it. */
5385 if (_bfd_dwarf2_find_nearest_line (abfd
, dwarf_debug_sections
,
5386 section
, symbols
, offset
,
5387 filename_ptr
, functionname_ptr
,
5389 &elf_tdata (abfd
)->dwarf2_find_line_info
))
5391 if (!*functionname_ptr
)
5392 aarch64_elf_find_function (abfd
, section
, symbols
, offset
,
5393 *filename_ptr
? NULL
: filename_ptr
,
5399 if (!_bfd_stab_section_find_nearest_line (abfd
, symbols
, section
, offset
,
5400 &found
, filename_ptr
,
5401 functionname_ptr
, line_ptr
,
5402 &elf_tdata (abfd
)->line_info
))
5405 if (found
&& (*functionname_ptr
|| *line_ptr
))
5408 if (symbols
== NULL
)
5411 if (!aarch64_elf_find_function (abfd
, section
, symbols
, offset
,
5412 filename_ptr
, functionname_ptr
))
5420 elf64_aarch64_find_inliner_info (bfd
*abfd
,
5421 const char **filename_ptr
,
5422 const char **functionname_ptr
,
5423 unsigned int *line_ptr
)
5426 found
= _bfd_dwarf2_find_inliner_info
5427 (abfd
, filename_ptr
,
5428 functionname_ptr
, line_ptr
, &elf_tdata (abfd
)->dwarf2_find_line_info
);
5434 elf64_aarch64_post_process_headers (bfd
*abfd
,
5435 struct bfd_link_info
*link_info
5438 Elf_Internal_Ehdr
*i_ehdrp
; /* ELF file header, internal form. */
5440 i_ehdrp
= elf_elfheader (abfd
);
5441 i_ehdrp
->e_ident
[EI_OSABI
] = 0;
5442 i_ehdrp
->e_ident
[EI_ABIVERSION
] = AARCH64_ELF_ABI_VERSION
;
5445 static enum elf_reloc_type_class
5446 elf64_aarch64_reloc_type_class (const Elf_Internal_Rela
*rela
)
5448 switch ((int) ELF64_R_TYPE (rela
->r_info
))
5450 case R_AARCH64_RELATIVE
:
5451 return reloc_class_relative
;
5452 case R_AARCH64_JUMP_SLOT
:
5453 return reloc_class_plt
;
5454 case R_AARCH64_COPY
:
5455 return reloc_class_copy
;
5457 return reloc_class_normal
;
5461 /* Set the right machine number for an AArch64 ELF file. */
5464 elf64_aarch64_section_flags (flagword
*flags
, const Elf_Internal_Shdr
*hdr
)
5466 if (hdr
->sh_type
== SHT_NOTE
)
5467 *flags
|= SEC_LINK_ONCE
| SEC_LINK_DUPLICATES_SAME_CONTENTS
;
5472 /* Handle an AArch64 specific section when reading an object file. This is
5473 called when bfd_section_from_shdr finds a section with an unknown
5477 elf64_aarch64_section_from_shdr (bfd
*abfd
,
5478 Elf_Internal_Shdr
*hdr
,
5479 const char *name
, int shindex
)
5481 /* There ought to be a place to keep ELF backend specific flags, but
5482 at the moment there isn't one. We just keep track of the
5483 sections by their name, instead. Fortunately, the ABI gives
5484 names for all the AArch64 specific sections, so we will probably get
5486 switch (hdr
->sh_type
)
5488 case SHT_AARCH64_ATTRIBUTES
:
5495 if (!_bfd_elf_make_section_from_shdr (abfd
, hdr
, name
, shindex
))
5501 /* A structure used to record a list of sections, independently
5502 of the next and prev fields in the asection structure. */
5503 typedef struct section_list
5506 struct section_list
*next
;
5507 struct section_list
*prev
;
5511 /* Unfortunately we need to keep a list of sections for which
5512 an _aarch64_elf_section_data structure has been allocated. This
5513 is because it is possible for functions like elf64_aarch64_write_section
5514 to be called on a section which has had an elf_data_structure
5515 allocated for it (and so the used_by_bfd field is valid) but
5516 for which the AArch64 extended version of this structure - the
5517 _aarch64_elf_section_data structure - has not been allocated. */
5518 static section_list
*sections_with_aarch64_elf_section_data
= NULL
;
5521 record_section_with_aarch64_elf_section_data (asection
*sec
)
5523 struct section_list
*entry
;
5525 entry
= bfd_malloc (sizeof (*entry
));
5529 entry
->next
= sections_with_aarch64_elf_section_data
;
5531 if (entry
->next
!= NULL
)
5532 entry
->next
->prev
= entry
;
5533 sections_with_aarch64_elf_section_data
= entry
;
5536 static struct section_list
*
5537 find_aarch64_elf_section_entry (asection
*sec
)
5539 struct section_list
*entry
;
5540 static struct section_list
*last_entry
= NULL
;
5542 /* This is a short cut for the typical case where the sections are added
5543 to the sections_with_aarch64_elf_section_data list in forward order and
5544 then looked up here in backwards order. This makes a real difference
5545 to the ld-srec/sec64k.exp linker test. */
5546 entry
= sections_with_aarch64_elf_section_data
;
5547 if (last_entry
!= NULL
)
5549 if (last_entry
->sec
== sec
)
5551 else if (last_entry
->next
!= NULL
&& last_entry
->next
->sec
== sec
)
5552 entry
= last_entry
->next
;
5555 for (; entry
; entry
= entry
->next
)
5556 if (entry
->sec
== sec
)
5560 /* Record the entry prior to this one - it is the entry we are
5561 most likely to want to locate next time. Also this way if we
5562 have been called from
5563 unrecord_section_with_aarch64_elf_section_data () we will not
5564 be caching a pointer that is about to be freed. */
5565 last_entry
= entry
->prev
;
5571 unrecord_section_with_aarch64_elf_section_data (asection
*sec
)
5573 struct section_list
*entry
;
5575 entry
= find_aarch64_elf_section_entry (sec
);
5579 if (entry
->prev
!= NULL
)
5580 entry
->prev
->next
= entry
->next
;
5581 if (entry
->next
!= NULL
)
5582 entry
->next
->prev
= entry
->prev
;
5583 if (entry
== sections_with_aarch64_elf_section_data
)
5584 sections_with_aarch64_elf_section_data
= entry
->next
;
5593 struct bfd_link_info
*info
;
5596 int (*func
) (void *, const char *, Elf_Internal_Sym
*,
5597 asection
*, struct elf_link_hash_entry
*);
5598 } output_arch_syminfo
;
5600 enum map_symbol_type
5607 /* Output a single mapping symbol. */
5610 elf64_aarch64_output_map_sym (output_arch_syminfo
*osi
,
5611 enum map_symbol_type type
, bfd_vma offset
)
5613 static const char *names
[2] = { "$x", "$d" };
5614 Elf_Internal_Sym sym
;
5616 sym
.st_value
= (osi
->sec
->output_section
->vma
5617 + osi
->sec
->output_offset
+ offset
);
5620 sym
.st_info
= ELF_ST_INFO (STB_LOCAL
, STT_NOTYPE
);
5621 sym
.st_shndx
= osi
->sec_shndx
;
5622 return osi
->func (osi
->finfo
, names
[type
], &sym
, osi
->sec
, NULL
) == 1;
5627 /* Output mapping symbols for PLT entries associated with H. */
5630 elf64_aarch64_output_plt_map (struct elf_link_hash_entry
*h
, void *inf
)
5632 output_arch_syminfo
*osi
= (output_arch_syminfo
*) inf
;
5635 if (h
->root
.type
== bfd_link_hash_indirect
)
5638 if (h
->root
.type
== bfd_link_hash_warning
)
5639 /* When warning symbols are created, they **replace** the "real"
5640 entry in the hash table, thus we never get to see the real
5641 symbol in a hash traversal. So look at it now. */
5642 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
5644 if (h
->plt
.offset
== (bfd_vma
) - 1)
5647 addr
= h
->plt
.offset
;
5650 if (!elf64_aarch64_output_map_sym (osi
, AARCH64_MAP_INSN
, addr
))
5657 /* Output a single local symbol for a generated stub. */
5660 elf64_aarch64_output_stub_sym (output_arch_syminfo
*osi
, const char *name
,
5661 bfd_vma offset
, bfd_vma size
)
5663 Elf_Internal_Sym sym
;
5665 sym
.st_value
= (osi
->sec
->output_section
->vma
5666 + osi
->sec
->output_offset
+ offset
);
5669 sym
.st_info
= ELF_ST_INFO (STB_LOCAL
, STT_FUNC
);
5670 sym
.st_shndx
= osi
->sec_shndx
;
5671 return osi
->func (osi
->finfo
, name
, &sym
, osi
->sec
, NULL
) == 1;
5675 aarch64_map_one_stub (struct bfd_hash_entry
*gen_entry
, void *in_arg
)
5677 struct elf64_aarch64_stub_hash_entry
*stub_entry
;
5681 output_arch_syminfo
*osi
;
5683 /* Massage our args to the form they really have. */
5684 stub_entry
= (struct elf64_aarch64_stub_hash_entry
*) gen_entry
;
5685 osi
= (output_arch_syminfo
*) in_arg
;
5687 stub_sec
= stub_entry
->stub_sec
;
5689 /* Ensure this stub is attached to the current section being
5691 if (stub_sec
!= osi
->sec
)
5694 addr
= (bfd_vma
) stub_entry
->stub_offset
;
5696 stub_name
= stub_entry
->output_name
;
5698 switch (stub_entry
->stub_type
)
5700 case aarch64_stub_adrp_branch
:
5701 if (!elf64_aarch64_output_stub_sym (osi
, stub_name
, addr
,
5702 sizeof (aarch64_adrp_branch_stub
)))
5704 if (!elf64_aarch64_output_map_sym (osi
, AARCH64_MAP_INSN
, addr
))
5707 case aarch64_stub_long_branch
:
5708 if (!elf64_aarch64_output_stub_sym
5709 (osi
, stub_name
, addr
, sizeof (aarch64_long_branch_stub
)))
5711 if (!elf64_aarch64_output_map_sym (osi
, AARCH64_MAP_INSN
, addr
))
5713 if (!elf64_aarch64_output_map_sym (osi
, AARCH64_MAP_DATA
, addr
+ 16))
5723 /* Output mapping symbols for linker generated sections. */
5726 elf64_aarch64_output_arch_local_syms (bfd
*output_bfd
,
5727 struct bfd_link_info
*info
,
5729 int (*func
) (void *, const char *,
5732 struct elf_link_hash_entry
5735 output_arch_syminfo osi
;
5736 struct elf64_aarch64_link_hash_table
*htab
;
5738 htab
= elf64_aarch64_hash_table (info
);
5744 /* Long calls stubs. */
5745 if (htab
->stub_bfd
&& htab
->stub_bfd
->sections
)
5749 for (stub_sec
= htab
->stub_bfd
->sections
;
5750 stub_sec
!= NULL
; stub_sec
= stub_sec
->next
)
5752 /* Ignore non-stub sections. */
5753 if (!strstr (stub_sec
->name
, STUB_SUFFIX
))
5758 osi
.sec_shndx
= _bfd_elf_section_from_bfd_section
5759 (output_bfd
, osi
.sec
->output_section
);
5761 bfd_hash_traverse (&htab
->stub_hash_table
, aarch64_map_one_stub
,
5766 /* Finally, output mapping symbols for the PLT. */
5767 if (!htab
->root
.splt
|| htab
->root
.splt
->size
== 0)
5770 /* For now live without mapping symbols for the plt. */
5771 osi
.sec_shndx
= _bfd_elf_section_from_bfd_section
5772 (output_bfd
, htab
->root
.splt
->output_section
);
5773 osi
.sec
= htab
->root
.splt
;
5775 elf_link_hash_traverse (&htab
->root
, elf64_aarch64_output_plt_map
,
5782 /* Allocate target specific section data. */
5785 elf64_aarch64_new_section_hook (bfd
*abfd
, asection
*sec
)
5787 if (!sec
->used_by_bfd
)
5789 _aarch64_elf_section_data
*sdata
;
5790 bfd_size_type amt
= sizeof (*sdata
);
5792 sdata
= bfd_zalloc (abfd
, amt
);
5795 sec
->used_by_bfd
= sdata
;
5798 record_section_with_aarch64_elf_section_data (sec
);
5800 return _bfd_elf_new_section_hook (abfd
, sec
);
5805 unrecord_section_via_map_over_sections (bfd
*abfd ATTRIBUTE_UNUSED
,
5807 void *ignore ATTRIBUTE_UNUSED
)
5809 unrecord_section_with_aarch64_elf_section_data (sec
);
5813 elf64_aarch64_close_and_cleanup (bfd
*abfd
)
5816 bfd_map_over_sections (abfd
,
5817 unrecord_section_via_map_over_sections
, NULL
);
5819 return _bfd_elf_close_and_cleanup (abfd
);
5823 elf64_aarch64_bfd_free_cached_info (bfd
*abfd
)
5826 bfd_map_over_sections (abfd
,
5827 unrecord_section_via_map_over_sections
, NULL
);
5829 return _bfd_free_cached_info (abfd
);
5833 elf64_aarch64_is_function_type (unsigned int type
)
5835 return type
== STT_FUNC
;
5838 /* Create dynamic sections. This is different from the ARM backend in that
5839 the got, plt, gotplt and their relocation sections are all created in the
5840 standard part of the bfd elf backend. */
5843 elf64_aarch64_create_dynamic_sections (bfd
*dynobj
,
5844 struct bfd_link_info
*info
)
5846 struct elf64_aarch64_link_hash_table
*htab
;
5847 struct elf_link_hash_entry
*h
;
5849 if (!_bfd_elf_create_dynamic_sections (dynobj
, info
))
5852 htab
= elf64_aarch64_hash_table (info
);
5853 htab
->sdynbss
= bfd_get_linker_section (dynobj
, ".dynbss");
5855 htab
->srelbss
= bfd_get_linker_section (dynobj
, ".rela.bss");
5857 if (!htab
->sdynbss
|| (!info
->shared
&& !htab
->srelbss
))
5860 /* Define the symbol _GLOBAL_OFFSET_TABLE_ at the start of the
5861 dynobj's .got section. We don't do this in the linker script
5862 because we don't want to define the symbol if we are not creating
5863 a global offset table. */
5864 h
= _bfd_elf_define_linkage_sym (dynobj
, info
,
5865 htab
->root
.sgot
, "_GLOBAL_OFFSET_TABLE_");
5866 elf_hash_table (info
)->hgot
= h
;
5874 /* Allocate space in .plt, .got and associated reloc sections for
5878 elf64_aarch64_allocate_dynrelocs (struct elf_link_hash_entry
*h
, void *inf
)
5880 struct bfd_link_info
*info
;
5881 struct elf64_aarch64_link_hash_table
*htab
;
5882 struct elf64_aarch64_link_hash_entry
*eh
;
5883 struct elf_dyn_relocs
*p
;
5885 /* An example of a bfd_link_hash_indirect symbol is versioned
5886 symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
5887 -> __gxx_personality_v0(bfd_link_hash_defined)
5889 There is no need to process bfd_link_hash_indirect symbols here
5890 because we will also be presented with the concrete instance of
5891 the symbol and elf64_aarch64_copy_indirect_symbol () will have been
5892 called to copy all relevant data from the generic to the concrete
5895 if (h
->root
.type
== bfd_link_hash_indirect
)
5898 if (h
->root
.type
== bfd_link_hash_warning
)
5899 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
5901 info
= (struct bfd_link_info
*) inf
;
5902 htab
= elf64_aarch64_hash_table (info
);
5904 if (htab
->root
.dynamic_sections_created
&& h
->plt
.refcount
> 0)
5906 /* Make sure this symbol is output as a dynamic symbol.
5907 Undefined weak syms won't yet be marked as dynamic. */
5908 if (h
->dynindx
== -1 && !h
->forced_local
)
5910 if (!bfd_elf_link_record_dynamic_symbol (info
, h
))
5914 if (info
->shared
|| WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h
))
5916 asection
*s
= htab
->root
.splt
;
5918 /* If this is the first .plt entry, make room for the special
5921 s
->size
+= htab
->plt_header_size
;
5923 h
->plt
.offset
= s
->size
;
5925 /* If this symbol is not defined in a regular file, and we are
5926 not generating a shared library, then set the symbol to this
5927 location in the .plt. This is required to make function
5928 pointers compare as equal between the normal executable and
5929 the shared library. */
5930 if (!info
->shared
&& !h
->def_regular
)
5932 h
->root
.u
.def
.section
= s
;
5933 h
->root
.u
.def
.value
= h
->plt
.offset
;
5936 /* Make room for this entry. For now we only create the
5937 small model PLT entries. We later need to find a way
5938 of relaxing into these from the large model PLT entries. */
5939 s
->size
+= PLT_SMALL_ENTRY_SIZE
;
5941 /* We also need to make an entry in the .got.plt section, which
5942 will be placed in the .got section by the linker script. */
5943 htab
->root
.sgotplt
->size
+= GOT_ENTRY_SIZE
;
5945 /* We also need to make an entry in the .rela.plt section. */
5946 htab
->root
.srelplt
->size
+= RELOC_SIZE (htab
);
5948 /* We need to ensure that all GOT entries that serve the PLT
5949 are consecutive with the special GOT slots [0] [1] and
5950 [2]. Any addtional relocations, such as
5951 R_AARCH64_TLSDESC, must be placed after the PLT related
5952 entries. We abuse the reloc_count such that during
5953 sizing we adjust reloc_count to indicate the number of
5954 PLT related reserved entries. In subsequent phases when
5955 filling in the contents of the reloc entries, PLT related
5956 entries are placed by computing their PLT index (0
5957 .. reloc_count). While other none PLT relocs are placed
5958 at the slot indicated by reloc_count and reloc_count is
5961 htab
->root
.srelplt
->reloc_count
++;
5965 h
->plt
.offset
= (bfd_vma
) - 1;
5971 h
->plt
.offset
= (bfd_vma
) - 1;
5975 eh
= (struct elf64_aarch64_link_hash_entry
*) h
;
5976 eh
->tlsdesc_got_jump_table_offset
= (bfd_vma
) - 1;
5978 if (h
->got
.refcount
> 0)
5981 unsigned got_type
= elf64_aarch64_hash_entry (h
)->got_type
;
5983 h
->got
.offset
= (bfd_vma
) - 1;
5985 dyn
= htab
->root
.dynamic_sections_created
;
5987 /* Make sure this symbol is output as a dynamic symbol.
5988 Undefined weak syms won't yet be marked as dynamic. */
5989 if (dyn
&& h
->dynindx
== -1 && !h
->forced_local
)
5991 if (!bfd_elf_link_record_dynamic_symbol (info
, h
))
5995 if (got_type
== GOT_UNKNOWN
)
5998 else if (got_type
== GOT_NORMAL
)
6000 h
->got
.offset
= htab
->root
.sgot
->size
;
6001 htab
->root
.sgot
->size
+= GOT_ENTRY_SIZE
;
6002 if ((ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
6003 || h
->root
.type
!= bfd_link_hash_undefweak
)
6005 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn
, 0, h
)))
6007 htab
->root
.srelgot
->size
+= RELOC_SIZE (htab
);
6013 if (got_type
& GOT_TLSDESC_GD
)
6015 eh
->tlsdesc_got_jump_table_offset
=
6016 (htab
->root
.sgotplt
->size
6017 - aarch64_compute_jump_table_size (htab
));
6018 htab
->root
.sgotplt
->size
+= GOT_ENTRY_SIZE
* 2;
6019 h
->got
.offset
= (bfd_vma
) - 2;
6022 if (got_type
& GOT_TLS_GD
)
6024 h
->got
.offset
= htab
->root
.sgot
->size
;
6025 htab
->root
.sgot
->size
+= GOT_ENTRY_SIZE
* 2;
6028 if (got_type
& GOT_TLS_IE
)
6030 h
->got
.offset
= htab
->root
.sgot
->size
;
6031 htab
->root
.sgot
->size
+= GOT_ENTRY_SIZE
;
6034 indx
= h
&& h
->dynindx
!= -1 ? h
->dynindx
: 0;
6035 if ((ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
6036 || h
->root
.type
!= bfd_link_hash_undefweak
)
6039 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn
, 0, h
)))
6041 if (got_type
& GOT_TLSDESC_GD
)
6043 htab
->root
.srelplt
->size
+= RELOC_SIZE (htab
);
6044 /* Note reloc_count not incremented here! We have
6045 already adjusted reloc_count for this relocation
6048 /* TLSDESC PLT is now needed, but not yet determined. */
6049 htab
->tlsdesc_plt
= (bfd_vma
) - 1;
6052 if (got_type
& GOT_TLS_GD
)
6053 htab
->root
.srelgot
->size
+= RELOC_SIZE (htab
) * 2;
6055 if (got_type
& GOT_TLS_IE
)
6056 htab
->root
.srelgot
->size
+= RELOC_SIZE (htab
);
6062 h
->got
.offset
= (bfd_vma
) - 1;
6065 if (eh
->dyn_relocs
== NULL
)
6068 /* In the shared -Bsymbolic case, discard space allocated for
6069 dynamic pc-relative relocs against symbols which turn out to be
6070 defined in regular objects. For the normal shared case, discard
6071 space for pc-relative relocs that have become local due to symbol
6072 visibility changes. */
6076 /* Relocs that use pc_count are those that appear on a call
6077 insn, or certain REL relocs that can generated via assembly.
6078 We want calls to protected symbols to resolve directly to the
6079 function rather than going via the plt. If people want
6080 function pointer comparisons to work as expected then they
6081 should avoid writing weird assembly. */
6082 if (SYMBOL_CALLS_LOCAL (info
, h
))
6084 struct elf_dyn_relocs
**pp
;
6086 for (pp
= &eh
->dyn_relocs
; (p
= *pp
) != NULL
;)
6088 p
->count
-= p
->pc_count
;
6097 /* Also discard relocs on undefined weak syms with non-default
6099 if (eh
->dyn_relocs
!= NULL
&& h
->root
.type
== bfd_link_hash_undefweak
)
6101 if (ELF_ST_VISIBILITY (h
->other
) != STV_DEFAULT
)
6102 eh
->dyn_relocs
= NULL
;
6104 /* Make sure undefined weak symbols are output as a dynamic
6106 else if (h
->dynindx
== -1
6108 && !bfd_elf_link_record_dynamic_symbol (info
, h
))
6113 else if (ELIMINATE_COPY_RELOCS
)
6115 /* For the non-shared case, discard space for relocs against
6116 symbols which turn out to need copy relocs or are not
6122 || (htab
->root
.dynamic_sections_created
6123 && (h
->root
.type
== bfd_link_hash_undefweak
6124 || h
->root
.type
== bfd_link_hash_undefined
))))
6126 /* Make sure this symbol is output as a dynamic symbol.
6127 Undefined weak syms won't yet be marked as dynamic. */
6128 if (h
->dynindx
== -1
6130 && !bfd_elf_link_record_dynamic_symbol (info
, h
))
6133 /* If that succeeded, we know we'll be keeping all the
6135 if (h
->dynindx
!= -1)
6139 eh
->dyn_relocs
= NULL
;
6144 /* Finally, allocate space. */
6145 for (p
= eh
->dyn_relocs
; p
!= NULL
; p
= p
->next
)
6149 sreloc
= elf_section_data (p
->sec
)->sreloc
;
6151 BFD_ASSERT (sreloc
!= NULL
);
6153 sreloc
->size
+= p
->count
* RELOC_SIZE (htab
);
6162 /* This is the most important function of all . Innocuosly named
6165 elf64_aarch64_size_dynamic_sections (bfd
*output_bfd ATTRIBUTE_UNUSED
,
6166 struct bfd_link_info
*info
)
6168 struct elf64_aarch64_link_hash_table
*htab
;
6174 htab
= elf64_aarch64_hash_table ((info
));
6175 dynobj
= htab
->root
.dynobj
;
6177 BFD_ASSERT (dynobj
!= NULL
);
6179 if (htab
->root
.dynamic_sections_created
)
6181 if (info
->executable
)
6183 s
= bfd_get_linker_section (dynobj
, ".interp");
6186 s
->size
= sizeof ELF_DYNAMIC_INTERPRETER
;
6187 s
->contents
= (unsigned char *) ELF_DYNAMIC_INTERPRETER
;
6191 /* Set up .got offsets for local syms, and space for local dynamic
6193 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
6195 struct elf_aarch64_local_symbol
*locals
= NULL
;
6196 Elf_Internal_Shdr
*symtab_hdr
;
6200 if (!is_aarch64_elf (ibfd
))
6203 for (s
= ibfd
->sections
; s
!= NULL
; s
= s
->next
)
6205 struct elf_dyn_relocs
*p
;
6207 for (p
= (struct elf_dyn_relocs
*)
6208 (elf_section_data (s
)->local_dynrel
); p
!= NULL
; p
= p
->next
)
6210 if (!bfd_is_abs_section (p
->sec
)
6211 && bfd_is_abs_section (p
->sec
->output_section
))
6213 /* Input section has been discarded, either because
6214 it is a copy of a linkonce section or due to
6215 linker script /DISCARD/, so we'll be discarding
6218 else if (p
->count
!= 0)
6220 srel
= elf_section_data (p
->sec
)->sreloc
;
6221 srel
->size
+= p
->count
* RELOC_SIZE (htab
);
6222 if ((p
->sec
->output_section
->flags
& SEC_READONLY
) != 0)
6223 info
->flags
|= DF_TEXTREL
;
6228 locals
= elf64_aarch64_locals (ibfd
);
6232 symtab_hdr
= &elf_symtab_hdr (ibfd
);
6233 srel
= htab
->root
.srelgot
;
6234 for (i
= 0; i
< symtab_hdr
->sh_info
; i
++)
6236 locals
[i
].got_offset
= (bfd_vma
) - 1;
6237 locals
[i
].tlsdesc_got_jump_table_offset
= (bfd_vma
) - 1;
6238 if (locals
[i
].got_refcount
> 0)
6240 unsigned got_type
= locals
[i
].got_type
;
6241 if (got_type
& GOT_TLSDESC_GD
)
6243 locals
[i
].tlsdesc_got_jump_table_offset
=
6244 (htab
->root
.sgotplt
->size
6245 - aarch64_compute_jump_table_size (htab
));
6246 htab
->root
.sgotplt
->size
+= GOT_ENTRY_SIZE
* 2;
6247 locals
[i
].got_offset
= (bfd_vma
) - 2;
6250 if (got_type
& GOT_TLS_GD
)
6252 locals
[i
].got_offset
= htab
->root
.sgot
->size
;
6253 htab
->root
.sgot
->size
+= GOT_ENTRY_SIZE
* 2;
6256 if (got_type
& GOT_TLS_IE
)
6258 locals
[i
].got_offset
= htab
->root
.sgot
->size
;
6259 htab
->root
.sgot
->size
+= GOT_ENTRY_SIZE
;
6262 if (got_type
== GOT_UNKNOWN
)
6266 if (got_type
== GOT_NORMAL
)
6272 if (got_type
& GOT_TLSDESC_GD
)
6274 htab
->root
.srelplt
->size
+= RELOC_SIZE (htab
);
6275 /* Note RELOC_COUNT not incremented here! */
6276 htab
->tlsdesc_plt
= (bfd_vma
) - 1;
6279 if (got_type
& GOT_TLS_GD
)
6280 htab
->root
.srelgot
->size
+= RELOC_SIZE (htab
) * 2;
6282 if (got_type
& GOT_TLS_IE
)
6283 htab
->root
.srelgot
->size
+= RELOC_SIZE (htab
);
6288 locals
[i
].got_refcount
= (bfd_vma
) - 1;
6294 /* Allocate global sym .plt and .got entries, and space for global
6295 sym dynamic relocs. */
6296 elf_link_hash_traverse (&htab
->root
, elf64_aarch64_allocate_dynrelocs
,
6300 /* For every jump slot reserved in the sgotplt, reloc_count is
6301 incremented. However, when we reserve space for TLS descriptors,
6302 it's not incremented, so in order to compute the space reserved
6303 for them, it suffices to multiply the reloc count by the jump
6306 if (htab
->root
.srelplt
)
6307 htab
->sgotplt_jump_table_size
= aarch64_compute_jump_table_size (htab
);
6309 if (htab
->tlsdesc_plt
)
6311 if (htab
->root
.splt
->size
== 0)
6312 htab
->root
.splt
->size
+= PLT_ENTRY_SIZE
;
6314 htab
->tlsdesc_plt
= htab
->root
.splt
->size
;
6315 htab
->root
.splt
->size
+= PLT_TLSDESC_ENTRY_SIZE
;
6317 /* If we're not using lazy TLS relocations, don't generate the
6318 GOT entry required. */
6319 if (!(info
->flags
& DF_BIND_NOW
))
6321 htab
->dt_tlsdesc_got
= htab
->root
.sgot
->size
;
6322 htab
->root
.sgot
->size
+= GOT_ENTRY_SIZE
;
6326 /* We now have determined the sizes of the various dynamic sections.
6327 Allocate memory for them. */
6329 for (s
= dynobj
->sections
; s
!= NULL
; s
= s
->next
)
6331 if ((s
->flags
& SEC_LINKER_CREATED
) == 0)
6334 if (s
== htab
->root
.splt
6335 || s
== htab
->root
.sgot
6336 || s
== htab
->root
.sgotplt
6337 || s
== htab
->root
.iplt
6338 || s
== htab
->root
.igotplt
|| s
== htab
->sdynbss
)
6340 /* Strip this section if we don't need it; see the
6343 else if (CONST_STRNEQ (bfd_get_section_name (dynobj
, s
), ".rela"))
6345 if (s
->size
!= 0 && s
!= htab
->root
.srelplt
)
6348 /* We use the reloc_count field as a counter if we need
6349 to copy relocs into the output file. */
6350 if (s
!= htab
->root
.srelplt
)
6355 /* It's not one of our sections, so don't allocate space. */
6361 /* If we don't need this section, strip it from the
6362 output file. This is mostly to handle .rela.bss and
6363 .rela.plt. We must create both sections in
6364 create_dynamic_sections, because they must be created
6365 before the linker maps input sections to output
6366 sections. The linker does that before
6367 adjust_dynamic_symbol is called, and it is that
6368 function which decides whether anything needs to go
6369 into these sections. */
6371 s
->flags
|= SEC_EXCLUDE
;
6375 if ((s
->flags
& SEC_HAS_CONTENTS
) == 0)
6378 /* Allocate memory for the section contents. We use bfd_zalloc
6379 here in case unused entries are not reclaimed before the
6380 section's contents are written out. This should not happen,
6381 but this way if it does, we get a R_AARCH64_NONE reloc instead
6383 s
->contents
= (bfd_byte
*) bfd_zalloc (dynobj
, s
->size
);
6384 if (s
->contents
== NULL
)
6388 if (htab
->root
.dynamic_sections_created
)
6390 /* Add some entries to the .dynamic section. We fill in the
6391 values later, in elf64_aarch64_finish_dynamic_sections, but we
6392 must add the entries now so that we get the correct size for
6393 the .dynamic section. The DT_DEBUG entry is filled in by the
6394 dynamic linker and used by the debugger. */
6395 #define add_dynamic_entry(TAG, VAL) \
6396 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
6398 if (info
->executable
)
6400 if (!add_dynamic_entry (DT_DEBUG
, 0))
6404 if (htab
->root
.splt
->size
!= 0)
6406 if (!add_dynamic_entry (DT_PLTGOT
, 0)
6407 || !add_dynamic_entry (DT_PLTRELSZ
, 0)
6408 || !add_dynamic_entry (DT_PLTREL
, DT_RELA
)
6409 || !add_dynamic_entry (DT_JMPREL
, 0))
6412 if (htab
->tlsdesc_plt
6413 && (!add_dynamic_entry (DT_TLSDESC_PLT
, 0)
6414 || !add_dynamic_entry (DT_TLSDESC_GOT
, 0)))
6420 if (!add_dynamic_entry (DT_RELA
, 0)
6421 || !add_dynamic_entry (DT_RELASZ
, 0)
6422 || !add_dynamic_entry (DT_RELAENT
, RELOC_SIZE (htab
)))
6425 /* If any dynamic relocs apply to a read-only section,
6426 then we need a DT_TEXTREL entry. */
6427 if ((info
->flags
& DF_TEXTREL
) != 0)
6429 if (!add_dynamic_entry (DT_TEXTREL
, 0))
6434 #undef add_dynamic_entry
6442 elf64_aarch64_update_plt_entry (bfd
*output_bfd
,
6443 unsigned int r_type
,
6444 bfd_byte
*plt_entry
, bfd_vma value
)
6446 reloc_howto_type
*howto
;
6447 howto
= elf64_aarch64_howto_from_type (r_type
);
6448 bfd_elf_aarch64_put_addend (output_bfd
, plt_entry
, howto
, value
);
6452 elf64_aarch64_create_small_pltn_entry (struct elf_link_hash_entry
*h
,
6453 struct elf64_aarch64_link_hash_table
6454 *htab
, bfd
*output_bfd
)
6456 bfd_byte
*plt_entry
;
6459 bfd_vma gotplt_entry_address
;
6460 bfd_vma plt_entry_address
;
6461 Elf_Internal_Rela rela
;
6464 plt_index
= (h
->plt
.offset
- htab
->plt_header_size
) / htab
->plt_entry_size
;
6466 /* Offset in the GOT is PLT index plus got GOT headers(3)
6468 got_offset
= (plt_index
+ 3) * GOT_ENTRY_SIZE
;
6469 plt_entry
= htab
->root
.splt
->contents
+ h
->plt
.offset
;
6470 plt_entry_address
= htab
->root
.splt
->output_section
->vma
6471 + htab
->root
.splt
->output_section
->output_offset
+ h
->plt
.offset
;
6472 gotplt_entry_address
= htab
->root
.sgotplt
->output_section
->vma
+
6473 htab
->root
.sgotplt
->output_offset
+ got_offset
;
6475 /* Copy in the boiler-plate for the PLTn entry. */
6476 memcpy (plt_entry
, elf64_aarch64_small_plt_entry
, PLT_SMALL_ENTRY_SIZE
);
6478 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
6479 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
6480 elf64_aarch64_update_plt_entry (output_bfd
, R_AARCH64_ADR_PREL_PG_HI21
,
6482 PG (gotplt_entry_address
) -
6483 PG (plt_entry_address
));
6485 /* Fill in the lo12 bits for the load from the pltgot. */
6486 elf64_aarch64_update_plt_entry (output_bfd
, R_AARCH64_LDST64_ABS_LO12_NC
,
6488 PG_OFFSET (gotplt_entry_address
));
6490 /* Fill in the the lo12 bits for the add from the pltgot entry. */
6491 elf64_aarch64_update_plt_entry (output_bfd
, R_AARCH64_ADD_ABS_LO12_NC
,
6493 PG_OFFSET (gotplt_entry_address
));
6495 /* All the GOTPLT Entries are essentially initialized to PLT0. */
6496 bfd_put_64 (output_bfd
,
6497 (htab
->root
.splt
->output_section
->vma
6498 + htab
->root
.splt
->output_offset
),
6499 htab
->root
.sgotplt
->contents
+ got_offset
);
6501 /* Fill in the entry in the .rela.plt section. */
6502 rela
.r_offset
= gotplt_entry_address
;
6503 rela
.r_info
= ELF64_R_INFO (h
->dynindx
, R_AARCH64_JUMP_SLOT
);
6506 /* Compute the relocation entry to used based on PLT index and do
6507 not adjust reloc_count. The reloc_count has already been adjusted
6508 to account for this entry. */
6509 loc
= htab
->root
.srelplt
->contents
+ plt_index
* RELOC_SIZE (htab
);
6510 bfd_elf64_swap_reloca_out (output_bfd
, &rela
, loc
);
6513 /* Size sections even though they're not dynamic. We use it to setup
6514 _TLS_MODULE_BASE_, if needed. */
6517 elf64_aarch64_always_size_sections (bfd
*output_bfd
,
6518 struct bfd_link_info
*info
)
6522 if (info
->relocatable
)
6525 tls_sec
= elf_hash_table (info
)->tls_sec
;
6529 struct elf_link_hash_entry
*tlsbase
;
6531 tlsbase
= elf_link_hash_lookup (elf_hash_table (info
),
6532 "_TLS_MODULE_BASE_", TRUE
, TRUE
, FALSE
);
6536 struct bfd_link_hash_entry
*h
= NULL
;
6537 const struct elf_backend_data
*bed
=
6538 get_elf_backend_data (output_bfd
);
6540 if (!(_bfd_generic_link_add_one_symbol
6541 (info
, output_bfd
, "_TLS_MODULE_BASE_", BSF_LOCAL
,
6542 tls_sec
, 0, NULL
, FALSE
, bed
->collect
, &h
)))
6545 tlsbase
->type
= STT_TLS
;
6546 tlsbase
= (struct elf_link_hash_entry
*) h
;
6547 tlsbase
->def_regular
= 1;
6548 tlsbase
->other
= STV_HIDDEN
;
6549 (*bed
->elf_backend_hide_symbol
) (info
, tlsbase
, TRUE
);
6556 /* Finish up dynamic symbol handling. We set the contents of various
6557 dynamic sections here. */
6559 elf64_aarch64_finish_dynamic_symbol (bfd
*output_bfd
,
6560 struct bfd_link_info
*info
,
6561 struct elf_link_hash_entry
*h
,
6562 Elf_Internal_Sym
*sym
)
6564 struct elf64_aarch64_link_hash_table
*htab
;
6565 htab
= elf64_aarch64_hash_table (info
);
6567 if (h
->plt
.offset
!= (bfd_vma
) - 1)
6569 /* This symbol has an entry in the procedure linkage table. Set
6572 if (h
->dynindx
== -1
6573 || htab
->root
.splt
== NULL
6574 || htab
->root
.sgotplt
== NULL
|| htab
->root
.srelplt
== NULL
)
6577 elf64_aarch64_create_small_pltn_entry (h
, htab
, output_bfd
);
6578 if (!h
->def_regular
)
6580 /* Mark the symbol as undefined, rather than as defined in
6581 the .plt section. Leave the value alone. This is a clue
6582 for the dynamic linker, to make function pointer
6583 comparisons work between an application and shared
6585 sym
->st_shndx
= SHN_UNDEF
;
6589 if (h
->got
.offset
!= (bfd_vma
) - 1
6590 && elf64_aarch64_hash_entry (h
)->got_type
== GOT_NORMAL
)
6592 Elf_Internal_Rela rela
;
6595 /* This symbol has an entry in the global offset table. Set it
6597 if (htab
->root
.sgot
== NULL
|| htab
->root
.srelgot
== NULL
)
6600 rela
.r_offset
= (htab
->root
.sgot
->output_section
->vma
6601 + htab
->root
.sgot
->output_offset
6602 + (h
->got
.offset
& ~(bfd_vma
) 1));
6604 if (info
->shared
&& SYMBOL_REFERENCES_LOCAL (info
, h
))
6606 if (!h
->def_regular
)
6609 BFD_ASSERT ((h
->got
.offset
& 1) != 0);
6610 rela
.r_info
= ELF64_R_INFO (0, R_AARCH64_RELATIVE
);
6611 rela
.r_addend
= (h
->root
.u
.def
.value
6612 + h
->root
.u
.def
.section
->output_section
->vma
6613 + h
->root
.u
.def
.section
->output_offset
);
6617 BFD_ASSERT ((h
->got
.offset
& 1) == 0);
6618 bfd_put_64 (output_bfd
, (bfd_vma
) 0,
6619 htab
->root
.sgot
->contents
+ h
->got
.offset
);
6620 rela
.r_info
= ELF64_R_INFO (h
->dynindx
, R_AARCH64_GLOB_DAT
);
6624 loc
= htab
->root
.srelgot
->contents
;
6625 loc
+= htab
->root
.srelgot
->reloc_count
++ * RELOC_SIZE (htab
);
6626 bfd_elf64_swap_reloca_out (output_bfd
, &rela
, loc
);
6631 Elf_Internal_Rela rela
;
6634 /* This symbol needs a copy reloc. Set it up. */
6636 if (h
->dynindx
== -1
6637 || (h
->root
.type
!= bfd_link_hash_defined
6638 && h
->root
.type
!= bfd_link_hash_defweak
)
6639 || htab
->srelbss
== NULL
)
6642 rela
.r_offset
= (h
->root
.u
.def
.value
6643 + h
->root
.u
.def
.section
->output_section
->vma
6644 + h
->root
.u
.def
.section
->output_offset
);
6645 rela
.r_info
= ELF64_R_INFO (h
->dynindx
, R_AARCH64_COPY
);
6647 loc
= htab
->srelbss
->contents
;
6648 loc
+= htab
->srelbss
->reloc_count
++ * RELOC_SIZE (htab
);
6649 bfd_elf64_swap_reloca_out (output_bfd
, &rela
, loc
);
6652 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. SYM may
6653 be NULL for local symbols. */
6655 && (h
== elf_hash_table (info
)->hdynamic
6656 || h
== elf_hash_table (info
)->hgot
))
6657 sym
->st_shndx
= SHN_ABS
;
6663 elf64_aarch64_init_small_plt0_entry (bfd
*output_bfd ATTRIBUTE_UNUSED
,
6664 struct elf64_aarch64_link_hash_table
6667 /* Fill in PLT0. Fixme:RR Note this doesn't distinguish between
6668 small and large plts and at the minute just generates
6671 /* PLT0 of the small PLT looks like this -
6672 stp x16, x30, [sp, #-16]! // Save the reloc and lr on stack.
6673 adrp x16, PLT_GOT + 16 // Get the page base of the GOTPLT
6674 ldr x17, [x16, #:lo12:PLT_GOT+16] // Load the address of the
6676 add x16, x16, #:lo12:PLT_GOT+16 // Load the lo12 bits of the
6677 // GOTPLT entry for this.
6680 bfd_vma plt_got_base
;
6684 memcpy (htab
->root
.splt
->contents
, elf64_aarch64_small_plt0_entry
,
6686 elf_section_data (htab
->root
.splt
->output_section
)->this_hdr
.sh_entsize
=
6689 plt_got_base
= (htab
->root
.sgotplt
->output_section
->vma
6690 + htab
->root
.sgotplt
->output_offset
);
6692 plt_base
= htab
->root
.splt
->output_section
->vma
+
6693 htab
->root
.splt
->output_section
->output_offset
;
6695 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
6696 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
6697 elf64_aarch64_update_plt_entry (output_bfd
, R_AARCH64_ADR_PREL_PG_HI21
,
6698 htab
->root
.splt
->contents
+ 4,
6699 PG (plt_got_base
+ 16) - PG (plt_base
+ 4));
6701 elf64_aarch64_update_plt_entry (output_bfd
, R_AARCH64_LDST64_ABS_LO12_NC
,
6702 htab
->root
.splt
->contents
+ 8,
6703 PG_OFFSET (plt_got_base
+ 16));
6705 elf64_aarch64_update_plt_entry (output_bfd
, R_AARCH64_ADD_ABS_LO12_NC
,
6706 htab
->root
.splt
->contents
+ 12,
6707 PG_OFFSET (plt_got_base
+ 16));
6711 elf64_aarch64_finish_dynamic_sections (bfd
*output_bfd
,
6712 struct bfd_link_info
*info
)
6714 struct elf64_aarch64_link_hash_table
*htab
;
6718 htab
= elf64_aarch64_hash_table (info
);
6719 dynobj
= htab
->root
.dynobj
;
6720 sdyn
= bfd_get_linker_section (dynobj
, ".dynamic");
6722 if (htab
->root
.dynamic_sections_created
)
6724 Elf64_External_Dyn
*dyncon
, *dynconend
;
6726 if (sdyn
== NULL
|| htab
->root
.sgot
== NULL
)
6729 dyncon
= (Elf64_External_Dyn
*) sdyn
->contents
;
6730 dynconend
= (Elf64_External_Dyn
*) (sdyn
->contents
+ sdyn
->size
);
6731 for (; dyncon
< dynconend
; dyncon
++)
6733 Elf_Internal_Dyn dyn
;
6736 bfd_elf64_swap_dyn_in (dynobj
, dyncon
, &dyn
);
6744 s
= htab
->root
.sgotplt
;
6745 dyn
.d_un
.d_ptr
= s
->output_section
->vma
+ s
->output_offset
;
6749 dyn
.d_un
.d_ptr
= htab
->root
.srelplt
->output_section
->vma
;
6753 s
= htab
->root
.srelplt
->output_section
;
6754 dyn
.d_un
.d_val
= s
->size
;
6758 /* The procedure linkage table relocs (DT_JMPREL) should
6759 not be included in the overall relocs (DT_RELA).
6760 Therefore, we override the DT_RELASZ entry here to
6761 make it not include the JMPREL relocs. Since the
6762 linker script arranges for .rela.plt to follow all
6763 other relocation sections, we don't have to worry
6764 about changing the DT_RELA entry. */
6765 if (htab
->root
.srelplt
!= NULL
)
6767 s
= htab
->root
.srelplt
->output_section
;
6768 dyn
.d_un
.d_val
-= s
->size
;
6772 case DT_TLSDESC_PLT
:
6773 s
= htab
->root
.splt
;
6774 dyn
.d_un
.d_ptr
= s
->output_section
->vma
+ s
->output_offset
6775 + htab
->tlsdesc_plt
;
6778 case DT_TLSDESC_GOT
:
6779 s
= htab
->root
.sgot
;
6780 dyn
.d_un
.d_ptr
= s
->output_section
->vma
+ s
->output_offset
6781 + htab
->dt_tlsdesc_got
;
6785 bfd_elf64_swap_dyn_out (output_bfd
, &dyn
, dyncon
);
6790 /* Fill in the special first entry in the procedure linkage table. */
6791 if (htab
->root
.splt
&& htab
->root
.splt
->size
> 0)
6793 elf64_aarch64_init_small_plt0_entry (output_bfd
, htab
);
6795 elf_section_data (htab
->root
.splt
->output_section
)->
6796 this_hdr
.sh_entsize
= htab
->plt_entry_size
;
6799 if (htab
->tlsdesc_plt
)
6801 bfd_put_64 (output_bfd
, (bfd_vma
) 0,
6802 htab
->root
.sgot
->contents
+ htab
->dt_tlsdesc_got
);
6804 memcpy (htab
->root
.splt
->contents
+ htab
->tlsdesc_plt
,
6805 elf64_aarch64_tlsdesc_small_plt_entry
,
6806 sizeof (elf64_aarch64_tlsdesc_small_plt_entry
));
6809 bfd_vma adrp1_addr
=
6810 htab
->root
.splt
->output_section
->vma
6811 + htab
->root
.splt
->output_offset
+ htab
->tlsdesc_plt
+ 4;
6813 bfd_vma adrp2_addr
=
6814 htab
->root
.splt
->output_section
->vma
6815 + htab
->root
.splt
->output_offset
+ htab
->tlsdesc_plt
+ 8;
6818 htab
->root
.sgot
->output_section
->vma
6819 + htab
->root
.sgot
->output_offset
;
6821 bfd_vma pltgot_addr
=
6822 htab
->root
.sgotplt
->output_section
->vma
6823 + htab
->root
.sgotplt
->output_offset
;
6825 bfd_vma dt_tlsdesc_got
= got_addr
+ htab
->dt_tlsdesc_got
;
6828 /* adrp x2, DT_TLSDESC_GOT */
6829 opcode
= bfd_get_32 (output_bfd
,
6830 htab
->root
.splt
->contents
6831 + htab
->tlsdesc_plt
+ 4);
6832 opcode
= reencode_adr_imm
6833 (opcode
, (PG (dt_tlsdesc_got
) - PG (adrp1_addr
)) >> 12);
6834 bfd_put_32 (output_bfd
, opcode
,
6835 htab
->root
.splt
->contents
+ htab
->tlsdesc_plt
+ 4);
6838 opcode
= bfd_get_32 (output_bfd
,
6839 htab
->root
.splt
->contents
6840 + htab
->tlsdesc_plt
+ 8);
6841 opcode
= reencode_adr_imm
6842 (opcode
, (PG (pltgot_addr
) - PG (adrp2_addr
)) >> 12);
6843 bfd_put_32 (output_bfd
, opcode
,
6844 htab
->root
.splt
->contents
+ htab
->tlsdesc_plt
+ 8);
6846 /* ldr x2, [x2, #0] */
6847 opcode
= bfd_get_32 (output_bfd
,
6848 htab
->root
.splt
->contents
6849 + htab
->tlsdesc_plt
+ 12);
6850 opcode
= reencode_ldst_pos_imm (opcode
,
6851 PG_OFFSET (dt_tlsdesc_got
) >> 3);
6852 bfd_put_32 (output_bfd
, opcode
,
6853 htab
->root
.splt
->contents
+ htab
->tlsdesc_plt
+ 12);
6856 opcode
= bfd_get_32 (output_bfd
,
6857 htab
->root
.splt
->contents
6858 + htab
->tlsdesc_plt
+ 16);
6859 opcode
= reencode_add_imm (opcode
, PG_OFFSET (pltgot_addr
));
6860 bfd_put_32 (output_bfd
, opcode
,
6861 htab
->root
.splt
->contents
+ htab
->tlsdesc_plt
+ 16);
6866 if (htab
->root
.sgotplt
)
6868 if (bfd_is_abs_section (htab
->root
.sgotplt
->output_section
))
6870 (*_bfd_error_handler
)
6871 (_("discarded output section: `%A'"), htab
->root
.sgotplt
);
6875 /* Fill in the first three entries in the global offset table. */
6876 if (htab
->root
.sgotplt
->size
> 0)
6878 /* Set the first entry in the global offset table to the address of
6879 the dynamic section. */
6881 bfd_put_64 (output_bfd
, (bfd_vma
) 0,
6882 htab
->root
.sgotplt
->contents
);
6884 bfd_put_64 (output_bfd
,
6885 sdyn
->output_section
->vma
+ sdyn
->output_offset
,
6886 htab
->root
.sgotplt
->contents
);
6887 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
6888 bfd_put_64 (output_bfd
,
6890 htab
->root
.sgotplt
->contents
+ GOT_ENTRY_SIZE
);
6891 bfd_put_64 (output_bfd
,
6893 htab
->root
.sgotplt
->contents
+ GOT_ENTRY_SIZE
* 2);
6896 elf_section_data (htab
->root
.sgotplt
->output_section
)->
6897 this_hdr
.sh_entsize
= GOT_ENTRY_SIZE
;
6900 if (htab
->root
.sgot
&& htab
->root
.sgot
->size
> 0)
6901 elf_section_data (htab
->root
.sgot
->output_section
)->this_hdr
.sh_entsize
6907 /* Return address for Ith PLT stub in section PLT, for relocation REL
6908 or (bfd_vma) -1 if it should not be included. */
6911 elf64_aarch64_plt_sym_val (bfd_vma i
, const asection
*plt
,
6912 const arelent
*rel ATTRIBUTE_UNUSED
)
6914 return plt
->vma
+ PLT_ENTRY_SIZE
+ i
* PLT_SMALL_ENTRY_SIZE
;
6918 /* We use this so we can override certain functions
6919 (though currently we don't). */
6921 const struct elf_size_info elf64_aarch64_size_info
=
6923 sizeof (Elf64_External_Ehdr
),
6924 sizeof (Elf64_External_Phdr
),
6925 sizeof (Elf64_External_Shdr
),
6926 sizeof (Elf64_External_Rel
),
6927 sizeof (Elf64_External_Rela
),
6928 sizeof (Elf64_External_Sym
),
6929 sizeof (Elf64_External_Dyn
),
6930 sizeof (Elf_External_Note
),
6931 4, /* Hash table entry size. */
6932 1, /* Internal relocs per external relocs. */
6933 64, /* Arch size. */
6934 3, /* Log_file_align. */
6935 ELFCLASS64
, EV_CURRENT
,
6936 bfd_elf64_write_out_phdrs
,
6937 bfd_elf64_write_shdrs_and_ehdr
,
6938 bfd_elf64_checksum_contents
,
6939 bfd_elf64_write_relocs
,
6940 bfd_elf64_swap_symbol_in
,
6941 bfd_elf64_swap_symbol_out
,
6942 bfd_elf64_slurp_reloc_table
,
6943 bfd_elf64_slurp_symbol_table
,
6944 bfd_elf64_swap_dyn_in
,
6945 bfd_elf64_swap_dyn_out
,
6946 bfd_elf64_swap_reloc_in
,
6947 bfd_elf64_swap_reloc_out
,
6948 bfd_elf64_swap_reloca_in
,
6949 bfd_elf64_swap_reloca_out
6952 #define ELF_ARCH bfd_arch_aarch64
6953 #define ELF_MACHINE_CODE EM_AARCH64
6954 #define ELF_MAXPAGESIZE 0x10000
6955 #define ELF_MINPAGESIZE 0x1000
6956 #define ELF_COMMONPAGESIZE 0x1000
6958 #define bfd_elf64_close_and_cleanup \
6959 elf64_aarch64_close_and_cleanup
6961 #define bfd_elf64_bfd_copy_private_bfd_data \
6962 elf64_aarch64_copy_private_bfd_data
6964 #define bfd_elf64_bfd_free_cached_info \
6965 elf64_aarch64_bfd_free_cached_info
6967 #define bfd_elf64_bfd_is_target_special_symbol \
6968 elf64_aarch64_is_target_special_symbol
6970 #define bfd_elf64_bfd_link_hash_table_create \
6971 elf64_aarch64_link_hash_table_create
6973 #define bfd_elf64_bfd_link_hash_table_free \
6974 elf64_aarch64_hash_table_free
6976 #define bfd_elf64_bfd_merge_private_bfd_data \
6977 elf64_aarch64_merge_private_bfd_data
6979 #define bfd_elf64_bfd_print_private_bfd_data \
6980 elf64_aarch64_print_private_bfd_data
6982 #define bfd_elf64_bfd_reloc_type_lookup \
6983 elf64_aarch64_reloc_type_lookup
6985 #define bfd_elf64_bfd_reloc_name_lookup \
6986 elf64_aarch64_reloc_name_lookup
6988 #define bfd_elf64_bfd_set_private_flags \
6989 elf64_aarch64_set_private_flags
6991 #define bfd_elf64_find_inliner_info \
6992 elf64_aarch64_find_inliner_info
6994 #define bfd_elf64_find_nearest_line \
6995 elf64_aarch64_find_nearest_line
6997 #define bfd_elf64_mkobject \
6998 elf64_aarch64_mkobject
7000 #define bfd_elf64_new_section_hook \
7001 elf64_aarch64_new_section_hook
7003 #define elf_backend_adjust_dynamic_symbol \
7004 elf64_aarch64_adjust_dynamic_symbol
7006 #define elf_backend_always_size_sections \
7007 elf64_aarch64_always_size_sections
7009 #define elf_backend_check_relocs \
7010 elf64_aarch64_check_relocs
7012 #define elf_backend_copy_indirect_symbol \
7013 elf64_aarch64_copy_indirect_symbol
7015 /* Create .dynbss, and .rela.bss sections in DYNOBJ, and set up shortcuts
7016 to them in our hash. */
7017 #define elf_backend_create_dynamic_sections \
7018 elf64_aarch64_create_dynamic_sections
7020 #define elf_backend_init_index_section \
7021 _bfd_elf_init_2_index_sections
7023 #define elf_backend_is_function_type \
7024 elf64_aarch64_is_function_type
7026 #define elf_backend_finish_dynamic_sections \
7027 elf64_aarch64_finish_dynamic_sections
7029 #define elf_backend_finish_dynamic_symbol \
7030 elf64_aarch64_finish_dynamic_symbol
7032 #define elf_backend_gc_sweep_hook \
7033 elf64_aarch64_gc_sweep_hook
7035 #define elf_backend_object_p \
7036 elf64_aarch64_object_p
7038 #define elf_backend_output_arch_local_syms \
7039 elf64_aarch64_output_arch_local_syms
7041 #define elf_backend_plt_sym_val \
7042 elf64_aarch64_plt_sym_val
7044 #define elf_backend_post_process_headers \
7045 elf64_aarch64_post_process_headers
7047 #define elf_backend_relocate_section \
7048 elf64_aarch64_relocate_section
7050 #define elf_backend_reloc_type_class \
7051 elf64_aarch64_reloc_type_class
7053 #define elf_backend_section_flags \
7054 elf64_aarch64_section_flags
7056 #define elf_backend_section_from_shdr \
7057 elf64_aarch64_section_from_shdr
7059 #define elf_backend_size_dynamic_sections \
7060 elf64_aarch64_size_dynamic_sections
7062 #define elf_backend_size_info \
7063 elf64_aarch64_size_info
7065 #define elf_backend_can_refcount 1
7066 #define elf_backend_can_gc_sections 0
7067 #define elf_backend_plt_readonly 1
7068 #define elf_backend_want_got_plt 1
7069 #define elf_backend_want_plt_sym 0
7070 #define elf_backend_may_use_rel_p 0
7071 #define elf_backend_may_use_rela_p 1
7072 #define elf_backend_default_use_rela_p 1
7073 #define elf_backend_got_header_size (GOT_ENTRY_SIZE * 3)
7075 #undef elf_backend_obj_attrs_section
7076 #define elf_backend_obj_attrs_section ".ARM.attributes"
7078 #include "elf64-target.h"